licenses
sequencelengths 1
3
| version
stringclasses 636
values | tree_hash
stringlengths 40
40
| path
stringlengths 5
135
| type
stringclasses 2
values | size
stringlengths 2
8
| text
stringlengths 25
67.1M
| package_name
stringlengths 2
41
| repo
stringlengths 33
86
|
---|---|---|---|---|---|---|---|---|
[
"MPL-2.0"
] | 0.2.2 | 3372f4dfa2499b0aa0b478a5082aff34915532e8 | docs/src/reference.md | docs | 118 | # Reference
## Index
```@index
```
```@autodocs
Modules = [KrylovPreconditioners]
Order = [:function, :type]
```
| KrylovPreconditioners | https://github.com/JuliaSmoothOptimizers/KrylovPreconditioners.jl.git |
[
"MIT"
] | 1.6.0 | 2bb2fd8988fa976a3e057bbe6197414d77d5e29d | docs/make.jl | code | 1109 | using CANalyze
using Documenter
DocMeta.setdocmeta!(CANalyze, :DocTestSetup, :(using CANalyze); recursive=true)
makedocs(;
    modules=[CANalyze],
    authors="Tim Lucas Sabelmann",
    repo="https://github.com/tsabelmann/CANalyze.jl/blob/{commit}{path}#{line}",
    # NOTE(review): was "CANTools.jl", which matches neither the package
    # (CANalyze) nor any URL in this configuration — almost certainly a
    # copy-paste leftover from another project.
    sitename="CANalyze.jl",
    format=Documenter.HTML(;
        # Pretty URLs only on CI; locally the plain file layout is easier to
        # browse.
        prettyurls=get(ENV, "CI", "false") == "true",
        canonical="https://tsabelmann.github.io/CANalyze.jl",
        assets=String[],
    ),
    pages=[
        "Home" => "index.md",
        "Usage" => [
            "Signal" => "examples/signal.md",
            "Message" => "examples/message.md",
            "Database" => "examples/database.md",
            "Decode" => "examples/decode.md"
        ],
        "Documentation" => [
            "Frames" => "frames.md",
            "Utils" => "utils.md",
            "Signals" => "signals.md",
            "Messages" => "messages.md",
            "Decode" => "decode.md",
            "Encode" => "encode.md"
        ]
    ],
)
# Deploy the generated site; documentation is built from the `main`
# development branch into the `build` directory.
deploydocs(;
    repo="github.com/tsabelmann/CANalyze.jl",
    devbranch="main",
    target="build"
)
| CANalyze | https://github.com/tsabelmann/CANalyze.jl.git |
[
"MIT"
] | 1.6.0 | 2bb2fd8988fa976a3e057bbe6197414d77d5e29d | examples/database_1.jl | code | 736 | using CANalyze.Signals
using CANalyze.Messages
using CANalyze.Databases
signal1 = Signals.NamedSignal("ABC", nothing, nothing, Signals.Float32Signal(start=0, byte_order=:little_endian))
signal2 = Signals.NamedSignal("ABCD", nothing, nothing, Signals.Unsigned(start=40, length=17, factor=2, offset=20, byte_order=:big_endian))
signal3 = Signals.NamedSignal("ABCDE", nothing, nothing, Signals.Unsigned(start=32, length=8, factor=2, offset=20, byte_order=:little_endian))
m1 = Messages.Message(0x1FE, 8, "ABC", signal1; strict=true)
m2 = Messages.Message(0x1FF, 8, "ABD", signal1, signal2, signal3; strict=true)
d = Databases.Database(m1, m2)
# println(d)
m = d["ABC"]
println(m)
m = d[0x1FF]
println(m)
m = get(d, 0x1FA)
println(m)
| CANalyze | https://github.com/tsabelmann/CANalyze.jl.git |
[
"MIT"
] | 1.6.0 | 2bb2fd8988fa976a3e057bbe6197414d77d5e29d | examples/message_1.jl | code | 867 | using CANalyze.Decode
using CANalyze.Frames
using CANalyze.Signals
using CANalyze.Messages
signal1 = Signals.NamedSignal("ABC", nothing, nothing, Signals.Float32Signal(start=0, byte_order=:little_endian))
signal2 = Signals.NamedSignal("ABCD", nothing, nothing, Signals.Unsigned(start=40, length=17, factor=2, offset=20, byte_order=:big_endian))
signal3 = Signals.NamedSignal("ABCDE", nothing, nothing, Signals.Unsigned(start=32, length=8, factor=2, offset=20, byte_order=:little_endian))
bits1 = Signals.Bits(signal1)
println(sort(Int64[bits1.bits...]))
bits1 = Signals.Bits(signal2)
println(sort(Int64[bits1.bits...]))
bits1 = Signals.Bits(signal3)
println(sort(Int64[bits1.bits...]))
frame = Frames.CANFrame(20, [1, 2, 0xFD, 4, 5, 6, 7, 8])
m = Messages.Message(0x1FF, 8, "ABC", signal1, signal2, signal3; strict=true)
d = Decode.decode(m, frame)
println(d)
| CANalyze | https://github.com/tsabelmann/CANalyze.jl.git |
[
"MIT"
] | 1.6.0 | 2bb2fd8988fa976a3e057bbe6197414d77d5e29d | examples/signal_1.jl | code | 1628 | using CANalyze.Decode
using CANalyze.Frames
using CANalyze.Signals
frame = Frames.CANFrame(20, [1, 2, 0xFD, 4, 5, 6, 7, 8])
sig1 = Signals.Unsigned{Float32}(0, 1)
sig2 = Signals.Unsigned{Float64}(start=0, length=8, factor=2, offset=20)
sig3 = Signals.Unsigned(0, 8, 1, 0, :little_endian)
sig4 = Signals.Unsigned(start=0, length=8, factor=1.0, offset=-1337f0, byte_order=:little_endian)
sig5 = Signals.Signed{Float32}(0, 1)
sig6 = Signals.Signed{Float64}(start=3, length=16, factor=2, offset=20, byte_order=:big_endian)
sig7 = Signals.Signed(0, 8, 1, 0, :little_endian)
sig8 = Signals.Signed(start=0, length=8, factor=1.0, offset=-1337f0, byte_order=:little_endian)
sig9 = Signals.Raw(0, 8, :big_endian)
sig10 = Signals.Raw(start=21, length=7, byte_order=:little_endian)
println(sig1)
println(sig2)
println(sig3)
println(sig4)
println(sig5)
println(sig6)
println(sig7)
println(sig8)
println(sig9)
println(sig10)
# signal = Signals.Float32Signal(start=7, factor=1.0f0, offset=0.0f0, byte_order=:big_endian)
bits = Signals.Bits(sig6)
println(bits)
# value = Decode.decode(signal,frame)
# hex = reinterpret(UInt8, [value])
# println(value)
# println(hex)
#
# signal = Signals.Float32Signal(start=0, byte_order=:little_endian)
# value = Decode.decode(signal,frame)
# hex = reinterpret(UInt8, [value])
# println(value)
# println(hex)
# println(Signals.overlap(sig1,sig5))
s = Signals.Float32Signal(start=0, byte_order=:little_endian)
signal = Signals.NamedSignal("ABC", nothing, nothing, s)
# println(signal)
# value = Decode.decode(signal,frame)
# hex = reinterpret(UInt8, [value])
# println(value)
# println(hex)
| CANalyze | https://github.com/tsabelmann/CANalyze.jl.git |
[
"MIT"
] | 1.6.0 | 2bb2fd8988fa976a3e057bbe6197414d77d5e29d | src/CANalyze.jl | code | 309 | module CANalyze
# Submodules are included bottom-up; later files import the earlier ones
# (e.g. Decode does `import ..Utils`, `..Frames`, `..Signals`, `..Messages`),
# so this order matters.
include("Utils.jl")
using .Utils
include("Frames.jl")
using .Frames
include("Signals.jl")
using .Signals
include("Messages.jl")
using .Messages
include("Databases.jl")
using .Databases
include("Decode.jl")
using .Decode
include("Encode.jl")
using .Encode
include("IO.jl")
using .IO
end
| CANalyze | https://github.com/tsabelmann/CANalyze.jl.git |
[
"MIT"
] | 1.6.0 | 2bb2fd8988fa976a3e057bbe6197414d77d5e29d | src/Databases.jl | code | 2243 | module Databases
import Base
import ..Messages
"""
    Database

An in-memory lookup structure for `Messages.Message`s, indexed both by CAN
frame id and by message name. Both indices store one shared `Ref` per
message.

# Throws
- `DomainError`: two messages share a name or a frame id
"""
struct Database
    frame_id_index::Dict{UInt32, Ref{Messages.Message}}
    name_index::Dict{String, Ref{Messages.Message}}
    function Database(messages::Set{Messages.Message})
        v = [messages...]
        e = enumerate(v)
        # Pairwise duplicate detection (O(n^2)) over distinct message pairs.
        l1 = [Messages.name(m1) == Messages.name(m2) for (i, m1) in e for (j, m2) in e if i < j]
        l2 = [Messages.frame_id(m1) == Messages.frame_id(m2) for (i, m1) in e for (j, m2) in e if i < j]
        a1 = any(l1)
        a2 = any(l2)
        if a1
            throw(DomainError(a1, "messages with the same name"))
        end
        if a2
            throw(DomainError(a2, "messages with the same frame_id"))
        end
        frame_id_index = Dict{UInt32, Ref{Messages.Message}}()
        name_index = Dict{String, Ref{Messages.Message}}()
        for message in messages
            # The same Ref is shared by both indices.
            m = Ref(message)
            frame_id_index[Messages.frame_id(message)] = m
            name_index[Messages.name(message)] = m
        end
        new(frame_id_index, name_index)
    end
end
"""
    Database(messages::Messages.Message...)

Construct a [`Database`](@ref) from individual messages; duplicates collapse
via the intermediate `Set`.
"""
function Database(messages::Messages.Message...)
    return Database(Set(messages))
end
"""
    Base.getindex(db::Database, index::String)

Return the message stored under the name `index`; throws `KeyError` when
absent.
"""
function Base.getindex(db::Database, index::String)
    return db.name_index[index][]
end
"""
    Base.getindex(db::Database, index::UInt32)

Return the message stored under the frame id `index`; throws `KeyError` when
absent.
"""
function Base.getindex(db::Database, index::UInt32)
    return db.frame_id_index[index][]
end
"""
    Base.getindex(db::Database, index::Integer)

Convert `index` to `UInt32` and look the message up by frame id.
"""
function Base.getindex(db::Database, index::Integer)
    return db[convert(UInt32, index)]
end
"""
    Base.get(db::Database, key::String, default=nothing)

Return the message named `key`, or `default` when no such message exists.
"""
function Base.get(db::Database, key::String, default=nothing)
    # Direct dictionary lookup instead of the previous try/catch-based
    # control flow (catching KeyError is slower and hides real errors).
    m_ref = get(db.name_index, key, nothing)
    return m_ref === nothing ? default : m_ref[]
end
"""
    Base.get(db::Database, key::UInt32, default=nothing)

Return the message with frame id `key`, or `default` when no such message
exists.
"""
function Base.get(db::Database, key::UInt32, default=nothing)
    # Direct dictionary lookup instead of the previous try/catch-based
    # control flow (catching KeyError is slower and hides real errors).
    m_ref = get(db.frame_id_index, key, nothing)
    return m_ref === nothing ? default : m_ref[]
end
"""
    Base.get(db::Database, key::Integer, default=nothing)

Convert `key` to `UInt32` and delegate to the frame-id lookup.
"""
function Base.get(db::Database, key::Integer, default=nothing)
    return get(db, convert(UInt32, key), default)
end
export Database
end
| CANalyze | https://github.com/tsabelmann/CANalyze.jl.git |
[
"MIT"
] | 1.6.0 | 2bb2fd8988fa976a3e057bbe6197414d77d5e29d | src/Decode.jl | code | 6387 | module Decode
import ..Utils
import ..Frames
import ..Signals
import ..Messages
"""
    decode(signal::Signals.NamedSignal{T}, can_frame::Frames.CANFrame) -> Union{Nothing,T}

Decode the wrapped unnamed signal from `can_frame`. If decoding throws
(e.g. the frame does not contain the required bits), the signal's configured
default value is returned instead of propagating the error.
"""
function decode(signal::Signals.NamedSignal{T},
                can_frame::Frames.CANFrame)::Union{Nothing,T} where {T}
    try
        sig = Signals.signal(signal)
        return decode(sig, can_frame)
    catch
        # Deliberate catch-all: any decode failure falls back to the default
        # (which may itself be `nothing`).
        return Signals.default(signal)
    end
end
"""
    decode(signal::Signals.UnnamedSignal{T}, can_frame::Frames.CANFrame, default::D) -> Union{T,D}

Decode `signal` from `can_frame`; if decoding throws, return `default`
instead of propagating the error.
"""
function decode(signal::Signals.UnnamedSignal{T}, can_frame::Frames.CANFrame,
                default::D)::Union{T,D} where {T,D}
    try
        return decode(signal, can_frame)
    catch
        # Deliberate catch-all: the caller-supplied default absorbs any
        # decode failure.
        return default
    end
end
"""
    decode(signal::Signals.Bit, can_frame::Frames.CANFrame) -> Bool

Decode a single-bit signal from `can_frame`.

# Throws
- `DomainError`: the frame payload does not cover the signal's bit position
"""
function decode(signal::Signals.Bit, can_frame::Frames.CANFrame)::Bool
    bit_position = Signals.start(signal)
    # Guard clause: the frame must contain the addressed bit.
    if bit_position >= 8 * Frames.dlc(can_frame)
        throw(DomainError(bit_position, "CANFrame does not have data at bit position"))
    end
    selector = Utils.mask(UInt64, 1, bit_position)
    payload = Utils.from_bytes(UInt64, Frames.data(can_frame))
    return payload & selector != 0
end
"""
    decode(signal::Signals.Unsigned{T}, can_frame::Frames.CANFrame)

Decode an unsigned integer signal from `can_frame`, returning
`T(raw) * factor + offset`.

# Throws
- `DomainError`: the frame payload does not cover all of the signal's bits,
  or the byte order is neither `:little_endian` nor `:big_endian`
"""
function decode(signal::Signals.Unsigned{T}, can_frame::Frames.CANFrame) where {T}
    start = Signals.start(signal)
    length = Signals.length(signal)
    factor = Signals.factor(signal)
    offset = Signals.offset(signal)
    byte_order = Signals.byte_order(signal)
    if byte_order == :little_endian
        end_bit = start + length - 1
        if end_bit >= 8 * Frames.dlc(can_frame)
            throw(DomainError(end_bit, "The bit($end_bit) cannot be selected"))
        end
        # Interpret the payload as one little-endian word and shift the
        # signal's least significant bit down to position 0.
        value = Utils.from_bytes(UInt64, Frames.data(can_frame))
        value = value >> start
    elseif byte_order == :big_endian
        # Mirror the bit index inside its byte (bit 0 <-> bit 7), then work
        # on the byte-reversed payload so a single right-shift extracts the
        # field.
        start_bit_in_byte = start % 8
        start_byte = div(start, 8)
        start = 8*start_byte + (7 - start_bit_in_byte)
        new_shift = Int64(8*Frames.dlc(can_frame)) - Int64(start) - Int64(length)
        if new_shift < 0
            throw(DomainError(new_shift, "The bits cannot be selected"))
        end
        value = Utils.from_bytes(UInt64, reverse(Frames.data(can_frame)))
        value = value >> new_shift
    else
        throw(DomainError(byte_order, "Byte order not supported"))
    end
    # Keep only the signal's `length` low bits, then apply scaling.
    value = value & Utils.mask(UInt64, length)
    result = T(value) * factor + offset
    return result
end
"""
    decode(signal::Signals.Signed{T}, can_frame::Frames.CANFrame)

Decode a two's-complement signed integer signal from `can_frame`, returning
`T(raw) * factor + offset`. Bit selection mirrors the unsigned decoder but
uses `Int64` and sign-extends the extracted field.

# Throws
- `DomainError`: the frame payload does not cover all of the signal's bits,
  or the byte order is neither `:little_endian` nor `:big_endian`
"""
function decode(signal::Signals.Signed{T}, can_frame::Frames.CANFrame) where {T}
    start = Signals.start(signal)
    length = Signals.length(signal)
    factor = Signals.factor(signal)
    offset = Signals.offset(signal)
    byte_order = Signals.byte_order(signal)
    if byte_order == :little_endian
        end_bit = start + length - 1
        if end_bit >= 8 * Frames.dlc(can_frame)
            throw(DomainError(end_bit, "The bit($end_bit) cannot be selected"))
        end
        value = Utils.from_bytes(Int64, Frames.data(can_frame))
        value = value >> start
    elseif byte_order == :big_endian
        # Mirror the bit index inside its byte, then shift on the
        # byte-reversed payload (same scheme as the unsigned decoder).
        start_bit_in_byte = start % 8
        start_byte = div(start, 8)
        start = 8*start_byte + (7 - start_bit_in_byte)
        new_shift = Int64(8*Frames.dlc(can_frame)) - Int64(start) - Int64(length)
        if new_shift < 0
            throw(DomainError(new_shift, "The bits cannot be selected"))
        end
        value = Utils.from_bytes(Int64, reverse(Frames.data(can_frame)))
        value = value >> new_shift
    else
        throw(DomainError(byte_order, "Byte order not supported"))
    end
    value = value & Utils.mask(Int64, length)
    # sign extend value if most-significant bit is 1
    if (value >> (length - 1)) & 0x01 != 0
        value = value + ~Utils.mask(Int64, length)
    end
    result = T(value) * factor + offset
    return result
end
"""
    decode(signal::Signals.FloatSignal{T}, can_frame::Frames.CANFrame)

Decode a floating-point signal from `can_frame`: the selected bits are
reinterpreted as the IEEE representation of `T`, then scaled as
`factor * value + offset`.

# Throws
- `DomainError`: the frame payload does not cover all of the signal's bits,
  or the byte order is neither `:little_endian` nor `:big_endian`
"""
function decode(signal::Signals.FloatSignal{T}, can_frame::Frames.CANFrame) where {T}
    start = Signals.start(signal)
    length = Signals.length(signal)
    factor = Signals.factor(signal)
    offset = Signals.offset(signal)
    byte_order = Signals.byte_order(signal)
    if byte_order == :little_endian
        end_bit = start + length - 1
        if end_bit >= 8 * Frames.dlc(can_frame)
            throw(DomainError(end_bit, "The bit($end_bit) cannot be selected"))
        end
        value = Utils.from_bytes(UInt64, Frames.data(can_frame))
        value = value >> start
    elseif byte_order == :big_endian
        # Mirror the bit index inside its byte, then shift on the
        # byte-reversed payload (same scheme as the integer decoders).
        start_bit_in_byte = start % 8
        start_byte = div(start, 8)
        start = 8*start_byte + (7 - start_bit_in_byte)
        new_shift = Int64(8*Frames.dlc(can_frame)) - Int64(start) - Int64(length)
        if new_shift < 0
            throw(DomainError(new_shift, "The bits cannot be selected"))
        end
        value = Utils.from_bytes(UInt64, reverse(Frames.data(can_frame)))
        value = value >> new_shift
    else
        throw(DomainError(byte_order, "Byte order not supported"))
    end
    value = value & Utils.mask(UInt64, length)
    # Reinterpret the raw bit pattern as a value of type T.
    result = Utils.from_bytes(T, Utils.to_bytes(value))
    result = factor * result + offset
    return result
end
"""
    decode(signal::Signals.Raw, can_frame::Frames.CANFrame) -> UInt64

Decode a raw (unscaled) bit field from `can_frame` as a `UInt64`.

# Throws
- `DomainError`: the frame payload does not cover all of the signal's bits,
  or the byte order is neither `:little_endian` nor `:big_endian`
"""
function decode(signal::Signals.Raw, can_frame::Frames.CANFrame)::UInt64
    start = Signals.start(signal)
    length = Signals.length(signal)
    byte_order = Signals.byte_order(signal)
    if byte_order == :little_endian
        end_bit = start + length - 1
        if end_bit >= 8 * Frames.dlc(can_frame)
            throw(DomainError(end_bit, "The bit($end_bit) cannot be selected"))
        end
        value = Utils.from_bytes(UInt64, Frames.data(can_frame))
        value = value >> start
    elseif byte_order == :big_endian
        # Mirror the bit index inside its byte, then shift on the
        # byte-reversed payload (same scheme as the other decoders).
        start_bit_in_byte = start % 8
        start_byte = div(start, 8)
        start = 8*start_byte + (7 - start_bit_in_byte)
        # NOTE(review): the Int16 annotation differs from the other decode
        # methods (plain Int64 there); harmless for <= 64-byte frames but
        # inconsistent.
        new_shift::Int16 = Int64(8*Frames.dlc(can_frame)) - Int64(start) - Int64(length)
        if new_shift < 0
            throw(DomainError(new_shift, "The bits cannot be selected"))
        end
        value = Utils.from_bytes(UInt64, reverse(Frames.data(can_frame)))
        value = value >> new_shift
    else
        throw(DomainError(byte_order, "Byte order not supported"))
    end
    result = value & Utils.mask(UInt64, length)
    return UInt64(result)
end
"""
    decode(message::Messages.Message, can_frame::Frames.CANFrame) -> Dict{String,Any}

Decode every signal of `message` from `can_frame`, returning a dictionary
mapping signal names to their decoded values.
"""
function decode(message::Messages.Message, can_frame::Frames.CANFrame)
    # Message iteration yields name => signal pairs (its signals dict is
    # Dict{String, NamedSignal}), so key the result concretely instead of
    # using an untyped Dict().
    decoded = Dict{String,Any}()
    for (signal_name, signal) in message
        decoded[signal_name] = decode(signal, can_frame)
    end
    return decoded
end
export decode
end
| CANalyze | https://github.com/tsabelmann/CANalyze.jl.git |
[
"MIT"
] | 1.6.0 | 2bb2fd8988fa976a3e057bbe6197414d77d5e29d | src/Encode.jl | code | 92 | """
"""
module Encode
import ..Utils
import ..Frames
import ..Signals
import ..Messages
end
| CANalyze | https://github.com/tsabelmann/CANalyze.jl.git |
[
"MIT"
] | 1.6.0 | 2bb2fd8988fa976a3e057bbe6197414d77d5e29d | src/Frames.jl | code | 3109 | module Frames
import Base
"""
"""
abstract type AbstractCANFrame end
"""
    CANFrame

A classic CAN frame: a frame identifier, up to 8 payload bytes, and a flag
marking the identifier as extended (29-bit) or standard (11-bit).

# Throws
- `DomainError`: `data` holds more than 8 bytes
"""
mutable struct CANFrame <: AbstractCANFrame
    frame_id::UInt32
    data::Array{UInt8,1}
    is_extended::Bool
    function CANFrame(frame_id::UInt32, data::AbstractArray{UInt8};
                      is_extended::Bool=false)
        # Classic CAN payloads are limited to 8 bytes.
        if length(data) > 8
            throw(DomainError(data, "CANFrame allows a maximum of 8 bytes"))
        end
        return new(frame_id, data, is_extended)
    end
end
"""
    CANFrame(frame_id::Integer, data; is_extended=false)

Construct a `CANFrame` from any integer id and any integer-eltype array; the
id is converted to `UInt32` and the payload to a `Vector{UInt8}`.
"""
function CANFrame(frame_id::Integer, data::A; is_extended::Bool=false) where
    {A <: AbstractArray{<:Integer}}
    payload = collect(UInt8, data)
    return CANFrame(convert(UInt32, frame_id), payload; is_extended=is_extended)
end
"""
    CANFrame(frame_id::Integer, data::Integer...; is_extended=false)

Construct a `CANFrame` from an integer id and individual byte values.
"""
function CANFrame(frame_id::Integer, data::Integer...; is_extended::Bool=false)
    payload = UInt8[byte for byte in data]
    return CANFrame(convert(UInt32, frame_id), payload; is_extended=is_extended)
end
"""
    CANFrame(frame_id::Integer; is_extended=false)

Construct a `CANFrame` with an empty payload.
"""
function CANFrame(frame_id::Integer; is_extended=false)
    return CANFrame(convert(UInt32, frame_id), Vector{UInt8}(); is_extended=is_extended)
end
"""
    CANFdFrame

A CAN FD frame: a frame identifier, up to 64 payload bytes, and a flag
marking the identifier as extended.

# Throws
- `DomainError`: `data` holds more than 64 bytes
"""
mutable struct CANFdFrame <: AbstractCANFrame
    frame_id::UInt32
    data::Array{UInt8,1}
    is_extended::Bool
    # Inner constructor enforcing the 64-byte CAN FD payload limit.
    function CANFdFrame(frame_id::UInt32, data::A; is_extended::Bool=false) where
        {A <: AbstractArray{UInt8}}
        if length(data) > 64
            throw(DomainError(data, "CANFdFrame allows a maximum of 64 bytes"))
        end
        return new(frame_id, data, is_extended)
    end
end
"""
"""
function CANFdFrame(frame_id::Integer, data::A; is_extended::Bool=false) where
{A <: AbstractArray{<:Integer}}
return CANFdFrame(convert(UInt32, frame_id), UInt8[data...]; is_extended)
end
"""
"""
function CANFdFrame(frame_id::Integer, data::Integer...; is_extended::Bool=false)
return CANFdFrame(convert(UInt32, frame_id), UInt8[data...]; is_extended)
end
"""
    ==(lhs::AbstractCANFrame, rhs::AbstractCANFrame)

Fallback equality: frames of different concrete types compare unequal.
More specific methods (e.g. for two `CANFrame`s) override this.
"""
function Base.:(==)(lhs::AbstractCANFrame, rhs::AbstractCANFrame)::Bool
    return false
end
"""
    ==(lhs::CANFrame, rhs::CANFrame)

Two `CANFrame`s are equal when their (masked) identifiers, payloads, and
extended flags all agree.
"""
function Base.:(==)(lhs::CANFrame, rhs::CANFrame)::Bool
    return frame_id(lhs) == frame_id(rhs) &&
           data(lhs) == data(rhs) &&
           is_extended(lhs) == is_extended(rhs)
end
"""
    frame_id(frame::AbstractCANFrame) -> UInt32

Return the frame identifier, masked to 29 bits for extended frames and to
11 bits for standard frames.
"""
function frame_id(frame::AbstractCANFrame)::UInt32
    if is_extended(frame)
        # 29-bit extended identifier.
        return frame.frame_id & 0x1F_FF_FF_FF
    else
        # 11-bit standard identifier.
        return frame.frame_id & 0x7_FF
    end
end
"""
"""
function data(frame::AbstractCANFrame)::Array{UInt8,1}
return frame.data
end
"""
"""
function dlc(frame::AbstractCANFrame)::UInt8
return length(frame.data)
end
"""
"""
function is_standard(frame::AbstractCANFrame)::Bool
return !frame.is_extended
end
"""
"""
function is_extended(frame::AbstractCANFrame)::Bool
return frame.is_extended
end
"""
"""
function max_size(::Type{AbstractCANFrame})::UInt8
return 8
end
"""
"""
function max_size(::Type{CANFrame})::UInt8
return 8
end
"""
"""
function max_size(::Type{CANFdFrame})::UInt8
return 64
end
export CANFdFrame, CANFrame
export frame_id, data, dlc, is_extended, is_standard, max_size
end
| CANalyze | https://github.com/tsabelmann/CANalyze.jl.git |
[
"MIT"
] | 1.6.0 | 2bb2fd8988fa976a3e057bbe6197414d77d5e29d | src/Messages.jl | code | 4053 | """The module provides the Message type that bundles signals.
"""
module Messages
import Base
import ..Signals
"""
Message
Messages model bundles of signals and enable the decoding of multiple signals. Additionally,
messages are defined using the number of bytes (`dlc`), a message name (`name`), and the
internal signals (`signals`).
# Fields
- `dlc::UInt8`: the number of required bytes
- `name::String`: the name of the message
- `signals::Dict{String, Signals.NamedSignal}`: a mapping of string
"""
mutable struct Message
    frame_id::UInt32
    dlc::UInt8
    name::String
    signals::Dict{String, Signals.NamedSignal}
    # Inner constructor; with `strict=true` it additionally rejects
    # overlapping signals and signals that do not fit into `dlc` bytes.
    function Message(frame_id::UInt32, dlc::UInt8, name::String,
                     signals::Dict{String, Signals.NamedSignal}; strict::Bool=false)
        if name == ""
            throw(DomainError(name, "name cannot be the empty string"))
        end
        if strict
            e1 = enumerate(values(signals))
            # Pairwise overlap test (O(n^2)) over all distinct signal pairs.
            l = [Signals.overlap(v1,v2) for (i,v1) in e1 for (j,v2) in e1 if i < j]
            do_overlap = any(l)
            if do_overlap
                throw(DomainError(do_overlap, "signals overlap"))
            end
            # Every signal must fit into the message's dlc bytes.
            for signal in values(signals)
                is_ok = Signals.check(signal, dlc)
                if !is_ok
                    throw(DomainError(is_ok, "not enough data"))
                end
            end
        end
        return new(frame_id, dlc, name, signals)
    end
end
"""
    Message(frame_id::Integer, dlc::Integer, name::String,
            signals::Signals.NamedSignal...; strict::Bool=false)

Convenience constructor building the internal name => signal dictionary from
individual signals.

# Throws
- `DomainError`: two signals share the same name
"""
function Message(frame_id::Integer, dlc::Integer, name::String,
                 signals::Signals.NamedSignal...; strict::Bool=false)
    frame_id = convert(UInt32, frame_id)
    dlc = convert(UInt8, dlc)
    sigs = Dict{String, Signals.NamedSignal}()
    for signal in signals
        signal_name = Signals.name(signal)
        # `haskey` replaces the previous `get(...) != nothing` anti-idiom.
        if haskey(sigs, signal_name)
            throw(DomainError(signal_name, "signal with same name already defined"))
        end
        sigs[signal_name] = signal
    end
    return Message(frame_id, dlc, name, sigs; strict=strict)
end
"""
    frame_id(message::Message) -> UInt32

Return the message's frame id, masked to the 29 bits a CAN extended
identifier can occupy.
"""
function frame_id(message::Message)::UInt32
    # NOTE(review): previously masked with 0x7F_FF_FF_FF (31 bits), which is
    # inconsistent with Frames.frame_id — extended CAN identifiers are at
    # most 29 bits (0x1F_FF_FF_FF there).
    return message.frame_id & 0x1F_FF_FF_FF
end
"""
dlc(message::Message) -> UInt8
Returns the number of bytes that the message requires and operates on.
# Arguments
- `message::Message`: the message
"""
function dlc(message::Message)::UInt8
return message.dlc
end
"""
name(message::Message) -> String
Returns the message name.
# Arguments
- `message::Message`: the message
"""
function name(message::Message)::String
return message.name
end
"""
Base.getindex(message::Message, index::String) -> Signals.NamedSignal
Returns the signal with the name `index` inside `message`.
# Arguments
- `message::Message`: the message
- `index::String`: the index, i.e., the name of the signal we want to retrieve
# Throws
- `KeyError`: the signal with the name `index` does not exist inside `message`
"""
function Base.getindex(message::Message, index::String)::Signals.NamedSignal
return message.signals[index]
end
"""
Base.get(message::Message, key::String, default)
Returns the signal with the name `key` inside `message` if a signal with such a name
exists, otherwise we return `default`.
# Arguments
- `message::Message`: the message
- `key::String`: the index, i.e., the name of the signal we want to retrieve
- `default`: a default value
"""
function Base.get(message::Message, key::String, default)
return get(message.signals, key, default)
end
"""
Base.iterate(iter::Message)
Enables the iteration over the inside dictionary `signals`.
# Arguments
- `iter::Message`: the message
"""
function Base.iterate(iter::Message)
return iterate(iter.signals)
end
"""
Base.iterate(iter::Message, state)
Enables the iteration over the inside dictionary `signals`.
# Arguments
- `iter::Message`: the message
- `state`: the state of the iterator
"""
function Base.iterate(iter::Message, state)
return iterate(iter.signals, state)
end
export Message, frame_id, dlc, name
end
| CANalyze | https://github.com/tsabelmann/CANalyze.jl.git |
[
"MIT"
] | 1.6.0 | 2bb2fd8988fa976a3e057bbe6197414d77d5e29d | src/Signals.jl | code | 14521 | """The module provides signals, a mechanism that models data retrievable from or
written to CAN-bus data. A signal models one data entity, e.g., one variable inside the
CAN-bus data.
"""
module Signals
import Base
"""
"""
abstract type AbstractSignal{T} end
"""
"""
abstract type UnnamedSignal{T} <: AbstractSignal{T} end
"""
"""
abstract type AbstractIntegerSignal{T <: Integer} <: UnnamedSignal{T} end
"""
"""
abstract type AbstractFloatSignal{T <: AbstractFloat} <: UnnamedSignal{T} end
"""
"""
struct Bit <: AbstractIntegerSignal{Bool}
start::UInt16
end
"""
"""
function Bit(start::Integer)
start = convert(UInt16, start)
return Bit(start)
end
"""
"""
function Bit(; start::Integer=0)
return Bit(start)
end
"""
"""
function start(signal::Bit)::UInt16
return signal.start
end
"""
"""
function Base.length(signal::Bit)::UInt16
return 1
end
"""
"""
function byte_order(signal::Bit)::Symbol
return :little_endian
end
"""
"""
function Base.:(==)(lhs::Bit, rhs::Bit)
return start(lhs) == start(rhs)
end
"""
"""
struct Unsigned{T} <: AbstractFloatSignal{T}
start::UInt16
length::UInt16
factor::T
offset::T
byte_order::Symbol
"""
"""
function Unsigned(start::UInt16,
length::UInt16,
factor::T,
offset::T,
byte_order::Symbol) where {T <: AbstractFloat}
if byte_order != :little_endian && byte_order != :big_endian
throw(DomainError(byte_order, "Byte order not supported"))
end
if length == 0
throw(DomainError(length, "The length has to be greater or equal to 1"))
end
return new{T}(start, length, factor, offset, byte_order)
end
end
"""
"""
function Unsigned(start::Integer,
length::Integer,
factor::Union{Integer, AbstractFloat},
offset::Union{Integer, AbstractFloat},
byte_order::Symbol)
start = convert(UInt16, start)
length = convert(UInt16, length)
if factor isa Integer && offset isa Integer
factor = convert(Float64, factor)
offset = convert(Float64, offset)
else
factor, offset = promote(factor, offset)
end
return Unsigned(start, length, factor, offset, byte_order)
end
"""
"""
function Unsigned(; start::Integer,
length::Integer,
factor::Union{Integer, AbstractFloat},
offset::Union{Integer, AbstractFloat},
byte_order::Symbol=:little_endian)
return Unsigned(start, length, factor, offset, byte_order)
end
"""
"""
function Unsigned{T}(start::Integer,
length::Integer;
factor::Union{Integer, AbstractFloat}=one(T),
offset::Union{Integer, AbstractFloat}=zero(T),
byte_order::Symbol=:little_endian) where {T}
factor = convert(T, factor)
offset = convert(T, offset)
return Unsigned(start, length, factor, offset, byte_order)
end
"""
"""
function Unsigned{T}(; start::Integer,
length::Integer,
factor::Union{Integer, AbstractFloat}=one(T),
offset::Union{Integer, AbstractFloat}=zero(T),
byte_order::Symbol=:little_endian) where {T}
factor = convert(T, factor)
offset = convert(T, offset)
return Unsigned(start, length, factor, offset, byte_order)
end
"""
"""
function start(signal::Unsigned{T})::UInt16 where {T}
return signal.start
end
"""
"""
function Base.length(signal::Unsigned{T})::UInt16 where {T}
return signal.length
end
"""
"""
function factor(signal::Unsigned{T})::T where {T}
return signal.factor
end
"""
"""
function offset(signal::Unsigned{T})::T where {T}
return signal.offset
end
"""
"""
function byte_order(signal::Unsigned{T})::Symbol where {T}
return signal.byte_order
end
"""
"""
function Base.:(==)(lhs::F, rhs::F) where {T, F <: AbstractFloatSignal{T}}
if start(lhs) != start(rhs)
return false
end
if length(lhs) != length(rhs)
return false
end
if factor(lhs) != factor(rhs)
return false
end
if offset(lhs) != offset(rhs)
return false
end
if byte_order(lhs) != byte_order(rhs)
return false
end
return true
end
struct Signed{T} <: AbstractFloatSignal{T}
start::UInt16
length::UInt16
factor::T
offset::T
byte_order::Symbol
function Signed(start::UInt16,
length::UInt16,
factor::T,
offset::T,
byte_order::Symbol) where {T <: AbstractFloat}
if byte_order != :little_endian && byte_order != :big_endian
throw(DomainError(byte_order, "Byte order not supported"))
end
if length == 0
throw(DomainError(length, "The length has to be greater or equal to 1"))
end
return new{T}(start, length, factor, offset, byte_order)
end
end
"""
"""
function Signed(start::Integer,
length::Integer,
factor::Union{Integer, AbstractFloat},
offset::Union{Integer, AbstractFloat},
byte_order::Symbol)
start = convert(UInt16, start)
length = convert(UInt16, length)
if factor isa Integer && offset isa Integer
factor = convert(Float64, factor)
offset = convert(Float64, offset)
else
factor, offset = promote(factor, offset)
end
return Signed(start, length, factor, offset, byte_order)
end
"""
"""
function Signed(; start::Integer,
length::Integer,
factor::Union{Integer, AbstractFloat},
offset::Union{Integer, AbstractFloat},
byte_order::Symbol=:little_endian)
return Signed(start, length, factor, offset, byte_order)
end
"""
"""
function Signed{T}(start::Integer,
length::Integer;
factor::Union{Integer, AbstractFloat}=one(T),
offset::Union{Integer, AbstractFloat}=zero(T),
byte_order::Symbol=:little_endian) where {T}
factor = convert(T, factor)
offset = convert(T, offset)
return Signed(start, length, factor, offset, byte_order)
end
"""
"""
function Signed{T}(; start::Integer,
length::Integer,
factor::Union{Integer, AbstractFloat}=one(T),
offset::Union{Integer, AbstractFloat}=zero(T),
byte_order::Symbol=:little_endian) where {T}
factor = convert(T, factor)
offset = convert(T, offset)
return Signed(start, length, factor, offset, byte_order)
end
"""
"""
function start(signal::Signed{T})::UInt16 where {T}
return signal.start
end
"""
"""
function Base.length(signal::Signed{T})::UInt16 where {T}
return signal.length
end
"""
"""
function factor(signal::Signed{T})::T where {T}
return signal.factor
end
"""
"""
function offset(signal::Signed{T})::T where {T}
return signal.offset
end
"""
"""
function byte_order(signal::Signed{T})::Symbol where {T}
return signal.byte_order
end
"""
"""
struct FloatSignal{T} <: AbstractFloatSignal{T}
start::UInt16
factor::T
offset::T
byte_order::Symbol
function FloatSignal(start::UInt16, factor::T, offset::T,
byte_order::Symbol) where {T <: AbstractFloat}
new{T}(start, factor, offset, byte_order)
end
end
"""
"""
function FloatSignal(start::Integer,
factor::Union{Integer,AbstractFloat},
offset::Union{Integer,AbstractFloat},
byte_order::Symbol)
start = convert(UInt16, start)
if factor isa Integer && offset isa Integer
factor = convert(Float64, factor)
offset = convert(Float64, offset)
else
factor, offset = promote(factor, offset)
end
return FloatSignal(start, factor, offset, byte_order)
end
"""
"""
function FloatSignal(; start::Integer,
factor::Union{Integer,AbstractFloat},
offset::Union{Integer,AbstractFloat},
byte_order::Symbol)
return FloatSignal(start, factor, offset, byte_order)
end
"""
"""
function FloatSignal{T}(start::Integer;
factor::Union{Integer,AbstractFloat}=one(T),
offset::Union{Integer,AbstractFloat}=zero(T),
byte_order::Symbol=:little_endian) where {T}
factor = convert(T, factor)
offset = convert(T, offset)
return FloatSignal(start, factor, offset, byte_order)
end
function FloatSignal{T}(; start::Integer,
factor::Union{Integer,AbstractFloat}=one(T),
offset::Union{Integer,AbstractFloat}=zero(T),
byte_order::Symbol=:little_endian) where {T}
factor = convert(T, factor)
offset = convert(T, offset)
return FloatSignal(start, factor, offset, byte_order)
end
const Float16Signal = FloatSignal{Float16}
const Float32Signal = FloatSignal{Float32}
const Float64Signal = FloatSignal{Float64}
"""
"""
function start(signal::FloatSignal{T})::UInt16 where {T}
return signal.start
end
function Base.length(signal::FloatSignal{T})::UInt16 where {T}
return 8sizeof(T)
end
"""
"""
function factor(signal::FloatSignal{T})::T where {T}
return signal.factor
end
"""
"""
function offset(signal::FloatSignal{T})::T where {T}
return signal.offset
end
"""
"""
function byte_order(signal::FloatSignal{T})::Symbol where {T}
return signal.byte_order
end
"""
"""
struct Raw <: AbstractIntegerSignal{UInt64}
start::UInt16
length::UInt16
byte_order::Symbol
"""
"""
function Raw(start::UInt16, length::UInt16,byte_order::Symbol)
if length == 0
throw(DomainError(length, "The length has to be greater or equal to 1"))
end
if byte_order != :little_endian && byte_order != :big_endian
throw(DomainError(byte_order, "Byte order not supported"))
end
return new(start, length, byte_order)
end
end
"""
"""
function Raw(start::Integer, length::Integer, byte_order::Symbol)
start = convert(UInt16, start)
length = convert(UInt16, length)
return Raw(start, length, byte_order)
end
"""
    Raw(; start, length, byte_order=:little_endian)

Keyword-argument convenience constructor for [`Raw`](@ref).
"""
function Raw(; start::Integer,
             length::Integer,
             byte_order::Symbol=:little_endian)
    # The previous definition carried a spurious `where {T}` clause with an
    # unused type variable; no static parameter is needed here.
    return Raw(start, length, byte_order)
end
"""
"""
function start(signal::Raw)::UInt16
return signal.start
end
"""
"""
function Base.length(signal::Raw)::UInt16
return signal.length
end
"""
"""
function byte_order(signal::Raw)::Symbol
return signal.byte_order
end
"""
"""
struct NamedSignal{T} <: AbstractSignal{T}
name::String
unit::Union{Nothing,String}
default::Union{Nothing,T}
signal::UnnamedSignal{T}
function NamedSignal(name::String,
unit::Union{Nothing,String},
default::Union{Nothing,T},
signal::UnnamedSignal{T}) where {T}
if name == ""
throw(DomainError(name, "name cannot be the empty string"))
end
return new{T}(name, unit, default, signal)
end
end
"""
"""
function NamedSignal(; name::String,
unit::Union{Nothing,String}=nothing,
default::Union{Nothing,T}=nothing,
signal::UnnamedSignal{T}) where {T}
return NamedSignal(name, unit, default, signal)
end
"""
"""
function name(signal::NamedSignal{T})::String where {T}
return signal.name
end
"""
"""
function unit(signal::NamedSignal{T})::Union{Nothing,String} where {T}
return signal.unit
end
"""
"""
function default(signal::NamedSignal{T})::Union{Nothing,T} where {T}
return signal.default
end
"""
"""
function signal(signal::NamedSignal{T})::UnnamedSignal{T} where {T}
return signal.signal
end
const Signal = NamedSignal
"""
"""
struct Bits
bits::Set{UInt16}
end
"""
    Bits(bits::Integer...)

Construct a `Bits` set from individual bit positions.
"""
function Bits(bits::Integer...)
    positions = Set{UInt16}(UInt16(b) for b in bits)
    return Bits(positions)
end
"""
    Bits(signal::AbstractFloatSignal)

Compute the set of absolute bit positions covered by `signal`.

For `:little_endian` signals these are the `length(signal)` consecutive
positions starting at `start(signal)`. For `:big_endian` signals the walk
moves towards lower bit numbers within a byte and, after bit 0 of a byte,
jumps to bit 7 of the following byte.
"""
function Bits(signal::AbstractFloatSignal{T}) where {T}
    bits = Set{UInt16}()
    start_bit = start(signal)
    if byte_order(signal) == :little_endian
        for i=0:length(signal)-1
            bit_pos = start_bit + i
            push!(bits, bit_pos)
        end
    elseif byte_order(signal) == :big_endian
        for j=0:length(signal)-1
            push!(bits, start_bit)
            if start_bit % 8 == 0
                # Byte boundary reached: continue at bit 7 of the next byte.
                start_byte = div(start_bit,8)
                start_bit = 8 * (start_byte + 1) + 7
            else
                start_bit -= 1
            end
        end
    end
    # NOTE(review): an unsupported byte order silently yields an empty set
    # rather than throwing — confirm this is intended.
    return Bits(bits)
end
"""
    Bits(signal::AbstractIntegerSignal)

Compute the set of absolute bit positions covered by `signal`.

NOTE(review): this is a line-for-line duplicate of the
`AbstractFloatSignal` method; a shared helper (or a `Union` method) would
remove the duplication.
"""
function Bits(signal::AbstractIntegerSignal{T}) where {T}
    bits = Set{UInt16}()
    start_bit = start(signal)
    if byte_order(signal) == :little_endian
        for i=0:length(signal)-1
            bit_pos = start_bit + i
            push!(bits, bit_pos)
        end
    elseif byte_order(signal) == :big_endian
        for j=0:length(signal)-1
            push!(bits, start_bit)
            if start_bit % 8 == 0
                # Byte boundary reached: continue at bit 7 of the next byte.
                start_byte = div(start_bit,8)
                start_bit = 8 * (start_byte + 1) + 7
            else
                start_bit -= 1
            end
        end
    end
    return Bits(bits)
end
# Delegate to the wrapped unnamed signal.
Bits(sig::NamedSignal{T}) where {T} = Bits(signal(sig))
# Two `Bits` are equal iff they cover exactly the same bit positions.
Base.:(==)(lhs::Bits, rhs::Bits)::Bool = lhs.bits == rhs.bits
"""
    share_bits(lhs::Bits, rhs::Bits) -> Bool

Return whether the two bit sets have at least one position in common.
"""
function share_bits(lhs::Bits, rhs::Bits)::Bool
    return !isdisjoint(lhs.bits, rhs.bits)
end
"""
    overlap(lhs::AbstractSignal, rhs::AbstractSignal) -> Bool

Return whether the two signals occupy any common bit position.
"""
function overlap(lhs::AbstractSignal{R}, rhs::AbstractSignal{S})::Bool where {R,S}
    return share_bits(Bits(lhs), Bits(rhs))
end
"""
    check(signal::UnnamedSignal, available_bytes::UInt8) -> Bool

Return whether every bit of `signal` lies within the first
`available_bytes` bytes.
"""
function check(signal::UnnamedSignal{T}, available_bytes::UInt8)::Bool where {T}
    bits = Bits(signal)
    # `maximum` over a generator replaces splatting a temporary array into
    # `max(...)`, which allocated and scaled poorly for wide signals.
    max_byte = maximum(div(bit, 8) for bit in bits.bits)
    return max_byte < available_bytes
end
"""
    check(sig::NamedSignal, available_bytes::UInt8) -> Bool

Return whether the wrapped signal fits completely within the first
`available_bytes` bytes.
"""
function check(sig::NamedSignal{T}, available_bytes::UInt8)::Bool where {T}
    return check(signal(sig), available_bytes)
end
export Bit, Unsigned, Signed, Raw, Float16Signal, Float32Signal, Float64Signal
export Signal, FloatSignal
export NamedSignal
export start, factor, offset, byte_order
export name, unit, default, signal
end
| CANalyze | https://github.com/tsabelmann/CANalyze.jl.git |
[
"MIT"
] | 1.6.0 | 2bb2fd8988fa976a3e057bbe6197414d77d5e29d | src/Utils.jl | code | 6383 | """The module provides utilities to convert numbers into and from byte representations,
functions to check whether the system is little-endian or big-endian, and functions to
create bitmasks.
"""
module Utils
"""
to_bytes(num::Number) -> Vector{UInt8}
Creates the byte representation of the number `num`.
# Arguments
- `num::Number`: the number from which we retrieve the bytes.
# Returns
- `Vector{UInt8}`: the bytes representation of the number `num`
# Examples
```jldoctest
using CANalyze.Utils
bytes = Utils.to_bytes(UInt16(0xAAFF))
# output
2-element Vector{UInt8}:
0xff
0xaa
```
"""
function to_bytes(num::Number)::Vector{UInt8}
    # Reinterpret a one-element buffer; the declared return type converts
    # the reinterpret view into a fresh Vector{UInt8}.
    buffer = [num]
    return reinterpret(UInt8, buffer)
end
"""
from_bytes(type::Type{T}, array::AbstractArray{UInt8}) where {T <: Number} -> T
Creates a value of type `T` constituted by the byte-array `array`. If the `array` length
is smaller than the size of `T`, `array` is filled with enough zeros.
# Arguments
- `type::Type{T}`: the type to which the byte-array is transformed
- `array::AbstractArray{UInt8}`: the byte array
# Returns
- `T`: the value constructed from the byte sequence
# Examples
```jldoctest
using CANalyze.Utils
bytes = Utils.from_bytes(UInt16, UInt8[0xFF, 0xAA])
# output
0xaaff
```
"""
function from_bytes(type::Type{T}, array::AbstractArray{UInt8})::T where {T <: Number}
if length(array) < sizeof(T)
for i=1:(sizeof(T) - length(array))
push!(array, UInt8(0))
end
end
values = reinterpret(type, array)
return values[1]
end
"""
is_little_endian() -> Bool
Returns whether the system has little-endian byte-order
# Returns
- `Bool`: The system has little-endian byte-order
"""
function is_little_endian()::Bool
    # Probe the in-memory layout of a known 16-bit pattern: on a
    # little-endian machine the low-order byte is stored first.
    probe = reinterpret(UInt8, UInt16[0x0001])
    return probe[1] == 0x01
end
"""
is_big_endian() -> Bool
Returns whether the system has big-endian byte-order
# Returns
- `Bool`: The system has big-endian byte-order
"""
# Defined as the negation of `is_little_endian`; mixed-endian systems are
# not supported.
is_big_endian()::Bool = !is_little_endian()
"""
mask(::Type{T}, length::UInt8, shift::UInt8) where {T <: Integer} -> T
Creates a mask of type `T` with `length` number of bits and right-shifted by `shift`
number of bits.
# Arguments
- `Type{T}`: the type of the mask
- `length::UInt8`: the number of bits
- `shift::UInt8`: the right-shift
# Returns
- `T`: the mask defined by `length` and `shift`
"""
function mask(::Type{T}, length::UInt8, shift::UInt8)::T where {T <: Integer}
    # Build the unshifted mask, then move it into position.
    return mask(T, length) << shift
end
"""
mask(::Type{T}, length::Integer, shift::Integer) where {T <: Integer} -> T
Creates a mask of type `T` with `length` number of bits and right-shifted by `shift`
number of bits.
# Arguments
- `Type{T}`: the type of the mask
- `length::Integer`: the number of bits
- `shift::Integer`: the right-shift
# Returns
- `T`: the mask defined by `length` and `shift`
# Examples
```jldoctest
using CANalyze.Utils
m = Utils.mask(UInt64, 32, 16)
# output
0x0000ffffffff0000
```
"""
function mask(::Type{T}, length::Integer, shift::Integer)::T where {T <: Integer}
    # Narrow both arguments to UInt8 (throws for out-of-range values) and
    # delegate to the UInt8 overload.
    return mask(T, convert(UInt8, length), convert(UInt8, shift))
end
"""
mask(::Type{T}, length::UInt8) where {T <: Integer} -> T
Creates a mask of type `T` with `length` number of bits.
# Arguments
- `Type{T}`: the type of the mask
- `length::UInt8`: the number of bits
# Returns
- `T`: the mask defined by `length`
"""
function mask(::Type{T}, length::UInt8)::T where {T <: Integer}
    # Accumulate `length` one-bits starting from the least significant bit.
    result = zero(T)
    for _ in 1:length
        result = (result << 1) + one(T)
    end
    return result
end
"""
mask(::Type{T}, length::Integer) where {T <: Integer} -> T
Creates a mask of type `T` with `length` number of bits.
# Arguments
- `Type{T}`: the type of the mask
- `length::Integer`: the number of bits
# Returns
- `T`: the mask defined by `length`
# Examples
```jldoctest
using CANalyze.Utils
m = Utils.mask(UInt64, 32)
# output
0x00000000ffffffff
```
"""
function mask(::Type{T}, length::Integer)::T where {T <: Integer}
    # Narrow to UInt8 and delegate; conversion throws for negative or
    # oversized lengths.
    return mask(T, convert(UInt8, length))
end
"""
mask(::Type{T}) where {T <: Integer} -> T
Creates a full mask of type `T` with `8sizeof(T)` bits.
# Arguments
- `Type{T}`: the type of the mask
# Returns
- `T`: the full mask
# Examples
```jldoctest
using CANalyze.Utils
m = Utils.mask(UInt64)
# output
0xffffffffffffffff
```
"""
# Convenience overload: a mask covering every bit of `T`.
mask(::Type{T}) where {T <: Integer} = full_mask(T)
"""
full_mask(::Type{T}) where {T <: Integer} -> T
Creates a full mask of type `T` with `8sizeof(T)` bits.
# Arguments
- `Type{T}`: the type of the mask
# Returns
- `T`: the full mask
# Examples
```jldoctest
using CANalyze.Utils
m = Utils.full_mask(Int8)
# output
-1
```
"""
function full_mask(::Type{T})::T where {T <: Integer}
    # Shift-and-add one bit per position until all 8*sizeof(T) bits are set;
    # works for signed types too (yields -1).
    result = one(T)
    for _ in 1:(8sizeof(T) - 1)
        result = (result << 1) + one(T)
    end
    return result
end
"""
zero_mask(::Type{T}) where {T <: Integer} -> T
Creates a zero mask of type `T` where every bit is unset.
# Arguments
- `Type{T}`: the type of the mask
# Returns
- `T`: the zero mask
# Examples
```jldoctest
using CANalyze.Utils
m = Utils.zero_mask(UInt8)
# output
0x00
```
"""
# Mask with every bit cleared; kept for symmetry with `full_mask`.
zero_mask(::Type{T}) where {T <: Integer} = zero(T)
"""
bit_mask(::Type{T}, bits::Set{UInt16}) where {T <: Integer} -> T
Creates a bit mask of type `T` where every bit inside `bits` is set.
# Arguments
- `Type{T}`: the type of the mask
- `bits::Set{UInt16}`: the set of bits we want to set
# Returns
- `T`: the mask
# Examples
```jldoctest
using CANalyze.Utils
m = Utils.bit_mask(UInt8, Set{UInt16}([0,1,2,3,4,5,6,7]))
# output
0xff
```
"""
function bit_mask(::Type{T}, bits::Set{UInt16})::T where {T <: Integer}
    # OR one single-bit mask per requested position into the accumulator.
    # Shifting by the full bit width or more yields zero, matching the
    # behaviour of the `mask(T, 1, bit)`-based formulation.
    return reduce(|, (one(T) << bit for bit in bits); init=zero(T))
end
function bit_mask(::Type{T}, bits::Integer...)::T where {T}
    # Collect the positional bit positions into a set and delegate.
    return bit_mask(T, Set{UInt16}(bits))
end
function bit_mask(::Type{S}, bits::AbstractArray{T,N})::S where {S,T,N}
    # Deduplicate the array of bit positions into a set and delegate.
    return bit_mask(S, Set{UInt16}(bits))
end
export to_bytes, from_bytes
export is_little_endian, is_big_endian
export mask, zero_mask, full_mask, bit_mask
end
| CANalyze | https://github.com/tsabelmann/CANalyze.jl.git |
[
"MIT"
] | 1.6.0 | 2bb2fd8988fa976a3e057bbe6197414d77d5e29d | test/Databases.jl | code | 5213 | using Test
@info "CANalyze.Databases tests..."
@testset "database" begin
import CANalyze.Signals
import CANalyze.Messages
import CANalyze.Databases
@testset "database_1" begin
signal1 = Signals.NamedSignal("A", nothing, nothing, Signals.Unsigned(0, 32, 1, 0, :little_endian))
signal2 = Signals.NamedSignal("B", nothing, nothing, Signals.Unsigned(40, 17, 2, 20, :big_endian))
signal3 = Signals.NamedSignal("C", nothing, nothing, Signals.Unsigned(32, 8, 2, 20, :little_endian))
m1 = Messages.Message(0xA, 8, "A", signal1, signal2, signal3; strict=true)
m2 = Messages.Message(0xB, 8, "B", signal1, signal2, signal3; strict=true)
d = Databases.Database(m1, m2)
@test true
end
@testset "database_2" begin
signal1 = Signals.NamedSignal("A", nothing, nothing, Signals.Unsigned(0, 32, 1, 0, :little_endian))
signal2 = Signals.NamedSignal("B", nothing, nothing, Signals.Unsigned(40, 17, 2, 20, :big_endian))
signal3 = Signals.NamedSignal("C", nothing, nothing, Signals.Unsigned(32, 8, 2, 20, :little_endian))
m1 = Messages.Message(0xA, 8, "A", signal1, signal2, signal3; strict=true)
m2 = Messages.Message(0xB, 8, "A", signal1, signal2, signal3; strict=true)
@test_throws DomainError Databases.Database(m1, m2)
end
@testset "database_3" begin
signal1 = Signals.NamedSignal("A", nothing, nothing, Signals.Unsigned(0, 32, 1, 0, :little_endian))
signal2 = Signals.NamedSignal("B", nothing, nothing, Signals.Unsigned(40, 17, 2, 20, :big_endian))
signal3 = Signals.NamedSignal("C", nothing, nothing, Signals.Unsigned(32, 8, 2, 20, :little_endian))
m1 = Messages.Message(0xA, 8, "A", signal1, signal2, signal3; strict=true)
m2 = Messages.Message(0xA, 8, "B", signal1, signal2, signal3; strict=true)
@test_throws DomainError Databases.Database(m1, m2)
end
@testset "get_1" begin
signal1 = Signals.NamedSignal("A", nothing, nothing, Signals.Unsigned(0, 32, 1, 0, :little_endian))
signal2 = Signals.NamedSignal("B", nothing, nothing, Signals.Unsigned(40, 17, 2, 20, :big_endian))
signal3 = Signals.NamedSignal("C", nothing, nothing, Signals.Unsigned(32, 8, 2, 20, :little_endian))
m1 = Messages.Message(0xA, 8, "A", signal1, signal2, signal3; strict=true)
m2 = Messages.Message(0xB, 8, "B", signal1, signal2, signal3; strict=true)
d = Databases.Database(m1, m2)
@test d["A"] == m1
@test d["B"] == m2
end
@testset "get_2" begin
signal1 = Signals.NamedSignal("A", nothing, nothing, Signals.Unsigned(0, 32, 1, 0, :little_endian))
signal2 = Signals.NamedSignal("B", nothing, nothing, Signals.Unsigned(40, 17, 2, 20, :big_endian))
signal3 = Signals.NamedSignal("C", nothing, nothing, Signals.Unsigned(32, 8, 2, 20, :little_endian))
m1 = Messages.Message(0xA, 8, "A", signal1, signal2, signal3; strict=true)
m2 = Messages.Message(0xB, 8, "B", signal1, signal2, signal3; strict=true)
d = Databases.Database(m1, m2)
@test_throws KeyError d["C"]
end
@testset "get_3" begin
signal1 = Signals.NamedSignal("A", nothing, nothing, Signals.Unsigned(0, 32, 1, 0, :little_endian))
signal2 = Signals.NamedSignal("B", nothing, nothing, Signals.Unsigned(40, 17, 2, 20, :big_endian))
signal3 = Signals.NamedSignal("C", nothing, nothing, Signals.Unsigned(32, 8, 2, 20, :little_endian))
m1 = Messages.Message(0xA, 8, "A", signal1, signal2, signal3; strict=true)
m2 = Messages.Message(0xB, 8, "B", signal1, signal2, signal3; strict=true)
d = Databases.Database(m1, m2)
@test d[0xA] == m1
@test d[0xB] == m2
end
@testset "get_4" begin
signal1 = Signals.NamedSignal("A", nothing, nothing, Signals.Unsigned(0, 32, 1, 0, :little_endian))
signal2 = Signals.NamedSignal("B", nothing, nothing, Signals.Unsigned(40, 17, 2, 20, :big_endian))
signal3 = Signals.NamedSignal("C", nothing, nothing, Signals.Unsigned(32, 8, 2, 20, :little_endian))
m1 = Messages.Message(0xA, 8, "A", signal1, signal2, signal3; strict=true)
m2 = Messages.Message(0xB, 8, "B", signal1, signal2, signal3; strict=true)
d = Databases.Database(m1, m2)
@test_throws KeyError d[0xC]
end
@testset "get_5" begin
signal1 = Signals.NamedSignal("A", nothing, nothing, Signals.Unsigned(0, 32, 1, 0, :little_endian))
signal2 = Signals.NamedSignal("B", nothing, nothing, Signals.Unsigned(40, 17, 2, 20, :big_endian))
signal3 = Signals.NamedSignal("C", nothing, nothing, Signals.Unsigned(32, 8, 2, 20, :little_endian))
m1 = Messages.Message(0xA, 8, "A", signal1, signal2, signal3; strict=true)
m2 = Messages.Message(0xB, 8, "B", signal1, signal2, signal3; strict=true)
d = Databases.Database(m1, m2)
@test get(d, "A", nothing) == m1
@test get(d, "B", nothing) == m2
@test get(d, "C", nothing) == nothing
@test get(d, 0xA, nothing) == m1
@test get(d, 0xB, nothing) == m2
@test get(d, 0xC, nothing) == nothing
end
end
| CANalyze | https://github.com/tsabelmann/CANalyze.jl.git |
[
"MIT"
] | 1.6.0 | 2bb2fd8988fa976a3e057bbe6197414d77d5e29d | test/Decode.jl | code | 14897 | using Test
@info "CANalyze.Decode tests..."
@testset "bit" begin
import CANalyze.Utils
import CANalyze.Frames
import CANalyze.Signals
import CANalyze.Messages
import CANalyze.Decode
@testset "bit_1" begin
for start=0:63
m = Utils.mask(UInt64, 1, start)
signal = Signals.Bit(start=start)
frame = Frames.CANFrame(0x1FF, Utils.to_bytes(m))
@test Decode.decode(signal, frame)
end
end
@testset "bit_2" begin
for start=1:63
m = Utils.mask(UInt64, 1, start)
signal = Signals.Bit(start=start-1)
frame = Frames.CANFrame(0x1FF, Utils.to_bytes(m))
@test !Decode.decode(signal, frame)
end
end
@testset "bit_3" begin
signal = Signals.Bit(start=8)
frame = Frames.CANFrame(0x1FF, 1)
@test_throws DomainError Decode.decode(signal, frame)
end
@testset "bit_4" begin
signal = Signals.Bit(start=8)
frame = Frames.CANFrame(0x1FF, 1)
@test Decode.decode(signal, frame, nothing) == nothing
end
end
@testset "unsigned" begin
import CANalyze.Utils
import CANalyze.Frames
import CANalyze.Signals
import CANalyze.Messages
import CANalyze.Decode
@testset "unsigned_1" begin
for start=0:63
for len=1:(64-start)
m = Utils.mask(UInt64, len, start)
signal = Signals.Unsigned{Float64}(start=start, length=len, factor=2.0,
offset=1337,
byte_order=:little_endian)
frame = Frames.CANFrame(0x1FF, Utils.to_bytes(m))
decode = Decode.decode(signal, frame)
value = Utils.mask(UInt64, len) * Signals.factor(signal) + Signals.offset(signal)
@test decode == value
end
end
end
@testset "unsigned_2" begin
signal = Signals.Unsigned{Float64}(start=7, length=8, factor=2.0, offset=1337,
byte_order=:big_endian)
frame = Frames.CANFrame(0x1FF, [i for i=1:8])
decode = Decode.decode(signal, frame)
value = 1 * Signals.factor(signal) + Signals.offset(signal)
@test decode == value
end
@testset "unsigned_3" begin
signal = Signals.Unsigned{Float64}(start=7, length=16, factor=2.0, offset=1337,
byte_order=:big_endian)
frame = Frames.CANFrame(0x1FF, 0xAB, 0xCD)
decode = Decode.decode(signal, frame)
value = 0xABCD * Signals.factor(signal) + Signals.offset(signal)
@test decode == value
end
@testset "unsigned_4" begin
signal = Signals.Unsigned{Float64}(start=7, length=24, factor=2.0, offset=1337,
byte_order=:big_endian)
frame = Frames.CANFrame(0x1FF, 0xAB, 0xCD, 0xEF)
decode = Decode.decode(signal, frame)
value = 0xABCDEF * Signals.factor(signal) + Signals.offset(signal)
@test decode == value
end
@testset "unsigned_5" begin
signal = Signals.Unsigned{Float64}(start=8, length=1, factor=2.0, offset=1337,
byte_order=:little_endian)
frame = Frames.CANFrame(0x1FF, 0x01)
@test_throws DomainError Decode.decode(signal, frame)
end
@testset "unsigned_6" begin
signal = Signals.Unsigned{Float64}(start=6, length=8, factor=2.0, offset=1337,
byte_order=:big_endian)
frame = Frames.CANFrame(0x1FF, 0x01)
@test_throws DomainError Decode.decode(signal, frame)
end
end
@testset "signed" begin
import CANalyze.Utils
import CANalyze.Frames
import CANalyze.Signals
import CANalyze.Messages
import CANalyze.Decode
using Random
@testset "signed_1" begin
for start=0:62
for len=1:(64-start)
m = Utils.mask(UInt64, len-1, start)
signal = Signals.Signed{Float64}(start=start, length=len, factor=2.0,
offset=1337,
byte_order=:little_endian)
frame = Frames.CANFrame(0x1FF, Utils.to_bytes(m))
decode = Decode.decode(signal, frame)
value = Utils.mask(UInt64, len-1) * Signals.factor(signal) + Signals.offset(signal)
@test decode == value
end
end
end
@testset "signed_2" begin
for start=0:63
for len=1:(64-start)
m = Utils.mask(UInt64, len, start)
signal = Signals.Signed{Float64}(start=start, length=len, factor=2.0,
offset=1337,
byte_order=:little_endian)
frame = Frames.CANFrame(0x1FF, Utils.to_bytes(m))
decode = Decode.decode(signal, frame)
value = Utils.mask(Int64, len) + ~Utils.mask(Int64, len)
value = value * Signals.factor(signal) + Signals.offset(signal)
@test decode == value
end
end
end
@testset "signed_3" begin
for len=1:64
for choice=1:64-len
m = Utils.bit_mask(Int64, len-1, rand(0:(len-1), choice)...)
signal = Signals.Signed{Float64}(start=0, length=len, factor=2.0,
offset=1337,
byte_order=:little_endian)
frame = Frames.CANFrame(0x1FF, Utils.to_bytes(m))
decode = Decode.decode(signal, frame)
value = m + ~Utils.mask(Int64, len)
value = value * Signals.factor(signal) + Signals.offset(signal)
@test decode == value
end
end
end
@testset "signed_4" begin
signal = Signals.Signed{Float64}(start=7, length=8, factor=set=1337,
byte_order=:big_endian)
frame = Frames.CANFrame(0x1FF, 0xFE)
decode = Decode.decode(signal, frame)
value = -2 * Signals.factor(signal) + Signals.offset(signal)
@test decode == value
end
@testset "signed_5" begin
signal = Signals.Signed{Float64}(start=8, length=1, factor=2.0, offset=1337,
byte_order=:little_endian)
frame = Frames.CANFrame(0x1FF, 0x01)
@test_throws DomainError Decode.decode(signal, frame)
end
@testset "signed_6" begin
signal = Signals.Signed{Float64}(start=6, length=8, factor=2.0, offset=1337,
byte_order=:big_endian)
frame = Frames.CANFrame(0x1FF, 0x01)
@test_throws DomainError Decode.decode(signal, frame)
end
end
@testset "float_signal" begin
import CANalyze.Utils
import CANalyze.Frames
import CANalyze.Signals
import CANalyze.Messages
import CANalyze.Decode
using Random
@testset "float_signal_1" begin
for T in [Float16, Float32, Float64]
data = [i for i=0:(sizeof(T)-1)]
signal = Signals.FloatSignal{T}(start=0, factor=2.0, offset=1337,
byte_order=:little_endian)
frame = Frames.CANFrame(0x1FF, data)
decode = Decode.decode(signal, frame)
value = reinterpret(T, data)[1] * Signals.factor(signal) + Signals.offset(signal)
@test decode == value
end
end
@testset "float_signal_2" begin
for T in [Float16, Float32, Float64]
data = [i for i=0:(sizeof(T)-1)]
signal = Signals.FloatSignal{T}(start=7, factor=2.0, offset=1337,
byte_order=:big_endian)
frame = Frames.CANFrame(0x1FF, data)
decode = Decode.decode(signal, frame)
value = reinterpret(T, reverse(data))[1] * Signals.factor(signal)
value += Signals.offset(signal)
@test decode == value
end
end
@testset "float_signal_3" begin
signal = Signals.Float16Signal(start=1, byte_order=:little_endian)
frame = Frames.CANFrame(0x1FF, 0x01, 0x02)
@test_throws DomainError Decode.decode(signal, frame)
end
@testset "float_signal_4" begin
signal = Signals.Float16Signal(start=6, byte_order=:big_endian)
frame = Frames.CANFrame(0x1FF, 0x01, 0x02)
@test_throws DomainError Decode.decode(signal, frame)
end
@testset "float_signal_5" begin
signal = Signals.Float32Signal(start=1, byte_order=:little_endian)
frame = Frames.CANFrame(0x1FF, 0x01, 0x02, 0x03, 0x04)
@test_throws DomainError Decode.decode(signal, frame)
end
@testset "float_signal_6" begin
signal = Signals.Float32Signal(start=6, byte_order=:big_endian)
frame = Frames.CANFrame(0x1FF, 0x01, 0x02, 0x03, 0x04)
@test_throws DomainError Decode.decode(signal, frame)
end
@testset "float_signal_7" begin
signal = Signals.Float64Signal(start=1, byte_order=:little_endian)
frame = Frames.CANFrame(0x1FF, 0:7)
@test_throws DomainError Decode.decode(signal, frame)
end
@testset "float_signal_8" begin
signal = Signals.Float64Signal(start=6, byte_order=:big_endian)
frame = Frames.CANFrame(0x1FF, 0:7)
@test_throws DomainError Decode.decode(signal, frame)
end
end
@testset "raw" begin
import CANalyze.Utils
import CANalyze.Frames
import CANalyze.Signals
import CANalyze.Messages
import CANalyze.Decode
@testset "raw_1" begin
for start=0:63
for len=1:(64-start)
m = Utils.mask(UInt64, len, start)
signal = Signals.Raw(start=start, length=len, byte_order=:little_endian)
frame = Frames.CANFrame(0x1FF, Utils.to_bytes(m))
decode = Decode.decode(signal, frame)
value = Utils.mask(UInt64, len)
@test decode == value
end
end
end
@testset "raw_2" begin
signal = Signals.Raw(start=7, length=8, byte_order=:big_endian)
frame = Frames.CANFrame(0x1FF, [i for i=1:8])
decode = Decode.decode(signal, frame)
@test decode == 1
end
@testset "raw_3" begin
signal = Signals.Raw(start=7, length=16, byte_order=:big_endian)
frame = Frames.CANFrame(0x1FF, 0xAB, 0xCD)
decode = Decode.decode(signal, frame)
@test decode == 0xABCD
end
@testset "raw_4" begin
signal = Signals.Raw(start=7, length=24, byte_order=:big_endian)
frame = Frames.CANFrame(0x1FF, 0xAB, 0xCD, 0xEF)
decode = Decode.decode(signal, frame)
@test decode == 0xABCDEF
end
@testset "raw_5" begin
signal = Signals.Raw(start=7, length=64, byte_order=:big_endian)
frame = Frames.CANFrame(0x1FF, [i for i=1:8])
decode = Decode.decode(signal, frame)
@test decode == 0x01_02_03_04_05_06_07_08
end
@testset "raw_6" begin
signal = Signals.Raw(start=3, length=8, byte_order=:big_endian)
frame = Frames.CANFrame(0x1FF, 0x21, 0xAB)
decode = Decode.decode(signal, frame)
@test decode == 0x1A
end
@testset "raw_7" begin
signal = Signals.Raw(start=3, length=16, byte_order=:big_endian)
frame = Frames.CANFrame(0x1FF, 0x21, 0xAB, 0xCD)
decode = Decode.decode(signal, frame)
@test decode == 0x1ABC
end
@testset "raw_8" begin
signal = Signals.Raw(start=8, length=1, byte_order=:little_endian)
frame = Frames.CANFrame(0x1FF, 0x01)
@test_throws DomainError Decode.decode(signal, frame)
end
@testset "raw_9" begin
signal = Signals.Raw(start=6, length=8, byte_order=:big_endian)
frame = Frames.CANFrame(0x1FF, 0x01)
@test_throws DomainError Decode.decode(signal, frame)
end
end
@testset "named_signal" begin
import CANalyze.Utils
import CANalyze.Frames
import CANalyze.Signals
import CANalyze.Messages
import CANalyze.Decode
@testset "named_signal_1" begin
signal = Signals.Signed{Float64}(start=0,
length=16,
factor=2.0,
offset=1337,
byte_order=:little_endian)
named_signal = Signals.NamedSignal("SIG", nothing, nothing, signal)
frame = Frames.CANFrame(0x1FF, 0x01, 0x02)
@test Decode.decode(signal, frame) == Decode.decode(named_signal, frame)
end
@testset "named_signal_2" begin
signal = Signals.Signed{Float64}(start=1,
length=16,
factor=2.0,
offset=1337,
byte_order=:little_endian)
named_signal = Signals.NamedSignal("SIG", nothing, nothing, signal)
frame = Frames.CANFrame(0x1FF, 0x01, 0x02)
@test_throws DomainError Decode.decode(signal, frame)
@test Decode.decode(named_signal, frame) == nothing
end
@testset "named_signal_3" begin
signal = Signals.Signed{Float64}(start=1,
length=16,
factor=2.0,
offset=1337,
byte_order=:little_endian)
named_signal = Signals.NamedSignal("SIG", nothing, 42.0, signal)
frame = Frames.CANFrame(0x1FF, 0x01, 0x02)
@test_throws DomainError Decode.decode(signal, frame)
@test Decode.decode(named_signal, frame) == 42
end
end
@testset "message" begin
@testset "message_1" begin
sig1 = Signals.Signed{Float64}(start=0, length=8, byte_order=:little_endian)
sig2 = Signals.Signed{Float64}(start=8, length=8, byte_order=:little_endian)
sig3 = Signals.Signed{Float64}(start=16, length=8, byte_order=:little_endian)
named_signal_1 = Signals.NamedSignal("A", nothing, nothing, sig1)
named_signal_2 = Signals.NamedSignal("B", nothing, nothing, sig2)
named_signal_3 = Signals.NamedSignal("C", nothing, nothing, sig3)
signals = [named_signal_1, named_signal_2, named_signal_3]
frame = Frames.CANFrame(0x1FF, 0x01, 0x02, 0x03)
m = Messages.Message(0x1FF, 8, "M", named_signal_1, named_signal_2, named_signal_3)
value = Decode.decode(m, frame)
for signal in signals
@test value[Signals.name(signal)] == Decode.decode(signal, frame)
end
end
end
| CANalyze | https://github.com/tsabelmann/CANalyze.jl.git |
[
"MIT"
] | 1.6.0 | 2bb2fd8988fa976a3e057bbe6197414d77d5e29d | test/Frames.jl | code | 2882 | using Test
@info "CANalyze.Frames tests..."
@testset "equal" begin
using CANalyze.Frames
@testset "equal_1" begin
frame1 = CANFrame(0x14, Integer[]; is_extended=true)
frame2 = CANFrame(0x14; is_extended=true)
@test frame1 == frame2
end
@testset "equal_2" begin
frame1 = CANFrame(0x14, Integer[]; is_extended=true)
frame2 = CANFrame(0x15, Integer[]; is_extended=true)
@test frame1 != frame2
end
@testset "equal_3" begin
frame1 = CANFrame(0x14, Integer[]; is_extended=false)
frame2 = CANFrame(0x14, Integer[]; is_extended=true)
@test frame1 != frame2
end
@testset "equal_4" begin
frame1 = CANFrame(0x14, Integer[]; is_extended=true)
frame2 = CANFrame(0x15, Integer[]; is_extended=true)
@test frame1 != frame2
end
@testset "equal_5" begin
frame1 = CANFrame(0x14, Integer[1,2,3,4]; is_extended=true)
frame2 = CANFrame(0x14, 1, 2, 3, 4; is_extended=true)
@test frame1 == frame2
end
end
@testset "frame_id" begin
using CANalyze.Frames
@testset "frame_id_1" begin
frame = CANFrame(0x0AFF, Integer[1,2,3,4]; is_extended=true)
@test frame_id(frame) == (0x0AFF & 0x01_FF_FF_FF)
end
@testset "frame_id_1" begin
frame = CANFrame(0x0AFF, Integer[1,2,3,4]; is_extended=false)
@test frame_id(frame) == (0x0AFF & 0x7FF)
end
end
@testset "data" begin
using CANalyze.Frames
for i=0:8
frame = CANFrame(0x0AFF, Integer[j for j=1:i]; is_extended=true)
@test data(frame) == UInt8[j for j=1:i]
end
end
@testset "dlc" begin
using CANalyze.Frames
for i=0:8
frame = CANFrame(0x0AFF, Integer[j for j=1:i]; is_extended=true)
@test dlc(frame) == i
end
end
@testset "is_extended" begin
using CANalyze.Frames
@testset "is_extended_1" begin
frame = CANFrame(0x0AFF; is_extended=true)
@test is_extended(frame) == true
@test is_standard(frame) == false
end
@testset "is_extended_2" begin
frame = CANFrame(0x0AFF; is_extended=false)
@test is_extended(frame) == false
@test is_standard(frame) == true
end
end
@testset "max_size" begin
using CANalyze.Frames
@testset "max_size_1" begin
frame = CANFrame(0x0AFF; is_extended=true)
@test max_size(typeof(frame)) == 8
end
@testset "max_size_2" begin
frame = CANFdFrame(0x0AFF; is_extended=true)
@test max_size(typeof(frame)) == 64
end
end
@testset "too_much_data" begin
using CANalyze.Frames
@testset "too_much_data_1" begin
@test_throws DomainError CANFrame(0x0AFF, [i for i=1:9]; is_extended=true)
end
@testset "too_much_data_2" begin
@test_throws DomainError CANFdFrame(0x0AFF, [i for i=1:65]; is_extended=true)
end
end
| CANalyze | https://github.com/tsabelmann/CANalyze.jl.git |
[
"MIT"
] | 1.6.0 | 2bb2fd8988fa976a3e057bbe6197414d77d5e29d | test/Messages.jl | code | 6322 | using Test
@info "CANalyze.Messages tests..."
@testset "message" begin
import CANalyze.Signals
import CANalyze.Messages
@testset "message_1" begin
signal1 = Signals.NamedSignal("A", nothing, nothing, Signals.Unsigned(0, 32, 1, 0, :little_endian))
signal2 = Signals.NamedSignal("B", nothing, nothing, Signals.Unsigned(40, 17, 2, 20, :big_endian))
signal3 = Signals.NamedSignal("C", nothing, nothing, Signals.Unsigned(32, 8, 2, 20, :little_endian))
@test_throws DomainError Messages.Message(0x1FF, 8, "", signal1, signal2, signal3; strict=true)
end
@testset "message_2" begin
signal1 = Signals.NamedSignal("A", nothing, nothing, Signals.Unsigned(0, 32, 1, 0, :little_endian))
signal2 = Signals.NamedSignal("B", nothing, nothing, Signals.Unsigned(40, 17, 2, 20, :big_endian))
signal3 = Signals.NamedSignal("C", nothing, nothing, Signals.Unsigned(32, 8, 2, 20, :little_endian))
m = Messages.Message(0x1FF, 8, "ABC", signal1, signal2, signal3; strict=true)
@test true
end
@testset "message_3" begin
signal1 = Signals.NamedSignal("A", nothing, nothing, Signals.Unsigned(0, 32, 1, 0, :little_endian))
signal2 = Signals.NamedSignal("B", nothing, nothing, Signals.Unsigned(40, 17, 2, 20, :big_endian))
signal3 = Signals.NamedSignal("C", nothing, nothing, Signals.Unsigned(32, 9, 2, 20, :little_endian))
@test_throws DomainError Messages.Message(0x1FF, 8, "ABC", signal1, signal2, signal3; strict=true)
end
@testset "message_4" begin
signal1 = Signals.NamedSignal("A", nothing, nothing, Signals.Unsigned(0, 32, 1, 0, :little_endian))
signal2 = Signals.NamedSignal("B", nothing, nothing, Signals.Unsigned(40, 17, 2, 20, :big_endian))
signal3 = Signals.NamedSignal("C", nothing, nothing, Signals.Unsigned(32, 8, 2, 20, :little_endian))
@test_throws DomainError Messages.Message(0x1FF, 6, "ABC", signal1, signal2, signal3; strict=true)
end
@testset "message_4" begin
signal1 = Signals.NamedSignal("A", nothing, nothing, Signals.Unsigned(0, 32, 1, 0, :little_endian))
signal2 = Signals.NamedSignal("B", nothing, nothing, Signals.Unsigned(40, 17, 2, 20, :big_endian))
signal3 = Signals.NamedSignal("B", nothing, nothing, Signals.Unsigned(32, 8, 2, 20, :little_endian))
@test_throws DomainError Messages.Message(0x1FF, 8, "ABC", signal1, signal2, signal3; strict=true)
end
@testset "frame_id_1" begin
signal1 = Signals.NamedSignal("A", nothing, nothing, Signals.Unsigned(0, 32, 1, 0, :little_endian))
signal2 = Signals.NamedSignal("B", nothing, nothing, Signals.Unsigned(40, 17, 2, 20, :big_endian))
signal3 = Signals.NamedSignal("C", nothing, nothing, Signals.Unsigned(32, 8, 2, 20, :little_endian))
m = Messages.Message(0x1FF, 8, "ABC", signal1, signal2, signal3; strict=true)
@test Messages.frame_id(m) == 0x1FF
end
@testset "dlc_1" begin
signal1 = Signals.NamedSignal("A", nothing, nothing, Signals.Unsigned(0, 32, 1, 0, :little_endian))
signal2 = Signals.NamedSignal("B", nothing, nothing, Signals.Unsigned(40, 17, 2, 20, :big_endian))
signal3 = Signals.NamedSignal("C", nothing, nothing, Signals.Unsigned(32, 8, 2, 20, :little_endian))
m = Messages.Message(0x1FF, 8, "ABC", signal1, signal2, signal3; strict=true)
@test Messages.dlc(m) == 8
end
@testset "name_1" begin
signal1 = Signals.NamedSignal("A", nothing, nothing, Signals.Unsigned(0, 32, 1, 0, :little_endian))
signal2 = Signals.NamedSignal("B", nothing, nothing, Signals.Unsigned(40, 17, 2, 20, :big_endian))
signal3 = Signals.NamedSignal("C", nothing, nothing, Signals.Unsigned(32, 8, 2, 20, :little_endian))
m = Messages.Message(0x1FF, 8, "ABC", signal1, signal2, signal3; strict=true)
@test Messages.name(m) == "ABC"
end
@testset "get_1" begin
signal1 = Signals.NamedSignal("A", nothing, nothing, Signals.Unsigned(0, 32, 1, 0, :little_endian))
signal2 = Signals.NamedSignal("B", nothing, nothing, Signals.Unsigned(40, 17, 2, 20, :big_endian))
signal3 = Signals.NamedSignal("C", nothing, nothing, Signals.Unsigned(32, 8, 2, 20, :little_endian))
m = Messages.Message(0x1FF, 8, "ABC", signal1, signal2, signal3; strict=true)
@test m["A"] == signal1
@test m["B"] == signal2
@test m["C"] == signal3
end
@testset "get_2" begin
signal1 = Signals.NamedSignal("A", nothing, nothing, Signals.Unsigned(0, 32, 1, 0, :little_endian))
signal2 = Signals.NamedSignal("B", nothing, nothing, Signals.Unsigned(40, 17, 2, 20, :big_endian))
signal3 = Signals.NamedSignal("C", nothing, nothing, Signals.Unsigned(32, 8, 2, 20, :little_endian))
m = Messages.Message(0x1FF, 8, "ABC", signal1, signal2, signal3; strict=true)
@test_throws KeyError m["D"]
end
@testset "get_3" begin
signal1 = Signals.NamedSignal("A", nothing, nothing, Signals.Unsigned(0, 32, 1, 0, :little_endian))
signal2 = Signals.NamedSignal("B", nothing, nothing, Signals.Unsigned(40, 17, 2, 20, :big_endian))
signal3 = Signals.NamedSignal("C", nothing, nothing, Signals.Unsigned(32, 8, 2, 20, :little_endian))
m = Messages.Message(0x1FF, 8, "ABC", signal1, signal2, signal3; strict=true)
@test get(m, "A", nothing) == signal1
@test get(m, "B", nothing) == signal2
@test get(m, "C", nothing) == signal3
@test get(m, "D", nothing) == nothing
end
@testset "iterate_1" begin
signal1 = Signals.NamedSignal("A", nothing, nothing, Signals.Unsigned(0, 32, 1, 0, :little_endian))
signal2 = Signals.NamedSignal("B", nothing, nothing, Signals.Unsigned(40, 17, 2, 20, :big_endian))
signal3 = Signals.NamedSignal("C", nothing, nothing, Signals.Unsigned(32, 8, 2, 20, :little_endian))
signals = [signal1, signal2, signal3]
m = Messages.Message(0x1FF, 8, "ABC", signal1, signal2, signal3; strict=true)
for (n, sig) in m
if sig in signals
@test sig == m[n]
@test sig == m[Signals.name(sig)]
else
@test false
end
end
end
end
| CANalyze | https://github.com/tsabelmann/CANalyze.jl.git |
[
"MIT"
] | 1.6.0 | 2bb2fd8988fa976a3e057bbe6197414d77d5e29d | test/Signals.jl | code | 21097 | using Test
@info "CANalyze.Signals tests..."
@testset "bit_signal" begin
using CANalyze.Signals
@testset "bit_signal_1" begin
signal = Bit(20)
@test true
end
@testset "bit_signal_2" begin
signal = Bit(start=20)
@test true
end
@testset "start_1" begin
signal = Bit(start=20)
@test start(signal) == 20
end
@testset "byte_order_1" begin
signal = Bit(start=20)
@test byte_order(signal) == :little_endian
end
@testset "length_1" begin
signal = Bit(start=20)
@test length(signal) == 1
end
end
@testset "unsigned_signal" begin
using CANalyze.Signals
@testset "unsigned_signal_1" begin
signal = Signals.Unsigned(0, 8, 1.0, 0.0, :little_endian)
@test true
end
@testset "unsigned_signal_2" begin
signal = Signals.Unsigned(start=0, length=8, factor=1.0, offset=0.0,
byte_order=:little_endian)
@test true
end
@testset "unsigned_signal_3" begin
signal = Signals.Unsigned{Float16}(0, 8)
@test true
end
@testset "unsigned_signal_4" begin
signal = Signals.Unsigned{Float16}(start=0, length=8)
@test true
end
@testset "unsigned_signal_5" begin
@test_throws DomainError Signals.Unsigned{Float16}(start=0, length=0)
end
@testset "unsigned_signal_6" begin
@test_throws DomainError Signals.Unsigned{Float16}(start=0, length=0,
byte_order=:mixed_endian)
end
@testset "start_1" begin
signal = Signals.Unsigned(23, 8, 1.0, 0.0, :little_endian)
@test start(signal) == 23
end
@testset "length_1" begin
signal = Signals.Unsigned(0, 8, 1.0, 0.0, :little_endian)
@test length(signal) == 8
end
@testset "factor_1" begin
signal = Signals.Unsigned(0, 8, 1.0, 0.0, :little_endian)
@test factor(signal) == 1
end
@testset "offset_1" begin
signal = Signals.Unsigned(0, 8, 1.0, 1337, :little_endian)
@test offset(signal) == 1337
end
@testset "byte_order_1" begin
signal = Signals.Unsigned(0, 8, 1.0, 0.0, :little_endian)
@test byte_order(signal) == :little_endian
end
@testset "byte_order_2" begin
signal = Signals.Unsigned(0, 8, 1.0, 0.0, :big_endian)
@test byte_order(signal) == :big_endian
end
end
@testset "signed_signal" begin
using CANalyze.Signals
@testset "signed_signal_1" begin
signal = Signals.Signed(0, 8, 1.0, 0.0, :little_endian)
@test true
end
@testset "signed_signal_2" begin
signal = Signals.Signed(start=0, length=8, factor=1.0, offset=0.0,
byte_order=:little_endian)
@test true
end
@testset "signed_signal_3" begin
signal = Signals.Signed{Float16}(0, 8)
@test true
end
@testset "signed_signal_4" begin
signal = Signals.Signed{Float16}(start=0, length=8)
@test true
end
@testset "signed_signal_5" begin
@test_throws DomainError Signals.Signed{Float16}(start=0, length=0)
end
@testset "signed_signal_6" begin
@test_throws DomainError Signals.Signed{Float16}(start=0, length=0,
byte_order=:mixed_endian)
end
@testset "start_1" begin
signal = Signals.Signed(23, 8, 1.0, 0.0, :little_endian)
@test start(signal) == 23
end
@testset "length_1" begin
signal = Signals.Signed(0, 8, 1.0, 0.0, :little_endian)
@test length(signal) == 8
end
@testset "factor_1" begin
signal = Signals.Signed(0, 8, 1.0, 0.0, :little_endian)
@test factor(signal) == 1
end
@testset "offset_1" begin
signal = Signals.Signed(0, 8, 1.0, 1337, :little_endian)
@test offset(signal) == 1337
end
@testset "byte_order_1" begin
signal = Signals.Signed(0, 8, 1.0, 0.0, :little_endian)
@test byte_order(signal) == :little_endian
end
@testset "byte_order_2" begin
signal = Signals.Signed(0, 8, 1.0, 0.0, :big_endian)
@test byte_order(signal) == :big_endian
end
end
@testset "float16_signal" begin
using CANalyze.Signals
@testset "float16_signal_1" begin
signal = Signals.Float16Signal(0)
@test true
end
@testset "float16_signal_2" begin
signal = Signals.Float16Signal(start=0, factor=1.0, offset=0.0,
byte_order=:little_endian)
@test true
end
@testset "float16_signal_3" begin
signal = Signals.Float16Signal(start=0, factor=1, offset=0,
byte_order=:little_endian)
@test true
end
@testset "start_1" begin
signal = Signals.Float16Signal(start=42, factor=1.0, offset=0.0,
byte_order=:little_endian)
@test start(signal) == 42
end
@testset "length_1" begin
signal = Signals.Float16Signal(start=0, factor=1.0, offset=0.0,
byte_order=:little_endian)
@test length(signal) == 16
end
@testset "factor_1" begin
signal = Signals.Float16Signal(start=0, factor=1.0, offset=0.0,
byte_order=:little_endian)
@test factor(signal) == 1
end
@testset "offset_1" begin
signal = Signals.Float16Signal(start=0, factor=1.0, offset=1337,
byte_order=:little_endian)
@test offset(signal) == 1337
end
@testset "byte_order_1" begin
signal = Signals.Float16Signal(start=0, factor=1.0, offset=0.0,
byte_order=:little_endian)
@test byte_order(signal) == :little_endian
end
@testset "byte_order_2" begin
signal = Signals.Float16Signal(start=0, factor=1.0, offset=0.0,
byte_order=:big_endian)
@test byte_order(signal) == :big_endian
end
end
@testset "float32_signal" begin
using CANalyze.Signals
@testset "float32_signal_1" begin
signal = Signals.Float32Signal(0)
@test true
end
@testset "float32_signal_2" begin
signal = Signals.Float32Signal(start=0, factor=1.0, offset=0.0,
byte_order=:little_endian)
@test true
end
@testset "float32_signal_3" begin
signal = Signals.Float32Signal(start=0, factor=1, offset=0,
byte_order=:little_endian)
@test true
end
@testset "start_1" begin
signal = Signals.Float32Signal(start=42, factor=1.0, offset=0.0,
byte_order=:little_endian)
@test start(signal) == 42
end
@testset "length_1" begin
signal = Signals.Float32Signal(start=0, factor=1.0, offset=0.0,
byte_order=:little_endian)
@test length(signal) == 32
end
@testset "factor_1" begin
signal = Signals.Float32Signal(start=0, factor=1.0, offset=0.0,
byte_order=:little_endian)
@test factor(signal) == 1
end
@testset "offset_1" begin
signal = Signals.Float32Signal(start=0, factor=1.0, offset=1337,
byte_order=:little_endian)
@test offset(signal) == 1337
end
@testset "byte_order_1" begin
signal = Signals.Float32Signal(start=0, factor=1.0, offset=0.0,
byte_order=:little_endian)
@test byte_order(signal) == :little_endian
end
@testset "byte_order_2" begin
signal = Signals.Float32Signal(start=0, factor=1.0, offset=0.0,
byte_order=:big_endian)
@test byte_order(signal) == :big_endian
end
end
@testset "float64_signal" begin
using CANalyze.Signals
@testset "float64_signal_1" begin
signal = Signals.Float64Signal(0)
@test true
end
@testset "float64_signal_2" begin
signal = Signals.Float64Signal(start=0, factor=1.0, offset=0.0,
byte_order=:little_endian)
@test true
end
@testset "float64_signal_3" begin
signal = Signals.Float64Signal(start=0, factor=1, offset=0,
byte_order=:little_endian)
@test true
end
@testset "start_1" begin
signal = Signals.Float64Signal(start=42, factor=1.0, offset=0.0,
byte_order=:little_endian)
@test start(signal) == 42
end
@testset "length_1" begin
signal = Signals.Float64Signal(start=0, factor=1.0, offset=0.0,
byte_order=:little_endian)
@test length(signal) == 64
end
@testset "factor_1" begin
signal = Signals.Float64Signal(start=0, factor=1.0, offset=0.0,
byte_order=:little_endian)
@test factor(signal) == 1
end
@testset "offset_1" begin
signal = Signals.Float64Signal(start=0, factor=1.0, offset=1337,
byte_order=:little_endian)
@test offset(signal) == 1337
end
@testset "byte_order_1" begin
signal = Signals.Float64Signal(start=0, factor=1.0, offset=0.0,
byte_order=:little_endian)
@test byte_order(signal) == :little_endian
end
@testset "byte_order_2" begin
signal = Signals.Float64Signal(start=0, factor=1.0, offset=0.0,
byte_order=:big_endian)
@test byte_order(signal) == :big_endian
end
end
# Smoke test for the generic all-keyword FloatSignal constructor: reaching
# the trailing @test means construction raised no MethodError/DomainError.
@testset "float_signal" begin
    using CANalyze.Signals
    @testset "float_signal_1" begin
        signal = Signals.FloatSignal(start=0, factor=2, offset=-1337,
                                     byte_order=:big_endian)
        @test true
    end
end
@testset "raw_signal" begin
using CANalyze.Signals
@testset "raw_signal_1" begin
signal = Signals.Raw(0, 8, :little_endian)
@test true
end
@testset "raw_signal_2" begin
signal = Signals.Raw(start=0, length=8, byte_order=:little_endian)
@test true
end
@testset "raw_signal_3" begin
@test_throws DomainError Signals.Raw(start=0, length=0, byte_order=:little_endian)
end
@testset "raw_signal_4" begin
@test_throws DomainError Signals.Raw(start=0, length=1, byte_order=:mixed_endian)
end
@testset "start_1" begin
signal = Signals.Raw(start=42, length=8, byte_order=:little_endian)
@test start(signal) == 42
end
@testset "length_1" begin
signal = Signals.Raw(start=0, length=23, byte_order=:little_endian)
@test length(signal) == 23
end
@testset "byte_order_1" begin
signal = Signals.Raw(start=0, length=8, byte_order=:little_endian)
@test byte_order(signal) == :little_endian
end
@testset "byte_order_2" begin
signal = Signals.Raw(start=0, length=8, byte_order=:big_endian)
@test byte_order(signal) == :big_endian
end
end
@testset "named_signal" begin
using CANalyze.Signals
@testset "named_signal_1" begin
s = Signals.Raw(0, 8, :little_endian)
signal = Signals.NamedSignal("ABC", nothing, nothing, s)
@test true
end
@testset "named_signal_2" begin
s = Signals.Raw(0, 8, :little_endian)
signal = Signals.NamedSignal(name="ABC", unit=nothing, default=nothing,
signal=s)
@test true
end
@testset "named_signal_3" begin
s = Signals.Raw(0, 8, :little_endian)
@test_throws DomainError Signals.NamedSignal(name="",
unit=nothing,
default=nothing,
signal=s)
end
@testset "name_1" begin
s = Signals.Raw(0, 8, :little_endian)
signal = Signals.NamedSignal(name="ABC", unit=nothing, default=nothing,
signal=s)
@test name(signal) == "ABC"
end
@testset "unit_1" begin
s = Signals.Raw(0, 8, :little_endian)
signal = Signals.NamedSignal(name="ABC", unit=nothing, default=nothing,
signal=s)
@test unit(signal) == nothing
end
@testset "unit_2" begin
s = Signals.Raw(0, 8, :little_endian)
signal = Signals.NamedSignal(name="ABC", unit="Ah", default=nothing,
signal=s)
@test unit(signal) == "Ah"
end
@testset "default_1" begin
s = Signals.Raw(0, 8, :little_endian)
signal = Signals.NamedSignal(name="ABC", unit="Ah", default=nothing,
signal=s)
@test default(signal) == nothing
end
@testset "default_2" begin
s = Signals.Raw(0, 8, :little_endian)
signal = Signals.NamedSignal(name="ABC", unit="Ah", default=UInt(1337),
signal=s)
@test default(signal) == 1337
end
@testset "signal_1" begin
s = Signals.Raw(0, 8, :little_endian)
signal = Signals.NamedSignal(name="ABC", unit="Ah", default=nothing,
signal=s)
@test Signals.signal(signal) == s
end
end
@testset "bits" begin
using CANalyze.Signals
@testset "bit_1" begin
signal = Bit(42)
bits = Signals.Bits(signal)
@test bits == Signals.Bits(42)
end
@testset "unsigned_1" begin
signal = Signals.Unsigned(7, 5, 1.0, 0.0, :little_endian)
bits = Signals.Bits(signal)
@test bits == Signals.Bits(7, 8, 9, 10, 11)
end
@testset "unsigned_2" begin
signal = Signals.Unsigned(7, 5, 1.0, 0.0, :big_endian)
bits = Signals.Bits(signal)
@test bits == Signals.Bits(7, 6, 5, 4, 3)
end
@testset "signed_1" begin
signal = Signals.Signed(7, 5, 1.0, 0.0, :little_endian)
bits = Signals.Bits(signal)
@test bits == Signals.Bits(7, 8, 9, 10, 11)
end
@testset "signed_2" begin
signal = Signals.Signed(3, 5, 1.0, 0.0, :big_endian)
bits = Signals.Bits(signal)
@test bits == Signals.Bits(3, 2, 1, 0, 15)
end
@testset "float16_1" begin
signal = Signals.Float16Signal(0; byte_order=:little_endian)
bits = Signals.Bits(signal)
@test bits == Signals.Bits(Set{UInt16}([i for i=0:15]))
end
@testset "float16_2" begin
signal = Signals.Float16Signal(0; byte_order=:big_endian)
bits = Signals.Bits(signal)
@test bits == Signals.Bits(0, 15, 14, 13, 12, 11, 10, 9, 8, 23, 22, 21, 20, 19, 18, 17)
end
@testset "float32_1" begin
signal = Signals.Float32Signal(0; byte_order=:little_endian)
bits = Signals.Bits(signal)
@test bits == Signals.Bits(Set{UInt16}([i for i=0:31]))
end
@testset "float32_2" begin
signal = Signals.Float32Signal(39; byte_order=:big_endian)
bits = Signals.Bits(signal)
@test bits == Signals.Bits(Set{UInt16}([i for i=32:63]))
end
@testset "float64_1" begin
signal = Signals.Float64Signal(0; byte_order=:little_endian)
bits = Signals.Bits(signal)
@test bits == Signals.Bits(Set{UInt16}([i for i=0:63]))
end
@testset "float64_2" begin
signal = Signals.Float64Signal(7; byte_order=:big_endian)
bits = Signals.Bits(signal)
@test bits == Signals.Bits(Set{UInt16}([i for i=0:63]))
end
@testset "named_signal_1" begin
s = Signals.Float64Signal(7; byte_order=:big_endian)
signal = Signals.NamedSignal("ABC", nothing, nothing, s)
bits = Signals.Bits(signal)
@test bits == Signals.Bits(Set{UInt16}([i for i=0:63]))
end
end
# share_bits compares the bit-occupancy sets (Bits) of two signals.
@testset "share_bits" begin
    using CANalyze.Signals
    # Two identical 16-bit layouts occupy the same bits, so they share bits.
    @testset "share_bits_1" begin
        sig1 = Signals.Float16Signal(0; byte_order=:little_endian)
        sig2 = Signals.Float16Signal(0; byte_order=:little_endian)
        bits1 = Signals.Bits(sig1)
        bits2 = Signals.Bits(sig2)
        @test Signals.share_bits(bits1, bits2)
    end
    # Disjoint layouts (bits 0..15 vs. 16..31) must not share any bit.
    @testset "share_bits_2" begin
        sig1 = Signals.Float16Signal(0; byte_order=:little_endian)
        sig2 = Signals.Float16Signal(16; byte_order=:little_endian)
        bits1 = Signals.Bits(sig1)
        bits2 = Signals.Bits(sig2)
        @test !Signals.share_bits(bits1, bits2)
    end
end
# overlap operates directly on signals; expected results mirror the
# share_bits cases above (same layout overlaps, disjoint layout does not).
@testset "overlap" begin
    using CANalyze.Signals
    @testset "overlap_1" begin
        sig1 = Signals.Float16Signal(0; byte_order=:little_endian)
        sig2 = Signals.Float16Signal(0; byte_order=:little_endian)
        @test Signals.overlap(sig1, sig2)
    end
    @testset "overlap_2" begin
        sig1 = Signals.Float16Signal(0; byte_order=:little_endian)
        sig2 = Signals.Float16Signal(16; byte_order=:little_endian)
        @test !Signals.overlap(sig1, sig2)
    end
end
@testset "check" begin
using CANalyze.Signals
@testset "bit_1" begin
signal = Signals.Bit(0)
@test Signals.check(signal, UInt8(1))
end
@testset "bit_2" begin
signal = Signals.Bit(8)
@test !Signals.check(signal, UInt8(1))
end
@testset "unsigned_1" begin
signal = Signals.Unsigned(7, 5, 1.0, 0.0, :little_endian)
@test Signals.check(signal, UInt8(2))
end
@testset "unsigned_2" begin
signal = Signals.Unsigned(7, 9, 1.0, 0.0, :big_endian)
@test Signals.check(signal, UInt8(2))
end
@testset "signed_1" begin
signal = Signals.Signed(7, 5, 1.0, 0.0, :little_endian)
@test Signals.check(signal, UInt8(2))
end
@testset "signed_2" begin
signal = Signals.Signed(7, 9, 1.0, 0.0, :big_endian)
@test Signals.check(signal, UInt8(2))
end
@testset "float16_1" begin
signal = Signals.Float16Signal(0; byte_order=:little_endian)
@test Signals.check(signal, UInt8(2))
end
@testset "float16_2" begin
signal = Signals.Float16Signal(0; byte_order=:big_endian)
@test !Signals.check(signal, UInt8(2))
end
@testset "float32_1" begin
signal = Signals.Float32Signal(0; byte_order=:little_endian)
@test Signals.check(signal, UInt8(4))
end
@testset "float32_2" begin
signal = Signals.Float32Signal(0; byte_order=:big_endian)
@test !Signals.check(signal, UInt8(4))
end
@testset "float64_1" begin
signal = Signals.Float64Signal(0; byte_order=:little_endian)
@test Signals.check(signal, UInt8(8))
end
@testset "float64_2" begin
signal = Signals.Float64Signal(0; byte_order=:big_endian)
@test !Signals.check(signal, UInt8(8))
end
@testset "raw_1" begin
signal = Signals.Raw(0, 64, :little_endian)
@test Signals.check(signal, UInt8(8))
end
@testset "raw_2" begin
signal = Signals.Raw(0, 64, :big_endian)
@test Signals.check(signal, UInt8(9))
end
@testset "named_signal_1" begin
s = Signals.Raw(0, 64, :big_endian)
signal = Signals.NamedSignal("ABC", nothing, nothing, s)
@test Signals.check(signal, UInt8(9))
end
end
@testset "equal" begin
using CANalyze.Signals
@testset "bit_1" begin
bit1 = Signals.Bit(20)
bit2 = Signals.Bit(20)
@test bit1 == bit2
end
@testset "bit_2" begin
bit1 = Signals.Bit(20)
bit2 = Signals.Bit(21)
@test !(bit1 == bit2)
end
@testset "unsigned_1" begin
sig1 = Signals.Unsigned(0, 8, 1, 0, :little_endian)
sig2 = Signals.Unsigned(0, 8, 1, 0, :little_endian)
@test sig1 == sig2
end
@testset "unsigned_2" begin
sig1 = Signals.Unsigned(0, 8, 1, 0, :little_endian)
sig2 = Signals.Unsigned(1, 8, 1, 0, :little_endian)
@test !(sig1 == sig2)
end
@testset "unsigned_3" begin
sig1 = Signals.Unsigned(0, 8, 1, 0, :little_endian)
sig2 = Signals.Unsigned(0, 9, 1, 0, :little_endian)
@test !(sig1 == sig2)
end
@testset "unsigned_4" begin
sig1 = Signals.Unsigned(0, 8, 1, 0, :little_endian)
sig2 = Signals.Unsigned(0, 8, 2, 0, :little_endian)
@test !(sig1 == sig2)
end
@testset "unsigned_5" begin
sig1 = Signals.Unsigned(0, 8, 1, 0, :little_endian)
sig2 = Signals.Unsigned(0, 8, 1, -1, :little_endian)
@test !(sig1 == sig2)
end
@testset "unsigned_6" begin
sig1 = Signals.Unsigned(0, 8, 1, 0, :little_endian)
sig2 = Signals.Unsigned(0, 8, 1, 0, :big_endian)
@test !(sig1 == sig2)
end
end
| CANalyze | https://github.com/tsabelmann/CANalyze.jl.git |
[
"MIT"
] | 1.6.0 | 2bb2fd8988fa976a3e057bbe6197414d77d5e29d | test/Utils.jl | code | 5062 | using Test
@info "CANalyze.Utils tests..."
# The host byte order must be exactly one of little- or big-endian:
# the two predicates may never both be false, nor both be true.
@testset "endian" begin
    using CANalyze.Utils
    @testset "is_little_or_big_endian" begin
        is_little = is_little_endian()
        is_big = is_big_endian()
        @test (is_little || is_big) == true
    end
    @testset "is_little_and_big_endian" begin
        is_little = is_little_endian()
        is_big = is_big_endian()
        @test (is_little && is_big) == false
    end
end
@testset "convert" begin
using CANalyze.Utils
@testset "to_bytes_1" begin
value = 1337
new_value = from_bytes(typeof(value), to_bytes(value))
@test value == new_value
end
@testset "from_bytes_1" begin
types = [UInt8, UInt16, UInt32, UInt64, UInt128]
for (i, type) in enumerate(types)
array = [UInt8(j) for j=1:2^(i-1)]
new_array = to_bytes(from_bytes(type, array))
@test array == new_array
end
end
@testset "from_bytes_2" begin
types = [Int8, Int16, Int32, Int64, Int128]
for (i, type) in enumerate(types)
array = [UInt8(j) for j=1:2^(i-1)]
new_array = to_bytes(from_bytes(type, array))
@test array == new_array
end
end
@testset "from_bytes_4" begin
types = [Float16, Float32, Float64]
for (i, type) in enumerate(types)
array = [UInt8(j) for j=1:2^i]
new_array = to_bytes(from_bytes(type, array))
@test array == new_array
end
end
end
@testset "mask" begin
using CANalyze.Utils
@testset "zero_mask" begin
@test zero_mask(UInt8) == zero(UInt8)
@test zero_mask(UInt16) == zero(UInt16)
@test zero_mask(UInt32) == zero(UInt32)
@test zero_mask(UInt64) == zero(UInt64)
@test zero_mask(UInt128) == zero(UInt128)
end
@testset "full_mask_1" begin
@test full_mask(UInt8) == UInt8(0xFF)
@test full_mask(UInt16) == UInt16(0xFFFF)
@test full_mask(UInt32) == UInt32(0xFFFFFFFF)
@test full_mask(UInt64) == UInt64(0xFFFFFFFFFFFFFFFF)
@test full_mask(UInt128) == UInt128(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)
end
@testset "full_mask_2" begin
@test mask(UInt8) == UInt8(0xFF)
@test mask(UInt16) == UInt16(0xFFFF)
@test mask(UInt32) == UInt32(0xFFFFFFFF)
@test mask(UInt64) == UInt64(0xFFFFFFFFFFFFFFFF)
@test mask(UInt128) == UInt128(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)
end
@testset "mask_1" begin
@test mask(UInt8, UInt8(0)) == UInt8(0b0)
@test mask(UInt8, UInt8(1)) == UInt8(0b1)
@test mask(UInt8, UInt8(2)) == UInt8(0b11)
@test mask(UInt8, UInt8(3)) == UInt8(0b111)
@test mask(UInt8, UInt8(4)) == UInt8(0b1111)
@test mask(UInt8, UInt8(5)) == UInt8(0b11111)
@test mask(UInt8, UInt8(6)) == UInt8(0b111111)
@test mask(UInt8, UInt8(7)) == UInt8(0b1111111)
@test mask(UInt8, UInt8(8)) == UInt8(0b11111111)
end
@testset "mask_2" begin
@test mask(UInt16, UInt8(0)) == UInt16(0b0)
value = UInt16(2)
for i in 1:16
@test mask(UInt16, UInt8(i)) == (value - 1)
value *= 2
end
end
@testset "mask_3" begin
@test mask(UInt32, UInt8(0)) == UInt32(0b0)
value = UInt16(2)
for i in 1:32
@test mask(UInt32, UInt8(i)) == (value - 1)
value *= 2
end
end
@testset "mask_4" begin
@test mask(UInt64, UInt8(0)) == UInt64(0b0)
value = UInt64(2)
for i in 1:64
@test mask(UInt64, UInt8(i)) == (value - 1)
value *= 2
end
end
@testset "mask_5" begin
@test mask(UInt128, UInt8(0)) == UInt128(0b0)
value = UInt128(2)
for i in 1:16
@test mask(UInt128, UInt8(i)) == (value - 1)
value *= 2
end
end
@testset "shifted_mask_1" begin
for i in 0:8
@test mask(UInt8, i, 0) == mask(UInt8, i)
end
end
@testset "shifted_mask_2" begin
for i in 0:16
@test mask(UInt16, i, 0) == mask(UInt16, i)
end
end
@testset "shifted_mask_3" begin
for i in 0:32
@test mask(UInt32, i, 0) == mask(UInt32, i)
end
end
@testset "shifted_mask_4" begin
for i in 0:64
@test mask(UInt64, i, 0) == mask(UInt64, i)
end
end
@testset "shifted_mask_5" begin
for i in 0:128
@test mask(UInt128, i, 0) == mask(UInt128, i)
end
end
@testset "bit_mask_1" begin
for T in [UInt8, UInt16, UInt32, UInt64, UInt128, Int8, Int16, Int32, Int64, Int128]
s = 8*sizeof(T) - 1
@test bit_mask(T, 0:s) == full_mask(T)
end
end
@testset "bit_mask_2" begin
for T in [UInt8, UInt16, UInt32, UInt64, UInt128, Int8, Int16, Int32, Int64, Int128]
s = 8*sizeof(T) - 1
@test bit_mask(T, s) == mask(T, 1, s)
end
end
end
| CANalyze | https://github.com/tsabelmann/CANalyze.jl.git |
[
"MIT"
] | 1.6.0 | 2bb2fd8988fa976a3e057bbe6197414d77d5e29d | test/runtests.jl | code | 169 | using Test
@info "Starting tests..."
include("Utils.jl")
include("Frames.jl")
include("Signals.jl")
include("Messages.jl")
include("Databases.jl")
include("Decode.jl")
| CANalyze | https://github.com/tsabelmann/CANalyze.jl.git |
[
"MIT"
] | 1.6.0 | 2bb2fd8988fa976a3e057bbe6197414d77d5e29d | README.md | docs | 1274 | # CANalyze.jl
[![Build status](https://github.com/tsabelmann/CANalyze.jl/workflows/CI/badge.svg)](https://github.com/tsabelmann/CANalyze.jl/actions)
[![codecov](https://codecov.io/gh/tsabelmann/CANalyze.jl/branch/main/graph/badge.svg?token=V7VSDSOX1H)](https://codecov.io/gh/tsabelmann/CANalyze.jl)
[![Documentation](https://img.shields.io/badge/docs-latest-blue.svg)](https://tsabelmann.github.io/CANalyze.jl/dev)
[![Code Style: Blue](https://img.shields.io/badge/code%20style-blue-4495d1.svg)](https://github.com/invenia/BlueStyle)
*Julia package for analyzing CAN-bus data using messages and variables*
## Installation
Start julia and open the package mode by entering `]`. Then enter
```julia
add CANalyze
```
This will install the package `CANalyze.jl` and all its dependencies.
## License / Terms of Usage
The source code of this project is licensed under the MIT license. This implies that
you are free to use, share, and adapt it. However, please give appropriate credit
by citing the project.
## Contact
If you have problems using the software, find mistakes, or have general questions please use
the [issue tracker](https://github.com/tsabelmann/CANalyze.jl/issues) to contact us.
## Contributors
* [Tim Lucas Sabelmann](https://github.com/tsabelmann) | CANalyze | https://github.com/tsabelmann/CANalyze.jl.git |
[
"MIT"
] | 1.6.0 | 2bb2fd8988fa976a3e057bbe6197414d77d5e29d | docs/src/decode.md | docs | 103 | ```@meta
CurrentModule = CANalyze
```
# CANTools.Decode
```@autodocs
Modules = [CANalyze.Decode]
```
| CANalyze | https://github.com/tsabelmann/CANalyze.jl.git |
[
"MIT"
] | 1.6.0 | 2bb2fd8988fa976a3e057bbe6197414d77d5e29d | docs/src/encode.md | docs | 103 | ```@meta
CurrentModule = CANalyze
```
# CANalyze.Encode
```@autodocs
Modules = [CANalyze.Encode]
```
| CANalyze | https://github.com/tsabelmann/CANalyze.jl.git |
[
"MIT"
] | 1.6.0 | 2bb2fd8988fa976a3e057bbe6197414d77d5e29d | docs/src/frames.md | docs | 103 | ```@meta
CurrentModule = CANalyze
```
# CANalyze.Frames
```@autodocs
Modules = [CANalyze.Frames]
```
| CANalyze | https://github.com/tsabelmann/CANalyze.jl.git |
[
"MIT"
] | 1.6.0 | 2bb2fd8988fa976a3e057bbe6197414d77d5e29d | docs/src/index.md | docs | 1272 | # CANalyze.jl
[![Build status](https://github.com/tsabelmann/CANalyze.jl/workflows/CI/badge.svg)](https://github.com/tsabelmann/CANalyze.jl/actions)
[![codecov](https://codecov.io/gh/tsabelmann/CANalyze.jl/branch/main/graph/badge.svg?token=V7VSDSOX1H)](https://codecov.io/gh/tsabelmann/CANalyze.jl)
[![Documentation](https://img.shields.io/badge/docs-latest-blue.svg)](https://tsabelmann.github.io/CANalyze.jl/dev)
[![Code Style: Blue](https://img.shields.io/badge/code%20style-blue-4495d1.svg)](https://github.com/invenia/BlueStyle)
*Julia package for analyzing CAN-bus data using messages and variables*
## Installation
Start julia and open the package mode by entering `]`. Then enter
```julia
add CANalyze
```
This will install the packages `CANalyze.jl` and all its dependencies.
## License / Terms of Usage
The source code of this project is licensed under the MIT license. This implies that
you are free to use, share, and adapt it. However, please give appropriate credit
by citing the project.
## Contact
If you have problems using the software, find mistakes, or have general questions please use
the [issue tracker](https://github.com/tsabelmann/CANTools.jl/issues) to contact us.
## Contributors
* [Tim Lucas Sabelmann](https://github.com/tsabelmann) | CANalyze | https://github.com/tsabelmann/CANalyze.jl.git |
[
"MIT"
] | 1.6.0 | 2bb2fd8988fa976a3e057bbe6197414d77d5e29d | docs/src/messages.md | docs | 107 | ```@meta
CurrentModule = CANalyze
```
# CANalyze.Messages
```@autodocs
Modules = [CANalyze.Messages]
```
| CANalyze | https://github.com/tsabelmann/CANalyze.jl.git |
[
"MIT"
] | 1.6.0 | 2bb2fd8988fa976a3e057bbe6197414d77d5e29d | docs/src/signals.md | docs | 105 | ```@meta
CurrentModule = CANalyze
```
# CANalyze.Signals
```@autodocs
Modules = [CANalyze.Signals]
```
| CANalyze | https://github.com/tsabelmann/CANalyze.jl.git |
[
"MIT"
] | 1.6.0 | 2bb2fd8988fa976a3e057bbe6197414d77d5e29d | docs/src/utils.md | docs | 101 | ```@meta
CurrentModule = CANalyze
```
# CANalyze.Utils
```@autodocs
Modules = [CANalyze.Utils]
```
| CANalyze | https://github.com/tsabelmann/CANalyze.jl.git |
[
"MIT"
] | 1.6.0 | 2bb2fd8988fa976a3e057bbe6197414d77d5e29d | docs/src/examples/database.md | docs | 1106 | ```@meta
CurrentModule = CANalyze
```
# Database
```julia
using CANalyze.Signals
using CANalyze.Messages
using CANalyze.Databases
sig1 = NamedSignal("A", nothing, nothing, Float32Signal(start=0, byte_order=:little_endian))
sig2 = NamedSignal("B", nothing, nothing, Unsigned(start=40,
length=17,
factor=2,
offset=20,
byte_order=:big_endian))
sig3 = NamedSignal("C", nothing, nothing, Unsigned(start=32,
length=8,
factor=2,
offset=20,
byte_order=:little_endian))
message1 = Message(0x1FD, 8, "A", sig1; strict=true)
message2 = Message(0x1FE, 8, "B", sig1, sig2; strict=true)
message3 = Message(0x1FF, 8, "C", sig1, sig2, sig3; strict=true)
database = Database(message1, message2, message3)
```
| CANalyze | https://github.com/tsabelmann/CANalyze.jl.git |
[
"MIT"
] | 1.6.0 | 2bb2fd8988fa976a3e057bbe6197414d77d5e29d | docs/src/examples/decode.md | docs | 1432 | ```@meta
CurrentModule = CANalyze
```
# Decode
## Signal
```julia
using CANalyze.Frames
using CANalyze.Signals
using CANalyze.Decode
sig1 = Unsigned(start=0, length=8, factor=1.0, offset=-1337f0, byte_order=:little_endian)
sig2 = NamedSignal("A", nothing, nothing, Float32Signal(start=0, byte_order=:little_endian))
frame = CANFrame(20, [1, 2, 3, 4, 5, 6, 7, 8])
value1 = decode(sig1, frame)
value2 = decode(sig2, frame)
```
## Message
```julia
using CANalyze.Frames
using CANalyze.Signals
using CANalyze.Messages
using CANalyze.Decode
sig1 = NamedSignal("A", nothing, nothing, Float32Signal(start=0, byte_order=:little_endian))
sig2 = NamedSignal("B", nothing, nothing, Unsigned(start=40,
length=17,
factor=2,
offset=20,
byte_order=:big_endian))
sig3 = NamedSignal("C", nothing, nothing, Unsigned(start=32,
length=8,
factor=2,
offset=20,
byte_order=:little_endian))
message = Message(0x1FF, 8, "ABC", sig1, sig2, sig3; strict=true)
frame = CANFrame(20, [1, 2, 3, 4, 5, 6, 7, 8])
value = decode(message, frame)
```
| CANalyze | https://github.com/tsabelmann/CANalyze.jl.git |
[
"MIT"
] | 1.6.0 | 2bb2fd8988fa976a3e057bbe6197414d77d5e29d | docs/src/examples/message.md | docs | 919 | ```@meta
CurrentModule = CANalyze
```
# Message
```julia
using CANalyze.Signals
using CANalyze.Messages
sig1 = NamedSignal("A", nothing, nothing, Float32Signal(start=0, byte_order=:little_endian))
sig2 = NamedSignal("B", nothing, nothing, Unsigned(start=40,
length=17,
factor=2,
offset=20,
byte_order=:big_endian))
sig3 = NamedSignal("C", nothing, nothing, Unsigned(start=32,
length=8,
factor=2,
offset=20,
byte_order=:little_endian))
message = Message(0x1FF, 8, "ABC", sig1, sig2, sig3; strict=true)
```
| CANalyze | https://github.com/tsabelmann/CANalyze.jl.git |
[
"MIT"
] | 1.6.0 | 2bb2fd8988fa976a3e057bbe6197414d77d5e29d | docs/src/examples/signal.md | docs | 2264 | ```@meta
CurrentModule = CANalyze
```
# Signals
Signals are the basic blocks of the CAN-bus data analysis, i.e., decoding or
encoding CAN-bus data.
## Bit
```julia
using CANalyze.Signals
bit1 = Bit(20)
bit2 = Bit(start=20)
```
## Unsigned
```julia
using CANalyze.Signals
sig1 = Unsigned{Float32}(0, 1)
sig2 = Unsigned{Float64}(start=0, length=8, factor=2, offset=20)
sig3 = Unsigned(0, 8, 1, 0, :little_endian)
sig4 = Unsigned(start=0, length=8, factor=1.0, offset=-1337f0, byte_order=:little_endian)
```
## Signed
```julia
using CANalyze.Signals
sig1 = Signed{Float32}(0, 1)
sig2 = Signed{Float64}(start=3, length=16, factor=2, offset=20, byte_order=:big_endian)
sig3 = Signed(0, 8, 1, 0, :little_endian)
sig4 = Signed(start=0, length=8, factor=1.0, offset=-1337f0, byte_order=:little_endian)
```
## FloatSignal
```julia
using CANalyze.Signals
sig1 = FloatSignal(0, 1.0, 0.0, :little_endian)
sig2 = FloatSignal(start=0, factor=1.0, offset=0.0, byte_order=:little_endian)
```
## Float16Signal
```julia
using CANalyze.Signals
sig1 = Float16Signal(0)
sig2 = Float16Signal(0, factor=1.0, offset=0.0, byte_order=:little_endian)
sig3 = Float16Signal(start=0, factor=1.0, offset=0.0, byte_order=:little_endian)
```
## Float32Signal
```julia
using CANalyze.Signals
sig1 = Float32Signal(0)
sig2 = Float32Signal(0, factor=1.0, offset=0.0, byte_order=:little_endian)
sig3 = Float32Signal(start=0, factor=1.0, offset=0.0, byte_order=:little_endian)
```
## Float64Signal
```julia
using CANalyze.Signals
sig1 = Float64Signal(0)
sig2 = Float64Signal(0, factor=1.0, offset=0.0, byte_order=:little_endian)
sig3 = Float64Signal(start=0, factor=1.0, offset=0.0, byte_order=:little_endian)
```
## Raw
```julia
using CANalyze.Signals
sig1 = Raw(0, 8, :big_endian)
sig2 = Raw(start=21, length=7, byte_order=:little_endian)
```
## NamedSignal
```julia
using CANalyze.Signals
sig1 = NamedSignal("ABC",
nothing,
nothing,
Float32Signal(start=0, byte_order=:little_endian))
sig2 = NamedSignal(name="ABC",
unit=nothing,
default=nothing,
signal=Float32Signal(start=0, byte_order=:little_endian))
```
| CANalyze | https://github.com/tsabelmann/CANalyze.jl.git |
[
"MIT"
] | 1.0.0 | 1381f670499344657ae955634ab2ff4776b394c9 | _readme/generate_examples.jl | code | 1662 | using SummaryTables
using Typst_jll
using DataFrames
using Statistics
# Render a table object to an SVG file by compiling it with Typst.
#
# The table is written as `text/typst` markup into a temporary `input.typ`
# (with an auto-sized page so the output is cropped tightly around the
# table), compiled with the bundled Typst binary, and the resulting
# `output.svg` is moved to `svgfile`, overwriting any existing file.
# The temporary directory is cleaned up automatically by `mktempdir`.
function save_svg(tbl, svgfile)
    mktempdir() do dir
        cd(dir) do
            open("input.typ", "w") do io
                println(io, """
                #set page(margin: 3pt, width: auto, height: auto)
                #set text(12pt)
                """)
                show(io, MIME"text/typst"(), tbl)
            end
            # Typst_jll.typst() points at the hermetic, vendored executable.
            run(`$(Typst_jll.typst()) compile input.typ output.svg`)
        end
        mv(joinpath(dir, "output.svg"), svgfile, force = true)
    end
    return
end
# --- Example 1: demographics "table one", grouped by sex, for the README.
data = DataFrame(
    sex = ["m", "m", "m", "m", "f", "f", "f", "f", "f", "f"],
    age = [27, 45, 34, 85, 55, 44, 24, 29, 37, 76],
    blood_type = ["A", "0", "B", "B", "B", "A", "0", "A", "A", "B"],
    smoker = [true, false, false, false, true, true, true, false, false, false],
)

tbl = table_one(
    data,
    [:age => "Age (years)", :blood_type => "Blood type", :smoker => "Smoker"],
    groupby = :sex => "Sex",
    show_n = true
)

save_svg(tbl, joinpath(@__DIR__, "table_one.svg"))

# --- Example 2: concentration listing by dose/id over time, with
# N / mean / SD summary rows per dose group.
data = DataFrame(
    concentration = [1.2, 4.5, 2.0, 1.5, 0.1, 1.8, 3.2, 1.8, 1.2, 0.2,
                     1.7, 4.2, 1.0, 0.9, 0.3, 1.7, 3.7, 1.2, 1.0, 0.2],
    id = repeat([1, 2, 3, 4], inner = 5),
    dose = repeat([100, 200], inner = 10),
    time = repeat([0, 0.5, 1, 2, 3], 4)
)

tbl = listingtable(
    data,
    :concentration => "Concentration (ng/mL)",
    rows = [:dose => "Dose (mg)", :id => "ID"],
    cols = :time => "Time (hr)",
    summarize_rows = :dose => [
        length => "N",
        mean => "Mean",
        std => "SD",
    ]
)

save_svg(tbl, joinpath(@__DIR__, "listingtable.svg"))
[
"MIT"
] | 1.0.0 | 1381f670499344657ae955634ab2ff4776b394c9 | docs/make.jl | code | 579 | using Documenter, SummaryTables
# Build the documentation site; page paths are relative to docs/src.
makedocs(
    sitename = "SummaryTables.jl",
    pages = [
        "index.md",
        "output.md",
        "Predefined Tables" => [
            "predefined_tables/listingtable.md",
            "predefined_tables/summarytable.md",
            "predefined_tables/table_one.md",
        ],
        "Custom Tables" => [
            "custom_tables/table.md",
            "custom_tables/cell.md",
            "custom_tables/cellstyle.md",
        ],
    ]
)

# Deploy the built site to GitHub Pages; pull requests get preview builds.
deploydocs(
    repo = "github.com/PumasAI/SummaryTables.jl.git",
    push_preview = true,
)
[
"MIT"
] | 1.0.0 | 1381f670499344657ae955634ab2ff4776b394c9 | src/SummaryTables.jl | code | 731 | module SummaryTables
#
# Imports and exports.
#
using Tables
using CategoricalArrays
using DataFrames
using Statistics
import EnumX
import HypothesisTests
import OrderedCollections
import MultipleTesting
import StatsBase
import Printf
import NaturalSort
import WriteDocx
import SHA
export table_one
export listingtable
export summarytable
export Cell
export CellStyle
export Table
export Annotated
export Concat
export Multiline
export Pagination
export ReplaceMissing
export Replace
export Superscript
export Subscript
const DEFAULT_ROWGAP = 6.0
include("cells.jl")
include("table_one.jl")
include("table.jl")
include("helpers.jl")
include("latex.jl")
include("html.jl")
include("docx.jl")
include("typst.jl")
end # module
| SummaryTables | https://github.com/PumasAI/SummaryTables.jl.git |
[
"MIT"
] | 1.0.0 | 1381f670499344657ae955634ab2ff4776b394c9 | src/cells.jl | code | 19578 | """
CellStyle(;
bold::Bool = false,
italic::Bool = false,
underline::Bool = false,
halign::Symbol = :center,
valign::Symbol = :top,
indent_pt::Float64 = 0.0,
border_bottom::Bool = false,
merge::Bool = false,
mergegroup::UInt8 = 0,
)
Create a `CellStyle` object which determines the visual appearance of `Cell`s.
Keyword arguments:
- `bold` renders text `bold` if `true`.
- `italic` renders text `italic` if `true`.
- `underline` underlines text if `true`.
- `halign` determines the horizontal alignment within the cell, either `:left`, `:center` or `:right`.
- `valign` determines the vertical alignment within the cell, either `:top`, `:center` or `:bottom`.
- `indent_pt` adds left indentation in points to the cell text.
- `border_bottom` adds a bottom border to the cell if `true`.
- `merge` causes adjacent cells which are `==` equal to be rendered as a single merged cell.
- `mergegroup` is a number that can be used to differentiate between two otherwise equal adjacent groups of cells that should not be merged together.
"""
# See the docstring above for the meaning of each field; all fields have
# defaults so styles are usually built via keyword arguments.
Base.@kwdef struct CellStyle
    indent_pt::Float64 = 0.0
    bold::Bool = false
    italic::Bool = false
    underline::Bool = false
    border_bottom::Bool = false
    halign::Symbol = :center
    valign::Symbol = :top
    merge::Bool = false
    mergegroup::UInt8 = 0
end
# Copy-constructor with keyword overrides: builds a new CellStyle where each
# field is taken from `kwargs` if present, otherwise copied from `c`. The
# @eval/@ncall dance unrolls over all fields so no field can be forgotten.
@eval function CellStyle(c::CellStyle; kwargs...)
    Base.Cartesian.@ncall $(length(fieldnames(CellStyle))) CellStyle i -> begin
        name = $(fieldnames(CellStyle))[i]
        get(kwargs, name, getfield(c, name))
    end
end
# A cell together with the (rows, cols) grid rectangle it covers.
# Spans are validated to start at index 1 or later.
struct SpannedCell
    span::Tuple{UnitRange{Int64},UnitRange{Int64}}
    value
    style::CellStyle
    function SpannedCell(span::Tuple{UnitRange{Int64},UnitRange{Int64}}, value, style)
        firstrow = span[1].start
        firstcol = span[2].start
        firstrow >= 1 || error("SpannedCell must not begin at a row lower than 1, but begins at row $(firstrow).")
        firstcol >= 1 || error("SpannedCell must not begin at a column lower than 1, but begins at column $(firstcol).")
        return new(span, value, style)
    end
end
# Convenience constructor: accept bare Ints or ranges for the row/column spans.
SpannedCell(rows::Union{Int,UnitRange{Int}}, cols::Union{Int,UnitRange{Int}}, value, style = CellStyle()) =
    SpannedCell((_to_range(rows), _to_range(cols)), value, style)
# Normalize an index to a one-element range; ranges pass through unchanged.
_to_range(x::Int) = x:x
_to_range(r::UnitRange{Int}) = r
# Legacy alias kept so old code using `CellList` keeps working; it carries no
# behavior of its own anymore.
const CellList = Vector{SpannedCell}
"""
Cell(value, style::CellStyle)
Cell(value; [bold, italic, underline, halign, valign, border_bottom, indent_pt, merge, mergegroup])
Construct a `Cell` with value `value` and `CellStyle` `style`, which can also be created implicitly with keyword arguments.
For explanations of the styling options, refer to `CellStyle`.
A cell with value `nothing` is displayed as an empty cell (styles might still apply).
The type of `value` can be anything.
Some types with special behavior are:
- `Multiline` for content broken over multiple lines in a cell. This object may not be used nested in other values, only as the top-level value.
- `Concat` for stringing together multiple values without having to interpolate them into a `String`, which keeps their own special behaviors intact.
- `Superscript` and `Subscript`
- `Annotated` for a value with an optional superscript label and a footnote annotation.
"""
struct Cell
value
style::CellStyle
end
Base.adjoint(c::Cell) = c # simplifies making row vectors out of column vectors of Cells with '
Cell(value; kwargs...) = Cell(value, CellStyle(; kwargs...))
Cell(cell::Cell; kwargs...) = Cell(cell.value, CellStyle(cell.style; kwargs...))
Cell(cell::Cell, value; kwargs...) = Cell(value, CellStyle(cell.style; kwargs...))
Base.broadcastable(c::Cell) = Ref(c)
@inline Base.getproperty(c::Cell, s::Symbol) = hasfield(Cell, s) ? getfield(c, s) : getproperty(c.style, s)
Base.propertynames(c::Cell) = (fieldnames(Cell)..., propertynames(c.style)...)
# The central table type: a dense matrix of Cells plus presentation metadata.
# `header`/`footer` are row indices (or nothing); `rowgaps`/`colgaps` are
# `index => gap_pt` pairs; rounding settings apply to float cell values.
struct Table
    cells::Matrix{Cell}
    header::Union{Nothing, Int}
    footer::Union{Nothing, Int}
    footnotes::Vector{Any}
    rowgaps::Vector{Pair{Int,Float64}}
    colgaps::Vector{Pair{Int,Float64}}
    postprocess::Vector{Any}
    round_digits::Int
    round_mode::Union{Nothing,Symbol}
    trailing_zeros::Bool
end
# Positional header/footer variant used internally; fills in all defaults.
function Table(cells, header, footer;
        round_digits = 3,
        round_mode = :auto,
        trailing_zeros = false,
        footnotes = [],
        postprocess = [],
        rowgaps = Pair{Int,Float64}[],
        colgaps = Pair{Int,Float64}[],
    )
    Table(cells, header, footer, footnotes, rowgaps, colgaps, postprocess, round_digits, round_mode, trailing_zeros)
end
"""
    function Table(cells;
        header = nothing,
        footer = nothing,
        round_digits = 3,
        round_mode = :auto,
        trailing_zeros = false,
        footnotes = [],
        postprocess = [],
        rowgaps = Pair{Int,Float64}[],
        colgaps = Pair{Int,Float64}[],
    )
Create a `Table` which can be rendered in multiple formats, such as HTML or LaTeX.
## Arguments
- `cells::AbstractMatrix{<:Cell}`: The matrix of `Cell`s that make up the table.
## Keyword arguments
- `header`: The index of the last row of the header, `nothing` if no header is specified.
- `footer`: The index of the first row of the footer, `nothing` if no footer is specified.
- `footnotes`: A vector of objects printed as footnotes that are not derived from `Annotated`
  values and therefore don't get labels with counterparts inside the table.
- `round_digits = 3`: Float values will be rounded to this precision before printing.
- `round_mode = :auto`: How the float values are rounded, options are `:auto`, `:digits` or `:sigdigits`.
  If `round_mode === nothing`, no rounding will be applied and `round_digits` and `trailing_zeros`
  will have no effect.
- `trailing_zeros = false`: Controls if float values keep trailing zeros, for example `4.0` vs `4`.
- `postprocess = []`: A list of post-processors which will be applied left to right to the table before displaying the table.
  A post-processor can either work element-wise or on the whole table object. See the `postprocess_table` and
  `postprocess_cell` functions for defining custom postprocessors.
- `rowgaps = Pair{Int,Float64}[]`: A list of pairs `index => gap_pt`. For each pair, a visual gap
  the size of `gap_pt` is added between the rows `index` and `index+1`.
- `colgaps = Pair{Int,Float64}[]`: A list of pairs `index => gap_pt`. For each pair, a visual gap
  the size of `gap_pt` is added between the columns `index` and `index+1`.
## Round mode
Consider the numbers `0.006789`, `23.4567`, `456.789` or `12345.0`.
Here is how these numbers are formatted with the different available rounding modes:
- `:auto` rounds to `n` significant digits but doesn't zero out additional digits before the comma unlike `:sigdigits`.
  For example, `round_digits = 3` would result in `0.00679`, `23.5`, `457.0` or `12345.0`.
  Numbers at orders of magnitude >= 6 or <= -5 are displayed in exponential notation as in Julia.
- `:digits` rounds to `n` digits after the comma and shows possibly multiple trailing zeros.
  For example, `round_digits = 3` would result in `0.007`, `23.457` or `456.789` or `12345.000`.
  Numbers are never shown with exponential notation.
- `:sigdigits` rounds to `n` significant digits and zeros out additional digits before the comma unlike `:auto`.
  For example, `round_digits = 3` would result in `0.00679`, `23.5`, `457.0` or `12300.0`.
  Numbers at orders of magnitude >= 6 or <= -5 are displayed in exponential notation as in Julia.
"""
Table(cells; header = nothing, footer = nothing, kwargs...) = Table(cells, header, footer; kwargs...)
# non-public-API method to keep old code working in the meantime
# Converts a list of SpannedCells into the dense Cell matrix representation:
# each spanned rectangle is filled with equal Cells marked `merge = true` and
# tagged with a rotating mergegroup id so adjacent spans don't fuse.
function Table(cells::AbstractVector{SpannedCell}, args...; kwargs...)
    # Overall grid size = maximum row/col stop over all spans.
    sz = reduce(cells; init = (0, 0)) do sz, cell
        max.(sz, (cell.span[1].stop, cell.span[2].stop))
    end
    m = fill(Cell(nothing), sz...)
    visited = zeros(Bool, sz...)
    mergegroup = 0
    for cell in cells
        is_spanned = length(cell.span[1]) > 1 || length(cell.span[2]) > 1
        if is_spanned
            # New group id per span (wraps at 255 to fit the UInt8 field).
            mergegroup = mod(mergegroup + 1, 255)
        end
        for row in cell.span[1]
            for col in cell.span[2]
                if visited[row, col]
                    error("Tried to fill cell $row,$col twice. First value was $(m[row, col].value) and second $(cell.value).")
                end
                visited[row, col] = true
                if is_spanned
                    m[row, col] = Cell(cell.value, CellStyle(cell.style; merge = true, mergegroup))
                else
                    m[row, col] = Cell(cell.value, cell.style)
                end
            end
        end
    end
    return Table(m, args...; kwargs...)
end
# Inverse of the above: greedily grow maximal rectangles of equal, mergeable
# Cells out of the matrix and return them as SpannedCells (renderer input).
function to_spanned_cells(m::AbstractMatrix{<:Cell})
    cells = Vector{SpannedCell}()
    sizehint!(cells, length(m))
    visited = zeros(Bool, size(m))
    nrow, ncol = size(m)
    for row in 1:nrow
        for col in 1:ncol
            visited[row, col] && continue
            c = m[row, col]
            # Extend downwards while unvisited cells are == equal and mergeable.
            lastrow = row
            for _row in row+1:nrow
                if !visited[_row, col] && c.merge && m[_row, col] == c
                    lastrow = _row
                else
                    break
                end
            end
            # Extend rightwards the same way.
            lastcol = col
            for _col in col+1:ncol
                if !visited[row, _col] && c.merge && m[row, _col] == c
                    lastcol = _col
                else
                    break
                end
            end
            # Verify the interior of the candidate rectangle is uniform; a
            # non-rectangular group of equal cells is not representable.
            for _row in row+1:lastrow
                for _col in col+1:lastcol
                    _c = m[_row, _col]
                    if _c != c
                        error("Cell $c was detected to span over [$(row:lastrow),$(col:lastcol)] but at $_row,$_col the value was $_c. This is not allowed. Cells spanning multiple rows and columns must always span a full rectangle.")
                    end
                end
            end
            push!(cells, SpannedCell((row:lastrow,col:lastcol), c.value, c.style))
            visited[row:lastrow,col:lastcol] .= true
        end
    end
    return cells
end
"""
Multiline(args...)
Create a `Multiline` object which renders each `arg` on a separate line.
A `Multiline` value may only be used as the top-level value of a cell, so
`Cell(Multiline(...))` is allowed but `Cell(Concat(Multiline(...), ...))` is not.
"""
struct Multiline
values::Vector{Any}
end
Multiline(args...) = Multiline(Any[args...])
"""
Concat(args...)
Create a `Concat` object which can be used to concatenate the representations
of multiple values in a single table cell while keeping the conversion semantics
of each `arg` in `args` intact.
## Example
```julia
Concat(
"Some text and an ",
Annotated("annotated", "Some annotation"),
" value",
)
# will be rendered as "Some text and an annotatedΒΉ value"
```
"""
struct Concat
args::Tuple
Concat(args...) = new(args)
end
# A value paired with a footnote `annotation` and a `label` used as its
# superscript marker.
struct Annotated
    value
    annotation
    label
end
# Marker type: labels are assigned as consecutive integers in order of appearance.
struct AutoNumbering end
"""
    Annotated(value, annotation; label = AutoNumbering())
Create an `Annotated` object which will be given a footnote annotation
in the `Table` where it is used.
If the `label` keyword is `AutoNumbering()`, annotations will be given number labels
from 1 to N in the order of their appearance. If it is `nothing`, no label will be
shown. Any other `label` will be used directly as the footnote label.
Each unique label must be paired with a unique annotation, but the same
combination can exist multiple times in a single table.
"""
Annotated(value, annotation; label = AutoNumbering()) = Annotated(value, annotation, label)
# An annotation after label resolution: the value plus its final label.
struct ResolvedAnnotation
    value
    label
end
# Marker type: this annotation should render without any superscript label.
# Useful when the value itself already acts as the label, e.g. "-" standing for
# a missing value — a superscript on each such cell would be noise.
struct NoLabel end
# Assign final labels to all annotations found in `cells`: `AutoNumbering()`
# labels become consecutive integers in order of appearance, `nothing` labels
# become `NoLabel()`, explicit labels are kept as-is. Returns the rewritten
# cells and an ordered annotation => label mapping (manual labels sorted last).
# Throws if one label would stand for two different annotations.
# NOTE: restores the `∈` membership operator which had been corrupted to the
# mojibake character `β` in this line, breaking parsing.
function resolve_annotations(cells::AbstractVector{<:SpannedCell})
    annotations = collect_annotations(cells)
    k = 1
    for (annotation, label) in annotations
        if label === AutoNumbering()
            annotations[annotation] = k
            k += 1
        elseif label === nothing
            annotations[annotation] = NoLabel()
        end
    end
    # Every label other than NoLabel must be unique across annotations.
    labels = Set()
    for label in values(annotations)
        label === NoLabel() && continue
        label ∈ labels && error("Found the same label $(repr(label)) twice with different annotations.")
        push!(labels, label)
    end
    # put all non-integer labels (so all manual labels) behind the auto-incremented labels
    # the remaining order will be corresponding to the elements in the list
    annotations = OrderedCollections.OrderedDict(sort(collect(annotations), by = x -> !(last(x) isa Int)))
    cells = map(cells) do cell
        SpannedCell(cell.span, resolve_annotation(cell.value, annotations), cell.style)
    end
    return cells, annotations
end
# Gather all Annotated values reachable from the cells into an ordered
# annotation => label dict (order of first appearance).
function collect_annotations(cells)
    annotations = OrderedCollections.OrderedDict()
    for cell in cells
        collect_annotations!(annotations, cell.value)
    end
    return annotations
end
# Fallback: plain values carry no annotations.
collect_annotations!(annotations, x) = nothing
# Concat: recurse into each concatenated argument.
function collect_annotations!(annotations, c::Concat)
    for arg in c.args
        collect_annotations!(annotations, arg)
    end
end
# Annotated: record the annotation, rejecting conflicting labels for the same
# annotation text.
function collect_annotations!(annotations, x::Annotated)
    if haskey(annotations, x.annotation)
        if annotations[x.annotation] != x.label
            error("Found the same annotation $(repr(x.annotation)) with two different labels: $(repr(x.label)) and $(repr(annotations[x.annotation])).")
        end
    else
        annotations[x.annotation] = x.label
    end
    return
end
# Swap Annotated values for ResolvedAnnotation carrying the final label;
# other values pass through unchanged, Concat recurses into its arguments.
resolve_annotation(x, annotations) = x
function resolve_annotation(a::Annotated, annotations)
    ResolvedAnnotation(a.value, annotations[a.annotation])
end
function resolve_annotation(c::Concat, annotations)
    new_args = map(c.args) do arg
        resolve_annotation(arg, annotations)
    end
    Concat(new_args...)
end
# Build an Int matrix that maps every grid position to the index of the cell
# occupying it (0 = unoccupied). Errors if two cells overlap.
function create_cell_matrix(cells)
    nrows, ncols = 0, 0
    for cell in cells
        rowspan, colspan = cell.span
        nrows = max(nrows, rowspan.stop)
        ncols = max(ncols, colspan.stop)
    end
    matrix = zeros(Int, nrows, ncols)
    for (i, cell) in enumerate(cells)
        enter_cell!(matrix, cell, i)
    end
    matrix
end
# Mark every position of `cell`'s span with index `i`, failing loudly on overlap.
function enter_cell!(matrix, cell, i)
    rowspan, colspan = cell.span
    for row in rowspan, col in colspan
        occupant = matrix[row, col]
        occupant == 0 || error(
            """
            Can't place cell $i in [$row, $col] as cell $occupant is already there.
            Value of cell $i: $(cell.value)
            """
        )
        matrix[row, col] = i
    end
    return nothing
end
"""
postprocess_table
Overload `postprocess_table(t::Table, postprocessor::YourPostProcessor)`
to enable using `YourPostProcessor` as a table postprocessor by passing
it to the `postprocess` keyword argument of `Table`.
The function must always return a `Table`.
Use `postprocess_cell` instead if you do not need to modify table attributes
during postprocessing but only individual cells.
"""
function postprocess_table end
"""
postprocess_cell
Overload `postprocess_cell(c::Cell, postprocessor::YourPostProcessor)`
to enable using `YourPostProcessor` as a cell postprocessor by passing
it to the `postprocess` keyword argument of `Table`.
The function must always return a `Cell`. It will be applied on every cell
of the table that is being postprocessed, all other table attributes will
be left unmodified.
Use `postprocess_table` instead if you need to modify table attributes
during postprocessing.
"""
function postprocess_cell end
function postprocess_cell(cell::Cell, any)
error("""
`postprocess_cell` is not implemented for postprocessor type `$(typeof(any))`.
To use this object for postprocessing, either implement `postprocess_table(::Table, ::$(typeof(any)))` or
`postprocess_cell(::Cell, ::$(typeof(any)))` for it.
""")
end
# Fallback for cell-wise postprocessors: apply `postprocess_cell` to every cell
# and rebuild the table with identical attributes (the postprocessor list of
# the result is emptied, since it has been consumed).
function postprocess_table(ct::Table, any)
    processed_cells = map(ct.cells) do cell
        processed = postprocess_cell(cell, any)
        processed isa Cell || error("`postprocess_cell` called with `$(any)` returned an object of type `$(typeof(processed))` instead of `Cell`.")
        processed
    end
    Table(processed_cells, ct.header, ct.footer, ct.footnotes, ct.rowgaps, ct.colgaps, [], ct.round_digits, ct.round_mode, ct.trailing_zeros)
end
# A vector of postprocessors is applied left-to-right; each step must yield a Table.
function postprocess_table(ct::Table, v::AbstractVector)
    for processor in v
        ct = postprocess_table(ct, processor)
        ct isa Table || error("Postprocessor $processor caused `postprocess_table` not to return a `Table` but a `$(typeof(ct))`")
    end
    return ct
end
"""
Replace(f, with)
Replace(f; with)
This postprocessor replaces all cell values for which `f(value) === true`
with the value `with`.
If `with <: Function` then the new value will be `with(value)`, instead.
## Examples
```
Replace(x -> x isa String, "A string was here")
Replace(x -> x isa String, uppercase)
Replace(x -> x isa Int && iseven(x), "An even Int was here")
```
"""
struct Replace{F,W}
f::F
with::W
end
Replace(f; with) = Replace(f, with)
"""
ReplaceMissing(; with = Annotated("-", "- No value"; label = NoLabel()))
This postprocessor replaces all `missing` cell values with the value in `with`.
"""
ReplaceMissing(; with = Annotated("-", "- No value"; label = NoLabel())) =
Replace(ismissing, with)
# Apply a Replace postprocessor to a single cell: if the predicate fires, swap
# in `r.with` (calling it on the old value when it is a function).
function postprocess_cell(cell::Cell, r::Replace)
    hit = r.f(cell.value)
    hit isa Bool || error("`Replace` predicate `$(r.f)` did not return a `Bool` but a value of type `$(typeof(hit))`.")
    if !hit
        return Cell(cell.value, cell.style)
    end
    replacement = r.with isa Function ? r.with(cell.value) : r.with
    return Cell(replacement, cell.style)
end
# Internal postprocessor carrying the table's float formatting settings.
struct Rounder
    round_digits::Int
    round_mode::Symbol
    trailing_zeros::Bool
end
# A float tagged with its formatting settings; rendered lazily by `_showas`.
struct RoundedFloat
    f::Float64
    round_digits::Int
    round_mode::Symbol
    trailing_zeros::Bool
end
# Recursively wrap every AbstractFloat inside a cell value in RoundedFloat,
# descending through the wrapper types that can contain values.
apply_rounder(x, r::Rounder) = x
apply_rounder(x::AbstractFloat, r::Rounder) = RoundedFloat(x, r.round_digits, r.round_mode, r.trailing_zeros)
apply_rounder(x::Concat, r::Rounder) = Concat(map(arg -> apply_rounder(arg, r), x.args)...)
apply_rounder(x::Multiline, r::Rounder) = Multiline(map(arg -> apply_rounder(arg, r), x.values))
apply_rounder(x::Annotated, r::Rounder) = Annotated(apply_rounder(x.value, r), x.annotation, x.label)
function postprocess_cell(cell::Cell, r::Rounder)
    Cell(apply_rounder(cell.value, r), cell.style)
end
# Wrappers rendering their content in super-/subscript position.
struct Superscript
    super
end
struct Subscript
    sub
end
apply_rounder(x::Superscript, r::Rounder) = Superscript(apply_rounder(x.super, r))
apply_rounder(x::Subscript, r::Rounder) = Subscript(apply_rounder(x.sub, r))
# Run all user postprocessors, then — unless rounding is disabled — apply the
# float rounding/formatting step as the very last processor.
function postprocess(ct::Table)
    processors = ct.postprocess
    if ct.round_mode !== nothing
        rounder = Rounder(ct.round_digits, ct.round_mode, ct.trailing_zeros)
        processors = [processors; rounder]
    end
    return postprocess_table(ct, processors)
end
| SummaryTables | https://github.com/PumasAI/SummaryTables.jl.git |
[
"MIT"
] | 1.0.0 | 1381f670499344657ae955634ab2ff4776b394c9 | src/docx.jl | code | 10929 | const DOCX_OUTER_RULE_SIZE = 8 * WriteDocx.eighthpt
# Rule thicknesses (in eighth-points, Word's native unit) for the table frame
# (outer) and header separator (inner), and the footnote font size.
const DOCX_INNER_RULE_SIZE = 4 * WriteDocx.eighthpt
const DOCX_ANNOTATION_FONTSIZE = 8 * WriteDocx.pt
"""
    to_docx(ct::Table)
Creates a `WriteDocx.Table` node for `Table` `ct` which can be inserted into
a `WriteDocx` document.
"""
function to_docx(ct::Table)
    ct = postprocess(ct)
    # Render in row-major order so row construction below can walk linearly.
    cells = sort(to_spanned_cells(ct.cells), by = x -> (x.span[1].start, x.span[2].start))
    cells, annotations = resolve_annotations(cells)
    matrix = create_cell_matrix(cells)
    running_index = 0
    tablerows = WriteDocx.TableRow[]
    # Helper: a zero-content row spanning the full width whose bottom border
    # acts as a horizontal rule of thickness `sz` (Word has no standalone rules).
    function full_width_border_row(sz)
        WriteDocx.TableRow(
            [WriteDocx.TableCell([WriteDocx.Paragraph([])],
                WriteDocx.TableCellProperties(
                    gridspan = size(matrix, 2),
                    borders = WriteDocx.TableCellBorders(
                        bottom = WriteDocx.TableCellBorder(
                            color = WriteDocx.automatic,
                            size = sz,
                            style = WriteDocx.BorderStyle.single,
                        ),
                        start = WriteDocx.TableCellBorder(color = WriteDocx.automatic, size = sz, style = WriteDocx.BorderStyle.none),
                        stop = WriteDocx.TableCellBorder(color = WriteDocx.automatic, size = sz, style = WriteDocx.BorderStyle.none),
                    ),
                    hide_mark = true,
                ))]
        )
    end
    # Top rule of the table frame.
    push!(tablerows, full_width_border_row(DOCX_OUTER_RULE_SIZE))
    validate_rowgaps(ct.rowgaps, size(matrix, 1))
    validate_colgaps(ct.colgaps, size(matrix, 2))
    rowgaps = Dict(ct.rowgaps)
    colgaps = Dict(ct.colgaps)
    for row in 1:size(matrix, 1)
        rowcells = WriteDocx.TableCell[]
        for col in 1:size(matrix, 2)
            index = matrix[row, col]
            if index == 0
                # Unoccupied grid position: emit an empty placeholder cell.
                push!(rowcells, WriteDocx.TableCell([
                    WriteDocx.Paragraph([
                        WriteDocx.Run([
                            WriteDocx.Text("")
                        ])
                    ])
                ]))
            else
                cell = cells[index]
                is_firstcol = col == cell.span[2].start
                # Horizontally spanned cells are emitted once (gridspan covers
                # the rest), so skip the non-first columns of a span.
                if !is_firstcol
                    continue
                end
                push!(rowcells, docx_cell(row, col, cell, rowgaps, colgaps))
                running_index = index
            end
        end
        push!(tablerows, WriteDocx.TableRow(rowcells))
        if row == ct.header
            # Thin rule separating the header block from the body.
            push!(tablerows, full_width_border_row(DOCX_INNER_RULE_SIZE))
        end
    end
    # Bottom rule of the table frame.
    push!(tablerows, full_width_border_row(DOCX_OUTER_RULE_SIZE))
    # Footnote row: labeled annotations first, then plain footnotes, separated
    # by single spaces, all in the smaller annotation font.
    if !isempty(annotations) || !isempty(ct.footnotes)
        elements = []
        for (i, (annotation, label)) in enumerate(annotations)
            i > 1 && push!(elements, WriteDocx.Run([WriteDocx.Text(" ")]))
            if label !== NoLabel()
                push!(elements, WriteDocx.Run([WriteDocx.Text(docx_sprint(label)), WriteDocx.Text(" ")],
                    WriteDocx.RunProperties(valign = WriteDocx.VerticalAlignment.superscript)))
            end
            push!(elements, WriteDocx.Run([WriteDocx.Text(docx_sprint(annotation))],
                WriteDocx.RunProperties(size = DOCX_ANNOTATION_FONTSIZE)))
        end
        for (i, footnote) in enumerate(ct.footnotes)
            (!isempty(annotations) || i > 1) && push!(elements, WriteDocx.Run([WriteDocx.Text(" ")]))
            push!(elements, WriteDocx.Run([WriteDocx.Text(docx_sprint(footnote))],
                WriteDocx.RunProperties(size = DOCX_ANNOTATION_FONTSIZE)))
        end
        annotation_row = WriteDocx.TableRow([WriteDocx.TableCell(
            [WriteDocx.Paragraph(elements)],
            WriteDocx.TableCellProperties(gridspan = size(matrix, 2))
        )])
        push!(tablerows, annotation_row)
    end
    tablenode = WriteDocx.Table(tablerows,
        WriteDocx.TableProperties(
            margins = WriteDocx.TableLevelCellMargins(
                # Word already has relatively broadly spaced tables,
                # so we keep margins to a minimum. A little bit on the left
                # and right is needed to separate the columns from each other
                top = WriteDocx.pt * 0,
                bottom = WriteDocx.pt * 0,
                start = WriteDocx.pt * 1.5,
                stop = WriteDocx.pt * 1.5,
            ),
            # this spacing allows adjacent column underlines to be ever-so-slightly spaced apart,
            # which is otherwise not possible to achieve in Word (aside from adding empty spacing columns maybe)
            spacing = 1 * WriteDocx.pt,
        )
    )
    return tablenode
end
# Translate a CellStyle into WriteDocx paragraph properties (horizontal
# alignment) and run properties (bold/italic).
function paragraph_and_run_properties(st::CellStyle)
    justification = if st.halign === :center
        WriteDocx.Justification.center
    elseif st.halign === :left
        WriteDocx.Justification.start
    elseif st.halign === :right
        WriteDocx.Justification.stop
    else
        error("Unhandled halign $(st.halign)")
    end
    para = WriteDocx.ParagraphProperties(justification = justification)
    run = WriteDocx.RunProperties(
        # `nothing` instead of `false` works around a WriteDocx quirk — TODO confirm upstream fix
        bold = st.bold ? true : nothing,
        italic = st.italic ? true : nothing,
    )
    return para, run
end
# No CSS-like class: default paragraph properties and no extra styles.
function hardcoded_styles(::Nothing)
    WriteDocx.ParagraphProperties(), (;)
end
# Compute the WriteDocx cell properties for the grid position (row, col) of a
# (possibly spanned) cell: margins emulating row/column gaps and indentation,
# the optional bottom border, vertical alignment, and merge/span attributes.
function cell_properties(cell::SpannedCell, row, col, vertical_merge, gridspan, rowgaps, colgaps)
    cs = cell.style
    pt = WriteDocx.pt
    # A row gap below the cell becomes a half-gap bottom margin (the cell below
    # carries the other half); otherwise bordered cells get a little breathing room.
    bottom_rowgap = get(rowgaps, cell.span[1].stop, nothing)
    if bottom_rowgap === nothing
        if cs.border_bottom # borders need a bit of spacing to look ok
            bottom_margin = 2.0 * pt
        else
            bottom_margin = nothing
        end
    else
        bottom_margin = 0.5 * bottom_rowgap * pt
    end
    top_rowgap = get(rowgaps, cell.span[1].start-1, nothing)
    top_margin = top_rowgap === nothing ? nothing : 0.5 * top_rowgap * pt
    # Left margin combines the style's indentation with half of any column gap.
    left_colgap = get(colgaps, cell.span[2].start-1, nothing)
    if left_colgap === nothing
        if cs.indent_pt != 0
            left_margin = cs.indent_pt * pt
        else
            left_margin = nothing
        end
    else
        if cs.indent_pt != 0
            left_margin = (cs.indent_pt + 0.5 * left_colgap) * pt
        else
            left_margin = 0.5 * left_colgap * pt
        end
    end
    right_colgap = get(colgaps, cell.span[2].stop, nothing)
    right_margin = right_colgap === nothing ? nothing : 0.5 * right_colgap * pt
    left_end = col == cell.span[2].start
    right_end = col == cell.span[2].stop
    top_end = row == cell.span[1].start
    bottom_end = row == cell.span[1].stop
    # spanned cells cannot have margins in the interior
    if !right_end
        right_margin = nothing
    end
    if !left_end
        left_margin = nothing
    end
    if !top_end
        top_margin = nothing
    end
    if !bottom_end
        bottom_margin = nothing
    end
    WriteDocx.TableCellProperties(;
        margins = WriteDocx.TableCellMargins(
            start = left_margin,
            bottom = bottom_margin,
            top = top_margin,
            stop = right_margin,
        ),
        borders = cs.border_bottom ? WriteDocx.TableCellBorders(
            bottom = WriteDocx.TableCellBorder(color = WriteDocx.automatic, size = DOCX_INNER_RULE_SIZE, style = WriteDocx.BorderStyle.single),
            start = WriteDocx.TableCellBorder(color = WriteDocx.automatic, size = DOCX_INNER_RULE_SIZE, style = WriteDocx.BorderStyle.none), # the left/right none styles keep adjacent cells' bottom borders from merging together
            stop = WriteDocx.TableCellBorder(color = WriteDocx.automatic, size = DOCX_INNER_RULE_SIZE, style = WriteDocx.BorderStyle.none),
        ) : nothing,
        valign = cs.valign === :center ? WriteDocx.VerticalAlign.center :
            cs.valign === :bottom ? WriteDocx.VerticalAlign.bottom :
            cs.valign === :top ? WriteDocx.VerticalAlign.top :
            error("Unhandled valign $(cs.valign)"),
        vertical_merge,
        gridspan,
    )
end
# Build the WriteDocx.TableCell for grid position (row, col) of `cell`. Only
# the top-left position of a spanned cell carries the real content; the other
# positions get an empty run (Word requires them to exist for merging).
function docx_cell(row, col, cell, rowgaps, colgaps)
    rowspan, colspan = cell.span
    ncols = length(colspan)
    at_first_row = row == rowspan.start
    at_first_col = col == colspan.start
    # `vertical_merge = true` starts a merge region, `false` continues it,
    # `nothing` means the cell is not vertically merged at all.
    vertical_merge = length(rowspan) == 1 ? nothing : at_first_row
    gridspan = ncols > 1 ? ncols : nothing
    paraprops, runprops = paragraph_and_run_properties(cell.style)
    if at_first_row && at_first_col
        runs = cell.value === nothing ? WriteDocx.Run[] : to_runs(cell.value, runprops)
    else
        runs = [WriteDocx.Run([WriteDocx.Text("")], runprops)]
    end
    props = cell_properties(cell, row, col, vertical_merge, gridspan, rowgaps, colgaps)
    return WriteDocx.TableCell([
        WriteDocx.Paragraph(runs, paraprops),
    ], props)
end
# Fallback: render any value as a single text run using its text representation.
to_runs(x, props) = [WriteDocx.Run([WriteDocx.Text(docx_sprint(x))], props)]
# Concat: flatten the runs of each argument into one run list, in order.
function to_runs(c::Concat, props)
    out = WriteDocx.Run[]
    foreach(arg -> append!(out, to_runs(arg, props)), c.args)
    return out
end
# make a new property object where each field that's not nothing in x2 replaces the equivalent
# from x1, however, if the elements are both also property objects, merge those separately
# (the @generated function unrolls over all fields of the concrete property type,
# recursing via `merge_props` so nested property objects merge field-by-field)
@generated function merge_props(x1::T, x2::T) where {T<:Union{WriteDocx.TableCellProperties,WriteDocx.RunProperties,WriteDocx.ParagraphProperties,WriteDocx.TableCellBorders,WriteDocx.TableCellMargins}}
    FN = fieldnames(T)
    N = fieldcount(T)
    quote
        Base.Cartesian.@ncall $N $T i -> begin
            f1 = getfield(x1, $FN[i])
            f2 = getfield(x2, $FN[i])
            merge_props(f1, f2)
        end
    end
end
# Leaf case: the override wins unless it is nothing.
merge_props(x, y) = y === nothing ? x : y
# Super-/subscript wrappers: override the vertical alignment of the inherited
# run properties, then render the wrapped value with them.
to_runs(s::Superscript, props::WriteDocx.RunProperties) =
    to_runs(s.super, merge_props(props, WriteDocx.RunProperties(valign = WriteDocx.VerticalAlignment.superscript)))
to_runs(s::Subscript, props::WriteDocx.RunProperties) =
    to_runs(s.sub, merge_props(props, WriteDocx.RunProperties(valign = WriteDocx.VerticalAlignment.subscript)))
# Multiline: render each value on its own line by inserting a line break run
# before every value but the first.
# Fix: the original had a stray trailing comma after the `push!` statement,
# which made the two statements parse as a single discarded tuple expression
# `(i > 1 && push!(...), append!(...))` — accidentally correct, but misleading.
function to_runs(m::Multiline, props)
    runs = WriteDocx.Run[]
    for (i, val) in enumerate(m.values)
        if i > 1
            push!(runs, WriteDocx.Run([WriteDocx.Break()]))
        end
        append!(runs, to_runs(val, props))
    end
    return runs
end
# Render an annotated value followed by its superscript label (unless the
# annotation was resolved to NoLabel).
function to_runs(r::ResolvedAnnotation, props)
    runs = to_runs(r.value, props)
    if r.label !== NoLabel()
        superprops = merge_props(props, WriteDocx.RunProperties(valign = WriteDocx.VerticalAlignment.superscript))
        push!(runs, WriteDocx.Run([WriteDocx.Text(docx_sprint(r.label))], superprops))
    end
    return runs
end
# Render `x` to a String through the generic `_showas` text pipeline.
docx_sprint(x) = sprint(x) do io, x
    _showas(io, MIME"text"(), x)
end | SummaryTables | https://github.com/PumasAI/SummaryTables.jl.git |
[
"MIT"
] | 1.0.0 | 1381f670499344657ae955634ab2ff4776b394c9 | src/helpers.jl | code | 4359 | function _showas(io::IO, mime::MIME, value)
fn(io::IO, ::MIME"text/html", value::AbstractString) = _str_html_escaped(io, value)
fn(io::IO, ::MIME"text/html", value) = _str_html_escaped(io, repr(value))
fn(io::IO, ::MIME"text/latex", value::AbstractString) = _str_latex_escaped(io, value)
fn(io::IO, ::MIME"text/latex", value) = _str_latex_escaped(io, repr(value))
fn(io::IO, ::MIME"text/typst", value::AbstractString) = _str_typst_escaped(io, value)
fn(io::IO, ::MIME"text/typst", value) = _str_typst_escaped(io, repr(value))
fn(io::IO, ::MIME, value) = print(io, value)
return showable(mime, value) ? show(io, mime, value) : fn(io, mime, value)
end
# Format a RoundedFloat according to its round mode, optionally stripping
# trailing zeros, then delegate to the String method of `_showas`.
# Fix: the previous trailing-zero regex only matched unsigned numbers, so
# negative values like "-4.0" kept their zeros, and it could not strip zeros
# from fractions with interior zeros such as "1.0230" -> "1.023". The rewrite
# handles both; exponential-notation strings are left untouched as before.
function _showas(io::IO, m::MIME, r::RoundedFloat)
    f = r.f
    mode = r.round_mode
    digits = r.round_digits
    s = if mode === :auto
        string(auto_round(f, target_digits = digits))
    elseif mode === :sigdigits
        string(round(f, sigdigits = digits))
    elseif mode === :digits
        fmt = Printf.Format("%.$(digits)f")
        Printf.format(fmt, f)
    else
        error("Unknown round mode $mode")
    end
    if !r.trailing_zeros && !occursin('e', s)
        # "4.10" -> "4.1", "1.0230" -> "1.023" (keep at least one significant digit)
        s = replace(s, r"(\.\d*?[1-9])0+$" => s"\1")
        # "4.0" -> "4", "-4.000" -> "-4"
        s = replace(s, r"\.0+$" => "")
    end
    _showas(io, m, s)
end
# Categorical values render as their underlying (unwrapped) value.
_showas(io::IO, m::MIME, c::CategoricalValue) = _showas(io, m, CategoricalArrays.DataAPI.unwrap(c))
# Concat renders each argument in sequence with the same MIME.
function _showas(io::IO, m::MIME, c::Concat)
    for arg in c.args
        _showas(io, m, arg)
    end
end
# Identity hook; overloaded elsewhere for types needing value preprocessing.
format_value(x) = x
"""
auto_round(number; target_digits)
Rounds a floating point number to a target number of digits that are not leading zeros.
For example, with 3 target digits, desirable numbers would be 123.0, 12.3, 1.23,
0.123, 0.0123 etc. Numbers larger than the number of digits are only rounded to the next integer
(compare with `round(1234, sigdigits = 3)` which rounds to `1230.0`).
Numbers are rounded to `target_digits` significant digits when the floored base 10
exponent is -5 and lower or 6 and higher, as these numbers print with `e` notation by default in Julia.
```
auto_round( 1234567, target_digits = 4) = 1.235e6
auto_round( 123456.7, target_digits = 4) = 123457.0
auto_round( 12345.67, target_digits = 4) = 12346.0
auto_round( 1234.567, target_digits = 4) = 1235.0
auto_round( 123.4567, target_digits = 4) = 123.5
auto_round( 12.34567, target_digits = 4) = 12.35
auto_round( 1.234567, target_digits = 4) = 1.235
auto_round( 0.1234567, target_digits = 4) = 0.1235
auto_round( 0.01234567, target_digits = 4) = 0.01235
auto_round( 0.001234567, target_digits = 4) = 0.001235
auto_round( 0.0001234567, target_digits = 4) = 0.0001235
auto_round( 0.00001234567, target_digits = 4) = 1.235e-5
auto_round( 0.000001234567, target_digits = 4) = 1.235e-6
auto_round(0.0000001234567, target_digits = 4) = 1.235e-7
```
"""
function auto_round(number; target_digits::Int)
!isfinite(number) && return number
target_digits < 1 && throw(ArgumentError("target_digits needs to be 1 or more"))
order_of_magnitude = number == 0 ? 0 : log10(abs(number))
oom = floor(Int, order_of_magnitude)
ndigits = max(0, -oom + target_digits - 1)
if -5 < oom < 6
round(number, digits = ndigits)
else
# this relies on Base printing e notation >= 6 and <= -5
round(number, sigdigits = target_digits)
end
end
# Comparison used for sorting table keys: strings compare in "natural" order
# (numeric runs by value), everything else falls back to the generic `<`.
natural_lt(a::AbstractString, b::AbstractString) = NaturalSort.natural(a, b)
natural_lt(a, b) = a < b
# Check that every row-gap index addresses a boundary between two existing
# rows, i.e. lies in 1:nrows-1; a one-row table may not have gaps at all.
function validate_rowgaps(rowgaps, nrows)
    nrows == 1 && !isempty(rowgaps) && error("No row gaps allowed for a table with one row.")
    for (m, _) in rowgaps
        m >= 1 || error("A row gap index of $m is invalid, must be at least 1.")
        m < nrows || error("A row gap index of $m is invalid for a table with $nrows rows. The maximum allowed is $(nrows - 1).")
    end
end
# Check that every column-gap index addresses a boundary between two existing
# columns, i.e. lies in 1:ncols-1; a one-column table may not have gaps at all.
function validate_colgaps(colgaps, ncols)
    ncols == 1 && !isempty(colgaps) && error("No column gaps allowed for a table with one column.")
    for (m, _) in colgaps
        if m < 1
            error("A column gap index of $m is invalid, must be at least 1.")
        end
        if m >= ncols
            error("A column gap index of $m is invalid for a table with $ncols columns. The maximum allowed is $(ncols - 1).")
        end
    end
end | SummaryTables | https://github.com/PumasAI/SummaryTables.jl.git |
[
"MIT"
] | 1.0.0 | 1381f670499344657ae955634ab2ff4776b394c9 | src/html.jl | code | 9908 | Base.show(io::IO, ::MIME"juliavscode/html", ct::Table) = show(io, MIME"text/html"(), ct)
# Render `ct` as a self-contained HTML <table> with an inline <style> block.
# The CSS class name is derived from a hash of the rendered content so that
# several tables on the same page (possibly from different package versions)
# cannot influence each other's styling.
function Base.show(io::IO, ::MIME"text/html", ct::Table)
    ct = postprocess(ct)
    # Cells sorted row-major by the top-left corner of their spans.
    cells = sort(to_spanned_cells(ct.cells), by = x -> (x.span[1].start, x.span[2].start))
    cells, annotations = resolve_annotations(cells)
    matrix = create_cell_matrix(cells)
    _io = IOBuffer()
    # The final table has a hash-based class name so that several different renderings (maybe even across
    # SummaryTables.jl versions) don't conflict and influence each other.
    hash_placeholder = "<<HASH>>" # should not collide because it's not valid HTML and <> are not allowed otherwise
    println(_io, "<table class=\"st-$(hash_placeholder)\">")
    print(_io, """
        <style>
        .st-$(hash_placeholder) {
            border: none;
            margin: 0 auto;
            padding: 0.25rem;
            border-collapse: separate;
            border-spacing: 0.85em 0.2em;
            line-height: 1.2em;
        }
        .st-$(hash_placeholder) tr td {
            vertical-align: top;
            padding: 0;
            border: none;
        }
        .st-$(hash_placeholder) br {
            line-height: 0em;
            margin: 0;
        }
        .st-$(hash_placeholder) sub {
            line-height: 0;
        }
        .st-$(hash_placeholder) sup {
            line-height: 0;
        }
        </style>
        """)
    # border-collapse requires a separate row/cell to insert a border, it can't be put on <tfoot>
    println(_io, "  <tr><td colspan=\"$(size(matrix, 2))\" style=\"border-bottom: 1.5px solid black; padding: 0\"></td></tr>")
    validate_rowgaps(ct.rowgaps, size(matrix, 1))
    validate_colgaps(ct.colgaps, size(matrix, 2))
    rowgaps = Dict(ct.rowgaps)
    colgaps = Dict(ct.colgaps)
    # Tracks the highest cell index already emitted so that a spanned cell is
    # printed only once, at its top-left grid position.
    running_index = 0
    for row in 1:size(matrix, 1)
        if row == ct.footer
            print(_io, "  <tfoot>\n")
            # border-collapse requires a separate row/cell to insert a border, it can't be put on <tfoot>
            print(_io, "  <tr><td colspan=\"$(size(matrix, 2))\" style=\"border-bottom:1px solid black;padding:0\"></td></tr>")
        end
        print(_io, "  <tr>\n")
        for col in 1:size(matrix, 2)
            index = matrix[row, col]
            if index > running_index
                print(_io, "    ")
                print_html_cell(_io, cells[index], rowgaps, colgaps)
                running_index = index
                print(_io, "\n")
            elseif index == 0
                # Grid position not covered by any cell: emit a placeholder.
                print(_io, "    ")
                print_empty_html_cell(_io)
                print(_io, "\n")
            end
        end
        print(_io, "  </tr>\n")
        if row == ct.header
            # border-collapse requires a separate row/cell to insert a border, it can't be put on <thead>
            print(_io, "  <tr><td colspan=\"$(size(matrix, 2))\" style=\"border-bottom:1px solid black;padding:0\"></td></tr>")
        end
    end
    # border-collapse requires a separate row/cell to insert a border, it can't be put on <tfoot>
    println(_io, "  <tr><td colspan=\"$(size(matrix, 2))\" style=\"border-bottom: 1.5px solid black; padding: 0\"></td></tr>")
    # Annotations and footnotes go into a single small-print row below the table.
    if !isempty(annotations) || !isempty(ct.footnotes)
        print(_io, "  <tr><td colspan=\"$(size(matrix, 2))\" style=\"font-size: 0.8em;\">")
        for (i, (annotation, label)) in enumerate(annotations)
            i > 1 && print(_io, " ")
            if label !== NoLabel()
                print(_io, "<sup>")
                _showas(_io, MIME"text/html"(), label)
                print(_io, "</sup> ")
            end
            _showas(_io, MIME"text/html"(), annotation)
        end
        for (i, footnote) in enumerate(ct.footnotes)
            (!isempty(annotations) || i > 1) && print(_io, " ")
            _showas(_io, MIME"text/html"(), footnote)
        end
        println(_io, "</td></tr>")
    end
    print(_io, "</table>")
    # Substitute the placeholder with a short hash of the rendered content.
    s = String(take!(_io))
    short_hash = first(bytes2hex(SHA.sha256(s)), 8)
    s2 = replace(s, hash_placeholder => short_hash)
    print(io, s2)
end
# Render a `Multiline` cell value as HTML, separating the stacked values
# with `<br>` tags.
function _showas(io::IO, ::MIME"text/html", m::Multiline)
    is_first = true
    for value in m.values
        is_first || print(io, "<br>")
        is_first = false
        _showas(io, MIME"text/html"(), value)
    end
end
# Render an annotated value: the value itself followed by its label as a
# superscript. Labels of type `NoLabel` are omitted entirely.
function _showas(io::IO, ::MIME"text/html", r::ResolvedAnnotation)
    _showas(io, MIME"text/html"(), r.value)
    r.label === NoLabel() && return
    print(io, "<sup>")
    _showas(io, MIME"text/html"(), r.label)
    print(io, "</sup>")
end
# Wrap a `Superscript` value in HTML <sup> tags.
function _showas(io::IO, mime::MIME"text/html", s::Superscript)
    print(io, "<sup>")
    _showas(io, mime, s.super)
    print(io, "</sup>")
end
# Wrap a `Subscript` value in HTML <sub> tags.
function _showas(io::IO, mime::MIME"text/html", s::Subscript)
    print(io, "<sub>")
    _showas(io, mime, s.sub)
    print(io, "</sub>")
end
# Write one spanned cell as a <td> element, translating the cell's style and
# any row/column gaps adjacent to its span into inline CSS. `rowgaps` and
# `colgaps` map gap indices to gap sizes in pt; half of each gap is applied
# as padding on either side of the gap.
function print_html_cell(io, cell::SpannedCell, rowgaps, colgaps)
    print(io, "<td")
    nrows, ncols = map(length, cell.span)
    if nrows > 1
        print(io, " rowspan=\"$nrows\"")
    end
    if ncols > 1
        print(io, " colspan=\"$ncols\"")
    end
    print(io, " style=\"")
    if cell.style.bold
        print(io, "font-weight:bold;")
    end
    if cell.style.italic
        print(io, "font-style:italic;")
    end
    if cell.style.underline
        print(io, "text-decoration:underline;")
    end
    # A column gap registered left of this cell's span contributes half its
    # width as left padding, on top of any explicit indent.
    padding_left = get(colgaps, cell.span[2].start-1, nothing)
    if cell.style.indent_pt != 0 || padding_left !== nothing
        pl = something(padding_left, 0.0) / 2 + cell.style.indent_pt
        print(io, "padding-left:$(pl)pt;")
    end
    padding_right = get(colgaps, cell.span[2].stop, nothing)
    if padding_right !== nothing
        print(io, "padding-right:$(padding_right/2)pt;")
    end
    if cell.style.border_bottom
        print(io, "border-bottom:1px solid black; ")
    end
    padding_bottom = get(rowgaps, cell.span[1].stop, nothing)
    if padding_bottom !== nothing
        print(io, "padding-bottom: $(padding_bottom/2)pt;")
    elseif cell.style.border_bottom
        print(io, "padding-bottom: 0.25em;") # needed to make border bottoms look less cramped
    end
    padding_top = get(rowgaps, cell.span[1].start-1, nothing)
    if padding_top !== nothing
        print(io, "padding-top: $(padding_top/2)pt;")
    end
    # NOTE(review): the two `∉` operators below appeared mojibaked as `β` in
    # the extracted source; `∉` is the only reading under which these
    # validation checks parse and make sense.
    if cell.style.valign ∉ (:top, :center, :bottom)
        error("Invalid valign $(repr(cell.style.valign)). Options are :top, :center, :bottom.")
    end
    if cell.style.valign !== :top
        v = cell.style.valign === :center ? "middle" : "bottom"
        print(io, "vertical-align:$v;")
    end
    if cell.style.halign ∉ (:left, :center, :right)
        error("Invalid halign $(repr(cell.style.halign)). Options are :left, :center, :right.")
    end
    print(io, "text-align:$(cell.style.halign);")
    print(io, "\">")
    if cell.value !== nothing
        _showas(io, MIME"text/html"(), cell.value)
    end
    print(io, "</td>")
    return
end
# Emit a placeholder <td> for grid positions not covered by any spanned cell.
function print_empty_html_cell(io)
    write(io, "<td class=\"st-empty\"></td>")
    return
end
# Print a <style> block for the given mapping of CSS selector => property
# dict. Selectors and properties are emitted in sorted order so the output
# is deterministic.
function print_html_styles(io, table_styles)
    println(io, "<style>")
    for (selector, props) in _sorted_dict(table_styles)
        println(io, selector, " {")
        for (prop, val) in _sorted_dict(props)
            println(io, " ", prop, ": ", val, ";")
        end
        println(io, "}")
    end
    println(io, "</style>")
end
# Collect a dict's pairs and order them by key, producing a deterministic
# iteration order for rendering.
function _sorted_dict(d)
    return sort!(collect(pairs(d)), by = first)
end
# Escaping functions, copied from PrettyTables, MIT licensed.
"""
    _str_html_escaped(io::IO, s::AbstractString, replace_newline = false, escape_html_chars = true)

Write `s` to `io` with HTML-special characters (`&`, `<`, `>`, `"`, `'`)
replaced by their entities when `escape_html_chars` is set, newlines rendered
either as `<BR>` (`replace_newline = true`) or as the literal text `\\n`, and
unprintable/malformed/overlong characters escaped like `escape_string` does.

Copied from PrettyTables (MIT licensed). Two repairs relative to the garbled
extraction: the entity strings (`&amp;` etc.) were restored — the extracted
text had them decoded to the raw characters, which would have made the
escaping a no-op — and the NUL handler is qualified as `Base.escape_nul`
(matching `_str_latex_escaped` in latex.jl); the name is not exported from
`Base`, so the unqualified call would throw `UndefVarError` on a NUL input.
"""
function _str_html_escaped(
    io::IO,
    s::AbstractString,
    replace_newline::Bool = false,
    escape_html_chars::Bool = true,
)
    a = Iterators.Stateful(s)
    for c in a
        if isascii(c)
            c == '\n' ? (replace_newline ? print(io, "<BR>") : print(io, "\\n")) :
            c == '&'  ? (escape_html_chars ? print(io, "&amp;") : print(io, c)) :
            c == '<'  ? (escape_html_chars ? print(io, "&lt;") : print(io, c)) :
            c == '>'  ? (escape_html_chars ? print(io, "&gt;") : print(io, c)) :
            c == '"'  ? (escape_html_chars ? print(io, "&quot;") : print(io, c)) :
            c == '\'' ? (escape_html_chars ? print(io, "&apos;") : print(io, c)) :
            c == '\0' ? print(io, Base.escape_nul(peek(a))) :
            c == '\e' ? print(io, "\\e") :
            c == '\\' ? print(io, "\\\\") :
            '\a' <= c <= '\r' ? print(io, '\\', "abtnvfr"[Int(c)-6]) :
            isprint(c) ? print(io, c) :
            print(io, "\\x", string(UInt32(c), base = 16, pad = 2))
        elseif !Base.isoverlong(c) && !Base.ismalformed(c)
            isprint(c)    ? print(io, c) :
            c <= '\x7f'   ? print(io, "\\x", string(UInt32(c), base = 16, pad = 2)) :
            c <= '\uffff' ? print(io, "\\u", string(UInt32(c), base = 16, pad = Base.need_full_hex(peek(a)) ? 4 : 2)) :
            print(io, "\\U", string(UInt32(c), base = 16, pad = Base.need_full_hex(peek(a)) ? 8 : 4))
        else # malformed or overlong
            # Emit the raw bytes of the invalid sequence as \xNN escapes.
            u = bswap(reinterpret(UInt32, c))
            while true
                print(io, "\\x", string(u % UInt8, base = 16, pad = 2))
                (u >>= 8) == 0 && break
            end
        end
    end
end
# String-returning convenience wrapper around `_str_html_escaped(io, ...)`.
function _str_html_escaped(
    s::AbstractString,
    replace_newline::Bool = false,
    escape_html_chars::Bool = true
)
    return sprint(sizehint = lastindex(s)) do io
        _str_html_escaped(io, s, replace_newline, escape_html_chars)
    end
end
| SummaryTables | https://github.com/PumasAI/SummaryTables.jl.git |
[
"MIT"
] | 1.0.0 | 1381f670499344657ae955634ab2ff4776b394c9 | src/latex.jl | code | 8723 | function Base.show(io::IO, ::MIME"text/latex", ct::Table)
    ct = postprocess(ct)
    # Cells sorted row-major by the top-left corner of their spans.
    cells = sort(to_spanned_cells(ct.cells), by = x -> (x.span[1].start, x.span[2].start))
    cells, annotations = resolve_annotations(cells)
    matrix = create_cell_matrix(cells)
    validate_rowgaps(ct.rowgaps, size(matrix, 1))
    validate_colgaps(ct.colgaps, size(matrix, 2))
    rowgaps = Dict(ct.rowgaps)
    colgaps = Dict(ct.colgaps)
    # Pick each column's default alignment by majority vote among its cells;
    # cells that agree with the column default need no \multicolumn override.
    column_alignment_counts = StatsBase.countmap((cell.span[2], cell.style.halign) for cell in cells if cell.value !== nothing)
    alignments = (:center, :left, :right)
    column_alignments = map(1:size(matrix,2)) do i_col
        i_max = argmax(get(column_alignment_counts, (i_col:i_col, al), 0) for al in alignments)
        return alignments[i_max]
    end
    # Build the tabular column spec, inserting \hskip for column gaps.
    colspec = let
        iob = IOBuffer()
        for (icol, al) in enumerate(column_alignments)
            char = al === :center ? 'c' :
                al === :right ? 'r' :
                al === :left ? 'l' : error("Invalid align $al")
            print(iob, char)
            if haskey(colgaps, icol)
                print(iob, "@{\\hskip $(colgaps[icol])pt}")
            end
        end
        String(take!(iob))
    end
    print(io, """
    \\begin{table}[!ht]
    \\setlength\\tabcolsep{0pt}
    \\centering
    \\begin{threeparttable}
    \\begin{tabular}{@{\\extracolsep{2ex}}*{$(size(matrix, 2))}{$colspec}}
    \\toprule
    """)
    # Tracks the highest cell index already emitted so that a spanned cell is
    # printed only once, at its top-left grid position.
    running_index = 0
    # Bottom borders are drawn with \cmidrule after the last row of each
    # bordered cell, collected here as row => column ranges.
    bottom_borders = Dict{Int, Vector{UnitRange}}()
    for row in axes(matrix, 1)
        for col in axes(matrix, 2)
            index = matrix[row, col]
            column_align = column_alignments[col]
            if index == 0
                col > 1 && print(io, " & ")
                print_empty_latex_cell(io)
            else
                cell = cells[index]
                if cell.style.border_bottom && col == cell.span[2].start
                    lastrow = cell.span[1].stop
                    ranges = get!(bottom_borders, lastrow) do
                        UnitRange[]
                    end
                    border_columns = cell.span[2]
                    push!(ranges, border_columns)
                end
                halign_char = cell.style.halign === :left ? 'l' :
                    cell.style.halign === :center ? 'c' :
                    cell.style.halign === :right ? 'r' :
                    error("Unknown halign $(cell.style.halign)")
                valign_char = cell.style.valign === :top ? 't' :
                    cell.style.valign === :center ? 'c' :
                    cell.style.valign === :bottom ? 'b' :
                    error("Unknown valign $(cell.style.valign)")
                nrow = length(cell.span[1])
                ncol = length(cell.span[2])
                use_multicolumn = ncol > 1 || cell.style.halign !== column_align
                if index > running_index
                    # this is the top-left part of a new cell which can be a single or multicolumn/row cell
                    col > 1 && print(io, " & ")
                    if cell.value !== nothing
                        use_multicolumn && print(io, "\\multicolumn{$ncol}{$halign_char}{")
                        nrow > 1 && print(io, "\\multirow[$valign_char]{$nrow}{*}{")
                        print_latex_cell(io, cell)
                        nrow > 1 && print(io, "}")
                        use_multicolumn && print(io, "}")
                    end
                    running_index = index
                elseif col == cell.span[2][begin]
                    # we need to print additional multicolumn statements in the second to last
                    # row of a multirow
                    col > 1 && print(io, " & ")
                    if ncol > 1
                        print(io, "\\multicolumn{$ncol}{$halign_char}{}")
                    end
                end
            end
        end
        print(io, " \\\\")
        # Row gaps are expressed as optional spacing after the row terminator.
        if haskey(rowgaps, row)
            print(io, "[$(rowgaps[row])pt]")
        end
        println(io)
        # draw any bottom borders that have been registered to be drawn below this row
        if haskey(bottom_borders, row)
            for range in bottom_borders[row]
                print(io, "\\cmidrule{$(range.start)-$(range.stop)}")
            end
            print(io, "\n")
        end
        if row == ct.header
            print(io, "\\midrule\n")
        end
        if row + 1 == ct.footer
            print(io, "\\midrule\n")
        end
    end
    print(io, "\\bottomrule\n")
    print(io, raw"""
    \end{tabular}
    """)
    # Annotations and footnotes go into a threeparttable tablenotes block.
    if !isempty(annotations) || !isempty(ct.footnotes)
        println(io, raw"\begin{tablenotes}[flushleft,para]")
        println(io, raw"\footnotesize")
        for (annotation, label) in annotations
            if label !== NoLabel()
                print(io, raw"\item[")
                _showas(io, MIME"text/latex"(), label)
                print(io, "]")
            end
            _showas(io, MIME"text/latex"(), annotation)
            println(io)
        end
        for footnote in ct.footnotes
            print(io, raw"\item[]")
            _showas(io, MIME"text/latex"(), footnote)
            println(io)
        end
        println(io, raw"\end{tablenotes}")
    end
    print(io, raw"""
    \end{threeparttable}
    \end{table}
    """)
    # after end{tabular}:
    return
end
# Return a fresh dict with the style properties registered for `class` in
# `table_styles`, or an empty dict if the class has no registered styles.
function get_class_styles(class, table_styles)
    props = Dict{Symbol, Any}()
    haskey(table_styles, class) && merge!(props, table_styles[class])
    return props
end
# Empty LaTeX cells need no output at all (the column separator is enough),
# unlike their HTML counterpart.
function print_empty_latex_cell(io)
    return nothing
end
# Write a cell's value wrapped in the LaTeX commands corresponding to its
# style (indent, bold, italic, underline). Cells holding `nothing` produce
# no output.
function print_latex_cell(io, cell::SpannedCell)
    cell.value === nothing && return
    style = cell.style
    style.indent_pt > 0 && print(io, "\\hspace{$(style.indent_pt)pt}")
    # Open the styling commands, remembering how many braces to close.
    opened = 0
    for (enabled, command) in (
            (style.bold, "\\textbf{"),
            (style.italic, "\\textit{"),
            (style.underline, "\\underline{"),
        )
        if enabled
            print(io, command)
            opened += 1
        end
    end
    _showas(io, MIME"text/latex"(), cell.value)
    for _ in 1:opened
        print(io, "}")
    end
    return
end
# A `Multiline` value becomes a width-collapsed single-column tabular, which
# is the standard trick for line breaks inside a LaTeX table cell.
function _showas(io::IO, ::MIME"text/latex", m::Multiline)
    print(io, "\\begin{tabular}{@{}c@{}}")
    for (i, value) in enumerate(m.values)
        i == 1 || print(io, " \\\\ ")
        _showas(io, MIME"text/latex"(), value)
    end
    print(io, "\\end{tabular}")
end
# Wrap a `Superscript` value in \textsuperscript{...}.
function _showas(io::IO, mime::MIME"text/latex", s::Superscript)
    print(io, "\\textsuperscript{")
    _showas(io, mime, s.super)
    print(io, "}")
end
# Wrap a `Subscript` value in \textsubscript{...}.
function _showas(io::IO, mime::MIME"text/latex", s::Subscript)
    print(io, "\\textsubscript{")
    _showas(io, mime, s.sub)
    print(io, "}")
end
# Render an annotated value: the value itself followed by its label in a
# threeparttable \tnote. Labels of type `NoLabel` are omitted entirely.
function _showas(io::IO, ::MIME"text/latex", r::ResolvedAnnotation)
    _showas(io, MIME"text/latex"(), r.value)
    r.label === NoLabel() && return
    print(io, "\\tnote{")
    _showas(io, MIME"text/latex"(), r.label)
    print(io, "}")
end
# Write `s` to `io` with LaTeX-special characters escaped. Analogous to
# `_str_html_escaped` in html.jl (both copied from PrettyTables, MIT
# licensed): special characters get a leading backslash or a \text... macro,
# unprintable/malformed/overlong characters are escaped like `escape_string`.
function _str_latex_escaped(io::IO, s::AbstractString)
    escapable_special_chars = raw"&%$#_{}"
    a = Iterators.Stateful(s)
    for c in a
        if c in escapable_special_chars
            # These only need a leading backslash.
            print(io, '\\', c)
        elseif c === '\\'
            print(io, "\\textbackslash{}")
        elseif c === '~'
            print(io, "\\textasciitilde{}")
        elseif c === '^'
            print(io, "\\textasciicircum{}")
        elseif isascii(c)
            c == '\0' ? print(io, Base.escape_nul(peek(a))) :
            c == '\e' ? print(io, "\\e") :
            # c == '\\' ? print(io, "\\\\") :
            '\a' <= c <= '\r' ? print(io, '\\', "abtnvfr"[Int(c)-6]) :
            c == '%' ? print(io, "\\%") :
            isprint(c) ? print(io, c) :
            print(io, "\\x", string(UInt32(c), base = 16, pad = 2))
        elseif !Base.isoverlong(c) && !Base.ismalformed(c)
            isprint(c) ? print(io, c) :
            c <= '\x7f' ? print(io, "\\x", string(UInt32(c), base = 16, pad = 2)) :
            c <= '\uffff' ? print(io, "\\u", string(UInt32(c), base = 16, pad = Base.need_full_hex(peek(a)) ? 4 : 2)) :
            print(io, "\\U", string(UInt32(c), base = 16, pad = Base.need_full_hex(peek(a)) ? 8 : 4))
        else # malformed or overlong
            # Emit the raw bytes of the invalid sequence as \xNN escapes.
            u = bswap(reinterpret(UInt32, c))
            while true
                print(io, "\\x", string(u % UInt8, base = 16, pad = 2))
                (u >>= 8) == 0 && break
            end
        end
    end
end
# String-returning convenience wrapper around `_str_latex_escaped(io, s)`.
function _str_latex_escaped(s::AbstractString)
    return sprint(sizehint = lastindex(s)) do io
        _str_latex_escaped(io, s)
    end
end
| SummaryTables | https://github.com/PumasAI/SummaryTables.jl.git |
[
"MIT"
] | 1.0.0 | 1381f670499344657ae955634ab2ff4776b394c9 | src/table.jl | code | 35500 | """
Specifies one variable to group over and an associated name for display.
"""
struct Group
    symbol::Symbol # column in the source table to group by
    name           # display value used for this group's header cells
end
# Convenience constructors: a bare symbol is displayed under its own name,
# a `symbol => name` pair supplies an explicit display name.
Group(sym::Symbol) = Group(sym, string(sym))
Group(pr::Pair{Symbol, <:Any}) = Group(first(pr), last(pr))
# Normalize the user-facing `rows`/`cols` arguments into a vector of `Group`s;
# a non-vector argument is treated as a single group spec.
make_groups(specs::AbstractVector) = [Group(spec) for spec in specs]
make_groups(spec) = [Group(spec)]
"""
Specifies one function to summarize the raw values of one group with,
and an associated name for display.
"""
struct SummaryAnalysis
func
name
end
# A `func => name` pair supplies an explicit display name; a bare function
# is displayed under its own name.
SummaryAnalysis(pr::Pair{<:Function, <:Any}) = SummaryAnalysis(first(pr), last(pr))
SummaryAnalysis(f::Function) = SummaryAnalysis(f, string(f))
"""
Stores the index of the grouping variable under which the summaries defined in
`analyses` should be run. An index of `0` means that one summary block is appended
after all columns or rows, an index of `1` means on summary block after each group
from the first grouping key of rows or columns, and so on.
"""
struct Summary
groupindex::Int
analyses::Vector{SummaryAnalysis}
end
# Build a `Summary` from `grouping_column => analyses`, resolving the
# grouping column to its positional index within `symbols`.
function Summary(p::Pair{Symbol, <:Vector}, symbols)
    column = first(p)
    index = findfirst(==(column), symbols)
    if index === nothing
        error("Summary variable :$(column) is not a grouping variable.")
    end
    return Summary(index, SummaryAnalysis.(last(p)))
end
# A plain vector of analyses means a single summary block over all groups,
# encoded as group index 0.
Summary(analyses::Vector, _) = Summary(0, SummaryAnalysis.(analyses))
# The variable that is used to populate the raw-value cells.
struct Variable
    symbol::Symbol # column in the source table holding the raw values
    name           # display name shown in the variable header cell
end
# Convenience constructors mirroring `Group`: bare symbol or `symbol => name`.
Variable(sym::Symbol) = Variable(sym, string(sym))
Variable(pr::Pair{Symbol, <:Any}) = Variable(first(pr), last(pr))
# All precomputed ingredients of a listing table, as assembled by
# `_listingtable` before cell layout happens in `get_cells`.
struct ListingTable
    gdf::DataFrames.GroupedDataFrame # data grouped by all row and column groupers
    variable::Variable               # the variable whose raw values fill the cells
    row_keys::Vector{<:Tuple}        # one group key tuple per table row group
    col_keys::Vector{<:Tuple}        # one group key tuple per table column group
    rows::Vector{Group}              # row grouping variables
    columns::Vector{Group}           # column grouping variables
    rowsummary::Summary              # summary spec along the rows
    gdf_rowsummary::DataFrames.GroupedDataFrame # precomputed row summary values
    colsummary::Summary              # summary spec along the columns
    gdf_colsummary::DataFrames.GroupedDataFrame # precomputed column summary values
end
# Wrapper around the pagination keyword options (e.g. `rows`, `cols`).
struct Pagination{T<:NamedTuple}
    options::T
end
# Sort the keyword pairs by name so that `Pagination(rows = 1, cols = 2)` and
# `Pagination(cols = 2, rows = 1)` produce an identical options tuple (and
# therefore an identical `Pagination` type).
Pagination(; kwargs...) = Pagination(NamedTuple(sort(collect(pairs(kwargs)), by = first)))
"""
Page{M}
Represents one page of a `PaginatedTable`.
It has two public fields:
- `table::Table`: A part of the full table, created according to the chosen `Pagination`.
- `metadata::M`: Information about which part of the full table this page contains. This is different for each
table function that takes a `Pagination` argument because each such function may use its own logic
for how to split pages.
"""
struct Page{M}
metadata::M
table::Table
end
# Plain-text display for a single page. The `:indent` and `:i_page` IO
# context keys are set by the `PaginatedTable` show method so pages print
# nested and numbered underneath it.
function Base.show(io::IO, M::MIME"text/plain", p::Page)
    pad = " " ^ get(io, :indent, 0)
    page_number = get(io, :i_page, nothing)
    print(io, pad, "Page")
    page_number === nothing || print(io, " $page_number")
    println(io)
    inner = IOContext(io, :indent => get(io, :indent, 0) + 2)
    show(inner, M, p.metadata)
end
"""
GroupKey
Holds the group column names and values for one group of a dataset.
This struct has only one field:
- `entries::Vector{Pair{Symbol,Any}}`: A vector of `column_name => group_value` pairs.
"""
struct GroupKey
entries::Vector{Pair{Symbol,Any}}
end
GroupKey(g::DataFrames.GroupKey) = GroupKey(collect(pairs(g)))
"""
ListingPageMetadata
Describes which row and column group sections of a full listing table
are included in a given page. There are two fields:
- `rows::Vector{GroupKey}`
- `cols::Vector{GroupKey}`
Each `Vector{GroupKey}` holds all group keys that were relevant for pagination
along that side of the listing table. A vector is empty if the table was not
paginated along that side.
"""
Base.@kwdef struct ListingPageMetadata
rows::Vector{GroupKey} = []
cols::Vector{GroupKey} = []
end
# Plain-text display of a page's row/column group keys. Respects the
# `:indent` IO context key so it nests under the `Page` display; empty key
# vectors are reported as "no pagination" along that side.
function Base.show(io::IO, M::MIME"text/plain", p::ListingPageMetadata)
    indent = " " ^ get(io, :indent, 0)
    println(io, indent, "ListingPageMetadata")
    print(io, indent, "  rows:")
    isempty(p.rows) && print(io, " no pagination")
    for r in p.rows
        print(io, "\n    ", indent,)
        print(io, "[", join(("$key => $value" for (key, value) in r.entries), ", "), "]")
    end
    print(io, "\n", indent, "  cols:")
    isempty(p.cols) && print(io, " no pagination")
    for c in p.cols
        print(io, "\n    ", indent)
        print(io, "[", join(("$key => $value" for (key, value) in c.entries), ", "), "]")
    end
end
"""
PaginatedTable{M}
The return type for all table functions that take a `Pagination` argument to split the table
into pages according to table-specific pagination rules.
This type only has one field:
- `pages::Vector{Page{M}}`: Each `Page` holds a table and metadata of type `M` which depends on the table function that creates the `PaginatedTable`.
To get the table of page 2, for a `PaginatedTable` stored in variable `p`, access `p.pages[2].table`.
"""
struct PaginatedTable{M}
pages::Vector{Page{M}}
end
# Plain-text overview: a one-line header with the page count, followed by
# each page shown indented and numbered (via the :indent/:i_page context
# keys consumed by the `Page` show method).
function Base.show(io::IO, M::MIME"text/plain", p::PaginatedTable)
    n = length(p.pages)
    print(io, "PaginatedTable with $n page$(n == 1 ? "" : "s")")
    for (number, page) in enumerate(p.pages)
        print(io, "\n")
        ctx = IOContext(io, :indent => 2, :i_page => number)
        show(ctx, M, page)
    end
end
# a basic interactive display of the different pages in the PaginatedTable, which is much
# nicer than just having the textual overview that you get printed out in the REPL
function Base.show(io::IO, M::Union{MIME"text/html",MIME"juliavscode/html"}, p::PaginatedTable)
    println(io, "<div>")
    # One script plus one button per page; clicking a button toggles the
    # visibility of the corresponding page container below.
    println(io, """
        <script>
        function showPaginatedPage(el, index){
            const container = el.parentElement.querySelector('div');
            for (var i = 0; i<container.children.length; i++){
                container.children[i].style.display = i == index ? 'block' : 'none';
            }
        }
        </script>
        """)
    for i in 1:length(p.pages)
        println(io, """
            <button onclick="showPaginatedPage(this, $(i-1))">
                Page $i
            </button>
            """)
    end
    println(io, "<div>")
    # Only the first page is initially visible; the rest start hidden.
    for (i, page) in enumerate(p.pages)
        println(io, "<div style=\"display:$(i == 1 ? "block" : "none")\">")
        println(io, "<h3>Page $i</h3>")
        show(io, M, page.table)
        println(io, "\n</div>")
    end
    println(io, "</div>")
    println(io, "</div>")
    return
end
"""
listingtable(table, variable, [pagination];
rows = [],
cols = [],
summarize_rows = [],
summarize_cols = [],
variable_header = true,
table_kwargs...
)
Create a listing table `Table` from `table` which displays raw values from column `variable`.
## Arguments
- `table`: Data source which must be convertible to a `DataFrames.DataFrame`.
- `variable`: Determines which variable's raw values are shown. Can either be a `Symbol` such as `:ColumnA`, or alternatively a `Pair` where the second element is the display name, such as `:ColumnA => "Column A"`.
- `pagination::Pagination`: If a pagination object is passed, the return type changes to `PaginatedTable`.
The `Pagination` object may be created with keywords `rows` and/or `cols`.
These must be set to `Int`s that determine how many group sections along each side are included in one page.
These group sections are determined by the summary structure, because pagination never splits a listing table
within rows or columns that are being summarized together.
If `summarize_rows` or `summarize_cols` is empty or unset, each group along that side is its own section.
If `summarize_rows` or `summarize_cols` has a group passed via the `column => ...` syntax, the group sections
along that side are determined by `column`. If no such `column` is passed (i.e., the summary
along that side applies to the all groups) there is only one section along that side, which means
that this side cannot be paginated into more than one page.
## Keyword arguments
- `rows = []`: Grouping structure along the rows. Should be a `Vector` where each element is a grouping variable, specified as a `Symbol` such as `:Column1`, or a `Pair`, where the first element is the symbol and the second a display name, such as `:Column1 => "Column 1"`. Specifying multiple grouping variables creates nested groups, with the last variable changing the fastest.
- `cols = []`: Grouping structure along the columns. Follows the same structure as `rows`.
- `summarize_rows = []`: Specifies functions to summarize `variable` with along the rows.
Should be a `Vector`, where each entry is one separate summary.
Each summary can be given as a `Function` such as `mean` or `maximum`, in which case the display name is the function's name.
Alternatively, a display name can be given using the pair syntax, such as `mean => "Average"`.
By default, one summary is computed over all groups.
You can also pass `Symbol => [...]` where `Symbol` is a grouping column, to compute one summary for each level of that group.
- `summarize_cols = []`: Specifies functions to summarize `variable` with along the columns. Follows the same structure as `summarize_rows`.
- `variable_header = true`: Controls if the cell with the name of the summarized `variable` is shown.
- `sort = true`: Sort the input table before grouping. Pre-sort as desired and set to `false` when you want to maintain a specific group order or are using non-sortable objects as group keys.
All other keywords are forwarded to the `Table` constructor, refer to its docstring for details.
## Example
```
using Statistics
tbl = [
:Apples => [1, 2, 3, 4, 5, 6, 7, 8],
:Batch => [1, 1, 1, 1, 2, 2, 2, 2],
:Checked => [true, false, true, false, true, false, true, false],
:Delivery => ['a', 'a', 'b', 'b', 'a', 'a', 'b', 'b'],
]
listingtable(
tbl,
:Apples => "Number of apples",
rows = [:Batch, :Checked => "Checked for spots"],
cols = [:Delivery],
summarize_cols = [sum => "overall"],
summarize_rows = :Batch => [mean => "average", sum]
)
```
"""
# See the docstring above. Without `pagination` this builds a single `Table`;
# with `pagination` it splits the data into group sections and builds one
# `Table` per page, returning a `PaginatedTable`.
function listingtable(table, variable, pagination::Union{Nothing,Pagination} = nothing; rows = [],
    cols = [],
    summarize_rows = [],
    summarize_cols = [],
    variable_header = true,
    sort = true,
    table_kwargs...)
    df = DataFrames.DataFrame(table)
    var = Variable(variable)
    rowgroups = make_groups(rows)
    colgroups = make_groups(cols)
    rowsymbols = [r.symbol for r in rowgroups]
    rowsummary = Summary(summarize_rows, rowsymbols)
    colsymbols = [c.symbol for c in colgroups]
    colsummary = Summary(summarize_cols, colsymbols)
    if pagination === nothing
        return _listingtable(df, var, rowgroups, colgroups, rowsummary, colsummary; variable_header, sort, table_kwargs...)
    else
        sd = setdiff(keys(pagination.options), [:rows, :cols])
        if !isempty(sd)
            throw(ArgumentError("`listingtable` only accepts `rows` and `cols` as pagination arguments. Found $(join(sd, ", ", " and "))"))
        end
        paginate_cols = get(pagination.options, :cols, nothing)
        paginate_rows = get(pagination.options, :rows, nothing)
        # Pagination must never split rows/columns that are summarized
        # together, so only the groupers up to the summary's group index
        # define the paginatable sections along each side.
        paginated_colgroupers = colsymbols[1:(isempty(colsummary.analyses) ? end : colsummary.groupindex)]
        paginated_rowgroupers = rowsymbols[1:(isempty(rowsummary.analyses) ? end : rowsummary.groupindex)]
        pages = Page{ListingPageMetadata}[]
        rowgrouped = DataFrames.groupby(df, paginated_rowgroupers; sort)
        rowgroup_indices = 1:length(rowgrouped)
        # `nothing` means "no pagination along this side": take all sections
        # in one partition.
        for r_indices in Iterators.partition(rowgroup_indices, something(paginate_rows, length(rowgroup_indices)))
            colgrouped = DataFrames.groupby(DataFrame(rowgrouped[r_indices]), paginated_colgroupers; sort)
            colgroup_indices = 1:length(colgrouped)
            for c_indices in Iterators.partition(colgroup_indices, something(paginate_cols, length(colgroup_indices)))
                t = _listingtable(DataFrame(colgrouped[c_indices]), var, rowgroups, colgroups, rowsummary, colsummary; variable_header, sort, table_kwargs...)
                push!(pages, Page(
                    ListingPageMetadata(
                        cols = paginate_cols === nothing ? GroupKey[] : GroupKey.(keys(colgrouped)[c_indices]),
                        rows = paginate_rows === nothing ? GroupKey[] : GroupKey.(keys(rowgrouped)[r_indices]),
                    ),
                    t,
                ))
            end
        end
        return PaginatedTable(pages)
    end
end
# Error thrown when one (row group, column group) combination selects more
# than one raw value, which would make the cell content ambiguous.
struct TooManyRowsError <: Exception
    msg::String
end
# NOTE(review): this overloads `Base.show` rather than the conventional
# `Base.showerror` (which `SortingError` below uses) — confirm whether that
# is intentional before changing it.
Base.show(io::IO, t::TooManyRowsError) = print(io, "TooManyRowsError: ", t.msg)
# Error thrown when sorting the input table by its grouping columns fails,
# typically because group keys contain non-sortable formatting objects.
struct SortingError <: Exception end
function Base.showerror(io::IO, ::SortingError)
    print(io, """
    Sorting the input dataframe for grouping failed.
    This can happen when a column contains special objects intended for table formatting which are not sortable, for example `Concat`, `Multiline`, `Subscript` or `Superscript`.
    Consider pre-sorting your dataframe and retrying with `sort = false`.
    Note that group keys will appear in the order they are present in the dataframe, so usually you should sort in the same order that the groups are given to the table function.
    """)
end
# Core listing-table builder: groups `df` by all row and column groupers,
# checks that each group holds exactly one raw value, precomputes the row and
# column summaries, and lays everything out via `get_cells` into a `Table`.
function _listingtable(
    df::DataFrames.DataFrame,
    variable::Variable,
    rowgroups::Vector{Group},
    colgroups::Vector{Group},
    rowsummary::Summary,
    colsummary::Summary;
    variable_header::Bool,
    sort::Bool,
    celltable_kws...)
    rowsymbols = [r.symbol for r in rowgroups]
    colsymbols = [c.symbol for c in colgroups]
    groups = vcat(rowsymbols, colsymbols)
    # remove unneeded columns from the dataframe
    used_columns = [variable.symbol; rowsymbols; colsymbols]
    if sort && !isempty(groups)
        try
            df = Base.sort(df, groups, lt = natural_lt)
        catch e
            # Group keys may contain non-sortable formatting objects.
            throw(SortingError())
        end
    end
    gdf = DataFrames.groupby(df, groups, sort = false)
    # Every fully-crossed group must map to exactly one raw value per cell.
    for group in gdf
        if size(group, 1) > 1
            # Find the non-grouping columns that vary within the offending
            # group; those are candidates for additional grouping factors.
            nonuniform_columns = filter(names(df, DataFrames.Not(used_columns))) do name
                length(Set((getproperty(group, name)))) > 1
            end
            throw(TooManyRowsError("""
                Found a group which has more than one value. This is not allowed, only one value of "$(variable.symbol)" per table cell may exist.
                $(repr(DataFrames.select(group, used_columns), context = :limit => true))
                Filter your dataset or use additional row or column grouping factors.
                $(!isempty(nonuniform_columns) ?
                "The following columns in the dataset are not uniform in this group and could potentially be used: $nonuniform_columns." :
                "There are no other non-uniform columns in this dataset.")
                """))
        end
    end
    # Row summaries: one value per summary function ("____1", "____2", ...)
    # per combination of (row groupers up to the summary index) x all column
    # groupers.
    rowsummary_groups = vcat(rowsymbols[1:rowsummary.groupindex], colsymbols)
    gdf_rowsummary = DataFrames.combine(
        DataFrames.groupby(df, rowsummary_groups),
        [variable.symbol => a.func => "____$i" for (i, a) in enumerate(rowsummary.analyses)]...,
        ungroup = false
    )
    # Column summaries, analogous with the roles of rows and columns swapped.
    colsummary_groups = vcat(rowsymbols, colsymbols[1:colsummary.groupindex])
    gdf_colsummary = DataFrames.combine(
        DataFrames.groupby(df, colsummary_groups),
        [variable.symbol => a.func => "____$i" for (i, a) in enumerate(colsummary.analyses)]...,
        ungroup = false
    )
    gdf_rows = DataFrames.groupby(df, rowsymbols, sort = false)
    row_keys = Tuple.(keys(gdf_rows))
    gdf_cols = DataFrames.groupby(df, colsymbols, sort = false)
    col_keys = Tuple.(keys(gdf_cols))
    lt = ListingTable(
        gdf,
        variable,
        row_keys,
        col_keys,
        rowgroups,
        colgroups,
        rowsummary,
        gdf_rowsummary,
        colsummary,
        gdf_colsummary,
    )
    cl, i_header, rowgap_indices = get_cells(lt; variable_header)
    Table(cl, i_header, nothing; rowgaps = rowgap_indices .=> DEFAULT_ROWGAP, celltable_kws...)
end
# Lay out a `ListingTable` as a vector of `SpannedCell`s.
#
# Returns `(cells, n_header_rows, rowgap_indices)`, where `n_header_rows` is the
# number of header rows at the top of the table and `rowgap_indices` are the row
# numbers after which a visual gap should separate row parts from their summary
# rows.
function get_cells(l::ListingTable; variable_header::Bool)
    cells = SpannedCell[]

    # index of the grouping level after which row/column summaries are computed
    row_summaryindex = l.rowsummary.groupindex
    col_summaryindex = l.colsummary.groupindex

    # split keys into parts sharing the same summary-level prefix; each part
    # gets its own block of summary rows/columns
    rowparts = partition(l.row_keys, by = x -> x[1:row_summaryindex])
    colparts = partition(l.col_keys, by = x -> x[1:col_summaryindex])

    lengths_rowparts = map(length, rowparts)
    cumsum_lengths_rowparts = cumsum(lengths_rowparts)
    n_row_summaries = length(l.rowsummary.analyses)

    lengths_colparts = map(length, colparts)
    cumsum_lengths_colparts = cumsum(lengths_colparts)
    n_col_summaries = length(l.colsummary.analyses)

    n_rowgroups = length(l.rows)
    n_colgroups = length(l.columns)

    # two header rows per column group (name + keys) plus an optional variable header
    colheader_offset = 2 * n_colgroups + (variable_header ? 1 : 0)
    rowheader_offset = n_rowgroups

    rowgap_indices = Int[]

    # group headers for row groups
    for (i_rowgroup, rowgroup) in enumerate(l.rows)
        cell = SpannedCell(colheader_offset, i_rowgroup, rowgroup.name, listingtable_row_header())
        push!(cells, cell)
    end

    for (i_colpart, colpart) in enumerate(colparts)
        # first column of this column part, accounting for all previous parts
        # and their summary columns
        coloffset = rowheader_offset +
            (i_colpart == 1 ? 0 : cumsum_lengths_colparts[i_colpart-1]) +
            (i_colpart-1) * n_col_summaries

        colrange = coloffset .+ (1:length(colpart))

        # variable headers on top of each column part
        if variable_header
            cell = SpannedCell(colheader_offset, colrange, l.variable.name, listingtable_variable_header())
            push!(cells, cell)
        end

        values_spans = nested_run_length_encodings(colpart)
        all_spanranges = [spanranges(spans) for (values, spans) in values_spans]

        # column headers on top of each column part
        for i_colgroupkey in 1:n_colgroups
            # a group name spans the runs of the parent level
            headerspanranges = i_colgroupkey == 1 ? [1:length(colpart)] : all_spanranges[i_colgroupkey-1]
            for headerspanrange in headerspanranges
                header_offset_range = headerspanrange .+ coloffset
                class = length(headerspanrange) > 1 ? listingtable_column_header_spanned() : listingtable_column_header()
                cell = SpannedCell(i_colgroupkey * 2 - 1, header_offset_range, l.columns[i_colgroupkey].name, class)
                push!(cells, cell)
            end
            values, _ = values_spans[i_colgroupkey]
            ranges = all_spanranges[i_colgroupkey]
            for (value, range) in zip(values, ranges)
                label_offset_range = range .+ coloffset
                cell = SpannedCell(i_colgroupkey * 2, label_offset_range, format_value(value), listingtable_column_header_key())
                push!(cells, cell)
            end
        end

        # column analysis headers after each column part
        for (i_colsumm, summ_ana) in enumerate(l.colsummary.analyses)
            summ_coloffset = coloffset + length(colpart)
            push!(cells, SpannedCell(
                colheader_offset,
                summ_coloffset + i_colsumm,
                summ_ana.name,
                listingtable_column_analysis_header()
            ))
        end
    end

    for (i_rowpart, rowpart) in enumerate(rowparts)
        # first row of this row part, accounting for all previous parts and
        # their summary rows
        rowgroupoffset = i_rowpart == 1 ? 0 : cumsum_lengths_rowparts[i_rowpart-1]
        rowsummoffset = (i_rowpart - 1) * n_row_summaries
        rowoffset = rowgroupoffset + rowsummoffset + colheader_offset

        all_rowspans = nested_run_length_encodings(rowpart)

        # row groups to the left of each row part
        for i_rowgroupkey in 1:n_rowgroups
            values, spans = all_rowspans[i_rowgroupkey]
            ranges = spanranges(spans)
            for (value, range) in zip(values, ranges)
                offset_range = range .+ rowoffset
                cell = SpannedCell(offset_range, i_rowgroupkey, format_value(value), listingtable_row_key())
                push!(cells, cell)
            end
        end

        summ_rowoffset = rowoffset + length(rowpart)

        # visual gaps before (and, except for the last part, after) a summary block
        if !isempty(l.rowsummary.analyses)
            push!(rowgap_indices, summ_rowoffset)
            if i_rowpart < length(rowparts)
                push!(rowgap_indices, summ_rowoffset + length(l.rowsummary.analyses))
            end
        end

        # row analysis headers below each row part
        for (i_rowsumm, summ_ana) in enumerate(l.rowsummary.analyses)
            push!(cells, SpannedCell(
                summ_rowoffset + i_rowsumm,
                n_rowgroups,
                summ_ana.name,
                listingtable_row_analysis_header()
            ))
        end

        # this loop goes over each block of rowparts x colparts
        for (i_colpart, colpart) in enumerate(colparts)
            colgroupoffset = i_colpart == 1 ? 0 : cumsum_lengths_colparts[i_colpart-1]
            colsummoffset = (i_colpart - 1) * n_col_summaries
            coloffset = colgroupoffset + colsummoffset + rowheader_offset

            # populate raw value cells for the current block
            for (i_row, rowkey) in enumerate(rowpart)
                for (i_col, colkey) in enumerate(colpart)
                    fullkey = (rowkey..., colkey...)
                    # a group may be absent from the data; show an empty cell then
                    data = get(l.gdf, fullkey, nothing)
                    if data === nothing
                        value = ""
                    else
                        value = only(getproperty(data, l.variable.symbol))
                    end
                    row = rowoffset + i_row
                    col = coloffset + i_col
                    cell = SpannedCell(row, col, format_value(value), listingtable_body())
                    push!(cells, cell)
                end
            end

            # populate row analysis cells for the current block
            for i_rowsumm in eachindex(l.rowsummary.analyses)
                summ_rowoffset = rowoffset + length(rowpart)
                for (i_col, colkey) in enumerate(colpart)
                    # summaries are keyed only by the prefix up to the summary index
                    partial_rowkey = first(rowpart)[1:row_summaryindex]
                    summkey = (partial_rowkey..., colkey...)
                    datacol_index = length(summkey) + i_rowsumm
                    data = get(l.gdf_rowsummary, summkey, nothing)
                    if data === nothing
                        value = ""
                    else
                        value = only(data[!, datacol_index])
                    end
                    cell = SpannedCell(
                        summ_rowoffset + i_rowsumm,
                        coloffset + i_col,
                        format_value(value),
                        listingtable_row_analysis_body()
                    )
                    push!(cells, cell)
                end
            end

            # populate column analysis cells for the current block
            for i_colsumm in eachindex(l.colsummary.analyses)
                summ_coloffset = coloffset + length(colpart)
                for (i_row, rowkey) in enumerate(rowpart)
                    partial_colkey = first(colpart)[1:col_summaryindex]
                    summkey = (rowkey..., partial_colkey...)
                    datacol_index = length(summkey) + i_colsumm
                    data = get(l.gdf_colsummary, summkey, nothing)
                    if data === nothing
                        value = ""
                    else
                        value = only(data[!, datacol_index])
                    end
                    cell = SpannedCell(
                        rowoffset + i_row,
                        summ_coloffset + i_colsumm,
                        format_value(value),
                        listingtable_column_analysis_body()
                    )
                    push!(cells, cell)
                end
            end
        end
    end

    cells, colheader_offset, rowgap_indices
end
# Cell styles for the building blocks of a listing table.
listingtable_row_header() = CellStyle(bold = true, halign = :left)                   # row group name
listingtable_variable_header() = CellStyle(bold = true)                              # listed variable name
listingtable_row_key() = CellStyle(halign = :left)                                   # row group key values
listingtable_body() = CellStyle()                                                    # plain data cells
listingtable_column_header() = CellStyle(bold = true)                                # column group name
listingtable_column_header_spanned() = CellStyle(bold = true, border_bottom = true)  # column group name spanning several keys
listingtable_column_header_key() = CellStyle()                                       # column group key values
listingtable_row_analysis_header() = CellStyle(bold = true, halign = :left)          # row summary label
listingtable_row_analysis_body() = CellStyle()                                       # row summary values
listingtable_column_analysis_header() = CellStyle(bold = true, halign = :right)      # column summary label
listingtable_column_analysis_body() = CellStyle(halign = :right)                     # column summary values
"""
    nested_run_length_encodings(gdf_keys)

Compute a run-length encoding of the values of `gdf_keys` (a collection of
equal-length key tuples) at each nesting level.

Returns a vector with one `(values, lengths)` tuple per level, where `values[i]`
repeats for `lengths[i]` consecutive entries. A run at level `n` is additionally
broken wherever a new run starts at level `n - 1`, so spans always nest inside
their parent groups and never straddle them.
"""
function nested_run_length_encodings(gdf_keys)
    n_levels = length(first(gdf_keys))
    spans = Tuple{Vector{Any},Vector{Int}}[]
    for level in 1:n_levels
        keys = Any[]
        lengths = Int[]
        prev_key = first(gdf_keys)[level]
        current_length = 1
        # entry indices where a new run begins on the parent level; a run on
        # this level must not continue across such a boundary
        starts_of_previous_level = level == 1 ? Int[] : cumsum([1; spans[level-1][2][1:end-1]])
        for (i, entrykeys) in zip(2:length(gdf_keys), gdf_keys[2:end])
            key = entrykeys[level]
            is_previous_level_start = i in starts_of_previous_level
            if !is_previous_level_start && key == prev_key
                current_length += 1
            else
                push!(lengths, current_length)
                push!(keys, prev_key)
                current_length = 1
            end
            prev_key = key
        end
        # flush the final run
        push!(lengths, current_length)
        push!(keys, prev_key)
        push!(spans, (keys, lengths))
    end
    return spans
end
"""
    spanranges(spans)

Convert a collection of run lengths into the corresponding contiguous
`UnitRange`s, e.g. `[2, 1, 3]` becomes `[1:2, 3:3, 4:6]`.
"""
function spanranges(spans)
    start = 1
    map(spans) do span
        # `stop` is local to the closure; only `start` must persist between iterations
        stop = start + span - 1
        range = start:stop
        start += span
        return range
    end
end
# Split `collection` into consecutive parts whose elements are `isequal` under
# `by`. Note that an empty collection yields a single empty part, `[[]]`.
function partition(collection; by)
    parts = Vector{eltype(collection)}[]
    part = eltype(collection)[]
    for element in collection
        # close the current part as soon as the key changes
        if !isempty(part) && !isequal(by(last(part)), by(element))
            push!(parts, part)
            part = eltype(collection)[]
        end
        push!(part, element)
    end
    push!(parts, part)
    return parts
end
# Intermediate representation of a summary table: the grouped source data plus
# everything needed to lay out its header and body cells.
struct SummaryTable
    gdf::DataFrames.GroupedDataFrame                # data grouped by all row and column symbols
    variable::Variable                              # the variable being summarized
    row_keys::Vector{<:Tuple}                       # group key tuples along the rows
    col_keys::Vector{<:Tuple}                       # group key tuples along the columns
    rows::Vector{Group}                             # row grouping specifications
    columns::Vector{Group}                          # column grouping specifications
    summary::Summary                                # the summary analyses to compute
    gdf_summary::DataFrames.GroupedDataFrame        # precomputed per-group summary values
end
"""
summarytable(table, variable;
rows = [],
cols = [],
summary = [],
variable_header = true,
celltable_kws...
)
Create a summary table `Table` from `table`, which summarizes values from column `variable`.
## Arguments
- `table`: Data source which must be convertible to a `DataFrames.DataFrame`.
- `variable`: Determines which variable from `table` is summarized. Can either be a `Symbol` such as `:ColumnA`, or alternatively a `Pair` where the second element is the display name, such as `:ColumnA => "Column A"`.
## Keyword arguments
- `rows = []`: Grouping structure along the rows. Should be a `Vector` where each element is a grouping variable, specified as a `Symbol` such as `:Column1`, or a `Pair`, where the first element is the symbol and the second a display name, such as `:Column1 => "Column 1"`. Specifying multiple grouping variables creates nested groups, with the last variable changing the fastest.
- `cols = []`: Grouping structure along the columns. Follows the same structure as `rows`.
- `summary = []`: Specifies functions to summarize `variable` with.
Should be a `Vector`, where each entry is one separate summary.
Each summary can be given as a `Function` such as `mean` or `maximum`, in which case the display name is the function's name.
Alternatively, a display name can be given using the pair syntax, such as `mean => "Average"`.
By default, one summary is computed over all groups.
You can also pass `Symbol => [...]` where `Symbol` is a grouping column, to compute one summary for each level of that group.
- `variable_header = true`: Controls if the cell with the name of the summarized `variable` is shown.
- `sort = true`: Sort the input table before grouping. Pre-sort as desired and set to `false` when you want to maintain a specific group order or are using non-sortable objects as group keys.
All other keywords are forwarded to the `Table` constructor, refer to its docstring for details.
## Example
```
using Statistics
tbl = [
:Apples => [1, 2, 3, 4, 5, 6, 7, 8],
:Batch => [1, 1, 1, 1, 2, 2, 2, 2],
:Delivery => ['a', 'a', 'b', 'b', 'a', 'a', 'b', 'b'],
]
summarytable(
tbl,
:Apples => "Number of apples",
rows = [:Batch],
cols = [:Delivery],
summary = [length => "N", mean => "average", sum]
)
```
"""
function summarytable(
table, variable;
rows = [],
cols = [],
summary = [],
variable_header = true,
celltable_kws...
)
df = DataFrames.DataFrame(table)
var = Variable(variable)
rowgroups = make_groups(rows)
colgroups = make_groups(cols)
rowsymbols = [r.symbol for r in rowgroups]
_summary = Summary(summary, rowsymbols)
if isempty(_summary.analyses)
throw(ArgumentError("No summary analyses defined."))
end
_summarytable(df, var, rowgroups, colgroups, _summary; variable_header, celltable_kws...)
end
# Internal worker for `summarytable`; arguments have already been validated and
# normalized. Builds the grouped data, precomputes all summary statistics, and
# lays out the resulting `Table`.
function _summarytable(
        df::DataFrames.DataFrame,
        variable::Variable,
        rowgroups::Vector{Group},
        colgroups::Vector{Group},
        summary::Summary;
        variable_header::Bool,
        sort = true,
        celltable_kws...)

    rowsymbols = [r.symbol for r in rowgroups]
    colsymbols = [c.symbol for c in colgroups]
    groups = vcat(rowsymbols, colsymbols)

    # remove unneeded columns from the dataframe
    used_columns = [variable.symbol; rowsymbols; colsymbols]
    _df = DataFrames.select(df, used_columns)

    if !isempty(groups) && sort
        try
            Base.sort!(_df, groups, lt = natural_lt)
        catch e
            # non-comparable group keys cannot be sorted; the user should
            # pre-sort and pass `sort = false`
            throw(SortingError())
        end
    end

    gdf = DataFrames.groupby(_df, groups, sort = false)
    # one result column per analysis, named "____1", "____2", ... so they
    # cannot clash with user columns
    # NOTE(review): this inner groupby does not pass `sort = false` like the
    # others — confirm whether that is intentional
    gdf_summary = DataFrames.combine(
        DataFrames.groupby(_df, groups),
        [variable.symbol => a.func => "____$i" for (i, a) in enumerate(summary.analyses)]...,
        ungroup = false
    )
    gdf_rows = DataFrames.groupby(_df, rowsymbols, sort = false)
    row_keys = Tuple.(keys(gdf_rows))
    gdf_cols = DataFrames.groupby(_df, colsymbols, sort = false)
    col_keys = Tuple.(keys(gdf_cols))

    st = SummaryTable(
        gdf,
        variable,
        row_keys,
        col_keys,
        rowgroups,
        colgroups,
        summary,
        gdf_summary,
    )

    cl, i_header = get_cells(st; variable_header)
    Table(cl, i_header, nothing; celltable_kws...)
end
# Lay out a `SummaryTable` as a vector of `SpannedCell`s.
# Returns `(cells, n_header_rows)`.
function get_cells(l::SummaryTable; variable_header::Bool)
    cells = SpannedCell[]

    n_row_summaries = length(l.summary.analyses)
    n_rowgroups = length(l.rows)
    n_colgroups = length(l.columns)

    # without column groups there is only a single header row; otherwise two
    # header rows per column group (name + keys) plus the optional variable header
    colheader_offset = if n_colgroups == 0 && n_rowgroups > 0
        1
    else
        2 * n_colgroups + (variable_header ? 1 : 0)
    end
    # one column per row group plus one column for the analysis names
    rowheader_offset = n_rowgroups + 1

    # group headers for row groups
    for (i_rowgroup, rowgroup) in enumerate(l.rows)
        cell = SpannedCell(colheader_offset, i_rowgroup, rowgroup.name, summarytable_row_header())
        push!(cells, cell)
    end

    # variable headers on top of each column part
    if variable_header
        colrange = rowheader_offset .+ (1:length(l.col_keys))
        cell = SpannedCell(colheader_offset, colrange, l.variable.name, summarytable_column_header())
        push!(cells, cell)
    end

    values_spans_cols = nested_run_length_encodings(l.col_keys)
    all_spanranges_cols = [spanranges(spans) for (values, spans) in values_spans_cols]

    # column headers on top of each column part
    for i_colgroupkey in 1:n_colgroups
        # a group name spans the runs of the parent level
        headerspanranges = i_colgroupkey == 1 ? [1:length(l.col_keys)] : all_spanranges_cols[i_colgroupkey-1]
        for headerspanrange in headerspanranges
            header_offset_range = headerspanrange .+ rowheader_offset
            class = length(headerspanrange) > 1 ? summarytable_column_header_spanned() : summarytable_column_header()
            cell = SpannedCell(i_colgroupkey * 2 - 1, header_offset_range, l.columns[i_colgroupkey].name, class)
            push!(cells, cell)
        end
        values, _ = values_spans_cols[i_colgroupkey]
        ranges = all_spanranges_cols[i_colgroupkey]
        for (value, range) in zip(values, ranges)
            label_offset_range = range .+ rowheader_offset
            cell = SpannedCell(i_colgroupkey * 2, label_offset_range, format_value(value), summarytable_body())
            push!(cells, cell)
        end
    end

    values_spans_rows = nested_run_length_encodings(l.row_keys)
    all_spanranges_rows = [spanranges(spans) for (values, spans) in values_spans_rows]

    for (i_rowkey, rowkey) in enumerate(l.row_keys)
        # each row key owns one block of `n_row_summaries` rows
        rowgroupoffset = (i_rowkey - 1) * n_row_summaries
        rowoffset = rowgroupoffset + colheader_offset

        # row group keys to the left
        for i_rowgroupkey in 1:n_rowgroups
            # show key only once per span
            spanranges = all_spanranges_rows[i_rowgroupkey]
            ith_span = findfirst(spanrange -> first(spanrange) == i_rowkey, spanranges)
            if ith_span === nothing
                continue
            end
            spanrange = spanranges[ith_span]
            range = 1:n_row_summaries * length(spanrange)
            offset_range = range .+ rowoffset
            key = rowkey[i_rowgroupkey]
            cell = SpannedCell(offset_range, i_rowgroupkey, format_value(key), summarytable_row_key())
            push!(cells, cell)
        end

        # row analysis headers to the right of each row key
        for (i_rowsumm, summ_ana) in enumerate(l.summary.analyses)
            summ_rowoffset = rowoffset + 1
            push!(cells, SpannedCell(
                summ_rowoffset + i_rowsumm - 1,
                n_rowgroups + 1,
                summ_ana.name,
                summarytable_analysis_header()
            ))
        end

        # populate row analysis cells
        for i_rowsumm in eachindex(l.summary.analyses)
            summ_rowoffset = rowoffset
            for (i_col, colkey) in enumerate(l.col_keys)
                summkey = (rowkey..., colkey...)
                # summary result columns were appended after the group key columns
                datacol_index = length(summkey) + i_rowsumm
                # a group may be absent from the data; show an empty cell then
                data = get(l.gdf_summary, summkey, nothing)
                if data === nothing
                    value = ""
                else
                    value = only(data[!, datacol_index])
                end
                cell = SpannedCell(
                    summ_rowoffset + i_rowsumm,
                    rowheader_offset + i_col,
                    format_value(value),
                    summarytable_body()
                )
                push!(cells, cell)
            end
        end
    end

    cells, colheader_offset
end
# Cell styles for the building blocks of a summary table.
summarytable_column_header() = CellStyle(bold = true, halign = :center)                          # column group name
summarytable_column_header_spanned() = CellStyle(bold = true, border_bottom = true, halign = :center)  # column group name spanning several keys
summarytable_analysis_header() = CellStyle(bold = true, halign = :left)                          # analysis label
summarytable_body() = CellStyle()                                                                # plain data cells
summarytable_row_header() = CellStyle(bold = true, halign = :left)                               # row group name
summarytable_row_key() = CellStyle(halign = :left)                                               # row group key values
| SummaryTables | https://github.com/PumasAI/SummaryTables.jl.git |
[
"MIT"
] | 1.0.0 | 1381f670499344657ae955634ab2ff4776b394c9 | src/table_one.jl | code | 15530 | default_tests() = (
    # default hypothesis test for each variable classification used by table_one;
    # note that 2x2 categorical tables are switched to Fisher's exact test in `htester`
    categorical = HypothesisTests.ChisqTest,
    nonnormal = HypothesisTests.KruskalWallisTest,
    minmax = HypothesisTests.UnequalVarianceTTest,
    normal = HypothesisTests.UnequalVarianceTTest,
)
# Format hypothesis-test outputs for display: p-values below 0.001 are shown as
# "<0.001", confidence intervals as "(lo, hi)", and anything unformattable as "".
function hformatter(num::Real)
    if num < 0.001
        return "<0.001"
    end
    return string(round(num; digits = 3))
end
function hformatter((a, b)::Tuple{<:Real,<:Real}; digits = 3)
    lo = round(a; digits)
    hi = round(b; digits)
    return "($lo, $hi)"
end
hformatter(::Tuple{Nothing,Nothing}) = ""
hformatter(::Vector) = "" # TODO
hformatter(other) = ""
## Categorical:
# Run a categorical hypothesis test on a contingency matrix. A 2x2 table is
# always tested with Fisher's exact test; larger tables use `test` directly.
# Returns `(test_used, result)`, or `(nothing, nothing)` if the test throws.
function htester(data::Matrix, test, combine)
    data = identity.(data)  # narrow the element type
    try
        if size(data) != (2, 2)
            return test, test(data)
        end
        a, c, b, d = data  # column-major unpacking
        fisher = HypothesisTests.FisherExactTest
        return fisher, fisher(a, b, c, d)
    catch
        return nothing, nothing
    end
end
## Continuous:
# Kruskal-Wallis compares all groups jointly, so the group vectors are splatted
# into a single call. Returns `(test, result)`, or `(nothing, nothing)` if the
# test throws.
function htester(data::Vector, test::Type{HypothesisTests.KruskalWallisTest}, combine)
    try
        return test, test(data...)
    catch _
        return nothing, nothing
    end
end
# Run a two-sample test over the group vectors in `data`. With more than two
# groups, every unique pair is tested and the p-values are merged with
# `MultipleTesting.combine` using the given `combine` method; the combined
# p-value is returned instead of a test result. Returns `(nothing, nothing)` if
# testing throws.
function htester(data::Vector, test, combine)
    try
        if length(data) > 2
            # test each unique pair of vectors from data
            results = [test(a, b) for (i, a) in pairs(data) for b in data[i+1:end]]
            pvalues = HypothesisTests.pvalue.(results)
            return test, MultipleTesting.combine(pvalues, combine)
        else
            return test, test(data...)
        end
    catch _
        return nothing, nothing
    end
end
## P-Values:
get_pvalue(n::Real) = n                              # already a p-value (e.g. a combined one)
get_pvalue(::Nothing) = nothing                      # the test failed, so there is no p-value
get_pvalue(result) = HypothesisTests.pvalue(result)  # extract from a test result object
## CI:
# Failed tests and bare p-values carry no confidence interval.
get_confint(::Real) = (nothing, nothing)
get_confint(::Nothing) = (nothing, nothing)
# Not every test type implements `confint`, so fall back to an empty interval.
function get_confint(result)
    try
        return HypothesisTests.confint(result)
    catch
        return nothing, nothing
    end
end
## Test name:
get_testname(test) = string(nameof(test))  # display name of the test type
get_testname(::Nothing) = ""               # no test was run
##
# One analysis in table_one: applies `func` to column `variable` of the data
# and labels the resulting rows with `name`.
struct Analysis
    variable::Symbol   # column of the source table to analyze
    func::Function     # column vector -> tuple of `result => label` pairs
    name               # display name shown in the variable column
end
# Normalization constructors: accept the various user-facing analysis
# specifications (Symbol, Symbol => name, Symbol => funcs, Symbol => funcs => name, ...)
# and reduce them to the canonical `Analysis(variable, func, name)` form.
function Analysis(s::Symbol, df::DataFrames.DataFrame)
    # bare column: default analysis for the column data, column name as label
    Analysis(s, default_analysis(df[!, s]), string(s))
end
function Analysis(p::Pair{Symbol, <:Any}, df::DataFrames.DataFrame)
    sym, rest = p
    Analysis(sym, rest, df)
end
function Analysis(sym::Symbol, name, df::DataFrames.DataFrame)
    # column plus display name: still the default analysis for the column data
    Analysis(sym, default_analysis(df[!, sym]), name)
end
function Analysis(sym::Symbol, funcvec::AbstractVector, df::DataFrames.DataFrame)
    # a vector of functions (optionally labelled) is merged into one function
    Analysis(sym, to_func(funcvec), df)
end
function Analysis(sym::Symbol, f::Function, df::DataFrames.DataFrame)
    Analysis(sym, f, string(sym))
end
function Analysis(s::Symbol, f::Function, name, df::DataFrames.DataFrame)
    Analysis(s, f, name)
end
function Analysis(sym::Symbol, p::Pair, df::DataFrames.DataFrame)
    funcs, name = p
    Analysis(sym, funcs, name, df)
end
# Normalize the user's analysis specification into a vector of `Analysis`.
make_analyses(v::AbstractVector, df::DataFrame) = [Analysis(el, df) for el in v]
make_analyses(x, df::DataFrame) = [Analysis(x, df)]
# Turn a user-provided analysis spec into a single function of a column.
# A plain function is passed through; a vector of functions (optionally paired
# with labels) becomes a function returning a tuple of `result => label` pairs.
to_func(f::Function) = f
function to_func(v::AbstractVector)
    return col -> Tuple(map(v) do el
        f, label = func_and_name(el)
        f(col) => label
    end)
end

# Split a spec element into `(function, label)`, defaulting the label to the
# function's name.
func_and_name(p::Pair{<:Function, <:Any}) = p
func_and_name(f::Function) = f => string(f)
# Annotated placeholder shown when a statistic cannot be computed (e.g. a group
# whose values are all missing).
not_computable_annotation() = Annotated("NC", "NC - Not computable", label = nothing)
# Wrap a statistic so that it skips `missing` values and returns `missing`
# (instead of erroring) when no values remain.
function guard_statistic(stat)
    return vec -> begin
        nonmissing = skipmissing(vec)
        return isempty(nonmissing) ? missing : stat(nonmissing)
    end
end
# Default analysis for numeric columns: "Mean (SD)", "Median [Min, Max]" and,
# if the whole column contains any `missing`s, a per-group count of missings.
function default_analysis(v::AbstractVector{<:Union{Missing, <:Real}})
    # decided on the full column so every group reports the same set of rows
    anymissing = any(ismissing, v)
    function (col)
        allmissing = isempty(skipmissing(col))
        _mean = guard_statistic(mean)(col)
        _sd = guard_statistic(std)(col)
        mean_sd = if allmissing
            not_computable_annotation()
        else
            Concat(_mean, " (", _sd, ")")
        end
        _median = guard_statistic(median)(col)
        _min = guard_statistic(minimum)(col)
        _max = guard_statistic(maximum)(col)
        med_min_max = if allmissing
            not_computable_annotation()
        else
            Concat(_median, " [", _min, ", ", _max, "]")
        end
        if anymissing
            # count and percentage of missing entries in this group
            nm = count(ismissing, col)
            _mis = Concat(nm, " (", nm / length(col) * 100, "%)")
        end
        (
            mean_sd => "Mean (SD)",
            med_min_max => "Median [Min, Max]",
            (anymissing ? (_mis => "Missing",) : ())...
        )
    end
end
# Non-numeric columns are summarized by counting occurrences of their levels.
default_analysis(c::CategoricalArray) = level_analyses(c)
default_analysis(v::AbstractVector{<:Union{Missing, Bool}}) = level_analyses(v)
# by default we just count levels for all datatypes that are not known
default_analysis(v) = level_analyses(v)
# Build an analysis function that reports, for each level of `c`, the count and
# percentage of entries in a group equal to that level.
function level_analyses(c)
    has_missing = any(ismissing, c) # if there's any missing, we report them for every col in c
    function (col)
        _levels = levels(c) # levels are computed for the whole column, not per group, so they are always exhaustive
        lvls = tuple(_levels...)
        cm = StatsBase.countmap(col)
        n = length(col)
        # count plus percentage of the group size
        _entry(n_lvl) = Concat(n_lvl, " (", n_lvl / n * 100, "%)")
        entries = map(lvls) do lvl
            # levels absent from this group get a zero count
            n_lvl = get(cm, lvl, 0)
            s = _entry(n_lvl)
            s => lvl
        end
        if has_missing
            n_missing = count(ismissing, col)
            entries = (entries..., _entry(n_missing) => "Missing")
        end
        return entries
    end
end
"""
table_one(table, analyses; keywords...)
Construct a "Table 1" which summarises the patient baseline
characteristics from the provided `table` dataset. This table is commonly used
in biomedical research papers.
It can handle both continuous and categorical columns in `table` and summary
statistics and hypothesis testing are able to be customised by the user. Tables
can be stratified by one, or more, variables using the `groupby` keyword.
## Keywords
- `groupby`: Which columns to stratify the dataset with, as a `Vector{Symbol}`.
- `nonnormal`: A vector of column names where hypothesis tests for the `:nonnormal` type are chosen.
- `minmax`: A vector of column names where hypothesis tests for the `:minmax` type are chosen.
- `tests`: a `NamedTuple` of hypothesis test types to use for `categorical`, `nonnormal`, `minmax`, and `normal` variables.
- `combine`: an object from `MultipleTesting` to use when combining p-values.
- `show_overall`: display the "Overall" column summary. Default is `true`.
- `show_n`: Display the number of rows for each group key next to its label.
- `show_pvalues`: display the `P-Value` column. Default is `false`.
- `show_testnames`: display the `Test` column. Default is `false`.
- `show_confints`: display the `CI` column. Default is `false`.
- `sort`: Sort the input table before grouping. Default is `true`. Pre-sort as desired and set to `false` when you want to maintain a specific group order or are using non-sortable objects as group keys.
All other keywords are forwarded to the `Table` constructor, refer to its docstring for details.
"""
function table_one(
table,
analyses;
groupby = [],
show_overall = true,
show_pvalues = false,
show_tests = true,
show_confints = false,
show_n = false,
compare_groups::Vector = [],
nonnormal = [],
minmax = [],
tests = default_tests(),
combine = MultipleTesting.Fisher(),
sort = true,
celltable_kws...
)
df = DataFrames.DataFrame(table)
groups = make_groups(groupby)
n_groups = length(groups)
show_overall || n_groups > 0 || error("Overall can't be false if there are no groups.")
_analyses = make_analyses(analyses, df)
typedict = Dict(map(_analyses) do analysis
type = if getproperty(df, analysis.variable) isa CategoricalVector
:categorical
elseif analysis.variable in nonnormal
:nonnormal
elseif analysis.variable in minmax
:minmax
else
:normal
end
analysis.variable => type
end)
cells = SpannedCell[]
groupsymbols = [g.symbol for g in groups]
if sort && !isempty(groupsymbols)
try
Base.sort!(df, groupsymbols, lt = natural_lt)
catch e
throw(SortingError())
end
end
gdf = DataFrames.groupby(df, groupsymbols, sort = false)
calculate_comparisons = length(gdf) >= 2 && show_pvalues
if calculate_comparisons
compare_groups = [make_testfunction(show_pvalues, show_tests, show_confints, typedict, merge(default_tests(), tests), combine); compare_groups]
end
rows_per_group = [nrow(g) for g in gdf]
funcvector = [a.variable => a.func for a in _analyses]
df_analyses = DataFrames.combine(gdf, funcvector; ungroup = false)
if show_overall
df_overall = DataFrames.combine(df, funcvector)
end
analysis_labels = map(n_groups+1:n_groups+length(_analyses)) do i_col
col = df_analyses[1][!, i_col]
x = only(col)
if x isa Tuple
map(last, x)
else
error("Expected a tuple")
end
end
n_values_per_analysis = map(length, analysis_labels)
analysis_offsets = [0; cumsum(1 .+ n_values_per_analysis)[1:end-1]]
header_offset = n_groups == 0 ? 2 : n_groups * 2 + 1
values_spans = nested_run_length_encodings(keys(gdf))
all_spanranges = [spanranges(spans) for (values, spans) in values_spans]
n_groupcols = n_groups == 0 ? 0 : maximum(x -> x.stop, all_spanranges[1])
if show_overall
coloffset = 1
title = if show_n
Multiline("Overall", "(n=$(nrow(df)))")
else
"Overall"
end
cell = SpannedCell(n_groups == 0 ? 1 : 2 * n_groups, 2, title, tableone_column_header())
push!(cells, cell)
else
coloffset = 0
end
# add column headers for groups
for i_groupkey in 1:n_groups
headerspanranges = i_groupkey == 1 ? [1:length(keys(gdf))] : all_spanranges[i_groupkey-1]
for headerspanrange in headerspanranges
header_offset_range = headerspanrange .+ 1 .+ coloffset
cell = SpannedCell(i_groupkey * 2 - 1, header_offset_range, groups[i_groupkey].name, tableone_column_header_spanned())
push!(cells, cell)
end
values, _ = values_spans[i_groupkey]
ranges = all_spanranges[i_groupkey]
for (value, range) in zip(values, ranges)
label_offset_range = range .+ 1 .+ coloffset
n_entries = sum(rows_per_group[range])
label = if show_n
Multiline(format_value(value), "(n=$n_entries)")
else
format_value(value)
end
cell = SpannedCell(i_groupkey * 2, label_offset_range, label, tableone_column_header_key())
push!(cells, cell)
end
end
for (i_ana, analysis) in enumerate(_analyses)
offset = header_offset + analysis_offsets[i_ana]
cell = SpannedCell(offset, 1, analysis.name, tableone_variable_header())
push!(cells, cell)
for (i_func, funcname) in enumerate(analysis_labels[i_ana])
cell = SpannedCell(offset + i_func, 1, funcname, tableone_analysis_name())
push!(cells, cell)
end
if show_overall
val = only(df_overall[!, i_ana])
for i_func in 1:n_values_per_analysis[i_ana]
cell = SpannedCell(offset + i_func, 2, val[i_func][1], tableone_body())
push!(cells, cell)
end
end
if n_groups > 0
for (i_group, ggdf) in enumerate(df_analyses)
val = only(ggdf[!, n_groups + i_ana])
for i_func in 1:n_values_per_analysis[i_ana]
cell = SpannedCell(offset + i_func, coloffset + 1 + i_group, val[i_func][1], tableone_body())
push!(cells, cell)
end
end
end
end
compcolumn_offset = n_groupcols + (show_overall ? 1 : 0) + 1
for comp in compare_groups
# the logic here is much less clean than it could be because of the way
# column names have to be passed via pairs, and it cannot be guaranteed from typing
# that all are compatible, so it has to be runtime checked
values = map(_analyses) do analysis
val = comp(analysis.variable, [getproperty(g, analysis.variable) for g in gdf])
@assert val isa Tuple && all(x -> x isa Pair, val) "A comparison function has to return a tuple of value => name pairs. Function $comp returned $val"
val
end
nvalues = length(first(values))
@assert all(==(nvalues), map(length, values)) "All comparison tuples must have the same length. Found\n$values"
colnames = [map(last, v) for v in values]
unique_colnames = unique(colnames)
@assert length(unique_colnames) == 1 "All column names must be the same, found $colnames"
# set column headers
for (ival, name) in enumerate(only(unique_colnames))
cell = SpannedCell(header_offset-1, compcolumn_offset+ival, name, tableone_column_header())
push!(cells, cell)
end
for (j, val) in enumerate(values)
# set column values
for (ival, (value, _)) in enumerate(val)
cell = SpannedCell(analysis_offsets[j] + header_offset, compcolumn_offset+ival, value, tableone_body())
push!(cells, cell)
end
end
compcolumn_offset += nvalues
end
Table(cells, header_offset-1, nothing; celltable_kws...)
end
# Cell styles for the building blocks of table one.
tableone_column_header() = CellStyle(bold = true, halign = :center)                              # group / comparison column name
tableone_column_header_spanned() = CellStyle(bold = true, border_bottom = true, halign = :center)  # group name spanning several keys
tableone_column_header_key() = CellStyle(halign = :center)                                       # group key values
tableone_variable_header() = CellStyle(bold = true, halign = :left)                              # analysis variable name
tableone_body() = CellStyle()                                                                    # plain data cells
tableone_analysis_name() = CellStyle(indent_pt = 12, halign = :left)                             # indented per-statistic label
"""
    formatted(fs, s::String)

Create a function that applies one or more statistics `fs` to a column and
interpolates the results into the `Printf`-style format string `s`.

A single function is treated as a one-element tuple, so `formatted(mean, "%.2f")`
and `formatted((mean,), "%.2f")` are equivalent.
"""
# Bug fix: `(f)` is just `f` parenthesized, not a tuple, so the previous
# definition `formatted((f), s)` recursed into itself forever. `(f,)` builds
# the intended one-element tuple.
formatted(f::Function, s::String) = formatted((f,), s)
function formatted(fs::Tuple, s::String)
    return function (col)
        values = map(fs) do f
            f(col)
        end
        return Printf.format(Printf.Format(s), values...)
    end
end
# Build the comparison function used by `table_one` for one variable.
#
# The returned `testfunction(variable, cols)` looks up the variable's type in
# `typedict`, selects the matching test from `testdict`, runs it on the group
# columns (with `missing`s removed), and returns a tuple of `value => colname`
# pairs for the requested comparison columns (p-value, test name, CI).
#
# Fix: the composition operator in `collect ∘ skipmissing` had been corrupted
# to a stray `β` character, which is not valid code.
function make_testfunction(show_pvalues::Bool, show_tests::Bool, show_confint::Bool, typedict, testdict, combine)
    function testfunction(variable, cols)
        cols_nomissing = map(collect ∘ skipmissing, cols)
        variabletype = typedict[variable]
        test = testdict[variabletype]
        if variabletype === :categorical
            # concatenate the level counts into a matrix which Chi Square Test needs
            matrix = hcat([map(l -> count(==(l), col), levels(col)) for col in cols_nomissing]...)
            used_test, result = htester(matrix, test, combine)
        else
            used_test, result = htester(cols_nomissing, test, combine)
        end
        testname = get_testname(used_test)
        pvalue = hformatter(get_pvalue(result))
        confint = hformatter(get_confint(result))
        (
            (show_pvalues ? (pvalue => "P-Value",) : ())...,
            (show_tests ? (testname => "Test",) : ())...,
            (show_confint ? (confint => "CI",) : ())...,
        )
    end
end
| SummaryTables | https://github.com/PumasAI/SummaryTables.jl.git |
[
"MIT"
] | 1.0.0 | 1381f670499344657ae955634ab2ff4776b394c9 | src/typst.jl | code | 4592 | function Base.show(io::IO, M::MIME"text/typst", ct::Table)
    # Render `ct` as typst markup using the tablex package.
    ct = postprocess(ct)
    # sort cells row-major so `running_index` below can detect first encounters
    cells = sort(to_spanned_cells(ct.cells), by = x -> (x.span[1].start, x.span[2].start))
    cells, annotations = resolve_annotations(cells)
    matrix = create_cell_matrix(cells)
    validate_rowgaps(ct.rowgaps, size(matrix, 1))
    validate_colgaps(ct.colgaps, size(matrix, 2))
    rowgaps = Dict(ct.rowgaps)
    colgaps = Dict(ct.colgaps)
    print(io, """
#[
#import "@preview/tablex:0.0.8": tablex, cellx, hlinex
#tablex(
columns: $(size(matrix, 2)),
auto-vlines: false,
auto-hlines: false,
column-gutter: 0.25em,
""")
    # top border of the table
    println(io, " hlinex(y: 0, stroke: 1pt),")
    running_index = 0
    for row in 1:size(matrix, 1)
        if row == ct.footer
            # horizontal rule above the footer row
            println(io, " hlinex(y: $(row-1), stroke: 0.75pt),")
        end
        for col in 1:size(matrix, 2)
            index = matrix[row, col]
            # cells were sorted above, so a larger index means this cell has
            # not been emitted yet
            if index > running_index
                cell = cells[index]
                if cell.value !== nothing
                    print(io, " cellx(colspan: $(length(cell.span[2])), x: $(col-1), y: $(row-1), align: $(typst_align(cell.style)))[")
                    cell.style.bold && print(io, "*")
                    cell.style.italic && print(io, "_")
                    cell.style.underline && print(io, "#underline[")
                    cell.style.indent_pt > 0 && print(io, "#h($(cell.style.indent_pt)pt)")
                    _showas(io, M, cell.value)
                    cell.style.underline && print(io, "]")
                    cell.style.italic && print(io, "_")
                    cell.style.bold && print(io, "*")
                    print(io, "],\n")
                end
                if cell.style.border_bottom
                    println(io, " hlinex(y: $(row), start: $(cell.span[2].start-1), end: $(cell.span[2].stop), stroke: 0.75pt),")
                end
                running_index = index
            end
        end
        if row == ct.header
            # horizontal rule below the header rows
            println(io, " hlinex(y: $(row), stroke: 0.75pt),")
        end
    end
    # bottom border of the table
    println(io, " hlinex(y: $(size(matrix, 1)), stroke: 1pt),")
    if !isempty(annotations) || !isempty(ct.footnotes)
        # annotations and footnotes share one full-width cell below the table
        print(io, " cellx(colspan: $(size(matrix, 2)), x: 0, y: $(size(matrix, 1)))[")
        for (i, (annotation, label)) in enumerate(annotations)
            i > 1 && print(io, "#h(1.5em, weak: true)")
            if label !== NoLabel()
                print(io, "#super[")
                _showas(io, MIME"text/typst"(), label)
                print(io, "]")
            end
            print(io, "#text(size: 0.8em)[")
            _showas(io, MIME"text/typst"(), annotation)
            print(io, "]")
        end
        for (i, footnote) in enumerate(ct.footnotes)
            (!isempty(annotations) || i > 1) && print(io, "#h(1.5em, weak: true)")
            _showas(io, MIME"text/typst"(), footnote)
        end
        print(io, "],") # cellx()[
    end
    println(io, "\n)") # tablex(
    println(io, "]") # #[
    return
end
# Render a `Multiline` value for typst by separating its parts with `#linebreak()`.
function _showas(io::IO, M::MIME"text/typst", m::Multiline)
    for (i, v) in enumerate(m.values)
        i > 1 && print(io, " #linebreak() ")
        _showas(io, M, v)
    end
end
# Translate a `CellStyle`'s alignment fields into a typst alignment expression
# such as `left + top`. Errors on unknown alignment symbols.
function typst_align(s::CellStyle)
    h = s.halign === :left ? "left" :
        s.halign === :right ? "right" :
        s.halign === :center ? "center" :
        error("Invalid halign $(s.halign)")
    v = s.valign === :top ? "top" :
        s.valign === :bottom ? "bottom" :
        s.valign === :center ? "horizon" :
        error("Invalid valign $(s.valign)")
    return string(h, " + ", v)
end
# Render an annotated value for typst: the value followed by its superscript
# label, unless the annotation carries no label.
function _showas(io::IO, ::MIME"text/typst", r::ResolvedAnnotation)
    _showas(io, MIME"text/typst"(), r.value)
    if r.label !== NoLabel()
        print(io, "#super[")
        _showas(io, MIME"text/typst"(), r.label)
        print(io, "]")
    end
end
# Render a `Superscript` value using typst's `#super[]` command.
function _showas(io::IO, ::MIME"text/typst", s::Superscript)
    print(io, "#super[")
    _showas(io, MIME"text/typst"(), s.super)
    print(io, "]")
end
# Render a `Subscript` value using typst's `#sub[]` command.
function _showas(io::IO, ::MIME"text/typst", s::Subscript)
    print(io, "#sub[")
    _showas(io, MIME"text/typst"(), s.sub)
    print(io, "]")
end
"""
    _str_typst_escaped(io::IO, s::AbstractString)
    _str_typst_escaped(s::AbstractString)

Escape typst's special characters (backslash, `\$`, `#`, `*`, `_`) in `s` with
a leading backslash, writing the result to `io` or returning it as a string.
"""
function _str_typst_escaped(io::IO, s::AbstractString)
    escapable_special_chars = raw"\$#*_"
    # plain iteration is sufficient here; the previous `Iterators.Stateful`
    # wrapper added overhead without ever being used for lookahead
    for c in s
        if c in escapable_special_chars
            print(io, '\\', c)
        else
            print(io, c)
        end
    end
end

function _str_typst_escaped(s::AbstractString)
    return sprint(_str_typst_escaped, s, sizehint=lastindex(s))
end
| SummaryTables | https://github.com/PumasAI/SummaryTables.jl.git |
[
"MIT"
] | 1.0.0 | 1381f670499344657ae955634ab2ff4776b394c9 | test/runtests.jl | code | 27410 | using SummaryTables
using SummaryTables: Table, SpannedCell, to_docx, CellStyle
using SummaryTables: WriteDocx
using SummaryTables: SortingError
const W = WriteDocx
using Test
using DataFrames
using Statistics
using ReferenceTests
using tectonic_jll
using Typst_jll
# Wrapper type to dispatch to the right `show` implementations.
struct AsMIME{M}
    object  # the value to render as MIME type `M` in `show`
end
# Forward `show` to the wrapped object using the MIME type carried by `M`.
Base.show(io::IO, m::AsMIME{M}) where M = show(io, M(), m.object)
# LaTeX output is wrapped in a minimal standalone document (including the table
# packages the output requires) so the reference file can be compiled as-is.
function Base.show(io::IO, m::AsMIME{M}) where M <: MIME"text/latex"
    print(io, raw"""
\documentclass{article}
\usepackage{threeparttable}
\usepackage{multirow}
\usepackage{booktabs}
\begin{document}
"""
    )
    show(io, M(), m.object)
    print(io, raw"\end{document}")
end
as_html(object) = AsMIME{MIME"text/html"}(object)
as_latex(object) = AsMIME{MIME"text/latex"}(object)
as_docx(object) = nothing  # docx has no text representation; handled specially in `run_reftest`
as_typst(object) = AsMIME{MIME"text/typst"}(object)
# Compare `table` rendered via `func` (one of `as_html`, `as_latex`, `as_typst`,
# `as_docx`) against the reference file at `path` plus the func-specific
# extension. LaTeX and typst output are additionally compiled to PDF; docx
# output is only checked to save without warnings.
function run_reftest(table, path, func)
    path_full = joinpath(@__DIR__, path * extension(func))
    if func === as_docx
        # TODO: Real reference tests once WriteDocx has stabilized more
        @test_nowarn mktempdir() do dir
            tablenode = to_docx(table)
            doc = W.Document(
                W.Body([
                    W.Section([tablenode])
                ]),
                W.Styles([])
            )
            W.save(joinpath(dir, "test.docx"), doc)
        end
    else
        @test_reference path_full func(table)
        if func === as_latex
            latex_render_test(path_full)
        end
        if func === as_typst
            typst_render_test(path_full)
        end
    end
end
# Compile the LaTeX file at `filepath` with tectonic inside a scratch
# directory and assert that a PDF was produced.
function latex_render_test(filepath)
    mktempdir() do dir
        input_tex = joinpath(dir, "input.tex")
        output_pdf = joinpath(dir, "input.pdf")
        cp(filepath, input_tex)
        tectonic_jll.tectonic() do bin
            run(`$bin $input_tex`)
        end
        @test isfile(output_pdf)
    end
end
# Compile the Typst file at `filepath` with the typst CLI inside a scratch
# directory and assert that a PDF was produced.
function typst_render_test(filepath)
    mktempdir() do dir
        input_typ = joinpath(dir, "input.typ")
        output_pdf = joinpath(dir, "input.pdf")
        cp(filepath, input_typ)
        Typst_jll.typst() do bin
            run(`$bin compile $input_typ`)
        end
        @test isfile(output_pdf)
    end
end
# Maps each backend selector to the file extension of its reference output.
extension(f::typeof(as_html)) = ".txt"
extension(f::typeof(as_latex)) = ".latex.txt"
extension(f::typeof(as_docx)) = ".docx"
extension(f::typeof(as_typst)) = ".typ.txt"
# This can be removed for `@test_throws` once CI only uses Julia 1.8 and up
"""
    @test_throws_message message expr

Evaluate `expr` and test (a) that it throws, and (b) that the thrown
exception's `msg` field contains `message`. Only works for exception types
that have a `msg` field, such as `ErrorException`.
"""
macro test_throws_message(message::String, exp)
    quote
        threw_exception = false
        try
            $(esc(exp))
        catch e
            threw_exception = true
            @test occursin($message, e.msg) # Currently only works for ErrorException
        end
        @test threw_exception
    end
end
@testset "SummaryTables" begin
    # Shared fixture: 8 rows with a numeric and a categorical value column
    # plus three grouping columns.
    df = DataFrame(
        value1 = 1:8,
        value2 = ["a", "b", "c", "a", "b", "c", "a", "b"],
        group1 = repeat(["a", "b"], inner = 4),
        group3 = repeat(repeat(["c", "d"], inner = 2), 2),
        group2 = repeat(["e", "f"], 4),
    )
    # Fixture whose group labels only order correctly under natural sorting
    # ("1 mg" < "5 mg" < "10 mg" < "50 mg").
    df2 = DataFrame(
        dose = repeat(["1 mg", "50 mg", "5 mg", "10 mg"], 3),
        id = repeat(["5", "50", "8", "10", "1", "80"], inner = 2),
        value = [1, 2, 3, 4, 2, 3, 4, 5, 5, 2, 1, 4],
    )
    # Fixture whose group labels are rich values (Concat/Subscript/Multiline)
    # that cannot be sorted; used to exercise `SortingError` and `sort = false`.
    unsortable_df = let
        parameters = repeat([
                Concat("T", Subscript("max")),
                Concat("C", Superscript("max")),
                Multiline("One Line", "Another Line")
            ], inner = 4)
        _df = DataFrame(;
            parameters,
            value = eachindex(parameters),
            group = repeat(1:4, 3),
            group2 = repeat(1:2, 6),
        )
        sort!(_df, [:group2, :group])
    end
    # Run the whole suite once per output backend.
    @testset for func in [as_html, as_latex, as_docx, as_typst]
        # Helper: reference-test table `t` under `path` for the current
        # backend, wrapped in its own testset named after the path.
        reftest(t, path) = @testset "$path" run_reftest(t, path, func)
        # `table_one`: grouping, p-values/tests/confints, custom summarizers,
        # natural sort order, unsortable labels, and missing-value handling.
        @testset "table_one" begin
            @test_throws MethodError table_one(df)
            t = table_one(df, [:value1])
            reftest(t, "references/table_one/one_row")
            t = table_one(df, [:value1 => "Value 1"])
            reftest(t, "references/table_one/one_row_renamed")
            t = table_one(df, [:value1, :value2])
            reftest(t, "references/table_one/two_rows")
            t = table_one(df, [:value1, :value2], groupby = [:group1])
            reftest(t, "references/table_one/two_rows_one_group")
            t = table_one(df, [:value1, :value2], groupby = [:group1, :group2])
            reftest(t, "references/table_one/two_rows_two_groups")
            t = table_one(df, [:value1], groupby = [:group1, :group2], show_pvalues = true)
            reftest(t, "references/table_one/one_row_two_groups_pvalues")
            t = table_one(df, [:value1], groupby = [:group1], show_pvalues = true, show_tests = true, show_confints = true)
            reftest(t, "references/table_one/one_row_one_group_pvalues_tests_confints")
            function summarizer(col)
                m = mean(col)
                s = std(col)
                (m => "Mean", s => "SD")
            end
            t = table_one(df, [:value1 => [mean, std => "SD"], :value1 => summarizer])
            reftest(t, "references/table_one/vector_and_function_arguments")
            t = table_one(df2, :value, groupby = :dose)
            reftest(t, "references/table_one/natural_sort_order")
            @test_throws SortingError t = table_one(unsortable_df, [:value], groupby = :parameters)
            t = table_one(unsortable_df, [:value], groupby = :parameters, sort = false)
            reftest(t, "references/table_one/sort_false")
            t = table_one(
                (;
                    empty = Union{Float64,Missing}[missing, missing, missing, 1, 2, 3],
                    group = [1, 1, 1, 2, 2, 2]
                ),
                [:empty],
                groupby = :group
            )
            reftest(t, "references/table_one/all_missing_group")
            data = (; x = [1, 2, 3, 4, 5, 6], y = ["A", "A", "B", "B", "B", "A"], z = ["C", "C", "C", "D", "D", "D"])
            t = table_one(data, :x, groupby = [:y, :z], sort = false)
            reftest(t, "references/table_one/nested_spans_bad_sort")
            data = (;
                category = ["a", "b", "c", "b", missing, "b", "c", "c"],
                group = [1, 1, 1, 1, 2, 2, 2, 2]
            )
            t = table_one(data, [:category], groupby = :group)
            reftest(t, "references/table_one/category_with_missing")
        end
@testset "listingtable" begin
@test_throws MethodError listingtable(df)
@test_throws SummaryTables.TooManyRowsError listingtable(df, :value1)
@test_throws SummaryTables.TooManyRowsError listingtable(df, :value2)
@test_throws SummaryTables.TooManyRowsError listingtable(df, :value1, rows = [:group1])
t = listingtable(df, :value1, rows = [:group1, :group2, :group3])
reftest(t, "references/listingtable/rows_only")
t = listingtable(df, :value1, cols = [:group1, :group2, :group3])
reftest(t, "references/listingtable/cols_only")
t = listingtable(df, :value1, rows = [:group1, :group2], cols = [:group3])
reftest(t, "references/listingtable/two_rows_one_col")
t = listingtable(df, :value1, rows = [:group1], cols = [:group2, :group3])
reftest(t, "references/listingtable/one_row_two_cols")
t = listingtable(df, :value1,
rows = [:group1, :group2],
cols = [:group3],
summarize_rows = [mean]
)
reftest(t, "references/listingtable/summarize_end_rows")
t = listingtable(df, :value1,
rows = [:group1, :group2],
cols = [:group3],
summarize_rows = [mean, std]
)
reftest(t, "references/listingtable/summarize_end_rows_two_funcs")
t = listingtable(df, :value1,
rows = [:group1, :group2],
cols = [:group3],
summarize_rows = :group2 => [mean]
)
reftest(t, "references/listingtable/summarize_last_group_rows")
t = listingtable(df, :value1,
rows = [:group1, :group2],
cols = [:group3],
summarize_rows = :group1 => [mean]
)
reftest(t, "references/listingtable/summarize_first_group_rows")
t = listingtable(df, :value1,
cols = [:group1, :group2],
rows = [:group3],
summarize_cols = [mean, std]
)
reftest(t, "references/listingtable/summarize_end_cols_two_funcs")
t = listingtable(df, :value1,
cols = [:group1, :group2],
rows = [:group3],
summarize_cols = :group2 => [mean]
)
reftest(t, "references/listingtable/summarize_last_group_cols")
t = listingtable(df, :value1,
cols = [:group1, :group2],
rows = [:group3],
summarize_cols = :group1 => [mean]
)
reftest(t, "references/listingtable/summarize_first_group_cols")
t = listingtable(df, :value1 => "Value 1",
rows = [:group1 => "Group 1", :group2 => "Group 2"],
cols = [:group3 => "Group 3"],
summarize_rows = [mean => "Mean", minimum => "Minimum"]
)
reftest(t, "references/listingtable/renaming")
t = listingtable(df, :value1 => "Value 1",
rows = [:group1],
cols = [:group2, :group3],
variable_header = false,
)
reftest(t, "references/listingtable/no_variable_header")
t = listingtable(df2, :value, rows = [:id, :dose])
reftest(t, "references/listingtable/natural_sort_order")
t = listingtable(df2, :value, rows = [:id, :dose], summarize_rows = [mean, mean], summarize_cols = [mean, mean])
reftest(t, "references/listingtable/two_same_summarizers")
@test_throws SortingError t = listingtable(unsortable_df, :value, rows = :parameters, cols = [:group2, :group])
t = listingtable(unsortable_df, :value, cols = :parameters, rows = [:group2, :group], sort = false)
reftest(t, "references/listingtable/sort_false")
pt = listingtable(df, :value1, Pagination(rows = 1);
rows = [:group1, :group2], cols = :group3)
for (i, page) in enumerate(pt.pages)
reftest(page.table, "references/listingtable/pagination_rows=1_$i")
end
pt = listingtable(df, :value1, Pagination(rows = 2);
rows = [:group1, :group2], cols = :group3)
for (i, page) in enumerate(pt.pages)
reftest(page.table, "references/listingtable/pagination_rows=2_$i")
end
pt = listingtable(df, :value1, Pagination(cols = 1);
cols = [:group1, :group2], rows = :group3)
for (i, page) in enumerate(pt.pages)
reftest(page.table, "references/listingtable/pagination_cols=1_$i")
end
pt = listingtable(df, :value1, Pagination(cols = 2);
cols = [:group1, :group2], rows = :group3)
for (i, page) in enumerate(pt.pages)
reftest(page.table, "references/listingtable/pagination_cols=2_$i")
end
pt = listingtable(df, :value1, Pagination(rows = 1, cols = 2);
cols = [:group1, :group2], rows = :group3)
for (i, page) in enumerate(pt.pages)
reftest(page.table, "references/listingtable/pagination_rows=1_cols=2_$i")
end
if func === as_html
reftest(pt, "references/paginated_table_interactive")
end
pt = listingtable(df, :value1, Pagination(rows = 1);
rows = [:group1, :group2], cols = :group3, summarize_rows = [mean, std])
@test length(pt.pages) == 1
for (i, page) in enumerate(pt.pages)
reftest(page.table, "references/listingtable/pagination_rows=2_summarized_$i")
end
pt = listingtable(df, :value1, Pagination(rows = 1);
rows = [:group1, :group2], cols = :group3, summarize_rows = :group2 => [mean, std])
@test length(pt.pages) == 4
for (i, page) in enumerate(pt.pages)
reftest(page.table, "references/listingtable/pagination_rows=2_summarized_grouplevel_2_$i")
end
pt = listingtable(df, :value1, Pagination(rows = 1);
rows = [:group1, :group2], cols = :group3, summarize_rows = :group1 => [mean, std])
@test length(pt.pages) == 2
for (i, page) in enumerate(pt.pages)
reftest(t, "references/listingtable/pagination_rows=2_summarized_grouplevel_1_$i")
end
end
        # `summarytable`: summary functions required, row/col grouping,
        # duplicate summaries, natural sort order, and unsortable labels.
        @testset "summarytable" begin
            @test_throws ArgumentError("No summary analyses defined.") t = summarytable(df, :value1)
            t = summarytable(df, :value1, summary = [mean])
            reftest(t, "references/summarytable/no_group_one_summary")
            t = summarytable(df, :value1, summary = [mean, std => "SD"])
            reftest(t, "references/summarytable/no_group_two_summaries")
            t = summarytable(df, :value1, rows = [:group1 => "Group 1"], summary = [mean])
            reftest(t, "references/summarytable/one_rowgroup_one_summary")
            t = summarytable(df, :value1, rows = [:group1 => "Group 1"], summary = [mean, std])
            reftest(t, "references/summarytable/one_rowgroup_two_summaries")
            t = summarytable(df, :value1, rows = [:group1 => "Group 1"], cols = [:group2 => "Group 2"], summary = [mean])
            reftest(t, "references/summarytable/one_rowgroup_one_colgroup_one_summary")
            t = summarytable(df, :value1, rows = [:group1 => "Group 1"], cols = [:group2 => "Group 2"], summary = [mean, std])
            reftest(t, "references/summarytable/one_rowgroup_one_colgroup_two_summaries")
            t = summarytable(df, :value1, rows = [:group1 => "Group 1", :group2], cols = [:group3 => "Group 3"], summary = [mean, std])
            reftest(t, "references/summarytable/two_rowgroups_one_colgroup_two_summaries")
            t = summarytable(df, :value1, rows = [:group1 => "Group 1", :group2], cols = [:group3 => "Group 3"], summary = [mean, std], variable_header = false)
            reftest(t, "references/summarytable/two_rowgroups_one_colgroup_two_summaries_no_header")
            t = summarytable(df, :value1, summary = [mean, mean])
            reftest(t, "references/summarytable/two_same_summaries")
            t = summarytable(df2, :value, rows = [:id, :dose], summary = [mean])
            reftest(t, "references/summarytable/natural_sort_order")
            @test_throws SortingError t = summarytable(unsortable_df, :value, rows = :parameters, cols = [:group2, :group], summary = [mean])
            t = summarytable(unsortable_df, :value, cols = :parameters, rows = [:group2, :group], summary = [mean], sort = false)
            reftest(t, "references/summarytable/sort_false")
        end
        # Annotations: automatic numbering, manual labels, and error detection
        # for duplicate annotations/labels (skipped on docx, see TODOs).
        @testset "annotations" begin
            t = Table(
                [
                    SpannedCell(1, 1, Annotated("A", "Note 1")),
                    SpannedCell(1, 2, Annotated("B", "Note 2")),
                    SpannedCell(2, 1, Annotated("C", "Note 3")),
                    SpannedCell(2, 2, Annotated("D", "Note 1")),
                ],
                nothing,
                nothing,
            )
            reftest(t, "references/annotations/automatic_annotations")
            t = Table(
                [
                    SpannedCell(1, 1, Annotated("A", "Note 1", label = "X")),
                    SpannedCell(1, 2, Annotated("B", "Note 2", label = "Y")),
                    SpannedCell(2, 1, Annotated("C", "Note 3")),
                    SpannedCell(2, 2, Annotated("D", "Note 4")),
                ],
                nothing,
                nothing,
            )
            reftest(t, "references/annotations/manual_annotations")
            # Same annotation text under two different labels must error.
            t = Table(
                [
                    SpannedCell(1, 1, Annotated("A", "Note 1", label = "A")),
                    SpannedCell(1, 2, Annotated("A", "Note 1", label = "B")),
                ],
                nothing,
                nothing,
            )
            if func !== as_docx # TODO needs logic rework for this backend
                @test_throws_message "Found the same annotation" show(devnull, func(t))
            end
            # Same label with two different annotation texts must error.
            t = Table(
                [
                    SpannedCell(1, 1, Annotated("A", "Note 1", label = "A")),
                    SpannedCell(1, 2, Annotated("A", "Note 2", label = "A")),
                ],
                nothing,
                nothing,
            )
            if func !== as_docx # TODO needs logic rework for this backend
                @test_throws_message "Found the same label" show(devnull, func(t))
            end
            t = Table(
                [
                    SpannedCell(1, 1, Annotated(0.1235513245, "Note 1", label = "A")),
                ],
                nothing,
                nothing,
            )
            reftest(t, "references/annotations/annotated_float")
        end
        # Manually supplied footnotes, alone and combined with `Annotated` cells.
        @testset "manual footnotes" begin
            t = Table(
                [
                    SpannedCell(1, 1, "Cell 1"),
                    SpannedCell(1, 2, "Cell 2"),
                ],
                nothing,
                nothing,
                footnotes = ["First footnote.", "Second footnote."]
            )
            reftest(t, "references/manual_footnotes/footnotes")
            t = Table(
                [
                    SpannedCell(1, 1, Annotated("Cell 1", "Note 1")),
                    SpannedCell(1, 2, "Cell 2"),
                ],
                nothing,
                nothing,
                footnotes = ["First footnote.", "Second footnote."]
            )
            reftest(t, "references/manual_footnotes/footnotes_and_annotated")
        end
        # Postprocessors: `ReplaceMissing` (default and custom replacement) and
        # `Replace` with predicate + constant or predicate + function.
        @testset "Replace" begin
            t = Table(
                [
                    SpannedCell(1, 1, missing),
                    SpannedCell(1, 2, missing),
                    SpannedCell(2, 1, 1),
                    SpannedCell(2, 2, 2),
                ],
                nothing,
                nothing,
                postprocess = [ReplaceMissing()]
            )
            reftest(t, "references/replace/replacemissing_default")
            t = Table(
                [
                    SpannedCell(1, 1, missing),
                    SpannedCell(1, 2, nothing),
                    SpannedCell(2, 1, 1),
                    SpannedCell(2, 2, 2),
                ],
                nothing,
                nothing,
                postprocess = [ReplaceMissing(with = "???")]
            )
            reftest(t, "references/replace/replacemissing_custom")
            t = Table(
                [
                    SpannedCell(1, 1, missing),
                    SpannedCell(1, 2, nothing),
                    SpannedCell(2, 1, 1),
                    SpannedCell(2, 2, 2),
                ],
                nothing,
                nothing,
                postprocess = [Replace(x -> x isa Int, "an Int was here")]
            )
            reftest(t, "references/replace/replace_predicate_value")
            t = Table(
                [
                    SpannedCell(1, 1, missing),
                    SpannedCell(1, 2, nothing),
                    SpannedCell(2, 1, 1),
                    SpannedCell(2, 2, 2),
                ],
                nothing,
                nothing,
                postprocess = [Replace(x -> x isa Int, x -> x + 10)]
            )
            reftest(t, "references/replace/replace_predicate_function")
        end
        # Table-wide float rounding: default, disabled, and the full matrix of
        # round_mode x trailing_zeros x round_digits.
        @testset "Global rounding" begin
            cells = [
                SpannedCell(1, 1, sqrt(2)),
                SpannedCell(1, 2, 12352131.000001),
                SpannedCell(2, 1, sqrt(11251231251243123)),
                SpannedCell(2, 2, sqrt(0.00000123124)),
                SpannedCell(3, 1, Concat(1.23456, " & ", 0.0012345)),
                SpannedCell(3, 2, Multiline(1.23456, 0.0012345)),
            ]
            t = Table(
                cells,
                nothing,
                nothing,
            )
            reftest(t, "references/global_rounding/default")
            t = Table(
                cells,
                nothing,
                nothing,
                round_mode = nothing,
            )
            reftest(t, "references/global_rounding/no_rounding")
            for round_mode in [:auto, :sigdigits, :digits]
                for trailing_zeros in [true, false]
                    for round_digits in [1, 3]
                        t = Table(
                            cells,
                            nothing,
                            nothing;
                            round_mode,
                            trailing_zeros,
                            round_digits
                        )
                        reftest(t, "references/global_rounding/$(round_mode)_$(trailing_zeros)_$(round_digits)")
                    end
                end
            end
        end
        # Backend-specific escaping of characters that are special in at least
        # one output format (LaTeX, HTML, Typst).
        @testset "Character escaping" begin
            cells = [
                SpannedCell(1, 1, "& % \$ # _ { } ~ ^ \\ < > \" ' ")
            ]
            t = Table(
                cells,
                nothing,
                nothing,
            )
            reftest(t, "references/character_escaping/problematic_characters")
        end
        # `CellStyle(valign = ...)` on cells spanning multiple rows.
        @testset "Styles" begin
            cells = [
                SpannedCell(1, 1, "Row 1"),
                SpannedCell(2, 1, "Row 2"),
                SpannedCell(3, 1, "Row 3"),
                SpannedCell(1:3, 2, "top", CellStyle(valign = :top)),
                SpannedCell(1:3, 3, "center", CellStyle(valign = :center)),
                SpannedCell(1:3, 4, "bottom", CellStyle(valign = :bottom)),
            ]
            t = Table(
                cells,
                nothing,
                nothing,
            )
            reftest(t, "references/styles/valign")
        end
        # Row/column gap validation errors (skipped on docx, see TODO) and
        # rendering with gaps on single and spanned cells.
        @testset "Row and column gaps" begin
            if func !== as_docx # TODO needs logic rework for this backend
                t = Table([SpannedCell(1, 1, "Row 1")], rowgaps = [1 => 5.0])
                @test_throws_message "No row gaps allowed for a table with one row" show(devnull, func(t))
                t = Table([SpannedCell(1, 1, "Column 1")], colgaps = [1 => 5.0])
                @test_throws_message "No column gaps allowed for a table with one column" show(devnull, func(t))
                t = Table([SpannedCell(1, 1, "Row 1"), SpannedCell(2, 1, "Row 2")], rowgaps = [1 => 5.0, 2 => 5.0])
                @test_throws_message "A row gap index of 2 is invalid for a table with 2 rows" show(devnull, func(t))
                t = Table([SpannedCell(1, 1, "Column 1"), SpannedCell(1, 2, "Column 2")], colgaps = [1 => 5.0, 2 => 5.0])
                @test_throws_message "A column gap index of 2 is invalid for a table with 2 columns" show(devnull, func(t))
                t = Table([SpannedCell(1, 1, "Row 1"), SpannedCell(2, 1, "Row 2")], rowgaps = [0 => 5.0])
                @test_throws_message "A row gap index of 0 is invalid, must be at least 1" show(devnull, func(t))
                t = Table([SpannedCell(1, 1, "Column 1"), SpannedCell(1, 2, "Column 2")], colgaps = [0 => 5.0])
                @test_throws_message "A column gap index of 0 is invalid, must be at least 1" show(devnull, func(t))
            end
            t = Table([SpannedCell(i, j, "$i, $j") for i in 1:4 for j in 1:4], rowgaps = [1 => 4.0, 2 => 8.0], colgaps = [2 => 4.0, 3 => 8.0])
            reftest(t, "references/row_and_column_gaps/singlecell")
            t = Table([SpannedCell(2:4, 1, "Spanned rows"), SpannedCell(1, 2:4, "Spanned columns")], rowgaps = [1 => 4.0], colgaps = [2 => 4.0])
            reftest(t, "references/row_and_column_gaps/spanned_cells")
        end
    end
end
# `auto_round` keeps roughly `target_digits` significant figures and switches
# to scientific notation for very large/small magnitudes (see the e6/e-5 cases).
@testset "auto rounding" begin
    @test SummaryTables.auto_round( 1234567, target_digits = 4) == 1.235e6
    @test SummaryTables.auto_round( 123456.7, target_digits = 4) == 123457
    @test SummaryTables.auto_round( 12345.67, target_digits = 4) == 12346
    @test SummaryTables.auto_round( 1234.567, target_digits = 4) == 1235
    @test SummaryTables.auto_round( 123.4567, target_digits = 4) == 123.5
    @test SummaryTables.auto_round( 12.34567, target_digits = 4) == 12.35
    @test SummaryTables.auto_round( 1.234567, target_digits = 4) == 1.235
    @test SummaryTables.auto_round( 0.1234567, target_digits = 4) == 0.1235
    @test SummaryTables.auto_round( 0.01234567, target_digits = 4) == 0.01235
    @test SummaryTables.auto_round( 0.001234567, target_digits = 4) == 0.001235
    @test SummaryTables.auto_round( 0.0001234567, target_digits = 4) == 0.0001235
    @test SummaryTables.auto_round( 0.00001234567, target_digits = 4) == 1.235e-5
    @test SummaryTables.auto_round( 0.000001234567, target_digits = 4) == 1.235e-6
    @test SummaryTables.auto_round(0.0000001234567, target_digits = 4) == 1.235e-7
    @test SummaryTables.auto_round(0.1, target_digits = 4) == 0.1
    @test SummaryTables.auto_round(0.0, target_digits = 4) == 0
    @test SummaryTables.auto_round(1.0, target_digits = 4) == 1
end
# `RoundedFloat` string rendering: the round mode (:auto/:sigdigits/:digits),
# digit count, and trailing_zeros flag together determine the output string.
@testset "Formatted float strings" begin
    RF = SummaryTables.RoundedFloat
    str(rf) = sprint(io -> SummaryTables._showas(io, MIME"text"(), rf))
    x = 0.006789
    @test str(RF(x, 3, :auto, true)) == "0.00679"
    @test str(RF(x, 3, :sigdigits, true)) == "0.00679"
    @test str(RF(x, 3, :digits, true)) == "0.007"
    @test str(RF(x, 2, :auto, true)) == "0.0068"
    @test str(RF(x, 2, :sigdigits, true)) == "0.0068"
    @test str(RF(x, 2, :digits, true)) == "0.01"
    x = 0.120
    @test str(RF(x, 3, :auto, true)) == "0.12"
    @test str(RF(x, 3, :sigdigits, true)) == "0.12"
    @test str(RF(x, 3, :digits, true)) == "0.120"
    @test str(RF(x, 3, :auto, false)) == "0.12"
    @test str(RF(x, 3, :sigdigits, false)) == "0.12"
    @test str(RF(x, 3, :digits, false)) == "0.12"
    x = 1.0
    @test str(RF(x, 3, :auto, true)) == "1.0"
    @test str(RF(x, 3, :sigdigits, true)) == "1.0"
    @test str(RF(x, 3, :digits, true)) == "1.000"
    @test str(RF(x, 3, :auto, false)) == "1"
    @test str(RF(x, 3, :sigdigits, false)) == "1"
    @test str(RF(x, 3, :digits, false)) == "1"
    x = 12345678.910
    @test str(RF(x, 3, :auto, true)) == "1.23e7"
    @test str(RF(x, 3, :sigdigits, true)) == "1.23e7"
    @test str(RF(x, 3, :digits, true)) == "12345678.910"
    @test str(RF(x, 3, :auto, false)) == "1.23e7"
    @test str(RF(x, 3, :sigdigits, false)) == "1.23e7"
    @test str(RF(x, 3, :digits, false)) == "12345678.91"
end
| SummaryTables | https://github.com/PumasAI/SummaryTables.jl.git |
[
"MIT"
] | 1.0.0 | 1381f670499344657ae955634ab2ff4776b394c9 | README.md | docs | 2942 | # SummaryTables.jl
<div align="center">
<picture>
<img alt="SummaryTables.jl logo"
src="/docs/src/assets/logo.png" width="150">
</picture>
</div>
[![][docs-stable-img]][docs-stable-url]
[![][docs-master-img]][docs-master-url]
[docs-stable-img]: https://img.shields.io/badge/Docs-Stable-lightgrey.svg
[docs-stable-url]: https://pumasai.github.io/SummaryTables.jl/stable/
[docs-master-img]: https://img.shields.io/badge/Docs-Dev-blue.svg
[docs-master-url]: https://pumasai.github.io/SummaryTables.jl/dev/
SummaryTables.jl is a Julia package for creating publication-ready tables in HTML, docx, LaTeX and Typst formats.
Tables are formatted in a minimalistic style without vertical lines.
SummaryTables offers the `table_one`, `summarytable` and `listingtable` functions to generate pharmacological tables from Tables.jl-compatible data structures, as well as a low-level API to construct tables of any shape manually.
## Examples
```julia
data = DataFrame(
sex = ["m", "m", "m", "m", "f", "f", "f", "f", "f", "f"],
age = [27, 45, 34, 85, 55, 44, 24, 29, 37, 76],
blood_type = ["A", "0", "B", "B", "B", "A", "0", "A", "A", "B"],
smoker = [true, false, false, false, true, true, true, false, false, false],
)
table_one(
data,
[:age => "Age (years)", :blood_type => "Blood type", :smoker => "Smoker"],
groupby = :sex => "Sex",
show_n = true
)
```
![](/_readme/table_one.svg)
```julia
data = DataFrame(
concentration = [1.2, 4.5, 2.0, 1.5, 0.1, 1.8, 3.2, 1.8, 1.2, 0.2,
1.7, 4.2, 1.0, 0.9, 0.3, 1.7, 3.7, 1.2, 1.0, 0.2],
id = repeat([1, 2, 3, 4], inner = 5),
dose = repeat([100, 200], inner = 10),
time = repeat([0, 0.5, 1, 2, 3], 4)
)
tbl = listingtable(
data,
:concentration => "Concentration (ng/mL)",
rows = [:dose => "Dose (mg)", :id => "ID"],
cols = :time => "Time (hr)",
summarize_rows = :dose => [
length => "N",
mean => "Mean",
std => "SD",
]
)
```
![](/_readme/listingtable.svg)
## Comparison with PrettyTables.jl
[PrettyTables.jl](https://github.com/ronisbr/PrettyTables.jl/) is a well-known Julia package whose main function is formatting tabular data, for example as the backend to [DataFrames.jl](https://github.com/JuliaData/DataFrames.jl).
PrettyTables supports plain-text output because it is often used for rendering tables to the REPL, however this also means that it does not support merging cells vertically or horizontally in its current state, which is difficult to realize with plain text.
In contrast, SummaryTables' main purpose is to offer convenience functions for creating specific scientific tables which are out of scope for PrettyTables.
For our desired aesthetics, we also needed low-level control over certain output formats, for example for controlling cell border behavior in docx, which were unlikely to be added to PrettyTables at the time of writing this package.
| SummaryTables | https://github.com/PumasAI/SummaryTables.jl.git |
[
"MIT"
] | 1.0.0 | 1381f670499344657ae955634ab2ff4776b394c9 | docs/src/api.md | docs | 49 | # API
```@autodocs
Modules = [SummaryTables]
``` | SummaryTables | https://github.com/PumasAI/SummaryTables.jl.git |
[
"MIT"
] | 1.0.0 | 1381f670499344657ae955634ab2ff4776b394c9 | docs/src/index.md | docs | 1812 | # SummaryTables
SummaryTables is focused on creating tables for publications in HTML, LaTeX, docx, and Typst formats.
It offers both convenient predefined table functions that are inspired by common table formats in the pharma space, as well as an API to create completely custom tables.
It deliberately uses an opinionated, limited styling API so that styling can be as consistent as possible across the different backends.
```@example
using SummaryTables
using DataFrames
data = DataFrame(
sex = ["m", "m", "m", "m", "f", "f", "f", "f", "f", "f"],
age = [27, 45, 34, 85, 55, 44, 24, 29, 37, 76],
blood_type = ["A", "0", "B", "B", "B", "A", "0", "A", "A", "B"],
smoker = [true, false, false, false, true, true, true, false, false, false],
)
table_one(
data,
[:age => "Age (years)", :blood_type => "Blood type", :smoker => "Smoker"],
groupby = :sex => "Sex",
show_n = true
)
```
```@example
using DataFrames
using SummaryTables
using Statistics
data = DataFrame(
concentration = [1.2, 4.5, 2.0, 1.5, 0.1, 1.8, 3.2, 1.8, 1.2, 0.2],
id = repeat([1, 2], inner = 5),
time = repeat([0, 0.5, 1, 2, 3], 2)
)
listingtable(
data,
:concentration => "Concentration (ng/mL)",
rows = :id => "ID",
cols = :time => "Time (hr)",
summarize_rows = [
length => "N",
mean => "Mean",
std => "SD",
]
)
```
```@example
using DataFrames
using SummaryTables
using Statistics
data = DataFrame(
concentration = [1.2, 4.5, 2.0, 1.5, 0.1, 1.8, 3.2, 1.8, 1.2, 0.2],
id = repeat([1, 2], inner = 5),
time = repeat([0, 0.5, 1, 2, 3], 2)
)
summarytable(
data,
:concentration => "Concentration (ng/mL)",
cols = :time => "Time (hr)",
summary = [
length => "N",
mean => "Mean",
std => "SD",
]
)
``` | SummaryTables | https://github.com/PumasAI/SummaryTables.jl.git |
[
"MIT"
] | 1.0.0 | 1381f670499344657ae955634ab2ff4776b394c9 | docs/src/output.md | docs | 4632 | # Output
## HTML
In IDEs that support the `MIME"text/html"` or `MIME"juliavscode/html"` types, just `display`ing a `Table` will render it in HTML for you.
All examples in this documentation are rendered this way.
Alternatively, you can print HTML to any IO object via `show(io, MIME"text/html", table)`.
## LaTeX
You can print LaTeX code to any IO via `show(io, MIME"text/latex", table)`.
Keep in mind that the `threeparttable`, `multirow` and `booktabs` packages need to be included separately in your preamble due to the way LaTeX documents are structured.
```@example
using SummaryTables
using DataFrames
using tectonic_jll
mkpath(joinpath(@__DIR__, "outputs"))
data = DataFrame(
sex = ["m", "m", "m", "m", "f", "f", "f", "f", "f", "f"],
age = [27, 45, 34, 85, 55, 44, 24, 29, 37, 76],
blood_type = ["A", "0", "B", "B", "B", "A", "0", "A", "A", "B"],
smoker = [true, false, false, false, true, true, true, false, false, false],
)
tbl = table_one(
data,
[:age => "Age (years)", :blood_type => "Blood type", :smoker => "Smoker"],
groupby = :sex => "Sex",
show_n = true
)
# render latex in a temp directory
mktempdir() do dir
texfile = joinpath(dir, "main.tex")
open(texfile, "w") do io
# add the necessary packages in the preamble
println(io, raw"""
\documentclass{article}
\usepackage{threeparttable}
\usepackage{multirow}
\usepackage{booktabs}
\begin{document}
""")
# print the table as latex code
show(io, MIME"text/latex"(), tbl)
println(io, raw"\end{document}")
end
# render the tex file to pdf
tectonic_jll.tectonic() do bin
run(`$bin $texfile`)
end
cp(joinpath(dir, "main.pdf"), joinpath(@__DIR__, "outputs", "example.pdf"))
end
nothing # hide
```
Download `example.pdf`:
```@raw html
<a href="./../outputs/example.pdf"><img src="./../assets/icon_pdf.png" width="60">
```
## docx
To get docx output, you need to use the WriteDocx.jl package because this format is not plain-text like LaTeX or HTML.
The table node you get out of the `to_docx` function can be placed into
sections on the same level as paragraphs.
```@example
using SummaryTables
using DataFrames
import WriteDocx as W
mkpath(joinpath(@__DIR__, "outputs"))
data = DataFrame(
sex = ["m", "m", "m", "m", "f", "f", "f", "f", "f", "f"],
age = [27, 45, 34, 85, 55, 44, 24, 29, 37, 76],
blood_type = ["A", "0", "B", "B", "B", "A", "0", "A", "A", "B"],
smoker = [true, false, false, false, true, true, true, false, false, false],
)
tbl = table_one(
data,
[:age => "Age (years)", :blood_type => "Blood type", :smoker => "Smoker"],
groupby = :sex => "Sex",
show_n = true
)
doc = W.Document(
W.Body([
W.Section([
SummaryTables.to_docx(tbl)
])
])
)
W.save(joinpath(@__DIR__, "outputs", "example.docx"), doc)
nothing # hide
```
Download `example.docx`:
```@raw html
<a href="./../outputs/example.docx"><img src="./../assets/icon_docx.png" width="60">
```
## Typst
You can print [Typst](https://github.com/typst/typst) table code to any IO via `show(io, MIME"text/typst", table)`.
The Typst backend is using the [tablex](https://github.com/PgBiel/typst-tablex/) package.
Due to the way Typst's package manager works, you do not have to add any other information to your `.typ` file to make SummaryTables' Typst code work.
```@example
using SummaryTables
using DataFrames
using Typst_jll
mkpath(joinpath(@__DIR__, "outputs"))
data = DataFrame(
sex = ["m", "m", "m", "m", "f", "f", "f", "f", "f", "f"],
age = [27, 45, 34, 85, 55, 44, 24, 29, 37, 76],
blood_type = ["A", "0", "B", "B", "B", "A", "0", "A", "A", "B"],
smoker = [true, false, false, false, true, true, true, false, false, false],
)
tbl = table_one(
data,
[:age => "Age (years)", :blood_type => "Blood type", :smoker => "Smoker"],
groupby = :sex => "Sex",
show_n = true
)
# render typst in a temp directory
mktempdir() do dir
typfile = joinpath(dir, "example.typ")
open(typfile, "w") do io
        # print the table as typst code
show(io, MIME"text/typst"(), tbl)
end
# render the tex file to pdf
Typst_jll.typst() do bin
run(`$bin compile $typfile`)
end
cp(joinpath(dir, "example.pdf"), joinpath(@__DIR__, "outputs", "example_typst.pdf"))
end
nothing # hide
```
Download `example_typst.pdf`:
```@raw html
<a href="./../outputs/example_typst.pdf"><img src="./../assets/icon_pdf.png" width="60">
```
| SummaryTables | https://github.com/PumasAI/SummaryTables.jl.git |
[
"MIT"
] | 1.0.0 | 1381f670499344657ae955634ab2ff4776b394c9 | docs/src/custom_tables/cell.md | docs | 3814 | # `Cell`
## Argument 1: `value`
This is the content of the `Cell`.
How it is rendered is decided by the output format and what `show` methods are defined for the type of `value` and the respective output `MIME` type.
If no output-specific `MIME` type has a `show` method, the fallback is always the generic text output.
The following are some types which receive special handling by SummaryTables.
### Special `Cell` value types
#### Floating point numbers
Most tables display floating point numbers, however, the formatting of these numbers can vary.
SummaryTables postprocesses every table in order to find unformatted floating point numbers.
These are then given the default, table-wide, formatting.
```@example
using SummaryTables
cells = [
Cell(1.23456) Cell(12.3456)
Cell(0.123456) Cell(0.0123456)
]
Table(cells)
```
```@example
using SummaryTables
cells = [
Cell(1.23456) Cell(12.3456)
Cell(0.123456) Cell(0.0123456)
]
Table(cells; round_mode = :digits, round_digits = 5, trailing_zeros = true)
```
#### `Concat`
All the arguments of `Concat` are concatenated together in the final output.
Note that this is usually preferrable to string-interpolating multiple values because you lose special handling of the value types (like floating point rounding behavior or special LaTeX formatting) if you turn them into strings.
```@example
using SummaryTables
using Statistics
some_numbers = [1, 2, 4, 7, 8, 13, 27]
mu = mean(some_numbers)
sd = std(some_numbers)
cells = [
Cell("Mean (SD) interpolated") Cell("$mu ($sd)")
Cell("Mean (SD) Concat") Cell(Concat(mu, " (", sd, ")"))
]
Table(cells)
```
#### `Multiline`
Use the `Multiline` type to force linebreaks between different values in a cell.
A `Multiline` value may not be nested inside other values in a cell, it may only be the outermost value.
All nested values retain their special behaviors, so using `Multiline` is preferred over hardcoding linebreaks in the specific output formats yourself.
```@example
using SummaryTables
cells = [
Cell(Multiline("A1 a", "A1 b")) Cell("B1")
Cell("A2") Cell("B2")
]
Table(cells)
```
#### `Annotated`
To annotate elements in a table with footnotes, use the `Annotated` type.
It takes an arbitrary `value` to annotate as well as an `annotation` which becomes a footnote in the table.
You can also pass the `label` keyword if you don't want an auto-incrementing number as the label.
You can also pass `label = nothing` if you want a footnote without label.
```@example
using SummaryTables
cells = [
Cell(Annotated("A1", "This is the first cell")) Cell("B1")
Cell(Annotated("A2", "A custom label", label = "x")) Cell("B2")
Cell(Annotated("-", "- A missing value", label = nothing)) Cell("B3")
]
Table(cells)
```
#### `Superscript`
Displays the wrapped value in superscript style.
Use this instead of hardcoding output format specific commands.
```@example
using SummaryTables
cells = [
Cell("Without superscript") Cell(Concat("With ", Superscript("superscript")));
]
Table(cells)
```
#### `Subscript`
Displays the wrapped value in subscript style.
Use this instead of hardcoding output format specific commands.
```@example
using SummaryTables
cells = [
Cell("Without subscript") Cell(Concat("With ", Subscript("subscript")));
]
Table(cells)
```
## Optional argument 2: `cellstyle`
You may pass the style settings of a `Cell` as a positional argument of type [`CellStyle`](@ref).
It is usually more convenient, however, to use the keyword arguments to `Cell` instead.
```@example
using SummaryTables
Table([
Cell("A1", CellStyle(bold = true)) Cell("B1", CellStyle(underline = true))
Cell("A2", CellStyle(italic = true)) Cell("B2", CellStyle(indent_pt = 10))
])
```
| SummaryTables | https://github.com/PumasAI/SummaryTables.jl.git |
[
"MIT"
] | 1.0.0 | 1381f670499344657ae955634ab2ff4776b394c9 | docs/src/custom_tables/cellstyle.md | docs | 3268 | # `CellStyle`
## Keyword: `bold`
Makes the text in the cell bold.
```@example
using SummaryTables
cells = reshape([
Cell("Some text in bold", bold = true),
], :, 1)
Table(cells)
```
## Keyword: `italic`
Makes the text in the cell italic.
```@example
using SummaryTables
cells = reshape([
Cell("Some text in italic", italic = true),
], :, 1)
Table(cells)
```
## Keyword: `underline`
Underlines the text in the cell.
```@example
using SummaryTables
cells = reshape([
Cell(Multiline("Some", "text", "that is", "underlined"), underline = true),
], :, 1)
Table(cells)
```
## Keyword: `halign`
Aligns the cell content horizontally either at the `:left`, the `:center` or the `:right`.
```@example
using SummaryTables
cells = reshape([
Cell("A wide cell"),
Cell(":left", halign = :left),
Cell(":center", halign = :center),
Cell(":right", halign = :right),
], :, 1)
Table(cells)
```
## Keyword: `valign`
Aligns the cell content vertically either at the `:top`, the `:center` or the `:bottom`.
```@example
using SummaryTables
cells = reshape([
Cell(Multiline("A", "tall", "cell")),
Cell(":top", valign = :top),
Cell(":center", valign = :center),
Cell(":bottom", valign = :bottom),
], 1, :)
Table(cells)
```
## Keyword: `indent_pt`
Indents the content of the cell on the left by the given number of `pt` units.
This can be used to give hierarchical structure to adjacent rows.
```@example
using SummaryTables
C(value; kwargs...) = Cell(value; halign = :left, kwargs...)
cells = [
C("Group A") C("Group B")
C("Subgroup A", indent_pt = 6) C("Subgroup B", indent_pt = 6)
C("Subgroup A", indent_pt = 6) C("Subgroup B", indent_pt = 6)
]
Table(cells)
```
## Keyword: `border_bottom`
Adds a border at the bottom of the cell.
This option is meant for horizontally merged cells functioning as subheaders.
```@example
using SummaryTables
header_cell = Cell("header", border_bottom = true, merge = true)
cells = [
header_cell header_cell
Cell("body") Cell("body")
]
Table(cells)
```
## Keyword: `merge`
All adjacent cells that are `==` equal to each other and have `merge = true` will be rendered as one merged cell.
```@example
using SummaryTables
merged_cell = Cell("merged", valign = :center, merge = true)
cells = [
Cell("A1") Cell("B1") Cell("C1") Cell("D1")
Cell("A2") merged_cell merged_cell Cell("D2")
Cell("A3") merged_cell merged_cell Cell("D3")
Cell("A4") Cell("B4") Cell("C4") Cell("D4")
]
Table(cells)
```
## Keyword: `mergegroup`
Because adjacent cells that are `==` equal to each other are merged when `merge = true` is set, you can optionally
set the `mergegroup` keyword of adjacent cells to a different value to avoid merging them even if their values are otherwise equal.
```@example
using SummaryTables
merged_cell_1 = Cell("merged", valign = :center, merge = true, mergegroup = 1)
merged_cell_2 = Cell("merged", valign = :center, merge = true, mergegroup = 2)
cells = [
Cell("A1") Cell("B1") Cell("C1") Cell("D1")
Cell("A2") merged_cell_1 merged_cell_2 Cell("D2")
Cell("A3") merged_cell_1 merged_cell_2 Cell("D3")
Cell("A4") Cell("B4") Cell("C4") Cell("D4")
]
Table(cells)
```
| SummaryTables | https://github.com/PumasAI/SummaryTables.jl.git |
[
"MIT"
] | 1.0.0 | 1381f670499344657ae955634ab2ff4776b394c9 | docs/src/custom_tables/table.md | docs | 1986 | # `Table`
You can build custom tables using the `Table` type.
## Argument 1: `cells`
The table's content is given as an `AbstractMatrix` of `Cell`s:
```@example
using SummaryTables
cells = [Cell("$col$row") for row in 1:5, col in 'A':'E']
Table(cells)
```
## Keyword: `header`
You can pass an `Int` to mark the last row of the header section.
A divider line is placed after this row.
```@example
using SummaryTables
cells = [Cell("$col$row") for row in 1:5, col in 'A':'E']
Table(cells; header = 1)
```
## Keyword: `footer`
You can pass an `Int` to mark the first row of the footer section.
A divider line is placed before this row.
```@example
using SummaryTables
cells = [Cell("$col$row") for row in 1:5, col in 'A':'E']
Table(cells; footer = 5)
```
## Keyword: `footnotes`
The `footnotes` keyword allows you to add custom footnotes to the table that do not correspond to specific [`Annotated`](@ref) values in the table.
```@example
using SummaryTables
cells = [Cell("$col$row") for row in 1:5, col in 'A':'E']
Table(cells; footnotes = ["Custom footnote 1", "Custom footnote 2"])
```
## Keyword: `rowgaps`
It can be beneficial for the readability of larger tables to add gaps between certain rows.
These gaps can be passed as a `Vector` of `Pair`s where the first element is the index of the row gap and the second element is the gap size in `pt`.
```@example
using SummaryTables
cells = [Cell("$col$row") for row in 1:9, col in 'A':'E']
Table(cells; rowgaps = [3 => 8.0, 6 => 8.0])
```
## Keyword: `colgaps`
It can be beneficial for the readability of larger tables to add gaps between certain columns.
These gaps can be passed as a `Vector` of `Pair`s where the first element is the index of the column gap and the second element is the gap size in `pt`.
```@example
using SummaryTables
cells = [Cell("$col$row") for row in 1:5, col in 'A':'I']
Table(cells; colgaps = [3 => 8.0, 6 => 8.0])
```
## Types of cell values
TODO: List the different options here
| SummaryTables | https://github.com/PumasAI/SummaryTables.jl.git |
[
"MIT"
] | 1.0.0 | 1381f670499344657ae955634ab2ff4776b394c9 | docs/src/predefined_tables/listingtable.md | docs | 13711 | # `listingtable`
## Synopsis
A listing table displays the raw data from one column of a source table, with optional summary sections interleaved between.
The row and column structure of the listing table is defined by grouping columns from the source table.
Each row of data has to have its own cell in the listing table, therefore the grouping applied along rows and columns must be exhaustive, i.e., no two rows may end up in the same group together.
Here is an example of a hypothetical clinical trial with drug concentration measurements of two participants with five time points each.
```@example
using DataFrames
using SummaryTables
using Statistics
data = DataFrame(
concentration = [1.2, 4.5, 2.0, 1.5, 0.1, 1.8, 3.2, 1.8, 1.2, 0.2],
id = repeat([1, 2], inner = 5),
time = repeat([0, 0.5, 1, 2, 3], 2)
)
listingtable(
data,
:concentration => "Concentration (ng/mL)",
rows = :id => "ID",
cols = :time => "Time (hr)",
summarize_rows = [
length => "N",
mean => "Mean",
std => "SD",
]
)
```
## Argument 1: `table`
The first argument can be any object that is a table compatible with the `Tables.jl` API.
Here are some common examples:
### `DataFrame`
```@example
using DataFrames
using SummaryTables
data = DataFrame(value = 1:6, group1 = repeat(["A", "B", "C"], 2), group2 = repeat(["D", "E"], inner = 3))
listingtable(data, :value, rows = :group1, cols = :group2)
```
### `NamedTuple` of `Vector`s
```@example
using SummaryTables
data = (; value = 1:6, group1 = repeat(["A", "B", "C"], 2), group2 = repeat(["D", "E"], inner = 3))
listingtable(data, :value, rows = :group1, cols = :group2)
```
### `Vector` of `NamedTuple`s
```@example
using SummaryTables
data = [
(value = 1, group1 = "A", group2 = "D")
(value = 2, group1 = "B", group2 = "D")
(value = 3, group1 = "C", group2 = "D")
(value = 4, group1 = "A", group2 = "E")
(value = 5, group1 = "B", group2 = "E")
(value = 6, group1 = "C", group2 = "E")
]
listingtable(data, :value, rows = :group1, cols = :group2)
```
## Argument 2: `variable`
The second argument primarily selects the table column whose data should populate the cells of the listing table.
The column name is specified with a `Symbol`:
```@example
using DataFrames
using SummaryTables
data = DataFrame(
value1 = 1:6,
value2 = 7:12,
group1 = repeat(["A", "B", "C"], 2),
group2 = repeat(["D", "E"], inner = 3)
)
listingtable(data, :value1, rows = :group1, cols = :group2)
```
Here we choose to list column `:value2` instead:
```@example
using DataFrames
using SummaryTables
data = DataFrame(
value1 = 1:6,
value2 = 7:12,
group1 = repeat(["A", "B", "C"], 2),
group2 = repeat(["D", "E"], inner = 3)
)
listingtable(data, :value2, rows = :group1, cols = :group2)
```
By default, the variable name is used as the label as well.
You can pass a different label as the second element of a `Pair` using the `=>` operators.
The label can be of any type (refer to [Types of cell values](@ref) for a list).
```@example
using DataFrames
using SummaryTables
data = DataFrame(
value1 = 1:6,
value2 = 7:12,
group1 = repeat(["A", "B", "C"], 2),
group2 = repeat(["D", "E"], inner = 3)
)
listingtable(data, :value1 => "Value", rows = :group1, cols = :group2)
```
## Optional argument 3: `pagination`
A listing table can grow large, in which case it may make sense to split it into multiple pages.
You can pass a `Pagination` object with `rows` and / or `cols` keyword arguments.
The `Int` you pass to `rows` and / or `cols` determines how many "sections" of the table along that dimension are included in a single page.
If there are no summary statistics, a "section" is a single row or column.
If there are summary statistics, a "section" includes all the rows or columns that are summarized together (as it would not make sense to split summarized groups across multiple pages).
If the `pagination` argument is provided, the return type of `listingtable` changes to `PaginatedTable{ListingPageMetadata}`.
This object has an interactive HTML representation for convenience; its exact form should not be considered stable across SummaryTables versions.
The `PaginatedTable` should be deconstructed into separate `Table`s when you want to include these in a document.
Here is an example listing table without pagination:
```@example
using DataFrames
using SummaryTables
data = DataFrame(
value = 1:30,
group1 = repeat(["A", "B", "C", "D", "E"], 6),
group2 = repeat(["F", "G", "H", "I", "J", "K"], inner = 5)
)
listingtable(data, :value, rows = :group1, cols = :group2)
```
And here is the same table paginated into groups of 3 sections along the both the rows and columns.
Note that there are only five rows in the original table, which is not divisible by 3, so two pages have only two rows.
```@example
using DataFrames
using SummaryTables
data = DataFrame(
value = 1:30,
group1 = repeat(["A", "B", "C", "D", "E"], 6),
group2 = repeat(["F", "G", "H", "I", "J", "K"], inner = 5)
)
listingtable(data, :value, Pagination(rows = 3, cols = 3), rows = :group1, cols = :group2)
```
We can also paginate only along the rows:
```@example
using DataFrames
using SummaryTables
data = DataFrame(
value = 1:30,
group1 = repeat(["A", "B", "C", "D", "E"], 6),
group2 = repeat(["F", "G", "H", "I", "J", "K"], inner = 5)
)
listingtable(data, :value, Pagination(rows = 3), rows = :group1, cols = :group2)
```
Or only along the columns:
```@example
using DataFrames
using SummaryTables
data = DataFrame(
value = 1:30,
group1 = repeat(["A", "B", "C", "D", "E"], 6),
group2 = repeat(["F", "G", "H", "I", "J", "K"], inner = 5)
)
listingtable(data, :value, Pagination(cols = 3), rows = :group1, cols = :group2)
```
## Keyword: `rows`
The `rows` keyword determines the grouping structure along the rows.
It can either be a `Symbol` specifying a grouping column, a `Pair{Symbol,Any}` where the second element overrides the group's label, or a `Vector` with multiple groups of the aforementioned format.
This example uses a single group with default label.
```@example
using DataFrames
using SummaryTables
data = DataFrame(
value = 1:5,
group = ["A", "B", "C", "D", "E"],
)
listingtable(data, :value, rows = :group)
```
The label can be overridden using the `Pair` operator.
```@example
using DataFrames
using SummaryTables
data = DataFrame(
value = 1:5,
group = ["A", "B", "C", "D", "E"],
)
listingtable(data, :value, rows = :group => "Group")
```
Multiple groups are possible as well, in that case you get a nested display where the last group changes the fastest.
```@example
using DataFrames
using SummaryTables
data = DataFrame(
value = 1:5,
group1 = ["F", "F", "G", "G", "G"],
group2 = ["A", "B", "C", "D", "E"],
)
listingtable(data, :value, rows = [:group1, :group2 => "Group 2"])
```
## Keyword: `cols`
The `cols` keyword determines the grouping structure along the columns.
It can either be a `Symbol` specifying a grouping column, a `Pair{Symbol,Any}` where the second element overrides the group's label, or a `Vector` with multiple groups of the aforementioned format.
This example uses a single group with default label.
```@example
using DataFrames
using SummaryTables
data = DataFrame(
value = 1:5,
group = ["A", "B", "C", "D", "E"],
)
listingtable(data, :value, cols = :group)
```
The label can be overridden using the `Pair` operator.
```@example
using DataFrames
using SummaryTables
data = DataFrame(
value = 1:5,
group = ["A", "B", "C", "D", "E"],
)
listingtable(data, :value, cols = :group => "Group")
```
Multiple groups are possible as well, in that case you get a nested display where the last group changes the fastest.
```@example
using DataFrames
using SummaryTables
data = DataFrame(
value = 1:5,
group1 = ["F", "F", "G", "G", "G"],
group2 = ["A", "B", "C", "D", "E"],
)
listingtable(data, :value, cols = [:group1, :group2 => "Group 2"])
```
## Keyword: `summarize_rows`
This keyword takes a list of aggregation functions which are used to summarize the listed variable along the rows.
A summary function should take a vector of values (usually that will be numbers) and output one summary value.
This value can be of any type that SummaryTables can show in a cell (refer to [Types of cell values](@ref) for a list).
```@example
using DataFrames
using SummaryTables
using Statistics: mean, std
data = DataFrame(
value = 1:24,
group1 = repeat(["A", "B", "C", "D", "E", "F"], 4),
group2 = repeat(["G", "H", "I", "J"], inner = 6),
)
mean_sd(values) = Concat(mean(values), " (", std(values), ")")
listingtable(data,
:value,
rows = :group1,
cols = :group2,
summarize_rows = [
mean,
std => "SD",
mean_sd => "Mean (SD)",
]
)
```
By default, one summary will be calculated over all rows of a given column.
You can also choose to compute one summary for each group of a row grouping column, which makes sense if there is more than one row grouping column.
In this example, one summary is computed for each level of the `group1` column.
```@example
using DataFrames
using SummaryTables
using Statistics: mean, std
data = DataFrame(
value = 1:24,
group1 = repeat(["X", "Y"], 12),
group2 = repeat(["A", "B", "C"], 8),
group3 = repeat(["G", "H", "I", "J"], inner = 6),
)
mean_sd(values) = Concat(mean(values), " (", std(values), ")")
listingtable(data,
:value,
rows = [:group1, :group2],
cols = :group3,
summarize_rows = :group1 => [
mean,
std => "SD",
mean_sd => "Mean (SD)",
]
)
```
## Keyword: `summarize_cols`
This keyword takes a list of aggregation functions which are used to summarize the listed variable along the columns.
A summary function should take a vector of values (usually that will be numbers) and output one summary value.
This value can be of any type that SummaryTables can show in a cell (refer to [Types of cell values](@ref) for a list).
```@example
using DataFrames
using SummaryTables
using Statistics: mean, std
data = DataFrame(
value = 1:24,
group1 = repeat(["A", "B", "C", "D", "E", "F"], 4),
group2 = repeat(["G", "H", "I", "J"], inner = 6),
)
mean_sd(values) = Concat(mean(values), " (", std(values), ")")
listingtable(data,
:value,
rows = :group1,
cols = :group2,
summarize_cols = [
mean,
std => "SD",
mean_sd => "Mean (SD)",
]
)
```
By default, one summary will be calculated over all columns of a given row.
You can also choose to compute one summary for each group of a column grouping column, which makes sense if there is more than one column grouping column.
In this example, one summary is computed for each level of the `group1` column.
```@example
using DataFrames
using SummaryTables
using Statistics: mean, std
data = DataFrame(
value = 1:24,
group1 = repeat(["X", "Y"], 12),
group2 = repeat(["A", "B", "C"], 8),
group3 = repeat(["G", "H", "I", "J"], inner = 6),
)
mean_sd(values) = Concat(mean(values), " (", std(values), ")")
listingtable(data,
:value,
cols = [:group1, :group2],
rows = :group3,
summarize_cols = :group1 => [
mean,
std => "SD",
mean_sd => "Mean (SD)",
]
)
```
## Keyword: `variable_header`
If you set `variable_header = false`, you can hide the header cell with the variable label, which makes the table layout a little more compact.
Here is a table with the header cell:
```@example
using DataFrames
using SummaryTables
data = DataFrame(
value = 1:6,
group1 = repeat(["A", "B", "C"], 2),
group2 = repeat(["D", "E"], inner = 3)
)
listingtable(data, :value, rows = :group1, cols = :group2, variable_header = true)
```
And here is a table without it:
```@example
using DataFrames
using SummaryTables
data = DataFrame(
value = 1:6,
group1 = repeat(["A", "B", "C"], 2),
group2 = repeat(["D", "E"], inner = 3)
)
listingtable(data, :value, rows = :group1, cols = :group2, variable_header = false)
```
## Keyword: `sort`
By default, group entries are sorted.
If you need to maintain the order of entries from your dataset, set `sort = false`.
Notice how in the following two examples, the group indices are `"dos"`, `"tres"`, `"uno"` when sorted, but `"uno"`, `"dos"`, `"tres"` when not sorted.
If we want to preserve the natural order of these groups ("uno", "dos", "tres" meaning "one", "two", "three" in Spanish but having a different alphabetical order) we need to set `sort = false`.
```@example sort
using DataFrames
using SummaryTables
data = DataFrame(
value = 1:6,
group1 = repeat(["uno", "dos", "tres"], inner = 2),
group2 = repeat(["cuatro", "cinco"], 3),
)
listingtable(data, :value, rows = :group1, cols = :group2)
```
```@example sort
listingtable(data, :value, rows = :group1, cols = :group2, sort = false)
```
!!! warning
If you have multiple groups, `sort = false` can lead to splitting of higher-level groups if they are not correctly ordered in the source data.
Compare the following two tables.
In the second one, the group "A" is split by "B" so the label appears twice.
```@example bad_sort
using SummaryTables
using DataFrames
data = DataFrame(
value = 1:4,
group1 = ["A", "B", "B", "A"],
group2 = ["C", "D", "C", "D"],
)
listingtable(data, :value, rows = [:group1, :group2])
```
```@example bad_sort
data = DataFrame(
value = 1:4,
group1 = ["A", "B", "B", "A"],
group2 = ["C", "D", "C", "D"],
)
listingtable(data, :value, rows = [:group1, :group2], sort = false)
```
| SummaryTables | https://github.com/PumasAI/SummaryTables.jl.git |
[
"MIT"
] | 1.0.0 | 1381f670499344657ae955634ab2ff4776b394c9 | docs/src/predefined_tables/summarytable.md | docs | 8756 | # `summarytable`
## Synopsis
A summary table summarizes the raw data from one column of a source table for different groups defined by grouping columns.
It is similar to a [`listingtable`](@ref) without the raw values.
Here is an example of a hypothetical clinical trial with drug concentration measurements of two participants with five time points each.
```@example
using DataFrames
using SummaryTables
using Statistics
data = DataFrame(
concentration = [1.2, 4.5, 2.0, 1.5, 0.1, 1.8, 3.2, 1.8, 1.2, 0.2],
id = repeat([1, 2], inner = 5),
time = repeat([0, 0.5, 1, 2, 3], 2)
)
summarytable(
data,
:concentration => "Concentration (ng/mL)",
cols = :time => "Time (hr)",
summary = [
length => "N",
mean => "Mean",
std => "SD",
]
)
```
## Argument 1: `table`
The first argument can be any object that is a table compatible with the `Tables.jl` API.
Here are some common examples:
### `DataFrame`
```@example
using DataFrames
using SummaryTables
using Statistics
data = DataFrame(
value = 1:6,
group = repeat(["A", "B", "C"], 2),
)
summarytable(data, :value, cols = :group, summary = [mean, std])
```
### `NamedTuple` of `Vector`s
```@example
using SummaryTables
using Statistics
data = (; value = 1:6, group = repeat(["A", "B", "C"], 2))
summarytable(data, :value, cols = :group, summary = [mean, std])
```
### `Vector` of `NamedTuple`s
```@example
using SummaryTables
using Statistics
data = [
(value = 1, group = "A")
(value = 2, group = "B")
(value = 3, group = "C")
(value = 4, group = "A")
(value = 5, group = "B")
(value = 6, group = "C")
]
summarytable(data, :value, cols = :group, summary = [mean, std])
```
## Argument 2: `variable`
The second argument primarily selects the table column whose data should populate the cells of the summary table.
The column name is specified with a `Symbol`:
```@example
using DataFrames
using SummaryTables
using Statistics
data = DataFrame(
value1 = 1:6,
value2 = 7:12,
group = repeat(["A", "B", "C"], 2),
)
summarytable(data, :value1, cols = :group, summary = [mean, std])
```
Here we choose to list column `:value2` instead:
```@example
using DataFrames
using SummaryTables
using Statistics
data = DataFrame(
value1 = 1:6,
value2 = 7:12,
group = repeat(["A", "B", "C"], 2),
)
summarytable(data, :value2, cols = :group, summary = [mean, std])
```
By default, the variable name is used as the label as well.
You can pass a different label as the second element of a `Pair` using the `=>` operators.
The label can be of any type (refer to [Types of cell values](@ref) for a list).
```@example
using DataFrames
using SummaryTables
using Statistics
data = DataFrame(
value1 = 1:6,
value2 = 7:12,
group = repeat(["A", "B", "C"], 2),
)
summarytable(data, :value1 => "Value", cols = :group, summary = [mean, std])
```
## Keyword: `rows`
The `rows` keyword determines the grouping structure along the rows.
It can either be a `Symbol` specifying a grouping column, a `Pair{Symbol,Any}` where the second element overrides the group's label, or a `Vector` with multiple groups of the aforementioned format.
This example uses a single group with default label.
```@example
using DataFrames
using SummaryTables
using Statistics
data = DataFrame(
value = 1:6,
group = repeat(["A", "B", "C"], 2),
)
summarytable(data, :value, rows = :group, summary = [mean, std])
```
The label can be overridden using the `Pair` operator.
```@example
using DataFrames
using SummaryTables
using Statistics
data = DataFrame(
value = 1:6,
group = repeat(["A", "B", "C"], 2),
)
summarytable(data, :value, rows = :group => "Group", summary = [mean, std])
```
Multiple groups are possible as well, in that case you get a nested display where the last group changes the fastest.
```@example
using DataFrames
using SummaryTables
using Statistics
data = DataFrame(
value = 1:12,
group1 = repeat(["A", "B"], inner = 6),
group2 = repeat(["C", "D", "E"], 4),
)
summarytable(data, :value, rows = [:group1, :group2 => "Group 2"], summary = [mean, std])
```
## Keyword: `cols`
The `cols` keyword determines the grouping structure along the columns.
It can either be a `Symbol` specifying a grouping column, a `Pair{Symbol,Any}` where the second element overrides the group's label, or a `Vector` with multiple groups of the aforementioned format.
This example uses a single group with default label.
```@example
using DataFrames
using SummaryTables
using Statistics
data = DataFrame(
value = 1:6,
group = repeat(["A", "B", "C"], 2),
)
summarytable(data, :value, cols = :group, summary = [mean, std])
```
The label can be overridden using the `Pair` operator.
```@example
using DataFrames
using SummaryTables
using Statistics
data = DataFrame(
value = 1:6,
group = repeat(["A", "B", "C"], 2),
)
summarytable(data, :value, cols = :group => "Group", summary = [mean, std])
```
Multiple groups are possible as well, in that case you get a nested display where the last group changes the fastest.
```@example
using DataFrames
using SummaryTables
using Statistics
data = DataFrame(
value = 1:12,
group1 = repeat(["A", "B"], inner = 6),
group2 = repeat(["C", "D", "E"], 4),
)
summarytable(data, :value, cols = [:group1, :group2 => "Group 2"], summary = [mean, std])
```
## Keyword: `summary`
This keyword takes a list of aggregation functions which are used to summarize the chosen variable.
A summary function should take a vector of values (usually that will be numbers) and output one summary value.
This value can be of any type that SummaryTables can show in a cell (refer to [Types of cell values](@ref) for a list).
```@example
using DataFrames
using SummaryTables
using Statistics
data = DataFrame(
value = 1:24,
group1 = repeat(["A", "B", "C", "D"], 6),
group2 = repeat(["E", "F", "G"], inner = 8),
)
mean_sd(values) = Concat(mean(values), " (", std(values), ")")
summarytable(
data,
:value,
rows = :group1,
cols = :group2,
summary = [
mean,
std => "SD",
mean_sd => "Mean (SD)",
]
)
```
## Keyword: `variable_header`
If you set `variable_header = false`, you can hide the header cell with the variable label, which makes the table layout a little more compact.
Here is a table with the header cell:
```@example
using DataFrames
using SummaryTables
using Statistics
data = DataFrame(
value = 1:24,
group1 = repeat(["A", "B", "C", "D"], 6),
group2 = repeat(["E", "F", "G"], inner = 8),
)
summarytable(
data,
:value,
rows = :group1,
cols = :group2,
summary = [mean, std],
)
```
And here is a table without it:
```@example
using DataFrames
using SummaryTables
using Statistics
data = DataFrame(
value = 1:24,
group1 = repeat(["A", "B", "C", "D"], 6),
group2 = repeat(["E", "F", "G"], inner = 8),
)
summarytable(
data,
:value,
rows = :group1,
cols = :group2,
summary = [mean, std],
variable_header = false,
)
```
## Keyword: `sort`
By default, group entries are sorted.
If you need to maintain the order of entries from your dataset, set `sort = false`.
Notice how in the following two examples, the group indices are `"dos"`, `"tres"`, `"uno"` when sorted, but `"uno"`, `"dos"`, `"tres"` when not sorted.
If we want to preserve the natural order of these groups ("uno", "dos", "tres" meaning "one", "two", "three" in Spanish but having a different alphabetical order) we need to set `sort = false`.
```@example sort
using DataFrames
using SummaryTables
using Statistics
data = DataFrame(
value = 1:18,
group1 = repeat(["uno", "dos", "tres"], inner = 6),
group2 = repeat(["cuatro", "cinco"], 9),
)
summarytable(data, :value, rows = :group1, cols = :group2, summary = [mean, std])
```
```@example sort
summarytable(data, :value, rows = :group1, cols = :group2, summary = [mean, std], sort = false)
```
!!! warning
If you have multiple groups, `sort = false` can lead to splitting of higher-level groups if they are not correctly ordered in the source data.
Compare the following two tables.
In the second one, the group "A" is split by "B" so the label appears twice.
```@example bad_sort
using SummaryTables
using DataFrames
using Statistics
data = DataFrame(
value = 1:4,
group1 = ["A", "B", "B", "A"],
group2 = ["C", "D", "C", "D"],
)
summarytable(data, :value, rows = [:group1, :group2], summary = [mean])
```
```@example bad_sort
data = DataFrame(
value = 1:4,
group1 = ["A", "B", "B", "A"],
group2 = ["C", "D", "C", "D"],
)
summarytable(data, :value, rows = [:group1, :group2], summary = [mean], sort = false)
```
| SummaryTables | https://github.com/PumasAI/SummaryTables.jl.git |
[
"MIT"
] | 1.0.0 | 1381f670499344657ae955634ab2ff4776b394c9 | docs/src/predefined_tables/table_one.md | docs | 7299 | # `table_one`
## Synopsis
"Table 1" is a common term for the first table in a paper that summarizes demographic and other individual data of the population that is being studied.
In general terms, it is a table where different columns from the source table are summarized separately, stacked along the rows.
The types of analysis can be chosen manually, or will be selected given the column types.
Optionally, there can be grouping applied along the columns as well.
In this example, several variables of a hypothetical population are analyzed split by sex.
```@example
using SummaryTables
using DataFrames
data = DataFrame(
sex = ["m", "m", "m", "m", "f", "f", "f", "f", "f", "f"],
age = [27, 45, 34, 85, 55, 44, 24, 29, 37, 76],
blood_type = ["A", "0", "B", "B", "B", "A", "0", "A", "A", "B"],
smoker = [true, false, false, false, true, true, true, false, false, false],
)
table_one(
data,
[:age => "Age (years)", :blood_type => "Blood type", :smoker => "Smoker"],
groupby = :sex => "Sex",
show_n = true
)
```
## Argument 1: `table`
The first argument can be any object that is a table compatible with the `Tables.jl` API.
Here are some common examples:
### `DataFrame`
```@example
using DataFrames
using SummaryTables
data = DataFrame(x = [1, 2, 3], y = ["4", "5", "6"])
table_one(data, [:x, :y])
```
### `NamedTuple` of `Vector`s
```@example
using SummaryTables
data = (; x = [1, 2, 3], y = ["4", "5", "6"])
table_one(data, [:x, :y])
```
### `Vector` of `NamedTuple`s
```@example
using SummaryTables
data = [(; x = 1, y = "4"), (; x = 2, y = "5"), (; x = 3, y = "6")]
table_one(data, [:x, :y])
```
## Argument 2: `analyses`
The second argument takes a vector specifying analyses, with one entry for each "row section" of the resulting table.
If only one analysis is passed, the vector can be omitted.
Each analysis can have up to three parts: the variable, the analysis function and the label.
The variable is passed as a `Symbol`, corresponding to a column in the input data, and must always be specified.
The other two parts are optional.
If you specify only variables, the analysis functions are chosen automatically based on the columns, and the labels are equal to the variable names.
Number variables show the mean, standard deviation, median, minimum and maximum.
String variables or other non-numeric variables show counts and percentages of each element type.
```@example
using SummaryTables
data = (; x = [1, 2, 3], y = ["a", "b", "a"])
table_one(data, [:x, :y])
```
In the next example, we rename the `x` variable by passing a `String` in a `Pair`.
```@example
using SummaryTables
data = (; x = [1, 2, 3], y = ["a", "b", "a"])
table_one(data, [:x => "Variable X", :y])
```
Labels can be any type except `<:Function` (that type signals that an analysis function has been passed).
One example of a non-string label is `Concat` in conjunction with `Superscript`.
```@example
using SummaryTables
data = (; x = [1, 2, 3], y = ["a", "b", "a"])
table_one(data, [:x => Concat("X", Superscript("with superscript")), :y])
```
Any object which is a subtype of `Function` is assumed to be an analysis function.
An analysis function takes a data column as input and returns a `Tuple` where each entry corresponds to one analysis row.
Each of these rows consists of a `Pair` where the left side is the analysis result and the right side the label.
Here's an example of a custom number column analysis function.
Note the use of `Concat` to build content out of multiple parts.
This is preferred to interpolating into a string because interpolation destroys the original objects and takes away the possibility for automatic rounding or other special post-processing or display behavior.
```@example
using SummaryTables
using Statistics
data = (; x = [1, 2, 3])
function custom_analysis(column)
(
minimum(column) => "Minimum",
maximum(column) => "Maximum",
Concat(mean(column), " (", std(column), ")") => "Mean (SD)",
)
end
table_one(data, :x => custom_analysis)
```
Finally, all three parts, variable, analysis function and label can be combined as well:
```@example
using SummaryTables
using Statistics
data = (; x = [1, 2, 3])
function custom_analysis(column)
(
minimum(column) => "Minimum",
maximum(column) => "Maximum",
Concat(mean(column), " (", std(column), ")") => "Mean (SD)",
)
end
table_one(data, :x => custom_analysis => "Variable X")
```
## Keyword: `groupby`
The `groupby` keyword takes a vector of column name symbols with optional labels.
If there is only one grouping column, the vector can be omitted.
Each analysis is then computed separately for each group.
```@example
using SummaryTables
data = (; x = [1, 2, 3, 4, 5, 6], y = ["a", "a", "a", "b", "b", "b"])
table_one(data, :x, groupby = :y)
```
In this example, we rename the grouping column:
```@example
using SummaryTables
data = (; x = [1, 2, 3, 4, 5, 6], y = ["a", "a", "a", "b", "b", "b"])
table_one(data, :x, groupby = :y => "Column Y")
```
If there are multiple grouping columns, they are shown in a nested fashion, with the first group at the highest level:
```@example
using SummaryTables
data = (;
x = [1, 2, 3, 4, 5, 6],
y = ["a", "a", "b", "b", "c", "c"],
z = ["d", "e", "d", "e", "d", "e"],
)
table_one(data, :x, groupby = [:y, :z => "Column Z"])
```
## Keyword: `show_n`
When `show_n` is set to `true`, the size of each group is shown under its name.
```@example
using SummaryTables
data = (; x = [1, 2, 3, 4, 5, 6], y = ["a", "a", "a", "a", "b", "b"])
table_one(data, :x, groupby = :y, show_n = true)
```
## Keyword: `show_overall`
When `show_overall` is set to `false`, the column summarizing all groups together is hidden.
Use this only when `groupby` is set, otherwise the resulting table will be empty.
```@example
using SummaryTables
data = (; x = [1, 2, 3, 4, 5, 6], y = ["a", "a", "a", "a", "b", "b"])
table_one(data, :x, groupby = :y, show_overall = false)
```
## Keyword: `sort`
By default, group entries are sorted.
If you need to maintain the order of entries from your dataset, set `sort = false`.
Notice how in the following two examples, the group indices are `"dos"`, `"tres"`, `"uno"` when sorted, but `"uno"`, `"dos"`, `"tres"` when not sorted.
If we want to preserve the natural order of these groups ("uno", "dos", "tres" meaning "one", "two", "three" in Spanish but having a different alphabetical order) we need to set `sort = false`.
```@example sort
using SummaryTables
data = (; x = [1, 2, 3, 4, 5, 6], y = ["uno", "uno", "dos", "dos", "tres", "tres"])
table_one(data, :x, groupby = :y)
```
```@example sort
table_one(data, :x, groupby = :y, sort = false)
```
!!! warning
If you have multiple groups, `sort = false` can lead to splitting of higher-level groups if they are not correctly ordered in the source data.
Compare the following two tables.
In the second one, the group "A" is split by "B" so the label appears twice.
```@example bad_sort
using SummaryTables
data = (; x = [1, 2, 3, 4, 5, 6], y = ["A", "A", "B", "B", "B", "A"], z = ["C", "C", "C", "D", "D", "D"])
table_one(data, :x, groupby = [:y, :z])
```
```@example bad_sort
table_one(data, :x, groupby = [:y, :z], sort = false)
``` | SummaryTables | https://github.com/PumasAI/SummaryTables.jl.git |
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | docs/make.jl | code | 79 | using Documenter, TopologyPreprocessing
makedocs(sitename="My Documentation")
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | src/Barcodes.jl | code | 20085 | using Eirene
using Pipe
#%%
# ==============================
# ======== Tested code ========
#%%
# ================================
# ======== Untested code ========
"""
    get_barcodes(results_eirene::Dict, max_dim::Integer; min_dim::Int=1, sorted::Bool=false)

Call `Eirene.barcode` for every dimension from `min_dim` up to `max_dim` and
collect the results into a `Vector{Matrix{Float64}}`.

Each matrix holds the barcode of one dimension: the first column contains
birth steps, the second column death steps. When `sorted` is true, every
barcode is ordered by birth time.
"""
function get_barcodes(results_eirene::Dict, max_dim::Integer; min_dim::Int = 1, sorted::Bool = false)
    collected = Matrix{Float64}[]
    for dim in min_dim:max_dim
        bars = barcode(results_eirene, dim = dim)
        if isempty(bars) && dim > 1
            # NOTE(review): `dim-1` as a vector index is only valid when
            # min_dim == 1 — TODO confirm intent for other values of min_dim.
            bars = zeros(size(collected[dim-1]))
        end
        if sorted
            bars = bars[sortperm(bars[:, 1]), :]
        end
        push!(collected, bars)
    end
    return collected
end
"""
    plot_barcodes!(barcodes::Vector, plot_ref;
                   min_dim::Integer=1, barcodes_labels::Bool=true,
                   default_labels::Bool=true, sort_by_birth::Bool=false,
                   alpha::Float64=1.0, kwargs...)

Draw the barcodes stored in `barcodes` onto the existing plot `plot_ref` and
return the plot handle. Each bar is drawn as a horizontal segment from birth
to death, stacked per dimension (dimensions start at `min_dim`).

Infinite death values are replaced by 1 before drawing. When `sort_by_birth`
is true the barcodes are sorted in place by birth time. Remaining `kwargs`
are forwarded to Plots.jl (e.g. `title`, `legend`, `size`, `lw`/`linewidth`,
`xlabel`, `ylabel`).
"""
function plot_barcodes!(barcodes::Vector, plot_ref;
    min_dim::Integer = 1,
    barcodes_labels::Bool = true,
    default_labels::Bool = true,
    sort_by_birth::Bool = false,
    alpha::Float64 = 1.0,
    kwargs...)#; plot_size = (width=1200, height=800),
    # TODO add change of x label based on x values- so it is either edge density for 0:1 range values or Filtration step otherwise
    # TODO add ordering of bars to firstly birth time, then death time
    # TODO dims should be given as range, not a single min dim
    # barcodes = all_barcodes_geom
    max_dim = size(barcodes, 1) - (1 - min_dim) # TODO not sure if this is correct solution
    if min_dim > max_dim
        throw(DomainError(
            min_dim,
            "\'min_dim\' must be greater that maximal dimension in \'bettis\'",
        ))
    end
    # Honour an explicit line width from kwargs, otherwise use a thick default.
    lw_pos = findfirst(x -> x == :lw || x == :linewidth, keys(kwargs))
    if !isnothing(lw_pos)
        lw = kwargs[lw_pos]
    else
        lw = 8
    end
    colors_set = get_bettis_color_palete(min_dim = min_dim)
    # Compute a contiguous vertical slot range per dimension so the bars of
    # successive dimensions are stacked without overlap.
    dims_indices = 1:length(min_dim:max_dim)
    all_sizes = [size(barcodes[k], 1) for k = dims_indices]
    ranges_sums = vcat(0, [sum(all_sizes[1:k]) for k = dims_indices])
    y_val_ranges = [ranges_sums[k]+1:ranges_sums[k+1] for k = dims_indices]
    if sort_by_birth
        sort_barcodes!(barcodes, min_dim, max_dim)
    end
    total_cycles = sum([size(barcodes[k], 1) for (k, dim) in enumerate(min_dim:max_dim)])
    # for p = min_dim:(max_dim) #TODO ths can not be starting from min_dim, because it may be 0
    for (p, dim) = enumerate(min_dim:max_dim)# 1:(max_dim) #TODO ths can not be starting from min_dim, because it may be 0
        # @info p, dim
        args = (lc = colors_set[p], linewidth = lw)
        # b = barcodes[p][1:1:(end-1),:]
        b = barcodes[p][:, :]
        # Replace infinite death times so the bars stay drawable.
        all_infs = findall(x -> isinf(x), b)
        for k in all_infs
            b[k] = 1
        end
        # if dim == 0
        #     b = sort(b, dims = 1)
        # end
        total_bars = size(b, 1)
        y_vals = [[k, k] for k in y_val_ranges[p]]
        lc = colors_set[p]
        for k = 1:total_bars
            # TODO change label to empty one
            plot!(b[k, :], y_vals[k]; label = "", lc = lc, alpha = alpha)#; args...)
        end
        # NOTE(review): dead branch — `false &&` short-circuits, and
        # `betti_labels` is not defined in this function (parameter is
        # `barcodes_labels`); kept as-is to preserve behaviour.
        if false && betti_labels
            label = "Ξ²$(dim)"
        end
        # plot!(label=label)
    end
    # display(plot_ref)
    legend_pos = findfirst(x -> x == :legend, keys(kwargs))
    if !isnothing(legend_pos)
        plot!(legend = kwargs[legend_pos])
    else
        all_labels = reshape(["Ξ²$(k)" for k in 1:max_dim], (1, max_dim))
        plot!(label = all_labels)
        plot!(legend = true)
    end
    # Explicit axis labels from kwargs win over the defaults.
    x_pos = findfirst(x -> x == :xlabel, keys(kwargs))
    y_pos = findfirst(x -> x == :ylabel, keys(kwargs))
    if !isnothing(x_pos)
        xlabel!(kwargs[x_pos])
    elseif default_labels
        xlabel!("Birth/Death")
    end
    if !isnothing(y_pos)
        ylabel!(kwargs[y_pos])
    elseif default_labels
        ylabel!("Cycle")
    end
    # ylims!(0, 2*total_cycles)
    ylims!(0, total_cycles + 2)
    return plot_ref
end
"""
    plot_barcodes(barcodes::Vector; kwargs...)

Create a new plot and draw `barcodes` on it via `plot_barcodes!`. All keyword
arguments are forwarded both to the plot constructor and to the in-place
plotting routine; returns the plot handle.
"""
function plot_barcodes(barcodes::Vector; kwargs...)
    empty_canvas = plot(; kwargs...)
    return plot_barcodes!(barcodes, empty_canvas; kwargs...)
end
"""
    sort_barcodes!(barcodes, min_dim, max_dim)

Sort, in place, the rows of every barcode matrix in `barcodes` by birth time
(first column), for the dimensions `min_dim:max_dim`. Returns `barcodes`.
"""
function sort_barcodes!(barcodes, min_dim, max_dim)
    for (dim_index, dim) in enumerate(min_dim:max_dim)
        permutation = sortperm(barcodes[dim_index][:, 1])
        # Assign back into the caller's container so the mutation is visible;
        # the previous implementation only rebound a local shallow copy and
        # therefore never changed the input.
        barcodes[dim_index] = barcodes[dim_index][permutation, :]
    end
    return barcodes
end
# TODO This has to be imported from other file
# function get_bettis_color_palete(; min_dim = 1, use_set::Integer = 1)
# """
# function get_bettis_color_palete()
#
# Generates vector with colours used for Betti plots. Designed for Betti plots consistency.
# """
# # TODO what does the number in the function below is used for?
#
# if use_set == 1
# cur_colors = [Gray(bw) for bw = 0.0:0.025:0.5]
# if min_dim == 0
# colors_set = [RGB(87 / 256, 158 / 256, 0 / 256)]
# else
# colors_set = RGB[]
# end
# colors_set = vcat(
# colors_set,
# [
# RGB(255 / 256, 206 / 256, 0 / 256),
# RGB(248 / 256, 23 / 256, 0 / 256),
# RGB(97 / 256, 169 / 256, 255 / 256),
# RGB(163 / 256, 0 / 256, 185 / 256),
# RGB(33 / 256, 96 / 256, 45 / 256),
# RGB(4 / 256, 0 / 256, 199 / 256),
# RGB(135 / 256, 88 / 256, 0 / 256),
# ],
# cur_colors,
# )
# else
# use_set == 2
# cur_colors = get_color_palette(:auto, 1)
# cur_colors3 = get_color_palette(:lightrainbow, 1)
# cur_colors2 = get_color_palette(:cyclic1, 1)
# if min_dim == 0
# # colors_set = [cur_colors[3], cur_colors[5], [:red], cur_colors[1]] #cur_colors[7],
# colors_set = [cur_colors3[3], cur_colors[5], cur_colors3[end], cur_colors[1]] #cur_colors[7],
# else
# colors_set = [cur_colors[5], cur_colors3[end], cur_colors[1]] #cur_colors[7],
# # colors_set = [cur_colors[5], [:red], cur_colors[1], cur_colors[14]]
# end
# # for c = [collect(11:25);]
# # push!(colors_set, cur_colors2[c])
# # end
# colors_set = vcat(colors_set, [cur_colors2[c] for c in [collect(11:25);]])
# end
#
# return colors_set
# end
"""
    get_birth_death_ratio(barcodes; max_dim::Integer=3)

For each dimension `1:max_dim`, compute the element-wise death/birth ratio of
the corresponding barcode (second column divided by first column).
"""
function get_birth_death_ratio(barcodes; max_dim::Integer = 3)
    return [bars[:, 2] ./ bars[:, 1] for bars in barcodes[1:max_dim]]
end
"""
    get_barcode_lifetime(barcodes; max_dim::Integer=3)

For each dimension `1:max_dim`, compute the lifetime (death minus birth) of
every bar in the corresponding barcode.
"""
function get_barcode_lifetime(barcodes; max_dim::Integer = 3)
    return [bars[:, 2] .- bars[:, 1] for bars in barcodes[1:max_dim]]
end
#%%
"""
    get_barcode_max_lifetime(lifetimes)

Return an `n×1` matrix whose `k`-th entry is the maximal lifetime found in
`lifetimes[k]`, i.e. one maximum per dimension.
"""
function get_barcode_max_lifetime(lifetimes)
    maxima = zeros(length(lifetimes), 1)
    for (k, lifes) in enumerate(lifetimes)
        maxima[k] = findmax(lifes)[1]
    end
    return maxima
end
"""
    boxplot_birth_death(birth_death_ratio_Ο, min_dim::Integer, max_dim::Integer)

Box-plot (with an overlaid dot-plot) of the death/birth ratios, one box per
dimension, and return the plot handle.

NOTE(review): `min_dim` and `max_dim` are currently unused — the plot always
covers every element of `birth_death_ratio_Ο`; TODO confirm whether they
should restrict the plotted dimensions.
"""
function boxplot_birth_death(birth_death_ratio_Ο, min_dim::Integer, max_dim::Integer)
    bplot = StatsPlots.boxplot()
    # Shared palette keeps the colours consistent with the Betti curve plots.
    data_colors = get_bettis_color_palete()
    total_plots = size(birth_death_ratio_Ο, 1)
    for k in 1:total_plots
        StatsPlots.boxplot!(bplot, birth_death_ratio_Ο[k], labels = "Ξ²$(k)", color = data_colors[k])
        StatsPlots.dotplot!(bplot, birth_death_ratio_Ο[k], color = data_colors[k])
    end
    return bplot
end
"""
    boxplot_lifetime(barcode_lifetime, min_dim::Integer, max_dim::Integer)

Box-plot (with an overlaid dot-plot) of the bar lifetimes, one box per
dimension, and return the plot handle.

NOTE(review): `min_dim` and `max_dim` are currently unused — the plot always
covers every element of `barcode_lifetime`; TODO confirm intended use.
"""
function boxplot_lifetime(barcode_lifetime, min_dim::Integer, max_dim::Integer)
    bplot = StatsPlots.boxplot()
    # Shared palette keeps the colours consistent with the Betti curve plots.
    data_colors = get_bettis_color_palete()
    total_plots = size(barcode_lifetime, 1)
    for k in 1:total_plots
        StatsPlots.boxplot!(bplot, barcode_lifetime[k], labels = "Ξ²$(k)", color = data_colors[k])
        StatsPlots.dotplot!(bplot, barcode_lifetime[k], color = data_colors[k])
    end
    return bplot
end
#%%
"""
    get_barcode_max_db_ratios(db_ratos)

Return an `n×1` matrix whose `k`-th entry is the maximal death/birth ratio in
`db_ratos[k]`, i.e. one maximum per dimension.
"""
function get_barcode_max_db_ratios(db_ratos)
    maxima = zeros(length(db_ratos), 1)
    for (k, ratios) in enumerate(db_ratos)
        maxima[k] = findmax(ratios)[1]
    end
    return maxima
end
"""
    get_normalised_barcodes(barcodes, betti_numbers::Array)

Rescale `barcodes` so its values fall into the `[0, 1]` range by dividing by
the total number of filtration steps taken from `betti_numbers`.

If `betti_numbers` is a vector of per-dimension arrays, the step count is the
number of rows of its first element; otherwise it is the number of rows of
`betti_numbers` itself.
"""
function get_normalised_barcodes(barcodes, betti_numbers::Array)
    # `isa Vector{<:Array}` replaces the former `typeof(...) == Vector`
    # comparison, which never held because `typeof` returns a concrete type
    # (e.g. Vector{Matrix{Float64}}), so the Vector branch was dead code.
    if betti_numbers isa Vector{<:Array}
        total_steps = size(betti_numbers[1], 1)
    else
        total_steps = size(betti_numbers, 1)
    end
    return barcodes ./ total_steps
end
"""
    get_normalised_barcodes_collection(barcodes_collection, bettis_collection)

Apply `get_normalised_barcodes` pairwise to the two collections and return
the collection of normalised barcodes.

Throws `DimensionMismatch` when the collections differ in length.
"""
function get_normalised_barcodes_collection(barcodes_collection, bettis_collection)
    if size(barcodes_collection, 1) != size(bettis_collection, 1)
        # The original threw a malformed 3-argument BoundsError, which itself
        # raised a MethodError; DimensionMismatch states the problem directly.
        throw(DimensionMismatch("Both collections must have same number of elements"))
    end
    total_collections = size(barcodes_collection, 1)
    normed_collection = deepcopy(barcodes_collection)
    for k = 1:total_collections
        normed_collection[k] = get_normalised_barcodes(barcodes_collection[k], bettis_collection[k])
    end
    return normed_collection
end
"""
    plot_bd_diagram(barcodes; dims=1:length(barcodes), use_js::Bool=false,
                    class_sizes=[], class_labels=[],
                    normilised_diagonal::Bool=true, alpha=0.4, kwargs...)

Create a birth/death scatter diagram from `barcodes` and return the plot
handle. One scatter series is drawn per dimension in `dims`, plus a diagonal
reference line. When `normilised_diagonal` is true the axes span `[0, 1]`;
otherwise they are scaled to the largest finite birth/death value. If
`use_js` is true the Plotly backend is selected, otherwise GR.

`class_labels`/`class_sizes` optionally annotate selected bars with class
information. Remaining `kwargs` are forwarded to Plots.jl.
"""
function plot_bd_diagram(barcodes; dims = 1:length(barcodes),
    use_js::Bool = false,
    class_sizes = [],
    class_labels = [],
    normilised_diagonal::Bool = true,
    alpha=0.4,
    kwargs...)
    # TODO max min should be ready to use from input data- might be better to have better structures as an inupt
    # max_dim = size(barcodes, 1)
    # min_dim = findmin(dims)[1]
    min_dim = dims[1]
    max_dim = dims[end]
    if max_dim > length(barcodes)
        throw(DimensionMismatch("Can not have dims range larger than barcodes length"))
    end
    # all_dims = min_dim:max_dim
    # if findmax(dims)[1] > max_dim
    #     throw(DomainError(
    #         min_dim,
    #         "\'dims\' must be less than maximal dimension in \'bettis\'",
    #     ))
    # end
    # lw_pos = findfirst(x -> x == :lw || x == :linewidth, keys(kwargs))
    # if !isnothing(lw_pos)
    #     lw = kwargs[lw_pos]
    # else
    #     lw = 2
    # end
    colors_set = TopologyPreprocessing.get_bettis_color_palete(min_dim = min_dim)
    # Select the plotting backend.
    if use_js
        plotly()
    else
        gr()
    end
    plot_ref = plot(; xlims = (0, 1), ylims = (0, 1), kwargs...)
    # Add diagonal
    if normilised_diagonal
        max_coord = 1
    else
        # Largest finite coordinate over all requested dimensions.
        max_x = max([k for k in vcat([barcodes[d][:,1] for (d, dim) in dims|> enumerate]...) if !isinf(k) ]...)
        max_y = max([k for k in vcat([barcodes[d][:,2] for (d, dim) in dims|> enumerate]...) if !isinf(k) ]...)
        max_coord = max(max_x, max_y)
    end
    scaling_factor = 1.05
    min_val = -0.05
    plot!([0, scaling_factor*max_coord], [0, scaling_factor*max_coord], label = "")
    xlims!(min_val, scaling_factor*max_coord)
    ylims!(min_val, scaling_factor*max_coord)
    for (p, dim) in enumerate(dims)
        # colors_set[p]
        my_vec = barcodes[p]
        # TODO class size is not a default and basic bd diagram property- should be factored out to other function
        if class_labels != [] && class_sizes != []
            labels = ["class/size $(class_labels[k])/$(class_sizes[class_labels[k]])" for k in 1:size(class_labels, 1)]
        elseif class_sizes == []
            labels = ["class: $(k)" for k in 1:size(my_vec, 1)]
        else
            labels = ["class/size $(k)/$(class_sizes[k])" for k in 1:size(my_vec, 1)]
        end
        args = (color = colors_set[p],
            # linewidth = lw,
            label = "Ξ²$(dim)",
            aspect_ratio = 1,
            size = (600, 600),
            legend = :bottomright,
            framestyle=:origin,
            alpha=alpha,
            # hover = labels,
            kwargs...)
        if class_labels != []
            # NOTE(review): the line below is a no-op expression — presumably
            # a missing `&&` with the condition above; kept to preserve behaviour.
            class_sizes != []
            for x = class_labels
                plot!(my_vec[Int(x), 1], my_vec[Int(x), 2], seriestype = :scatter; args...)
            end
        end
        plot!(my_vec[:, 1], my_vec[:, 2], seriestype = :scatter; args...)
    end
    xlabel!("birth")
    ylabel!("death")
    return plot_ref
end
#
"""
    plot_all_bd_diagrams(barcodes_collection;
                         min_dim::Integer=1, betti_labels::Bool=true,
                         default_labels::Bool=true, all_legend=false,
                         my_alpha=0.12, aspect_ratio=1,
                         base_w=600, base_h=600, kwargs...)

Create one birth/death scatter diagram per dimension, overlaying all entries
of `barcodes_collection`, and return a `Dict` mapping the dimension label
(e.g. "Ξ²1") to its plot handle.

Remaining `kwargs` are forwarded to Plots.jl (e.g. `title`, `lw`,
`xlabel`, `ylabel`).
"""
function plot_all_bd_diagrams(barcodes_collection;
    min_dim::Integer = 1,
    betti_labels::Bool = true,
    default_labels::Bool = true,
    all_legend = false,
    my_alpha = 0.12,
    aspect_ratio = 1,
    base_w = 600,
    base_h = 600,
    kwargs...)
    # Number of dimensions is taken from the first collection entry.
    total_dims = size(barcodes_collection[1], 1)
    # Honour an explicit line width from kwargs, otherwise default to 2.
    lw_pos = findfirst(x -> x == :lw || x == :linewidth, keys(kwargs))
    if !isnothing(lw_pos)
        lw = kwargs[lw_pos]
    else
        lw = 2
    end
    title_pos = findfirst(x -> x == :title, keys(kwargs))
    if !isnothing(title_pos)
        my_title = kwargs[title_pos]
    else
        my_title = "Birth death diagram"
    end
    colors_set = TopologyPreprocessing.get_bettis_color_palete(min_dim = min_dim)
    plot_dict = Dict()
    for b = 1:total_dims
        args = (lc = colors_set[b],
            linewidth = lw,
            label = false,
            aspect_ratio = aspect_ratio,
            size = (base_w, base_h),
            kwargs...)
        plot_dict["Ξ²$(b)"] = scatter(; xlims = (0, 1), ylims = (0, 1), dpi = 300, args...)
        # Overlay every collection entry's barcode for this dimension.
        for bars = barcodes_collection
            barcode = bars[b]
            scatter!(barcode[:, 1], barcode[:, 2],
                markeralpha = my_alpha,
                markercolor = colors_set[b],
                dpi = 300)
        end
        plot!(legend = all_legend)
        plot!(title = (my_title * ", Ξ²$(b)"))
        # legend_pos = findfirst(x -> x == :legend, keys(kwargs))
        # if !isnothing(legend_pos)
        #     plot!(legend = kwargs[legend_pos])
        # else
        #     plot!(legend = betti_labels)
        # end
        # Explicit axis labels from kwargs win over the defaults.
        x_pos = findfirst(x -> x == :xlabel, keys(kwargs))
        y_pos = findfirst(x -> x == :ylabel, keys(kwargs))
        if !isnothing(x_pos)
            xlabel!(kwargs[x_pos])
        elseif default_labels
            xlabel!("Birth")
        end
        if !isnothing(y_pos)
            ylabel!(kwargs[y_pos])
        elseif default_labels
            ylabel!("Death")
        end
    end
    return plot_dict
end
## ===-
# Simpler plotting
"""
    plot_simple_bd_diagram(barcodes; dims=1:length(barcodes), max_bd=0,
                           use_js::Bool=false, kwargs...)

Create a minimal birth/death scatter diagram from `barcodes` (one series per
dimension in `dims`, plus a diagonal reference line) and return the plot
handle.

When `max_bd > 0` the diagonal spans `[0, max_bd]`; otherwise it spans up to
the largest death value found in the data. If `use_js` is true the Plotly
backend is selected, otherwise GR. Remaining `kwargs` are forwarded to
Plots.jl.
"""
function plot_simple_bd_diagram(barcodes; dims = 1:length(barcodes), max_bd = 0, use_js::Bool = false,
    kwargs...)
    # TODO max min should be ready to use from input data- might be better to have better structures as an inupt
    max_dim = size(barcodes, 1)
    min_dim = findmin(dims)[1]
    # all_dims = min_dim:max_dim
    if findmax(dims)[1] > max_dim
        throw(DomainError(
            min_dim,
            "\'dims\' must be less than maximal dimension in \'bettis\'",
        ))
    end
    # Honour an explicit line width from kwargs, otherwise default to 2.
    lw_pos = findfirst(x -> x == :lw || x == :linewidth, keys(kwargs))
    if !isnothing(lw_pos)
        lw = kwargs[lw_pos]
    else
        lw = 2
    end
    colors_set = TopologyPreprocessing.get_bettis_color_palete(min_dim = 1)
    # Select the plotting backend.
    if use_js
        plotly()
    else
        gr()
    end
    plot_ref = plot(; kwargs...)
    for (p, dim) in dims |> enumerate
        # colors_set[p]
        my_vec = barcodes[p]
        args = (color = colors_set[p],
            linewidth = lw,
            aspect_ratio = 1,
            size = (600, 600),
            legend = :bottomright,
            kwargs...)
        # scatter!(my_vec[:, 1], my_vec[:, 2], args...)
        plot!(my_vec[:, 1], my_vec[:, 2], seriestype = :scatter; args...)
    end
    # Add diagonal
    if max_bd > 0
        max_x = max_bd
        max_y = max_bd
        plot!([0, max_y], [0, max_y], label = "")
    else
        all_births = vcat([barcodes[d][:, 1] for d in dims]...)
        all_deaths = vcat([barcodes[d][:, 2] for d in dims]...)
        max_x = findmax(all_births)[1]
        max_y = findmax(all_deaths)[1]
        plot!([0, max_y], [0, max_y], label = "")
    end
    return plot_ref
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | src/BettiCurves.jl | code | 28399 | # ==============================
# ======== Tested code ========
using Eirene
using Plots
using StatsPlots
#%%
"""
    get_bettis(results_eirene::Dict, max_dim::Integer; min_dim::Int=1)

Call `Eirene.betticurve` for every dimension from `min_dim` up to `max_dim`
and collect the results into a `Vector{Matrix{Float64}}`.

Each matrix has shape `(n, 2)`: the first column holds the filtration steps,
the second the Betti curve values at those steps.
"""
function get_bettis(results_eirene::Dict, max_dim::Integer; min_dim::Int = 1)
    collected = Matrix{Float64}[]
    for dim in min_dim:max_dim
        curve = betticurve(results_eirene, dim = dim)
        if isempty(curve) && dim > 1
            # NOTE(review): `dim-1` as a vector index is only valid when
            # min_dim == 1 — TODO confirm intent for other values of min_dim.
            curve = zeros(size(collected[dim-1]))
        end
        push!(collected, curve)
    end
    return collected
end
# TODO add get_bettis_from_matrix, to wrap C= eirene...; get bettis
#%%
"""
    normalise_bettis(bettis::Vector)
    normalise_bettis(bettis::Array)

Normalise the step column (first column) of Betti curves to the `[0, 1]`
range. `bettis` is either a vector of per-dimension curve matrices or a
single curve matrix. The input is left unmodified.
"""
function normalise_bettis(bettis::Vector)
    @debug "Vector version"
    # deepcopy, not copy: a shallow copy shares the inner matrices, so the
    # in-place `/=` below would silently mutate the caller's data.
    norm_bettis = deepcopy(bettis)
    max_dim = size(norm_bettis)[1]
    for d = 1:max_dim
        if !isempty(norm_bettis[d])
            norm_bettis[d][:, 1] /= findmax(norm_bettis[d][:, 1])[1]
        end
    end
    return norm_bettis
end
#%%
# Single-curve variant: normalise the step column of one curve matrix to
# [0, 1]; returns a new matrix, the input is untouched.
function normalise_bettis(bettis::Array)
    @debug "Array version"
    scaled = copy(bettis)
    if !isempty(scaled)
        peak = findmax(scaled[:, 1])[1]
        scaled[:, 1] /= peak
    end
    return scaled
end
#%%
# function vectorize_bettis(betti_curves::Array{Matrix{Float64,2}})
"""
    vectorize_bettis(betti_curves::Vector{Array{Float64,2}})

Stack the value columns (second column) of the per-dimension Betti curves
into a single `(n, k)` matrix, where `n` is the number of steps and `k` the
number of curves.
"""
function vectorize_bettis(betti_curves::Vector{Array{Float64,2}})
    return reduce(hcat, [curve[:, 2] for curve in betti_curves])
end
#%%
@deprecate vectorize_bettis(eirene_results::Dict, maxdim::Integer, mindim::Integer) vectorize_bettis(betti_curves)
# ===
#%%
"""
    get_vectorized_bettis(results_eirene::Dict, max_dim::Integer; min_dim::Int=1)

Compute the Betti curves for dimensions `min_dim:max_dim` from an Eirene
result and return them stacked column-wise in one matrix (one column per
dimension).
"""
function get_vectorized_bettis(results_eirene::Dict, max_dim::Integer; min_dim::Int = 1)
    curves = get_bettis(results_eirene, max_dim, min_dim = min_dim)
    return vectorize_bettis(curves)
end
# ==
#%%
"""
    plot_bettis(bettis::Vector; min_dim::Integer=1, use_edge_density::Bool=true,
                betti_labels::Bool=true, default_labels::Bool=true, kwargs...)

Plot the Betti curves stored in `bettis` (one `(n, 2)` matrix per dimension)
and return the plot handle. When `use_edge_density` is true the step column
of every curve is rescaled to `[0, 1]`.

NOTE(review): the rescaling divides `bettis[p][:, 1]` in place, so the
caller's data is modified — TODO confirm this is intended.
Remaining `kwargs` are forwarded to Plots.jl (e.g. `title`, `legend`,
`size`, `lw`/`linewidth`, `xlabel`, `ylabel`).
"""
function plot_bettis(bettis::Vector;
    min_dim::Integer = 1,
    use_edge_density::Bool=true,
    betti_labels::Bool = true,
    default_labels::Bool = true,
    kwargs...)#; plot_size = (width=1200, height=800),
    max_dim = size(bettis, 1)
    all_dims = 1:max_dim
    if min_dim > max_dim
        throw(DomainError(
            min_dim,
            "\'min_dim\' must be greater that maximal dimension.",
        ))
    end
    # Honour an explicit line width from kwargs, otherwise default to 2.
    lw_pos = findfirst(x -> x == :lw || x == :linewidth, keys(kwargs))
    if !isnothing(lw_pos)
        lw = kwargs[lw_pos]
    else
        lw = 2
    end
    # Create iterator for all loops
    all_iterations = 1:(max_dim) #TODO ths can not be starting from min_dim, because it may be 0
    if use_edge_density
        # NOTE(review): this rescales the input curves in place.
        for p = all_iterations
            max_step = findmax(bettis[p][:, 1])[1]
            bettis[p][:, 1] ./= max_step
        end
    end
    colors_set = get_bettis_color_palete(min_dim=min_dim)
    plot_ref = plot(; kwargs...)
    # for p = min_dim:(max_dim) #TODO ths can not be starting from min_dim, because it may be 0
    # for p = all_iterations
    for (index, p) in enumerate(min_dim:max_dim)
        args = (lc = colors_set[index], linewidth = lw)
        if betti_labels
            args = (args..., label = "Ξ²$(p)")
        end
        plot!(bettis[index][:, 1], bettis[index][:, 2]; args...)
    end
    legend_pos = findfirst(x -> x == :legend, keys(kwargs))
    if !isnothing(legend_pos)
        plot!(legend = kwargs[legend_pos])
    else
        plot!(legend = betti_labels)
    end
    # Explicit axis labels from kwargs win over the defaults.
    x_pos = findfirst(x -> x == :xlabel, keys(kwargs))
    y_pos = findfirst(x -> x == :ylabel, keys(kwargs))
    if !isnothing(x_pos)
        xlabel!(kwargs[x_pos])
    elseif default_labels
        xlabel!("Edge density")
    end
    if !isnothing(y_pos)
        ylabel!(kwargs[y_pos])
    elseif default_labels
        ylabel!("Number of cycles")
    end
    # set tlims to integer values
    max_ylim = findmax(ceil.(Int, ylims(plot_ref)))[1]
    if max_ylim <=3
        ylims!((0, 3))
    end
    if use_edge_density
        xlims!((0, 1))
    end
    return plot_ref
end
"""
    plot_bettis(bettis::Array; dims_range=1:size(bettis,2),
                use_edge_density::Bool=true, betti_labels::Bool=true,
                default_labels::Bool=true, normalised=true, kwargs...)

Plot vectorized Betti curves (one column of `bettis` per dimension) and
return the plot handle. When `normalised` is true the x axis spans `[0, 1]`,
otherwise it counts filtration steps.

Remaining `kwargs` are forwarded to Plots.jl (e.g. `title`, `legend`,
`size`, `lw`/`linewidth`, `xlabel`, `ylabel`).
"""
function plot_bettis(bettis::Array;
    # min_dim::Integer = 1,
    dims_range=1:size(bettis,2),
    use_edge_density::Bool=true,
    betti_labels::Bool = true,
    default_labels::Bool = true,
    normalised=true,
    kwargs...)#; plot_size = (width=1200, height=800),
    max_dim = dims_range[end]
    min_dim = dims_range[1]
    if min_dim > max_dim
        throw(DomainError(
            min_dim,
            "\'min_dim\' must be greater that maximal dimension in \'bettis\'",
        ))
    end
    total_steps = size(bettis, 1)
    # Choose x-axis values: normalised to [0, 1] or raw step counts.
    if normalised
        x_vals = range(0, stop=1, length=total_steps)
    else
        x_vals = range(0, stop=total_steps)
    end
    # Honour an explicit line width from kwargs, otherwise default to 2.
    lw_pos = findfirst(x -> x == :lw || x == :linewidth, keys(kwargs))
    if !isnothing(lw_pos)
        lw = kwargs[lw_pos]
    else
        lw = 2
    end
    # if use_edge_density
    #     # for p = 1:(max_dim) #TODO ths can not be starting from min_dim, because it may be 0
    #     for (index, p) in enumerate(min_dim:max_dim)
    #         max_step = findmax(bettis[:, 1])[1]
    #         bettis[p][:, 1] ./=max_step
    #     end
    # end
    colors_set = get_bettis_color_palete(min_dim=min_dim)
    plot_ref = plot(; kwargs...)
    # for p = min_dim:(max_dim) #TODO ths can not be starting from min_dim, because it may be 0
    # for p = 1:(max_dim) #TODO ths can not be starting from min_dim, because it may be 0
    for (index, p) in enumerate(min_dim:max_dim)
        args = (lc = colors_set[index], linewidth = lw)
        if betti_labels
            args = (args..., label = "Ξ²$(p)")
        end
        plot!(x_vals, bettis[:, index]; args...)
    end
    legend_pos = findfirst(x -> x == :legend, keys(kwargs))
    if !isnothing(legend_pos)
        plot!(legend = kwargs[legend_pos])
    else
        plot!(legend = betti_labels)
    end
    # Explicit axis labels from kwargs win over the defaults.
    x_pos = findfirst(x -> x == :xlabel, keys(kwargs))
    y_pos = findfirst(x -> x == :ylabel, keys(kwargs))
    if !isnothing(x_pos)
        xlabel!(kwargs[x_pos])
    elseif default_labels
        xlabel!("Edge density")
    end
    if !isnothing(y_pos)
        ylabel!(kwargs[y_pos])
    elseif default_labels
        ylabel!("Number of cycles")
    end
    # set tlims to integer values
    max_ylim = findmax(ceil.(Int, ylims(plot_ref)))[1]
    if max_ylim <=3
        ylims!((0, 3))
    end
    if use_edge_density
        xlims!((0, 1))
    end
    return plot_ref
end
# ======= Untested code
# TODO add default kwargs paring function -> parse_kwargs()
"""
    plot_all_bettis(bettis_collection; min_dim::Integer=1,
                    betti_labels::Bool=true, default_labels::Bool=true,
                    normalised=true, kwargs...)

Overlay the Betti curves of every element of `bettis_collection` (each a
vectorized curves matrix, one column per dimension) on a single plot with
low-alpha lines, and return the plot handle.

Remaining `kwargs` are forwarded to Plots.jl.
"""
function plot_all_bettis(bettis_collection;
    min_dim::Integer = 1,
    betti_labels::Bool = true,
    default_labels::Bool = true,
    normalised=true,
    kwargs...)#; plot_size = (width=1200, height=800),
    # Number of dimensions is taken from the first collection entry.
    total_dims = size(bettis_collection[1],2)
    # Honour an explicit line width from kwargs, otherwise default to 2.
    lw_pos = findfirst(x -> x == :lw || x == :linewidth, keys(kwargs))
    if !isnothing(lw_pos)
        lw = kwargs[lw_pos]
    else
        lw = 2
    end
    colors_set = get_bettis_color_palete(min_dim=min_dim)
    # Common y-limit so all overlaid curves share the same scale.
    max_y_val = find_max_betti(bettis_collection)
    plot_ref = plot(; kwargs...)
    for b = 1:total_dims
        args = (lc = colors_set[b], linewidth = lw, alpha=0.12,label=false, ylims=(0,max_y_val))
        for bettis = bettis_collection
            betti_vals = bettis[:,b]
            total_steps = size(bettis, 1)
            x_vals = range(0, stop=1, length=total_steps)
            plot!(x_vals, betti_vals; args...)
        end
        # my_label = "Ξ²$(b)"
        # betti_vals = results_d["bettis_collection"][:hc][end]
        # x_vals = range(0, stop=1, length=size(betti_vals, 1))
        # plot!(x_vals, betti_vals; lc = colors_set[b], linewidth = 1, alpha=0.1,label=my_label, ylims=(0,max_y_val))
    end
    plot!(legend=true)
    legend_pos = findfirst(x -> x == :legend, keys(kwargs))
    if !isnothing(legend_pos)
        plot!(legend = kwargs[legend_pos])
    else
        plot!(legend = betti_labels)
    end
    # Explicit axis labels from kwargs win over the defaults.
    x_pos = findfirst(x -> x == :xlabel, keys(kwargs))
    y_pos = findfirst(x -> x == :ylabel, keys(kwargs))
    if !isnothing(x_pos)
        xlabel!(kwargs[x_pos])
    elseif default_labels
        xlabel!("Edge density")
    end
    if !isnothing(y_pos)
        ylabel!(kwargs[y_pos])
    elseif default_labels
        ylabel!("Number of cycles")
    end
    return plot_ref
end
"""
    find_max_betti(bettis_collection::Array)

Return the highest Betti value found anywhere in `bettis_collection`, which
may be a vector of per-dimension arrays or a plain numeric array.
"""
function find_max_betti(bettis_collection::Array)
    max_y_val = 0
    for betti_set in bettis_collection
        # When iterating a plain numeric matrix the elements are scalars, for
        # which findmax is undefined; handle both shapes explicitly. (The old
        # `typeof(x) == Vector` guard never matched a concrete element type.)
        local_max = betti_set isa AbstractArray ? findmax(betti_set)[1] : betti_set
        if local_max > max_y_val
            max_y_val = local_max
        end
    end
    return max_y_val
end
# ======= Untested code == end
#%%
"""
    printready_plot_bettis(kwargs)

Placeholder for a print-ready `plot_bettis` wrapper with publication-quality
defaults; currently does nothing and returns `nothing`.
"""
printready_plot_bettis(kwargs) = nothing
#%%
"""
    get_bettis_color_palete(; min_dim=1, use_set::Integer=1)

Generate the vector of colours used across all Betti plots, so that every
plot in the package uses a consistent palette. `use_set` selects one of two
palettes; when `min_dim == 0` an extra leading colour is prepended for Ξ²0.
"""
function get_bettis_color_palete(; min_dim = 1, use_set::Integer = 1)
    # TODO what does the number in the function below is used for?
    if use_set == 1
        # Grayscale tail appended after the hand-picked RGB colours.
        cur_colors = [Gray(bw) for bw = 0.0:0.025:0.5]
        if min_dim == 0
            colors_set = [RGB(87 / 256, 158 / 256, 0 / 256)]
        else
            colors_set = []
        end
        max_RGB = 256
        colors_set = vcat(
            colors_set,
            [
                RGB(255 / max_RGB, 206 / max_RGB, 0 / max_RGB),
                RGB(248 / max_RGB, 23 / max_RGB, 0 / max_RGB),
                RGB(97 / max_RGB, 169 / max_RGB, 255 / max_RGB),
                RGB(163 / max_RGB, 0 / max_RGB, 185 / max_RGB),
                RGB(33 / max_RGB, 96 / max_RGB, 45 / max_RGB),
                RGB(4 / max_RGB, 0 / max_RGB, 199 / max_RGB),
                RGB(135 / max_RGB, 88 / max_RGB, 0 / max_RGB),
            ],
            cur_colors,
        )
    else
        # NOTE(review): bare comparison below is a no-op — presumably this was
        # meant to be `elseif use_set == 2`; kept as-is to preserve behaviour
        # (any use_set != 1 currently takes this branch).
        use_set == 2
        cur_colors = get_color_palette(:auto, 1)
        cur_colors3 = get_color_palette(:lightrainbow, 1)
        cur_colors2 = get_color_palette(:cyclic1, 1)
        if min_dim == 0
            # colors_set = [cur_colors[3], cur_colors[5], [:red], cur_colors[1]] #cur_colors[7],
            colors_set = [cur_colors3[3], cur_colors[5], cur_colors3[end], cur_colors[1]] #cur_colors[7],
        else
            colors_set = [cur_colors[5], cur_colors3[end], cur_colors[1]] #cur_colors[7],
            # colors_set = [cur_colors[5], [:red], cur_colors[1], cur_colors[14]]
        end
        # for c = [collect(11:25);]
        #     push!(colors_set, cur_colors2[c])
        # end
        colors_set = vcat(colors_set, [cur_colors2[c] for c in [collect(11:25);]])
    end
    return colors_set
end
# ==============================
# ======= Untested code =======
# using Measures
# using Plots.PlotMeasures
#
# # Source: https://github.com/JuliaPlots/Plots.jl/issues/897
# function setdefaultplottingparams(;upscale=2)
# #8x upscaling in resolution
# fntsm = Plots.font("sans-serif", pointsize=round(12.0*upscale))
# fntlg = Plots.font("sans-serif", pointsize=round(18.0*upscale))
# default(titlefont=fntlg, guidefont=fntlg, tickfont=fntsm, legendfont=fntsm)
# default(size=(800*upscale,600*upscale)) #Plot canvas size
# default(dpi=500) #Only for PyPlot - presently broken
# end
#%%
"""
plot_bettis_collection(bettis_collection, bett_num; step=1, show_plt=true, R=0., G=0.4, B=1.0)
PLots collection of Betti curves of rank 'bett-num'. Every successive plot has
lower opacity than predecessor. 'step' defines step between collection elements
that are ploted. By default, plot is displayed after carteation. This can be
disabled by setting 'show_plt' to false.
Color of the plot can be set with 'R', 'G', 'B' parameters.
"""
function plot_bettis_collection(bettis_collection,
bett_num,
max_rank;
step = 1,
show_plt = true,
R = 0.0,
G = 0.4,
B = 1.0)
step > 0 || error("Steps should be natural number!")
bettis_total = size(bettis_collection, 1)
colors_set = zeros(Float64, bettis_total, 4)
colors_set[:, 1] .= R
colors_set[:, 2] .= G
colors_set[:, 3] .= B
max_betti = get_max_betti_from_collection(bettis_collection)
@info "max_betti" max_betti
x = 0
y = bettis_total * 0.1
va_range = collect(range(bettis_total + x, y, length = bettis_total))
colors_set[:, 4] .= va_range / findmax(va_range)[1]
rgba_set = RGBA[]
for k = 1:size(colors_set, 1)
push!(
rgba_set,
RGBA(colors_set[k, 1], colors_set[k, 2], colors_set[k, 3], colors_set[k, 4]),
)
end
plt_reference = plot(1, title = "Betti curves collection, rank $(bett_num)", label = "")
for b = 1:step:bettis_total
betti = bettis_collection[b]
x_vals_1 = (1:size(betti[:, bett_num], 1)) / size(betti[:, bett_num], 1)
plot!(x_vals_1, betti[:, bett_num], lc = rgba_set[b], label = "rank=$(max_rank-b)")
plot!(ylim = (0, max_betti))
end
xlabel!("Normalised steps")
ylabel!("Number of cycles")
plot!(legend = true)
show_plt && display(plt_reference)
return plt_reference
end
#%%
"""
get_max_bettis(bettis)
Returns the maximal bettis of Betti curves for all dimensions.
"""
function get_max_bettis(bettis)
all_max_bettis = findmax(bettis, dims=1)[1]
return all_max_bettis
end
# TODO change name
# TODO check what for dim is used, change to min dim
"""
    get_max_betti_from_collection(bettis_collection; dim = 1)

Return the single largest value found in any element of `bettis_collection`
(0 for an empty collection). The `dim` keyword is currently unused.
"""
function get_max_betti_from_collection(bettis_collection; dim = 1)
    return reduce(
        (acc, betti) -> max(acc, findmax(betti)[1]),
        bettis_collection;
        init = 0,
    )
end
#%%
"""
plot_and_save_bettis(eirene_results, plot_title::String,
results_path::String; extension = ".png",
data_size::String="", do_save=true,
extend_title=true, do_normalise=true, max_dim=3,
legend_on=true)
Plot Betti curves from 0 up to `max_dim` using `eirene_results` from Eirene library and
returns handler for figure. Optionally, if `do_save` is set, saves the figure
or if `do_normalise` is set, sets the steps range to be normalised to the
horizontal axis maximal value.
"""
function plot_and_save_bettis(bettis,
plot_title::String,
results_path::String;
file_name = "",
extension = ".png",
do_save = true,
do_normalise = true,
min_dim = 0,
max_dim = 3,
legend_on = true,
kwargs...)
bettis = get_bettis(eirene_results, max_dim)
if do_normalise
bettis = normalise_bettis(bettis)
end
plot_ref =
plot_bettis(bettis, plot_title, legend_on = legend_on, min_dim = min_dim, kwargs...)
if do_save
if isempty(file_name)
file_name = plot_title * extension
elseif isempty(findall(x -> x == extension[2:end], split(file_name, ".")))
#check for the extension in file name
file_name *= extension
end
save_figure_with_params(
plot_ref,
results_path;
extension = extension,
prefix = split(file_name, ".")[1],
)
end
return plot_ref
end
# TODO merge functions for getting betti curves
# Original function returns 2 different types of betti curves. If no default
# value parameters is given, it returns vector of matrices. If num of steps is
# given, then it return matrix maxdim x numsteps.
# """
# bettis_eirene(matr, maxdim; mintime=-Inf, maxtime=Inf, numofsteps=Inf, mindim=1)
#
# Takes the `matr` and computes Betti curves up to `maxdim`. Return matrix only
# with betti curve values
#
#
# Function taken from: https://github.com/alexyarosh/hyperbolic
# """
#%%
# Deprecated entry points kept for backward compatibility; they forward to the
# current `get_bettis` / `plot_and_save_bettis` API.
@deprecate bettis_eirene(matr, maxdim; mintime = -Inf, maxtime = Inf, numofsteps = Inf, mindim = 1) get_bettis(results_eirene, max_dim; min_dim = 1)
@deprecate get_bettis_from_image(img_name, plot_params; file_path = "", plot_heatmaps = true, save_heatmaps = false, plot_betti_figrues = true) get_bettis(results_eirene, max_dim; min_dim = 1)
@deprecate get_bettis_from_image2(img_name;file_path = "",plot_heatmaps = true, save_heatmaps = false, plot_betti_figrues = true) get_bettis_from_image(img_name, plot_params; file_path = "", plot_heatmaps = true, save_heatmaps = false, plot_betti_figrues = true)
@deprecate plot_and_save_bettis2(eirene_results, plot_title::String, results_path::String; file_name = "", extension = ".png", data_size::String = "", do_save = true, extend_title = true, do_normalise = true, min_dim = 0, max_dim = 3, legend_on = true) plot_and_save_bettis(bettis, plot_title::String, results_path::String; file_name = "", extension = ".png", do_save = true, do_normalise = true, min_dim = 0, max_dim = 3, legend_on = true, kwargs...)
@deprecate get_and_plot_bettis(eirene_results; max_dim = 3, min_dim = 1, plot_title = "", legend_on = false) get_bettis(results_eirene, max_dim; min_dim = 1)
#%%
"""
lower_ordmat_resolution(ordered_matrix::Array, total_bins::Int)
Takes ordered matrix 'input_matrix' and reduces the resolution of values in the
matrix into 'total_bins' bins.
"""
function lower_ordmat_resolution(ordered_matrix::Array, total_bins::Int)
new_ordered_matrix = zeros(size(ordered_matrix))
max_val = findmax(ordered_matrix)[1]
min_val = findmin(ordered_matrix)[1]
bin_step = max_val Γ· total_bins
old_bins = min_val:bin_step:max_val
for bin = 1:total_bins
@debug "First step threshold is $(old_bins[bin])"
indices = findall(x -> (x >= old_bins[bin]), ordered_matrix)
new_ordered_matrix[indices] .= bin - 1
end
@debug "Max_val in new matrix is " findmax(new_ordered_matrix)
@debug "And should be " total_bins - 1
return new_ordered_matrix
end
#%%
"""
average_bettis(bettis_matrix; up_factor=8)
Takes the average values of betti curves stored in 'bettis_matrix'.
'bettis_matrix' consist of different simulations(first index of the matrix),
different ranks (third index of the matrix). Second index of the matrices
(saples) may vary accross many simulations and for this reason, all betti curves
are upsampled by a factor of 'upsample_factor' and then the average for every
dimension is computed.
"""
function average_bettis(bettis_matrix::Matrix; up_factor = 8)
bettis_matrix_backup = copy(bettis_matrix)
simulations = size(bettis_matrix, 1)
dimensions = size(bettis_matrix[1], 1)
max_samples = 0
for k = 1:simulations
# global max_samples
current_len = length(bettis_matrix[k][1][:, 1])
if max_samples < current_len
max_samples = current_len
end
end
bettis_size = size(bettis_matrix)
total_upsamples = (max_samples - 1) * up_factor + 1
x_resampled = range(0, 1, step = total_upsamples)
avg_bettis = zeros(total_upsamples, dimensions)
std_bettis = copy(avg_bettis)
resampled_bettis = zeros(simulations, total_upsamples, dimensions)
# resample betti curves
for simulation = 1:simulations, betti = 1:dimensions
resampled_bettis[simulation, :, betti] =
upsample_vector2(bettis_matrix[simulation][betti][:, 2], total_upsamples)
end
# average and std Betti
for dimension = 1:dimensions
avg_bettis[:, dimension] = mean(resampled_bettis[:, :, dimension], dims = 1)
std_bettis[:, dimension] = mean(resampled_bettis[:, :, dimension], dims = 1)
end
return avg_bettis, std_bettis
end
#%%
"""
    upsample_vector2(input_vector, total_upsamples)

Resample 'input_vector' onto a grid of 'total_upsamples' points over the unit
interval using a 'Spline1D' interpolant (Dierckx package).
"""
function upsample_vector2(input_vector, total_upsamples)
    # Fit the spline on the original samples placed on [0, 1]...
    segments = size(input_vector, 1) - 1
    coarse_grid = range(0, 1, length = segments + 1)
    interpolant = Spline1D(coarse_grid, input_vector)

    # ...then evaluate it on the denser grid.
    dense_grid = range(0, 1, length = total_upsamples)
    return interpolant(dense_grid)
end
#%%
"""
upsample_vector(input_vector; upsample_factor::Int=8)
Takes an 'input_vector' and returns a vector which has 'upsample_factor' many
times more samples. New samples are interpolated with 'spl' function from
'Dierckx' package.
"""
function upsample_vector(input_vector; upsample_factor::Int = 8)
total_orig_samples = size(input_vector, 1) - 1
total_samples = upsample_factor * total_orig_samples + 1
x_vals = range(0, 1, length = total_orig_samples + 1)
spl = Spline1D(x_vals, input_vector)
x_upsampled = range(0, 1, length = total_samples)
y_upsampled = spl(x_upsampled)
# ref = plot(range(0, 1, length=total_orig_samples), input_vector);
# plot!(x_vals, y_upsampled);
# display(ref)
return y_upsampled
end
# =========--=======-========-==========-=======-
# From bettis areas
# Area under Betti curve functions
#%%
"""
get_area_under_betti_curve(betti_curves, min_dim, max_dim)
Computes the area under Betti curves stored in 'betti_curves', where each row is
a Betti curve and each column is a value.
"""
function get_area_under_betti_curve(betti_curves::Union{Matrix{Float64}, Array{Array{Float64,2}}};do_normalised::Bool=false)
#TODO check this part
if size(betti_curves,2) < 2
bettis_vector = vectorize_bettis(betti_curves)
else
bettis_vector = betti_curves
end
# @info sum(bettis_vector, dims=1)
bettis_area = sum(bettis_vector, dims=1)
if do_normalised
total_steps = size(bettis_vector,1)
bettis_area ./= total_steps
end
# @info bettis_area
return bettis_area
end
# Deprecated: superseded by the keyword-based `get_area_under_betti_curve`.
@deprecate get_area_under_betti_curve(C, min_dim, max_dim) get_area_under_betti_curve(betti_curves; do_normalised=false)
#%%
"""
get_dataset_bettis_areas(dataset; min_dim::Integer=1, max_dim::Integer=3, return_matrix::Bool=true)
Computes topology of every matrix in dataset, computes Betti curves for dimensions
min_dim up to max_dim and returns vector (or matrix) of areas under Betti curves.
"""
function get_dataset_bettis_areas(dataset; min_dim::Integer=1, max_dim::Integer=3, return_matrix::Bool=true)
areas_vector = Array[]
for data = dataset
@info "Computing topology."
C = eirene(data, maxdim=max_dim,)
matrix_bettis = get_bettis(C,max_dim, min_dim=min_dim)
push!(areas_vector, get_area_under_betti_curve(matrix_bettis))
end
if return_matrix
return vcat([areas_vector[k] for k=1:10]...)
else
return areas_vector
end
end
#%%
"""
get_area_boxes(areas_matrix, min_dim::Integer, max_dim::Integer)
Plots the boxplot of area under betti curves.
"""
function get_area_boxes(areas_matrix, min_dim::Integer, max_dim::Integer)
bplot = StatsPlots.boxplot()
data_colors = get_bettis_color_palete()
for (index, value) in enumerate(min_dim:max_dim)
StatsPlots.boxplot!(bplot, areas_matrix[:,index], labels="Ξ²$(value)", color=data_colors[value])
end
return bplot
end
# function get_bettis_collection_from_matrices(ordered_matrices_collection; max_dim::Int=3, min_dim::Int=1)
# bettis_collection = Array[]
#
# for matrix = ordered_matrices_collection
# @debug "Computing Bettis..."
# eirene_geom = eirene(matrix,maxdim=max_B_dim,model="vr")
#
# bettis = reshape_bettis(get_bettis(eirene_geom, max_B_dim))
# push!(bettis_collection, bettis)
# end
#
# return bettis_collection
# end
#
# #%%
# TODO find what are the alternative functions for the functions below
# @deprecate get_dataset_topology(dataset; min_dim::Integer=1, max_dim::Integer=3, get_curves::Bool=true, get_areas::Bool=true, get_persistence_diagrams::Bool=true, do_normalise::Bool=true)
# @deprecate get_bettis_collection(ordered_matrices_collection; max_B_dim=3)
# @deprecate reshape_bettis(bettis)
# @deprecate print_hmap_with_bettis(ordered_matrices_collection, bettis_collection, plot_data::PlottingData)
# @deprecate make_hm_and_betti_plot(ordered_geom_gr, bettis, title_hmap, title_bettis, max_betti)
# @deprecate matrix_analysis(test_data::PlottingData;generation_function=get_geom_matrix)
# @deprecate multiscale_matrix_testing(sample_space_dims = 3, maxsim = 5, min_B_dim = 1, max_B_dim = 3, size_start = 10, size_step = 5, size_stop = 50; do_random = true, control_saving = false, perform_eavl = false)
# @deprecate plot_betti_numbers(betti_numbers, edge_density, title="Geometric matrix"; stop=0.6)
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | src/MatrixOrganization.jl | code | 23592 | using LinearAlgebra
import Plots.plot as plot
# using Plots
using Random
include("PlottingWrappers.jl")
include("PointsSubstitution.jl")
"""
function expand_matrix(input_matrix, expansion_size, last_components
Takes 'input_matrix' (an ordering matrix used for creating cliques) and and adds
2Γ'expansion_size' number of rows. 'last_components' are the values in original
matrix that are added last to the clique.
Results may be be plotted with same funtion with sufix "with_plot".
"""
function expand_matrix(input_matrix, expansion_size, last_components)
new_comp = last_components
matrix_size = size(input_matrix, 1)
for mat_sizes = matrix_size:2:(matrix_size+2expansion_size)
input_matrix, new_comp = add_step_to_matrix(input_matrix, new_comp)
end
return input_matrix
end
"""
    expand_matrix_with_plot(args...)

Run `expand_matrix` on `args`, display a heatmap of the expanded matrix, and
return `(expanded_matrix, plot_handle)`.
"""
function expand_matrix_with_plot(args...)
    # BUG FIX: the original called `expand_matrix_with_plot(args...)` here,
    # recursing forever; it must delegate to `expand_matrix`. `matrix_size`
    # was also undefined in the plot title.
    input_matrix = expand_matrix(args...)
    matrix_size = size(input_matrix, 1)
    expand_plt_ref = plot_square_heatmap(input_matrix, 1, matrix_size;
                                plt_title="Original, size:$(matrix_size)",
                                color_palete=:lightrainbow)
    display(expand_plt_ref)
    return input_matrix, expand_plt_ref
end
# Shuffle matrix entries
"""
function shuffle_matrix(input_matrix, shuffles; do_plot=false)
Takes symmetric 'input_matrix' and randomly swaps rows 'shuffles' many times.
Results may be plotted by setting 'do_plot=true'.
"""
function shuffle_matrix(input_matrix, total_shuffles)
matrix_size = size(input_matrix, 1)
rows = randcycle(matrix_size)
shuffled_ord_mat = copy(input_matrix)
for k = 1:total_shuffles
# global shuffled_ord_mat, rows
srcs, trgts = rand(rows, 2)
swap_rows!(shuffled_ord_mat, srcs, trgts)
end
return shuffled_ord_mat
end
"""
    shuffle_matrix_with_plotting(args...)

Run `shuffle_matrix` on `args`, display a heatmap of the shuffled matrix, and
return `(shuffled_matrix, plot_handle)`.
"""
function shuffle_matrix_with_plotting(args...)
    shuffled_ord_mat = shuffle_matrix(args...)
    # BUG FIX: the original referenced undefined `do_plot` and `matrix_size`,
    # and returned the undefined `input_matrix`; it always raised an
    # UndefVarError. Always plot and return the shuffled matrix instead.
    matrix_size = size(shuffled_ord_mat, 1)
    shuff_plt_ref = plot_square_heatmap(shuffled_ord_mat, 1, matrix_size;
                        plt_title="Shuffled, size:$(matrix_size)",
                        color_palete=:lightrainbow)
    display(shuff_plt_ref)
    return shuffled_ord_mat, shuff_plt_ref
end
"""
function organize_shuff_matrix(input_matrix; do_plots=false)
Reorganizes 'input_matrix' so that values highest values in a row are positioned
next to the diagonal.
Results may be plotted by setting 'do_plot=true'.
"""
function organize_shuff_matrix(input_matrix)
unscrambled_matrix = copy(input_matrix)
matrix_size = size(input_matrix, 1)
for k = matrix_size:-2:2
max_row_val = findmax(unscrambled_matrix[k, :])[2]
# put to the previous last position
swap_rows!(unscrambled_matrix, max_row_val, k - 1)
# skip 1 row and work on next one
end
return unscrambled_matrix
end
"""
    organize_shuff_matrix_with_plotting(input_matrix)

Run `organize_shuff_matrix`, display a heatmap of the result, and return
`(unscrambled_matrix, plot_handle)`.
"""
function organize_shuff_matrix_with_plotting(input_matrix)
    unscrambled_matrix = organize_shuff_matrix(input_matrix)
    # BUG FIX: `matrix_size` was undefined in the original plot title.
    matrix_size = size(unscrambled_matrix, 1)
    reorganized_plt_ref = plot_square_heatmap(unscrambled_matrix, 1, matrix_size;
                        plt_title="unscrambled_matrix, size:$(matrix_size)",
                        color_palete=:lightrainbow
                        )
    display(reorganized_plt_ref)
    return unscrambled_matrix, reorganized_plt_ref
end
"""
function order_max_vals_near_diagonal(input_matrix; do_plots=false, direction=:descending)
Orders values in 'input_matrix' so that values next to diagonal are descending
(by default).
TODO- not working- Optionally, ascending order can be used by setting 'direction' to
':ascending'.
Results may be plotted by setting 'do_plot=true'.
"""
function order_max_vals_near_diagonal(input_matrix; direction=:descending)
# Find max values next to the diagonal
matrix_size = size(input_matrix, 1)
if direction == :descending
# ordering_function = findmax
new_ord_value = -1
iteration_values = matrix_size:-2:2
# iteration_values = 2:2:matrix_size
elseif direction == :ascending
# ordering_function = findmin
new_ord_value = findmax(input_matrix)[1] * 2
iteration_values = 2:2:matrix_size
else
# @error "Unknow ordering was given"
throw("Unknow ordering was given")
end
reordered_matrix = copy(input_matrix)
row_indices = 1:2:matrix_size
col_indices = 2:2:matrix_size
coord_set = [CartesianIndex(row_indices[k], col_indices[k]) for k = 1:matrix_sizeΓ·2]
diag_max_values = reordered_matrix[coord_set]
for k = iteration_values
max_val, max_ind = findmax(diag_max_values)
(direction == :descending) ? (position = floor(k Γ· 2)) : (position = floor(k Γ· 2))
diag_max_values[max_ind] = diag_max_values[position]
diag_max_values[position] = new_ord_value
max_ind *= 2
swap_rows!(reordered_matrix, k, max_ind)
swap_rows!(reordered_matrix, k - 1, max_ind - 1)
end
return reordered_matrix
end
"""
    order_max_vals_near_diagonal_with_plotting(input_matrix; kwargs...)

Run `order_max_vals_near_diagonal`, display a heatmap of the result, and
return `(reordered_matrix, plot_handle)`.
"""
function order_max_vals_near_diagonal_with_plotting(input_matrix; kwargs...)
    reordered_matrix = order_max_vals_near_diagonal(input_matrix; kwargs...)
    # BUG FIX: `matrix_size` was undefined in the original plot title.
    matrix_size = size(reordered_matrix, 1)
    reorganized_plt_ref = plot_square_heatmap(reordered_matrix, 1, matrix_size;
                        plt_title="reordered_matrix, size:$(matrix_size)",
                        color_palete=:lightrainbow)
    display(reorganized_plt_ref)
    return reordered_matrix, reorganized_plt_ref
end
"""
function fine_tune_matrix(input_matrix; do_plots=false)
Check if velues next to the maximal values are organized in descending order.
"""
function fine_tune_matrix(input_matrix)
# Find max values next to the diagonal
matrix_size = size(input_matrix, 1)
fine_tune_matrix = copy(input_matrix)
# if direction == :descending
# # ordering_function = findmax
# new_ord_value = -1
# iteration_values = matrix_size:-2:2
# # iteration_values = 2:2:matrix_size
#
# elseif direction == :ascending
# # ordering_function = findmin
# new_ord_value = findmax(input_matrix)[1]*2
# iteration_values = 2:2:matrix_size
# else
# # @error "Unknow ordering was given"
# throw("Unknow ordering was given")
# end
for k = 2:2:matrix_size-1
if fine_tune_matrix[k-1, k+1] > fine_tune_matrix[k, k+1]
swap_rows!(fine_tune_matrix, k, k - 1)
end
end
return fine_tune_matrix
end
"""
    fine_tune_matrix_with_ploting(input_matrix)

Run `fine_tune_matrix`, display a heatmap of the result, and return
`(tuned_matrix, plot_handle)`.
"""
function fine_tune_matrix_with_ploting(input_matrix)
    # BUG FIX: the original assigned to a local named `fine_tune_matrix`,
    # which shadowed the function of the same name and made the call raise an
    # UndefVarError; `reordered_matrix` and `matrix_size` were undefined too.
    tuned_matrix = fine_tune_matrix(input_matrix)
    matrix_size = size(tuned_matrix, 1)
    fine_tuned_plt_ref = plot_square_heatmap(tuned_matrix, 1, matrix_size;
                        plt_title="fine_tuned, size:$(matrix_size)",
                        color_palete=:lightrainbow)
    display(fine_tuned_plt_ref)
    return tuned_matrix, fine_tuned_plt_ref
end
# TODO separate plotting from processing
# TODO separate plotting from processing
"""
    order_max_vals_by_row_avg(input_matrix; do_plots=false)

Attempts to reorder the rows of 'input_matrix' by their average values.

NOTE(review): this function is broken as written -- it returns
`reordered_matrix`, which is never assigned, and the plotting calls also
reference `reordered_matrix`; running it raises an UndefVarError. It appears
`sorted_matrix` was meant to be plotted and returned.
"""
function order_max_vals_by_row_avg(input_matrix; do_plots=false)
    # Find max values next to the diagonal
    matrix_size = size(input_matrix,1)

    # Row average
    row_avg = reshape(mean(input_matrix, dims=1),(matrix_size, 1))

    # Using funciton below, because sortperm is not working on Array{Float64,2}
    sorted_rows_indexes = [findall(x->x==sort(row_avg, dims=1)[k], row_avg)[1][1] for k=1:matrix_size]
    matrix_indices = collect(range(1,matrix_size))
    # Sort indices by values (highest to lowest)
    # Create a list of indices, which corresponding valeus are ordered
    sorted_indices = sort!([1:matrix_size;],
                        by=i->(sorted_rows_indexes[i],matrix_indices[i]))

    sorted_matrix = copy(input_matrix)
    for k = 1:matrix_sizeΓ·2 #iteration_values
        max_ind = sorted_indices[k]
        sorted_indices[k] = k
        sorted_indices[max_ind] = max_ind

        swap_rows!(sorted_matrix, k, max_ind)
        # swap_rows!(sorted_matrix, k-1, max_ind-1)
    end

    # TODO separate plotting from processing
    # NOTE(review): `size(reordered_matrix,1)` below references an undefined
    # variable; `sorted_matrix` was probably intended.
    reorganized_plt_ref = plot_square_heatmap(sorted_matrix, 1,size(reordered_matrix,1);
                            plt_title = "reordered_matrix, size:$(matrix_size)",
                            color_palete=:lightrainbow)

    # TODO separate plotting from processing
    input_mat_plt_ref = plot_square_heatmap(input_matrix, 1,size(reordered_matrix,1);
                            plt_title = "input_matrix, size:$(matrix_size)",
                            color_palete=:lightrainbow)

    common_plot1 = plot(input_mat_plt_ref, reorganized_plt_ref, layout=(1,2),
                            size=(800,400))

    # reordered_matrix = copy(input_matrix)
    # row_indices = 1:2:matrix_size
    # col_indices = 2:2:matrix_size
    # coord_set = [CartesianIndex(row_indices[k], col_indices[k]) for k=1:matrix_sizeΓ·2]
    #
    #
    # for k = iteration_values
    #     max_val, max_ind = findmax(diag_max_values)
    #     position = floor(kΓ·2)
    #     diag_max_values[max_ind] = diag_max_values[position]
    #     diag_max_values[position] = new_ord_value
    #     max_ind *= 2
    #
    #     swap_rows!(reordered_matrix, k, max_ind)
    #     swap_rows!(reordered_matrix, k-1, max_ind-1)
    # end

    if do_plots
        # TODO separate plotting from processing
        reorganized_plt_ref = plot_square_heatmap(sorted_matrix, 1,size(reordered_matrix,1);
                                plt_title = "reordered_matrix, size:$(matrix_size)",
                                color_palete=:lightrainbow)
        display(reorganized_plt_ref)
    end
    return reordered_matrix, reorganized_plt_ref
end
"""
    order_max_vals_near_diagonal2(input_matrix; do_final_plot=false, do_all_plots=false, direction=:descending)

Greedily reorders 'input_matrix' so that, for every odd row `k`, the global
maximum of the remaining submatrix is brought to row `k` and the largest
remaining value of that row to `k+1`. Returns `(reordered_matrix, plot_ref)`;
`plot_ref` is an empty vector unless `do_final_plot` is set.

NOTE(review): the `direction` keyword is accepted but never used; a heatmap
(`..._pt0`) is built on every iteration even when `do_all_plots` is false.
"""
function order_max_vals_near_diagonal2(input_matrix; do_final_plot=false, do_all_plots = false, direction=:descending)
    # Find max values next to the diagonal
    matrix_size = size(input_matrix,1)
    reordered_matrix = copy(input_matrix)
    reorganized_plt_ref = []

    # for every row in matrix
    for k = 1:2:matrix_size-1
        # global reordered_matrix
        # TODO separate plotting from processing
        reorganized_plt_ref_pt0 = plot_square_heatmap(reordered_matrix, 1,size(reordered_matrix,1);
                                plt_title = "reordered_matrix, size:$(matrix_size)",
                                color_palete=:lightrainbow)
        max_val, max_ind = findmax(reordered_matrix[k:end, k:end])
        # Take the smaller coordinate
        # (max_ind[1] < max_ind[2]) ? (target_row = max_ind[1]) : (target_row = max_ind[2])
        # Shift the submatrix index back into full-matrix coordinates.
        target_row = max_ind[1]+k-1

        reordered_matrix = swap_rows(reordered_matrix, k, target_row)
        # TODO separate plotting from processing
        reorganized_plt_ref_pt1 = plot_square_heatmap(reordered_matrix, 1,size(reordered_matrix,1);
                                plt_title = "reordered_matrix, size:$(matrix_size)",
                                color_palete=:lightrainbow)

        val, second_target = findmax(reordered_matrix[k,k:end])
        second_target = second_target+k-1
        reordered_matrix = swap_rows(reordered_matrix, k+1, second_target)
    # end
    #
    #
        if do_all_plots
            # TODO separate plotting from processing
            reorganized_plt_ref_pt2 = plot_square_heatmap(reordered_matrix, 1,size(reordered_matrix,1);
                                    plt_title = "reordered_matrix, size:$(matrix_size)",
                                    color_palete=:lightrainbow)
            reorganized_plt_ref = plot(reorganized_plt_ref_pt0, reorganized_plt_ref_pt1, reorganized_plt_ref_pt2, layout=(1,3), size=(1400,400))
            display(reorganized_plt_ref)
        end
    end
    if do_final_plot
        # TODO separate plotting from processing
        reorganized_plt_ref = plot_square_heatmap(reordered_matrix, 1,size(reordered_matrix,1);
                                plt_title = "reordered_matrix, size:$(matrix_size)",
                                color_palete=:lightrainbow)
        # display(reorganized_plt_ref)
    else
        reorganized_plt_ref=[]
    end
    return reordered_matrix, reorganized_plt_ref
end
"""
function get_key_for_value(d::Dict, target_value)
Returns key of the dictionary which corresponds to the given target value.
"""
function get_key_for_value(d::Dict, target_value)
for (key, value) in d
if value == target_value
return key
end
end
end
"""
    order_max_vals_near_diagonal3(input_matrix, ordering; direction=:descending)

Variant of `order_max_vals_near_diagonal2` that additionally tracks how the
region ordering in `ordering` (a Dict mapping region => position) is permuted.
Returns `(reordered_matrix, new_ordering)`.

NOTE(review): the `direction` keyword is accepted but never used.
"""
function order_max_vals_near_diagonal3(input_matrix, ordering; direction=:descending)
    # Find max values next to the diagonal
    matrix_size = size(input_matrix,1)
    reordered_matrix = deepcopy(input_matrix)
    new_ordering = Dict()

    # for every row in matrix
    for m = 1:2:matrix_size-1
        # global reordered_matrix
        max_val, max_ind = findmax(reordered_matrix[m:end, m:end])
        # Take the smaller coordinate
        # Shift the submatrix index back into full-matrix coordinates.
        first_target = max_ind[1]+m-1
        reordered_matrix = swap_rows(reordered_matrix, m, first_target)

        # check for duplicates
        val, second_target = findmax(reordered_matrix[m,m:end])
        second_target = second_target+m-1
        if first_target == second_target
            @debug "are same"
            second_target -= 1
        end
        reordered_matrix = swap_rows(reordered_matrix, m+1, second_target)

        # find key which initially had region = first_target
        region1 = get_key_for_value(ordering, first_target)
        region2 = get_key_for_value(ordering, second_target)

        if region1 in keys(new_ordering)
            @warn "repeated"
        end
        if region2 in keys(new_ordering)
            @warn "repeated2"
        end
        new_ordering[region1] = m
        new_ordering[region2] = m +1
        println("Replaced $(region1) fom $(ordering[region1]) to $(m)")
        println("Replaced $(region2) fom $(ordering[region2]) to $(m+1)")
    end
    return reordered_matrix, new_ordering
end
##
# """
# matrix_poling!(input_matrix; method = "avg_pooling")
#
# Takes a matrix and changes it's values to the same value, according to 'method'.
# Possible methods are:
# - 'max_pooling'- finds maximal value and replaces all values with the maximal
# value.
# - 'avg_pooling'- changes values to the average value
# - 'gauss_pooling'- uses gausian kernel as weights to the values in the matrix
# """
# function matrix_poling!(input_matrix::Array; method::String = "max_pooling")
# if method == "max_pooling"
# max_val = findmax(input_matrix)[1]
# input_matrix .= max_val
# end
# return input_matrix
# end
# matrix_poling(mat[1:3,1:3]; method = "gauss_pooling")
"""
    matrix_poling(input_matrix::Array; method="avg_pooling", kernel_size=3, gauss_sigma=1)

Return a copy of `input_matrix` pooled according to `method`:
- "max_pooling": every entry becomes the matrix maximum,
- "avg_pooling": every entry becomes the (floored) matrix mean,
- "gauss_pooling": the matrix is smoothed with a Gaussian kernel of width
  `gauss_sigma` and floored (requires `Kernel`/`imfilter` from the Images
  ecosystem).

NOTE(review): `kernel_size` is currently unused -- only `gauss_sigma` affects
the Gaussian branch. An unknown `method` returns the unchanged copy.
"""
function matrix_poling(input_matrix::Array; method = "avg_pooling", kernel_size=3,gauss_sigma=1)
    out_matrix = copy(input_matrix)
    if method == "max_pooling"
        max_val = findmax(out_matrix)[1]
        out_matrix .= max_val
    elseif method == "avg_pooling"
        avg_val = mean(out_matrix)
        out_matrix .= floor(Int,avg_val)
    elseif method == "gauss_pooling"
        @debug "Gauss pooling"

        # gauss_kernel = ones(kernel_size,kernel_size)
        # gauss_kernel[kernel_sizeΓ·2+1,kernel_sizeΓ·2+1] = 2
        filtering_kernel = Kernel.gaussian(gauss_sigma)
        # gauss_kernel2 = imfilter(gauss_kernel, filtering_kernel)
        # gauss_kernel3 = gauss_kernel2.-findmin(gauss_kernel2)[1]/findmax(gauss_kernel2)[1]
        # gauss_kernel3 = gauss_kernel3./sum(gauss_kernel3)
        out_matrix = imfilter(out_matrix, filtering_kernel)
        # out_matrix += out_matrix.*gauss_kernel3
        # NOTE(review): if out_matrix is an Int array, `.=` with floored values
        # keeps the eltype; after imfilter it may be a Float array, in which
        # case the floored values remain stored as floats.
        out_matrix .= floor.(Int,out_matrix)
    end
    return  out_matrix
end
"""
    subsample_matrix(square_matrix::Array; subsamp_size::Int=2, method="max_pooling")

Pool the symmetric matrix `square_matrix` in patches of `subsamp_size` using
`method` (see `matrix_poling`). Even patch sizes greater than 2 are followed
by an additional 2x2 pooling pass. Throws an error for non-symmetric input.
"""
function subsample_matrix(square_matrix::Array; subsamp_size::Int=2, method="max_pooling")
    if !issymmetric(square_matrix)
        # BUG FIX: the guard tests symmetry, but the original message claimed
        # the matrix was "not square"; an unreachable `return` followed it.
        error("Input matrix is not symmetric")
    end

    if subsamp_size == 2
        # BUG FIX: the original computed this call twice and discarded the
        # first result.
        return reorganize_matrix(square_matrix; subsamp_size=subsamp_size, method=method)
    elseif subsamp_size % 2 == 0
        # Even sizes > 2 get a second, finer pooling pass.
        new_matrix = reorganize_matrix(square_matrix; subsamp_size=subsamp_size, method=method)
        return reorganize_matrix(new_matrix; subsamp_size=2, method=method)
    else
        return reorganize_matrix(square_matrix; subsamp_size=subsamp_size, method=method)
    end
end
#= Should the result be overlapping or not? Options:
- filter it as a whole image, return diagonal
- filter subimages - the problem is that there will be an edge effect at every border of the cells
- filter subimages and assign the middle value to the whole patch
- filter whole upper diagonal matrix
Plot gaussian kernel
Should we care about
=#
"""
    reorganize_matrix(square_matrix::Array; subsamp_size::Int=2, method="max_pooling", overlap::Int=0, gauss_sigma=1)

Pool/filter the upper triangle of the symmetric `square_matrix` according to
`method` ("gauss_pooling", "row_pooling", or patch-wise pooling via
`matrix_poling` otherwise), mirror the result onto the lower triangle, and
keep the original diagonal. Returns a new matrix; the input is not modified.
"""
function reorganize_matrix(square_matrix::Array; subsamp_size::Int=2, method="max_pooling", overlap::Int=0,gauss_sigma=1)
    if method == "gauss_pooling"
        (subsamp_size >= 3) || error("Can not do gaussian pooling for area smaller than 3x3")
    end
    @debug method
    # Subsample upper half
    square_matrix2 = Float64.(copy(square_matrix))
    total_rows, total_cols = size(square_matrix)
    size_mismatch_flag = false
    # if total_rows%2 != 0
    #     total_rows -= 1
    # end
    # if total_cols%2 != 0
    #     total_cols -= 1
    # end
    if method == "gauss_pooling"
        # Work on the strict upper triangle only.
        square_matrix2 = zeros(Int,size(square_matrix))
        square_matrix2[1:end-1,2:end] = UpperTriangular(square_matrix[1:end-1,2:end])
        # flip matrix
        do_matrix_flip = true
        if do_matrix_flip
            # Mirror the matrix through its centre, then shift by one row/col.
            square_matrix3 = zeros(Float64,size(square_matrix))
            for row in 0:total_rows-1
                for col in 0:total_cols-1
                    square_matrix3[row+1,col+1] = square_matrix[end-row,end-col]
                end
            end
            square_matrix3[1:end-1,:] = square_matrix3[2:end,:]
            square_matrix3[:,2:end] = square_matrix3[:,1:end-1]
        else
            square_matrix3 = copy(square_matrix2)
            square_matrix3[1:end-1,:] = square_matrix2[2:end,:]
            square_matrix3[:,1:end-1] = square_matrix2[:,2:end]
        end
        # Fill the lower triangle with the flipped data before filtering so
        # the Gaussian filter does not see zeros below the diagonal.
        for row in 1:total_rows
            for col in 1:row
                square_matrix2[row,col] = square_matrix3[row,col]
            end
        end
        filtering_kernel = Kernel.gaussian(gauss_sigma)
        square_matrix2 = imfilter(square_matrix2, filtering_kernel)
        square_matrix2 .= Int.(floor.(Int,square_matrix2))
    elseif method == "row_pooling"
        # Discrete Gaussian weights of length `len` centred at ΞΌ.
        function gauss_func(Ο,len;ΞΌ=0)
            maxv = lenΓ·2
            minv= -lenΓ·2
            if len%2 == 0
                maxv-=1
            end
            x = collect(minv:1:maxv)
            return exp.(-(((x.-ΞΌ)./Ο)./2).^2)./(Ο*sqrt(2Ο))
        end
        # Take 'subsamp_size'in total in horizontal and in vertical line from
        # current matrix element
        # subsamp_size = 5
        val_range = subsamp_sizeΓ·2
        r = (subsamp_sizeΓ·2)*2
        # total_rows = 266
        # total_cols = 266
        # row = 3
        for row = 1:1:(total_rows-1)
            for col = (row+1):1:total_cols
                # Shrink the sampling window near the matrix corners.
                if row < r && col <= r
                    row_range = row - 1
                    col_range = val_range + (val_range-row_rangeΓ·2)
                else
                    row_range = val_range
                end
                if row > total_rows-r && col >= total_cols-r
                    col_range = total_cols - row -1
                    row_range = val_range + (val_range-col_range)
                else
                    col_range = val_range
                end
                r_beg = row - row_range
                r_end = row + row_range
                c_beg = col - col_range
                c_end = col + col_range
                # Clamp the window to the upper triangle and matrix bounds.
                # if r_beg < 1 && r_end > total_rows
                if r_beg < 1
                    r_end += abs(r_beg)+1
                    r_beg = 1
                end
                if r_end > col
                    r_beg -= abs(r_end-col)
                    if r_beg <1
                        r_beg=1
                    end
                    r_end = col-1
                end
                # end # if both
                # if c_beg < row+1 && c_end > total_cols
                if c_beg < row+1
                    c_end += abs(c_beg-(row+1))
                    c_beg = row+1
                end
                if c_end > total_cols
                    c_beg -= abs(total_rows-c_end)
                    c_end = total_cols
                end
                vrange = r_beg:r_end
                try
                    # Accumulate Gaussian-weighted sums of the vertical and
                    # horizontal slices through this element.
                    square_matrix2[row,col] += sum(
                        square_matrix[vrange,col]
                        .* gauss_func(gauss_sigma,length(vrange))
                    )
                    vrange = c_beg:c_end
                    square_matrix2[row,col] += sum(
                        square_matrix[row,c_beg:c_end] .*
                        gauss_func(gauss_sigma,length(vrange))
                    )
                catch e
                    @error "Failed to compute row pooling"
                    @error "row" row
                    @error "col" col
                    square_matrix2[row,col] = 0
                    break
                    # error(e)
                end
            end # for col
        end # for rows
    else
        # Patch-wise pooling over the upper triangle via matrix_poling.
        step = subsamp_size-overlap
        for row = 1:step:(total_rows-2)
            for col = (row+subsamp_size):step:total_cols
                r_beg = row
                r_end = row+subsamp_size-1
                c_beg = col
                c_end = col+subsamp_size-1
                if r_end > total_rows || c_end > total_cols
                    size_mismatch_flag = true
                    continue
                end
                square_matrix2[r_beg:r_end,c_beg:c_end] =
                    matrix_poling(square_matrix[r_beg:r_end,c_beg:c_end]; method=method,kernel_size=subsamp_size, gauss_sigma=gauss_sigma)
            end # for col
        size_mismatch_flag && continue
        end # for rows
    end # if method
    # Copy over lower half
    for row in 2:total_rows
        for col in 1:row-1
            square_matrix2[row,col] = square_matrix2[col,row]
        end
    end
    # keep same values on diagonal
    for row in 1:total_rows
        square_matrix2[row,row] = square_matrix[row,row]
    end
    return square_matrix2
end
"""
    pool_matrix(square_matrix::Array; method="max_pooling")

Return a pooled copy of `square_matrix` without modifying the input.

See `pool_matrix!` for the supported pooling `method`s.
"""
function pool_matrix(square_matrix::Array; method="max_pooling")
    out_matrix = copy(square_matrix)
    # Pool the copy, not the input: the original called
    # `pool_matrix!(square_matrix; ...)`, mutating the caller's matrix and
    # returning the untouched copy.
    pool_matrix!(out_matrix; method=method)
    return out_matrix
end
"""
add_random_patch(input_matrix; patch_size=1, total_patches=1, locations)
Takes a matrix and replaces some values with random values. Returns a new matrix
with replaced values and indicies where replacement took place.
Values can be
replaced by setting 'patch_size' to values bigger than 1. If the input matrix
is symmetric, then output matrix will be symmetric as well (values from above
diagnoal will be copied over values from below diagonal).
"""
function add_random_patch(input_matrix::Matrix; patch_size=1, total_patches=1, locations=CartesianIndex(0))
total_rows, total_cols = size(input_matrix)
max_row = total_rows-patch_size+1
max_col = total_cols-patch_size+1
output_matrix = copy(input_matrix)
max_val = findmax(output_matrix)[1]
min_val = findmin(output_matrix)[1]
matrix_type = typeof(output_matrix[1])
if patch_size>total_rows || patch_size>total_cols
error(DimensionMismatch,": Patch size is bigger than the matrix!")
end
# ===
issymmetric(input_matrix) ? (symmetrize_matrix = true) : (symmetrize_matrix = false)
if locations == CartesianIndex(0)
@debug "Locations were not specified- random locations will be used"
if symmetrize_matrix
possible_indices = findall(x->true,UpperTriangular(output_matrix))
possible_indices = possible_indices[findall(x->x[1]<=x[2], possible_indices)]
possible_indices = possible_indices[findall(x->x[1]<=max_row, possible_indices)]
possible_indices = possible_indices[findall(x->x[2]<=max_col, possible_indices)]
else
possible_indices = possible_indices = findall(x->true,output_matrix)
end
tartget_indices = possible_indices[randcycle(length(possible_indices))]
else
wrong_indices = findall(x->x[1]>max_row || x[2]>max_col, locations)
if isempty(wrong_indices)
tartget_indices = locations
total_patches = size(locations)[1]
else
error(DimensionMismatch,": Given indices are bigger than the matrix dimensions!")
end
end
changed_indices = CartesianIndex[]
for replacement=1:total_patches
row = tartget_indices[replacement][1]
col = tartget_indices[replacement][2]
r_range = row:row+patch_size-1
c_range = col:col+patch_size-1
for ind in CartesianIndices((r_range,c_range))
push!(changed_indices,ind)
end
new_rand_matrix = floor.(matrix_type, rand(patch_size,patch_size) .* (max_val-min_val+1) .+ min_val)
output_matrix[r_range,c_range] .= new_rand_matrix
end
if symmetrize_matrix
# Inverse second column
changed_indices2 = [changed_indices changed_indices]
for ind = 1:size(changed_indices)[1]
c_ind = changed_indices2[ind,2]
changed_indices2[ind,2] = CartesianIndex(c_ind[2],c_ind[1])
end
# Copy over lower half
for row in 2:total_rows
for col in 1:row-1
output_matrix[row,col] = output_matrix[col,row]
end
end
@debug "Returned symmetric matrix" output_matrix
return output_matrix, changed_indices2
else
return output_matrix, changed_indices
end
end
"""
    scramble_matrix(in_matrix::Array; k::Int=2, max_iterations=-1)

Return a copy of `in_matrix` in which `k`×`k` blocks, taken from the upper
part of the matrix moving from the right edge towards the diagonal, have
their entries randomly permuted in place. At most `max_iterations` blocks
are scrambled; a non-positive value selects the default cap of
`total_cols*(total_cols-1)/2`.
"""
function scramble_matrix(in_matrix::Array; k::Int=2, max_iterations=-1)
    scrambled = copy(in_matrix)
    n_rows, n_cols = size(in_matrix)
    blocks_done = 0
    # Non-positive cap means "use the default number of block shuffles".
    cap = max_iterations < 1 ? (n_cols*(n_cols-1))/2 : max_iterations
    for top = 1:k:n_rows-k, right = n_cols:-k:top+1
        # Skip the block touching the diagonal.
        top == right-k+1 && continue
        block = collect(CartesianIndices((top:top+k-1, right-k+1:right)))
        scrambled[block] .= in_matrix[shuffle(block)]
        blocks_done += 1
        blocks_done >= cap && break
    end
    return scrambled
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | src/MatrixProcessing.jl | code | 15396 | using LinearAlgebra
using StatsBase
"""
shift_to_non_negative(matrix::Array)
Returns a matrix in which values are non-negative. This is done by finding the
minimal value in the input matrix and adding its absolute value to the matrix
elements.
"""
function shift_to_non_negative(matrix::Array)
min_val = findmin(matrix)[1]
if min_val < 0
return matrix .-= min_val
else
return matrix
end
end
"""
normalize_to_01(matrix::Array; use_factor=false, norm_factor=256)
Returns a matrix which values are in range [0, 1]. If 'use_factor' is set to
'true' then values are normalized to 'norm_factor' (by default set to 256).
If the values in the input matrix are below 0, then they are shifted so that only positive numbers are in
the matrix (the values are normalized to new maximal value or norm_factor).
"""
function normalize_to_01(matrix::Array; use_factor = false, norm_factor = 256)
normalized_matrix = copy(matrix)
min_val = findmin(normalized_matrix)[1]
if min_val < 0
normalized_matrix .+= abs(min_val)
else
normalized_matrix .-= abs(min_val)
end
max_val = findmax(normalized_matrix)[1]
if use_factor
if max_val > norm_factor
@warn "Maximal values exceed \'norm_factor\'."
end
normalized_matrix = normalized_matrix ./ norm_factor
else
normalized_matrix = normalized_matrix ./ max_val
end
return normalized_matrix
end
# function symmetrize_image(image)
"""
function diagonal_symmetrize(image::Matrix; below_over_upper::Bool=false)
Takes an 'image' in the form of a matrix and return a copy which is symmetric
with respect to diagonal- values above diagonal are copied over values below the
diagonal. This can be inverted by setting 'below_over_upper=true'.
If the input matrix is not square, then square matrix is created by taking
matrix of k times 'k' elements, 'k=min(r,c)' where 'r' is number of rows and 'c'
is number of columns.
"""
function diagonal_symmetrize(image::Matrix; below_over_upper::Bool = false)
w, h = size(image)
mat_size = findmin([w, h])[1]
img = copy(image[1:mat_size, 1:mat_size])
# Get all cartesian indices from input matrix
matrix_indices = CartesianIndices((1:mat_size, 1:mat_size))
# Filter out indices below diagonal
if below_over_upper
matrix_indices = findall(x -> x[1] > x[2], matrix_indices)
else
matrix_indices = findall(x -> x[2] > x[1], matrix_indices)
end
# how many elements are above diagonal
repetition_number = Int(ceil((mat_size * (mat_size - 1)) / 2))
# Substitute elements
for k = 1:repetition_number
# n_pos = matrix_indices[k]
mat_ind = matrix_indices[k]
# ordered_matrix[mat_ind] = k
img[mat_ind[2], mat_ind[1]] = img[mat_ind]
end
try
checksquare(img)
catch err
if isa(err, DimensionMismatch)
@error "Resulting matrix is not a square matrix"
throw(err)
end
end
# issymmetric(Float64.(img))
return img
end
# =====
# matrix ordering
"""
function get_ordered_matrix(in_matrix::Matrix;
assign_same_values::Bool = false,
force_symmetry::Bool = false,
small_dist_grouping::Bool = false,
min_dist::Number = 1e-16,
total_dist_groups::Int = 0,
ordering_start::Int=1)
Takes a @input_matrix and returns ordered form of this matrix.
The ordered form is a matrix which elements represent ordering from smallest to
highest values in @input_matrix.
If @input_matrix is symmetric, then ordering happens only with upper diagonal.
Lower diagonal is symetrically copied from values above diagonal.
By default, if there is a geoup of entriess with the same value, they all are
assigned with the same ordering number. This can be changed with
@assign_same_values parameter.
Symetry ordering can be froced with @force_symmetry parameter.
By setting 'small_dist_grouping' to true, all the values that difference is
lower than 'min_dist', will be assigned with the same order number.
# Examples
```julia-repl
julia> a = [0 11 12;
11 0 13;
12 13 0];
julia> get_ordered_matrix(a)
3Γ3 Array{Int64,2}:
0 1 2
1 0 3
2 3 0
```
```julia-repl
julia> b = [38 37 36 30;
37 34 30 32;
36 30 31 30;
30 32 30 29]
julia> get_ordered_matrix(b; assign_same_values=false)
4Γ4 Array{Int64,2}:
0 6 5 2
6 0 1 4
5 1 0 3
2 4 3 0
julia> get_ordered_matrix(b; assign_same_values=true)
4Γ4 Array{Int64,2}:
0 4 3 1
4 0 1 2
3 1 0 1
1 2 1 0
```
"""
function get_ordered_matrix(in_matrix::Matrix;
assign_same_values::Bool = false,
force_symmetry::Bool = false,
small_dist_grouping::Bool = false,
min_dist::Number = 1e-16,
total_dist_groups::Int = 0,
ordering_start::Int=1)
# TODO Symmetry must be forced for matrix in which there are NaN elements- needs
# to be further investigated
# TODO not working for negative only values
# TODO check for square matrix
# ==
mat_size = size(in_matrix)
ord_mat = zeros(Int, mat_size)
# how many elements are above diagonal
if issymmetric(in_matrix) || force_symmetry
matrix_indices =
generate_indices(mat_size, symmetry_order = true, include_diagonal = false)
do_symmetry = true
else
matrix_indices = generate_indices(mat_size, symmetry_order = false)
do_symmetry = false
end
total_elements = length(matrix_indices)
# Collect vector of indices
all_ind_collected = arr_to_vec(matrix_indices)
# Sort indices vector according to inpu array
# TODO Cant this be done with sortperm? in_matrix > UpperTriangular |> sortperm
index_sorting = sort_indices_by_values(in_matrix, all_ind_collected)
ordering_number = ordering_start
for k = 1:total_elements
# global ordering_number
next_sorted_pos = index_sorting[k]
mat_ind = matrix_indices[next_sorted_pos]
if assign_same_values && k != 1
prev_sorted_pos = index_sorting[k-1]
prev_mat_ind = matrix_indices[prev_sorted_pos]
cond1 = in_matrix[prev_mat_ind] == in_matrix[mat_ind]
cond2 = small_dist_grouping
cond3 = abs(in_matrix[prev_mat_ind] - in_matrix[mat_ind]) < min_dist
if cond1 || (cond2 && cond3)
ordering_number -= 1
end
end
set_values!(ord_mat, mat_ind, ordering_number; do_symmetry = do_symmetry)
ordering_number += 1
# else
# set_values!(ord_mat, mat_ind, ordering_number; do_symmetry=do_symmetry)
# ordering_number+=1
# end
end
return ord_mat
end
# TODO this one has to be specified for 3 dim matrix
"""
    get_ordered_matrix(input_array::Array{Any,3}; do_slices=true, dims=0)

Order a 3-dimensional array. With `do_slices=true` every 2D slice along
dimension `dims` (1, 2 or 3) is ordered independently; otherwise the whole
array is ordered as a single collection of values.

Throws `DomainError` when `dims` is not a valid dimension of the array.
"""
function get_ordered_matrix(input_array::Array{Any,3}; do_slices = true, dims = 0)
    arr_size = size(input_array)
    out_arr = zeros(Int, arr_size)
    if do_slices
        # Validate `dims`; the original conditions were inverted
        # (`dims > 0` / `dims <= 3`) and rejected every valid dimension.
        if dims > length(arr_size)
            throw(DomainError("Given dimension is greater than total size of array."))
        elseif dims < 1
            throw(DomainError("Given dimension must be positive value."))
        elseif dims > 3
            throw(DomainError("Given dimension must be lower than 3."))
        end
        for dim = 1:arr_size[dims]
            if dims == 1
                out_arr[dim, :, :] = get_ordered_matrix(input_array[dim, :, :])
            elseif dims == 2
                out_arr[:, dim, :] = get_ordered_matrix(input_array[:, dim, :])
            elseif dims == 3
                out_arr[:, :, dim] = get_ordered_matrix(input_array[:, :, dim])
            end
        end
    else
        out_arr = get_ordered_matrix(input_array)
    end
    # The original fell through without returning when `do_slices=true`.
    return out_arr
end
"""
    get_ordered_matrix(input_array::Array)

Return a copy of `input_array` in which every entry is replaced by its rank
(1 = smallest) among all entries; ties are broken by the column-major
position of the entry.
"""
function get_ordered_matrix(input_array::Array)
    ranked = copy(input_array)
    n = length(input_array)
    # Column-major list of every Cartesian position in the array.
    positions = vec(collect(CartesianIndices(size(input_array))))
    values_at = input_array[positions]
    # Positions sorted by value, ties resolved by Cartesian position.
    order = sort!([1:n;], by = i -> (values_at[i], positions[i]))
    for rank = 1:n
        ranked[order[rank]] = rank
    end
    return ranked
end
"""
    group_distances(input_matrix::Array, total_dist_groups::Int)

Normalize `input_matrix` to the range [0, 1] and quantize its values into
`total_dist_groups` groups. Every value is replaced by the upper boundary of
the group it falls into, so the result holds at most `total_dist_groups`
distinct values from (0, 1].

Throws `DomainError` when there are more groups than entries.
"""
function group_distances(input_matrix::Array, total_dist_groups::Int)
    normed_matrix = normalize_to_01(input_matrix)
    target_matrix = copy(normed_matrix)
    # `length` also works for vectors; the original `h, w = size(...)`
    # destructuring assumed a 2D matrix.
    if length(input_matrix) < total_dist_groups
        throw(DomainError("Total number of groups exceed total number of entries in input matrix"))
    end
    total_borders = total_dist_groups + 1
    range_val = collect(range(0, 1, length = total_borders))
    for k = 2:total_borders
        # Values sitting exactly on a shared border end up in the later
        # (higher) group, because later iterations overwrite earlier ones.
        indices = findall(x -> x >= range_val[k-1] && x <= range_val[k], normed_matrix)
        target_matrix[indices] .= range_val[k]
    end
    return target_matrix
end
"""
generate_indices(matrix_size::Tuple; symmetry_order::Bool=false, include_diagonal::Bool=true)
Return all the possible indices of the matrix of size 'matrix_size'.
'matrix_size' may be a tuple or a series of integer arguments corresponding to
the lengths in each dimension.
If 'symetry_order' is set to'true', then only indices of values below diagonal
are returned.
"""
function generate_indices(matrix_size::Tuple;
symmetry_order::Bool = false,
include_diagonal::Bool = true)
# Get all cartesian indices from input matrix
matrix_indices = CartesianIndices(matrix_size)
# Filter out indices below diagonal
if symmetry_order
matrix_indices = findall(x -> x[1] <= x[2], matrix_indices)
else
matrix_indices = findall(x -> true, matrix_indices)
end
if !include_diagonal
filter!(x -> x[1] != x[2], matrix_indices)
end
return matrix_indices
end
"""
generate_indices(matrix_size::Int; symmetry_order::Bool=false, include_diagonal::Bool=true)
Generate indices for a matrix of given dimensions. 'generate_indices' is a
series of integer arguments corresponding to the lengths in each dimension.
"""
function generate_indices(matrix_size::Int;
symmetry_order::Bool = false,
include_diagonal::Bool = true)
return generate_indices(
(matrix_size, matrix_size);
symmetry_order = symmetry_order,
include_diagonal = include_diagonal,
)
end
"""
arr_to_vec(some_array::Array)
Takes an array and reshapes it into a vector.
"""
function arr_to_vec(some_array::Array)
return collect(reshape(some_array, length(some_array)))
end
"""
    cartesianInd_to_vec(some_array::CartesianIndices)

Flatten a `CartesianIndices` iterator into a `Vector` of `CartesianIndex`
entries in column-major order.
"""
function cartesianInd_to_vec(some_array::CartesianIndices)
    flat = reshape(some_array, length(some_array))
    return collect(flat)
end
"""
sort_indices_by_values(values_matrix::T, index_vector) where {T<:VecOrMat}
Sorts the 'index_vector' according to corresponding values in the 'values_matrix'
and returns a Vector of intigers which is an list of ordering of
'sorted index_vector'.
"""
function sort_indices_by_values(values_matrix::T, index_vector) where {T<:VecOrMat}
if !isa(index_vector, Vector)
throw(TypeError(
:sort_indices_by_values,
"\'index_vector\' must be a vector, otherwise an ordering list can no be created!",
Vector,
typeof(index_vector),
))
end
total_elements = length(index_vector)
return sort!(
[1:total_elements;],
by = i -> (values_matrix[index_vector][i], index_vector[i]),
)
end
"""
set_values!(input_matrix::Matrix, position::CartesianIndex, target_value::Number; do_symmetry=false)
Assigns 'target_value' to indices at 'input_matrix[position[1], position[2]]'.
If 'do_symmetry' is set to 'true', then the 'target_value' is also assigned at
position 'input_matrix[position[2], position[1]]'.
"""
function set_values!(input_matrix::Matrix,
position::CartesianIndex,
target_value::Number;
do_symmetry::Bool = false)
input_matrix[position[1], position[2]] = target_value
if do_symmetry
input_matrix[position[2], position[1]] = target_value
end
return input_matrix
end
# matrix ordering
# =====
"""
    get_high_dim_ordered_matrix(input_matrix)

Order every 2D slice `input_matrix[slice, :, :]` independently and return
the resulting 3D array of integer ranks.
"""
function get_high_dim_ordered_matrix(input_matrix)
    dims = size(input_matrix)
    ranked_slices = zeros(Int, dims)
    for s = 1:dims[1]
        ranked_slices[s, :, :] = get_ordered_matrix(input_matrix[s, :, :])
    end
    return ranked_slices
end
"""
reduce_arrs_to_min_len(arrs)
Takes vector of vectors of different length and returns array of arrays which
are of the same length. Length in the output is the shortest vector length from
the input- values above this size are discarded.
"""
function reduce_arrs_to_min_len(arrs::Array)
@debug "Argument specific"
new_arr = copy(arrs)
simulation = size(new_arr, 1)
min_size = Inf
for m = 1:simulation
@debug "Simulation number" m
current_size = size(new_arr[m], 1)
@debug "Current size: " current_size
if convert(Float64, current_size) < min_size
min_size = current_size
@debug "min size changed to: " min_size
end
end
# min_size = Int.(min_size)
@debug "Concatenating"
for m = 1:simulation
new_arr[m] = new_arr[m][1:min_size, :]
end
min_size = Inf
return new_arr
end
"""
increase_arrs_to_max_len(arrs)
Takes vector of vectors of different length and returns array of arrays which
are of the same length. Length in the output is the longest vector length from
the input- values above this size are discarded.
"""
function increase_arrs_to_max_len(arrs)
new_arr = copy(arrs)
simulation = size(new_arr, 1)
max_size = 0
for m = 1:simulation
@debug "Simulation number" m
current_size = size(new_arr[m], 1)
@debug "Current size: " current_size
if convert(Float64, current_size) > max_size
max_size = current_size
@debug "min size changed to: " max_size
end
end
# max_size = Int.(max_size)
@debug "Concatenating"
for m = 1:simulation
correct_len_arr = zeros(Int, max_size, 3)
correct_len_arr[1:size(arrs[m], 1), :] = new_arr[m][:, :]
new_arr[m] = correct_len_arr
end
# min_size = Inf
return new_arr
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | src/MatrixToolbox.jl | code | 5883 | using Distances
using Random
# export generate_random_point_cloud,
# generate_geometric_matrix,
# generate_shuffled_matrix,
# generate_random_matrix,
# generate_matrix_ordering,
# # generate_set_of_graphs,
# # plot_betti_numbers,
# # save_matrix_to_file;
"""
Returns a random matrix of size @number_of_points x @dimensions in which every
column is a point and every n-th row is a position in the n-th dimension.
"""
generate_random_point_cloud(number_of_points = 12, dimensions=2) =
rand(Float64, dimensions, number_of_points)
"""
Return a matrix which stores the pariwise distances between every point in the
@random_points matrix.
"""
function generate_geometric_matrix(random_points)
geometric_matrix = Distances.pairwise(Euclidean(), random_points, dims=2)
return geometric_matrix
end
"""
Returns a ordered geometric matrix, which was generated by samping 'dims'
dimensional unit cuve with 'total_points' samples.
"""
function get_ordered_geom_matrix(dims::Integer, total_points::Integer)
# TODO to be completed
end
"""
Returns a symetric matrix with randomly permuted valuse from the @input_matrix.
"""
function generate_shuffled_matrix(input_matrix)
matrix_size = size(input_matrix,1)
indicies_collection = findall(x->x>0, input_matrix)
rand!(indicies_collection, indicies_collection)
shuffeled_matrix = copy(input_matrix)
# Swap the elements
n=1
for k in 1:matrix_size
for m in k+1:matrix_size
a = indicies_collection[n][1]
b = indicies_collection[n][2]
shuffeled_matrix[k,m] = input_matrix[a,b]
shuffeled_matrix[m,k] = input_matrix[b,a]
shuffeled_matrix[a,b] = input_matrix[k,m]
shuffeled_matrix[b,a] = input_matrix[m,k]
n +=1
end
end
return shuffeled_matrix
end
"""
Returns matrix with random values which are symmetric accros the diagonal. The
matrix has @matrix_size rows and @matrix_size columns.
"""
function generate_random_matrix(matrix_size)
elemnts_above_diagonal = Int((matrix_size^2-matrix_size)/2)
random_matrix = zeros(matrix_size, matrix_size)
set_of_random_numbers = rand(elemnts_above_diagonal)
h = 1
for k in 1:matrix_size
for m in k+1:matrix_size
random_matrix[k,m] = set_of_random_numbers[h]
random_matrix[m,k] = set_of_random_numbers[h]
h += 1
end
end
return random_matrix
end
# function generate_set_of_graphs(matrix_size, matrix_ordering)
# """
# Returns set of graphs generated from the @matrix_ordering. In every succesive
# graph, single connection between points is added.
#
# NOTE: the function does not take the coordinates of the numbered vertices.
# """
# vetrices = matrix_size
# edges = matrix_ordering
# num_of_edges = size(edges)[2]
#
# set_of_graphs = [a=Graph(vetrices) for a=1:num_of_edges]
# edges_counter = zeros(Int, num_of_edges)
# edge_density = zeros(num_of_edges)
#
# k=1
# for k in range(1,stop=num_of_edges)~
# add_edge!(set_of_graphs[k], edges[1,k], edges[2,k]);
# edges_counter[k] = ne(set_of_graphs[k])
# edge_density[k] = edges_counter[k]/binomial(matrix_size,2)
#
# if k<num_of_edges # if is used to eliminate copying at last iteration
# set_of_graphs[k+1] = copy(set_of_graphs[k])
# end
# end
# return set_of_graphs, edge_density
# end
# function save_matrix_to_file(matrix, filename)
# """
# Saves given @matrix to the csv file with the name @filename. If there is no path
# added to the @filename, then file saved is in local folder.
# """
# open(filename, "w") do io
# writedlm(io, matrix, ',')
# end
# end
# =====
# Copied form Julia learning repo
"""
Returns ordering of the @geometric_matrix given as an input. If value @ascending
is set to true, the values are number from the lowest value, to the highest. If
false, the values are numbered from highest to the lowest.
"""
function generate_matrix_ordering(geometric_matrix, ascending = true)
matrix_size = size(geometric_matrix, 2)
elemnts_above_diagonal = Int((matrix_size^2-matrix_size)/2)
matrix_ordering = zeros(Int, 2,elemnts_above_diagonal)
A = copy(geometric_matrix)
(ascending) ? (method=findmax) : (method=findmin)
for element in 1:elemnts_above_diagonal
# Find maximal distance
minimal_value = method(A)
# Get the coordinates (only 2 dimensions, because it is distance matrix)
matrix_ordering[1,element] = Int(minimal_value[2][1])
matrix_ordering[2,element] = Int(minimal_value[2][2])
#
# # Zero minval in A (above and below diagonal) so next minval can be found
A[matrix_ordering[1,element], matrix_ordering[2,element]] = 0.0
A[matrix_ordering[2,element], matrix_ordering[1,element]] = 0.0
end
# change from min to max order to the max to min order (? necessary ?)
if ascending
matrix_ordering = matrix_ordering[:,end:-1:1]
end
return matrix_ordering
end
"""
function get_geometric_matrix(points, dimensions; save_as_file=false)
Created a point cloud with 'points' number of points from 'dimension'
dimensional eucidean unit cube and computes distances between the points.
Distance matrix may be saved to csv file by setting 'save_as_file' to 'true'.
"""
function get_geometric_matrix(points, dimensions; save_as_file=false)
point_cloud = generate_random_point_cloud(points,dimensions)
geom_mat = generate_geometric_matrix(point_cloud)
if save_as_file
open("geometric_matrix_points$(points)_dims$(dimensions).csv", "w") do io
writedlm(io, geom_mat, ',')
end
end
return geom_mat
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | src/PlottingWrappers.jl | code | 7899 | import Plots.plot as plot
import Plots.plot! as plot!
import Plots.heatmap as heatmap
import Plots.@layout as @layout
# include("TopologyStructures.jl")
"""
plot_square_heatmap(matrix, tick_step, tick_end;
plt_title, img_size=(900, 800), img_dpi=300)
Takes matrix and plots it as a heatmap. Funtion returns the handler to the
heatmap.
"""
function plot_square_heatmap(matrix, tick_step, tick_end;
plt_title="", yflip_matrix=true,
plot_params= (dpi=300,
size=(900,800),
lw=1,
thickness_scaling=1,
top_margin= 0,
left_margin=[0 0],
bottom_margin= 0
),
color_palete=:lightrainbow,
add_labels=true)
heat_map = heatmap(matrix, color=color_palete,
title=plt_title,
size=plot_params.size, dpi=plot_params.dpi,
ticks=0:tick_step:tick_end);
yflip_matrix && plot!( yflip = true,);
if add_labels
xlabel!("Matrix index")
ylabel!("Matrix index")
end
return heat_map
end
#%%
"""
row_plot(bd_plots::Dict;base_h = 600, base_w = 600, kwargs...)
Plots all the plots from the input dictionary 'bd_plots' in 'layout=(1,n)',
where 'n' is total number of plots.
By default, the plots dimensions are: the height='base_h'; the
width= n * base_w.
"""
function row_plot(bd_plots::Dict;base_h = 800, base_w = 800,
top_margin= 10mm,
left_margin=[10mm 10mm],
bottom_margin= 10mm,
kwargs...)
total_dims = length(bd_plots)
all_keys = keys(bd_plots)
all_plts = tuple()
for k = 1:total_dims
all_plts = (all_plts..., bd_plots["Ξ²$(k)"])
end
nice_plot = plot(all_plts...,
layout=(1,total_dims),
size=(total_dims*base_w,base_h),
# left_margin=left_margin,
# top_margin=top_margin,
# bottom_margin=bottom_margin,
thickness_scaling=2,
margin=2mm,
kwargs...)
return nice_plot
end
#%%
"""
plotimg(matrix_to_plot)
Display an image as a plot. The values from the input matrix are adjusted to the
value range of [0, 1].
If @cut_off is true then the matrix values above 256 are set to 256 and then all
values are normalized to the value 256. If @cut_off is false, then values are
normalized to maximal value.
"""
function plotimg(matrix_to_plot, cut_off=false)
matrix_type = typeof(matrix_to_plot)
min_val = findmin(matrix_to_plot)[1]
int_types_arr = [Matrix{UInt8}; Matrix{UInt16}; Matrix{UInt32};
Matrix{UInt64}; Matrix{UInt128}; Matrix{Int8};
Matrix{Int16}; Matrix{Int32}; Matrix{Int64};
Matrix{Int128}]
float_types_arr = [Matrix{Float16} Matrix{Float32} Matrix{Float64}]
if min_val<0
matrix_to_plot = shift_to_non_negative(matrix_to_plot)
end
max_val = findmax(matrix_to_plot)[1]
if max_val > 256 && cut_off
matrix_to_plot[findall(x -> x>256, matrix_to_plot)] = 256
end
if in(matrix_type, int_types_arr)
matrix_to_plot = normalize_to_01(matrix_to_plot)
elseif in(matrix_type, float_types_arr)
matrix_to_plot = normalize_to_01(matrix_to_plot, max_val)
end
return colorview(Gray, matrix_to_plot)
end
#%%
"""
plot_image_analysis(plots_set; description::NamedTuple, original_img, kwargs...)
Takes set of plots and puts them in 2 coulm layout. If 'description' is given,
adds entry with the data processing description. If 'original_img' is given, it
is also displayed next to the descrtions field.
'kwargs' are plot properties.
"""
function plot_image_analysis(plots_set; description::NamedTuple, original_img, kwargs...)
kwarg_keys = kwargs.keys()
(!isempty(original)) ? (orig_img_flag = true) : (orig_img_flag = false)
(!isempty(description)) ? (desc_flag = true) : (desc_flag = false)
l = @layout [a{0.2w} [grid(3,3) b{0.2h}]]
total_plot_sets = 7
total_cols = 2
total_rows = ceil(Int,total_plot_sets/total_cols)
if orig_img_flag || desc_flag
total_rows +=1
end
height_unit = 1/total_rows
matrix = [1 2 3;
4 5 6;
7 8 9]
l = @layout [a{0.4w,} b{0.4w,};
# grid(1,4);
# grid(1,4);
# grid(1,4);
# grid(1,4);
]
# [grid(2,2) grid(2,2)]]
# [a [grid(4,2) b]]]
data = [rand(10, 4), rand(11, 4)]
l = @layout [a{0.4w} b
c d e f
c d e f
c d e f
c d e f
c d e f]
ref = plot(grid=false,
axis=false,
layout = l,
legend = false,
# seriestype = [:scatter :path],
dpi=300,
size=(900,1200),
)
ref.series_list
p2 = plot!(ref.subplots[18],rand(10, 1),seriestype = :scatter,axis=true,grid=true, title="")
p2 = plot!(ref.subplots[21],rand(10, 10),seriestype = :heatmap, legend=true, xlabel="index", ylabel="index")
annotate!(ref.subplots[0], 0, 0, "my text", :red)
p1.subplots
# color scheme
end
# TODO add depreciation for this function
# """
# get_all_plots_from_set(orig_matrix::TopologyMatrixSet; name_prefix="")
#
# Takes a collection of matrix computed for topological analysis and creates set
# of their heatmaps and related Betti curves.
#
# """
# function get_all_plots_from_set(orig_matrix::TopologyMatrixSet; name_prefix="")
# # ===
# # Get heatmaps
# original_heatmaps_set = TopologyMatrixHeatmapsSet(orig_matrix)
# # patched_heatmaps_set = TopologyMatrixHeatmapsSet(patched_matrix)
#
# # ===
# # Get Betti plots
# original_bettis = TopologyMatrixBettisSet(orig_matrix)
# original_bettis_plots = TopologyMatrixBettisPlots(original_bettis)
# # patched_bettis_plots = TopologyMatrixBettisPlots(patched_bettis)
#
# mat_size = size(orig_matrix.ordered_matrix,1)
# common_plots_set = Any[]
# for k = 1:size(orig_matrix.description_vector,1)
# matrix_type = orig_matrix.description_vector[k]
#
#
# # ===
# # Common plot
# common_plot1 = plot(original_heatmaps_set.heatmap_plots_set[k],
# original_bettis_plots.betti_plots_set[k],
# layout=(1,2), size=(800,400))
# plot!(common_plot1, title = matrix_type*"_r$(orig_matrix.ranks_collection[k])")
# # met_par.do_dsiplay && display(common_plot1)
#
# push!(common_plots_set, common_plot1)
# end
#
# # load image
# file_path = orig_matrix.params.img_path*orig_matrix.params.file_name
# if isfile(file_path)
# img1_gray = Gray.(load(file_path))
# additional_plot = plot(img1_gray, legend = false);
# else
# # TODO Change empty plot for plot with properties
# additional_plot = plot(legend = false);
# end
#
# parameters_list_plot = plot()
# first_plot = plot(additional_plot, parameters_list_plot)
#
# plt_size = size(common_plots_set,1)
#
# all_plot1 = plot(additional_plot,
# common_plots_set[1], # original matrix
# common_plots_set[2], # original reordered- highest values located next to diagonal
# common_plots_set[3], # max pooling of values in subsquares, original matrirx
# common_plots_set[4], # max pooling of values in subsquares, reorganized matrix
# common_plots_set[5], # renumbered max pooling of values in subsquares, reorganized matrix
# common_plots_set[6], # renumbered max pooling of original matrix
# common_plots_set[7], # reordered renumbered max pooling of original matrix
# layout=(plt_sizeΓ·2+1,2), size=(1200*2,plt_sizeΓ·2*400))
# return all_plot1
# end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | src/PointsSubstitution.jl | code | 7271 | # Set of functions
#
# After matrices generation, Betti curves will be generated
# For better optimization, operations on indices should be done first
# TODO Check if the threshold of values is applied here and if it has some
# consequence on results
using Eirene
using Random
include("MatrixToolbox.jl")
"""
Parameters of a point-substitution experiment on a distance matrix: the
matrix size, point-cloud dimension, the numbers of source/target points, the
drawn point indices and the substitution steps.
"""
struct PlottingData
    # Size of the (square) distance matrix.
    mat_size::Int64
    # Dimension of the sampled point cloud.
    dim::Int
    # Number of source points whose rows are copied.
    src_pts_number::Int
    # Number of target points replaced per source point.
    trgt_pts_number::Int
    # Indices of the source points.
    src_points::Vector{Int}
    # Indices of the target points (one column per source point).
    trgt_points::Array{Int}
    # Numbers of targets substituted at each step.
    targets::Vector{Int}
    # Constructor for input data: points and steps are filled with
    # placeholder zeros.
    function PlottingData(mat_size::Int, dim::Int, src_pts_number::Int,
                            trgt_pts_number::Int)
        steps_set = [0]
        src_points = [0]
        trgt_points = [0]
        new(mat_size::Int, dim::Int, src_pts_number::Int,
            trgt_pts_number::Int, src_points,
            trgt_points, steps_set)
    end
    # Constructor with explicit point indices; `trgt_sptep` is the stride of
    # the substitution steps (0 means a single step using all targets).
    function PlottingData(mat_size::Int, dim::Int, src_pts_number::Int,
                            trgt_pts_number::Int, src_points::Vector{Int},
                            trgt_points::Array{Int}, trgt_sptep::Int)
        if trgt_sptep == 0
            steps_set = [trgt_pts_number]
        else
            steps_set = collect(1:trgt_sptep:trgt_pts_number)
            # Make sure that all points are used
            isempty(findall(x->x==trgt_pts_number, steps_set)) && push!(steps_set, trgt_pts_number)
        end
        new(mat_size::Int, dim::Int, src_pts_number::Int,
            trgt_pts_number::Int, src_points::Vector{Int},
            trgt_points::Array{Int}, steps_set)
    end
end
"""
    get_replacing_points(mat_size, src_pts_number, trgt_pts_number)

Draw, without repetition, `src_pts_number` source indices and
`trgt_pts_number` target indices per source from `1:mat_size`. Returns the
sources as a vector and the targets as a `trgt_pts_number` ×
`src_pts_number` matrix (one column per source). Throws an error when more
points are requested than `mat_size` provides.
"""
function get_replacing_points(mat_size, src_pts_number, trgt_pts_number)
    needed = src_pts_number + src_pts_number*trgt_pts_number
    needed > mat_size && error("Too many points to substitute!")
    # A random cycle guarantees all drawn indices are distinct.
    pool = randcycle(mat_size)
    src_points = pool[1:src_pts_number]
    trgt_points = reshape(pool[(src_pts_number+1):needed],
                            trgt_pts_number, src_pts_number)
    return src_points, trgt_points
end
# ===
"""
    replace_matrix_rows(matrix, srcs, trgts)

Return a copy of `matrix` in which the row (and matching column) of every
target index in `trgts` is replaced with the row taken at source index
`srcs`. The input matrix is not modified.
"""
function replace_matrix_rows(matrix, srcs, trgts)
    # The original referenced the undefined names `pt_src`/`trgt_points`,
    # raising UndefVarError; use the actual parameters instead.
    replacement_row = get_row(matrix, srcs)
    new_matrix = set_row(matrix, trgts, replacement_row)
    return new_matrix
end
"""
    get_row(matrix, pt_src)

Return a copy of row `pt_src` of `matrix` as a vector.
"""
function get_row(matrix, pt_src)
    return matrix[pt_src, :]
end
"""
    set_row!(matrix::Array, pt_trgt::Int, replacement_row::Array)

Overwrite row and column `pt_trgt` of `matrix` with `replacement_row`,
zeroing the entry that lands on the diagonal first. NOTE: `replacement_row`
itself is mutated (its `pt_trgt`-th element is set to 0). Returns the
mutated matrix.
"""
function set_row!(matrix::Array, pt_trgt::Int, replacement_row::Array)
    @debug "set_row! func"
    # This also mutates the caller's `replacement_row`; the multi-target
    # `set_row` method relies on the accumulated zeros.
    replacement_row[pt_trgt] = 0
    matrix[pt_trgt, :] .= replacement_row
    matrix[:, pt_trgt] .= replacement_row
    return matrix
end

"""
    set_row(matrix::Array, pt_trgt::Int, replacement_row::Array)

Variant of `set_row!` that works on a copy of `matrix` (but still mutates
`replacement_row`).
"""
function set_row(matrix::Array, pt_trgt::Int, replacement_row::Array)
    @debug "set_row func"
    return set_row!(copy(matrix), pt_trgt, replacement_row)
end

"""
    set_row(matrix::Array, pt_trgt::Array, replacement_row::Array)

Apply `set_row!` on a copy of `matrix` for every target index in `pt_trgt`.
Because `set_row!` zeroes one entry of `replacement_row` per call, every
target position ends up zeroed in all replaced rows.
"""
function set_row(matrix::Array, pt_trgt::Array, replacement_row::Array)
    @debug "set_row func"
    updated = copy(matrix)
    for point in pt_trgt
        updated = set_row!(updated, point, replacement_row)
    end
    return updated
end
"""
    matrix_organization(matr, src_points, trgt_points)

Reorder a copy of `matr` by moving every row whose first-column value matches
that of a source point towards the bottom of the matrix (via symmetric
row/column swaps). Returns the reordered copy; `matr` is left unchanged.

NOTE(review): `trgt_points` is accepted for interface compatibility but is
not used by the computation.
"""
function matrix_organization(matr, src_points, trgt_points)
    working_matrix = copy(matr)
    mat_size = size(matr, 1)
    src_number = size(src_points, 1)
    # Removed dead code: an unused `swapping_sources` accumulator and an
    # unused upper-triangle sorting computation contributed nothing here.
    step = 0
    for src_pt = 1:src_number
        # All rows sharing the first-column value of this source point...
        target_set = findall(x -> x == matr[src_points[src_pt], 1], matr[:, 1])
        # ...are swapped towards the bottom of the matrix, one slot each.
        for tragt = target_set
            swap_rows!(working_matrix, tragt, mat_size - step)
            step += 1
        end
    end
    return working_matrix
end
# matrix = ordered_matrices_collection[15]
# Swap rows (and, to keep symmetry, columns) `src_row_num` and `trgt_row_num`
# of `matrix` in place. The swap proceeds in two stages:
#   1. patch the four entries where the two rows/columns intersect (diagonal
#      and cross terms) so they land where a full symmetric permutation would
#      put them;
#   2. exchange the two full rows and mirror them into the matching columns.
# NOTE(review): correctness presumes `matrix` is symmetric on entry — the
# final column writes overwrite the columns with the row data.
function swap_rows!(matrix, src_row_num, trgt_row_num)
    src_backup = copy(matrix[src_row_num,:])
    trgt_backup = copy(matrix[trgt_row_num,:])
    # Stage 1: cross terms receive the opposite diagonal values...
    matrix[src_row_num, trgt_row_num] = matrix[trgt_row_num, trgt_row_num]
    matrix[trgt_row_num, src_row_num] = matrix[src_row_num,src_row_num]
    # ...and the diagonal entries receive the former cross-term values.
    matrix[src_row_num,src_row_num] = trgt_backup[src_row_num]
    matrix[trgt_row_num, trgt_row_num] = src_backup[trgt_row_num]
    # Stage 2: swap the (already patched) rows wholesale.
    src_backup = copy(matrix[src_row_num,:])
    matrix[src_row_num,:] .= matrix[trgt_row_num, :]
    matrix[trgt_row_num, :] .= src_backup
    # Mirror the rows into the columns so the matrix stays symmetric.
    matrix[:, src_row_num] = matrix[src_row_num,:]
    matrix[:, trgt_row_num] = matrix[trgt_row_num,:]
end
"""
    swap_rows(matrix, src_row_num, trgt_row_num)

Out-of-place variant of [`swap_rows!`](@ref): returns a copy of `matrix`
with the two rows/columns swapped.
"""
function swap_rows(matrix, src_row_num, trgt_row_num)
    swapped = copy(matrix)
    swap_rows!(swapped, src_row_num, trgt_row_num)
    return swapped
end
# Run the full ordering-matrix pipeline for the configuration in `test_data`:
#   1. draw the source/target points to substitute,
#   2. generate a distance matrix via `generation_function` (geometric by
#      default; defined elsewhere in this package),
#   3. build the collection of modified distance matrices and their ordered
#      versions,
#   4. compute Betti curves for every ordered matrix and render heatmaps.
# Returns (distance_matrices_collection, ordered_matrices_collection,
# bettis_collection, plot_data). The heatmap call is executed for its side
# effect only; its return value (`plotting_data`) is discarded.
function ordering_matrix_analysis(test_data::PlottingData;generation_function=get_geom_matrix)
    mat_size = test_data.mat_size
    dim = test_data.dim
    src_pts_number = test_data.src_pts_number
    trgt_pts_number = test_data.trgt_pts_number
    trgt_steps = 0
    src_points, trgt_points = get_replacing_points(mat_size, src_pts_number, trgt_pts_number)
    distance_matrix = generation_function(mat_size, dim)
    distance_matrices_collection = get_dist_mat_collection(distance_matrix, src_points, trgt_points, trgt_steps)
    ordered_matrices_collection = get_ordered_set(distance_matrices_collection)
    bettis_collection = get_bettis_collection(ordered_matrices_collection)
    plot_data = PlottingData(mat_size, dim, src_pts_number, trgt_pts_number, src_points, trgt_points, trgt_steps)
    plotting_data = print_hmap_with_bettis(ordered_matrices_collection,
    bettis_collection, plot_data)
    return distance_matrices_collection, ordered_matrices_collection, bettis_collection, plot_data
end
# =================================
# Matrix modification functions
"""
    make_matrix_steps!(input_matrix, step_number; step_size=2)

Flatten a "step" in a symmetric matrix in place: the block of `step_size`
rows starting at `step_number`, restricted to the columns left of
`step_number`, is set (together with its transposed block) to the minimum
value found in that block.
"""
function make_matrix_steps!(input_matrix, step_number; step_size=2)
    row_range = step_number:(step_number + step_size - 1)
    col_range = 1:(step_number - 1)
    flat_value = minimum(input_matrix[row_range, col_range])
    input_matrix[row_range, col_range] .= flat_value
    input_matrix[col_range, row_range] .= flat_value
end
"""
function add_step_to_matrix(input_matrix, last_components)
Takes a symmetric matrix 'input_matrix' and appends 2 columns and 2 rows such that
resulting geometric object structure is bigger by 1 dimension. 'last_components'
determines which etries in the matrix are used for closing high dimensional simplices.
"""
function add_step_to_matrix(input_matrix, last_components)
matrix_size = size(input_matrix,1)
new_matrix = zeros(Int,matrix_size +2, matrix_size+2)
new_matrix[1:matrix_size,1:matrix_size] .= input_matrix
min_closing_component = findmin(input_matrix[last_components])[1]
new_row1 = range(min_closing_component, length=matrix_size)
new_row2 = range(findmax(new_row1)[1]+1, length=matrix_size)
last = 2
new_matrix[matrix_size+1,1:end-last] = new_row1
new_matrix[matrix_size+2,1:end-last] = new_row2
new_matrix[1:end-last,matrix_size+1] = new_row1
new_matrix[1:end-last,matrix_size+2] = new_row2
# Adjust last components
max_new_matrix = findmax(new_matrix)[1]
new_matrix[last_components].=input_matrix[last_components].+(max_new_matrix-min_closing_component+1)
new_matrix[end-1,end ] = findmax(new_matrix)[1]+1
new_matrix[end, end-1] = findmax(new_matrix)[1]
new_max_val = findmax(new_matrix)[2]
new_component = copy(last_components)
push!(new_component, new_max_val)
push!(new_component, CartesianIndex(new_max_val[2], new_max_val[1]))
return new_matrix, new_component
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | src/TopologyPreprocessing.jl | code | 1230 | module TopologyPreprocessing
# Public API re-exported by the package, grouped by the source file that
# implements each set of names.
# MatrixOrganization.jl
export matrix_poling,
subsample_matrix,
add_random_patch
# MatrixProcessing.jl
# NOTE(review): `group_distances` and `generate_indices` appear twice in this
# export list — harmless, but could be deduplicated upstream.
export shift_to_non_negative,
normalize_to_01,
diagonal_symmetrize,
group_distances,
generate_indices,
reduce_arrs_to_min_len,
increase_arrs_to_max_len,
get_ordered_matrix,
group_distances,
generate_indices,
arr_to_vec,
cartesianInd_to_vec,
sort_indices_by_values,
set_values!
# BettiCurves.jl
export get_bettis,
normalise_bettis,
get_vectorized_bettis,
plot_bettis,
get_bettis_color_palete
# BarCodes.jl
export get_barcodes,
plot_barcodes,
plot_barcodes!,
get_birth_death_ratio,
get_barcode_lifetime,
get_barcode_max_lifetime,
boxplot_birth_death,
boxplot_lifetime,
get_barcode_max_db_ratios
# Implementation files, in dependency order.
include("MatrixOrganization.jl")
include("MatrixProcessing.jl")
include("BettiCurves.jl")
include("Barcodes.jl")
end # module
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | src/VPDistance.jl | code | 4597 | # # ======================
# # Example usage
# using CSV
# using Plots
#
# file_name = "sts_for_VP_test.csv"
# csv_matrix = CSV.read(file_name)[:,2:end]
# almost_not_csv_matrix = Matrix(csv_matrix)
#
# file_name2 = "spikes.csv"
# spikes = load_csv_file_to_array(file_name2)
#
# file_name3 = "th_chunks.csv"
# th_chunks = load_csv_file_to_array(file_name3)
#
# sts = generate_spike_matrix(spikes; th_chunks=th_chunks)
# VPd = [get_selfdist(s; n_chan=32, cost=60., dt=0.01) for s in sts]
#
#
# plot_set = Any[]
# for matrix in VPd
# push!(plot_set, heatmap(matrix, color=:Reds_9, colorbar=false, yflip = true))
# end
#
# plot(plot_set[1], plot_set[2], plot_set[3],
# plot_set[4], plot_set[5], plot_set[6],
# plot_set[7], plot_set[8], plot_set[9],
# layout=(1,9), size=(9*1200,1100), legend=false, colorbar=false)
"""
get_selfdist(st_inp; n_chan=32, cost=60., dt=0.01)
Method for computing pair-wise spike distances from a range of spike trains.
Function copied from Mikolaj SETCOmodel
Inputs:
st_inp: [2 x N] array with spike times and indices of neurons.
N - number of spikes generated, 1st row - index of neuron generating given spikes, 2nd row - spike time.
n_chan - number of neurons (default: 32)
cost - cost parameter for VP spike distance, in ms (default: 60 ms)
dt - simulation timestep, in ms (default: 0.01 ms -> 100 kHz)
Output:
pc - [n_chan x n_chan] matrix containing pairwise VP spikes distances for each pair of neurons.
"""
function get_selfdist(st_inp; n_chan=32, cost=60., dt=0.01)
sts_new = Any[]
for i in 1:n_chan
push!(sts_new, st_inp[2,findall(x->x==i, st_inp[1,:])])
end
# sts = [st_inp[0,st_inp[1,:]==i] for i in 1:n_chan]
pc = zeros(n_chan, n_chan)
for i in 1:n_chan, j in 1:n_chan
pc[i,j] = spkd(sts_new[i], sts_new[j], dt/(cost))
end
return pc
end
# TODO Add test with MATLAB code run for some spike train and compare with
# results from this
"""
spkd(s1, s2, cost)
Fast implementation of victor-purpura spike distance (faster than neo & elephant python packages)
Direct Python port of http://www-users.med.cornell.edu/~jdvicto/pubalgor.htmlself.
The below code was tested against the original implementation and yielded exact results.
All credits go to the authors of the original code.
Code was translated from Frotran to Matlab, from Matlab to Python, from
Python to Julia. It was veryfied with MATLAB code.
Input:
s1,2: pair of vectors of spike times
cost: cost parameter for computing Victor-Purpura spike distance.
(Note: the above need to have the same units!)
Output:
d: VP spike distance.
"""
function spkd(s1, s2, cost)
nspi=length(s1)
nspj=length(s2)
# Why not to use this?
if cost==0
return d=abs(nspi-nspj)
elseif cost==Inf
return d=nspi+nspj
end
scr=zeros(nspi+1, nspj+1)
# initialize margins with cost of adding a spike
scr[:,1]=0:nspi
scr[1,:]=0:nspj
for i = 2:nspi+1, j = 2:nspj+1
component1 = scr[i-1,j]+1
component2 = scr[i,j-1]+1
component3 = scr[i-1,j-1]+cost*abs(s1[i-1]-s2[j-1])
scr[i,j] = min(component1, component2, component3)
end
d=scr[end,end]
return d
end
"""
generate_spike_matrix(spikes; th_chunks=[[0,0]])
Generates matrix of the form [2xN] array with spike times and indices of
neurons. N - number of spikes generated, 1st row - index of neuron generating
given spikes, 2nd row - spike time.
Resulting matrix is time sorted. Resulting matrix may be splint into fragments
by setting 'th_chunks' to a list of fragments in a way such that
'th_chunks[k,1]' is a starting index and 'th_chunks[k,2]' is an ending index of
k'th fragment.
"""
function generate_spike_matrix(spikes; th_chunks=[[0,0]], val_range=20:52)
if th_chunks == [[0,0]]
th_chunks[1,1] = 1
th_chunks[1,2] = size(spikes,1)
end
spike_train_simplified = Any[]
for i = 1:size(th_chunks,1)
#1 Get a chunk of spike trains of interest
syllable_spikes = spikes[th_chunks[i, 1]:th_chunks[i, 2], val_range]
# find all spikes
all_spikes = findall(x->x==1,syllable_spikes)
# convert to [2xN] matrix
total_spikes = length(all_spikes)
sorted_spikes = zeros(Int, 2,total_spikes)
for k = 1:total_spikes
sorted_spikes[1,k] = all_spikes[k][2]
sorted_spikes[2,k] = all_spikes[k][1]
end
push!(spike_train_simplified, sorted_spikes[:,sortperm(sorted_spikes[2,:])])
end
return spike_train_simplified
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | src/archive/AverageBettis.jl | code | 5963 | # Module taken from: https://github.com/alexyarosh/hyperbolic
using Plots
using Eirene
using Ripserer
using Statistics
# compute the persistent betti numbers of the Vietoris-Rips complex given by the distance matrix `matr`
# if mintime, maxtime and numofsteps are specified -- returns a `numofsteps x maxdim` array
# if either of the keyword arguments is not specified or is set to Inf, returns `maxdim` arrays for method=:eirene, or error for :ripser
# function bettis(matr, maxdim; mintime=-Inf, maxtime=Inf, numofsteps=Inf, method=:ripser)
# if (method == :ripser) || (method == :Ripser)
# if VERSION < v"0.7.0"
# error("Ripser requires at least Julia v 0.7.0")
# end
# return bettis_ripser(matr, maxdim, mintime=mintime, maxtime=maxtime, numofsteps=numofsteps)
# elseif (method == :eirene) || (method == :Eirene)
# return bettis_eirene(matr, maxdim, mintime=mintime, maxtime=maxtime, numofsteps=numofsteps)
# else
# error("Method $(method) is not supported. Supported methods are: method=:eirene, method=:ripser")
# end
# end
"""
    bettis_ripser(matr, maxdim; mintime=-Inf, maxtime=Inf, numofsteps=Inf)

Compute discretised Betti curves from the distance matrix `matr` using
Ripserer's `ripser`, up to dimension `maxdim`. The filtration interval
`[mintime, maxtime]` is split into `numofsteps` bins; entry `(s, d)` of the
returned `numofsteps` × `maxdim` matrix counts the dimension-`d` persistence
intervals alive in bin `s`. All three keyword parameters must be finite.
"""
function bettis_ripser(matr, maxdim; mintime=-Inf, maxtime=Inf, numofsteps=Inf)
    # Bug fix: the original compared `maxtime` and `numofsteps` against -Inf
    # although their defaults are +Inf, so missing parameters went undetected.
    if (mintime == -Inf) || (maxtime == Inf) || (numofsteps == Inf)
        error("To use Ripser, specify parameters mintime, maxtime, numofsteps")
    end
    r = ripser(matr, dim_max = maxdim, threshold = maxtime)

    step_length = (maxtime - mintime) / numofsteps
    betts = zeros(numofsteps, maxdim)
    for dim = 1:maxdim
        for intl in r[dim+1]
            # Bug fix: clamp to bin 1 — an interval born exactly at `mintime`
            # produced index 0 and a BoundsError.
            st = max(1, ceil(Int, (intl[1] - mintime) / step_length))
            fin = intl[2] == Inf ? numofsteps :
                  max(1, ceil(Int, (intl[2] - mintime) / step_length))
            betts[st:fin, dim] .+= 1
        end
    end
    return betts
end
#
# # Original function returns 2 different types of betti curves. If no default
# # value parameters is given, it returns vector of matrices. If num of steps is
# # given, then it return matrix maxdim x numsteps.
# function bettis_eirene(matr, maxdim; mintime=-Inf, maxtime=Inf, numofsteps=Inf)
# c = eirene(matr, minrad = mintime, maxrad= maxtime, numrad= numofsteps, maxdim=maxdim)
#
# int_length = maxtime-mintime
# step_length= int_length/numofsteps
#
# if (mintime == -Inf) || (maxtime == Inf) || (numofsteps == Inf)
# # return [betticurve(c, dim=maxdim) for d=1:maxdim]
# return hcat([betticurve(c, dim=d)[:,2] for d=1:maxdim]...)
# end
#
# betts = zeros(numofsteps, maxdim)
# # For every dimension compute betti curve
# for dim=1:maxdim
# bet = betticurve(c, dim=dim)
#
# #for every element in betti curve return betti value if index is positive
# for i=1:size(bet,1)
# b = bet[i,:]
# ind = Int(ceil((b[1]-mintime)/step_length))
# if ind > 0
# betts[ind,dim]=b[2]
# else
# betts[1,dim]=b[2]
# end
# end
# end
# return betts
# end
# average betti numbers over arrs
# assuming arrs is an array of arrays, where each arrs[j] is the same size
"""
    average_bettis(arrs; maxdim=-1)

Element-wise mean over a collection of equally-sized Betti arrays `arrs`
(each `arrs[j]` is a `steps × dims` matrix). With `maxdim == -1` all
dimensions are averaged; otherwise only the first `maxdim` columns. If
`arrs` is itself a multi-column matrix it is returned unchanged.
"""
function average_bettis(arrs; maxdim=-1)
    size(arrs, 2) > 1 && return arrs
    dims_used = maxdim == -1 ? size(arrs[1], 2) : maxdim
    steps = size(arrs[1], 1)
    averaged = zeros(steps, dims_used)
    for col = 1:dims_used, row = 1:steps
        averaged[row, col] = mean(arr[row, col] for arr in arrs)
    end
    return averaged
end
# compute standard deviation of betti numbers over arrays in arrs
# assuming arrs is an array of arrays, where each arrs[j] is the same size
"""
    std_bettis(arrs; maxdim=-1)

Element-wise standard deviation over a collection of equally-sized Betti
arrays `arrs`. Mirrors [`average_bettis`](@ref); if `arrs` is a multi-column
matrix, an all-zero array of the matching shape is returned instead.
"""
function std_bettis(arrs; maxdim=-1)
    dims_used = maxdim == -1 ? size(arrs[1], 2) : maxdim
    steps = size(arrs[1], 1)
    std_bet = zeros(steps, dims_used)
    # For a plain matrix there is nothing to deviate over — keep the zeros.
    size(arrs, 2) > 1 && return std_bet
    for col = 1:dims_used, row = 1:steps
        std_bet[row, col] = std(arr[row, col] for arr in arrs)
    end
    return std_bet
end
# plot average curves at values `xval`, with averages given by `means` and standard deviations given by `std`
"""
    plot_averages(xvals, means, stds; ribbon=true, label="", linestyle=:solid, color=:auto)

Plot the mean curves `means` against `xvals`; with `ribbon=true` a ±`stds`
band is drawn around each curve. Returns the plot handle.
"""
function plot_averages(xvals, means, stds; ribbon=true, label="", linestyle=:solid, color=:auto)
    if !ribbon
        return plot(xvals, means, labels=label, linestyle=linestyle, c=color)
    end
    return plot(xvals, means, ribbon=stds, fillalpha=.3, labels=label,
                linestyle=linestyle, color=color)
end
"""
    plot_averages!(xvals, means, stds; ribbon=true, label="", linestyle=:solid, color=:auto)

In-place variant of [`plot_averages`](@ref): adds the mean curves (and
optional ±`stds` ribbon) to the current plot. Returns the plot handle.
"""
function plot_averages!(xvals, means, stds; ribbon=true, label="", linestyle=:solid, color=:auto)
    if !ribbon
        return plot!(xvals, means, labels=label, linestyle=linestyle, c=color)
    end
    return plot!(xvals, means, ribbon=stds, fillalpha=.3, labels=label,
                 linestyle=linestyle, color=color)
end
"""
    load_bettis(filename)

Load the file at `filename` (via `load`, e.g. JLD/FileIO) and return the
first stored array found in it; returns `nothing` for an empty file.
"""
function load_bettis(filename)
    stored = load(filename)
    for (_, matr) in stored
        return matr
    end
end
# plot average curves at values `xval`, given that the bettis numbers are saved in `file`
"""
    plot_averages(xvals, file::String; dim=1, ribbon=true, label="", linestyle=:solid, color=:auto)

Like `plot_averages(xvals, means, stds; ...)`, but reads the Betti arrays
from `file` and plots dimension `dim` only.
"""
function plot_averages(xvals, file::String; dim=1, ribbon=true, label="", linestyle=:solid, color=:auto)
    matr = load_bettis(file)
    av = average_bettis(matr)[:, dim]
    if !ribbon
        return plot(xvals, av, labels=label, linestyle=linestyle, c=color)
    end
    st = std_bettis(matr)[:, dim]
    return plot(xvals, av, ribbon=st, fillalpha=.3, labels=label,
                linestyle=linestyle, c=color)
end
"""
    plot_averages!(xvals, file::String; dim=1, ribbon=true, label="", linestyle=:solid, color=:auto)

In-place variant of the file-based [`plot_averages`](@ref): adds the curves
for dimension `dim` from `file` to the current plot.
"""
function plot_averages!(xvals, file::String; dim=1, ribbon=true, label="", linestyle=:solid, color=:auto)
    matr = load_bettis(file)
    av = average_bettis(matr)[:, dim]
    if !ribbon
        return plot!(xvals, av, labels=label, linestyle=linestyle, c=color)
    end
    st = std_bettis(matr)[:, dim]
    return plot!(xvals, av, ribbon=st, fillalpha=.3, labels=label,
                 linestyle=linestyle, c=color)
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | src/archive/BettiCurves.jl | code | 48032 | # ==============================
# ======== Tested code ========
using Eirene
using Plots
using StatsPlots
#%%
function get_bettis(results_eirene::Dict, max_dim::Integer; min_dim::Int = 1)
    """
    get_bettis(results_eirene::Dict, max_dim::Integer; min_dim::Int=1)
    Calls Eirene.betticurve for 'dim' in range from `min_dim` up to 'max_dim' and
    stack the resulting Arrays into a vector.
    The returned value is a Vector of Arrays{Float64,2}. Each array is of size
    (n,2), where n is the maximal number of steps taken to compute Betti curve of dimensions
    ranging form `min_dim` to `max_dim`. First column of each array contains numbered steps.
    Second column are the Betti curve values for corresponding step.
    Arrays in returned vector correspond to Betti curve dimensions form range
    `min_dim` up to 'max_dim'.
    """
    bettis = Matrix{Float64}[]
    for d = min_dim:max_dim
        result = betticurve(results_eirene, dim = d)
        # Eirene returns an empty array when no cycles exist in dimension `d`;
        # substitute a zero curve shaped like the previous dimension's curve.
        # NOTE(review): `bettis[d-1]` indexes by dimension, which only matches
        # the vector position when min_dim == 1 — confirm for min_dim == 0.
        if isempty(result) && d > 1
            result = zeros(size(bettis[d-1]))
        end
        push!(bettis, result)
    end
    return bettis
end
# TODO add get_bettis_from_matrix, to wrap C= eirene...; get bettis
#%%
"""
    normalise_bettis(bettis::Vector)

Normalise the step (first) column of every Betti-curve matrix in `bettis` to
the range [0, 1] by dividing by its maximum. Returns a shallow copy: the
matrices themselves are shared with the input and modified in place.
"""
function normalise_bettis(bettis::Vector)
    @debug "Vector version"
    normalised = copy(bettis)   # shallow — inner matrices are shared
    @debug "norm_bettis size :" size(normalised)[1][1]
    n_curves = size(normalised)[1]
    @debug "typeof(max_dim) :" typeof(n_curves[1])
    for curve in normalised
        # Empty curves (dimensions without cycles) are left untouched.
        isempty(curve) && continue
        curve[:, 1] /= findmax(curve[:, 1])[1]
    end
    return normalised
end
#%%
"""
    normalise_bettis(bettis::Array)

Normalise the step (first) column of a single Betti-curve matrix to [0, 1].
Operates on and returns a copy; the input array is left untouched.
"""
function normalise_bettis(bettis::Array)
    @debug "Array version"
    normalised = copy(bettis)
    @debug "norm_bettis size :" size(normalised)
    if !isempty(normalised)
        normalised[:, 1] ./= maximum(normalised[:, 1])
    end
    return normalised
end
#%%
# function vectorize_bettis(betti_curves::Array{Matrix{Float64,2}})
"""
    vectorize_bettis(betti_curves::Vector{Array{Float64,2}})

Stack the value (second) columns of the Betti-curve matrices in
`betti_curves` side by side into a single `steps × curves` matrix.
"""
function vectorize_bettis(betti_curves::Vector{Array{Float64,2}})
    value_columns = [curve[:, 2] for curve in betti_curves]
    return hcat(value_columns...)
end
#%%
@deprecate vectorize_bettis(eirene_results::Dict, maxdim::Integer, mindim::Integer) vectorize_bettis(betti_curves)
# ===
#%%
"""
    get_vectorized_bettis(results_eirene::Dict, max_dim::Integer; min_dim::Int = 1)

Compute Betti curves for dimensions `min_dim:max_dim` from an Eirene result
and return their values as columns of a single matrix (step indices are
dropped; see [`get_bettis`](@ref) and [`vectorize_bettis`](@ref)).
"""
function get_vectorized_bettis(results_eirene::Dict, max_dim::Integer; min_dim::Int = 1)
    curves = get_bettis(results_eirene, max_dim, min_dim = min_dim)
    return vectorize_bettis(curves)
end
# ==
#%%
# Plot a vector of Betti-curve matrices (one per dimension). Line width,
# legend, and axis labels can be overridden through `kwargs`.
# NOTE(review): with `use_edge_density=true` the step columns of `bettis`
# are normalised IN PLACE — the caller's data is mutated.
function plot_bettis(bettis::Vector;
    min_dim::Integer = 1,
    use_edge_density::Bool=true,
    betti_labels::Bool = true,
    default_labels::Bool = true,
    kwargs...)#; plot_size = (width=1200, height=800),
    """
    plot_bettis(bettis; min_dim::Integer=1, betti_labels::Bool=true, default_labels::Bool=true kwargs...)
    Creates a plot for set of betti numbers stored in `bettis` and return the
    handler to the plot.
    'kwargs' are plot parameters
    Some of the possible 'kwargs' are:
    - title::String
    - legend:Bool
    - size::Tuple{T, T} where {T::Number}
    - lw::Integer or linewidth:Integer
    (for more, see plots documentation):
    TODO: min_dim is not included in all_dims variable
    TODO: add change of x label based on x values- so it is either edge density for 0:1 range values or Filtration step otherwise
    """
    max_dim = size(bettis, 1)
    all_dims = 1:max_dim
    if min_dim > max_dim
        throw(DomainError(
            min_dim,
            "\'min_dim\' must be greater that maximal dimension in \'bettis\'",
        ))
    end
    # Pick up a user-supplied line width from kwargs, defaulting to 2.
    lw_pos = findfirst(x -> x == :lw || x == :linewidth, keys(kwargs))
    if !isnothing(lw_pos)
        lw = kwargs[lw_pos]
    else
        lw = 2
    end
    # Create iterator for all loops
    all_iterations = 1:(max_dim) #TODO ths can not be starting from min_dim, because it may be 0
    if use_edge_density
        # Rescale each curve's step axis to [0, 1] (mutates `bettis`).
        for p = all_iterations
            max_step = findmax(bettis[p][:, 1])[1]
            bettis[p][:, 1] ./= max_step
        end
    end
    colors_set = get_bettis_color_palete(min_dim=min_dim)
    plot_ref = plot(; kwargs...)
    # for p = min_dim:(max_dim) #TODO ths can not be starting from min_dim, because it may be 0
    # for p = all_iterations
    for (index, p) in enumerate(min_dim:max_dim)
        args = (lc = colors_set[index], linewidth = lw)
        if betti_labels
            # NOTE(review): label text appears mojibake-encoded ("Ξ²" for the
            # Greek beta) — confirm source-file encoding upstream.
            args = (args..., label = "Ξ²$(p)")
        end
        plot!(bettis[index][:, 1], bettis[index][:, 2]; args...)
    end
    # Honour explicit legend/xlabel/ylabel kwargs; otherwise use defaults.
    legend_pos = findfirst(x -> x == :legend, keys(kwargs))
    if !isnothing(legend_pos)
        plot!(legend = kwargs[legend_pos])
    else
        plot!(legend = betti_labels)
    end
    x_pos = findfirst(x -> x == :xlabel, keys(kwargs))
    y_pos = findfirst(x -> x == :ylabel, keys(kwargs))
    if !isnothing(x_pos)
        xlabel!(kwargs[x_pos])
    elseif default_labels
        xlabel!("Edge density")
    end
    if !isnothing(y_pos)
        ylabel!(kwargs[y_pos])
    elseif default_labels
        ylabel!("Number of cycles")
    end
    # set tlims to integer values
    max_ylim = findmax(ceil.(Int, ylims(plot_ref)))[1]
    if max_ylim <=3
        ylims!((0, 3))
    end
    if use_edge_density
        xlims!((0, 1))
    end
    return plot_ref
end
# Matrix variant: Betti curves are the COLUMNS of `bettis`; the x axis is a
# synthetic 0..1 (normalised) or 0..steps range.
# NOTE(review): `max_dim = size(bettis, 2)-1-min_dim` looks suspicious — it
# subtracts min_dim twice relative to the loop below; confirm upstream.
function plot_bettis(bettis::Array;
    min_dim::Integer = 1,
    use_edge_density::Bool=true,
    betti_labels::Bool = true,
    default_labels::Bool = true,
    normalised=true,
    kwargs...)#; plot_size = (width=1200, height=800),
    """
    plot_bettis(bettis::Array; min_dim::Integer=1, betti_labels::Bool=true, default_labels::Bool=true kwargs...)
    Creates a plot for set of betti numbers stored in `bettis` and return the
    handler to the plot.
    'kwargs' are plot parameters
    Some of the possible 'kwargs' are:
    - title::String
    - legend:Bool
    - size::Tuple{T, T} where {T::Number}
    - lw::Integer or linewidth:Integer
    (for more, see plots documentation):
    TODO: min_dim is not included in all_dims variable
    TODO: add change of x label based on x values- so it is either edge density for 0:1 range values or Filtration step otherwise
    """
    max_dim = size(bettis, 2)-1-min_dim
    all_dims = 1:max_dim
    if min_dim > max_dim
        throw(DomainError(
            min_dim,
            "\'min_dim\' must be greater that maximal dimension in \'bettis\'",
        ))
    end
    total_steps = size(bettis, 1)
    # X axis: normalised edge density or raw step count.
    if normalised
        x_vals = range(0, stop=1, length=total_steps)
    else
        x_vals = range(0, stop=total_steps)
    end
    # Pick up a user-supplied line width from kwargs, defaulting to 2.
    lw_pos = findfirst(x -> x == :lw || x == :linewidth, keys(kwargs))
    if !isnothing(lw_pos)
        lw = kwargs[lw_pos]
    else
        lw = 2
    end
    # if use_edge_density
    #     # for p = 1:(max_dim) #TODO ths can not be starting from min_dim, because it may be 0
    #     for (index, p) in enumerate(min_dim:max_dim)
    #         max_step = findmax(bettis[:, 1])[1]
    #         bettis[p][:, 1] ./=max_step
    #     end
    # end
    colors_set = get_bettis_color_palete(min_dim=min_dim)
    plot_ref = plot(; kwargs...)
    # for p = min_dim:(max_dim) #TODO ths can not be starting from min_dim, because it may be 0
    # for p = 1:(max_dim) #TODO ths can not be starting from min_dim, because it may be 0
    for (index, p) in enumerate(min_dim:max_dim)
        args = (lc = colors_set[index], linewidth = lw)
        if betti_labels
            # NOTE(review): label text appears mojibake-encoded ("Ξ²" for the
            # Greek beta) — confirm source-file encoding upstream.
            args = (args..., label = "Ξ²$(p)")
        end
        plot!(x_vals, bettis[:, index]; args...)
    end
    # Honour explicit legend/xlabel/ylabel kwargs; otherwise use defaults.
    legend_pos = findfirst(x -> x == :legend, keys(kwargs))
    if !isnothing(legend_pos)
        plot!(legend = kwargs[legend_pos])
    else
        plot!(legend = betti_labels)
    end
    x_pos = findfirst(x -> x == :xlabel, keys(kwargs))
    y_pos = findfirst(x -> x == :ylabel, keys(kwargs))
    if !isnothing(x_pos)
        xlabel!(kwargs[x_pos])
    elseif default_labels
        xlabel!("Edge density")
    end
    if !isnothing(y_pos)
        ylabel!(kwargs[y_pos])
    elseif default_labels
        ylabel!("Number of cycles")
    end
    # set tlims to integer values
    max_ylim = findmax(ceil.(Int, ylims(plot_ref)))[1]
    if max_ylim <=3
        ylims!((0, 3))
    end
    if use_edge_density
        xlims!((0, 1))
    end
    return plot_ref
end
# ======= Untested code
# TODO add default kwargs paring function -> parse_kwargs()
# Overlay the Betti curves of a whole collection of result matrices, one
# translucent line per matrix per dimension, with a shared y range.
function plot_all_bettis(bettis_collection;
    min_dim::Integer = 1,
    betti_labels::Bool = true,
    default_labels::Bool = true,
    normalised=true,
    kwargs...)#; plot_size = (width=1200, height=800),
    """
    plot_all_bettis ...
    """
    total_dims = size(bettis_collection[1],2)
    # Pick up a user-supplied line width from kwargs, defaulting to 2.
    lw_pos = findfirst(x -> x == :lw || x == :linewidth, keys(kwargs))
    if !isnothing(lw_pos)
        lw = kwargs[lw_pos]
    else
        lw = 2
    end
    colors_set = get_bettis_color_palete(min_dim=min_dim)
    # Shared y limit so every overlaid curve is on the same scale.
    max_y_val = find_max_betti(bettis_collection)
    plot_ref = plot(; kwargs...)
    for b = 1:total_dims
        args = (lc = colors_set[b], linewidth = lw, alpha=0.12,label=false, ylims=(0,max_y_val))
        for bettis = bettis_collection
            betti_vals = bettis[:,b]
            total_steps = size(bettis, 1)
            x_vals = range(0, stop=1, length=total_steps)
            plot!(x_vals, betti_vals; args...)
        end
        # my_label = "Ξ²$(b)"
        # betti_vals = results_d["bettis_collection"][:hc][end]
        # x_vals = range(0, stop=1, length=size(betti_vals, 1))
        # plot!(x_vals, betti_vals; lc = colors_set[b], linewidth = 1, alpha=0.1,label=my_label, ylims=(0,max_y_val))
    end
    plot!(legend=true)
    # Honour explicit legend/xlabel/ylabel kwargs; otherwise use defaults.
    legend_pos = findfirst(x -> x == :legend, keys(kwargs))
    if !isnothing(legend_pos)
        plot!(legend = kwargs[legend_pos])
    else
        plot!(legend = betti_labels)
    end
    x_pos = findfirst(x -> x == :xlabel, keys(kwargs))
    y_pos = findfirst(x -> x == :ylabel, keys(kwargs))
    if !isnothing(x_pos)
        xlabel!(kwargs[x_pos])
    elseif default_labels
        xlabel!("Edge density")
    end
    if !isnothing(y_pos)
        ylabel!(kwargs[y_pos])
    elseif default_labels
        ylabel!("Number of cycles")
    end
    return plot_ref
end
"""
    find_max_betti(bettis_collection::Array)

Return the largest Betti value occurring anywhere in `bettis_collection`,
which may be a vector of per-dimension Betti arrays or a plain numeric
matrix.
"""
function find_max_betti(bettis_collection::Array)
    # Bug fix: the original tested `typeof(x) == Vector`, which is never true
    # (a concrete type never equals the UnionAll `Vector`), and then crashed
    # on plain matrices because `findmax` was applied to scalar elements.
    max_y_val = 0
    for betti_set in bettis_collection
        local_max = betti_set isa Number ? betti_set : findmax(betti_set)[1]
        if local_max > max_y_val
            max_y_val = local_max
        end
    end
    return max_y_val
end
# ======= Untested code == end
#%%
"""
    printready_plot_bettis(kwargs)

Placeholder for a print-quality wrapper around `plot_bettis`; currently a
no-op that always returns `nothing`.
"""
function printready_plot_bettis(kwargs)
    return nothing
end
#%%
"""
    get_bettis_color_palete(; min_dim = 1, use_set::Integer = 1)

Generate the vector of colours used for Betti plots, so plots are
colour-consistent across the package. `use_set` selects one of two palettes
(1: fixed RGB list plus greys; 2: Plots palettes); `min_dim == 0` prepends
an extra colour for the dimension-0 curve. Throws `ArgumentError` for any
other `use_set` value.
"""
function get_bettis_color_palete(; min_dim = 1, use_set::Integer = 1)
    # TODO what does the number in the function below is used for?
    if use_set == 1
        cur_colors = [Gray(bw) for bw = 0.0:0.025:0.5]
        if min_dim == 0
            colors_set = [RGB(87 / 256, 158 / 256, 0 / 256)]
        else
            colors_set = []
        end
        max_RGB = 256
        colors_set = vcat(
            colors_set,
            [
                RGB(255 / max_RGB, 206 / max_RGB, 0 / max_RGB),
                RGB(248 / max_RGB, 23 / max_RGB, 0 / max_RGB),
                RGB(97 / max_RGB, 169 / max_RGB, 255 / max_RGB),
                RGB(163 / max_RGB, 0 / max_RGB, 185 / max_RGB),
                RGB(33 / max_RGB, 96 / max_RGB, 45 / max_RGB),
                RGB(4 / max_RGB, 0 / max_RGB, 199 / max_RGB),
                RGB(135 / max_RGB, 88 / max_RGB, 0 / max_RGB),
            ],
            cur_colors,
        )
    elseif use_set == 2
        # Bug fix: this branch was written `else` followed by a discarded
        # `use_set == 2` expression, so ANY use_set != 1 fell through here.
        cur_colors = get_color_palette(:auto, 1)
        cur_colors3 = get_color_palette(:lightrainbow, 1)
        cur_colors2 = get_color_palette(:cyclic1, 1)
        if min_dim == 0
            # colors_set = [cur_colors[3], cur_colors[5], [:red], cur_colors[1]] #cur_colors[7],
            colors_set = [cur_colors3[3], cur_colors[5], cur_colors3[end], cur_colors[1]] #cur_colors[7],
        else
            colors_set = [cur_colors[5], cur_colors3[end], cur_colors[1]] #cur_colors[7],
            # colors_set = [cur_colors[5], [:red], cur_colors[1], cur_colors[14]]
        end
        colors_set = vcat(colors_set, [cur_colors2[c] for c in [collect(11:25);]])
    else
        throw(ArgumentError("unsupported use_set value: $use_set"))
    end
    return colors_set
end
# ==============================
# ======= Untested code =======
# using Measures
# using Plots.PlotMeasures
#
# # Source: https://github.com/JuliaPlots/Plots.jl/issues/897
# function setdefaultplottingparams(;upscale=2)
# #8x upscaling in resolution
# fntsm = Plots.font("sans-serif", pointsize=round(12.0*upscale))
# fntlg = Plots.font("sans-serif", pointsize=round(18.0*upscale))
# default(titlefont=fntlg, guidefont=fntlg, tickfont=fntsm, legendfont=fntsm)
# default(size=(800*upscale,600*upscale)) #Plot canvas size
# default(dpi=500) #Only for PyPlot - presently broken
# end
#%%
function plot_bettis_collection(bettis_collection,
    bett_num,
    max_rank;
    step = 1,
    show_plt = true,
    R = 0.0,
    G = 0.4,
    B = 1.0)
    """
    plot_bettis_collection(bettis_collection, bett_num; step=1, show_plt=true, R=0., G=0.4, B=1.0)
    PLots collection of Betti curves of rank 'bett-num'. Every successive plot has
    lower opacity than predecessor. 'step' defines step between collection elements
    that are ploted. By default, plot is displayed after carteation. This can be
    disabled by setting 'show_plt' to false.
    Color of the plot can be set with 'R', 'G', 'B' parameters.
    """
    step > 0 || error("Steps should be natural number!")
    bettis_total = size(bettis_collection, 1)
    # One RGBA row per collection element; alpha decreases down the list so
    # later curves are progressively more transparent.
    colors_set = zeros(Float64, bettis_total, 4)
    colors_set[:, 1] .= R
    colors_set[:, 2] .= G
    colors_set[:, 3] .= B
    max_betti = get_max_betti_from_collection(bettis_collection)
    @info "max_betti" max_betti
    x = 0
    y = bettis_total * 0.1
    va_range = collect(range(bettis_total + x, y, length = bettis_total))
    colors_set[:, 4] .= va_range / findmax(va_range)[1]
    rgba_set = RGBA[]
    for k = 1:size(colors_set, 1)
        push!(
            rgba_set,
            RGBA(colors_set[k, 1], colors_set[k, 2], colors_set[k, 3], colors_set[k, 4]),
        )
    end
    plt_reference = plot(1, title = "Betti curves collection, rank $(bett_num)", label = "")
    for b = 1:step:bettis_total
        betti = bettis_collection[b]
        # Normalise the step axis of each curve to [0, 1].
        x_vals_1 = (1:size(betti[:, bett_num], 1)) / size(betti[:, bett_num], 1)
        plot!(x_vals_1, betti[:, bett_num], lc = rgba_set[b], label = "rank=$(max_rank-b)")
        plot!(ylim = (0, max_betti))
    end
    xlabel!("Normalised steps")
    ylabel!("Number of cycles")
    plot!(legend = true)
    show_plt && display(plt_reference)
    return plt_reference
end
#%%
"""
    get_max_bettis(bettis)

Return the column-wise maxima of `bettis` (the peak value of each Betti
curve) as a 1 × dims array.
"""
function get_max_bettis(bettis)
    peak_values = maximum(bettis, dims = 1)
    return peak_values
end
# TODO change name
# TODO check what for dim is used, change to min dim
"""
    get_max_betti_from_collection(bettis_collection; dim = 1)

Return the largest value found in any array of `bettis_collection`.
NOTE: the `dim` keyword is kept for interface compatibility but is unused.
"""
function get_max_betti_from_collection(bettis_collection; dim = 1)
    overall_max = 0
    for betti in bettis_collection
        overall_max = max(overall_max, findmax(betti)[1])
    end
    return overall_max
end
#%%
"""
    plot_and_save_bettis(bettis, plot_title::String, results_path::String;
                         file_name="", extension=".png", do_save=true,
                         do_normalise=true, min_dim=0, max_dim=3,
                         legend_on=true, kwargs...)

Plot the Betti curves in `bettis` and return the figure handle. With
`do_save`, the figure is also written under `results_path` using `file_name`
(defaulting to the plot title) and `extension`; with `do_normalise`, the
step axis is normalised to [0, 1] first.
"""
function plot_and_save_bettis(bettis,
    plot_title::String,
    results_path::String;
    file_name = "",
    extension = ".png",
    do_save = true,
    do_normalise = true,
    min_dim = 0,
    max_dim = 3,
    legend_on = true,
    kwargs...)
    # Bug fix: the body previously recomputed `bettis` from an undefined
    # variable (`eirene_results`); the curves are already passed in.
    if do_normalise
        bettis = normalise_bettis(bettis)
    end
    plot_ref =
        plot_bettis(bettis, plot_title, legend_on = legend_on, min_dim = min_dim, kwargs...)
    if do_save
        if isempty(file_name)
            file_name = plot_title * extension
        elseif isempty(findall(x -> x == extension[2:end], split(file_name, ".")))
            # Append the extension when the file name does not already carry it.
            file_name *= extension
        end
        save_figure_with_params(
            plot_ref,
            results_path;
            extension = extension,
            prefix = split(file_name, ".")[1],
        )
    end
    return plot_ref
end
# TODO merge functions for getting betti curves
# Original function returns 2 different types of betti curves. If no default
# value parameters is given, it returns vector of matrices. If num of steps is
# given, then it return matrix maxdim x numsteps.
# """
# bettis_eirene(matr, maxdim; mintime=-Inf, maxtime=Inf, numofsteps=Inf, mindim=1)
#
# Takes the `matr` and computes Betti curves up to `maxdim`. Return matrix only
# with betti curve values
#
#
# Function taken from: https://github.com/alexyarosh/hyperbolic
# """
#%%
@deprecate bettis_eirene(matr, maxdim; mintime = -Inf, maxtime = Inf, numofsteps = Inf, mindim = 1) get_bettis(results_eirene, max_dim; min_dim = 1)
#%%
"""
    get_bettis_from_image(img_name, plot_params; file_path="",
                          plot_heatmaps=true, save_heatmaps=false,
                          plot_betti_figrues=true)

Compute Betti curves for the image file `img_name` (loaded from `file_path`).
If the image is not symmetric, the elements below the diagonal are copied over
the elements above the diagonal. Optionally plots (and saves) the ordering
heatmap and the Betti-curve figure; returns the Betti plot handle when it was
created, otherwise `nothing`.
"""
function get_bettis_from_image(img_name,
                                plot_params;
                                file_path = "",
                                plot_heatmaps = true,
                                save_heatmaps = false,
                                plot_betti_figrues = true)
    file_n = split(img_name, ".")[1]
    img1_gray = Gray.(load(file_path * img_name))
    img_size = size(img1_gray)

    C_ij = Float64.(img1_gray)

    if !issymmetric(C_ij)
        img1_gray = symmetrize_image(img1_gray)
        C_ij = Float64.(img1_gray)
    end
    img_size = size(C_ij, 1)

    # ==============================================================================
    # =============================== Ordered matrix ===============================
    if size(C_ij, 1) > 80
        @warn "Running Eirene for big matrix: " img_size
        @warn "Eirene may have trobules with big matrices/images."
    end
    ordered_matrix = get_ordered_matrix(C_ij; assing_same_values = false)

    # ==============================================================================
    # ============================ Persistance homology ============================
    C = eirene(ordered_matrix, maxdim = 3, model = "vr")

    # ==============================================================================
    # ================================ Plot results ================================
    # TODO separate plotting from processing
    if plot_heatmaps
        full_ordered_matrix = get_ordered_matrix(C_ij; assing_same_values = false)
        heat_map2 = plot_square_heatmap(
            full_ordered_matrix,
            10,
            img_size;
            plt_title = "Order matrix of $(file_n)",
            plot_params = plot_params,
        )

        if save_heatmaps
            heatm_details = "_heatmap_$(file_n)"
            # NOTE(review): `heatmaps_path` is not defined in this function —
            # presumably a module-level path set elsewhere; confirm before use.
            savefig(heat_map2, heatmaps_path * "ordering" * heatm_details)
        end
        # fixed: displays were previously unconditional and raised
        # UndefVarError whenever the corresponding plotting flag was false
        display(img1_gray)
        display(heat_map2)
    end

    if plot_betti_figrues
        plot_title = "Betti curves of $(file_n), size=$(img_size) "
        figure_name = "betti_$(file_n)_n$(img_size)"
        # NOTE(review): `figure_path` is not defined in this function — confirm
        # it is a module-level path. (A stray `figure_path,;` comma was removed.)
        ref = plot_and_save_bettis(C,
                                    plot_title,
                                    figure_path;
                                    file_name = figure_name,
                                    plot_params = plot_params,
                                    do_save = false,
                                    extend_title = false,
                                    do_normalise = false,
                                    max_dim = 3,
                                    legend_on = true,
                                    min_dim = 1)
        display(ref)
        return ref
    end
    return nothing
end
# ===============================================
# Legacy wrappers kept for backward compatibility; both forward to the current
# implementations defined above and emit deprecation warnings.
@deprecate get_bettis_from_image2(img_name;file_path = "",plot_heatmaps = true, save_heatmaps = false, plot_betti_figrues = true) get_bettis_from_image(img_name, plot_params; file_path = "", plot_heatmaps = true, save_heatmaps = false, plot_betti_figrues = true)
@deprecate plot_and_save_bettis2(eirene_results, plot_title::String, results_path::String; file_name = "", extension = ".png", data_size::String = "", do_save = true, extend_title = true, do_normalise = true, min_dim = 0, max_dim = 3, legend_on = true) plot_and_save_bettis(bettis, plot_title::String, results_path::String; file_name = "", extension = ".png", do_save = true, do_normalise = true, min_dim = 0, max_dim = 3, legend_on = true, kwargs...)
#%%
"""
    get_and_plot_bettis(eirene_results; max_dim=3, min_dim=1, plot_title="",
                        legend_on=false)

Extract Betti curves from `eirene_results`, normalise them, and return the
handle of the resulting plot.
"""
function get_and_plot_bettis(eirene_results;
                                max_dim = 3,
                                min_dim = 1,
                                plot_title = "",
                                legend_on = false)
    curves = get_bettis(eirene_results, max_dim)
    scaled_curves = normalise_bettis(curves)
    handle = plot_bettis2(scaled_curves, plot_title,
                          legend_on = legend_on, min_dim = min_dim)
    return handle
end
#%%
"""
    lower_ordmat_resolution(ordered_matrix::Array, total_bins::Int)

Reduce the resolution of the values in `ordered_matrix` to `total_bins` bins
and return the rebinned matrix; entries take values in `0:total_bins-1`.
"""
function lower_ordmat_resolution(ordered_matrix::Array, total_bins::Int)
    rebinned = zeros(size(ordered_matrix))
    hi = findmax(ordered_matrix)[1]
    lo = findmin(ordered_matrix)[1]
    step_size = hi ÷ total_bins
    thresholds = lo:step_size:hi

    # successive thresholds overwrite earlier bins, so each entry ends up in
    # the highest bin whose threshold it reaches
    for b = 1:total_bins
        @debug "First step threshold is $(thresholds[b])"
        rebinned[ordered_matrix .>= thresholds[b]] .= b - 1
    end

    @debug "Max_val in new matrix is " findmax(rebinned)
    @debug "And should be " total_bins - 1
    return rebinned
end
#%%
"""
    average_bettis(bettis_matrix::Matrix; up_factor=8)

Average the Betti curves stored in `bettis_matrix` across simulations.
`bettis_matrix` is indexed by simulation (first index) and dimension; because
the number of samples may vary between simulations, every curve is first
upsampled onto a common grid of `(max_samples - 1) * up_factor + 1` points.

Returns `(avg_bettis, std_bettis)`, both of size
`total_upsamples × dimensions`, holding the per-dimension mean and standard
deviation over simulations.
"""
function average_bettis(bettis_matrix::Matrix; up_factor = 8)
    simulations = size(bettis_matrix, 1)
    dimensions = size(bettis_matrix[1], 1)

    # the longest curve over all simulations fixes the common sampling grid
    max_samples = 0
    for k = 1:simulations
        current_len = length(bettis_matrix[k][1][:, 1])
        if max_samples < current_len
            max_samples = current_len
        end
    end

    total_upsamples = (max_samples - 1) * up_factor + 1

    avg_bettis = zeros(total_upsamples, dimensions)
    std_bettis = copy(avg_bettis)
    resampled_bettis = zeros(simulations, total_upsamples, dimensions)

    # resample every Betti curve onto the common grid
    for simulation = 1:simulations, betti = 1:dimensions
        resampled_bettis[simulation, :, betti] =
            upsample_vector2(bettis_matrix[simulation][betti][:, 2], total_upsamples)
    end

    # mean and standard deviation across simulations
    for dimension = 1:dimensions
        avg_bettis[:, dimension] = mean(resampled_bettis[:, :, dimension], dims = 1)
        # fixed: this previously recomputed the mean instead of the std
        std_bettis[:, dimension] = std(resampled_bettis[:, :, dimension], dims = 1)
    end

    return avg_bettis, std_bettis
end
#%%
"""
    upsample_vector2(input_vector, total_upsamples)

Resample `input_vector` to exactly `total_upsamples` points by evaluating a
`Dierckx.Spline1D` interpolant over the normalised range `[0, 1]`.
"""
function upsample_vector2(input_vector, total_upsamples)
    n_segments = size(input_vector, 1) - 1
    grid_in = range(0, 1, length = n_segments + 1)
    interpolant = Spline1D(grid_in, input_vector)

    grid_out = range(0, 1, length = total_upsamples)
    return interpolant(grid_out)
end
#%%
"""
    upsample_vector(input_vector; upsample_factor::Int=8)

Return a resampled copy of `input_vector` with `upsample_factor` times more
samples; intermediate values are interpolated with `Dierckx.Spline1D`.
"""
function upsample_vector(input_vector; upsample_factor::Int = 8)
    n_segments = size(input_vector, 1) - 1
    n_out = upsample_factor * n_segments + 1

    grid_in = range(0, 1, length = n_segments + 1)
    interpolant = Spline1D(grid_in, input_vector)

    grid_out = range(0, 1, length = n_out)
    return interpolant(grid_out)
end
# =========--=======-========-==========-=======-
# From bettis areas
# Area under Betti curve functions
#%%
"""
    get_area_under_betti_curve(betti_curves; do_normalised::Bool=false)

Compute the area under each Betti curve by summing the columns of
`betti_curves` (one curve per column). A vector of per-dimension matrices is
first flattened with `vectorize_bettis`. With `do_normalised` the areas are
divided by the number of steps.
"""
function get_area_under_betti_curve(betti_curves::Union{Matrix{Float64}, Array{Array{Float64,2}}};do_normalised::Bool=false)
    # fewer than two columns means the input is still the per-dimension form
    curves = size(betti_curves, 2) < 2 ? vectorize_bettis(betti_curves) : betti_curves
    areas = sum(curves, dims = 1)
    do_normalised && (areas ./= size(curves, 1))
    return areas
end
# function get_area_under_betti_curve(C, min_dim, max_dim)
# """
# get_area_under_betti_curve(C, min_dim, max_dim)
#
# Computes the Betti curves and returns their area under curve.
# """
# all_bettis = get_bettis(C,max_dim, min_dim=min_dim)
# bettis_vector = hcat([all_bettis[k][:,2] for k=min_dim:max_dim]...)
# # @info sum(bettis_vector, dims=1)
#
#
# total_steps = size(bettis_vector,1)
#
# bettis_area = sum(bettis_vector, dims=1) ./ total_steps
# # @info bettis_area
# return bettis_area
# end
#%%
"""
    get_dataset_bettis_areas(dataset; min_dim::Integer=1, max_dim::Integer=3,
                             return_matrix::Bool=true)

Compute the persistent homology of every matrix in `dataset` and the areas
under its Betti curves for dimensions `min_dim:max_dim`. Returns a stacked
matrix (one row per dataset entry) when `return_matrix` is set, otherwise the
raw vector of per-entry area rows.
"""
function get_dataset_bettis_areas(dataset; min_dim::Integer=1, max_dim::Integer=3, return_matrix::Bool=true)
    areas_vector = Array[]
    for data = dataset
        @info "Computing topology."
        C = eirene(data, maxdim=max_dim,)
        matrix_bettis = get_bettis(C,max_dim, min_dim=min_dim)
        push!(areas_vector, get_area_under_betti_curve(matrix_bettis))
    end
    if return_matrix
        # fixed: previously only the first 10 entries were stacked (`k=1:10`),
        # which dropped data for larger datasets and errored for smaller ones
        return vcat([areas_vector[k] for k in eachindex(areas_vector)]...)
    else
        return areas_vector
    end
end
# struct TopologyData
# min_dim::Integer
# max_dim::Integer
#
# do_normalise::Bool=true
#
# betti_curves
# normed_bettis
# betti_areas::Matrix{Int}
#
# # Constructor for input data
# function TopologyData(my_matrix::Matrix, max_dim::Int; min_dim::Int, do_normalise::Bool=true)
# min_dim = min_dim
# max_dim = max_dim
#
# @info "Computing topology for maxdim =" max_dim
# C = eirene(my_matrix, maxdim=max_dim)
# betti_curves = get_bettis(C, max_dim, min_dim=min_dim)
# normed_bettis = normalise_bettis(betti_curves)
# betti_areas = get_area_under_betti_curve(betti_curves; do_normalised=do_normalise)
# end
# end
#%%
"""
    get_dataset_topology(dataset; min_dim=1, max_dim=3, get_curves=true,
                         get_areas=true, get_persistence_diagrams=true,
                         do_normalise=true)

Build a `TopologyData` summary for every matrix in `dataset` and return the
collected results. The `get_*` keywords are currently not consulted and are
kept for interface compatibility.
"""
function get_dataset_topology(dataset;
                                min_dim::Integer=1,
                                max_dim::Integer=3,
                                get_curves::Bool=true,
                                get_areas::Bool=true,
                                get_persistence_diagrams::Bool=true,
                                do_normalise::Bool=true)
    collected = TopologyData[]
    for mat in dataset
        summary = TopologyData(mat, max_dim, min_dim=min_dim, do_normalise=do_normalise)
        push!(collected, summary)
    end
    return collected
end
#%%
"""
    get_area_boxes(areas_matrix, min_dim::Integer, max_dim::Integer)

Return a boxplot of the areas under the Betti curves, one box per dimension
in `min_dim:max_dim` (one column of `areas_matrix` per dimension).
"""
function get_area_boxes(areas_matrix, min_dim::Integer, max_dim::Integer)
    box_handle = StatsPlots.boxplot()
    palette = get_bettis_color_palete()
    for (col, dim) in enumerate(min_dim:max_dim)
        StatsPlots.boxplot!(box_handle, areas_matrix[:, col],
                            labels = "β$(dim)", color = palette[dim])
    end
    return box_handle
end
"""
    get_bettis_collection_from_matrices(ordered_matrices_collection;
                                        max_dim::Int=3, min_dim::Int=1)

Run Eirene on every matrix in `ordered_matrices_collection` and collect the
reshaped Betti curves for dimensions `min_dim:max_dim`.
"""
function get_bettis_collection_from_matrices(ordered_matrices_collection; max_dim::Int=3, min_dim::Int=1)
    bettis_collection = Array[]
    for matrix = ordered_matrices_collection
        @debug "Computing Bettis..."
        # fixed: the original referenced the undefined name `max_B_dim` instead
        # of the `max_dim` keyword, and ignored `min_dim` entirely
        eirene_geom = eirene(matrix, maxdim = max_dim, model = "vr")
        bettis = reshape_bettis(get_bettis(eirene_geom, max_dim, min_dim = min_dim))
        push!(bettis_collection, bettis)
    end
    return bettis_collection
end
# =========--=======-========-==========-=======-
# Code from Points substitution:
# Compute series of betti curves
# function get_bettis_collection(ordered_matrices_collection; max_B_dim=3)
# bettis_collection = Array[]
#
# for matrix = ordered_matrices_collection
# @debug "Computing Bettis..."
# eirene_geom = eirene(matrix,maxdim=max_B_dim,model="vr")
#
# bettis = reshape_bettis(get_bettis(eirene_geom, max_B_dim))
# push!(bettis_collection, bettis)
# end
#
# return bettis_collection
# end
#
# # Plot series of betti curves with their heatmaps
# function reshape_bettis(bettis)
# bettis_count = size(bettis,1)
# output_betti = zeros(size(bettis[1],1), bettis_count)
#
# for betti = 1:bettis_count
# output_betti[:,betti] = bettis[betti][:,2]
# end
# return output_betti
# end
#
# function get_ord_mat_collection(matrix_collection)
# mat_size = size(matrix_collection[1],1)
# ordered_mat_coll = [zeros(Int, mat_size,mat_size) for k=1:length(matrix_collection)]
#
# size(matrix_collection)
# for matrix = 1:length(matrix_collection)
# ordered_mat_coll[matrix] = Int.(get_ordered_matrix(matrix_collection[matrix]))
# end
# return ordered_mat_coll
# end
#
#
#
#
#
# function print_hmap_with_bettis(ordered_matrices_collection, bettis_collection,
# plot_data::PlottingData)
# num_plots = size(ordered_matrices_collection,1)
# sources = 1:(plot_data.src_pts_number)
# targets = 1:(plot_data.trgt_pts_number)
# plot_set = Any[]
#
# max_betti = get_max_betti_from_collection(bettis_collection;dim=1)
#
# index = 1
# for src = 1:size(sources,1), trgt = 1:size(targets,1)
# # index = src * trgt
# ordered_geom_gr = ordered_matrices_collection[index]
# bettis = bettis_collection[index]
# title_hmap = "trgt:$(targets[trgt])_src:$(sources[src])_r:$(rank(ordered_geom_gr))"
# title_bettis = "gr_trg=$(targets[trgt])_src=$(sources[src])_steps=$(size(bettis,1))"
# push!(plot_set, make_hm_and_betti_plot(ordered_geom_gr, bettis, title_hmap, title_bettis, max_betti))
# index +=1
# end
#
# return plot_set
# end
#
# function make_hm_and_betti_plot(ordered_geom_gr, bettis, title_hmap, title_bettis, max_betti)
# # @debug "src" src
# # @debug "trgt" trgt
# hmap_plot = plot_square_heatmap(ordered_geom_gr, 10,size(ordered_geom_gr,1);plt_title = title_hmap)
# plot!(yflip = true,)
#
# bettis_plot_ref = plot(title=title_bettis);
# max_dim = size(bettis,2)
# for p = 1:max_dim
# x_vals = collect(1:size(bettis[:,1],1))./size(bettis[:,1])
#
# plot!(x_vals, bettis[:,p], label="\\beta_"*string(p));
# plot!(legend=true, )
# end
#
# plot!(ylim=(0,max_betti))
# plot!(xlim=(0,1))
# ylabel!("Number of cycles")
# xlabel!("Steps")
#
# final_plot = plot(hmap_plot, bettis_plot_ref, layout = 2,
# top_margin=2mm,
# left_margin=0mm,
# bottom_margin=2mm,
# size=(600,300))
# display(final_plot)
# return final_plot
# end
#
# # TODO BUG: substitution does not work- all the plots are the same
# function main_generation1()
# mat_size = 6
# dim = 80
# src_pts_number = 1
# trgt_pts_number = 2
# trgt_steps = 0
#
# src_points, trgt_points =
# get_replacing_points(mat_size, src_pts_number, trgt_pts_number)
#
# matrix_collection =
# get_matrix_collection(mat_size, dim, src_points, trgt_points; trgt_step=trgt_steps)
#
# ordered_matrices_collection = get_ord_mat_collection(matrix_collection)
#
# bettis_collection = get_bettis_collection(ordered_matrices_collection)
#
#
# plot_data = PlottingData(mat_size, dim, src_pts_number, trgt_pts_number, src_points, trgt_points, trgt_steps)
# # plot_data = PlottingData2(mat_size , dim, )
#
# plotting_data = print_hmap_with_bettis(ordered_matrices_collection,
# bettis_collection, plot_data)
# end
#
#
# function get_geom_matrix(mat_size, dim)
# # TODO change the matrix collection shape to be a matrix, not a vector
# point_cloud = generate_random_point_cloud(mat_size, dim)
# matrix_collection = generate_geometric_matrix(point_cloud)
# # matrix_collection = get_ordered_matrix(matrix_collection; assing_same_values=true)
#
# return matrix_collection
# end
#
# function get_rand_matrix(mat_size, dim)
# matrix_collection = generate_random_matrix(mat_size)
# matrix_collection = get_ordered_matrix(matrix_collection; assing_same_values=true)
#
# return matrix_collection
# end
#
# # TODO Analyse zero point behaviour
# function get_dist_mat_collection(dist_matrix, src_points, trgt_points, trgt_steps; do_ordering=false)
# dist_matrix_backup = copy(dist_matrix)
# mat_size = size(dist_matrix,1)
# src_points_num = size(src_points,1)
# trgt_points_num = size(trgt_points,1)
# # ordered_mat_coll = [zeros(Int, mat_size,mat_size) for k=1:(src_points_num*trgt_points_num)]
# ordered_mat_coll = Array[]
#
# swapping_iterator = 0
#
# for srcs = 1:src_points_num
# # replacement_row = get_row(dist_matrix, src_points[srcs])
#
# for target = 1:trgt_points_num
# @debug "src:" src_points[srcs]
# @debug "trgt:" trgt_points[target, srcs]
# replacement_row = get_row(dist_matrix_backup, src_points[srcs])
# # dist_matrix_backup .=
# set_row!(dist_matrix_backup, trgt_points[target, srcs], replacement_row)
# # ordered_mat_coll[srcs * target] = copy(dist_matrix_backup)
# if do_ordering
# swap_rows!(dist_matrix_backup, trgt_points[target, srcs], mat_size-swapping_iterator)
# swapping_iterator +=1
# end
# push!(ordered_mat_coll, copy(dist_matrix_backup))
# end
# end
#
# return ordered_mat_coll
# end
#
# function get_ordered_set(distance_matrices_collection)
# result = copy(distance_matrices_collection)
#
# for matrix = 1:size(distance_matrices_collection,1)
# result[matrix] = get_ordered_matrix(distance_matrices_collection[matrix];assing_same_values=true )
# end
# return result
# end
#
# function matrix_analysis(test_data::PlottingData;generation_function=get_geom_matrix)
# mat_size = test_data.mat_size
# dim = test_data.dim
# src_pts_number = test_data.src_pts_number
# trgt_pts_number = test_data.trgt_pts_number
# trgt_steps = 0
#
# src_points, trgt_points = get_replacing_points(mat_size, src_pts_number, trgt_pts_number)
# distance_matrix = generation_function(mat_size, dim)
#
# distance_matrices_collection = get_dist_mat_collection(distance_matrix, src_points, trgt_points, trgt_steps)
# ordered_matrices_collection = get_ordered_set(distance_matrices_collection)
# bettis_collection = get_bettis_collection(ordered_matrices_collection)
#
# plot_data = PlottingData(mat_size, dim, src_pts_number, trgt_pts_number, src_points, trgt_points, trgt_steps)
#
# plots_set = print_hmap_with_bettis(ordered_matrices_collection,
# bettis_collection, plot_data)
#
#
# return distance_matrices_collection, ordered_matrices_collection, bettis_collection, plot_data, plots_set
# end
#%%
# This does not belong here
# Estimate the average / std of the maximal number of cycles (per Betti
# dimension) over `maxsim` randomly generated geometric matrices — and
# optionally random matrices — for a grid of matrix sizes and sampling-space
# dimensions. Returns one result list per matrix type.
function multiscale_matrix_testing(sample_space_dims = 3,
                                    maxsim = 5,
                                    min_B_dim = 1,
                                    max_B_dim = 3,
                                    size_start = 10,
                                    size_step = 5,
                                    size_stop = 50;
                                    do_random = true,
                                    control_saving = false,
                                    perform_eavl = false)
    """
    multiscale_matrix_testing(sample_space_dims = 3,
                                maxsim=5,
                                min_B_dim = 1,
                                max_B_dim = 3,
                                size_start = 10,
                                size_step = 5,
                                size_stop = 80; do_random=true)
    Function for testing the average number of cycles from geometric and random
    matrices.
    It is possible to save intermidiate results- for that, @control_saving must be
    set true.
    Performance of computation of Betti curves can be monitored, if the
    @perform_eavl is set too true. Bydefault, it is set to false.
    """
    # NOTE(review): `num_of_bettis` is computed but never used below.
    num_of_bettis = length(collect(min_B_dim:max_B_dim))

    # random-matrix processing is only supported for a single space dimension
    if length(sample_space_dims) > 1
        @warn "Can not do random processing for multiple dimensions"
        do_random = false
    end

    geom_mat_results = Any[]
    if do_random
        rand_mat_results = Any[]
        result_list = [geom_mat_results, rand_mat_results]
    else
        result_list = [geom_mat_results]
    end

    for sample_space_dim in sample_space_dims
        if !do_random
            @info "Sampling space size: " sample_space_dim
        end

        repetitions = size_start:size_step:size_stop
        for space_samples in repetitions
            @info "Generating data for: " space_samples
            # ==========================================
            # ============= Generate data ==============
            # ===
            # Generate random matrix
            if do_random
                symm_mat_rand = [generate_random_matrix(space_samples) for i = 1:maxsim]
                # NOTE(review): keyword is spelled `assing_same_values` here but
                # `assign_same_values` in the geometric branch below — confirm
                # which spelling `get_ordered_matrix` actually accepts.
                ordered_mat_rand = [
                    get_ordered_matrix(symm_mat_rand[i]; assing_same_values = false)
                    for i = 1:maxsim
                ]
            end

            # ===
            # Generate geometric matrix
            pts_rand = [
                generate_random_point_cloud(sample_space_dim, space_samples)
                for i = 1:maxsim
            ]
            symm_mat_geom = [generate_geometric_matrix(pts_rand[i]') for i = 1:maxsim]
            ordered_mat_geom = [
                get_ordered_matrix(symm_mat_geom[i]; assign_same_values = false)
                for i = 1:maxsim
            ]

            # ======================================================================
            # ========================= Do the Betti analysis ======================
            if do_random
                set = [ordered_mat_geom, ordered_mat_rand]
            else
                set = [ordered_mat_geom]
            end
            for matrix_set in set
                @debug("Betti analysis!")
                # ===
                # Generate bettis
                many_bettis = Array[]
                if perform_eavl
                    # timing / allocation metrics are only collected on demand
                    many_timings = Float64[]
                    many_bytes = Float64[]
                    many_gctime = Float64[]
                    many_memallocs = Base.GC_Diff[]
                end
                for i = 1:maxsim
                    if i % 10 == 0
                        @info "Computing Bettis for: " i
                    end
                    if perform_eavl
                        results, timing, bytes, gctime, memallocs = @timed bettis_eirene(
                            matrix_set[i],
                            max_B_dim,
                            mindim = min_B_dim,
                        )
                        push!(many_bettis, results)
                        push!(many_timings, timing)
                        push!(many_bytes, bytes)
                        push!(many_gctime, gctime)
                        push!(many_memallocs, memallocs)
                    else
                        push!(
                            many_bettis,
                            bettis_eirene(matrix_set[i], max_B_dim, mindim = min_B_dim),
                        )
                    end
                end

                # ===
                # Get maximal number of cycles from each Betti from simulations
                max_cycles = zeros(maxsim, max_B_dim)
                for i = 1:maxsim, betti_dim = 1:max_B_dim
                    @debug("\tFindmax in bettis")
                    max_cycles[i, betti_dim] = findmax(many_bettis[i][:, betti_dim])[1]
                end

                # ===
                # Get the statistics
                avg_cycles = zeros(1, length(min_B_dim:max_B_dim))
                std_cycles = zeros(1, length(min_B_dim:max_B_dim))
                k = 1
                for betti_dim = min_B_dim:max_B_dim
                    avg_cycles[k] = mean(max_cycles[:, betti_dim])
                    std_cycles[k] = std(max_cycles[:, betti_dim])
                    k += 1
                end

                # ===
                # Put results into dictionary
                betti_statistics = Dict()
                if matrix_set == ordered_mat_geom
                    @debug("Saving ordered")
                    betti_statistics["matrix_type"] = "ordered"
                    betti_statistics["space_dim"] = sample_space_dim
                    result_list = geom_mat_results
                else
                    @debug("Saving radom")
                    betti_statistics["matrix_type"] = "random"
                    result_list = rand_mat_results
                end
                betti_statistics["space_samples"] = space_samples
                betti_statistics["simualtions"] = maxsim
                betti_statistics["min_betti_dim"] = min_B_dim
                betti_statistics["max_betti_dim"] = max_B_dim
                betti_statistics["avg_cycles"] = avg_cycles
                betti_statistics["std_cycles"] = std_cycles
                if perform_eavl
                    betti_statistics["many_timings"] = many_timings
                    betti_statistics["many_bytes"] = many_bytes
                    betti_statistics["many_gctime"] = many_gctime
                    betti_statistics["many_memallocs"] = many_memallocs
                end
                push!(result_list, betti_statistics)
            end # matrix type loop
            @debug("===============")

            # optional checkpointing of intermediate results to JLD files
            if control_saving
                if do_random
                    save(
                        "multiscale_matrix_testing_$(space_samples)_$(sample_space_dim).jld",
                        "rand_mat_results",
                        rand_mat_results,
                        "geom_mat_results",
                        geom_mat_results,
                    )
                else
                    save(
                        "multiscale_matrix_testing_dimension_$(space_samples)_$(sample_space_dim).jld",
                        "geom_mat_results",
                        geom_mat_results,
                    )
                end
            end
        end # matrix_size_loop
    end # sampled space dimension

    if do_random
        return geom_mat_results, rand_mat_results
    else
        return geom_mat_results
    end
end
# function plot_betti_numbers(betti_numbers, edge_density, title="Geometric matrix"; stop=0.6)
# """
# Plots Betti curves. The betti numbers should be obtained with the clique-top
# library.
# """
# p1 = plot(edge_density, betti_numbers[:,1], label="beta_0", title=title, legend=:topleft) #, ylims = (0,maxy)
# plot!(edge_density, betti_numbers[:,2], label="beta_1")
# if size(betti_numbers,2)>2
# plot!(edge_density, betti_numbers[:,3], label="beta_2")
# end
#
# return p1
# end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | src/archive/DirOperations.jl | code | 388 | """
check_existing_dir(dir_path::String)
Checks if the directory under `dir_path` exists. If not, throws IOError
"""
# Check that the directory `dir_path` exists; throw an ErrorException otherwise.
# (The docstring above documents the public contract.)
function check_existing_dir(dir_path::String)
    if !isdir(dir_path)
        # fixed: the warning previously interpolated the undefined name
        # `data_path`, which raised UndefVarError before the intended throw
        @warn "Folder $(dir_path) does not exist in current directory."
        @warn "Terminating execution."
        throw(ErrorException("Can not find folder \"" * dir_path * "\"."))
    end
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | src/archive/GeometricSampling.jl | code | 4819 | using LinearAlgebra
# integrate sinh^(n)(ax) from 0 to r
"""
    integrate_sinh(n; r=1.0, a=1.0)

Evaluate the integral of `sinh(a*x)^n` from `0` to `r` via the standard
reduction formula (recursing on `n - 2`).
"""
function integrate_sinh(n; r=1.0, a=1.0)
    n == 0 && return r
    n == 1 && return (cosh(a * r) - 1) / a
    boundary_term = (sinh(a * r)^(n - 1)) * cosh(a * r) / (a * n)
    return boundary_term - (n - 1) / n * integrate_sinh(n - 2, r = r, a = a)
end
"""
    hyp_radial_density(r, d; curvature=-1.0, radius=1.0)

Radial density at radius `r` for sampling in `d`-dimensional hyperbolic
space, normalised over a ball of radius `radius`. The `curvature` keyword is
currently not consulted; unit curvature (`k = 1.0`) is used.
"""
function hyp_radial_density(r, d; curvature=-1.0, radius=1.0)
    k = 1.0
    normalisation = integrate_sinh(d - 1, r = radius, a = radius / k)
    return (sinh(r / k))^(d - 1) / normalisation
end
"""
    euc_radial_density(r, d; radius=1.0)

Radial density at radius `r` of the uniform distribution on a Euclidean
`d`-dimensional ball of radius `radius`.
"""
function euc_radial_density(r, d; radius=1.0)
    return d * r^(d - 1) / radius^d
end
# rejection sampling n=numofpts points from density dens, where the argument lies between 0 and maxval
"""
    rejection_sampling(dens::Function, maxval, numofpts=1)

Draw `numofpts` samples from the density `dens` supported on `[0, maxval]`
by rejection sampling, using the constant envelope `dens(maxval)`.
"""
function rejection_sampling(dens::Function, maxval, numofpts=1)
    envelope = dens(maxval)
    samples = Array{Float64,1}(undef, numofpts)
    accepted = 0
    while accepted < numofpts
        candidate = rand() * maxval
        # accept the candidate with probability dens(candidate) / envelope
        if rand() * envelope < dens(candidate)
            accepted += 1
            samples[accepted] = candidate
        end
    end
    return samples
end
"""
    sample_hyp_rad(d, numofpts=1; curvature=-1.0, radius=1.0)

Sample `numofpts` radii from the hyperbolic radial density and map them into
the Poincaré ball via `tanh(r/2)`.
"""
function sample_hyp_rad(d, numofpts=1; curvature=-1.0, radius=1.0)
    raw_radii = rejection_sampling(
        r -> hyp_radial_density(r, d, curvature = curvature, radius = radius),
        radius, numofpts)
    return map(r -> tanh(r / 2.0), raw_radii)
end
"""
    sample_euc_rad(d, numofpts=1; radius=1.0)

Sample `numofpts` radii from the Euclidean radial density on a `d`-ball of
radius `radius`.
"""
function sample_euc_rad(d, numofpts=1; radius=1.0)
    return rejection_sampling(r -> euc_radial_density(r, d, radius = radius),
                              radius, numofpts)
end
"""
    sample_sph(d, numofpts=1; curvature=1.0)

Sample `numofpts` points uniformly from the upper hemisphere of the unit
`d`-sphere embedded in `R^(d+1)` (last coordinate positive), by rejection of
normalised Gaussian vectors.
"""
function sample_sph(d, numofpts=1; curvature=1.0)
    rands = []
    accepted = 0
    # fixed: the original loop (`i = 0`, `while i <= numofpts`) produced
    # numofpts + 1 samples instead of numofpts
    while accepted < numofpts
        vec = randn(d + 1)
        if vec[d+1] > 0
            push!(rands, normalize(vec))
            accepted += 1
        end
    end
    return rands
end
"""
    sample_sphere(d, numofpts=1)

Sample `numofpts` points uniformly on the unit sphere in `R^d` by
normalising standard Gaussian vectors.
"""
function sample_sphere(d, numofpts=1)
    gaussians = randn(d, numofpts)
    points = []
    for col = 1:numofpts
        push!(points, normalize(gaussians[:, col]))
    end
    return points
end
"""
    sample_hyp(d, numofpts=1; radius=1.0, curvature=-1)

Sample `numofpts` points in the Poincaré-ball model of hyperbolic `d`-space:
uniform directions on the sphere scaled by hyperbolic radii.
"""
function sample_hyp(d, numofpts=1; radius=1.0, curvature=-1)
    directions = sample_sphere(d, numofpts)
    radii = sample_hyp_rad(d, numofpts, radius = radius, curvature = curvature)
    return [radii[i] * directions[i] for i = 1:numofpts]
end
"""
    sample_euc(d, numofpts=1; radius=1.0)

Sample `numofpts` points uniformly from the Euclidean `d`-ball of radius
`radius`: uniform directions scaled by radial samples.
"""
function sample_euc(d, numofpts=1; radius=1.0)
    directions = sample_sphere(d, numofpts)
    radii = sample_euc_rad(d, numofpts, radius = radius)
    return [radii[i] * directions[i] for i = 1:numofpts]
end
"""
    sample_ball(d, numofpts=1; radius=1.0, curvature=0.0)

Sample `numofpts` points from a `d`-dimensional ball of the given
`curvature`: hyperbolic for `curvature < 0`, Euclidean for `curvature == 0`,
spherical (upper hemisphere; `radius` not consulted) for `curvature > 0`.
"""
function sample_ball(d, numofpts=1; radius=1.0, curvature=0.0)
    # directions are drawn up front, mirroring the flat/hyperbolic code paths
    directions = sample_sphere(d, numofpts)
    if curvature < 0
        radii = sample_hyp_rad(d, numofpts, radius = radius, curvature = curvature)
        return [radii[i] * directions[i] for i = 1:numofpts]
    elseif curvature == 0.0
        radii = sample_euc_rad(d, numofpts, radius = radius)
        return [radii[i] * directions[i] for i = 1:numofpts]
    elseif curvature > 0
        return sample_sph(d, numofpts, curvature = curvature)
    end
end
"""
    hyp_distance(pts; curvature=-1.0)

Pairwise hyperbolic distances between points of the Poincaré ball given in
`pts` (a vector of coordinate vectors). Returns a symmetric matrix with zero
diagonal. The `curvature` keyword is currently not consulted.
"""
function hyp_distance(pts; curvature=-1.0)
    n = length(pts)
    distances = zeros(n, n)
    for i = 1:n, j = 1:i-1
        denom_i = 1 - norm(pts[i])^2
        denom_j = 1 - norm(pts[j])^2
        delta = 2 * norm(pts[i] - pts[j])^2 / (denom_i * denom_j)
        distances[i, j] = acosh(1 + delta)
        distances[j, i] = distances[i, j]
    end
    return distances
end
"""
    euc_distance(pts)

Pairwise Euclidean distances between the points in `pts` (a vector of
coordinate vectors). Returns a symmetric matrix with zero diagonal.
"""
function euc_distance(pts)
    n = length(pts)
    distances = zeros(n, n)
    for i = 1:n, j = 1:i-1
        distances[i, j] = norm(pts[i] - pts[j])
        distances[j, i] = distances[i, j]
    end
    return distances
end
"""
    sph_distance(pts; curvature=1.0)

Pairwise great-circle distances (`acos` of the dot product) between the unit
vectors in `pts`. Returns a symmetric matrix with zero diagonal. The
`curvature` keyword is currently not consulted.
"""
function sph_distance(pts; curvature=1.0)
    n = length(pts)
    distances = zeros(n, n)
    for i = 1:n, j = 1:i-1
        distances[i, j] = acos(dot(pts[i], pts[j]))
        distances[j, i] = distances[i, j]
    end
    return distances
end
"""
    distance_matrix(pts; curvature=0.0)

Dispatch to the hyperbolic (`curvature < 0`), Euclidean (`curvature == 0`)
or spherical (`curvature > 0`) pairwise-distance computation for `pts`.
"""
function distance_matrix(pts; curvature=0.0)
    if curvature < 0
        return hyp_distance(pts, curvature = curvature)
    elseif curvature == 0
        return euc_distance(pts)
    elseif curvature > 0
        return sph_distance(pts, curvature = curvature)
    end
end
"""
    to_density(matr)

Convert a symmetric dissimilarity matrix into edge-density values: each
off-diagonal entry is replaced by the rank of its value among the distinct
non-zero entries, divided by `binomial(n, 2)`. Returns a new matrix.
"""
function to_density(matr)
    n = size(matr)[1]
    dens_matr = zeros(size(matr, 1), size(matr, 2))
    ranked_values = sort(setdiff(unique(matr), 0.0))
    n_pairs = binomial(n, 2)
    for i = 1:n, j = i+1:n
        value_rank = findfirst(x -> x == matr[i, j], ranked_values)
        dens_matr[i, j] = value_rank / n_pairs
        dens_matr[j, i] = dens_matr[i, j]
    end
    return dens_matr
end
"""
    to_density!(matr)

In-place version of `to_density`: overwrite `matr` with its edge-density
form and return it.
"""
function to_density!(matr)
    # fixed: the original rebound the local name `matr` instead of mutating
    # the caller's matrix, so the `!` contract was not honoured
    matr .= to_density(matr)
    return matr
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | src/archive/ImageProcessing.jl | code | 20266 | using Statistics
using Combinatorics
# using ImageFiltering
"""
rotate_img_around_center(img, angle = 5pi/6)
Function rotates a single image (or a frame) around the center of the image by
@angle radians.
"""
function rotate_img_around_center(img, angle = 5pi/6)
ΞΈ = angle
rot = recenter(RotMatrix(ΞΈ), [size(img)...] .Γ· 2) # a rotation around the center
x_translation = 0
y_translation = 0
tform = rot β Translation(y_translation, x_translation)
img2 = warp(img, rot, axes(img))
return img2
end
"""
get_local_gradients(video_array, centers, sub_img_size)
Computes the gradients in the subimage, takes the mean of sum of absolute
values of both hotizontal and vertical gradients as a representative of a
subimage.
"""
function get_local_gradients(video_array, centers, sub_img_size)
@debug "Entering get_local_gradients"
half_size = ceil(Int,(sub_img_size-1)/2)
half_range = half_size
h, w, len = get_video_dimension(video_array)
extracted_pixels = zeros(sub_img_size, sub_img_size, len)
@debug "starting frame procesing"
for frame = 1:len
img = video_array[frame]
img_grad = imgradients(img, KernelFactors.ando3, "replicate")
img_grad_abs = map(abs, img_grad[1]) + map(abs, img_grad[2])
for index_x = 1:size(centers,2)
c_x = centers[2, index_x]
for index_y = 1:size(centers,2)
c_y = centers[1, index_y]
sub_img = img_grad_abs[(c_x-half_size):(c_x+half_size),
(c_y-half_size):(c_y+half_size)]
extracted_pixels[index_x, index_y, frame] =mean(sub_img)
end
end
# @debug "Next frame" frame
end
return extracted_pixels
end
"""
get_img_gradient(img)
Computes the gradients in the `img`.
"""
function get_img_gradient(img)
@debug "Entering get_local_gradients"
img_grad = imgradients(img, KernelFactors.ando3, "replicate")
grad_1 = img_grad[1] .+ abs(findmin(img_grad[1])[1])
grad_1 ./= findmax(grad_1)[1]
grad_2 = img_grad[2] .+ abs(findmin(img_grad[2])[1])
grad_2 ./= findmax(grad_2)[1]
grad_sum = grad_1 + grad_2
grad_sum .+= abs(findmin(grad_sum)[1])
grad_sum ./= findmax(grad_sum)[1]
return grad_sum
end
"""
get_local_img_correlations(img, centers, sub_img_size, shift;
with_gradient=false)
Computes the correlation between the subimages and subimages shifted by values
from range -`shift`:`shift` and returns array of size
length(`centers`) x length(`centers`).
Each of the subimage is center around values stored in @centers
"""
function get_local_img_correlations(img, centers, sub_img_size::Int;
with_gradient=false)
# TODO BUG- function is not workig for even numbers
half_size = ceil(Int,(sub_img_size-1)/2)
half_range = half_size#
h, w = size(img)
extracted_pixels = zeros(sub_img_size, sub_img_size)
local_correlation = zeros(size(centers,1))
if with_gradient
img = get_img_gradient(img)
end
position = 1;
for index = centers
c_x = index[1]
c_y = index[2]
c_x_range = (c_x-half_range):(c_x+half_range)
c_y_range = (c_y-half_range):(c_y+half_range)
subimage = img[c_x_range,c_y_range]
center = img[c_x_range, c_y_range]
corelation = center .* subimage
corelation = sum(corelation)
local_correlation[position] += corelation
local_correlation[position] /= 256*(sub_img_size^2)^2
position += 1;
end
return local_correlation
end
"""
get_local_img_correlations(img,centers, masks)
Takes `img` and computes crosscorrelation with set of `masks` around the
`centers`. Crosscorrelation is computed as convolution of the mask and the area
around coordinates stored in `centres`.
"""
function get_local_img_correlations(img, centers, masks::Vector; with_gradient=false)
masks_num = length(masks)
sub_img_size = size(masks[1],1)
# half_size = ceil(Int,(sub_img_size-1)/2)
half_size = (sub_img_size)Γ·2
half_range = half_size
h, w = size(img)
local_correlation = zeros(masks_num, size(centers,1) )
index = centers[1]
masks_num = length(masks)
if with_gradient
img = get_img_gradient(img)
end
# position = 1;
# for index = centers
for pos = 1:length(centers)
# global position
index = centers[pos]
c_x = index[1]
c_y = index[2]
c_x_range = (c_x-half_range):(c_x+half_range)
c_y_range = (c_y-half_range):(c_y+half_range)
center = img[c_x_range, c_y_range]
# mask_pos = 1
# for mask in masks
for mask_pos = 1:length(masks)
mask = masks[mask_pos]
corelation = center .* mask
corelation = sum(corelation)
local_correlation[mask_pos, pos] += corelation
local_correlation[mask_pos, pos] /= (sub_img_size^2)
# local_correlation[position, mask_pos ] = sum(imfilter(center, mask))/(sub_img_size^2)
# mask_pos +=1
end
# position += 1;
end
return local_correlation
end
"""
extract_pixels_from_img(img, indicies_set, video_dim_tuple)
Takes every frame from @video_array and extracts pixels which indicies are in
@indicies_set, thus creating video with only chosen indicies.
"""
function extract_pixels_from_img(img, indicies_set, video_dim_tuple)
rows = size(indicies_set,2)
columns = size(indicies_set,2)
video_length = video_dim_tuple[3]
extracted_pixels = zeros(rows, columns, video_length)
extracted_pixels[:,:,] =
img[indicies_set[1,:],indicies_set[2,:]]
return extracted_pixels
end
"""
    get_local_img_centers(points_per_dim, img_size, shift=0, sub_img_size=0)

Return a vector of `CartesianIndex` centres laid out on a regular square grid
over the smaller dimension of `img_size`, spaced `sub_img_size` apart and
starting half a sub-image away from the border.

Note: `points_per_dim` and `shift` are currently unused and kept only for
call-site compatibility.
"""
function get_local_img_centers(points_per_dim, img_size, shift=0, sub_img_size=0 )
    # TODO Applied teproray solution here, so it works only for local gradients
    first_idx = ceil(Int, sub_img_size / 2)
    smallest_dim = findmin(img_size)[1]
    last_idx = smallest_dim - first_idx
    grid = floor.(Int, range(first_idx, step=sub_img_size, stop=last_idx))
    centers = Any[]
    # Row-major order: vertical coordinate varies slowest.
    for r in grid, c in grid
        push!(centers, CartesianIndex(r, c))
    end
    return centers
end
"""
    get_img_local_centers(img_size, sub_img_size=10; use_square=true, overlap=0)

Tile an image of size `img_size` into square sub-images of side `sub_img_size`
and return a vector with the `CartesianIndex` coordinates of the sub-image
centres in the original image.

By default the smaller dimension of `img_size` is used for both directions,
giving a square grid; set `use_square=false` to treat each dimension
separately (rectangular grid).

`overlap` controls how much neighbouring tiles overlap: an integer number of
pixels smaller than `sub_img_size`, or a value in (0, 1) interpreted as a
fraction of `sub_img_size`.
"""
function get_img_local_centers(img_size, sub_img_size=10; use_square=true,
                               overlap=0)
    @assert sub_img_size <= findmin(img_size)[1] "@sub_img_size is bigger than image!"
    @assert sub_img_size > 0 "sub_img_size must be positive number"
    @assert overlap<=sub_img_size "The overlap is biger than subimage size!"
    @assert overlap >= 0 "overlap must be positive"

    # First centre sits half a tile from the border; for even tile sizes the
    # centre is pushed one pixel further in.
    first_idx = ceil(Int, sub_img_size / 2)
    if 2 * first_idx == sub_img_size
        first_idx += 1
    end

    # A fractional overlap is interpreted relative to the tile size.
    if 0 < overlap < 1
        overlap = floor(Int, sub_img_size * overlap)
    end

    if use_square
        height = findmin(img_size)[1]
        width = findmin(img_size)[1]
    else
        height = img_size[1]
        width = img_size[2]
    end

    stride = sub_img_size - overlap
    rows_range = floor.(Int, range(first_idx, step=stride, stop=height - first_idx))
    cols_range = floor.(Int, range(first_idx, step=stride, stop=width - first_idx))

    # When the image barely fits one tile, fall back to a single centre.
    if isempty(rows_range) && height <= sub_img_size
        rows_range = [first_idx]
    end
    if isempty(cols_range) && width <= sub_img_size
        cols_range = [first_idx]
    end

    centers = CartesianIndex[]
    for r in rows_range, c in cols_range
        push!(centers, CartesianIndex(r, c))
    end
    return centers
end
"""
    vectorize_img(img)

Flatten a 2-D image into a `Vector{Float64}` in row-major order: all pixels of
the first row come first, then the second row, and so on.
"""
function vectorize_img(img)
    n_rows, n_cols = size(img)
    flattened = zeros(n_rows * n_cols)
    next = 1
    for r in 1:n_rows, c in 1:n_cols
        flattened[next] = img[r, c]
        next += 1
    end
    return flattened
end
"""
get_video_mask(points_per_dim, video_dimensions; distribution="uniform", sorted=true, patch_params)
Returns a 2 x @points_per_dim matrix in which indices of a video frame are
stored (row 1 = vertical, row 2 = horizontal). The indices are chosen based
on the @distribution argument: "uniform", "random", or "patch".
Uniform distribution: distance between the points in a given dimension is
even, but vertical distance may differ from horizontal distance; this depends
on the size of a frame.
Random distribution: points are chosen randomly in the ranges 1:frame width
and 1:frame height; the returned values may be sorted in ascending order if
@sorted=true.
Patch distribution: points are placed every `spread` pixels, offset by the
`x`/`y` values from `patch_params`.
"""
function get_video_mask(points_per_dim, video_dimensions;
distribution="uniform", sorted=true, patch_params)
video_height, video_width, = video_dimensions
# Patch placement parameters; x == 1 / y == 1 act as "auto-centre" sentinels.
x=patch_params["x"]
y=patch_params["y"]
spread=patch_params["spread"]
# Clamp x into the frame, warning when the requested value had to be moved.
if x == 1
x = floor(Int,video_width/2)
@warn "Given x is to close to the border. Seeting the value to " x
elseif x < ceil(Int,points_per_dim/2)
x = ceil(Int,points_per_dim/2)
@warn "Given x is to close to the border. Seeting the value to " x
elseif x > video_width-ceil(Int,points_per_dim/2)
x = video_width - ceil(Int,points_per_dim/2)
@warn "Given x is to close to the border. Seeting the value to " x
end
# Same clamping for y.
if y == 1
y = floor(Int,video_height/2)
@warn "Given y is to close to the border. Seeting the value to " y
elseif y < ceil(Int,points_per_dim/2)
y = ceil(Int,points_per_dim/2)
@warn "Given y is to close to the border. Seeting the value to " y
elseif y > video_height-ceil(Int,points_per_dim/2)
y = video_height - ceil(Int,points_per_dim/2)
@warn "Given y is to close to the border. Seeting the value to " y
end
if spread*points_per_dim+x > video_width || spread*points_per_dim+y > video_height
@warn "Given patch parameters might result in indicies exceeding frame size."
end
if distribution == "uniform"
columns = points_per_dim
rows = points_per_dim
# +1 is used so that the number of points returned is as requested
row_step = floor(Int,video_height/rows)
column_step = floor(Int,video_width/columns)
# NOTE(review): the else branches of these ternaries (`row_step` /
# `video_width`) are no-ops; only the `+= 1` side has an effect. Also, after
# incrementing the step, `collect(1:step:dim)` may yield fewer than
# points_per_dim entries, which would make the reshape below throw -- confirm.
(video_height/row_step != points_per_dim) ? row_step+=1 : row_step
(video_width/column_step !=
points_per_dim) ? column_step+=1 : video_width
vertical_indicies = collect(1:row_step:video_height)
horizontal_indicies = collect(1:column_step:video_width)
vertical_indicies = reshape(vertical_indicies, (1,points_per_dim))
horizontal_indicies = reshape(horizontal_indicies, (1,points_per_dim))
indicies_set = [vertical_indicies; horizontal_indicies]
elseif distribution == "random"
vertical_indicies = rand(1:video_height,1, points_per_dim)
horizontal_indicies = rand(1:video_width,1, points_per_dim)
if sorted
vertical_indicies = sort(vertical_indicies[1,:])
horizontal_indicies = sort(horizontal_indicies[1,:])
vertical_indicies = reshape(vertical_indicies, (1,points_per_dim))
horizontal_indicies =
reshape(horizontal_indicies, (1,points_per_dim))
end
indicies_set = [vertical_indicies; horizontal_indicies]
elseif distribution == "patch"
# Evenly spread patch indices offset by (x, y); transposed to 2 x n.
indicies_set = [collect(1:spread:(spread*points_per_dim)).+x collect(1:spread:(spread*points_per_dim)).+y]'
end
# NOTE(review): for any other `distribution` value, `indicies_set` is
# undefined here and this will throw an UndefVarError.
return indicies_set
end
"""
    get_gabor_mask_set(;filt_size=25, σ=[2], theta_rad=[0], λ=[15], γ=[0.2],
                       psi_rad=[0], re_part=true, im_part=false,
                       do_norm=true, do_negative=true)

Return the set of Gabor filters generated for every combination of the given
parameter vectors. Uses `Kernel.gabor()` from ImageFiltering.

# Arguments
- `filt_size=25`: size of the patch in which a filter is created, not the wavelet itself
- `σ=[2]`: widths of the waves, and thus numbers of cycles per unit
- `theta_rad=[0]`: rotations in radians
- `λ=[15]`: numbers of waves within the window -- higher values, fewer waves
- `γ=[0.2]`: aspect ratios; small values give long filters
- `psi_rad=[0]`: phases in radians
- `re_part::Bool`: return the real part of each Gabor filter
- `im_part::Bool`: return the imaginary part of each Gabor filter
- `do_norm`: normalise each returned filter into [0, 1]
- `do_negative`: additionally shift a normalised filter down by 0.5, into [-0.5, 0.5]

If both `re_part` and `im_part` are true, the absolute value of the complex
filter `re + im*im` is returned (normalised into [0, 1] when `do_norm` is set).
"""
function get_gabor_mask_set(;filt_size=25, σ=[2], theta_rad=[0], λ=[15], γ=[0.2],
                            psi_rad=[0], re_part=true, im_part=false,
                            do_norm=true, do_negative=true)
    kernels = Any[]
    # One filter per combination of the parameter vectors, in nesting order
    # σ -> θ -> λ -> γ -> ψ.
    for sigma in σ, θ in theta_rad, lambda in λ, gamma in γ, ψ in psi_rad
        # `kernel` is a (real part, imaginary part) pair of matrices.
        kernel = Kernel.gabor(filt_size, filt_size,
                              sigma,
                              θ,
                              lambda,
                              gamma,
                              ψ)
        if re_part && !im_part
            if do_norm
                # Shift to non-negative values, then scale into [0, 1].
                kernel[1] .+= abs(findmin(kernel[1])[1])
                kernel[1] ./= findmax(kernel[1])[1]
                if do_negative
                    kernel[1] .-= 0.5
                end
            end
            push!(kernels, Gray.(kernel[1]))
        elseif im_part && !re_part
            if do_norm
                kernel[2] .+= abs(findmin(kernel[2])[1])
                kernel[2] ./= findmax(kernel[2])[1]
                if do_negative
                    kernel[2] .-= 0.5
                end
            end
            push!(kernels, Gray.(kernel[2]))
        else
            @debug "Using abs(re(A)+im(A))"
            result = abs.(kernel[1] + kernel[2]im)
            if do_norm
                result .+= abs(findmin(result)[1])
                result ./= findmax(result)[1]
            end
            # BUG FIX: this branch used to push `Gray.()` -- a zero-argument
            # broadcast call that throws a MethodError -- discarding `result`.
            push!(kernels, Gray.(result))
        end
    end
    return kernels
end
"""
    rearrange_filters_arr(im_filter; showing_number=-1, columns=-1)

Tile the filters stored in `im_filter` (an array with elements of type
`Matrix{Gray}`, all of equal size) into one image. `showing_number` limits how
many filters are placed (`-1` places all of them); `columns` fixes the number
of tile columns (`-1` picks a roughly square layout).
"""
function rearrange_filters_arr(im_filter; showing_number=-1, columns=-1)
    tile = size(im_filter[1], 1)
    total = length(im_filter)
    # How many filters to actually draw.
    limit = (showing_number == -1 || showing_number > total) ? total : showing_number
    if columns == -1
        columns = Int(ceil(sqrt(total)))
    end
    rows = Int(ceil(total / columns))
    canvas = zeros(Gray, rows * tile, columns * tile)
    k = 1
    for r in 1:rows
        r0 = (r - 1) * tile + 1
        row_range = r0:(r0 + tile - 1)
        for c in 1:columns
            k > limit && break
            c0 = (c - 1) * tile + 1
            canvas[row_range, c0:(c0 + tile - 1)] = im_filter[k]
            k += 1
        end
        k > limit && break
    end
    return canvas
end
# TODO remove img size from arguments
"""
    get_local_correlations(method, img, img_size, sub_img_size; kwargs...)

Dispatch helper computing local image descriptors with one of several
methods: `"correlation"`, `"gradient_gabor"`, `"gabor"`, `"gradient"`, or
(any other value) a pixel-mask extraction.

# Keywords
- `masks=0`: filter bank used by the gabor-based methods
- `points_per_dim=1`: grid density for centre generation
- `shift=0`: patch shift used by the correlation/gradient methods
- `with_grad=true`: compute on the image gradient where supported
- `overlap=0`: tile overlap for `"gabor"`
- `use_square=true`: force a square tiling for `"gabor"`
- `patch_params=Dict("x"=>1, "y"=>1, "spread"=>1)`: mask-placement parameters
  for the fallback pixel-extraction branch
"""
function get_local_correlations(method::String, img, img_size, sub_img_size;
                                masks = 0,
                                points_per_dim=1,
                                shift=0,
                                with_grad = true,
                                overlap = 0,
                                use_square=true,
                                patch_params=Dict("x"=>1, "y"=>1, "spread"=>1))
    if method == "correlation"
        @debug "local correlation"
        centers = get_local_img_centers(points_per_dim, img_size, shift,
                                        sub_img_size)
        # BUG FIX: the result used to be bound to `extracted_pixels_matrix`,
        # leaving `local_correlations` undefined for this branch.
        local_correlations = get_local_img_correlations(img, centers,
                                                        sub_img_size, shift)
    elseif method == "gradient_gabor"
        @info "local gradient gabor comparison"
        centers = get_img_local_centers(img_size, sub_img_size)
        local_correlations = get_local_img_correlations(img, centers, masks;
                                                        with_gradient = with_grad)
    elseif method == "gabor"
        @debug "local gabor comparison"
        centers = get_img_local_centers(img_size, sub_img_size; overlap = overlap,
                                        use_square=use_square)
        local_correlations = get_local_img_correlations(img, centers, masks)
    elseif method == "gradient"
        @debug "local gradient analysis"
        centers = get_local_img_centers(points_per_dim, img_size, shift,
                                        sub_img_size)
        local_correlations = get_local_img_correlations(img, centers, sub_img_size;
                                                        with_gradient=with_grad)
    else
        # BUG FIX: `patch_params` used to be read from an undefined name
        # (UndefVarError); it is now an explicit keyword argument.
        indicies_set = get_video_mask(points_per_dim, img_size,
                                      distribution="uniform",
                                      patch_params=patch_params)
        local_correlations = extract_pixels_from_img(img, indicies_set,
                                                     img_size)
    end
    return local_correlations
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | src/archive/PairwiseCorrelations.jl | code | 3382 |
# ============================================================================
# exported from MatrixProcessing on 10.09.2020
"""
    get_pairwise_correlation_matrix(vectorized_video, tau_max=25)

Compute the pairwise correlation of the input signals following the formula
from "Clique topology reveals intrinsic geometric structure in neural
correlations" by Chad Giusti et al.

Only the upper triangle is computed; the lower triangle is mirrored from it.
(Numerically the two halves can differ at the 1e-16 level, so the inverse of
the matrix is not exactly the same as that of the non-inverted matrix.)
"""
function get_pairwise_correlation_matrix(vectorized_video, tau_max=25)
    n_signals = size(vectorized_video, 1)
    n_samples = size(vectorized_video, 2)
    pairwise_corr = zeros(n_signals, n_signals)
    lags = -tau_max:1:tau_max          # lag range, given in frames
    for i = 1:n_signals, j = i:n_signals
        # Cross-covariance of the two signals, normalised by signal length.
        ccg = crosscov(vectorized_video[i, :], vectorized_video[j, :], lags) ./ n_samples
        forward = sum(ccg[tau_max+1:end])    # lags 0..tau_max
        backward = sum(ccg[1:tau_max+1])     # lags -tau_max..0
        r_i_r_j = 1                          # firing-rate normaliser, kept at 1
        pairwise_corr[i, j] = max(forward, backward) / (tau_max * r_i_r_j)
        pairwise_corr[j, i] = pairwise_corr[i, j]
    end
    return pairwise_corr
end
"""
get_subimg_correlations(video_array, centers, sub_img_size, shift)
Computes the correlation between the subimages and subimages shifted by values
from range -@shift:@shift and returns array with frames of size
length(@centers) x length(@centers) with the number of frames equal to the
number of frames in @video_array.
Each of the subimages is centred around values stored in @centers.
"""
# TODO Check if this is the same as some of the functions from the ImageProcessing
function get_subimg_correlations(video_array, centers, sub_img_size, shift)
half_size = ceil(Int,(sub_img_size-1)/2)
# The enlarged patch also covers every shifted position.
half_range = half_size + shift
h, w, len = get_video_dimension(video_array)
# NOTE(review): the buffer's first two dims are sub_img_size, but it is
# indexed by (index_x, index_y) over `centers` below; if the number of
# centres differs from sub_img_size this will be out of bounds -- confirm.
extracted_pixels = zeros(sub_img_size, sub_img_size, len)
for frame = 1:len
img = video_array[frame]
for index_x = 1:size(centers,2)
c_x = centers[2, index_x]
for index_y = 1:size(centers,2)
c_y = centers[1, index_y]
# Enlarged patch (covers all shifts) and the fixed central patch.
subimage = img[(c_x-half_range):(c_x+half_range),
(c_y-half_range):(c_y+half_range)]
center = img[(c_x-half_size):(c_x+half_size), (c_y-half_size):(c_y+half_size)]
# Accumulate the correlation of the centre with every shifted window.
for left_boundary = 1:(2*shift+1)
for lower_boundary = 1:(2*shift+1)
corelation = center .* subimage[left_boundary:left_boundary+sub_img_size-1, lower_boundary:lower_boundary+sub_img_size-1]
corelation = sum(corelation)
extracted_pixels[index_x, index_y, frame] += corelation
end
end
# Normalise by pixel depth, patch area, and number of shift positions.
extracted_pixels[index_x, index_y, frame] /= 256*(sub_img_size^2)*(shift*2)^2
end
end
end
return extracted_pixels
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | src/archive/SavingFigures.jl | code | 959 | """
Save figures.
"""
function save_figure(plot_ref, results_path, plot_title; extension=".png" )
    # Compose the destination path and write the figure to disk.
    destination = results_path*plot_title*extension
    savefig(plot_ref, destination)
    @info "File saved under: " destination
end
"""
Save betti curves.
"""
function save_betti(plot_ref, results_path, plot_title)
    # Betti-curve figures share a common filename prefix.
    save_figure(plot_ref, results_path, "betti_curves_" * plot_title)
end
"""
Build a figure filename from the set of parameters given as 'kwargs' and log
it. NOTE(review): the actual `savefig` call is commented out below, so this
function currently only logs the path without saving anything -- and the
commented line references `plot_ref` while the parameter is named
`plot_reference`; confirm whether saving should be re-enabled.
"""
function save_figure_with_params(plot_reference, results_path; extension=".png", prefix="", kwargs... )
plot_title = ""
kwargs_kyes = keys(kwargs)
kwargs_vals = values(kwargs)
total_params = size(collect(kwargs_vals),1)
# Encode every keyword=value pair into the file name.
for param = 1:total_params
plot_title *= "$(kwargs_kyes[param])_$(kwargs_vals[param])_"
end
full_path = results_path*prefix*plot_title*extension
# savefig(plot_ref, full_path)
@info "File saved as: " full_path
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | src/archive/TestingPairwiseCorrelationMatrix.jl | code | 6669 | using Eirene
using Plots
include("clique_top_Julia/CliqueTop.jl")
include("VideoProcessing.jl")
include("MatrixToolbox.jl")
include("Settings.jl")
"""
    testing_pariwise_corr()

Run the pairwise-correlation topology experiment described by the global
`test_params` dictionary: load each selected video, sample pixels according to
the configured index distribution, compute the pairwise correlation matrix and
its persistent homology (CliqueTop and/or Eirene), and optionally plot/save
the results.
"""
function testing_pariwise_corr()
    do_clique_top = test_params["do_clique_top"]
    do_eirene = test_params["do_eirene"]
    save_figures = test_params["save_figures"]
    plot_betti_figrues = test_params["plot_betti_figrues"]
    plot_vectorized_video = test_params["plot_vectorized_video"]
    size_limiter = test_params["size_limiter"]
    ind_distrib = test_params["ind_distrib"]
    videos_set = test_params["videos_set"]
    tau_max_set = test_params["tau_max_set"]
    points_per_dim_set = test_params["points_per_dim_set"]
    shifts_set = test_params["shifts_set"]
    patch_params = test_params["patch_params"]
    video_path = test_params["video_path"]
    results_path = test_params["results_path"]
    videos = test_params["videos_names"]

    # Select the index-distribution strategy.
    do_local_corr = false
    do_local_grad = false
    if ind_distrib == "local_corr"
        shift_set = test_params["shift_set"]
        sub_img_size_set = [9]
        do_local_corr = true
        @debug "Doing local correlation" do_local_corr
    elseif ind_distrib == "local_grad"
        shift_set = [1]
        sub_img_size_set = test_params["sub_img_size_set"]
        do_local_grad = true
        @debug "Doing local gradient" do_local_grad
    else
        shift_set = [1]
        sub_img_size_set = [9]
    end
    @info "Using following distribution: " test_params["ind_distrib"]
    # BUG FIX: these two lines used to reference an undefined `videos_names`
    # variable (UndefVarError); the names are stored in `videos`.
    @debug "All videos are: " videos
    @debug "Video set is : " videos_set

    for video in videos_set
        choice = videos[video]
        @info "Selected video: " choice
        @debug "Path and choice is:" video_path*choice
        video_array = get_video_array_from_file(video_path*choice)
        @info "Array extracted."
        video_dimensions = get_video_dimension(video_array)
        for points_per_dim in points_per_dim_set
            for shift in shift_set, sub_img_size in sub_img_size_set
                # Pixel extraction, per the selected distribution.
                if do_local_corr
                    centers = get_local_centers(points_per_dim, video_dimensions, shift, sub_img_size)
                    extracted_pixels_matrix = get_subimg_correlations(video_array, centers, sub_img_size, shift)
                elseif do_local_grad
                    centers = get_local_centers(points_per_dim, video_dimensions, shift, sub_img_size)
                    extracted_pixels_matrix = get_local_gradients(video_array, centers, sub_img_size)
                else
                    # BUG FIX: `patch_params` must be passed as a keyword
                    # argument (it used to be passed positionally, which does
                    # not match `get_video_mask`'s signature).
                    indicies_set = get_video_mask(points_per_dim, video_dimensions,
                                                  distribution=ind_distrib,
                                                  patch_params=patch_params)
                    extracted_pixels_matrix = extract_pixels_from_video(video_array,
                                                                        indicies_set,
                                                                        video_dimensions)
                end
                @info "Pixels extracted."
                vectorized_video = vectorize_video(extracted_pixels_matrix)
                @info "Video is vectorized, proceeding to Pairwise correlation."
                for tau in tau_max_set
                    ## Compute pairwise correlation
                    C_ij = get_pairwise_correlation_matrix(vectorized_video, tau)
                    # Zero the diagonal (self-correlation is not informative here).
                    for diag_elem in 1:size(C_ij,1)
                        C_ij[diag_elem,diag_elem] = 0
                    end
                    @info "Pairwise correlation finished, proceeding to persistance homology."
                    # Compute persistence homology with CliqueTopJulia.
                    size_limiter = test_params["size_limiter"]
                    @debug "using size limiter = " size_limiter
                    if size_limiter > size(C_ij,1)
                        @warn "Used size limiter is larger than matrix dimension: " size_limiter size(C_ij,1)
                        @warn "Using maximal size instead"
                        size_limiter = size(C_ij,1)
                    end
                    @debug "do_clique_top: " do_clique_top
                    @debug "test_params['do_clique_top']: " test_params["do_clique_top"]
                    if do_clique_top
                        @debug pwd()
                        @time c_ij_betti_num, edge_density, persistence_intervals, unbounded_intervals = compute_clique_topology(C_ij[1:size_limiter, 1:size_limiter], edgeDensity = 0.6)
                    end
                    @debug "do_eirene: " do_eirene
                    if do_eirene
                        C = eirene(C_ij[1:size_limiter, 1:size_limiter],maxdim=3,model="vr")
                    end
                    # ---------------------------------------------------------
                    # Plot results
                    @debug "Proceeding to plotting."
                    if plot_vectorized_video
                        vector_plot_ref = heatmap(vectorized_video, color=:grays)
                        if save_figures
                            name = split(choice, ".")[1]
                            name = "vec_" * name * "_sz$(size_limiter)_p$(points_per_dim)_tau$(tau).png"
                            savefig(vector_plot_ref, name)
                        end #save vec
                    end #plot vec
                    # NOTE(review): `results_cliq_path` below is read from
                    # global scope -- confirm it is defined by the including
                    # script before enabling figure saving.
                    if plot_betti_figrues && do_clique_top
                        betti_numbers = c_ij_betti_num
                        title = "Betti curves for pairwise corr. matrix"
                        p1 = plot_betti_numbers(c_ij_betti_num, edge_density, title);
                        heat_map1 = heatmap(C_ij, color=:lightrainbow, title="Pariwise Correlation matrix, number of points: $(points_per_dim)");
                        betti_plot_clq_ref = plot(p1, heat_map1, layout = (2,1))
                        if save_figures
                            saving_figures(betti_plot_clq_ref, results_cliq_path, choice, points_per_dim, tau, size_limiter)
                        end #save fig
                    end #plot cliq
                    if plot_betti_figrues && do_eirene
                        p1, heat_map1 = plot_eirene_betti_curves(C, C_ij)
                        betti_plot_ei_ref = plot(p1, heat_map1, layout = (2,1))
                        if save_figures
                            saving_figures(betti_plot_ei_ref, results_cliq_path, choice, points_per_dim, tau, size_limiter)
                        end #save fig
                    end #plot eirene
                end #for tau
            end #for shift
        end #for points_per_dim
    end #for video set
    @info "Finished testing"
end #func
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | src/archive/TopologyStructures.jl | code | 9266 | #=
Created: 2020-05-04
Author: Emil Dmitruk
Structures for storing matrices used at different stages of preprocessing for
topological analysis with Eirene library.
=#
# TODO draw a diagram of all structures
# ===
"""
Bundle of boolean switches selecting which processing, plotting and saving
steps the topology pipeline performs.
"""
struct MethodsParams
plot_filters::Bool
plot_heatmaps::Bool
plot_betti_figrues::Bool
# Lower the resolution of the distance / ordered matrix before processing.
lower_dist_mat_resolution::Bool
lower_ord_mat_resolution::Bool
legend_on::Bool
# (sic: "display") -- field name kept as-is for compatibility with callers.
do_dsiplay::Bool
save_gabor_params::Bool
save_subelements::Bool
save_figure::Bool
# Keyword constructor supplying a default for every flag.
function MethodsParams(;plot_filters = false,
plot_heatmaps = true,
plot_betti_figrues = true,
lower_dist_mat_resolution = false,
lower_ord_mat_resolution = false,
legend_on = true,
do_dsiplay = false,
save_gabor_params = false,
save_subelements = false,
save_figure = false)
new(plot_filters, plot_heatmaps, plot_betti_figrues,
lower_dist_mat_resolution, lower_ord_mat_resolution,
legend_on, do_dsiplay, save_gabor_params, save_subelements,
save_figure)
end
end
"""
Parameter set for the image-topology pipeline: matrix binning and size limits,
Betti dimension range, input file location, sub-image tiling, pooling, and the
Gabor filter-bank configuration.
"""
struct ImageTopologyParams
total_bins::Int
max_size_limiter::Int
# Range of Betti dimensions to compute.
min_B_dim::Int
max_B_dim::Int
file_name::String
img_path::String
# (sic: "grouping") -- assign identical values the same order when ordering.
gruping::Bool
sub_img_size::Int
sub_sample_size::Int
pooling_method::String
gabor_set::Int
overlap::Number
gaussian_blurr::Number
# Keyword constructor supplying a default for every parameter.
function ImageTopologyParams(;total_bins = 5, max_size_limiter = 200,
min_B_dim = 1, max_B_dim = 4,
file_name = "", img_path="img/",
gruping = true, sub_img_size = 33, sub_sample_size=2,
pooling_method = "avg_pooling", gabor_set = 4, overlap = 0.0,
gaussian_blurr=0.25)
new(total_bins, max_size_limiter, min_B_dim, max_B_dim,
file_name, img_path, gruping, sub_img_size, sub_sample_size,
pooling_method, gabor_set, overlap, gaussian_blurr)
end
end
"""
    get_params_description(par::ImageTopologyParams)

Build the filename-friendly string encoding the parameter set `par`, used as
a prefix when saving results.
"""
function get_params_description(par::ImageTopologyParams)
    # Gaussian pooling additionally encodes the blur strength (in percent).
    pooling_part = if par.pooling_method == "gauss_pooling"
        "_$(par.pooling_method)$(ceil(Int, par.gaussian_blurr*100))"
    else
        "_$(par.pooling_method)"
    end
    return string("file_$(split(par.file_name,'.')[1])",
                  "_subimgsize$(par.sub_img_size)",
                  "_maxB_$(par.max_B_dim)",
                  "_minB_$(par.min_B_dim)",
                  "_gaborset_$(par.gabor_set)",
                  "_poolingsize_$(par.sub_sample_size)",
                  "$(pooling_part)_",
                  "_overlap_$(Int(par.overlap*10))_")
end
"""
Load the image described by `par`, correlate it with the configured Gabor
filter bank, compute the pairwise (Euclidean) distance matrix of the local
correlations, and return the ordered matrix derived from it (or the raw
distance matrix when `get_distances=true`).
"""
function get_ord_mat_from_img(par::ImageTopologyParams, met_par::MethodsParams; get_distances=false)
@info "Current img_size" par.sub_img_size
@info "Using file: " par.file_name
@debug "Used params: " par.total_bins, par.gabor_set
size_limiter = par.max_size_limiter
# =============================== Get image masks ======================
masks = get_filter_set(par.gabor_set, par.sub_img_size; plot_filters = false,
save_gabor_params = met_par.save_gabor_params)
# =============================== Get image ================================
# NOTE(review): `file_n` is computed but never used below -- confirm.
file_n = split(par.file_name, ".")[1]
loaded_img = load(par.img_path*par.file_name)
img1_gray = Gray.(loaded_img)
img_size = size(img1_gray)
# ================================ Process Image =======================
# get the correlation matrix accordingly to choosen method
local_correlations = get_local_correlations("gabor", img1_gray,img_size,
par.sub_img_size; masks = masks,
overlap=par.overlap)
# ======================== Compute pairwise correlation ================
dist_mat = pairwise(Euclidean(), local_correlations, dims=2)
# =============================== Ordered matrix =======================
size_limiter = size(dist_mat,1)
if size_limiter > par.max_size_limiter
@warn "Restricting matrix size, because matrix is too big"
size_limiter = par.max_size_limiter
end
# ===
# Matrix gupping
met_par.lower_dist_mat_resolution && group_distances!(dist_mat, par.total_bins)
# ===
# Matrix ordering
ordered_matrix = get_ordered_matrix(dist_mat[1:size_limiter,1:size_limiter];
assign_same_values=par.gruping)
if met_par.lower_ord_mat_resolution
ordered_matrix = lower_ordmat_resolution(ordered_matrix, par.total_bins)
end
# NOTE(review): when `get_distances` is set, the full (possibly grouped)
# distance matrix is returned without the size limitation applied above.
if get_distances
return dist_mat
else
return ordered_matrix
end
end
# ===
"""
Container holding an ordered matrix together with every derived variant used
in the topology experiments (reordered, pooled, renumbered-pooled, and the
reordered renumbered-pooled matrices), the collection of all of them, their
descriptions and ranks, plus the `ImageTopologyParams` used to produce them.
"""
struct TopologyMatrixSet
file_name::String
# 2 below are not necessary, if params are includede in this structure
sub_sample_size::Int
pooling_method::String
# Matrices
ordered_matrix::Array
reordered_matrix
# reordered_map_ref
pooled_matrix
pooled_reord_matrix
renum_pooled_orig_matrix
renum_pooled_reord_matrix
reordered_renum_pooled_orig_matrix
matrix_collection
description_vector
ranks_collection
params::ImageTopologyParams
# Builds every matrix variant from `input_matrix` according to `params`.
function TopologyMatrixSet(input_matrix::Array, params::ImageTopologyParams
; description_vector)
# TODO add parameter which describes which methods should be used
# @warn "Using constant in structure definition- TODO: change to variable"
# file_name = images_set[6]
# sub_sample_size = 2
# pooling_method = "avg_pooling"
# ===
file_name = params.file_name
# Reorder so that maximal values lie near the diagonal.
reordered_matrix, reordered_map_ref =
order_max_vals_near_diagonal2(input_matrix; do_final_plot=false, do_all_plots = false);
# Pooled (down-sampled) versions of both the original and reordered matrix.
pooled_matrix = reorganize_matrix(input_matrix; subsamp_size=params.sub_sample_size, method=params.pooling_method, gauss_sigma=params.gaussian_blurr)
pooled_reord_matrix = reorganize_matrix(reordered_matrix; subsamp_size=params.sub_sample_size, method=params.pooling_method, gauss_sigma=params.gaussian_blurr)
# =
# gaussian_blurr = g_blurr
# used_kernel = Kernel.gaussian(gaussian_blurr)
# pooled_matrix = ceil.(Int,imfilter(input_matrix, used_kernel))
# pooled_reord_matrix = ceil.(Int,imfilter(reordered_matrix, used_kernel))
# =
# Renumber pooled matrices into consecutive order values.
renum_pooled_orig_matrix = get_ordered_matrix(pooled_matrix; assign_same_values=true)
renum_pooled_reord_matrix = get_ordered_matrix(pooled_reord_matrix; assign_same_values=true)
reordered_renum_pooled_orig_matrix, reordered_renum_pooled_orig_matrix_ref =
order_max_vals_near_diagonal2(renum_pooled_orig_matrix; do_final_plot=false, do_all_plots = false);
# Collect all variants (order matters: it matches `description_vector`).
matrix_collection = Array[]
push!(matrix_collection, input_matrix)
push!(matrix_collection, reordered_matrix)
push!(matrix_collection, pooled_matrix)
push!(matrix_collection, pooled_reord_matrix)
push!(matrix_collection, renum_pooled_orig_matrix)
push!(matrix_collection, renum_pooled_reord_matrix)
push!(matrix_collection, reordered_renum_pooled_orig_matrix)
# Rank of every variant, in the same order.
ranks_collection = zeros(Int,size(matrix_collection)[1])
for mat = 1: size(matrix_collection)[1]
ranks_collection[mat] = rank(matrix_collection[mat])
end
new(file_name, params.sub_sample_size, params.pooling_method, input_matrix,
reordered_matrix,
# reordered_map_ref,
pooled_matrix, pooled_reord_matrix,
renum_pooled_orig_matrix, renum_pooled_reord_matrix,
reordered_renum_pooled_orig_matrix,
matrix_collection, description_vector, ranks_collection, params)
end
end
# ===
"""
Betti curves computed (via Eirene) for every matrix in a
`TopologyMatrixSet.matrix_collection`, stored in the same order.
NOTE(review): the `min_B_dim`/`max_B_dim` keywords are stored in the struct,
but the computation below uses the dimensions from `top_mat_set.params`
instead -- confirm which is intended.
"""
struct TopologyMatrixBettisSet
min_B_dim::Int
max_B_dim::Int
bettis_collection
function TopologyMatrixBettisSet(top_mat_set::TopologyMatrixSet;min_B_dim=1, max_B_dim=3)
bettis_collection = Any[]
for matrix = top_mat_set.matrix_collection
# ===
# Persistent homology
eirene_geom = eirene(matrix,maxdim=top_mat_set.params.max_B_dim,model="vr")
bett_geom = get_bettis(eirene_geom, top_mat_set.params.max_B_dim, min_dim = top_mat_set.params.min_B_dim)
push!(bettis_collection,bett_geom)
end
new(min_B_dim, max_B_dim, bettis_collection)
end
end
# ===
"""
A square heatmap plot of a matrix together with a textual description of
which matrix variant it shows.
"""
struct MatrixHeatmap
heat_map
matrix_property::String
function MatrixHeatmap(in_array, description)
hmap_len = size(in_array)[1]
# Render a square heatmap titled with the variant description.
ordered_map1 = plot_square_heatmap(in_array, 5, hmap_len; plt_title=description,)
new(ordered_map1, description)
end
end
# ===
"""
Heatmaps for every matrix variant stored in a `TopologyMatrixSet`, plus a
flat vector of the underlying plot objects for easy composition.
"""
struct TopologyMatrixHeatmapsSet
heatmaps::Array{MatrixHeatmap}
heatmap_plots_set
function TopologyMatrixHeatmapsSet(topology_matrix::TopologyMatrixSet)
# One labelled heatmap per matrix variant, in the collection's order.
heatmaps = [MatrixHeatmap(topology_matrix.ordered_matrix,"original"),
MatrixHeatmap(topology_matrix.reordered_matrix,"reordered"),
MatrixHeatmap(topology_matrix.pooled_matrix,"pooled_origi"),
MatrixHeatmap(topology_matrix.pooled_reord_matrix,"pooled_reordered"),
MatrixHeatmap(topology_matrix.renum_pooled_orig_matrix,"renum_pooled_orig"),
MatrixHeatmap(topology_matrix.renum_pooled_reord_matrix,"renum_pooled_reord"),
MatrixHeatmap(topology_matrix.reordered_renum_pooled_orig_matrix,"reordered_renum_pooled_original"),
]
heatmap_plots_set = Any[]
for hmap in heatmaps
push!(heatmap_plots_set,hmap.heat_map)
end
new(heatmaps,heatmap_plots_set)
end
end
# ===
"""
A single Betti-curve plot (dimensions >= `min_B_dim`) with the x-axis
labelled "Steps" and the legend disabled.
"""
struct BettiPlot
betti_plot
function BettiPlot(in_array; min_B_dim=1)
betti_plot = plot_bettis2(in_array, "", legend_on=false, min_dim=min_B_dim);
xlabel!("Steps");
new(betti_plot)
end
end
# ===
"""
Betti-curve plots for every entry of a `TopologyMatrixBettisSet`, stored in
the same order as the underlying Betti collections.
"""
struct TopologyMatrixBettisPlots
betti_plots_set
function TopologyMatrixBettisPlots(bettis_collection::TopologyMatrixBettisSet)
total_bettis = size(bettis_collection.bettis_collection)[1]
betti_plots_set = Any[]
# One BettiPlot per matrix variant's Betti curves.
for bett = 1:total_bettis
betti_plot = BettiPlot(bettis_collection.bettis_collection[bett])
push!(betti_plots_set, betti_plot.betti_plot)
end
new(betti_plots_set)
end
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | src/archive/VideoProcessing.jl | code | 18524 | # import Makie
import VideoIO
using StatsBase
using Images
using ImageFeatures
# using TestImages
# using ImageDraw
using CoordinateTransformations
# using Makie
# using Logging
export get_video_array_from_file,
get_video_dimension,
get_video_mask,
extract_pixels_from_video,
vectorize_video,
get_pairwise_correlation_matrix,
get_average_from_tiles,
rotate_img_around_center,
rotate_vid_around_center,
export_images_to_vid,
rotate_and_save_video,
get_local_correlations,
get_local_centers,
get_local_gradients,
normalize_to_01,
shift_to_non_negative,
plotimg;
"""
get_video_array_from_file(video_name)
Returns array to which video frames are copied. Frames are in grayscale.
Function opens stream, then loads the file and gets all the frames from a
video.
"""
function get_video_array_from_file(video_name)
video_streamer = VideoIO.open(video_name) # candle
video_array = Vector{Array{UInt8}}(undef,0);
# Decode as 8-bit grayscale frames.
video_file = VideoIO.openvideo(video_streamer, target_format=VideoIO.AV_PIX_FMT_GRAY8)
while !eof(video_file)
push!(video_array,reinterpret(UInt8, read(video_file)))
end
# NOTE(review): only `video_file` is closed; `video_streamer` is not closed
# explicitly -- confirm VideoIO releases the underlying stream here.
close(video_file)
return video_array
end
"""
    get_video_dimension(video_array)

Return a named tuple `(video_height, video_width, video_length)` describing
the frame size and frame count of a video stored as a vector of frames.
"""
function get_video_dimension(video_array)
    first_frame = video_array[1]
    return (video_height=size(first_frame, 1),
            video_width=size(first_frame, 2),
            video_length=size(video_array, 1))
end
"""
get_video_mask(points_per_dim, video_dimensions; distribution="uniform", sorted=true, x=1, y=1)
Returns matrix of size @points_per_dim x 2 in which indicies of video frame are
stored. The indicies are chosen based one the @distribution argument. One option
is uniform distribution, the second is random distribution.
Uniform distribution: distance between the points in given dimension is the
even, but vertical distance may be different from horizontal distance between points. This depends on the size of a frame in a image.
Random distribution: the distance between the points is not constant, because
the points are chosen randomly in the ranges 1:horizontal size of frame,
1:vertical size of frame. The returned values may be sorted in ascending order,
if @sorted=true.
"""
function get_video_mask(points_per_dim, video_dimensions;
distribution="uniform", sorted=true, patch_params)
video_height, video_width, = video_dimensions
y=patch_params["y"]
spread=patch_params["spread"]
if x == 1
x = Int64(floor(video_width/2))
@warn "Given x is to close to the border. Seeting the value to " x
elseif x < Int64(ceil(points_per_dim/2))
x = Int64(ceil(points_per_dim/2))
@warn "Given x is to close to the border. Seeting the value to " x
elseif x > video_width-Int64(ceil(points_per_dim/2))
x = video_width - Int64(ceil(points_per_dim/2))
@warn "Given x is to close to the border. Seeting the value to " x
end
if y == 1
y = Int64(floor(video_height/2))
@warn "Given y is to close to the border. Seeting the value to " y
elseif y < Int64(ceil(points_per_dim/2))
y = Int64(ceil(points_per_dim/2))
@warn "Given y is to close to the border. Seeting the value to " y
elseif y > video_height-Int64(ceil(points_per_dim/2))
y = video_height - Int64(ceil(points_per_dim/2))
@warn "Given y is to close to the border. Seeting the value to " y
end
if spread*points_per_dim+x > video_width || spread*points_per_dim+y > video_height
@warn "Given patch parameters might result in indicies exceeding frame size."
end
if distribution == "uniform"
columns = points_per_dim
rows = points_per_dim
# +1 is used so that the number of points returned is as requested
row_step = Int64(floor(video_height/rows))
column_step = Int64(floor(video_width/columns))
(video_height/row_step != points_per_dim) ? row_step+=1 : row_step
(video_width/column_step !=
points_per_dim) ? column_step+=1 : video_width
vertical_indicies = collect(1:row_step:video_height)
horizontal_indicies = collect(1:column_step:video_width)
vertical_indicies = reshape(vertical_indicies, (1,points_per_dim))
horizontal_indicies = reshape(horizontal_indicies, (1,points_per_dim))
indicies_set = [vertical_indicies; horizontal_indicies]
elseif distribution == "random"
vertical_indicies = rand(1:video_height,1, points_per_dim)
horizontal_indicies = rand(1:video_width,1, points_per_dim)
if sorted
vertical_indicies = sort(vertical_indicies[1,:])
horizontal_indicies = sort(horizontal_indicies[1,:])
vertical_indicies = reshape(vertical_indicies, (1,points_per_dim))
horizontal_indicies =
reshape(horizontal_indicies, (1,points_per_dim))
end
indicies_set = [vertical_indicies; horizontal_indicies]
elseif distribution == "patch"
indicies_set = [collect(1:spread:(spread*points_per_dim)).+x collect(1:spread:(spread*points_per_dim)).+y]'
end
return indicies_set
end
"""
extract_pixels_from_video(video_array, indicies_set, video_dim_tuple)
Takes every frame from @video_array and extracts pixels which indicies are in
@indicies_set, thus creating video with only chosen indicies.
"""
function extract_pixels_from_video(video_array, indicies_set, video_dim_tuple)
rows = size(indicies_set,2)
columns = size(indicies_set,2)
video_length = video_dim_tuple[3]
extracted_pixels = zeros(rows, columns, video_length)
for frame_number in 1:video_length
extracted_pixels[:,:,frame_number] =
video_array[frame_number][indicies_set[1,:],indicies_set[2,:]]
end
return extracted_pixels
end
"""
vectorize_video(video)
Rearrenges the video so that set of n frames (2D matrix varying in
time) the set of vectors is returned, in which each row is a pixel, and each
column is the value of the pixel in n-th frame.
"""
function vectorize_video(video)
video_length = size(video, 3)
rows = size(video,1)
columns = size(video,2)
number_of_vectors = rows*columns
vectorized_video = zeros(number_of_vectors, video_length)
index = 1;
for row=1:rows
for column=1:columns
vectorized_video[index,:] = video[row, column,:];
index = index+1;
end
end
return vectorized_video
end
"""
get_pairwise_correlation_matrix(vectorized_video, tau_max=25)
Computes pairwise correlation of the input signals accordingly to the formula
presented in paper "Clique topology reveals intrinsic geometric structure
in neural correlations" by Chad Giusti et al.
The Computations are done only for upper half of the matrix, the lower half is
a copy of upper half. Computation-wise the difference is at level of 1e-16, but
this causes that inverse is not the same as non-inverse matrix.
"""
function get_pairwise_correlation_matrix(vectorized_video, tau_max=25)
number_of_signals = size(vectorized_video,1)
T = size(vectorized_video,2)
C_ij = zeros(number_of_signals,number_of_signals);
# log_C_ij = zeros(number_of_signals,number_of_signals);
# this is given in frames
lags = -tau_max:1:tau_max
for row=1:number_of_signals
for column=row:number_of_signals
signal_ij = vectorized_video[row,:];
signal_ji = vectorized_video[column,:];
# cross_corelation
ccg_ij = crosscov(signal_ij, signal_ji, lags);
ccg_ij = ccg_ij ./ T;
A = sum(ccg_ij[tau_max+1:end]);
B = sum(ccg_ij[1:tau_max+1]);
r_i_r_j = 1;
C_ij[row, column] = max(A, B)/(tau_max*r_i_r_j);
C_ij[column, row] = C_ij[row, column]
# log_C_i_j[row, column] = log10(abs(C_ij[row, column]));
end
end
return C_ij
end
"""
get_average_from_tiles(extracted_pixels_matrix, N)
Fnction takes a 3D array in which video is stored and splits every frame into
non overlaping tiles of size NxN. If size of @extracted_pixels_matrix is not
square of N, then only N^2 x N^2 matrix will be used for averaging.
"""
function get_average_from_tiles(extracted_pixels_matrix, N)
# N = size(extracted_pixels,1)
num_frames = size(extracted_pixels_matrix,3)
mask_matrix = ones(N, N)
result_matrix = zeros(N, N, num_frames)
col_index = 1
row_index = 1
for frame = 1:num_frames
for col = 1:N:N^2
for row = 1:N:N^2
result_matrix[mod(col,N), mod(row,N), frame] =
dot(extracted_pixels_matrix[col:(col+N-1),
row:(row+N-1), frame], mask_matrix) ./N^2
row_index += 1
end
col_index += 1
end
end
return result_matrix
end
"""
rotate_img_around_center(img, angle = 5pi/6)
Function rotates a single image (or a frame) around the center of the image by
@angle radians.
"""
function rotate_img_around_center(img, angle = 5pi/6)
ΞΈ = angle
rot = recenter(RotMatrix(ΞΈ), [size(img)...] .Γ· 2) # a rotation around the center
x_translation = 0
y_translation = 0
tform = rot β Translation(y_translation, x_translation)
img2 = warp(img, rot, axes(img))
return img2
end
"""
rotate_vid_around_center(img, rotation = 5pi/6)
Function rotates a video around the center of the image by @rotation radians and
the outout into matrix.
"""
function rotate_vid_around_center(src_vid_path,src_vid_name; rotation = 5pi/6)
video_array = []
video_src_strm = VideoIO.open(src_vid_path*src_vid_name)
video_src = VideoIO.openvideo(video_src_strm,
target_format=VideoIO.AV_PIX_FMT_GRAY8)
while !eof(video_src)
img = read(video_src)
img = rotate_img_around_center(img, rotation)
push!(video_array,img)
end
close(video_src)
return video_array
end
"""
export_images_to_vid(video_array, dest_file)
Exports set of images stored in @video_array to the dest_file.
"""
function export_images_to_vid(video_array, dest_file)
@debug "Exporting set of images to file"
fname = dest_file
video_dimensions = get_video_dimension(video_array)
h = video_dimensions.video_height
w = video_dimensions.video_width
nframes = video_dimensions.video_length
overwrite=true
fps=30
options = ``
ow = overwrite ? `-y` : `-n`
open(`ffmpeg
-loglevel warning
$ow
-f rawvideo
-pix_fmt rgb24
-s:v $(h)x$(w)
-r $fps
-i pipe:0
$options
-vf "transpose=0"
-pix_fmt yuv420p
$fname`, "w") do out
for i = 1:nframes
write(out, convert.(RGB{N0f8}, clamp01.(video_array[i])))
end
end
@debug "Video was saved"
end
"""
rotate_and_save_video(src_vid_path, src_vid_name, dest_vid_name;
rotation=5pi/6)
Fuction opens the @src_vid_name file, collects all the frames and then rotates
the frame aroung the center and saves new video as @dest_vid_name at
@src_vid_path.
Function was tested for following extensions;
.mov
A solution for writing to a video file was taken from:
https://discourse.julialang.org/t/creating-a-video-from-a-stack-of-images/646/7
"""
function rotate_and_save_video(src_vid_path, src_vid_name, dest_vid_name, rotation=5pi/6)
@debug src_vid_path src_vid_name dest_vid_name
if !isfile(src_vid_path*src_vid_name)
@warn "Source file at given path does not exist. Please give another name."
return
elseif isfile(src_vid_path*dest_vid_name)
@warn "File with destination video name at src_video_path already exists. Please give another name."
return
end
video_array = rotate_vid_around_ceter(src_vid_path, src_vid_name)
@debug "Video was rotated"
export_images_to_exist_vid(video_array, src_vid_path*dest_vid_name)
@info "The file was created:\n $fname"
end
"""
get_local_correlations(video_array, centers, sub_img_size, shift)
Computes the correlation between the subimages and subimages shifted by values
from range -@shift:@shift and returns array with frames of size
length(@centers) x length(@centers) with the number of frames equal to the
number of rames in @video_array.
Each of the subimage is center around values stored in @centers
"""
function get_local_correlations(video_array, centers, sub_img_size, shift)
half_size = ceil(Int,(sub_img_size-1)/2)
half_range = half_size + shift
h, w, len = get_video_dimension(video_array)
extracted_pixels = zeros(sub_img_size, sub_img_size, len)
for frame = 1:len
img = video_array[frame]
for index_x = 1:size(centers,2)
c_x = centers[2, index_x]
for index_y = 1:size(centers,2)
c_y = centers[1, index_y]
subimage = img[(c_x-half_range):(c_x+half_range),
(c_y-half_range):(c_y+half_range)]
center = img[(c_x-half_size):(c_x+half_size), (c_y-half_size):(c_y+half_size)]
for left_boundary = 1:(2*shift+1)
for lower_boundary = 1:(2*shift+1)
corelation = center .* subimage[left_boundary:left_boundary+sub_img_size-1, lower_boundary:lower_boundary+sub_img_size-1]
corelation = sum(corelation)
extracted_pixels[index_x, index_y, frame] += corelation
end
end
extracted_pixels[index_x, index_y, frame] /= 256*(sub_img_size^2)*(shift*2)^2
end
end
end
return extracted_pixels
end
"""
    get_local_centers(points_per_dim, video_dimensions, shift=0, sub_img_size=0)

Return a 2 x `points_per_dim` matrix of subimage-centre coordinates evenly
spread along the smallest video dimension, keeping a margin of half a
subimage (`sub_img_size/2`) from the borders. The same set of coordinates is
used for both dimensions.
"""
function get_local_centers(points_per_dim, video_dimensions, shift=0, sub_img_size=0 )
    # Fix: this comment line previously began with `/#`, a stray slash that
    # made the whole file fail to parse.
    # TODO Applied temporary solution here, so it works only for local gradients
    # start = 0
    # (points_per_dim>shift) ? start_ind = ceil(Int, points_per_dim/2)+ shift :
    #                                 start=shift
    start_ind = ceil(Int, sub_img_size/2)
    min_va, = findmin(video_dimensions)
    last_ind = min_va - start_ind

    set = broadcast(floor, Int, range(start_ind, stop=last_ind, length=points_per_dim))
    centers = [set set]'
    return centers
end
"""
get_local_gradients(video_array, centers, sub_img_size)
Computes the gradients in the subimage, takes the mean of sum of absolute
values of both hotizontal and vertical gradients as a representative of a
subimage.
"""
function get_local_gradients(video_array, centers, sub_img_size)
@debug "Entering get_local_gradients"
half_size = ceil(Int,(sub_img_size-1)/2)
half_range = half_size
h, w, len = get_video_dimension(video_array)
extracted_pixels = zeros(sub_img_size, sub_img_size, len)
@debug "starting frame procesing"
for frame = 1:len
img = video_array[frame]
img_grad = imgradients(img, KernelFactors.ando3, "replicate")
img_grad_abs = map(abs, img_grad[1]) + map(abs, img_grad[2])
for index_x = 1:size(centers,2)
c_x = centers[2, index_x]
for index_y = 1:size(centers,2)
c_y = centers[1, index_y]
sub_img = img_grad_abs[(c_x-half_size):(c_x+half_size),
(c_y-half_size):(c_y+half_size)]
extracted_pixels[index_x, index_y, frame] =mean(sub_img)
end
end
# @debug "Next frame" frame
end
return extracted_pixels
end
"""
normalize_to_01(matrix, norm_factor=256)
Returns a matrix which values are in range [0, 1]. If the values in the input
matrix are below 0, then they are shifted so that only positive numbers are in
the matrix. If the values in the matrix of shifted matrix excced value of the
@norm_factor parameter, then the matrix is normalized to the maximal value from
the matrix.
"""
function normalize_to_01(matrix, norm_factor=256)
normalized_matrix = copy(matrix)
min_val = findmin(normalized_matrix)[1]
max_val = findmax(normalized_matrix)[1]
if min_val < 0
normalized_matrix .-= min_val
end
if max_val > norm_factor
@warn "Values normalized to maximal value, not notmalization factor."
normalized_matrix = normalized_matrix./max_val
else
normalized_matrix = normalized_matrix./norm_factor
end
return normalized_matrix
end
"""
shift_to_non_negative(matrix)
Returns a matrix in which values are non-negative. This is done by finding the
minimal value in the input matrix and adding its absolute value to the matix
elements.
"""
function shift_to_non_negative(matrix)
min_val = findmin(matrix)[1]
if min_val < 0
return matrix .-= min_val
else
return matrix
end
end
"""
plotimg(matrix_to_plot)
Display an image as a plot. The values from the input matrix are adjusted to the
value range of [0, 1].
If @cut_off is true then the matrix values above 256 are set to 256 and then all
values are normalized to the value 256. If @cut_off is false, then values are
normalized to maximal value.
"""
function plotimg(matrix_to_plot, cut_off=false)
matrix_type = typeof(matrix_to_plot)
min_val = findmin(matrix_to_plot)[1]
int_types_arr = [Matrix{UInt8}; Matrix{UInt16}; Matrix{UInt32};
Matrix{UInt64}; Matrix{UInt128}; Matrix{Int8};
Matrix{Int16}; Matrix{Int32}; Matrix{Int64};
Matrix{Int128}]
float_types_arr = [Matrix{Float16} Matrix{Float32} Matrix{Float64}]
if min_val<0
matrix_to_plot = shift_to_non_negative(matrix_to_plot)
end
max_val = findmax(matrix_to_plot)[1]
if max_val > 256 && cut_off
matrix_to_plot[findall(x -> x>256, matrix_to_plot)] = 256
end
if in(matrix_type, int_types_arr)
matrix_to_plot = normalize_to_01(matrix_to_plot)
elseif in(matrix_type, float_types_arr)
matrix_to_plot = normalize_to_01(matrix_to_plot, max_val)
end
return colorview(Gray, matrix_to_plot)
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | test/bettiCurves_tests.jl | code | 12072 | using TopologyPreprocessing
using Test
using Eirene
#%%
@testset "BettiCurves.jl" begin
sample_distance_matrix1 = [0 1 25 4 5 9 13 17;
1 0 2 26 6 10 14 18;
25 2 0 3 7 11 15 19;
4 26 3 0 8 12 16 20;
5 6 7 8 0 21 27 24;
9 10 11 12 21 0 22 28;
13 14 15 16 27 22 0 23;
17 18 19 20 24 28 23 0 ]
sample_distance_matrix2 = [1 1 41 4 5 9 13 17 25 33;
1 1 2 42 6 10 14 18 26 34;
41 2 1 3 7 11 15 19 27 35;
4 42 3 1 8 12 16 20 28 36;
5 6 7 8 1 21 43 24 29 37;
9 10 11 12 21 1 22 44 30 38;
13 14 15 16 43 22 1 23 31 39;
17 18 19 20 24 44 23 1 32 40;
25 26 27 28 29 30 31 32 1 45;
33 34 35 36 37 38 39 40 45 1;]
# get_bettis
for matrix = [sample_distance_matrix1, sample_distance_matrix2]
for max_B_dim = 1:4
eirene_results = eirene(matrix, model="vr", maxdim = max_B_dim)
all_bettis = get_bettis(eirene_results, max_B_dim)
@test length(all_bettis) == max_B_dim
@test all_bettis isa Vector{Array{Float64,2}}
end
for max_B_dim = 1:4, min_B_dim = 1:3
if min_B_dim > max_B_dim
@debug "Continue at " min_B_dim, max_B_dim
continue
end
eirene_results = eirene(matrix, model="vr", maxdim = max_B_dim)
all_bettis = get_bettis(eirene_results, max_B_dim, min_dim=min_B_dim)
@test length(all_bettis) == max_B_dim - (min_B_dim-1)
@test all_bettis isa Vector{Array{Float64,2}}
end
end
# normalise_bettis
# as betticurve results
for matrix = [sample_distance_matrix1, sample_distance_matrix2]
for max_B_dim = 1:4, min_B_dim = 1:3
if min_B_dim > max_B_dim
@debug "Continue at " min_B_dim, max_B_dim
continue
end
eirene_results = eirene(matrix, model="vr", maxdim = max_B_dim)
betti_result = betticurve(eirene_results, dim=max_B_dim)
normed_all_bettis = normalise_bettis(betti_result)
@test typeof(normed_all_bettis) == typeof(betti_result)
@test length(normed_all_bettis) != max_B_dim
@test size(normed_all_bettis) == size(betti_result)
@test normed_all_bettis isa Array{Float64,2}
# Betti values are unchanged:
@test normed_all_bettis[:,2] == betti_result[:,2]
# Max val is 1
@test findmax(normed_all_bettis[:,1])[1] == 1.
end
end
# as get_bettis results
for matrix = [sample_distance_matrix1, sample_distance_matrix2]
for max_B_dim = 1:4, min_B_dim = 1:3
if min_B_dim > max_B_dim
@debug "Continue at " min_B_dim, max_B_dim
continue
end
eirene_results = eirene(matrix, model="vr", maxdim = max_B_dim)
all_bettis = get_bettis(eirene_results, max_B_dim)
normed_all_bettis = normalise_bettis(all_bettis)
@test typeof(normed_all_bettis) == typeof(all_bettis)
@test length(normed_all_bettis) == max_B_dim
@test normed_all_bettis isa Vector{Array{Float64,2}}
# Betti values are unchanged:
@test normed_all_bettis[max_B_dim][:,2] == all_bettis[max_B_dim][:,2]
# Max val is 1
@test findmax(normed_all_bettis[max_B_dim][:,1])[1] == 1.
end
end
# get_vectorized_bettis
let max_B_dim = 5,
min_B_dim = 1,
eirene_results = eirene(sample_distance_matrix1, model="vr", maxdim = max_B_dim)
let eirene_bettis = get_bettis(eirene_results, max_B_dim, min_dim=min_B_dim),
vectorized_bettis = get_vectorized_bettis(eirene_results, max_B_dim, min_dim=min_B_dim)
@test size(vectorized_bettis)[2] == max_B_dim - (min_B_dim-1)
for d in min_B_dim:max_B_dim
@test vectorized_bettis[:,d] == eirene_bettis[d][:,2]
end
end
end
end
@testset "BettiCurves.jl -> plot bettis" begin
# TODO remove tests which test Plots.plot function and not plot_bettis functionality
sample_distance_matrix1 = [0 1 25 4 5 9 13 17;
1 0 2 26 6 10 14 18;
25 2 0 3 7 11 15 19;
4 26 3 0 8 12 16 20;
5 6 7 8 0 21 27 24;
9 10 11 12 21 0 22 28;
13 14 15 16 27 22 0 23;
17 18 19 20 24 28 23 0 ]
sample_distance_matrix2 = [1 1 41 4 5 9 13 17 25 33;
1 1 2 42 6 10 14 18 26 34;
41 2 1 3 7 11 15 19 27 35;
4 42 3 1 8 12 16 20 28 36;
5 6 7 8 1 21 43 24 29 37;
9 10 11 12 21 1 22 44 30 38;
13 14 15 16 43 22 1 23 31 39;
17 18 19 20 24 44 23 1 32 40;
25 26 27 28 29 30 31 32 1 45;
33 34 35 36 37 38 39 40 45 1;]
# plot_bettis tests for get_bettis:
let max_B_dim = 5,
min_B_dim = 1,
eirene_results = eirene(sample_distance_matrix1, model="vr", maxdim = max_B_dim)
all_bettis = get_bettis(eirene_results, max_B_dim)
p = plot_bettis(all_bettis);
@test length(p.series_list) == max_B_dim-(min_B_dim-1)
@test p.attr[:plot_title] == ""
@test_throws DomainError plot_bettis(all_bettis, min_dim=max_B_dim+1)
for (dim_index, dim)= enumerate(min_B_dim:max_B_dim)
@test p.series_list[dim_index][:label] == "Ξ²$(dim)"
if !isnan(all_bettis[dim_index][:,1][1])
@test p.series_list[dim_index][:x] == all_bettis[dim_index][:,1]
@test p.series_list[dim_index][:y] == all_bettis[dim_index][:,2]
end
end
# for dim = min_B_dim:max_B_dim
# p = plot_bettis(all_bettis, min_dim = dim);
# @test length(p.series_list) == max_B_dim-(dim-1)
# end
let p1 = plot_bettis(all_bettis, betti_labels=false)
for (dim_index, dim)= enumerate(min_B_dim:max_B_dim)
@test p1.series_list[dim][:label] == "y$(dim)"
if !isnan(all_bettis[dim_index][:,1][1])
@test p1.series_list[dim][:x] == all_bettis[dim][:,1]
@test p1.series_list[dim][:y] == all_bettis[dim][:,2]
end
end
end
let lw=4,
p1 = plot_bettis(all_bettis, betti_labels=true, lw=lw)
for (dim_index, dim)= enumerate(min_B_dim:max_B_dim)
@test p1.series_list[dim_index][:label] == "Ξ²$(dim)"
@test p1.series_list[dim_index][:linewidth] == lw
end
end
let plt_title = "test_title",
p1 = plot_bettis(all_bettis, title=plt_title, lw=9, xlabel="2")
@test_skip p1.attr[:plot_title] == plt_title # why plot-title is not returning the title?
for dim = min_B_dim:max_B_dim
@test p1.series_list[dim][:label] == "Ξ²$(dim)"
end
end
let plt_title = "test_title",
lw = 9,
p1 = plot_bettis(all_bettis, title=plt_title, lw=lw, xlabel="2", default_labels=false)
@test_skip p1.attr[:plot_title] == plt_title # why plot-title is not returning the title?
for dim = min_B_dim:max_B_dim
@test p1.series_list[dim][:label] == "Ξ²$(dim)"
@test p1.series_list[dim][:linewidth] == lw
# @test for xlabel
# @test for no label
end
end
end
# plot_bettis tests for get_vectorized_bettis:
let max_B_dim = 5,
min_B_dim = 1,
eirene_results = eirene(sample_distance_matrix1, model="vr", maxdim = max_B_dim)
all_bettis = get_vectorized_bettis(eirene_results, max_B_dim)
end
end
@testset "BettiCurves.jl -> area under betti curves" begin
let sample_distance_matrix1 = [0 1 25 4 5 9 13 17;
1 0 2 26 6 10 14 18;
25 2 0 3 7 11 15 19;
4 26 3 0 8 12 16 20;
5 6 7 8 0 21 27 24;
9 10 11 12 21 0 22 28;
13 14 15 16 27 22 0 23;
17 18 19 20 24 28 23 0 ],
sample_distance_matrix2 = [1 1 41 4 5 9 13 17 25 33;
1 1 2 42 6 10 14 18 26 34;
41 2 1 3 7 11 15 19 27 35;
4 42 3 1 8 12 16 20 28 36;
5 6 7 8 1 21 43 24 29 37;
9 10 11 12 21 1 22 44 30 38;
13 14 15 16 43 22 1 23 31 39;
17 18 19 20 24 44 23 1 32 40;
25 26 27 28 29 30 31 32 1 45;
33 34 35 36 37 38 39 40 45 1;],
max_B_dim = 5,
min_B_dim = 1
#==
checks if the size is anyhow changed during proces;
checks is the values Array{Matrix} and reshaped matrix are the same
==#
for min_B_dim in [1, 2, 3, 4, 5]
eirene_results1 =
eirene(sample_distance_matrix1, model = "vr", maxdim = max_B_dim)
eirene_results2 =
eirene(sample_distance_matrix2, model = "vr", maxdim = max_B_dim)
bettis_collection = [
get_bettis(eirene_results1, max_B_dim),
get_bettis(eirene_results2, max_B_dim),
]
for bettis_col in bettis_collection
total_vecs = length(bettis_col)
vec_len, vec_width = size(bettis_col[1])
reshaped_betti = TopologyPreprocessing.vectorize_bettis(bettis_col)
@test vec_len .== size(reshaped_betti, 1)
@test total_vecs .== size(reshaped_betti, 2)
for k = 1:total_vecs
@test reshaped_betti[:, k] == bettis_col[k][:, 2]
end
end
end
#==
checks if get vectorized bettis has same values as get_bettis
==#
for min_B_dim in [1, 2, 3, 4, 5]
eirene_results1 =
eirene(sample_distance_matrix1, model = "vr", maxdim = max_B_dim)
eirene_results2 =
eirene(sample_distance_matrix2, model = "vr", maxdim = max_B_dim)
bettis_collection = [
get_bettis(eirene_results1, max_B_dim),
get_bettis(eirene_results2, max_B_dim),
]
vec_bett_collection = [
get_vectorized_bettis(eirene_results1, max_B_dim),
get_vectorized_bettis(eirene_results2, max_B_dim),
]
for index = 1:length(bettis_collection)
bettis_col = bettis_collection[index]
vec_bettis_col = vec_bett_collection[index]
total_vecs = length(bettis_col)
for k = 1:total_vecs
@test vec_bettis_col[:, k] == bettis_col[k][:, 2]
end
end
end
end
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | test/matrixOrganisation_tests.jl | code | 9341 | using TopologyPreprocessing
using Test
using Random
Random.seed!(1234)
# using MatrixOrganization
@testset "MatrixOrganization.jl -> matrix pooling" begin
in_vector = [1 2 3 4 3 2 1]
in_matrix = [1 2 3; 5 6 7; 8 9 0]
in_matrix2 = [1 2 3; 5 6 7; 8 9 0]
sqr_matrix0 = [1 2 3; 2 6 7; 3 7 0]
resulting_matrix0_2 = [1 2 3; 2 6 7; 3 7 0]
sqr_matrix1 = [ 0 1 13 4 5 9;
1 0 2 14 6 10;
13 2 0 3 7 11;
4 14 3 0 8 12;
5 6 7 8 0 15;
9 10 11 12 15 0]
resulting_matrix1_2 = [0 1 14 14 10 10;
1 0 14 14 10 10;
14 14 0 3 12 12;
14 14 3 0 12 12;
10 10 12 12 0 15;
10 10 12 12 15 0]
sqr_matrix2 = [ 0 1 113 4 5 9 13 17 81 82 83 84;
1 0 2 114 6 10 14 18 85 86 87 88;
113 2 0 3 7 11 15 19 89 90 91 92;
4 114 3 0 8 12 16 20 93 94 95 96;
5 6 7 8 0 21 115 24 29 30 31 32;
9 10 11 12 21 0 22 116 33 34 35 36;
13 14 15 16 115 22 0 23 37 38 39 40;
17 18 19 20 24 116 23 0 41 42 43 44;
81 85 89 93 29 33 37 41 0 25 117 28;
82 86 90 94 30 34 38 42 25 0 26 118;
83 87 91 95 31 35 39 43 117 26 0 27;
84 88 92 96 32 36 40 44 28 118 27 0]
result_matrix2_2 = [ 0 1 114 114 10 10 18 18 86 86 88 88;
1 0 114 114 10 10 18 18 86 86 88 88;
114 114 0 3 12 12 20 20 94 94 96 96;
114 114 3 0 12 12 20 20 94 94 96 96;
10 10 12 12 0 21 116 116 34 34 36 36;
10 10 12 12 21 0 116 116 34 34 36 36;
18 18 20 20 116 116 0 23 42 42 44 44;
18 18 20 20 116 116 23 0 42 42 44 44;
86 86 94 94 34 34 42 42 0 25 118 118;
86 86 94 94 34 34 42 42 25 0 118 118;
88 88 96 96 36 36 44 44 118 118 0 27;
88 88 96 96 36 36 44 44 118 118 27 0]
result_matrix2_3 = [ 0 1 113 114 114 114 89 89 89 92 92 92;
1 0 2 114 114 114 89 89 89 92 92 92;
113 2 0 114 114 114 89 89 89 92 92 92;
114 114 114 0 8 12 116 116 116 96 96 96;
114 114 114 8 0 21 116 116 116 96 96 96;
114 114 114 12 21 0 116 116 116 96 96 96;
89 89 89 116 116 116 0 23 37 117 117 117;
89 89 89 116 116 116 23 0 41 117 117 117;
89 89 89 116 116 116 37 41 0 117 117 117;
92 92 92 96 96 96 117 117 117 0 26 118;
92 92 92 96 96 96 117 117 117 26 0 27;
92 92 92 96 96 96 117 117 117 118 27 0]
result_matrix2_4 = [ 0 1 114 114 20 20 20 20 96 96 96 96;
1 0 114 114 20 20 20 20 96 96 96 96;
114 114 0 3 20 20 20 20 96 96 96 96;
114 114 3 0 20 20 20 20 96 96 96 96;
20 20 20 20 0 21 116 116 44 44 44 44;
20 20 20 20 21 0 116 116 44 44 44 44;
20 20 20 20 116 116 0 23 44 44 44 44;
20 20 20 20 116 116 23 0 44 44 44 44;
96 96 96 96 44 44 44 44 0 25 118 118;
96 96 96 96 44 44 44 44 25 0 118 118;
96 96 96 96 44 44 44 44 118 118 0 27;
96 96 96 96 44 44 44 44 118 118 27 0]
@test matrix_poling(in_vector) !== in_vector
@test matrix_poling(in_vector, method="max_pooling") == [4 4 4 4 4 4 4]
# @test matrix_poling!(in_vector) === in_vector
# @test matrix_poling!(in_vector) == [4 4 4 4 4 4 4]
@test matrix_poling(in_matrix) !== in_matrix
@test matrix_poling(in_matrix, method="max_pooling") == 9 .* ones(size(in_matrix))
# @test matrix_poling!(in_matrix) === in_matrix
# @test matrix_poling!(in_matrix) == 9 .* ones(size(in_matrix))
@test matrix_poling(in_matrix2[1:2,1:2], method="max_pooling") == 6 .* ones(size(in_matrix2[1:2,1:2]))
@test matrix_poling(in_matrix2[1:2,1:2], method="max_pooling") != in_matrix2[1:2,1:2]
# ====
# Subsampling matrix
# Function is supposed to work only on upper half, and here the upper half is too small, so there are no operations
@test subsample_matrix(sqr_matrix0, subsamp_size=2, method="max_pooling") == resulting_matrix0_2
@test subsample_matrix(sqr_matrix1, subsamp_size=2, method="max_pooling") == resulting_matrix1_2
@test subsample_matrix(sqr_matrix2, subsamp_size=2, method="max_pooling") == result_matrix2_2
@test subsample_matrix(sqr_matrix2, subsamp_size=3, method="max_pooling") == result_matrix2_3
@test subsample_matrix(sqr_matrix2, subsamp_size=4, method="max_pooling") == result_matrix2_4
end
@testset "MatrixOrganization.jl -> add_random_patch" begin
# TODO set seed for add_random_path
# TODO Seed has to be set for this test
in_vector = [1, 2, 3, 4, 3, 2, 1]
sqr_matrix0 = [ 1 2 3;
2 6 7;
3 7 0]
sqr_matrix1 = [1 2 3 4 5;
2 1 6 7 8;
3 6 1 9 10;
4 7 9 1 11;
5 8 10 11 1]
sqr_matrix2 = [ 0 1 13 4 5 9;
1 0 2 14 6 10;
13 2 0 3 7 11;
4 14 3 0 8 12;
5 6 7 8 0 15;
9 10 11 12 15 0]
sqr_matrix3 = [ 0 1 113 4 5 9 13 17 81 82 83 84;
1 0 2 114 6 10 14 18 85 86 87 88;
113 2 0 3 7 11 15 19 89 90 91 92;
4 114 3 0 8 12 16 20 93 94 95 96;
5 6 7 8 0 21 115 24 29 30 31 32;
9 10 11 12 21 0 22 116 33 34 35 36;
13 14 15 16 115 22 0 23 37 38 39 40;
17 18 19 20 24 116 23 0 41 42 43 44;
81 85 89 93 29 33 37 41 0 25 117 28;
82 86 90 94 30 34 38 42 25 0 26 118;
83 87 91 95 31 35 39 43 117 26 0 27;
84 88 92 96 32 36 40 44 28 118 27 0]
function get_unchanged_indices(input_matrix,ind)
indices = CartesianIndices(size(input_matrix))
indices = findall(x->x!=ind,indices)
for i = ind
indices = indices[findall(x->x!=i,indices)]
end
return indices
end
out_m, ind = add_random_patch(sqr_matrix0)
indices = get_unchanged_indices(sqr_matrix0,ind)
@test size(ind) == (1,2)
@test sqr_matrix0[indices] == out_m[indices]
big_sqr_matrix0 = sqr_matrix0 .*100
out_m, ind = add_random_patch(big_sqr_matrix0, patch_size=1,total_patches=2)
indices = get_unchanged_indices(big_sqr_matrix0,ind)
@test size(ind) == (2,2)
@test big_sqr_matrix0[indices] == out_m[indices]
@test sum(big_sqr_matrix0[ind] .!= out_m[ind]) == 4
@test sum(big_sqr_matrix0[ind] .== out_m[ind]) == 0
out_m, ind = add_random_patch(sqr_matrix1, patch_size=1,total_patches=2)
indices = get_unchanged_indices(sqr_matrix1,ind)
@test size(ind) == (2,2)
@test sqr_matrix1[indices] == out_m[indices]
# TODO those 2 tests fails when random value is the equal to one that is replaced
# @test sum(sqr_matrix1[ind] .!= out_m[ind]) == 4
# @test sum(sqr_matrix1[ind] .== out_m[ind]) == 0
# ===
input_matrix = sqr_matrix1
function test_adding_rand_patch(input_matrix, t_patches,p_size)
out_m, ind = add_random_patch(input_matrix, patch_size=p_size, total_patches=t_patches)
indices = get_unchanged_indices(input_matrix,ind)
@test size(ind) == (t_patches*p_size^2,2)
@test input_matrix[indices] == out_m[indices]
# For values from range, tests below does not make sense:
# @test sum(input_matrix[ind] .!= out_m[ind]) == length(ind)
# @test sum(input_matrix[ind] .== out_m[ind]) == 0
end
t_patches = 1
p_size = 2
test_adding_rand_patch(sqr_matrix0, t_patches,p_size)
test_adding_rand_patch(sqr_matrix1, t_patches,p_size)
test_adding_rand_patch(sqr_matrix2, t_patches,p_size)
test_adding_rand_patch(sqr_matrix3, t_patches,p_size)
t_patches = 1
p_size = 3
test_adding_rand_patch(sqr_matrix0, t_patches,p_size)
test_adding_rand_patch(sqr_matrix1, t_patches,p_size)
test_adding_rand_patch(sqr_matrix2, t_patches,p_size)
test_adding_rand_patch(sqr_matrix3, t_patches,p_size)
t_patches = 1
p_size = 4
correct_error = 3
# TODO change this into test_throws
try
add_random_patch(sqr_matrix0, patch_size=p_size, total_patches=t_patches)
catch err
my_error = 0
if isa(err, DomainError)
println("DomainError")
my_error = 1
elseif isa(err, DimensionMismatch)
println("DimensionMismatch")
my_error = 2
else
println("Unknow error")
my_error = 3
end
@test my_error == correct_error
end
test_adding_rand_patch(sqr_matrix1, t_patches, p_size)
test_adding_rand_patch(sqr_matrix2, t_patches, p_size)
test_adding_rand_patch(sqr_matrix3, t_patches, p_size)
t_patches = 3
p_size = 5
test_adding_rand_patch(sqr_matrix2, t_patches,p_size)
test_adding_rand_patch(sqr_matrix3, t_patches,p_size)
# ===
# locations
locations1 = [CartesianIndex(1,2), CartesianIndex(2,3)]
locations2 = [CartesianIndex(1,2), CartesianIndex(99,3)]
t_patches = 1
p_size = 1
out_m, ind = add_random_patch(sqr_matrix1, patch_size=p_size, total_patches=t_patches,locations=locations1)
indices = get_unchanged_indices(sqr_matrix1,ind)
@test ind[:,1] == locations1
@test size(ind) == (size(locations1)[1]*p_size^2,2)
@test sqr_matrix1[indices] == out_m[indices]
# The number of
@test sum(sqr_matrix1[locations1] .!= out_m[locations1]) == length(locations1)
@test sum(sqr_matrix1[locations1] .== out_m[locations1]) == 0
correct_error = 0
try
out_m, ind = add_random_patch(sqr_matrix1, patch_size=p_size, total_patches=t_patches,locations=locations2)
catch err
# global correct_error
if isa(err, DomainError)
correct_error = 1
else
correct_error = 2
end
finally
# global correct_error
@test correct_error == 2
end
# TODO test for index below diagonal
# TODO too many indices
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | test/matrixProcessing_tests.jl | code | 28975 | using TopologyPreprocessing
using Test
using LinearAlgebra
# using MatrixOrganization
@testset "MatrixProcessing.jl -> basics" begin
# Ints
positive_matrix = [1 2 3;
4 5 6]
negative_matrix = [1 2 -3;
-4 5 6]
@test shift_to_non_negative(positive_matrix) == positive_matrix
@test isempty(findall(x->x<0, shift_to_non_negative(negative_matrix)))
# Floats
positive_matrix2 = [1. 2 3; 4 5 6]
negative_matrix2 = [1 2 -3; -4 5 6]
@test shift_to_non_negative(positive_matrix2) == positive_matrix2
@test isempty(findall(x->x<0, shift_to_non_negative(negative_matrix2)))
positive_3d_matrix = rand(2,3,4)
negative_3d_matrix = rand(2,3,4).*2 .-1
@test shift_to_non_negative(positive_3d_matrix) == positive_3d_matrix
@test isempty(findall(x->x<0, shift_to_non_negative(negative_3d_matrix)))
# =====================
@test findmin(normalize_to_01(negative_matrix))[1] == 0
@test findmax(normalize_to_01(negative_matrix))[1] == 1
powers_of_2 = [0 ; [2^k for k in 0:2:8]]
powers_of_3 = [0 ; [3^k for k in 0:2:8]]
@test findmin(normalize_to_01(powers_of_2, use_factor=true))[1] == 0
@test findmax(normalize_to_01(powers_of_2, use_factor=true))[1] == 1
@test findmin(normalize_to_01(powers_of_3, use_factor=true))[1] == 0
@test findmax(normalize_to_01(powers_of_3, use_factor=true))[1] > 1
@test findmin(normalize_to_01(powers_of_3, use_factor=true, norm_factor=3^8))[1] == 0
@test findmax(normalize_to_01(powers_of_3, use_factor=true, norm_factor=3^8))[1] == 1
# =====================
square_matrix = [1 2 3;
4 5 6;
7 8 9]
@test issymmetric(diagonal_symmetrize(square_matrix))
@test LinearAlgebra.checksquare(diagonal_symmetrize(square_matrix)) == 3
@test issymmetric(diagonal_symmetrize(positive_matrix))
@test LinearAlgebra.checksquare(diagonal_symmetrize(positive_matrix)) == 2
@test diagonal_symmetrize(square_matrix)[end,1] == square_matrix[1,end]
@test diagonal_symmetrize(square_matrix)[2,1] == square_matrix[1,2]
@test diagonal_symmetrize(square_matrix, below_over_upper=true)[1,end] == square_matrix[end,1]
@test diagonal_symmetrize(square_matrix, below_over_upper=true)[1,2] == square_matrix[2,1]
end
@testset "MatrixProcessing.jl -> distances and indices" begin
# Ints
positive_matrix = [1 2 3;4 5 6]
positive_matrix2 = [1. 2 3; 4 5 6]
positive_3d_matrix = rand(2,3,4)
negative_matrix = [1 2 -3;-4 5 6]
negative_matrix2 = [1 2 -3; -4 5 6]
negative_3d_matrix = rand(2,3,4).*2 .-1
negative_6d_matrix = rand(2,3,4,5,6,7).*2 .-1
powers_of_2 = [0 ; [2^k for k in 0:2:8]]
powers_of_3 = [0 ; [3^k for k in 0:2:8]]
square_matrix = [1 2 3;
4 5 6;
7 8 9]
# ========================================================
@test length(unique(group_distances(square_matrix,1))) == 1
@test length(unique(group_distances(square_matrix,2))) == 2
@test length(unique(group_distances(square_matrix,9))) == 9
@test_throws DomainError group_distances(square_matrix,20)
@test length(unique(group_distances(positive_matrix,1))) == 1
@test length(unique(group_distances(positive_matrix,2))) == 2
@test length(unique(group_distances(positive_matrix,6))) == 6
@test_throws DomainError group_distances(positive_matrix,20)
@test length(unique(group_distances(positive_3d_matrix,2))) == 2
@test length(unique(group_distances(negative_3d_matrix,2))) == 2
@test length(unique(group_distances(negative_6d_matrix,2))) == 2
group_distances(square_matrix,2)
# ========================================================
@test length(generate_indices(3))==3^2
@test length(generate_indices(223))==223^2
n=3
@test length(generate_indices(n, symmetry_order=true, include_diagonal=false)) == ((n^2 - n) Γ· 2)
n=9
@test length(generate_indices(n, symmetry_order=true, include_diagonal=false)) == ((n^2 - n) Γ· 2)
index_matrix1 = generate_indices(n, symmetry_order=true, include_diagonal=false)
@test length(findall(x-> x == CartesianIndex(n,n),index_matrix1)) == 0
@test length(findall(x-> x == CartesianIndex(n-2,n-1),index_matrix1)) == 1
@test length(findall(x-> x == CartesianIndex(n-1,n-2),index_matrix1)) == 0
n=223
@test length(generate_indices(n, symmetry_order=true, include_diagonal=false)) == ((n^2 - n) Γ· 2)
@test length(generate_indices(n, symmetry_order=true, include_diagonal=true)) == ((n^2 - n) Γ· 2 + n)
n=3
index_matrix2 = generate_indices(n, symmetry_order=true, include_diagonal=true)
@test length(index_matrix2) == (((n-1)*n)Γ·2 +n)
@test findmax(index_matrix2)[1] == CartesianIndex(n,n)
@test length(findall(x-> x == CartesianIndex(1,n),index_matrix2)) == 1
@test length(findall(x-> x == CartesianIndex(1,1),index_matrix2)) == 1
@test length(findall(x-> x == CartesianIndex(n,n),index_matrix2)) == 1
n=9
@test length(generate_indices(n, symmetry_order=true, include_diagonal=true)) == (((n-1)*n)Γ·2 +n)
n=223
@test length(generate_indices(n, symmetry_order=true, include_diagonal=true)) == (((n-1)*n)Γ·2 +n)
n=9
@test length(generate_indices((n,n), symmetry_order=true, include_diagonal=true)) == (((n-1)*n)Γ·2 +n)
n=223
@test length(generate_indices((n,n), symmetry_order=true, include_diagonal=true)) == (((n-1)*n)Γ·2 +n)
end
# @testset "MatrixProcessing.jl -> arrays of arrays" begin
# # Need to be taken from Bettis
# # arr_of_arrs
#
# # reduce_arrs_to_min_len(arr_of_arrs)
#
# end
@testset "MatrixProcessing.jl -> matrix ordering helping functions" begin
let testing_matrix0 = Array{Float64,2}(undef, 2, 3),
testing_matrix1 = [1 2 3; 4 5 6; 7 8 9],
testing_matrix2 = ones((2,3,4))
testing_matrix3 = [1 2 3; 4 5 6]
testing_matrix4 = [1, 4, 7, 2, 5, 8, 3, 6, 9]
@test arr_to_vec(testing_matrix0) isa Vector
@test length(testing_matrix0) == length(arr_to_vec(testing_matrix0))
@test arr_to_vec(testing_matrix1) isa Vector
@test length(testing_matrix1) == length(arr_to_vec(testing_matrix1))
@test arr_to_vec(testing_matrix1) == [1, 4, 7, 2, 5, 8, 3, 6, 9]
@test arr_to_vec(testing_matrix2) isa Vector
@test length(testing_matrix2) == length(arr_to_vec(testing_matrix2))
@test arr_to_vec(testing_matrix3) isa Vector
@test length(testing_matrix3) == length(arr_to_vec(testing_matrix3))
@test arr_to_vec(testing_matrix3) == [1, 4, 2, 5, 3, 6]
@test arr_to_vec(testing_matrix4) isa Vector
@test length(testing_matrix4) == length(arr_to_vec(testing_matrix4))
@test arr_to_vec(testing_matrix4) == [1, 4, 7, 2, 5, 8, 3, 6, 9]
end
let testing_matrix1 = CartesianIndices((2,2)),
testing_matrix2 = CartesianIndices((9,1))
@test cartesianInd_to_vec(testing_matrix1) isa Vector
@test length(cartesianInd_to_vec(testing_matrix1)) == length(testing_matrix1)
@test cartesianInd_to_vec(testing_matrix2) isa Vector
@test length(cartesianInd_to_vec(testing_matrix2)) == length(testing_matrix2)
end
let vals_matrix1a = [1 2 3; 4 5 6],
vals_matrix1b = [6 5 4; 3 2 1],
vals_matrix1c = [4 5 6; 1 2 3]
let index_matrix1a = [CartesianIndex(1,1), CartesianIndex(1,2), CartesianIndex(1,3), CartesianIndex(2,1), CartesianIndex(2,2), CartesianIndex(2,3)]
@test length(sort_indices_by_values(vals_matrix1a, index_matrix1a)) == length(vals_matrix1a)
@test sort_indices_by_values(vals_matrix1a, index_matrix1a) == collect(1:6)
@test length(sort_indices_by_values(vals_matrix1b, index_matrix1a)) == length(vals_matrix1b)
@test sort_indices_by_values(vals_matrix1b, index_matrix1a) == collect(6:-1:1)
@test length(sort_indices_by_values(vals_matrix1c, index_matrix1a)) == length(vals_matrix1c)
@test sort_indices_by_values(vals_matrix1c, index_matrix1a) == [4, 5, 6, 1, 2, 3]
end
let index_matrix1b = CartesianIndices((2,3))
@test_throws TypeError sort_indices_by_values(vals_matrix1a, index_matrix1b)
end
end
let vals_matrix2 = [1 2 3; 4 5 6; 7 8 9],
index_matrix2a = CartesianIndices((3,3)),
index_matrix2b = [CartesianIndex(1,1) CartesianIndex(1,2) CartesianIndex(1,3);
CartesianIndex(2,1) CartesianIndex(2,2) CartesianIndex(2,3);
CartesianIndex(3,1) CartesianIndex(3,2) CartesianIndex(3,3)],
index_matrix2c = [CartesianIndex(1,1), CartesianIndex(1,2), CartesianIndex(1,3),
CartesianIndex(2,1), CartesianIndex(2,2), CartesianIndex(2,3),
CartesianIndex(3,1), CartesianIndex(3,2), CartesianIndex(3,3)]
@test_throws TypeError sort_indices_by_values(vals_matrix2, index_matrix2a)
@test_throws TypeError sort_indices_by_values(vals_matrix2, index_matrix2b)
@test sort_indices_by_values(vals_matrix2, index_matrix2c) isa Vector
@test sort_indices_by_values(vals_matrix2, index_matrix2c) == 1:9
@test length(sort_indices_by_values(vals_matrix2, index_matrix2c)) == length(vals_matrix2)
end
let vals_matrix3 = [1, 4, 7, 2, 5, 8, 3, 6, 9],
index_matrix3a = CartesianIndices((9,1)),
index_matrix3b = CartesianIndices((9,)),
index_matrix3c = [1, 4, 7, 2, 5, 8, 3, 6, 9]
@test_throws TypeError sort_indices_by_values(vals_matrix3, index_matrix3a)
@test_throws TypeError sort_indices_by_values(vals_matrix3, index_matrix3b)
@test sort_indices_by_values(vals_matrix3, index_matrix3c) isa Vector
@test sort_indices_by_values(vals_matrix3, index_matrix3c) == 1:9
@test length(sort_indices_by_values(vals_matrix3, index_matrix3c)) == length(vals_matrix3)
end
let target_coords1 = CartesianIndex(2,3),
target_value = -20
let some_matrix = [1 2 3; 4 5 6; 7 8 9]
set_values!(some_matrix, target_coords1, target_value; do_symmetry=false)
@test some_matrix[target_coords1] == target_value
end
let some_matrix = [1 2 3; 4 5 6; 7 8 9]
another_matrix = set_values!(some_matrix, target_coords1, target_value; do_symmetry=false)
@test some_matrix[target_coords1] == target_value
@test another_matrix[target_coords1] == target_value
@test another_matrix === some_matrix
end
let some_matrix = [1 2 3; 4 5 6; 7 8 9]
another_matrix = set_values!(some_matrix, target_coords1, target_value; do_symmetry=true)
@test some_matrix[target_coords1] == target_value
@test some_matrix[target_coords1[1],target_coords1[2]] == target_value
@test some_matrix[target_coords1[2],target_coords1[1]] == target_value
@test another_matrix === some_matrix
end
let some_matrix = [1 2 3; 4 5 6; 7 8 9],
some_matrix2 = [1 2 3; 4 5 6; 7 8 9],
target_coords2 = CartesianIndex(8,9)
@test_throws BoundsError set_values!(some_matrix, target_coords2, target_value; do_symmetry=false)
@test some_matrix == some_matrix2
end
let some_matrix = [1 2 3; 4 5 6; 7 8 9],
target_coords3 = CartesianIndices((2,2))
@test_throws MethodError set_values!(some_matrix, target_coords3, target_value; do_symmetry=false)
end
end
end
@testset "MatrixProcessing.jl -> matrix ordering" begin
"""
check_for_min_val_position(input_matrix::Matrix; force_symmetry=false, assign_same_values=false)
Takes an 'input_matrix' and checks if its ordered form has the minimum value
in the same position as the minimum value of 'input_matrix'.
"""
function check_for_min_val_position(input_matrix::Matrix; force_symmetry=false, assign_same_values=false)
# check if min val in uniquely value matrix is in the same position
ordered_matrix = get_ordered_matrix(input_matrix, force_symmetry=force_symmetry, assign_same_values=assign_same_values)
if issymmetric(input_matrix) || force_symmetry
off_diag_ind = generate_indices(size(input_matrix),include_diagonal=false)
else
off_diag_ind = generate_indices(size(input_matrix),include_diagonal=true)
end
min_val = findmin(input_matrix[off_diag_ind])[1]
# min_orig = findmin(input_matrix[off_diag_ind])[2]
all_min_input = findall(x->x==min_val,input_matrix[off_diag_ind])
all_min_ordered = findall(x->x==1,ordered_matrix[off_diag_ind])
return off_diag_ind[all_min_input] == off_diag_ind[all_min_ordered]
end
# ==
let power_matrix = zeros(Int, (2,3,4))
for k = 1:4
power_matrix[:,:,k] = reshape([(k+1)^n for n =1:6], (2,3))
end
ordered_power_matrix = copy(power_matrix)
ordered_power_matrix[:, :, 1] =[1 6 12;
3 8 13]
ordered_power_matrix[:, :, 2] =[2 11 17;
7 15 20]
ordered_power_matrix[:, :, 3] = [4 14 21;
9 18 23]
ordered_power_matrix[:, :, 4] =[5 16 22;
10 19 24]
@test !isempty(get_ordered_matrix(power_matrix) == ordered_power_matrix)
@test get_ordered_matrix(power_matrix) == ordered_power_matrix
# @test_throws MethodError get_ordered_matrix(power_matrix)
end
# ==
let square_matrix1 = [1 2 3; 4 5 6; 7 8 9],
square_matrix2 = [1 4 7; 2 5 8; 3 6 9]
@test get_ordered_matrix(10 .*square_matrix1) == (square_matrix1 )
@test get_ordered_matrix(10 .*square_matrix2) == (square_matrix2 )
@test sum(get_ordered_matrix(square_matrix1) .== square_matrix1 ) == 9
@test sum(get_ordered_matrix(square_matrix2) .== square_matrix2 ) == 9
@test get_ordered_matrix(10 .*square_matrix1, force_symmetry=true) == get_ordered_matrix(square_matrix1, force_symmetry=true)
@test get_ordered_matrix(10 .*square_matrix2, force_symmetry=true) == get_ordered_matrix(square_matrix2, force_symmetry=true)
# check if min val in uniquely value matrix is in the same position
let input_matrix = 10square_matrix1
@test check_for_min_val_position(input_matrix)
end
end
# ==
let square_matrix_same_vals1 = [1 2 3; 3 4 5; 6 7 8],
square_matrix_same_vals2 = [1 3 6; 2 4 7; 3 5 8]
@test get_ordered_matrix(10 .*square_matrix_same_vals1, assign_same_values=true) == (square_matrix_same_vals1 )
@test get_ordered_matrix(10 .*square_matrix_same_vals2, assign_same_values=true) == (square_matrix_same_vals2 )
# forcing symmetry test
some_ord_mat = get_ordered_matrix(10 .*square_matrix_same_vals1, force_symmetry=true, assign_same_values=false)
# remove 1, because 0 is not off diagonal
@test length(unique(some_ord_mat))-1 == (size(square_matrix_same_vals1,1)*(size(square_matrix_same_vals1,1)-1))/2
end
# ==
let square_matrix_same_vals3 = [1 2 3; 3 4 3; 5 6 7],
square_matrix_same_vals4 = [1 3 3; 2 4 6; 3 3 7]
@test get_ordered_matrix(10 .*square_matrix_same_vals3, force_symmetry=true, assign_same_values=true) ==
get_ordered_matrix(square_matrix_same_vals3, force_symmetry=true, assign_same_values=true)
@test get_ordered_matrix(10 .*square_matrix_same_vals4, force_symmetry=true, assign_same_values=true) ==
get_ordered_matrix(square_matrix_same_vals4, force_symmetry=true, assign_same_values=true)
end
# ==================
# Tests on symmetric matrices
let test_mat_b1 = [ 1 1 1 4 5 9 ;
1 1 2 3 6 10;
1 2 1 3 7 11;
4 3 3 1 8 12;
5 6 7 8 1 13;
9 10 11 12 13 1 ;]
# no same values
let test_mat_b1_ord1 = [ 0 1 2 6 7 11;
1 0 3 4 8 12;
2 3 0 5 9 13;
6 4 5 0 10 14;
7 8 9 10 0 15;
11 12 13 14 15 0],
test_mat_b1_indices = generate_indices(size(test_mat_b1))
ord_mat_b1_1 = get_ordered_matrix(10 .*test_mat_b1, force_symmetry=false, assign_same_values=false)
@test issymmetric(ord_mat_b1_1)
@test ord_mat_b1_1[test_mat_b1_indices] == test_mat_b1_ord1[test_mat_b1_indices]
ord_mat_b1_2 = get_ordered_matrix(10 .*test_mat_b1, force_symmetry=true, assign_same_values=false)
@test issymmetric(ord_mat_b1_2)
@test ord_mat_b1_2[test_mat_b1_indices] == test_mat_b1_ord1[test_mat_b1_indices]
end
# assign same values
let test_mat_b1_ord2 = [ 0 1 1 4 5 9 ;
1 0 2 3 6 10;
1 2 0 3 7 11;
4 3 3 0 8 12;
5 6 7 8 0 13;
9 10 11 12 13 0 ;]
let ord_mat_b1_3 = get_ordered_matrix(10 .*test_mat_b1, force_symmetry=false, assign_same_values=true)
@test issymmetric(ord_mat_b1_3)
# Removed, because ordered diagonal is all 1: @test ord_mat_b1_3 == (test_mat_b1 )
@test ord_mat_b1_3 == test_mat_b1_ord2
end
let ord_mat_b1_4 = get_ordered_matrix(10 .*test_mat_b1, force_symmetry=true, assign_same_values=true)
@test issymmetric(ord_mat_b1_4)
# Removed, because ordered diagonal is all 1: @test ord_mat_b1_4 == (test_mat_b1 )
@test ord_mat_b1_4 == test_mat_b1_ord2
end
end
let input_matrix = 10 .*test_mat_b1
@test check_for_min_val_position(input_matrix; force_symmetry=false, assign_same_values=true)
end
end
# ==
# Non-symmetric matrix test
let test_mat_b2 = [ 1 1 3 4 5 9 ;
1 1 2 3 6 10;
14 2 1 3 7 11;
4 15 3 1 8 12;
5 5 7 8 1 13;
9 10 11 12 13 1 ;]
let ord_mat_b2_1 = get_ordered_matrix(10 .*test_mat_b2, force_symmetry=false, assign_same_values=false)
@test !issymmetric(ord_mat_b2_1)
@test findall(x->x<=8,ord_mat_b2_1) == findall(x->x==1,test_mat_b2) # all values with one are first 8 values used for ordering
@test length(unique(ord_mat_b2_1)) == length(test_mat_b2) #check if all values are unique
end
let test_mat_b2_ord1 = [0 1 6 4 7 11;
1 0 2 5 8 12;
6 2 0 3 9 13;
4 5 3 0 10 14;
7 8 9 10 0 15;
11 12 13 14 15 0 ;]
test_mat_b2_indices = generate_indices(size(test_mat_b2))
filter!(x->x!=CartesianIndex(1,3), test_mat_b2_indices)
filter!(x->x!=CartesianIndex(1,4), test_mat_b2_indices)
filter!(x->x!=CartesianIndex(2,4), test_mat_b2_indices)
filter!(x->x!=CartesianIndex(3,4), test_mat_b2_indices)
filter!(x->x!=CartesianIndex(3,1), test_mat_b2_indices)
filter!(x->x!=CartesianIndex(4,1), test_mat_b2_indices)
filter!(x->x!=CartesianIndex(4,2), test_mat_b2_indices)
filter!(x->x!=CartesianIndex(4,3), test_mat_b2_indices)
ord_mat_b2_2 = get_ordered_matrix(10 .*test_mat_b2, force_symmetry=true, assign_same_values=false)
@test issymmetric(ord_mat_b2_2)
@test ord_mat_b2_2[test_mat_b2_indices] == test_mat_b2_ord1[test_mat_b2_indices]
# forcing symmetry test:
@test length(unique(ord_mat_b2_2))-1 == (size(test_mat_b2,1)*(size(test_mat_b2,1)-1))/2
end
# TODO to fix tests, add 1 to all values off diagonal for the input matrix
let test_mat_b2_ord2 = [0 0 2 3 4 8;
0 0 1 2 5 9;
13 1 0 2 6 10;
3 14 2 0 7 11;
4 4 6 7 0 12;
8 9 10 11 12 0 ;]
ord_mat_b2_3 = get_ordered_matrix(10 .*test_mat_b2, force_symmetry=false, assign_same_values=true)
@test_skip !issymmetric(ord_mat_b2_3)
@test_skip ord_mat_b2_3 == test_mat_b2_ord2
end
# TODO to fix tests, add 1 to all values off diagonal for the input matrix
let test_mat_b2_ord3 = [0 0 2 3 4 8
0 0 1 2 5 9
2 1 0 2 6 10
3 2 2 0 7 11
4 5 6 7 0 12
8 9 10 11 12 0 ;]
ord_mat_b2_4 = get_ordered_matrix(10 .*test_mat_b2, force_symmetry=true, assign_same_values=true)
@test_skip issymmetric(ord_mat_b2_4)
@test_skip ord_mat_b2_4 == test_mat_b2_ord3
end
end
# ==
let test_mat_b3 = [ 1 1 3 4 5 9 ;
1 1 2 3 6 10;
3 2 1 3 7 11;
4 3 3 1 8 12;
5 6 7 8 1 13;
9 10 11 12 13 1 ;]
let test_mat_b3_ord1 = [ 0 0 5 3 6 10;
0 0 1 4 7 11;
5 1 0 2 8 12;
3 4 2 0 9 13;
6 7 8 9 0 14;
10 11 12 13 14 0 ;],
test_mat_b3_indices = generate_indices(size(test_mat_b3))
filter!(x->x!=CartesianIndex(1,3), test_mat_b3_indices)
filter!(x->x!=CartesianIndex(1,4), test_mat_b3_indices)
filter!(x->x!=CartesianIndex(2,4), test_mat_b3_indices)
filter!(x->x!=CartesianIndex(3,4), test_mat_b3_indices)
filter!(x->x!=CartesianIndex(3,1), test_mat_b3_indices)
filter!(x->x!=CartesianIndex(4,1), test_mat_b3_indices)
filter!(x->x!=CartesianIndex(4,2), test_mat_b3_indices)
filter!(x->x!=CartesianIndex(4,3), test_mat_b3_indices)
ord_mat_b3_1 = get_ordered_matrix(10 .*test_mat_b3, force_symmetry=false, assign_same_values=false)
@test issymmetric(ord_mat_b3_1)
@test_skip ord_mat_b3_1[test_mat_b3_indices] == test_mat_b3_ord1[test_mat_b3_indices]
ord_mat_b3_2 = get_ordered_matrix(10 .*test_mat_b3, force_symmetry=true, assign_same_values=false)
@test issymmetric(ord_mat_b3_2)
@test_skip ord_mat_b3_2[test_mat_b3_indices] == test_mat_b3_ord1[test_mat_b3_indices]
end
# TODO remove tests that do not add anything new for testing and are just another similar case
let test_mat_b3_ord2 = [ 0 0 2 3 4 8 ;
0 0 1 2 5 9 ;
2 1 0 2 6 10;
3 2 2 0 7 11;
4 5 6 7 0 12;
8 9 10 11 12 0 ;]
ord_mat_b3_3 = get_ordered_matrix(10 .*test_mat_b3, force_symmetry=false, assign_same_values=true)
@test issymmetric(ord_mat_b3_3)
@test_skip ord_mat_b3_3 == (test_mat_b3 .-1)
@test_skip ord_mat_b3_3 == test_mat_b3_ord2
ord_mat_b3_4 = get_ordered_matrix(10 .*test_mat_b3, force_symmetry=true, assign_same_values=true)
@test issymmetric(ord_mat_b3_4)
@test_skip ord_mat_b3_4 == (test_mat_b3 .-1)
@test_skip ord_mat_b3_4 == test_mat_b3_ord2
end
let input_matrix = 10 .*test_mat_b3
@test check_for_min_val_position(input_matrix; force_symmetry=false, assign_same_values=true)
end
end
# ==
let test_mat_b4 = [ 1 1 41 4 5 9 13 17 25 33;
1 1 2 42 6 10 14 18 26 34;
41 2 1 3 7 11 15 19 27 35;
4 42 3 1 8 12 16 20 28 36;
5 6 7 8 1 21 43 24 29 37;
9 10 11 12 21 1 22 44 30 38;
13 14 15 16 43 22 1 23 31 39;
17 18 19 20 24 44 23 1 32 40;
25 26 27 28 29 30 31 32 1 45;
33 34 35 36 37 38 39 40 45 1;]
@test_skip get_ordered_matrix(10 .*test_mat_b4, force_symmetry=false, assign_same_values=false) == (test_mat_b4 .-1)
@test_skip get_ordered_matrix(10 .*test_mat_b4, force_symmetry=false, assign_same_values=true) == (test_mat_b4 .-1)
let ord_mat = get_ordered_matrix(10 .*test_mat_b4, force_symmetry=true, assign_same_values=false)
@test issymmetric(ord_mat)
@test_skip ord_mat == (test_mat_b4 .-1)
end
let ord_mat = get_ordered_matrix(10 .*test_mat_b4, force_symmetry=true, assign_same_values=true)
@test issymmetric(ord_mat)
@test_skip ord_mat == (test_mat_b4 .-1)
end
let input_matrix = 10test_mat_b4
@test check_for_min_val_position(input_matrix)
end
end
# ==
let test_mat_b5 = -[1 1 3 4 5 9 ;
1 1 2 3 6 10;
14 2 1 3 7 11;
4 15 3 1 8 12;
5 5 7 8 1 13;
9 10 11 12 13 1 ;]
let ord_mat_b5_1 = get_ordered_matrix(10 .*test_mat_b5, force_symmetry=false, assign_same_values=false)
@test !issymmetric(ord_mat_b5_1)
@test_skip findall(x->x>=28,ord_mat_b5_1) == findall(x->x==-1,test_mat_b5) # all values with one are first 8 values used for ordering
@test length(unique(ord_mat_b5_1)) == length(test_mat_b5) #check if all values are unique
end
let test_mat_b5_ord1 = [ 0 14 9 11 8 4
14 0 13 10 7 3
9 13 0 12 6 2
11 10 12 0 5 1
8 7 6 5 0 0
4 3 2 1 0 0 ;]
test_mat_b5_indices = generate_indices(size(test_mat_b5))
filter!(x->x!=CartesianIndex(1,3), test_mat_b5_indices)
filter!(x->x!=CartesianIndex(1,4), test_mat_b5_indices)
filter!(x->x!=CartesianIndex(2,4), test_mat_b5_indices)
filter!(x->x!=CartesianIndex(3,4), test_mat_b5_indices)
filter!(x->x!=CartesianIndex(3,1), test_mat_b5_indices)
filter!(x->x!=CartesianIndex(4,1), test_mat_b5_indices)
filter!(x->x!=CartesianIndex(4,2), test_mat_b5_indices)
filter!(x->x!=CartesianIndex(4,3), test_mat_b5_indices)
ord_mat_b5_2 = get_ordered_matrix(10 .*test_mat_b5, force_symmetry=true, assign_same_values=false)
@test issymmetric(ord_mat_b5_2)
@test_skip ord_mat_b5_2[test_mat_b5_indices] == test_mat_b5_ord1[test_mat_b5_indices]
# forcing symmetry test:
@test length(unique(ord_mat_b5_2))-1 == (size(test_mat_b5,1)*(size(test_mat_b5,1)-1))/2
end
let test_mat_b5_ord2 = [14 14 12 11 10 6;
14 14 13 12 9 5;
1 13 14 12 8 4;
11 0 12 14 7 3;
10 10 8 7 14 2;
6 5 4 3 2 14],
ord_mat_b5_3 = get_ordered_matrix(10 .*test_mat_b5, force_symmetry=false, assign_same_values=true)
@test !issymmetric(ord_mat_b5_3)
@test_skip ord_mat_b5_3 == test_mat_b5_ord2
end
let test_mat_b5_ord3 = [0 12 10 9 8 4;
12 0 11 10 7 3;
10 11 0 10 6 2;
9 10 10 0 5 1;
8 7 6 5 0 0;
4 3 2 1 0 0]
ord_mat_b5_4 = get_ordered_matrix(10 .*test_mat_b5, force_symmetry=true, assign_same_values=true)
@test issymmetric(ord_mat_b5_4)
@test_skip ord_mat_b5_4 == test_mat_b5_ord3
end
end
# ==================
end
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | test/runtests.jl | code | 464 | using SafeTestsets
# TODO add description to the tests
## ===-===-===-===-===-===-===-===-
@safetestset "MatrixProcessing tests" begin
include("matrixProcessing_tests.jl")
end
## ===-===-===-===-===-===-===-===-
@safetestset "MatrixOrganization tests" begin
include("matrixOrganisation_tests.jl")
end
## ===-===-===-===-===-===-===-===-
@safetestset "BettiCurves tests" begin
include("bettiCurves_tests.jl")
end
## ===-===-===-===-===-===-===-===-
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | README.md | docs | 352 | # TopologyPreprocessing
![Continuous Integration (CI) status](https://github.com/edd26/TopologyPreprocessing/actions/workflows/CI_julia.yml/badge.svg)
## Installation
This package registeration is being processed now. After being registered, to install it, run the following.
```julia
julia> using Pkg
julia> Pkg.add("TopologyPreprocessing")
```
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
[
"MIT"
] | 0.1.6 | 523f376b635406653aedc47262f302626d592d57 | docs/src/index.md | docs | 114 | # Example.jl Documentation
```@contents
```
## Functions
```@docs
get_barcodes(x)
```
## Index
```@index
```
| TopologyPreprocessing | https://github.com/edd26/TopologyPreprocessing.git |
[
"MIT"
] | 0.1.3 | 32574567d25366649afcc1445f543cdf953c0282 | docs/make.jl | code | 558 | using NicePipes
using Documenter
makedocs(;
modules=[NicePipes],
authors="Simeon Schaub <[email protected]> and contributors",
repo="https://github.com/simeonschaub/NicePipes.jl/blob/{commit}{path}#L{line}",
sitename="NicePipes.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://simeonschaub.github.io/NicePipes.jl",
assets=String[],
),
pages=[
"Home" => "index.md",
],
)
deploydocs(;
repo="github.com/simeonschaub/NicePipes.jl",
)
| NicePipes | https://github.com/simeonschaub/NicePipes.jl.git |
[
"MIT"
] | 0.1.3 | 32574567d25366649afcc1445f543cdf953c0282 | src/NicePipes.jl | code | 2084 | module NicePipes
if VERSION < v"1.3.0-rc4"
@warn "Can't use binary artifacts, using your system's `grep` and `sed`."
grep(f) = f("grep")
sed(f) = f("sed")
else
using grep_jll, sed_jll
end
struct ShPipe{T,C}
val::T
cmd::C
args::Cmd
end
# like Base.open, but doesn't throw if exitcode is non-zero and always returns process instead
# of return value of f
function _open(f::Function, cmds::Base.AbstractCmd, args...; kwargs...)
P = open(cmds, args...; kwargs...)
ret = try
f(P)
catch
kill(P)
rethrow()
finally
close(P.in)
end
wait(P)
return P
end
function Base.show(io_out::IO, x::ShPipe)
x.cmd() do cmd
p = _open(`$cmd $(x.args)`, "w", io_out) do io_in
show(io_in, MIME("text/plain"), x.val)
end
if x.cmd === grep && p.exitcode == 1
println(io_out, "No matches found!")
elseif p.exitcode != 0
print(io_out, "Command $(p.cmd) failed with exit code $(p.exitcode)")
elseif x.cmd === grep
# delete additional newline
print(io_out, "\033[1A")
end
end
return nothing
end
struct ShPipeEndpoint{C}
cmd::C
args::Cmd
end
macro p_cmd(s)
cmd, args = match(r"^(.*)\s(.*)$", s).captures
return :(ShPipeEndpoint(f->f($cmd)), @cmd($args))
end
(endpoint::ShPipeEndpoint)(val) = ShPipe(val, endpoint.cmd, endpoint.args)
Base.:|(val, endpoint::ShPipeEndpoint) = val |> endpoint
macro special_command(cmd)
return quote
export $(Symbol('@', cmd))
macro $cmd(args...)
args = map(args) do arg
# interpret raw_str as raw string
if Meta.isexpr(arg, :macrocall) && arg.args[1] === Symbol("@raw_str")
arg = arg.args[3]
end
return arg isa String ? string('"', arg, '"') : arg
end
args = join(args, ' ')
return :(ShPipeEndpoint($$cmd, @cmd($args)))
end
end |> esc
end
@special_command grep
@special_command sed
end
| NicePipes | https://github.com/simeonschaub/NicePipes.jl.git |
[
"MIT"
] | 0.1.3 | 32574567d25366649afcc1445f543cdf953c0282 | test/runtests.jl | code | 751 | using NicePipes
using Test
@static if Sys.iswindows()
const LE = "\r\n"
else
const LE = "\n"
end
function test_show(x, show_x)
io = IOBuffer()
show(io, x)
output = String(take!(io))
output = replace(output, LE*"\e[1A" => "")
@test output == show_x
end
@testset "NicePipes.jl" begin
a = ["foo", "bar"]
show_a = [
"2-element $(Vector{String}):",
" \"foo\"",
" \"bar\"",
]
test_show((a | @grep foo), show_a[2])
test_show((a | @grep -iv FoO), join(show_a[[1, 3]], LE))
test_show((3 | @grep 4), "No matches found!\n")
test_show((a | @sed "/foo/d"), join(show_a[[1, 3]], LE))
test_show((a | @sed raw"s/f\(o\+\)/b\1/g"), show_a[1] * LE * " \"boo\"" * LE * show_a[3])
end
| NicePipes | https://github.com/simeonschaub/NicePipes.jl.git |
[
"MIT"
] | 0.1.3 | 32574567d25366649afcc1445f543cdf953c0282 | README.md | docs | 1702 | # NicePipes
[![Build Status](https://github.com/simeonschaub/NicePipes.jl/workflows/CI/badge.svg)](https://github.com/simeonschaub/NicePipes.jl/actions)
[![Coverage](https://codecov.io/gh/simeonschaub/NicePipes.jl/branch/master/graph/badge.svg)](https://codecov.io/gh/simeonschaub/NicePipes.jl)
[![Stable](https://img.shields.io/badge/docs-stable-blue.svg)](https://simeonschaub.github.io/NicePipes.jl/stable)
[![Dev](https://img.shields.io/badge/docs-dev-blue.svg)](https://simeonschaub.github.io/NicePipes.jl/dev)
[![pkgeval](https://juliahub.com/docs/NicePipes/pkgeval.svg)](https://juliahub.com/ui/Packages/NicePipes/tVmGC)
Pipe REPL `show` output into unix tools:
```julia
julia> using NicePipes
julia> methods(+) | @grep BigFloat
[15] +(c::BigInt, x::BigFloat) in Base.MPFR at mpfr.jl:414
[22] +(a::BigFloat, b::BigFloat, c::BigFloat, d::BigFloat, e::BigFloat) in Base.MPFR at mpfr.jl:564
[23] +(a::BigFloat, b::BigFloat, c::BigFloat, d::BigFloat) in Base.MPFR at mpfr.jl:557
[24] +(a::BigFloat, b::BigFloat, c::BigFloat) in Base.MPFR at mpfr.jl:551
[25] +(x::BigFloat, c::BigInt) in Base.MPFR at mpfr.jl:410
[26] +(x::BigFloat, y::BigFloat) in Base.MPFR at mpfr.jl:379
[27] +(x::BigFloat, c::Union{UInt16, UInt32, UInt64, UInt8}) in Base.MPFR at mpfr.jl:386
[28] +(x::BigFloat, c::Union{Int16, Int32, Int64, Int8}) in Base.MPFR at mpfr.jl:394
[29] +(x::BigFloat, c::Union{Float16, Float32, Float64}) in Base.MPFR at mpfr.jl:402
[61] +(c::Union{UInt16, UInt32, UInt64, UInt8}, x::BigFloat) in Base.MPFR at mpfr.jl:390
[62] +(c::Union{Int16, Int32, Int64, Int8}, x::BigFloat) in Base.MPFR at mpfr.jl:398
[63] +(c::Union{Float16, Float32, Float64}, x::BigFloat) in Base.MPFR at mpfr.jl:406
```
| NicePipes | https://github.com/simeonschaub/NicePipes.jl.git |
[
"MIT"
] | 0.1.3 | 32574567d25366649afcc1445f543cdf953c0282 | docs/src/index.md | docs | 107 | ```@meta
CurrentModule = NicePipes
```
# NicePipes
```@index
```
```@autodocs
Modules = [NicePipes]
```
| NicePipes | https://github.com/simeonschaub/NicePipes.jl.git |
[
"MIT"
] | 0.1.0 | 4c035c67eee91a19afdc5db63eb71464c5db32ba | docs/make.jl | code | 820 | using Documenter, DynamicNLPModels
const _PAGES = [
"Introduction" => "index.md",
"Quick Start"=>"guide.md",
"API Manual" => "api.md"
]
makedocs(
sitename = "DynamicNLPModels",
authors = "David Cole, Sungho Shin, Francois Pacaud",
format = Documenter.LaTeX(platform="docker"),
pages = _PAGES
)
makedocs(
sitename = "DynamicNLPModels",
modules = [DynamicNLPModels],
authors = "David Cole, Sungho Shin, Francois Pacaud",
format = Documenter.HTML(
prettyurls = get(ENV, "CI", nothing) == "true",
sidebar_sitename = true,
collapselevel = 1,
),
pages = _PAGES,
clean = false,
)
deploydocs(
repo = "github.com/MadNLP/DynamicNLPModels.jl.git",
target = "build",
devbranch = "main",
devurl = "dev",
push_preview = true,
)
| DynamicNLPModels | https://github.com/MadNLP/DynamicNLPModels.jl.git |
[
"MIT"
] | 0.1.0 | 4c035c67eee91a19afdc5db63eb71464c5db32ba | examples/Madnlp_densekkt_example.jl | code | 2952 | using DynamicNLPModels, Random, LinearAlgebra, SparseArrays
using MadNLP, QuadraticModels, MadNLPGPU, CUDA, NLPModels
# Extend MadNLP functions
function MadNLP.jac_dense!(nlp::DenseLQDynamicModel{T, V, M1, M2, M3}, x, jac) where {T, V, M1<: AbstractMatrix, M2 <: AbstractMatrix, M3 <: AbstractMatrix}
NLPModels.increment!(nlp, :neval_jac)
J = nlp.data.A
copyto!(jac, J)
end
function MadNLP.hess_dense!(nlp::DenseLQDynamicModel{T, V, M1, M2, M3}, x, w1l, hess; obj_weight = 1.0) where {T, V, M1<: AbstractMatrix, M2 <: AbstractMatrix, M3 <: AbstractMatrix}
NLPModels.increment!(nlp, :neval_hess)
H = nlp.data.H
copyto!(hess, H)
end
# Time horizon
N = 3
# generate random Q, R, A, and B matrices
Random.seed!(10)
Q_rand = Random.rand(2, 2)
Q = Q_rand * Q_rand' + I
R_rand = Random.rand(1, 1)
R = R_rand * R_rand' + I
A_rand = rand(2, 2)
A = A_rand * A_rand' + I
B = rand(2, 1)
# generate upper and lower bounds
sl = rand(2)
ul = fill(-15.0, 1)
su = sl .+ 4
uu = ul .+ 10
s0 = sl .+ 2
# Define K matrix for numerical stability of condensed problem
K = - [1.41175 2.47819;] # found from MatrixEquations.jl; ared(A, B, 1, 1)
# Build model for 1 D heat transfer
lq_dense = DenseLQDynamicModel(s0, A, B, Q, R, N; K = K, sl = sl, su = su, ul = ul, uu = uu)
lq_sparse = SparseLQDynamicModel(s0, A, B, Q, R, N; sl = sl, su = su, ul = ul, uu = uu)
# Solve the dense problem
dense_options = Dict{Symbol, Any}(
:kkt_system => MadNLP.DENSE_CONDENSED_KKT_SYSTEM,
:linear_solver=> LapackCPUSolver,
:max_iter=> 50,
:jacobian_constant=>true,
:hessian_constant=>true,
:lapack_algorithm=>MadNLP.CHOLESKY
)
d_ips = MadNLP.InteriorPointSolver(lq_dense, option_dict = dense_options)
sol_ref_dense = MadNLP.optimize!(d_ips)
# Solve the sparse problem
sparse_options = Dict{Symbol, Any}(
:max_iter=>50,
:jacobian_constant=>true,
:hessian_constant=>true,
)
s_ips = MadNLP.InteriorPointSolver(lq_sparse, option_dict = sparse_options)
sol_ref_sparse = MadNLP.optimize!(s_ips)
# Solve the dense problem on the GPU
gpu_options = Dict{Symbol, Any}(
:kkt_system=>MadNLP.DENSE_CONDENSED_KKT_SYSTEM,
:linear_solver=>LapackGPUSolver,
:max_iter=>50,
:jacobian_constant=>true,
:hessian_constant=>true,
:lapack_algorithm=>MadNLP.CHOLESKY
)
gpu_ips = MadNLPGPU.CuInteriorPointSolver(lq_dense, option_dict = gpu_options)
sol_ref_gpu = MadNLP.optimize!(gpu_ips)
println("States from dense problem on CPU are ", get_s(sol_ref_dense, lq_dense))
println("States from dense problem on GPU are ", get_s(sol_ref_gpu, lq_dense))
println("States from sparse problem on CPU are ", get_s(sol_ref_sparse, lq_sparse))
println()
println("Inputs from dense problem on CPU are ", get_u(sol_ref_dense, lq_dense))
println("Inputs from dense problem on GPU are ", get_u(sol_ref_gpu, lq_dense))
println("Inputs from sparse problem on CPU are ", get_u(sol_ref_sparse, lq_sparse))
| DynamicNLPModels | https://github.com/MadNLP/DynamicNLPModels.jl.git |
[
"MIT"
] | 0.1.0 | 4c035c67eee91a19afdc5db63eb71464c5db32ba | src/DynamicNLPModels.jl | code | 542 | module DynamicNLPModels
# NOTE(review): the line above is corrupted by concatenated repository metadata;
# the Julia source proper begins at `module DynamicNLPModels`.

# Package dependencies used throughout the package.
import NLPModels
import QuadraticModels
import LinearAlgebra
import SparseArrays
import LinearOperators
import CUDA
import CUDA: CUBLAS
import SparseArrays: SparseMatrixCSC

# Public API: problem data, the two model formulations, and solution accessors.
export LQDynamicData, SparseLQDynamicModel, DenseLQDynamicModel
export get_u, get_s, get_jacobian, add_jtsj!, reset_s0!

# Implementation is split across the LinearQuadratic submodule files.
include(joinpath("LinearQuadratic", "LinearQuadratic.jl"))
include(joinpath("LinearQuadratic", "sparse.jl"))
include(joinpath("LinearQuadratic", "dense.jl"))
end # module
| DynamicNLPModels | https://github.com/MadNLP/DynamicNLPModels.jl.git |
[
"MIT"
] | 0.1.0 | 4c035c67eee91a19afdc5db63eb71464c5db32ba | src/LinearQuadratic/LinearQuadratic.jl | code | 12791 | abstract type AbstractLQDynData{T, V} end
@doc raw"""
    LQDynamicData{T,V,M,MK} <: AbstractLQDynData{T,V}

A struct to represent the features of the optimization problem
```math
\begin{aligned}
    \min \frac{1}{2} &\; \sum_{i = 0}^{N - 1}(s_i^T Q s_i + 2 u_i^T S^T x_i + u_i^T R u_i) + \frac{1}{2} s_N^T Q_f s_N \\
    \textrm{s.t.} &\; s_{i+1} = A s_i + B u_i + w_i  \quad \forall i=0, 1, ..., N-1 \\
    &\; u_i = Kx_i + v_i \quad  \forall i = 0, 1, ..., N - 1 \\
    &\; g^l \le E s_i + F u_i \le g^u \quad \forall i = 0, 1, ..., N-1\\
    &\; s^l \le s \le s^u \\
    &\; u^l \le u \le u^u \\
    &\; s_0 = s0
\end{aligned}
```
---

Attributes include:
- `s0`: initial state of system
- `A` : constraint matrix for system states
- `B` : constraint matrix for system inputs
- `Q` : objective function matrix for system states from 0:(N-1)
- `R` : objective function matrix for system inputs from 0:(N-1)
- `N` : number of time steps
- `Qf`: objective function matrix for system state at time N
- `S` : objective function matrix for system states and inputs
- `ns`: number of state variables
- `nu`: number of input variables
- `E` : constraint matrix for state variables
- `F` : constraint matrix for input variables
- `K` : feedback gain matrix
- `w` : constant term for dynamic constraints
- `sl`: vector of lower bounds on state variables
- `su`: vector of upper bounds on state variables
- `ul`: vector of lower bounds on input variables
- `uu`: vector of upper bounds on input variables
- `gl`: vector of lower bounds on constraints
- `gu`: vector of upper bounds on constraints

see also `LQDynamicData(s0, A, B, Q, R, N; ...)`
"""
struct LQDynamicData{T, V, M, MK} <: AbstractLQDynData{T, V}
    s0::V   # initial state (length ns)
    A::M    # state transition matrix (ns x ns)
    B::M    # input matrix (ns x nu)
    Q::M    # state cost matrix (ns x ns)
    R::M    # input cost matrix (nu x nu)
    N::Int  # horizon length (number of time steps)
    Qf::M   # terminal state cost matrix (ns x ns)
    S::M    # state-input cross cost matrix (ns x nu)
    ns::Int # number of states
    nu::Int # number of inputs
    E::M    # state rows of the algebraic constraints gl <= Es + Fu <= gu
    F::M    # input rows of the algebraic constraints
    K::MK   # feedback gain (nu x ns), or `nothing` when no feedback law is used
    w::V    # additive disturbance terms, stacked over the horizon (length ns * N)
    sl::V   # state lower bounds
    su::V   # state upper bounds
    ul::V   # input lower bounds
    uu::V   # input upper bounds
    gl::V   # algebraic-constraint lower bounds
    gu::V   # algebraic-constraint upper bounds
end
@doc raw"""
LQDynamicData(s0, A, B, Q, R, N; ...) -> LQDynamicData{T, V, M, MK}
A constructor for building an object of type `LQDynamicData` for the optimization problem
```math
\begin{aligned}
\min \frac{1}{2} &\; \sum_{i = 0}^{N - 1}(s_i^T Q s_i + 2 u_i^T S^T x_i + u_i^T R u_i) + \frac{1}{2} s_N^T Q_f s_N \\
\textrm{s.t.} &\; s_{i+1} = A s_i + B u_i + w_i \quad \forall i=0, 1, ..., N-1 \\
&\; u_i = Kx_i + v_i \quad \forall i = 0, 1, ..., N - 1 \\
&\; gl \le E s_i + F u_i \le gu \quad \forall i = 0, 1, ..., N-1\\
&\; sl \le s \le su \\
&\; ul \le u \le uu \\
&\; s_0 = s0
\end{aligned}
```
---
- `s0`: initial state of system
- `A` : constraint matrix for system states
- `B` : constraint matrix for system inputs
- `Q` : objective function matrix for system states from 0:(N-1)
- `R` : objective function matrix for system inputs from 0:(N-1)
- `N` : number of time steps
The following attributes of the `LQDynamicData` type are detected automatically from the length of s0 and size of R
- `ns`: number of state variables
- `nu`: number of input variables
The following keyward arguments are also accepted
- `Qf = Q`: objective function matrix for system state at time N; dimensions must be ns x ns
- `S = nothing`: objective function matrix for system state and inputs
- `E = zeros(eltype(Q), 0, ns)` : constraint matrix for state variables
- `F = zeros(eltype(Q), 0, nu)` : constraint matrix for input variables
- `K = nothing` : feedback gain matrix
- `w = zeros(eltype(Q), ns * N)` : constant term for dynamic constraints
- `sl = fill(-Inf, ns)`: vector of lower bounds on state variables
- `su = fill(Inf, ns)` : vector of upper bounds on state variables
- `ul = fill(-Inf, nu)`: vector of lower bounds on input variables
- `uu = fill(Inf, nu)` : vector of upper bounds on input variables
- `gl = fill(-Inf, size(E, 1))` : vector of lower bounds on constraints
- `gu = fill(Inf, size(E, 1))` : vector of upper bounds on constraints
"""
function LQDynamicData(
    s0::V,
    A::M,
    B::M,
    Q::M,
    R::M,
    N;
    Qf::M = Q,
    S::M = _init_similar(Q, size(Q, 1), size(R, 1), T),
    E::M = _init_similar(Q, 0, length(s0), T),
    F::M = _init_similar(Q, 0, size(R, 1), T),
    K::MK = nothing,
    w::V = _init_similar(s0, length(s0) * N, T),
    sl::V = (similar(s0) .= -Inf),
    su::V = (similar(s0) .= Inf),
    ul::V = (similar(s0, size(R, 1)) .= -Inf),
    uu::V = (similar(s0, size(R, 1)) .= Inf),
    gl::V = (similar(s0, size(E, 1)) .= -Inf),
    # Consistency fix: size the default `gu` from E (as `gl` is). E and F are
    # validated below to have the same number of rows, so this is equivalent
    # for every successfully-constructed problem.
    gu::V = (similar(s0, size(E, 1)) .= Inf),
) where {
    T,
    V <: AbstractVector{T},
    M <: AbstractMatrix{T},
    MK <: Union{Nothing, AbstractMatrix{T}},
}
    # --- dimension and bound validation --------------------------------------
    if size(Q, 1) != size(Q, 2)
        error("Q matrix is not square")
    end
    # Bug fix: this previously compared size(R, 1) to itself (always false),
    # so a non-square R was silently accepted.
    if size(R, 1) != size(R, 2)
        error("R matrix is not square")
    end
    if size(A, 2) != length(s0)
        error("Number of columns of A are not equal to the number of states")
    end
    if size(B, 2) != size(R, 1)
        error("Number of columns of B are not equal to the number of inputs")
    end
    if length(s0) != size(Q, 1)
        error("size of Q is not consistent with length of s0")
    end
    if !all(sl .<= su)
        error("lower bound(s) on s is > upper bound(s)")
    end
    if !all(ul .<= uu)
        error("lower bound(s) on u is > upper bound(s)")
    end
    if !all(sl .<= s0) || !all(s0 .<= su)
        error("s0 is not within the given upper and lower bounds")
    end
    if size(E, 1) != size(F, 1)
        error("E and F have different numbers of rows")
    end
    if !all(gl .<= gu)
        error("lower bound(s) on Es + Fu is > upper bound(s)")
    end
    if size(E, 2) != size(Q, 1)
        error("Dimensions of E are not the same as number of states")
    end
    if size(F, 2) != size(R, 1)
        error("Dimensions of F are not the same as the number of inputs")
    end
    if length(gl) != size(E, 1)
        error("Dimensions of gl do not match E and F")
    end
    if length(gu) != size(E, 1)
        error("Dimensions of gu do not match E and F")
    end
    if size(S, 1) != size(Q, 1) || size(S, 2) != size(R, 1)
        error("Dimensions of S do not match dimensions of Q and R")
    end
    # Use `!==` for identity comparison with `nothing` (idiomatic; `!=` may
    # dispatch through `==`).
    if K !== nothing
        if size(K, 1) != size(R, 1) || size(K, 2) != size(Q, 1)
            error("Dimensions of K do not match number of states and inputs")
        end
    end
    # w stacks one length-ns disturbance vector per time step.
    if Int(size(w, 1)) != Int(size(s0, 1) * N)
        error("Dimensions of w do not match ns * N")
    end

    # Problem dimensions inferred from the data.
    ns = size(Q, 1)
    nu = size(R, 1)

    LQDynamicData{T, V, M, MK}(
        s0, A, B, Q, R, N, Qf, S, ns, nu, E, F, K, w, sl, su, ul, uu, gl, gu,
    )
end
# Common supertype for the sparse and dense LQ dynamic model formulations; both
# are quadratic models in the sense of QuadraticModels.jl.
abstract type AbstractDynamicModel{T, V} <: QuadraticModels.AbstractQuadraticModel{T, V} end

"""
    SparseLQDynamicModel{T, V, M1, M2, M3, MK} <: AbstractDynamicModel{T, V}

Sparse formulation of an LQ dynamic optimization problem: the QP data (`data`)
keeps both states and inputs as decision variables, alongside the original
problem description (`dynamic_data`).
"""
struct SparseLQDynamicModel{T, V, M1, M2, M3, MK} <: AbstractDynamicModel{T, V}
    meta::NLPModels.NLPModelMeta{T, V}              # problem dimensions/bounds for NLPModels
    counters::NLPModels.Counters                    # evaluation counters
    data::QuadraticModels.QPData{T, V, M1, M2}      # QP objective and constraint data
    dynamic_data::LQDynamicData{T, V, M3, MK}       # original LQ problem description
end
"""
Struct containing block matrices used for creating and resetting the `DenseLQDynamicModel`. A and B matrices are given in part by
Jerez, Kerrigan, and Constantinides in section 4 of "A sparse and condensed QP formulation for predictive control of LTI systems"
(doi:10.1016/j.automatica.2012.03.010). States are eliminated by the equation ``x = Ax_0 + Bu + \\hat{A}w`` where ``x = [x_0^T, x_1^T, ..., x_N^T]``
and ``u = [u_0^T, u_1^T, ..., u_{N-1}^T]``

---

- `A`  : block A matrix given by Jerez et al. with ``n_s(N + 1)`` rows and ns columns
- `B`  : block B matrix given by Jerez et al. with ``n_s(N)`` rows and nu columns
- `Aw` : length ``n_s(N + 1)`` vector corresponding to the linear term of the dynamic constraints
- `h`  : ``n_u(N) \\times n_s`` matrix for building the linear term of the objective function. Just needs to be
multiplied by `s0`.
- `h01`: ns x ns matrix for building the constant term of the objective function. This can be found by
taking ``s_0^T`` `h01` ``s_0``
- `h02`: similar to `h01`, but one side is multiplied by `Aw` rather than by `As0`. This will just
be multiplied by `s0` once
- `h_constant` : linear term in the objective function that arises from `Aw`. Not a function of `s0`
- `h0_constant`: constant term in the objective function that arises from `Aw`. Not a function of `s0`
- `d`  : length ``n_c(N)`` term for the constraint bounds corresponding to `E` and `F`. Must be multiplied by `s0` and
subtracted from `gl` and `gu`. Equal to the blocks (E + FK) A (see Jerez et al.)
- `dw` : length ``n_c(N)`` term for the constraint bounds that arises from `w`. Equal to the blocks (E + FK) Aw
- `KA` : size ``n_u(N)`` x ns matrix. Needs to be multiplied by `s0` and subtracted from `ul` and `uu` to update
the algebraic constraints corresponding to the input bounds
- `KAw`: similar to `KA`, but it is multiplied by Aw rather than A

See also `reset_s0!`
"""
mutable struct DenseLQDynamicBlocks{T, V, M}
    A::M
    B::M
    Aw::V # Aw = block_matrix_A * w (result is a Vector; block_matrix A is like block_B, but with I instead of B)
    h::M  # h = (QB + SKB + K^T R K B + K^T S^T B)^T A + (S + K^T R)^T A
    h01::M # h01 = A^T((Q + KTRK + KTST + SK))A where Q, K, R, S, and A are block matrices just needs to be multiplied by s0 on each side
    h02::V # h02 = wT block_matrix_AT (Q + KTRK + KTSK + SK) A; just needs to be multiplied by s0 on right
    h_constant::V  # h_constant  = BT (Q + KTRK + SK + KTST) block_matrix_A w + (RK + ST)B block_matrix_A w
    h0_constant::T # h0_constant = wT block_matrix_AT (Q + KTRK + KTSK + SK) block_matrix_A w
    d::M   # d = (E + FK) A
    dw::V  # dw = (E + FK) block_matrix_A w - constant term to be subtracted from d
    KA::M
    KAw::V
end
"""
    DenseLQDynamicModel{T, V, M1, M2, M3, M4, MK} <: AbstractDynamicModel{T, V}

Condensed (dense) formulation of an LQ dynamic optimization problem: states are
eliminated, so the QP data (`data`) has only the inputs as decision variables.
`blocks` holds the intermediate block matrices needed to rebuild the model for a
new initial state (see `reset_s0!`).
"""
struct DenseLQDynamicModel{T, V, M1, M2, M3, M4, MK} <: AbstractDynamicModel{T, V}
    meta::NLPModels.NLPModelMeta{T, V}              # problem dimensions/bounds for NLPModels
    counters::NLPModels.Counters                    # evaluation counters
    data::QuadraticModels.QPData{T, V, M1, M2}      # condensed QP objective/constraint data
    dynamic_data::LQDynamicData{T, V, M3, MK}       # original LQ problem description
    blocks::DenseLQDynamicBlocks{T, V, M4}          # cached condensation blocks
end
"""
    LQJacobianOperator{T, V, M}

Struct for storing the implicit Jacobian matrix. All data for the Jacobian can be stored
in the first `nu` columns of the full Jacobian. This struct contains the needed data and storage arrays for
calculating ``Jx``, ``J^T x``, and ``J^T \\Sigma J``. ``Jx`` and ``J^T x`` are performed through extensions
to `LinearAlgebra.mul!()`.

---

Attributes
 - `truncated_jac1`: Tensor of Jacobian blocks (first `nu` columns) corresponding to Ax + Bu constraints
 - `truncated_jac2`: Tensor of Jacobian blocks (first `nu` columns) corresponding to state variable bounds
 - `truncated_jac3`: Tensor of Jacobian blocks (first `nu` columns) corresponding to input variable bounds
 - `N`  : number of time steps
 - `nu` : number of inputs
 - `nc` : number of algebraic constraints of the form gl <= Es + Fu <= gu
 - `nsc`: number of bounded state variables
 - `nuc`: number of bounded input variables (if `K` is defined)
 - `SJ1`: placeholder for storing data when calculating `ΣJ`
 - `SJ2`: placeholder for storing data when calculating `ΣJ`
 - `SJ3`: placeholder for storing data when calculating `ΣJ`
 - `H_sub_block`: placeholder for storing data when adding `J^T ΣJ` to the Hessian
"""
struct LQJacobianOperator{T, M, A} <: LinearOperators.AbstractLinearOperator{T}
    truncated_jac1::A  # tensor of Jacobian blocks corresponding Ex + Fu constraints
    truncated_jac2::A  # tensor of Jacobian blocks corresponding to state variable limits
    truncated_jac3::A  # tensor of Jacobian blocks corresponding to input variable limits

    N::Int             # number of time steps
    nu::Int            # number of inputs
    nc::Int            # number of inequality constraints
    nsc::Int           # number of state variables that are constrained
    nuc::Int           # number of input variables that are constrained

    # Storage tensors for building Jx and J^Tx
    x1::A
    x2::A
    x3::A
    y::A

    # Storage tensors for building J^TΣJ
    SJ1::M
    SJ2::M
    SJ3::M

    # Storage block for adding J^TΣJ to H
    H_sub_block::M
end
# Allocate an array shaped like `mat` with the requested dimensions and zero it
# out. `T` only selects the zero value used for filling; the element type of the
# result is always determined by `similar(mat, ...)`.
_init_similar(mat, dim1::Number, dim2::Number, dim3::Number, T::DataType) =
    fill!(similar(mat, dim1, dim2, dim3), zero(T))

# Two-dimensional variant; `T` defaults to the element type of `mat`.
_init_similar(mat, dim1::Number, dim2::Number, T = eltype(mat)) =
    fill!(similar(mat, dim1, dim2), zero(T))

# One-dimensional variant; `T` defaults to the element type of `mat`.
_init_similar(mat, dim1::Number, T = eltype(mat)) =
    fill!(similar(mat, dim1), zero(T))
| DynamicNLPModels | https://github.com/MadNLP/DynamicNLPModels.jl.git |
[
"MIT"
] | 0.1.0 | 4c035c67eee91a19afdc5db63eb71464c5db32ba | src/LinearQuadratic/dense.jl | code | 36525 | @doc raw"""
DenseLQDynamicModel(dnlp::LQDynamicData; implicit = false) -> DenseLQDynamicModel
DenseLQDynamicModel(s0, A, B, Q, R, N; implicit = false ...) -> DenseLQDynamicModel
A constructor for building a `DenseLQDynamicModel <: QuadraticModels.AbstractQuadraticModel`
Input data is for the problem of the form
```math
\begin{aligned}
\min \frac{1}{2} &\; \sum_{i = 0}^{N - 1}(s_i^T Q s_i + 2 u_i^T S^T x_i + u_i^T R u_i) + \frac{1}{2} s_N^T Q_f s_N \\
\textrm{s.t.} &\; s_{i+1} = A s_i + B u_i + w_i \quad \forall i=0, 1, ..., N-1 \\
&\; u_i = Kx_i + v_i \quad \forall i = 0, 1, ..., N - 1 \\
&\; gl \le E s_i + F u_i \le gu \quad \forall i = 0, 1, ..., N-1\\
&\; sl \le s \le su \\
&\; ul \le u \le uu \\
&\; s_0 = s0
\end{aligned}
```
---
Data is converted to the form
```math
\begin{aligned}
\min &\; \frac{1}{2} z^T H z \\
\textrm{s.t.} &\; \textrm{lcon} \le Jz \le \textrm{ucon}\\
&\; \textrm{lvar} \le z \le \textrm{uvar}
\end{aligned}
```
Resulting `H`, `J`, `h`, and `h0` matrices are stored within `QuadraticModels.QPData` as `H`, `A`, `c`, and `c0` attributes respectively
If `K` is defined, then `u` variables are replaced by `v` variables. The bounds on `u` are transformed into algebraic constraints,
and `u` can be queried by `get_u` and `get_s` within `DynamicNLPModels.jl`
Keyword argument `implicit = false` determines how the Jacobian is stored within the `QPData`. If `implicit = false`, the full, dense
Jacobian matrix is stored. If `implicit = true`, only the first `nu` columns of the Jacobian are stored with the Linear Operator `LQJacobianOperator`.
"""
# Dispatch on `implicit`: build the model with either a full dense Jacobian or
# the implicit `LQJacobianOperator` representation.
# Cleanup: removed the unused static parameter `MK` from the `where` clause —
# it never appeared in the signature, so it could not be bound and served no
# purpose. Dispatch is unchanged: `LQDynamicData{T, V, M}` matches any `MK`.
function DenseLQDynamicModel(
    dnlp::LQDynamicData{T, V, M};
    implicit::Bool = false,
) where {T, V <: AbstractVector{T}, M <: AbstractMatrix{T}}
    if implicit
        _build_implicit_dense_lq_dynamic_model(dnlp)
    else
        _build_dense_lq_dynamic_model(dnlp)
    end
end
# Keyword-argument convenience constructor: assembles an `LQDynamicData` from
# the raw problem matrices (validating dimensions in the process) and forwards
# to `DenseLQDynamicModel(dnlp; implicit)`.
function DenseLQDynamicModel(
    s0::V,
    A::M,
    B::M,
    Q::M,
    R::M,
    N;
    Qf::M = Q,
    S::M = _init_similar(Q, size(Q, 1), size(R, 1), T),
    E::M = _init_similar(Q, 0, length(s0), T),
    F::M = _init_similar(Q, 0, size(R, 1), T),
    K::MK = nothing,
    w::V = _init_similar(s0, length(s0) * N, T),
    sl::V = (similar(s0) .= -Inf),
    su::V = (similar(s0) .= Inf),
    ul::V = (similar(s0, size(R, 1)) .= -Inf),
    uu::V = (similar(s0, size(R, 1)) .= Inf),
    gl::V = (similar(s0, size(E, 1)) .= -Inf),
    gu::V = (similar(s0, size(F, 1)) .= Inf),
    implicit = false,
) where {
    T,
    V <: AbstractVector{T},
    M <: AbstractMatrix{T},
    MK <: Union{Nothing, AbstractMatrix{T}},
}
    # Bundle everything into the validated problem-data struct.
    dnlp = LQDynamicData(
        s0,
        A,
        B,
        Q,
        R,
        N;
        Qf = Qf,
        S = S,
        E = E,
        F = F,
        K = K,
        w = w,
        sl = sl,
        su = su,
        ul = ul,
        uu = uu,
        gl = gl,
        gu = gu,
    )

    DenseLQDynamicModel(dnlp; implicit = implicit)
end
# Build a `DenseLQDynamicModel` with an explicit dense Jacobian when no feedback
# gain `K` is supplied (MK <: Nothing). States are condensed out via
# x = block_A * s0 + block_B * u + Aw, so the QP decision variables are only the
# inputs u_0, ..., u_{N-1}; state bounds become rows of the constraint Jacobian.
function _build_dense_lq_dynamic_model(
    dnlp::LQDynamicData{T, V, M, MK},
) where {T, V <: AbstractVector{T}, M <: AbstractMatrix{T}, MK <: Nothing}
    # Unpack the problem data.
    s0 = dnlp.s0
    A = dnlp.A
    B = dnlp.B
    Q = dnlp.Q
    R = dnlp.R
    N = dnlp.N
    Qf = dnlp.Qf
    S = dnlp.S
    ns = dnlp.ns
    nu = dnlp.nu
    E = dnlp.E
    F = dnlp.F
    K = dnlp.K
    w = dnlp.w
    sl = dnlp.sl
    su = dnlp.su
    ul = dnlp.ul
    uu = dnlp.uu
    gl = dnlp.gl
    gu = dnlp.gu
    nc = size(E, 1)
    # States with at least one finite bound; only these generate Jacobian rows.
    bool_vec_s = (su .!= Inf .|| sl .!= -Inf)
    num_real_bounds_s = sum(bool_vec_s)

    # Condensation blocks (see `DenseLQDynamicBlocks`) and the condensed Hessian.
    dense_blocks = _build_block_matrices(A, B, K, N, w, nc)
    block_A = dense_blocks.A
    block_B = dense_blocks.B
    block_d = dense_blocks.d
    block_Aw = dense_blocks.Aw
    block_dw = dense_blocks.dw

    H_blocks = _build_H_blocks(Q, R, block_A, block_B, block_Aw, S, Qf, K, s0, N)

    H = H_blocks.H
    c0 = H_blocks.c0

    # Cache the objective-related blocks so `reset_s0!` can rebuild c and c0.
    dense_blocks.h .= H_blocks.block_h
    dense_blocks.h01 .= H_blocks.block_h01
    dense_blocks.h02 .= H_blocks.block_h02
    dense_blocks.h_constant .= H_blocks.h_constant
    dense_blocks.h0_constant = H_blocks.h0_constant

    G = _init_similar(Q, nc * N, nu, T)
    J = _init_similar(Q, nc * N + num_real_bounds_s * N, nu * N, T)
    As0_bounds = _init_similar(s0, num_real_bounds_s * N, T)

    dl = repeat(gl, N)
    du = repeat(gu, N)

    # First nc*N Jacobian rows: the algebraic constraints gl <= Es + Fu <= gu.
    _set_G_blocks!(G, dl, du, block_B, block_A, block_d, block_Aw, block_dw, s0, E, F, K, N)
    _set_J1_dense!(J, G, N)

    As0 = _init_similar(s0, ns * (N + 1), T)
    LinearAlgebra.mul!(As0, block_A, s0)

    # Input bounds remain simple variable bounds (no K, so u are the variables).
    lvar = repeat(ul, N)
    uvar = repeat(uu, N)

    # Convert state variable constraints to algebraic constraints
    offset_s = N * nc
    if num_real_bounds_s == length(sl)
        # All states bounded: copy whole block_B blocks (fast path).
        As0_bounds .= As0[(1 + ns):(ns * (N + 1))] .+ block_Aw[(1 + ns):(ns * (N + 1))]
        for i = 1:N
            J[
                (offset_s + 1 + (i - 1) * ns):(offset_s + ns * N),
                (1 + nu * (i - 1)):(nu * i),
            ] = @view(block_B[1:(ns * (N - i + 1)), :])
        end
    else
        # Only some states bounded: filter rows by bool_vec_s per time step.
        for i = 1:N
            row_range = (1 + (i - 1) * num_real_bounds_s):(i * num_real_bounds_s)
            As0_bounds[row_range] .=
                As0[(1 + ns * i):(ns * (i + 1))][bool_vec_s] .+
                block_Aw[(1 + ns * i):(ns * (i + 1))][bool_vec_s]
            for j = 1:(N - i + 1)
                J[
                    (offset_s + 1 + (i + j - 2) * num_real_bounds_s):(offset_s + (i + j - 1) * num_real_bounds_s),
                    (1 + nu * (i - 1)):(nu * i),
                ] = @view(block_B[(1 + (j - 1) * ns):(j * ns), :][bool_vec_s, :])
            end
        end

        sl = sl[bool_vec_s]
        su = su[bool_vec_s]
    end

    # Shift the state bounds by the s0/w contribution: sl - (A s0 + Aw) <= Bu.
    lcon2 = repeat(sl, N)
    ucon2 = repeat(su, N)
    LinearAlgebra.axpy!(-1, As0_bounds, ucon2)
    LinearAlgebra.axpy!(-1, As0_bounds, lcon2)

    # Stack algebraic-constraint bounds then converted state bounds.
    lcon = _init_similar(s0, length(dl) + length(lcon2), T)
    ucon = _init_similar(s0, length(du) + length(ucon2), T)

    lcon[1:length(dl)] = dl
    ucon[1:length(du)] = du

    if length(lcon2) > 0
        lcon[(1 + length(dl)):(length(dl) + num_real_bounds_s * N)] = lcon2
        ucon[(1 + length(du)):(length(du) + num_real_bounds_s * N)] = ucon2
    end

    nvar = nu * N
    nnzj = size(J, 1) * size(J, 2)
    nh = size(H, 1)
    nnzh = div(nh * (nh + 1), 2)  # lower triangle of the (dense) Hessian
    ncon = size(J, 1)

    c = _init_similar(s0, nvar, T)
    c .= H_blocks.c

    DenseLQDynamicModel(
        NLPModels.NLPModelMeta(
            nvar,
            x0 = _init_similar(s0, nvar, T),
            lvar = lvar,
            uvar = uvar,
            ncon = ncon,
            lcon = lcon,
            ucon = ucon,
            nnzj = nnzj,
            nnzh = nnzh,
            lin = 1:ncon,
            islp = (ncon == 0);
        ),
        NLPModels.Counters(),
        QuadraticModels.QPData(c0, c, H, J),
        dnlp,
        dense_blocks,
    )
end
# Build a `DenseLQDynamicModel` with an explicit dense Jacobian when a feedback
# gain `K` is supplied (u = K x + v). The QP decision variables become v, so the
# original input bounds on u must themselves be converted into algebraic
# constraint rows (in addition to the converted state bounds).
function _build_dense_lq_dynamic_model(
    dnlp::LQDynamicData{T, V, M, MK},
) where {T, V <: AbstractVector{T}, M <: AbstractMatrix{T}, MK <: AbstractMatrix{T}}
    # Unpack the problem data.
    s0 = dnlp.s0
    A = dnlp.A
    B = dnlp.B
    Q = dnlp.Q
    R = dnlp.R
    N = dnlp.N
    Qf = dnlp.Qf
    S = dnlp.S
    ns = dnlp.ns
    nu = dnlp.nu
    E = dnlp.E
    F = dnlp.F
    K = dnlp.K
    w = dnlp.w
    sl = dnlp.sl
    su = dnlp.su
    ul = dnlp.ul
    uu = dnlp.uu
    gl = dnlp.gl
    gu = dnlp.gu

    nc = size(E, 1)

    # Condensation blocks (closed-loop dynamics A + BK are folded in here).
    dense_blocks = _build_block_matrices(A, B, K, N, w, nc)
    block_A = dense_blocks.A
    block_B = dense_blocks.B
    block_d = dense_blocks.d
    block_Aw = dense_blocks.Aw
    block_dw = dense_blocks.dw
    block_KAw = dense_blocks.KAw

    H_blocks = _build_H_blocks(Q, R, block_A, block_B, block_Aw, S, Qf, K, s0, N)

    H = H_blocks.H
    c0 = H_blocks.c0

    # Cache objective-related blocks for `reset_s0!`.
    dense_blocks.h .= H_blocks.block_h
    dense_blocks.h01 .= H_blocks.block_h01
    dense_blocks.h02 .= H_blocks.block_h02
    dense_blocks.h_constant .= H_blocks.h_constant
    dense_blocks.h0_constant = H_blocks.h0_constant

    # Variables with at least one finite bound; only these generate rows.
    bool_vec_s = (su .!= Inf .|| sl .!= -Inf)
    num_real_bounds_s = sum(bool_vec_s)

    bool_vec_u = (ul .!= -Inf .|| uu .!= Inf)
    num_real_bounds_u = sum(bool_vec_u)

    G = _init_similar(Q, nc * N, nu, T)
    J = _init_similar(Q, (nc + num_real_bounds_s + num_real_bounds_u) * N, nu * N, T)
    As0 = _init_similar(s0, ns * (N + 1), T)
    As0_bounds = _init_similar(s0, num_real_bounds_s * N, T)
    KAs0_bounds = _init_similar(s0, num_real_bounds_u * N, T)

    KBI = _init_similar(Q, nu * N, nu, T)
    KAs0 = _init_similar(s0, nu * N, T)
    KAs0_block = _init_similar(s0, nu, T)
    KB = _init_similar(Q, nu, nu, T)

    I_mat = _init_similar(Q, nu, nu, T)

    I_mat[LinearAlgebra.diagind(I_mat)] .= T(1)

    dl = repeat(gl, N)
    du = repeat(gu, N)

    # First nc*N Jacobian rows: the algebraic constraints gl <= Es + Fu <= gu.
    _set_G_blocks!(G, dl, du, block_B, block_A, block_d, block_Aw, block_dw, s0, E, F, K, N)
    _set_J1_dense!(J, G, N)

    LinearAlgebra.mul!(As0, block_A, s0)

    # Convert state variable constraints to algebraic constraints
    offset_s = nc * N
    if num_real_bounds_s == length(sl)
        # All states bounded: copy whole block_B blocks (fast path).
        As0_bounds .= As0[(1 + ns):(ns * (N + 1))] .+ block_Aw[(1 + ns):(ns * (N + 1))]
        for i = 1:N
            J[
                (offset_s + 1 + (i - 1) * ns):(offset_s + ns * N),
                (1 + nu * (i - 1)):(nu * i),
            ] = @view(block_B[1:(ns * (N - i + 1)), :])
        end
    else
        # Only some states bounded: filter rows by bool_vec_s per time step.
        for i = 1:N
            row_range = (1 + (i - 1) * num_real_bounds_s):(i * num_real_bounds_s)
            As0_bounds[row_range] =
                As0[(1 + ns * i):(ns * (i + 1))][bool_vec_s] .+
                block_Aw[(1 + ns * i):(ns * (i + 1))][bool_vec_s]
            for j = 1:(N - i + 1)
                J[
                    (offset_s + 1 + (i + j - 2) * num_real_bounds_s):(offset_s + (i + j - 1) * num_real_bounds_s),
                    (1 + nu * (i - 1)):(nu * i),
                ] = @view(block_B[(1 + (j - 1) * ns):(j * ns), :][bool_vec_s, :])
            end
        end
        sl = sl[bool_vec_s]
        su = su[bool_vec_s]
    end

    # Convert bounds on u to algebraic constraints
    # u_i = K x_i + v_i, so u = K(A s0 + B v + Aw) + v; build the blocks K*B
    # (identity for the diagonal block) and K*A*s0 used below.
    for i = 1:N
        if i == 1
            KB = I_mat
        else
            B_row_range = (1 + (i - 2) * ns):((i - 1) * ns)
            B_sub_block = view(block_B, B_row_range, :)
            LinearAlgebra.mul!(KB, K, B_sub_block)
        end

        KBI[(1 + nu * (i - 1)):(nu * i), :] = KB
        LinearAlgebra.mul!(KAs0_block, K, As0[(1 + ns * (i - 1)):(ns * i)])
        KAs0[(1 + nu * (i - 1)):(nu * i)] = KAs0_block
    end

    offset_u = nc * N + num_real_bounds_s * N
    if num_real_bounds_u == length(ul)
        # All inputs bounded: copy whole KBI blocks (fast path).
        KAs0_bounds .= KAs0 .+ block_KAw
        for i = 1:N
            J[
                (offset_u + 1 + (i - 1) * nu):(offset_u + nu * N),
                (1 + nu * (i - 1)):(nu * i),
            ] = @view(KBI[1:(nu * (N - i + 1)), :])
        end
    else
        # Only some inputs bounded: filter rows by bool_vec_u per time step.
        for i = 1:N
            row_range = (1 + (i - 1) * num_real_bounds_u):(i * num_real_bounds_u)
            KAs0_bounds[row_range] =
                KAs0[(1 + nu * (i - 1)):(nu * i)][bool_vec_u] .+
                block_KAw[(1 + nu * (i - 1)):(nu * i)][bool_vec_u]

            for j = 1:(N - i + 1)
                J[
                    (offset_u + 1 + (i + j - 2) * num_real_bounds_u):(offset_u + (i + j - 1) * num_real_bounds_u),
                    (1 + nu * (i - 1)):(nu * i),
                ] = @view(KBI[(1 + (j - 1) * nu):(j * nu), :][bool_vec_u, :])
            end
        end

        ul = ul[bool_vec_u]
        uu = uu[bool_vec_u]
    end

    # Shift all converted bounds by their s0/w contributions.
    lcon2 = repeat(sl, N)
    ucon2 = repeat(su, N)

    lcon3 = repeat(ul, N)
    ucon3 = repeat(uu, N)

    LinearAlgebra.axpy!(-1, As0_bounds, lcon2)
    LinearAlgebra.axpy!(-1, As0_bounds, ucon2)

    LinearAlgebra.axpy!(-1, KAs0_bounds, lcon3)
    LinearAlgebra.axpy!(-1, KAs0_bounds, ucon3)

    # Stack: algebraic constraints, then state bounds, then input bounds.
    lcon = _init_similar(s0, size(J, 1), T)
    ucon = _init_similar(s0, size(J, 1), T)

    lcon[1:length(dl)] = dl
    ucon[1:length(du)] = du

    if length(lcon2) > 0
        lcon[(length(dl) + 1):(length(dl) + length(lcon2))] = lcon2
        ucon[(length(du) + 1):(length(du) + length(ucon2))] = ucon2
    end

    if length(lcon3) > 0
        lcon[(length(dl) + length(lcon2) + 1):(length(dl) + length(lcon2) + length(
            lcon3,
        ))] = lcon3
        ucon[(length(du) + length(ucon2) + 1):(length(du) + length(ucon2) + length(
            ucon3,
        ))] = ucon3
    end

    nvar = nu * N
    nnzj = size(J, 1) * size(J, 2)
    nh = size(H, 1)
    nnzh = div(nh * (nh + 1), 2)  # lower triangle of the (dense) Hessian
    ncon = size(J, 1)

    c = _init_similar(s0, nvar, T)
    c .= H_blocks.c

    # Note: no lvar/uvar here — the decision variables v are unbounded; the
    # original u bounds were converted to constraint rows above.
    DenseLQDynamicModel(
        NLPModels.NLPModelMeta(
            nvar,
            x0 = _init_similar(s0, nvar, T),
            ncon = ncon,
            lcon = lcon,
            ucon = ucon,
            nnzj = nnzj,
            nnzh = nnzh,
            lin = 1:ncon,
            islp = (ncon == 0);
        ),
        NLPModels.Counters(),
        QuadraticModels.QPData(c0, c, H, J),
        dnlp,
        dense_blocks,
    )
end
# Build a `DenseLQDynamicModel` whose Jacobian is stored implicitly as an
# `LQJacobianOperator` (only the first nu columns of each block are kept as
# tensors), for the case with no feedback gain K (MK <: Nothing).
function _build_implicit_dense_lq_dynamic_model(
    dnlp::LQDynamicData{T, V, M, MK},
) where {T, V <: AbstractVector{T}, M <: AbstractMatrix{T}, MK <: Nothing}
    # Unpack the problem data.
    s0 = dnlp.s0
    A = dnlp.A
    B = dnlp.B
    Q = dnlp.Q
    R = dnlp.R
    N = dnlp.N
    Qf = dnlp.Qf
    S = dnlp.S
    ns = dnlp.ns
    nu = dnlp.nu
    E = dnlp.E
    F = dnlp.F
    K = dnlp.K
    w = dnlp.w
    sl = dnlp.sl
    su = dnlp.su
    ul = dnlp.ul
    uu = dnlp.uu
    gl = dnlp.gl
    gu = dnlp.gu

    nc = size(E, 1)

    nvar = nu * N

    # States with at least one finite bound; only these generate Jacobian rows.
    bool_vec_s = (su .!= Inf .|| sl .!= -Inf)
    num_real_bounds_s = sum(bool_vec_s)

    G = _init_similar(Q, nc * N, nu, T)
    # Jacobian block tensors: one nu-column slab per time step (Jac3 is empty
    # since there are no converted input bounds without K).
    Jac1 = _init_similar(Q, nc, nu, N, T)
    Jac2 = _init_similar(Q, num_real_bounds_s, nu, N, T)
    Jac3 = _init_similar(Q, 0, nu, N, T)

    As0 = _init_similar(s0, ns * (N + 1), T)
    As0_bounds = _init_similar(s0, num_real_bounds_s * N, T)

    c = _init_similar(s0, nvar, T)
    x0 = _init_similar(s0, nvar, T)

    lcon = _init_similar(s0, nc * N + num_real_bounds_s * N, T)
    ucon = _init_similar(s0, nc * N + num_real_bounds_s * N, T)

    # Workspace tensors for the LQJacobianOperator mul!/ΣJ products.
    x1 = _init_similar(Q, nc, 1, N, T)
    x2 = _init_similar(Q, num_real_bounds_s, 1, N, T)
    x3 = _init_similar(Q, 0, 1, N, T)
    y = _init_similar(Q, nu, 1, N, T)

    SJ1 = _init_similar(Q, nc, nu, T)
    SJ2 = _init_similar(Q, num_real_bounds_s, nu, T)
    SJ3 = _init_similar(Q, 0, nu, T)
    H_sub_block = _init_similar(Q, nu, nu, T)

    # Condensation blocks and condensed Hessian.
    dense_blocks = _build_block_matrices(A, B, K, N, w, nc)
    block_A = dense_blocks.A
    block_B = dense_blocks.B
    block_d = dense_blocks.d
    block_Aw = dense_blocks.Aw
    block_dw = dense_blocks.dw

    H_blocks = _build_H_blocks(Q, R, block_A, block_B, block_Aw, S, Qf, K, s0, N)

    H = H_blocks.H
    c0 = H_blocks.c0

    # Cache objective-related blocks for `reset_s0!`.
    dense_blocks.h .= H_blocks.block_h
    dense_blocks.h01 .= H_blocks.block_h01
    dense_blocks.h02 .= H_blocks.block_h02
    dense_blocks.h_constant .= H_blocks.h_constant
    dense_blocks.h0_constant = H_blocks.h0_constant

    dl = repeat(gl, N)
    du = repeat(gu, N)

    # Algebraic constraint blocks, sliced per time step into the Jac1 tensor.
    _set_G_blocks!(G, dl, du, block_B, block_A, block_d, block_Aw, block_dw, s0, E, F, K, N)
    for i = 1:N
        Jac1[:, :, i] = @view G[(1 + nc * (i - 1)):(nc * i), :]
    end

    LinearAlgebra.mul!(As0, block_A, s0)

    # Input bounds remain simple variable bounds (no K, so u are the variables).
    lvar = repeat(ul, N)
    uvar = repeat(uu, N)

    # Convert state variable constraints to algebraic constraints
    if num_real_bounds_s == length(sl)
        # All states bounded: copy whole block_B slabs (fast path).
        As0_bounds .= As0[(1 + ns):(ns * (N + 1))] .+ block_Aw[(1 + ns):(ns * (N + 1))]
        for i = 1:N
            Jac2[:, :, i] = @view block_B[(1 + ns * (i - 1)):(ns * i), :]
        end
    else
        # Only some states bounded: filter rows by bool_vec_s per time step.
        for i = 1:N
            row_range = (1 + (i - 1) * num_real_bounds_s):(i * num_real_bounds_s)
            As0_bounds[row_range] .=
                As0[(1 + ns * i):(ns * (i + 1))][bool_vec_s] .+
                block_Aw[(1 + ns * i):(ns * (i + 1))][bool_vec_s]
            Jac2[:, :, i] = @view block_B[(1 + (i - 1) * ns):(i * ns), :][bool_vec_s, :]
        end
        sl = sl[bool_vec_s]
        su = su[bool_vec_s]
    end

    # Shift the state bounds by the s0/w contribution.
    lcon2 = repeat(sl, N)
    ucon2 = repeat(su, N)
    LinearAlgebra.axpy!(-1, As0_bounds, ucon2)
    LinearAlgebra.axpy!(-1, As0_bounds, lcon2)

    lcon[1:length(dl)] = dl
    ucon[1:length(du)] = du

    if length(lcon2) > 0
        lcon[(1 + length(dl)):(length(dl) + num_real_bounds_s * N)] = lcon2
        ucon[(1 + length(du)):(length(du) + num_real_bounds_s * N)] = ucon2
    end

    ncon = (nc + num_real_bounds_s) * N
    # NOTE(review): H is square, so size(H, 2) here matches the size(H, 1)
    # used by the K-variant below — confirm intentional.
    nnzj = ncon * size(H, 2)
    nh = size(H, 1)
    nnzh = div(nh * (nh + 1), 2)

    # Implicit Jacobian: block tensors plus preallocated workspaces.
    J = LQJacobianOperator{T, M, AbstractArray{T}}(
        Jac1,
        Jac2,
        Jac3,
        N,
        nu,
        nc,
        num_real_bounds_s,
        0,
        x1,
        x2,
        x3,
        y,
        SJ1,
        SJ2,
        SJ3,
        H_sub_block,
    )

    DenseLQDynamicModel(
        NLPModels.NLPModelMeta(
            nvar,
            x0 = x0,
            lvar = lvar,
            uvar = uvar,
            ncon = ncon,
            lcon = lcon,
            ucon = ucon,
            nnzj = nnzj,
            nnzh = nnzh,
            lin = 1:ncon,
            islp = (ncon == 0);
        ),
        NLPModels.Counters(),
        QuadraticModels.QPData(c0, c, H, J),
        dnlp,
        dense_blocks,
    )
end
# Build a `DenseLQDynamicModel` with an implicit `LQJacobianOperator` Jacobian
# when a feedback gain `K` is supplied (u = K x + v). Decision variables are v;
# both state bounds and the original input bounds become constraint rows, held
# as per-time-step block tensors.
function _build_implicit_dense_lq_dynamic_model(
    dnlp::LQDynamicData{T, V, M, MK},
) where {T, V <: AbstractVector{T}, M <: AbstractMatrix{T}, MK <: AbstractMatrix{T}}
    # Unpack the problem data.
    s0 = dnlp.s0
    A = dnlp.A
    B = dnlp.B
    Q = dnlp.Q
    R = dnlp.R
    N = dnlp.N
    Qf = dnlp.Qf
    S = dnlp.S
    ns = dnlp.ns
    nu = dnlp.nu
    E = dnlp.E
    F = dnlp.F
    K = dnlp.K
    w = dnlp.w
    sl = dnlp.sl
    su = dnlp.su
    ul = dnlp.ul
    uu = dnlp.uu
    gl = dnlp.gl
    gu = dnlp.gu

    nc = size(E, 1)

    # Condensation blocks (closed-loop dynamics folded in) and condensed Hessian.
    dense_blocks = _build_block_matrices(A, B, K, N, w, nc)
    block_A = dense_blocks.A
    block_B = dense_blocks.B
    block_d = dense_blocks.d
    block_Aw = dense_blocks.Aw
    block_dw = dense_blocks.dw
    block_KAw = dense_blocks.KAw

    H_blocks = _build_H_blocks(Q, R, block_A, block_B, block_Aw, S, Qf, K, s0, N)

    H = H_blocks.H
    c0 = H_blocks.c0

    # Cache objective-related blocks for `reset_s0!`.
    dense_blocks.h .= H_blocks.block_h
    dense_blocks.h01 .= H_blocks.block_h01
    dense_blocks.h02 .= H_blocks.block_h02
    dense_blocks.h_constant .= H_blocks.h_constant
    dense_blocks.h0_constant = H_blocks.h0_constant

    # Variables with at least one finite bound; only these generate rows.
    bool_vec_s = (su .!= Inf .|| sl .!= -Inf)
    num_real_bounds_s = sum(bool_vec_s)

    bool_vec_u = (ul .!= -Inf .|| uu .!= Inf)
    num_real_bounds_u = sum(bool_vec_u)

    G = _init_similar(Q, nc * N, nu, T)
    # Jacobian block tensors: one nu-column slab per time step.
    Jac1 = _init_similar(Q, nc, nu, N, T)
    Jac2 = _init_similar(Q, num_real_bounds_s, nu, N, T)
    Jac3 = _init_similar(Q, num_real_bounds_u, nu, N, T)

    As0 = _init_similar(s0, ns * (N + 1), T)
    As0_bounds = _init_similar(s0, num_real_bounds_s * N, T)
    KAs0_bounds = _init_similar(s0, num_real_bounds_u * N, T)

    KBI = _init_similar(Q, nu * N, nu, T)
    KAs0 = _init_similar(s0, nu * N, T)
    KAs0_block = _init_similar(s0, nu, T)
    KB = _init_similar(Q, nu, nu, T)

    lcon = _init_similar(s0, (nc + num_real_bounds_s + num_real_bounds_u) * N, T)
    ucon = _init_similar(s0, (nc + num_real_bounds_s + num_real_bounds_u) * N, T)

    I_mat = _init_similar(Q, nu, nu, T)

    # Workspace tensors for the LQJacobianOperator mul!/ΣJ products.
    x1 = _init_similar(Q, nc, 1, N, T)
    x2 = _init_similar(Q, num_real_bounds_s, 1, N, T)
    x3 = _init_similar(Q, num_real_bounds_u, 1, N, T)
    y = _init_similar(Q, nu, 1, N, T)

    SJ1 = _init_similar(Q, nc, nu, T)
    SJ2 = _init_similar(Q, num_real_bounds_s, nu, T)
    SJ3 = _init_similar(Q, num_real_bounds_u, nu, T)
    H_sub_block = _init_similar(Q, nu, nu, T)

    I_mat[LinearAlgebra.diagind(I_mat)] .= T(1)

    dl = repeat(gl, N)
    du = repeat(gu, N)

    # Algebraic constraint blocks, sliced per time step into the Jac1 tensor.
    _set_G_blocks!(G, dl, du, block_B, block_A, block_d, block_Aw, block_dw, s0, E, F, K, N)
    for i = 1:N
        Jac1[:, :, i] = @view G[(1 + nc * (i - 1)):(nc * i), :]
    end

    LinearAlgebra.mul!(As0, block_A, s0)

    # Convert state variable constraints to algebraic constraints
    offset_s = nc * N
    if num_real_bounds_s == length(sl)
        # All states bounded: copy whole block_B slabs (fast path).
        As0_bounds .= As0[(1 + ns):(ns * (N + 1))] .+ block_Aw[(1 + ns):(ns * (N + 1))]
        for i = 1:N
            Jac2[:, :, i] = @view block_B[(1 + ns * (i - 1)):(ns * i), :]
        end
    else
        # Only some states bounded: filter rows by bool_vec_s per time step.
        for i = 1:N
            row_range = (1 + (i - 1) * num_real_bounds_s):(i * num_real_bounds_s)
            As0_bounds[row_range] .=
                As0[(1 + ns * i):(ns * (i + 1))][bool_vec_s] .+
                block_Aw[(1 + ns * i):(ns * (i + 1))][bool_vec_s]
            Jac2[:, :, i] = @view block_B[(1 + (i - 1) * ns):(i * ns), :][bool_vec_s, :]
        end
        sl = sl[bool_vec_s]
        su = su[bool_vec_s]
    end

    # Convert bounds on u to algebraic constraints
    # u_i = K x_i + v_i; build the K*B blocks (identity for the diagonal block)
    # and K*A*s0 terms used below.
    for i = 1:N
        if i == 1
            KB = I_mat
        else
            B_row_range = (1 + (i - 2) * ns):((i - 1) * ns)
            B_sub_block = view(block_B, B_row_range, :)
            LinearAlgebra.mul!(KB, K, B_sub_block)
        end

        KBI[(1 + nu * (i - 1)):(nu * i), :] = KB
        LinearAlgebra.mul!(KAs0_block, K, As0[(1 + ns * (i - 1)):(ns * i)])
        KAs0[(1 + nu * (i - 1)):(nu * i)] = KAs0_block
    end

    offset_u = nc * N + num_real_bounds_s * N
    if num_real_bounds_u == length(ul)
        # All inputs bounded: copy whole KBI slabs (fast path).
        KAs0_bounds .= KAs0 .+ block_KAw
        for i = 1:N
            Jac3[:, :, i] = @view KBI[(1 + (i - 1) * nu):(i * nu), :]
        end
    else
        # Only some inputs bounded: filter rows by bool_vec_u per time step.
        for i = 1:N
            row_range = (1 + (i - 1) * num_real_bounds_u):(i * num_real_bounds_u)
            KAs0_bounds[row_range] =
                KAs0[(1 + nu * (i - 1)):(nu * i)][bool_vec_u] .+
                block_KAw[(1 + nu * (i - 1)):(nu * i)][bool_vec_u]
            Jac3[:, :, i] = @view KBI[(1 + (i - 1) * nu):(i * nu), :][bool_vec_u, :]
        end
        ul = ul[bool_vec_u]
        uu = uu[bool_vec_u]
    end

    # Shift all converted bounds by their s0/w contributions.
    lcon2 = repeat(sl, N)
    ucon2 = repeat(su, N)

    lcon3 = repeat(ul, N)
    ucon3 = repeat(uu, N)

    LinearAlgebra.axpy!(-1, As0_bounds, lcon2)
    LinearAlgebra.axpy!(-1, As0_bounds, ucon2)

    LinearAlgebra.axpy!(-1, KAs0_bounds, lcon3)
    LinearAlgebra.axpy!(-1, KAs0_bounds, ucon3)

    # Stack: algebraic constraints, then state bounds, then input bounds.
    lcon[1:length(dl)] = dl
    ucon[1:length(du)] = du

    if length(lcon2) > 0
        lcon[(length(dl) + 1):(length(dl) + length(lcon2))] = lcon2
        ucon[(length(du) + 1):(length(du) + length(ucon2))] = ucon2
    end

    if length(lcon3) > 0
        lcon[(length(dl) + length(lcon2) + 1):(length(dl) + length(lcon2) + length(
            lcon3,
        ))] = lcon3
        ucon[(length(du) + length(ucon2) + 1):(length(du) + length(ucon2) + length(
            ucon3,
        ))] = ucon3
    end

    nvar = nu * N
    ncon = (nc + num_real_bounds_s + num_real_bounds_u) * N
    nnzj = ncon * size(H, 1)
    nh = size(H, 1)
    nnzh = div(nh * (nh + 1), 2)

    c = _init_similar(s0, nvar, T)
    c .= H_blocks.c

    # Implicit Jacobian: block tensors plus preallocated workspaces.
    J = LQJacobianOperator{T, M, AbstractArray{T}}(
        Jac1,
        Jac2,
        Jac3,
        N,
        nu,
        nc,
        num_real_bounds_s,
        num_real_bounds_u,
        x1,
        x2,
        x3,
        y,
        SJ1,
        SJ2,
        SJ3,
        H_sub_block,
    )

    # Note: no lvar/uvar here — the decision variables v are unbounded; the
    # original u bounds were converted to constraint rows above.
    DenseLQDynamicModel(
        NLPModels.NLPModelMeta(
            nvar,
            x0 = _init_similar(s0, nvar, T),
            ncon = ncon,
            lcon = lcon,
            ucon = ucon,
            nnzj = nnzj,
            nnzh = nnzh,
            lin = 1:ncon,
            islp = (ncon == 0);
        ),
        NLPModels.Counters(),
        QuadraticModels.QPData(c0, c, H, J),
        dnlp,
        dense_blocks,
    )
end
# Build the condensed block matrices used by the dense LQ model:
#   block_A  — stacked powers of the closed-loop matrix (A + BK)^i, i = 0..N
#   block_B  — stacked blocks (A + BK)^(i-1) B, i = 1..N
#   block_Aw — propagation of the stacked disturbance `w` through block_A
#   block_KA / block_KAw — K left-multiplied onto the corresponding blocks
# The remaining outputs (block_h, block_h01, block_h02, h_const, h0_const,
# block_d, block_dw) are allocated here and filled later by other builders.
#
# Arguments:
#   A, B : state-transition (ns x ns) and input (ns x nu) matrices
#   K    : feedback gain (nu x ns) or `nothing` (treated as a zero gain)
#   N    : horizon length (assumed >= 2 by the unrolled first step — TODO confirm)
#   w    : stacked disturbance vector of length ns * N
#   nc   : number of algebraic constraint rows (rows of E)
#
# Returns a DenseLQDynamicBlocks{T, V, M}.
function _build_block_matrices(
    A::M,
    B::M,
    K,
    N,
    w::V,
    nc,
) where {T, V <: AbstractVector{T}, M <: AbstractMatrix{T}}
    ns = size(A, 2)
    nu = size(B, 2)

    # `===` is the idiomatic (non-overloadable) identity test for `nothing`
    if K === nothing
        K = _init_similar(A, nu, ns, T)
    end

    # Define block matrices
    block_A = _init_similar(A, ns * (N + 1), ns, T)
    block_B = _init_similar(B, ns * N, nu, T)
    block_Aw = _init_similar(w, ns * (N + 1), T)
    block_h = _init_similar(A, nu * N, ns, T)
    block_h01 = _init_similar(A, ns, ns, T)
    block_h02 = _init_similar(w, ns, T)
    h_const = _init_similar(w, nu * N, T)
    h0_const = T(0)

    block_d = _init_similar(A, nc * N, ns, T)
    block_dw = _init_similar(w, nc * N, T)
    block_KA = _init_similar(A, nu * N, ns, T)
    block_KAw = _init_similar(w, nu * N, T)

    # Scratch buffers reused across iterations to avoid reallocations
    A_k = copy(A)
    BK = _init_similar(A, ns, ns, T)

    KA = _init_similar(A, nu, ns, T)
    KAw = _init_similar(w, nu, T)

    Aw = _init_similar(A, ns, T)

    AB_klast = _init_similar(A, size(B, 1), size(B, 2), T)
    AB_k = _init_similar(A, size(B, 1), size(B, 2), T)

    block_B[1:ns, :] = B

    # Top ns x ns block of block_A is the identity ((A + BK)^0)
    block_A[LinearAlgebra.diagind(block_A)] .= T(1)

    # A_k = A + BK is the closed-loop transition matrix
    LinearAlgebra.mul!(BK, B, K)
    LinearAlgebra.axpy!(1, BK, A_k)

    A_klast = copy(A_k)
    A_knext = copy(A_k)

    block_A[(ns + 1):(ns * 2), :] = A_k

    LinearAlgebra.mul!(AB_k, A_k, B, 1, 0)

    block_B[(1 + ns):(2 * ns), :] = AB_k

    AB_klast = copy(AB_k)

    # Fill the A and B matrices
    LinearAlgebra.mul!(KA, K, A_k)

    block_KA[1:nu, :] .= K
    block_KA[(1 + nu):(2 * nu), :] .= KA

    # Accumulate successive powers: A_knext = A_k^i, AB_k = A_k^(i-1) B
    for i = 2:(N - 1)
        LinearAlgebra.mul!(AB_k, A_k, AB_klast)

        LinearAlgebra.mul!(A_knext, A_k, A_klast)

        block_A[(ns * i + 1):(ns * (i + 1)), :] = A_knext

        block_B[(1 + (i) * ns):((i + 1) * ns), :] = AB_k

        LinearAlgebra.mul!(KA, K, A_knext)

        block_KA[(1 + nu * i):(nu * (i + 1)), :] .= KA

        AB_klast = copy(AB_k)
        A_klast = copy(A_knext)
    end

    LinearAlgebra.mul!(A_knext, A_k, A_klast)

    block_A[(ns * N + 1):(ns * (N + 1)), :] = A_knext

    # block_Aw[j] accumulates sum over earlier steps of A_k^(i-1) * w segment
    for i = 1:N
        A_view = @view block_A[(1 + (i - 1) * ns):(i * ns), :]
        for j = (i + 1):(N + 1)
            LinearAlgebra.mul!(Aw, A_view, w[(1 + (j - i - 1) * ns):((j - i) * ns)])
            block_Aw[(1 + (j - 1) * ns):(j * ns)] .+= Aw
        end
    end

    for i = 1:N
        Aw_view = @view block_Aw[(1 + (i - 1) * ns):(i * ns)]
        LinearAlgebra.mul!(KAw, K, Aw_view)
        block_KAw[(1 + (i - 1) * nu):(i * nu)] .= KAw
    end

    DenseLQDynamicBlocks{T, V, M}(
        block_A,
        block_B,
        block_Aw,
        block_h,
        block_h01,
        block_h02,
        h_const,
        h0_const,
        block_d,
        block_dw,
        block_KA,
        block_KAw,
    )
end
# Build the condensed Hessian `H` of the dense LQ model together with the
# linear term `c`, constant `c0`, and the cached blocks (block_h, block_h01,
# block_h02, h_constant, h0_constant) that allow cheap updates when `s0`
# changes. `block_A`/`block_B`/`Aw` are the stacked blocks produced by
# `_build_block_matrices`. Returns a NamedTuple.
function _build_H_blocks(
    Q,
    R,
    block_A::M,
    block_B::M,
    Aw,
    S,
    Qf,
    K,
    s0,
    N,
) where {T, M <: AbstractMatrix{T}}
    ns = size(Q, 1)
    nu = size(R, 1)

    # `===` is the idiomatic (non-overloadable) identity test for `nothing`
    if K === nothing
        K = _init_similar(Q, nu, ns, T)
    end

    H = _init_similar(block_A, nu * N, nu * N, T)

    # block_h01, block_h02, and block_h are stored in DenseLQDynamicBlocks to provide quick updates when redefining s0
    # block_h01 = A^T((Q + KTRK + 2 * SK))A where Q, K, R, S, and A are block matrices
    # block_h02 = A^T((Q + KTRK + 2 * SK))block_matrix_A w
    # block_h = (QB + SKB + K^T R K B + K^T S^T B)^T A + (S + K^T R)^T A
    block_h01 = _init_similar(Q, ns, ns, T)
    block_h02 = _init_similar(s0, ns, T)
    block_h = _init_similar(block_A, nu * N, ns, T)
    h_constant = _init_similar(s0, nu * N, T)
    h0_constant = T(0)

    # quad term refers to the summation of Q, K^T RK, SK, and K^T S^T that is left and right multiplied by B in the Hessian
    quad_term = _init_similar(Q, ns, ns, T)

    quad_term_B = _init_similar(block_B, size(block_B, 1), size(block_B, 2), T)
    QfB = _init_similar(block_B, size(block_B, 1), size(block_B, 2), T)

    quad_term_AB = _init_similar(block_A, ns, nu, T)
    QfAB = _init_similar(block_A, ns, nu, T)

    RK_STB = _init_similar(block_B, nu, nu, T)
    BQB = _init_similar(block_B, nu, nu, T)
    BQfB = _init_similar(block_B, nu, nu, T)
    SK = _init_similar(Q, ns, ns, T)
    RK = _init_similar(Q, nu, ns, T)
    KTRK = _init_similar(Q, ns, ns, T)
    RK_ST = _init_similar(Q, nu, ns, T)

    QB_block_vec = _init_similar(quad_term_B, ns * (N + 1), nu, T)
    h = _init_similar(s0, nu * N, T)
    BTQA = _init_similar(Q, nu, ns, T)
    RK_STA = _init_similar(Q, nu, ns, T)

    BTQAw = _init_similar(s0, nu, T)
    RK_STAw = _init_similar(s0, nu, T)

    QA = _init_similar(Q, ns, ns, T)
    KTRKA = _init_similar(Q, ns, ns, T)
    SKA = _init_similar(Q, ns, ns, T)
    KTSTA = _init_similar(Q, ns, ns, T)

    QAw = _init_similar(s0, ns, T)
    KTRKAw = _init_similar(s0, ns, T)
    SKAw = _init_similar(s0, ns, T)
    KTSTAw = _init_similar(s0, ns, T)

    AQAs0 = _init_similar(s0, ns, T)

    # quad_term = Q + SK + (SK)' + K'RK
    LinearAlgebra.mul!(SK, S, K)
    LinearAlgebra.mul!(RK, R, K)
    LinearAlgebra.mul!(KTRK, K', RK)
    LinearAlgebra.axpy!(1.0, Q, quad_term)
    LinearAlgebra.axpy!(1.0, SK, quad_term)
    # axpy!(1.0, SK', quad_term) includes scalar operations because of the adjoint
    # .+= is more efficient with adjoint
    quad_term .+= SK'
    LinearAlgebra.axpy!(1.0, KTRK, quad_term)

    # RK_ST = RK + S'
    LinearAlgebra.copyto!(RK_ST, RK)
    RK_ST .+= S'

    for i = 1:N
        B_row_range = (1 + (i - 1) * ns):(i * ns)
        B_sub_block = view(block_B, B_row_range, :)

        LinearAlgebra.mul!(quad_term_AB, quad_term, B_sub_block)
        LinearAlgebra.mul!(QfAB, Qf, B_sub_block)

        quad_term_B[(1 + (i - 1) * ns):(i * ns), :] = quad_term_AB
        QfB[(1 + (i - 1) * ns):(i * ns), :] = QfAB

        # Accumulate B'(quad_term)B products into the lower-triangular band of H;
        # the terminal step of each diagonal band gets the Qf contribution instead.
        for j = 1:(N + 1 - i)
            right_block = block_B[(1 + (j - 1 + i - 1) * ns):((j + i - 1) * ns), :]
            LinearAlgebra.mul!(BQB, quad_term_AB', right_block)
            LinearAlgebra.mul!(BQfB, QfAB', right_block)

            for k = 1:(N - j - i + 2)
                row_range = (1 + nu * (k + (j - 1) - 1)):(nu * (k + (j - 1)))
                col_range = (1 + nu * (k - 1)):(nu * k)

                if k == N - j - i + 2
                    view(H, row_range, col_range) .+= BQfB
                else
                    view(H, row_range, col_range) .+= BQB
                end
            end
        end

        LinearAlgebra.mul!(RK_STB, RK_ST, B_sub_block)
        for m = 1:(N - i)
            row_range = (1 + nu * (m - 1 + i)):(nu * (m + i))
            col_range = (1 + nu * (m - 1)):(nu * m)

            view(H, row_range, col_range) .+= RK_STB
        end

        view(H, (1 + nu * (i - 1)):(nu * i), (1 + nu * (i - 1)):(nu * i)) .+= R
    end

    for i = 1:N
        # quad_term_B = QB + SKB + KTRKB + KTSTB = (Q + SK + KTRK + KTST) B
        fill!(QB_block_vec, T(0))
        rows_QB = 1:(ns * (N - i))
        rows_QfB = (1 + ns * (N - i)):(ns * (N - i + 1))

        QB_block_vec[(1 + ns * i):(ns * N), :] = quad_term_B[rows_QB, :]
        QB_block_vec[(1 + ns * N):(ns * (N + 1)), :] = QfB[rows_QfB, :]

        LinearAlgebra.mul!(BTQA, QB_block_vec', block_A)
        LinearAlgebra.mul!(RK_STA, RK_ST, block_A[(ns * (i - 1) + 1):(ns * i), :])

        LinearAlgebra.mul!(BTQAw, QB_block_vec', Aw)
        LinearAlgebra.mul!(RK_STAw, RK_ST, Aw[(1 + ns * (i - 1)):(ns * i)])

        h_view = @view block_h[(1 + nu * (i - 1)):(nu * i), :]

        LinearAlgebra.axpy!(1, BTQA, h_view)
        LinearAlgebra.axpy!(1, RK_STA, h_view)

        h_constant_view = @view h_constant[(1 + nu * (i - 1)):(nu * i)]

        LinearAlgebra.axpy!(1, BTQAw, h_constant_view)
        LinearAlgebra.axpy!(1, RK_STAw, h_constant_view)

        A_view = @view block_A[(1 + ns * (i - 1)):(ns * i), :]
        Aw_view = @view Aw[(1 + ns * (i - 1)):(ns * i)]

        LinearAlgebra.mul!(QA, Q, A_view)
        LinearAlgebra.mul!(KTRKA, KTRK, A_view)
        LinearAlgebra.mul!(SKA, SK, A_view)
        LinearAlgebra.mul!(KTSTA, SK', A_view)

        LinearAlgebra.mul!(QAw, Q, Aw_view)
        LinearAlgebra.mul!(KTRKAw, KTRK, Aw_view)
        LinearAlgebra.mul!(SKAw, SK, Aw_view)
        LinearAlgebra.mul!(KTSTAw, SK', Aw_view)

        # 5-arg mul! accumulates (beta = 1) into block_h01 / block_h02
        LinearAlgebra.mul!(block_h01, A_view', QA, 1, 1)
        LinearAlgebra.mul!(block_h01, A_view', KTRKA, 1, 1)
        LinearAlgebra.mul!(block_h01, A_view', SKA, 1, 1)
        LinearAlgebra.mul!(block_h01, A_view', KTSTA, 1, 1)

        LinearAlgebra.mul!(block_h02, A_view', QAw, 1, 1)
        LinearAlgebra.mul!(block_h02, A_view', KTRKAw, 1, 1)
        LinearAlgebra.mul!(block_h02, A_view', SKAw, 1, 1)
        LinearAlgebra.mul!(block_h02, A_view', KTSTAw, 1, 1)

        h0_constant += LinearAlgebra.dot(Aw_view, QAw)
        h0_constant += LinearAlgebra.dot(Aw_view, KTRKAw)
        h0_constant += LinearAlgebra.dot(Aw_view, SKAw)
        h0_constant += LinearAlgebra.dot(Aw_view, KTSTAw)
    end

    # Terminal-stage (Qf) contributions from the last block row of block_A
    A_view = @view block_A[(1 + ns * N):(ns * (N + 1)), :]
    Aw_view = @view Aw[(1 + ns * N):(ns * (N + 1))]

    LinearAlgebra.mul!(QA, Qf, A_view)
    LinearAlgebra.mul!(block_h01, A_view', QA, 1, 1)

    LinearAlgebra.mul!(QAw, Qf, Aw_view)
    LinearAlgebra.mul!(block_h02, A_view', QAw, 1, 1)

    h0_constant += LinearAlgebra.dot(Aw_view, QAw)

    LinearAlgebra.mul!(h, block_h, s0)
    LinearAlgebra.mul!(AQAs0, block_h01, s0)

    h0 = LinearAlgebra.dot(AQAs0, s0)

    h0 += h0_constant
    h0 += LinearAlgebra.dot(block_h02, s0) * T(2)

    h += h_constant

    return (
        H = H,
        c = h,
        c0 = h0 / T(2),
        block_h = block_h,
        block_h01 = block_h01,
        block_h02 = block_h02,
        h_constant = h_constant,
        h0_constant = h0_constant / T(2),
    )
end
# Fill the first block column `G` of the algebraic-constraint Jacobian and the
# cached blocks `block_d` (E applied to the stacked A powers) and `block_dw`
# (E applied to the disturbance propagation), then shift the constraint bounds
# `dl`/`du` in place: dl/du := (gl/gu) - block_d*s0 - block_dw.
# This method handles the no-feedback case (K === nothing).
function _set_G_blocks!(
    G,
    dl,
    du,
    block_B::M,
    block_A::M,
    block_d::M,
    block_Aw,
    block_dw,
    s0,
    E,
    F,
    K::MK,
    N,
) where {T, M <: AbstractMatrix{T}, MK <: Nothing}
    ns = size(E, 2)
    nu = size(F, 2)
    nc = size(E, 1)

    # First block row of G is F itself (direct input contribution at step 0)
    G[1:nc, :] = F

    # Scratch buffers reused across iterations
    EB = _init_similar(block_B, nc, nu, T)
    EA = _init_similar(block_B, nc, ns, T)
    d = _init_similar(block_dw, nc, T)

    for i = 1:N
        if i != N
            B_row_range = (1 + (i - 1) * ns):(i * ns)
            B_sub_block = view(block_B, B_row_range, :)

            LinearAlgebra.mul!(EB, E, B_sub_block)
            G[(1 + nc * i):(nc * (i + 1)), :] = EB
        end

        A_view = @view block_A[(1 + ns * (i - 1)):(ns * i), :]
        Aw_view = @view block_Aw[(1 + ns * (i - 1)):(ns * i)]

        LinearAlgebra.mul!(EA, E, A_view)
        LinearAlgebra.mul!(d, E, Aw_view)

        block_d[(1 + nc * (i - 1)):(nc * i), :] .= EA
        block_dw[(1 + nc * (i - 1)):(nc * i)] .= d
    end

    # 5-arg mul!: dl := -block_d*s0 + dl (dl/du hold gl/gu on entry)
    LinearAlgebra.mul!(dl, block_d, s0, -1, 1)
    LinearAlgebra.mul!(du, block_d, s0, -1, 1)

    LinearAlgebra.axpy!(-1, block_dw, dl)
    LinearAlgebra.axpy!(-1, block_dw, du)
end
# Same as the K === nothing method above, but with a feedback gain `K`:
# the effective constraint matrix becomes E + FK (applied to states), since
# u = Ks + v substitutes the input variables. Fills G, block_d, block_dw and
# shifts dl/du in place: dl/du := (gl/gu) - block_d*s0 - block_dw.
function _set_G_blocks!(
    G,
    dl,
    du,
    block_B,
    block_A,
    block_d,
    block_Aw,
    block_dw,
    s0,
    E,
    F,
    K::MK,
    N,
) where {T, MK <: AbstractMatrix{T}}
    ns = size(E, 2)
    nu = size(F, 2)
    nc = size(E, 1)

    # First block row of G is F itself (direct input contribution at step 0)
    G[1:nc, :] = F

    # Scratch buffers; E_FK = E + F*K is the feedback-adjusted constraint matrix
    E_FK = _init_similar(E, nc, ns, T)
    E_FKA = _init_similar(E, nc, ns, T)
    FK = _init_similar(E, nc, ns, T)
    EB = _init_similar(E, nc, nu, T)
    d = _init_similar(s0, nc, T)

    LinearAlgebra.copyto!(E_FK, E)
    LinearAlgebra.mul!(FK, F, K)
    LinearAlgebra.axpy!(1.0, FK, E_FK)

    for i = 1:N
        if i != N
            B_row_range = (1 + (i - 1) * ns):(i * ns)
            B_sub_block = view(block_B, B_row_range, :)

            LinearAlgebra.mul!(EB, E_FK, B_sub_block)
            G[(1 + nc * i):(nc * (i + 1)), :] = EB
        end

        A_view = @view block_A[(1 + ns * (i - 1)):(ns * i), :]
        Aw_view = @view block_Aw[(1 + ns * (i - 1)):(ns * i)]

        LinearAlgebra.mul!(E_FKA, E_FK, A_view)
        LinearAlgebra.mul!(d, E_FK, Aw_view)

        block_d[(1 + nc * (i - 1)):(nc * i), :] .= E_FKA
        block_dw[(1 + nc * (i - 1)):(nc * i)] .= d
    end

    # 5-arg mul!: dl := -block_d*s0 + dl (dl/du hold gl/gu on entry)
    LinearAlgebra.mul!(dl, block_d, s0, -1, 1)
    LinearAlgebra.mul!(du, block_d, s0, -1, 1)

    LinearAlgebra.axpy!(-1, block_dw, dl)
    LinearAlgebra.axpy!(-1, block_dw, du)
end
# Fill the dense Jacobian block `J1` from the first block column `G`.
# `G` stacks N blocks of nc rows each; block column i of J1 receives the first
# (N - i + 1) blocks of G, shifted down by (i - 1) block rows, producing a
# block-lower-triangular Toeplitz structure.
function _set_J1_dense!(J1, G, N)
    # Only used for explicit Jacobian, not implicit Jacobian
    nu = size(G, 2)
    # Integer division: avoids the Float64 round-trip of Int(size(G, 1) / N)
    nc = div(size(G, 1), N)

    for i = 1:N
        col_range = (1 + nu * (i - 1)):(nu * i)
        # @view avoids allocating a temporary copy of the G slice
        J1[(1 + nc * (i - 1)):(nc * N), col_range] = @view G[1:((N - i + 1) * nc), :]
    end
end
@doc raw"""
SparseLQDynamicModel(dnlp::LQDynamicData) -> SparseLQDynamicModel
SparseLQDynamicModel(s0, A, B, Q, R, N; ...) -> SparseLQDynamicModel
A constructor for building a `SparseLQDynamicModel <: QuadraticModels.AbstractQuadraticModel`
Input data is for the problem of the form
```math
\begin{aligned}
\min \frac{1}{2} &\; \sum_{i = 0}^{N - 1}(s_i^T Q s_i + 2 u_i^T S^T x_i + u_i^T R u_i) + \frac{1}{2} s_N^T Q_f s_N \\
\textrm{s.t.} &\; s_{i+1} = A s_i + B u_i + w_i \quad \forall i=0, 1, ..., N-1 \\
    &\; u_i = K s_i + v_i \quad \forall i = 0, 1, ..., N - 1 \\
&\; gl \le E s_i + F u_i \le gu \quad \forall i = 0, 1, ..., N-1\\
&\; sl \le s \le su \\
&\; ul \le u \le uu \\
&\; s_0 = s0
\end{aligned}
```
---
Data is converted to the form
```math
\begin{aligned}
\min &\; \frac{1}{2} z^T H z \\
\textrm{s.t.} &\; \textrm{lcon} \le Jz \le \textrm{ucon}\\
&\; \textrm{lvar} \le z \le \textrm{uvar}
\end{aligned}
```
Resulting `H` and `J` matrices are stored as `QuadraticModels.QPData` within the `SparseLQDynamicModel` struct and
variable and constraint limits are stored within `NLPModels.NLPModelMeta`
If `K` is defined, then `u` variables are replaced by `v` variables, and `u` and `s` can still be queried by `get_u` and `get_s` within `DynamicNLPModels.jl`
"""
function SparseLQDynamicModel(
    dnlp::LQDynamicData{T, V, M, MK},
) where {
    T,
    V <: AbstractVector{T},
    M <: AbstractMatrix{T},
    MK <: Union{Nothing, AbstractMatrix{T}},
}
    # MK is bound in the signature (previously it was declared in the `where`
    # clause but never matched, leaving a dead static parameter); dispatch to
    # the builder specialized on the matrix/feedback types.
    _build_sparse_lq_dynamic_model(dnlp)
end
# Keyword-argument constructor: assembles an LQDynamicData from the raw problem
# matrices and forwards to SparseLQDynamicModel(dnlp). Defaults: no cross term
# (S = 0), no algebraic constraints (E, F empty), no feedback (K = nothing),
# zero disturbance, and infinite bounds.
function SparseLQDynamicModel(
    s0::V,
    A::M,
    B::M,
    Q::M,
    R::M,
    N;
    Qf::M = Q,
    S::M = _init_similar(Q, size(Q, 1), size(R, 1), T),
    E::M = _init_similar(Q, 0, length(s0), T),
    F::M = _init_similar(Q, 0, size(R, 1), T),
    K::MK = nothing,
    w::V = _init_similar(s0, length(s0) * N, T),
    sl::V = (similar(s0) .= -Inf),
    su::V = (similar(s0) .= Inf),
    ul::V = (similar(s0, size(R, 1)) .= -Inf),
    uu::V = (similar(s0, size(R, 1)) .= Inf),
    # gl and gu both bound the same nc = size(E, 1) constraint rows; size both
    # from E for consistency (gu previously used size(F, 1))
    gl::V = (similar(s0, size(E, 1)) .= -Inf),
    gu::V = (similar(s0, size(E, 1)) .= Inf),
) where {
    T,
    V <: AbstractVector{T},
    M <: AbstractMatrix{T},
    MK <: Union{Nothing, AbstractMatrix{T}},
}
    dnlp = LQDynamicData(
        s0,
        A,
        B,
        Q,
        R,
        N;
        Qf = Qf,
        S = S,
        E = E,
        F = F,
        K = K,
        w = w,
        sl = sl,
        su = su,
        ul = ul,
        uu = uu,
        gl = gl,
        gu = gu,
    )

    SparseLQDynamicModel(dnlp)
end
# Build the sparse-QP form for dense problem matrices with no feedback gain
# (MK <: Nothing). Variables are z = [s_0; ...; s_N; u_0; ...; u_{N-1}];
# constraints are the N dynamics rows (A s_i + B u_i - s_{i+1} = -w_i)
# followed by N algebraic rows (gl <= E s_i + F u_i <= gu). s_0 is pinned to
# s0 via equal lower/upper variable bounds.
function _build_sparse_lq_dynamic_model(
    dnlp::LQDynamicData{T, V, M, MK},
) where {T, V <: AbstractVector{T}, M <: AbstractMatrix{T}, MK <: Nothing}
    s0 = dnlp.s0
    A = dnlp.A
    B = dnlp.B
    Q = dnlp.Q
    R = dnlp.R
    N = dnlp.N

    Qf = dnlp.Qf
    S = dnlp.S
    ns = dnlp.ns
    nu = dnlp.nu
    E = dnlp.E
    F = dnlp.F
    K = dnlp.K
    w = dnlp.w

    sl = dnlp.sl
    su = dnlp.su
    ul = dnlp.ul
    uu = dnlp.uu
    gl = dnlp.gl
    gu = dnlp.gu

    nc = size(E, 1)

    # Preallocate CSC buffers; sizes assume fully dense Q, R, S, Qf, A, B, E, F
    H_colptr = zeros(Int, ns * (N + 1) + nu * N + 1)
    H_rowval = zeros(Int, (ns + nu) * N * ns + (ns + nu) * N * nu + ns * ns)
    H_nzval = zeros(T, (ns + nu) * N * ns + (ns + nu) * N * nu + ns * ns)

    J_colptr = zeros(Int, ns * (N + 1) + nu * N + 1)
    J_rowval = zeros(Int, N * (ns^2 + ns * nu + ns) + N * (nc * ns + nc * nu))
    J_nzval = zeros(T, N * (ns^2 + ns * nu + ns) + N * (nc * ns + nc * nu))

    _set_sparse_H!(H_colptr, H_rowval, H_nzval, Q, R, N; Qf = Qf, S = S)

    H = SparseArrays.SparseMatrixCSC(
        (N + 1) * ns + nu * N,
        (N + 1) * ns + nu * N,
        H_colptr,
        H_rowval,
        H_nzval,
    )

    _set_sparse_J!(J_colptr, J_rowval, J_nzval, A, B, E, F, K, N)

    J = SparseArrays.SparseMatrixCSC(
        (nc + ns) * N,
        (N + 1) * ns + nu * N,
        J_colptr,
        J_rowval,
        J_nzval,
    )

    # Structural zeros (from zero entries in the problem data) are removed
    SparseArrays.dropzeros!(H)
    SparseArrays.dropzeros!(J)

    c0 = zero(T)

    nvar = ns * (N + 1) + nu * N
    c = _init_similar(s0, nvar, T)

    lvar = _init_similar(s0, nvar, T)
    uvar = _init_similar(s0, nvar, T)

    # Fix the initial state: lvar = uvar = s0 on the first ns variables
    lvar[1:ns] = s0
    uvar[1:ns] = s0

    lcon = _init_similar(s0, ns * N + N * nc, T)
    ucon = _init_similar(s0, ns * N + N * nc, T)

    ncon = size(J, 1)
    nnzj = length(J.rowval)
    nnzh = length(H.rowval)

    # Dynamics rows are equalities at -w (A s_i + B u_i - s_{i+1} = -w_i)
    lcon[1:(ns * N)] .= -w
    ucon[1:(ns * N)] .= -w

    for i = 1:N
        lvar[(i * ns + 1):((i + 1) * ns)] = sl
        uvar[(i * ns + 1):((i + 1) * ns)] = su

        lcon[(ns * N + 1 + (i - 1) * nc):(ns * N + i * nc)] = gl
        ucon[(ns * N + 1 + (i - 1) * nc):(ns * N + i * nc)] = gu
    end

    for j = 1:N
        lvar[((N + 1) * ns + (j - 1) * nu + 1):((N + 1) * ns + j * nu)] = ul
        uvar[((N + 1) * ns + (j - 1) * nu + 1):((N + 1) * ns + j * nu)] = uu
    end

    SparseLQDynamicModel(
        NLPModels.NLPModelMeta(
            nvar,
            x0 = _init_similar(s0, nvar, T),
            lvar = lvar,
            uvar = uvar,
            ncon = ncon,
            lcon = lcon,
            ucon = ucon,
            nnzj = nnzj,
            nnzh = nnzh,
            lin = 1:ncon,
            islp = (ncon == 0);
        ),
        NLPModels.Counters(),
        QuadraticModels.QPData(c0, c, H, J),
        dnlp,
    )
end
# Build the sparse-QP form for dense problem matrices WITH a feedback gain K.
# Substituting u = Ks + v transforms the data: Q <- Q + SK + (SK)' + K'RK,
# S <- S + K'R, A <- A + BK, E <- E + FK. Bounds on u become algebraic rows
# ul <= K s_i + v_i <= uu; rows where both ul and uu are infinite are dropped
# (bool_vec / num_real_bounds).
function _build_sparse_lq_dynamic_model(
    dnlp::LQDynamicData{T, V, M, MK},
) where {T, V <: AbstractVector{T}, M <: AbstractMatrix{T}, MK <: AbstractMatrix}
    s0 = dnlp.s0
    A = dnlp.A
    B = dnlp.B
    Q = dnlp.Q
    R = dnlp.R
    N = dnlp.N

    Qf = dnlp.Qf
    S = dnlp.S
    ns = dnlp.ns
    nu = dnlp.nu
    E = dnlp.E
    F = dnlp.F
    K = dnlp.K
    w = dnlp.w

    sl = dnlp.sl
    su = dnlp.su
    ul = dnlp.ul
    uu = dnlp.uu
    gl = dnlp.gl
    gu = dnlp.gu

    nc = size(E, 1)

    # Keep only input-bound rows with at least one finite bound
    bool_vec = (ul .!= -Inf .|| uu .!= Inf)
    num_real_bounds = sum(bool_vec)

    # Transform u variables to v variables
    new_Q = _init_similar(Q, size(Q, 1), size(Q, 2), T)
    new_S = _init_similar(S, size(S, 1), size(S, 2), T)
    new_A = _init_similar(A, size(A, 1), size(A, 2), T)
    new_E = _init_similar(E, size(E, 1), size(E, 2), T)

    KTR = _init_similar(Q, size(K, 2), size(R, 2), T)
    SK = _init_similar(Q, size(S, 1), size(K, 2), T)
    KTRK = _init_similar(Q, size(K, 2), size(K, 2), T)
    BK = _init_similar(Q, size(B, 1), size(K, 2), T)
    FK = _init_similar(Q, size(F, 1), size(K, 2), T)

    H_colptr = zeros(Int, ns * (N + 1) + nu * N + 1)
    H_rowval = zeros(Int, (ns + nu) * N * ns + (ns + nu) * N * nu + ns * ns)
    H_nzval = zeros(T, (ns + nu) * N * ns + (ns + nu) * N * nu + ns * ns)

    J_colptr = zeros(Int, ns * (N + 1) + nu * N + 1)
    J_rowval = zeros(
        Int,
        N * (ns^2 + ns * nu + ns) +
        N * (nc * ns + nc * nu) +
        N * (ns * num_real_bounds + num_real_bounds),
    )
    J_nzval = zeros(
        T,
        N * (ns^2 + ns * nu + ns) +
        N * (nc * ns + nc * nu) +
        N * (ns * num_real_bounds + num_real_bounds),
    )

    LinearAlgebra.copyto!(new_Q, Q)
    LinearAlgebra.copyto!(new_S, S)
    LinearAlgebra.copyto!(new_A, A)
    LinearAlgebra.copyto!(new_E, E)

    # new_S = S + K'R
    LinearAlgebra.mul!(KTR, K', R)
    LinearAlgebra.axpy!(1, KTR, new_S)

    # new_Q = Q + SK + (SK)' + K'RK
    LinearAlgebra.mul!(SK, S, K)
    LinearAlgebra.mul!(KTRK, KTR, K)
    LinearAlgebra.axpy!(1, SK, new_Q)
    LinearAlgebra.axpy!(1, SK', new_Q)
    LinearAlgebra.axpy!(1, KTRK, new_Q)

    # new_A = A + BK, new_E = E + FK
    LinearAlgebra.mul!(BK, B, K)
    LinearAlgebra.axpy!(1, BK, new_A)

    LinearAlgebra.mul!(FK, F, K)
    LinearAlgebra.axpy!(1, FK, new_E)

    # Get H and J matrices from new matrices
    _set_sparse_H!(H_colptr, H_rowval, H_nzval, new_Q, R, N; Qf = Qf, S = new_S)

    H = SparseArrays.SparseMatrixCSC(
        (N + 1) * ns + nu * N,
        (N + 1) * ns + nu * N,
        H_colptr,
        H_rowval,
        H_nzval,
    )

    _set_sparse_J!(
        J_colptr,
        J_rowval,
        J_nzval,
        new_A,
        B,
        new_E,
        F,
        K,
        bool_vec,
        N,
        num_real_bounds,
    )

    J = SparseArrays.SparseMatrixCSC(
        ns * N + nc * N + num_real_bounds * N,
        (N + 1) * ns + nu * N,
        J_colptr,
        J_rowval,
        J_nzval,
    )

    SparseArrays.dropzeros!(H)
    SparseArrays.dropzeros!(J)

    # Remove algebraic constraints if u variable is unbounded on both upper and lower ends
    # (dead pre-allocations of lcon3/ucon3 removed; repeat() builds them below)
    ul = ul[bool_vec]
    uu = uu[bool_vec]
    lcon3 = repeat(ul, N)
    ucon3 = repeat(uu, N)

    nvar = ns * (N + 1) + nu * N

    lvar = similar(s0, nvar)
    fill!(lvar, -Inf)
    uvar = similar(s0, nvar)
    fill!(uvar, Inf)

    # Fix the initial state: lvar = uvar = s0 on the first ns variables
    lvar[1:ns] = s0
    uvar[1:ns] = s0

    lcon = _init_similar(s0, ns * N + N * length(gl) + length(lcon3))
    ucon = _init_similar(s0, ns * N + N * length(gl) + length(lcon3))

    # Dynamics rows are equalities at -w
    lcon[1:(ns * N)] .= -w
    ucon[1:(ns * N)] .= -w

    ncon = size(J, 1)
    nnzj = length(J.rowval)
    nnzh = length(H.rowval)

    for i = 1:N
        lvar[(i * ns + 1):((i + 1) * ns)] = sl
        uvar[(i * ns + 1):((i + 1) * ns)] = su

        lcon[(ns * N + 1 + (i - 1) * nc):(ns * N + i * nc)] = gl
        ucon[(ns * N + 1 + (i - 1) * nc):(ns * N + i * nc)] = gu
    end

    if length(lcon3) > 0
        lcon[(1 + ns * N + N * nc):(ns * N + nc * N + num_real_bounds * N)] = lcon3
        ucon[(1 + ns * N + N * nc):(ns * N + nc * N + num_real_bounds * N)] = ucon3
    end

    c0 = zero(T)
    c = _init_similar(s0, nvar, T)

    SparseLQDynamicModel(
        NLPModels.NLPModelMeta(
            nvar,
            x0 = _init_similar(s0, nvar, T),
            lvar = lvar,
            uvar = uvar,
            ncon = ncon,
            lcon = lcon,
            ucon = ucon,
            nnzj = nnzj,
            nnzh = nnzh,
            lin = 1:ncon,
            islp = (ncon == 0);
        ),
        NLPModels.Counters(),
        QuadraticModels.QPData(c0, c, H, J),
        dnlp,
    )
end
# Build the sparse-QP form when the problem matrices are already
# SparseMatrixCSC and there is no feedback gain. The CSC buffer sizes are
# computed from the exact nnz counts of the input matrices rather than the
# dense worst case. NOTE: dropzeros! below mutates the matrices stored in
# `dnlp` in place (removes stored structural zeros).
function _build_sparse_lq_dynamic_model(
    dnlp::LQDynamicData{T, V, M, MK},
) where {T, V <: AbstractVector{T}, M <: SparseMatrixCSC{T}, MK <: Nothing}
    s0 = dnlp.s0
    A = dnlp.A
    B = dnlp.B
    Q = dnlp.Q
    R = dnlp.R
    N = dnlp.N

    Qf = dnlp.Qf
    S = dnlp.S
    ns = dnlp.ns
    nu = dnlp.nu
    E = dnlp.E
    F = dnlp.F
    K = dnlp.K
    w = dnlp.w

    sl = dnlp.sl
    su = dnlp.su
    ul = dnlp.ul
    uu = dnlp.uu
    gl = dnlp.gl
    gu = dnlp.gu

    nc = size(E, 1)

    # In-place removal of stored zeros so nnz counts below are tight
    SparseArrays.dropzeros!(A)
    SparseArrays.dropzeros!(B)
    SparseArrays.dropzeros!(Q)
    SparseArrays.dropzeros!(R)
    SparseArrays.dropzeros!(Qf)
    SparseArrays.dropzeros!(E)
    SparseArrays.dropzeros!(F)
    SparseArrays.dropzeros!(S)

    # CSC buffers sized from the exact sparsity of the inputs
    H_colptr = zeros(Int, ns * (N + 1) + nu * N + 1)
    H_rowval = zeros(
        Int,
        length(Q.rowval) * N +
        length(R.rowval) * N +
        2 * length(S.rowval) * N +
        length(Qf.rowval),
    )
    H_nzval = zeros(
        T,
        length(Q.nzval) * N +
        length(R.nzval) * N +
        2 * length(S.nzval) * N +
        length(Qf.nzval),
    )

    J_colptr = zeros(Int, ns * (N + 1) + nu * N + 1)
    J_rowval = zeros(
        Int,
        length(A.rowval) * N +
        length(B.rowval) * N +
        length(E.rowval) * N +
        length(F.rowval) * N +
        ns * N,
    )
    J_nzval = zeros(
        T,
        length(A.nzval) * N +
        length(B.nzval) * N +
        length(E.nzval) * N +
        length(F.nzval) * N +
        ns * N,
    )

    _set_sparse_H!(H_colptr, H_rowval, H_nzval, Q, R, N; Qf = Qf, S = S)

    H = SparseArrays.SparseMatrixCSC(
        (N + 1) * ns + nu * N,
        (N + 1) * ns + nu * N,
        H_colptr,
        H_rowval,
        H_nzval,
    )

    _set_sparse_J!(J_colptr, J_rowval, J_nzval, A, B, E, F, K, N)

    J = SparseArrays.SparseMatrixCSC(
        (nc + ns) * N,
        (N + 1) * ns + nu * N,
        J_colptr,
        J_rowval,
        J_nzval,
    )

    c0 = zero(T)

    nvar = ns * (N + 1) + nu * N
    c = _init_similar(s0, nvar, T)

    lvar = _init_similar(s0, nvar, T)
    uvar = _init_similar(s0, nvar, T)

    # Fix the initial state: lvar = uvar = s0 on the first ns variables
    lvar[1:ns] = s0
    uvar[1:ns] = s0

    lcon = _init_similar(s0, ns * N + N * nc, T)
    ucon = _init_similar(s0, ns * N + N * nc, T)

    ncon = size(J, 1)
    nnzj = length(J.rowval)
    nnzh = length(H.rowval)

    # Dynamics rows are equalities at -w
    lcon[1:(ns * N)] .= -w
    ucon[1:(ns * N)] .= -w

    for i = 1:N
        lvar[(i * ns + 1):((i + 1) * ns)] = sl
        uvar[(i * ns + 1):((i + 1) * ns)] = su

        lcon[(ns * N + 1 + (i - 1) * nc):(ns * N + i * nc)] = gl
        ucon[(ns * N + 1 + (i - 1) * nc):(ns * N + i * nc)] = gu
    end

    for j = 1:N
        lvar[((N + 1) * ns + (j - 1) * nu + 1):((N + 1) * ns + j * nu)] = ul
        uvar[((N + 1) * ns + (j - 1) * nu + 1):((N + 1) * ns + j * nu)] = uu
    end

    SparseLQDynamicModel(
        NLPModels.NLPModelMeta(
            nvar,
            x0 = _init_similar(s0, nvar, T),
            lvar = lvar,
            uvar = uvar,
            ncon = ncon,
            lcon = lcon,
            ucon = ucon,
            nnzj = nnzj,
            nnzh = nnzh,
            lin = 1:ncon,
            islp = (ncon == 0);
        ),
        NLPModels.Counters(),
        QuadraticModels.QPData(c0, c, H, J),
        dnlp,
    )
end
# Build the sparse-QP form when the problem matrices AND the feedback gain K
# are SparseMatrixCSC. Applies the same u = Ks + v transformation as the dense
# K case, then sizes the CSC buffers from the exact nnz counts of the
# transformed matrices. NOTE: dropzeros! mutates the matrices stored in
# `dnlp` in place.
function _build_sparse_lq_dynamic_model(
    dnlp::LQDynamicData{T, V, M, MK},
) where {T, V <: AbstractVector{T}, M <: SparseMatrixCSC{T}, MK <: SparseMatrixCSC{T}}
    s0 = dnlp.s0
    A = dnlp.A
    B = dnlp.B
    Q = dnlp.Q
    R = dnlp.R
    N = dnlp.N

    Qf = dnlp.Qf
    S = dnlp.S
    ns = dnlp.ns
    nu = dnlp.nu
    E = dnlp.E
    F = dnlp.F
    K = dnlp.K
    w = dnlp.w

    sl = dnlp.sl
    su = dnlp.su
    ul = dnlp.ul
    uu = dnlp.uu
    gl = dnlp.gl
    gu = dnlp.gu

    nc = size(E, 1)

    # In-place removal of stored zeros so nnz counts below are tight
    SparseArrays.dropzeros!(A)
    SparseArrays.dropzeros!(B)
    SparseArrays.dropzeros!(Q)
    SparseArrays.dropzeros!(R)
    SparseArrays.dropzeros!(Qf)
    SparseArrays.dropzeros!(E)
    SparseArrays.dropzeros!(F)
    SparseArrays.dropzeros!(S)
    SparseArrays.dropzeros!(K)

    # Keep only input-bound rows with at least one finite bound
    bool_vec = (ul .!= -Inf .|| uu .!= Inf)
    num_real_bounds = sum(bool_vec)

    # Transform u variables to v variables
    new_Q = _init_similar(Q, size(Q, 1), size(Q, 2), T)
    new_S = _init_similar(S, size(S, 1), size(S, 2), T)
    new_A = _init_similar(A, size(A, 1), size(A, 2), T)
    new_E = _init_similar(E, size(E, 1), size(E, 2), T)

    KTR = _init_similar(Q, size(K, 2), size(R, 2), T)
    SK = _init_similar(Q, size(S, 1), size(K, 2), T)
    KTRK = _init_similar(Q, size(K, 2), size(K, 2), T)
    BK = _init_similar(Q, size(B, 1), size(K, 2), T)
    FK = _init_similar(Q, size(F, 1), size(K, 2), T)

    # (A first H_colptr/H_rowval/H_nzval allocation sized with S was dead code
    #  — it was fully shadowed by the new_S-sized allocation below — removed.)

    LinearAlgebra.copyto!(new_Q, Q)
    LinearAlgebra.copyto!(new_S, S)
    LinearAlgebra.copyto!(new_A, A)
    LinearAlgebra.copyto!(new_E, E)

    # new_S = S + K'R
    LinearAlgebra.mul!(KTR, K', R)
    LinearAlgebra.axpy!(1, KTR, new_S)

    # new_Q = Q + SK + (SK)' + K'RK
    LinearAlgebra.mul!(SK, S, K)
    LinearAlgebra.mul!(KTRK, KTR, K)
    LinearAlgebra.axpy!(1, SK, new_Q)
    LinearAlgebra.axpy!(1, SK', new_Q)
    LinearAlgebra.axpy!(1, KTRK, new_Q)

    # new_A = A + BK, new_E = E + FK
    LinearAlgebra.mul!(BK, B, K)
    LinearAlgebra.axpy!(1, BK, new_A)

    LinearAlgebra.mul!(FK, F, K)
    LinearAlgebra.axpy!(1, FK, new_E)

    SparseArrays.dropzeros!(new_Q)
    SparseArrays.dropzeros!(new_A)
    SparseArrays.dropzeros!(new_E)
    SparseArrays.dropzeros!(new_S)

    # Rows of K that correspond to retained input bounds (used only for sizing)
    K_sparse = K[bool_vec, :]

    # CSC buffers sized from the exact sparsity of the transformed matrices
    H_colptr = zeros(Int, ns * (N + 1) + nu * N + 1)
    H_rowval = zeros(
        Int,
        length(Q.rowval) * N +
        length(R.rowval) * N +
        2 * length(new_S.rowval) * N +
        length(Qf.rowval),
    )
    H_nzval = zeros(
        T,
        length(Q.nzval) * N +
        length(R.nzval) * N +
        2 * length(new_S.nzval) * N +
        length(Qf.nzval),
    )

    J_colptr = zeros(Int, ns * (N + 1) + nu * N + 1)
    J_rowval = zeros(
        Int,
        length(new_A.rowval) * N +
        length(B.rowval) * N +
        length(new_E.rowval) * N +
        length(F.rowval) * N +
        ns * N +
        length(K_sparse.rowval) * N +
        num_real_bounds * N,
    )
    J_nzval = zeros(
        T,
        length(new_A.nzval) * N +
        length(B.nzval) * N +
        length(new_E.nzval) * N +
        length(F.nzval) * N +
        ns * N +
        length(K_sparse.nzval) * N +
        num_real_bounds * N,
    )

    # Get H and J matrices from new matrices
    _set_sparse_H!(H_colptr, H_rowval, H_nzval, new_Q, R, N; Qf = Qf, S = new_S)

    H = SparseArrays.SparseMatrixCSC(
        (N + 1) * ns + nu * N,
        (N + 1) * ns + nu * N,
        H_colptr,
        H_rowval,
        H_nzval,
    )

    _set_sparse_J!(
        J_colptr,
        J_rowval,
        J_nzval,
        new_A,
        B,
        new_E,
        F,
        K,
        bool_vec,
        N,
        num_real_bounds,
    )

    J = SparseArrays.SparseMatrixCSC(
        ns * N + nc * N + num_real_bounds * N,
        (N + 1) * ns + nu * N,
        J_colptr,
        J_rowval,
        J_nzval,
    )

    # Remove algebraic constraints if u variable is unbounded on both upper and lower ends
    # (dead pre-allocations of lcon3/ucon3 removed; repeat() builds them below)
    ul = ul[bool_vec]
    uu = uu[bool_vec]
    lcon3 = repeat(ul, N)
    ucon3 = repeat(uu, N)

    nvar = ns * (N + 1) + nu * N

    lvar = similar(s0, nvar)
    fill!(lvar, -Inf)
    uvar = similar(s0, nvar)
    fill!(uvar, Inf)

    # Fix the initial state: lvar = uvar = s0 on the first ns variables
    lvar[1:ns] = s0
    uvar[1:ns] = s0

    lcon = _init_similar(s0, ns * N + N * length(gl) + length(lcon3))
    ucon = _init_similar(s0, ns * N + N * length(gl) + length(lcon3))

    ncon = size(J, 1)
    nnzj = length(J.rowval)
    nnzh = length(H.rowval)

    # Dynamics rows are equalities at -w
    lcon[1:(ns * N)] .= -w
    ucon[1:(ns * N)] .= -w

    for i = 1:N
        lvar[(i * ns + 1):((i + 1) * ns)] = sl
        uvar[(i * ns + 1):((i + 1) * ns)] = su

        lcon[(ns * N + 1 + (i - 1) * nc):(ns * N + i * nc)] = gl
        ucon[(ns * N + 1 + (i - 1) * nc):(ns * N + i * nc)] = gu
    end

    if length(lcon3) > 0
        lcon[(1 + ns * N + N * nc):(ns * N + nc * N + num_real_bounds * N)] = lcon3
        ucon[(1 + ns * N + N * nc):(ns * N + nc * N + num_real_bounds * N)] = ucon3
    end

    c0 = zero(T)
    c = _init_similar(s0, nvar, T)

    SparseLQDynamicModel(
        NLPModels.NLPModelMeta(
            nvar,
            x0 = _init_similar(s0, nvar, T),
            lvar = lvar,
            uvar = uvar,
            ncon = ncon,
            lcon = lcon,
            ucon = ucon,
            nnzj = nnzj,
            nnzh = nnzh,
            lin = 1:ncon,
            islp = (ncon == 0);
        ),
        NLPModels.Counters(),
        QuadraticModels.QPData(c0, c, H, J),
        dnlp,
    )
end
# Set the data needed to build a SparseArrays.SparseMatrixCSC matrix. H_colptr, H_rowval, and H_nzval
# are set so that they can be passed to SparseMatrixCSC() to obtain the `H` matrix such that
#   z^T H z = sum_{i=1}^{N-1} s_i^T Q s + sum_{i=1}^{N-1} u^T R u + s_N^T Qf s_n .
# This method handles dense Q, R, S, Qf: every column stores a full Q (or Qf)
# column plus the matching S row/column, so the buffer layout is computed
# arithmetically rather than from sparsity patterns.
function _set_sparse_H!(
    H_colptr,
    H_rowval,
    H_nzval,
    Q::M,
    R::M,
    N;
    Qf::M = Q,
    S::M = zeros(T, size(Q, 1), size(R, 1)),
) where {T, M <: AbstractMatrix{T}}
    ns = size(Q, 1)
    nu = size(R, 1)

    # State columns 1 .. N*ns: each holds a Q column followed by an S' column
    for i = 1:N
        for j = 1:ns
            H_nzval[(1 + (i - 1) * (ns^2 + nu * ns) + (j - 1) * (ns + nu)):(ns * j + nu * (j - 1) + (i - 1) * (ns^2 + nu * ns))] =
                @view Q[:, j]
            H_nzval[(1 + (i - 1) * (ns^2 + nu * ns) + j * ns + (j - 1) * nu):((i - 1) * (ns^2 + nu * ns) + j * (ns + nu))] =
                @view S[j, :]
            H_rowval[(1 + (i - 1) * (ns^2 + nu * ns) + (j - 1) * ns + (j - 1) * nu):(ns * j + nu * (j - 1) + (i - 1) * (ns^2 + nu * ns))] =
                (1 + (i - 1) * ns):(ns * i)
            H_rowval[(1 + (i - 1) * (ns^2 + nu * ns) + j * ns + (j - 1) * nu):((i - 1) * (ns^2 + nu * ns) + j * (ns + nu))] =
                (1 + (N + 1) * ns + nu * (i - 1)):((N + 1) * ns + nu * i)
            H_colptr[((i - 1) * ns + j)] =
                1 + (ns + nu) * (j - 1) + (i - 1) * (ns * nu + ns * ns)
        end
    end

    # Terminal-state columns N*ns+1 .. (N+1)*ns: Qf columns only
    for j = 1:ns
        H_nzval[(1 + N * (ns^2 + nu * ns) + (j - 1) * ns):(ns * j + N * (ns^2 + nu * ns))] =
            @view Qf[:, j]
        H_rowval[(1 + N * (ns^2 + nu * ns) + (j - 1) * ns):(ns * j + N * (ns^2 + nu * ns))] =
            (1 + N * ns):((N + 1) * ns)
        H_colptr[(N * ns + j)] = 1 + ns * (j - 1) + N * (ns * nu + ns * ns)
    end

    # Input columns: each holds an S column followed by an R column
    offset = ns^2 * (N + 1) + ns * nu * N
    for i = 1:N
        for j = 1:nu
            H_nzval[(1 + offset + (i - 1) * (nu^2 + ns * nu) + (j - 1) * (nu + ns)):(offset + (i - 1) * (nu^2 + ns * nu) + (j - 1) * nu + j * ns)] =
                @view S[:, j]
            H_nzval[(1 + offset + (i - 1) * (nu^2 + ns * nu) + (j - 1) * nu + j * ns):(offset + (i - 1) * (nu^2 + ns * nu) + j * (ns + nu))] =
                @view R[:, j]
            H_rowval[(1 + offset + (i - 1) * (nu^2 + ns * nu) + (j - 1) * (nu + ns)):(offset + (i - 1) * (nu^2 + ns * nu) + (j - 1) * nu + j * ns)] =
                (1 + (i - 1) * ns):(i * ns)
            H_rowval[(1 + offset + (i - 1) * (nu^2 + ns * nu) + (j - 1) * nu + j * ns):(offset + (i - 1) * (nu^2 + ns * nu) + j * (ns + nu))] =
                (1 + (N + 1) * ns + (i - 1) * nu):((N + 1) * ns + i * nu)
            H_colptr[(N + 1) * ns + (i - 1) * nu + j] =
                1 + offset + (ns + nu) * (j - 1) + (nu^2 + ns * nu) * (i - 1)
        end
    end

    # CSC convention: final colptr entry is nnz + 1
    H_colptr[ns * (N + 1) + nu * N + 1] = length(H_nzval) + 1
end
# Sparse-input specialization of _set_sparse_H!: columns are assembled by
# copying the stored entries (colptr/rowval/nzval ranges) of Q, S'/S, R, and
# Qf with row offsets for the block position, so the output keeps the exact
# sparsity of the inputs. H_colptr is built incrementally column by column.
function _set_sparse_H!(
    H_colptr,
    H_rowval,
    H_nzval,
    Q::M,
    R::M,
    N;
    Qf::M = Q,
    S::M = spzeros(T, size(Q, 1), size(R, 1)),
) where {T, M <: SparseMatrixCSC{T}}
    # Materialized transpose so columns of S' can be walked in CSC order
    ST = SparseArrays.sparse(S')

    ns = size(Q, 1)
    nu = size(R, 1)

    H_colptr[1] = 1

    # State columns 1 .. N*ns: Q column entries, then S' column entries
    for i = 1:N
        for j = 1:ns
            Q_offset = length(Q.colptr[j]:(Q.colptr[j + 1] - 1))
            H_nzval[(H_colptr[ns * (i - 1) + j]):(H_colptr[ns * (i - 1) + j] + Q_offset - 1)] =
                Q.nzval[Q.colptr[j]:(Q.colptr[j + 1] - 1)]
            H_rowval[(H_colptr[ns * (i - 1) + j]):(H_colptr[ns * (i - 1) + j] + Q_offset - 1)] =
                Q.rowval[Q.colptr[j]:(Q.colptr[j + 1] - 1)] .+ ns * (i - 1)

            ST_offset = length(ST.colptr[j]:(ST.colptr[j + 1] - 1))

            H_nzval[(H_colptr[ns * (i - 1) + j] + Q_offset):(H_colptr[ns * (i - 1) + j] + Q_offset + ST_offset - 1)] =
                ST.nzval[ST.colptr[j]:(ST.colptr[j + 1] - 1)]
            H_rowval[(H_colptr[ns * (i - 1) + j] + Q_offset):(H_colptr[ns * (i - 1) + j] + Q_offset + ST_offset - 1)] =
                ST.rowval[ST.colptr[j]:(ST.colptr[j + 1] - 1)] .+
                (nu * (i - 1) + ns * (N + 1))

            H_colptr[ns * (i - 1) + j + 1] =
                H_colptr[ns * (i - 1) + j] + Q_offset + ST_offset
        end
    end

    # Terminal-state columns: Qf column entries only
    for j = 1:ns
        Qf_offset = length(Qf.colptr[j]:(Qf.colptr[j + 1] - 1))
        H_nzval[(H_colptr[N * ns + j]):(H_colptr[N * ns + j] + Qf_offset - 1)] =
            Qf.nzval[Qf.colptr[j]:(Qf.colptr[j + 1] - 1)]
        H_rowval[(H_colptr[N * ns + j]):(H_colptr[N * ns + j] + Qf_offset - 1)] =
            Qf.rowval[Qf.colptr[j]:(Qf.colptr[j + 1] - 1)] .+ (ns * N)
        H_colptr[ns * N + j + 1] = H_colptr[ns * N + j] + Qf_offset
    end

    # Input columns: S column entries, then R column entries
    for i = 1:N
        for j = 1:nu
            S_offset = length(S.colptr[j]:(S.colptr[j + 1] - 1))
            H_nzval[(H_colptr[ns * (N + 1) + nu * (i - 1) + j]):(H_colptr[ns * (N + 1) + nu * (i - 1) + j] + S_offset - 1)] =
                S.nzval[S.colptr[j]:(S.colptr[j + 1] - 1)]
            H_rowval[(H_colptr[ns * (N + 1) + nu * (i - 1) + j]):(H_colptr[ns * (N + 1) + nu * (i - 1) + j] + S_offset - 1)] =
                S.rowval[S.colptr[j]:(S.colptr[j + 1] - 1)] .+ ((i - 1) * ns)

            R_offset = length(R.colptr[j]:(R.colptr[j + 1] - 1))
            H_nzval[(H_colptr[ns * (N + 1) + nu * (i - 1) + j] + S_offset):(H_colptr[ns * (N + 1) + nu * (i - 1) + j] + S_offset + R_offset - 1)] =
                R.nzval[R.colptr[j]:(R.colptr[j + 1] - 1)]
            H_rowval[(H_colptr[ns * (N + 1) + nu * (i - 1) + j] + S_offset):(H_colptr[ns * (N + 1) + nu * (i - 1) + j] + S_offset + R_offset - 1)] =
                R.rowval[R.colptr[j]:(R.colptr[j + 1] - 1)] .+ ((i - 1) * nu + ns * (N + 1))

            H_colptr[ns * (N + 1) + nu * (i - 1) + j + 1] =
                H_colptr[ns * (N + 1) + nu * (i - 1) + j] + S_offset + R_offset
        end
    end
end
# set the data needed to build a SparseArrays.SparseMatrixCSC matrix. J_colptr, J_rowval, and J_nzval
# are set so that they can be passed to SparseMatrixCSC() to obtain the Jacobian, `J`. The Jacobian
# contains the data for the following constraints:
# As_i + Bu_i = s_{i + 1}
# gl <= Es_i + Fu_i <= gu
# If `K` is defined, then this matrix also contains the constraints
# ul <= Kx_i + v_i <= uu
#
# Dense-matrix method with a feedback matrix `K`. Columns of `J` are ordered as
# [s_1, ..., s_{N + 1}, u_1, ..., u_N]; `bool_vec` marks which input bounds are finite
# and `nb` is the number of such finite bounds per time step. All index arithmetic below
# computes, per CSC column, where each sub-block (A, E, K rows for states; B, F, I rows
# for inputs) lands inside the flat `J_nzval`/`J_rowval` arrays.
function _set_sparse_J!(
    J_colptr,
    J_rowval,
    J_nzval,
    A,
    B,
    E,
    F,
    K::MK,
    bool_vec,
    N,
    nb,
) where {T, MK <: AbstractMatrix{T}}
    # nb = num_real_bounds
    ns = size(A, 2)
    nu = size(B, 2)
    nc = size(E, 1)
    I_mat = _init_similar(A, nu, nu)
    I_mat[LinearAlgebra.diagind(I_mat)] .= T(1)
    # NOTE(review): `I_mat` appears unused in this dense method (the sparse variant builds
    # `KI` from it); confirm before removing.
    # Set the first block column of A, E, and K
    for j = 1:ns
        # Each state column j of the first block holds A[:, j], then E[:, j], then the
        # rows of K[:, j] whose input bounds are finite.
        J_nzval[(1 + (j - 1) * (ns + nc + nb)):((j - 1) * (nc + nb) + j * ns)] =
            @view A[:, j]
        J_nzval[(1 + (j - 1) * (nc + nb) + j * ns):(j * (ns + nc) + (j - 1) * nb)] =
            @view E[:, j]
        J_nzval[(1 + j * (ns + nc) + (j - 1) * nb):(j * (ns + nc + nb))] =
            @view K[:, j][bool_vec]
        J_rowval[(1 + (j - 1) * (ns + nc + nb)):((j - 1) * (nc + nb) + j * ns)] = 1:ns
        J_rowval[(1 + (j - 1) * (nc + nb) + j * ns):(j * (ns + nc) + (j - 1) * nb)] =
            (1 + ns * N):(nc + ns * N)
        J_rowval[(1 + j * (ns + nc) + (j - 1) * nb):(j * (ns + nc + nb))] =
            (1 + (ns + nc) * N):((ns + nc) * N + nb)
        J_colptr[j] = 1 + (j - 1) * (ns + nc + nb)
    end
    # Set the remaining block columns corresponding to states: -I, A, E, K
    for i = 2:N
        # `offset` = number of nonzeros consumed by the first (i - 1) state block columns;
        # from block column 2 onward each column also carries a single -1 (the -I of the
        # dynamics constraint s_{i} appearing in stage i - 1).
        offset = (i - 1) * ns * (ns + nc + nb) + (i - 2) * ns
        for j = 1:ns
            J_nzval[1 + offset + (j - 1) * (ns + nc + nb + 1)] = T(-1)
            J_nzval[(1 + offset + (j - 1) * (ns + nc + nb) + j):(offset + j * ns + (j - 1) * (nc + nb) + j)] =
                @view A[:, j]
            J_nzval[(1 + offset + j * ns + (j - 1) * (nc + nb) + j):(offset + j * (ns + nc) + (j - 1) * nb + j)] =
                @view E[:, j]
            J_nzval[(1 + offset + j * (ns + nc) + (j - 1) * nb + j):(offset + j * (ns + nc + nb) + j)] =
                @view K[:, j][bool_vec]
            J_rowval[1 + offset + (j - 1) * (ns + nc + nb + 1)] = ns * (i - 2) + j
            J_rowval[(1 + offset + (j - 1) * (ns + nc + nb) + j):(offset + j * ns + (j - 1) * (nc + nb) + j)] =
                (1 + (i - 1) * ns):(i * ns)
            J_rowval[(1 + offset + j * ns + (j - 1) * (nc + nb) + j):(offset + j * (ns + nc) + (j - 1) * nb + j)] =
                (1 + N * ns + (i - 1) * nc):(N * ns + i * nc)
            J_rowval[(1 + offset + j * (ns + nc) + (j - 1) * nb + j):(offset + j * (ns + nc + nb) + j)] =
                (1 + N * (ns + nc) + (i - 1) * nb):(N * (ns + nc) + i * nb)
            J_colptr[(i - 1) * ns + j] = 1 + (j - 1) * (ns + nc + nb + 1) + offset
        end
    end
    # Set the column corresponding to states at N + 1, which are a single block of -I
    for j = 1:ns
        J_nzval[j + ns * (ns + nc + nb + 1) * N - ns] = T(-1)
        J_rowval[j + ns * (ns + nc + nb + 1) * N - ns] = j + (N - 1) * ns
        J_colptr[ns * N + j] = 1 + ns * (ns + nc + nb + 1) * N - ns + (j - 1)
    end
    # Set the remaining block columns corresponding to inputs: B, F, I
    # `nscol_offset` = total nonzeros contributed by all state block columns above.
    nscol_offset = N * (ns^2 + nc * ns + nb * ns + ns)
    for i = 1:N
        offset = (i - 1) * (nu * ns + nu * nc + nb) + nscol_offset
        # `bool_offset` counts how many inputs with finite bounds precede column j, since
        # only those inputs contribute an identity entry.
        bool_offset = 0
        for j = 1:nu
            J_nzval[(1 + offset + (j - 1) * (ns + nc) + bool_offset):(offset + j * ns + (j - 1) * nc + bool_offset)] =
                @view B[:, j]
            J_nzval[(1 + offset + j * ns + (j - 1) * nc + bool_offset):(offset + j * (ns + nc) + bool_offset)] =
                @view F[:, j]
            if bool_vec[j]
                J_nzval[1 + offset + j * (ns + nc) + bool_offset] = T(1)
                J_rowval[1 + offset + j * (ns + nc) + bool_offset] =
                    (N * (ns + nc) + (i - 1) * nb + 1 + (bool_offset))
            end
            J_rowval[(1 + offset + (j - 1) * (ns + nc) + bool_offset):(offset + j * ns + (j - 1) * nc + bool_offset)] =
                (1 + (i - 1) * ns):(i * ns)
            J_rowval[(1 + offset + j * ns + (j - 1) * nc + bool_offset):(offset + j * (ns + nc) + bool_offset)] =
                (1 + N * ns + (i - 1) * nc):(N * ns + i * nc)
            J_colptr[(ns * (N + 1) + (i - 1) * nu + j)] =
                1 + offset + (j - 1) * (ns + nc) + bool_offset
            bool_offset += bool_vec[j]
        end
    end
    # Standard CSC terminator: one past the last stored entry.
    J_colptr[ns * (N + 1) + nu * N + 1] = length(J_nzval) + 1
end
# Dense-matrix method without a feedback matrix (`K === nothing`); identical layout to the
# `K`-method above but with no K/I rows (and hence no `bool_vec`/`nb` arguments).
function _set_sparse_J!(
    J_colptr,
    J_rowval,
    J_nzval,
    A::M,
    B::M,
    E,
    F,
    K::MK,
    N,
) where {T, M <: AbstractMatrix{T}, MK <: Nothing}
    # nb = num_real_bounds
    ns = size(A, 2)
    nu = size(B, 2)
    nc = size(E, 1)
    # Set the first block column of A, E, and K
    for j = 1:ns
        # State column j of the first block column holds A[:, j] followed by E[:, j].
        J_nzval[(1 + (j - 1) * (ns + nc)):((j - 1) * nc + j * ns)] = @view A[:, j]
        J_nzval[(1 + (j - 1) * nc + j * ns):(j * (ns + nc))] = @view E[:, j]
        J_rowval[(1 + (j - 1) * (ns + nc)):((j - 1) * nc + j * ns)] = 1:ns
        J_rowval[(1 + (j - 1) * nc + j * ns):(j * (ns + nc))] = (1 + ns * N):(nc + ns * N)
        J_colptr[j] = 1 + (j - 1) * (ns + nc)
    end
    # Set the remaining block columns corresponding to states: -I, A, E, K
    for i = 2:N
        # `offset` = nonzeros consumed by the first (i - 1) state block columns; each
        # later column also carries a single -1 from the dynamics constraint.
        offset = (i - 1) * ns * (ns + nc) + (i - 2) * ns
        for j = 1:ns
            J_nzval[1 + offset + (j - 1) * (ns + nc + 1)] = T(-1)
            J_nzval[(1 + offset + (j - 1) * (ns + nc) + j):(offset + j * ns + (j - 1) * nc + j)] =
                @view A[:, j]
            J_nzval[(1 + offset + j * ns + (j - 1) * nc + j):(offset + j * (ns + nc) + j)] =
                @view E[:, j]
            J_rowval[1 + offset + (j - 1) * (ns + nc + 1)] = ns * (i - 2) + j
            J_rowval[(1 + offset + (j - 1) * (ns + nc) + j):(offset + j * ns + (j - 1) * nc + j)] =
                (1 + (i - 1) * ns):(i * ns)
            J_rowval[(1 + offset + j * ns + (j - 1) * nc + j):(offset + j * (ns + nc) + j)] =
                (1 + N * ns + (i - 1) * nc):(N * ns + i * nc)
            J_colptr[(i - 1) * ns + j] = 1 + (j - 1) * (ns + nc + 1) + offset
        end
    end
    # Set the column corresponding to states at N + 1, which are a single block of -I
    for j = 1:ns
        J_nzval[j + ns * (ns + nc + 1) * N - ns] = T(-1)
        J_rowval[j + ns * (ns + nc + 1) * N - ns] = j + (N - 1) * ns
        J_colptr[ns * N + j] = 1 + ns * (ns + nc + 1) * N - ns + (j - 1)
    end
    # Set the remaining block columns corresponding to inputs: B, F
    # `nscol_offset` = total nonzeros contributed by all state block columns above.
    nscol_offset = N * (ns^2 + nc * ns + ns)
    for i = 1:N
        offset = (i - 1) * (nu * ns + nu * nc) + nscol_offset
        for j = 1:nu
            J_nzval[(1 + offset + (j - 1) * (ns + nc)):(offset + j * ns + (j - 1) * nc)] =
                @view B[:, j]
            J_nzval[(1 + offset + j * ns + (j - 1) * nc):(offset + j * (ns + nc))] =
                @view F[:, j]
            J_rowval[(1 + offset + (j - 1) * (ns + nc)):(offset + j * ns + (j - 1) * nc)] =
                (1 + (i - 1) * ns):(i * ns)
            J_rowval[(1 + offset + j * ns + (j - 1) * nc):(offset + j * (ns + nc))] =
                (1 + N * ns + (i - 1) * nc):(N * ns + i * nc)
            J_colptr[(ns * (N + 1) + (i - 1) * nu + j)] = 1 + offset + (j - 1) * (ns + nc)
        end
    end
    # Standard CSC terminator: one past the last stored entry.
    J_colptr[ns * (N + 1) + nu * N + 1] = length(J_nzval) + 1
end
# Sparse-matrix method with a feedback matrix `K`. Instead of copying whole dense columns,
# this method splices each input matrix's own CSC data (colptr/rowval/nzval ranges) into
# `J`, shifting row indices into the correct constraint row blocks.
function _set_sparse_J!(
    J_colptr,
    J_rowval,
    J_nzval,
    A::M,
    B::M,
    E::M,
    F::M,
    K::MK,
    bool_vec,
    N,
    nb,
) where {T, M <: SparseMatrixCSC{T}, MK <: SparseMatrixCSC{T}}
    ns = size(A, 2)
    nu = size(B, 2)
    nc = size(E, 1)
    I_mat = _init_similar(K, nu, nu)
    I_mat[LinearAlgebra.diagind(I_mat)] .= T(1)
    # `KI` keeps only the identity rows for inputs with finite bounds; `K_sparse` keeps
    # the matching rows of K. Both stay sparse so their CSC data can be spliced directly.
    KI = I_mat[bool_vec, :]
    K_sparse = K[bool_vec, :]
    J_colptr[1] = 1
    # Set the first block column of A, E, and K
    for j = 1:ns
        # *_offset = number of stored entries in column j of the respective matrix.
        A_offset = length(A.colptr[j]:(A.colptr[j + 1] - 1))
        J_nzval[(J_colptr[j]):(J_colptr[j] + A_offset - 1)] =
            A.nzval[A.colptr[j]:(A.colptr[j + 1] - 1)]
        J_rowval[(J_colptr[j]):(J_colptr[j] + A_offset - 1)] =
            A.rowval[A.colptr[j]:(A.colptr[j + 1] - 1)]
        E_offset = length(E.colptr[j]:(E.colptr[j + 1] - 1))
        J_nzval[(J_colptr[j] + A_offset):(J_colptr[j] + A_offset + E_offset - 1)] =
            E.nzval[E.colptr[j]:(E.colptr[j + 1] - 1)]
        J_rowval[(J_colptr[j] + A_offset):(J_colptr[j] + A_offset + E_offset - 1)] =
            E.rowval[E.colptr[j]:(E.colptr[j + 1] - 1)] .+ (ns * N)
        K_offset = length(K_sparse.colptr[j]:(K_sparse.colptr[j + 1] - 1))
        (
            J_nzval[(J_colptr[j] + A_offset + E_offset):(J_colptr[j] + A_offset + E_offset + K_offset - 1)] =
                K_sparse.nzval[K_sparse.colptr[j]:(K_sparse.colptr[j + 1] - 1)]
        )
        (
            J_rowval[(J_colptr[j] + A_offset + E_offset):(J_colptr[j] + A_offset + E_offset + K_offset - 1)] =
                K_sparse.rowval[K_sparse.colptr[j]:(K_sparse.colptr[j + 1] - 1)] .+
                ((ns + nc) * N)
        )
        # Column pointers are built incrementally since sparse column lengths vary.
        J_colptr[j + 1] = J_colptr[j] + A_offset + E_offset + K_offset
    end
    # Set the remaining block columns corresponding to states: -I, A, E, K
    for i = 2:N
        for j = 1:ns
            # Leading -1 comes from the -I of the dynamics constraint.
            J_nzval[J_colptr[j + (i - 1) * ns]] = T(-1)
            J_rowval[J_colptr[j + (i - 1) * ns]] = ns * (i - 2) + j
            A_offset = length(A.colptr[j]:(A.colptr[j + 1] - 1))
            J_nzval[(J_colptr[j + (i - 1) * ns] + 1):(J_colptr[j + (i - 1) * ns] + 1 + A_offset - 1)] =
                A.nzval[A.colptr[j]:(A.colptr[j + 1] - 1)]
            J_rowval[(J_colptr[j + (i - 1) * ns] + 1):(J_colptr[j + (i - 1) * ns] + 1 + A_offset - 1)] =
                A.rowval[A.colptr[j]:(A.colptr[j + 1] - 1)] .+ (ns * (i - 1))
            E_offset = length(E.colptr[j]:(E.colptr[j + 1] - 1))
            (
                J_nzval[(J_colptr[j + (i - 1) * ns] + 1 + A_offset):(J_colptr[j + (i - 1) * ns] + 1 + A_offset + E_offset - 1)] =
                    E.nzval[E.colptr[j]:(E.colptr[j + 1] - 1)]
            )
            (
                J_rowval[(J_colptr[j + (i - 1) * ns] + 1 + A_offset):(J_colptr[j + (i - 1) * ns] + 1 + A_offset + E_offset - 1)] =
                    E.rowval[E.colptr[j]:(E.colptr[j + 1] - 1)] .+ (ns * N + nc * (i - 1))
            )
            K_offset = length(K_sparse.colptr[j]:(K_sparse.colptr[j + 1] - 1))
            (
                J_nzval[(J_colptr[j + (i - 1) * ns] + 1 + A_offset + E_offset):(J_colptr[j + (i - 1) * ns] + 1 + A_offset + E_offset + K_offset - 1)] =
                    K_sparse.nzval[K_sparse.colptr[j]:(K_sparse.colptr[j + 1] - 1)]
            )
            (
                J_rowval[(J_colptr[j + (i - 1) * ns] + 1 + A_offset + E_offset):(J_colptr[j + (i - 1) * ns] + 1 + A_offset + E_offset + K_offset - 1)] =
                    K_sparse.rowval[K_sparse.colptr[j]:(K_sparse.colptr[j + 1] - 1)] .+
                    ((ns + nc) * N + nb * (i - 1))
            )
            J_colptr[ns * (i - 1) + j + 1] =
                J_colptr[ns * (i - 1) + j] + 1 + A_offset + E_offset + K_offset
        end
    end
    # Set the column corresponding to states at N + 1, which are a single block of -I
    for j = 1:ns
        J_nzval[J_colptr[ns * N + j]] = T(-1)
        J_rowval[J_colptr[ns * N + j]] = ns * (N - 1) + j
        J_colptr[ns * N + j + 1] = J_colptr[ns * N + j] + 1
    end
    # Set the remaining block columns corresponding to inputs: B, F, I
    for i = 1:N
        offset = ns * (N + 1) + nu * (i - 1)
        for j = 1:nu
            B_offset = length(B.colptr[j]:(B.colptr[j + 1] - 1))
            J_nzval[(J_colptr[offset + j]):(J_colptr[offset + j] + B_offset - 1)] =
                B.nzval[B.colptr[j]:(B.colptr[j + 1] - 1)]
            J_rowval[(J_colptr[offset + j]):(J_colptr[offset + j] + B_offset - 1)] =
                B.rowval[B.colptr[j]:(B.colptr[j + 1] - 1)] .+ (ns * (i - 1))
            F_offset = length(F.colptr[j]:(F.colptr[j + 1] - 1))
            (
                J_nzval[(J_colptr[offset + j] + B_offset):(J_colptr[offset + j] + B_offset + F_offset - 1)] =
                    F.nzval[F.colptr[j]:(F.colptr[j + 1] - 1)]
            )
            (
                J_rowval[(J_colptr[offset + j] + B_offset):(J_colptr[offset + j] + B_offset + F_offset - 1)] =
                    F.rowval[F.colptr[j]:(F.colptr[j + 1] - 1)] .+ (ns * N + nc * (i - 1))
            )
            KI_offset = length(KI.colptr[j]:(KI.colptr[j + 1] - 1))
            (
                J_nzval[(J_colptr[offset + j] + B_offset + F_offset):(J_colptr[offset + j] + B_offset + F_offset + KI_offset - 1)] =
                    KI.nzval[KI.colptr[j]:(KI.colptr[j + 1] - 1)]
            )
            (
                J_rowval[(J_colptr[offset + j] + B_offset + F_offset):(J_colptr[offset + j] + B_offset + F_offset + KI_offset - 1)] =
                    KI.rowval[KI.colptr[j]:(KI.colptr[j + 1] - 1)] .+
                    ((ns + nc) * N + nb * (i - 1))
            )
            J_colptr[offset + j + 1] =
                J_colptr[offset + j] + B_offset + F_offset + KI_offset
        end
    end
end
# Sparse-matrix method without a feedback matrix (`K === nothing`); same CSC-splicing
# strategy as the sparse `K`-method, but with no K/I row blocks.
function _set_sparse_J!(
    J_colptr,
    J_rowval,
    J_nzval,
    A::M,
    B::M,
    E::M,
    F::M,
    K::MK,
    N,
) where {T, M <: SparseMatrixCSC{T}, MK <: Nothing}
    ns = size(A, 2)
    nu = size(B, 2)
    nc = size(E, 1)
    J_colptr[1] = 1
    # Set the first block column of A, E, and K
    for j = 1:ns
        # *_offset = number of stored entries in column j of the respective matrix.
        A_offset = length(A.colptr[j]:(A.colptr[j + 1] - 1))
        J_nzval[(J_colptr[j]):(J_colptr[j] + A_offset - 1)] =
            A.nzval[A.colptr[j]:(A.colptr[j + 1] - 1)]
        J_rowval[(J_colptr[j]):(J_colptr[j] + A_offset - 1)] =
            A.rowval[A.colptr[j]:(A.colptr[j + 1] - 1)]
        E_offset = length(E.colptr[j]:(E.colptr[j + 1] - 1))
        J_nzval[(J_colptr[j] + A_offset):(J_colptr[j] + A_offset + E_offset - 1)] =
            E.nzval[E.colptr[j]:(E.colptr[j + 1] - 1)]
        J_rowval[(J_colptr[j] + A_offset):(J_colptr[j] + A_offset + E_offset - 1)] =
            E.rowval[E.colptr[j]:(E.colptr[j + 1] - 1)] .+ (ns * N)
        # Column pointers are built incrementally since sparse column lengths vary.
        J_colptr[j + 1] = J_colptr[j] + A_offset + E_offset
    end
    # Set the remaining block columns corresponding to states: -I, A, E
    for i = 2:N
        for j = 1:ns
            # Leading -1 comes from the -I of the dynamics constraint.
            J_nzval[J_colptr[j + (i - 1) * ns]] = T(-1)
            J_rowval[J_colptr[j + (i - 1) * ns]] = ns * (i - 2) + j
            A_offset = length(A.colptr[j]:(A.colptr[j + 1] - 1))
            J_nzval[(J_colptr[j + (i - 1) * ns] + 1):(J_colptr[j + (i - 1) * ns] + 1 + A_offset - 1)] =
                A.nzval[A.colptr[j]:(A.colptr[j + 1] - 1)]
            J_rowval[(J_colptr[j + (i - 1) * ns] + 1):(J_colptr[j + (i - 1) * ns] + 1 + A_offset - 1)] =
                A.rowval[A.colptr[j]:(A.colptr[j + 1] - 1)] .+ (ns * (i - 1))
            E_offset = length(E.colptr[j]:(E.colptr[j + 1] - 1))
            (
                J_nzval[(J_colptr[j + (i - 1) * ns] + 1 + A_offset):(J_colptr[j + (i - 1) * ns] + 1 + A_offset + E_offset - 1)] =
                    E.nzval[E.colptr[j]:(E.colptr[j + 1] - 1)]
            )
            (
                J_rowval[(J_colptr[j + (i - 1) * ns] + 1 + A_offset):(J_colptr[j + (i - 1) * ns] + 1 + A_offset + E_offset - 1)] =
                    E.rowval[E.colptr[j]:(E.colptr[j + 1] - 1)] .+ (ns * N + nc * (i - 1))
            )
            J_colptr[ns * (i - 1) + j + 1] =
                J_colptr[ns * (i - 1) + j] + 1 + A_offset + E_offset
        end
    end
    # Set the column corresponding to states at N + 1, which are a single block of -I
    for j = 1:ns
        J_nzval[J_colptr[ns * N + j]] = T(-1)
        J_rowval[J_colptr[ns * N + j]] = ns * (N - 1) + j
        J_colptr[ns * N + j + 1] = J_colptr[ns * N + j] + 1
    end
    # Set the remaining block columns corresponding to inputs: B, F
    for i = 1:N
        offset = ns * (N + 1) + nu * (i - 1)
        for j = 1:nu
            B_offset = length(B.colptr[j]:(B.colptr[j + 1] - 1))
            J_nzval[(J_colptr[offset + j]):(J_colptr[offset + j] + B_offset - 1)] =
                B.nzval[B.colptr[j]:(B.colptr[j + 1] - 1)]
            J_rowval[(J_colptr[offset + j]):(J_colptr[offset + j] + B_offset - 1)] =
                B.rowval[B.colptr[j]:(B.colptr[j + 1] - 1)] .+ (ns * (i - 1))
            F_offset = length(F.colptr[j]:(F.colptr[j + 1] - 1))
            (
                J_nzval[(J_colptr[offset + j] + B_offset):(J_colptr[offset + j] + B_offset + F_offset - 1)] =
                    F.nzval[F.colptr[j]:(F.colptr[j + 1] - 1)]
            )
            (
                J_rowval[(J_colptr[offset + j] + B_offset):(J_colptr[offset + j] + B_offset + F_offset - 1)] =
                    F.rowval[F.colptr[j]:(F.colptr[j + 1] - 1)] .+ (ns * N + nc * (i - 1))
            )
            J_colptr[offset + j + 1] = J_colptr[offset + j] + B_offset + F_offset
        end
    end
end
| DynamicNLPModels | https://github.com/MadNLP/DynamicNLPModels.jl.git |
[
"MIT"
] | 0.1.0 | 4c035c67eee91a19afdc5db63eb71464c5db32ba | src/LinearQuadratic/tools.jl | code | 30656 | """
get_u(solution_ref, lqdm::SparseLQDynamicModel) -> u <: vector
get_u(solution_ref, lqdm::DenseLQDynamicModel) -> u <: vector
Query the solution `u` from the solver. If `K = nothing`, the solution for `u` is queried from `solution_ref.solution`
If `K <: AbstractMatrix`, `solution_ref.solution` returns `v`, and `get_u` solves for `u` using the `K` matrix (and the `A` and `B` matrices if `lqdm <: DenseLQDynamicModel`)
"""
function get_u(
    solver_status,
    lqdm::SparseLQDynamicModel{T, V, M1, M2, M3, MK},
) where {
    T,
    V <: AbstractVector{T},
    M1 <: AbstractMatrix{T},
    M2 <: AbstractMatrix{T},
    M3 <: AbstractMatrix{T},
    MK <: AbstractMatrix{T},
}
    # With a feedback matrix `K`, the sparse model's decision vector is [s; v]
    # and the true input is u_i = K s_i + v_i. Recover u_i for i = 1, ..., N.
    solution = solver_status.solution
    ns = lqdm.dynamic_data.ns
    nu = lqdm.dynamic_data.nu
    N = lqdm.dynamic_data.N
    K = lqdm.dynamic_data.K

    u = zeros(T, nu * N)
    # Scratch buffer for K * s_i, allocated once and overwritten by mul! every
    # stage (the previous version allocated a fresh (nu, 1) matrix per stage).
    Ks = zeros(T, size(K, 1))

    for i = 1:N
        start_v = (i - 1) * nu + 1
        end_v = i * nu
        start_s = (i - 1) * ns + 1
        end_s = i * ns

        # Views avoid copying the state and input slices of the solution.
        s = view(solution, start_s:end_s)
        v = view(solution, (ns * (N + 1) + start_v):(ns * (N + 1) + end_v))

        LinearAlgebra.mul!(Ks, K, s)      # Ks = K * s_i
        LinearAlgebra.axpy!(1, v, Ks)     # Ks += v_i
        u[start_v:end_v] = Ks
    end
    return u
end
# Dense model with feedback matrix `K`: the solver returns `v`; reconstruct the state
# trajectory from s0 and the condensed blocks, then form u_i = K s_i + v_i.
function get_u(
    solver_status,
    lqdm::DenseLQDynamicModel{T, V, M1, M2, M3, M4, MK},
) where {
    T,
    V <: AbstractVector{T},
    M1 <: AbstractMatrix{T},
    M2 <: AbstractMatrix{T},
    M3 <: AbstractMatrix{T},
    M4 <: AbstractMatrix{T},
    MK <: AbstractMatrix{T},
}
    dnlp = lqdm.dynamic_data
    N = dnlp.N
    ns = dnlp.ns
    nu = dnlp.nu
    K = dnlp.K
    # Condensed blocks built when the dense model was constructed.
    block_A = lqdm.blocks.A
    block_B = lqdm.blocks.B
    block_Aw = lqdm.blocks.Aw
    v = solver_status.solution
    As0 = zeros(T, ns * (N + 1))
    Bv = zeros(T, ns)
    s = zeros(T, ns * (N + 1))
    # Accumulate the contribution of each input v_j to every later state:
    # block row i of `block_B` maps v_j into s_{i + j}.
    for i = 1:N
        B_row_range = (1 + (i - 1) * ns):(i * ns)
        B_sub_block = view(block_B, B_row_range, :)
        for j = 1:(N - i + 1)
            v_sub_vec = v[(1 + nu * (j - 1)):(nu * j)]
            LinearAlgebra.mul!(Bv, B_sub_block, v_sub_vec)
            s[(1 + ns * (i + j - 1)):(ns * (i + j))] .+= Bv
        end
    end
    # Add the free response from the initial state and the disturbance term.
    LinearAlgebra.mul!(As0, block_A, dnlp.s0)
    LinearAlgebra.axpy!(1, As0, s)
    LinearAlgebra.axpy!(1, block_Aw, s)
    # Finally u_i = v_i + K s_i for each stage.
    Ks = _init_similar(dnlp.s0, size(K, 1), T)
    u = copy(v)
    for i = 1:N
        LinearAlgebra.mul!(Ks, K, s[(1 + ns * (i - 1)):(ns * i)])
        u[(1 + nu * (i - 1)):(nu * i)] .+= Ks
    end
    return u
end
function get_u(
    solver_status,
    lqdm::SparseLQDynamicModel{T, V, M1, M2, M3, MK},
) where {
    T,
    V <: AbstractVector{T},
    M1 <: AbstractMatrix{T},
    M2 <: AbstractMatrix{T},
    M3 <: AbstractMatrix{T},
    MK <: Nothing,
}
    # Without a feedback matrix K the sparse formulation's decision vector is
    # [s; u], so the inputs are simply the tail of the solution vector.
    ns = lqdm.dynamic_data.ns
    N = lqdm.dynamic_data.N
    return solver_status.solution[(ns * (N + 1) + 1):end]
end
function get_u(
    solver_status,
    lqdm::DenseLQDynamicModel{T, V, M1, M2, M3, M4, MK},
) where {
    T,
    V <: AbstractVector{T},
    M1 <: AbstractMatrix{T},
    M2 <: AbstractMatrix{T},
    M3 <: AbstractMatrix{T},
    M4 <: AbstractMatrix{T},
    MK <: Nothing,
}
    # Without K, the dense model's decision variables are exactly the inputs u;
    # return a copy so the caller cannot mutate the solver's solution vector.
    solution = solver_status.solution
    return copy(solution)
end
"""
get_s(solution_ref, lqdm::SparseLQDynamicModel) -> s <: vector
get_s(solution_ref, lqdm::DenseLQDynamicModel) -> s <: vector
Query the solution `s` from the solver. If `lqdm <: SparseLQDynamicModel`, the solution is queried directly from `solution_ref.solution`
If `lqdm <: DenseLQDynamicModel`, then `solution_ref.solution` returns `u` (if `K = nothing`) or `v` (if `K <: AbstactMatrix`), and `s` is found form
transforming `u` or `v` into `s` using `A`, `B`, and `K` matrices.
"""
function get_s(
    solver_status,
    lqdm::SparseLQDynamicModel{T, V, M1, M2, M3, MK},
) where {
    T,
    V <: AbstractVector{T},
    M1 <: AbstractMatrix{T},
    M2 <: AbstractMatrix{T},
    M3 <: AbstractMatrix{T},
    MK <: Union{Nothing, AbstractMatrix},
}
    # The sparse formulation's decision vector is ordered [s; u] (or [s; v]),
    # so the state trajectory is the leading ns * (N + 1) entries.
    ns = lqdm.dynamic_data.ns
    N = lqdm.dynamic_data.N
    return solver_status.solution[1:(ns * (N + 1))]
end
# Dense model: the states are not decision variables, so rebuild the trajectory
# from the solver's input solution using the condensed blocks A, B, and Aw.
function get_s(
    solver_status,
    lqdm::DenseLQDynamicModel{T, V, M1, M2, M3, M4, MK},
) where {
    T,
    V <: AbstractVector{T},
    M1 <: AbstractMatrix{T},
    M2 <: AbstractMatrix{T},
    M3 <: AbstractMatrix{T},
    M4 <: AbstractMatrix{T},
    MK <: Union{Nothing, AbstractMatrix},
}
    dnlp = lqdm.dynamic_data
    N = dnlp.N
    ns = dnlp.ns
    nu = dnlp.nu
    block_A = lqdm.blocks.A
    block_B = lqdm.blocks.B
    block_Aw = lqdm.blocks.Aw
    v = solver_status.solution
    As0 = zeros(T, ns * (N + 1))
    Bv = zeros(T, ns)
    s = zeros(T, ns * (N + 1))
    # Accumulate the contribution of each input v_j to every later state:
    # block row i of `block_B` maps v_j into s_{i + j}.
    for i = 1:N
        B_row_range = (1 + (i - 1) * ns):(i * ns)
        B_sub_block = view(block_B, B_row_range, :)
        for j = 1:(N - i + 1)
            v_sub_vec = v[(1 + nu * (j - 1)):(nu * j)]
            LinearAlgebra.mul!(Bv, B_sub_block, v_sub_vec)
            s[(1 + ns * (i + j - 1)):(ns * (i + j))] .+= Bv
        end
    end
    # Add the free response from the initial state and the disturbance term.
    LinearAlgebra.mul!(As0, block_A, dnlp.s0)
    LinearAlgebra.axpy!(1, As0, s)
    LinearAlgebra.axpy!(1, block_Aw, s)
    return s
end
# Metaprogramming: for every field of LQDynamicData, generate and export a documented
# accessor `get_<field>` with methods for LQDynamicData, SparseLQDynamicModel, and
# DenseLQDynamicModel (the model methods forward to `dynamic_data`).
for field in fieldnames(LQDynamicData)
    method = Symbol("get_", field)
    @eval begin
        @doc """
            $($method)(LQDynamicData)
            $($method)(SparseLQDynamicModel)
            $($method)(DenseLQDynamicModel)
        Return the value of $($(QuoteNode(field))) from `LQDynamicData` or `SparseLQDynamicModel.dynamic_data` or `DenseLQDynamicModel.dynamic_data`
        """
        $method(dyn_data::LQDynamicData) = getproperty(dyn_data, $(QuoteNode(field)))
    end
    @eval $method(dyn_model::SparseLQDynamicModel) = $method(dyn_model.dynamic_data)
    @eval $method(dyn_model::DenseLQDynamicModel) = $method(dyn_model.dynamic_data)
    @eval export $method
end
# Metaprogramming: generate and export `set_<field>!(model, row, col, val)` for every
# matrix-valued field of the dynamic data.
for field in [:A, :B, :Q, :R, :Qf, :E, :F, :S, :K]
    method = Symbol("set_", field, "!")
    @eval begin
        @doc """
            $($method)(LQDynamicData, row, col, val)
            $($method)(SparseLQDynamicModel, row, col, val)
            $($method)(DenseLQDynamicModel, row, col, val)
        Set the value of entry $($(QuoteNode(field)))[row, col] to val for `LQDynamicData`, `SparseLQDynamicModel.dynamic_data`, or `DenseLQDynamicModel.dynamic_data`
        """
        $method(dyn_data::LQDynamicData, row, col, val) = (dyn_data.$field[row, col] = val)
    end
    @eval $method(dyn_model::SparseLQDynamicModel, row, col, val) =
        (dyn_model.dynamic_data.$field[row, col] = val)
    @eval $method(dyn_model::DenseLQDynamicModel, row, col, val) =
        (dyn_model.dynamic_data.$field[row, col] = val)
    @eval export $method
end
# Metaprogramming: generate and export `set_<field>!(model, index, val)` for every
# vector-valued field of the dynamic data.
for field in [:s0, :sl, :su, :ul, :uu, :gl, :gu]
    method = Symbol("set_", field, "!")
    @eval begin
        @doc """
            $($method)(LQDynamicData, index, val)
            $($method)(SparseLQDynamicModel, index, val)
            $($method)(DenseLQDynamicModel, index, val)
        Set the value of entry $($(QuoteNode(field)))[index] to val for `LQDynamicData`, `SparseLQDynamicModel.dynamic_data`, or `DenseLQDynamicModel.dynamic_data`
        """
        $method(dyn_data::LQDynamicData, index, val) = (dyn_data.$field[index] = val)
    end
    @eval $method(dyn_model::SparseLQDynamicModel, index, val) =
        (dyn_model.dynamic_data.$field[index] = val)
    @eval $method(dyn_model::DenseLQDynamicModel, index, val) =
        (dyn_model.dynamic_data.$field[index] = val)
    @eval export $method
end
# Write the (row, col) coordinates of every stored entry of `S` into `rows`/`cols`,
# in CSC (column-major, nzval) order. Both outputs must have length >= nnz(S).
function fill_structure!(S::SparseMatrixCSC, rows, cols)
    idx = 1
    @inbounds for col = 1:size(S, 2)
        for ptr = S.colptr[col]:(S.colptr[col + 1] - 1)
            rows[idx] = S.rowval[ptr]
            cols[idx] = col
            idx += 1
        end
    end
end
# Write the stored values of `S`, scaled by `obj_weight`, into `vals` in CSC
# (column-major, nzval) order — matching the coordinates from `fill_structure!`.
function fill_coord!(S::SparseMatrixCSC, vals, obj_weight)
    idx = 1
    @inbounds for col = 1:size(S, 2)
        for ptr = S.colptr[col]:(S.colptr[col + 1] - 1)
            vals[idx] = obj_weight * S.nzval[ptr]
            idx += 1
        end
    end
end
# Sparse model: the Hessian sparsity pattern is exactly that of the stored CSC matrix H.
function NLPModels.hess_structure!(
    qp::SparseLQDynamicModel{T, V, M1, M2, M3},
    rows::AbstractVector{<:Integer},
    cols::AbstractVector{<:Integer},
) where {T, V, M1 <: SparseMatrixCSC, M2 <: SparseMatrixCSC, M3 <: AbstractMatrix}
    fill_structure!(qp.data.H, rows, cols)
    return rows, cols
end
# Dense model: report the full lower triangle of the Hessian, column by column.
function NLPModels.hess_structure!(
    qp::DenseLQDynamicModel{T, V, M1, M2, M3},
    rows::AbstractVector{<:Integer},
    cols::AbstractVector{<:Integer},
) where {T, V, M1 <: Matrix, M2 <: Matrix, M3 <: Matrix}
    n = qp.meta.nvar
    idx = 1
    for col = 1:n, row = col:n
        rows[idx] = row
        cols[idx] = col
        idx += 1
    end
    return rows, cols
end
# Sparse model: Hessian values are the stored nonzeros of H scaled by obj_weight
# (the Hessian is constant, so `x` is unused).
function NLPModels.hess_coord!(
    qp::SparseLQDynamicModel{T, V, M1, M2, M3},
    x::AbstractVector{T},
    vals::AbstractVector{T};
    obj_weight::Real = one(eltype(x)),
) where {T, V, M1 <: SparseMatrixCSC, M2 <: SparseMatrixCSC, M3 <: AbstractMatrix}
    NLPModels.increment!(qp, :neval_hess)
    fill_coord!(qp.data.H, vals, obj_weight)
    return vals
end
# Dense model: lower-triangle values of the constant Hessian, scaled by obj_weight,
# emitted in the same column-major order as `hess_structure!` (`x` is unused).
function NLPModels.hess_coord!(
    qp::DenseLQDynamicModel{T, V, M1, M2, M3},
    x::AbstractVector{T},
    vals::AbstractVector{T};
    obj_weight::Real = one(eltype(x)),
) where {T, V, M1 <: Matrix, M2 <: Matrix, M3 <: Matrix}
    NLPModels.increment!(qp, :neval_hess)
    H = qp.data.H
    n = qp.meta.nvar
    idx = 1
    for col = 1:n, row = col:n
        vals[idx] = obj_weight * H[row, col]
        idx += 1
    end
    return vals
end
# 5-arg form (with constraint multipliers `y`): the constraints are linear, so the
# Lagrangian Hessian is independent of `y` and forwards to the 4-arg form.
NLPModels.hess_coord!(
    qp::SparseLQDynamicModel,
    x::AbstractVector,
    y::AbstractVector,
    vals::AbstractVector;
    obj_weight::Real = one(eltype(x)),
) = NLPModels.hess_coord!(qp, x, vals, obj_weight = obj_weight)
# 5-arg form (with constraint multipliers `y`): the constraints are linear, so the
# Lagrangian Hessian is independent of `y` and forwards to the 4-arg form.
NLPModels.hess_coord!(
    qp::DenseLQDynamicModel,
    x::AbstractVector,
    y::AbstractVector,
    vals::AbstractVector;
    obj_weight::Real = one(eltype(x)),
) = NLPModels.hess_coord!(qp, x, vals, obj_weight = obj_weight)
# Sparse model: the Jacobian sparsity pattern is exactly that of the stored CSC matrix A.
function NLPModels.jac_structure!(
    qp::SparseLQDynamicModel{T, V, M1, M2, M3},
    rows::AbstractVector{<:Integer},
    cols::AbstractVector{<:Integer},
) where {T, V, M1 <: SparseMatrixCSC, M2 <: SparseMatrixCSC, M3 <: AbstractMatrix}
    fill_structure!(qp.data.A, rows, cols)
    return rows, cols
end
# Dense model: the Jacobian is treated as fully dense — every (row, col) pair,
# traversed column-major.
function NLPModels.jac_structure!(
    qp::DenseLQDynamicModel{T, V, M1, M2, M3},
    rows::AbstractVector{<:Integer},
    cols::AbstractVector{<:Integer},
) where {T, V, M1 <: Matrix, M2 <: Matrix, M3 <: Matrix}
    idx = 1
    for col = 1:(qp.meta.nvar), row = 1:(qp.meta.ncon)
        rows[idx] = row
        cols[idx] = col
        idx += 1
    end
    return rows, cols
end
# Sparse model: Jacobian values are the stored nonzeros of A (the constraints are
# linear, so `x` is unused and the weight is one).
function NLPModels.jac_coord!(
    qp::SparseLQDynamicModel{T, V, M1, M2, M3},
    x::AbstractVector,
    vals::AbstractVector,
) where {T, V, M1 <: SparseMatrixCSC, M2 <: SparseMatrixCSC, M3 <: AbstractMatrix}
    NLPModels.increment!(qp, :neval_jac)
    fill_coord!(qp.data.A, vals, one(T))
    return vals
end
# Dense model: emit all entries of the constant Jacobian in the same column-major
# order as `jac_structure!` (`x` is unused since the constraints are linear).
function NLPModels.jac_coord!(
    qp::DenseLQDynamicModel{T, V, M1, M2, M3},
    x::AbstractVector,
    vals::AbstractVector,
) where {T, V, M1 <: Matrix, M2 <: Matrix, M3 <: Matrix}
    NLPModels.increment!(qp, :neval_jac)
    J = qp.data.A
    idx = 1
    for col = 1:(qp.meta.nvar), row = 1:(qp.meta.ncon)
        vals[idx] = J[row, col]
        idx += 1
    end
    return vals
end
# Wrap a contiguous slab of `tensor`, starting at linear index `shift`, as a
# `dims`-shaped Matrix without copying. The caller must keep `tensor` alive (and
# unmoved) while the wrapper is in use, since this goes through a raw pointer.
_dnlp_unsafe_wrap(tensor::A, dims::Tuple, shift = 1) where {T, A <: AbstractArray{T}} =
    unsafe_wrap(Matrix{T}, pointer(tensor, shift), dims)
# CUDA method: same no-copy wrap, but producing a 2-D CuArray over device memory.
function _dnlp_unsafe_wrap(
    tensor::A,
    dims::Tuple,
    shift = 1,
) where {T, A <: CUDA.CuArray{T, 3, CUDA.Mem.DeviceBuffer}}
    return unsafe_wrap(
        CUDA.CuArray{T, 2, CUDA.Mem.DeviceBuffer},
        pointer(tensor, shift),
        dims,
    )
end
# In-place y = Jac * x for the structured Jacobian operator (CPU method).
# The operator stores, for each of the three constraint groups (general constraints,
# bounded-state rows, bounded-input rows), only the first block column; based on the
# accumulation below, block (i + j - 1, j) of the full Jacobian equals stored block i.
function LinearAlgebra.mul!(
    y::V,
    Jac::LQJacobianOperator{T, M, A},
    x::V,
) where {T, V <: AbstractVector{T}, M <: AbstractMatrix{T}, A <: AbstractArray{T}}
    fill!(y, zero(T))
    J1 = Jac.truncated_jac1
    J2 = Jac.truncated_jac2
    J3 = Jac.truncated_jac3
    N = Jac.N
    nu = Jac.nu
    nc = Jac.nc
    nsc = Jac.nsc
    nuc = Jac.nuc
    for i = 1:N
        # No-copy views of the i-th stored block of each constraint group.
        sub_B1 = _dnlp_unsafe_wrap(J1, (nc, nu), (1 + (i - 1) * (nc * nu)))
        sub_B2 = _dnlp_unsafe_wrap(J2, (nsc, nu), (1 + (i - 1) * (nsc * nu)))
        sub_B3 = _dnlp_unsafe_wrap(J3, (nuc, nu), (1 + (i - 1) * (nuc * nu)))
        for j = 1:(N - i + 1)
            sub_x = view(x, (1 + (j - 1) * nu):(j * nu))
            # 5-arg mul! accumulates (alpha = 1, beta = 1) into the output slices;
            # the three groups occupy consecutive row ranges of y.
            LinearAlgebra.mul!(
                view(y, (1 + nc * (j + i - 2)):(nc * (j + i - 1))),
                sub_B1,
                sub_x,
                1,
                1,
            )
            LinearAlgebra.mul!(
                view(y, (1 + nc * N + nsc * (j + i - 2)):(nc * N + nsc * (j + i - 1))),
                sub_B2,
                sub_x,
                1,
                1,
            )
            LinearAlgebra.mul!(
                view(
                    y,
                    (1 + nc * N + nsc * N + nuc * (j + i - 2)):(nc * N + nsc * N + nuc * (j + i - 1)),
                ),
                sub_B3,
                sub_x,
                1,
                1,
            )
        end
    end
end
# CUDA method of y = Jac * x (note the argument names: here `x` is the OUTPUT and `y`
# the input, matching mul!(C, A, B) order). Uses strided-batched GEMMs over the
# pre-allocated 3-D scratch buffers stored on the operator.
function LinearAlgebra.mul!(
    x::V,
    Jac::LQJacobianOperator{T, M, A},
    y::V,
) where {
    T,
    V <: CUDA.CuArray{T, 1, CUDA.Mem.DeviceBuffer},
    M <: AbstractMatrix{T},
    A <: AbstractArray{T},
}
    J1 = Jac.truncated_jac1
    J2 = Jac.truncated_jac2
    J3 = Jac.truncated_jac3
    N = Jac.N
    nu = Jac.nu
    nc = Jac.nc
    nsc = Jac.nsc
    nuc = Jac.nuc
    x1 = Jac.x1
    x2 = Jac.x2
    x3 = Jac.x3
    y1 = Jac.y
    fill!(x1, zero(T))
    fill!(x2, zero(T))
    fill!(x3, zero(T))
    for i = 1:N
        # Broadcast input stage i into every slice of the batch buffer, then let the
        # batched GEMMs accumulate its contribution to all later block rows.
        y1 .= y[(1 + (i - 1) * nu):(i * nu)]
        x1_view = view(x1, :, :, i:N)
        x2_view = view(x2, :, :, i:N)
        x3_view = view(x3, :, :, i:N)
        J1_view = view(J1, :, :, 1:(N - i + 1))
        J2_view = view(J2, :, :, 1:(N - i + 1))
        J3_view = view(J3, :, :, 1:(N - i + 1))
        y1_view = view(y1, :, :, i:N)
        CUBLAS.gemm_strided_batched!('N', 'N', 1, J1_view, y1_view, 1, x1_view)
        CUBLAS.gemm_strided_batched!('N', 'N', 1, J2_view, y1_view, 1, x2_view)
        CUBLAS.gemm_strided_batched!('N', 'N', 1, J3_view, y1_view, 1, x3_view)
    end
    # Flatten the three batch buffers into the consecutive row blocks of the output.
    x[1:(nc * N)] .= reshape(x1, nc * N)
    x[(1 + nc * N):((nc + nsc) * N)] .= reshape(x2, nsc * N)
    x[(1 + (nc + nsc) * N):((nc + nsc + nuc) * N)] .= reshape(x3, nuc * N)
end
# In-place y = Jac' * x for the adjoint of the structured Jacobian operator (CPU method).
# Mirrors the forward multiply: block (i + j - 1, j) of the Jacobian equals stored
# block i, so its transpose scatters constraint rows back onto input stage j.
function LinearAlgebra.mul!(
    y::V,
    Jac::LinearOperators.AdjointLinearOperator{T, LQJacobianOperator{T, M, A}},
    x::V,
) where {T, V <: AbstractVector{T}, M <: AbstractMatrix{T}, A <: AbstractArray{T}}
    fill!(y, zero(T))
    jac_op = get_jacobian(Jac)
    J1 = jac_op.truncated_jac1
    J2 = jac_op.truncated_jac2
    J3 = jac_op.truncated_jac3
    N = jac_op.N
    nu = jac_op.nu
    nc = jac_op.nc
    nsc = jac_op.nsc
    nuc = jac_op.nuc
    for i = 1:N
        # No-copy views of the i-th stored block of each constraint group.
        sub_B1 = _dnlp_unsafe_wrap(J1, (nc, nu), (1 + (i - 1) * (nc * nu)))
        sub_B2 = _dnlp_unsafe_wrap(J2, (nsc, nu), (1 + (i - 1) * (nsc * nu)))
        sub_B3 = _dnlp_unsafe_wrap(J3, (nuc, nu), (1 + (i - 1) * (nuc * nu)))
        for j = 1:(N - i + 1)
            # Slices of x for the three consecutive constraint row groups.
            x1 = view(x, (1 + (j + i - 2) * nc):((j + i - 1) * nc))
            x2 = view(x, (1 + nc * N + (j + i - 2) * nsc):(nc * N + (j + i - 1) * nsc))
            x3 = view(
                x,
                (1 + nc * N + nsc * N + (j + i - 2) * nuc):(nc * N + nsc * N + (j + i - 1) * nuc),
            )
            # 5-arg mul! accumulates the transposed-block products into stage j of y.
            LinearAlgebra.mul!(view(y, (1 + nu * (j - 1)):(nu * j)), sub_B1', x1, 1, 1)
            LinearAlgebra.mul!(view(y, (1 + nu * (j - 1)):(nu * j)), sub_B2', x2, 1, 1)
            LinearAlgebra.mul!(view(y, (1 + nu * (j - 1)):(nu * j)), sub_B3', x3, 1, 1)
        end
    end
end
# CUDA method of y = Jac' * x: batched transposed GEMMs over the operator's 3-D
# scratch buffers, reducing each stage's contributions with a sum over the batch.
function LinearAlgebra.mul!(
    y::V,
    Jac::LinearOperators.AdjointLinearOperator{T, LQJacobianOperator{T, M, A}},
    x::V,
) where {
    T,
    V <: CUDA.CuArray{T, 1, CUDA.Mem.DeviceBuffer},
    M <: AbstractMatrix{T},
    A <: AbstractArray{T},
}
    fill!(y, zero(T))
    jac_op = get_jacobian(Jac)
    J1 = jac_op.truncated_jac1
    J2 = jac_op.truncated_jac2
    J3 = jac_op.truncated_jac3
    N = jac_op.N
    nu = jac_op.nu
    nc = jac_op.nc
    nsc = jac_op.nsc
    nuc = jac_op.nuc
    x1 = jac_op.x1
    x2 = jac_op.x2
    x3 = jac_op.x3
    y1 = jac_op.y
    # Reshape the three constraint row groups of x into per-stage batch slices.
    x1 .= reshape(x[1:(nc * N)], (nc, 1, N))
    x2 .= reshape(x[(1 + nc * N):((nc + nsc) * N)], (nsc, 1, N))
    x3 .= reshape(x[(1 + (nc + nsc) * N):((nc + nsc + nuc) * N)], (nuc, 1, N))
    for i = 1:N
        fill!(y1, zero(T))
        y1_view = view(y1, :, :, 1:(N - i + 1))
        x1_view = view(x1, :, :, i:N)
        x2_view = view(x2, :, :, i:N)
        x3_view = view(x3, :, :, i:N)
        J1_view = view(J1, :, :, 1:(N - i + 1))
        J2_view = view(J2, :, :, 1:(N - i + 1))
        J3_view = view(J3, :, :, 1:(N - i + 1))
        # 'T' on the first operand gives the transposed-block products.
        CUBLAS.gemm_strided_batched!('T', 'N', 1, J1_view, x1_view, 1, y1_view)
        CUBLAS.gemm_strided_batched!('T', 'N', 1, J2_view, x2_view, 1, y1_view)
        CUBLAS.gemm_strided_batched!('T', 'N', 1, J3_view, x3_view, 1, y1_view)
        # Reduce over the batch dimension to obtain input stage i of y.
        view(y, (1 + (i - 1) * nu):(i * nu)) .= sum(y1_view, dims = (2, 3))
    end
end
"""
get_jacobian(lqdm::DenseLQDynamicModel) -> LQJacobianOperator
get_jacobian(Jac::AdjointLinearOpeartor{T, LQJacobianOperator}) -> LQJacobianOperator
Gets the `LQJacobianOperator` from `DenseLQDynamicModel` (if the `QPdata` contains a `LQJacobian Operator`)
or returns the `LQJacobian Operator` from the adjoint of the `LQJacobianOperator`
"""
function get_jacobian(
lqdm::DenseLQDynamicModel{T, V, M1, M2, M3, M4, MK},
) where {T, V, M1, M2, M3, M4, MK}
return lqdm.data.A
end
function get_jacobian(
    Jac::LinearOperators.AdjointLinearOperator{T, LQJacobianOperator{T, M, A}},
) where {T, M <: AbstractMatrix{T}, A <: AbstractArray{T}}
    # Taking the adjoint of an AdjointLinearOperator recovers the parent operator.
    return Jac'
end
# Total number of stored entries across the three truncated Jacobian blocks.
function Base.length(
    Jac::LQJacobianOperator{T, M, A},
) where {T, M <: AbstractMatrix{T}, A <: AbstractArray{T}}
    return length(Jac.truncated_jac1) +
           length(Jac.truncated_jac2) +
           length(Jac.truncated_jac3)
end
# (rows, cols) of one stored block column: the row count is the stacked height of the
# three truncated blocks, the column count that of the first block.
function Base.size(
    Jac::LQJacobianOperator{T, M, A},
) where {T, M <: AbstractMatrix{T}, A <: AbstractArray{T}}
    return (
        size(Jac.truncated_jac1, 1) +
        size(Jac.truncated_jac2, 1) +
        size(Jac.truncated_jac3, 1),
        size(Jac.truncated_jac1, 2),
    )
end
# Element type of the operator is its type parameter T.
function Base.eltype(
    Jac::LQJacobianOperator{T, M, A},
) where {T, M <: AbstractMatrix{T}, A <: AbstractMatrix{T}}
    return T
end
# The operator is real iff all three stored blocks are real.
function Base.isreal(
    Jac::LQJacobianOperator{T, M, A},
) where {T, M <: AbstractMatrix{T}, A <: AbstractMatrix{T}}
    return isreal(Jac.truncated_jac1) &&
           isreal(Jac.truncated_jac2) &&
           isreal(Jac.truncated_jac3)
end
# NOTE(review): nonstandard `show` signature (no IO argument, unlike Base's
# show(io, x) convention), and only the first truncated block is shown.
function Base.show(
    Jac::LQJacobianOperator{T, M, A},
) where {T, M <: AbstractMatrix{T}, A <: AbstractMatrix{T}}
    show(Jac.truncated_jac1)
end
# NOTE(review): like `show` above, this displays only the first truncated block.
function Base.display(
    Jac::LQJacobianOperator{T, M, A},
) where {T, M <: AbstractMatrix{T}, A <: AbstractMatrix{T}}
    display(Jac.truncated_jac1)
end
"""
LinearOperators.reset!(Jac::LQJacobianOperator{T, V, M})
Resets the values of attributes `SJ1`, `SJ2`, and `SJ3` to zero
"""
function LinearOperators.reset!(
Jac::LQJacobianOperator{T, M, A},
) where {T, M <: AbstractMatrix{T}, A <: AbstractMatrix{T}}
fill!(Jac.SJ1, T(0))
fill!(Jac.SJ2, T(0))
fill!(Jac.SJ3, T(0))
end
# The Jacobian operator is precomputed and constant, so return it directly
# (`x` is unused).
function NLPModels.jac_op(
    lqdm::DenseLQDynamicModel{T, V, M1, M2, M3, M4, MK},
    x::V,
) where {T, V <: AbstractVector{T}, M1, M2 <: LQJacobianOperator, M3, M4, MK}
    return lqdm.data.A
end
"""
add_jtsj!(H::M, Jac::LQJacobianOperator{T, V, M}, Ξ£::V, alpha::Number = 1, beta::Number = 1)
Generates `Jac' Ξ£ Jac` and adds it to the matrix `H`.
`alpha` and `beta` are scalar multipliers such `beta H + alpha Jac' Ξ£ Jac` is stored in `H`, overwriting the existing value of `H`
"""
function add_jtsj!(
H::M,
Jac::LQJacobianOperator{T, M, A},
Ξ£::V,
alpha::Number = 1,
beta::Number = 1,
) where {T, V <: AbstractVector{T}, M <: AbstractMatrix{T}, A <: AbstractArray{T}}
J1 = Jac.truncated_jac1
J2 = Jac.truncated_jac2
J3 = Jac.truncated_jac3
N = Jac.N
nu = Jac.nu
nc = Jac.nc
nsc = Jac.nsc
nuc = Jac.nuc
Ξ£J1 = Jac.SJ1
Ξ£J2 = Jac.SJ2
Ξ£J3 = Jac.SJ3
LinearAlgebra.lmul!(beta, H)
for i = 1:N
left_block1 = _dnlp_unsafe_wrap(J1, (nc, nu), (1 + (i - 1) * (nc * nu)))
left_block2 = _dnlp_unsafe_wrap(J2, (nsc, nu), (1 + (i - 1) * (nsc * nu)))
left_block3 = _dnlp_unsafe_wrap(J3, (nuc, nu), (1 + (i - 1) * (nuc * nu)))
for j = 1:(N + 1 - i)
Ξ£_range1 = (1 + (N - j) * nc):((N - j + 1) * nc)
Ξ£_range2 = (1 + nc * N + (N - j) * nsc):(nc * N + (N - j + 1) * nsc)
Ξ£_range3 =
(1 + (nc + nsc) * N + (N - j) * nuc):((nc + nsc) * N + (N - j + 1) * nuc)
Ξ£J1 .= left_block1 .* view(Ξ£, Ξ£_range1)
Ξ£J2 .= left_block2 .* view(Ξ£, Ξ£_range2)
Ξ£J3 .= left_block3 .* view(Ξ£, Ξ£_range3)
for k = 1:(N - j - i + 2)
right_block1 =
_dnlp_unsafe_wrap(J1, (nc, nu), (1 + (k + i - 2) * (nc * nu)))
right_block2 =
_dnlp_unsafe_wrap(J2, (nsc, nu), (1 + (k + i - 2) * (nsc * nu)))
right_block3 =
_dnlp_unsafe_wrap(J3, (nuc, nu), (1 + (k + i - 2) * (nuc * nu)))
row_range = (1 + nu * (N - i - j + 1)):(nu * (N - i - j + 2))
col_range = (1 + nu * (N - i - k - j + 2)):(nu * (N - i - k - j + 3))
LinearAlgebra.mul!(
view(H, row_range, col_range),
Ξ£J1',
right_block1,
alpha,
1,
)
LinearAlgebra.mul!(
view(H, row_range, col_range),
Ξ£J2',
right_block2,
alpha,
1,
)
LinearAlgebra.mul!(
view(H, row_range, col_range),
Ξ£J3',
right_block3,
alpha,
1,
)
end
end
end
end
function add_jtsj!(
    H::M,
    Jac::LQJacobianOperator{T, M, A},
    Σ::V,
    alpha::Number = 1,
    beta::Number = 1,
) where {T, V <: CUDA.CuVector, M <: AbstractMatrix{T}, A <: AbstractArray{T}}
    # GPU path: same accumulation `beta*H + alpha*Jac'*Diagonal(Σ)*Jac` as the
    # CPU method, but the stored Jacobian groups are 3-D arrays indexed by
    # stage, and each product is staged through a preallocated buffer
    # (H_sub_block) instead of a 5-argument mul! into a view.
    J1 = Jac.truncated_jac1
    J2 = Jac.truncated_jac2
    J3 = Jac.truncated_jac3
    N = Jac.N      # time horizon
    nu = Jac.nu    # inputs per stage
    nc = Jac.nc    # algebraic constraints per stage
    nsc = Jac.nsc  # bounded-state rows per stage
    nuc = Jac.nuc  # bounded-input rows per stage
    # Scratch buffers for the Σ-scaled blocks and the per-pair product.
    ΣJ1 = Jac.SJ1
    ΣJ2 = Jac.SJ2
    ΣJ3 = Jac.SJ3
    H_sub_block = Jac.H_sub_block
    # Scale the destination once; the loop body then only adds.
    LinearAlgebra.lmul!(beta, H)
    for i = 1:N
        # Stage-i block of each constraint group (no copy).
        left_block1 = view(J1, :, :, i)
        left_block2 = view(J2, :, :, i)
        left_block3 = view(J3, :, :, i)
        for j = 1:(N + 1 - i)
            # Σ layout: nc*N entries for J1 rows, then nsc*N, then nuc*N.
            Σ_range1 = (1 + (N - j) * nc):((N - j + 1) * nc)
            Σ_range2 = (1 + nc * N + (N - j) * nsc):(nc * N + (N - j + 1) * nsc)
            Σ_range3 =
                (1 + (nc + nsc) * N + (N - j) * nuc):((nc + nsc) * N + (N - j + 1) * nuc)
            # Row-scale the left block by the matching Σ slice.
            ΣJ1 .= left_block1 .* view(Σ, Σ_range1)
            ΣJ2 .= left_block2 .* view(Σ, Σ_range2)
            ΣJ3 .= left_block3 .* view(Σ, Σ_range3)
            for k = 1:(N - j - i + 2)
                right_block1 = view(J1, :, :, (k + i - 1))
                right_block2 = view(J2, :, :, (k + i - 1))
                right_block3 = view(J3, :, :, (k + i - 1))
                # Destination (nu x nu) sub-block of H for this stage pair.
                row_range = (1 + nu * (N - i - j + 1)):(nu * (N - i - j + 2))
                col_range = (1 + nu * (N - i - k - j + 2)):(nu * (N - i - k - j + 3))
                # Compute each group's contribution into the buffer, then
                # add alpha times it into H.
                LinearAlgebra.mul!(H_sub_block, ΣJ1', right_block1)
                H[row_range, col_range] .+= alpha .* H_sub_block
                LinearAlgebra.mul!(H_sub_block, ΣJ2', right_block2)
                H[row_range, col_range] .+= alpha .* H_sub_block
                LinearAlgebra.mul!(H_sub_block, ΣJ3', right_block3)
                H[row_range, col_range] .+= alpha .* H_sub_block
            end
        end
    end
end
"""
reset_s0!(lqdm::SparseLQDynamicModel, s0)
reset_s0!(lqdm::DenseLQDynamicModel, s0)
Resets `s0` within `lqdm.dynamic_data`. For a `SparseLQDynamicModel`, this updates the variable bounds which fix the value of `s0`.
For a `DenseLQDynamicModel`, also resets the constraint bounds on the Jacobian and resets the linear and constant terms within the
objective function (i.e., `lqdm.data.c` and `lqdm.data.c0`). This provides a way to update the model after each sample period.
"""
function reset_s0!(
    lqdm::SparseLQDynamicModel{T, V, M1, M2, M3, MK},
    s0::V,
) where {T, V <: AbstractVector{T}, M1, M2, M3, MK}
    # Store the new initial state on the dynamic data.
    data = lqdm.dynamic_data
    nstates = data.ns
    data.s0 .= s0
    # In the sparse formulation the first `ns` decision variables are the
    # initial states; setting lvar == uvar == s0 fixes them to the new value.
    lqdm.meta.lvar[1:nstates] .= s0
    lqdm.meta.uvar[1:nstates] .= s0
end
# DenseLQDynamicModel method (no K matrix). Because the states were condensed
# out of the dense formulation, resetting `s0` must also refresh the Jacobian
# constraint bounds and the linear/constant objective terms.
# Fixes: removed the copy-paste assignment `F = dnlp.E` (it shadowed the real
# `dnlp.F` and was dead) together with the unused locals `nu`, `ul`, `uu`.
function reset_s0!(
    lqdm::DenseLQDynamicModel{T, V, M1, M2, M3, M4, MK},
    s0::V,
) where {T, V <: AbstractVector{T}, M1, M2, M3, M4, MK <: Nothing}
    dnlp = lqdm.dynamic_data
    dense_blocks = lqdm.blocks
    N = dnlp.N
    ns = dnlp.ns
    E = dnlp.E
    sl = dnlp.sl
    su = dnlp.su
    gl = dnlp.gl
    gu = dnlp.gu
    nc = size(E, 1)  # number of E/F constraints per time step
    # Precomputed condensed blocks that carry the influence of s0.
    block_A = dense_blocks.A
    block_Aw = dense_blocks.Aw
    block_h = dense_blocks.h
    block_h0 = dense_blocks.h01
    block_d = dense_blocks.d
    block_dw = dense_blocks.dw
    block_h02 = dense_blocks.h02
    h_constant = dense_blocks.h_constant
    h0_constant = dense_blocks.h0_constant
    lcon = lqdm.meta.lcon
    ucon = lqdm.meta.ucon
    # Store the new initial state.
    lqdm.dynamic_data.s0 .= s0
    As0 = _init_similar(s0, ns * (N + 1), T)
    Qs0 = _init_similar(s0, ns, T)
    dl = repeat(gl, N)
    du = repeat(gu, N)
    # Only states with at least one finite bound appear as constraint rows.
    bool_vec_s = (sl .!= -Inf .|| su .!= Inf)
    nsc = sum(bool_vec_s)
    sl = sl[bool_vec_s]
    su = su[bool_vec_s]
    # dl/du <- g - d * s0
    LinearAlgebra.mul!(dl, block_d, s0, -1, 1)
    LinearAlgebra.mul!(du, block_d, s0, -1, 1)
    # Reset constraint bounds corresponding to the E and F matrices.
    lcon[1:(nc * N)] .= dl
    ucon[1:(nc * N)] .= du
    lcon[1:(nc * N)] .-= block_dw
    ucon[1:(nc * N)] .-= block_dw
    # Contribution of the new initial state to the predicted trajectory.
    LinearAlgebra.mul!(As0, block_A, s0)
    # Reset the linear objective term: c = h * s0 + h_constant.
    LinearAlgebra.mul!(lqdm.data.c, block_h, s0)
    lqdm.data.c += h_constant
    # Reset the constant objective term:
    # c0 = s0' h01 s0 / 2 + h0_constant + s0' h02.
    LinearAlgebra.mul!(Qs0, block_h0, s0)
    lqdm.data.c0 = LinearAlgebra.dot(s0, Qs0) / T(2)
    lqdm.data.c0 += h0_constant
    lqdm.data.c0 += LinearAlgebra.dot(s0, block_h02)
    for i = 1:N
        # Reset bounds on constraints arising from state variable bounds.
        lcon[(1 + nc * N + nsc * (i - 1)):(nc * N + nsc * i)] .=
            sl .- As0[(1 + ns * i):(ns * (i + 1))][bool_vec_s] .-
            block_Aw[(1 + ns * i):((i + 1) * ns)][bool_vec_s]
        ucon[(1 + nc * N + nsc * (i - 1)):(nc * N + nsc * i)] .=
            su .- As0[(1 + ns * i):(ns * (i + 1))][bool_vec_s] .-
            block_Aw[(1 + ns * i):((i + 1) * ns)][bool_vec_s]
    end
    return nothing
end
# DenseLQDynamicModel method with a feedback matrix K. In addition to the
# state-bound rows, the input bounds become constraint rows (u = K s + v), so
# their bounds must also be refreshed from the new s0.
# Fixes: removed the copy-paste assignment `F = dnlp.E` (shadowed `dnlp.F`,
# dead), the unused local `K`, and a dead store — the early
# `lqdm.data.c0 += dot(s0, block_h02)` was unconditionally overwritten by the
# `lqdm.data.c0 = ...` assignment further down.
function reset_s0!(
    lqdm::DenseLQDynamicModel{T, V, M1, M2, M3, M4, MK},
    s0::V,
) where {T, V <: AbstractVector{T}, M1, M2, M3, M4, MK <: AbstractMatrix{T}}
    dnlp = lqdm.dynamic_data
    dense_blocks = lqdm.blocks
    N = dnlp.N
    ns = dnlp.ns
    nu = dnlp.nu
    E = dnlp.E
    ul = dnlp.ul
    uu = dnlp.uu
    sl = dnlp.sl
    su = dnlp.su
    gl = dnlp.gl
    gu = dnlp.gu
    nc = size(E, 1)  # number of E/F constraints per time step
    # Precomputed condensed blocks that carry the influence of s0.
    block_A = dense_blocks.A
    block_Aw = dense_blocks.Aw
    block_h = dense_blocks.h
    block_h0 = dense_blocks.h01
    block_d = dense_blocks.d
    block_dw = dense_blocks.dw
    block_KA = dense_blocks.KA
    block_KAw = dense_blocks.KAw
    block_h02 = dense_blocks.h02
    h_constant = dense_blocks.h_constant
    h0_constant = dense_blocks.h0_constant
    lcon = lqdm.meta.lcon
    ucon = lqdm.meta.ucon
    # Store the new initial state.
    lqdm.dynamic_data.s0 .= s0
    As0 = _init_similar(s0, ns * (N + 1), T)
    Qs0 = _init_similar(s0, ns, T)
    KAs0 = _init_similar(s0, nu * N, T)
    dl = repeat(gl, N)
    du = repeat(gu, N)
    # Only variables with at least one finite bound appear as constraint rows.
    bool_vec_s = (sl .!= -Inf .|| su .!= Inf)
    nsc = sum(bool_vec_s)
    bool_vec_u = (ul .!= -Inf .|| uu .!= Inf)
    nuc = sum(bool_vec_u)
    sl = sl[bool_vec_s]
    su = su[bool_vec_s]
    ul = ul[bool_vec_u]
    uu = uu[bool_vec_u]
    # dl/du <- g - d * s0
    LinearAlgebra.mul!(dl, block_d, s0, -1, 1)
    LinearAlgebra.mul!(du, block_d, s0, -1, 1)
    # Reset constraint bounds corresponding to the E and F matrices.
    lcon[1:(nc * N)] .= dl
    ucon[1:(nc * N)] .= du
    lcon[1:(nc * N)] .-= block_dw
    ucon[1:(nc * N)] .-= block_dw
    # Contributions of the new initial state to the state and K*state paths.
    LinearAlgebra.mul!(As0, block_A, s0)
    LinearAlgebra.mul!(KAs0, block_KA, s0)
    # Reset the linear objective term: c = h * s0 + h_constant.
    LinearAlgebra.mul!(lqdm.data.c, block_h, s0)
    lqdm.data.c += h_constant
    # Reset the constant objective term:
    # c0 = s0' h01 s0 / 2 + h0_constant + s0' h02.
    LinearAlgebra.mul!(Qs0, block_h0, s0)
    lqdm.data.c0 = LinearAlgebra.dot(s0, Qs0) / T(2)
    lqdm.data.c0 += h0_constant
    lqdm.data.c0 += LinearAlgebra.dot(s0, block_h02)
    for i = 1:N
        # Reset bounds on constraints from state variable bounds.
        lcon[(1 + nc * N + nsc * (i - 1)):(nc * N + nsc * i)] .=
            sl .- As0[(1 + ns * i):(ns * (i + 1))][bool_vec_s] .-
            block_Aw[(1 + i * ns):((i + 1) * ns)][bool_vec_s]
        ucon[(1 + nc * N + nsc * (i - 1)):(nc * N + nsc * i)] .=
            su .- As0[(1 + ns * i):(ns * (i + 1))][bool_vec_s] .-
            block_Aw[(1 + i * ns):((i + 1) * ns)][bool_vec_s]
        # Reset bounds on constraints from input variable bounds.
        lcon[(1 + (nc + nsc) * N + nuc * (i - 1)):((nc + nsc) * N + nuc * i)] .=
            ul .- KAs0[(1 + nu * (i - 1)):(nu * i)][bool_vec_u] .-
            block_KAw[(1 + nu * (i - 1)):(nu * i)][bool_vec_u]
        ucon[(1 + (nc + nsc) * N + nuc * (i - 1)):((nc + nsc) * N + nuc * i)] .=
            uu .- KAs0[(1 + nu * (i - 1)):(nu * i)][bool_vec_u] .-
            block_KAw[(1 + nu * (i - 1)):(nu * i)][bool_vec_u]
    end
    return nothing
end
| DynamicNLPModels | https://github.com/MadNLP/DynamicNLPModels.jl.git |
[
"MIT"
] | 0.1.0 | 4c035c67eee91a19afdc5db63eb71464c5db32ba | test/functions.jl | code | 6953 | function test_mul(lq_dense, lq_dense_imp)
    # Check that the implicit (LQJacobianOperator) Jacobian products match the
    # explicit dense Jacobian: first J * x, then the adjoint product J' * y.
    dnlp = lq_dense.dynamic_data
    N = dnlp.N
    nu = dnlp.nu
    J = get_jacobian(lq_dense)
    J_imp = get_jacobian(lq_dense_imp)
    Random.seed!(10)
    x = rand(nu * N)
    y = rand(size(J, 1))
    # Mirror x and y in the implicit model's array type (possibly on GPU).
    x_imp = similar(lq_dense_imp.dynamic_data.s0, length(x))
    y_imp = similar(lq_dense_imp.dynamic_data.s0, length(y))
    LinearAlgebra.copyto!(x_imp, x)
    LinearAlgebra.copyto!(y_imp, y)
    LinearAlgebra.mul!(y, J, x)
    LinearAlgebra.mul!(y_imp, J_imp, x_imp)
    @test y ≈ Vector(y_imp) atol = 1e-14
    # Fresh random vectors for the adjoint product J' * y.
    x = rand(nu * N)
    y = rand(size(J, 1))
    x_imp = similar(lq_dense_imp.dynamic_data.s0, length(x))
    y_imp = similar(lq_dense_imp.dynamic_data.s0, length(y))
    LinearAlgebra.copyto!(x_imp, x)
    LinearAlgebra.copyto!(y_imp, y)
    LinearAlgebra.mul!(x, J', y)
    LinearAlgebra.mul!(x_imp, J_imp', y_imp)
    @test x ≈ Vector(x_imp) atol = 1e-14
end
function test_add_jtsj(lq_dense, lq_dense_imp)
    # Verify add_jtsj! against an explicit H = J' * Diagonal(sigma) * J.
    dyn = lq_dense.dynamic_data
    nvar = dyn.nu * dyn.N
    H_ref = zeros(nvar, nvar)
    Random.seed!(10)
    jac_ref = get_jacobian(lq_dense)
    jac_imp = get_jacobian(lq_dense_imp)
    scaled_jac = similar(jac_ref)
    fill!(scaled_jac, 0)
    sigma = rand(size(jac_ref, 1))
    # Implicit-side buffers in the implicit model's array type.
    H_imp = similar(lq_dense_imp.data.H, nvar, nvar)
    fill!(H_imp, 0)
    sigma_imp = similar(lq_dense_imp.dynamic_data.s0, length(sigma))
    LinearAlgebra.copyto!(sigma_imp, sigma)
    # Reference product built explicitly.
    LinearAlgebra.mul!(scaled_jac, Diagonal(sigma), jac_ref)
    LinearAlgebra.mul!(H_ref, jac_ref', scaled_jac)
    # Implicit accumulation into the zeroed buffer.
    add_jtsj!(H_imp, jac_imp, sigma_imp)
    # Only the lower triangle is compared.
    @test LowerTriangular(Array(H_imp)) ≈ LowerTriangular(H_ref) atol = 1e-10
end
function dynamic_data_to_CUDA(dnlp::LQDynamicData)
    # Copy a host array into a freshly allocated Float64 CUDA array of the
    # same shape (1-D inputs become CuVectors).
    function to_device(host)
        dev = CuArray{Float64}(undef, size(host))
        LinearAlgebra.copyto!(dev, host)
        return dev
    end
    s0c = to_device(dnlp.s0)
    Ac = to_device(dnlp.A)
    Bc = to_device(dnlp.B)
    Qc = to_device(dnlp.Q)
    Rc = to_device(dnlp.R)
    Sc = to_device(dnlp.S)
    Ec = to_device(dnlp.E)
    Fc = to_device(dnlp.F)
    wc = to_device(dnlp.w)
    Qfc = to_device(dnlp.Qf)
    glc = to_device(dnlp.gl)
    guc = to_device(dnlp.gu)
    ulc = to_device(dnlp.ul)
    uuc = to_device(dnlp.uu)
    slc = to_device(dnlp.sl)
    suc = to_device(dnlp.su)
    # K is optional; move it to the device only when present.
    Kc = dnlp.K === nothing ? nothing : to_device(dnlp.K)
    return LQDynamicData(
        s0c, Ac, Bc, Qc, Rc, dnlp.N;
        Qf = Qfc, S = Sc, E = Ec, F = Fc, K = Kc,
        sl = slc, su = suc, ul = ulc, uu = uuc,
        gl = glc, gu = guc, w = wc,
    )
end
function test_sparse_support(lqdm)
    # Rebuild the model from sparse copies of the problem matrices; the
    # resulting QP data must match the model built from the original inputs.
    dd = lqdm.dynamic_data
    K_sparse = dd.K === nothing ? nothing : sparse(dd.K)
    rebuilt = SparseLQDynamicModel(
        dd.s0, sparse(dd.A), sparse(dd.B), sparse(dd.Q), sparse(dd.R), dd.N;
        sl = dd.sl, ul = dd.ul, su = dd.su, uu = dd.uu,
        Qf = sparse(dd.Qf), K = K_sparse, S = sparse(dd.S),
        E = sparse(dd.E), F = sparse(dd.F), gl = dd.gl, gu = dd.gu,
    )
    @test lqdm.data.H ≈ rebuilt.data.H atol = 1e-10
    @test lqdm.data.A ≈ rebuilt.data.A atol = 1e-10
end
function test_dense_reset_s0(dnlp, lq_dense, new_s0)
    # Build a model, reset its initial state in place, and check it agrees
    # with a model constructed from scratch after mutating dnlp.s0.
    model_reset = DenseLQDynamicModel(dnlp)
    dnlp.s0 .= new_s0
    model_rebuilt = DenseLQDynamicModel(dnlp)
    reset_s0!(model_reset, new_s0)
    @test model_reset.data.H ≈ model_rebuilt.data.H atol = 1e-10
    @test model_reset.data.A ≈ model_rebuilt.data.A atol = 1e-10
    @test model_reset.data.c ≈ model_rebuilt.data.c atol = 1e-10
    @test model_reset.data.c0 ≈ model_rebuilt.data.c0 atol = 1e-10
    @test model_reset.meta.lcon ≈ model_rebuilt.meta.lcon atol = 1e-8
    @test model_reset.meta.ucon ≈ model_rebuilt.meta.ucon atol = 1e-8
    @test model_reset.dynamic_data.s0 == model_rebuilt.dynamic_data.s0
end
# After resetting a sparse model, the stored initial state must equal the
# new value (bounds fixing is covered by the solver-level tests).
function test_sparse_reset_s0(dnlp, lq_sparse, new_s0)
    reset_s0!(lq_sparse, new_s0)
    @test lq_sparse.dynamic_data.s0 == new_s0
end
# Solve the JuMP reference model and all four DynamicNLPModels formulations;
# every objective and solution must agree with the JuMP optimum. Also runs
# the sparse-input, implicit-Jacobian, and reset_s0! checks.
function runtests(model, dnlp, lq_sparse, lq_dense, lq_sparse_from_data, lq_dense_from_data, N, ns, nu)
    optimize!(model)
    solution_ref_sparse = madnlp(lq_sparse, max_iter=100)
    solution_ref_dense = madnlp(lq_dense, max_iter=100)
    solution_ref_sparse_from_data = madnlp(lq_sparse_from_data, max_iter=100)
    solution_ref_dense_from_data = madnlp(lq_dense_from_data, max_iter=100)
    @test objective_value(model) ≈ solution_ref_sparse.objective atol = 1e-7
    @test objective_value(model) ≈ solution_ref_dense.objective atol = 1e-5
    @test objective_value(model) ≈ solution_ref_sparse_from_data.objective atol = 1e-7
    @test objective_value(model) ≈ solution_ref_dense_from_data.objective atol = 1e-5
    # The dense models only carry the inputs, so compare them against the
    # input segment of the sparse solution vector.
    @test solution_ref_sparse.solution[(ns * (N + 1) + 1):(ns * (N + 1) + nu*N)] ≈ solution_ref_dense.solution atol = 1e-5
    @test solution_ref_sparse_from_data.solution[(ns * (N + 1) + 1):(ns * (N + 1) + nu*N)] ≈ solution_ref_dense_from_data.solution atol = 1e-5
    # Test get_u and get_s functions with no K matrix
    s_values = value.(all_variables(model)[1:(ns * (N + 1))])
    u_values = value.(all_variables(model)[(1 + ns * (N + 1)):(ns * (N + 1) + nu * N)])
    @test s_values ≈ get_s(solution_ref_sparse, lq_sparse) atol = 1e-6
    @test u_values ≈ get_u(solution_ref_sparse, lq_sparse) atol = 1e-6
    @test s_values ≈ get_s(solution_ref_dense, lq_dense) atol = 2e-5
    @test u_values ≈ get_u(solution_ref_dense, lq_dense) atol = 2e-5
    test_sparse_support(lq_sparse)
    # Implicit-Jacobian dense model; add a CUDA copy when a GPU is present.
    lq_dense_imp = DenseLQDynamicModel(dnlp; implicit = true)
    imp_test_set = []
    push!(imp_test_set, lq_dense_imp)
    if CUDA.has_cuda_gpu()
        dnlp_cuda = dynamic_data_to_CUDA(dnlp)
        lq_dense_cuda = DenseLQDynamicModel(dnlp_cuda; implicit=true)
        push!(imp_test_set, lq_dense_cuda)
    end
    @testset "Test mul and add_jtsj!" for lq_imp in imp_test_set
        test_mul(lq_dense, lq_imp)
        test_add_jtsj(lq_dense, lq_imp)
    end
    # reset_s0! round trips (dense mutates dnlp.s0, so use distinct offsets).
    new_s0 = copy(dnlp.s0) .+ .5
    test_dense_reset_s0(dnlp, lq_dense, new_s0)
    new_s0 = copy(dnlp.s0) .+ 1
    test_sparse_reset_s0(dnlp, lq_sparse, new_s0)
end
| DynamicNLPModels | https://github.com/MadNLP/DynamicNLPModels.jl.git |
[
"MIT"
] | 0.1.0 | 4c035c67eee91a19afdc5db63eb71464c5db32ba | test/runtests.jl | code | 13163 | using Test, DynamicNLPModels, MadNLP, Random, JuMP, LinearAlgebra, SparseArrays, CUDA
include("sparse_lq_test.jl")
include("functions.jl")
N = 3 # number of time steps
ns = 2 # number of states
nu = 1 # number of inputs
# generate random Q, R, A, and B matrices
# (X * X' + I makes Q, R, A symmetric positive definite)
Random.seed!(10)
Q_rand = Random.rand(ns, ns)
Q = Q_rand * Q_rand' + I
R_rand = Random.rand(nu,nu)
R = R_rand * R_rand' + I
A_rand = rand(ns, ns)
A = A_rand * A_rand' + I
B = rand(ns, nu)
# generate upper and lower bounds
sl = rand(ns)
ul = fill(-15.0, nu)
su = sl .+ 4
uu = ul .+ 10
s0 = sl .+ 2
# variants with one unbounded state, for the partial-bounds edge case
su_with_inf = copy(su)
sl_with_inf = copy(sl)
su_with_inf[1] = Inf
sl_with_inf[1] = -Inf
Qf_rand = Random.rand(ns,ns)
Qf = Qf_rand * Qf_rand' + I
# E/F constraint data, cross term S, disturbance w, feedback K
E = rand(3, ns)
F = rand(3, nu)
gl = fill(-5.0, 3)
gu = fill(15.0, 3)
S = rand(ns, nu)
w = rand(0.0:.0001:.25, ns * N)
K = rand(nu, ns)
# Each section below builds the JuMP reference model plus the four
# DynamicNLPModels formulations for one combination of optional data,
# then runs the shared `runtests` battery against them.
# Test with no bounds
model = build_QP_JuMP_model(Q,R,A,B, N;s0=s0, w = w)
dnlp = LQDynamicData(copy(s0), A, B, Q, R, N; w = w)
lq_sparse = SparseLQDynamicModel(dnlp)
lq_dense = DenseLQDynamicModel(dnlp)
lq_sparse_from_data = SparseLQDynamicModel(s0, A, B, Q, R, N; w = w)
lq_dense_from_data = DenseLQDynamicModel(s0, A, B, Q, R, N; w = w)
runtests(model, dnlp, lq_sparse, lq_dense, lq_sparse_from_data, lq_dense_from_data, N, ns, nu)
# Test with lower bounds
model = build_QP_JuMP_model(Q,R,A,B, N;s0=s0, sl = sl, ul = ul, w = w)
dnlp = LQDynamicData(copy(s0), A, B, Q, R, N; sl = sl, ul = ul, w = w)
lq_sparse = SparseLQDynamicModel(dnlp)
lq_dense = DenseLQDynamicModel(dnlp)
lq_sparse_from_data = SparseLQDynamicModel(s0, A, B, Q, R, N; sl = sl, ul = ul, w = w)
lq_dense_from_data = DenseLQDynamicModel(s0, A, B, Q, R, N; sl = sl, ul = ul, w = w)
runtests(model, dnlp, lq_sparse, lq_dense, lq_sparse_from_data, lq_dense_from_data, N, ns, nu)
# Test with upper bounds
model = build_QP_JuMP_model(Q,R,A,B, N;s0=s0, su = su, uu = uu, w = w)
dnlp = LQDynamicData(copy(s0), A, B, Q, R, N; su = su, uu = uu, w = w)
lq_sparse = SparseLQDynamicModel(dnlp)
lq_dense = DenseLQDynamicModel(dnlp)
lq_sparse_from_data = SparseLQDynamicModel(s0, A, B, Q, R, N; su = su, uu = uu, w = w)
lq_dense_from_data = DenseLQDynamicModel(s0, A, B, Q, R, N; su = su, uu = uu, w = w)
runtests(model, dnlp, lq_sparse, lq_dense, lq_sparse_from_data, lq_dense_from_data, N, ns, nu)
# Test with upper and lower bounds
model = build_QP_JuMP_model(Q,R,A,B, N;s0=s0, sl = sl, ul = ul, su = su, uu = uu, w = w)
dnlp = LQDynamicData(copy(s0), A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, w = w)
lq_sparse = SparseLQDynamicModel(dnlp)
lq_dense = DenseLQDynamicModel(dnlp)
lq_sparse_from_data = SparseLQDynamicModel(s0, A, B, Q, R, N; sl=sl, ul=ul, su = su, uu = uu, w = w)
lq_dense_from_data = DenseLQDynamicModel(s0, A, B, Q, R, N; sl=sl, ul=ul, su = su, uu = uu, w = w)
runtests(model, dnlp, lq_sparse, lq_dense, lq_sparse_from_data, lq_dense_from_data, N, ns, nu)
# Test with Qf matrix
model = build_QP_JuMP_model(Q,R,A,B, N;s0=s0, sl = sl, ul = ul, su = su, uu = uu, Qf = Qf, w = w)
dnlp = LQDynamicData(copy(s0), A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, Qf = Qf, w = w)
lq_sparse = SparseLQDynamicModel(dnlp)
lq_dense = DenseLQDynamicModel(dnlp)
lq_sparse_from_data = SparseLQDynamicModel(s0, A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, Qf = Qf, w = w)
lq_dense_from_data = DenseLQDynamicModel(s0, A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, Qf = Qf, w = w)
runtests(model, dnlp, lq_sparse, lq_dense, lq_sparse_from_data, lq_dense_from_data, N, ns, nu)
# Test with E and F matrix bounds
model = build_QP_JuMP_model(Q,R,A,B, N;s0=s0, sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, w = w)
dnlp = LQDynamicData(copy(s0), A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, w = w)
lq_sparse = SparseLQDynamicModel(dnlp)
lq_dense = DenseLQDynamicModel(dnlp)
lq_sparse_from_data = SparseLQDynamicModel(s0, A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, w = w)
lq_dense_from_data = DenseLQDynamicModel(s0, A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, w = w)
runtests(model, dnlp, lq_sparse, lq_dense, lq_sparse_from_data, lq_dense_from_data, N, ns, nu)
# Test edge case where one state is unbounded, other(s) is bounded
model = build_QP_JuMP_model(Q,R,A,B, N;s0=s0, sl = sl_with_inf, ul = ul, su = su_with_inf, uu = uu, E = E, F = F, gl = gl, gu = gu, w = w)
dnlp = LQDynamicData(copy(s0), A, B, Q, R, N; sl = sl_with_inf, ul = ul, su = su_with_inf, uu = uu, E = E, F = F, gl = gl, gu = gu, w = w)
lq_sparse = SparseLQDynamicModel(dnlp)
lq_dense = DenseLQDynamicModel(dnlp)
lq_sparse_from_data = SparseLQDynamicModel(s0, A, B, Q, R, N; sl = sl_with_inf, ul = ul, su = su_with_inf, uu = uu, E = E, F = F, gl = gl, gu = gu, w = w)
lq_dense_from_data = DenseLQDynamicModel(s0, A, B, Q, R, N; sl = sl_with_inf, ul = ul, su = su_with_inf, uu = uu, E = E, F = F, gl = gl, gu = gu, w = w)
runtests(model, dnlp, lq_sparse, lq_dense, lq_sparse_from_data, lq_dense_from_data, N, ns, nu)
# The dense Jacobian row count must reflect only the variables with at
# least one finite bound.
@test size(lq_dense.data.A, 1) == size(E, 1) * 3 + sum(su_with_inf .!= Inf .|| sl_with_inf .!= -Inf) * N
# Test S matrix case
model = build_QP_JuMP_model(Q,R,A,B, N;s0=s0, sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, S = S, w = w)
dnlp = LQDynamicData(copy(s0), A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, S = S, w = w)
lq_sparse = SparseLQDynamicModel(dnlp)
lq_dense = DenseLQDynamicModel(dnlp)
lq_sparse_from_data = SparseLQDynamicModel(s0, A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, S = S, w = w)
lq_dense_from_data = DenseLQDynamicModel(s0, A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, S = S, w = w)
runtests(model, dnlp, lq_sparse, lq_dense, lq_sparse_from_data, lq_dense_from_data, N, ns, nu)
# Test K matrix case without S
model = build_QP_JuMP_model(Q,R,A,B, N;s0=s0, sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, K = K, w = w)
dnlp = LQDynamicData(copy(s0), A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, K = K, w = w)
lq_sparse = SparseLQDynamicModel(dnlp)
lq_dense = DenseLQDynamicModel(dnlp)
lq_sparse_from_data = SparseLQDynamicModel(s0, A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, K = K, w = w)
lq_dense_from_data = DenseLQDynamicModel(s0, A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, K = K, w = w)
runtests(model, dnlp, lq_sparse, lq_dense, lq_sparse_from_data, lq_dense_from_data, N, ns, nu)
# Test K matrix case with S
model = build_QP_JuMP_model(Q,R,A,B, N;s0=s0, sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, K = K, S = S, w = w)
dnlp = LQDynamicData(copy(s0), A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, K = K, S = S, w = w)
lq_sparse = SparseLQDynamicModel(dnlp)
lq_dense = DenseLQDynamicModel(dnlp)
lq_sparse_from_data = SparseLQDynamicModel(s0, A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, K = K, S = S, w = w)
lq_dense_from_data = DenseLQDynamicModel(s0, A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, K = K, S = S, w = w)
runtests(model, dnlp, lq_sparse, lq_dense, lq_sparse_from_data, lq_dense_from_data, N, ns, nu)
# Test K matrix case with S and with partial bounds on u
nu = 2 # number of inputs
# generate random Q, R, A, and B matrices
Random.seed!(3)
R_rand = Random.rand(nu,nu)
R = R_rand * transpose(R_rand) + I
B = rand(ns, nu)
# generate upper and lower bounds
ul = fill(-20.0, nu)
uu = ul .+ 30
ul_with_inf = copy(ul)
uu_with_inf = copy(uu)
uu_with_inf[1] = Inf
ul_with_inf[1] = -Inf
F = rand(3, nu)
S = rand(ns, nu)
K = rand(nu, ns)
model = build_QP_JuMP_model(Q,R,A,B, N;s0=s0, sl = sl, ul = ul_with_inf, su = su, uu = uu_with_inf, E = E, F = F, gl = gl, gu = gu, K = K, S = S, w = w)
dnlp = LQDynamicData(copy(s0), A, B, Q, R, N; sl = sl, ul = ul_with_inf, su = su, uu = uu_with_inf, E = E, F = F, gl = gl, gu = gu, K = K, S = S, w = w)
lq_sparse = SparseLQDynamicModel(dnlp)
lq_dense = DenseLQDynamicModel(dnlp)
lq_sparse_from_data = SparseLQDynamicModel(s0, A, B, Q, R, N; sl = sl, ul = ul_with_inf, su = su, uu = uu_with_inf, E = E, F = F, gl = gl, gu = gu, K = K, S = S, w = w)
lq_dense_from_data = DenseLQDynamicModel(s0, A, B, Q, R, N; sl = sl, ul = ul_with_inf, su = su, uu = uu_with_inf, E = E, F = F, gl = gl, gu = gu, K = K, S = S, w = w)
runtests(model, dnlp, lq_sparse, lq_dense, lq_sparse_from_data, lq_dense_from_data, N, ns, nu)
# Test K with no bounds
model = build_QP_JuMP_model(Q,R,A,B, N;s0=s0, E = E, F = F, gl = gl, gu = gu, K = K, w = w)
dnlp = LQDynamicData(copy(s0), A, B, Q, R, N; E = E, F = F, gl = gl, gu = gu, K = K, w = w)
lq_sparse = SparseLQDynamicModel(dnlp)
lq_dense = DenseLQDynamicModel(dnlp)
lq_sparse_from_data = SparseLQDynamicModel(s0, A, B, Q, R, N; E = E, F = F, gl = gl, gu = gu, K = K, w = w)
lq_dense_from_data = DenseLQDynamicModel(s0, A, B, Q, R, N; E = E, F = F, gl = gl, gu = gu, K = K, w = w)
runtests(model, dnlp, lq_sparse, lq_dense, lq_sparse_from_data, lq_dense_from_data, N, ns, nu)
# Test get_* and set_* functions
dnlp = LQDynamicData(copy(s0), A, B, Q, R, N; sl = sl, ul = ul, su = su, uu = uu, E = E, F = F, gl = gl, gu = gu, K = K, S = S, w = w)
lq_sparse = SparseLQDynamicModel(dnlp)
lq_dense = DenseLQDynamicModel(dnlp)
@test get_A(dnlp) == A
@test get_A(lq_sparse) == A
@test get_A(lq_dense) == A
# set_Q! must mutate exactly one entry of Q on each wrapper type.
rand_val = rand()
Qtest = copy(Q)
Qtest[1, 2] = rand_val
set_Q!(dnlp, 1,2, rand_val)
@test get_Q(dnlp) == Qtest
Qtest[1, 1] = rand_val
set_Q!(lq_sparse, 1, 1, rand_val)
@test get_Q(lq_sparse) == Qtest
Qtest[2, 1] = rand_val
set_Q!(lq_dense, 2, 1, rand_val)
@test get_Q(lq_dense) == Qtest
# set_gl! must mutate exactly one entry of gl on each wrapper type.
rand_val = rand()
gltest = copy(gl)
gltest[1] = rand_val
set_gl!(dnlp, 1, rand_val)
@test get_gl(dnlp) == gltest
gltest[2] = rand_val
set_gl!(lq_sparse, 2, rand_val)
@test get_gl(lq_sparse) == gltest
gltest[3] = rand_val
set_gl!(lq_dense, 3, rand_val)
@test get_gl(lq_dense) == gltest
# Test non-default vector/matrix on GenericArrays
# (Float32 data wrapped in Test.GenericArray exercises the generic code paths)
s0 = randn(Float32,2)
A = randn(Float32,2,2)
B = randn(Float32,2,2)
Q = randn(Float32,2,2)
R = randn(Float32,2,2)
S = randn(Float32,2,2)
K = randn(Float32,2,2)
E = randn(Float32,2,2)
F = randn(Float32,2,2)
gl = randn(Float32,2)
gu = gl .+ 2
sl = s0 .- 1
su = s0 .+ 1
ul = randn(Float32,2)
uu = ul .+ 2
w = Float32.(rand(0.0:.0001:.25, ns * 10))
s0 = Test.GenericArray(s0)
A = Test.GenericArray(A)
B = Test.GenericArray(B)
Q = Test.GenericArray(Q)
R = Test.GenericArray(R)
S = Test.GenericArray(S)
K = Test.GenericArray(K)
E = Test.GenericArray(E)
F = Test.GenericArray(F)
gl = Test.GenericArray(gl)
gu = Test.GenericArray(gu)
sl = Test.GenericArray(sl)
su = Test.GenericArray(su)
ul = Test.GenericArray(ul)
uu = Test.GenericArray(uu)
w = Test.GenericArray(w)
# Constructors must propagate the user's element and array types; the final
# type parameter distinguishes models with and without a K matrix.
@test (DenseLQDynamicModel(s0, A, B, Q, R, 10; S = S, E = E, F = F, gl = gl, gu = gu, ul = ul, uu = uu, sl = sl, su = su, w = w) isa
    DenseLQDynamicModel{Float32, GenericArray{Float32, 1}, GenericArray{Float32, 2}, GenericArray{Float32, 2}, GenericArray{Float32, 2}, GenericArray{Float32, 2}, Nothing})
@test (DenseLQDynamicModel(s0, A, B, Q, R, 10; K = K, S = S, E = E, F = F, gl = gl, gu = gu, ul = ul, uu = uu, sl = sl, su = su, w = w) isa
    DenseLQDynamicModel{Float32, GenericArray{Float32, 1}, GenericArray{Float32, 2}, GenericArray{Float32, 2}, GenericArray{Float32, 2}, GenericArray{Float32, 2}, GenericArray{Float32, 2}})
@test (SparseLQDynamicModel(s0, A, B, Q, R, 10; S = S, E = E, F = F, gl = gl, gu = gu, ul = ul, uu = uu, sl = sl, su = su, w = w) isa
    SparseLQDynamicModel{Float32, GenericArray{Float32, 1}, SparseMatrixCSC{Float32, Int64}, SparseMatrixCSC{Float32, Int64}, GenericArray{Float32, 2}, Nothing})
@test (SparseLQDynamicModel(s0, A, B, Q, R, 10; K = K, S = S, E = E, F = F, gl = gl, gu = gu, ul = ul, uu = uu, sl = sl, su = su, w = w) isa
    SparseLQDynamicModel{Float32, GenericArray{Float32, 1}, SparseMatrixCSC{Float32, Int64}, SparseMatrixCSC{Float32, Int64}, GenericArray{Float32, 2}, GenericArray{Float32, 2}})
# Test LQJacobianOperator APIs
# (length/size/isreal/eltype must aggregate over the three truncated blocks)
lq_dense_imp = DenseLQDynamicModel(dnlp; implicit=true)
@test length(get_jacobian(lq_dense_imp)) == (length(get_jacobian(lq_dense_imp).truncated_jac1)
    + length(get_jacobian(lq_dense_imp).truncated_jac2) + length(get_jacobian(lq_dense_imp).truncated_jac3))
(@test size(get_jacobian(lq_dense_imp)) == (size(get_jacobian(lq_dense_imp).truncated_jac1, 1) + size(get_jacobian(lq_dense_imp).truncated_jac2, 1)
    + size(get_jacobian(lq_dense_imp).truncated_jac3, 1), size(get_jacobian(lq_dense_imp).truncated_jac1, 2)))
@test isreal(get_jacobian(lq_dense_imp)) == isreal(get_jacobian(lq_dense_imp).truncated_jac1)
@test eltype(get_jacobian(lq_dense_imp)) == eltype(get_jacobian(lq_dense_imp).truncated_jac1)
| DynamicNLPModels | https://github.com/MadNLP/DynamicNLPModels.jl.git |
[
"MIT"
] | 0.1.0 | 4c035c67eee91a19afdc5db63eb71464c5db32ba | test/sparse_lq_test.jl | code | 3816 | """
build_QP_JuMP_model(Q,R,A,B,N;...) -> JuMP.Model(...)
Return a `JuMP.jl` Model for the quadratic problem
min 1/2 ( sum_{i=1}^{N-1} s_i^T Q s + sum_{i=1}^{N-1} u^T R u + s_N^T Qf s_n )
s.t. s_{i+1} = As_i + Bs_i for i = 1,..., N-1
Optional Arguments
- `Qf = []`: matrix multiplied by s_N in objective function (defaults to Q if not given)
- `c = zeros(N*size(Q,1) + N*size(R,1))`: linear term added to the objective function, c^T z
- `sl = fill(-Inf, size(Q,1))`: lower bound on state variables
- `su = fill(Inf, size(Q,1))`: upper bound on state variables
- `ul = fill(-Inf, size(R,1))`: lower bound on input variables
- `uu = fill(Inf, size(R,1))`: upper bound on input variables
- `s0 = zeros(size(Q,1))`: initial value of the state variables
"""
function build_QP_JuMP_model(
    Q,R,A,B, N;
    s0 = zeros(size(Q, 1)),
    sl = [],
    su = [],
    ul = [],
    uu = [],
    Qf = Q,
    E = [],
    F = [],
    gl = [],
    gu = [],
    S = zeros(size(Q, 1), size(R, 1)),
    K = zeros(size(R, 1), size(Q, 1)),
    w = zeros(size(Q, 1))
    )
    ns = size(Q,1) # Number of states
    nu = size(R,1) # Number of inputs
    NS = 1:ns # set of states
    NN = 1:N # set of times
    NU = 1:nu # set of inputs
    model = Model(MadNLP.Optimizer) # define model
    @variable(model, s[NS, 0:N]) # define states
    @variable(model, u[NU, 0:(N-1)]) # define inputs
    # With a feedback matrix K the auxiliary input v is a decision variable
    # and u is tied to it by u = K s + v below.
    if !iszero(K)
        @variable(model, v[NU, 0:(N-1)])
    end
    # Bound states/inputs
    # (bounds are added one scalar constraint at a time, skipping infinities)
    if length(sl) > 0
        for i in NS
            for j in 0:N
                if sl[i] != -Inf
                    @constraint(model, s[i,j] >= sl[i])
                end
            end
        end
    end
    if length(su) > 0
        for i in NS
            for j in 0:N
                if su[i] != Inf
                    @constraint(model, s[i,j] <= su[i])
                end
            end
        end
    end
    if length(ul) > 0
        for i in NU
            for j in 0:(N-1)
                if ul[i] != -Inf
                    @constraint(model, u[i,j] >= ul[i])
                end
            end
        end
    end
    if length(uu) > 0
        for i in NU
            for j in 0:(N-1)
                if uu[i] != Inf
                    @constraint(model, u[i,j] <= uu[i])
                end
            end
        end
    end
    # Fix the initial state to s0.
    if length(s0) >0
        for i in NS
            JuMP.fix(s[i,0], s0[i])
        end
    end
    # Give constraints from A, B, matrices
    # (dynamics: s_{t+1} = A s_t + B u_t + w_t)
    for s1 in NS
        for t in 0:(N - 1)
            @constraint(model, s[s1, t + 1] == sum(A[s1, s2] * s[s2, t] for s2 in NS) + sum(B[s1, u1] * u[u1, t] for u1 in NU) + w[s1 + t * ns])
        end
    end
    # Constraints for Kx + v = u
    if !iszero(K)
        for u1 in NU
            @constraint(model, [t in 0:(N - 1)], u[u1, t] == v[u1, t] + sum( K[u1, s1] * s[s1,t] for s1 in NS))
        end
    end
    # Add E, F constraints
    # (two-sided algebraic constraints gl <= E s_t + F u_t <= gu)
    if length(E) > 0
        for i in 1:size(E,1)
            @constraint(model,[t in 0:(N-1)], gl[i] <= sum(E[i, s1] * s[s1, t] for s1 in NS) + sum(F[i,u1] * u[u1, t] for u1 in NU))
            @constraint(model,[t in 0:(N-1)], gu[i] >= sum(E[i, s1] * s[s1, t] for s1 in NS) + sum(F[i,u1] * u[u1, t] for u1 in NU))
        end
    end
    # Give objective function as xT Q x + uT R u where x is summed over T and u is summed over T-1
    @objective(model,Min, sum( 1/2 * Q[s1, s2]*s[s1,t]*s[s2,t] for s1 in NS, s2 in NS, t in 0:(N-1)) +
        sum( 1/2 * R[u1,u2] * u[u1, t] * u[u2,t] for t in 0:(N-1) , u1 in NU, u2 in NU) +
        sum( 1/2 * Qf[s1,s2] * s[s1,N] * s[s2, N] for s1 in NS, s2 in NS) +
        sum( S[s1, u1] * s[s1, t] * u[u1, t] for s1 in NS, u1 in NU, t in 0:(N-1))
    )
    return model
end
| DynamicNLPModels | https://github.com/MadNLP/DynamicNLPModels.jl.git |
[
"MIT"
] | 0.1.0 | 4c035c67eee91a19afdc5db63eb71464c5db32ba | README.md | docs | 4786 | # DynamicNLPModels.jl
| **Documentation** | **Build Status** | **Coverage** |
|:-----------------:|:----------------:|:----------------:|
| [![doc](https://img.shields.io/badge/docs-dev-blue.svg)](https://madnlp.github.io/DynamicNLPModels.jl/dev) | [![build](https://github.com/MadNLP/DynamicNLPModels.jl/actions/workflows/ci.yml/badge.svg)](https://github.com/MadNLP/DynamicNLPModels.jl/actions) | [![codecov](https://codecov.io/gh/MadNLP/DynamicNLPModels.jl/branch/main/graph/badge.svg?token=2Z18FIU4R7)](https://codecov.io/gh/MadNLP/DynamicNLPModels.jl) |
DynamicNLPModels.jl is a package for [Julia](https://julialang.org/) designed for representing linear [model predictive control (MPC)](https://en.wikipedia.org/wiki/Model_predictive_control) problems. It includes an API for building a model from user defined data and querying solutions.
## Installation
To install this package, please use
```julia
using Pkg
Pkg.add(url="https://github.com/MadNLP/DynamicNLPModels.jl.git")
```
or
```julia
pkg> add https://github.com/MadNLP/DynamicNLPModels.jl.git
```
## Overview
DynamicNLPModels.jl can construct both sparse and condensed formulations for MPC problems based on user defined data. We use the methods discussed by [Jerez et al.](https://doi.org/10.1016/j.automatica.2012.03.010) to eliminate the states and condense the problem. DynamicNLPModels.jl constructs models that are subtypes of `AbstractNLPModel` from [NLPModels.jl](https://github.com/JuliaSmoothOptimizers/NLPModels.jl) enabling both the sparse and condensed models to be solved with a variety of different solver packages in Julia. DynamicNLPModels was designed in part with the goal of solving linear MPC problems on the GPU. This can be done within [MadNLP.jl](https://github.com/MadNLP/MadNLP.jl) using [MadNLPGPU.jl](https://github.com/MadNLP/MadNLP.jl/tree/master/lib/MadNLPGPU).
The general sparse formulation used within DynamicNLPModels.jl is
$$\begin{align*}
\min_{s, u, v} \quad & s_N^\top Q_f s_N + \frac{1}{2} \sum_{i = 0}^{N-1} \left[ \begin{array}{c} s_i \\ u_i \end{array} \right]^\top \left[ \begin{array}{cc} Q & S \\ S^\top & R \end{array} \right] \left[ \begin{array}{c} s_i \\ u_i \end{array} \right]\\
\textrm{s.t.} \quad & s_{i+1} = As_i + Bu_i + w_i \quad \forall i = 0, 1, \cdots, N - 1 \\
& u_i = Ks_i + v_i \quad \forall i = 0, 1, \cdots, N - 1 \\
& g^l \le E s_i + F u_i \le g^u \quad \forall i = 0, 1, \cdots, N - 1\\
& s^l \le s_i \le s^u \quad \forall i = 0, 1, \cdots, N \\
& u^l \le u_i \le u^u \quad \forall i = 0, 1, \cdots, N - 1\\
& s_0 = \bar{s}
\end{align*}$$
where $s_i$ are the states, $u_i$ are the inputs, $N$ is the time horizon, $\bar{s}$ are the initial states, and $Q$, $R$, $A$, and $B$ are user defined data. The matrices $Q_f$, $S$, $K$, $E$, and $F$ and the vectors $w$, $g^l$, $g^u$, $s^l$, $s^u$, $u^l$, and $u^u$ are optional data. $v_i$ is only needed in the condensed formulation, and it arises when $K$ is defined by the user to ensure numerical stability of the condensed problem.
The condensed formulation used within DynamicNLPModels.jl is
$$\begin{align*}
\min_{\boldsymbol{v}} \quad & \frac{1}{2} \boldsymbol{v}^\top \boldsymbol{H} \boldsymbol{v} + \boldsymbol{h}^\top \boldsymbol{v} + \boldsymbol{h}_0\\
\textrm{s.t.} \quad & d^l \le \boldsymbol{J} \boldsymbol{v} \le d^u.
\end{align*}$$
## Getting Started
DynamicNLPModels.jl takes user defined data to form a `SparseLQDynamicModel` or a `DenseLQDynamicModel`. The user can first create an object containing the `LQDynamicData`, or they can pass the data directly to the `SparseLQDynamicModel` or `DenseLQDynamicModel` constructors.
```julia
using DynamicNLPModels, Random, LinearAlgebra
Q = 1.5 * Matrix(I, (3, 3))
R = 2.0 * Matrix(I, (2, 2))
A = rand(3, 3)
B = rand(3, 2)
N = 5
s0 = [1.0, 2.0, 3.0]
lqdd = LQDynamicData(s0, A, B, Q, R, N; kwargs...)
sparse_lqdm = SparseLQDynamicModel(lqdd)
dense_lqdm = DenseLQDynamicModel(lqdd)
# or
sparse_lqdm = SparseLQDynamicModel(s0, A, B, Q, R, N; kwargs...)
dense_lqdm = DenseLQDynamicModel(s0, A, B, Q, R, N; kwargs...)
```
Optional data (such as $s^l$, $s^u$, $S$, or $Q_f$) can be passed as key word arguments. The models `sparse_lqdm` or `dense_lqdm` can be solved by different solvers such as MadNLP.jl or Ipopt (Ipopt requires the extension NLPModelsIpopt.jl). An example script under `\examples` shows how the dense problem can be solved on a GPU using MadNLPGPU.jl.
DynamicNLPModels.jl also includes an API for querying solutions and reseting data. Solutions can be queried using `get_u(solver_ref, dynamic_model)` and `get_s(solver_ref, dynamic_model)`. The problem can be reset with a new $s_0$ by calling `reset_s0!(dynamic_model, s0)`.
| DynamicNLPModels | https://github.com/MadNLP/DynamicNLPModels.jl.git |
[
"MIT"
] | 0.1.0 | 4c035c67eee91a19afdc5db63eb71464c5db32ba | docs/src/api.md | docs | 59 | # API Manual
```@autodocs
Modules = [DynamicNLPModels]
```
| DynamicNLPModels | https://github.com/MadNLP/DynamicNLPModels.jl.git |
[
"MIT"
] | 0.1.0 | 4c035c67eee91a19afdc5db63eb71464c5db32ba | docs/src/guide.md | docs | 7295 |
# Getting Started
DynamicNLPModels.jl takes user defined data to construct a linear MPC problem of the form
```math
\begin{aligned}
\min_{s, u, v} &\; s_N^\top Q_f s_N + \frac{1}{2} \sum_{i = 0}^{N-1} \left[ \begin{array}{c} s_i \\ u_i \end{array} \right]^\top \left[ \begin{array}{cc} Q & S \\ S^\top & R \end{array} \right] \left[ \begin{array}{c} s_i \\ u_i \end{array} \right]\\
\textrm{s.t.} &\;s_{i+1} = As_i + Bu_i + w_i \quad \forall i = 0, 1, \cdots, N - 1 \\
&\; u_i = Ks_i + v_i \quad \forall i = 0, 1, \cdots, N - 1 \\
&\; g^l \le E s_i + F u_i \le g^u \quad \forall i = 0, 1, \cdots, N - 1\\
&\; s^l \le s_i \le s^u \quad \forall i = 0, 1, \cdots, N \\
&\; u^l \le u_i \le u^u \quad \forall i = 0, 1, \cdots, N - 1\\
&\; s_0 = \bar{s}.
\end{aligned}
```
This data is stored within the struct `LQDynamicData`, which can be created by passing the data `s0`, `A`, `B`, `Q`, `R` and `N` to the constructor as in the example below.
```julia
using DynamicNLPModels, Random, LinearAlgebra
Q = 1.5 * Matrix(I, (3, 3))
R = 2.0 * Matrix(I, (2, 2))
A = rand(3, 3)
B = rand(3, 2)
N = 5
s0 = [1.0, 2.0, 3.0]
lqdd = LQDynamicData(s0, A, B, Q, R, N; kwargs...)
```
`LQDynamicData` contains the following fields. All fields after `R` are keyword arguments:
* `ns`: number of states (determined from size of `Q`)
* `nu`: number of inputs (determined from size of `R`)
* `N` : number of time steps
* `s0`: a vector of initial states
* `A` : matrix that is multiplied by the states that corresponds to the dynamics of the problem. Number of columns is equal to `ns`
* `B` : matrix that is multiplied by the inputs that corresponds to the dynamics of the problem. Number of columns is equal to `nu`
* `Q` : objective function matrix for system states from ``0, 1, \cdots, (N - 1)``
* `R` : objective function matrix for system inputs from ``0, 1, \cdots, (N - 1)``
* `Qf`: objective function matrix for system states at time ``N``
* `S` : objective function matrix for system states and inputs
* `E` : constraint matrix multiplied by system states. Number of columns is equal to `ns`
* `F` : constraint matrix multiplied by system inputs. Number of columns is equal to `nu`
* `K` : feedback gain matrix. Used to ensure numerical stability of the condensed problem. Not necessary within the sparse problem
* `w` : constant term within dynamic constraints. At this time, this is the only data that is time varying. This vector must be length `ns` * `N`, where each set of `ns` entries corresponds to that time (i.e., entries `1:ns` correspond to time ``0``, entries `(ns + 1):(2 * ns)` corresond to time ``1``, etc.)
* `sl` : lower bounds on state variables
* `su` : upper bounds on state variables
* `ul` : lower bounds on input variables
* `uu` : upper bounds on input variables
* `gl` : lower bounds on the constraints ``Es_i + Fu_i``
* `gu` : upper bounds on the constraints ``Es_i + Fu_i``
## `SparseLQDynamicModel`
A `SparseLQDynamicModel` can be created by either passing `LQDynamicData` to the constructor or passing the data itself, where the same keyword options exist which can be used for `LQDynamicData`.
```julia
sparse_lqdm = SparseLQDynamicModel(lqdd)
# or
sparse_lqdm = SparseLQDynamicModel(s0, A, B, Q, R, N; kwargs...)
```
The `SparseLQDynamicModel` contains four fields:
* `dynamic_data` which contains the `LQDynamicData`
* `data` which is the `QPData` from [QuadraticModels.jl](https://github.com/JuliaSmoothOptimizers/QuadraticModels.jl). This object also contains the following data:
- `H` which is the Hessian of the linear MPC problem
- `A` which is the Jacobian of the linear MPC problem such that ``\textrm{lcon} \le A z \le \textrm{ucon}``
- `c` which is the linear term of a quadratic objective function
- `c0` which is the constant term of a quadratic objective function
* `meta` which contains the `NLPModelMeta` for the problem from NLPModels.jl
* `counters` which is the `Counters` object from NLPModels.jl
!!! note
The `SparseLQDynamicModel` requires that all matrices in the `LQDynamicData` be the same type. It is recommended that the user be aware of how to most efficiently store their data in the `Q`, `R`, `A`, and `B` matrices as this impacts how efficiently the `SparseLQDynamicModel` is constructed. When `Q`, `R`, `A`, and `B` are sparse, building the `SparseLQDynamicModel` is much faster when these are passed as sparse rather than dense matrices.
## `DenseLQDynamicModel`
The `DenseLQDynamicModel` eliminates the states within the linear MPC problem to build an equivalent optimization problem that is only a function of the inputs. This can be particularly useful when the number of states is large compared to the number of inputs.
A `DenseLQDynamicModel` can be created by either passing `LQDynamicData` to the constructor or passing the data itself, where the same keyword options exist which can be used for `LQDynamicData`.
```julia
dense_lqdm = DenseLQDynamicModel(lqdd)
# or
dense_lqdm = DenseLQDynamicModel(s0, A, B, Q, R, N; kwargs...)
```
The `DenseLQDynamicModel` contains five fields:
* `dynamic_data` which contains the `LQDynamicData`
* `data` which is the `QPData` from [QuadraticModels.jl](https://github.com/JuliaSmoothOptimizers/QuadraticModels.jl). This object also contains the following data:
- `H` which is the Hessian of the condensed linear MPC problem
- `A` which is the Jacobian of the condensed linear MPC problem such that ``\textrm{lcon} \le A z \le \textrm{ucon}``
- `c` which is the linear term of the condensed linear MPC problem
- `c0` which is the constant term of the condensed linear MPC problem
* `meta` which contains the `NLPModelMeta` for the problem from NLPModels.jl
* `counters` which is the `Counters` object from NLPModels.jl
* `blocks` which contains the data needed to condense the model and then to update the condensed model when `s0` is reset.
The `DenseLQDynamicModel` is formed from dense matrices, and this dense system can be solved on a GPU using MadNLP.jl and MadNLPGPU.jl. For an example script for performing this, please see the [examples directory](https://github.com/MadNLP/DynamicNLPModels.jl/tree/main/examples) of the main repository.
## API functions
An API has been created for working with `LQDynamicData` and the sparse and dense models. All functions can be seen in the API Manual section. However, we give a short overview of these functions here.
* `reset_s0!(LQDynamicModel, new_s0)`: resets the model in place with a new `s0` value. This could be called after each sampling period in MPC to reset the model with a new measured value
* `get_s(solver_ref, LQDynamicModel)`: returns the optimal solution for the states from a given solver reference
* `get_u(solver_ref, LQDynamicModel)`: returns the optimal solution for the inputs from a given solver reference; when `K` is defined, the solver reference contains the optimal ``v`` values rather than optimal ``u`` values, and this function converts ``v`` to ``u`` and returns the ``u`` values
* `get_*`: returns the data of `*` where `*` is an object within `LQDynamicData`
* `set_*!`: sets the value within the data of `*` for a given entry to a user defined value
| DynamicNLPModels | https://github.com/MadNLP/DynamicNLPModels.jl.git |
[
"MIT"
] | 0.1.0 | 4c035c67eee91a19afdc5db63eb71464c5db32ba | docs/src/index.md | docs | 3275 | # Introduction
[DynamicNLPModels.jl](https://github.com/MadNLP/DynamicNLPModels.jl) is a package for [Julia](https://julialang.org/) designed for representing linear [model predictive control (MPC)](https://en.wikipedia.org/wiki/Model_predictive_control) problems. It includes an API for building a model from user defined data and querying solutions.
!!! note
This documentation is also available in [PDF format](DynamicNLPModels.pdf).
## Installation
To install this package, please use
```julia
using Pkg
Pkg.add(url="https://github.com/MadNLP/DynamicNLPModels.jl.git")
```
or
```julia
pkg> add https://github.com/MadNLP/DynamicNLPModels.jl.git
```
## Overview
DynamicNLPModels.jl can construct both sparse and condensed formulations for MPC problems based on user defined data. We use the methods discussed by [Jerez et al.](https://doi.org/10.1016/j.automatica.2012.03.010) to eliminate the states and condense the problem. DynamicNLPModels.jl constructs models that are subtypes of `AbstractNLPModel` from [NLPModels.jl](https://github.com/JuliaSmoothOptimizers/NLPModels.jl) enabling both the sparse and condensed models to be solved with a variety of different solver packages in Julia. DynamicNLPModels was designed in part with the goal of solving linear MPC problems on the GPU. This can be done within [MadNLP.jl](https://github.com/MadNLP/MadNLP.jl) using [MadNLPGPU.jl](https://github.com/MadNLP/MadNLP.jl/tree/master/lib/MadNLPGPU).
The general sparse formulation used within DynamicNLPModels.jl is
```math
\begin{aligned}
\min_{s, u, v} &\; s_N^\top Q_f s_N + \frac{1}{2} \sum_{i = 0}^{N-1} \left[ \begin{array}{c} s_i \\ u_i \end{array} \right]^\top \left[ \begin{array}{cc} Q & S \\ S^\top & R \end{array} \right] \left[ \begin{array}{c} s_i \\ u_i \end{array} \right]\\
\textrm{s.t.} &\;s_{i+1} = As_i + Bu_i + w_i \quad \forall i = 0, 1, \cdots, N - 1 \\
&\; u_i = Ks_i + v_i \quad \forall i = 0, 1, \cdots, N - 1 \\
&\; g^l \le E s_i + F u_i \le g^u \quad \forall i = 0, 1, \cdots, N - 1\\
&\; s^l \le s_i \le s^u \quad \forall i = 0, 1, \cdots, N \\
&\; u^l \le u_i \le u^u \quad \forall i = 0, 1, \cdots, N - 1\\
&\; s_0 = \bar{s}
\end{aligned}
```
where ``s_i`` are the states, ``u_i`` are the inputs, ``N`` is the time horizon, ``\bar{s}`` are the initial states, and ``Q``, ``R``, ``A``, and ``B`` are user defined data. The matrices ``Q_f``, ``S``, ``K``, ``E``, and ``F`` and the vectors ``w``, ``g^l``, ``g^u``, ``s^l``, ``s^u``, ``u^l``, and ``u^u`` are optional data. ``v_i`` is only needed in the condensed formulation, and it arises when ``K`` is defined by the user to ensure numerical stability of the condensed problem.
The condensed formulation used within DynamicNLPModels.jl is
```math
\begin{aligned}
\min_{\boldsymbol{v}} &\;\; \frac{1}{2} \boldsymbol{v}^\top \boldsymbol{H} \boldsymbol{v} + \boldsymbol{h}^\top \boldsymbol{v} + \boldsymbol{h}_0\\
\textrm{s.t.} &\; d^l \le \boldsymbol{J} \boldsymbol{v} \le d^u.
\end{aligned}
```
# Bug reports and support
This package is new and still undergoing some development. If you encounter a bug, please report it through Github's [issue tracker](https://github.com/MadNLP/DynamicNLPModels.jl/issues). | DynamicNLPModels | https://github.com/MadNLP/DynamicNLPModels.jl.git |
[
"Apache-2.0"
] | 0.1.0 | 5dd50891df13013c7551fd92b1f37f4cdac9976b | examples/sst_finetune.jl | code | 8979 | using BERT
using Knet
import Base: length, iterate
using Random
using CSV
using PyCall
using Dates
# Fine-tuning configuration.
VOCABFILE = "bert-base-uncased-vocab.txt"  # WordPiece vocabulary, one token per line
NUM_CLASSES = 2                            # binary sentiment: negative / positive
LEARNING_RATE = 2e-5
NUM_OF_EPOCHS = 30
TRAIN = true                               # true: fine-tune + validate; false: evaluate saved model

# Map each vocabulary token to its 1-based line number in the vocab file.
# A concretely typed Dict avoids the Dict{Any,Any} anti-pattern and keeps
# downstream lookups type-stable.
token2int = Dict{String,Int}()
f = open(VOCABFILE) do file
    # eachline streams the file instead of materializing all lines at once
    for (i, line) in enumerate(eachline(file))
        token2int[line] = i
    end
end
int2token = Dict(value => key for (key, value) in token2int)  # inverse map: id -> token
VOCABSIZE = length(token2int)
# include("preprocess.jl")
# include("optimizer.jl")
"""
    convert_to_int_array(text, dict; lower_case=true)

Tokenize `text` with the BERT WordPiece tokenizer (`bert_tokenize`) and map
every token to its integer id in the vocabulary `dict`. Tokens absent from
the vocabulary fall back to the id of the special `"[UNK]"` token.
"""
function convert_to_int_array(text, dict; lower_case=true)
    tokens = bert_tokenize(text, dict, lower_case=lower_case)
    out = Int[]
    for token in tokens
        # get(f, dict, key) performs a single hash lookup and only touches
        # "[UNK]" on a miss, replacing the original `token in keys(dict)`
        # check followed by a second lookup.
        push!(out, get(() -> dict["[UNK]"], dict, token))
    end
    return out
end
"""
    read_and_process(filename, dict; lower_case=true)

Read a tab-separated file with `sentence` and `label` columns and return a
tuple `(x, y)`: `x` is a vector of token-id vectors (one per sentence) and
`y` holds the 1-based class labels as `Int8` (1 = negative, 2 = positive).
"""
function read_and_process(filename, dict; lower_case=true)
    rows = CSV.File(filename, delim="\t")
    x = Array{Int,1}[]
    y = Int8[]
    for row in rows
        push!(x, convert_to_int_array(row.sentence, dict, lower_case=lower_case))
        push!(y, Int8(row.label + 1))  # shift 0/1 file labels to 1/2
    end
    return (x, y)
end
# Minibatch container/iterator over a tokenized classification dataset.
# Fields are intentionally untyped (Any) in the original design; they are
# populated by the outer constructor from a TSV input file.
mutable struct ClassificationData
    input_ids    # vector of per-instance token-id vectors, each padded/truncated to seq_len
    input_mask   # vector of 0/1 vectors: 1 marks a real token, 0 marks padding
    segment_ids  # vector of segment-id vectors (all ones for single-sentence input)
    labels       # per-instance class labels (1-based)
    batchsize    # number of instances per minibatch
    ninstances   # total number of instances
    shuffled     # whether iteration visits instances in random order
end
"""
    ClassificationData(input_file, token2int; batchsize=8, shuffled=true, seq_len=64)

Read `input_file` (TSV with `sentence`/`label` columns), tokenize every
sentence via `read_and_process`, and pad or truncate each token-id sequence
to exactly `seq_len` entries. Returns a `ClassificationData` ready for
minibatch iteration.
"""
function ClassificationData(input_file, token2int; batchsize=8, shuffled=true, seq_len=64)
    input_ids = []
    input_mask = []
    segment_ids = []
    # read_and_process returns the labels directly; the original also had a
    # dead `labels = []` initialization that was immediately rebound.
    (x, labels) = read_and_process(input_file, token2int)
    for ids in x
        if length(ids) >= seq_len
            ids = ids[1:seq_len]           # truncate long sequences
            mask = ones(Int64, seq_len)    # every position is a real token
        else
            mask = ones(Int64, length(ids))
            append!(ids, fill(1, seq_len - length(ids)))    # 1 is the id of "[PAD]"
            append!(mask, fill(0, seq_len - length(mask)))  # padded positions vanish under masking
        end
        push!(input_ids, ids)
        push!(input_mask, mask)
        push!(segment_ids, ones(Int64, seq_len))  # single sentence: one segment throughout
    end
    ninstances = length(input_ids)
    return ClassificationData(input_ids, input_mask, segment_ids, labels,
                              batchsize, ninstances, shuffled)
end
"""
    length(d::ClassificationData)

Number of minibatches in `d`: `ninstances / batchsize` rounded up (the last
batch may contain fewer than `batchsize` instances).
"""
function length(d::ClassificationData)
    # cld is ceiling division; the original divrem form shadowed `d` with the
    # quotient, which was confusing but equivalent.
    return cld(d.ninstances, d.batchsize)
end
"""
    iterate(d::ClassificationData[, state])

Iterate `d` in minibatches of up to `d.batchsize` instances. `state` is the
collection of not-yet-consumed instance indices; when `d.shuffled` the
initial state is a random permutation of `1:ninstances`. Each item is a
tuple `(input_ids, input_mask, segment_ids, labels)` of matrices with one
column per instance; `nothing` as the next state signals exhaustion.
"""
function iterate(d::ClassificationData,
                 state = d.shuffled ? randperm(d.ninstances) : (1:d.ninstances))
    # The original used ifelse(...), which eagerly computed randperm even when
    # shuffling was disabled; the ternary above only permutes when needed.
    state === nothing && return nothing
    n = min(d.batchsize, length(state))
    batch = state[1:n]
    new_state = n < length(state) ? state[n+1:end] : nothing
    input_ids = hcat(d.input_ids[batch]...)
    input_mask = hcat(d.input_mask[batch]...)
    segment_ids = hcat(d.segment_ids[batch]...)
    labels = hcat(d.labels[batch]...)
    return ((input_ids, input_mask, segment_ids, labels), new_state)
end
# mutable struct ClassificationData2
# input_ids
# input_mask
# segment_ids
# labels
# batchsize
# ninstances
# shuffled
# end
# function ClassificationData2(input_file; batchsize=8, shuffled=true, seq_len=64)
# input_ids = []
# input_mask = []
# segment_ids = []
# labels = []
# f = open(input_file)
# tmp = split.(readlines(f), "\t")
# for i in 1:length(tmp)
# instance = eval.(Meta.parse.(tmp[i]))
# push!(input_ids, (instance[1] .+ 1)[1:seq_len])
# push!(input_mask, instance[2][1:seq_len])
# push!(segment_ids, (instance[3] .+ 1)[1:seq_len])
# push!(labels, (instance[4] + 1))
# end
# ninstances = length(input_ids)
# return ClassificationData2(input_ids, input_mask, segment_ids, labels, batchsize, ninstances, shuffled)
# end
# function length(d::ClassificationData2)
# d, r = divrem(d.ninstances, d.batchsize)
# return r == 0 ? d : d+1
# end
# function iterate(d::ClassificationData2, state=ifelse(d.shuffled, randperm(d.ninstances), 1:d.ninstances))
# state === nothing && return nothing
# if length(state) > d.batchsize
# new_state = state[d.batchsize+1:end]
# input_ids = hcat(d.input_ids[state[1:d.batchsize]]...)
# input_mask = hcat(d.input_mask[state[1:d.batchsize]]...)
# segment_ids = hcat(d.segment_ids[state[1:d.batchsize]]...)
# labels = hcat(d.labels[state[1:d.batchsize]]...)
# else
# new_state = nothing
# input_ids = hcat(d.input_ids[state]...)
# input_mask = hcat(d.input_mask[state]...)
# segment_ids = hcat(d.segment_ids[state]...)
# labels = hcat(d.labels[state]...)
# end
# return ((input_ids, input_mask, segment_ids, labels), new_state)
# end
# include("model.jl")

# BertConfig positional arguments, in order:
#   Embedding Size, Vocab Size, Intermediate Hidden Size, Max Sequence Length,
#   Sequence Length, Num of Segments, Num of Heads in Attention,
#   Num of Encoders in Stack, Batch Size, Matrix Type, General Dropout Rate,
#   Attention Dropout Rate, Activation Function
# Sizes match bert-base-uncased; KnetArray{Float32} is Knet's GPU array type.
config = BertConfig(768, 30522, 3072, 512, 64, 2, 12, 12, 8, KnetArray{Float32}, 0.1, 0.1, "gelu")
if TRAIN
    # Training mode: load the train and dev splits.
    dtrn = ClassificationData("../project/mytrain.tsv", token2int, batchsize=config.batchsize, seq_len=config.seq_len)
    ddev = ClassificationData("../project/dev.tsv", token2int, batchsize=config.batchsize, seq_len=config.seq_len)
else
    # Evaluation mode: only the test split is needed.
    dtst = ClassificationData("../project/mytest.tsv", token2int, batchsize=config.batchsize, seq_len=config.seq_len)
end

if TRAIN
    # Build the classifier head and initialize the encoder from the pretrained
    # PyTorch bert-base checkpoint, loaded through PyCall.
    model = BertClassification(config, NUM_CLASSES)
    @pyimport torch
    torch_model = torch.load("../project/pytorch_model.bin")
    model = load_from_torch_base(model, config.num_encoder, config.atype, torch_model)
end
"""
    accuracy2(model, dtst)

Classification accuracy of `model` over every minibatch of `dtst`: the
fraction of instances whose argmax class matches the gold label.
"""
function accuracy2(model, dtst)
    ncorrect = 0
    ntotal = 0
    for (x, attention_mask, segment_ids, y) in dtst
        scores = model(x, segment_ids, attention_mask=attention_mask)
        # argmax over the class dimension; keep only the row index of each column.
        preds = map(ci -> ci[1], argmax(Array{Float32}(scores), dims=1))
        ncorrect += sum(y .== preds)
        ntotal += length(y)
    end
    return ncorrect / ntotal
end
"""
    initopt!(model, t_total; lr=0.001, warmup=0.1)

Attach a fresh `BertAdam` optimizer to every parameter of `model`. `t_total`
is the total number of update steps (used by the warmup/decay schedule).
One-dimensional parameters additionally get `w_decay_rate=0.01`.

NOTE(review): BERT fine-tuning conventionally applies weight decay to weight
matrices and *excludes* the 1-D parameters (biases/LayerNorm); this code does
the opposite — confirm that is intended.
"""
function initopt!(model, t_total; lr=0.001, warmup=0.1)
    for par in params(model)
        if ndims(value(par)) == 1  # idiomatic form of length(size(...)) === 1
            par.opt = BertAdam(lr=lr, warmup=warmup, t_total=t_total, w_decay_rate=0.01)
        else
            par.opt = BertAdam(lr=lr, warmup=warmup, t_total=t_total)
        end
    end
end
"""
    mytrain!(model, dtrn, ddev, best_acc)

Run one epoch of fine-tuning over `dtrn`. Every 500 iterations, log the
running mean training loss and the dev-set accuracy; whenever dev accuracy
improves on `best_acc`, checkpoint the model to "model_bert.jld2".

Returns `(best_acc, mean_epoch_loss, accs)` where `accs` collects the dev
accuracies measured during the epoch.
"""
function mytrain!(model, dtrn, ddev, best_acc)
    losses = []
    accs = []
    for (k, (x, attention_mask, segment_ids, labels)) in enumerate(dtrn)
        # Forward + backward pass; calling the model with labels returns the loss.
        J = @diff model(x, segment_ids, labels, attention_mask=attention_mask)
        for par in params(model)
            g = grad(J, par)
            update!(value(par), g, par.opt)  # per-parameter BertAdam step (set by initopt!)
        end
        push!(losses, value(J))
        if k % 500 == 0
            # Periodic progress report: timestamped mean loss so far this epoch.
            print(Dates.format(now(), "HH:MM:SS"), " -> ")
            println("Training loss up to $k iteration is : ", Knet.mean(losses))
            flush(stdout)
            # Mid-epoch validation on the dev set.
            acc = accuracy2(model, ddev)
            push!(accs, acc)
            print(Dates.format(now(), "HH:MM:SS"), " -> ")
            println("Accuracy at $k iteration : ", acc)
            flush(stdout)
            if acc > best_acc
                # New best dev accuracy: checkpoint the model.
                best_acc = acc
                print(Dates.format(now(), "HH:MM:SS"), " -> ")
                println("Saving...")
                Knet.save("model_bert.jld2", "model", model)
                flush(stdout)
            end
        end
    end
    return (best_acc, Knet.mean(losses), accs)
end
if TRAIN
    # Total optimizer steps across all epochs; drives the BertAdam schedule.
    t_total = length(dtrn) * NUM_OF_EPOCHS
    initopt!(model, t_total, lr=LEARNING_RATE)
    dev_accs = [0.0]
    best_acc = 0.0
    for epoch in 1:NUM_OF_EPOCHS
        global best_acc  # reassigned from mytrain!'s return value below
        print(Dates.format(now(), "HH:MM:SS"), " -> ")
        println("Epoch : ", epoch)
        flush(stdout)
        # One epoch of training; returns updated best accuracy, mean loss,
        # and the dev accuracies sampled during the epoch.
        (best_acc, lss, acc) = mytrain!(model, dtrn, ddev, best_acc)
        append!(dev_accs, acc)
        print(Dates.format(now(), "HH:MM:SS"), " -> ")
        println("Training loss for $epoch epoch is : $lss")
        println(dev_accs)
        flush(stdout)
        #=
        acc = accuracy2(model, ddev)
        println("Accuracy : ", acc)
        if acc > best_acc
            best_acc = acc
            println("Saving...")
            Knet.save("model_bert.jld2", "model", model)
        end
        =#
    end
    Knet.save("accuracies.jld2", "dev_accs", dev_accs)
else
    # Evaluation mode: load the best checkpoint and score the test split.
    model = Knet.load("model_bert.jld2", "model")
    result = accuracy2(model, dtst)
    print(Dates.format(now(), "HH:MM:SS"), " -> ")
    println("Test accuracy is : $result")
end
| BERT | https://github.com/OsmanMutlu/BERT.jl.git |