Commit 74e7fdbf authored by Florian Grosse

Deleted all files that are not part of the Krippendorff Archive

parent de802abf
name = "ConML"
uuid = "fc341c38-212c-4f39-b16b-6ce89dd7ead5"
authors = ["Florian Große <grosse2b@hotmail.de>"]
version = "0.8.3"
[deps]
JLSO = "9da8a3cd-07a3-59c0-a743-3fdc52c30d11"
KernelDensity = "5ab0869b-81aa-558d-bb23-cbf5423bbe9b"
Krippendorff = "08378ef3-8d37-44eb-940a-2e19b2c00443"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
Requires = "ae029012-a4dd-5104-9daa-d747884805df"
ScikitLearnBase = "6e75b9c4-186b-50bd-896f-2d2496a4843e"
Statistics = "10745b16-79ce-11e8-2129-4dbb03c52600"
StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
### ConML
This is an implementation of the constructivist machine learning paradigm. For more information, see:
> Schmid, Thomas. "Deconstructing the Final Frontier of Artificial Intelligence: Five Theses for a Constructivist Machine Learning." AAAI Spring Symposium: Combining Machine Learning with Knowledge Engineering. 2019.
## Next Steps
# Immediate
- update usage example
- Deconstruction
  - ~~Write recursive DeleteDependentModels (for model disposal) EDIT: named purgefromKB~~
  - ~~make sure that existing temporalClusterSearch and Reconstruction can be reused~~
  - Write 4(3) specialized Deconstruction methods: TΣZ, ~~ΣZ~~, ~~TΣ~~
# Next
- Testing of Deconstruction and Interface again
# Long Term
- employ Lazy Iterators everywhere to reduce strain on RAM
- fix Deconstruction Krippendorff Class-Permutation Bug
- ~~save and load functionality for ParameterStruct~~
- ~~think about reasonable processes for Procedural Knowledge and add those~~ ==> out of project, gone to master thesis
- reproduce CFS and replace Replacement
- DocStrings
- move Krippendorff to own GitLab project, change dependencies to Distances.jl and clean up for registration
- thorough Testing and Profiling, optimize if necessary
- serialize KnowledgeDomain similar to python version
- TΣ (and TZ?) time matching tolerance parameter similar to that for full deconstruction
- Parameter to enable multiple matching ΣZ deconstructions at the same time, instead of one after another
- Learn blocks get a Relationship in the log when they consist of vectorial models, and an Origin when they come from Deconstruction => adapt the log
## Usage
...give some intro to the package
- for now, see the usage in the base folder Readme, or the minimal sketch below
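
A minimal, hypothetical sketch of the intended pipeline. The classifiers `clf1`/`clf2` and the default constructors `KnowledgeBase()`, `ParametersConML()` and `Reconstruct()` are assumptions, not confirmed API; any estimators implementing the ScikitLearnBase interface (at least `fit!` and `transform`) should work:

```julia
using ConML, DataFrames

# placeholder estimators implementing the ScikitLearnBase interface
clf1, clf2 = SomeClassifier(), AnotherClassifier()

kb  = KnowledgeBase()            # assumed default constructor
par = ParametersConML()          # assumed default parameters
con = Construct([clf1, clf2])    # Construction needs two or more classifiers
rec = Reconstruct()              # assumed default constructor

learner = LearnerConML(kb, par, con, nothing, rec)  # `nothing` skips feature selection

# data enters as a VMS; with DataFrames loaded, a DataFrame with T/Sigma/Z columns works
df = DataFrame(T = 1:3, Sigma = ["s","s","s"], Z = ["z","z","z"], f1 = rand(3), f2 = rand(3))
learner(VMS(df))
```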
#
# C O N S T R U C T I V I S T__ __
# _____ ____ ____ / |/ // /
# / ___// __ \ / __ \ / /|_/ // /
# / /__ / /_/ // / / // / / // /___
# \___/ \____//_/ /_//_/ /_//_____/
# M A C H I N E L E A R N I N G
#
#
# A Project by
# Thomas Schmid | UNIVERSITÄT LEIPZIG
# www.constructivist.ml
#
# Code Author: Florian Große
# Licence: GNU GPLv3
#
module ConML
import Base.sort
import Base.empty!
import KernelDensity
#import Interpolations
using Krippendorff
#using StaticArrays
using Random
using Statistics: mean
using StatsBase: counts
using ScikitLearnBase
using Requires
function __init__()
    @require DataFrames="a93c6f00-e57d-5684-b7b6-d8193f3e46c0" begin
        using .DataFrames: DataFrame, Not
        function VMS(df::DataFrame; T = "T", Sigma = "Sigma", Z = "Z")
            if !(eltype(df[!,T]) <: Union{Signed,Unsigned})
                @warn "Type of T column should be <: Integer"
            end
            if !(eltype(df[!,Z]) <: AbstractString && eltype(df[!,Sigma]) <: AbstractString)
                @warn "Type of Sigma and Z columns should be <: AbstractString"
            end
            data = df[!,Not([T, Sigma, Z])]
            VMS(df[!,T], df[!,Sigma], df[!,Z], Matrix(data), names(data))
        end
    end
end
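# Example (hypothetical data; active only when DataFrames is loaded so that the
# @require block above runs — column names default to "T", "Sigma", "Z"):
#
#     using DataFrames, ConML
#     df = DataFrame(T = 1:3, Sigma = ["s1","s1","s1"], Z = ["z1","z1","z1"],
#                    f1 = randn(3), f2 = randn(3))
#     vms = VMS(df)   # the remaining columns f1, f2 become the data matrix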
#Logger gets a separate module to keep namespace a little cleaner
include("LogCML.jl")
using .LogCML # TODO repeated: check that the necessary things in the Logger are exported
include("Datastructures.jl")
include("Reconstruction.jl")
include("FeatureSelection.jl")
include("Construction.jl")
include("LearnBlockSearch.jl")
include("Deconstruction.jl")
include("JLSOsaveload.jl")
# things that will be made accessible via using ConML
export KnowledgeBase, ParametersConML, VMS,                  # Datastructures
       featureNumber, sampleNumber,                          # helper functions
       LearnerConML, FeatureSelector, Construct, Reconstruct # Pipeline
function generateNamesForEstimators(cs, estimatortype::Symbol, shortdefaultnames::Bool)
    prefix = "Est"
    if estimatortype === :construction
        prefix = "Con"
    elseif estimatortype === :reconstruction
        prefix = "Rec"
    end
    postfixes = [ifelse(shortdefaultnames, filter(isuppercase, string(typeof(classif))), # short
                        string(typeof(classif))) for classif in cs]                      # long
    return ["$(prefix)$(i)-$(postfix)" for (i,postfix) in enumerate(postfixes)]
end
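# Example (with two hypothetical classifier types KMeans and DBSCAN):
#     generateNamesForEstimators([KMeans(), DBSCAN()], :construction, true)
#     # -> ["Con1-KM", "Con2-DBSCAN"]  (short names keep only the uppercase letters of the type name)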
"""
TODO
"""
struct LearnerConML # TODO make this pretty and a kwdef
    knowledgeBase::KnowledgeBase
    parameters::ParametersConML
    construction::Construct
    featureselection::Union{Nothing, FeatureSelector}
    reconstruction::Reconstruct
    # deconstruction::Deconstruct as method, not object, since it is defined by construct and reconstruct
    skipLearnblockSearch::Bool
end
LearnerConML(kb, par, con, fs, rec; skipLearnblockSearch = false) = LearnerConML(kb, par, con, fs, rec, skipLearnblockSearch)
(cl::LearnerConML)(data::Any) = error("LearnerConML expects data in form of a VMS object. Run ?LearnerConML or ?VMS for more info.")
function (cl::LearnerConML)(data::VMS{D,I,S,F}) where {D,I,S,F}
    logger = cl.parameters.Logger
    learnblocks::Vector{VMS{D,I,S,F}} = preprocessLearnBlock(cl.parameters, cl.knowledgeBase, data, skipLearnblockSearch = cl.skipLearnblockSearch)
    for lb in learnblocks
        # init new data block in log
        writelog(logger, InitDataBlockLog(), featureNumber(lb), sampleNumber(lb))
        # initialize log to first level (of knowledgeBase, i.e. the input block)
        writelog(logger, AdvanceLevelLog())
        # make FS skippable if nothing was passed
        fsmethod = isnothing(cl.featureselection) ? _skipfeatureselectiondummy : cl.featureselection
        # run the core methods for this learnblock + all higher levels in the KB;
        # results accumulate in the (mutated) KnowledgeBase, so no early return here
        deconstruct!(cl.knowledgeBase, cl.parameters, cl.construction, cl.reconstruction,
            cl.reconstruction(cl.knowledgeBase, cl.parameters,
                fsmethod(cl.parameters,
                    cl.construction(cl.knowledgeBase, cl.parameters, lb))))
    end # for
end
end # module
#
# C O N S T R U C T I V I S T__ __
# _____ ____ ____ / |/ // /
# / ___// __ \ / __ \ / /|_/ // /
# / /__ / /_/ // / / // / / // /___
# \___/ \____//_/ /_//_/ /_//_____/
# M A C H I N E L E A R N I N G
#
#
# A Project by
# Thomas Schmid | UNIVERSITÄT LEIPZIG
# www.constructivist.ml
#
# Code Author: Florian Große
# Licence: GNU GPLv3
#
# this file is meant to be included by ConML.jl and
# may not behave as expected when used on its own
#TODO mention rescale
"""
WIP! a callable object holding the necessary configurational values to perform construction
initialize once with the configuration you want and call onto learning blocks (or so it is intended)
""" # TODO docstring, mention shortdefaultnames
struct Construct{T}
    classifiers::T
    names::Vector{String}
    rescale
    function Construct(cs, names::AbstractVector{<:AbstractString}; rescale = unitrescale)
        if length(cs) <= 1
            throw(AssertionError(string("Two or more classifiers needed for Construction, got: ", length(cs))))
        end
        if length(cs) != length(names)
            throw(AssertionError("Number of classifiers and assigned names for Construct must match!"))
        end
        ct = Tuple(collect(cs)) # tuple for types
        namesvector = [string(name) for name in names] # enforce type Vector{String}
        new{typeof(ct)}(ct, namesvector, rescale)
    end
end
Construct(cs; shortdefaultnames::Bool = false, kwargs...) = Construct(cs, generateNamesForEstimators(cs, :construction, shortdefaultnames); kwargs...)
Construct(pairsStringEst::AbstractVector{Pair{String,E}}; kwargs...) where {E} = Construct(last.(pairsStringEst), first.(pairsStringEst); kwargs...)
Construct(pairsEstString::AbstractVector{Pair{E,String}}; kwargs...) where {E} = Construct(first.(pairsEstString), last.(pairsEstString); kwargs...)
Construct(dictStringEst::Dict{String,E}; kwargs...) where {E} = Construct([(key => value) for (key,value) in dictStringEst]; kwargs...)
Construct(dictEstString::Dict{E,String}; kwargs...) where {E} = Construct([(key => value) for (key,value) in dictEstString]; kwargs...)
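# Examples (with two hypothetical estimator objects `a` and `b`):
#     Construct([a, b])                           # auto-generated names "Con1-...", "Con2-..."
#     Construct([a, b]; shortdefaultnames = true) # abbreviate type names to their uppercase letters
#     Construct(["kmeans" => a, "tree" => b])     # explicit names via pairs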
"""
myConstructor = Construct(iter_of_classifiers)
returned_model_candidates = myConstructor(learnblock)
Execute a construction process with the specified configuration in myConstructor.
Classifiers must implement the common ScikitLearn Interface (at least fit! and transform).
Returns a Tuple(learnblock, Array_of_model_candidates).
"""
function (c::Construct)(kb, par::ParametersConML, learnblock::VMS)
    if par.verbose println("called Construction with ", length(c.classifiers), " classifiers for a learnblock.") end
    # get the data field from the VMS, train classifiers
    block = learnblock.vals
    modelcandidates = [fit!(clone(classifier), block) for classifier in c.classifiers]
    modelnames, targetsvec = _construction_loop(par.learningDomain, block, modelcandidates,
                                                c.names, length(learnblock.T),
                                                par.verbose, par.MinCategorySize, c.rescale)
    # log
    writelog(par.Logger, ConstructionLog(), featureNumber(block), sampleNumber(block), c.names)
    kb.workdone[:construction] += length(modelnames)
    # shortcut to abort if nothing passed, else wrap everything for reconstruction and return
    if isempty(modelnames)
        # if waste is enabled, record the non-constructible block
        if par.enablewaste
            push!(kb.waste, learnblock)
        end
        return (nothing, learnblock) # format expected by the FeatureSelector shortcut
    end
    return (learnblock, targetsvec, modelnames)
end
# Loop for conceptual knowledge
function _construction_loop(::Conceptual, block, modelcandidates,
                            candidatenames, learnblocksize,
                            verbose::Bool, MinCategorySize, _)
    threshold = floor(Int, MinCategorySize isa Int ?
                           MinCategorySize :
                           MinCategorySize*learnblocksize)
    modelnames::Vector{String} = Vector{String}()
    targetsvectors::Vector{Vector{Int}} = Vector{Vector{Int}}()
    for (candid, candidatename) in zip(modelcandidates, candidatenames)
        # get predictions
        rawpredictions = transform(candid, block)
        # unify the format of the prediction: collapse a one-hot encoding
        # to a class vector, leave a plain vector as is
        levels = size(rawpredictions, 2)
        classesvector = [length(r) == 1 ? r[1] : argmax(r) for r in eachrow(rawpredictions)]
        classsizes = counts(classesvector, levels)
        keep::Bool = all( >(threshold), classsizes)
        if verbose
            println("Candidate: $(candidatename)")
            show(stdout, "text/plain", classsizes)
            println()
            if !keep println("Dropped due to not fulfilling minimal category size of ", threshold) end
        end
        if keep
            push!(modelnames, candidatename)
            push!(targetsvectors, classesvector)
        end
    end
    return (modelnames, targetsvectors)
end
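# Example of the threshold logic above: with MinCategorySize = 0.1 and a learnblock
# of 200 samples, threshold = floor(Int, 0.1 * 200) = 20, so a candidate is kept
# only if every predicted class occurs more than 20 times.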
# Loop for procedural knowledge
function _construction_loop(::Procedural, block, modelcandidates,
                            candidatenames, learnblocksize,
                            verbose::Bool, _, rescale)
    modelnames::Vector{String} = Vector{String}()
    targetsvectors::Vector{Vector{Float64}} = Vector{Vector{Float64}}()
    for (candid, candidatename) in zip(modelcandidates, candidatenames)
        # get predictions
        rawpredictions = transform(candid, block)
        # MinCategorySize can't be checked here, obviously.
        # Instead, models may choose (possibly via a wrapper)
        # to return nothing if their prediction should not be used
        keep::Bool = !isnothing(rawpredictions)
        if keep
            if size(rawpredictions, 2) == 1
                push!(modelnames, candidatename)
                push!(targetsvectors, vec(rawpredictions))
            else
                # split models here, one for each target column
                for (i, pred) in enumerate(eachcol(rawpredictions))
                    newname = string(candidatename, "_Dim", i)
                    push!(modelnames, newname)
                    # apply the selected rescaling, if any
                    push!(targetsvectors, rescale(pred))
                end
            end
        else
            if verbose println("Model discarded because construction quality control failed: $(candidatename)") end
        end
    end
    return (modelnames, targetsvectors)
end
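# Example of the model splitting above: a procedural candidate named "Con1-MLP"
# (hypothetical) returning a 100×3 prediction matrix is split into three entries
# "Con1-MLP_Dim1" ... "Con1-MLP_Dim3", each column rescaled by the configured
# `rescale` function.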
"""
constructionlikeformatter(Learnblock, Targets)
Take an existing learnblock and existing target values and return an output suitable for Reconstruction like a full Construction would.
""" # TODO update docstring
function constructionlikeformatter(learnblock::VMS, targetsvectors, Z)
conname = last(split(Z, ".")) # extract construction classifier name (or abbreviation) from Z string because Reconstruction reformats it again
# wrap everything in arrays because reconstruction expects it
return ([learnblock,], [targetsvectors,], [conname,])
end
"""
unitrescale(input; offset = 0.001)
Default rescaling function. This will attempt to linearly map all values to the range [0,1].
If offset is not 0, the destination range will be symmetrically narrowed to [+offset, 1-offset]
in order to prevent extreme values (particularly exact 0s).
"""
function unitrescale(input; offset = 0.001)
min, max = extrema(input)
min == max && return input # degenerate case, would create a lot of infs otherwise
reductionfactor = (1 - 2*offset) / (max - min)
return ((input .- min) .* reductionfactor) .+ offset
end
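# Example: unitrescale([0.0, 5.0, 10.0]) yields [0.001, 0.5, 0.999], since
# reductionfactor = (1 - 0.002) / 10 = 0.0998 maps min -> offset and max -> 1 - offset.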
#
# C O N S T R U C T I V I S T__ __
# _____ ____ ____ / |/ // /
# / ___// __ \ / __ \ / /|_/ // /
# / /__ / /_/ // / / // / / // /___
# \___/ \____//_/ /_//_/ /_//_____/
# M A C H I N E L E A R N I N G
#
#
# A Project by
# Thomas Schmid | UNIVERSITÄT LEIPZIG
# www.constructivist.ml
#
# Code Author: Florian Große
# Licence: GNU GPLv3
#
# this file is meant to be included by ConML.jl and
# may not behave as expected when used on its own
"""
FeatureSelector(embeddedMethod, filterMethod)
A callable object holding the necessary configurational values to perform feature selection,
namely a filter method and an embedded method. Both methods should be functions which will
receive a data block, a target vector and a kwargs dictionary that is guaranteed to contain
at least the the `MaxFeatures` parameter and the `testSetPercentage` for train/test splits.
Remember that the decision which method to apply depends on the ParameterStruct in the Learner.
""" # TODO docstring revise, correct signature, make # Implementation section
struct FeatureSelector{E<:Function, F<:Function, G<:Union{Nothing,Function}}
    embeddedMethod::E
    embeddedMethodName::String
    filterMethod::F
    filterMethodName::String
    finalPass::G
    finalPassName::String
    kwargs::Dict{Symbol,Any}
    function FeatureSelector(emb::Pair{String,<:Function}, filt::Pair{String,<:Function}, final::Union{Nothing,Pair{String,<:Function}}; kwargs...)
        embeddedMethodName, embeddedMethod = emb
        E = typeof(embeddedMethod)
        filterMethodName, filterMethod = filt
        F = typeof(filterMethod)
        finalPassName, finalPass = isnothing(final) ? ("", nothing) : final
        G = typeof(finalPass)
        return new{E,F,G}(embeddedMethod, embeddedMethodName, filterMethod, filterMethodName, finalPass, finalPassName, Dict{Symbol,Any}(kwargs))
    end
end
FeatureSelector(emb::Function, filt::Function; kwargs...) = FeatureSelector(Pair("embedded",emb), Pair("filter",filt); kwargs...) #Default Names
FeatureSelector(emb::Function, filt::Function, final::Function; kwargs...) = FeatureSelector(Pair("embedded",emb), Pair("filter",filt), Pair("final",final); kwargs...) #Default Names
FeatureSelector(embl, embr, filtl, filtr, finall = nothing, finalr = nothing; kwargs...) = FeatureSelector(Pair(embl, embr), Pair(filtl, filtr), isnothing(finall) ? nothing : Pair(finall, finalr); kwargs...)
FeatureSelector(emb::Pair{<:Function,String}, filt::Pair{<:Function,String}, final::Union{Nothing,Pair{<:Function,String}}; kwargs...) = FeatureSelector(reverse(emb), reverse(filt), isnothing(final) ? nothing : reverse(final); kwargs...)
function FeatureSelector(; embeddedMethod::Function, embeddedMethodName::String, filterMethod::Function, filterMethodName::String, finalPass::Union{Nothing, Function} = nothing, finalPassName::String = "", kwargs...)
    FeatureSelector(Pair(embeddedMethodName, embeddedMethod), Pair(filterMethodName, filterMethod), isnothing(finalPass) ? nothing : Pair(finalPassName, finalPass); kwargs...)
end
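# Example (hypothetical selection functions my_embedded and my_filter, each mapping
# (data, targets, kwargs) -> Vector{Int} of kept feature indices):
#     fs = FeatureSelector("lasso" => my_embedded, "anova" => my_filter, nothing)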
#default values for feature reduction moved to ConMLDefaults package
"""
_skipfeatureselectiondummy(...)
Allows to skip feature selection, does pretty much nothing.
Will inform about skipped feature selection if ´verbose == true´ in the parameters struct.
"""
function _skipfeatureselectiondummy(par::ParametersConML, tup)
if par.verbose println("Feature selection was skipped.") end
return tup
end
# makes copying properties from a ParametersConML to a FeatureSelector easier
macro push_to_kwargs(par, kwargs, property)
    esc(:( if haskey($kwargs, $property)
               if $par.verbose && getproperty($par, $property) != $kwargs[$property]
                   println("ConMLParameter `", $(property), "` overwritten by explicit kwarg in FeatureSelector.")
               end
           else
               $kwargs[$property] = getproperty($par, $property)
           end ))
end
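# Example: `@push_to_kwargs par f.kwargs :MaxFeatures` expands (roughly) to
#     if haskey(f.kwargs, :MaxFeatures)
#         par.verbose && par.MaxFeatures != f.kwargs[:MaxFeatures] && println("... overwritten ...")
#     else
#         f.kwargs[:MaxFeatures] = par.MaxFeatures
#     end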
"""
(f::FeatureSelector)(KnowledgeBase, ParametersConML, tuple(LearnBlock, Vector(targets_vector), names) )
(f::FeatureSelector)(KnowledgeBase, ParametersConML, LearnBlock, Vector(targets_vector), names)
Apply the feature selection methods stored in FeatureSelector to a data tuple:
Tuple(lb::VMS, candidates::AbstractVector{<:AbstractVector{<:Number}}, names::AbstractVector{<:AbstractString})
Each element of `candidates` should be a Vector of target values matching the length of `learnblock`
and is associated with the corresponding entry in `names`.
""" #TODO docstring revise
(f::FeatureSelector)(par::ParametersConML, tup) = f(par, tup...) # unpacking of the tuple type if necessary
function (f::FeatureSelector)(par::ParametersConML, learnblock::VMS, targets, names)
    # push MaxFeatures, testSetPercentage and the current learning domain into the kwargs dict;
    # in case anyone overwrote these explicitly, a warning message is issued if verbose
    #@push_to_kwargs par f.kwargs :predictionErrorScore
    @push_to_kwargs par f.kwargs :MaxFeatures
    @push_to_kwargs par f.kwargs :testSetPercentage
    @push_to_kwargs par f.kwargs :learningDomain
    modeldata = Vector{typeof(learnblock)}(undef, length(targets)) # will be filled with processed data soon
    _apply_fs!(f, modeldata, targets, names, par, learnblock)
    return (modeldata, targets, names)
end
function _apply_fs!(f::FeatureSelector, modeldata, targets, names, par::ParametersConML, learnblock::VMS)
    highSampleComplexity = sampleNumber(learnblock) > par.maxFilterSamples
    initialfeaturenames = learnblock.fields
    lbdata = learnblock.vals
    # allow deletion of blocks if feature selection fails
    deletemask = falses(length(targets))
    for (i, (target, name)) in enumerate(zip(targets, names))
        if par.verbose println("Trying feature selection for candidate model: ", name) end
        selecteddata, selectednames = _apply_fs_loop(f, target, initialfeaturenames, lbdata, highSampleComplexity,
                                                     par.verbose, par.MaxFeatures, par.maxFilterFeatures, par.MaxModelReduction)
        writelog(par.Logger, FeatureSelectionLog(), name, length(selectednames), sampleNumber(learnblock))
        if isempty(selecteddata) || isempty(selectednames)
            if par.verbose println("Feature selection failed for model: ", name) end
            deletemask[i] = true
        else
            modeldata[i] = VMS(learnblock.T, learnblock.Σ, learnblock.Z, selecteddata, selectednames)
        end
    end
    if any(deletemask)
        # delete failed blocks
        deleteat!(targets, deletemask)
        deleteat!(names, deletemask)
        deleteat!(modeldata, deletemask)
    end
end
function _apply_fs_loop(f, targetvalues, initialfeaturenames, lbdata::D, highSampleComplexity::Bool,
                        verbose::Bool, MaxFeatures::Int, maxFilterFeatures::Int, MaxModelReduction::Bool) where {D}
    currentFeatureNumber::Int = length(initialfeaturenames)
    previousFeatureNumber::Int = currentFeatureNumber + 1 # to trigger the loop condition initially
    processedData::D = lbdata
    processedfeaturenames::Vector{String} = initialfeaturenames
    while (currentFeatureNumber > MaxFeatures || MaxModelReduction) && (2 < currentFeatureNumber < previousFeatureNumber)
        newfeatureindices::Vector{Int} = Int[]
        # update previousFeatureNumber
        previousFeatureNumber = currentFeatureNumber
        # apply feature reduction
        if highSampleComplexity || currentFeatureNumber > maxFilterFeatures
            # use the filter method to get indices
            if verbose println("\tapplying selected feature reduction filter method: ", f.filterMethodName) end
            newfeatureindices = f.filterMethod(processedData, targetvalues, f.kwargs)
        else
            # use the embedded method
            if verbose println("\tapplying selected feature reduction embedded method: ", f.embeddedMethodName) end
            newfeatureindices = f.embeddedMethod(processedData, targetvalues, f.kwargs)
        end
        # subset data and fields
        processedData = processedData[:, newfeatureindices]
        processedfeaturenames = processedfeaturenames[newfeatureindices]
        # update currentFeatureNumber
        currentFeatureNumber = length(newfeatureindices)
        if verbose println("\treduced number of features from ", previousFeatureNumber, " to ", currentFeatureNumber) end
    end
    # if everything failed to reduce features below MaxFeatures, attempt a final pass
    if !isnothing(f.finalPass) && (currentFeatureNumber > MaxFeatures)
        if verbose println("\tapplying final pass method: ", f.finalPassName) end
        newfeatureindices = f.finalPass(processedData, targetvalues, f.kwargs)
        # subset data and fields
        processedData = processedData[:, newfeatureindices]
        processedfeaturenames = processedfeaturenames[newfeatureindices]
        # update currentFeatureNumber
        currentFeatureNumber = length(newfeatureindices)
        if verbose println("\treduced number of features from ", previousFeatureNumber, " to ", currentFeatureNumber) end
    end
    return (processedData, processedfeaturenames)
end
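# A minimal sketch of a conforming reduction method (hypothetical, not part of the
# package): filter/embedded/final-pass functions receive the data block, the target
# vector and the kwargs Dict, and return the indices of the features to keep, e.g.
#
#     function variancefilter(data, targets, kwargs)
#         k = min(kwargs[:MaxFeatures], size(data, 2))
#         vars = [Statistics.var(col) for col in eachcol(data)]
#         return sortperm(vars, rev = true)[1:k]   # keep the k most variable features
#     end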
# shortcut if no classifier passed construction
function (f::FeatureSelector)(::ParametersConML, nothingtuple::Tuple{Nothing, VMS})
    # TODO is there anything loggable here?
    return nothingtuple
end
# end of feature selection
#
# C O N S T R U C T I V I S T__ __
# _____ ____ ____ / |/ // /
# / ___// __ \ / __ \ / /|_/ // /
# / /__ / /_/ // / / // / / // /___
# \___/ \____//_/ /_//_/ /_//_____/
# M A C H I N E L E A R N I N G
#
#
# A Project by
# Thomas Schmid | UNIVERSITÄT LEIPZIG
# www.constructivist.ml
#
# Code Author: Florian Große
# Licence: GNU GPLv3
#
# this file is meant to be included by ConML.jl and
# may not behave as expected when used on its own
#
using JLSO
function _checkclearfile(path)
open(path, read = true, create = true) do file # this touches the file if it does not exist
for line in readlines(file)
isempty(line) || return false
end
return true
end
end
function savekb(path, kb; overwrite = false)
fileclear = _checkclearfile(path)
if !fileclear
if overwrite
@info "Overwriting existing file $(basename(path))."
else
@warn "File $(basename(path)) $(isempty(dirname(path)) ? "" : string("at location ",dirname(path),Sys.iswindows() ? "\\" : "/", " "))does already exist.
If you really want to overwrite an existing file, set the keyword argument `overwrite = true`."
return nothing
end
end
# prepare bits of the knowledge domain
# VMSs at level 0
level0 = [] # gets narrowed later
for (ID,vms) in kb.level_0
push!(level0, (id=ID, t=vms.T, sigma=vms.Σ, z=vms.Z, fieldnames=vms.fields, content=vms.vals))
end
# all other levels that contain models
usedmodels = Dict{Symbol,String}()
higherlevels = []
for currentlevel in 1:length(kb.higherLevels)
kblvl = kb.higherLevels[currentlevel]
modelvec = []
for pragmaticmodel in values(kblvl)
t::NTuple{2, Int} = pragmaticmodel.T
sigma::String = pragmaticmodel.Σ
z::String = pragmaticmodel.Z
# put used machine model in dictionary if not already there
if !haskey(usedmodels, Symbol(sigma))
usedmodels[Symbol(sigma)] = string(typeof(pragmaticmodel.model))
end
# sanity check
if currentlevel != pragmaticmodel.level
@warn "Found a pragmatic model whose level field does not match the level of the KnowledgeBase that it resides in. Something may be wrong with your KnowledgeBase."
end
# extract everything else and save the NamedTuple
targettimestamps, targets = modelpredictions(pragmaticmodel, kb, t)
push!(modelvec, (id=pragmaticmodel.ID, t=t, sigma=sigma, z=z, level=pragmaticmodel.level,
source=pragmaticmodel.source, targets=targets, targettimestamps=targettimestamps))
# the machine model object itself can't be serialized; it is dropped and only its metadata and predictions are kept
end
if isempty(modelvec)
break # there can be no more filled levels above an empty level, so stop here
else
push!(higherlevels, modelvec)
end