diff --git a/Project.toml b/Project.toml
index 7bab33881..32f813230 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,5 +1,6 @@
 [deps]
 CSV = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b"
+DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
 Distributed = "8ba89e20-285c-5b6f-9357-94700520ee1b"
 ImageDraw = "4381153b-2b60-58ae-a1ba-fd683676385f"
 ImageIO = "82e4d734-157c-48bb-816b-45c225c6df19"
diff --git a/examples/main.jl b/examples/main.jl
index 4388944f7..d21a9b9cb 100755
--- a/examples/main.jl
+++ b/examples/main.jl
@@ -39,7 +39,7 @@ function main(; smartChooseFeats::Bool=false, alt::Bool=false)
         # negTrainingPath = joinpath(altImagePath, "neg")
     end
 
-    numClassifiers = 5
+    numClassifiers = 4
 
     if ! smartChooseFeats
         # For performance reasons restricting feature size
@@ -81,6 +81,9 @@ function main(; smartChooseFeats::Bool=false, alt::Bool=false)
     FaceDetection.notifyUser("Testing selected classifiers...")
     correctFaces = 0
     correctNonFaces = 0
+
+    # correctFaces = sum([FaceDetection._get_feature_vote(face, classifiers) for face in facesIITesting])
+    # correctNonFaces = length(non_faces_testing) - sum([FaceDetection._get_feature_vote(nonFace, classifiers) for nonFace in nonFacesIITesting])
     correctFaces = sum(FaceDetection.ensembleVoteAll(facesIITesting, classifiers))
     correctNonFaces = length(nonFacesTesting) - sum(FaceDetection.ensembleVoteAll(nonFacesIITesting, classifiers))
     correctFacesPercent = (float(correctFaces) / length(facesTesting)) * 100
diff --git a/examples/scores.jl b/examples/scores.jl
index 0c8d47d76..6d6361bbc 100755
--- a/examples/scores.jl
+++ b/examples/scores.jl
@@ -4,111 +4,141 @@
 "${BASH_SOURCE[0]}" "$@"
 =#
 
-    println("\033[1;34m===>\033[0;38m\033[1;38m\tLoading required libraries (it will take a moment to precompile if it is your first time doing this)...\033[0;38m")
+println("\033[1;34m===>\033[0;38m\033[1;38m\tLoading required libraries (it will take a moment to precompile if it is your first time doing this)...\033[0;38m")
+
+include(joinpath(dirname(dirname(@__FILE__)), "src", "FaceDetection.jl"))
+
+using .FaceDetection
+using Images: imresize
+using StatsPlots#, Plots # StatsPlots required for box plots
+using CSV: write
+using DataFrames: DataFrame
+
+
+function main(smartChooseFeats::Bool=false, alt::Bool=false)
+    mainPath = dirname(dirname(@__FILE__))
+    mainImagePath = joinpath(mainPath, "data", "main")
+    altImagePath = joinpath(mainPath, "data", "alt")
+
+    if alt
+        posTrainingPath = joinpath(altImagePath, "pos")
+        negTrainingPath = joinpath(altImagePath, "neg")
+        posTestingPath = joinpath(altImagePath, "testing", "pos")
+        negTestingPath = joinpath(homedir(), "Desktop", "Assorted Personal Documents", "Wallpapers copy")
+    else
+        posTrainingPath = joinpath(mainImagePath, "trainset", "faces")
+        negTrainingPath = joinpath(mainImagePath, "trainset", "non-faces")
+        posTestingPath = joinpath(mainImagePath, "testset", "faces")#joinpath(homedir(), "Desktop", "faces")#"$mainImagePath/testset/faces/"
+        negTestingPath = joinpath(mainImagePath, "testset", "non-faces")
+    end
 
-    include(joinpath(dirname(dirname(@__FILE__)), "src", "FaceDetection.jl"))
+    numClassifiers = 4
 
-    using .FaceDetection
-    using Images: imresize
-    using StatsPlots#, Plots # StatsPlots required for box plots
-
+    if ! smartChooseFeats
+        # For performance reasons restricting feature size
+        minFeatureHeight = 8
+        maxFeatureHeight = 10
+        minFeatureWidth = 8
+        maxFeatureWidth = 10
+    end
+
+
+    FaceDetection.notifyUser("Loading faces...")
+
+    facesTraining = FaceDetection.loadImages(posTrainingPath)
+    facesIITraining = map(FaceDetection.toIntegralImage, facesTraining) # list(map(...))
+    println("...done. ", length(facesTraining), " faces loaded.")
+
+    FaceDetection.notifyUser("Loading non-faces...")
+
+    nonFacesTraining = FaceDetection.loadImages(negTrainingPath)
+    nonFacesIITraining = map(FaceDetection.toIntegralImage, nonFacesTraining) # list(map(...))
+    println("...done. ", length(nonFacesTraining), " non-faces loaded.\n")
 
-    function main(smartChooseFeats::Bool=false, alt::Bool=false)
-        mainPath = dirname(dirname(@__FILE__))
-        mainImagePath = joinpath(mainPath, "data", "main")
-        altImagePath = joinpath(mainPath, "data", "alt")
-
-        if alt
-            posTrainingPath = joinpath(altImagePath, "pos")
-            negTrainingPath = joinpath(altImagePath, "neg")
-            posTestingPath = joinpath(altImagePath, "testing", "pos")
-            negTestingPath = joinpath(homedir(), "Desktop", "Assorted Personal Documents", "Wallpapers copy")
-        else
-            posTrainingPath = joinpath(mainImagePath, "trainset", "faces")
-            negTrainingPath = joinpath(mainImagePath, "trainset", "non-faces")
-            posTestingPath = joinpath(mainImagePath, "testset", "faces")#joinpath(homedir(), "Desktop", "faces")#"$mainImagePath/testset/faces/"
-            negTestingPath = joinpath(mainImagePath, "testset", "non-faces")
-        end
-
-        numClassifiers = 10
-
-        if ! smartChooseFeats
-            # For performance reasons restricting feature size
-            minFeatureHeight = 8
-            maxFeatureHeight = 10
-            minFeatureWidth = 8
-            maxFeatureWidth = 10
-        end
-
-
-        FaceDetection.notifyUser("Loading faces...")
-
-        facesTraining = FaceDetection.loadImages(posTrainingPath)
-        facesIITraining = map(FaceDetection.toIntegralImage, facesTraining) # list(map(...))
-        println("...done. ", length(facesTraining), " faces loaded.")
-
-        FaceDetection.notifyUser("Loading non-faces...")
-
-        nonFacesTraining = FaceDetection.loadImages(negTrainingPath)
-        nonFacesIITraining = map(FaceDetection.toIntegralImage, nonFacesTraining) # list(map(...))
-        println("...done. ", length(nonFacesTraining), " non-faces loaded.\n")
-
-        # classifiers are haar like features
-        classifiers = FaceDetection.learn(facesIITraining, nonFacesIITraining, numClassifiers, minFeatureHeight, maxFeatureHeight, minFeatureWidth, maxFeatureWidth)
-
-        FaceDetection.notifyUser("Loading test faces...")
-
-        facesTesting = FaceDetection.loadImages(posTestingPath)
-        # facesIITesting = map(FaceDetection.toIntegralImage, facesTesting)
-        facesIITesting = map(i -> imresize(i, (19,19)), map(FaceDetection.toIntegralImage, facesTesting))
-        println("...done. ", length(facesTesting), " faces loaded.")
-
-        FaceDetection.notifyUser("Loading test non-faces..")
-
-        nonFacesTesting = FaceDetection.loadImages(negTestingPath)
-        nonFacesIITesting = map(i -> imresize(i, (19,19)), map(FaceDetection.toIntegralImage, nonFacesTesting))
-        println("...done. ", length(nonFacesTesting), " non-faces loaded.\n")
-
-        FaceDetection.notifyUser("Testing selected classifiers...")
-        correctFaces = 0
-        correctNonFaces = 0
-        correctFaces = sum(FaceDetection.ensembleVoteAll(facesIITesting, classifiers))
-        correctNonFaces = length(nonFacesTesting) - sum(FaceDetection.ensembleVoteAll(nonFacesIITesting, classifiers))
-        correctFacesPercent = (float(correctFaces) / length(facesTesting)) * 100
-        correctNonFacesPercent = (float(correctNonFaces) / length(nonFacesTesting)) * 100
-
-        println("...done.\n")
-
-        notifyUser("Calculating test face scores and constructing dataset...")
-
-        df = Matrix{Float64}(undef, max(length(facesIITesting), length(nonFacesIITesting)), 2)
-        df[1:length(nonFacesIITesting),2] .= [sum([getFacelikeness(c,nonFace) for c in classifiers]) for nonFace in nonFacesIITesting]
-        df[1:length(facesIITesting),1] .= [sum([getFacelikeness(c,face) for c in classifiers]) for face in facesIITesting]
-
-        # displaymatrix(df)
-
-        println("...done.\n")
-
-        notifyUser("Constructing box plot with said dataset...")
-
-        theme(:solarized)
-        plot = boxplot(["" ""],
-                    df,
-                    title = ["Scores of Faces" "Scores of Non-Faces"],
-                    label = ["faces" "non-faces"],
+    # classifiers are haar like features
+    classifiers = FaceDetection.learn(facesIITraining, nonFacesIITraining, numClassifiers, minFeatureHeight, maxFeatureHeight, minFeatureWidth, maxFeatureWidth)
+
+    FaceDetection.notifyUser("Loading test faces...")
+
+    facesTesting = FaceDetection.loadImages(posTestingPath)
+    # facesIITesting = map(FaceDetection.toIntegralImage, facesTesting)
+    facesIITesting = map(i -> imresize(i, (19,19)), map(FaceDetection.toIntegralImage, facesTesting))
+    println("...done. ", length(facesTesting), " faces loaded.")
+
+    FaceDetection.notifyUser("Loading test non-faces..")
+
+    nonFacesTesting = FaceDetection.loadImages(negTestingPath)
+    nonFacesIITesting = map(i -> imresize(i, (19,19)), map(FaceDetection.toIntegralImage, nonFacesTesting))
+    println("...done. ", length(nonFacesTesting), " non-faces loaded.\n")
+
+    FaceDetection.notifyUser("Testing selected classifiers...")
+    correctFaces = 0
+    correctNonFaces = 0
+    correctFaces = sum(FaceDetection.ensembleVoteAll(facesIITesting, classifiers))
+    correctNonFaces = length(nonFacesTesting) - sum(FaceDetection.ensembleVoteAll(nonFacesIITesting, classifiers))
+    correctFacesPercent = (float(correctFaces) / length(facesTesting)) * 100
+    correctNonFacesPercent = (float(correctNonFaces) / length(nonFacesTesting)) * 100
+
+    println("...done.\n")
+
+    notifyUser("Calculating test face scores and constructing dataset...")
+
+    dfFaces = Matrix{Union{Float64, Missing}}(undef, length(facesIITesting), 1)
+    dfNonFaces = Matrix{Union{Float64, Missing}}(undef, length(nonFacesIITesting), 1)
+    dfFaces[1:length(facesIITesting)] .= [sum([FaceDetection.getFaceness(c,face) for c in classifiers]) for face in facesIITesting]
+    dfNonFaces[1:length(nonFacesIITesting)] .= [sum([FaceDetection.getFaceness(c,nonFace) for c in classifiers]) for nonFace in nonFacesIITesting]
+
+
+    # displaymatrix(dfFaces)
+    # displaymatrix(dfNonFaces)
+
+    println("...done.\n")
+
+    notifyUser("Constructing box plot with said dataset...")
+
+    theme(:solarized)
+    plot = boxplot(["" ""],# titles?
+                dfFaces, dfNonFaces,
+                title = ["Scores of Faces" "Scores of Non-Faces"],
+                label = ["faces" "non-faces"],
+                fontfamily = font("Times"),
+                layout = @layout([a b]),
+                # fillcolor = [:blue, :orange],
+                link = :y,
+                framestyle = [:origin :origin]
+                )
+
+    plot(
+        boxplot(dfFaces,
+            title = "Scores of Faces",
+            label = "faces",
             fontfamily = font("Times"),
-            layout = @layout([a b]),
             # fillcolor = [:blue, :orange],
             link = :y,
-            framestyle = [:box :origin] # :origin
-            )
+            framestyle = [:origin :origin]
+            ),
-
-    savefig(plot, joinpath(dirname(dirname(@__FILE__)), "figs", "scores.pdf"))
-
-    println("...done. Plot created at ", joinpath(dirname(dirname(@__FILE__)), "figs", "scores.pdf"), "\n")
-
+        boxplot(dfNonFaces,
+            title = "Scores of Non-Faces",
+            label = "non-faces",
+            fontfamily = font("Times"),
+            framestyle = [:origin :origin]
+            )
+    )
+
+    if length(dfFaces) < length(dfNonFaces) # filling in the dataset
+        dfFaces = vcat(dfFaces, Matrix{Union{Float64, Missing}}(undef, length(nonFacesIITesting) - length(facesIITesting), 1))
+    elseif length(dfFaces) > length(dfNonFaces)
+        dfNonFaces = vcat(dfNonFaces, Matrix{Union{Float64, Missing}}(undef, length(facesIITesting) - length(nonFacesIITesting), 1))
     end
+
+    write(joinpath(homedir(), "Desktop", "facelikeness-data.csv"), DataFrame(hcat(dfFaces, dfNonFaces)), writeheader=false)
+
+    savefig(plot, joinpath(dirname(dirname(@__FILE__)), "figs", "scores.pdf"))
+
+    println("...done. Plot created at ", joinpath(dirname(dirname(@__FILE__)), "figs", "scores.pdf"), "\n")
+
+end
 
-    @time main(false, false)
+@time main(false, false)
diff --git a/examples/validation.jl b/examples/validation.jl
index eefbc90c0..6efb8bb67 100755
--- a/examples/validation.jl
+++ b/examples/validation.jl
@@ -35,7 +35,7 @@ function main(; smartChooseFeats::Bool=false, alt::Bool=false, imageReconstructi
         negTestingPath = joinpath(mainImagePath, "testset", "non-faces")
     end
 
-    numClassifiers = 10
+    numClassifiers = 4
 
     if ! smartChooseFeats
         # For performance reasons restricting feature size
diff --git a/figs/scores.pdf b/figs/scores.pdf
index a14c740fc..8e05c9e98 100644
Binary files a/figs/scores.pdf and b/figs/scores.pdf differ
diff --git a/src/AdaBoost.jl b/src/AdaBoost.jl
index 22cdfa7c8..5192c5ffa 100755
--- a/src/AdaBoost.jl
+++ b/src/AdaBoost.jl
@@ -18,7 +18,7 @@ using ProgressMeter: @showprogress
 using .HaarLikeFeature: FeatureTypes, HaarLikeObject, getVote
 using .Utils: notifyUser
 
-export learn, _get_feature_vote, _create_features
+export learn, _create_features
 
 
 function learn(positiveIIs::AbstractArray, negativeIIs::AbstractArray, numClassifiers::Int64=-1, minFeatureWidth::Int64=1, maxFeatureWidth::Int64=-1, minFeatureHeight::Int64=1, maxFeatureHeight::Int64=-1)#::Array{HaarLikeObject,1}
@@ -77,6 +77,7 @@ function learn(positiveIIs::AbstractArray, negativeIIs::AbstractArray, numClassi
     features = _create_features(imgHeight, imgWidth, minFeatureWidth, maxFeatureWidth, minFeatureHeight, maxFeatureHeight)
     numFeatures = length(features)
     featureIndices = Array(1:numFeatures)
+    used = []
 
     if isequal(numClassifiers, -1)
         numClassifiers = numFeatures
@@ -90,7 +91,7 @@ function learn(positiveIIs::AbstractArray, negativeIIs::AbstractArray, numClassi
     n = numImgs
     processes = numImgs # i.e., hypotheses
     @showprogress for t in 1:processes # bar(range(num_imgs)):
-        votes[t, :] = Array(map(f -> _get_feature_vote(f, images[t]), features))
+        votes[t, :] = Array(map(f -> getVote(f, images[t]), features))
     end # end show progress in for loop
     print("\n") # for a new line after the progress bar
@@ -142,9 +143,44 @@ function learn(positiveIIs::AbstractArray, negativeIIs::AbstractArray, numClassi
 end
 
-function _get_feature_vote(feature::HaarLikeObject, image::AbstractArray)
-    return HaarLikeFeature.getVote(feature, image)
-end
+#find / update threshold and coeff for each feature
+# function _feature_job(feature_nr, feature)
+#     # if (feature_nr+1) % 1000 == 0:
+#     #     print('[ %d of %d ]'%(feature_nr+1, n_features))
+#     if feature_nr ∈ used
+#         return
+#     end
+#
+#     # find the scores for the images
+#     scores = zeros(n_img)
+#     for i, img in enumerate(images):
+#         scores[i] = feature.get_score(img)
+#     sorted_img_args = np.argsort(scores)
+#     Sp = np.zeros(n_img) # sum weights for positive examples below current img
+#     Sn = np.zeros(n_img) # sum weights for negative examples below current img
+#     Tp = 0
+#     Tn = 0
+#     for img_arg in np.nditer(sorted_img_args):
+#         if labels[img_arg] == 0:
+#             Tn += w[img_arg]
+#             Sn[img_arg] = Tn
+#         else:
+#             Tp += w[img_arg]
+#             Sp[img_arg] = Tp
+#
+#     # compute the formula for the threshold
+#     nerror = Sp + (Tn - Sn) # error of classifying everything negative below threshold
+#     perror = Sn + (Tp - Sp) # error of classifying everything positive below threshold
+#     error = np.minimum(perror, nerror) # find minimum
+#     best_threshold_img = np.argmin(error) # find the image with the threshold
+#     best_local_error = error[best_threshold_img]
+#     feature.threshold = scores[best_threshold_img] # use the score we estimated for the image as new threshold
+#     # assign new polarity, based on above calculations
+#     feature.polarity = 1 if nerror[best_threshold_img] < perror[best_threshold_img] else -1
+#
+#     # store the error to find best feature
+#     errors[feature_nr] = best_local_error
+# end
 
 
 function _create_features(imgHeight::Int64, imgWidth::Int64, minFeatureWidth::Int64, maxFeatureWidth::Int64, minFeatureHeight::Int64, maxFeatureHeight::Int64)
@@ -179,6 +215,7 @@ function _create_features(imgHeight::Int64, imgWidth::Int64, minFeatureWidth::In
             for y in 1:(imgHeight - featureHeight)
                 features = push!(features, HaarLikeFeature.HaarLikeObject(feature, (x, y), featureWidth, featureHeight, 0, 1))
                 features = push!(features, HaarLikeFeature.HaarLikeObject(feature, (x, y), featureWidth, featureHeight, 0, -1))
+                # features = push!(features, HaarLikeFeature.HaarLikeObject(feature, (x, y), featureWidth, featureHeight, 0.11, -1))
             end # end for y
         end # end for x
     end # end for feature height
diff --git a/src/FaceDetection.jl b/src/FaceDetection.jl
index 32afef133..6a3477adf 100755
--- a/src/FaceDetection.jl
+++ b/src/FaceDetection.jl
@@ -6,7 +6,7 @@
 
 module FaceDetection
 
-export toIntegralImage, sumRegion, FeatureTypes, HaarLikeObject, getScore, getVote, learn, _get_feature_vote, _create_features, displaymatrix, notifyUser, loadImages, getImageMatrix, ensembleVote, ensembleVoteAll, reconstruct, getRandomImage, generateValidationImage, getFacelikeness
+export toIntegralImage, sumRegion, FeatureTypes, HaarLikeObject, getScore, getVote, learn, _create_features, displaymatrix, notifyUser, loadImages, getImageMatrix, ensembleVote, ensembleVoteAll, reconstruct, getRandomImage, generateValidationImage, getFaceness
 
 include("IntegralImage.jl")
 include("HaarLikeFeature.jl")
@@ -14,8 +14,8 @@ include("AdaBoost.jl")
 include("Utils.jl")
 
 using .IntegralImage: toIntegralImage, sumRegion
-using .HaarLikeFeature: FeatureTypes, HaarLikeObject, getScore, getVote, getFacelikeness
-using .AdaBoost: learn, _get_feature_vote, _create_features
-using .Utils: displaymatrix, notifyUser, loadImages, getImageMatrix, ensembleVote, ensembleVoteAll, reconstruct, getRandomImage, generateValidationImage
+using .HaarLikeFeature: FeatureTypes, HaarLikeObject, getScore, getVote
+using .AdaBoost: learn, _create_features
+using .Utils: displaymatrix, notifyUser, loadImages, getImageMatrix, ensembleVote, ensembleVoteAll, getFaceness, reconstruct, getRandomImage, generateValidationImage
 
 end # end module
diff --git a/src/HaarLikeFeature.jl b/src/HaarLikeFeature.jl
index a54427593..840971d16 100755
--- a/src/HaarLikeFeature.jl
+++ b/src/HaarLikeFeature.jl
@@ -11,7 +11,7 @@ include("IntegralImage.jl")
 
 using .IntegralImage: sumRegion
 
-export FeatureTypes, HaarLikeObject, getScore, getVote, getFacelikeness
+export FeatureTypes, HaarLikeObject, getScore, getVote
 
 
 FeatureTypes = [(1, 2), (2, 1), (3, 1), (1, 3), (2, 2)]
@@ -58,25 +58,30 @@ function getScore(feature, intImg::Array)#function getScore(feature::HaarLikeObj
     =#
 
     score = 0
+    mycount = 0
 
     if feature.featureType == FeatureTypes[1] # two vertical
         first = IntegralImage.sumRegion(intImg, feature.topLeft, (feature.topLeft[1] + feature.width, Int(round(feature.topLeft[2] + feature.height / 2))))
         second = IntegralImage.sumRegion(intImg, (feature.topLeft[1], Int(round(feature.topLeft[2] + feature.height / 2))), feature.bottomRight)
         score = first - second
+        mycount = 1
     elseif feature.featureType == FeatureTypes[2] # two horizontal
         first = IntegralImage.sumRegion(intImg, feature.topLeft, (Int(round(feature.topLeft[1] + feature.width / 2)), feature.topLeft[2] + feature.height))
         second = IntegralImage.sumRegion(intImg, (Int(round(feature.topLeft[1] + feature.width / 2)), feature.topLeft[2]), feature.bottomRight)
         score = first - second
+        mycount = 2
     elseif feature.featureType == FeatureTypes[3] # three horizontal
         first = IntegralImage.sumRegion(intImg, feature.topLeft, (Int(round(feature.topLeft[1] + feature.width / 3)), feature.topLeft[2] + feature.height))
         second = IntegralImage.sumRegion(intImg, (Int(round(feature.topLeft[1] + feature.width / 3)), feature.topLeft[2]), (Int(round(feature.topLeft[1] + 2 * feature.width / 3)), feature.topLeft[2] + feature.height))
         third = IntegralImage.sumRegion(intImg, (Int(round(feature.topLeft[1] + 2 * feature.width / 3)), feature.topLeft[2]), feature.bottomRight)
         score = first - second + third
+        mycount = 3
     elseif feature.featureType == FeatureTypes[4] # three vertical
         first = IntegralImage.sumRegion(intImg, feature.topLeft, (feature.bottomRight[1], Int(round(feature.topLeft[2] + feature.height / 3))))
         second = IntegralImage.sumRegion(intImg, (feature.topLeft[1], Int(round(feature.topLeft[2] + feature.height / 3))), (feature.bottomRight[1], Int(round(feature.topLeft[2] + 2 * feature.height / 3))))
         third = IntegralImage.sumRegion(intImg, (feature.topLeft[1], Int(round(feature.topLeft[2] + 2 * feature.height / 3))), feature.bottomRight)
         score = first - second + third
+        mycount = 4
     elseif feature.featureType == FeatureTypes[5] # four
         # top left area
         first = IntegralImage.sumRegion(intImg, feature.topLeft, (Int(round(feature.topLeft[1] + feature.width / 2)), Int(round(feature.topLeft[2] + feature.height / 2))))
@@ -87,9 +92,10 @@ function getScore(feature, intImg::Array)#function getScore(feature::HaarLikeObj
         # bottom right area
         fourth = IntegralImage.sumRegion(intImg, (Int(round(feature.topLeft[1] + feature.width / 2)), Int(round(feature.topLeft[2] + feature.height / 2))), feature.bottomRight)
         score = first - second - third + fourth
+        mycount = 5
     end
 
-    return score
+    return score, mycount
 end
@@ -107,38 +113,9 @@ function getVote(feature, intImg::AbstractArray)#function getVote(feature::HaarL
     [type: Integer]
     =#
 
-    score = getScore(feature, intImg)
-
-
-    return (feature.weight * score) < (feature.polarity * feature.threshold) ? 1 : -1
-end
-
+    score = getScore(feature, intImg)[1] # we only care about score here
 
-function getFacelikeness(feature, intImg::AbstractArray)
-    #=
-    Get facelikeness for a given feature.
-
-    parameter `feature`: given Haar-like feature (parameterised replacement of Python's `self`) [type: HaarLikeObject]
-    parameter `intImg`: Integral image array [type: Abstract Array]
-
-    return `score`: Score for given feature [type: Float]
-    =#
-
-    score = 0
-
-    if feature.featureType == FeatureTypes[1] # two vertical
-        score += feature.weight * getVote(feature, intImg)
-    elseif feature.featureType == FeatureTypes[2] # two horizontal
-        score += feature.weight * getVote(feature, intImg)
-    elseif feature.featureType == FeatureTypes[3] # three horizontal
-        score += feature.weight * getVote(feature, intImg)
-    elseif feature.featureType == FeatureTypes[4] # three vertical
-        score += feature.weight * getVote(feature, intImg)
-    elseif feature.featureType == FeatureTypes[5] # four
-        score += feature.weight * getVote(feature, intImg)
-    end
-
-    return score
+    return (feature.weight * score) < (feature.polarity * feature.threshold) ? 1 : -1
 end
diff --git a/src/Utils.jl b/src/Utils.jl
index a896e8854..d232e9485 100755
--- a/src/Utils.jl
+++ b/src/Utils.jl
@@ -15,7 +15,7 @@ using ImageDraw: draw!, Polygon, Point
 using .HaarLikeFeature: FeatureTypes, getVote, getScore ,HaarLikeObject
 using .IntegralImage: toIntegralImage
 
-export displaymatrix, notifyUser, loadImages, ensembleVoteAll, reconstruct, getRandomImage, generateValidationImage #, getImageMatrix, ensembleVote
+export displaymatrix, notifyUser, loadImages, ensembleVoteAll, getFaceness, reconstruct, getRandomImage, generateValidationImage #, getImageMatrix, ensembleVote
 
 
 function displaymatrix(M::AbstractArray)
@@ -84,6 +84,11 @@ function ensembleVote(intImg::AbstractArray, classifiers::AbstractArray)
     0 otherwise [type: Integer]
     =#
 
+
+    # evidence = sum([max(getVote(c[1], image), 0.) * c[2] for c in classifiers])
+    # weightedSum = sum([c[2] for c in classifiers])
+    # return evidence >= (weightedSum / 2) ? 1 : -1
+
     return sum([HaarLikeFeature.getVote(c, intImg) for c in classifiers]) >= 0 ? 1 : 0
 end
@@ -105,6 +110,40 @@ function ensembleVoteAll(intImgs::AbstractArray, classifiers::AbstractArray)
 end
 
 
+function getFaceness(feature, intImg::AbstractArray)
+    #=
+    Get facelikeness for a given feature.
+
+    parameter `feature`: given Haar-like feature (parameterised replacement of Python's `self`) [type: HaarLikeObject]
+    parameter `intImg`: Integral image array [type: Abstract Array]
+
+    return `score`: Score for given feature [type: Float]
+    =#
+
+    score, faceness = HaarLikeFeature.getScore(feature, intImg)
+
+    return (feature.weight * score) < (feature.polarity * feature.threshold) ? faceness : 0
+
+    # score = 0
+    # # weightedScore(feature, intImg::AbstractArray) = getScore(feature, intImg)
+    # return getVote(feature, intImg)
+    #
+    # if feature.featureType == FeatureTypes[1] # two vertical
+    #     score += weightedScore(feature, intImg)
+    # elseif feature.featureType == FeatureTypes[2] # two horizontal
+    #     score += weightedScore(feature, intImg)
+    # elseif feature.featureType == FeatureTypes[3] # three horizontal
+    #     score += weightedScore(feature, intImg)
+    # elseif feature.featureType == FeatureTypes[4] # three vertical
+    #     score += weightedScore(feature, intImg)
+    # elseif feature.featureType == FeatureTypes[5] # four
+    #     score += weightedScore(feature, intImg)
+    # end
+    #
+    # return score
+end
+
+
 function reconstruct(classifiers::AbstractArray, imgSize::Tuple)
     #=
     Creates an image by putting all given classifiers on top of each other producing an archetype of the learned class of object.
@@ -374,6 +413,10 @@ function generateValidationImage(imagePath::AbstractString, classifiers::Abstrac
         save(joinpath(homedir(), "Desktop", "validation.png"), draw!(load(imagePath), Polygon([Point(boxDimensions[1]), Point(boxDimensions[2]), Point(boxDimensions[3]), Point(boxDimensions[4])])))
     end
 
+
+    # with open('classifiers_' + str(T) + '_' + hex(random.getrandbits(16)) + '.pckl', 'wb') as file:
+    #     pickle.dump(classifiers, file)
+
     # box = Polygon([Point(boxDimensions[1]), Point(boxDimensions[2]), Point(boxDimensions[3]), Point(boxDimensions[4])])
     # return save(joinpath(homedir(), "Desktop", "validation.png"), draw!(load(imagePath), box))
diff --git a/test/runtests.jl b/test/runtests.jl
index 184d1b6f8..ceca08c05 100755
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -40,3 +40,5 @@ arr = rand(Int, 100, 100)
 # Utils.jl
 
 # @test
+
+println(toIntegralImage([1 2 3; 4 5 6; 7 8 9]))