Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Upgrade to Gradle 6.8.3 and various minor fixes #724

Merged
merged 3 commits into from
Apr 23, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion README.adoc
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ Execute a normal build with gradle:
Upon success open `index.html` to check your changes.

----
firefox build/asciidoc/html5/index.html
firefox build/asciidoc/index.html
----

Do not forget to follow the naming and capitalization guidelines at the end of this README.
Expand Down
58 changes: 24 additions & 34 deletions build.gradle
Original file line number Diff line number Diff line change
@@ -1,30 +1,24 @@
import com.github.jk1.license.render.*
import com.github.jk1.license.render.InventoryHtmlReportRenderer
import org.aim42.htmlsanitycheck.check.*

buildscript {
repositories {
jcenter()
maven {
url "https://plugins.gradle.org/m2/"
}
}

dependencies {
classpath 'de.undercouch:gradle-download-task:3.1.2'
classpath 'org.asciidoctor:asciidoctor-gradle-plugin:1.5.9.2'
classpath 'xalan:xalan:2.7.2'
classpath 'gradle.plugin.org.aim42:htmlSanityCheck:1.1.3'
classpath 'com.github.jk1:gradle-license-report:1.7'
}
}

plugins {
id "de.undercouch.download" version "4.1.1"
id 'com.github.jk1.dependency-license-report' version '1.16'
id 'org.asciidoctor.jvm.convert' version '3.3.2'
id "org.aim42.htmlSanityCheck" version "1.1.3"
id 'java'
id 'maven-publish'
}

apply plugin: 'de.undercouch.download'
apply plugin: 'java'
apply plugin: 'maven'
apply plugin: 'org.asciidoctor.convert'
apply plugin: 'org.aim42.htmlSanityCheck'
apply plugin: 'com.github.jk1.dependency-license-report'
repositories {
jcenter()
}

asciidoctor {
sourceDir file("$projectDir/src/docs")
Expand All @@ -39,7 +33,6 @@ asciidoctor {
include 'admin/AdminTutorials.adoc'
include 'admin/Catalog.adoc'
include 'index.adoc'

}
resources {
from("$projectDir/src/docs/") {
Expand All @@ -49,36 +42,35 @@ asciidoctor {
include 'highlight/**'
}
}
baseDirFollowsSourceFile()
}

asciidoctorj {
version = '1.5.3'
version = '2.4.3'
}

htmlSanityCheck {
sourceDir = new File( "$buildDir/asciidoc/html5" )
sourceDir = new File( "$buildDir/asciidoc" )

sourceDocuments = fileTree(sourceDir) {
include "**/*.html"
exclude "user/schedulerjob.html"
}

// where to put results of sanityChecks...
checkingResultsDir = new File( "$buildDir/report/htmlchecks" )
checkingResultsDir = new File("$buildDir/report/htmlchecks")

checkerClasses = [BrokenCrossReferencesChecker, BrokenHttpLinksChecker, DuplicateIdChecker, ImageMapChecker, MissingAltInImageTagsChecker, MissingImageFilesChecker]

// fail build on errors?
failOnErrors = false

}

licenseReport {
configurations = ['asciidoctor']
renderers = [new InventoryHtmlReportRenderer()]
}


project.ext.branch = new File("$projectDir/src/docs/version-conf.js").text.split("'")[1]
if (project.ext.branch.endsWith("-SNAPSHOT")) {
project.ext.branch = 'master'
Expand All @@ -87,31 +79,29 @@ if (project.ext.branch.endsWith("-SNAPSHOT")) {
def getDate() {
def currentDate = new Date()
def formattedDateString = currentDate.format('yyyy-MM-dd')
if(project.ext.branch == 'master'){

if (project.ext.branch == 'master') {
formattedDateString = currentDate.format('yyyy-MM-dd HH:mm:ss')
}

return formattedDateString
}

task copyDocs(type: Copy) {
def dateString = getDate()
from ("$projectDir/src/docs/") {
from("$projectDir/src/docs/") {
include 'version-conf.js'
filter { line ->
line.replace('date: \'\'', 'date: \''+dateString+'\'')
line.replace('date: \'\'', 'date: \'' + dateString + '\'')
}
}
into "$buildDir/asciidoc/html5"
into "$buildDir/asciidoc"
}

task removeProperties(type: Delete) {
delete fileTree(dir: "$projectDir/src/docs/admin/references/properties/")
}

import de.undercouch.gradle.tasks.download.Download

task downloadRmProperties(type: Download) {
src "https://raw.githubusercontent.com/ow2-proactive/scheduling/${project.branch}/config/rm/settings.ini"
dest file("$projectDir/src/docs/admin/references/properties/rm.properties")
Expand All @@ -136,11 +126,11 @@ task xsdDoc(type: JavaExec) {
println file("$projectDir/src/xsd/schedulerjob.xsd").absoluteFile

inputs.files file("$projectDir/src/xsd/schedulerjob.xsd"), file("$projectDir/src/xsd/xs3p.xsl")
outputs.files file("$buildDir/html5/user/schedulerjob.html")
outputs.files file("$buildDir/asciidoc/user/schedulerjob.html")

classpath buildscript.configurations.classpath
main 'org.apache.xalan.xslt.Process'
args '-IN', 'src/xsd/schedulerjob.xsd', '-XSL', 'src/xsd/xs3p.xsl', '-OUT', "$buildDir/asciidoc/html5/user/schedulerjob.html"
args '-IN', 'src/xsd/schedulerjob.xsd', '-XSL', 'src/xsd/xs3p.xsl', '-OUT', "$buildDir/asciidoc/user/schedulerjob.html"
}

copyDocs.dependsOn removeProperties
Expand Down
2 changes: 1 addition & 1 deletion gradle/wrapper/gradle-wrapper.properties
Original file line number Diff line number Diff line change
Expand Up @@ -3,4 +3,4 @@ distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-3.3-bin.zip
distributionUrl=https\://services.gradle.org/distributions/gradle-6.8.3-bin.zip
33 changes: 25 additions & 8 deletions src/docs/PML/PMLUserGuide.adoc
Original file line number Diff line number Diff line change
Expand Up @@ -1005,46 +1005,63 @@ Execute the workflow by setting the different workflow's variables as described
[cols="2,5,2"]
|===
|*Variable name* | *Description* | *Type*

3+^|*Workflow variables*

| `MODEL_SERVICE_INSTANCE_NAME`
| Service instance name.
| String (default="maas_dl-${PA_JOB_ID}").

| `MODEL_SERVICE_PROXIFIED`
| Allows access to the endpoint through an Http(s) Proxy.
| Boolean (default=False).

| `MODEL_SERVICE_ENTRYPOINT`
| This entry script starts the service and defines the different functions to deploy the model, scores the prediction requests based on the deployed model, and returns the results. This script is specific to your model. This file should be stored in the Catalog under the `model_as_service_resources` bucket. More information about this file can be found in the <<_customize_the_service>> section.
| String (default="dl_service").

| `MODEL_SERVICE_YAML_FILE`
| A YAML file that describes the OpenAPI Specification ver. 2 (known as Swagger Spec) of the service. This file should be stored in the catalog under the `model_as_service_resources` bucket. More information about the structure of this file can be found in the section <<_customize_the_service>>.
| String (default="dl_service-api").

| `MODEL_SERVICE_USER_NAME`
| A valid user name having the needed privileges to execute this action.
| String (default="user").

| `MODEL_SERVICE_NODE_NAME`
| The name of the node where the service will be deployed. If empty, the service will be deployed on an available node selected randomly.
|

3+^|*Task variables*

| `SERVICE_ID`
| The name of the service. Please keep the default value for this variable.
| String (default="MaaS_DL")

| `INSTANCE_NAME`
| The name of the service that will be deployed.
| String (default="$MODEL_SERVICE_INSTANCE_NAME")

| `ENGINE`
| Container engine.
| String (default="$CONTAINER_PLATFORM")

| `PROXIFIED`
| It takes by default the value of `MODEL_SERVICE_PROXIFIED` workflow variable.
| String (default="$MODEL_SERVICE_PROXIFIED")

| `PYTHON_ENTRYPOINT`
| It takes by default the value of `MODEL_SERVICE_ENTRYPOINT` workflow variable.
| String (default="$MODEL_SERVICE_ENTRYPOINT")

| `YAML_FILE`
| It takes by default the value of `MODEL_SERVICE_YAML_FILE` workflow variable.
| String (default="$MODEL_SERVICE_YAML_FILE")

| `USER_NAME`
| It takes by default the value of `MODEL_SERVICE_USER_NAME` workflow variable.
| String (default="$MODEL_SERVICE_USER_NAME")

| `NODE_NAME`
| It takes by default the value of `MODEL_SERVICE_NODE_NAME` workflow variable.
| String (default="$MODEL_SERVICE_NODE_NAME")
Expand Down Expand Up @@ -2338,7 +2355,7 @@ NOTE: Instead of training a model from scratch, a pre-trained sentiment analysis

*Train_Image_Classification:* trains a model to classify images from ants and bees.

*Train_Image_Segmentation:* trains a segmentation model using SegNet network on http://www.robots.ox.ac.uk/~vgg/data/pets/[Oxford-IIIT Pet Dataset^].
*Train_Image_Segmentation:* trains a segmentation model using SegNet network on https://www.robots.ox.ac.uk/~vgg/data/pets/[Oxford-IIIT Pet Dataset^].

*Train_Image_Object_Detection:* trains objects using YOLOv3 model on COCO dataset proposed by Microsoft Research.

Expand All @@ -2354,7 +2371,7 @@ This section presents custom AI workflows using tasks available on the `deep-lea

*Fake_Celebrity_Faces_Generation:* generates a wild diversity of fake faces using a GAN model that was trained based on thousands of real celebrity photos. The pre-trained GAN model is available on this https://s3.eu-west-2.amazonaws.com/activeeon-public/models/Epoch+018.pt[link^].

*Image_Segmentation:* predicts a segmentation model using SegNet network on http://www.robots.ox.ac.uk/~vgg/data/pets/[Oxford-IIIT Pet Dataset^]. The pre-trained image segmentation model is available on this https://s3.eu-west-2.amazonaws.com/activeeon-public/models/model_segnet.zip[link^].
*Image_Segmentation:* predicts a segmentation model using SegNet network on https://www.robots.ox.ac.uk/~vgg/data/pets/[Oxford-IIIT Pet Dataset^]. The pre-trained image segmentation model is available on this https://s3.eu-west-2.amazonaws.com/activeeon-public/models/model_segnet.zip[link^].

*Image_Object_Detection:* detects objects using a pre-trained YOLOv3 model on COCO dataset proposed by Microsoft Research. The pre-trained model is available on this https://s3.eu-west-2.amazonaws.com/activeeon-public/models/yolo3_coco.zip[link^].

Expand Down Expand Up @@ -3702,7 +3719,7 @@ NOTE: You can use RGB images in JPG format (Images folder) and the groundtruth a

NOTE: You can find an example of the organization of the folders at: https://s3.eu-west-2.amazonaws.com/activeeon-public/datasets/oxford.zip

. _Object Detection Dataset:_ Two folders are demanded: the first folder should contain the RGB images in JPG format and another folder should contain its corresponding anotations in XML format using http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html[PASCAL VOC^] format or TXT format using COCO format (http://cocodataset.org/#home). The RGB images and annotations should be organized as follows:
. _Object Detection Dataset:_ Two folders are required: the first folder should contain the RGB images in JPG format and another folder should contain its corresponding annotations in XML format using http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html[PASCAL VOC^] format or TXT format using COCO format (https://cocodataset.org/#home). The RGB images and annotations should be organized as follows:

image::object_detection.png[150,150]

Expand Down Expand Up @@ -3920,15 +3937,15 @@ NOTE: Torchtext were used to preprocess and load the text input. More informatio
| Boolean (default=True)
|===

NOTE: PyTorch is used to build the model architecture based on https://pytorch.org/docs/stable/torchvision/models.html[AlexNet^].
NOTE: PyTorch is used to build the model architecture based on https://pytorch.org/vision/stable/models.html[AlexNet^].

===== DenseNet-161

*Task Overview:* Densely Connected Convolutional Network (DenseNet) is a network architecture where each layer is directly connected to every other layer in a feed-forward fashion (within each dense block).

*Usage:* It should be connected to <<Train_Image_Classification_Model>>.

NOTE: PyTorch is used to build the model architecture based on https://pytorch.org/docs/stable/torchvision/models.html[DenseNet-161^].
NOTE: PyTorch is used to build the model architecture based on https://pytorch.org/vision/stable/models.html[DenseNet-161^].

.DenseNet-161_Task variables
[cols="2,5,2"]
Expand All @@ -3948,7 +3965,7 @@ NOTE: PyTorch is used to build the model architecture based on https://pytorch.o

*Usage:* It should be connected to <<Train_Image_Classification_Model>>.

NOTE: PyTorch is used to build the model architecture based on https://pytorch.org/docs/stable/torchvision/models.html[ResNet-18^].
NOTE: PyTorch is used to build the model architecture based on https://pytorch.org/vision/stable/models.html[ResNet-18^].

.ResNet-161_Task variables
[cols="2,5,2"]
Expand All @@ -3968,7 +3985,7 @@ NOTE: PyTorch is used to build the model architecture based on https://pytorch.o

*Usage:* It should be connected to <<Train_Image_Classification_Model>>.

NOTE: PyTorch is used to build the model architecture based on https://pytorch.org/docs/stable/torchvision/models.html[VGG-16^].
NOTE: PyTorch is used to build the model architecture based on https://pytorch.org/vision/stable/models.html[VGG-16^].

.VGG-16_Task variables
[cols="2,5,2"]
Expand Down Expand Up @@ -4154,7 +4171,7 @@ NOTE: PyTorch is used to build the model architecture based on https://github.co
| Boolean (default=True)
|===

NOTE: The default parameters of the YOLO network were set for the COCO dataset (http://cocodataset.org/#home). If you'd like to use another dataset, you probably need to change the default parameters.
NOTE: The default parameters of the YOLO network were set for the COCO dataset (https://cocodataset.org/#home). If you'd like to use another dataset, you probably need to change the default parameters.

==== Text Classification

Expand Down
4 changes: 2 additions & 2 deletions src/docs/PSA/references/reference.adoc
Original file line number Diff line number Diff line change
Expand Up @@ -833,7 +833,7 @@ The service is started using the following variables.
| Boolean
| `false`
| `PYTHON_ENTRYPOINT`
| This entry script starts the service and defines the different functions to deploy the model, scores the prediction requests based on the deployed model, and returns the results. This script is specific to your model. This file should be stored in the Catalog under the `model_as_service_resources` bucket. More information about this file can be found in the <<_customize_the_service>> section.
| This entry script starts the service and defines the different functions to deploy the model, scores the prediction requests based on the deployed model, and returns the results. This script is specific to your model. This file should be stored in the Catalog under the `model_as_service_resources` bucket. More information about this file can be found in the <<../PML/PMLUserGuide.html#_customize_the_service>> section.
| Yes
| String
| `dl_service`
Expand All @@ -853,7 +853,7 @@ The service is started using the following variables.
| Boolean
| `true`
| `YAML_FILE`
| A YAML file that describes the OpenAPI Specification ver. 2 (known as Swagger Spec) of the service. This file should be stored in the catalog under the `model_as_service_resources` bucket. More information about the structure of this file can be found in the section <<_customize_the_service>>.
| A YAML file that describes the OpenAPI Specification ver. 2 (known as Swagger Spec) of the service. This file should be stored in the catalog under the `model_as_service_resources` bucket. More information about the structure of this file can be found in the section <<../PML/PMLUserGuide.html#_customize_the_service>>.
| Yes
| String
| `dl_service-api`
Expand Down
2 changes: 1 addition & 1 deletion src/docs/admin/ProActiveAdminGuide.adoc
Original file line number Diff line number Diff line change
Expand Up @@ -105,7 +105,7 @@ The second way to start a ProActive Scheduler is to install it as a system servi

==== How to install ProActive on Windows

Under Windows, it is possible to use the https://nssm.cc/[nssm^] service manager tool to manage a running script as a service. You can configure nssm to absolve all responsibility for restarting it and let Windows take care of recovery actions.
Under Windows, it is possible to use the https://nssm.cc/[nssm^] service manager tool to manage a running script as a service. You can configure nssm to absolve all responsibility for restarting it and let Windows take care of recovery actions.
In our case, you need to provide to nssm the Path to this script `$PROACTIVE_HOME/bin/proactive-server.bat` to start ProActive as a service.

==== How to install ProActive on Linux
Expand Down