Merge pull request #1 from jenkinsci/master
Update from master
inidona authored Sep 3, 2020
2 parents 024a859 + c78197f commit 7c4c2f7
Showing 78 changed files with 689 additions and 375 deletions.
17 changes: 10 additions & 7 deletions examples/dind.groovy
@@ -10,26 +10,29 @@ podTemplate(yaml: '''
apiVersion: v1
kind: Pod
spec:
  volumes:
  - name: docker-socket
    emptyDir: {}
  containers:
  - name: docker
    image: docker:19.03.1
    command:
    - sleep
    args:
    - 99d
    env:
    - name: DOCKER_HOST
      value: tcp://localhost:2375
    volumeMounts:
    - name: docker-socket
      mountPath: /var/run
  - name: docker-daemon
    image: docker:19.03.1-dind
    securityContext:
      privileged: true
    env:
    - name: DOCKER_TLS_CERTDIR
      value: ""
    volumeMounts:
    - name: docker-socket
      mountPath: /var/run
''') {
    node(POD_LABEL) {
        git 'https://github.com/jenkinsci/docker-jnlp-slave.git'
        writeFile file: 'Dockerfile', text: 'FROM scratch'
        container('docker') {
            sh 'docker version && DOCKER_BUILDKIT=1 docker build --progress plain -t testing .'
        }
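How the example wires up: the docker container holds only the Docker CLI and points DOCKER_HOST at tcp://localhost:2375, while the privileged docker-daemon sidecar runs the daemon with TLS disabled (an empty DOCKER_TLS_CERTDIR keeps it listening on plain TCP 2375). Both containers mount the same docker-socket emptyDir at /var/run, so the client could also reach the daemon through a shared socket. A rough equivalent using the plugin's Groovy DSL instead of inline YAML, offered only as a sketch on the assumption that the containerTemplate, emptyDirVolume and envVar symbols are available in this plugin version:

// Sketch only: the same docker client + dind daemon pairing via the pod template DSL.
// Assumes containerTemplate/emptyDirVolume/envVar are provided by this plugin version.
podTemplate(
    containers: [
        // CLI-only container; talks to the sidecar daemon over localhost:2375
        containerTemplate(name: 'docker', image: 'docker:19.03.1',
                command: 'sleep', args: '99d',
                envVars: [envVar(key: 'DOCKER_HOST', value: 'tcp://localhost:2375')]),
        // privileged daemon; the empty DOCKER_TLS_CERTDIR keeps it on plain TCP 2375
        containerTemplate(name: 'docker-daemon', image: 'docker:19.03.1-dind',
                privileged: true,
                envVars: [envVar(key: 'DOCKER_TLS_CERTDIR', value: '')])
    ],
    // a single emptyDir mounted at /var/run in every container, mirroring the docker-socket volume above
    volumes: [emptyDirVolume(mountPath: '/var/run', memory: false)]
) {
    node(POD_LABEL) {
        container('docker') {
            sh 'docker version'
        }
    }
}

The inline YAML form used by the example is the safer choice when a field is not exposed by the DSL; the DSL form simply reads more compactly for a two-container setup like this one.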
35 changes: 22 additions & 13 deletions pom.xml
@@ -3,7 +3,7 @@
<parent>
<groupId>org.jenkins-ci.plugins</groupId>
<artifactId>plugin</artifactId>
<version>4.3</version>
<version>4.7</version>
<relativePath />
</parent>

@@ -39,7 +39,7 @@
</developers>

<properties>
<revision>1.26.5</revision>
<revision>1.27.1</revision>
<changelist>-SNAPSHOT</changelist>
<!-- in minikube
minikube ip | sed -e 's/\([0-9]*\.[0-9]*\.[0-9]*\).*/\1.1/' -->
@@ -49,7 +49,8 @@
<java.level>8</java.level>
<jenkins.version>2.222.4</jenkins.version>
<no-test-jar>false</no-test-jar>
<pipeline-model-definition.version>1.6.0</pipeline-model-definition.version>
<pipeline-model-definition.version>1.7.2</pipeline-model-definition.version>
<workflow-cps.version>2.81</workflow-cps.version>
<useBeta>true</useBeta>
</properties>

@@ -62,7 +63,7 @@
<dependency>
<groupId>org.jenkins-ci.plugins</groupId>
<artifactId>jackson2-api</artifactId>
<version>2.11.0</version>
<version>2.11.2</version>
</dependency>
<dependency>
<groupId>org.jenkins-ci.plugins</groupId>
@@ -114,9 +115,17 @@
<version>${pipeline-model-definition.version}</version>
<optional>true</optional>
</dependency>
<dependency> <!-- DeclarativeGeneratorTester -->
<groupId>org.jenkinsci.plugins</groupId>
<artifactId>pipeline-model-definition</artifactId>
<version>${pipeline-model-definition.version}</version>
<classifier>tests</classifier>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.jenkins-ci.plugins.workflow</groupId>
<artifactId>workflow-cps</artifactId>
<version>${workflow-cps.version}</version>
<optional>true</optional>
</dependency>

@@ -151,6 +160,7 @@
<dependency> <!-- SnippetizerTest -->
<groupId>org.jenkins-ci.plugins.workflow</groupId>
<artifactId>workflow-cps</artifactId>
<version>${workflow-cps.version}</version>
<classifier>tests</classifier>
<scope>test</scope>
</dependency>
@@ -180,7 +190,7 @@
<dependency>
<groupId>org.jenkins-ci.plugins</groupId>
<artifactId>ssh-agent</artifactId>
<version>1.19</version>
<version>1.20</version>
<scope>test</scope>
<exclusions>
<!-- conflict with sshd module -->
@@ -223,6 +233,12 @@
<artifactId>test-harness</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.jenkins-ci.plugins</groupId>
<artifactId>docker-workflow</artifactId>
<version>1.23</version>
<scope>test</scope>
</dependency>
<dependency>
<!-- Required to test run src/main/resources/org/csanchez/jenkins/plugins/kubernetes/pipeline/samples/maven.groovy -->
<groupId>org.jenkins-ci.plugins</groupId>
@@ -269,17 +285,10 @@
<hudson.slaves.NodeProvisioner.initialDelay>0</hudson.slaves.NodeProvisioner.initialDelay>
<hudson.slaves.NodeProvisioner.MARGIN>50</hudson.slaves.NodeProvisioner.MARGIN>
<hudson.slaves.NodeProvisioner.MARGIN0>0.85</hudson.slaves.NodeProvisioner.MARGIN0>
<jenkins.host.address>${jenkins.host.address}</jenkins.host.address>
</systemProperties>
</configuration>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>versions-maven-plugin</artifactId>
<version>2.7</version>
<configuration>
<rulesUri>file://${basedir}/src/test/resources/rules.xml</rulesUri>
</configuration>
</plugin>
</plugins>
</build>

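The jenkins.host.address system property introduced in the systemProperties block above pairs with the development-mode initializer added to KubernetesCloud further down in this commit (hpiRunInit), which reads that property to auto-register a local kubernetes cloud. Dropping the versions-maven-plugin configuration only removes the custom rulesUri used by the versions:* goals; it does not change the build itself.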
NoDelayProvisionerStrategy.java
@@ -6,12 +6,14 @@
import hudson.slaves.CloudProvisioningListener;
import hudson.slaves.NodeProvisioner;
import jenkins.model.Jenkins;
import jenkins.util.Timer;
import org.csanchez.jenkins.plugins.kubernetes.KubernetesCloud;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;

@@ -28,12 +30,12 @@
public class NoDelayProvisionerStrategy extends NodeProvisioner.Strategy {

private static final Logger LOGGER = Logger.getLogger(NoDelayProvisionerStrategy.class.getName());
private static final boolean DISABLE_NODELAY_PROVISING = Boolean.valueOf(
private static final boolean DISABLE_NO_DELAY_PROVISIONING = Boolean.parseBoolean(
System.getProperty("io.jenkins.plugins.kubernetes.disableNoDelayProvisioning"));

@Override
public NodeProvisioner.StrategyDecision apply(NodeProvisioner.StrategyState strategyState) {
if (DISABLE_NODELAY_PROVISING) {
if (DISABLE_NO_DELAY_PROVISIONING) {
LOGGER.log(Level.FINE, "Provisioning not complete, NoDelayProvisionerStrategy is disabled");
return NodeProvisioner.StrategyDecision.CONSULT_REMAINING_STRATEGIES;
}
@@ -49,6 +51,8 @@ public NodeProvisioner.StrategyDecision apply(NodeProvisioner.StrategyState stra
int currentDemand = snapshot.getQueueLength();
LOGGER.log(Level.FINE, "Available capacity={0}, currentDemand={1}",
new Object[]{availableCapacity, currentDemand});
int totalPlannedNodes = 0;
boolean canProvision = false;
if (availableCapacity < currentDemand) {
List<Cloud> jenkinsClouds = new ArrayList<>(Jenkins.get().clouds);
Collections.shuffle(jenkinsClouds);
@@ -61,22 +65,29 @@ public NodeProvisioner.StrategyDecision apply(NodeProvisioner.StrategyState stra
continue;
}
}
canProvision = true;
Collection<NodeProvisioner.PlannedNode> plannedNodes = cloud.provision(label, workloadToProvision);
LOGGER.log(Level.FINE, "Planned {0} new nodes", plannedNodes.size());
fireOnStarted(cloud, strategyState.getLabel(), plannedNodes);
strategyState.recordPendingLaunches(plannedNodes);
availableCapacity += plannedNodes.size();
totalPlannedNodes += plannedNodes.size();
LOGGER.log(Level.FINE, "After provisioning, available capacity={0}, currentDemand={1}", new Object[]{availableCapacity, currentDemand});
break;
}
}
if (availableCapacity >= currentDemand) {
LOGGER.log(Level.FINE, "Provisioning completed");
return NodeProvisioner.StrategyDecision.PROVISIONING_COMPLETED;
if (currentDemand - availableCapacity <= 0) {
LOGGER.log(Level.FINE, String.format("Provisioning completed for label: [%s]", label));
} else {
LOGGER.log(Level.FINE, "Provisioning not complete, consulting remaining strategies");
return NodeProvisioner.StrategyDecision.CONSULT_REMAINING_STRATEGIES;
if (!canProvision) {
return NodeProvisioner.StrategyDecision.CONSULT_REMAINING_STRATEGIES;
}
if (totalPlannedNodes > 0 && label != null) {
LOGGER.log(Level.FINE, "Suggesting NodeProvisioner review");
Timer.get().schedule(label.nodeProvisioner::suggestReviewNow, 1L, TimeUnit.SECONDS);
}
}
return NodeProvisioner.StrategyDecision.PROVISIONING_COMPLETED;
}

private static void fireOnStarted(final Cloud cloud, final Label label,
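NoDelayProvisionerStrategy asks a matching Kubernetes cloud for agents as soon as queued demand exceeds available capacity, instead of waiting out the core NodeProvisioner's statistical delay. Besides renaming the misspelled constant (DISABLE_NODELAY_PROVISING becomes DISABLE_NO_DELAY_PROVISIONING), the change records how many nodes were actually planned and, when at least one was planned for a concrete label, schedules label.nodeProvisioner::suggestReviewNow one second later via Timer.get(), prompting a follow-up provisioning review without waiting for the next periodic cycle. The strategy can still be turned off with the io.jenkins.plugins.kubernetes.disableNoDelayProvisioning system property; since the flag is read once when the class loads, it has to be set at controller JVM startup, for example with -Dio.jenkins.plugins.kubernetes.disableNoDelayProvisioning=true.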
KubernetesCloud.java
@@ -19,15 +19,14 @@
import java.util.Set;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;

import javax.annotation.CheckForNull;
import javax.annotation.Nonnull;
import javax.servlet.ServletException;

import hudson.Main;
import hudson.model.ItemGroup;
import hudson.util.XStream2;
import io.fabric8.openshift.client.OpenShiftClient;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.lang.StringUtils;
import org.csanchez.jenkins.plugins.kubernetes.pipeline.PodTemplateMap;
@@ -63,7 +62,6 @@
import hudson.slaves.NodeProvisioner;
import hudson.util.FormValidation;
import hudson.util.ListBoxModel;
import io.fabric8.kubernetes.api.model.Pod;
import io.fabric8.kubernetes.api.model.PodList;
import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClientException;
@@ -141,6 +139,7 @@ public class KubernetesCloud extends Cloud {
@DataBoundConstructor
public KubernetesCloud(String name) {
super(name);
setMaxRequestsPerHost(DEFAULT_MAX_REQUESTS_PER_HOST);
}

/**
@@ -540,24 +539,25 @@ public synchronized Collection<NodeProvisioner.PlannedNode> provision(@CheckForN
int toBeProvisioned = Math.max(0, excessWorkload - allInProvisioning.size());
LOGGER.log(Level.INFO, "Excess workload after pending Kubernetes agents: {0}", toBeProvisioned);

List<NodeProvisioner.PlannedNode> r = new ArrayList<NodeProvisioner.PlannedNode>();
List<NodeProvisioner.PlannedNode> plannedNodes = new ArrayList<>();

for (PodTemplate t: getTemplatesFor(label)) {
LOGGER.log(Level.INFO, "Template for label {0}: {1}", new Object[] { label, t.getName() });
for (PodTemplate podTemplate: getTemplatesFor(label)) {
LOGGER.log(Level.INFO, "Template for label {0}: {1}", new Object[] { label, podTemplate.getName() });
for (int i = 0; i < toBeProvisioned; i++) {
if (!addProvisionedSlave(t, label, i)) {
// Check concurrency limits
if (!addProvisionedSlave(podTemplate, label, i + allInProvisioning.size())) {
break;
}
r.add(PlannedNodeBuilderFactory.createInstance().cloud(this).template(t).label(label).build());
plannedNodes.add(PlannedNodeBuilderFactory.createInstance().cloud(this).template(podTemplate).label(label).build());
}
LOGGER.log(Level.FINEST, "Planned Kubernetes agents for template \"{0}\": {1}",
new Object[] { t.getName(), r.size() });
if (r.size() > 0) {
// Already found a matching template
return r;
new Object[] { podTemplate.getName(), plannedNodes.size() });
if (plannedNodes.size() > 0) {
// Return early when a matching template was found and nodes were planned
return plannedNodes;
}
}
return r;
return plannedNodes;
} catch (KubernetesClientException e) {
Throwable cause = e.getCause();
if (cause instanceof SocketTimeoutException || cause instanceof ConnectException || cause instanceof UnknownHostException) {
@@ -579,9 +579,9 @@ public synchronized Collection<NodeProvisioner.PlannedNode> provision(@CheckForN
* Check not too many already running.
*
*/
private boolean addProvisionedSlave(@Nonnull PodTemplate template, @CheckForNull Label label, int scheduledCount) throws Exception {
private boolean addProvisionedSlave(@Nonnull PodTemplate template, @CheckForNull Label label, int numProvisioned) throws Exception {
if (containerCap == 0) {
return true;
return false;
}

KubernetesClient client = connect();
Expand All @@ -592,23 +592,27 @@ private boolean addProvisionedSlave(@Nonnull PodTemplate template, @CheckForNull
templateNamespace = client.getNamespace();
}

// check overall concurrency limit using the default label(s) on all templates
Map<String, String> podLabels = getPodLabelsMap();
List<Pod> allActiveSlavePods = getActiveSlavePods(client, templateNamespace, podLabels);
if (allActiveSlavePods != null && containerCap <= allActiveSlavePods.size() + scheduledCount) {
long numRunningOrPending = getNumActiveSlavePods(client, templateNamespace, podLabels);
if (numRunningOrPending + numProvisioned >= containerCap) {
LOGGER.log(Level.INFO,
"Maximum number of concurrently running agent pods ({0}) reached for Kubernetes Cloud {4}, not provisioning: {1} running or pending in namespace {2} with Kubernetes labels {3}",
new Object[] { containerCap, allActiveSlavePods.size() + scheduledCount, templateNamespace, getLabels(), name });
"Maximum number of concurrently running agent pods ({0}) reached for Kubernetes Cloud {4}, " +
"not provisioning: {1} running or pending in namespace {2} with Kubernetes labels {3}",
new Object[] { containerCap, numRunningOrPending, templateNamespace, getLabels(), name });
return false;
}

Map<String, String> labelsMap = new HashMap<>(podLabels);
labelsMap.putAll(template.getLabelsMap());
List<Pod> activeTemplateSlavePods = getActiveSlavePods(client, templateNamespace, labelsMap);
if (activeTemplateSlavePods != null && allActiveSlavePods != null && template.getInstanceCap() <= activeTemplateSlavePods.size() + scheduledCount) {
// check template-level concurrency limit using template-level labels
Map<String, String> templateLabels = new HashMap<>(podLabels);
templateLabels.putAll(template.getLabelsMap());
numRunningOrPending = getNumActiveSlavePods(client, templateNamespace, podLabels);
if (numRunningOrPending + numProvisioned >= template.getInstanceCap()) {
LOGGER.log(Level.INFO,
"Maximum number of concurrently running agent pods ({0}) reached for template {1} in Kubernetes Cloud {6}, not provisioning: {2} running or pending in namespace {3} with label \"{4}\" and Kubernetes labels {5}",
new Object[] { template.getInstanceCap(), template.getName(), activeTemplateSlavePods.size() + scheduledCount,
templateNamespace, label == null ? "" : label.toString(), labelsMap, name });
"Maximum number of concurrently running agent pods ({0}) reached for template {1} in Kubernetes Cloud {6}, " +
"not provisioning: {2} running or pending in namespace {3} with label \"{4}\" and Kubernetes labels {5}",
new Object[] { template.getInstanceCap(), template.getName(), numRunningOrPending,
templateNamespace, label == null ? "" : label.toString(), templateLabels, name });
return false;
}
return true;
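A quick worked example of the renamed cap check: with containerCap = 10, seven agent pods running or pending for this cloud's labels and numProvisioned = 2 for pods already planned in this provisioning round, 7 + 2 >= 10 is false, so one more agent may still be planned; once the sum reaches the cap the method returns false and the provisioning loop stops for this template. Note also that containerCap == 0 now short-circuits to false, so a zero cap blocks provisioning outright where the previous return true allowed it.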
@@ -617,16 +621,15 @@ private boolean addProvisionedSlave(@Nonnull PodTemplate template, @CheckForNull
/**
* Query for running or pending pods
*/
private List<Pod> getActiveSlavePods(KubernetesClient client, String templateNamespace, Map<String, String> podLabels) {
private long getNumActiveSlavePods(KubernetesClient client, String templateNamespace, Map<String, String> podLabels) {
PodList slaveList = client.pods().inNamespace(templateNamespace).withLabels(podLabels).list();
List<Pod> activeSlavePods = null;
// JENKINS-53370 check for nulls
if (slaveList != null && slaveList.getItems() != null) {
activeSlavePods = slaveList.getItems().stream() //
return slaveList.getItems().stream() //
.filter(x -> x.getStatus().getPhase().toLowerCase().matches("(running|pending)"))
.collect(Collectors.toList());
.count();
}
return activeSlavePods;
return 0;
}

@Override
@@ -735,7 +738,10 @@ public boolean equals(Object o) {

@Override
public int hashCode() {
return Objects.hash(defaultsProviderTemplate, templates, serverUrl, serverCertificate, skipTlsVerify, addMasterProxyEnvVars, capOnlyOnAlivePods, namespace, jenkinsUrl, jenkinsTunnel, credentialsId, containerCap, retentionTimeout, connectTimeout, readTimeout, podLabels, usageRestricted, maxRequestsPerHost, podRetention);
return Objects.hash(defaultsProviderTemplate, templates, serverUrl, serverCertificate, skipTlsVerify,
addMasterProxyEnvVars, capOnlyOnAlivePods, namespace, jenkinsUrl, jenkinsTunnel, credentialsId,
containerCap, retentionTimeout, connectTimeout, readTimeout, podLabels, usageRestricted,
maxRequestsPerHost, podRetention);
}

public Integer getWaitForPodSec() {
@@ -990,4 +996,17 @@ public List<PodTemplate> getList(@Nonnull KubernetesCloud cloud) {
return cloud.getTemplates();
}
}

@Initializer(after = InitMilestone.SYSTEM_CONFIG_LOADED)
public static void hpiRunInit() {
if (Main.isDevelopmentMode) {
Jenkins jenkins = Jenkins.get();
String hostAddress = System.getProperty("jenkins.host.address");
if (hostAddress != null && jenkins.clouds.getAll(KubernetesCloud.class).isEmpty()) {
KubernetesCloud cloud = new KubernetesCloud("kubernetes");
cloud.setJenkinsUrl("http://" + hostAddress + ":8080/jenkins/");
jenkins.clouds.add(cloud);
}
}
}
}
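The new hpiRunInit initializer runs after SYSTEM_CONFIG_LOADED, but only in development mode (Main.isDevelopmentMode, as when the plugin is launched with mvn hpi:run). If the jenkins.host.address system property is set, apparently the same property the pom.xml change above forwards, and no KubernetesCloud is configured yet, it registers a cloud named kubernetes whose Jenkins URL is http://<host address>:8080/jenkins/, so agent pods in a local cluster can reach the development controller. A plausible invocation, assuming the property is picked up by the dev instance, would be mvn hpi:run -Djenkins.host.address=$(minikube ip | sed -e 's/\([0-9]*\.[0-9]*\.[0-9]*\).*/\1.1/'), matching the minikube hint kept in the pom.xml properties.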
