Skip to content

Commit

Permalink
Modified pool allocation logic to 1) not allocate all vms allowed
Browse files Browse the repository at this point in the history
by pool size at once and 2) consider vms in free pool first.
  • Loading branch information
Xiaolin Charlene Zang committed Aug 23, 2017
1 parent 75ca36d commit 93e60ad
Show file tree
Hide file tree
Showing 4 changed files with 39 additions and 5 deletions.
5 changes: 5 additions & 0 deletions config.template.py
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,8 @@ class Config:
NUM_THREADS = 20

# We have the option to reuse VMs or discard them after each use
# TODO(review): strongly suspect the code path for the False case
# is not working, after a failed experiment.
REUSE_VMS = True

# Worker waits this many seconds for functions waitvm, copyin (per
Expand Down Expand Up @@ -106,6 +108,9 @@ class Config:
# Default vm pool size
POOL_SIZE = 2

# Default increment step when enlarging vm pool
POOL_ALLOC_INCREMENT = 2

# Optionally log finer-grained timing information
LOG_TIMING = False

Expand Down
4 changes: 3 additions & 1 deletion jobManager.py
Original file line number Diff line number Diff line change
Expand Up @@ -94,8 +94,10 @@ def __manage(self):
# the worker if successful.
if Config.REUSE_VMS:
preVM = vm
self.log.info("_manage reuse vm %s" % preVM.id)
self.log.info("_manage use vm %s" % preVM.id)
else:
# TODO(review): strongly suspect this code path does not work.
# After setting REUSE_VMS to False, job submissions don't run.
preVM = self.preallocator.allocVM(job.vm.name)
self.log.info("_manage allocate vm %s" % preVM.id)
vmms = self.vmms[job.vm.vmms] # Create new vmms object
Expand Down
11 changes: 7 additions & 4 deletions jobQueue.py
Original file line number Diff line number Diff line change
Expand Up @@ -207,16 +207,19 @@ def getNextPendingJobReuse(self, target_id=None):
# if target_id is set, only interested in this id
if target_id and target_id != id:
continue
# Create a pool if necessary
if self.preallocator.poolSize(job.vm.name) == 0:
self.preallocator.update(job.vm, Config.POOL_SIZE)

# Create or enlarge a pool if there is no free vm to use and
# the limit for pool is not reached yet
if self.preallocator.freePoolSize(job.vm.name) == 0 and \
self.preallocator.poolSize(job.vm.name) < Config.POOL_SIZE:
self.preallocator.incrementPoolSize(job.vm, Config.POOL_ALLOC_INCREMENT)

# If the job hasn't been assigned to a worker yet, see if there
# is a free VM
if (job.isNotAssigned()):
vm = self.preallocator.allocVM(job.vm.name)
self.log.info("getNextPendingJobReuse alloc vm %s for %s" % (id, vm))
if vm:
self.log.info("getNextPendingJobReuse alloc vm %s to job %s" % (vm, id))
self.queueLock.release()
return (id, vm)

Expand Down
24 changes: 24 additions & 0 deletions preallocator.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,30 @@ def poolSize(self, vmName):
else:
return len(self.machines.get(vmName)[0])

def freePoolSize(self, vmName):
    """Return how many VMs are currently sitting in vmName's free pool.

    Intended for external callers.  A pool that has never been created
    is reported as empty (size 0) rather than raising.
    """
    if vmName not in self.machines.keys():
        return 0
    # machines entry layout: [total-machine list, free-machine queue]
    free_queue = self.machines.get(vmName)[1]
    return free_queue.qsize()

def incrementPoolSize(self, vm, delta):
    """Create the vm pool if needed and grow it by delta instances.

    Called by jobQueue when a job finds no free VM and the pool has not
    yet reached its configured limit.  VM creation is slow, so it is
    handed off to a background thread and this method returns at once.

    vm    -- template VM object; vm.name identifies the pool
    delta -- number of new instances to allocate
    """
    self.lock.acquire()
    try:
        if vm.name not in self.machines.keys():
            # First request for this VM type: register an empty pool.
            self.machines.set(vm.name, [[], TangoQueue(vm.name)])
            # see comments in jobManager.py for the same call
            self.machines.get(vm.name)[1].make_empty()
            self.log.debug("Creating empty pool of %s instances" % (vm.name))
    finally:
        # Always release, even if the dictionary/queue backend raises.
        self.lock.release()

    self.log.debug("incrementPoolSize: add %d new %s instances" % (delta, vm.name))
    # Bug fix: the original passed target=self.__create(vm, delta), which
    # invokes __create synchronously on the caller's thread and hands its
    # return value to Thread as the target.  Pass the callable plus args so
    # the allocation actually runs on the background thread.
    threading.Thread(target=self.__create, args=(vm, delta)).start()

def update(self, vm, num):
""" update - Updates the number of machines of a certain type
to be preallocated.
Expand Down

0 comments on commit 93e60ad

Please sign in to comment.