// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pooltask

import (
	"container/list"
	"sync"
	"time"

	"go.uber.org/atomic"
)

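// shard is the number of buckets the per-task stats are split into;
// sharding by task ID reduces lock contention between concurrent callers.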
const shard int = 8

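// getShardID maps a task ID to the shard that stores its stats.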
func getShardID(id uint64) uint64 {
	return id % uint64(shard)
}

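// tContainer wraps a TaskBox so that it can be stored in a task's stats list.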
type tContainer[T any, U any, C any, CT any, TF Context[CT]] struct {
	task *TaskBox[T, U, C, CT, TF]
}

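// meta holds the bookkeeping for a single task: its sub-task list, creation
// time, original concurrency, and the number of currently running sub-tasks.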
type meta struct {
	stats    *list.List
	createTS time.Time
	origin   int32
	running  int32
}

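// newStats creates the meta for a task registered with the given concurrency.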
func newStats(concurrency int32) *meta {
	s := &meta{
		createTS: time.Now(),
		stats:    list.New(),
		origin:   concurrency,
	}
	return s
}

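// getOriginConcurrency returns the concurrency the task was registered with.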
func (m *meta) getOriginConcurrency() int32 {
	return m.origin
}

// TaskStatusContainer is a container that can control or watch the pool.
type TaskStatusContainer[T any, U any, C any, CT any, TF Context[CT]] struct {
	stats map[uint64]*meta
	rw    sync.RWMutex
}

// TaskManager is a manager that can control or watch the pool.
type TaskManager[T any, U any, C any, CT any, TF Context[CT]] struct {
	task        []TaskStatusContainer[T, U, C, CT, TF] // sharded per-task stats
	running     atomic.Int32                           // total running sub-tasks across all tasks
	concurrency int32
}

// NewTaskManager creates a new pooltask manager.
func NewTaskManager[T any, U any, C any, CT any, TF Context[CT]](c int32) TaskManager[T, U, C, CT, TF] {
	task := make([]TaskStatusContainer[T, U, C, CT, TF], shard)
	for i := 0; i < shard; i++ {
		task[i] = TaskStatusContainer[T, U, C, CT, TF]{
			stats: make(map[uint64]*meta),
		}
	}
	return TaskManager[T, U, C, CT, TF]{
		task:        task,
		concurrency: c,
	}
}

// RegisterTask registers a task with the manager.
func (t *TaskManager[T, U, C, CT, TF]) RegisterTask(taskID uint64, concurrency int32) {
	id := getShardID(taskID)
	t.task[id].rw.Lock()
	t.task[id].stats[taskID] = newStats(concurrency)
	t.task[id].rw.Unlock()
}

// DeleteTask deletes a task from the manager.
func (t *TaskManager[T, U, C, CT, TF]) DeleteTask(taskID uint64) {
	shardID := getShardID(taskID)
	t.task[shardID].rw.Lock()
	delete(t.task[shardID].stats, taskID)
	t.task[shardID].rw.Unlock()
}

// hasTask reports whether the task is registered with the manager. It only
// reads the shard, so a read lock is sufficient.
func (t *TaskManager[T, U, C, CT, TF]) hasTask(taskID uint64) bool {
	shardID := getShardID(taskID)
	t.task[shardID].rw.RLock()
	defer t.task[shardID].rw.RUnlock()
	_, ok := t.task[shardID].stats[taskID]
	return ok
}

// AddSubTask adds a sub-task of the given task to the manager.
func (t *TaskManager[T, U, C, CT, TF]) AddSubTask(taskID uint64, task *TaskBox[T, U, C, CT, TF]) {
	shardID := getShardID(taskID)
	tc := tContainer[T, U, C, CT, TF]{
		task: task,
	}
	t.running.Inc() // total running sub-tasks across all tasks
	t.task[shardID].rw.Lock()
	t.task[shardID].stats[taskID].stats.PushBack(tc)
	t.task[shardID].stats[taskID].running++ // running sub-tasks in this task
	t.task[shardID].rw.Unlock()
}

// ExitSubTask marks one sub-task of the given task as finished and decreases
// the count of running sub-tasks.
func (t *TaskManager[T, U, C, CT, TF]) ExitSubTask(taskID uint64) {
	shardID := getShardID(taskID)
	t.running.Dec() // total running sub-tasks across all tasks
	t.task[shardID].rw.Lock()
	t.task[shardID].stats[taskID].running-- // running sub-tasks in this task
	t.task[shardID].rw.Unlock()
}

// Running returns the number of running sub-tasks in the given task. It only
// reads the shard, so a read lock is sufficient.
func (t *TaskManager[T, U, C, CT, TF]) Running(taskID uint64) int32 {
	shardID := getShardID(taskID)
	t.task[shardID].rw.RLock()
	defer t.task[shardID].rw.RUnlock()
	return t.task[shardID].stats[taskID].running
}

// StopTask stops all sub-tasks of the task with the given task ID.
func (t *TaskManager[T, U, C, CT, TF]) StopTask(taskID uint64) {
	shardID := getShardID(taskID)
	t.task[shardID].rw.Lock()
	defer t.task[shardID].rw.Unlock()
	l := t.task[shardID].stats[taskID].stats
	for e := l.Front(); e != nil; e = e.Next() {
		e.Value.(tContainer[T, U, C, CT, TF]).task.SetStatus(StopTask)
	}
}
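
// A minimal usage sketch (illustrative only; TaskBox, Context, and the type
// arguments below are assumptions, and a *TaskBox would normally be produced
// by the pool rather than constructed by hand):
//
//	m := NewTaskManager[int, int, any, any, Context[any]](4)
//	m.RegisterTask(1, 4)    // start tracking task 1 with concurrency 4
//	m.AddSubTask(1, box)    // box is a *TaskBox belonging to task 1
//	_ = m.Running(1)        // watch how many sub-tasks are still running
//	m.ExitSubTask(1)        // call once as each sub-task finishes
//	m.StopTask(1)           // or ask every remaining sub-task to stop
//	m.DeleteTask(1)         // drop the bookkeeping once the task is done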