"""
Top level config file (leave this file at the root directory). ``import config`` on the top of your file to include the global information included here.
"""
__author__ = "Pradipta Ghosh, Quynh Nguyen, Pranav Sakulkar, Jason A Tran, Bhaskar Krishnamachari"
__copyright__ = "Copyright (c) 2019, Autonomous Networks Research Group. All rights reserved."
__license__ = "GPL"
__version__ = "2.1"
from os import path
import os
import configparser
import logging
from core.jupiter_utils import app_config_parser
logging.basicConfig(level=logging.INFO)
# TODO: remove these globals
HERE = path.abspath(path.dirname(__file__)) + "/"
INI_PATH = HERE + 'jupiter_config.ini'
# This must be the same name as the app folder.
# APP_NAME must contain only alphanumerics or hyphens; prefer alphanumerics only.
# APP_NAME = "example"
APP_NAME = 'demo'
APP_DIR = path.join("app_specific_files", APP_NAME)
# TODO: deprecated, use jupiter_utils.app_config_parser to grab the docker
# registry. Remove this after set_globals() is removed.
DOCKER_REGISTRY = "anrg"
# TODO: quynh - should use a getter function instead of a global
BOKEH = -1
def get_kubeconfig():
try:
kubeconfig_path = os.environ['KUBECONFIG']
except KeyError:
        logging.info('$KUBECONFIG is not set. Using default path ~/.kube/config.')
        kubeconfig_path = path.expanduser("~/.kube/config")
return kubeconfig_path
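# Illustrative use of get_kubeconfig() (a sketch, not part of the original flow;
# it assumes the `kubernetes` Python client is installed, which this module does
# not itself import):
#
#   from kubernetes import config as k8s_config
#   k8s_config.load_kube_config(config_file=get_kubeconfig())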
def parse_config_ini():
config = configparser.ConfigParser()
# jupiter_config.ini should always be in the same dir as this file
config.read(path.join(
path.abspath(path.dirname(__file__)),
"jupiter_config.ini")
)
return config
# used for nodes.txt only
def get_home_node(file_name):
with open(file_name) as file:
line = file.readline().split()
return line[1]
# used for nodes.txt only
def get_datasources(file_name):
    datasources = {}
    with open(file_name, "r") as node_file:
        for line in node_file:
            node_line = line.strip().split(" ")
            datasources.setdefault(node_line[0], [])
            datasources[node_line[0]].extend(node_line[1:])
    return datasources
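# Assumed nodes.txt layout, inferred from the two parsers above rather than taken
# from the repository docs: each line is space-separated, the first token is a
# node label and the remaining tokens are the values attached to it;
# get_home_node() returns the second token of the first line. Illustrative only:
#
#   home home_node_hostname
#   datasource1 stream_node_hostname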
# TODO: remove all usage of this function
def set_globals():
"""Set global configuration information
"""
"""Configuration Paths"""
config = configparser.ConfigParser()
config.read(INI_PATH)
"""User input for scheduler information"""
global STATIC_MAPPING, SCHEDULER, TRANSFER, PROFILER, RUNTIME, PRICING, DCOMP
STATIC_MAPPING = int(config['CONFIG']['STATIC_MAPPING'])
# scheduler option chosen from SCHEDULER_LIST
SCHEDULER = int(config['CONFIG']['SCHEDULER'])
# transfer option chosen from TRANSFER_LIST
TRANSFER = int(config['CONFIG']['TRANSFER'])
# Network and Resource profiler (TA2) option chosen from TA2_LIST
PROFILER = int(config['CONFIG']['PROFILER'])
# Runtime profiling for data transfer methods: 0 for only senders, 1 for both senders and receivers
RUNTIME = int(config['CONFIG']['RUNTIME'])
# Using pricing or original scheme
PRICING = int(config['CONFIG']['PRICING'])
# Using dcomp
DCOMP = int(config['CONFIG']['DCOMP'])
"""Authorization information in the containers"""
global USERNAME, PASSWORD
USERNAME = config['AUTH']['USERNAME']
PASSWORD = config['AUTH']['PASSWORD']
"""Port and target port in containers for services to be used: Mongo, SSH and Flask"""
global MONGO_SVC, MONGO_DOCKER, SSH_SVC, SSH_DOCKER, FLASK_SVC, FLASK_DOCKER, FLASK_CIRCE, FLASK_DEPLOY
MONGO_SVC = config['PORT']['MONGO_SVC']
MONGO_DOCKER = config['PORT']['MONGO_DOCKER']
SSH_SVC = config['PORT']['SSH_SVC']
SSH_DOCKER = config['PORT']['SSH_DOCKER']
FLASK_SVC = config['PORT']['FLASK_SVC']
FLASK_DOCKER = config['PORT']['FLASK_DOCKER']
FLASK_CIRCE = config['PORT']['FLASK_CIRCE']
FLASK_DEPLOY = config['PORT']['FLASK_DEPLOY']
    global BOKEH, BOKEH_SERVER, BOKEH_PORT
    BOKEH = int(config['BOKEH_LIST']['BOKEH'])
    BOKEH_SERVER = config['BOKEH_LIST']['BOKEH_SERVER']
    BOKEH_PORT = int(config['BOKEH_LIST']['BOKEH_PORT'])
"""Modules path of Jupiter"""
global NETR_PROFILER_PATH, EXEC_PROFILER_PATH, CIRCE_PATH, HEFT_PATH, WAVE_PATH, SCRIPT_PATH, STREAM_PATH
# default network and resource profiler: DRUPE
# default wave mapper: random wave
NETR_PROFILER_PATH = HERE + 'profilers/network_resource_profiler_mulhome/'
EXEC_PROFILER_PATH = HERE + 'profilers/execution_profiler_mulhome/'
CIRCE_PATH = HERE + 'circe/pricing/'
HEFT_PATH = HERE + 'task_mapper/heft_mulhome/original/'
WAVE_PATH = HERE + 'task_mapper/wave_mulhome/random_wave/'
SCRIPT_PATH = HERE + 'scripts/'
STREAM_PATH = HERE + 'simulation/data_sources/'
global heft_option, wave_option
heft_option = 'original'
wave_option = 'random'
if SCHEDULER == int(config['SCHEDULER_LIST']['WAVE_RANDOM']):
print('Task mapper: Wave random selected')
WAVE_PATH = HERE + 'task_mapper/wave_mulhome/random_wave/'
wave_option = 'random'
elif SCHEDULER == int(config['SCHEDULER_LIST']['WAVE_GREEDY']):
print('Task mapper: Wave greedy (original) selected')
WAVE_PATH = HERE + 'task_mapper/wave_mulhome/greedy_wave/'
wave_option = 'greedy'
elif SCHEDULER == int(config['SCHEDULER_LIST']['HEFT_BALANCE']):
print('Task mapper: Heft load balanced selected')
HEFT_PATH = HERE + 'task_mapper/heft_mulhome/heft_balance/'
heft_option = 'heftbalance'
else:
print('Task mapper: Heft original selected')
    global pricing_option, profiler_option
    pricing_option = 'original'  # original pricing
    profiler_option = 'multiple_home'
    if PRICING == int(config['PRICING_LIST']['NONPRICING']):  # non-pricing
        pricing_option = 'original'
        print('Non-pricing scheme selected')
    elif PRICING == int(config['PRICING_LIST']['PUSH_PRICING']):  # multiple home (push circe)
        pricing_option = 'pricing_push'
        print('Pricing push scheme selected')
    elif PRICING == int(config['PRICING_LIST']['EVENT_PRICING']):  # multiple home, pricing (event-driven circe)
        pricing_option = 'pricing_event'
        print('Pricing event-driven scheme selected')
    # 'INTERGRATED_PRICING' (sic) matches the key name used in jupiter_config.ini
    elif PRICING == int(config['PRICING_LIST']['INTERGRATED_PRICING']):  # new pricing
        pricing_option = 'integrated_pricing'
        print('Integrated pricing scheme selected')
    elif PRICING == int(config['PRICING_LIST']['DECOUPLED_PRICING']):  # new pricing
        pricing_option = 'decoupled_pricing'
        print('Decoupled pricing scheme selected')
    CIRCE_PATH = HERE + 'circe/%s/' % pricing_option
global cluster_option
cluster_option = 'do'
if DCOMP == 1:
cluster_option = 'dcomp'
"""Kubernetes required information"""
global KUBECONFIG_PATH, DEPLOYMENT_NAMESPACE, PROFILER_NAMESPACE, \
MAPPER_NAMESPACE, EXEC_NAMESPACE
try:
KUBECONFIG_PATH = os.environ['KUBECONFIG']
except KeyError:
print('$KUBECONFIG does not exist. Using default path ~/.kube/config')
home = os.environ['HOME']
KUBECONFIG_PATH = home + '/.kube/config'
# Namespaces
DEPLOYMENT_NAMESPACE = 'quynh-circe'
PROFILER_NAMESPACE = 'quynh-profiler'
MAPPER_NAMESPACE = 'quynh-mapper'
EXEC_NAMESPACE = 'quynh-exec'
""" Node file path and first task information """
global HOME_NODE, HOME_CHILD, STREAM_NODE
HOME_NODE = get_home_node(HERE + 'nodes.txt')
STREAM_NODE = get_datasources(HERE + 'nodes.txt')
"""Application Information"""
global APP_PATH, APP_NAME, APP_OPTION
HOME_CHILD = 'master'
APP_PATH = HERE + 'app_specific_files/demo/'
APP_NAME = 'app_specific_files/demo'
APP_OPTION = 'demo'
"""pricing CIRCE home and worker images"""
global PRICING_HOME_IMAGE, WORKER_CONTROLLER_IMAGE, WORKER_COMPUTE_IMAGE
PRICING_HOME_IMAGE = 'quay.io/anrgusc/%s_circe_home:%s_%s' %(pricing_option,APP_OPTION,cluster_option)
WORKER_CONTROLLER_IMAGE = 'quay.io/anrgusc/%s_circe_controller:%s_%s' %(pricing_option,APP_OPTION,cluster_option)
WORKER_COMPUTE_IMAGE = 'quay.io/anrgusc/%s_circe_computing:%s_%s' %(pricing_option,APP_OPTION,cluster_option)
global PRICING_HOME_CONTROLLER, PRICING_HOME_COMPUTE
PRICING_HOME_CONTROLLER = 'quay.io/anrgusc/%s_circe_home_controller:%s_%s' %(pricing_option,APP_OPTION,cluster_option)
PRICING_HOME_COMPUTE = 'quay.io/anrgusc/%s_circe_home_compute:%s_%s' %(pricing_option,APP_OPTION,cluster_option)
global NONDAG_CONTROLLER_IMAGE,NONDAG_WORKER_IMAGE # only required for non-DAG tasks (teradetectors and dft)
NONDAG_CONTROLLER_IMAGE = 'quay.io/anrgusc/%s_circe_nondag:%s_%s' %(pricing_option,APP_OPTION,cluster_option)
NONDAG_WORKER_IMAGE = 'quay.io/anrgusc/%s_circe_nondag_worker:%s_%s' %(pricing_option,APP_OPTION,cluster_option)
"""CIRCE home and worker images for execution profiler"""
global HOME_IMAGE, WORKER_IMAGE, STREAM_IMAGE
HOME_IMAGE = 'quay.io/anrgusc/circe_home:%s_%s'%(APP_OPTION,cluster_option)
WORKER_IMAGE = 'quay.io/anrgusc/circe_worker:%s_%s'%(APP_OPTION,cluster_option)
STREAM_IMAGE = 'quay.io/anrgusc/stream_home:%s_%s'%(APP_OPTION,cluster_option)
"""DRUPE home and worker images"""
global PROFILER_HOME_IMAGE, PROFILER_WORKER_IMAGE
# PROFILER_HOME_IMAGE = 'quay.io/anrgusc/%s_profiler_home:coded_%s'%(profiler_option,cluster_option)
# PROFILER_WORKER_IMAGE = 'quay.io/anrgusc/%s_profiler_worker:coded_%s'%(profiler_option,cluster_option)
PROFILER_HOME_IMAGE = '{}/drupe_profiler_home:{}'.format(DOCKER_REGISTRY, APP_OPTION)
PROFILER_WORKER_IMAGE = '{}/drupe_profiler_worker:{}'.format(DOCKER_REGISTRY, APP_OPTION)
"""WAVE home and worker images"""
global WAVE_HOME_IMAGE, WAVE_WORKER_IMAGE
    # wave_option selects the image flavor: 'random' or 'greedy'
WAVE_HOME_IMAGE = 'quay.io/anrgusc/%s_%s_wave_home:%s_%s' %(wave_option,profiler_option,APP_OPTION,cluster_option)
WAVE_WORKER_IMAGE = 'quay.io/anrgusc/%s_%s_wave_worker:%s_%s' %(wave_option,profiler_option,APP_OPTION,cluster_option)
"""Execution profiler home and worker images"""
global EXEC_HOME_IMAGE, EXEC_WORKER_IMAGE
EXEC_HOME_IMAGE = '{}/exec_profiler_home:{}'.format(DOCKER_REGISTRY, APP_OPTION)
EXEC_WORKER_IMAGE = '{}/exec_profiler_worker:{}'.format(DOCKER_REGISTRY, APP_OPTION)
"""HEFT docker image"""
global HEFT_IMAGE
HEFT_IMAGE = 'quay.io/anrgusc/%s_heft:%s_%s'%(heft_option,APP_OPTION,cluster_option)
global NUM_STRESS, STRESS_IMAGE
NUM_STRESS = int(config['OTHER']['NUM_STRESS'])
STRESS_IMAGE = 'quay.io/anrgusc/stress:%s'%(cluster_option)
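# A minimal sketch of how a driver script might still consume the deprecated
# set_globals() path; the script below is hypothetical, but the globals it
# prints are the ones assigned above:
#
#   import jupiter_config
#   jupiter_config.set_globals()
#   print(jupiter_config.HOME_NODE, jupiter_config.PRICING_HOME_IMAGE)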
def k8s_service_port_mappings():
config = parse_config_ini()
ports = []
for name, portmap in config.items("PORT_MAPPINGS"):
svc, docker = portmap.split(":")
ports.append({
"name": name,
"port": int(svc),
"targetPort": int(docker)
})
return ports
def k8s_deployment_port_mappings():
config = parse_config_ini()
ports = []
for name, portmap in config.items("PORT_MAPPINGS"):
# k8s service port not needed here
svc, docker = portmap.split(":")
ports.append({
"name": name,
"containerPort": int(docker)
})
return ports
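# Both port-mapping helpers above read the [PORT_MAPPINGS] section of
# jupiter_config.ini and expect each value to look like
# "<k8s_service_port>:<container_port>". An illustrative section (these exact
# entries and numbers are assumptions, not the shipped defaults):
#
#   [PORT_MAPPINGS]
#   SSH = 5000:22
#   FLASK = 8888:8888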
def get_abs_app_dir():
abs_path = path.abspath(path.join(path.dirname(__file__), APP_DIR))
return abs_path
def task_mapper():
config = parse_config_ini()
return config["CONFIG"]["TASK_MAPPER"]
def flask_port_mapping():
config = parse_config_ini()
svc, docker = config["PORT_MAPPINGS"]["FLASK"].split(":")
return svc, docker
def kubectl_proxy_mapper():
config = parse_config_ini()
return config["CONFIG"]["KUBECTL_PROXY"]
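

if __name__ == "__main__":
    # Quick smoke test (an illustrative sketch, not part of the original deployment
    # flow). It assumes jupiter_config.ini provides the [CONFIG] and [PORT_MAPPINGS]
    # entries read by the helpers below.
    print("app dir:       ", get_abs_app_dir())
    print("kubeconfig:    ", get_kubeconfig())
    print("task mapper:   ", task_mapper())
    print("service ports: ", k8s_service_port_mappings())
    print("flask mapping: ", flask_port_mapping())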