diff --git a/installer/ansible/roles/cleaner/scenarios/delfin.yml b/installer/ansible/roles/cleaner/scenarios/delfin.yml index e0e1ef60d..01fcd92a9 100644 --- a/installer/ansible/roles/cleaner/scenarios/delfin.yml +++ b/installer/ansible/roles/cleaner/scenarios/delfin.yml @@ -16,7 +16,7 @@ - name: Stop delfin containers, if started shell: "{{ item }}" with_items: - - docker-compose down + - docker compose down become: yes ignore_errors: yes args: diff --git a/installer/ansible/roles/cleaner/scenarios/srm-toolchain.yml b/installer/ansible/roles/cleaner/scenarios/srm-toolchain.yml index 4f1a5040b..255517df0 100644 --- a/installer/ansible/roles/cleaner/scenarios/srm-toolchain.yml +++ b/installer/ansible/roles/cleaner/scenarios/srm-toolchain.yml @@ -19,7 +19,7 @@ register: srmtoolchainexisted - name: Stop and remove Prometheus, Alertmanager, Grafana containers but don't delete the images - shell: docker-compose rm -fs + shell: docker compose rm -fs args: chdir: "{{ srm_toolchain_work_dir }}/" when: @@ -27,7 +27,7 @@ - srmtoolchainexisted.stat.isdir is defined and srmtoolchainexisted.stat.isdir - name: Stop and remove Prometheus, Alertmanager, Grafana containers & delete the images - shell: docker-compose down --rmi all + shell: docker compose down --rmi all args: chdir: "{{ srm_toolchain_work_dir }}/" when: diff --git a/installer/ansible/roles/delfin-installer/scenarios/container.yml b/installer/ansible/roles/delfin-installer/scenarios/container.yml index 6893c22be..3b23d87b8 100644 --- a/installer/ansible/roles/delfin-installer/scenarios/container.yml +++ b/installer/ansible/roles/delfin-installer/scenarios/container.yml @@ -17,7 +17,7 @@ shell: "{{ item }}" with_items: - docker build -t sodafoundation/delfin . 
- - DELFIN_METRICS_DIR={{ delfin_exporter_prometheus_metrics_dir }} DELFIN_HOST_IP={{ host_ip }} DELFIN_RABBITMQ_USER={{ delfin_rabbitmq_user }} DELFIN_RABBITMQ_PASS={{ delfin_rabbitmq_pass }} docker-compose up -d + - DELFIN_METRICS_DIR={{ delfin_exporter_prometheus_metrics_dir }} DELFIN_HOST_IP={{ host_ip }} DELFIN_RABBITMQ_USER={{ delfin_rabbitmq_user }} DELFIN_RABBITMQ_PASS={{ delfin_rabbitmq_pass }} docker compose up -d become: yes args: chdir: "{{ delfin_work_dir }}" diff --git a/installer/ansible/roles/srm-toolchain-installer/tasks/main.yml b/installer/ansible/roles/srm-toolchain-installer/tasks/main.yml index 6d43ee2db..235eade16 100644 --- a/installer/ansible/roles/srm-toolchain-installer/tasks/main.yml +++ b/installer/ansible/roles/srm-toolchain-installer/tasks/main.yml @@ -57,12 +57,12 @@ shell: export PROMETHEUS=True - name: Stop and remove Prometheus, Alertmanager, Grafana containers, keeping images - shell: docker-compose rm -fs + shell: docker compose rm -fs args: chdir: "{{ srm_toolchain_work_dir }}/" - name: start service - shell: docker-compose up -d + shell: docker compose up -d args: chdir: "{{ srm_toolchain_work_dir }}/" diff --git a/installer/ansible/script/create_db.py b/installer/ansible/script/create_db.py index 7c908d103..f8e1e5007 100644 --- a/installer/ansible/script/create_db.py +++ b/installer/ansible/script/create_db.py @@ -28,10 +28,14 @@ CONF = cfg.CONF db_options.set_defaults(cfg.CONF, connection='sqlite:////var/lib/delfin/delfin.sqlite') + + def remove_prefix(text, prefix): if text.startswith(prefix): return text[len(prefix):] return text + + def main(): CONF(sys.argv[1:], project='delfin', version=version.version_string()) @@ -41,6 +45,7 @@ def main(): if not os.path.exists(path): os.makedirs(path) db.register_db() + + if __name__ == '__main__': main() - diff --git a/installer/ansible/script/ministone.py b/installer/ansible/script/ministone.py index 392aebd8e..cbb569e07 100755 --- a/installer/ansible/script/ministone.py +++ 
b/installer/ansible/script/ministone.py @@ -19,22 +19,23 @@ import requests import json + def token_issue(): body = { 'auth': { - 'identity': { 'methods': ['password'], - 'password': { - 'user': { - 'name': OS_USERNAME, - 'domain': { 'name': OS_USER_DOMAIN_NAME }, - 'password': OS_PASSWORD - } - } - }, + 'identity': {'methods': ['password'], + 'password': { + 'user': { + 'name': OS_USERNAME, + 'domain': {'name': OS_USER_DOMAIN_NAME}, + 'password': OS_PASSWORD + } + } + }, 'scope': { 'project': { - 'name': OS_PROJECT_NAME, - 'domain': { 'name': OS_USER_DOMAIN_NAME } + 'name': OS_PROJECT_NAME, + 'domain': {'name': OS_USER_DOMAIN_NAME} } } } @@ -45,8 +46,9 @@ def token_issue(): try: r_post = requests.post(OS_AUTH_URL + '/v3/auth/tokens', headers=headers, data=json.dumps(body)) - except: + except Exception as ex: print('ERROR: %s' % (body)) + print('Exception: %s' % (ex)) return None if debug: @@ -58,6 +60,7 @@ def token_issue(): else: return None + def service_list(token): headers = { 'Content-Type': 'application/json', @@ -75,12 +78,14 @@ def service_list(token): result_list = json.loads(r_get.text)['services'] for s in result_list: - result_dict[s['name']] = s['id'] - except: + result_dict[s['name']] = s['id'] + except Exception as ex: + print("Got exception %s" % ex) return None return result_dict + def endpoint_list(token, service): headers = { @@ -93,8 +98,9 @@ def endpoint_list(token, service): if debug: print('DEBUG: GET /v3/endpoints - status_code = %s' % (r_get.status_code)) - except: - return None + except Exception as ex: + print("Got exception %s" % ex) + return None if r_get.status_code != 200: return None @@ -104,7 +110,8 @@ def endpoint_list(token, service): ep_list = [] for ep in json.loads(response)['endpoints']: - if service in service_dict.keys() and (ep['service_id'] == service_dict[service]): + if service in service_dict.keys() and (ep['service_id'] == + service_dict[service]): if debug: print('DEBUG: %s %s' % (ep['id'], ep['interface'])) 
print('DEBUG: url %s' % (ep['url'])) @@ -112,6 +119,7 @@ return ep_list + def endpoint_bulk_update(token, service, url): headers = { 'Content-Type': 'application/json', @@ -126,7 +134,7 @@ def endpoint_bulk_update(token, service, url): print("DEBUG: ep_list: %s %s" % (ep_list, url)) for ep in ep_list: - body = {"endpoint": { "url": url }} + body = {"endpoint": {"url": url}} endpoint_id = ep[0] if debug: print("DEBUG: %s / %s" % @@ -136,8 +144,8 @@ r_patch = requests.patch(OS_AUTH_URL + '/v3/endpoints/' + endpoint_id, headers=headers, data=json.dumps(body)) - except: - print('ERROR: endpoint update for id: %s failed.' % (endpoint_id)) + except Exception as ex: + print('ERROR: endpoint update for id: %s failed. %s' % (endpoint_id, ex)) # continue for all the given endpoints if r_patch.status_code != 200: print('ERROR: endpoint update for id: %s failed. HTTP %s' % @@ -159,17 +167,18 @@ def endpoint_bulk_update(token, service, url): # Updates URL portion of keystone endpoints of given SERVICE_NAME # in one action. # + if __name__ == '__main__': debug = False - OS_AUTH_URL=os.environ['OS_AUTH_URL'] - OS_PASSWORD=os.environ['OS_PASSWORD'] - OS_PROJECT_DOMAIN_NAME=os.environ['OS_PROJECT_DOMAIN_NAME'] - OS_PROJECT_NAME=os.environ['OS_PROJECT_NAME'] - OS_USERNAME=os.environ['OS_USERNAME'] - OS_USER_DOMAIN_NAME=os.environ['OS_USER_DOMAIN_NAME'] - #OS_USER_DOMAIN_ID=os.environ['OS_USER_DOMAIN_ID'] + OS_AUTH_URL = os.environ['OS_AUTH_URL'] + OS_PASSWORD = os.environ['OS_PASSWORD'] + OS_PROJECT_DOMAIN_NAME = os.environ['OS_PROJECT_DOMAIN_NAME'] + OS_PROJECT_NAME = os.environ['OS_PROJECT_NAME'] + OS_USERNAME = os.environ['OS_USERNAME'] + OS_USER_DOMAIN_NAME = os.environ['OS_USER_DOMAIN_NAME'] + # OS_USER_DOMAIN_ID=os.environ['OS_USER_DOMAIN_ID'] # token_issue # used for keystone process start up check. 
@@ -177,18 +186,17 @@ def endpoint_bulk_update(token, service, url): if len(sys.argv) == 2 and sys.argv[1] == 'token_issue': token = token_issue() if not token: - sys.exit(1) + sys.exit(1) else: - sys.exit(0) + sys.exit(0) # endpoint_bulk_update # used for overwriting keystone endpoints if not ((len(sys.argv) == 4) and (sys.argv[1] == 'endpoint_bulk_update')): print('Specify service_name and url for bulk update. Exiting...') - sys.exit(1) + sys.exit(1) token = token_issue() if not token: sys.exit(1) endpoint_bulk_update(token, sys.argv[2], sys.argv[3]) - diff --git a/installer/install_dependencies.sh b/installer/install_dependencies.sh new file mode 100644 index 000000000..0e5f1daf6 --- /dev/null +++ b/installer/install_dependencies.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +echo Enabling docker repository +sudo mkdir -p /etc/apt/keyrings +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg --yes + +echo \ + "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \ + $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + +# Update local repositories +echo Updating local repositories +sudo apt-get update + +# Install dependencies +echo Installing dependencies +sudo apt-get install -y apt-transport-https ca-certificates curl gnupg gnupg-agent lsb-release software-properties-common sshpass pv gawk + +# Install python dependencies +echo Installing Python dependencies +sudo apt-get install -y python3-distutils +sudo apt-get install -y python3-pip + +# Install ansible if not present +if [ "`which ansible`" != "" ]; then + echo ansible already installed, skipping. +else + echo Installing ansible + python3 -m pip install --user ansible + echo 'PATH=$PATH:~/.local/bin'>>/etc/profile +fi + +# Install docker if not present +if [ "`which docker`" != "" ]; then + echo Docker already installed, skipping. 
+else + echo Installing docker + sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin +fi + +# Ensure /usr/local/bin is in path +PATH=$PATH:/usr/local/bin