diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 6bfcb999..32afcb45 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -17,8 +17,6 @@ jobs:
         image:
           - cloud-info
           - caso
-          - cloudkeeper-os
-          - cloudkeeper-core
     steps:
       - name: Checkout
diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml
index cc101a46..cf4a3696 100644
--- a/.github/workflows/python.yml
+++ b/.github/workflows/python.yml
@@ -1,6 +1,8 @@
 name: Python testing

-on: pull_request
+on:
+  - pull_request
+  - push

 jobs:
   test:
@@ -9,11 +11,14 @@ jobs:
     steps:
       - uses: actions/checkout@v4
-      - name: Set up Python
+      - name: Set up Python
         uses: actions/setup-python@v4
-      - name: Test cloud-info-generator
+        with:
+          python-version: '3.11'
+      - name: Test cloud-info-generator
         run: |
           cd cloud-info
           pip install -r requirements.txt
           pip install .
-          python3 -m cloud_info_catchall.test
+          cd cloud_info_catchall
+          python3 -m unittest
diff --git a/cloud-info/ams-wrapper.sh b/cloud-info/ams-wrapper.sh
index 350354f0..1d8504f5 100755
--- a/cloud-info/ams-wrapper.sh
+++ b/cloud-info/ams-wrapper.sh
@@ -34,20 +34,39 @@ curl -f "https://$AMS_HOST/v1/projects/$AMS_PROJECT/topics/$AMS_TOPIC?key=$AMS_T

 # Attempt to generate the site configuration
 AUTO_CONFIG_PATH="$(mktemp -d)"
+
+# First get a valid access token
 export CHECKIN_SECRETS_FILE="$CHECKIN_SECRETS_PATH/secrets.yaml"
-if VO_SECRETS_PATH="$AUTO_CONFIG_PATH/vos" config-generator > "$AUTO_CONFIG_PATH/site.yaml"; then
-  # this worked, let's update the env
-  export CHECKIN_SECRETS_PATH="$AUTO_CONFIG_PATH/vos"
-  export CLOUD_INFO_CONFIG="$AUTO_CONFIG_PATH/site.yaml"
+# TODO(enolfc): avoid creating new tokens for every provider
+export ACCESS_TOKEN_FILE="$AUTO_CONFIG_PATH/token.yaml"
+USE_ACCESS_TOKEN=0
+if token-generator; then
+  # TODO(enolfc): even if the step below fails, we should use the access token,
+  # as it will provide access to more projects
+  if SECRETS_FILE="$ACCESS_TOKEN_FILE" config-generator > "$AUTO_CONFIG_PATH/site.yaml"; then
+    # this worked, let's update the env
+    export CHECKIN_SECRETS_PATH="$AUTO_CONFIG_PATH/vos"
+    export CLOUD_INFO_CONFIG="$AUTO_CONFIG_PATH/site.yaml"
+    USE_ACCESS_TOKEN=1
+  fi
 fi

 # Any OS related parameter should be available as env variables
 if test "$CHECKIN_SECRETS_PATH" = ""; then
+  # Case 1: manual config
+  cloud-info-provider-service --yaml-file "$CLOUD_INFO_CONFIG" \
+    --middleware "$CLOUD_INFO_MIDDLEWARE" \
+    --ignore-share-errors \
+    --format glue21 > cloud-info.out
+elif test "$USE_ACCESS_TOKEN" -eq 1; then
+  # Case 2: access token style
   cloud-info-provider-service --yaml-file "$CLOUD_INFO_CONFIG" \
     --middleware "$CLOUD_INFO_MIDDLEWARE" \
     --ignore-share-errors \
+    --auth-refresher accesstoken \
     --format glue21 > cloud-info.out
 else
+  # Case 3: oidc refresh style
   cloud-info-provider-service --yaml-file "$CLOUD_INFO_CONFIG" \
     --middleware "$CLOUD_INFO_MIDDLEWARE" \
     --ignore-share-errors \
@@ -66,7 +85,7 @@ grep -q GLUE2ShareID cloud-info.out \
 ARGO_URL="https://$AMS_HOST/v1/projects/$AMS_PROJECT/topics/$AMS_TOPIC:publish?key=$AMS_TOKEN"

 printf '{"messages":[{"attributes":{},"data":"' > ams-payload
-grep -v "UNKNOWN" cloud-info.out | grep -v "^#" | gzip | base64 -w 0 >> ams-payload
+grep -v "UNKNOWN" cloud-info.out | grep -v "^#" | grep -v ": $" | gzip | base64 -w 0 >> ams-payload
 printf '"}]}' >> ams-payload

 curl -X POST "$ARGO_URL" -H "content-type: application/json" -d @ams-payload
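Note: the payload published to AMS is the GLUE21 output with empty, commented and UNKNOWN attributes filtered out, then gzipped and base64-encoded. A minimal Python sketch of what this shell pipeline produces; the function name and sample data are illustrative, not part of the repo:

```python
# Sketch of the payload encoding done by the ams-wrapper.sh pipeline above:
# filter the GLUE21 output, gzip it, base64-encode it, wrap it in an AMS message.
import base64
import gzip
import json


def build_ams_payload(cloud_info: str) -> str:
    lines = [
        line
        for line in cloud_info.splitlines()
        if "UNKNOWN" not in line      # grep -v "UNKNOWN"
        and not line.startswith("#")  # grep -v "^#"
        and not line.endswith(": ")   # grep -v ": $" (attributes with no value)
    ]
    data = gzip.compress("\n".join(lines).encode())
    encoded = base64.b64encode(data).decode()
    return json.dumps({"messages": [{"attributes": {}, "data": encoded}]})


print(build_ams_payload("GLUE2ShareID: share1\n# comment\nfoo: UNKNOWN\nempty: "))
```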
diff --git a/cloud-info/cloud_info_catchall/config_generator.py b/cloud-info/cloud_info_catchall/config_generator.py
index 6b553e22..f9c3d071 100755
--- a/cloud-info/cloud_info_catchall/config_generator.py
+++ b/cloud-info/cloud_info_catchall/config_generator.py
@@ -1,8 +1,23 @@
 """Discover projects for cloud-info-povider and generate configuration

 Takes its own configuration from env variables:
-CHECKIN_SECRETS_FILE: yaml file with the check-in secrets to get access tokens
-CHECKIN_OIDC_TOKEN: URL for token refreshal
+SECRETS_FILE: yaml file with the secrets to access shares
+The yaml includes as many credentials as wanted, in 2 formats:
+```
+---
+secret_name:
+  client_id: "client id"
+  client_secret: "client_secret"
+  refresh_token: "refresh_token"
+
+other_secret:
+  access_token: "access token"
+```
+Any other format will be ignored
+
+VO_SECRETS_PATH: directory to create the VO structure with credentials
+                 for cloud-info-provider
+TOKEN_URL: URL to refresh tokens
 OS_AUTH_URL, OS_IDENTITY_PROVIDER, OS_PROTOCOL: OpenStack endpoint config
 SITE_NAME: site name
 """
@@ -10,106 +25,61 @@
 import logging
 import os

-import fedcloudclient.endpoint as fedcli
 import yaml
-from cloud_info_provider.auth_refreshers.oidc_refresh import OidcRefreshToken
+from cloud_info_catchall.share_discovery import (
+    AccessTokenShareDiscovery,
+    RefresherShareDiscovery,
+)


-class ShareDiscovery:
-    def __init__(self, auth_url, identity_provider, protocol, token_url, vo_dir):
-        self.auth_url = auth_url
-        self.identity_provider = identity_provider
-        self.protocol = protocol
-        self.token_url = token_url
-        self.vo_dir = vo_dir
-
-    def refresh_token(self, secret):
-        # fake the options for refreshing
-        # avoids code duplication but not very clean
-        class Opt:
-            timeout = 10
-
-        refresher = OidcRefreshToken(Opt)
-        return refresher._refresh_token(
-            self.token_url,
-            secret.get("client_id", None),
-            secret.get("client_secret", None),
-            secret.get("refresh_token", None),
-            "openid email profile voperson_id eduperson_entitlement",
-        )
-
-    def get_token_shares(self, access_token):
-        # rely on fedcloudclient for getting token
-        # exchange access_token for Keystone token
-        shares = {}
-        try:
-            token = fedcli.retrieve_unscoped_token(
-                self.auth_url, access_token, self.protocol
-            )
-        except fedcli.TokenException:
-            # this check-in account does not have access to the site, ignore
-            return shares
-        projects = fedcli.get_projects_from_single_site(self.auth_url, token)
-        for p in projects:
-            vo = p.get("VO", None)
-            if not vo:
-                logging.warning(
-                    "Discarding project %s as it does not have VO property", p["name"]
-                )
-                continue
-            if not p.get("enabled", False):
-                logging.warning("Discarding project %s as it is not enabled", p["name"])
-                continue
-            shares[vo] = {"auth": {"project_id": p["id"]}}
-        return shares
+def read_secrets(secrets_file):
+    with open(secrets_file, "r") as f:
+        return yaml.load(f.read(), Loader=yaml.SafeLoader)

-    def generate_shares(self, secrets):
-        shares = {}
-        for s in secrets:
-            # not our thing
-            if not isinstance(secrets[s], dict):
-                continue
-            access_token = self.refresh_token(secrets[s])
-            token_shares = self.get_token_shares(access_token)
-            shares.update(token_shares)
-            # create the directory structure for the cloud-info-provider
-            for d in token_shares:
-                dir_path = os.path.join(self.vo_dir, d)
-                os.makedirs(dir_path, exist_ok=True)
-                for field in "client_id", "client_secret", "refresh_token":
-                    with open(os.path.join(dir_path, field), "w+") as f:
-                        f.write(secrets[s].get(field, None) or "")
-        if not shares:
-            logging.error("No shares generated!")
-            raise Exception("No shares found!")
-        return shares

-    def generate_config(self, site_name, secrets):
-        shares = self.generate_shares(secrets)
-        return {"site": {"name": site_name}, "compute": {"shares": shares}}
+def generate_shares(config, secrets):
+    """calls the share discovery class that matches the type of secret we have"""
+    shares = {}
+    for s in secrets:
+        # not our thing
+        if not isinstance(secrets[s], dict):
+            continue
+        if "client_id" in secrets[s] and "refresh_token" in secrets[s]:
+            discoverer = RefresherShareDiscovery(config, secrets[s])
+        elif "access_token" in secrets[s]:
+            discoverer = AccessTokenShareDiscovery(config, secrets[s])
+        else:
+            # none of the supported secret formats, ignore
+            continue
+        token_shares = discoverer.get_token_shares()
+        shares.update(token_shares)
+    if not shares:
+        logging.error("No shares generated!")
+        raise Exception("No shares found!")
+    return shares

-def read_secrets(secrets_file):
-    with open(secrets_file, "r") as f:
-        return yaml.load(f.read(), Loader=yaml.SafeLoader)
+def generate_shares_config(config, secrets):
+    shares = generate_shares(config, secrets)
+    return {"site": {"name": config["site_name"]}, "compute": {"shares": shares}}


 def main():
     logging.basicConfig()
     # get config from env
-    checkin_secrets_file = os.environ["CHECKIN_SECRETS_FILE"]
-    checkin_token_url = os.environ["CHECKIN_OIDC_TOKEN"]
-    os_auth_url = os.environ["OS_AUTH_URL"]
-    os_identity_provider = os.environ["OS_IDENTITY_PROVIDER"]
-    os_protocol = os.environ["OS_PROTOCOL"]
-    site_name = os.environ["SITE_NAME"]
-    vo_dir = os.environ["VO_SECRETS_PATH"]
-    secrets = read_secrets(checkin_secrets_file)
-    disc = ShareDiscovery(
-        os_auth_url, os_identity_provider, os_protocol, checkin_token_url, vo_dir
-    )
-    config = disc.generate_config(site_name, secrets)
-    print(yaml.dump(config))
+    secrets_file = os.environ["SECRETS_FILE"]
+    config = {
+        "auth_url": os.environ["OS_AUTH_URL"],
+        "identity_provider": os.environ["OS_IDENTITY_PROVIDER"],
+        "protocol": os.environ["OS_PROTOCOL"],
+        "site_name": os.environ["SITE_NAME"],
+        "token_url": os.environ.get("TOKEN_URL", ""),
+        "vo_dir": os.environ.get("VO_SECRETS_PATH", ""),
+    }
+    secrets = read_secrets(secrets_file)
+    shares_config = generate_shares_config(config, secrets)
+    print(yaml.dump(shares_config))


 if __name__ == "__main__":
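Note: the dispatch between the two secret formats is the heart of `generate_shares()`. A small sketch of that selection logic run against a sample secrets file; the YAML content and secret names are made up:

```python
# Sketch of the secret-format dispatch in config_generator.generate_shares():
# refresh-token style secrets go to RefresherShareDiscovery, access-token
# style secrets to AccessTokenShareDiscovery, anything else is ignored.
import yaml

SECRETS_YAML = """
vo-with-refresh:
  client_id: "id"
  client_secret: "secret"
  refresh_token: "refresh"
vo-with-token:
  access_token: "token"
not-a-secret: 42
"""

for name, secret in yaml.safe_load(SECRETS_YAML).items():
    if not isinstance(secret, dict):
        print(f"{name}: ignored")
    elif "client_id" in secret and "refresh_token" in secret:
        print(f"{name}: RefresherShareDiscovery")
    elif "access_token" in secret:
        print(f"{name}: AccessTokenShareDiscovery")
```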
diff --git a/cloud-info/cloud_info_catchall/share_discovery.py b/cloud-info/cloud_info_catchall/share_discovery.py
new file mode 100644
index 00000000..a5ba7d21
--- /dev/null
+++ b/cloud-info/cloud_info_catchall/share_discovery.py
@@ -0,0 +1,99 @@
+"""Discover projects for cloud-info-provider and generate configuration
+"""
+
+import logging
+import os
+
+import fedcloudclient.endpoint as fedcli
+from cloud_info_provider.auth_refreshers.oidc_refresh import OidcRefreshToken
+
+
+class ShareDiscovery:
+    def __init__(self, config, secret):
+        self.auth_url = config["auth_url"]
+        self.identity_provider = config["identity_provider"]
+        self.protocol = config["protocol"]
+        self.secret = secret
+
+    def build_share(self, project, access_token):
+        return {"auth": {"project_id": project["id"]}}
+
+    def get_token_shares(self):
+        access_token = self.get_token()
+        # rely on fedcloudclient for getting the token
+        # exchange access_token for a Keystone token
+        shares = {}
+        try:
+            token = fedcli.retrieve_unscoped_token(
+                self.auth_url, access_token, self.protocol
+            )
+        except fedcli.TokenException:
+            # this check-in account does not have access to the site, ignore
+            return shares
+        projects = fedcli.get_projects_from_single_site(self.auth_url, token)
+        for p in projects:
+            vo = p.get("VO", None)
+            if not vo:
+                logging.warning(
+                    "Discarding project %s as it does not have VO property", p["name"]
+                )
+                continue
+            if not p.get("enabled", False):
+                logging.warning("Discarding project %s as it is not enabled", p["name"])
+                continue
+            shares[vo] = self.build_share(p, access_token)
+        self.config_shares(shares, access_token)
+        return shares
+
+    def config_shares(self, shares, access_token):
+        """do any additional configuration to support the shares"""
+        pass
+
+    def get_token(self):
+        raise NotImplementedError
+
+
+class RefresherShareDiscovery(ShareDiscovery):
+    """Refreshes tokens using a refresh token and creates a VO configuration
+    so they can be refreshed again by cloud-info-provider"""
+
+    def __init__(self, config, secret):
+        super().__init__(config, secret)
+        self.token_url = config["token_url"]
+        self.vo_dir = config["vo_dir"]
+
+    def get_token(self):
+        # fake the options for refreshing
+        # avoids code duplication but not very clean
+        class Opt:
+            timeout = 10
+
+        refresher = OidcRefreshToken(Opt)
+        return refresher._refresh_token(
+            self.token_url,
+            self.secret.get("client_id", None),
+            self.secret.get("client_secret", None),
+            self.secret.get("refresh_token", None),
+            "openid email profile voperson_id eduperson_entitlement",
+        )
+
+    def config_shares(self, shares, access_token):
+        # create the directory structure for the cloud-info-provider
+        for d in shares:
+            dir_path = os.path.join(self.vo_dir, d)
+            os.makedirs(dir_path, exist_ok=True)
+            for field in "client_id", "client_secret", "refresh_token":
+                with open(os.path.join(dir_path, field), "w+") as f:
+                    f.write(self.secret.get(field, None) or "")
+
+
+class AccessTokenShareDiscovery(ShareDiscovery):
+    """Uses existing access token to create VO configuration"""
+
+    def get_token(self):
+        return self.secret["access_token"]
+
+    def build_share(self, project, access_token):
+        s = super().build_share(project, access_token)
+        s["auth"].update({"access_token": access_token})
+        return s
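Note: for reference, a sketch of how a caller could drive the two discoverers directly. This assumes a reachable OpenStack Keystone and Check-in setup; the endpoint URLs and secret values below are placeholders, and `get_token_shares()` performs real network calls:

```python
# Sketch only: drives RefresherShareDiscovery and AccessTokenShareDiscovery
# against a hypothetical endpoint; all URLs and credentials are placeholders.
from cloud_info_catchall.share_discovery import (
    AccessTokenShareDiscovery,
    RefresherShareDiscovery,
)

config = {
    "auth_url": "https://keystone.example.org:5000/v3",  # placeholder
    "identity_provider": "egi.eu",
    "protocol": "oidc",
    "token_url": "https://aai.egi.eu",                   # placeholder
    "vo_dir": "/tmp/vos",
}

# Refresh-token style secret: a token is minted on the fly and the VO
# credential directory is written out by config_shares().
refresher = RefresherShareDiscovery(
    config, {"client_id": "id", "client_secret": "secret", "refresh_token": "rt"}
)

# Access-token style secret: the token is used as-is and embedded in the share.
token_based = AccessTokenShareDiscovery(config, {"access_token": "token"})

for discoverer in (refresher, token_based):
    shares = discoverer.get_token_shares()  # {"vo-name": {"auth": {...}}, ...}
```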
diff --git a/cloud-info/cloud_info_catchall/test.py b/cloud-info/cloud_info_catchall/test.py
deleted file mode 100644
index a2bdfd63..00000000
--- a/cloud-info/cloud_info_catchall/test.py
+++ /dev/null
@@ -1,124 +0,0 @@
-""" Tests for the config generator """
-
-import unittest
-from unittest.mock import call, mock_open, patch
-
-from cloud_info_catchall.config_generator import ShareDiscovery
-from fedcloudclient.endpoint import TokenException
-
-
-class TestConfig(unittest.TestCase):
-    @patch(
-        "cloud_info_provider.auth_refreshers.oidc_refresh.OidcRefreshToken._refresh_token"
-    )
-    def test_token_refresh(self, m):
-        d = ShareDiscovery(
-            "https://openstack.org", "egi.eu", "oidc", "https://aai.egi.eu", "vo"
-        )
-        t = d.refresh_token(
-            {"client_id": "id", "client_secret": "secret", "refresh_token": "token"}
-        )
-        m.assert_called_with(
-            "https://aai.egi.eu",
-            "id",
-            "secret",
-            "token",
-            "openid email profile voperson_id eduperson_entitlement",
-        )
-        self.assertEqual(t, m.return_value)
-
-    @patch("fedcloudclient.endpoint.retrieve_unscoped_token")
-    def test_failed_token_shares(self, m):
-        d = ShareDiscovery(
-            "https://openstack.org", "egi.eu", "oidc", "https://aai.egi.eu", "vo"
-        )
-        m.side_effect = TokenException()
-        s = d.get_token_shares("foobar")
-        m.assert_called_with("https://openstack.org", "foobar", "oidc")
-        self.assertEqual(s, {})
-
-    @patch("fedcloudclient.endpoint.get_projects_from_single_site")
-    @patch("fedcloudclient.endpoint.retrieve_unscoped_token")
-    def test_token_shares(self, m_token, m_proj):
-        d = ShareDiscovery(
-            "https://openstack.org", "egi.eu", "oidc", "https://aai.egi.eu", "vo"
-        )
-        m_proj.return_value = [
-            {
-                "VO": "foobar.eu",
-                "id": "id1",
-                "name": "enabled foobar VO",
-                "enabled": True,
-            },
-            {"VO": "disabled.eu", "id": "id2", "name": "disabled VO", "enabled": False},
-            {"id": "id3", "name": "not VO project", "enabled": True},
-        ]
-        s = d.get_token_shares("foobar")
-        m_token.assert_called_with("https://openstack.org", "foobar", "oidc")
-        m_proj.assert_called_with("https://openstack.org", m_token.return_value)
-        # return only the enabled with VO
-        self.assertEqual(s, {"foobar.eu": {"auth": {"project_id": "id1"}}})
-
-    @patch.object(ShareDiscovery, "refresh_token")
-    @patch.object(ShareDiscovery, "get_token_shares")
-    @patch("os.makedirs")
-    def test_generate_shares(self, m_makedirs, m_shares, m_refresh):
-        d = ShareDiscovery(
-            "https://openstack.org", "egi.eu", "oidc", "https://aai.egi.eu", "vo"
-        )
-        vos = {
-            "foobar.eu": {
-                "client_id": "bar",
-                "client_secret": "foo",
-                "refresh_token": "foobar",
-            },
-            "baz.eu": {
-                "client_id": "barz",
-                "refresh_token": "foobarz",
-            },
-        }
-        m_shares.side_effect = [
-            {"foobar.eu": {"auth": {"project_id": "id1"}}},
-            {"baz.eu": {"auth": {"project_id": "id2"}}},
-        ]
-        with patch("builtins.open", mock_open()) as m_file:
-            s = d.generate_shares({"s1": vos["foobar.eu"], "s2": vos["baz.eu"]})
-        handle = m_file()
-        for vo in vos:
-            for field in vos[vo]:
-                m_file.assert_any_call(f"vo/{vo}/{field}", "w+"),
-                handle.write.assert_any_call(vos[vo][field])
-        m_refresh.assert_has_calls([call(vos["foobar.eu"]), call(vos["baz.eu"])])
-        m_shares.assert_called_with(m_refresh.return_value)
-        m_makedirs.assert_has_calls(
-            [call("vo/foobar.eu", exist_ok=True), call("vo/baz.eu", exist_ok=True)]
-        )
-        self.assertEqual(
-            s,
-            {
-                "foobar.eu": {"auth": {"project_id": "id1"}},
-                "baz.eu": {"auth": {"project_id": "id2"}},
-            },
-        )
-
-    def test_generate_empty_shares(self):
-        d = ShareDiscovery(
-            "https://openstack.org", "egi.eu", "oidc", "https://aai.egi.eu", "vo"
-        )
-        with self.assertRaises(Exception):
-            d.generate_shares({})
-
-    @patch.object(ShareDiscovery, "generate_shares")
-    def test_generate_config(self, m):
-        d = ShareDiscovery(
-            "https://openstack.org", "egi.eu", "oidc", "https://aai.egi.eu", "vo"
-        )
-        s = d.generate_config("site", {})
-        m.assert_called_with({})
-        self.assertEqual(
-            s, {"site": {"name": "site"}, "compute": {"shares": m.return_value}}
-        )
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/cloud-info/cloud_info_catchall/test_config_generator.py b/cloud-info/cloud_info_catchall/test_config_generator.py
new file mode 100644
index 00000000..436e3da2
--- /dev/null
+++ b/cloud-info/cloud_info_catchall/test_config_generator.py
@@ -0,0 +1,45 @@
+""" Tests for the config generator """
+
+import unittest
+from collections import defaultdict
+from unittest.mock import patch
+
+import cloud_info_catchall.config_generator as cg
+
+
+class ConfigGeneratorTest(unittest.TestCase):
+    @patch("cloud_info_catchall.config_generator.generate_shares")
+    def test_generate_shares_config(self, m_gen):
+        config = {"site_name": "SITE"}
+        secrets = {}
+        r = cg.generate_shares_config(config, secrets)
+        m_gen.assert_called_with(config, secrets)
+        self.assertEqual(
+            r, {"site": {"name": "SITE"}, "compute": {"shares": m_gen.return_value}}
+        )
+
+    def test_generate_empty_shares(self):
+        with self.assertRaises(Exception):
+            cg.generate_shares({}, {})
+
+    @patch("cloud_info_catchall.share_discovery.ShareDiscovery.get_token_shares")
+    def test_generate_shares(self, m_token_shares):
+        refresh_secret = {
+            "foo": {
+                "client_id": "id",
+                "client_secret": "secret",
+                "refresh_token": "refresh",
+            }
+        }
+        token_secret = {"bar": {"access_token": "token"}}
+        secrets = {}
+        secrets.update(refresh_secret)
+        secrets.update(token_secret)
+        m_token_shares.return_value = {"one": "two"}
+        r = cg.generate_shares(defaultdict(lambda: ""), secrets)
+        m_token_shares.assert_called_with()
+        self.assertEqual(r, {"one": "two"})
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/cloud-info/cloud_info_catchall/test_share_discovery.py b/cloud-info/cloud_info_catchall/test_share_discovery.py
new file mode 100644
index 00000000..3861d417
--- /dev/null
+++ b/cloud-info/cloud_info_catchall/test_share_discovery.py
@@ -0,0 +1,134 @@
+""" Tests for the Share discovery """
+
+import unittest
+from unittest.mock import MagicMock, call, mock_open, patch
+
+from cloud_info_catchall.share_discovery import (
+    AccessTokenShareDiscovery,
+    RefresherShareDiscovery,
+    ShareDiscovery,
+)
+from fedcloudclient.endpoint import TokenException
+
+
+class ShareDiscoveryTest(unittest.TestCase):
+    DISCOVERER_CLASS = ShareDiscovery
+    CONFIG = {
+        "auth_url": "https://openstack.org",
+        "identity_provider": "egi.eu",
+        "protocol": "oidc",
+        "token_url": "https://aai.egi.eu",
+        "vo_dir": "vo",
+    }
+    SECRET = {"foo": "bar"}
+
+    def setUp(self):
+        self.discoverer = self.DISCOVERER_CLASS(self.CONFIG, self.SECRET)
+
+    @patch("fedcloudclient.endpoint.get_projects_from_single_site")
+    @patch("fedcloudclient.endpoint.retrieve_unscoped_token")
+    def test_token_shares(self, m_fedcli_token, m_proj):
+        m_get_token = MagicMock()
+        self.discoverer.get_token = m_get_token
+        m_build_share = MagicMock()
+        self.discoverer.build_share = m_build_share
+        m_proj.return_value = [
+            {
+                "VO": "foobar.eu",
+                "id": "id1",
+                "name": "enabled foobar VO",
+                "enabled": True,
+            },
+            {"VO": "disabled.eu", "id": "id2", "name": "disabled VO", "enabled": False},
+            {"id": "id3", "name": "not VO project", "enabled": True},
+        ]
+        s = self.discoverer.get_token_shares()
+        m_fedcli_token.assert_called_with(
+            "https://openstack.org", m_get_token.return_value, "oidc"
+        )
+        m_get_token.assert_called_with()
+        m_proj.assert_called_with("https://openstack.org", m_fedcli_token.return_value)
+        m_build_share.assert_called_with(
+            {
+                "VO": "foobar.eu",
+                "id": "id1",
+                "name": "enabled foobar VO",
+                "enabled": True,
+            },
+            m_get_token.return_value,
+        )
+        # return only the enabled with VO
+        self.assertEqual(s, {"foobar.eu": m_build_share.return_value})
+
+    @patch("fedcloudclient.endpoint.retrieve_unscoped_token")
+    def test_failed_token_shares(self, m_fedcli_token):
+        m_get_token = MagicMock()
+        self.discoverer.get_token = m_get_token
+        m_fedcli_token.side_effect = TokenException()
+        s = self.discoverer.get_token_shares()
+        m_fedcli_token.assert_called_with(
+            "https://openstack.org", m_get_token.return_value, "oidc"
+        )
+        self.assertEqual(s, {})
+
+    def test_build_share(self):
+        project = {"id": "foobar"}
+        self.assertEqual(
+            self.discoverer.build_share(project, "token"),
+            {"auth": {"project_id": "foobar"}},
+        )
+
+
+class TestRefresherShareDiscovery(ShareDiscoveryTest):
+    SECRET = {"client_id": "id", "client_secret": "secret", "refresh_token": "token"}
+    DISCOVERER_CLASS = RefresherShareDiscovery
+
+    @patch(
+        "cloud_info_provider.auth_refreshers.oidc_refresh.OidcRefreshToken._refresh_token"
+    )
+    def test_token_refresh(self, m):
+        t = self.discoverer.get_token()
+        m.assert_called_with(
+            "https://aai.egi.eu",
+            "id",
+            "secret",
+            "token",
+            "openid email profile voperson_id eduperson_entitlement",
+        )
+        self.assertEqual(t, m.return_value)
+
+    @patch("os.makedirs")
+    def test_config_shares(self, m_makedirs):
+        shares = {
+            "foobar.eu": {"auth": {"project_id": "id1"}},
+            "baz.eu": {"auth": {"project_id": "id2"}},
+        }
+        with patch("builtins.open", mock_open()) as m_file:
+            self.discoverer.config_shares(shares, "token")
+        handle = m_file()
+        for vo in shares:
+            for field in self.SECRET:
+                m_file.assert_any_call(f"vo/{vo}/{field}", "w+")
+                handle.write.assert_any_call(self.SECRET[field])
+        m_makedirs.assert_has_calls(
+            [call("vo/foobar.eu", exist_ok=True), call("vo/baz.eu", exist_ok=True)]
+        )
+
+
+class TestAccessTokenShareDiscovery(ShareDiscoveryTest):
+    DISCOVERER_CLASS = AccessTokenShareDiscovery
+    SECRET = {"access_token": "token"}
+
+    def test_get_token(self):
+        self.assertEqual(self.discoverer.get_token(), "token")
+
+    def test_build_share(self):
+        project = {"id": "foobar"}
+        self.assertEqual(
+            self.discoverer.build_share(project, "token"),
+            {"auth": {"project_id": "foobar", "access_token": "token"}},
+        )
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/cloud-info/cloud_info_catchall/test_token_generator.py b/cloud-info/cloud_info_catchall/test_token_generator.py
new file mode 100644
index 00000000..46a52ac3
--- /dev/null
+++ b/cloud-info/cloud_info_catchall/test_token_generator.py
@@ -0,0 +1,114 @@
+""" Tests for the token generator """
+
+import unittest
+from unittest.mock import call, patch
+
+import cloud_info_catchall.token_generator as tg
+import jwt
+import responses
+from responses import matchers
+
+
+class TokenGeneratorTest(unittest.TestCase):
+    OIDC_CONFIG = {
+        "jwks_uri": "https://example.com",
+        "token_endpoint": "https://example.com",
+    }
+
+    @responses.activate
+    def test_get_access_token(self):
+        token_url = "https://example.com"
+        scopes = "a b c"
+        secret = {"client_id": "id", "client_secret": "secret"}
+        responses.post(
+            token_url,
+            json={"access_token": "foo"},
+            match=[
+                matchers.urlencoded_params_matcher(
+                    {
+                        "grant_type": "client_credentials",
+                        "client_id": "id",
+                        "client_secret": "secret",
+                        "scope": "a b c",
+                    }
+                )
+            ],
+        )
+        self.assertEqual(tg.get_access_token(token_url, scopes, secret), "foo")
+
+    def test_valid_token_no_token(self):
+        self.assertEqual(tg.valid_token(None, None, None), False)
+
+    @responses.activate
+    def _inner_test_valid_token(self, ttl, result):
+        responses.get("https://example.com", json={"keys": [{"kid": "123"}]})
+        self.assertEqual(tg.valid_token("foo", self.OIDC_CONFIG, ttl), result)
+
+    def _setup_valid_token_test(self, m_header, m_decode, m_calendar):
+        m_header.return_value = {"kid": "123", "alg": "bar"}
+        m_decode.return_value = {"exp": 10}
+        m_calendar.return_value = 8
+
+    def _assert_valid_token_test(self, m_header, m_decode, m_alg):
+        m_header.assert_called_with("foo")
+        m_alg.assert_called_with('{"kid": "123"}')
+        m_decode.assert_called_with("foo", key=m_alg.return_value, algorithms=["bar"])
+
+    @patch("jwt.algorithms.RSAAlgorithm.from_jwk")
+    @patch("jwt.get_unverified_header")
+    @patch("jwt.decode")
+    @patch("calendar.timegm")
+    def test_valid_token_within_time(self, m_calendar, m_decode, m_header, m_alg):
+        self._setup_valid_token_test(m_header, m_decode, m_calendar)
+        self._inner_test_valid_token(1, True)
+        self._assert_valid_token_test(m_header, m_decode, m_alg)
+        m_calendar.assert_called_once()
+
+    @patch("jwt.algorithms.RSAAlgorithm.from_jwk")
+    @patch("jwt.get_unverified_header")
+    @patch("jwt.decode")
+    @patch("calendar.timegm")
+    def test_valid_token_not_within_time(self, m_calendar, m_decode, m_header, m_alg):
+        self._setup_valid_token_test(m_header, m_decode, m_calendar)
+        self._inner_test_valid_token(5, False)
+        self._assert_valid_token_test(m_header, m_decode, m_alg)
+        m_calendar.assert_called_once()
+
+    @patch("jwt.algorithms.RSAAlgorithm.from_jwk")
+    @patch("jwt.get_unverified_header")
+    @patch("jwt.decode")
+    @patch("calendar.timegm")
+    def test_valid_token_decode_exception(self, m_calendar, m_decode, m_header, m_alg):
+        self._setup_valid_token_test(m_header, m_decode, m_calendar)
+        m_decode.side_effect = jwt.DecodeError()
+        self._inner_test_valid_token(1, False)
+        self._assert_valid_token_test(m_header, m_decode, m_alg)
+        m_calendar.assert_not_called()
+
+    @patch("jwt.algorithms.RSAAlgorithm.from_jwk")
+    @patch("jwt.get_unverified_header")
+    @patch("jwt.decode")
+    @patch("calendar.timegm")
+    def test_valid_token_expired_exception(self, m_calendar, m_decode, m_header, m_alg):
+        self._setup_valid_token_test(m_header, m_decode, m_calendar)
+        m_decode.side_effect = jwt.ExpiredSignatureError()
+        self._inner_test_valid_token(1, False)
+        self._assert_valid_token_test(m_header, m_decode, m_alg)
+        m_calendar.assert_not_called()
+
+    @patch("cloud_info_catchall.token_generator.valid_token")
+    @patch("cloud_info_catchall.token_generator.get_access_token")
+    def test_generate_tokens(self, m_get_access, m_valid_token):
+        tokens = {"foo": {"access_token": "abc"}, "bar": {"access_token": "def"}}
+        secrets = {"foo": {}, "bar": {}}
+        m_valid_token.side_effect = [True, False]
+        m_get_access.return_value = "xyz"
+        tg.generate_tokens(self.OIDC_CONFIG, "abc", tokens, 8, secrets)
+        m_valid_token.assert_has_calls(
+            [call("abc", self.OIDC_CONFIG, 8), call("def", self.OIDC_CONFIG, 8)]
+        )
+        m_get_access.assert_called_with("https://example.com", "abc", {})
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/cloud-info/cloud_info_catchall/token_generator.py b/cloud-info/cloud_info_catchall/token_generator.py
new file mode 100755
index 00000000..aa56a0ca
--- /dev/null
+++ b/cloud-info/cloud_info_catchall/token_generator.py
@@ -0,0 +1,106 @@
+"""Refreshes credentials for the cloud-info-provider
+
+Takes its own configuration from env variables:
+CHECKIN_SECRETS_FILE: yaml file with the check-in secrets to get access tokens
+CHECKIN_SCOPES: scopes to request in the access token
+CHECKIN_OIDC_URL: discovery URL for Check-in
+ACCESS_TOKEN_FILE: file where to dump the new access tokens if needed
+ACCESS_TOKEN_TTL: minimum TTL for the access token
+"""
+
+import calendar
+import json
+import logging
+import os
+from datetime import datetime, timezone
+
+import jwt
+import requests
+import yaml
+
+# Default OIDC URL for Check-in
+CHECKIN_OIDC_URL = "https://aai.egi.eu/auth/realms/egi/.well-known/openid-configuration"
+# Default list of scopes
+CHECKIN_SCOPES = "openid profile eduperson_entitlement email"
+# Default access token TTL: 20 minutes
+ACCESS_TOKEN_TTL = 20 * 60
+
+
+def read_secrets(secrets_file):
+    with open(secrets_file, "r") as f:
+        return yaml.load(f.read(), Loader=yaml.SafeLoader)
+
+
+def get_access_token(token_url, scopes, secret):
+    payload = {
+        "grant_type": "client_credentials",
+        "client_id": secret["client_id"],
+        "client_secret": secret["client_secret"],
+        "scope": scopes,
+    }
+    r = requests.post(token_url, data=payload)
+    return r.json()["access_token"]
+
+
+def valid_token(token, oidc_config, min_time):
+    if not token:
+        return False
+    jwks_config = requests.get(oidc_config["jwks_uri"]).json()
+    # See https://stackoverflow.com/a/68891371
+    public_keys = {}
+    for jwk in jwks_config["keys"]:
+        kid = jwk["kid"]
+        public_keys[kid] = jwt.algorithms.RSAAlgorithm.from_jwk(json.dumps(jwk))
+    try:
+        headers = jwt.get_unverified_header(token)
+        kid = headers["kid"]
+        key = public_keys[kid]
+        payload = jwt.decode(token, key=key, algorithms=[headers["alg"]])
+        # this comes from the JWT documentation
+        # https://pyjwt.readthedocs.io/en/stable/usage.html#expiration-time-claim-exp
+        now = calendar.timegm(datetime.now(tz=timezone.utc).utctimetuple())
+        return payload["exp"] - now > min_time
+    except (jwt.DecodeError, jwt.ExpiredSignatureError) as e:
+        logging.warning(f"Unable to decode token or token expired: {e}")
+        return False
+
+
+def generate_tokens(oidc_config, scopes, tokens, token_ttl, secrets):
+    new_tokens = {}
+    for s in secrets:
+        # not our thing
+        if not isinstance(secrets[s], dict):
+            continue
+        if "refresh_token" in secrets[s]:
+            # ignore those that have a refresh token
+            continue
+        token = tokens.get(s, {}).get("access_token", None)
+        if not valid_token(token, oidc_config, token_ttl):
+            logging.info("Token needs refreshing")
+            token = get_access_token(oidc_config["token_endpoint"], scopes, secrets[s])
+        else:
+            logging.info("Token is still valid, not refreshing")
+        new_tokens[s] = {"access_token": token}
+    return new_tokens
+
+
+def main():
+    logging.basicConfig()
+    # get config from env
+    checkin_secrets_file = os.environ["CHECKIN_SECRETS_FILE"]
+    oidc_config_url = os.environ.get("CHECKIN_OIDC_URL", CHECKIN_OIDC_URL)
+    oidc_config = requests.get(oidc_config_url).json()
+    scopes = os.environ.get("CHECKIN_SCOPES", CHECKIN_SCOPES)
+    access_token_file = os.environ["ACCESS_TOKEN_FILE"]
+    token_ttl = int(os.environ.get("ACCESS_TOKEN_TTL", ACCESS_TOKEN_TTL))
+    secrets = read_secrets(checkin_secrets_file)
+    tokens = {}
+    if os.path.exists(access_token_file):
+        tokens.update(read_secrets(access_token_file))
+    new_tokens = generate_tokens(oidc_config, scopes, tokens, token_ttl, secrets)
+    with open(access_token_file, "w+") as f:
+        f.write(yaml.dump(new_tokens))
+
+
+if __name__ == "__main__":
+    main()
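Note: `valid_token()` verifies the signature against the provider's JWKS and then requires a minimum remaining lifetime on the `exp` claim. A self-contained sketch of that TTL check, substituting an HS256 shared secret for the JWKS RSA keys so it runs offline; all values are illustrative:

```python
# Offline sketch of the TTL check in token_generator.valid_token(): sign a
# short-lived token, decode it, and compare "exp" against now + minimum TTL.
# HS256 with a shared secret stands in for the RSA keys fetched from JWKS.
import calendar
from datetime import datetime, timedelta, timezone

import jwt

SECRET = "not-a-real-secret"  # illustrative only


def remaining_lifetime(token: str) -> int:
    # jwt.decode also raises ExpiredSignatureError if "exp" is in the past
    payload = jwt.decode(token, key=SECRET, algorithms=["HS256"])
    now = calendar.timegm(datetime.now(tz=timezone.utc).utctimetuple())
    return payload["exp"] - now


exp = datetime.now(tz=timezone.utc) + timedelta(minutes=30)
token = jwt.encode({"exp": exp}, SECRET, algorithm="HS256")

min_ttl = 20 * 60  # same default as ACCESS_TOKEN_TTL
print(remaining_lifetime(token) > min_ttl)  # True: more than 20 minutes left
```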
diff --git a/cloud-info/pyproject.toml b/cloud-info/pyproject.toml
index e3d133b5..6891f142 100644
--- a/cloud-info/pyproject.toml
+++ b/cloud-info/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

 [project]
 name = "cloud_info_catchall"
-version = "0.0.1"
+version = "0.0.2"
 description = "Some tools to make cloud-info work with our deployment"
 authors = [
     { name = "Enol Fernandez", email = "enol.fernandez@egi.eu" },
@@ -18,6 +18,7 @@ requires-python = ">=3.10"

 [project.scripts]
 config-generator = "cloud_info_catchall.config_generator:main"
+token-generator = "cloud_info_catchall.token_generator:main"

 [tool.setuptools.dynamic]
 dependencies = {file = ["requirements.txt"]}
diff --git a/cloud-info/requirements.txt b/cloud-info/requirements.txt
index b60c419c..b0c673c5 100644
--- a/cloud-info/requirements.txt
+++ b/cloud-info/requirements.txt
@@ -1,5 +1,5 @@
-# Cloud info version is f6f6a2e265cc9608d791f31a8ef2903302ca33f6
-git+https://github.com/EGI-Federation/cloud-info-provider.git@f6f6a2e265cc9608d791f31a8ef2903302ca33f6
+# Cloud info version is 9d4c4c516b9311c77564444cb9ecbb059b7f2192
+git+https://github.com/EGI-Federation/cloud-info-provider.git@9d4c4c516b9311c77564444cb9ecbb059b7f2192
 git+https://github.com/ARGOeu/argo-ams-library@devel
 python-glanceclient
 python-novaclient
@@ -7,5 +7,7 @@ python-keystoneclient
 keystoneauth1
 yq
 fedcloudclient
+PyJWT
+responses
diff --git a/cloudkeeper-core/Dockerfile b/cloudkeeper-core/Dockerfile
deleted file mode 100644
index 3e95390e..00000000
--- a/cloudkeeper-core/Dockerfile
+++ /dev/null
@@ -1,38 +0,0 @@
-FROM ruby:2.6
-
-LABEL org.opencontainers.image.source=https://github.com/EGI-Federation/fedcloud-catchall-operations
-
-SHELL ["/bin/bash", "-o", "pipefail", "-c"]
-
-# We do need install recommends for the CAs
-# hadolint ignore=DL3015, DL3008
-RUN apt-get update \
-    && apt-get -qy install --fix-missing --no-install-recommends curl qemu-utils \
-    && curl https://dl.igtf.net/distribution/igtf/current/GPG-KEY-EUGridPMA-RPM-3 | apt-key add - \
-    && echo 'deb http://repository.egi.eu/sw/production/cas/1/current egi-igtf core' \
-        > /etc/apt/sources.list.d/cas.list \
-    && apt-get update \
-    && apt-get -qy install --fix-missing fetch-crl \
-    && apt-get -qy install --fix-missing ca-policy-egi-core \
-    && apt-get clean autoclean \
-    && apt-get autoremove -y \
-    && rm -rf /var/lib/apt/lists/* \
-    && rm -rf /tmp/*.deb
-
-RUN fetch-crl -p 2 -T 30 || exit 0
-
-RUN gem install google-protobuf -v 3.23.4 \
-    && gem install cloudkeeper -v 1.7.1
-
-COPY entrypoint.sh /entrypoint.sh
-RUN chmod +x /entrypoint.sh
-
-COPY image-lists.conf /etc/cloudkeeper/image-lists.conf
-COPY cloudkeeper.yml /etc/cloudkeeper/cloudkeeper.yml
-
-RUN mkdir -p /var/log/cloudkeeper \
-    /var/lock/cloudkeeper \
-    /var/spool/cloudkeeper/images
-
-ENTRYPOINT ["/entrypoint.sh"]
-CMD ["cloudkeeper", "sync"]
diff --git a/cloudkeeper-core/cloudkeeper.yml b/cloudkeeper-core/cloudkeeper.yml
deleted file mode 100644
index ffd76299..00000000
--- a/cloudkeeper-core/cloudkeeper.yml
+++ /dev/null
@@ -1,35 +0,0 @@
----
-cloudkeeper:
-  image-lists-file: /etc/cloudkeeper/image-lists.conf
-  ca-dir: /etc/grid-security/certificates/ # CA directory
-  authentication: false # core (client) <-> backend (server) authentication (certificate, key and backend-certificate options)
-  certificate: /etc/grid-security/hostcert.pem # Core's host certificate
-  key: /etc/grid-security/hostkey.pem # Core's host key
-  image-dir: /var/spool/cloudkeeper/images/ # Directory to store images to
-  external-tools:
-    binaries:
-      qemu-img: /usr/bin/qemu-img # qemu-img binary (image conversion) location
-      nginx: /usr/bin/nginx # nginx binary (HTTP server) location
-    execution-timeout: 600 # timeout for execution of external tools in seconds
-  remote-mode: false # Remote mode starts HTTP server (NGINX) and serves images to backend via HTTP
-  nginx:
-    runtime-dir: /var/run/cloudkeeper/ # Runtime directory for NGINX
-    error-log-file: /var/log/cloudkeeper/nginx-error.log # File for NGINX error log
-    access-log-file: /var/log/cloudkeeper/nginx-access.log # File for NGINX access log
-    pid-file: /var/run/cloudkeeper/nginx.pid # NGINX pid file
-    ip-address: 127.0.0.1 # IP address NGINX can listen on
-    port: 50505 # Port NGINX can listen on
-    proxy:
-      ip-address: # Proxy IP address
-      port: # Proxy port
-      ssl: false # Whether proxy will use SSL connection
-  backend:
-    endpoint: 127.0.0.1:50051 # Backend's gRPC endpoint
-    certificate: /etc/grid-security/backendcert.pem # Backend's certificate
-  formats: # List of acceptable formats images can be converted to
-    - qcow2
-  logging:
-    level: ERROR # Logging level
-    file: /var/log/cloudkeeper/cloudkeeper.log # File to write log to. To turn off file logging leave this field empty.
-  lock-file: /var/lock/cloudkeeper/cloudkeeper.lock # File used to ensure only one running instance of cloudkeeper
-  debug: false # Debug mode
diff --git a/cloudkeeper-core/entrypoint.sh b/cloudkeeper-core/entrypoint.sh
deleted file mode 100755
index 1f42289d..00000000
--- a/cloudkeeper-core/entrypoint.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/bash
-
-if [ "$DEBUG" = "1" ] ; then
-    set -x
-fi
-
-EXTRA_OPTS=()
-
-if [ "$BACKEND_PORT_50051_TCP_ADDR" != "" ]; then
-    EXTRA_OPTS=("${EXTRA_OPTS[@]}" --backend-endpoint="$BACKEND_PORT_50051_TCP_ADDR:$BACKEND_PORT_50051_TCP_PORT")
-fi
-
-exec "$@" "${EXTRA_OPTS[@]}"
diff --git a/cloudkeeper-core/image-lists.conf b/cloudkeeper-core/image-lists.conf
deleted file mode 100644
index 6155091b..00000000
--- a/cloudkeeper-core/image-lists.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-# One image list per line
-# Lines can be commented if started with "#"
-https://APPDB_TOKEN:x-oauth-basic@vmcaster.appdb.egi.eu/store/vo/ops/image.list
diff --git a/cloudkeeper-os/Dockerfile b/cloudkeeper-os/Dockerfile
deleted file mode 100644
index 0abc784e..00000000
--- a/cloudkeeper-os/Dockerfile
+++ /dev/null
@@ -1,24 +0,0 @@
-FROM centos:7
-
-LABEL org.opencontainers.image.source=https://github.com/EGI-Federation/fedcloud-catchall-operations
-
-COPY cloudkeeper.repo /etc/yum.repos.d/cloudkeeper.repo
-
-# hadolint ignore=DL3033
-RUN yum install -y centos-release-openstack-stein \
-    && yum install -y cloudkeeper-os \
-    && yum clean all
-
-RUN mkdir -p /etc/cloudkeeper-os/ \
-    /var/spool/cloudkeeper/images
-
-COPY cloudkeeper-os.conf /etc/cloudkeeper-os/cloudkeeper-os.conf
-COPY mapping.json /etc/cloudkeeper-os/mapping.json
-
-# Patch the cloudkeeper-os
-COPY openstack_client.py /usr/lib/python2.7/site-packages/cloudkeeper_os/openstack_client.py
-RUN python -m compileall /usr/lib/python2.7/site-packages/cloudkeeper_os/openstack_client.py
-
-EXPOSE 50051
-
-CMD ["cloudkeeper-os"]
diff --git a/cloudkeeper-os/cloudkeeper-os.conf b/cloudkeeper-os/cloudkeeper-os.conf
deleted file mode 100644
index f2823011..00000000
--- a/cloudkeeper-os/cloudkeeper-os.conf
+++ /dev/null
@@ -1,136 +0,0 @@
-[DEFAULT]
-
-#
-# From cloudkeeper_os.config
-#
-
-# The port on which the server will listen. (port value)
-# Minimum value: 0
-# Maximum value: 65535
-#grpc_port = 50051
-
-# Directory where the cloud credentials for each VO are stored. (string value)
-#mapping_file = /etc/cloudkeeper-os/voms.json
-
-
-# Directory where the images are downloaded (string value)
-#tempdir = /tmp
-
-#
-# From oslo.log
-#
-
-# If set to true, the logging level will be set to DEBUG instead of the default
-# INFO level. (boolean value)
-# Note: This option can be changed without restarting.
-#debug = false
-
-# DEPRECATED: If set to false, the logging level will be set to WARNING instead
-# of the default INFO level. (boolean value)
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#verbose = true
-
-# The name of a logging configuration file. This file is appended to any
-# existing logging configuration files. For details about logging configuration
-# files, see the Python logging module documentation. Note that when logging
-# configuration files are used then all logging configuration is set in the
-# configuration file and other logging configuration options are ignored (for
-# example, logging_context_format_string). (string value)
-# Note: This option can be changed without restarting.
-# Deprecated group/name - [DEFAULT]/log_config
-#log_config_append =
-
-# Defines the format string for %%(asctime)s in log records. Default:
-# %(default)s . This option is ignored if log_config_append is set. (string
-# value)
-#log_date_format = %Y-%m-%d %H:%M:%S
-
-# (Optional) Name of log file to send logging output to. If no default is set,
-# logging will go to stderr as defined by use_stderr. This option is ignored if
-# log_config_append is set. (string value)
-# Deprecated group/name - [DEFAULT]/logfile
-#log_file =
-
-# (Optional) The base directory used for relative log_file paths. This option
-# is ignored if log_config_append is set. (string value)
-# Deprecated group/name - [DEFAULT]/logdir
-#log_dir =
-
-# Uses logging handler designed to watch file system. When log file is moved or
-# removed this handler will open a new log file with specified path
-# instantaneously. It makes sense only if log_file option is specified and
-# Linux platform is used. This option is ignored if log_config_append is set.
-# (boolean value)
-#watch_log_file = false
-
-# Use syslog for logging. Existing syslog format is DEPRECATED and will be
-# changed later to honor RFC5424. This option is ignored if log_config_append
-# is set. (boolean value)
-#use_syslog = false
-
-# Syslog facility to receive log lines. This option is ignored if
-# log_config_append is set. (string value)
-#syslog_log_facility = LOG_USER
-
-# Log output to standard error. This option is ignored if log_config_append is
-# set. (boolean value)
-#use_stderr = true
-
-# Format string to use for log messages with context. (string value)
-#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
-
-# Format string to use for log messages when context is undefined. (string
-# value)
-#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
-
-# Additional data to append to log message when logging level for the message
-# is DEBUG. (string value)
-#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
-
-# Prefix each line of exception output with this format. (string value)
-#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
-
-# Defines the format string for %(user_identity)s that is used in
-# logging_context_format_string. (string value)
-#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
-
-# List of package logging levels in logger=LEVEL pairs. This option is ignored
-# if log_config_append is set. (list value)
-#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
-
-# Enables or disables publication of error events. (boolean value)
-#publish_errors = false
-
-# The format for an instance that is passed with the log message. (string
-# value)
-#instance_format = "[instance: %(uuid)s] "
-
-# The format for an instance UUID that is passed with the log message. (string
-# value)
-#instance_uuid_format = "[instance: %(uuid)s] "
-
-# Enables or disables fatal status of deprecations. (boolean value)
-#fatal_deprecations = false
-
-
-[keystone_authtoken]
-
-#
-# From cloudkeeper_os.config
-#
-
-# Username (string value)
-#username = cloudkeeper
-
-# User's password (string value)
-#password =
-
-# User's domain name (string value)
-#user_domain_name = default
-
-# Domain name containing project (string value)
-#project_domain_name = default
-
-# Complete public Identity API endpoint. (string value)
-#auth_url =
diff --git a/cloudkeeper-os/cloudkeeper.repo b/cloudkeeper-os/cloudkeeper.repo
deleted file mode 100644
index 03a372a9..00000000
--- a/cloudkeeper-os/cloudkeeper.repo
+++ /dev/null
@@ -1,9 +0,0 @@
-# EGI Applications Database (http://appdb.egi.eu/)
-# EGI Community repository (http://repository.egi.eu/community/)
-# POA ID:1142
-# Created: 2022-03-30 15:04:59 EEST
-[cloudkeeper.os-sl-7-x86_64]
-name=Repository for cloudkeeper.os (o/s: sl7 arch: x86_64)
-baseurl=https://repository.egi.eu/community/software/cloudkeeper.os/0.9.x/releases/sl/7/x86_64/RPMS/
-enabled=1
-gpgcheck=0
diff --git a/cloudkeeper-os/mapping.json b/cloudkeeper-os/mapping.json
deleted file mode 100644
index 4890f1b2..00000000
--- a/cloudkeeper-os/mapping.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
-  "fedcloud.egi.eu": {
-    "tenant": "mytenant"
-  }
-}
diff --git a/cloudkeeper-os/openstack_client.py b/cloudkeeper-os/openstack_client.py
deleted file mode 100644
index ce636c94..00000000
--- a/cloudkeeper-os/openstack_client.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright 2017 CNRS and University of Strasbourg
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Keystone helper
-"""
-
-import glanceclient.v2.client as glanceclient
-import webob.exc
-from keystoneauth1 import exceptions, session
-from keystoneauth1.identity import v3
-from oslo_config import cfg
-from oslo_log import log
-
-CONF = cfg.CONF
-LOG = log.getLogger(__name__)
-
-CFG_GROUP = "keystone_authtoken"
-
-
-def get_session(project_name, domain_name):
-    """Get an auth session."""
-    try:
-        # attempt with project_id
-        auth_params = dict(CONF[CFG_GROUP])
-        auth_params.update(dict(project_id=project_name))
-        auth = v3.Password(**auth_params)
-        sess = session.Session(auth=auth, verify=False)
-        sess.get_token()
-    except exceptions.Unauthorized:
-        # attempt with project_name
-        auth_params = dict(CONF[CFG_GROUP])
-        auth_params.update(dict(project_name=project_name, domain_name=domain_name))
-        auth = v3.Password(**auth_params)
-        sess = session.Session(auth=auth, verify=False)
-    return sess
-
-
-def get_glance_client(project_name, domain_name):
-    """Get a glance client"""
-    LOG.debug("Get a glance client for the project: '%s'" % project_name)
-
-    endpoint_type = CONF.endpoint_type
-    try:
-        sess = get_session(project_name=project_name, domain_name=domain_name)
-        if endpoint_type:
-            LOG.debug(
-                "Glance client is accessing Glance through the "
-                "following endpoint type: %s" % endpoint_type
-            )
-            glance_client = glanceclient.Client(session=sess, interface=endpoint_type)
-        else:
-            glance_client = glanceclient.Client(session=sess)
-    except webob.exc.HTTPForbidden as err:
-        LOG.error("Connection to Glance failed.")
-        LOG.exception(err)
-        return None
-    return glance_client
- -"""Keystone helper -""" - -import glanceclient.v2.client as glanceclient -import webob.exc -from keystoneauth1 import exceptions, session -from keystoneauth1.identity import v3 -from oslo_config import cfg -from oslo_log import log - -CONF = cfg.CONF -LOG = log.getLogger(__name__) - -CFG_GROUP = "keystone_authtoken" - - -def get_session(project_name, domain_name): - """Get an auth session.""" - try: - # attempt with project_id - auth_params = dict(CONF[CFG_GROUP]) - auth_params.update(dict(project_id=project_name)) - auth = v3.Password(**auth_params) - sess = session.Session(auth=auth, verify=False) - sess.get_token() - except exceptions.Unauthorized: - # attempt with project_name - auth_params = dict(CONF[CFG_GROUP]) - auth_params.update(dict(project_name=project_name, domain_name=domain_name)) - auth = v3.Password(**auth_params) - sess = session.Session(auth=auth, verify=False) - return sess - - -def get_glance_client(project_name, domain_name): - """Get a glance client""" - LOG.debug("Get a glance client for the project: '%s'" % project_name) - - endpoint_type = CONF.endpoint_type - try: - sess = get_session(project_name=project_name, domain_name=domain_name) - if endpoint_type: - LOG.debug( - "Glance client is accessing Glance through the " - "following endpoint type: %s" % endpoint_type - ) - glance_client = glanceclient.Client(session=sess, interface=endpoint_type) - else: - glance_client = glanceclient.Client(session=sess) - except webob.exc.HTTPForbidden as err: - LOG.error("Connection to Glance failed.") - LOG.exception(err) - return None - return glance_client diff --git a/deploy/vos.yaml b/deploy/vos.yaml index 8e755f5f..45a162f3 100644 --- a/deploy/vos.yaml +++ b/deploy/vos.yaml @@ -95,8 +95,6 @@ vos: auth: "{{ fedcloud_sa }}" vo.e-rihs.eu: auth: "{{ fedcloud_sa }}" - vo.fuvex.es: - auth: "{{ fedcloud_sa }}" vo.geoss.eu: auth: "{{ fedcloud_sa }}" vo.i-nergy.eu: diff --git a/sites/SCAI.yaml b/sites/SCAI.yaml index e467a0b7..db5cbad0 100644 --- a/sites/SCAI.yaml +++ b/sites/SCAI.yaml @@ -11,6 +11,3 @@ vos: - name: vo.ebrain-health.eu auth: project_id: 4733227537724f7aade971aedefb015a -- name: vo.fuvex.es - auth: - project_id: e22d4912b50f47698b336e74cd18eee9 diff --git a/vo-mappings.yaml b/vo-mappings.yaml index 5ebd9def..e8932163 100644 --- a/vo-mappings.yaml +++ b/vo-mappings.yaml @@ -67,7 +67,6 @@ vos: vo.environmental.egi.eu: "urn:mace:egi.eu:group:vo.environmental.egi.eu:role=vm_operator#aai.egi.eu" vo.europlanet-vespa.eu: "urn:mace:egi.eu:group:vo.europlanet-vespa.eu:role=vm_operator#aai.egi.eu" vo.eurosea.marine.ie: "urn:mace:egi.eu:group:vo.eurosea.marine.ie:role=member#aai.egi.eu" - vo.fuvex.es: "urn:mace:egi.eu:group:vo.fuvex.es:role=vm_operator#aai.egi.eu" vo.geoss.eu: "urn:mace:egi.eu:group:vo.geoss.eu:role=vm_operator#aai.egi.eu" vo.i-nergy.eu: "urn:mace:egi.eu:group:vo.i-nergy.eu:role=vm_operator#aai.egi.eu" vo.imagine-ai.eu: "urn:mace:egi.eu: group:vo.imagine-ai.eu:role=vm_operator#aai.egi.eu"