From 2ad4fd96cf67c1673b8f3ac1af8aadc94e770af4 Mon Sep 17 00:00:00 2001 From: G-Lenz Date: Tue, 27 Aug 2024 10:52:13 -0400 Subject: [PATCH] Release v2.7.1 --- CHANGELOG.md | 12 ++++ README.md | 1 + SECURITY.md | 6 ++ .../lib/components/ecs-cluster-resources.ts | 4 ++ source/package-lock.json | 8 +-- source/package.json | 8 +-- .../__snapshots__/hub-snapshot.test.ts.snap | 22 ++++++ .../__tests__/test_directory_reader.py | 4 +- .../__tests__/test_metrics_helper.py | 38 +++++++--- .../__tests__/test_workspace_record.py | 19 +++-- .../__tests__/test_workspaces_helper.py | 23 +++--- .../workspaces_app/metrics_helper.py | 24 +++++-- .../utils/__tests__/test_usage_table_dao.py | 9 +-- .../workspaces_app/utils/solution_metrics.py | 6 ++ .../workspaces_app/workspace_record.py | 70 +++++++++++-------- .../workspaces_app/workspaces_helper.py | 14 ++++ 16 files changed, 185 insertions(+), 83 deletions(-) create mode 100644 SECURITY.md diff --git a/CHANGELOG.md b/CHANGELOG.md index ef0d2f8..3bc947d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,17 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [2.7.1] - 2024-08 +### Added +- SECURITY.md file + +### Fixed +- Workspace analysis failing when previous data is recorded in database but empty. +- Workspace analysis failing when timestamps from user connected data don't exist in other metric data. +- Workspace only reporting 24-hour period +- Workspace not reporting tags +- Updated micromatch to mitigate [CVE-2024-4067](https://avd.aquasec.com/nvd/2024/cve-2024-4067). 
+ ## [2.7.0] - 2024-07 ### Added - Workspace performance metrics @@ -13,6 +24,7 @@ - Powertools logging - Operational insights CloudWatch dashboard - Support for G4DN workspaces + ### Fixed - sts token expired after one hour diff --git a/README.md b/README.md index ccb0313..c1a5af2 100644 --- a/README.md +++ b/README.md @@ -148,6 +148,7 @@ npm run synth ├── LICENSE.txt ├── NOTICE.txt ├── README.md +├── SECURITY.md ├── buildspec.yml ├── deployment │   ├── build-open-source-dist.sh diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000..7d08f2f --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,6 @@ +Reporting Security Issues +---------------------------------------------------------------------------------------------------------- +We take all security reports seriously. When we receive such reports, we will investigate and +subsequently address any potential vulnerabilities as quickly as possible. If you discover a potential +security issue in this project, please notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/) or +directly via email to [AWS Security](mailto:aws-security@amazon.com). Please do not create a public GitHub issue in this project. 
diff --git a/source/lib/components/ecs-cluster-resources.ts b/source/lib/components/ecs-cluster-resources.ts index 0bba447..4f351e4 100644 --- a/source/lib/components/ecs-cluster-resources.ts +++ b/source/lib/components/ecs-cluster-resources.ts @@ -403,6 +403,10 @@ export class EcsClusterResources extends Construct { name: "NumberOfMonthsForTerminationCheck", value: props.numberOfmonthsForTerminationCheck, }, + { + name: "ImageVersion", + value: image, + }, ], }, ], diff --git a/source/package-lock.json b/source/package-lock.json index acd57d9..1d1603e 100644 --- a/source/package-lock.json +++ b/source/package-lock.json @@ -5878,12 +5878,12 @@ } }, "node_modules/micromatch": { - "version": "4.0.5", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", - "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", "dev": true, "dependencies": { - "braces": "^3.0.2", + "braces": "^3.0.3", "picomatch": "^2.3.1" }, "engines": { diff --git a/source/package.json b/source/package.json index 466cd6b..ee1f42f 100644 --- a/source/package.json +++ b/source/package.json @@ -18,10 +18,10 @@ "test": "jest --coverage", "license-report": "license-report --output=csv --delimiter=' under ' --fields=name --fields=licenseType", "cdk": "cdk", - "bootstrap": "SOLUTION_VERSION=v2.7.0 SOLUTION_NAME=cost-optimizer-for-amazon-workspaces SOLUTION_TRADEMARKEDNAME=cost-optimizer-for-amazon-workspaces cdk bootstrap", - "deploy": "SOLUTION_VERSION=v2.7.0 SOLUTION_NAME=cost-optimizer-for-amazon-workspaces SOLUTION_TRADEMARKEDNAME=cost-optimizer-for-amazon-workspaces cdk deploy cost-optimizer-for-amazon-workspaces", - "deploySpoke": "SOLUTION_VERSION=v2.7.0 SOLUTION_NAME=cost-optimizer-for-amazon-workspaces 
SOLUTION_TRADEMARKEDNAME=cost-optimizer-for-amazon-workspaces cdk deploy cost-optimizer-for-amazon-workspaces-spoke", - "synth": "SOLUTION_VERSION=v2.7.0 SOLUTION_NAME=cost-optimizer-for-amazon-workspaces SOLUTION_TRADEMARKEDNAME=cost-optimizer-for-amazon-workspaces DIST_OUTPUT_BUCKET=solutions-reference cdk synth" + "bootstrap": "SOLUTION_VERSION=v2.7.1 SOLUTION_NAME=cost-optimizer-for-amazon-workspaces SOLUTION_TRADEMARKEDNAME=cost-optimizer-for-amazon-workspaces cdk bootstrap", + "deploy": "SOLUTION_VERSION=v2.7.1 SOLUTION_NAME=cost-optimizer-for-amazon-workspaces SOLUTION_TRADEMARKEDNAME=cost-optimizer-for-amazon-workspaces cdk deploy cost-optimizer-for-amazon-workspaces", + "deploySpoke": "SOLUTION_VERSION=v2.7.1 SOLUTION_NAME=cost-optimizer-for-amazon-workspaces SOLUTION_TRADEMARKEDNAME=cost-optimizer-for-amazon-workspaces cdk deploy cost-optimizer-for-amazon-workspaces-spoke", + "synth": "SOLUTION_VERSION=v2.7.1 SOLUTION_NAME=cost-optimizer-for-amazon-workspaces SOLUTION_TRADEMARKEDNAME=cost-optimizer-for-amazon-workspaces DIST_OUTPUT_BUCKET=solutions-reference cdk synth" }, "devDependencies": { "@aws-cdk/assert": "2.68.0", diff --git a/source/test/__snapshots__/hub-snapshot.test.ts.snap b/source/test/__snapshots__/hub-snapshot.test.ts.snap index 1943e63..b2d89e3 100644 --- a/source/test/__snapshots__/hub-snapshot.test.ts.snap +++ b/source/test/__snapshots__/hub-snapshot.test.ts.snap @@ -1388,6 +1388,28 @@ exports[`hub stack synth matches the existing snapshot 1`] = ` "Ref": "NumberOfMonthsForTerminationCheck", }, }, + { + "Name": "ImageVersion", + "Value": { + "Fn::If": [ + "UseStableTagCondition", + { + "Fn::FindInMap": [ + "Solution", + "Data", + "StableImage", + ], + }, + { + "Fn::FindInMap": [ + "Solution", + "Data", + "Image", + ], + }, + ], + }, + }, ], "Essential": true, "Image": { diff --git a/source/workspaces_app/workspaces_app/__tests__/test_directory_reader.py b/source/workspaces_app/workspaces_app/__tests__/test_directory_reader.py index 
701a0e4..338785e 100644 --- a/source/workspaces_app/workspaces_app/__tests__/test_directory_reader.py +++ b/source/workspaces_app/workspaces_app/__tests__/test_directory_reader.py @@ -73,7 +73,6 @@ def ws_description(**kwargs): "username": "test-user", "computer_name": "test-computer", "initial_mode": "test-mode", - "tags": ["tag1", "tag2"], } filtered_args = { key: value for key, value in kwargs.items() if key in default_args.keys() @@ -118,6 +117,7 @@ def ws_record(ws_billing_data, ws_metrics): report_date="test-report-date", last_reported_metric_period="test-last-period", last_known_user_connection="test-last-connection", + tags="[{'key1': 'tag1'}, {'key2': 'tag2'}]", ) @@ -203,7 +203,7 @@ def test_process_directory_without_ddb_item( unittest.mock.ANY, unittest.mock.ANY, dashboard_metrics ) report_header = "WorkspaceID,Billable Hours,Usage Threshold,Change Reported,Bundle Type,Initial Mode,New Mode,Username,Computer Name,DirectoryId,WorkspaceTerminated,insessionlatency,cpuusage,memoryusage,rootvolumediskusage,uservolumediskusage,udppacketlossrate,Tags,ReportDate,\n" - list_processed_workspaces = "test-ws-id,20,100,No change,test-bundle,test-mode,test-mode,test-user,test-computer,test-dir-id,,93.42,94.42,95.42,96.42,97.42,98.42,\"['tag1', 'tag2']\",test-report-date\n" + list_processed_workspaces = "test-ws-id,20,100,No change,test-bundle,test-mode,test-mode,test-user,test-computer,test-dir-id,,93.42,94.42,95.42,96.42,97.42,98.42,[{'key1': 'tag1'}, {'key2': 'tag2'}],test-report-date\n" header_field_count = len(str.split(report_header, ",")) data_field_count = len(str.split(list_processed_workspaces, ",")) assert header_field_count == data_field_count diff --git a/source/workspaces_app/workspaces_app/__tests__/test_metrics_helper.py b/source/workspaces_app/workspaces_app/__tests__/test_metrics_helper.py index bbf8d7b..0bf5516 100644 --- a/source/workspaces_app/workspaces_app/__tests__/test_metrics_helper.py +++ 
b/source/workspaces_app/workspaces_app/__tests__/test_metrics_helper.py @@ -46,7 +46,6 @@ def ws_description(**kwargs): "username": "test-user", "computer_name": "test-computer", "initial_mode": "test-mode", - "tags": ["tag1", "tag2"], } filtered_args = { key: value for key, value in kwargs.items() if key in default_args.keys() @@ -91,6 +90,7 @@ def ws_record(ws_billing_data, ws_metrics): report_date="test-report-date", last_reported_metric_period="test-last-period", last_known_user_connection="test-last-connection", + tags="[{'key1': 'tag1'}, {'key2': 'tag2'}]", ) @@ -180,13 +180,18 @@ def performance_metric_factory(length, start): def metric_data_factory(indices, length, start): metrics = {} + user_connected_timestamps = user_session_timestamps_factory(length) timestamps = user_session_timestamps_factory(length) for metric in METRIC_LIST: if metric == "UserConnected": data = user_connected_data_factory(indices, length) + metrics[metric.lower()] = { + "timestamps": user_connected_timestamps, + "values": data, + } else: data = performance_metric_factory(length, start) - metrics[metric.lower()] = {"timestamps": timestamps, "values": data} + metrics[metric.lower()] = {"timestamps": timestamps, "values": data} return metrics @@ -241,9 +246,14 @@ def expected_sessions_factory(user_session_data, active_indices, zero_limit): session.setdefault("active_sessions", []).append( user_session_data["cpuusage"]["timestamps"][active_index] ) - expected_avg = WeightedAverage( + current_avg = WeightedAverage( user_session_data["cpuusage"]["values"][active_index], 1 - ).merge(expected_avg) + ) + expected_avg = ( + current_avg.merge(expected_avg) + if expected_avg is not None + else current_avg + ) if session: duration_hours = math.ceil( (session["active_sessions"][-1] - session["active_sessions"][0]).seconds @@ -691,7 +701,7 @@ def test_get_billable_hours_and_performance(mocker, session, ws_record, metric_d metrics_helper, "get_list_data_points", return_value=metric_data ) 
mocker.patch.object(metrics_helper, "get_user_connected_hours") - mocker.patch.object(metrics_helper, "get_user_sessions") + mock_user_session = mocker.patch.object(metrics_helper, "get_user_sessions") mocker.patch.object(metrics_helper.session_table, "update_ddb_items"), spy_get_time_range = mocker.spy(metrics_helper, "get_time_range") spy_get_cloudwatch_metric_data_points = mocker.spy( @@ -712,7 +722,13 @@ def test_get_billable_hours_and_performance(mocker, session, ws_record, metric_d ) spy_get_cloudwatch_metric_data_points.assert_called_once() spy_get_list_data_points.assert_called_once() - spy_get_user_connected_hours.assert_called_once() + spy_get_user_connected_hours.assert_called_once_with( + mock_user_session(), + ws_record.description.workspace_id, + ws_record.description.initial_mode, + 60, + ws_record.billing_data.billable_hours, + ) spy_get_user_sessions.assert_called_once() @@ -807,6 +823,9 @@ def test_get_user_sessions(session, ws_record): total_values = 26 start_value = 1 user_session_data = metric_data_factory(active_indices, total_values, start_value) + user_session_data["userconnected"]["timestamps"][-1] += datetime.timedelta( + minutes=5 + ) result = metrics_helper.get_user_sessions( user_session_data, ws_description(), @@ -1436,7 +1455,7 @@ def test_get_user_sessions_32(session, ws_record): def test_process_performance_metrics(session, ws_record, metric_data): metrics_helper = MetricsHelper(session, "us-east-1", "test-table") current_weighted_avg = mean(metric_data["cpuusage"]["values"]) * 3 - previous_weighted_avg = ws_record.performance_metrics.cpu_usage.weighted_avg + previous_weighted_avg = ws_record.performance_metrics.cpu_usage.weighted_avg() expected_avg = Decimal( (current_weighted_avg + previous_weighted_avg) / (ws_record.performance_metrics.cpu_usage.count + 3), @@ -1462,9 +1481,8 @@ def test_process_performance_metrics_with_no_available_data_in_last_report( assert result.memory_usage.avg == Decimal("5") assert 
result.memory_usage.count == 3 - # test when current dat doesn't exist - assert result.udp_packet_loss_rate.avg == None - assert result.udp_packet_loss_rate.count == 0 + # test when current data doesn't exist + assert result.udp_packet_loss_rate == None def test_process_performance_metrics_with_zero_avg(session, ws_record, metric_data): diff --git a/source/workspaces_app/workspaces_app/__tests__/test_workspace_record.py b/source/workspaces_app/workspaces_app/__tests__/test_workspace_record.py index a0f1c57..6aba2d6 100644 --- a/source/workspaces_app/workspaces_app/__tests__/test_workspace_record.py +++ b/source/workspaces_app/workspaces_app/__tests__/test_workspace_record.py @@ -4,7 +4,6 @@ # SPDX-License-Identifier: Apache-2.0 # Standard Library -from dataclasses import fields from decimal import Decimal # Third Party Libraries @@ -35,7 +34,6 @@ def ws_description(): username="test-user", computer_name="test-computer", initial_mode="test-mode", - tags=["tag1", "tag2"], ) @@ -75,6 +73,7 @@ def ws_record(ws_description, ws_billing_data, ws_metrics): report_date="test-report-date", last_reported_metric_period="test-last-period", last_known_user_connection="test-last-connection", + tags="[{'key1': 'tag1'}, {'key2': 'tag2'}]", ) @@ -155,7 +154,7 @@ def ddb_item(ws_record): "N": str(ws_record.performance_metrics.udp_packet_loss_rate.count) }, "Tags": { - "L": list(map(lambda x: {"S": x}, ws_record.description.tags)), + "S": ws_record.tags, }, "ReportDate": {"S": ws_record.report_date}, "LastReportedMetricPeriod": {"S": ws_record.last_reported_metric_period}, @@ -245,11 +244,10 @@ def test_ddb_attr_to_class_field_with_caps(): assert result == "test_string" -def test_weighted_avg_post_init_sets_weighted_avg_field(ws_metrics): - fields = vars(ws_metrics) - for field in fields: - value = getattr(ws_metrics, field) - assert value.weighted_avg == value.avg * value.count +def test_weighted_avg(ws_metrics): + weighted_avg = ws_metrics.cpu_usage.weighted_avg() + + assert 
weighted_avg == ws_metrics.cpu_usage.avg * ws_metrics.cpu_usage.count def test_weighted_average_merge(ws_metrics): @@ -258,12 +256,11 @@ def test_weighted_average_merge(ws_metrics): merged_wa = wa_1.merge(wa_2) expected_count = wa_1.count + wa_2.count - expected_avg = Decimal((wa_1.weighted_avg + wa_2.weighted_avg) / expected_count) + expected_avg = Decimal((wa_1.weighted_avg() + wa_2.weighted_avg()) / expected_count) assert merged_wa.avg == expected_avg assert merged_wa.count == expected_count - assert merged_wa.weighted_avg == expected_count * expected_avg def test_to_csv(ws_record): - expected = "test-ws-id,20,100,ToHourly,test-bundle,test-mode,test-mode,test-user,test-computer,test-dir-id,,93.42,94.42,95.42,96.42,97.42,98.42,\"['tag1', 'tag2']\",test-report-date\n" + expected = "test-ws-id,20,100,ToHourly,test-bundle,test-mode,test-mode,test-user,test-computer,test-dir-id,,93.42,94.42,95.42,96.42,97.42,98.42,[{'key1': 'tag1'}, {'key2': 'tag2'}],test-report-date\n" assert ws_record.to_csv() == expected diff --git a/source/workspaces_app/workspaces_app/__tests__/test_workspaces_helper.py b/source/workspaces_app/workspaces_app/__tests__/test_workspaces_helper.py index 19d7025..3f6770f 100644 --- a/source/workspaces_app/workspaces_app/__tests__/test_workspaces_helper.py +++ b/source/workspaces_app/workspaces_app/__tests__/test_workspaces_helper.py @@ -40,7 +40,6 @@ def ws_description(**kwargs): "username": "test-user", "computer_name": "test-computer", "initial_mode": "test-mode", - "tags": ["tag1", "tag2"], } filtered_args = { key: value for key, value in kwargs.items() if key in default_args.keys() @@ -59,11 +58,6 @@ def ws_billing_data(): ) -@pytest.fixture() -def weighted_avg(): - return WeightedAverage(Decimal("93.42"), 67) - - @pytest.fixture() def ws_metrics(): return WorkspacePerformanceMetrics( @@ -83,8 +77,9 @@ def ws_record(ws_billing_data, ws_metrics): billing_data=ws_billing_data, performance_metrics=ws_metrics, report_date="test-report-date", - 
last_reported_metric_period="test-last-period", + last_reported_metric_period="2024-08-29T00:00:00Z", last_known_user_connection="test-last-connection", + tags="[{'key1': 'tag1'}, {'key2': 'value2'}]", ) @@ -111,7 +106,7 @@ def test_skip_tag_true_process_standard_workspace(mocker, session, ws_record): } workspace_helper = workspaces_helper.WorkspacesHelper(session, settings) - mocker.patch.object( + mock_get_billable_hours = mocker.patch.object( workspace_helper.metrics_helper, "get_billable_hours_and_performance" ) workspace_helper.metrics_helper.get_billable_hours_and_performance.return_value = { @@ -127,6 +122,8 @@ def test_skip_tag_true_process_standard_workspace(mocker, session, ws_record): assert ( result.billing_data.new_mode == "test-mode" ) # The old mode should not be changed as the skip tag is True + # test that previous data was used + assert mock_get_billable_hours.call_args.args == ("", "", ws_record, 60) def test_bundle_type_returned_process_workspace(mocker, session, ws_record): @@ -151,7 +148,7 @@ def test_bundle_type_returned_process_workspace(mocker, session, ws_record): } workspace_helper = workspaces_helper.WorkspacesHelper(session, settings) - mocker.patch.object( + mock_get_billable_hours = mocker.patch.object( workspace_helper.metrics_helper, "get_billable_hours_and_performance" ) workspace_helper.metrics_helper.get_billable_hours_and_performance.return_value = { @@ -173,6 +170,8 @@ def test_bundle_type_returned_process_workspace(mocker, session, ws_record): ) mock_termination_status.return_value = "", "last-known-time" dashboard_metrics = DashboardMetrics() + # set ws_record last reported data to prior to v2.7.1 release + ws_record.last_reported_metric_period = "2024-08-27T00:00:00Z" result = workspace_helper.process_workspace(ws_record, 60, dashboard_metrics) assert result.description.bundle_type == "test-bundle" assert result.billing_data.billable_hours == 100 @@ -180,6 +179,8 @@ def test_bundle_type_returned_process_workspace(mocker, 
session, ws_record): assert result.billing_data.new_mode == "ALWAYS_ON" assert result.billing_data.change_reported == "-N-" assert result.performance_metrics == ws_record.performance_metrics + # test that previous data was not used + assert mock_get_billable_hours.call_args.args == ("", "", ws_record.description, 60) def test_modify_workspace_properties_returns_always_on(session): @@ -1599,16 +1600,18 @@ def test_process_workspace_with_metrics(mocker, session, ws_record): assert dashboard_metrics.billing_metrics.monthly_billed == 0 # Test case 3: Skip convert tag + tags = [{"Key": "skip_convert", "Value": "True"}] mocker.patch.object( workspace_helper, "get_list_tags_for_workspace", - return_value=[{"Key": "skip_convert", "Value": "True"}], + return_value=tags, ) dashboard_metrics = DashboardMetrics() result = workspace_helper.process_workspace(ws_record, 60, dashboard_metrics) assert result.billing_data.change_reported == "-S-" assert dashboard_metrics.conversion_metrics.conversion_skips == 1 + assert result.tags == "".join(('"', str(tags), '"')) # Test case 4: Error case mocker.patch.object( diff --git a/source/workspaces_app/workspaces_app/metrics_helper.py b/source/workspaces_app/workspaces_app/metrics_helper.py index dc0d6db..d14e1bc 100644 --- a/source/workspaces_app/workspaces_app/metrics_helper.py +++ b/source/workspaces_app/workspaces_app/metrics_helper.py @@ -122,7 +122,9 @@ def get_billable_hours_and_performance( ws_description.workspace_id, ws_description.initial_mode, autostop_timeout_minutes, - getattr(ws_record, "billable_hours", None), + getattr( + getattr(ws_record, "billing_data", None), "billable_hours", None + ), ) performance_metrics = self.process_performance_metrics( metric_data_points, getattr(ws_record, "performance_metrics", None) @@ -280,7 +282,9 @@ def get_user_connected_hours( user_connected_hours + session.duration_hours + idle_time_in_hours ) ## ADD PATCHING HOURS TO WORKSPACES - user_connected_hours = user_connected_hours + 
(previous_billable_hours or 0) + user_connected_hours = Decimal(str(user_connected_hours)) + ( + previous_billable_hours or 0 + ) return int(user_connected_hours) def get_user_sessions( @@ -444,7 +448,7 @@ def get_performance_for_period( metric = self.metric_id_to_name(metric_id) metric_running_average = session_metrics.get(metric) idx = bisect.bisect_left(data["timestamps"], time) - if data.get("timestamps") and data["timestamps"][idx] == time: + if idx < len(data["timestamps"]) and data["timestamps"][idx] == time: average_at_time = Decimal(str(data["values"][idx])) average_at_time = WeightedAverage(average_at_time, 1) @@ -478,13 +482,21 @@ def process_performance_metrics( current_avg = ( Decimal(str(mean(data_values))) if current_count > 0 else None ) - current_metric = WeightedAverage(avg=current_avg, count=current_count) + current_metric = ( + WeightedAverage(avg=current_avg, count=current_count) + if current_count + else None + ) prev_metric = getattr(prev_metrics, ws_record_field, None) - if prev_metric: + if prev_metric is not None and current_metric is not None: performance_metrics |= { ws_record_field: prev_metric.merge(current_metric) } - else: + elif prev_metric is not None: + performance_metrics |= {ws_record_field: prev_metric} + elif current_metric is not None: performance_metrics |= {ws_record_field: current_metric} + else: + performance_metrics |= {ws_record_field: None} return WorkspacePerformanceMetrics(**performance_metrics) diff --git a/source/workspaces_app/workspaces_app/utils/__tests__/test_usage_table_dao.py b/source/workspaces_app/workspaces_app/utils/__tests__/test_usage_table_dao.py index 2af42b9..2e04a83 100644 --- a/source/workspaces_app/workspaces_app/utils/__tests__/test_usage_table_dao.py +++ b/source/workspaces_app/workspaces_app/utils/__tests__/test_usage_table_dao.py @@ -33,7 +33,6 @@ def ws_description(): username="test-user", computer_name="test-computer", initial_mode="test-mode", - tags=["tag1", "tag2"], ) @@ -47,11 +46,6 @@ 
def ws_billing_data(): ) -@pytest.fixture() -def weighted_avg(): - return WeightedAverage(Decimal("93.42"), 67) - - @pytest.fixture() def ws_metrics(): return WorkspacePerformanceMetrics( @@ -73,6 +67,7 @@ def ws_record(ws_description, ws_billing_data, ws_metrics): report_date="test-report-date", last_reported_metric_period="test-last-period", last_known_user_connection="test-last-connection", + tags="[{'key1': 'tag1'}, {'key2': 'tag2'}]", ) @@ -156,7 +151,7 @@ def workspace_ddb_item(ws_record): "N": str(perf_metrics.udp_packet_loss_rate.count), }, "Tags": { - "L": list(map(lambda x: {"S": x}, description.tags)), + "S": ws_record.tags, }, "ReportDate": {"S": ws_record.report_date}, "LastReportedMetricPeriod": {"S": ws_record.last_reported_metric_period}, diff --git a/source/workspaces_app/workspaces_app/utils/solution_metrics.py b/source/workspaces_app/workspaces_app/utils/solution_metrics.py index b0289ec..66ea5a8 100644 --- a/source/workspaces_app/workspaces_app/utils/solution_metrics.py +++ b/source/workspaces_app/workspaces_app/utils/solution_metrics.py @@ -56,6 +56,7 @@ def report_metrics( execution_time: int = -1 solution_version = get_solution_version() + image_version = get_image_version() metrics_data = { "List_of_Workspaces": workspaces, @@ -65,6 +66,7 @@ def report_metrics( "Stack_Parameters": self._stack_parameters, "ECS_Task_Execution_Time": execution_time, "SolutionVersion": solution_version, + "ImageVersion": image_version, } solution_id = get_solution_id() @@ -117,3 +119,7 @@ def get_uuid() -> str: def get_solution_version() -> str: return os.getenv("SolutionVersion", "Unknown") + + +def get_image_version() -> str: + return os.getenv("ImageVersion", "Unknown") diff --git a/source/workspaces_app/workspaces_app/workspace_record.py b/source/workspaces_app/workspaces_app/workspace_record.py index 5446f93..e032408 100644 --- a/source/workspaces_app/workspaces_app/workspace_record.py +++ b/source/workspaces_app/workspaces_app/workspace_record.py @@ -28,35 
+28,37 @@ class WeightedAverage: avg: Decimal count: int - def __post_init__(self): - if self.avg is not None: - object.__setattr__(self, "weighted_avg", self.avg * self.count) + def weighted_avg(self): + return self.avg * self.count def merge(self, other_wa: "WeightedAverage") -> "WeightedAverage": - if other_wa is not None and other_wa.avg is not None: - merged_count = self.count + other_wa.count - merged_avg = Decimal( - str((self.weighted_avg + other_wa.weighted_avg) / merged_count) - ) - return WeightedAverage(avg=merged_avg, count=merged_count) - else: - return self + merged_count = self.count + other_wa.count + merged_avg = Decimal( + str((self.weighted_avg() + other_wa.weighted_avg()) / merged_count) + ) + return WeightedAverage(avg=merged_avg, count=merged_count) @dataclass(frozen=True) class WorkspacePerformanceMetrics: - in_session_latency: WeightedAverage - cpu_usage: WeightedAverage - memory_usage: WeightedAverage - root_volume_disk_usage: WeightedAverage - user_volume_disk_usage: WeightedAverage - udp_packet_loss_rate: WeightedAverage + in_session_latency: WeightedAverage | None + cpu_usage: WeightedAverage | None + memory_usage: WeightedAverage | None + root_volume_disk_usage: WeightedAverage | None + user_volume_disk_usage: WeightedAverage | None + udp_packet_loss_rate: WeightedAverage | None def to_json(self) -> dict[str, any]: class_as_dict = asdict(self) class_as_json = {} for key, value in class_as_dict.items(): - class_as_json |= {key: value.get("avg"), key + "_count": value.get("count")} + if value is not None: + class_as_json |= { + key: value.get("avg"), + key + "_count": value.get("count"), + } + else: # there is no data for the performance metric + class_as_json |= {key: None, key + "_count": 0} return class_as_json @classmethod @@ -70,9 +72,12 @@ def from_json(cls, json: dict[str, any]) -> "WorkspacePerformanceMetrics": count_key = key + "_count" if all(key in json for key in [key, count_key]): - performance_metrics |= { - key: 
WeightedAverage(avg=json[key], count=json[count_key]) - } + if json[key] is not None: + performance_metrics |= { + key: WeightedAverage(avg=json[key], count=json[count_key]) + } + else: + performance_metrics |= {key: None} else: raise KeyError( "JSON does not contain all keys needed to create a WorkspacePerformanceMetrics instance" @@ -103,7 +108,6 @@ class WorkspaceDescription: username: str computer_name: str initial_mode: str - tags: list[str] = field(default_factory=list) def to_json(self) -> dict[str, any]: return asdict(self) @@ -156,6 +160,7 @@ class WorkspaceRecord: report_date: str = "" last_reported_metric_period: str = "" last_known_user_connection: str = "" + tags: str = "" def to_json(self) -> dict[str, any]: return { @@ -165,6 +170,7 @@ def to_json(self) -> dict[str, any]: "report_date": self.report_date, "last_reported_metric_period": self.last_reported_metric_period, "last_known_user_connection": self.last_known_user_connection, + "tags": self.tags, } def to_ddb_obj(self) -> dict[str, any]: @@ -186,6 +192,7 @@ def to_csv(self) -> str: This method returns the workspace record as a string for use with a csv :return: a string representation of the workspace """ + raw_csv = ",".join( ( self.description.workspace_id, @@ -199,13 +206,17 @@ def to_csv(self) -> str: self.description.computer_name, self.description.directory_id, self.billing_data.workspace_terminated, - str(self.performance_metrics.in_session_latency.avg), - str(self.performance_metrics.cpu_usage.avg), - str(self.performance_metrics.memory_usage.avg), - str(self.performance_metrics.root_volume_disk_usage.avg), - str(self.performance_metrics.user_volume_disk_usage.avg), - str(self.performance_metrics.udp_packet_loss_rate.avg), - "".join(('"', str(self.description.tags), '"')), + str(getattr(self.performance_metrics.in_session_latency, "avg", "")), + str(getattr(self.performance_metrics.cpu_usage, "avg", "")), + str(getattr(self.performance_metrics.memory_usage, "avg", "")), + str( + 
getattr(self.performance_metrics.root_volume_disk_usage, "avg", "") + ), + str( + getattr(self.performance_metrics.user_volume_disk_usage, "avg", "") + ), + str(getattr(self.performance_metrics.udp_packet_loss_rate, "avg", "")), + self.tags, self.report_date + "\n", # Adding quotes to the string to help with csv format ) @@ -240,6 +251,7 @@ def from_ddb_obj( last_reported_metric_period=ddb_as_json["last_reported_metric_period"], last_known_user_connection=ddb_as_json["last_known_user_connection"], performance_metrics=WorkspacePerformanceMetrics.from_json(ddb_as_json), + tags=ddb_as_json["tags"], ) @staticmethod diff --git a/source/workspaces_app/workspaces_app/workspaces_helper.py b/source/workspaces_app/workspaces_app/workspaces_helper.py index e030f4b..17e61cc 100644 --- a/source/workspaces_app/workspaces_app/workspaces_helper.py +++ b/source/workspaces_app/workspaces_app/workspaces_helper.py @@ -7,6 +7,7 @@ import os import time import typing +from datetime import datetime # AWS Libraries import boto3 @@ -68,6 +69,18 @@ def process_workspace( if isinstance(ws_record, WorkspaceDescription) else ws_record.description ) + + # if last reported date was date before release, treat it as if + # there was no previous data + if isinstance(ws_record, WorkspaceRecord): + last_reported_period = ws_record.last_reported_metric_period + last_reported_period = datetime.strptime( + last_reported_period, "%Y-%m-%dT%H:%M:%SZ" + ) + release_271_date = datetime(2024, 8, 28) + if last_reported_period < release_271_date: + ws_record = ws_record.description + workspace_id = description.workspace_id logger.debug(f"workspaceID: {workspace_id}") workspace_running_mode = description.initial_mode @@ -138,6 +151,7 @@ def process_workspace( "end_time_for_current_month" ), last_known_user_connection=last_known_user_connection, + tags="".join(('"', str(tags), '"')), ) def get_hourly_threshold_for_bundle_type(self, bundle_type):