diff --git a/docs/data-sources/replication_report.md b/docs/data-sources/replication_report.md new file mode 100644 index 00000000..b3f802a8 --- /dev/null +++ b/docs/data-sources/replication_report.md @@ -0,0 +1,283 @@ +--- +# Copyright (c) 2024 Dell Inc., or its subsidiaries. All Rights Reserved. +# +# Licensed under the Mozilla Public License Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://mozilla.org/MPL/2.0/ +# +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +title: "powerscale_replication_report data source" +linkTitle: "powerscale_replication_report" +page_title: "powerscale_replication_report Data Source - terraform-provider-powerscale" +subcategory: "" +description: |- + +--- + +# powerscale_replication_report (Data Source) + + + +## Example Usage + +```terraform +/* +Copyright (c) 2024 Dell Inc., or its subsidiaries. All Rights Reserved. + +Licensed under the Mozilla Public License Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://mozilla.org/MPL/2.0/ + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +# This Terraform DataSource is used to query the details of existing Replication Reports from the PowerScale array. + +# Returns the entire list of PowerScale replication reports. +data "powerscale_replication_report" "all" { +} + +# Output value of above block by executing 'terraform output' command +# You can use the fetched information via the variable data.powerscale_replication_report.all +output "powerscale_replication_report_all" { + value = data.powerscale_replication_report.all +} + +# Returns a list of PowerScale Replication Reports based on the filters specified in the filter block. +data "powerscale_replication_report" "filtering" { + filter { + policy_name = "Policy" + reports_per_policy = 2 + sort = "policy_name" + dir = "ASC" + } +} + +# Output value of above block by executing 'terraform output' command +# You can use the fetched information via the variable data.powerscale_replication_report.filtering +output "powerscale_replication_report_filter" { + value = data.powerscale_replication_report.filtering +} +``` + + +## Schema + +### Optional + +- `filter` (Block, Optional) (see [below for nested schema](#nestedblock--filter)) + +### Read-Only + +- `id` (String) Unique identifier of the replication report instance. +- `replication_reports` (Attributes List) (see [below for nested schema](#nestedatt--replication_reports)) + + +### Nested Schema for `filter` + +Optional: + +- `dir` (String) The direction of the sort. +- `limit` (Number) Return no more than this many results at once (see resume). +- `newer_than` (Number) Filter the returned reports to include only those whose jobs started more recently than the specified number of days ago. +- `policy_name` (String) Filter the returned reports to include only those with this policy name. 
+- `reports_per_policy` (Number) If specified, only the N most recent reports will be returned per policy. If no other query args are present this argument defaults to 1. +- `resume` (String) Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options). +- `sort` (String) The field that will be used for sorting. +- `state` (String) Filter the returned reports to include only those whose jobs are in this state. +- `summary` (Boolean) Return a summary rather than entire objects. + + + +### Nested Schema for `replication_reports` + +Read-Only: + +- `action` (String) The action to be taken by this job. +- `ads_streams_replicated` (Number) The number of ads streams replicated by this job. +- `block_specs_replicated` (Number) The number of block specs replicated by this job. +- `bytes_recoverable` (Number) The number of bytes recoverable by this job. +- `bytes_transferred` (Number) The number of bytes that have been transferred by this job. +- `char_specs_replicated` (Number) The number of char specs replicated by this job. +- `committed_files` (Number) The number of WORM committed files. +- `corrected_lins` (Number) The number of LINs corrected by this job. +- `dead_node` (Boolean) This field is true if the node running this job is dead. +- `directories_replicated` (Number) The number of directories replicated. +- `dirs_changed` (Number) The number of directories changed by this job. +- `dirs_deleted` (Number) The number of directories deleted by this job. +- `dirs_moved` (Number) The number of directories moved by this job. +- `dirs_new` (Number) The number of directories created by this job. +- `duration` (Number) The amount of time in seconds between when the job was started and when it ended. If the job has not yet ended, this is the amount of time since the job started. This field is null if the job has not yet started. +- `encrypted` (Boolean) If true, syncs will be encrypted. +- `end_time` (Number) The time the job ended in unix epoch seconds. The field is null if the job hasn't ended. +- `error` (String) The primary error message for this job. +- `error_checksum_files_skipped` (Number) The number of files with checksum errors skipped by this job. +- `error_io_files_skipped` (Number) The number of files with io errors skipped by this job. +- `error_net_files_skipped` (Number) The number of files with network errors skipped by this job. +- `errors` (List of String) A list of error messages for this job. +- `failed_chunks` (Number) The number of data chunks that failed transmission. +- `fifos_replicated` (Number) The number of fifos replicated by this job. +- `file_data_bytes` (Number) The number of bytes transferred that belong to files. +- `files_changed` (Number) The number of files changed by this job. +- `files_linked` (Number) The number of files linked by this job. +- `files_new` (Number) The number of files created by this job. +- `files_selected` (Number) The number of files selected by this job. +- `files_transferred` (Number) The number of files transferred by this job. +- `files_unlinked` (Number) The number of files unlinked by this job. +- `files_with_ads_replicated` (Number) The number of files with ads replicated by this job. +- `flipped_lins` (Number) The number of LINs flipped by this job. +- `hard_links_replicated` (Number) The number of hard links replicated by this job. +- `hash_exceptions_fixed` (Number) The number of hash exceptions fixed by this job. 
+- `hash_exceptions_found` (Number) The number of hash exceptions found by this job. +- `id` (String) A unique identifier for this object. +- `job_id` (Number) The ID of the job. +- `lins_total` (Number) The number of LINs transferred by this job. +- `network_bytes_to_source` (Number) The total number of bytes sent to the source by this job. +- `network_bytes_to_target` (Number) The total number of bytes sent to the target by this job. +- `new_files_replicated` (Number) The number of new files replicated by this job. +- `num_retransmitted_files` (Number) The number of files that have been retransmitted by this job. +- `phases` (Attributes List) Data for each phase of this job. (see [below for nested schema](#nestedatt--replication_reports--phases)) +- `policy` (Attributes) The policy associated with this job, or null if there is currently no policy associated with this job (this can happen if the job is newly created and not yet fully populated in the underlying database). (see [below for nested schema](#nestedatt--replication_reports--policy)) +- `policy_action` (String) This is the action the policy is configured to perform. +- `policy_id` (String) The ID of the policy. +- `policy_name` (String) The name of the policy. +- `quotas_deleted` (Number) The number of quotas removed from the target. +- `regular_files_replicated` (Number) The number of regular files replicated by this job. +- `resynced_lins` (Number) The number of LINs resynched by this job. +- `retransmitted_files` (List of String) The files that have been retransmitted by this job. +- `retry` (Number) The number of times the job has been retried. +- `running_chunks` (Number) The number of data chunks currently being transmitted. +- `sockets_replicated` (Number) The number of sockets replicated by this job. +- `source_bytes_recovered` (Number) The number of bytes recovered on the source. +- `source_directories_created` (Number) The number of directories created on the source. +- `source_directories_deleted` (Number) The number of directories deleted on the source. +- `source_directories_linked` (Number) The number of directories linked on the source. +- `source_directories_unlinked` (Number) The number of directories unlinked on the source. +- `source_directories_visited` (Number) The number of directories visited on the source. +- `source_files_deleted` (Number) The number of files deleted on the source. +- `source_files_linked` (Number) The number of files linked on the source. +- `source_files_unlinked` (Number) The number of files unlinked on the source. +- `sparse_data_bytes` (Number) The number of sparse data bytes transferred by this job. +- `start_time` (Number) The time the job started in unix epoch seconds. The field is null if the job hasn't started. +- `state` (String) The state of the job. +- `subreport_count` (Number) The number of subreports that are available for this job report. +- `succeeded_chunks` (Number) The number of data chunks that have been transmitted successfully. +- `symlinks_replicated` (Number) The number of symlinks replicated by this job. +- `sync_type` (String) The type of sync being performed by this job. +- `target_bytes_recovered` (Number) The number of bytes recovered on the target. +- `target_directories_created` (Number) The number of directories created on the target. +- `target_directories_deleted` (Number) The number of directories deleted on the target. +- `target_directories_linked` (Number) The number of directories linked on the target. 
+- `target_directories_unlinked` (Number) The number of directories unlinked on the target. +- `target_files_deleted` (Number) The number of files deleted on the target. +- `target_files_linked` (Number) The number of files linked on the target. +- `target_files_unlinked` (Number) The number of files unlinked on the target. +- `target_snapshots` (List of String) The target snapshots created by this job. +- `throughput` (String) Throughput of a job +- `total_chunks` (Number) The total number of data chunks transmitted by this job. +- `total_data_bytes` (Number) The total number of bytes transferred by this job. +- `total_files` (Number) The number of files affected by this job. +- `total_network_bytes` (Number) The total number of bytes sent over the network by this job. +- `total_phases` (Number) The total number of phases for this job. +- `unchanged_data_bytes` (Number) The number of bytes unchanged by this job. +- `up_to_date_files_skipped` (Number) The number of up-to-date files skipped by this job. +- `updated_files_replicated` (Number) The number of updated files replicated by this job. +- `user_conflict_files_skipped` (Number) The number of files with user conflicts skipped by this job. +- `warnings` (List of String) A list of warning messages for this job. +- `worm_committed_file_conflicts` (Number) The number of WORM committed files which needed to be reverted. Since WORM committed files cannot be reverted, this is the number of files that were preserved in the compliance store. + + +### Nested Schema for `replication_reports.phases` + +Read-Only: + +- `end_time` (Number) The time the job ended this phase. +- `phase` (String) The phase that the job was in. +- `start_time` (Number) The time the job began this phase. +- `statistics` (Attributes) Statistics for each phase of this job. (see [below for nested schema](#nestedatt--replication_reports--phases--statistics)) + + +### Nested Schema for `replication_reports.phases.statistics` + +Read-Only: + +- `compliance_dir_links` (String) Compliance Dir Links +- `corrected_lins` (String) Corrected LINs +- `deleted_dirs` (String) Deleted Dirs +- `deleted_files` (String) Deleted Files +- `dirs` (String) Dirs +- `files` (String) Files +- `flipped_lins` (String) Flipped LINs +- `hash_exceptions` (String) Hash Exceptions +- `linked_dirs` (String) Linked Dirs +- `linked_files` (String) Linked Files +- `marked_directories` (String) Marked Directories +- `marked_files` (String) Marked Files +- `modified_dirs` (String) Modified Dirs +- `modified_files` (String) Modified Files +- `modified_lins` (String) Modified LINs +- `new_compliance_dirs` (String) New Compliance Dirs +- `new_dirs` (String) New Dirs +- `new_files` (String) New Files +- `new_resynced_files` (String) New Resynced Files +- `resynced_file_links` (String) Resynced File Links +- `resynced_lins` (String) Resynced LINs +- `unlinked_files` (String) Unlinked Files + + + + +### Nested Schema for `replication_reports.policy` + +Read-Only: + +- `action` (String) The action to be taken by the job. +- `file_matching_pattern` (Attributes) A file matching pattern, organized as an OR'ed set of AND'ed file criteria, for example ((a AND b) OR (x AND y)) used to define a set of files with specific properties. Policies of type 'sync' cannot use 'path' or time criteria in their matching patterns, but policies of type 'copy' can use all listed criteria. 
(see [below for nested schema](#nestedatt--replication_reports--policy--file_matching_pattern)) +- `name` (String) User-assigned name of this sync policy. +- `source_exclude_directories` (List of String) Directories that will be excluded from the sync. Modifying this field will result in a full synchronization of all data. +- `source_include_directories` (List of String) Directories that will be included in the sync. Modifying this field will result in a full synchronization of all data. +- `source_root_path` (String) The root directory on the source cluster the files will be synced from. Modifying this field will result in a full synchronization of all data. +- `target_host` (String) Hostname or IP address of sync target cluster. Modifying the target cluster host can result in the policy being unrunnable if the new target does not match the current target association. +- `target_path` (String) Absolute filesystem path on the target cluster for the sync destination. + + +### Nested Schema for `replication_reports.policy.file_matching_pattern` + +Read-Only: + +- `or_criteria` (Attributes List) An array containing objects with "and_criteria" properties, each set of and_criteria will be logically OR'ed together to create the full file matching pattern. (see [below for nested schema](#nestedatt--replication_reports--policy--file_matching_pattern--or_criteria)) + + +### Nested Schema for `replication_reports.policy.file_matching_pattern.or_criteria` + +Read-Only: + +- `and_criteria` (Attributes List) An array containing individual file criterion objects each describing one criterion. These are logically AND'ed together to form a set of criteria. (see [below for nested schema](#nestedatt--replication_reports--policy--file_matching_pattern--or_criteria--and_criteria)) + + +### Nested Schema for `replication_reports.policy.file_matching_pattern.or_criteria.and_criteria` + +Read-Only: + +- `attribute_exists` (Boolean) For "custom_attribute" type criteria. The file will match as long as the attribute named by "field" exists. Default is true. +- `case_sensitive` (Boolean) If true, the value comparison will be case sensitive. Default is true. +- `field` (String) The name of the file attribute to match on (only required if this is a custom_attribute type criterion). Default is an empty string "". +- `operator` (String) How to compare the specified attribute of each file to the specified value. +- `type` (String) The type of this criterion, that is, which file attribute to match on. +- `value` (String) The value to compare the specified attribute of each file to. +- `whole_word` (Boolean) If true, the attribute must match the entire word. Default is true. \ No newline at end of file diff --git a/examples/data-sources/powerscale_replication_report/data-source.tf b/examples/data-sources/powerscale_replication_report/data-source.tf new file mode 100644 index 00000000..230b50d7 --- /dev/null +++ b/examples/data-sources/powerscale_replication_report/data-source.tf @@ -0,0 +1,44 @@ +/* +Copyright (c) 2024 Dell Inc., or its subsidiaries. All Rights Reserved. + +Licensed under the Mozilla Public License Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://mozilla.org/MPL/2.0/ + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +# This Terraform DataSource is used to query the details of existing Replication Reports from the PowerScale array. + +# Returns the entire list of PowerScale replication reports. +data "powerscale_replication_report" "all" { +} + +# Output value of above block by executing 'terraform output' command +# You can use the fetched information via the variable data.powerscale_replication_report.all +output "powerscale_replication_report_all" { + value = data.powerscale_replication_report.all +} + +# Returns a list of PowerScale Replication Reports based on the filters specified in the filter block. +data "powerscale_replication_report" "filtering" { + filter { + policy_name = "Policy" + reports_per_policy = 2 + sort = "policy_name" + dir = "ASC" + } +} + +# Output value of above block by executing 'terraform output' command +# You can use the fetched information via the variable data.powerscale_replication_report.filtering +output "powerscale_replication_report_filter" { + value = data.powerscale_replication_report.filtering +} diff --git a/examples/data-sources/powerscale_replication_report/provider.tf b/examples/data-sources/powerscale_replication_report/provider.tf new file mode 100644 index 00000000..83e4c1a9 --- /dev/null +++ b/examples/data-sources/powerscale_replication_report/provider.tf @@ -0,0 +1,30 @@ +/* +Copyright (c) 2024 Dell Inc., or its subsidiaries. All Rights Reserved. + +Licensed under the Mozilla Public License Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://mozilla.org/MPL/2.0/ + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +terraform { + required_providers { + powerscale = { + source = "registry.terraform.io/dell/powerscale" + } + } +} + +provider "powerscale" { + username = var.username + password = var.password + endpoint = var.endpoint + insecure = var.insecure +} \ No newline at end of file diff --git a/powerscale/constants/constants.go b/powerscale/constants/constants.go index 73e56784..449384a4 100644 --- a/powerscale/constants/constants.go +++ b/powerscale/constants/constants.go @@ -461,4 +461,7 @@ const ( // ReadSnapshotRestoreJobReportErrorMsg specifies error details occurred while reading snapshot restore job reports. ReadSnapshotRestoreJobReportErrorMsg = "Could not read snapshot restore job reports " + + // ReadReplicationReportsErrorMsg specifies error details occurred while reading replication reports. + ReadReplicationReportsErrorMsg = "Could not read Replication Reports" ) diff --git a/powerscale/helper/replication_reports_helper.go b/powerscale/helper/replication_reports_helper.go new file mode 100644 index 00000000..a093d30f --- /dev/null +++ b/powerscale/helper/replication_reports_helper.go @@ -0,0 +1,69 @@ +/* +Copyright (c) 2024 Dell Inc., or its subsidiaries. All Rights Reserved. + +Licensed under the Mozilla Public License Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://mozilla.org/MPL/2.0/ + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package helper + +import ( + "context" + powerscale "dell/powerscale-go-client" + "terraform-provider-powerscale/client" + "terraform-provider-powerscale/powerscale/models" +) + +// GetReplicationReports gets a list of replication reports. +func GetReplicationReports(ctx context.Context, client *client.Client, state models.ReplicationReportsDatasourceModel) (*powerscale.V15SyncReports, error) { + listRRParam := client.PscaleOpenAPIClient.SyncApi.GetSyncv15SyncReports(ctx) + if state.ReplicationReportFilter != nil { + if !state.ReplicationReportFilter.Sort.IsNull() { + listRRParam = listRRParam.Sort(state.ReplicationReportFilter.Sort.ValueString()) + } + if !state.ReplicationReportFilter.Resume.IsNull() { + listRRParam = listRRParam.Resume(state.ReplicationReportFilter.Resume.ValueString()) + } + if !state.ReplicationReportFilter.NewerThan.IsNull() { + listRRParam = listRRParam.NewerThan(int32(state.ReplicationReportFilter.NewerThan.ValueInt64())) + } + if !state.ReplicationReportFilter.PolicyName.IsNull() { + listRRParam = listRRParam.PolicyName(state.ReplicationReportFilter.PolicyName.ValueString()) + } + if !state.ReplicationReportFilter.State.IsNull() { + listRRParam = listRRParam.State(state.ReplicationReportFilter.State.ValueString()) + } + if !state.ReplicationReportFilter.Limit.IsNull() { + listRRParam = listRRParam.Limit(int32(state.ReplicationReportFilter.Limit.ValueInt64())) + } + if !state.ReplicationReportFilter.ReportsPerPolicy.IsNull() { + listRRParam = listRRParam.ReportsPerPolicy(int32(state.ReplicationReportFilter.ReportsPerPolicy.ValueInt64())) + } + if !state.ReplicationReportFilter.Dir.IsNull() { + listRRParam = listRRParam.Dir(state.ReplicationReportFilter.Dir.ValueString()) + } + if !state.ReplicationReportFilter.Summary.IsNull() { + listRRParam = listRRParam.Summary(state.ReplicationReportFilter.Summary.ValueBool()) + } + + } + resp, _, err := listRRParam.Execute() + return resp, err +} + +// ReplicationReportDetailMapper maps a replication report response into its data source model. +func ReplicationReportDetailMapper(ctx context.Context, rr *powerscale.V15SyncReport) (models.ReplicationReportsDetail, error) { + model := models.ReplicationReportsDetail{} + err := CopyFields(ctx, rr, &model) + return model, err +} diff --git a/powerscale/models/replication_reports.go b/powerscale/models/replication_reports.go new file mode 100644 index 00000000..008eee11 --- /dev/null +++ b/powerscale/models/replication_reports.go @@ -0,0 +1,193 @@ +/* +Copyright (c) 2024 Dell Inc., or its subsidiaries. All Rights Reserved. + +Licensed under the Mozilla Public License Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://mozilla.org/MPL/2.0/ + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package models + +import "github.com/hashicorp/terraform-plugin-framework/types" + +type ReplicationReportsDatasourceModel struct { + ID types.String `tfsdk:"id"` + Reports []ReplicationReportsDetail `tfsdk:"replication_reports"` + ReplicationReportFilter *ReplicationReportFilterType `tfsdk:"filter"` +} + +type ReplicationReportFilterType struct { + Sort types.String `tfsdk:"sort"` + Resume types.String `tfsdk:"resume"` + NewerThan types.Int64 `tfsdk:"newer_than"` + PolicyName types.String `tfsdk:"policy_name"` + State types.String `tfsdk:"state"` + Limit types.Int64 `tfsdk:"limit"` + ReportsPerPolicy types.Int64 `tfsdk:"reports_per_policy"` + Summary types.Bool `tfsdk:"summary"` + Dir types.String `tfsdk:"dir"` +} + +type ReplicationReportsDetail struct { + Action types.String `tfsdk:"action"` + AdsStreamsReplicated types.Int64 `tfsdk:"ads_streams_replicated"` + BlockSpecsReplicated types.Int64 `tfsdk:"block_specs_replicated"` + BytesRecoverable types.Int64 `tfsdk:"bytes_recoverable"` + BytesTransferred types.Int64 `tfsdk:"bytes_transferred"` + CharSpecsReplicated types.Int64 `tfsdk:"char_specs_replicated"` + CommittedFiles types.Int64 `tfsdk:"committed_files"` + CorrectedLins types.Int64 `tfsdk:"corrected_lins"` + DeadNode types.Bool `tfsdk:"dead_node"` + DirectoriesReplicated types.Int64 `tfsdk:"directories_replicated"` + DirsChanged types.Int64 `tfsdk:"dirs_changed"` + DirsDeleted types.Int64 `tfsdk:"dirs_deleted"` + DirsMoved types.Int64 `tfsdk:"dirs_moved"` + DirsNew types.Int64 `tfsdk:"dirs_new"` + Duration types.Int64 `tfsdk:"duration"` + Encrypted types.Bool `tfsdk:"encrypted"` + EndTime types.Int64 `tfsdk:"end_time"` + Error types.String `tfsdk:"error"` + ErrorChecksumFilesSkipped types.Int64 `tfsdk:"error_checksum_files_skipped"` + ErrorIoFilesSkipped types.Int64 `tfsdk:"error_io_files_skipped"` + ErrorNetFilesSkipped types.Int64 `tfsdk:"error_net_files_skipped"` + Errors types.List `tfsdk:"errors"` + FailedChunks types.Int64 `tfsdk:"failed_chunks"` + FifosReplicated types.Int64 `tfsdk:"fifos_replicated"` + FileDataBytes types.Int64 `tfsdk:"file_data_bytes"` + FilesChanged types.Int64 `tfsdk:"files_changed"` + FilesLinked types.Int64 `tfsdk:"files_linked"` + FilesNew types.Int64 `tfsdk:"files_new"` + FilesSelected types.Int64 `tfsdk:"files_selected"` + FilesTransferred types.Int64 `tfsdk:"files_transferred"` + FilesUnlinked types.Int64 `tfsdk:"files_unlinked"` + FilesWithAdsReplicated types.Int64 `tfsdk:"files_with_ads_replicated"` + FlippedLins types.Int64 `tfsdk:"flipped_lins"` + HardLinksReplicated types.Int64 `tfsdk:"hard_links_replicated"` + HashExceptionsFixed types.Int64 `tfsdk:"hash_exceptions_fixed"` + HashExceptionsFound types.Int64 `tfsdk:"hash_exceptions_found"` + ID types.String `tfsdk:"id"` + JobID types.Int64 `tfsdk:"job_id"` + LinsTotal types.Int64 `tfsdk:"lins_total"` + NetworkBytesToSource types.Int64 `tfsdk:"network_bytes_to_source"` + NetworkBytesToTarget types.Int64 `tfsdk:"network_bytes_to_target"` + NewFilesReplicated types.Int64 `tfsdk:"new_files_replicated"` + NumRetransmittedFiles types.Int64 `tfsdk:"num_retransmitted_files"` + Phases []PhasesDetail `tfsdk:"phases"` + Policy PolicyDetail `tfsdk:"policy"` + PolicyAction types.String `tfsdk:"policy_action"` + PolicyID types.String `tfsdk:"policy_id"` + PolicyName types.String `tfsdk:"policy_name"` + QuotasDeleted types.Int64 `tfsdk:"quotas_deleted"` + RegularFilesReplicated types.Int64 `tfsdk:"regular_files_replicated"` + ResyncedLins types.Int64 `tfsdk:"resynced_lins"` + RetransmittedFiles 
types.List `tfsdk:"retransmitted_files"` + Retry types.Int64 `tfsdk:"retry"` + RunningChunks types.Int64 `tfsdk:"running_chunks"` + SocketsReplicated types.Int64 `tfsdk:"sockets_replicated"` + SourceBytesRecovered types.Int64 `tfsdk:"source_bytes_recovered"` + SourceDirectoriesCreated types.Int64 `tfsdk:"source_directories_created"` + SourceDirectoriesDeleted types.Int64 `tfsdk:"source_directories_deleted"` + SourceDirectoriesLinked types.Int64 `tfsdk:"source_directories_linked"` + SourceDirectoriesUnlinked types.Int64 `tfsdk:"source_directories_unlinked"` + SourceDirectoriesVisited types.Int64 `tfsdk:"source_directories_visited"` + SourceFilesDeleted types.Int64 `tfsdk:"source_files_deleted"` + SourceFilesLinked types.Int64 `tfsdk:"source_files_linked"` + SourceFilesUnlinked types.Int64 `tfsdk:"source_files_unlinked"` + SparseDataBytes types.Int64 `tfsdk:"sparse_data_bytes"` + StartTime types.Int64 `tfsdk:"start_time"` + State types.String `tfsdk:"state"` + SubreportCount types.Int64 `tfsdk:"subreport_count"` + SucceededChunks types.Int64 `tfsdk:"succeeded_chunks"` + SymlinksReplicated types.Int64 `tfsdk:"symlinks_replicated"` + SyncType types.String `tfsdk:"sync_type"` + TargetBytesRecovered types.Int64 `tfsdk:"target_bytes_recovered"` + TargetDirectoriesCreated types.Int64 `tfsdk:"target_directories_created"` + TargetDirectoriesDeleted types.Int64 `tfsdk:"target_directories_deleted"` + TargetDirectoriesLinked types.Int64 `tfsdk:"target_directories_linked"` + TargetDirectoriesUnlinked types.Int64 `tfsdk:"target_directories_unlinked"` + TargetFilesDeleted types.Int64 `tfsdk:"target_files_deleted"` + TargetFilesLinked types.Int64 `tfsdk:"target_files_linked"` + TargetFilesUnlinked types.Int64 `tfsdk:"target_files_unlinked"` + TargetSnapshots types.List `tfsdk:"target_snapshots"` + Throughput types.String `tfsdk:"throughput"` + TotalChunks types.Int64 `tfsdk:"total_chunks"` + TotalDataBytes types.Int64 `tfsdk:"total_data_bytes"` + TotalFiles types.Int64 `tfsdk:"total_files"` + TotalNetworkBytes types.Int64 `tfsdk:"total_network_bytes"` + TotalPhases types.Int64 `tfsdk:"total_phases"` + UnchangedDataBytes types.Int64 `tfsdk:"unchanged_data_bytes"` + UpToDateFilesSkipped types.Int64 `tfsdk:"up_to_date_files_skipped"` + UpdatedFilesReplicated types.Int64 `tfsdk:"updated_files_replicated"` + UserConflictFilesSkipped types.Int64 `tfsdk:"user_conflict_files_skipped"` + Warnings types.List `tfsdk:"warnings"` + WormCommittedFileConflicts types.Int64 `tfsdk:"worm_committed_file_conflicts"` +} + +type PolicyDetail struct { + Action types.String `tfsdk:"action"` + FileMatchingPattern FileMatchingPatternDetail `tfsdk:"file_matching_pattern"` + Name types.String `tfsdk:"name"` + SourceExcludeDirectories []types.String `tfsdk:"source_exclude_directories"` + SourceIncludeDirectories []types.String `tfsdk:"source_include_directories"` + SourceRootPath types.String `tfsdk:"source_root_path"` + TargetHost types.String `tfsdk:"target_host"` + TargetPath types.String `tfsdk:"target_path"` +} + +type OrCriteriaDetail struct { + AndCriteria []AndCriteriaDetail `tfsdk:"and_criteria"` +} + +type AndCriteriaDetail struct { + AttributeExists types.Bool `tfsdk:"attribute_exists"` + CaseSensitive types.Bool `tfsdk:"case_sensitive"` + Field types.String `tfsdk:"field"` + Operator types.String `tfsdk:"operator"` + Type types.String `tfsdk:"type"` + Value types.String `tfsdk:"value"` + WholeWord types.Bool `tfsdk:"whole_word"` +} + +type StatisticsDetail struct { + ComplianceDirLinks types.String 
`tfsdk:"compliance_dir_links"` + CorrectedLins types.String `tfsdk:"corrected_lins"` + DeletedDirs types.String `tfsdk:"deleted_dirs"` + DeletedFiles types.String `tfsdk:"deleted_files"` + Dirs types.String `tfsdk:"dirs"` + Files types.String `tfsdk:"files"` + FlippedLins types.String `tfsdk:"flipped_lins"` + HashExceptions types.String `tfsdk:"hash_exceptions"` + LinkedDirs types.String `tfsdk:"linked_dirs"` + LinkedFiles types.String `tfsdk:"linked_files"` + MarkedDirectories types.String `tfsdk:"marked_directories"` + MarkedFiles types.String `tfsdk:"marked_files"` + ModifiedDirs types.String `tfsdk:"modified_dirs"` + ModifiedFiles types.String `tfsdk:"modified_files"` + ModifiedLins types.String `tfsdk:"modified_lins"` + NewComplianceDirs types.String `tfsdk:"new_compliance_dirs"` + NewDirs types.String `tfsdk:"new_dirs"` + NewFiles types.String `tfsdk:"new_files"` + NewResyncedFiles types.String `tfsdk:"new_resynced_files"` + ResyncedFileLinks types.String `tfsdk:"resynced_file_links"` + ResyncedLins types.String `tfsdk:"resynced_lins"` + UnlinkedFiles types.String `tfsdk:"unlinked_files"` +} + +type PhasesDetail struct { + EndTime types.Int64 `tfsdk:"end_time"` + Phase types.String `tfsdk:"phase"` + StartTime types.Int64 `tfsdk:"start_time"` + Statistics StatisticsDetail `tfsdk:"statistics"` +} + +type FileMatchingPatternDetail struct { + OrCriteria []OrCriteriaDetail `tfsdk:"or_criteria"` +} diff --git a/powerscale/provider/provider.go b/powerscale/provider/provider.go index aae8a22a..8758fbae 100644 --- a/powerscale/provider/provider.go +++ b/powerscale/provider/provider.go @@ -247,6 +247,7 @@ func (p *PscaleProvider) DataSources(ctx context.Context) []func() datasource.Da NewSyncIQRuleDataSource, NewSyncIQGlobalSettingsDataSource, NewSyncIQPeerCertificateDataSource, + NewReplicationReportDataSource, } } diff --git a/powerscale/provider/replication_reports_datasource.go b/powerscale/provider/replication_reports_datasource.go new file mode 100644 index 00000000..070ee75f --- /dev/null +++ b/powerscale/provider/replication_reports_datasource.go @@ -0,0 +1,959 @@ +/* +Copyright (c) 2024 Dell Inc., or its subsidiaries. All Rights Reserved. + +Licensed under the Mozilla Public License Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://mozilla.org/MPL/2.0/ + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package provider + +import ( + "context" + "fmt" + "terraform-provider-powerscale/client" + "terraform-provider-powerscale/powerscale/constants" + "terraform-provider-powerscale/powerscale/helper" + "terraform-provider-powerscale/powerscale/models" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" +) + +// Ensure provider defined types fully satisfy framework interfaces. +var _ datasource.DataSource = &ReplicationReportDataSource{} + +// NewReplicationReportDataSource creates a new data source. 
+func NewReplicationReportDataSource() datasource.DataSource { + return &ReplicationReportDataSource{} +} + +// ReplicationReportDataSource defines the data source implementation. +type ReplicationReportDataSource struct { + client *client.Client +} + +// Metadata describes the data source arguments. +func (d *ReplicationReportDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_replication_report" +} + +// Schema describes the data source arguments. +func (d *ReplicationReportDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Description: "Unique identifier of the replication report instance.", + MarkdownDescription: "Unique identifier of the replication report instance.", + Computed: true, + }, + "replication_reports": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "total_phases": schema.Int64Attribute{ + Computed: true, + Description: "The total number of phases for this job.", + MarkdownDescription: "The total number of phases for this job.", + }, + "symlinks_replicated": schema.Int64Attribute{ + Computed: true, + Description: "The number of symlinks replicated by this job.", + MarkdownDescription: "The number of symlinks replicated by this job.", + }, + "block_specs_replicated": schema.Int64Attribute{ + Computed: true, + Description: "The number of block specs replicated by this job.", + MarkdownDescription: "The number of block specs replicated by this job.", + }, + "encrypted": schema.BoolAttribute{ + Computed: true, + Description: "If true, syncs will be encrypted.", + MarkdownDescription: "If true, syncs will be encrypted.", + }, + "source_bytes_recovered": schema.Int64Attribute{ + Computed: true, + Description: "The number of bytes recovered on the source.", + MarkdownDescription: "The number of bytes recovered on the source.", + }, + "network_bytes_to_source": schema.Int64Attribute{ + Computed: true, + Description: "The total number of bytes sent to the source by this job.", + MarkdownDescription: "The total number of bytes sent to the source by this job.", + }, + "error": schema.StringAttribute{ + Computed: true, + Description: "The primary error message for this job.", + MarkdownDescription: "The primary error message for this job.", + }, + "lins_total": schema.Int64Attribute{ + Computed: true, + Description: "The number of LINs transferred by this job.", + MarkdownDescription: "The number of LINs transferred by this job.", + }, + "job_id": schema.Int64Attribute{ + Computed: true, + Description: "The ID of the job.", + MarkdownDescription: "The ID of the job.", + }, + "source_directories_deleted": schema.Int64Attribute{ + Computed: true, + Description: "The number of directories deleted on the source.", + MarkdownDescription: "The number of directories deleted on the source.", + }, + "throughput": schema.StringAttribute{ + Computed: true, + Description: "Throughput of a job", + MarkdownDescription: "Throughput of a job", + }, + "phases": schema.ListNestedAttribute{ + Computed: true, + Description: "Data for each phase of this job.", + MarkdownDescription: "Data for each phase of this job.", + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "statistics": schema.SingleNestedAttribute{ + Computed: true, 
+ Description: "Statistics for each phase of this job.", + MarkdownDescription: "Statistics for each phase of this job.", + Attributes: map[string]schema.Attribute{ + "compliance_dir_links": schema.StringAttribute{ + Computed: true, + Description: "Compliance Dir Links", + MarkdownDescription: "Compliance Dir Links", + }, + "files": schema.StringAttribute{ + Computed: true, + Description: "Files", + MarkdownDescription: "Files", + }, + "linked_dirs": schema.StringAttribute{ + Computed: true, + Description: "Linked Dirs", + MarkdownDescription: "Linked Dirs", + }, + "marked_files": schema.StringAttribute{ + Computed: true, + Description: "Marked Files", + MarkdownDescription: "Marked Files", + }, + "resynced_lins": schema.StringAttribute{ + Computed: true, + Description: "Resynced LINs", + MarkdownDescription: "Resynced LINs", + }, + "linked_files": schema.StringAttribute{ + Computed: true, + Description: "Linked Files", + MarkdownDescription: "Linked Files", + }, + "new_compliance_dirs": schema.StringAttribute{ + Computed: true, + Description: "New Compliance Dirs", + MarkdownDescription: "New Compliance Dirs", + }, + "modified_dirs": schema.StringAttribute{ + Computed: true, + Description: "Modified Dirs", + MarkdownDescription: "Modified Dirs", + }, + "deleted_dirs": schema.StringAttribute{ + Computed: true, + Description: "Deleted Dirs", + MarkdownDescription: "Deleted Dirs", + }, + "flipped_lins": schema.StringAttribute{ + Computed: true, + Description: "Flipped LINs", + MarkdownDescription: "Flipped LINs", + }, + "hash_exceptions": schema.StringAttribute{ + Computed: true, + Description: "Hash Exceptions", + MarkdownDescription: "Hash Exceptions", + }, + "new_files": schema.StringAttribute{ + Computed: true, + Description: "New Files", + MarkdownDescription: "New Files", + }, + "new_resynced_files": schema.StringAttribute{ + Computed: true, + Description: "New Resynced Files", + MarkdownDescription: "New Resynced Files", + }, + "resynced_file_links": schema.StringAttribute{ + Computed: true, + Description: "Resynced File Links", + MarkdownDescription: "Resynced File Links", + }, + "unlinked_files": schema.StringAttribute{ + Computed: true, + Description: "Unlinked Files", + MarkdownDescription: "Unlinked Files", + }, + "dirs": schema.StringAttribute{ + Computed: true, + Description: "Dirs", + MarkdownDescription: "Dirs", + }, + "modified_files": schema.StringAttribute{ + Computed: true, + Description: "Modified Files", + MarkdownDescription: "Modified Files", + }, + "corrected_lins": schema.StringAttribute{ + Computed: true, + Description: "Corrected LINs", + MarkdownDescription: "Corrected LINs", + }, + "new_dirs": schema.StringAttribute{ + Computed: true, + Description: "New Dirs", + MarkdownDescription: "New Dirs", + }, + "modified_lins": schema.StringAttribute{ + Computed: true, + Description: "Modified LINs", + MarkdownDescription: "Modified LINs", + }, + "marked_directories": schema.StringAttribute{ + Computed: true, + Description: "Marked Directories", + MarkdownDescription: "Marked Directories", + }, + "deleted_files": schema.StringAttribute{ + Computed: true, + Description: "Deleted Files", + MarkdownDescription: "Deleted Files", + }, + }, + }, + "phase": schema.StringAttribute{ + Computed: true, + Description: "The phase that the job was in.", + MarkdownDescription: "The phase that the job was in.", + }, + "start_time": schema.Int64Attribute{ + Computed: true, + Description: "The time the job began this phase.", + MarkdownDescription: "The time the job began this phase.", + 
}, + "end_time": schema.Int64Attribute{ + Computed: true, + Description: "The time the job ended this phase.", + MarkdownDescription: "The time the job ended this phase.", + }, + }, + }, + }, + "policy_id": schema.StringAttribute{ + Computed: true, + Description: "The ID of the policy.", + MarkdownDescription: "The ID of the policy.", + }, + "target_directories_deleted": schema.Int64Attribute{ + Computed: true, + Description: "The number of directories deleted on the target.", + MarkdownDescription: "The number of directories deleted on the target.", + }, + "files_changed": schema.Int64Attribute{ + Computed: true, + Description: "The number of files changed by this job.", + MarkdownDescription: "The number of files changed by this job.", + }, + "dirs_changed": schema.Int64Attribute{ + Computed: true, + Description: "The number of directories changed by this job.", + MarkdownDescription: "The number of directories changed by this job.", + }, + "target_files_deleted": schema.Int64Attribute{ + Computed: true, + Description: "The number of files deleted on the target.", + MarkdownDescription: "The number of files deleted on the target.", + }, + "source_directories_unlinked": schema.Int64Attribute{ + Computed: true, + Description: "The number of directories unlinked on the source.", + MarkdownDescription: "The number of directories unlinked on the source.", + }, + "dirs_moved": schema.Int64Attribute{ + Computed: true, + Description: "The number of directories moved by this job.", + MarkdownDescription: "The number of directories moved by this job.", + }, + "source_files_deleted": schema.Int64Attribute{ + Computed: true, + Description: "The number of files deleted on the source.", + MarkdownDescription: "The number of files deleted on the source.", + }, + "error_io_files_skipped": schema.Int64Attribute{ + Computed: true, + Description: "The number of files with io errors skipped by this job.", + MarkdownDescription: "The number of files with io errors skipped by this job.", + }, + "total_network_bytes": schema.Int64Attribute{ + Computed: true, + Description: "The total number of bytes sent over the network by this job.", + MarkdownDescription: "The total number of bytes sent over the network by this job.", + }, + "error_net_files_skipped": schema.Int64Attribute{ + Computed: true, + Description: "The number of files with network errors skipped by this job.", + MarkdownDescription: "The number of files with network errors skipped by this job.", + }, + "warnings": schema.ListAttribute{ + Computed: true, + Description: "A list of warning messages for this job.", + MarkdownDescription: "A list of warning messages for this job.", + ElementType: types.StringType, + }, + "char_specs_replicated": schema.Int64Attribute{ + Computed: true, + Description: "The number of char specs replicated by this job.", + MarkdownDescription: "The number of char specs replicated by this job.", + }, + "total_data_bytes": schema.Int64Attribute{ + Computed: true, + Description: "The total number of bytes transferred by this job.", + MarkdownDescription: "The total number of bytes transferred by this job.", + }, + "subreport_count": schema.Int64Attribute{ + Computed: true, + Description: "The number of subreports that are available for this job report.", + MarkdownDescription: "The number of subreports that are available for this job report.", + }, + "sparse_data_bytes": schema.Int64Attribute{ + Computed: true, + Description: "The number of sparse data bytes transferred by this job.", + MarkdownDescription: "The number of 
sparse data bytes transferred by this job.", + }, + "action": schema.StringAttribute{ + Computed: true, + Description: "The action to be taken by this job.", + MarkdownDescription: "The action to be taken by this job.", + }, + "source_files_unlinked": schema.Int64Attribute{ + Computed: true, + Description: "The number of files unlinked on the source.", + MarkdownDescription: "The number of files unlinked on the source.", + }, + "error_checksum_files_skipped": schema.Int64Attribute{ + Computed: true, + Description: "The number of files with checksum errors skipped by this job.", + MarkdownDescription: "The number of files with checksum errors skipped by this job.", + }, + "up_to_date_files_skipped": schema.Int64Attribute{ + Computed: true, + Description: "The number of up-to-date files skipped by this job.", + MarkdownDescription: "The number of up-to-date files skipped by this job.", + }, + "unchanged_data_bytes": schema.Int64Attribute{ + Computed: true, + Description: "The number of bytes unchanged by this job.", + MarkdownDescription: "The number of bytes unchanged by this job.", + }, + "hash_exceptions_fixed": schema.Int64Attribute{ + Computed: true, + Description: "The number of hash exceptions fixed by this job.", + MarkdownDescription: "The number of hash exceptions fixed by this job.", + }, + "target_files_unlinked": schema.Int64Attribute{ + Computed: true, + Description: "The number of files unlinked on the target.", + MarkdownDescription: "The number of files unlinked on the target.", + }, + "new_files_replicated": schema.Int64Attribute{ + Computed: true, + Description: "The number of new files replicated by this job.", + MarkdownDescription: "The number of new files replicated by this job.", + }, + "directories_replicated": schema.Int64Attribute{ + Computed: true, + Description: "The number of directories replicated.", + MarkdownDescription: "The number of directories replicated.", + }, + "end_time": schema.Int64Attribute{ + Computed: true, + Description: "The time the job ended in unix epoch seconds. The field is null if the job hasn't ended.", + MarkdownDescription: "The time the job ended in unix epoch seconds. 
The field is null if the job hasn't ended.", + }, + "policy_name": schema.StringAttribute{ + Computed: true, + Description: "The name of the policy.", + MarkdownDescription: "The name of the policy.", + }, + "quotas_deleted": schema.Int64Attribute{ + Computed: true, + Description: "The number of quotas removed from the target.", + MarkdownDescription: "The number of quotas removed from the target.", + }, + "source_files_linked": schema.Int64Attribute{ + Computed: true, + Description: "The number of files linked on the source.", + MarkdownDescription: "The number of files linked on the source.", + }, + "files_new": schema.Int64Attribute{ + Computed: true, + Description: "The number of files created by this job.", + MarkdownDescription: "The number of files created by this job.", + }, + "total_files": schema.Int64Attribute{ + Computed: true, + Description: "The number of files affected by this job.", + MarkdownDescription: "The number of files affected by this job.", + }, + "id": schema.StringAttribute{ + Computed: true, + Description: "A unique identifier for this object.", + MarkdownDescription: "A unique identifier for this object.", + }, + "dirs_new": schema.Int64Attribute{ + Computed: true, + Description: "The number of directories created by this job.", + MarkdownDescription: "The number of directories created by this job.", + }, + "target_directories_linked": schema.Int64Attribute{ + Computed: true, + Description: "The number of directories linked on the target.", + MarkdownDescription: "The number of directories linked on the target.", + }, + "bytes_recoverable": schema.Int64Attribute{ + Computed: true, + Description: "The number of bytes recoverable by this job.", + MarkdownDescription: "The number of bytes recoverable by this job.", + }, + "corrected_lins": schema.Int64Attribute{ + Computed: true, + Description: "The number of LINs corrected by this job.", + MarkdownDescription: "The number of LINs corrected by this job.", + }, + "files_linked": schema.Int64Attribute{ + Computed: true, + Description: "The number of files linked by this job.", + MarkdownDescription: "The number of files linked by this job.", + }, + "target_bytes_recovered": schema.Int64Attribute{ + Computed: true, + Description: "The number of bytes recovered on the target.", + MarkdownDescription: "The number of bytes recovered on the target.", + }, + "worm_committed_file_conflicts": schema.Int64Attribute{ + Computed: true, + Description: "The number of WORM committed files which needed to be reverted. Since WORM committed files cannot be reverted, this is the number of files that were preserved in the compliance store.", + MarkdownDescription: "The number of WORM committed files which needed to be reverted. 
Since WORM committed files cannot be reverted, this is the number of files that were preserved in the compliance store.", + }, + "user_conflict_files_skipped": schema.Int64Attribute{ + Computed: true, + Description: "The number of files with user conflicts skipped by this job.", + MarkdownDescription: "The number of files with user conflicts skipped by this job.", + }, + // // "service_report": schema.ListNestedAttribute{ + // // Computed: true, + // // Description: "Data for each component exported as part of service replication.", + // // MarkdownDescription: "Data for each component exported as part of service replication.", + // // NestedObject: schema.NestedAttributeObject{ + // // Attributes: map[string]schema.Attribute{ + // // "handlers_transferred": schema.Int64Attribute{ + // // Computed: true, + // // Description: "The number of handlers exported.", + // // MarkdownDescription: "The number of handlers exported.", + // // }, + // // "start_time": schema.Int64Attribute{ + // // Computed: true, + // // Description: "The time the job began this component.", + // // MarkdownDescription: "The time the job began this component.", + // // }, + // // "component": schema.StringAttribute{ + // // Computed: true, + // // Description: "The component that was processed.", + // // MarkdownDescription: "The component that was processed.", + // // }, + // // "directory": schema.StringAttribute{ + // // Computed: true, + // // Description: "The directory of the service export.", + // // MarkdownDescription: "The directory of the service export.", + // // }, + // // "handlers_skipped": schema.Int64Attribute{ + // // Computed: true, + // // Description: "The number of handlers skipped during export.", + // // MarkdownDescription: "The number of handlers skipped during export.", + // // }, + // // "filter": schema.ListAttribute{ + // // Computed: true, + // // Description: "A list of path-based filters for exporting components.", + // // MarkdownDescription: "A list of path-based filters for exporting components.", + // // ElementType: types.StringType, + // // }, + // // "handlers_failed": schema.Int64Attribute{ + // // Computed: true, + // // Description: "The number of handlers failed during export.", + // // MarkdownDescription: "The number of handlers failed during export.", + // // }, + // // "records_failed": schema.Int64Attribute{ + // // Computed: true, + // // Description: "The number of records failed during export.", + // // MarkdownDescription: "The number of records failed during export.", + // // }, + // // "end_time": schema.Int64Attribute{ + // // Computed: true, + // // Description: "The time the job ended this component.", + // // MarkdownDescription: "The time the job ended this component.", + // // }, + // // "status": schema.StringAttribute{ + // // Computed: true, + // // Description: "The current status of export for this component.", + // // MarkdownDescription: "The current status of export for this component.", + // // }, + // // "error_msg": schema.ListAttribute{ + // // Computed: true, + // // Description: "A list of error messages generated while exporting components.", + // // MarkdownDescription: "A list of error messages generated while exporting components.", + // // ElementType: types.StringType, + // // }, + // // "records_skipped": schema.Int64Attribute{ + // // Computed: true, + // // Description: "The number of records skipped during export.", + // // MarkdownDescription: "The number of records skipped during export.", + // // }, + // // "records_transferred": 
schema.Int64Attribute{ + // // Computed: true, + // // Description: "The number of records exported.", + // // MarkdownDescription: "The number of records exported.", + // // }, + // // }, + // // }, + // // }, + "target_directories_created": schema.Int64Attribute{ + Computed: true, + Description: "The number of directories created on the target.", + MarkdownDescription: "The number of directories created on the target.", + }, + "file_data_bytes": schema.Int64Attribute{ + Computed: true, + Description: "The number of bytes transferred that belong to files.", + MarkdownDescription: "The number of bytes transferred that belong to files.", + }, + "files_transferred": schema.Int64Attribute{ + Computed: true, + Description: "The number of files transferred by this job.", + MarkdownDescription: "The number of files transferred by this job.", + }, + "hash_exceptions_found": schema.Int64Attribute{ + Computed: true, + Description: "The number of hash exceptions found by this job.", + MarkdownDescription: "The number of hash exceptions found by this job.", + }, + "resynced_lins": schema.Int64Attribute{ + Computed: true, + Description: "The number of LINs resynched by this job.", + MarkdownDescription: "The number of LINs resynched by this job.", + }, + "ads_streams_replicated": schema.Int64Attribute{ + Computed: true, + Description: "The number of ads streams replicated by this job.", + MarkdownDescription: "The number of ads streams replicated by this job.", + }, + "network_bytes_to_target": schema.Int64Attribute{ + Computed: true, + Description: "The total number of bytes sent to the target by this job.", + MarkdownDescription: "The total number of bytes sent to the target by this job.", + }, + "retransmitted_files": schema.ListAttribute{ + Computed: true, + Description: "The files that have been retransmitted by this job.", + MarkdownDescription: "The files that have been retransmitted by this job.", + ElementType: types.StringType, + }, + "policy_action": schema.StringAttribute{ + Computed: true, + Description: "This is the action the policy is configured to perform.", + MarkdownDescription: "This is the action the policy is configured to perform.", + }, + "dirs_deleted": schema.Int64Attribute{ + Computed: true, + Description: "The number of directories deleted by this job.", + MarkdownDescription: "The number of directories deleted by this job.", + }, + "fifos_replicated": schema.Int64Attribute{ + Computed: true, + Description: "The number of fifos replicated by this job.", + MarkdownDescription: "The number of fifos replicated by this job.", + }, + "total_chunks": schema.Int64Attribute{ + Computed: true, + Description: "The total number of data chunks transmitted by this job.", + MarkdownDescription: "The total number of data chunks transmitted by this job.", + }, + "flipped_lins": schema.Int64Attribute{ + Computed: true, + Description: "The number of LINs flipped by this job.", + MarkdownDescription: "The number of LINs flipped by this job.", + }, + "sockets_replicated": schema.Int64Attribute{ + Computed: true, + Description: "The number of sockets replicated by this job.", + MarkdownDescription: "The number of sockets replicated by this job.", + }, + "dead_node": schema.BoolAttribute{ + Computed: true, + Description: "This field is true if the node running this job is dead.", + MarkdownDescription: "This field is true if the node running this job is dead.", + }, + "files_selected": schema.Int64Attribute{ + Computed: true, + Description: "The number of files selected by this job.", + 
MarkdownDescription: "The number of files selected by this job.", + }, + "source_directories_created": schema.Int64Attribute{ + Computed: true, + Description: "The number of directories created on the source.", + MarkdownDescription: "The number of directories created on the source.", + }, + "bytes_transferred": schema.Int64Attribute{ + Computed: true, + Description: "The number of bytes that have been transferred by this job.", + MarkdownDescription: "The number of bytes that have been transferred by this job.", + }, + "succeeded_chunks": schema.Int64Attribute{ + Computed: true, + Description: "The number of data chunks that have been transmitted successfully.", + MarkdownDescription: "The number of data chunks that have been transmitted successfully.", + }, + "retry": schema.Int64Attribute{ + Computed: true, + Description: "The number of times the job has been retried.", + MarkdownDescription: "The number of times the job has been retried.", + }, + "updated_files_replicated": schema.Int64Attribute{ + Computed: true, + Description: "The number of updated files replicated by this job.", + MarkdownDescription: "The number of updated files replicated by this job.", + }, + "source_directories_linked": schema.Int64Attribute{ + Computed: true, + Description: "The number of directories linked on the source.", + MarkdownDescription: "The number of directories linked on the source.", + }, + "committed_files": schema.Int64Attribute{ + Computed: true, + Description: "The number of WORM committed files.", + MarkdownDescription: "The number of WORM committed files.", + }, + "errors": schema.ListAttribute{ + Computed: true, + Description: "A list of error messages for this job.", + MarkdownDescription: "A list of error messages for this job.", + ElementType: types.StringType, + }, + "target_snapshots": schema.ListAttribute{ + Computed: true, + Description: "The target snapshots created by this job.", + MarkdownDescription: "The target snapshots created by this job.", + ElementType: types.StringType, + }, + "source_directories_visited": schema.Int64Attribute{ + Computed: true, + Description: "The number of directories visited on the source.", + MarkdownDescription: "The number of directories visited on the source.", + }, + "regular_files_replicated": schema.Int64Attribute{ + Computed: true, + Description: "The number of regular files replicated by this job.", + MarkdownDescription: "The number of regular files replicated by this job.", + }, + "running_chunks": schema.Int64Attribute{ + Computed: true, + Description: "The number of data chunks currently being transmitted.", + MarkdownDescription: "The number of data chunks currently being transmitted.", + }, + "start_time": schema.Int64Attribute{ + Computed: true, + Description: "The time the job started in unix epoch seconds. The field is null if the job hasn't started.", + MarkdownDescription: "The time the job started in unix epoch seconds. The field is null if the job hasn't started.", + }, + "duration": schema.Int64Attribute{ + Computed: true, + Description: "The amount of time in seconds between when the job was started and when it ended. If the job has not yet ended, this is the amount of time since the job started. This field is null if the job has not yet started.", + MarkdownDescription: "The amount of time in seconds between when the job was started and when it ended. If the job has not yet ended, this is the amount of time since the job started. 
This field is null if the job has not yet started.", + }, + "files_with_ads_replicated": schema.Int64Attribute{ + Computed: true, + Description: "The number of files with ads replicated by this job.", + MarkdownDescription: "The number of files with ads replicated by this job.", + }, + "files_unlinked": schema.Int64Attribute{ + Computed: true, + Description: "The number of files unlinked by this job.", + MarkdownDescription: "The number of files unlinked by this job.", + }, + "hard_links_replicated": schema.Int64Attribute{ + Computed: true, + Description: "The number of hard links replicated by this job.", + MarkdownDescription: "The number of hard links replicated by this job.", + }, + "num_retransmitted_files": schema.Int64Attribute{ + Computed: true, + Description: "The number of files that have been retransmitted by this job.", + MarkdownDescription: "The number of files that have been retransmitted by this job.", + }, + "target_directories_unlinked": schema.Int64Attribute{ + Computed: true, + Description: "The number of directories unlinked on the target.", + MarkdownDescription: "The number of directories unlinked on the target.", + }, + "target_files_linked": schema.Int64Attribute{ + Computed: true, + Description: "The number of files linked on the target.", + MarkdownDescription: "The number of files linked on the target.", + }, + "state": schema.StringAttribute{ + Computed: true, + Description: "The state of the job.", + MarkdownDescription: "The state of the job.", + }, + "sync_type": schema.StringAttribute{ + Computed: true, + Description: "The type of sync being performed by this job.", + MarkdownDescription: "The type of sync being performed by this job.", + }, + "failed_chunks": schema.Int64Attribute{ + Computed: true, + Description: "The number of data chunks that failed transmission.", + MarkdownDescription: "The number of data chunks that failed transmission.", + }, + "policy": schema.SingleNestedAttribute{ + Computed: true, + Description: "The policy associated with this job, or null if there is currently no policy associated with this job (this can happen if the job is newly created and not yet fully populated in the underlying database).", + MarkdownDescription: "The policy associated with this job, or null if there is currently no policy associated with this job (this can happen if the job is newly created and not yet fully populated in the underlying database).", + Attributes: map[string]schema.Attribute{ + "file_matching_pattern": schema.SingleNestedAttribute{ + Computed: true, + Description: "A file matching pattern, organized as an OR'ed set of AND'ed file criteria, for example ((a AND b) OR (x AND y)) used to define a set of files with specific properties. Policies of type 'sync' cannot use 'path' or time criteria in their matching patterns, but policies of type 'copy' can use all listed criteria.", + MarkdownDescription: "A file matching pattern, organized as an OR'ed set of AND'ed file criteria, for example ((a AND b) OR (x AND y)) used to define a set of files with specific properties. 
Policies of type 'sync' cannot use 'path' or time criteria in their matching patterns, but policies of type 'copy' can use all listed criteria.", + Attributes: map[string]schema.Attribute{ + "or_criteria": schema.ListNestedAttribute{ + Computed: true, + Description: "An array containing objects with \"and_criteria\" properties, each set of and_criteria will be logically OR'ed together to create the full file matching pattern.", + MarkdownDescription: "An array containing objects with \"and_criteria\" properties, each set of and_criteria will be logically OR'ed together to create the full file matching pattern.", + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "and_criteria": schema.ListNestedAttribute{ + Computed: true, + Description: "An array containing individual file criterion objects each describing one criterion. These are logically AND'ed together to form a set of criteria.", + MarkdownDescription: "An array containing individual file criterion objects each describing one criterion. These are logically AND'ed together to form a set of criteria.", + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "attribute_exists": schema.BoolAttribute{ + Computed: true, + Description: "For \"custom_attribute\" type criteria. The file will match as long as the attribute named by \"field\" exists. Default is true.", + MarkdownDescription: "For \"custom_attribute\" type criteria. The file will match as long as the attribute named by \"field\" exists. Default is true.", + }, + "case_sensitive": schema.BoolAttribute{ + Computed: true, + Description: "If true, the value comparison will be case sensitive. Default is true.", + MarkdownDescription: "If true, the value comparison will be case sensitive. Default is true.", + }, + "field": schema.StringAttribute{ + Computed: true, + Description: "The name of the file attribute to match on (only required if this is a custom_attribute type criterion). Default is an empty string \"\".", + MarkdownDescription: "The name of the file attribute to match on (only required if this is a custom_attribute type criterion). Default is an empty string \"\".", + }, + "whole_word": schema.BoolAttribute{ + Computed: true, + Description: "If true, the attribute must match the entire word. Default is true.", + MarkdownDescription: "If true, the attribute must match the entire word. Default is true.", + }, + "operator": schema.StringAttribute{ + Computed: true, + Description: "How to compare the specified attribute of each file to the specified value.", + MarkdownDescription: "How to compare the specified attribute of each file to the specified value.", + }, + "type": schema.StringAttribute{ + Computed: true, + Description: "The type of this criterion, that is, which file attribute to match on.", + MarkdownDescription: "The type of this criterion, that is, which file attribute to match on.", + }, + "value": schema.StringAttribute{ + Computed: true, + Description: "The value to compare the specified attribute of each file to.", + MarkdownDescription: "The value to compare the specified attribute of each file to.", + }, + }, + }, + }, + }, + }, + }, + }, + }, + "source_include_directories": schema.ListAttribute{ + Computed: true, + Description: "Directories that will be included in the sync. Modifying this field will result in a full synchronization of all data.", + MarkdownDescription: "Directories that will be included in the sync. 
Modifying this field will result in a full synchronization of all data.", + ElementType: types.StringType, + }, + "source_root_path": schema.StringAttribute{ + Computed: true, + Description: "The root directory on the source cluster the files will be synced from. Modifying this field will result in a full synchronization of all data.", + MarkdownDescription: "The root directory on the source cluster the files will be synced from. Modifying this field will result in a full synchronization of all data.", + }, + "target_host": schema.StringAttribute{ + Computed: true, + Description: "Hostname or IP address of sync target cluster. Modifying the target cluster host can result in the policy being unrunnable if the new target does not match the current target association.", + MarkdownDescription: "Hostname or IP address of sync target cluster. Modifying the target cluster host can result in the policy being unrunnable if the new target does not match the current target association.", + }, + "action": schema.StringAttribute{ + Computed: true, + Description: "The action to be taken by the job.", + MarkdownDescription: "The action to be taken by the job.", + }, + "target_path": schema.StringAttribute{ + Computed: true, + Description: "Absolute filesystem path on the target cluster for the sync destination.", + MarkdownDescription: "Absolute filesystem path on the target cluster for the sync destination.", + }, + "name": schema.StringAttribute{ + Computed: true, + Description: "User-assigned name of this sync policy.", + MarkdownDescription: "User-assigned name of this sync policy.", + }, + "source_exclude_directories": schema.ListAttribute{ + Computed: true, + Description: "Directories that will be excluded from the sync. Modifying this field will result in a full synchronization of all data.", + MarkdownDescription: "Directories that will be excluded from the sync. 
Modifying this field will result in a full synchronization of all data.", + ElementType: types.StringType, + }, + }, + }, + }, + }, + }, + }, + Blocks: map[string]schema.Block{ + "filter": schema.SingleNestedBlock{ + Attributes: map[string]schema.Attribute{ + "sort": schema.StringAttribute{ + Optional: true, + Description: "The field that will be used for sorting.", + MarkdownDescription: "The field that will be used for sorting.", + }, + "resume": schema.StringAttribute{ + Optional: true, + Description: "Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).", + MarkdownDescription: "Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).", + }, + "newer_than": schema.Int64Attribute{ + Optional: true, + Description: "Filter the returned reports to include only those whose jobs started more recently than the specified number of days ago.", + MarkdownDescription: "Filter the returned reports to include only those whose jobs started more recently than the specified number of days ago.", + }, + "policy_name": schema.StringAttribute{ + Optional: true, + Description: "Filter the returned reports to include only those with this policy name.", + MarkdownDescription: "Filter the returned reports to include only those with this policy name.", + }, + "state": schema.StringAttribute{ + Optional: true, + Description: "Filter the returned reports to include only those whose jobs are in this state.", + MarkdownDescription: "Filter the returned reports to include only those whose jobs are in this state.", + }, + "limit": schema.Int64Attribute{ + Optional: true, + Description: "Return no more than this many results at once (see resume).", + MarkdownDescription: "Return no more than this many results at once (see resume).", + }, + "reports_per_policy": schema.Int64Attribute{ + Optional: true, + Description: "If specified, only the N most recent reports will be returned per policy. If no other query args are present this argument defaults to 1.", + MarkdownDescription: "If specified, only the N most recent reports will be returned per policy. If no other query args are present this argument defaults to 1.", + }, + "summary": schema.BoolAttribute{ + Optional: true, + Description: "Return a summary rather than entire objects.", + MarkdownDescription: "Return a summary rather than entire objects.", + }, + "dir": schema.StringAttribute{ + Optional: true, + Description: "The direction of the sort.", + MarkdownDescription: "The direction of the sort.", + }, + }, + }, + }, + } +} + +// Configure configures the data source. +func (d *ReplicationReportDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + pscaleClient, ok := req.ProviderData.(*client.Client) + + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *client.Client, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + + return + } + + d.client = pscaleClient +} + +// Read reads data from the data source. 
+func (d *ReplicationReportDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + tflog.Info(ctx, "Reading replication report data source") + + var state models.ReplicationReportsDatasourceModel + + // Read Terraform configuration data into the model + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + + if resp.Diagnostics.HasError() { + return + } + + replicationReportList, err := helper.GetReplicationReports(ctx, d.client, state) + + if err != nil { + errStr := constants.ReadReplicationReportsErrorMsg + "with error: " + message := helper.GetErrorString(err, errStr) + resp.Diagnostics.AddError( + "Error getting the list of replication reports", + message, + ) + return + } + var rr []models.ReplicationReportsDetail + for _, rrItem := range replicationReportList.Reports { + entity := models.ReplicationReportsDetail{} + err := helper.CopyFields(ctx, rrItem, &entity) + if err != nil { + resp.Diagnostics.AddError("Error reading replication report datasource plan", + fmt.Sprintf("Could not list replication report with error: %s", err.Error())) + return + } + rr = append(rr, entity) + } + state.Reports = rr + state.ID = types.StringValue("replication_report_datasource") + // Save data into Terraform state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + tflog.Info(ctx, "Done with reading replication report data source") +} diff --git a/powerscale/provider/replication_reports_datasource_test.go b/powerscale/provider/replication_reports_datasource_test.go new file mode 100644 index 00000000..505a7222 --- /dev/null +++ b/powerscale/provider/replication_reports_datasource_test.go @@ -0,0 +1,119 @@ +/* +Copyright (c) 2024 Dell Inc., or its subsidiaries. All Rights Reserved. + +Licensed under the Mozilla Public License Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://mozilla.org/MPL/2.0/ + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package provider + +import ( + "fmt" + "github.com/bytedance/mockey" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "regexp" + "terraform-provider-powerscale/powerscale/helper" + "testing" +) + +func TestAccReplicationReportsDataSourceAll(t *testing.T) { + var rrTerraformName = "data.powerscale_replication_report.all" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + Steps: []resource.TestStep{ + // read all + { + Config: ProviderConfig + RRDataSourceConfig, + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrSet(rrTerraformName, "replication_reports.#"), + ), + }, + }, + }) +} + +func TestAccReplicationReportsDataSourceFilter(t *testing.T) { + var rrTerraformName = "data.powerscale_replication_report.filtering" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + Steps: []resource.TestStep{ + // read with filter + { + Config: ProviderConfig + RRDataSourceConfigFilter, + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrSet(rrTerraformName, "replication_reports.#"), + ), + }, + }, + }) +} + +func TestAccReplicationReportsDataSourceFilterErr(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + Steps: []resource.TestStep{ + { + Config: ProviderConfig + RRDataSourceFilterConfigErr, + ExpectError: regexp.MustCompile(`.*Unsupported argument.*`), + }, + }, + }) +} + +func TestAccReplicationReportsDataSourceGettingErr(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + Steps: []resource.TestStep{ + { + PreConfig: func() { + FunctionMocker = mockey.Mock(helper.GetReplicationReports).Return(nil, fmt.Errorf("mock error")).Build() + }, + Config: ProviderConfig + RRDataSourceConfig, + ExpectError: regexp.MustCompile(`.*mock error.*`), + }, + }, + }) +} + +var RRDataSourceConfig = ` +data "powerscale_replication_report" "all" { +} +` + +var RRDataSourceConfigFilter = ` +data "powerscale_replication_report" "filtering" { + filter { + reports_per_policy = 2 + } +} +` + +var RRDataSourceNameConfigErr = ` +data "powerscale_replication_report" "test" { + filter { + policy_name = "InvalidName" + } +} +` + +var RRDataSourceFilterConfigErr = ` +data "powerscale_replication_report" "test" { + filter { + invalidFilter = "Invalid" + } +} +`
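
A minimal usage sketch of the filter and report attributes defined above: fetch reports for jobs started within the last week in a given state, then surface a compact per-report summary. The data source label `recent` and the `finished` state value are illustrative assumptions, not part of this change.

```terraform
# Hedged sketch: "finished" is an assumed state value; adjust it to the
# state strings your cluster actually reports.
data "powerscale_replication_report" "recent" {
  filter {
    newer_than = 7          # jobs started within the last 7 days
    state      = "finished" # assumed state value
  }
}

# Compact per-report summary built from attributes defined in this schema.
output "recent_report_summary" {
  value = [
    for r in data.powerscale_replication_report.recent.replication_reports : {
      policy            = r.policy.name
      state             = r.state
      duration_seconds  = r.duration
      bytes_transferred = r.bytes_transferred
    }
  ]
}
```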
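
The `file_matching_pattern` attribute above models an OR'ed set of AND'ed criteria, for example ((a AND b) OR (x AND y)). A hedged sketch that flattens each report's criteria into readable strings, reusing the illustrative `recent` data source from the previous block:

```terraform
# Hedged sketch: render each policy's criteria as nested lists of
# "type operator value" strings. Reports whose policy is null would need
# guarding (e.g. with try()) before dereferencing.
output "recent_report_match_criteria" {
  value = [
    for r in data.powerscale_replication_report.recent.replication_reports : [
      for or_block in r.policy.file_matching_pattern.or_criteria : [
        for c in or_block.and_criteria : "${c.type} ${c.operator} ${c.value}"
      ]
    ]
  ]
}
```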