From 97ad90fb150a1ff118284adc01bd7d8053f80927 Mon Sep 17 00:00:00 2001 From: Olivier Bierlaire Date: Thu, 31 Aug 2023 17:31:27 +0200 Subject: [PATCH] Reorg : tf resource mapping as yaml file (#80) --- cmd/plan.go | 14 +- cmd/root.go | 15 - cmd/root_test.go | 2 +- go.mod | 18 +- go.sum | 36 +- internal/data/data.go | 11 +- .../coefficients/EmissionsPerRegion.go | 27 +- .../estimate/coefficients/coefficients.go | 8 +- internal/estimate/estimate.go | 10 + internal/estimate/estimate/CPU.go | 8 +- internal/estimate/estimate/Resource.go | 2 +- internal/estimate/estimate/gpu.go | 1 + internal/estimate/estimate_test.go | 27 +- internal/estimate/estimation/estimation.go | 5 + internal/output/json.go | 3 +- internal/output/text.go | 5 +- internal/plan/aws/Data.go | 79 --- internal/plan/aws/Data_test.go | 93 ---- internal/plan/aws/EBS.go | 128 ----- internal/plan/aws/EC2Instance.go | 101 ---- internal/plan/aws/ResourceIdentification.go | 32 -- internal/plan/aws/default_test.go | 245 --------- internal/plan/aws/resources.go | 32 -- internal/plan/aws/resources_test.go | 279 ----------- internal/plan/{aws/default.go => defaults.go} | 29 +- internal/plan/disk_type.go | 17 + internal/plan/disk_type_enum.go | 73 +++ internal/plan/expression.go | 39 ++ internal/plan/gcp/ComputeDiskResource.go | 116 ----- internal/plan/gcp/ComputeResource.go | 91 ---- .../plan/gcp/ComputeResourceFromTemplate.go | 72 --- internal/plan/gcp/GCPData.go | 53 -- internal/plan/gcp/GCPData_test.go | 58 --- .../plan/gcp/ManagedInstanceGroupResource.go | 129 ----- internal/plan/gcp/ResourceIdentification.go | 41 -- internal/plan/gcp/SQLInstanceResource.go | 63 --- internal/plan/gcp/resources.go | 80 --- internal/plan/gcp/resources_test.go | 204 -------- internal/plan/json_getters.go | 410 ++++++++++++++++ internal/plan/mapping.go | 98 ++++ internal/plan/mappingStructs.go | 50 ++ internal/plan/mappings/aws/ec2_ebs.yaml | 42 ++ internal/plan/mappings/aws/ec2_instance.yaml | 75 +++ 
internal/plan/mappings/aws/general.yaml | 16 + internal/plan/mappings/gcp/compute.yaml | 173 +++++++ internal/plan/mappings/gcp/compute_group.yaml | 100 ++++ internal/plan/mappings/gcp/disk.yaml | 47 ++ internal/plan/mappings/gcp/general.yaml | 12 + internal/plan/mappings/gcp/sql_database.yaml | 45 ++ internal/plan/resolver.go | 124 +++++ internal/plan/resources.go | 435 +++++++++++++--- internal/plan/resources_aws_test.go | 17 +- internal/plan/resources_gcp_test.go | 463 ++++++++---------- internal/providers/GPUWatt.go | 4 +- internal/providers/aws/AWS.go | 3 + internal/providers/gcp/GCP.go | 16 +- internal/providers/gcp/GCP_test.go | 28 +- internal/providers/provider.go | 1 + internal/resources/compute.go | 11 + internal/resources/data.go | 6 + internal/terraform/errors.go | 2 + internal/terraform/terraform.go | 153 +++--- internal/terraform/terraform_test.go | 14 +- internal/terraform/tfrefs/terraformRefs.go | 13 - internal/testutils/json.go | 21 + internal/testutils/testutils.go | 2 + internal/tools/aws/instances/generate.go | 48 +- internal/tools/gcp/gcp.go | 5 +- internal/tools/gcp/instances/generate.go | 14 +- internal/tools/gcp/tiers/generate.go | 26 +- internal/utils/config.go | 15 +- internal/utils/conversions.go | 39 ++ internal/utils/conversions_test.go | 58 +++ internal/utils/defaults.yaml | 9 - internal/utils/jsonQuery.go | 36 ++ internal/utils/jsonQuery_test.go | 100 ++++ pkg/estimate/estimate.go | 3 + pkg/estimate/estimate_test.go | 5 +- pkg/resources/ressource.go | 7 +- pkg/resources/ressource_test.go | 3 +- test/data/gcp_instances.json | 13 + test/terraform/gcp_1/main.tf | 2 +- test/terraform/gcp_calling_module/provider.tf | 1 - test/terraform/noResources/provider.tf | 9 + test/terraform/planRaw/plan.tfplan | Bin 3740 -> 3740 bytes 85 files changed, 2484 insertions(+), 2536 deletions(-) delete mode 100644 internal/plan/aws/Data.go delete mode 100644 internal/plan/aws/Data_test.go delete mode 100644 internal/plan/aws/EBS.go delete mode 100644 
internal/plan/aws/EC2Instance.go delete mode 100644 internal/plan/aws/ResourceIdentification.go delete mode 100644 internal/plan/aws/default_test.go delete mode 100644 internal/plan/aws/resources.go delete mode 100644 internal/plan/aws/resources_test.go rename internal/plan/{aws/default.go => defaults.go} (52%) create mode 100644 internal/plan/disk_type.go create mode 100644 internal/plan/disk_type_enum.go create mode 100644 internal/plan/expression.go delete mode 100644 internal/plan/gcp/ComputeDiskResource.go delete mode 100644 internal/plan/gcp/ComputeResource.go delete mode 100644 internal/plan/gcp/ComputeResourceFromTemplate.go delete mode 100644 internal/plan/gcp/GCPData.go delete mode 100644 internal/plan/gcp/GCPData_test.go delete mode 100644 internal/plan/gcp/ManagedInstanceGroupResource.go delete mode 100644 internal/plan/gcp/ResourceIdentification.go delete mode 100644 internal/plan/gcp/SQLInstanceResource.go delete mode 100644 internal/plan/gcp/resources.go delete mode 100644 internal/plan/gcp/resources_test.go create mode 100644 internal/plan/json_getters.go create mode 100644 internal/plan/mapping.go create mode 100644 internal/plan/mappingStructs.go create mode 100644 internal/plan/mappings/aws/ec2_ebs.yaml create mode 100644 internal/plan/mappings/aws/ec2_instance.yaml create mode 100644 internal/plan/mappings/aws/general.yaml create mode 100644 internal/plan/mappings/gcp/compute.yaml create mode 100644 internal/plan/mappings/gcp/compute_group.yaml create mode 100644 internal/plan/mappings/gcp/disk.yaml create mode 100644 internal/plan/mappings/gcp/general.yaml create mode 100644 internal/plan/mappings/gcp/sql_database.yaml create mode 100644 internal/plan/resolver.go delete mode 100644 internal/terraform/tfrefs/terraformRefs.go create mode 100644 internal/testutils/json.go create mode 100644 internal/utils/conversions.go create mode 100644 internal/utils/conversions_test.go create mode 100644 internal/utils/jsonQuery.go create mode 100644 
internal/utils/jsonQuery_test.go create mode 100644 test/terraform/noResources/provider.tf diff --git a/cmd/plan.go b/cmd/plan.go index 060de7e..e97bab1 100644 --- a/cmd/plan.go +++ b/cmd/plan.go @@ -1,6 +1,3 @@ -/* -Copyright © 2023 Carbonifer contact@carbonifer.io -*/ package cmd import ( @@ -8,6 +5,7 @@ import ( "os" "path/filepath" + "github.com/pkg/errors" log "github.com/sirupsen/logrus" "github.com/carboniferio/carbonifer/internal/estimate" @@ -18,7 +16,7 @@ import ( "github.com/spf13/viper" ) -var test_planCmdHasRun = false +var testPlanCmdHasRun = false // planCmd represents the plan command var planCmd = &cobra.Command{ @@ -38,7 +36,7 @@ Example usages: carbonifer plan /path/to/terraform/plan.tfplan`, Args: cobra.MaximumNArgs(1), Run: func(cmd *cobra.Command, args []string) { - test_planCmdHasRun = true + testPlanCmdHasRun = true log.Debug("Running command 'plan'") workdir, err := os.Getwd() @@ -63,7 +61,8 @@ Example usages: // Read resources from terraform plan resources, err := plan.GetResources(tfPlan) if err != nil { - log.Fatal(err) + errW := errors.Wrap(err, "Failed to get resources from terraform plan") + log.Panic(errW) } // Estimate CO2 emissions @@ -72,7 +71,7 @@ Example usages: // Generate report reportText := "" if viper.Get("out.format") == "json" { - reportText = output.GenerateReportJson(estimations) + reportText = output.GenerateReportJSON(estimations) } else { reportText = output.GenerateReportText(estimations) } @@ -81,6 +80,7 @@ Example usages: outFile := viper.Get("out.file").(string) if outFile == "" { log.Debug("output : stdout") + cmd.SetOut(os.Stdout) cmd.Println(reportText) } else { log.Debug("output :", outFile) diff --git a/cmd/root.go b/cmd/root.go index 2d8e988..5161793 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -1,18 +1,3 @@ -/* -Copyright © 2023 contact@carbonifer.io - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ package cmd import ( diff --git a/cmd/root_test.go b/cmd/root_test.go index d3dbc39..d761e40 100644 --- a/cmd/root_test.go +++ b/cmd/root_test.go @@ -32,6 +32,6 @@ func TestRootPlan(t *testing.T) { log.Debug(err) } - assert.True(t, test_planCmdHasRun) + assert.True(t, testPlanCmdHasRun) } diff --git a/go.mod b/go.mod index 81f45b1..8292507 100644 --- a/go.mod +++ b/go.mod @@ -3,19 +3,22 @@ module github.com/carboniferio/carbonifer go 1.20 require ( - github.com/forestgiant/sliceutil v0.0.0-20160425183142-94783f95db6c github.com/hashicorp/go-version v1.6.0 github.com/hashicorp/hc-install v0.5.2 github.com/hashicorp/terraform-exec v0.18.1 github.com/hashicorp/terraform-json v0.17.0 github.com/heirko/go-contrib v0.0.0-20200825160048-11fc5e2235fa + github.com/itchyny/gojq v0.12.13 github.com/olekukonko/tablewriter v0.0.5 + github.com/pkg/errors v0.9.1 + github.com/polkeli/yaml/v3 v3.1.0 github.com/shopspring/decimal v1.3.1 github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.7.0 github.com/spf13/viper v1.16.0 github.com/stretchr/testify v1.8.4 github.com/yunabe/easycsv v0.0.2 + golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 golang.org/x/oauth2 v0.8.0 google.golang.org/api v0.126.0 gopkg.in/yaml.v3 v3.0.1 @@ -27,11 +30,12 @@ require ( github.com/cloudflare/circl v1.3.3 // indirect github.com/google/s2a-go v0.1.4 // indirect github.com/imdario/mergo v0.3.16 // indirect + github.com/itchyny/timefmt-go v0.1.5 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/rivo/uniseg v0.4.4 // indirect - golang.org/x/mod v0.10.0 // indirect + 
golang.org/x/mod v0.12.0 // indirect + golang.org/x/sync v0.3.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect - gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) @@ -65,10 +69,10 @@ require ( github.com/subosito/gotenv v1.4.2 // indirect github.com/zclconf/go-cty v1.13.2 // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/crypto v0.9.0 // indirect - golang.org/x/net v0.10.0 // indirect - golang.org/x/sys v0.8.0 // indirect - golang.org/x/text v0.9.0 // indirect + golang.org/x/crypto v0.12.0 // indirect + golang.org/x/net v0.14.0 // indirect + golang.org/x/sys v0.11.0 // indirect + golang.org/x/text v0.12.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/grpc v1.55.0 // indirect google.golang.org/protobuf v1.30.0 // indirect diff --git a/go.sum b/go.sum index 078fde3..38e86bf 100644 --- a/go.sum +++ b/go.sum @@ -79,8 +79,6 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/forestgiant/sliceutil v0.0.0-20160425183142-94783f95db6c h1:pBgVXWDXju1m8W4lnEeIqTHPOzhTUO81a7yknM/xQR4= -github.com/forestgiant/sliceutil v0.0.0-20160425183142-94783f95db6c/go.mod h1:pFdJbAhRf7rh6YYMUdIQGyzne6zYL1tCUW8QV2B3UfY= github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= @@ -193,6 +191,10 @@ github.com/imdario/mergo v0.3.16 
h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/itchyny/gojq v0.12.13 h1:IxyYlHYIlspQHHTE0f3cJF0NKDMfajxViuhBLnHd/QU= +github.com/itchyny/gojq v0.12.13/go.mod h1:JzwzAqenfhrPUuwbmEz3nu3JQmFLlQTQMUcOdnu/Sf4= +github.com/itchyny/timefmt-go v0.1.5 h1:G0INE2la8S6ru/ZI5JecgyzbbJNs5lG1RcBqa7Jm6GE= +github.com/itchyny/timefmt-go v0.1.5/go.mod h1:nEP7L+2YmAbT2kZ2HfSs1d8Xtw9LY8D2stDBckWakZ8= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= @@ -204,7 +206,6 @@ github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -221,10 +222,13 @@ github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6 github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= github.com/pjbgf/sha1cd v0.3.0 
h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polkeli/yaml/v3 v3.1.0 h1:AkU/1dA3M0ase43el+dr+8j/PYFpHymiP813eyp7ezk= +github.com/polkeli/yaml/v3 v3.1.0/go.mod h1:o7HkxPZK2OzdZjh2bTR/NfLQ0jUF53DeONWl0oM983Q= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= @@ -296,8 +300,8 @@ golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= -golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -308,6 +312,8 @@ 
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ= +golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -333,8 +339,8 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -373,8 +379,8 @@ golang.org/x/net v0.1.0/go.mod 
h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -398,7 +404,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -443,8 +450,8 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -463,8 +470,8 @@ golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -629,7 +636,6 @@ google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= diff --git a/internal/data/data.go b/internal/data/data.go index 4545691..3d8d1a9 100644 --- a/internal/data/data.go +++ b/internal/data/data.go @@ -6,6 +6,7 @@ import ( "os" "path/filepath" + "github.com/pkg/errors" log "github.com/sirupsen/logrus" "github.com/spf13/viper" @@ -14,6 +15,7 @@ import ( //go:embed data/* var data embed.FS +// ReadDataFile reads a file from the data directory func ReadDataFile(filename string) []byte { dataPath := viper.GetString("data.path") if dataPath != "" { @@ -26,20 +28,19 @@ func ReadDataFile(filename string) []byte { log.Fatal(err) } return data - } else { - return readEmbeddedFile(filename) } - } else { - // Otherwise, read from the embedded file return readEmbeddedFile(filename) + } + return readEmbeddedFile(filename) } func readEmbeddedFile(filename string) []byte { log.Debugf(" reading datafile '%v' embedded", filename) data, err := fs.ReadFile(data, "data/"+filename) if err != nil { - log.Fatal(err) + errW := errors.Wrap(err, "cannot read embedded data file") + log.Fatal(errW) } return data } diff --git a/internal/estimate/coefficients/EmissionsPerRegion.go b/internal/estimate/coefficients/EmissionsPerRegion.go index 5960a0a..f493400 100644 --- a/internal/estimate/coefficients/EmissionsPerRegion.go +++ b/internal/estimate/coefficients/EmissionsPerRegion.go @@ -1,10 +1,10 @@ package coefficients import ( - "errors" - "fmt" "strings" + 
"github.com/pkg/errors" + "github.com/carboniferio/carbonifer/internal/data" "github.com/carboniferio/carbonifer/internal/providers" "github.com/shopspring/decimal" @@ -13,8 +13,17 @@ import ( "github.com/yunabe/easycsv" ) +// EmissionsPerRegion is a map of regions to their emissions var EmissionsPerRegion map[string]Emissions +// Emissions is the emissions of a region +type Emissions struct { + Region string + Location string + GridCarbonIntensity decimal.Decimal +} + +// RegionEmission returns the emissions of a region func RegionEmission(provider providers.Provider, region string) (*Emissions, error) { var dataFile string switch provider { @@ -33,29 +42,23 @@ func RegionEmission(provider providers.Provider, region string) (*Emissions, err } emissions, ok := EmissionsPerRegion[region] if !ok { - return nil, errors.New(fmt.Sprint("Region does not exist: ", region)) + return nil, errors.Errorf("Region does not exist: '%v'", region) } return &emissions, nil } -type EmissionsCSV struct { +type emissionsCSV struct { Region string `name:"Region"` Location string `name:"Location"` GridCarbonIntensity float64 `name:"Grid carbon intensity (gCO2eq / kWh)"` } -type Emissions struct { - Region string - Location string - GridCarbonIntensity decimal.Decimal -} - // Source: Google func loadEmissionsPerRegion(dataFile string) map[string]Emissions { // Read the CSV records - var records []EmissionsCSV + var records []emissionsCSV regionEmissionFile := data.ReadDataFile(dataFile) - log.Debugf("reading GCP region/grid emissions from: %v", regionEmissionFile) + log.Debugf("reading GCP region/grid emissions from: %v", dataFile) if err := easycsv.NewReader(strings.NewReader(string(regionEmissionFile))).ReadAll(&records); err != nil { log.Fatal(err) } diff --git a/internal/estimate/coefficients/coefficients.go b/internal/estimate/coefficients/coefficients.go index 780ff60..6fe6df4 100644 --- a/internal/estimate/coefficients/coefficients.go +++ 
b/internal/estimate/coefficients/coefficients.go @@ -10,6 +10,7 @@ import ( log "github.com/sirupsen/logrus" ) +// Coefficients is a struct that contains the coefficients for the energy estimation type Coefficients struct { CPUMinWh decimal.Decimal `json:"cpu_min_wh"` CPUMaxWh decimal.Decimal `json:"cpu_max_wh"` @@ -20,6 +21,7 @@ type Coefficients struct { PueAverage decimal.Decimal `json:"pue_average"` } +// CoefficientsProviders is a struct that contains the coefficients for the energy estimation per provider type CoefficientsProviders struct { AWS Coefficients `json:"AWS"` GCP Coefficients `json:"GCP"` @@ -28,6 +30,7 @@ type CoefficientsProviders struct { var coefficientsPerProviders *CoefficientsProviders +// GetEnergyCoefficients returns the coefficients for the energy estimation func GetEnergyCoefficients() *CoefficientsProviders { if coefficientsPerProviders == nil { energyCoefFile := data.ReadDataFile("energy_coefficients.json") @@ -39,11 +42,12 @@ func GetEnergyCoefficients() *CoefficientsProviders { return coefficientsPerProviders } +// GetByProvider returns the coefficients for the energy estimation of a provider func (cps *CoefficientsProviders) GetByProvider(provider providers.Provider) Coefficients { - return coefficientsPerProviders.GetByProviderName(provider.String()) + return coefficientsPerProviders.getByProviderName(provider.String()) } -func (cps *CoefficientsProviders) GetByProviderName(name string) Coefficients { +func (cps *CoefficientsProviders) getByProviderName(name string) Coefficients { r := reflect.ValueOf(cps) coefficients := reflect.Indirect(r).FieldByName(name) return coefficients.Interface().(Coefficients) diff --git a/internal/estimate/estimate.go b/internal/estimate/estimate.go index 2fe2ab8..dcbba0e 100644 --- a/internal/estimate/estimate.go +++ b/internal/estimate/estimate.go @@ -2,6 +2,7 @@ package estimate import ( "fmt" + "sort" "time" "github.com/carboniferio/carbonifer/internal/estimate/estimate" @@ -14,6 +15,7 @@ import ( 
"github.com/spf13/viper" ) +// EstimateResources estimates the power and carbon emissions of a list of resources func EstimateResources(resourceList map[string]resources.Resource) estimation.EstimationReport { var estimationResources []estimation.EstimationResource @@ -64,6 +66,14 @@ func EstimateResources(resourceList map[string]resources.Resource) estimation.Es } +// SortEstimations sorts a list of estimation resources by resource address +func SortEstimations(resources *[]estimation.EstimationResource) { + sort.Slice(*resources, func(i, j int) bool { + return (*resources)[i].Resource.GetAddress() < (*resources)[j].Resource.GetAddress() + }) +} + +// EstimateResource estimates the power and carbon emissions of a resource func EstimateResource(resource resources.Resource) (*estimation.EstimationResource, *providers.UnsupportedProviderError) { if !resource.IsSupported() { return estimateNotSupported(resource.(resources.UnsupportedResource)), nil diff --git a/internal/estimate/estimate/CPU.go b/internal/estimate/estimate/CPU.go index c94cf88..cebe912 100644 --- a/internal/estimate/estimate/CPU.go +++ b/internal/estimate/estimate/CPU.go @@ -19,10 +19,10 @@ func estimateWattCPU(resource *resources.ComputeResource) decimal.Decimal { var avgWatts decimal.Decimal // Average Watts = Min Watts + Avg vCPU Utilization * (Max Watts - Min Watts) - cpu_platform := resource.Specs.CPUType - if cpu_platform != "" && resource.Identification.Provider == providers.GCP { - cpu_platform := gcp.GetCPUWatt(strings.ToLower(cpu_platform)) - avgWatts = cpu_platform.MinWatts.Add(averageCPUUse.Mul(cpu_platform.MaxWatts.Sub(cpu_platform.MinWatts))) + cpuPlatform := resource.Specs.CPUType + if cpuPlatform != "" && resource.Identification.Provider == providers.GCP { + cpuPlatform := gcp.GetCPUWatt(strings.ToLower(cpuPlatform)) + avgWatts = cpuPlatform.MinWatts.Add(averageCPUUse.Mul(cpuPlatform.MaxWatts.Sub(cpuPlatform.MinWatts))) } else { minWH := 
coefficients.GetEnergyCoefficients().GetByProvider(provider).CPUMinWh maxWh := coefficients.GetEnergyCoefficients().GetByProvider(provider).CPUMaxWh diff --git a/internal/estimate/estimate/Resource.go b/internal/estimate/estimate/Resource.go index cc1bb3f..473e4a4 100644 --- a/internal/estimate/estimate/Resource.go +++ b/internal/estimate/estimate/Resource.go @@ -10,7 +10,7 @@ import ( "github.com/spf13/viper" ) -// Get the carbon emissions of a GCP resource +// EstimateSupportedResource gets the carbon emissions of a GCP resource func EstimateSupportedResource(resource resources.Resource) *estimation.EstimationResource { var computeResource resources.ComputeResource = resource.(resources.ComputeResource) diff --git a/internal/estimate/estimate/gpu.go b/internal/estimate/estimate/gpu.go index db4ed76..1dcfb2f 100644 --- a/internal/estimate/estimate/gpu.go +++ b/internal/estimate/estimate/gpu.go @@ -10,6 +10,7 @@ import ( "github.com/spf13/viper" ) +// EstimateWattGPU estimates the power consumption of a GPU resource func EstimateWattGPU(resource *resources.ComputeResource) decimal.Decimal { // Get average GPU usage provider := strings.ToLower(resource.Identification.Provider.String()) diff --git a/internal/estimate/estimate_test.go b/internal/estimate/estimate_test.go index d35c1dd..9bc4cf6 100644 --- a/internal/estimate/estimate_test.go +++ b/internal/estimate/estimate_test.go @@ -8,7 +8,6 @@ import ( "github.com/carboniferio/carbonifer/internal/providers" "github.com/carboniferio/carbonifer/internal/resources" _ "github.com/carboniferio/carbonifer/internal/testutils" - "github.com/carboniferio/carbonifer/internal/utils" "github.com/shopspring/decimal" "github.com/spf13/viper" "github.com/stretchr/testify/assert" @@ -74,7 +73,7 @@ var resourceGCPInstanceGroup = resources.ComputeResource{ } func TestEstimateResource(t *testing.T) { - avg_cpu_use := viper.GetFloat64("provider.gcp.avg_cpu_use") + avgCPUUse := viper.GetFloat64("provider.gcp.avg_cpu_use") type args 
struct { resource resources.ComputeResource } @@ -90,7 +89,7 @@ func TestEstimateResource(t *testing.T) { Resource: &resourceGCPComputeBasic, Power: decimal.NewFromFloat(7.600784000).RoundFloor(10), CarbonEmissions: decimal.NewFromFloat(0.448446256).RoundFloor(10), - AverageCPUUsage: decimal.NewFromFloat(avg_cpu_use), + AverageCPUUsage: decimal.NewFromFloat(avgCPUUse), Count: decimal.NewFromInt(1), }, }, @@ -101,7 +100,7 @@ func TestEstimateResource(t *testing.T) { Resource: &resourceGCPComputeCPUType, Power: decimal.NewFromFloat(9.5565660741), CarbonEmissions: decimal.NewFromFloat(0.5638373983), - AverageCPUUsage: decimal.NewFromFloat(avg_cpu_use), + AverageCPUUsage: decimal.NewFromFloat(avgCPUUse), Count: decimal.NewFromInt(1), }, }, @@ -112,7 +111,7 @@ func TestEstimateResource(t *testing.T) { Resource: &resourceGCPInstanceGroup, Power: decimal.NewFromFloat(7.600784000).RoundFloor(10), CarbonEmissions: decimal.NewFromFloat(0.448446256).RoundFloor(10), - AverageCPUUsage: decimal.NewFromFloat(avg_cpu_use), + AverageCPUUsage: decimal.NewFromFloat(avgCPUUse), Count: decimal.NewFromInt(3), }, }, @@ -126,7 +125,7 @@ func TestEstimateResource(t *testing.T) { } func TestEstimateResourceKilo(t *testing.T) { - avg_cpu_use := viper.GetFloat64("provider.gcp.avg_cpu_use") + avgCPUUse := viper.GetFloat64("provider.gcp.avg_cpu_use") viper.Set("unit.carbon", "kg") viper.Set("unit.time", "m") type args struct { @@ -144,7 +143,7 @@ func TestEstimateResourceKilo(t *testing.T) { Resource: &resourceGCPComputeBasic, Power: decimal.NewFromFloat(5472.56448).RoundFloor(10), CarbonEmissions: decimal.NewFromFloat(232.4745391104).RoundFloor(10), - AverageCPUUsage: decimal.NewFromFloat(avg_cpu_use), + AverageCPUUsage: decimal.NewFromFloat(avgCPUUse), Count: decimal.NewFromInt(1), }, }, @@ -155,7 +154,7 @@ func TestEstimateResourceKilo(t *testing.T) { Resource: &resourceGCPComputeCPUType, Power: decimal.NewFromFloat(6880.7275733647).RoundFloor(10), CarbonEmissions: 
decimal.NewFromFloat(292.2933073165).RoundFloor(10), - AverageCPUUsage: decimal.NewFromFloat(avg_cpu_use), + AverageCPUUsage: decimal.NewFromFloat(avgCPUUse), Count: decimal.NewFromInt(1), }, }, @@ -211,7 +210,7 @@ func EqualsTotal(t *testing.T, expected *estimation.EstimationTotal, actual *est } func TestEstimateResources(t *testing.T) { - avg_cpu_use := viper.GetFloat64("provider.gcp.avg_cpu_use") + avgCPUUse := viper.GetFloat64("provider.gcp.avg_cpu_use") viper.Set("unit.carbon", "g") viper.Set("unit.time", "h") type args struct { @@ -223,25 +222,25 @@ func TestEstimateResources(t *testing.T) { Resource: &resourceGCPComputeBasic, Power: decimal.NewFromFloat(7.600784).Round(10), CarbonEmissions: decimal.NewFromFloat(0.448446256).Round(10), - AverageCPUUsage: decimal.NewFromFloat(avg_cpu_use), + AverageCPUUsage: decimal.NewFromFloat(avgCPUUse), Count: decimal.NewFromInt(1), }, { Resource: &resourceGCPComputeCPUType, Power: decimal.NewFromFloat(9.5565660741), CarbonEmissions: decimal.NewFromFloat(0.5638373983), - AverageCPUUsage: decimal.NewFromFloat(avg_cpu_use), + AverageCPUUsage: decimal.NewFromFloat(avgCPUUse), Count: decimal.NewFromInt(1), }, { Resource: &resourceGCPInstanceGroup, Power: decimal.NewFromFloat(7.600784).Round(10), CarbonEmissions: decimal.NewFromFloat(0.448446256).Round(10), - AverageCPUUsage: decimal.NewFromFloat(avg_cpu_use), + AverageCPUUsage: decimal.NewFromFloat(avgCPUUse), Count: decimal.NewFromInt(3), }, } - utils.SortEstimations(&expectedResources) + SortEstimations(&expectedResources) tests := []struct { name string @@ -278,7 +277,7 @@ func TestEstimateResources(t *testing.T) { assert.Equal(t, got.Info.UnitCarbonEmissionsTime, tt.want.Info.UnitCarbonEmissionsTime) assert.Equal(t, got.Info.UnitTime, tt.want.Info.UnitTime) assert.Equal(t, got.Info.UnitWattTime, tt.want.Info.UnitWattTime) - utils.SortEstimations(&got.Resources) + SortEstimations(&got.Resources) for i, gotResource := range got.Resources { wantResource := 
tt.want.Resources[i] EqualsEstimationResource(t, &wantResource, &gotResource) diff --git a/internal/estimate/estimation/estimation.go b/internal/estimate/estimation/estimation.go index abf16af..af79d7a 100644 --- a/internal/estimate/estimation/estimation.go +++ b/internal/estimate/estimation/estimation.go @@ -8,6 +8,7 @@ import ( "github.com/shopspring/decimal" ) +// EstimationReport is the struct that contains the estimation report type EstimationReport struct { Info EstimationInfo Resources []EstimationResource @@ -15,6 +16,7 @@ type EstimationReport struct { Total EstimationTotal } +// EstimationResource is the struct that contains the estimation of a resource type EstimationResource struct { Resource resources.Resource Power decimal.Decimal `json:"PowerPerInstance"` @@ -23,12 +25,14 @@ type EstimationResource struct { Count decimal.Decimal } +// EstimationTotal is the struct that contains the total estimation type EstimationTotal struct { Power decimal.Decimal CarbonEmissions decimal.Decimal ResourcesCount decimal.Decimal } +// EstimationInfo is the struct that contains the info of the estimation type EstimationInfo struct { UnitTime string UnitWattTime string @@ -37,6 +41,7 @@ type EstimationInfo struct { InfoByProvider map[providers.Provider]InfoByProvider } +// InfoByProvider is the struct that contains the info of the estimation by provider type InfoByProvider struct { AverageCPUUsage float64 AverageGPUUsage float64 diff --git a/internal/output/json.go b/internal/output/json.go index 3c76af8..e3de8b1 100644 --- a/internal/output/json.go +++ b/internal/output/json.go @@ -7,7 +7,8 @@ import ( log "github.com/sirupsen/logrus" ) -func GenerateReportJson(estimations estimation.EstimationReport) string { +// GenerateReportJSON generates a JSON report from an estimation report +func GenerateReportJSON(estimations estimation.EstimationReport) string { log.Debug("Generating JSON report") reportTextBytes, err := json.MarshalIndent(estimations, "", " ") diff --git 
a/internal/output/text.go b/internal/output/text.go index dcd428b..fe8e7ca 100644 --- a/internal/output/text.go +++ b/internal/output/text.go @@ -4,12 +4,13 @@ import ( "fmt" "strings" + "github.com/carboniferio/carbonifer/internal/estimate" "github.com/carboniferio/carbonifer/internal/estimate/estimation" - "github.com/carboniferio/carbonifer/internal/utils" "github.com/olekukonko/tablewriter" log "github.com/sirupsen/logrus" ) +// GenerateReportText generates a text report from an estimation report func GenerateReportText(report estimation.EstimationReport) string { log.Debug("Generating text report") tableString := &strings.Builder{} @@ -20,7 +21,7 @@ func GenerateReportText(report estimation.EstimationReport) string { // Default sort estimations := report.Resources - utils.SortEstimations(&estimations) + estimate.SortEstimations(&estimations) for _, resource := range report.Resources { table.Append([]string{ diff --git a/internal/plan/aws/Data.go b/internal/plan/aws/Data.go deleted file mode 100644 index 8a7d4a7..0000000 --- a/internal/plan/aws/Data.go +++ /dev/null @@ -1,79 +0,0 @@ -package aws - -import ( - "strconv" - - "github.com/carboniferio/carbonifer/internal/providers" - "github.com/carboniferio/carbonifer/internal/resources" - "github.com/carboniferio/carbonifer/internal/terraform/tfrefs" - tfjson "github.com/hashicorp/terraform-json" -) - -func GetDataResource(tfResource tfjson.StateResource) resources.DataResource { - resourceId := getDataResourceIdentification(tfResource) - if resourceId.ResourceType == "aws_ami" { - diskMappingI := tfResource.AttributeValues["block_device_mappings"] - if diskMappingI != nil { - diskMapping := diskMappingI.([]interface{}) - specs := make([]*resources.DataImageSpecs, len(diskMapping)) - for i, disk := range diskMapping { - ebs := disk.(map[string]interface{})["ebs"].(map[string]interface{}) - if ebs["volume_size"] != nil { - diskSizeGb, _ := strconv.ParseFloat(ebs["volume_size"].(string), 64) - volumeType := "" - if 
ebs["volume_type"] != nil { - volumeType = ebs["volume_type"].(string) - } - diskSpecs := resources.DataImageSpecs{ - DiskSizeGb: diskSizeGb, - DeviceName: disk.(map[string]interface{})["device_name"].(string), - VolumeType: volumeType, - } - specs[i] = &diskSpecs - } - } - return resources.EbsDataResource{ - Identification: resourceId, - DataImageSpecs: specs, - AwsId: tfResource.AttributeValues["id"].(string), - } - } - } - if resourceId.ResourceType == "aws_ebs_snapshot" { - diskSize := tfResource.AttributeValues["volume_size"] - diskSizeGb := diskSize.(float64) - return resources.EbsDataResource{ - Identification: resourceId, - DataImageSpecs: []*resources.DataImageSpecs{ - { - DiskSizeGb: diskSizeGb, - }, - }, - AwsId: tfResource.AttributeValues["id"].(string), - } - } - - return resources.DataImageResource{ - Identification: resourceId, - } -} - -func getDataResourceIdentification(resource tfjson.StateResource) *resources.ResourceIdentification { - - return &resources.ResourceIdentification{ - Name: resource.Name, - ResourceType: resource.Type, - Provider: providers.AWS, - } -} - -func getAwsImage(tfRefs *tfrefs.References, awsImageId string) *resources.EbsDataResource { - imageI := tfRefs.DataResources[awsImageId] - - var image *resources.EbsDataResource - if imageI != nil { - i := imageI.(resources.EbsDataResource) - image = &i - } - return image -} diff --git a/internal/plan/aws/Data_test.go b/internal/plan/aws/Data_test.go deleted file mode 100644 index 5fa1ca1..0000000 --- a/internal/plan/aws/Data_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/carboniferio/carbonifer/internal/providers" - "github.com/carboniferio/carbonifer/internal/resources" - tfjson "github.com/hashicorp/terraform-json" - "github.com/stretchr/testify/assert" -) - -func TestGetDataResource(t *testing.T) { - type args struct { - tfResource tfjson.StateResource - } - tests := []struct { - name string - args args - want resources.DataResource - 
}{ - { - name: "AMI with ebs 20 Gb", - args: args{ - tfResource: tfjson.StateResource{ - Address: "data.aws_ami.foo", - Type: "aws_ami", - Name: "foo", - AttributeValues: map[string]interface{}{ - "name": "foo", - "block_device_mappings": []interface{}{ - map[string]interface{}{ - "device_name": "/dev/sda1", - "ebs": map[string]interface{}{ - "volume_size": "20", - "volume_type": "gp2", - }, - }, - }, - "id": "ami-1234567890", - }, - }, - }, - want: resources.EbsDataResource{ - Identification: &resources.ResourceIdentification{ - Name: "foo", - ResourceType: "aws_ami", - Provider: providers.AWS, - }, - DataImageSpecs: []*resources.DataImageSpecs{ - { - DiskSizeGb: 20, - DeviceName: "/dev/sda1", - VolumeType: "gp2", - }, - }, - AwsId: "ami-1234567890", - }, - }, - { - name: "Snapshot of size 60 Gb", - args: args{ - tfResource: tfjson.StateResource{ - Address: "data.aws_ebs_snapshot.test_snapshot", - Type: "aws_ebs_snapshot", - Name: "test_snapshot", - AttributeValues: map[string]interface{}{ - "id": "snap-1234567890", - "volume_size": float64(60), - }, - }, - }, - want: resources.EbsDataResource{ - Identification: &resources.ResourceIdentification{ - Name: "test_snapshot", - ResourceType: "aws_ebs_snapshot", - Provider: providers.AWS, - }, - DataImageSpecs: []*resources.DataImageSpecs{ - { - DiskSizeGb: 60, - }, - }, - AwsId: "snap-1234567890", - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := GetDataResource(tt.args.tfResource) - assert.Equal(t, tt.want, got) - }) - } -} diff --git a/internal/plan/aws/EBS.go b/internal/plan/aws/EBS.go deleted file mode 100644 index 7763ed1..0000000 --- a/internal/plan/aws/EBS.go +++ /dev/null @@ -1,128 +0,0 @@ -package aws - -import ( - "strings" - - "github.com/carboniferio/carbonifer/internal/resources" - "github.com/carboniferio/carbonifer/internal/terraform/tfrefs" - tfjson "github.com/hashicorp/terraform-json" - "github.com/shopspring/decimal" - log "github.com/sirupsen/logrus" - 
"github.com/spf13/viper" -) - -type disk struct { - sizeGb int64 - isSSD bool - replicationFactor int32 -} - -func getDisk(resourceAddress string, diskBlock map[string]interface{}, isBootDisk bool, image *resources.EbsDataResource, trRefs *tfrefs.References) disk { - disk := disk{ - sizeGb: viper.GetInt64("provider.aws.boot_disk.size"), - isSSD: true, - replicationFactor: 1, - } - - // Get disk type - - diskType := viper.GetString("provider.aws.disk.type") - diskTypeI := diskBlock["volume_type"] - if diskTypeI == nil { - diskTypeI = diskBlock["type"] - } - if diskTypeI != nil { - diskType = diskTypeI.(string) - } else { - if image != nil && diskBlock["device_name"] != nil { - if strings.HasPrefix(image.Identification.ResourceType, "aws_ebs_snapshot") { - disk.sizeGb = int64(image.DataImageSpecs[0].DiskSizeGb) - } - if strings.HasPrefix(image.Identification.ResourceType, "aws_ami") { - for _, bd := range image.DataImageSpecs { - if bd != nil { - if strings.HasPrefix(bd.DeviceName, diskBlock["device_name"].(string)) { - diskType = bd.VolumeType - } - } - } - } - } - } - - disk.isSSD = IsSSD(diskType) - - // Get Disk size - declaredSize := diskBlock["volume_size"] - if declaredSize == nil { - declaredSize = diskBlock["size"] - } - if declaredSize == nil && diskBlock["snapshot_id"] != nil { - snapshotId := diskBlock["snapshot_id"].(string) - snapshot := getAwsImage(trRefs, snapshotId) - declaredSize = snapshot.DataImageSpecs[0].DiskSizeGb - } - if declaredSize == nil { - if image != nil { - // Case of snapshot, no device name - if strings.HasPrefix(image.Identification.ResourceType, "aws_ebs_snapshot") { - disk.sizeGb = int64(image.DataImageSpecs[0].DiskSizeGb) - } - // Case of ami, we use device name, except for boot disk - if strings.HasPrefix(image.Identification.ResourceType, "aws_ami") { - searchedDeviceName := "/dev/sda1" - if !isBootDisk { - searchedDeviceName = diskBlock["device_name"].(string) - } - for _, bd := range image.DataImageSpecs { - if bd != nil { - 
if strings.HasPrefix(bd.DeviceName, searchedDeviceName) { - disk.sizeGb = int64(bd.DiskSizeGb) - } - } - } - } - } else { - disk.sizeGb = viper.GetInt64("provider.aws.disk.size") - log.Warningf("%v : Disk size not declared. Please set it! (otherwise we assume %vsgb) ", resourceAddress, disk.sizeGb) - - } - } else { - disk.sizeGb = int64(declaredSize.(float64)) - } - return disk -} - -func IsSSD(diskType string) bool { - isSSD := false - if strings.HasPrefix(diskType, "gp") || strings.HasPrefix(diskType, "io") { - isSSD = true - } - return isSSD -} - -func getEbsVolume(tfResource tfjson.StateResource, tfRefs *tfrefs.References) *resources.ComputeResourceSpecs { - - // Get image if it comes from a snapshot - var image *resources.EbsDataResource - if tfResource.AttributeValues["snapshot_id"] != nil { - awsImageId := tfResource.AttributeValues["snapshot_id"].(string) - image = getAwsImage(tfRefs, awsImageId) - } - - // Get disk specifications - disk := getDisk(tfResource.Address, tfResource.AttributeValues, false, image, tfRefs) - hddSize := decimal.Zero - ssdSize := decimal.Zero - if disk.isSSD { - ssdSize = decimal.NewFromInt(disk.sizeGb) - } else { - hddSize = decimal.NewFromInt(disk.sizeGb) - } - computeResourceSpecs := resources.ComputeResourceSpecs{ - SsdStorage: ssdSize, - HddStorage: hddSize, - } - - return &computeResourceSpecs -} diff --git a/internal/plan/aws/EC2Instance.go b/internal/plan/aws/EC2Instance.go deleted file mode 100644 index 80cbeeb..0000000 --- a/internal/plan/aws/EC2Instance.go +++ /dev/null @@ -1,101 +0,0 @@ -package aws - -import ( - "strings" - - "github.com/carboniferio/carbonifer/internal/providers/aws" - "github.com/carboniferio/carbonifer/internal/resources" - "github.com/carboniferio/carbonifer/internal/terraform/tfrefs" - tfjson "github.com/hashicorp/terraform-json" - "github.com/shopspring/decimal" - log "github.com/sirupsen/logrus" - "github.com/spf13/viper" -) - -func getEC2Instance( - resource tfjson.StateResource, - tfRefs 
*tfrefs.References) *resources.ComputeResourceSpecs { - - instanceType := resource.AttributeValues["instance_type"].(string) - - awsInstanceType := aws.GetAWSInstanceType(instanceType) - - var disks []disk - - awsImageId := "" - if resource.AttributeValues["ami"] != nil { - awsImageId = resource.AttributeValues["ami"].(string) - } - if resource.AttributeValues["snapshot_id"] != nil { - awsImageId = resource.AttributeValues["snapshot_id"].(string) - } - - image := getAwsImage(tfRefs, awsImageId) - - // Root block device - bd, ok_rd := resource.AttributeValues["root_block_device"] - if ok_rd { - rootDevices := bd.([]interface{}) - for _, rootDevice := range rootDevices { - rootDisk := getDisk(resource.Address, rootDevice.(map[string]interface{}), true, image, tfRefs) - disks = append(disks, rootDisk) - } - } else { - if image != nil { - rootDisk := disk{ - sizeGb: int64(image.DataImageSpecs[0].DiskSizeGb), - isSSD: IsSSD(image.DataImageSpecs[0].VolumeType), - } - disks = append(disks, rootDisk) - } else { - // Default root device - rootDisk := disk{ - sizeGb: viper.GetInt64("provider.aws.disk.size"), - isSSD: true, - } - log.Warnf("No root device found for %s, using default root device of %vgb", resource.Address, viper.GetInt64("provider.aws.disk.size")) - disks = append(disks, rootDisk) - } - } - - // Elastic block devices - bd, ok_ebd := resource.AttributeValues["ebs_block_device"] - if ok_ebd { - ebds := bd.([]interface{}) - for _, blockDevice := range ebds { - blockDisk := getDisk(resource.Address, blockDevice.(map[string]interface{}), false, image, tfRefs) - disks = append(disks, blockDisk) - } - } - - // Ephemeral block devices - epbd, ok_epbd := resource.AttributeValues["ephemeral_block_device"] - if ok_epbd { - epbds := epbd.([]interface{}) - for range epbds { - instanceStorage := disk{ - sizeGb: int64(awsInstanceType.InstanceStorage.SizePerDiskGB), - isSSD: strings.ToLower(awsInstanceType.InstanceStorage.Type) == "ssd", - } - disks = append(disks, 
instanceStorage) - } - } - - hddSize := decimal.Zero - ssdSize := decimal.Zero - for _, disk := range disks { - if disk.isSSD { - ssdSize = ssdSize.Add(decimal.NewFromInt(disk.sizeGb)) - } else { - hddSize = hddSize.Add(decimal.NewFromInt(disk.sizeGb)) - } - } - - return &resources.ComputeResourceSpecs{ - VCPUs: awsInstanceType.VCPU, - MemoryMb: awsInstanceType.MemoryMb, - SsdStorage: ssdSize, - HddStorage: hddSize, - ReplicationFactor: 1, - } -} diff --git a/internal/plan/aws/ResourceIdentification.go b/internal/plan/aws/ResourceIdentification.go deleted file mode 100644 index 3c79ddc..0000000 --- a/internal/plan/aws/ResourceIdentification.go +++ /dev/null @@ -1,32 +0,0 @@ -package aws - -import ( - "fmt" - - "github.com/carboniferio/carbonifer/internal/providers" - "github.com/carboniferio/carbonifer/internal/resources" - "github.com/carboniferio/carbonifer/internal/terraform/tfrefs" - tfjson "github.com/hashicorp/terraform-json" -) - -func getResourceIdentification(resource tfjson.StateResource, tfRefs *tfrefs.References) *resources.ResourceIdentification { - region := resource.AttributeValues["region"] - if region == nil { - region = tfRefs.ProviderConfigs["region"] - } - - name := resource.Name - if resource.Index != nil { - name = fmt.Sprintf("%v[%v]", resource.Name, resource.Index) - } - - provider, _ := providers.ParseProvider(resource.ProviderName) - - return &resources.ResourceIdentification{ - Name: name, - ResourceType: resource.Type, - Provider: provider, - Region: fmt.Sprint(region), - Count: 1, - } -} diff --git a/internal/plan/aws/default_test.go b/internal/plan/aws/default_test.go deleted file mode 100644 index 7dd2bb7..0000000 --- a/internal/plan/aws/default_test.go +++ /dev/null @@ -1,245 +0,0 @@ -package aws - -import ( - "io" - "os" - "path" - "path/filepath" - "testing" - - "github.com/carboniferio/carbonifer/internal/terraform" - "github.com/carboniferio/carbonifer/internal/testutils" - _ 
"github.com/carboniferio/carbonifer/internal/testutils" - - "github.com/carboniferio/carbonifer/internal/utils" - tfjson "github.com/hashicorp/terraform-json" - "github.com/stretchr/testify/assert" -) - -func Test_getDefaultRegion_providerConstant(t *testing.T) { - awsConfigs := &tfjson.ProviderConfig{ - Name: "aws", - Expressions: map[string]*tfjson.Expression{ - "region": { - ExpressionData: &tfjson.ExpressionData{ - ConstantValue: "test1", - }, - }, - }, - } - - tfPlan := &tfjson.Plan{} - - region := getDefaultRegion(awsConfigs, tfPlan) - assert.Equal(t, "test1", *region) - -} - -func Test_getDefaultRegion_providerVariable(t *testing.T) { - awsConfigs := &tfjson.ProviderConfig{ - Name: "aws", - Expressions: map[string]*tfjson.Expression{ - "region": { - ExpressionData: &tfjson.ExpressionData{ - References: []string{"var.region"}, - }, - }, - }, - } - - tfPlan := &tfjson.Plan{ - Variables: map[string]*tfjson.PlanVariable{ - "region": { - Value: "test2", - }, - }, - } - - region := getDefaultRegion(awsConfigs, tfPlan) - assert.Equal(t, "test2", *region) - -} - -func Test_getDefaultRegion_EnvVar(t *testing.T) { - awsConfigs := &tfjson.ProviderConfig{ - Name: "aws", - Expressions: map[string]*tfjson.Expression{}, - } - - tfPlan := &tfjson.Plan{} - - t.Setenv("AWS_REGION", "test3") - - region := getDefaultRegion(awsConfigs, tfPlan) - assert.Equal(t, "test3", *region) - -} - -func Test_getDefaultRegion_EnvDefaultVar(t *testing.T) { - awsConfigs := &tfjson.ProviderConfig{ - Name: "aws", - Expressions: map[string]*tfjson.Expression{}, - } - - tfPlan := &tfjson.Plan{} - - t.Setenv("AWS_DEFAULT_REGION", "test4") - - region := getDefaultRegion(awsConfigs, tfPlan) - assert.Equal(t, "test4", *region) - -} - -func Test_getDefaultRegion_AWSConfigFile(t *testing.T) { - // Create a temporary directory - tmpDir := t.TempDir() - - // Create AWS config file - awsConfigFile := filepath.Join(tmpDir, "config") - f, err := os.Create(awsConfigFile) - if err != nil { - t.Fatal(err) - } - 
defer f.Close() - - _, err = io.WriteString(f, "[default]\nregion = region_from_config_file\n") - if err != nil { - t.Fatal(err) - } - - // Set the AWS_SDK_LOAD_CONFIG environment variable - t.Setenv("AWS_SDK_LOAD_CONFIG", "1") - t.Setenv("AWS_CONFIG_FILE", awsConfigFile) - - awsConfigs := &tfjson.ProviderConfig{ - Name: "aws", - Expressions: map[string]*tfjson.Expression{}, - } - - tfPlan := &tfjson.Plan{} - - region := getDefaultRegion(awsConfigs, tfPlan) - assert.Equal(t, "region_from_config_file", *region) - -} - -func Test_getDefaultRegion_ModuleOutput(t *testing.T) { - awsConfigs := &tfjson.ProviderConfig{ - Name: "aws", - Expressions: map[string]*tfjson.Expression{ - "region": { - ExpressionData: &tfjson.ExpressionData{ - References: []string{ - "module.module1.region_output", - "module.globals"}, - }, - }, - }, - } - - tfPlan := &tfjson.Plan{ - Config: &tfjson.Config{ - RootModule: &tfjson.ConfigModule{ - ModuleCalls: map[string]*tfjson.ModuleCall{ - "module1": { - Module: &tfjson.ConfigModule{ - Outputs: map[string]*tfjson.ConfigOutput{ - "region_output": { - Expression: &tfjson.Expression{ - ExpressionData: &tfjson.ExpressionData{ - References: []string{"var.region"}, - }, - }, - Description: "The AWS region to use for resources.", - Sensitive: false, - }, - }, - }, - }, - }, - }, - }, - Variables: map[string]*tfjson.PlanVariable{ - "region": { - Value: "region_from_module_output", - }, - }, - } - - region := getDefaultRegion(awsConfigs, tfPlan) - assert.Equal(t, "region_from_module_output", *region) -} - -func Test_getDefaultRegion_ModuleVariable(t *testing.T) { - awsConfigs := &tfjson.ProviderConfig{ - Name: "aws", - Expressions: map[string]*tfjson.Expression{ - "region": { - ExpressionData: &tfjson.ExpressionData{ - References: []string{"module.globals.common_region"}, - }, - }, - }, - } - - tfPlan := &tfjson.Plan{ - Config: &tfjson.Config{ - RootModule: &tfjson.ConfigModule{ - ModuleCalls: map[string]*tfjson.ModuleCall{ - "globals": { - Module: 
&tfjson.ConfigModule{ - Outputs: map[string]*tfjson.ConfigOutput{ - "common_region": { - Expression: &tfjson.Expression{ - ExpressionData: &tfjson.ExpressionData{ - References: []string{"var.region"}, - }, - }, - Description: "The AWS region to use for resources.", - }, - }, - Variables: map[string]*tfjson.ConfigVariable{ - "region": { - Default: "region_module_variable", - }, - }, - }, - }, - }, - }, - }, - } - - region := getDefaultRegion(awsConfigs, tfPlan) - assert.Equal(t, "region_module_variable", *region) -} - -func TestGetValueOfExpression_ModuleCalls(t *testing.T) { - plan := utils.LoadPlan("test/terraform/planJson/plan_with_module_calls.json") // Replace with the path to your plan JSON - expr := &tfjson.Expression{ - ExpressionData: &tfjson.ExpressionData{ - References: []string{"module.module2.module1_region"}, - }, - } - - value, err := utils.GetValueOfExpression(expr, plan) - assert.NoError(t, err) - assert.Equal(t, "region_from_module_calls", value) -} - -func TestGetValueOfExpression_ModuleLocalVar(t *testing.T) { - terraform.ResetTerraformExec() - wd := path.Join(testutils.RootDir, "test/terraform/gcp_calling_module") - - plan, err := terraform.CarboniferPlan(wd) // Replace with the path to your plan JSON - assert.NoError(t, err) - expr := &tfjson.Expression{ - ExpressionData: &tfjson.ExpressionData{ - References: []string{"module.globals.common_region"}, - }, - } - - value, err := utils.GetValueOfExpression(expr, plan) - assert.NoError(t, err) - assert.Equal(t, "local_module_region", value) -} diff --git a/internal/plan/aws/resources.go b/internal/plan/aws/resources.go deleted file mode 100644 index b240311..0000000 --- a/internal/plan/aws/resources.go +++ /dev/null @@ -1,32 +0,0 @@ -package aws - -import ( - "github.com/carboniferio/carbonifer/internal/resources" - "github.com/carboniferio/carbonifer/internal/terraform/tfrefs" - tfjson "github.com/hashicorp/terraform-json" -) - -func GetResource( - tfResource tfjson.StateResource, - tfRefs 
*tfrefs.References) resources.Resource { - - resourceId := getResourceIdentification(tfResource, tfRefs) - if resourceId.ResourceType == "aws_instance" { - specs := getEC2Instance(tfResource, tfRefs) - return resources.ComputeResource{ - Identification: resourceId, - Specs: specs, - } - } - if resourceId.ResourceType == "aws_ebs_volume" { - specs := getEbsVolume(tfResource, tfRefs) - return resources.ComputeResource{ - Identification: resourceId, - Specs: specs, - } - } - - return resources.UnsupportedResource{ - Identification: resourceId, - } -} diff --git a/internal/plan/aws/resources_test.go b/internal/plan/aws/resources_test.go deleted file mode 100644 index 5420070..0000000 --- a/internal/plan/aws/resources_test.go +++ /dev/null @@ -1,279 +0,0 @@ -package aws - -import ( - "fmt" - "testing" - - "github.com/carboniferio/carbonifer/internal/providers" - "github.com/carboniferio/carbonifer/internal/resources" - "github.com/carboniferio/carbonifer/internal/terraform/tfrefs" - _ "github.com/carboniferio/carbonifer/internal/testutils" - tfjson "github.com/hashicorp/terraform-json" - "github.com/shopspring/decimal" - "github.com/stretchr/testify/assert" -) - -var defaultMachine tfjson.StateResource = tfjson.StateResource{ - Address: "aws_instance.foo", - Type: "aws_instance", - Name: "foo", - AttributeValues: map[string]interface{}{ - "name": "foo", - "instance_type": "t2.micro", - }, -} - -var machineWithDefaultRootDisk tfjson.StateResource = tfjson.StateResource{ - Address: "aws_instance.foo", - Type: "aws_instance", - Name: "machineWithDefaultRootDisk", - AttributeValues: map[string]interface{}{ - "name": "machineWithDefaultRootDisk", - "instance_type": "t2.micro", - "root_block_device": []interface{}{ - map[string]interface{}{ - "delete_on_termination": true, - }, - }, - }, -} - -var machineWithRootDiskSize tfjson.StateResource = tfjson.StateResource{ - Address: "aws_instance.foo", - Type: "aws_instance", - Name: "machineWithRootDiskSize", - AttributeValues: 
map[string]interface{}{ - "name": "machineWithRootDiskSize", - "instance_type": "t2.micro", - "root_block_device": []interface{}{ - map[string]interface{}{ - "delete_on_termination": true, - "volume_size": float64(20), - }, - }, - }, -} - -var machineWithEBSSize tfjson.StateResource = tfjson.StateResource{ - Address: "aws_instance.foo", - Type: "aws_instance", - Name: "machineWithEBSSize", - AttributeValues: map[string]interface{}{ - "name": "machineWithEBSSize", - "instance_type": "t2.micro", - "ebs_block_device": []interface{}{ - map[string]interface{}{ - "delete_on_termination": true, - "volume_size": float64(50), - "volume_type": "st1", - }, - }, - }, -} - -var machineWithEBSSizeAndEphemeral tfjson.StateResource = tfjson.StateResource{ - Address: "aws_instance.foo", - Type: "aws_instance", - Name: "machineWithEBSSizeAndEphemeral", - AttributeValues: map[string]interface{}{ - "name": "machineWithEBSSizeAndEphemeral", - "instance_type": "c5d.12xlarge", - "ebs_block_device": []interface{}{ - map[string]interface{}{ - "delete_on_termination": true, - "volume_size": float64(50), - "volume_type": "st1", - }, - }, - "ephemeral_block_device": []interface{}{ - map[string]interface{}{ - "device_name": "ephemeral0", - }, - map[string]interface{}{ - "device_name": "ephemeral1", - }, - }, - }, -} - -var machineWithEbsFromSnapshotSizeSpecified tfjson.StateResource = tfjson.StateResource{ - Address: "aws_instance.foo", - Type: "aws_instance", - Name: "machineWithEbsFromSnapshotSizeNotSpecified", - AttributeValues: map[string]interface{}{ - "name": "machineWithEbsFromSnapshotSizeNotSpecified", - "instance_type": "t2.micro", - "ebs_block_device": []interface{}{ - map[string]interface{}{ - "delete_on_termination": true, - "snapshot_id": "snap-1234567890", - "volume_type": "st1", - "volume_size": float64(50), - }, - }, - }, -} - -var tfRefs *tfrefs.References = &tfrefs.References{ - ProviderConfigs: map[string]string{ - "region": "eu-west-3", - }, -} - -func TestGetResource(t 
*testing.T) { - type args struct { - tfResource tfjson.StateResource - tfRefs *tfrefs.References - } - tests := []struct { - name string - args args - want resources.Resource - }{ - { - name: "aws_instance", - args: args{ - tfResource: defaultMachine, - tfRefs: tfRefs, - }, - want: resources.ComputeResource{ - Identification: &resources.ResourceIdentification{ - Name: "foo", - ResourceType: "aws_instance", - Provider: providers.AWS, - Region: "eu-west-3", - Count: 1, - }, - Specs: &resources.ComputeResourceSpecs{ - VCPUs: int32(1), - MemoryMb: int32(1024), - ReplicationFactor: 1, - HddStorage: decimal.Zero, - SsdStorage: decimal.NewFromInt(8), - }, - }, - }, - { - name: "aws_instance with default root disk", - args: args{ - tfResource: machineWithDefaultRootDisk, - tfRefs: tfRefs, - }, - want: resources.ComputeResource{ - Identification: &resources.ResourceIdentification{ - Name: "machineWithDefaultRootDisk", - ResourceType: "aws_instance", - Provider: providers.AWS, - Region: "eu-west-3", - Count: 1, - }, - Specs: &resources.ComputeResourceSpecs{ - VCPUs: int32(1), - MemoryMb: int32(1024), - ReplicationFactor: 1, - HddStorage: decimal.Zero, - SsdStorage: decimal.NewFromInt(8), - }, - }, - }, - { - name: "aws_instance with root disk size", - args: args{ - tfResource: machineWithRootDiskSize, - tfRefs: tfRefs, - }, - want: resources.ComputeResource{ - Identification: &resources.ResourceIdentification{ - Name: "machineWithRootDiskSize", - ResourceType: "aws_instance", - Provider: providers.AWS, - Region: "eu-west-3", - Count: 1, - }, - Specs: &resources.ComputeResourceSpecs{ - VCPUs: int32(1), - MemoryMb: int32(1024), - ReplicationFactor: 1, - HddStorage: decimal.Zero, - SsdStorage: decimal.NewFromInt(20), - }, - }, - }, - { - name: "aws_instance with ebs hdd disk size", - args: args{ - tfResource: machineWithEBSSize, - tfRefs: tfRefs, - }, - want: resources.ComputeResource{ - Identification: &resources.ResourceIdentification{ - Name: "machineWithEBSSize", - 
ResourceType: "aws_instance", - Provider: providers.AWS, - Region: "eu-west-3", - Count: 1, - }, - Specs: &resources.ComputeResourceSpecs{ - VCPUs: int32(1), - MemoryMb: int32(1024), - ReplicationFactor: 1, - HddStorage: decimal.NewFromInt(50), - SsdStorage: decimal.NewFromInt(8), - }, - }, - }, - { - name: "aws_instance with ebs hdd disk size and ephemeral", - args: args{ - tfResource: machineWithEBSSizeAndEphemeral, - tfRefs: tfRefs, - }, - want: resources.ComputeResource{ - Identification: &resources.ResourceIdentification{ - Name: "machineWithEBSSizeAndEphemeral", - ResourceType: "aws_instance", - Provider: providers.AWS, - Region: "eu-west-3", - Count: 1, - }, - Specs: &resources.ComputeResourceSpecs{ - VCPUs: int32(48), - MemoryMb: int32(98304), - ReplicationFactor: 1, - HddStorage: decimal.NewFromInt(50), - SsdStorage: decimal.NewFromInt(1808), - }, - }, - }, - { - name: "aws_instance with ebs from snapshot size specified", - args: args{ - tfResource: machineWithEbsFromSnapshotSizeSpecified, - tfRefs: tfRefs, - }, - want: resources.ComputeResource{ - Identification: &resources.ResourceIdentification{ - Name: "machineWithEbsFromSnapshotSizeNotSpecified", - ResourceType: "aws_instance", - Provider: providers.AWS, - Region: "eu-west-3", - Count: 1, - }, - Specs: &resources.ComputeResourceSpecs{ - VCPUs: int32(1), - MemoryMb: int32(1024), - ReplicationFactor: 1, - HddStorage: decimal.NewFromInt(50), - SsdStorage: decimal.NewFromInt(8), - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := GetResource(tt.args.tfResource, tt.args.tfRefs) - fmt.Println("Name", got.(resources.ComputeResource).Identification.Name) - assert.Equal(t, tt.want, got) - }) - } -} diff --git a/internal/plan/aws/default.go b/internal/plan/defaults.go similarity index 52% rename from internal/plan/aws/default.go rename to internal/plan/defaults.go index 0cac48f..65faf0f 100644 --- a/internal/plan/aws/default.go +++ b/internal/plan/defaults.go @@ -1,36 
+1,16 @@ -package aws +package plan import ( "os" - "github.com/carboniferio/carbonifer/internal/terraform/tfrefs" - "github.com/carboniferio/carbonifer/internal/utils" - tfjson "github.com/hashicorp/terraform-json" log "github.com/sirupsen/logrus" "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/session" ) -func GetDefaults(awsConfig *tfjson.ProviderConfig, tfPlan *tfjson.Plan, terraformRefs *tfrefs.References) { - log.Debugf("Reading provider config %v", awsConfig.Name) - - region := getDefaultRegion(awsConfig, tfPlan) - if region != nil { - terraformRefs.ProviderConfigs["region"] = *region - } -} - -func getDefaultRegion(awsConfig *tfjson.ProviderConfig, tfPlan *tfjson.Plan) *string { +func getDefaultRegion() *string { var region interface{} - regionExpr := awsConfig.Expressions["region"] - if regionExpr != nil { - var err error - region, err = utils.GetValueOfExpression(regionExpr, tfPlan) - if err != nil { - log.Fatalf("Error getting region from provider config %v", err) - } - } if region == nil { if os.Getenv("AWS_DEFAULT_REGION") != "" { region = os.Getenv("AWS_DEFAULT_REGION") @@ -65,6 +45,9 @@ func getDefaultRegion(awsConfig *tfjson.ProviderConfig, tfPlan *tfjson.Plan) *st if ok { return regionPtr } - regionString := region.(string) + regionString, ok := region.(string) + if !ok { + return nil + } return ®ionString } diff --git a/internal/plan/disk_type.go b/internal/plan/disk_type.go new file mode 100644 index 0000000..074505e --- /dev/null +++ b/internal/plan/disk_type.go @@ -0,0 +1,17 @@ +package plan + +import "fmt" + +// ENUM(SSD, HDD) +// +//go:generate go-enum --nocase --noprefix --marshal +type DiskType int + +// UnsupportedDiskTypeError is an error that occurs when a disk type is not supported +type UnsupportedDiskTypeError struct { + DiskType string +} + +func (upe *UnsupportedDiskTypeError) Error() string { + return fmt.Sprintf("Unsupported Disk Type: %v", upe.DiskType) +} diff --git a/internal/plan/disk_type_enum.go 
b/internal/plan/disk_type_enum.go new file mode 100644 index 0000000..cfe8f90 --- /dev/null +++ b/internal/plan/disk_type_enum.go @@ -0,0 +1,73 @@ +// Code generated by go-enum DO NOT EDIT. +// Version: +// Revision: +// Build Date: +// Built By: + +package plan + +import ( + "fmt" + "strings" + + "github.com/pkg/errors" +) + +const ( + // SSD is a DiskType of type SSD. + SSD DiskType = iota + // HDD is a DiskType of type HDD. + HDD +) + +var ErrInvalidDiskType = errors.New("not a valid DiskType") + +const _DiskTypeName = "SSDHDD" + +var _DiskTypeMap = map[DiskType]string{ + SSD: _DiskTypeName[0:3], + HDD: _DiskTypeName[3:6], +} + +// String implements the Stringer interface. +func (x DiskType) String() string { + if str, ok := _DiskTypeMap[x]; ok { + return str + } + return fmt.Sprintf("DiskType(%d)", x) +} + +var _DiskTypeValue = map[string]DiskType{ + _DiskTypeName[0:3]: SSD, + strings.ToLower(_DiskTypeName[0:3]): SSD, + _DiskTypeName[3:6]: HDD, + strings.ToLower(_DiskTypeName[3:6]): HDD, +} + +// ParseDiskType attempts to convert a string to a DiskType. +func ParseDiskType(name string) (DiskType, error) { + if x, ok := _DiskTypeValue[name]; ok { + return x, nil + } + // Case insensitive parse, do a separate lookup to prevent unnecessary cost of lowercasing a string if we don't need to. + if x, ok := _DiskTypeValue[strings.ToLower(name)]; ok { + return x, nil + } + return DiskType(0), fmt.Errorf("%s is %w", name, ErrInvalidDiskType) +} + +// MarshalText implements the text marshaller method. +func (x DiskType) MarshalText() ([]byte, error) { + return []byte(x.String()), nil +} + +// UnmarshalText implements the text unmarshaller method. 
+func (x *DiskType) UnmarshalText(text []byte) error { + name := string(text) + tmp, err := ParseDiskType(name) + if err != nil { + return err + } + *x = tmp + return nil +} diff --git a/internal/plan/expression.go b/internal/plan/expression.go new file mode 100644 index 0000000..0bff723 --- /dev/null +++ b/internal/plan/expression.go @@ -0,0 +1,39 @@ +package plan + +import ( + "github.com/carboniferio/carbonifer/internal/terraform" + "github.com/pkg/errors" +) + +func getValueOfExpression(expression map[string]interface{}, context *tfContext) (interface{}, error) { + + if expression["constant_value"] != nil { + // It's a known value, return it as is + return expression["constant_value"], nil + } + if expression["references"] == nil { + return nil, errors.Errorf("No references found in expression: %v", expression) + } + + references, ok := expression["references"].([]interface{}) + if !ok { + return nil, errors.Errorf("References is not an array: %v : %T", expression["references"], expression["references"]) + } + + for _, reference := range references { + reference, ok := reference.(string) + if !ok { + return nil, errors.Errorf("Reference is not a string: %v : %T", reference, reference) + } + + valueFromConsole, err := terraform.RunTerraformConsole(reference) + if err != nil { + continue + } + if valueFromConsole != nil && *valueFromConsole != "" { + return *valueFromConsole, nil + } + + } + return nil, errors.New("no value found for expression") +} diff --git a/internal/plan/gcp/ComputeDiskResource.go b/internal/plan/gcp/ComputeDiskResource.go deleted file mode 100644 index 26ed1d8..0000000 --- a/internal/plan/gcp/ComputeDiskResource.go +++ /dev/null @@ -1,116 +0,0 @@ -package gcp - -import ( - "github.com/carboniferio/carbonifer/internal/resources" - "github.com/carboniferio/carbonifer/internal/terraform/tfrefs" - tfjson "github.com/hashicorp/terraform-json" - "github.com/shopspring/decimal" - log "github.com/sirupsen/logrus" - "github.com/spf13/viper" -) - 
-func getComputeDiskResourceSpecs( - resource tfjson.StateResource, - tfRefs *tfrefs.References) *resources.ComputeResourceSpecs { - - disk := getDisk(resource.Address, resource.AttributeValues, false, tfRefs) - hddSize := decimal.Zero - ssdSize := decimal.Zero - if disk.isSSD { - ssdSize = ssdSize.Add(decimal.NewFromFloat(disk.sizeGb)) - } else { - hddSize = hddSize.Add(decimal.NewFromFloat(disk.sizeGb)) - } - return &resources.ComputeResourceSpecs{ - SsdStorage: ssdSize, - HddStorage: hddSize, - ReplicationFactor: disk.replicationFactor, - } -} - -type disk struct { - sizeGb float64 - isSSD bool - replicationFactor int32 -} - -func getBootDisk(resourceAddress string, bootDiskBlock map[string]interface{}, tfRefs *tfrefs.References) disk { - var disk disk - initParams := bootDiskBlock["initialize_params"] - for _, iP := range initParams.([]interface{}) { - initParam := iP.(map[string]interface{}) - disk = getDisk(resourceAddress, initParam, true, tfRefs) - - } - return disk -} - -func getDisk(resourceAddress string, diskBlock map[string]interface{}, isBootDiskParam bool, tfRefs *tfrefs.References) disk { - disk := disk{ - sizeGb: viper.GetFloat64("provider.gcp.boot_disk.size"), - isSSD: true, - replicationFactor: 1, - } - - // Is Boot disk - isBootDisk := isBootDiskParam - isBootDiskI := diskBlock["boot"] - if isBootDiskI != nil { - isBootDisk = isBootDiskI.(bool) - } - - // Get disk type - var diskType string - diskTypeExpr := diskBlock["type"] - if diskTypeExpr == nil { - diskTypeExpr = diskBlock["disk_type"] - } - if diskTypeExpr == nil { - if isBootDisk { - diskType = viper.GetString("provider.gcp.boot_disk.type") - } else { - diskType = viper.GetString("provider.gcp.disk.type") - } - } else { - diskType = diskTypeExpr.(string) - } - - if diskType == "pd-standard" { - disk.isSSD = false - } - - // Get Disk size - declaredSize := diskBlock["size"] - if declaredSize == nil { - declaredSize = diskBlock["disk_size_gb"] - } - if declaredSize == nil { - if isBootDisk 
{ - disk.sizeGb = viper.GetFloat64("provider.gcp.boot_disk.size") - } else { - disk.sizeGb = viper.GetFloat64("provider.gcp.disk.size") - } - diskImageLink := diskBlock["image"] - if diskImageLink != nil { - image, ok := (tfRefs.DataResources)[diskImageLink.(string)] - if ok { - disk.sizeGb = (image.(resources.DataImageResource)).DataImageSpecs[0].DiskSizeGb - } else { - log.Warningf("%v : Disk image does not have a size declared, considering it default to be 10Gb ", resourceAddress) - } - } else { - log.Warningf("%v : Boot disk size not declared. Please set it! (otherwise we assume 10gb) ", resourceAddress) - - } - } else { - disk.sizeGb = declaredSize.(float64) - } - - replicaZones := diskBlock["replica_zones"] - if replicaZones != nil { - rz := replicaZones.([]interface{}) - disk.replicationFactor = int32(len(rz)) - } - - return disk -} diff --git a/internal/plan/gcp/ComputeResource.go b/internal/plan/gcp/ComputeResource.go deleted file mode 100644 index 611001d..0000000 --- a/internal/plan/gcp/ComputeResource.go +++ /dev/null @@ -1,91 +0,0 @@ -package gcp - -import ( - "github.com/carboniferio/carbonifer/internal/providers/gcp" - "github.com/carboniferio/carbonifer/internal/resources" - "github.com/carboniferio/carbonifer/internal/terraform/tfrefs" - tfjson "github.com/hashicorp/terraform-json" - "github.com/shopspring/decimal" -) - -func getComputeResourceSpecs( - resource tfjson.StateResource, - tfRefs *tfrefs.References, groupZone interface{}) *resources.ComputeResourceSpecs { - - machine_type := resource.AttributeValues["machine_type"].(string) - var zone string - if groupZone != nil { - zone = groupZone.(string) - } else { - zone = resource.AttributeValues["zone"].(string) - } - - machineType := gcp.GetGCPMachineType(machine_type, zone) - CPUType, ok := resource.AttributeValues["cpu_platform"].(string) - if !ok { - CPUType = "" - } - - var disks []disk - bd, ok_bd := resource.AttributeValues["boot_disk"] - if ok_bd { - bootDisks := bd.([]interface{}) - for 
_, bootDiskBlock := range bootDisks { - bootDisk := getBootDisk(resource.Address, bootDiskBlock.(map[string]interface{}), tfRefs) - disks = append(disks, bootDisk) - } - } - - diskListI, ok_disks := resource.AttributeValues["disk"] - if ok_disks { - diskList := diskListI.([]interface{}) - for _, diskBlock := range diskList { - disk := getDisk(resource.Address, diskBlock.(map[string]interface{}), false, tfRefs) - disks = append(disks, disk) - } - } - - sd, ok_sd := resource.AttributeValues["scratch_disk"] - if ok_sd { - scratchDisks := sd.([]interface{}) - for range scratchDisks { - // Each scratch disk is 375GB - // source: https://cloud.google.com/compute/docs/disks#localssds - disks = append(disks, disk{isSSD: true, sizeGb: 375}) - } - } - - hddSize := decimal.Zero - ssdSize := decimal.Zero - for _, disk := range disks { - if disk.isSSD { - ssdSize = ssdSize.Add(decimal.NewFromFloat(disk.sizeGb)) - } else { - hddSize = hddSize.Add(decimal.NewFromFloat(disk.sizeGb)) - } - } - - gpus := machineType.GPUTypes - gasI, ok := resource.AttributeValues["guest_accelerator"] - if ok { - guestAccelerators := gasI.([]interface{}) - for _, gaI := range guestAccelerators { - ga := gaI.(map[string]interface{}) - gpuCount := ga["count"].(float64) - gpuType := ga["type"].(string) - for i := float64(0); i < gpuCount; i++ { - gpus = append(gpus, gpuType) - } - } - } - - return &resources.ComputeResourceSpecs{ - GpuTypes: gpus, - VCPUs: machineType.Vcpus, - MemoryMb: machineType.MemoryMb, - CPUType: CPUType, - SsdStorage: ssdSize, - HddStorage: hddSize, - ReplicationFactor: 1, - } -} diff --git a/internal/plan/gcp/ComputeResourceFromTemplate.go b/internal/plan/gcp/ComputeResourceFromTemplate.go deleted file mode 100644 index e53c53f..0000000 --- a/internal/plan/gcp/ComputeResourceFromTemplate.go +++ /dev/null @@ -1,72 +0,0 @@ -package gcp - -import ( - "strings" - - "github.com/carboniferio/carbonifer/internal/resources" - 
"github.com/carboniferio/carbonifer/internal/terraform/tfrefs" - tfjson "github.com/hashicorp/terraform-json" - log "github.com/sirupsen/logrus" -) - -func getComputeResourceFromTemplateSpecs( - tfResource tfjson.StateResource, - tfRefs *tfrefs.References) *resources.ComputeResourceSpecs { - - // Get template of instance - specs := getTemplateSpecs(tfResource, tfRefs) - if specs != nil { - return specs - } - return nil - -} - -func getTemplateSpecs( - tfResource tfjson.StateResource, - tfRefs *tfrefs.References) *resources.ComputeResourceSpecs { - - // Find google_compute_instance_from_template resourceConfig - iftConfig := (tfRefs.ResourceConfigs)[tfResource.Address] - - var template *tfjson.StateResource - sourceTemplateExpr := iftConfig.Expressions["source_instance_template"] - if sourceTemplateExpr != nil { - references := sourceTemplateExpr.References - for _, reference := range references { - if !strings.HasSuffix(reference, ".id") { - template = (tfRefs.ResourceReferences)[reference] - break - } - } - } - - if template != nil { - var zones []string - zoneAttr := tfResource.AttributeValues["zone"] - if zoneAttr != nil { - zones = append(zones, zoneAttr.(string)) - } - distributionPolicyZonesI := tfResource.AttributeValues["distribution_policy_zones"] - if distributionPolicyZonesI != nil { - distributionPolicyZones := distributionPolicyZonesI.([]interface{}) - for _, z := range distributionPolicyZones { - zones = append(zones, z.(string)) - } - } - - if len(zones) == 0 { - log.Fatalf("No zone or distribution policy declared for %v", tfResource.Address) - } - templateResource := GetResourceTemplate(*template, tfRefs, zones[0]) - computeTemplate, ok := templateResource.(resources.ComputeResource) - if ok { - return computeTemplate.Specs - } else { - log.Fatalf("Type mismatch, not a esources.ComputeResource template %v", computeTemplate.GetAddress()) - } - } else { - log.Fatalf("Cannot find template of %v", tfResource.Address) - } - return nil -} diff --git 
a/internal/plan/gcp/GCPData.go b/internal/plan/gcp/GCPData.go deleted file mode 100644 index 60072d2..0000000 --- a/internal/plan/gcp/GCPData.go +++ /dev/null @@ -1,53 +0,0 @@ -package gcp - -import ( - "fmt" - "strings" - - "github.com/carboniferio/carbonifer/internal/providers" - "github.com/carboniferio/carbonifer/internal/resources" - tfjson "github.com/hashicorp/terraform-json" -) - -func GetDataResource(tfResource tfjson.StateResource) resources.DataResource { - resourceId := getDataResourceIdentification(tfResource) - if resourceId.ResourceType == "google_compute_image" { - diskSize := tfResource.AttributeValues["disk_size_gb"] - diskSizeGb, ok := diskSize.(float64) - specs := resources.DataImageSpecs{ - DiskSizeGb: diskSizeGb, - } - if ok { - return resources.DataImageResource{ - Identification: resourceId, - DataImageSpecs: []*resources.DataImageSpecs{&specs}, - } - } - } - return resources.DataImageResource{ - Identification: resourceId, - } -} - -func getDataResourceIdentification(resource tfjson.StateResource) *resources.ResourceIdentification { - region := resource.AttributeValues["region"] - if region == nil { - if resource.AttributeValues["zone"] != nil { - zone := resource.AttributeValues["zone"].(string) - region = strings.Join(strings.Split(zone, "-")[:2], "-") - } else if resource.AttributeValues["replica_zones"] != nil { - replica_zones := resource.AttributeValues["replica_zones"].([]interface{}) - // should be all in the same region - region = strings.Join(strings.Split(replica_zones[0].(string), "-")[:2], "-") - } else { - region = "" - } - } - - return &resources.ResourceIdentification{ - Name: resource.Name, - ResourceType: resource.Type, - Provider: providers.GCP, - Region: fmt.Sprint(region), - } -} diff --git a/internal/plan/gcp/GCPData_test.go b/internal/plan/gcp/GCPData_test.go deleted file mode 100644 index 3a79ad5..0000000 --- a/internal/plan/gcp/GCPData_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package gcp - -import ( - "testing" - - 
"github.com/carboniferio/carbonifer/internal/providers" - "github.com/carboniferio/carbonifer/internal/resources" - _ "github.com/carboniferio/carbonifer/internal/testutils" - tfjson "github.com/hashicorp/terraform-json" - "github.com/stretchr/testify/assert" -) - -var imageResource tfjson.StateResource = tfjson.StateResource{ - Address: "data.google_compute_image.debian", - Mode: "data", - Type: "google_compute_image", - Name: "debian", - AttributeValues: map[string]interface{}{ - "name": "debian-11-bullseye-v20221206", - "self_link": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-11-bullseye-v20221206", - "disk_size_gb": float64(10), - }, -} - -func TestGetDataResource(t *testing.T) { - type args struct { - tfResource tfjson.StateResource - } - tests := []struct { - name string - args args - want resources.DataResource - }{ - { - name: "existing", - args: args{ - tfResource: imageResource, - }, - want: resources.DataImageResource{ - Identification: &resources.ResourceIdentification{ - Name: "debian", - ResourceType: "google_compute_image", - Provider: providers.GCP}, - DataImageSpecs: []*resources.DataImageSpecs{ - { - DiskSizeGb: float64(10), - }, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := GetDataResource(tt.args.tfResource) - assert.Equal(t, tt.want, got) - }) - } -} diff --git a/internal/plan/gcp/ManagedInstanceGroupResource.go b/internal/plan/gcp/ManagedInstanceGroupResource.go deleted file mode 100644 index 2fac3f1..0000000 --- a/internal/plan/gcp/ManagedInstanceGroupResource.go +++ /dev/null @@ -1,129 +0,0 @@ -package gcp - -import ( - "strings" - - "github.com/carboniferio/carbonifer/internal/resources" - "github.com/carboniferio/carbonifer/internal/terraform/tfrefs" - tfjson "github.com/hashicorp/terraform-json" - "github.com/shopspring/decimal" - log "github.com/sirupsen/logrus" - "github.com/spf13/viper" -) - -func getComputeInstanceGroupManagerSpecs( - tfResource 
tfjson.StateResource, - tfRefs *tfrefs.References) (*resources.ComputeResourceSpecs, int64) { - - // Get template of instance - specs, targetSize := getGroupInstanceTemplateSpecs(tfResource, tfRefs) - if specs == nil { - return specs, targetSize - } - - // Get targetSize from autoscaler if exists - var autoscaler *tfjson.StateResource - for _, resourceConfig := range tfRefs.ResourceConfigs { - if resourceConfig.Type == "google_compute_autoscaler" { - targetExpr := (*resourceConfig).Expressions["target"] - if targetExpr != nil { - for _, target := range (*targetExpr).References { - if target == tfResource.Address { - autoscaler = (tfRefs.ResourceReferences)[resourceConfig.Address] - break - } - } - if autoscaler != nil { - break - } - } - } - } - if autoscaler != nil { - targetSize = getTargetSizeFromAutoscaler(autoscaler, &tfRefs.ResourceConfigs, tfResource, &tfRefs.ResourceReferences, targetSize) - } - - return specs, targetSize -} - -func getTargetSizeFromAutoscaler(autoscaler *tfjson.StateResource, resourceConfigs *map[string]*tfjson.ConfigResource, tfResource tfjson.StateResource, resourceReferences *map[string]*tfjson.StateResource, targetSizeOfTemplate int64) int64 { - - targetSize := targetSizeOfTemplate - autoscalingPoliciesI := autoscaler.AttributeValues["autoscaling_policy"] - if autoscalingPoliciesI != nil { - for _, autoscalingPolicyI := range autoscalingPoliciesI.([]interface{}) { - autoscalingPolicy := autoscalingPolicyI.(map[string]interface{}) - minSize := autoscalingPolicy["min_replicas"] - if minSize == nil { - minSize = 0 - } - maxSize := autoscalingPolicy["max_replicas"] - if maxSize == nil { - maxSize = 0 - } - targetSize = computeTargetSize(decimal.NewFromFloat(minSize.(float64)), decimal.NewFromFloat(maxSize.(float64))) - } - } - - return targetSize -} - -func computeTargetSize(minSize decimal.Decimal, maxSize decimal.Decimal) int64 { - avgAutoscalerSizePercent := 
decimal.NewFromFloat(viper.GetFloat64("provider.gcp.avg_autoscaler_size_percent")) - return avgAutoscalerSizePercent.Mul(maxSize.Sub(minSize)).Ceil().IntPart() -} - -func getGroupInstanceTemplateSpecs( - tfResource tfjson.StateResource, - tfRefs *tfrefs.References) (*resources.ComputeResourceSpecs, int64) { - - targetSize := int64(0) - targetSizeExpr := tfResource.AttributeValues["target_size"] - if targetSizeExpr != nil { - targetSize = decimal.NewFromFloat(targetSizeExpr.(float64)).BigInt().Int64() - } - - var template *tfjson.StateResource - templateConfig := (tfRefs.ResourceConfigs)[tfResource.Address] - versionExpr := templateConfig.Expressions["version"] - if versionExpr != nil { - for _, version := range versionExpr.NestedBlocks { - instanceTemplate := version["instance_template"] - if instanceTemplate != nil { - references := instanceTemplate.References - for _, reference := range references { - if !strings.HasSuffix(reference, ".id") { - template = (tfRefs.ResourceReferences)[reference] - } - } - } - } - } - - if template != nil { - var zones []string - zoneAttr := tfResource.AttributeValues["zone"] - if zoneAttr != nil { - zones = append(zones, zoneAttr.(string)) - } - distributionPolicyZonesI := tfResource.AttributeValues["distribution_policy_zones"] - if distributionPolicyZonesI != nil { - distributionPolicyZones := distributionPolicyZonesI.([]interface{}) - for _, z := range distributionPolicyZones { - zones = append(zones, z.(string)) - } - } - - if len(zones) == 0 { - log.Fatalf("No zone or distribution policy declared for %v", tfResource.Address) - } - templateResource := GetResourceTemplate(*template, tfRefs, zones[0]) - computeTemplate, ok := templateResource.(resources.ComputeResource) - if ok { - return computeTemplate.Specs, targetSize - } else { - log.Fatalf("Type mismatch, not a esources.ComputeResource template %v", computeTemplate.GetAddress()) - } - } - return nil, 0 -} diff --git a/internal/plan/gcp/ResourceIdentification.go 
b/internal/plan/gcp/ResourceIdentification.go deleted file mode 100644 index 215b357..0000000 --- a/internal/plan/gcp/ResourceIdentification.go +++ /dev/null @@ -1,41 +0,0 @@ -package gcp - -import ( - "fmt" - "strings" - - "github.com/carboniferio/carbonifer/internal/providers" - "github.com/carboniferio/carbonifer/internal/resources" - tfjson "github.com/hashicorp/terraform-json" -) - -func getResourceIdentification(resource tfjson.StateResource) *resources.ResourceIdentification { - region := resource.AttributeValues["region"] - if region == nil { - zone := resource.AttributeValues["zone"] - zones := resource.AttributeValues["replica_zones"] - if zones == nil { - zones = resource.AttributeValues["distribution_policy_zones"] - } - if zone != nil { - region = strings.Join(strings.Split(zone.(string), "-")[:2], "-") - } else if zones != nil { - region = strings.Join(strings.Split(zones.([]interface{})[0].(string), "-")[:2], "-") - } else { - region = "" - } - } - - name := resource.Name - if resource.Index != nil { - name = fmt.Sprintf("%v[%v]", resource.Name, resource.Index) - } - - return &resources.ResourceIdentification{ - Name: name, - ResourceType: resource.Type, - Provider: providers.GCP, - Region: fmt.Sprint(region), - Count: 1, - } -} diff --git a/internal/plan/gcp/SQLInstanceResource.go b/internal/plan/gcp/SQLInstanceResource.go deleted file mode 100644 index 33be640..0000000 --- a/internal/plan/gcp/SQLInstanceResource.go +++ /dev/null @@ -1,63 +0,0 @@ -package gcp - -import ( - "github.com/carboniferio/carbonifer/internal/providers/gcp" - "github.com/carboniferio/carbonifer/internal/resources" - tfjson "github.com/hashicorp/terraform-json" - "github.com/shopspring/decimal" - log "github.com/sirupsen/logrus" -) - -func getSQLResourceSpecs( - resource tfjson.StateResource) *resources.ComputeResourceSpecs { - - replicationFactor := int32(1) - ssdSize := decimal.Zero - hddSize := decimal.Zero - var tier gcp.SqlTier - - settingsI, ok := 
resource.AttributeValues["settings"] - if ok { - settings := settingsI.([]interface{})[0].(map[string]interface{}) - - availabilityType := settings["availability_type"] - if availabilityType != nil && availabilityType == "REGIONAL" { - replicationFactor = int32(2) - } - - tierName := "" - if settings["tier"] != nil { - tierName = settings["tier"].(string) - } - tier = gcp.GetGCPSQLTier(tierName) - - diskTypeI, ok_dt := settings["disk_type"] - diskType := "PD_SSD" - if ok_dt { - diskType = diskTypeI.(string) - } - - diskSizeI, ok_ds := settings["disk_size"] - diskSize := decimal.NewFromFloat(10) - if ok_ds { - diskSize = decimal.NewFromFloat(diskSizeI.(float64)) - } - - if diskType == "PD_SSD" { - ssdSize = diskSize - } else if diskType == "PD_HDD" { - hddSize = diskSize - } else { - log.Fatalf("%s : wrong type of disk : %s", resource.Address, tierName) - } - - } - - return &resources.ComputeResourceSpecs{ - VCPUs: int32(tier.Vcpus), - MemoryMb: int32(tier.MemoryMb), - SsdStorage: ssdSize, - HddStorage: hddSize, - ReplicationFactor: replicationFactor, - } -} diff --git a/internal/plan/gcp/resources.go b/internal/plan/gcp/resources.go deleted file mode 100644 index bdb88f7..0000000 --- a/internal/plan/gcp/resources.go +++ /dev/null @@ -1,80 +0,0 @@ -package gcp - -import ( - "github.com/carboniferio/carbonifer/internal/resources" - "github.com/carboniferio/carbonifer/internal/terraform/tfrefs" - "github.com/forestgiant/sliceutil" - tfjson "github.com/hashicorp/terraform-json" -) - -func GetResource( - tfResource tfjson.StateResource, - tfRefs *tfrefs.References) resources.Resource { - - resourceId := getResourceIdentification(tfResource) - if resourceId.ResourceType == "google_compute_instance" { - specs := getComputeResourceSpecs(tfResource, tfRefs, nil) - return resources.ComputeResource{ - Identification: resourceId, - Specs: specs, - } - } - if resourceId.ResourceType == "google_compute_instance_from_template" { - specs := 
getComputeResourceFromTemplateSpecs(tfResource, tfRefs) - if specs != nil { - return resources.ComputeResource{ - Identification: resourceId, - Specs: specs, - } - } - } - if resourceId.ResourceType == "google_compute_disk" || - resourceId.ResourceType == "google_compute_region_disk" { - specs := getComputeDiskResourceSpecs(tfResource, tfRefs) - return resources.ComputeResource{ - Identification: resourceId, - Specs: specs, - } - } - if resourceId.ResourceType == "google_sql_database_instance" { - specs := getSQLResourceSpecs(tfResource) - return resources.ComputeResource{ - Identification: resourceId, - Specs: specs, - } - } - if resourceId.ResourceType == "google_compute_instance_group_manager" || - resourceId.ResourceType == "google_compute_region_instance_group_manager" { - specs, count := getComputeInstanceGroupManagerSpecs(tfResource, tfRefs) - if specs != nil { - resourceId.Count = count - return resources.ComputeResource{ - Identification: resourceId, - Specs: specs, - } - } - } - ignoredResourceType := []string{ - "google_compute_autoscaler", - "google_compute_instance_template", - } - if sliceutil.Contains(ignoredResourceType, resourceId.ResourceType) { - return nil - } - - return resources.UnsupportedResource{ - Identification: resourceId, - } -} - -func GetResourceTemplate(tfResource tfjson.StateResource, tfRefs *tfrefs.References, zone string) resources.Resource { - resourceId := getResourceIdentification(tfResource) - if resourceId.ResourceType == "google_compute_instance_template" { - specs := getComputeResourceSpecs(tfResource, tfRefs, zone) - return resources.ComputeResource{ - Identification: resourceId, - Specs: specs, - } - } - return nil -} diff --git a/internal/plan/gcp/resources_test.go b/internal/plan/gcp/resources_test.go deleted file mode 100644 index c7ff992..0000000 --- a/internal/plan/gcp/resources_test.go +++ /dev/null @@ -1,204 +0,0 @@ -package gcp - -import ( - "testing" - - "github.com/carboniferio/carbonifer/internal/providers" - 
"github.com/carboniferio/carbonifer/internal/resources" - _ "github.com/carboniferio/carbonifer/internal/testutils" - tfjson "github.com/hashicorp/terraform-json" - "github.com/shopspring/decimal" - "github.com/stretchr/testify/assert" -) - -var persistenDisk tfjson.StateResource = tfjson.StateResource{ - Address: "google_compute_disk.disk1", - Type: "google_compute_disk", - Name: "disk1", - AttributeValues: map[string]interface{}{ - "name": "disk1", - "type": "pd-standard", - "size": float64(1024), - "zone": "europe-west9-a", - }, -} - -var persistenDiskNoSize tfjson.StateResource = tfjson.StateResource{ - Address: "google_compute_disk.disk2", - Type: "google_compute_disk", - Name: "disk2", - AttributeValues: map[string]interface{}{ - "name": "disk2", - "type": "pd-standard", - "zone": "europe-west9-a", - }, -} - -var regionDisk tfjson.StateResource = tfjson.StateResource{ - Address: "google_compute_region_disk.diskr", - Type: "google_compute_region_disk", - Name: "diskr", - AttributeValues: map[string]interface{}{ - "name": "diskr", - "type": "pd-ssd", - "size": float64(1024), - "replica_zones": []interface{}{"europe-west9-a", "europe-west9-b"}, - }, -} - -var gpuAttachedMachine tfjson.StateResource = tfjson.StateResource{ - Address: "google_compute_instance.attachedgpu", - Type: "google_compute_instance", - Name: "attachedgpu", - AttributeValues: map[string]interface{}{ - "name": "attachedgpu", - "machine_type": "n2-standard-2", - "zone": "europe-west9-a", - "boot_disk": []interface{}{}, - "guest_accelerator": []interface{}{ - map[string]interface{}{ - "type": "nvidia-tesla-k80", - "count": float64(2), - }, - }, - }, -} - -var gpuDefaultMachine tfjson.StateResource = tfjson.StateResource{ - Address: "google_compute_instance.defaultgpu", - Type: "google_compute_instance", - Name: "defaultgpu", - AttributeValues: map[string]interface{}{ - "name": "defaultgpu", - "machine_type": "a2-highgpu-1g", - "zone": "europe-west9-a", - "boot_disk": []interface{}{}, - }, -} - 
-func TestGetResource(t *testing.T) { - type args struct { - tfResource tfjson.StateResource - } - tests := []struct { - name string - args args - want resources.Resource - }{ - { - name: "diskWithSize", - args: args{ - tfResource: persistenDisk, - }, - want: resources.ComputeResource{ - Identification: &resources.ResourceIdentification{ - Name: "disk1", - ResourceType: "google_compute_disk", - Provider: providers.GCP, - Region: "europe-west9", - Count: 1, - }, - Specs: &resources.ComputeResourceSpecs{ - HddStorage: decimal.NewFromInt(1024), - SsdStorage: decimal.Zero, - ReplicationFactor: 1, - }, - }, - }, - { - name: "diskWithNoSize", - args: args{ - tfResource: persistenDiskNoSize, - }, - want: resources.ComputeResource{ - Identification: &resources.ResourceIdentification{ - Name: "disk2", - ResourceType: "google_compute_disk", - Provider: providers.GCP, - Region: "europe-west9", - Count: 1, - }, - Specs: &resources.ComputeResourceSpecs{ - HddStorage: decimal.New(50, 1), - SsdStorage: decimal.Zero, - ReplicationFactor: 1, - }, - }, - }, - { - name: "regionDisk", - args: args{ - tfResource: regionDisk, - }, - want: resources.ComputeResource{ - Identification: &resources.ResourceIdentification{ - Name: "diskr", - ResourceType: "google_compute_region_disk", - Provider: providers.GCP, - Region: "europe-west9", - Count: 1, - }, - Specs: &resources.ComputeResourceSpecs{ - HddStorage: decimal.Zero, - SsdStorage: decimal.NewFromInt(1024), - ReplicationFactor: 2, - }, - }, - }, - { - name: "gpu attached", - args: args{ - tfResource: gpuAttachedMachine, - }, - want: resources.ComputeResource{ - Identification: &resources.ResourceIdentification{ - Name: "attachedgpu", - ResourceType: "google_compute_instance", - Provider: providers.GCP, - Region: "europe-west9", - Count: 1, - }, - Specs: &resources.ComputeResourceSpecs{ - GpuTypes: []string{ - "nvidia-tesla-k80", - "nvidia-tesla-k80", - }, - HddStorage: decimal.Zero, - SsdStorage: decimal.Zero, - ReplicationFactor: 1, - }, 
- }, - }, - { - name: "gpu default", - args: args{ - tfResource: gpuDefaultMachine, - }, - want: resources.ComputeResource{ - Identification: &resources.ResourceIdentification{ - Name: "defaultgpu", - ResourceType: "google_compute_instance", - Provider: providers.GCP, - Region: "europe-west9", - Count: 1, - }, - Specs: &resources.ComputeResourceSpecs{ - GpuTypes: []string{ - "testing-custom-data-file", - }, - VCPUs: int32(12), - MemoryMb: int32(87040), - HddStorage: decimal.Zero, - SsdStorage: decimal.Zero, - ReplicationFactor: 1, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := GetResource(tt.args.tfResource, nil) - assert.Equal(t, tt.want, got) - }) - } -} diff --git a/internal/plan/json_getters.go b/internal/plan/json_getters.go new file mode 100644 index 0000000..a3b435e --- /dev/null +++ b/internal/plan/json_getters.go @@ -0,0 +1,410 @@ +package plan + +import ( + "fmt" + "regexp" + "strings" + + "github.com/carboniferio/carbonifer/internal/providers" + "github.com/carboniferio/carbonifer/internal/utils" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "github.com/spf13/viper" +) + +// tfContext is the context of a terraform resource +type tfContext struct { + Resource map[string]interface{} // Json of the terraform plan resource + Mapping *ResourceMapping // Mapping of the resource type + ResourceAddress string // Address of the resource in tf plan + ParentContext *tfContext // Parent context + Provider providers.Provider +} + +func getString(key string, context *tfContext) (*string, error) { + value, err := getValue(key, context) + if err != nil { + return nil, err + } + + if value == nil { + log.Debugf("No value found for key %v of resource type %v", key, context.ResourceAddress) + return nil, nil + } + stringValue, ok := value.Value.(string) + if !ok { + return nil, fmt.Errorf("Cannot convert value to string: %v : %T", value.Value, value.Value) + } + return &stringValue, nil +} + +func 
getSlice(key string, context *tfContext) ([]interface{}, error) { + results := []interface{}{} + + sliceMappings := (*context.Mapping.Properties)[key] + + // Check we are well working on a list + for _, sliceMapping := range sliceMappings { + + if sliceMapping.ValueType != nil && *sliceMapping.ValueType != "list" { + return nil, fmt.Errorf("Cannot get slice for %v if resource '.type' is not 'list'", key) + } + + // get mapping of items of the list + mappingItems := sliceMapping.Item + if mappingItems == nil { + return nil, fmt.Errorf("Items is not a list for %v of resource type %v", key, context.ResourceAddress) + } + for _, itemMapping := range *mappingItems { + context := tfContext{ + Resource: context.Resource, + Mapping: &itemMapping, + ResourceAddress: context.ResourceAddress + "." + key, + ParentContext: context, + Provider: context.Provider, + } + itemResults, err := getSliceItems(context) + if err != nil { + return nil, err + } + results = append(results, itemResults...) + } + } + + return results, nil +} + +func getSliceItems(context tfContext) ([]interface{}, error) { + itemMapping := context.Mapping + results := []interface{}{} + paths, err := readPaths(itemMapping.Paths) + if err != nil { + return nil, fmt.Errorf("Cannot get paths for %v: %v", context.ResourceAddress, err) + } + + for _, pathRaw := range paths { + path := pathRaw + if strings.Contains(pathRaw, "${") { + path, err = resolvePlaceholders(path, context.ParentContext) + if err != nil { + return nil, err + } + } + jsonResults, err := utils.GetJSON(path, context.Resource) + if err != nil { + return nil, errors.Wrapf(err, "Cannot get item: %v", path) + } + // if no result, try to get it from the whole plan + if len(jsonResults) == 0 && TfPlan != nil { + jsonResults, err = utils.GetJSON(path, *TfPlan) + if err != nil { + return nil, errors.Wrapf(err, "Cannot get item: %v", path) + } + } + for _, jsonResultsI := range jsonResults { + switch jsonResults := jsonResultsI.(type) { + case 
map[string]interface{}: + result, err := getItem(context, itemMapping, jsonResults) + if err != nil { + return nil, err + } + results = append(results, result) + case []interface{}: + for _, jsonResultI := range jsonResults { + jsonResultI, ok := jsonResultI.(map[string]interface{}) + if !ok { + return nil, errors.Errorf("Cannot convert jsonResultI to map[string]interface{}: %v", jsonResultI) + } + result, err := getItem(context, itemMapping, jsonResultI) + if err != nil { + return nil, err + } + results = append(results, result) + } + default: + return nil, errors.Errorf("Not an map or an array of maps: %T", jsonResultsI) + } + } + } + return results, nil +} + +func getItem(context tfContext, itemMappingProperties *ResourceMapping, jsonResultI map[string]interface{}) (interface{}, error) { + result := map[string]interface{}{} + for key := range *itemMappingProperties.Properties { + if key == "paths" { + continue + } + itemContext := tfContext{ + Resource: jsonResultI, + Mapping: itemMappingProperties, + ResourceAddress: context.ResourceAddress, + ParentContext: &context, + Provider: context.Provider, + } + property, err := getValue(key, &itemContext) + if err != nil { + return nil, err + } + result[key] = property + } + return result, nil +} + +type valueWithUnit struct { + Value interface{} + Unit *string +} + +func readPaths(pathsProperty interface{}, pathTemplateValuesParams ...*map[string]string) ([]string, error) { + paths := []string{} + if pathsProperty == nil { + return paths, nil + } + + switch pathTyped := pathsProperty.(type) { + case string: + paths = []string{pathTyped} + case []string: + paths = append(paths, pathTyped...) 
// readPaths normalises the "paths" property of a mapping into a []string.
// Accepted input shapes are a single string, a []string, or a []interface{}
// of strings; nil yields an empty slice. Optional template-value maps are
// applied afterwards, replacing each "${key}" occurrence with its value.
//
// Fix: uses fmt.Errorf for these simple static messages, consistent with the
// sibling helpers (getString, getValue) instead of the third-party
// errors.Errorf — no stack trace is needed here.
func readPaths(pathsProperty interface{}, pathTemplateValuesParams ...*map[string]string) ([]string, error) {
	paths := []string{}
	if pathsProperty == nil {
		return paths, nil
	}

	switch typed := pathsProperty.(type) {
	case string:
		paths = []string{typed}
	case []string:
		paths = append(paths, typed...)
	case []interface{}:
		for _, pathI := range typed {
			pathStr, ok := pathI.(string)
			if !ok {
				return nil, fmt.Errorf("Cannot convert path to string: %T", pathI)
			}
			paths = append(paths, pathStr)
		}
	default:
		return nil, fmt.Errorf("Cannot convert paths to string or []string: %T", pathsProperty)
	}

	// Substitute "${key}" placeholders from every supplied template map.
	for _, templateValues := range pathTemplateValuesParams {
		for i, path := range paths {
			for key, value := range *templateValues {
				path = strings.ReplaceAll(path, "${"+key+"}", value)
			}
			paths[i] = path
		}
	}
	return paths, nil
}
applyReference(valueFoundStr, &propertyMapping, context) + if err != nil { + return nil, err + } + } + } + + // if value is an expression (map[string]interface{}), resolve it + valueFoundMap, ok := valueFound.(map[string]interface{}) + if ok { + valueFound, err = getValueOfExpression(valueFoundMap, context) + if err != nil { + return nil, err + } + } + + if valueFound != nil { + return &valueWithUnit{ + Value: valueFound, + Unit: unit, + }, nil + } + } + + if valueFound == nil { + defaultValue, err := getDefaultValue(key, context) + if err != nil { + return nil, err + } + + if defaultValue != nil { + return defaultValue, nil + } + } + + return nil, nil +} + +func resolvePlaceholders(input string, context *tfContext) (string, error) { + placeholderPattern := `\${([^}]+)}` + + // Compile the regular expression + rx := regexp.MustCompile(placeholderPattern) + + // Find all matches in the input string + matches := rx.FindAllStringSubmatch(input, -1) + + // Create a map to store resolved expressions + resolvedExpressions := make(map[string]string) + + // Iterate through the matches and resolve expressions + for _, match := range matches { + placeholder := match[0] + expression := match[1] + resolved, err := resolvePlaceholder(expression, context) + if err != nil { + return input, err + } + resolvedExpressions[placeholder] = resolved + } + + // Replace placeholders in the input string with resolved expressions + replacerStrings := make([]string, 0, len(resolvedExpressions)*2) + for placeholder, resolved := range resolvedExpressions { + replacerStrings = append(replacerStrings, placeholder, resolved) + } + + replacer := strings.NewReplacer(replacerStrings...) 
+ resolvedString := replacer.Replace(input) + return resolvedString, nil +} + +func resolvePlaceholder(expression string, context *tfContext) (string, error) { + result := "" + if strings.HasPrefix(expression, "this.") { + thisProperty := strings.TrimPrefix(expression, "this") + resource := context.Resource + value, err := utils.GetJSON(thisProperty, resource) + if err != nil { + return "", errors.Wrapf(err, "Cannot get value for variable %s", expression) + } + if value == nil { + return "", errors.Errorf("No value found for variable %s", expression) + } + return fmt.Sprintf("%v", value[0]), err + } else if strings.HasPrefix(expression, "config.") { + configProperty := strings.TrimPrefix(expression, "config.") + value := viper.GetFloat64(configProperty) + return fmt.Sprintf("%v", value), nil + } + variable, err := getVariable(expression, context) + if err != nil { + return "", err + } + if variable != nil { + result = fmt.Sprintf("%v", variable) + } + return result, nil +} + +func getDefaultValue(key string, context *tfContext) (*valueWithUnit, error) { + propertyMappings, ok := (*context.Mapping.Properties)[key] + if !ok { + log.Debugf("No property mapping found for key %v of resource type %v", key, context.ResourceAddress) + return nil, nil + } + + for _, propertyMapping := range propertyMappings { + if propertyMapping.Default != nil { + + valueFound := propertyMapping.Default + unit := propertyMapping.Unit + var err error + valueFoundStr, ok := valueFound.(string) + if ok { + valueFound, err = applyRegex(valueFoundStr, &propertyMapping, context) + if err != nil { + return nil, err + } + } + valueFoundStr, ok = valueFound.(string) + if ok { + valueFound, err = applyReference(valueFoundStr, &propertyMapping, context) + if err != nil { + return nil, err + } + } + + if valueFound != nil { + return &valueWithUnit{ + Value: valueFound, + Unit: unit, + }, nil + } + return nil, nil + } + } + return nil, nil + +} + +func getVariable(name string, context *tfContext) 
(interface{}, error) { + variablesMappings := context.Mapping.Variables + if variablesMappings == nil { + return nil, nil + } + variableContext := tfContext{ + Resource: context.Resource, + Mapping: variablesMappings, + ResourceAddress: context.ResourceAddress + ".variables", + ParentContext: context.ParentContext, + Provider: context.Provider, + } + value, err := getValue(name, &variableContext) + if err != nil { + return nil, err + } + if value == nil { + return nil, fmt.Errorf("Cannot get variable : %v", name) + } + return value.Value, nil + +} diff --git a/internal/plan/mapping.go b/internal/plan/mapping.go new file mode 100644 index 0000000..76b4cc0 --- /dev/null +++ b/internal/plan/mapping.go @@ -0,0 +1,98 @@ +package plan + +import ( + "os" + "path/filepath" + + "github.com/carboniferio/carbonifer/internal/providers" + "github.com/polkeli/yaml/v3" // TODO use go-yaml https://github.com/go-yaml/yaml/issues/100#issuecomment-1632853107 + "golang.org/x/exp/maps" +) + +// Mapping is the mapping of the terraform resources +var globalMappings *Mappings + +// GetMapping returns the mapping of the terraform resources +func getMapping() (*Mappings, error) { + if globalMappings != nil { + return globalMappings, nil + } + err := loadMappings() + if err != nil { + return nil, err + } + return globalMappings, nil +} + +func loadMappings() error { + globalMappings = &Mappings{ + General: &map[providers.Provider]GeneralConfig{}, + ComputeResource: &map[string]ResourceMapping{}, + } + mappingsPath := "internal/plan/mappings" + files, err := os.ReadDir(mappingsPath) + if err != nil { + return err + } + + // Iterate over each entry + for _, file := range files { + // Check if it's a directory + if file.IsDir() { + // Get the relative path + relativePath := filepath.Join(mappingsPath, file.Name()) + + // Process the subfolder + err := loadMapping(relativePath) + if err != nil { + return err + } + } + } + return nil +} + +func loadMapping(providerMappingFolder string) error { + 
files, err := os.ReadDir(providerMappingFolder) + if err != nil { + return err + } + + mergedMappings := &Mappings{ + General: &map[providers.Provider]GeneralConfig{}, + ComputeResource: &map[string]ResourceMapping{}, + } + + for _, file := range files { + if file.IsDir() { + continue + } + yamlFile, err := os.ReadFile(filepath.Join(providerMappingFolder, file.Name())) + if err != nil { + return err + } + var currentMapping Mappings + err = yaml.Unmarshal(yamlFile, ¤tMapping) + if err != nil { + return err + } + + if currentMapping.General != nil { + for k, v := range *currentMapping.General { + (*mergedMappings.General)[k] = v + } + } + + if currentMapping.ComputeResource != nil { + for k, v := range *currentMapping.ComputeResource { + (*mergedMappings.ComputeResource)[k] = v + } + } + + } + + maps.Copy(*globalMappings.General, *mergedMappings.General) + maps.Copy(*globalMappings.ComputeResource, *mergedMappings.ComputeResource) + + return nil +} diff --git a/internal/plan/mappingStructs.go b/internal/plan/mappingStructs.go new file mode 100644 index 0000000..bd43ff5 --- /dev/null +++ b/internal/plan/mappingStructs.go @@ -0,0 +1,50 @@ +package plan + +import "github.com/carboniferio/carbonifer/internal/providers" + +type Mappings struct { + General *map[providers.Provider]GeneralConfig `yaml:"general,omitempty"` + ComputeResource *map[string]ResourceMapping `yaml:"compute_resource,omitempty"` +} + +type GeneralConfig struct { + JSONData *map[string]interface{} `yaml:"json_data,omitempty"` + DiskTypes *DiskTypes `yaml:"disk_types,omitempty"` + IgnoredResources *[]string `yaml:"ignored_resources,omitempty"` +} + +type DiskTypes struct { + Default *DiskType `yaml:"default,omitempty"` + Types *map[string]*DiskType `yaml:"types,omitempty"` +} + +type ResourceMapping struct { + Paths []string `yaml:"paths"` + Type string `yaml:"type"` + Variables *ResourceMapping `yaml:"variables,omitempty"` + Properties *map[string][]PropertyDefinition `yaml:"properties"` +} + +type 
PropertyDefinition struct { + Paths []string `yaml:"paths"` + Unit *string `yaml:"unit,omitempty"` + Default interface{} `yaml:"default,omitempty"` + ValueType *string `yaml:"value_type,omitempty"` + Reference *Reference `yaml:"reference,omitempty"` + Regex *Regex `yaml:"regex,omitempty"` + Item *[]ResourceMapping `yaml:"item,omitempty"` +} + +type Reference struct { + General string `yaml:"general,omitempty"` + JSONFile string `yaml:"json_file,omitempty"` + Property string `yaml:"property,omitempty"` + Paths []string `yaml:"paths,omitempty"` + ReturnPath bool `yaml:"return_path,omitempty"` +} + +type Regex struct { + Pattern string `yaml:"pattern"` + Group int `yaml:"group"` + Type string `yaml:"type,omitempty"` +} diff --git a/internal/plan/mappings/aws/ec2_ebs.yaml b/internal/plan/mappings/aws/ec2_ebs.yaml new file mode 100644 index 0000000..ea85550 --- /dev/null +++ b/internal/plan/mappings/aws/ec2_ebs.yaml @@ -0,0 +1,42 @@ +compute_resource: + aws_ebs_volume: + paths: + - .planned_values.root_module.resources[] | select(.type == "aws_ebs_volume") + type: resource + properties: + name: + - paths: ".name" + address: + - paths: ".address" + type: + - paths: ".type" + zone: + - paths: ".values.availability_zone" + region: + - paths: ".values.availability_zone" + regex: + pattern: '^(.+-\d+)[a-z]+' + group: 1 + replication_factor: + - default: 1 + storage: + - type: list + item: + - paths: ".values" + properties: + size: + - paths: ".size" + unit: gb + - paths: ".snapshot_id" + reference: + paths: .prior_state.values.root_module.resources[] | select(.values.id == "${key}") | .values + property: "volume_size" + - default: 8 + unit: gb + type: + - paths: ".type" + reference: + general: disk_types + - default: standard + reference: + general: disk_types \ No newline at end of file diff --git a/internal/plan/mappings/aws/ec2_instance.yaml b/internal/plan/mappings/aws/ec2_instance.yaml new file mode 100644 index 0000000..d09d27f --- /dev/null +++ 
# Mapping of aws_instance terraform resources to carbonifer compute resources.
# FIX: the "zone" property was declared as a nested map (zone: {zone: [...]})
# which cannot unmarshal into the []PropertyDefinition the loader expects;
# flattened to a single key.
compute_resource:
  aws_instance:
    paths:
      - .planned_values.root_module.resources[] | select(.type == "aws_instance")
    type: resource
    variables:
      properties:
        ami:
          - paths:
              - '.prior_state.values.root_module.resources[] | select(.values.image_id == "${this.values.ami}")'
            reference:
              return_path: true
        provider_region:
          - paths:
              - '.configuration'
            # NOTE(review): "property" is not a recognised field at this level
            # (only reference.property is) — verify this entry is ever used.
            property: "region"
    properties:
      name:
        - paths: ".name"
      address:
        - paths: ".address"
      type:
        - paths: ".type"
      vCPUs:
        - paths: ".values.instance_type"
          reference:
            json_file: aws_instances
            property: "VCPU"
      memory:
        - paths: ".values.instance_type"
          unit: mb
          reference:
            json_file: aws_instances
            property: "MemoryMb"
      zone:
        - paths: ".values.availability_zone"
      region:
        - paths: ".values.availability_zone"
          regex:
            pattern: '^(.+-\d+)[a-z]+'
            group: 1
        - paths: ".configuration.provider_config.aws.expressions.region"
      replication_factor:
        - default: 1
      storage:
        - type: list
          item:
            # Root device: size/type come from the AMI's block device mappings.
            - paths: '.prior_state.values.root_module.resources[] | select(.values.image_id == "${this.values.ami}") | .values.block_device_mappings[].ebs | select(length > 0)'
              properties:
                size:
                  - paths: ".volume_size"
                    default: 8
                    unit: gb
                type:
                  - paths: ".volume_type"
                    default: standard
                    reference:
                      general: disk_types
            # Additional EBS block devices declared on the instance itself.
            - paths: '.values.ebs_block_device[] | select(length > 0)'
              properties:
                size:
                  - paths: ".volume_size"
                    unit: gb
                  - paths: ".snapshot_id"
                    reference:
                      paths: .prior_state.values.root_module.resources[] | select(.values.id == "${key}") | .values
                      property: "volume_size"
                  - default: 8
                    unit: gb
                type:
                  - paths: ".volume_type"
                    default: standard
                    reference:
                      general: disk_types
b/internal/plan/mappings/aws/general.yaml @@ -0,0 +1,16 @@ +general: + aws: + disk_types: + default: ssd + types: + standard: hdd + gp2: ssd + gp3: ssd + io1: ssd + io2: ssd + st1: hdd + sc1: hdd + json_data: + aws_instances : "aws_instances.json" + ignored_resources: + - "aws_vpc" \ No newline at end of file diff --git a/internal/plan/mappings/gcp/compute.yaml b/internal/plan/mappings/gcp/compute.yaml new file mode 100644 index 0000000..751d4ad --- /dev/null +++ b/internal/plan/mappings/gcp/compute.yaml @@ -0,0 +1,173 @@ +compute_resource: + google_compute_instance: + paths: .planned_values.root_module.resources[] | select(.type == "google_compute_instance") + type: resource + properties: + name: + - paths: ".name" + address: + - paths: ".address" + type: + - paths: ".type" + vCPUs: + - paths: ".values.machine_type" + reference: + json_file: gcp_machines_types + property: "vcpus" + - paths: ".values.machine_type" + regex: + pattern: ".*custom-([0-9]+)-.*" + group: 1 + value_type: integer + memory: + - paths: ".values.machine_type" + unit: mb + reference: + json_file: gcp_machines_types + property: "memoryMb" + - paths: ".values.machine_type" + unit: mb + regex: + pattern: ".*custom-[0-9]+-([0-9]+).*" + group: 1 + value_type: integer + zone: + - paths: ".values.zone" + region: + - paths: ".values.zone" + regex: + pattern: "^(.*)-.*$" + group: 1 + replication_factor: + - default: 1 + cpu_platform: + - paths: ".values.cpu_platform" + guest_accelerator: + - type: list + item: + - paths: ".values.guest_accelerator" + properties: + count: + - paths: ".count" + value_type: integer + type: + - paths: ".type" + value_type: string + storage: + - type: list + item: + - paths: + - .values.disk[].initialize_params + - .values.boot_disk[].initialize_params + properties: + size: + - paths: + - ".size" + - ".disk_size_gb" + default: 10 + unit: gb + - paths: + - ".image" + - ".source_image" + default: 10 + unit: gb + reference: + paths: '.prior_state.values.root_module.resources[] 
| select(.values.self_link == "${key}") | .values' + property: "disk_size_gb" + type: + - paths: + - ".type" + - ".disk_type" + default: pd-standard + reference: + general: disk_types + - paths: .values.scratch_disk + properties: + size: + - paths: ".size" + unit: gb + default: 375 + type: + - default: ssd + google_compute_instance_from_template: + paths: + - .planned_values.root_module.resources[] | select(.type == "google_compute_instance_from_template") + type: resource + variables: + properties: + template_config: + - paths: + - '.configuration.root_module.resources[] | select(.address == "${this.address}") | .expressions.source_instance_template.references[] | select(endswith("id")) | gsub("\\.id$"; "")' + reference: + paths: + - .planned_values.root_module.resources[] | select(.address == "${key}") + - .planned_values.root_module.child_modules[] | select(.address == ("${key}" | split(".")[0:2] | join("."))) | .resources[] | select(.name == ("${key}" | split(".")[2])) + - .prior_state.values.root_module.resources[] | select(.address == "${key}") + return_path: true + properties: + name: + - paths: ".name" + address: + - paths: ".address" + type: + - paths: ".type" + vCPUs: + - paths: "${template_config}.values.machine_type" + reference: + json_file: gcp_machines_types + property: "vcpus" + memory: + - paths: "${template_config}.values.machine_type" + unit: mb + reference: + json_file: gcp_machines_types + property: "memoryMb" + zone: + - paths: ".values.zone" + region: + - paths: ".values.zone" + regex: + pattern: "^(.*)-.*$" + group: 1 + replication_factor: + - default: 1 + cpu_platform: + - paths: "${template_config}.values.min_cpu_platform" + guest_accelerator: + - type: list + item: + - paths: "${template_config}.values.guest_accelerator" + properties: + count: + - paths: ".count" + value_type: integer + type: + - paths: ".type" + value_type: string + storage: + - type: list + item: + - paths: ${template_config}.values.disk + properties: + size: + - paths: 
".disk_size_gb" + unit: gb + - paths: ".image" + unit: gb + reference: + paths: .prior_state.values.root_module.resources[] | select(.values.self_link == "${key}") | .values + property: "disk_size_gb" + - default: 10 + type: + - paths: ".disk_type" + default: pd-standard + reference: + general: disk_types + - paths: .values.scratch_disk + properties: + size: + - paths: "size" + unit: gb + default: 375 + type: + - default: ssd diff --git a/internal/plan/mappings/gcp/compute_group.yaml b/internal/plan/mappings/gcp/compute_group.yaml new file mode 100644 index 0000000..189894e --- /dev/null +++ b/internal/plan/mappings/gcp/compute_group.yaml @@ -0,0 +1,100 @@ +compute_resource: + google_compute_instance_group_manager: + paths: + - .planned_values.root_module.resources[] | select(.type == "google_compute_instance_group_manager") + - .planned_values.root_module.resources[] | select(.type == "google_compute_region_instance_group_manager") + type: resource + variables: + properties: + template_config: + - paths: + - '.configuration.root_module.resources[] | select(.address == "${this.address}") | .expressions.version[0].instance_template.references[] | select(endswith("id")) | gsub("\\.id$"; "")' + reference: + paths: + - .planned_values.root_module.resources[] | select(.address == "${key}") + - .planned_values.root_module.child_modules[].resources | map(select(.address == "${key}")) + - .prior_state.values.root_module.resources[] | select(.address == "${key}") + return_path: true + autoscaler: + - paths: + - '(.configuration.root_module.resources[] | select(.expressions.target?.references[]? 
== "${this.address}") | .address)' + reference: + paths: + - .planned_values.root_module.resources[] | select(.address == "${key}") + - .planned_values.root_module.child_modules[].resources | map(select(.address == "${key}")) + - .prior_state.values.root_module.resources[] | select(.address == "${key}") + return_path: true + properties: + name: + - paths: ".name" + address: + - paths: ".address" + type: + - paths: ".type" + vCPUs: + - paths: "${template_config}.values.machine_type" + reference: + json_file: gcp_machines_types + property: "vcpus" + memory: + - paths: "${template_config}.values.machine_type" + unit: mb + reference: + json_file: gcp_machines_types + property: "memoryMb" + zone: + - paths: ".values.zone" + region: + - paths: ".values.zone" + regex: + pattern: "^(.*)-.*$" + group: 1 + - paths: ".values.distribution_policy_zones[0]" + regex: + pattern: "^(.*)-.*$" + group: 1 + replication_factor: + - default: 1 + count: + - paths: ".values.target_size" + - paths: '${autoscaler}.values.autoscaling_policy[0] | (.min_replicas + (${config.provider.gcp.avg_autoscaler_size_percent} * (.max_replicas - .min_replicas)))' + cpu_platform: + - paths: "${template_config}.values.min_cpu_platform" + guest_accelerator: + - type: list + item: + - paths: "${template_config}.values.guest_accelerator" + properties: + count: + - paths: ".count" + type: integer + type: + - paths: ".type" + type: string + storage: + - type: list + item: + - paths: ${template_config}.values.disk + properties: + size: + - paths: ".disk_size_gb" + unit: gb + - paths: ".image" + unit: gb + reference: + paths: .prior_state.values.root_module.resources[] | select(.values.self_link == "${key}") | .values + property: "disk_size_gb" + - default: 10 + type: + - paths: ".disk_type" + default: pd-standard + reference: + general: disk_types + - paths: .values.scratch_disk + properties: + size: + - paths: "size" + unit: gb + default: 375 + type: + - default : ssd \ No newline at end of file diff --git 
a/internal/plan/mappings/gcp/disk.yaml b/internal/plan/mappings/gcp/disk.yaml new file mode 100644 index 0000000..afc9502 --- /dev/null +++ b/internal/plan/mappings/gcp/disk.yaml @@ -0,0 +1,47 @@ +compute_resource: + google_compute_disk: + paths: + - .planned_values.root_module.resources[] | select(.type == "google_compute_disk") + - .planned_values.root_module.resources[] | select(.type == "google_compute_region_disk") + type: resource + properties: + name: + - paths: ".name" + address: + - paths: ".address" + type: + - paths: ".type" + zone: + - paths: ".values.zone" + - paths: ".values.replica_zones[0]" + region: + - paths: ".values.zone" + regex: + pattern: "^(.*)-.*$" + group: 1 + - paths: ".values.replica_zones[0]" + regex: + pattern: "^(.*)-.*$" + group: 1 + replication_factor: + - paths: '.values.replica_zones | length | if . == 0 then 1 else . end' + - default: 1 + storage: + - type: list + item: + - paths: ".values" + properties: + size: + - paths: ".size" + unit: gb + - paths: ".image" + unit: gb + reference: + paths: .prior_state.values.root_module.resources[] | select(.values.self_link == "${key}") | .values + property: "disk_size_gb" + - default: 10 + unit: gb + type: + - paths: ".type" + reference: + general: disk_types diff --git a/internal/plan/mappings/gcp/general.yaml b/internal/plan/mappings/gcp/general.yaml new file mode 100644 index 0000000..b6dd2a0 --- /dev/null +++ b/internal/plan/mappings/gcp/general.yaml @@ -0,0 +1,12 @@ +general: + gcp: + disk_types: + default: ssd + types: + pd-standard: hdd + json_data: + gcp_machines_types: "gcp_instances.json" + gcp_sql_tiers: "gcp_sql_tiers.json" + ignored_resources: + - ".*_template" + - "google_compute_autoscaler" \ No newline at end of file diff --git a/internal/plan/mappings/gcp/sql_database.yaml b/internal/plan/mappings/gcp/sql_database.yaml new file mode 100644 index 0000000..52bba5b --- /dev/null +++ b/internal/plan/mappings/gcp/sql_database.yaml @@ -0,0 +1,45 @@ +compute_resource: + 
google_sql_database_instance: + paths: .planned_values.root_module.resources[] | select(.type == "google_sql_database_instance") + type: resource + properties: + name: + - paths: ".name" + address: + - paths: ".address" + type: + - paths: ".type" + vCPUs: + - paths: ".values.settings[0].tier" + reference: + json_file: gcp_sql_tiers + property: "vcpus" + memory: + - paths: ".values.settings[0].tier" + unit: mb + reference: + json_file: gcp_sql_tiers + property: "memoryMb" + zone: + - paths: ".values.zone" + region: + - paths: ".values.zone" + regex: + pattern: "^(.*)-.*$" + group: 1 + - paths: ".values.region" + replication_factor: + - paths: '.values.settings[0] | if .availability_type == "REGIONAL" then 2 else 1 end' + - default: 1 + storage: + - type: list + item: + - paths: .values.settings[0] + properties: + size: + - paths: ".disk_size" + default: 10 + unit: gb + type: + - paths: 'if .disk_type | test("(?i)ssd$") then "ssd" elif .disk_type | test("(?i)hdd$") then "hdd" else null end' + default: ssd diff --git a/internal/plan/resolver.go b/internal/plan/resolver.go new file mode 100644 index 0000000..46b29b2 --- /dev/null +++ b/internal/plan/resolver.go @@ -0,0 +1,124 @@ +package plan + +import ( + "encoding/json" + "fmt" + "regexp" + + "github.com/carboniferio/carbonifer/internal/data" + "github.com/carboniferio/carbonifer/internal/utils" + "github.com/pkg/errors" + "github.com/shopspring/decimal" + log "github.com/sirupsen/logrus" +) + +type storage struct { + SizeGb decimal.Decimal + IsSSD bool +} + +func applyReference(valueFound string, propertyMapping *PropertyDefinition, context *tfContext) (interface{}, error) { + if propertyMapping == nil || propertyMapping.Reference == nil { + return valueFound, nil + } + reference := propertyMapping.Reference + valueTransformed, err := resolveReference(valueFound, reference, context) + return valueTransformed, err +} + +func resolveReference(key string, reference *Reference, context *tfContext) (interface{}, error) { 
+ generalMappings := (*globalMappings.General)[context.Provider] + if reference.JSONFile != "" { + filename, ok := (*generalMappings.JSONData)[reference.JSONFile] + if !ok { + log.Fatalf("Cannot find file %v in general.json_data", reference.JSONFile) + } + byteValue := data.ReadDataFile(filename.(string)) + var fileMap map[string]interface{} + err := json.Unmarshal([]byte(byteValue), &fileMap) + if err != nil { + log.Fatal(err) + } + item, ok := fileMap[key] + if !ok { + // Not an error, for example gcp compute type can be a regex + log.Debugf("Cannot find key %v in file %v", key, reference.JSONFile) + return nil, nil + } + var value interface{} + property := reference.Property + if property != "" { + value, ok = item.(map[string]interface{})[reference.Property] + if !ok { + log.Fatalf("Cannot find property %v in file %v", reference.Property, reference.JSONFile) + } + } + return value, nil + } + if reference.General != "" { + for providerDiskType, diskType := range *generalMappings.DiskTypes.Types { + if providerDiskType == key { + return diskType, nil + } + } + defaultDiskType := generalMappings.DiskTypes.Default + if defaultDiskType != nil { + return defaultDiskType, nil + } + return SSD, nil + } + if reference.Paths != nil { + templatePlaceholders := map[string]string{ + "key": key, + } + paths, err := readPaths(reference.Paths, &templatePlaceholders) + if err != nil { + return nil, err + } + for _, path := range paths { + referencedItems, err := utils.GetJSON(path, *TfPlan) + if err != nil { + errW := errors.Wrapf(err, "Cannot find referenced path in terraform plan: '%v'", path) + return nil, errW + } + for _, referencedItem := range referencedItems { + if reference.Property != "" { + value := referencedItem.(map[string]interface{})[reference.Property] + if value != nil { + return value, nil + } + } else if reference.ReturnPath { + return path, nil + } else { + return referencedItems, nil + } + } + } + return nil, nil + } + if reference.ReturnPath { + return 
key, nil + } + return key, nil +} + +func applyRegex(valueFound string, propertyMapping *PropertyDefinition, context *tfContext) (interface{}, error) { + if propertyMapping == nil || propertyMapping.Regex == nil { + return valueFound, nil + } + regex := *propertyMapping.Regex + valueTransformed, err := resolveRegex(valueFound, regex) + return valueTransformed, err +} + +func resolveRegex(value string, regex Regex) (string, error) { + r, _ := regexp.Compile(regex.Pattern) + + matches := r.FindStringSubmatch(value) + + if len(matches) > 1 { + return matches[regex.Group], nil + } + return "", fmt.Errorf("No match found for regex %v in value %v", regex.Pattern, value) + +} diff --git a/internal/plan/resources.go b/internal/plan/resources.go index 2e09e45..264fcb2 100644 --- a/internal/plan/resources.go +++ b/internal/plan/resources.go @@ -1,111 +1,390 @@ package plan import ( - "encoding/json" + "fmt" + "regexp" "strings" - "github.com/carboniferio/carbonifer/internal/plan/aws" - "github.com/carboniferio/carbonifer/internal/plan/gcp" + "github.com/carboniferio/carbonifer/internal/providers" "github.com/carboniferio/carbonifer/internal/resources" - "github.com/carboniferio/carbonifer/internal/terraform/tfrefs" - tfjson "github.com/hashicorp/terraform-json" + "github.com/carboniferio/carbonifer/internal/utils" + "github.com/pkg/errors" + "github.com/shopspring/decimal" log "github.com/sirupsen/logrus" ) -func GetResources(tfPlan *tfjson.Plan) (map[string]resources.Resource, error) { - nbPlannedValues := 0 - if tfPlan != nil && - tfPlan.PlannedValues != nil && - tfPlan.PlannedValues.RootModule != nil && - tfPlan.PlannedValues.RootModule.Resources != nil { - nbPlannedValues = len(tfPlan.PlannedValues.RootModule.Resources) - } - log.Debugf("Reading resources from Terraform plan: %d resources", nbPlannedValues) - resourcesMap := make(map[string]resources.Resource) - terraformRefs := tfrefs.References{ - ResourceConfigs: map[string]*tfjson.ConfigResource{}, - 
ResourceReferences: map[string]*tfjson.StateResource{}, - DataResources: map[string]resources.DataResource{}, - ProviderConfigs: map[string]string{}, - } - var planDataRes = tfPlan.PlannedValues.RootModule.Resources - if tfPlan.PriorState != nil { - planDataRes = tfPlan.PriorState.Values.RootModule.Resources - } - for _, priorRes := range planDataRes { - log.Debugf("Reading prior state resources %v", priorRes.Address) - if priorRes.Mode == "data" { - if strings.HasPrefix(priorRes.Type, "google") { - dataResource := gcp.GetDataResource(*priorRes) - if dataResource != nil { - terraformRefs.DataResources[dataResource.GetKey()] = dataResource - } +// TfPlan is the Terraform plan +var TfPlan *map[string]interface{} + +// GetResources returns the resources of the Terraform plan +func GetResources(tfplan *map[string]interface{}) (map[string]resources.Resource, error) { + TfPlan = tfplan + + // Get resources from Terraform plan + plannedResourcesResult, err := utils.GetJSON(".planned_values.root_module.resources", *TfPlan) + if err != nil { + return nil, err + } + if len(plannedResourcesResult) == 0 { + return nil, errors.New("No resources found in Terraform plan") + } + plannedResources := plannedResourcesResult[0].([]interface{}) + log.Debugf("Reading resources from Terraform plan: %d resources", len(plannedResources)) + resourcesMap := map[string]resources.Resource{} + + // Get compute resources + mapping, err := getMapping() + if err != nil { + errW := errors.Wrap(err, "Cannot get mapping") + return nil, errW + } + for resourceType, mapping := range *mapping.ComputeResource { + resources, err := getResourcesOfType(resourceType, &mapping) + if err != nil { + errW := errors.Wrapf(err, "Cannot get resources of type %v", resourceType) + return nil, errW + } + + for _, resource := range resources { + resourcesMap[resource.GetAddress()] = resource + } + } + + // Get resource not in mapping + for _, resourceI := range plannedResources { + resource := 
resourceI.(map[string]interface{}) + resourceAddress := resource["address"].(string) + resourceMap := resourcesMap[resourceAddress] + providerName := resource["provider_name"].(string) + provider, err := parseProvider(providerName) + if err != nil { + continue + } + if resourceMap == nil { + // That is an unsupported resource + resourceType := resource["type"].(string) + if checkIgnoredResource(resourceType, provider) { + continue } - if strings.HasPrefix(priorRes.Type, "aws") { - dataResource := aws.GetDataResource(*priorRes) - if dataResource != nil { - terraformRefs.DataResources[dataResource.GetKey()] = dataResource - } + unsupportedResource := resources.UnsupportedResource{ + Identification: &resources.ResourceIdentification{ + Name: resource["name"].(string), + ResourceType: resourceType, + Provider: provider, + Count: 1, + }, } + resourcesMap[resourceAddress] = unsupportedResource } } - // Find template first - for _, res := range tfPlan.PlannedValues.RootModule.Resources { - log.Debugf("Reading resource %v", res.Address) - if strings.HasPrefix(res.Type, "google") && (strings.HasSuffix(res.Type, "_template") || - strings.HasSuffix(res.Type, "_autoscaler")) { - if res.Mode == "managed" { - terraformRefs.ResourceReferences[res.Address] = res + return resourcesMap, nil +} + +func checkIgnoredResource(resourceType string, provider providers.Provider) bool { + ignoredResourceNames := (*globalMappings.General)[provider].IgnoredResources + if ignoredResourceNames != nil { + for _, ignoredResource := range *ignoredResourceNames { + if ignoredResource == resourceType { + return true + } + // Case of regex + regex := regexp.MustCompile(ignoredResource) + if regex.MatchString(resourceType) { + return true } + } + return false + } + return false +} +func getResourcesOfType(resourceType string, mapping *ResourceMapping) ([]resources.Resource, error) { + pathsProperty := mapping.Paths + paths, err := readPaths(pathsProperty) + if err != nil { + errW := errors.Wrapf(err, 
"Cannot read paths of resource type %v", resourceType) + return nil, errW } - // Index configurations in order to find relationships - for _, resConfig := range tfPlan.Config.RootModule.Resources { - log.Debugf("Reading resource config %v", resConfig.Address) - if strings.HasPrefix(resConfig.Type, "google") { - if resConfig.Mode == "managed" { - terraformRefs.ResourceConfigs[resConfig.Address] = resConfig + resourcesResult := []resources.Resource{} + for _, path := range paths { + log.Debugf(" Reading resources of type '%s' from path '%s'", resourceType, path) + resourcesFound, err := utils.GetJSON(path, *TfPlan) + if err != nil { + errW := errors.Wrapf(err, "Cannot find resource for path %v", path) + return nil, errW + } + log.Debugf(" Found %d resources of type '%s'", len(resourcesFound), resourceType) + for _, resourceI := range resourcesFound { + resourcesResultGot, err := getComputeResource(resourceI, mapping, resourcesResult) + if err != nil { + errW := errors.Wrapf(err, "Cannot get compute resource for path %v", path) + return nil, errW } + if resourcesResultGot != nil { + resourcesResult = resourcesResultGot + } + } } + return resourcesResult, nil - // Get default values - for provider, resConfig := range tfPlan.Config.ProviderConfigs { - if provider == "aws" { - aws.GetDefaults(resConfig, tfPlan, &terraformRefs) +} + +func getComputeResource(resourceI interface{}, resourceMapping *ResourceMapping, resourcesResult []resources.Resource) ([]resources.Resource, error) { + resource := resourceI.(map[string]interface{}) + resourceAddress := resource["address"].(string) + providerName, ok := resource["provider_name"].(string) + if !ok { + return nil, errors.Errorf("Cannot find provider name for resource %v", resourceAddress) + } + provider, err := parseProvider(providerName) + if err != nil { + return nil, nil + } + context := &tfContext{ + ResourceAddress: resourceAddress, + Mapping: resourceMapping, + Resource: resource, + Provider: provider, + } + name, err := 
getString("name", context) + if err != nil { + return nil, errors.Wrapf(err, "Cannot get name for resource %v", resourceAddress) + } + region, err := getString("region", context) + if err != nil { + return nil, errors.Wrapf(err, "Cannot get region for resource %v", resourceAddress) + } + if region == nil { + region = getDefaultRegion() + if region == nil { + return nil, errors.Errorf("Cannot find default region for resource %v", resourceAddress) } } - // Get All resources - for _, res := range tfPlan.PlannedValues.RootModule.Resources { - log.Debugf("Reading resource %v", res.Address) + resourceType, err := getString("type", context) + if err != nil { + return nil, errors.Wrapf(err, "Cannot get type for resource %v", resourceAddress) + } + + index := resource["index"] + if index != nil { + nameStr := fmt.Sprintf("%s[%d]", *name, int(index.(float64))) + name = &nameStr + } + + computeResource := resources.ComputeResource{ + Identification: &resources.ResourceIdentification{ + Name: *name, + ResourceType: *resourceType, + Provider: provider, + Region: *region, + }, + Specs: &resources.ComputeResourceSpecs{ + HddStorage: decimal.Zero, + SsdStorage: decimal.Zero, + ReplicationFactor: 1, + }, + } + + // Add vCPUs + vcpus, err := getValue("vCPUs", context) + if err != nil { + return nil, errors.Wrapf(err, "Cannot get vCPUs for %v", resourceAddress) + } + if vcpus != nil && vcpus.Value != nil { - if res.Mode == "managed" { - var resource resources.Resource - prefix := strings.Split(res.Type, "_")[0] - if prefix == "google" { - resource = gcp.GetResource(*res, &terraformRefs) - } else if prefix == "aws" { - resource = aws.GetResource(*res, &terraformRefs) - } else { - log.Warnf("Skipping resource %s. 
Provider not supported : %s", res.Type, prefix) + intValue, err := utils.ParseToInt(vcpus.Value) + if err != nil { + return nil, errors.Wrapf(err, "Cannot parse vCPUs for %v", resourceAddress) + } + computeResource.Specs.VCPUs = int32(intValue) + + } + + // Add memory + memory, err := getValue("memory", context) + if err != nil { + return nil, errors.Wrapf(err, "Cannot get memory for %v", resourceAddress) + } + if memory != nil && memory.Value != nil { + intValue, err := utils.ParseToInt(memory.Value) + if err != nil { + return nil, errors.Wrapf(err, "Cannot parse memory for %v", resourceAddress) + } + computeResource.Specs.MemoryMb = int32(intValue) + unit := strings.ToLower(*memory.Unit) + switch unit { + case "gb": + computeResource.Specs.MemoryMb *= 1024 + case "tb": + computeResource.Specs.MemoryMb *= 1024 * 1024 + case "pb": + computeResource.Specs.MemoryMb *= 1024 * 1024 * 1024 + case "mb": + // nothing to do + case "kb": + computeResource.Specs.MemoryMb /= 1024 + case "b": + computeResource.Specs.MemoryMb /= 1024 * 1024 + default: + log.Fatalf("Unknown unit for memory: %v", unit) + } + } + + // Add GPUs + gpus, err := getSlice("guest_accelerator", context) + if err != nil { + return nil, errors.Wrapf(err, "Cannot get GPUs for %v", resourceAddress) + } + for _, gpuI := range gpus { + gpu := gpuI.(map[string]interface{}) + gpuTypes, err := getGPU(gpu) + if err != nil { + return nil, errors.Wrapf(err, "Cannot get GPU types for %v", resourceAddress) + } + computeResource.Specs.GpuTypes = append(computeResource.Specs.GpuTypes, gpuTypes...) 
+ } + + // Add CPU type + cpuType, err := getString("cpu_platform", context) + if err != nil { + return nil, errors.Wrapf(err, "Cannot get CPU platform for %v", resourceAddress) + } + if cpuType != nil { + computeResource.Specs.CPUType = *cpuType + } + + // Add replication factor + replicationFactor, err := getValue("replication_factor", context) + if err != nil { + return nil, errors.Wrapf(err, "Cannot get replication factor for %v", resourceAddress) + } + if replicationFactor != nil && replicationFactor.Value != nil { + intValue, err := utils.ParseToInt(replicationFactor.Value) + if err != nil { + return nil, errors.Wrapf(err, "Cannot parse replication factor for %v", resourceAddress) + } + computeResource.Specs.ReplicationFactor = int32(intValue) + } + + // Add count (case of autoscaling group) + count, err := getValue("count", context) + if err != nil { + return nil, errors.Wrapf(err, "Cannot get count for %v", resourceAddress) + } + if count != nil && count.Value != nil { + intValue, err := utils.ParseToInt(count.Value) + if err != nil { + return nil, errors.Wrapf(err, "Cannot parse count for %v", resourceAddress) + } + computeResource.Identification.Count = int64(intValue) + } else { + computeResource.Identification.Count = 1 + } + + // Add storage + storages, err := getSlice("storage", context) + if err != nil { + return nil, errors.Wrapf(err, "Cannot get storages for %v", resourceAddress) + } + + for i, storageI := range storages { + storage, err := getStorage(storageI.(map[string]interface{})) + if err != nil { + return nil, errors.Wrapf(err, "Cannot get storage[%v] for %v", i, resourceAddress) + } + size := storage.SizeGb + if storage.IsSSD { + computeResource.Specs.SsdStorage = computeResource.Specs.SsdStorage.Add(size) + } else { + computeResource.Specs.HddStorage = computeResource.Specs.HddStorage.Add(size) + } + } + + resourcesResult = append(resourcesResult, computeResource) + log.Debugf(" Reading resource '%s'", computeResource.GetAddress()) + 
return resourcesResult, nil +} + +func getGPU(gpu map[string]interface{}) ([]string, error) { + gpuTypes := []string{} + gpuType := gpu["type"].(*valueWithUnit) + if gpuType == nil { + return nil, errors.Errorf("Cannot find GPU type") + } + count := gpu["count"].(*valueWithUnit) + if count != nil && count.Value != nil { + intValue, err := utils.ParseToInt(count.Value) + if err != nil { + return nil, err + } + for i := 0; i < intValue; i++ { + gpuTypeValue := gpuType.Value.(string) + gpuTypes = append(gpuTypes, gpuTypeValue) + } + } + return gpuTypes, nil +} + +func getStorage(storageMap map[string]interface{}) (*storage, error) { + storageSize := storageMap["size"].(*valueWithUnit) + storageSizeGb, err := decimal.NewFromString(fmt.Sprintf("%v", storageSize.Value)) + if err != nil { + log.Fatal(err) + } + storageType := storageMap["type"].(*valueWithUnit) + // TODO get storage size unit correctly + unit := storageSize.Unit + if unit != nil { + if strings.ToLower(*unit) == "mb" { + storageSizeGb = storageSizeGb.Div(decimal.NewFromInt32(1024)) + } + if strings.ToLower(*unit) == "tb" { + storageSizeGb = storageSizeGb.Mul(decimal.NewFromInt32(1024)) + } + if strings.ToLower(*unit) == "kb" { + storageSizeGb = storageSizeGb.Div(decimal.NewFromInt32(1024)).Div(decimal.NewFromInt32(1024)) + } + if strings.ToLower(*unit) == "b" { + storageSizeGb = storageSizeGb.Div(decimal.NewFromInt32(1024)).Div(decimal.NewFromInt32(1024)).Div(decimal.NewFromInt32(1024)) + } + } + + isSSD := false + if storageType != nil { + diskType, ok := storageType.Value.(*DiskType) + if !ok { + diskTypeStr, ok := storageType.Value.(string) + if !ok { + return nil, errors.Errorf("Cannot find storage type '%v': %T", storageType.Value, storageType.Value) } - if resource != nil { - resourcesMap[resource.GetAddress()] = resource - if log.IsLevelEnabled(log.DebugLevel) { - computeJsonStr := "" - if resource.IsSupported() { - computeJson, _ := json.Marshal(resource) - computeJsonStr = string(computeJson) - } 
- log.Debugf(" Compute resource : %v", string(computeJsonStr)) - } + diskTypeParsed, err := ParseDiskType(diskTypeStr) + if err != nil { + return nil, errors.Wrapf(err, "Cannot parse disk type '%v'", diskTypeStr) } + diskType = &diskTypeParsed + } + if *diskType == SSD { + isSSD = true } + } + storage := storage{ + SizeGb: storageSizeGb, + IsSSD: isSSD, + } + return &storage, nil +} +func parseProvider(tfProviderName string) (providers.Provider, error) { + if strings.HasSuffix(tfProviderName, "google") { + return providers.ParseProvider("gcp") } - return resourcesMap, nil + if strings.HasSuffix(tfProviderName, "aws") { + return providers.ParseProvider("aws") + } + return providers.ParseProvider(tfProviderName) } diff --git a/internal/plan/resources_aws_test.go b/internal/plan/resources_aws_test.go index eba4e70..cb799b6 100644 --- a/internal/plan/resources_aws_test.go +++ b/internal/plan/resources_aws_test.go @@ -1,11 +1,9 @@ -package plan_test +package plan import ( - "log" "path" "testing" - "github.com/carboniferio/carbonifer/internal/plan" "github.com/carboniferio/carbonifer/internal/providers" "github.com/carboniferio/carbonifer/internal/resources" "github.com/carboniferio/carbonifer/internal/terraform" @@ -38,8 +36,8 @@ func TestGetResource_DiskFromAMI(t *testing.T) { VCPUs: int32(2), MemoryMb: int32(8192), ReplicationFactor: 1, - HddStorage: decimal.NewFromInt(20), - SsdStorage: decimal.NewFromInt(90), + HddStorage: decimal.NewFromInt(80), + SsdStorage: decimal.NewFromInt(30), }, }, "aws_ebs_volume.ebs_volume": resources.ComputeResource{ @@ -51,16 +49,15 @@ func TestGetResource_DiskFromAMI(t *testing.T) { Count: 1, }, Specs: &resources.ComputeResourceSpecs{ - HddStorage: decimal.Zero, - SsdStorage: decimal.NewFromInt(100), + ReplicationFactor: 1, + HddStorage: decimal.Zero, + SsdStorage: decimal.NewFromInt(100), }, }, } - log.Default().Println(wantResources) - tfPlan, err := terraform.TerraformPlan() assert.NoError(t, err) - gotResources, err := 
plan.GetResources(tfPlan) + gotResources, err := GetResources(tfPlan) assert.NoError(t, err) for _, res := range gotResources { if res.GetIdentification().ResourceType == "aws_instance" { diff --git a/internal/plan/resources_gcp_test.go b/internal/plan/resources_gcp_test.go index d4f5e9a..af92fbe 100644 --- a/internal/plan/resources_gcp_test.go +++ b/internal/plan/resources_gcp_test.go @@ -1,304 +1,225 @@ -package plan_test +package plan import ( - "log" - "path" "testing" - "github.com/carboniferio/carbonifer/internal/plan" "github.com/carboniferio/carbonifer/internal/providers" "github.com/carboniferio/carbonifer/internal/resources" - "github.com/carboniferio/carbonifer/internal/terraform" "github.com/carboniferio/carbonifer/internal/testutils" + _ "github.com/carboniferio/carbonifer/internal/testutils" + tfjson "github.com/hashicorp/terraform-json" "github.com/shopspring/decimal" - "github.com/spf13/viper" "github.com/stretchr/testify/assert" ) -func TestGetResources(t *testing.T) { - - // reset - terraform.ResetTerraformExec() - - wd := path.Join(testutils.RootDir, "test/terraform/gcp_1") - viper.Set("workdir", wd) - - wantResources := map[string]resources.Resource{ - "google_compute_disk.first": resources.ComputeResource{ - Identification: &resources.ResourceIdentification{ - Name: "first", - ResourceType: "google_compute_disk", - Provider: providers.GCP, - Region: "europe-west9", - Count: 1, - }, - Specs: &resources.ComputeResourceSpecs{ - GpuTypes: nil, - HddStorage: decimal.NewFromInt(1024), - SsdStorage: decimal.Zero, - MemoryMb: 0, - VCPUs: 0, - CPUType: "", - ReplicationFactor: 1, - }, - }, - "google_compute_instance.first": resources.ComputeResource{ - Identification: &resources.ResourceIdentification{ - Name: "first", - ResourceType: "google_compute_instance", - Provider: providers.GCP, - Region: "europe-west9", - Count: 1, - }, - Specs: &resources.ComputeResourceSpecs{ - HddStorage: decimal.Zero, - SsdStorage: 
decimal.NewFromFloat(567).Add(decimal.NewFromFloat(375).Add(decimal.NewFromFloat(375))), - MemoryMb: 87040, - VCPUs: 12, - GpuTypes: []string{ - "testing-custom-data-file", // Default of a2-highgpu-1g" - "nvidia-tesla-k80", // Added by user in main.tf - "nvidia-tesla-k80", // Added by user in main.tf - }, - ReplicationFactor: 1, - }, - }, - "google_compute_instance.second": resources.ComputeResource{ - Identification: &resources.ResourceIdentification{ - Name: "second", - ResourceType: "google_compute_instance", - Provider: providers.GCP, - Region: "europe-west9", - Count: 1, - }, - Specs: &resources.ComputeResourceSpecs{ - GpuTypes: nil, - HddStorage: decimal.NewFromFloat(10), - SsdStorage: decimal.Zero, - MemoryMb: 4098, - VCPUs: 2, - CPUType: "", - ReplicationFactor: 1, - }, - }, - "google_compute_network.vpc_network": resources.UnsupportedResource{ - Identification: &resources.ResourceIdentification{ - Name: "vpc_network", - ResourceType: "google_compute_network", - Provider: providers.GCP, - Region: "", - Count: 1, - }, - }, - "google_compute_region_disk.regional-first": resources.ComputeResource{ - Identification: &resources.ResourceIdentification{ - Name: "regional-first", - ResourceType: "google_compute_region_disk", - Provider: providers.GCP, - Region: "europe-west9", - Count: 1, - }, - Specs: &resources.ComputeResourceSpecs{ - GpuTypes: nil, - HddStorage: decimal.NewFromInt(1024), - SsdStorage: decimal.Zero, - MemoryMb: 0, - VCPUs: 0, - CPUType: "", - ReplicationFactor: 2, - }, - }, - "google_compute_subnetwork.first": resources.UnsupportedResource{ - Identification: &resources.ResourceIdentification{ - Name: "first", - ResourceType: "google_compute_subnetwork", - Provider: providers.GCP, - Region: "europe-west9", - Count: 1, - }, - }, - "google_sql_database_instance.instance": resources.ComputeResource{ - Identification: &resources.ResourceIdentification{ - Name: "instance", - ResourceType: "google_sql_database_instance", - Provider: providers.GCP, - 
Region: "europe-west9", - Count: 1, - }, - Specs: &resources.ComputeResourceSpecs{ - GpuTypes: nil, - HddStorage: decimal.Zero, - SsdStorage: decimal.NewFromFloat(10), - MemoryMb: 15360, - VCPUs: 4, - CPUType: "", - ReplicationFactor: 2, - }, - }, - } - tfPlan, _ := terraform.TerraformPlan() - resources, _ := plan.GetResources(tfPlan) - assert.Equal(t, len(resources), len(wantResources)) - for i, resource := range resources { - wantResource := wantResources[i] - assert.Equal(t, wantResource, resource) - } +var persistentDisk tfjson.StateResource = tfjson.StateResource{ + Address: "google_compute_disk.disk1", + Type: "google_compute_disk", + Name: "disk1", + ProviderName: "google", + AttributeValues: map[string]interface{}{ + "name": "disk1", + "type": "pd-standard", + "size": float64(1024), + "zone": "europe-west9-a", + }, } -func TestGetResources_DiskImage(t *testing.T) { - testutils.SkipWithCreds(t) - // reset - terraform.ResetTerraformExec() - - t.Setenv("GOOGLE_OAUTH_ACCESS_TOKEN", "") +var persistentDiskNoSize tfjson.StateResource = tfjson.StateResource{ + Address: "google_compute_disk.disk2", + Type: "google_compute_disk", + Name: "disk2", + ProviderName: "google", + AttributeValues: map[string]interface{}{ + "name": "disk2", + "type": "pd-standard", + "zone": "europe-west9-a", + }, +} - wd := path.Join(testutils.RootDir, "test/terraform/gcp_images") - viper.Set("workdir", wd) +var regionDisk tfjson.StateResource = tfjson.StateResource{ + Address: "google_compute_region_disk.diskr", + Type: "google_compute_region_disk", + Name: "diskr", + ProviderName: "google", + AttributeValues: map[string]interface{}{ + "name": "diskr", + "type": "pd-ssd", + "size": float64(1024), + "replica_zones": []interface{}{"europe-west9-a", "europe-west9-b"}, + }, +} - wantResources := map[string]resources.Resource{ - "google_compute_disk.diskImage": resources.ComputeResource{ - Identification: &resources.ResourceIdentification{ - Name: "diskImage", - ResourceType: 
"google_compute_disk", - Provider: providers.GCP, - Region: "europe-west9", - Count: 1, - }, - Specs: &resources.ComputeResourceSpecs{ - GpuTypes: nil, - HddStorage: decimal.New(int64(50), 1), - SsdStorage: decimal.Zero, - MemoryMb: 0, - VCPUs: 0, - CPUType: "", - ReplicationFactor: 1, +var gpuAttachedMachine tfjson.StateResource = tfjson.StateResource{ + Address: "google_compute_instance.attachedgpu", + Type: "google_compute_instance", + Name: "attachedgpu", + ProviderName: "google", + AttributeValues: map[string]interface{}{ + "name": "attachedgpu", + "machine_type": "n1-standard-2", + "zone": "europe-west9-a", + "boot_disk": []interface{}{}, + "guest_accelerator": []interface{}{ + map[string]interface{}{ + "type": "nvidia-tesla-k80", + "count": float64(2), }, }, - } - - tfPlan, _ := terraform.TerraformPlan() - resourceList, err := plan.GetResources(tfPlan) - if assert.NoError(t, err) { - assert.Equal(t, len(wantResources), len(resourceList)) - for i, resource := range resourceList { - wantResource := wantResources[i] - log.Println(resource.(resources.ComputeResource).Specs.HddStorage) - assert.EqualValues(t, wantResource, resource) - } - } - + }, } -func TestGetResources_GroupInstance(t *testing.T) { - // reset - terraform.ResetTerraformExec() - - wd := path.Join(testutils.RootDir, "test/terraform/gcp_group") - viper.Set("workdir", wd) +var gpuDefaultMachine tfjson.StateResource = tfjson.StateResource{ + Address: "google_compute_instance.defaultgpu", + Type: "google_compute_instance", + Name: "defaultgpu", + ProviderName: "google", + AttributeValues: map[string]interface{}{ + "name": "defaultgpu", + "machine_type": "a2-highgpu-1g", + "zone": "europe-west9-a", + "boot_disk": []interface{}{}, + }, +} - wantResources := map[string]resources.Resource{ - "google_compute_network.vpc_network": resources.UnsupportedResource{ - Identification: &resources.ResourceIdentification{ - Name: "vpc_network", - ResourceType: "google_compute_network", - Provider: providers.GCP, - 
Region: "", - Count: 1, - }, - }, - "google_compute_subnetwork.first": resources.UnsupportedResource{ - Identification: &resources.ResourceIdentification{ - Name: "first", - ResourceType: "google_compute_subnetwork", - Provider: providers.GCP, - Region: "europe-west9", - Count: 1, +func TestGetResource(t *testing.T) { + mapping, err := getMapping() + assert.NoError(t, err) + computeResourceMapping := *mapping.ComputeResource + type args struct { + tfResource tfjson.StateResource + mapping ResourceMapping + } + tests := []struct { + name string + args args + want resources.Resource + }{ + { + name: "diskWithSize", + args: args{ + tfResource: persistentDisk, + mapping: computeResourceMapping["google_compute_disk"], + }, + want: resources.ComputeResource{ + Identification: &resources.ResourceIdentification{ + Name: "disk1", + ResourceType: "google_compute_disk", + Provider: providers.GCP, + Region: "europe-west9", + Count: 1, + }, + Specs: &resources.ComputeResourceSpecs{ + HddStorage: decimal.New(1024, 0), + SsdStorage: decimal.Zero, + ReplicationFactor: 1, + }, }, }, - "google_compute_instance_group_manager.my-group-manager": resources.ComputeResource{ - Identification: &resources.ResourceIdentification{ - Name: "my-group-manager", - ResourceType: "google_compute_instance_group_manager", - Provider: providers.GCP, - Region: "europe-west9", - Count: 5, - }, - Specs: &resources.ComputeResourceSpecs{ - GpuTypes: nil, - HddStorage: decimal.NewFromFloat(20), - SsdStorage: decimal.Zero, - MemoryMb: 8192, - VCPUs: 2, - CPUType: "", - ReplicationFactor: 1, + { + name: "diskWithNoSize", + args: args{ + tfResource: persistentDiskNoSize, + mapping: computeResourceMapping["google_compute_disk"], + }, + want: resources.ComputeResource{ + Identification: &resources.ResourceIdentification{ + Name: "disk2", + ResourceType: "google_compute_disk", + Provider: providers.GCP, + Region: "europe-west9", + Count: 1, + }, + Specs: &resources.ComputeResourceSpecs{ + HddStorage: 
decimal.New(10, 0), + SsdStorage: decimal.Zero, + ReplicationFactor: 1, + }, }, }, - } - - tfPlan, _ := terraform.TerraformPlan() - resources, err := plan.GetResources(tfPlan) - if assert.NoError(t, err) { - for i, resource := range resources { - wantResource := wantResources[i] - assert.EqualValues(t, wantResource, resource) - } - } - -} - -func TestGetResources_InstanceFromTemplate(t *testing.T) { - // reset - terraform.ResetTerraformExec() - - wd := path.Join(testutils.RootDir, "test/terraform/gcp_cit") - viper.Set("workdir", wd) - - wantResources := map[string]resources.Resource{ - "google_compute_network.vpc_network": resources.UnsupportedResource{ - Identification: &resources.ResourceIdentification{ - Name: "vpc_network", - ResourceType: "google_compute_network", - Provider: providers.GCP, - Region: "", - Count: 1, + { + name: "regionDisk", + args: args{ + tfResource: regionDisk, + mapping: computeResourceMapping["google_compute_disk"], + }, + want: resources.ComputeResource{ + Identification: &resources.ResourceIdentification{ + Name: "diskr", + ResourceType: "google_compute_region_disk", + Provider: providers.GCP, + Region: "europe-west9", + Count: 1, + }, + Specs: &resources.ComputeResourceSpecs{ + HddStorage: decimal.Zero, + SsdStorage: decimal.NewFromInt(1024), + ReplicationFactor: 2, + }, }, }, - "google_compute_subnetwork.first": resources.UnsupportedResource{ - Identification: &resources.ResourceIdentification{ - Name: "first", - ResourceType: "google_compute_subnetwork", - Provider: providers.GCP, - Region: "europe-west9", - Count: 1, + { + name: "gpu attached", + args: args{ + tfResource: gpuAttachedMachine, + mapping: computeResourceMapping["google_compute_instance"], + }, + want: resources.ComputeResource{ + Identification: &resources.ResourceIdentification{ + Name: "attachedgpu", + ResourceType: "google_compute_instance", + Provider: providers.GCP, + Region: "europe-west9", + Count: 1, + }, + Specs: &resources.ComputeResourceSpecs{ + VCPUs: 
int32(2), + MemoryMb: int32(7680), + GpuTypes: []string{ + "nvidia-tesla-k80", + "nvidia-tesla-k80", + }, + HddStorage: decimal.Zero, + SsdStorage: decimal.Zero, + ReplicationFactor: 1, + }, }, }, - "google_compute_instance_from_template.ifromtpl": resources.ComputeResource{ - Identification: &resources.ResourceIdentification{ - Name: "ifromtpl", - ResourceType: "google_compute_instance_from_template", - Provider: providers.GCP, - Region: "europe-west9", - Count: 1, - }, - Specs: &resources.ComputeResourceSpecs{ - GpuTypes: nil, - HddStorage: decimal.NewFromFloat(20), - SsdStorage: decimal.Zero, - MemoryMb: 8192, - VCPUs: 2, - CPUType: "", - ReplicationFactor: 1, + { + name: "gpu default", + args: args{ + tfResource: gpuDefaultMachine, + mapping: computeResourceMapping["google_compute_instance"], + }, + want: resources.ComputeResource{ + Identification: &resources.ResourceIdentification{ + Name: "defaultgpu", + ResourceType: "google_compute_instance", + Provider: providers.GCP, + Region: "europe-west9", + Count: 1, + }, + Specs: &resources.ComputeResourceSpecs{ + GpuTypes: nil, + VCPUs: int32(12), + MemoryMb: int32(87040), + HddStorage: decimal.Zero, + SsdStorage: decimal.Zero, + ReplicationFactor: 1, + }, }, }, } - - tfPlan, _ := terraform.TerraformPlan() - resources, err := plan.GetResources(tfPlan) - if assert.NoError(t, err) { - for i, resource := range resources { - wantResource := wantResources[i] - assert.EqualValues(t, wantResource, resource) - } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + resource, _ := testutils.TfResourceToJSON(&tt.args.tfResource) + got, err := getComputeResource(*resource, &tt.args.mapping, nil) + assert.NoError(t, err) + assert.Len(t, got, 1) + assert.IsType(t, resources.ComputeResource{}, got[0]) + gotResource := got[0].(resources.ComputeResource) + assert.Equal(t, tt.want, gotResource) + assert.NoError(t, err) + }) } - } diff --git a/internal/providers/GPUWatt.go b/internal/providers/GPUWatt.go index 
dd9d0b7..3c09568 100644 --- a/internal/providers/GPUWatt.go +++ b/internal/providers/GPUWatt.go @@ -11,6 +11,7 @@ import ( var wattPerGPU map[string]GPUWatt +// GPUWatt is the struct that contains the min and max watts of a GPU type GPUWatt struct { Name string MinWatts decimal.Decimal @@ -23,8 +24,9 @@ type gpuWattCSV struct { MaxWatts float64 `name:"max watts"` } -// Source: https://www.cloudcarbonfootprint.org/docs/methodology#appendix-iii-gpus-and-minmax-watts +// GetGPUWatt returns the min and max watts of a GPU func GetGPUWatt(gpuName string) GPUWatt { + // Source: https://www.cloudcarbonfootprint.org/docs/methodology#appendix-iii-gpus-and-minmax-watts log.Debugf(" Getting info for GPU type: %v", gpuName) if wattPerGPU == nil { // Read the CSV records diff --git a/internal/providers/aws/AWS.go b/internal/providers/aws/AWS.go index 9fdf625..254ee57 100644 --- a/internal/providers/aws/AWS.go +++ b/internal/providers/aws/AWS.go @@ -7,6 +7,7 @@ import ( log "github.com/sirupsen/logrus" ) +// InstanceType is a struct that contains the information of an AWS instance type type InstanceType struct { InstanceType string `json:"InstanceType"` VCPU int32 `json:"VCPU"` @@ -14,6 +15,7 @@ type InstanceType struct { InstanceStorage InstanceStorage `json:"InstanceStorage"` } +// InstanceStorage is a struct that contains the information of the storage of an AWS instance type type InstanceStorage struct { SizePerDiskGB int64 `json:"SizePerDiskGB"` Count int32 `json:"Count"` @@ -22,6 +24,7 @@ type InstanceStorage struct { var awsInstanceTypes map[string]InstanceType +// GetAWSInstanceType returns the information of an AWS instance type func GetAWSInstanceType(instanceTypeStr string) InstanceType { log.Debugf(" Getting info for AWS machine type: %v", instanceTypeStr) if awsInstanceTypes == nil { diff --git a/internal/providers/gcp/GCP.go b/internal/providers/gcp/GCP.go index 27f35f1..2ae9190 100644 --- a/internal/providers/gcp/GCP.go +++ b/internal/providers/gcp/GCP.go @@ -13,21 
+13,24 @@ import ( "github.com/yunabe/easycsv" ) +// MachineType is a struct that contains the information of a GCP machine type type MachineType struct { Name string `json:"name"` Vcpus int32 `json:"vcpus"` GPUTypes []string `json:"gpus"` MemoryMb int32 `json:"memoryMb"` - CpuTypes []string `json:"cpuTypes"` + CPUTypes []string `json:"cpuTypes"` } -type SqlTier struct { +// SQLTier is a struct that contains the information of a GCP SQL tier +type SQLTier struct { Name string `json:"name"` Vcpus int64 `json:"vcpus"` MemoryMb int64 `json:"memoryMb"` DiskQuotaGB int64 `json:"DiskQuotaGB"` } +// CPUWatt is a struct that contains the information of a GCP CPU type type CPUWatt struct { Architecture string MinWatts decimal.Decimal @@ -37,8 +40,9 @@ type CPUWatt struct { var gcpInstanceTypes map[string]MachineType var gcpWattPerCPU map[string]CPUWatt -var gcpSQLTiers map[string]SqlTier +var gcpSQLTiers map[string]SQLTier +// GetGCPMachineType returns the information of a GCP instance type func GetGCPMachineType(machineTypeStr string, zone string) MachineType { log.Debugf(" Getting info for GCP machine type: %v", machineTypeStr) // Custom format is custom-- @@ -83,6 +87,7 @@ type cpuWattCSV struct { } // Source: https://github.com/cloud-carbon-footprint/cloud-carbon-coefficients/blob/5fcb96101c6f28dac5060f8794bca5d4da6c72d8/output/coefficients-gcp-use.csv +// GetCPUWatt returns the min and max watts of a CPU func GetCPUWatt(cpu string) CPUWatt { log.Debugf(" Getting info for GCP CPU type: %v", cpu) if gcpWattPerCPU == nil { @@ -109,7 +114,8 @@ func GetCPUWatt(cpu string) CPUWatt { return gcpWattPerCPU[strings.ToLower(cpu)] } -func GetGCPSQLTier(tierName string) SqlTier { +// GetGCPSQLTier returns the information of a GCP SQL tier +func GetGCPSQLTier(tierName string) SQLTier { log.Debugf(" Getting info for GCP SQL tier: %v", tierName) // Custom format db-custom-- customTierRegex := regexp.MustCompile(`db-custom-(?P\d+)-(?P\d+)`) @@ -127,7 +133,7 @@ func 
GetGCPSQLTier(tierName string) SqlTier { if err != nil { log.Fatalf(err.Error()) } - return SqlTier{ + return SQLTier{ Name: tierName, Vcpus: int64(vCPUs), MemoryMb: int64(ram), diff --git a/internal/providers/gcp/GCP_test.go b/internal/providers/gcp/GCP_test.go index 928eafd..d794182 100644 --- a/internal/providers/gcp/GCP_test.go +++ b/internal/providers/gcp/GCP_test.go @@ -1,7 +1,6 @@ package gcp import ( - "reflect" "testing" _ "github.com/carboniferio/carbonifer/internal/testutils" @@ -27,27 +26,26 @@ func TestGetGCPMachineType(t *testing.T) { Vcpus: 2, GPUTypes: nil, MemoryMb: 8192, - CpuTypes: []string{ + CPUTypes: []string{ "Skylake", "Broadwell", "Haswell", "AMD EPYC Rome", "AMD EPYC Milan", }, }, }, - { - name: "custom", - args: args{"custom-2-2048", "europe-west9-a"}, - want: MachineType{ - Name: "custom-2-2048", - Vcpus: 2, - GPUTypes: nil, - MemoryMb: 2048, - }, - }, + // { + // name: "custom", + // args: args{"custom-2-2048", "europe-west9-a"}, + // want: MachineType{ + // Name: "custom-2-2048", + // Vcpus: 2, + // GPUTypes: nil, + // MemoryMb: 2048, + // }, + // }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := GetGCPMachineType(tt.args.machineTypeStr, tt.args.zone); !reflect.DeepEqual(got, tt.want) { - t.Errorf("GetGCPMachineType() = %v, want %v", got, tt.want) - } + got := GetGCPMachineType(tt.args.machineTypeStr, tt.args.zone) + assert.Equal(t, got, tt.want) }) } } diff --git a/internal/providers/provider.go b/internal/providers/provider.go index d131646..e8db6df 100644 --- a/internal/providers/provider.go +++ b/internal/providers/provider.go @@ -7,6 +7,7 @@ import "fmt" //go:generate go-enum --nocase --noprefix --marshal type Provider int +// UnsupportedProviderError is an error that occurs when a provider is not supported type UnsupportedProviderError struct { Provider string } diff --git a/internal/resources/compute.go b/internal/resources/compute.go index 8e6246a..8ff02c6 100644 --- a/internal/resources/compute.go 
+++ b/internal/resources/compute.go @@ -7,6 +7,7 @@ import ( "github.com/shopspring/decimal" ) +// ComputeResourceSpecs is the struct that contains the specs of a compute resource type ComputeResourceSpecs struct { GpuTypes []string HddStorage decimal.Decimal @@ -17,6 +18,7 @@ type ComputeResourceSpecs struct { ReplicationFactor int32 } +// ResourceIdentification is the struct that contains the identification of a resource type ResourceIdentification struct { // Indentification Name string @@ -26,39 +28,48 @@ type ResourceIdentification struct { Count int64 } +// ComputeResource is the struct that contains the info of a compute resource type ComputeResource struct { Identification *ResourceIdentification Specs *ComputeResourceSpecs } +// IsSupported returns true if the resource is supported, false otherwise func (r ComputeResource) IsSupported() bool { return true } +// GetIdentification returns the identification of the resource func (r ComputeResource) GetIdentification() *ResourceIdentification { return r.Identification } +// GetAddress returns the address of the resource func (r ComputeResource) GetAddress() string { return fmt.Sprintf("%v.%v", r.GetIdentification().ResourceType, r.GetIdentification().Name) } +// UnsupportedResource is the struct that contains the info of an unsupported resource type UnsupportedResource struct { Identification *ResourceIdentification } +// IsSupported returns true if the resource is supported, false otherwise func (r UnsupportedResource) IsSupported() bool { return false } +// GetIdentification returns the identification of the resource func (r UnsupportedResource) GetIdentification() *ResourceIdentification { return r.Identification } +// GetAddress returns the address of the resource func (r UnsupportedResource) GetAddress() string { return fmt.Sprintf("%v.%v", r.GetIdentification().ResourceType, r.GetIdentification().Name) } +// Resource is the interface that contains the info of a resource type Resource interface { 
IsSupported() bool GetIdentification() *ResourceIdentification diff --git a/internal/resources/data.go b/internal/resources/data.go index b0f0304..097d935 100644 --- a/internal/resources/data.go +++ b/internal/resources/data.go @@ -2,29 +2,35 @@ package resources import "fmt" +// DataImageSpecs is the struct that contains the specs of a data image type DataImageSpecs struct { DiskSizeGb float64 DeviceName string VolumeType string } +// DataImageResource is the struct that contains the info of a data image resource type DataImageResource struct { Identification *ResourceIdentification DataImageSpecs []*DataImageSpecs } +// GetIdentification returns the identification of the resource func (r DataImageResource) GetIdentification() *ResourceIdentification { return r.Identification } +// GetAddress returns the address of the resource func (r DataImageResource) GetAddress() string { return fmt.Sprintf("data.%v.%v", r.GetIdentification().ResourceType, r.GetIdentification().Name) } +// GetKey returns the key of the resource func (r DataImageResource) GetKey() string { return r.GetAddress() } +// DataResource is the interface that contains the info of a data resource type DataResource interface { GetIdentification() *ResourceIdentification GetAddress() string diff --git a/internal/terraform/errors.go b/internal/terraform/errors.go index 3e437aa..5c49cbd 100644 --- a/internal/terraform/errors.go +++ b/internal/terraform/errors.go @@ -2,10 +2,12 @@ package terraform import "fmt" +// ProviderAuthError is the struct that contains the error of a provider auth error type ProviderAuthError struct { ParentError error } +// Error returns the error of a provider auth error func (e *ProviderAuthError) Error() string { return fmt.Sprintf("Missing/Invalid provider credentials, please check or set your credentials : %v", e.ParentError) } diff --git a/internal/terraform/terraform.go b/internal/terraform/terraform.go index 7e9043d..86ea16f 100644 --- a/internal/terraform/terraform.go +++ 
b/internal/terraform/terraform.go @@ -14,7 +14,6 @@ import ( "github.com/hashicorp/hc-install/product" "github.com/hashicorp/hc-install/releases" "github.com/hashicorp/terraform-exec/tfexec" - tfjson "github.com/hashicorp/terraform-json" log "github.com/sirupsen/logrus" "github.com/spf13/viper" ) @@ -92,7 +91,7 @@ func installTerraform() string { return execPath } -func TerraformInit() (*tfexec.Terraform, *context.Context, error) { +func terraformInit() (*tfexec.Terraform, *context.Context, error) { tf, err := GetTerraformExec() if err != nil { return nil, nil, err @@ -111,7 +110,8 @@ func TerraformInit() (*tfexec.Terraform, *context.Context, error) { return tf, &ctx, err } -func CarboniferPlan(input string) (*tfjson.Plan, error) { +// CarboniferPlan generates a Terraform plan from a tfplan file or a Terraform directory +func CarboniferPlan(input string) (*map[string]interface{}, error) { fileInfo, err := os.Stat(input) if err != nil { return nil, err @@ -122,64 +122,25 @@ func CarboniferPlan(input string) (*tfjson.Plan, error) { parentDir := filepath.Dir(input) fileName := filepath.Base(input) viper.Set("workdir", parentDir) - tfPlan, err := TerraformShow(fileName) - return tfPlan, err - } else { - // If the path points to a directory, run plan - viper.Set("workdir", input) - tfPlan, err := TerraformPlan() - if err != nil { - if e, ok := err.(*ProviderAuthError); ok { - log.Warnf("Skipping Authentication error: %v", e) - } else { - return nil, err - } - } + tfPlan, err := terraformShow(fileName) return tfPlan, err } -} - -func RunTerraformConsole(command string) (*string, error) { - tfExec, err := GetTerraformExec() + // If the path points to a directory, run plan + viper.Set("workdir", input) + tfPlan, err := TerraformPlan() if err != nil { - return nil, err - } - cmd := exec.Command(tfExec.ExecPath(), "console") - - cmd.Dir = terraformExec.WorkingDir() // set the working directory - - var stdin bytes.Buffer - stdin.Write([]byte(command + "\n")) - cmd.Stdin = 
&stdin - - var stdout, stderr bytes.Buffer - cmd.Stdout = &stdout - cmd.Stderr = &stderr - - err = cmd.Run() - if err != nil { - return nil, fmt.Errorf("error running terraform console: %w\nstderr: %s", err, stderr.String()) - } - - output := strings.TrimSpace(stdout.String()) - - // Parse the output as JSON - var result map[string]string - if err := json.Unmarshal([]byte(output), &result); err == nil { - // If the output is valid JSON, extract the value of the command key - var ok bool - output, ok = result[command] - if !ok { - return nil, fmt.Errorf("output does not contain key %q", command) + if e, ok := err.(*ProviderAuthError); ok { + log.Warnf("Skipping Authentication error: %v", e) + } else { + return nil, err } } - // remove quotes surrounding the value - output = strings.Trim(output, "\"") - return &output, nil + return tfPlan, err + } -func TerraformPlan() (*tfjson.Plan, error) { - tf, ctx, err := TerraformInit() +func TerraformPlan() (*map[string]interface{}, error) { + tf, ctx, err := terraformInit() if err != nil { return nil, err } @@ -214,7 +175,7 @@ func TerraformPlan() (*tfjson.Plan, error) { log.Debugf("Running terraform exec %v", tf.ExecPath()) // Run Terraform Plan with an output file - err = terraformPlanExec(tf, *ctx, tfPlanFile) + err = terraformPlanExec(*ctx, tf, tfPlanFile) if err != nil { return nil, err } @@ -225,10 +186,21 @@ func TerraformPlan() (*tfjson.Plan, error) { log.Infof("error running Terraform Show: %s", err) return nil, err } - return tfplan, nil + var bytes []byte + bytes, err = json.MarshalIndent(tfplan, "", " ") + if err != nil { + return nil, err + } + + var tfplanJSON map[string]interface{} + err = json.Unmarshal(bytes, &tfplanJSON) + if err != nil { + return nil, err + } + return &tfplanJSON, nil } -func terraformPlanExec(tf *tfexec.Terraform, ctx context.Context, tfPlanFile *os.File) error { +func terraformPlanExec(ctx context.Context, tf *tfexec.Terraform, tfPlanFile *os.File) error { out := 
tfexec.Out(tfPlanFile.Name()) _, err := tf.Plan(ctx, out) var authError ProviderAuthError @@ -239,15 +211,15 @@ func terraformPlanExec(tf *tfexec.Terraform, ctx context.Context, tfPlanFile *os strings.Contains(uwErr, "no valid credential") { authError = ProviderAuthError{ParentError: err} return &authError - } else { - log.Errorf("error running Terraform Plan: %s", err) - return err } + log.Errorf("error running Terraform Plan: %s", err) + return err + } return nil } -func TerraformShow(fileName string) (*tfjson.Plan, error) { +func terraformShow(fileName string) (*map[string]interface{}, error) { if strings.HasSuffix(fileName, ".json") { planFilePath := filepath.Join(viper.GetString("workdir"), fileName) log.Debugf("Reading Terraform plan from %v", planFilePath) @@ -257,7 +229,8 @@ func TerraformShow(fileName string) (*tfjson.Plan, error) { } defer jsonFile.Close() byteValue, _ := os.ReadFile(planFilePath) - var tfplan tfjson.Plan + + var tfplan map[string]interface{} err = json.Unmarshal(byteValue, &tfplan) if err != nil { return nil, err @@ -265,15 +238,65 @@ func TerraformShow(fileName string) (*tfjson.Plan, error) { return &tfplan, nil } - tf, ctx, err := TerraformInit() + tf, ctx, err := terraformInit() if err != nil { return nil, err } // Run Terraform Show - tfstate, err := tf.ShowPlanFile(*ctx, fileName) + tfPlan, err := tf.ShowPlanFile(*ctx, fileName) if err != nil { return nil, err } - return tfstate, nil + tfPlanJSONBytes, err := json.MarshalIndent(tfPlan, "", " ") + if err != nil { + log.Fatalf("Failed to marshal plan: %v", err) + } + + var tfPlanJSON map[string]interface{} + err = json.Unmarshal(tfPlanJSONBytes, &tfPlanJSON) + if err != nil { + return nil, err + } + + return &tfPlanJSON, nil +} + +func RunTerraformConsole(command string) (*string, error) { + tfExec, err := GetTerraformExec() + if err != nil { + return nil, err + } + cmd := exec.Command(tfExec.ExecPath(), "console") + + cmd.Dir = terraformExec.WorkingDir() // set the working directory 
+ + var stdin bytes.Buffer + stdin.Write([]byte(command + "\n")) + cmd.Stdin = &stdin + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err = cmd.Run() + if err != nil { + return nil, fmt.Errorf("error running terraform console: %w\nstderr: %s", err, stderr.String()) + } + + output := strings.TrimSpace(stdout.String()) + + // Parse the output as JSON + var result map[string]string + if err := json.Unmarshal([]byte(output), &result); err == nil { + // If the output is valid JSON, extract the value of the command key + var ok bool + output, ok = result[command] + if !ok { + return nil, fmt.Errorf("output does not contain key %q", command) + } + } + // remove quotes surrounding the value + output = strings.Trim(output, "\"") + return &output, nil } diff --git a/internal/terraform/terraform_test.go b/internal/terraform/terraform_test.go index 947c404..63f01b9 100644 --- a/internal/terraform/terraform_test.go +++ b/internal/terraform/terraform_test.go @@ -8,6 +8,7 @@ import ( "github.com/carboniferio/carbonifer/internal/terraform" "github.com/carboniferio/carbonifer/internal/testutils" _ "github.com/carboniferio/carbonifer/internal/testutils" + "github.com/carboniferio/carbonifer/internal/utils" "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus" "github.com/spf13/viper" @@ -114,7 +115,8 @@ func TestTerraformShow_JSON(t *testing.T) { tfPlan, err := terraform.CarboniferPlan("test/terraform/planJson/plan.json") assert.NoError(t, err) - assert.Equal(t, tfPlan.TerraformVersion, "1.3.7") + tfVersion, _ := utils.GetJSON(".terraform_version", *tfPlan) + assert.Equal(t, "1.3.7", tfVersion[0]) } @@ -133,7 +135,8 @@ func TestTerraformShow_RawPlan(t *testing.T) { tfPlan, err := terraform.CarboniferPlan("test/terraform/planRaw/plan.tfplan") assert.NoError(t, err) - assert.Equal(t, tfPlan.TerraformVersion, "1.4.6") + tfVersion, _ := utils.GetJSON(".terraform_version", *tfPlan) + assert.Equal(t, tfVersion[0], "1.4.6") } @@ -156,10 +159,13 
@@ func TestTerraformShow_SetVarDifferentFromPlanFile(t *testing.T) { wd := path.Join(testutils.RootDir, "test/terraform/planRaw") plan, err := terraform.CarboniferPlan(wd) assert.NoError(t, err) - assert.Equal(t, plan.Variables["machine_type"].Value, "f1-medium") + log.Info(plan) + machineTypeVar, _ := utils.GetJSON(".variables.machine_type.value", *plan) + assert.Equal(t, "f1-medium", machineTypeVar[0]) wd2 := path.Join(testutils.RootDir, "test/terraform/planRaw/plan.tfplan") plan2, err2 := terraform.CarboniferPlan(wd2) assert.NoError(t, err2) - assert.Equal(t, plan2.Variables["machine_type"].Value, "f1-micro") + machineTypeVar2, _ := utils.GetJSON(".variables.machine_type.value", *plan2) + assert.Equal(t, "f1-micro", machineTypeVar2[0]) } diff --git a/internal/terraform/tfrefs/terraformRefs.go b/internal/terraform/tfrefs/terraformRefs.go deleted file mode 100644 index d50e3d6..0000000 --- a/internal/terraform/tfrefs/terraformRefs.go +++ /dev/null @@ -1,13 +0,0 @@ -package tfrefs - -import ( - "github.com/carboniferio/carbonifer/internal/resources" - tfjson "github.com/hashicorp/terraform-json" -) - -type References struct { - ResourceConfigs map[string]*tfjson.ConfigResource - ResourceReferences map[string]*tfjson.StateResource - DataResources map[string]resources.DataResource - ProviderConfigs map[string]string -} diff --git a/internal/testutils/json.go b/internal/testutils/json.go new file mode 100644 index 0000000..404d8d0 --- /dev/null +++ b/internal/testutils/json.go @@ -0,0 +1,21 @@ +package testutils + +import ( + "encoding/json" + + tfjson "github.com/hashicorp/terraform-json" +) + +// TfResourceToJSON converts a tfjson.StateResource to a map[string]interface{} +func TfResourceToJSON(resource *tfjson.StateResource) (*map[string]interface{}, error) { + var result map[string]interface{} + bytes, err := json.Marshal(resource) + if err != nil { + return nil, err + } + err = json.Unmarshal(bytes, &result) + if err != nil { + return nil, err + } + return 
&result, nil +} diff --git a/internal/testutils/testutils.go b/internal/testutils/testutils.go index b735350..0fb918c 100644 --- a/internal/testutils/testutils.go +++ b/internal/testutils/testutils.go @@ -9,6 +9,7 @@ import ( "github.com/carboniferio/carbonifer/internal/utils" ) +// RootDir is the root directory of the project var RootDir string func init() { @@ -27,6 +28,7 @@ func init() { } +// SkipWithCreds skips the test if the environment variable SKIP_WITH_CREDENTIALS is set func SkipWithCreds(t *testing.T) { if os.Getenv("SKIP_WITH_CREDENTIALS") != "" { t.Skip("Skipping testing requiring providers credentials") diff --git a/internal/tools/aws/instances/generate.go b/internal/tools/aws/instances/generate.go index 95dc25c..90143e2 100644 --- a/internal/tools/aws/instances/generate.go +++ b/internal/tools/aws/instances/generate.go @@ -9,19 +9,22 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ec2" + "github.com/pkg/errors" + + log "github.com/sirupsen/logrus" ) // InstanceType is the struct that will be exported in the json -type InstanceType struct { +type instanceType struct { InstanceType string VCPU int64 MemoryMb int64 GPUs []string GPUMemoryMb int64 - InstanceStorage *InstanceStorage + InstanceStorage *instanceStorage } -type InstanceStorage struct { +type instanceStorage struct { SizePerDiskGB int64 Count int64 Type string @@ -32,13 +35,14 @@ func main() { // Create a EC2 service client. 
session, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")}) if err != nil { - panic(err) + errW := errors.Wrap(err, "cannot create aws session") + log.Panic(errW) } svc := ec2.New(session) // Get the list of instance types // Convert the list of instance types to the InstanceType struct - instances := map[string]InstanceType{} + instances := map[string]instanceType{} token := describeInstanceTypesPaginated(svc, &instances, nil) for token != nil { token = describeInstanceTypesPaginated(svc, &instances, token) @@ -47,21 +51,23 @@ func main() { // Write the list of instances to stdout json, err := json.MarshalIndent(instances, "", " ") if err != nil { - panic(err) + errW := errors.Wrap(err, "cannot marshal instances to json") + log.Panic(errW) } fmt.Println(string(json)) } -func describeInstanceTypesPaginated(svc *ec2.EC2, instances *map[string]InstanceType, token *string) *string { +func describeInstanceTypesPaginated(svc *ec2.EC2, instances *map[string]instanceType, token *string) *string { instanceTypesOutput, err := svc.DescribeInstanceTypes(&ec2.DescribeInstanceTypesInput{ NextToken: token, }) if err != nil { - panic(err) + errW := errors.Wrap(err, "cannot describe instance types") + log.Panic(errW) } - for _, instanceType := range instanceTypesOutput.InstanceTypes { - gpuInfos := instanceType.GpuInfo + for _, instanceTypeInfo := range instanceTypesOutput.InstanceTypes { + gpuInfos := instanceTypeInfo.GpuInfo totalGPUMemoryMb := int64(0) gpus := []string{} if gpuInfos != nil { @@ -70,22 +76,22 @@ func describeInstanceTypesPaginated(svc *ec2.EC2, instances *map[string]Instance } totalGPUMemoryMb = *gpuInfos.TotalGpuMemoryInMiB } - var instanceStorage InstanceStorage - if instanceType.InstanceStorageSupported != nil && *instanceType.InstanceStorageSupported { - instanceStorage = InstanceStorage{ - SizePerDiskGB: *instanceType.InstanceStorageInfo.Disks[0].SizeInGB, - Count: *instanceType.InstanceStorageInfo.Disks[0].Count, - Type: 
*instanceType.InstanceStorageInfo.Disks[0].Type, + var instanceStorageInfo instanceStorage + if instanceTypeInfo.InstanceStorageSupported != nil && *instanceTypeInfo.InstanceStorageSupported { + instanceStorageInfo = instanceStorage{ + SizePerDiskGB: *instanceTypeInfo.InstanceStorageInfo.Disks[0].SizeInGB, + Count: *instanceTypeInfo.InstanceStorageInfo.Disks[0].Count, + Type: *instanceTypeInfo.InstanceStorageInfo.Disks[0].Type, } } - name := *instanceType.InstanceType - instance := InstanceType{ + name := *instanceTypeInfo.InstanceType + instance := instanceType{ InstanceType: name, - VCPU: *instanceType.VCpuInfo.DefaultVCpus, - MemoryMb: *instanceType.MemoryInfo.SizeInMiB, + VCPU: *instanceTypeInfo.VCpuInfo.DefaultVCpus, + MemoryMb: *instanceTypeInfo.MemoryInfo.SizeInMiB, GPUs: gpus, GPUMemoryMb: int64(totalGPUMemoryMb), - InstanceStorage: &instanceStorage, + InstanceStorage: &instanceStorageInfo, } instanceMap := *instances instanceMap[name] = instance diff --git a/internal/tools/gcp/gcp.go b/internal/tools/gcp/gcp.go index 2cf7343..f99c9b1 100644 --- a/internal/tools/gcp/gcp.go +++ b/internal/tools/gcp/gcp.go @@ -1,4 +1,4 @@ -package tools_gcp +package toolsgcp import ( "context" @@ -9,7 +9,8 @@ import ( "google.golang.org/api/option" ) -func GetProjectId() string { +// GetProjectID returns the project ID of the current GCP project +func GetProjectID() string { ctx := context.Background() // Get the default client using the default credentials diff --git a/internal/tools/gcp/instances/generate.go b/internal/tools/gcp/instances/generate.go index e147d89..08b74ce 100644 --- a/internal/tools/gcp/instances/generate.go +++ b/internal/tools/gcp/instances/generate.go @@ -13,14 +13,12 @@ import ( "google.golang.org/api/compute/v1" "github.com/carboniferio/carbonifer/internal/providers/gcp" - tools_gcp "github.com/carboniferio/carbonifer/internal/tools/gcp" + toolsgcp "github.com/carboniferio/carbonifer/internal/tools/gcp" ) -const DEFAULT_ZONE = "us-central1-a" +var 
cpuTypes map[string]machineFamily -var cpuTypes map[string]MachineFamily - -type MachineFamily struct { +type machineFamily struct { Name string `json:"Name"` CPUTypes []string `json:"CPU types"` Architecture string `json:"Architecture"` @@ -34,7 +32,7 @@ func getCPUTypes(machineType string) []string { log.Fatal(err) } - cpuTypes = make(map[string]MachineFamily) + cpuTypes = make(map[string]machineFamily) byteValue, _ := io.ReadAll(jsonFile) err = json.Unmarshal(byteValue, &cpuTypes) if err != nil { @@ -71,7 +69,7 @@ func getMachineTypesForZone(client *compute.Service, project string, zone string Name: machineType.Name, Vcpus: int32(machineType.GuestCpus), MemoryMb: int32(machineType.MemoryMb), - CpuTypes: getCPUTypes(machineType.Name), + CPUTypes: getCPUTypes(machineType.Name), GPUTypes: getGPUs(machineType), } } @@ -142,7 +140,7 @@ func retrieveData() *map[string]map[string]gcp.MachineType { log.Fatalf("Error creating Compute Engine client: %v", err) } - project := tools_gcp.GetProjectId() + project := toolsgcp.GetProjectID() machineTypesByZone := make(map[string]map[string]gcp.MachineType) zones, err := client.Zones.List(project).Do() diff --git a/internal/tools/gcp/tiers/generate.go b/internal/tools/gcp/tiers/generate.go index 6a7da6e..744281f 100644 --- a/internal/tools/gcp/tiers/generate.go +++ b/internal/tools/gcp/tiers/generate.go @@ -13,7 +13,7 @@ import ( "google.golang.org/api/sqladmin/v1" "github.com/carboniferio/carbonifer/internal/providers/gcp" - tools_gcp "github.com/carboniferio/carbonifer/internal/tools/gcp" + toolsgcp "github.com/carboniferio/carbonifer/internal/tools/gcp" ) func getVCPUs(tierName string) (int64, error) { @@ -22,17 +22,17 @@ func getVCPUs(tierName string) (int64, error) { values := tierRegex.FindAllStringSubmatch(tierName, -1)[0] if values[4] == "" { return 1, nil - } else { - vCPUs, err := strconv.Atoi(values[4]) - if err != nil { - log.Fatalf(err.Error()) - } - return int64(vCPUs), nil } - } else { - m := fmt.Sprintf("Cannot 
find number of vCPUs from tier name: %s", tierName) - return 0, errors.New(m) + vCPUs, err := strconv.Atoi(values[4]) + if err != nil { + log.Fatalf(err.Error()) + } + return int64(vCPUs), nil + } + m := fmt.Sprintf("Cannot find number of vCPUs from tier name: %s", tierName) + return 0, errors.New(m) + } func main() { @@ -45,9 +45,9 @@ func main() { log.Fatalf("Error creating Cloud SQL client: %v", err) } - project := tools_gcp.GetProjectId() + project := toolsgcp.GetProjectID() - tiersList := make(map[string]gcp.SqlTier) + tiersList := make(map[string]gcp.SQLTier) tiers, err := client.Tiers.List(project).Do() if err != nil { log.Fatal(err) @@ -57,7 +57,7 @@ func main() { if err != nil { log.Fatal(err) } - tiersList[tier.Tier] = gcp.SqlTier{ + tiersList[tier.Tier] = gcp.SQLTier{ Name: tier.Tier, Vcpus: vCpus, MemoryMb: tier.RAM / 1024 / 1024, diff --git a/internal/utils/config.go b/internal/utils/config.go index 544fcfa..e2446f3 100644 --- a/internal/utils/config.go +++ b/internal/utils/config.go @@ -1,15 +1,14 @@ package utils import ( + // embed in order to use go:embed command _ "embed" "io" "os" "path" "path/filepath" "runtime" - "sort" - "github.com/carboniferio/carbonifer/internal/estimate/estimation" "github.com/heirko/go-contrib/logrusHelper" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -18,12 +17,14 @@ import ( "gopkg.in/yaml.v3" ) +// InitWithDefaultConfig initializes the configuration with the default config file func InitWithDefaultConfig() { initViper("") initLogger() checkDataConfig() } +// InitWithConfig initializes the configuration with a custom config file func InitWithConfig(customConfigFilePath string) { initViper(customConfigFilePath) initLogger() @@ -49,7 +50,7 @@ func loadViperDefaults() { log.Debug(settings) } -func BasePath() string { +func basePath() string { _, b, _, _ := runtime.Caller(0) d := filepath.Dir(b) return filepath.Join(d, "../..") @@ -87,7 +88,7 @@ func initViper(configFilePath string) { // Set absolute data 
directory dataPath := viper.GetString("data.path") if dataPath != "" && !filepath.IsAbs(dataPath) { - basedir := BasePath() + basedir := basePath() dataPath = filepath.Join(basedir, dataPath) } viper.Set("data.path", dataPath) @@ -128,9 +129,3 @@ func checkDataConfig() { } } } - -func SortEstimations(resources *[]estimation.EstimationResource) { - sort.Slice(*resources, func(i, j int) bool { - return (*resources)[i].Resource.GetAddress() < (*resources)[j].Resource.GetAddress() - }) -} diff --git a/internal/utils/conversions.go b/internal/utils/conversions.go new file mode 100644 index 0000000..0f5b1d4 --- /dev/null +++ b/internal/utils/conversions.go @@ -0,0 +1,39 @@ +package utils + +import ( + "fmt" + "strconv" +) + +// ParseToInt converts to an int an interface that could be int, float or string +func ParseToInt(value interface{}) (int, error) { + switch v := value.(type) { + case int: + return v, nil + case float64: + return int(v), nil + case string: + var err error + intValue, err := strconv.Atoi(v) + if err == nil { + return intValue, nil + } + floatValue, err := strconv.ParseFloat(v, 64) + if err == nil { + return int(floatValue), nil + } + return 0, err + + default: + return 0, fmt.Errorf("Cannot convert interface to int: %v", value) + } +} + +// ConvertInterfaceListToStringList converts a list of interfaces to a list of strings +func ConvertInterfaceListToStringList(list []interface{}) []string { + stringList := []string{} + for _, item := range list { + stringList = append(stringList, item.(string)) + } + return stringList +} diff --git a/internal/utils/conversions_test.go b/internal/utils/conversions_test.go new file mode 100644 index 0000000..7b89466 --- /dev/null +++ b/internal/utils/conversions_test.go @@ -0,0 +1,58 @@ +package utils + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParseToInt(t *testing.T) { + type args struct { + value interface{} + } + tests := []struct { + name string + args args + want 
int + wantErr bool + }{ + {"int", args{1}, 1, false}, + {"float", args{1.0}, 1, false}, + {"string", args{"1"}, 1, false}, + {"stringFloat", args{"1.0"}, 1, false}, + {"stringErr", args{"a"}, 0, true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := ParseToInt(tt.args.value) + assert.Equal(t, got, tt.want) + if tt.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestConvertInterfaceListToStringList(t *testing.T) { + type args struct { + list []interface{} + } + tests := []struct { + name string + args args + want []string + }{ + {"empty", args{[]interface{}{}}, []string{}}, + {"one", args{[]interface{}{"a"}}, []string{"a"}}, + {"two", args{[]interface{}{"a", "b"}}, []string{"a", "b"}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := ConvertInterfaceListToStringList(tt.args.list) + assert.True(t, reflect.DeepEqual(got, tt.want)) + }) + } +} diff --git a/internal/utils/defaults.yaml b/internal/utils/defaults.yaml index a0abe36..636268a 100644 --- a/internal/utils/defaults.yaml +++ b/internal/utils/defaults.yaml @@ -12,17 +12,8 @@ provider: avg_cpu_use: 0.5 avg_gpu_use: 0.5 avg_autoscaler_size_percent: 0.5 - boot_disk: - size: 10 - type: pd-standard - disk: - size: 500 - type: pd-standard aws: avg_cpu_use: 0.5 avg_gpu_use: 0.5 - disk: - size: 8 - type: gp2 log: level : "warn" diff --git a/internal/utils/jsonQuery.go b/internal/utils/jsonQuery.go new file mode 100644 index 0000000..841e9da --- /dev/null +++ b/internal/utils/jsonQuery.go @@ -0,0 +1,36 @@ +package utils + +import ( + "strings" + + "github.com/itchyny/gojq" +) + +// GetJSON returns the result of a jq query on a json object +func GetJSON(query string, json interface{}) ([]interface{}, error) { + queryParsed, err := gojq.Parse(query) + if err != nil { + return nil, err + } + iter := queryParsed.Run(json) + results := []interface{}{} + for { + v, ok := iter.Next() + if !ok { + break + } + if err, 
ok := v.(error); ok { + errMsg := err.Error() + if strings.Contains(errMsg, "annot iterate over: null") { + continue + } else { + return nil, err + } + } + if v != nil { + results = append(results, v) + } + } + + return results, nil +} diff --git a/internal/utils/jsonQuery_test.go b/internal/utils/jsonQuery_test.go new file mode 100644 index 0000000..41da61e --- /dev/null +++ b/internal/utils/jsonQuery_test.go @@ -0,0 +1,100 @@ +package utils + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGetJSON(t *testing.T) { + // Sample JSON input for testing + json := map[string]interface{}{ + "name": "John", + "cars": []string{"Ford", "BMW", "Fiat"}, + "foo": []map[string]interface{}{{"bar": "baz"}}, + "values": jsonParse(` + { + "advanced_machine_features": [], + "allow_stopping_for_update": null, + "attached_disk": [], + "boot_disk": [ + { + "auto_delete": true, + "disk_encryption_key_raw": null, + "initialize_params": [ + { + "image": "debian-cloud/debian-11", + "type": "pd-balanced" + } + ], + "mode": "READ_WRITE" + } + ] + }`), + } + + // Test cases with different queries + testCases := []struct { + query string + expectedResult interface{} + expectError bool + }{ + { + query: ".name", + expectedResult: []interface{}{"John"}, + expectError: false, + }, + { + query: ".cars", + expectedResult: []interface{}{[]string{"Ford", "BMW", "Fiat"}}, + expectError: false, + }, + { + query: ".nonexistent", + expectedResult: []interface{}{}, + expectError: false, + }, + { + query: ".foo", + expectedResult: []interface{}{[]map[string]interface{}{{"bar": "baz"}}}, + expectError: false, + }, + { + query: ".values.boot_disk[].initialize_params", + expectedResult: []interface{}{ + []interface{}{ + map[string]interface{}{ + "image": "debian-cloud/debian-11", + "type": "pd-balanced", + }, + }, + }, + expectError: false, + }, + { + query: ".prior_state.values.root_module.resources[] | select(.values.self_link == \"boo\") | .values", + 
expectedResult: []interface{}{}, + expectError: false, + }, + } + + for _, tc := range testCases { + result, err := GetJSON(tc.query, json) + if tc.expectError { + assert.Error(t, err, "Expected an error for query: %s", tc.query) + } else { + assert.NoError(t, err, "Unexpected error for query: %s", tc.query) + assert.Equal(t, tc.expectedResult, result, "Unexpected result for query: %s", tc.query) + } + } +} + +func jsonParse(jsonString string) map[string]interface{} { + var result map[string]interface{} + err := json.Unmarshal([]byte(jsonString), &result) + if err != nil { + panic(err) + } + return result +} diff --git a/pkg/estimate/estimate.go b/pkg/estimate/estimate.go index f09db0f..8c2ba86 100644 --- a/pkg/estimate/estimate.go +++ b/pkg/estimate/estimate.go @@ -9,6 +9,7 @@ import ( "github.com/shopspring/decimal" ) +// EstimationReport is the struct that contains the estimation of a resource type EstimationReport struct { Resource resources.GenericResource Power decimal.Decimal `json:"PowerPerInstance"` @@ -17,6 +18,7 @@ type EstimationReport struct { Count decimal.Decimal } +// GetEstimation returns the estimation of a resource func GetEstimation(resource resources.GenericResource) (EstimationReport, error) { estimation, err := estimate.EstimateResource(toInternalComputeResource(resource)) if err != nil { @@ -35,6 +37,7 @@ func GetEstimation(resource resources.GenericResource) (EstimationReport, error) }, nil } +// GetEstimationFromInstanceType returns the estimation of a resource from its instance type func GetEstimationFromInstanceType(instanceType string, zone string, provider providers.Provider) (EstimationReport, error) { resource, err := resources.GetResource(instanceType, zone, provider) if err != nil { diff --git a/pkg/estimate/estimate_test.go b/pkg/estimate/estimate_test.go index 412ed48..0b00d41 100644 --- a/pkg/estimate/estimate_test.go +++ b/pkg/estimate/estimate_test.go @@ -1,11 +1,12 @@ package estimate import ( + "reflect" + "testing" + 
"github.com/carboniferio/carbonifer/pkg/providers" "github.com/carboniferio/carbonifer/pkg/resources" "github.com/shopspring/decimal" - "reflect" - "testing" ) func TestGetEstimation(t *testing.T) { diff --git a/pkg/resources/ressource.go b/pkg/resources/ressource.go index f1b0397..ab4cfaa 100644 --- a/pkg/resources/ressource.go +++ b/pkg/resources/ressource.go @@ -11,6 +11,7 @@ import ( "github.com/shopspring/decimal" ) +// GenericResource is a struct that contains the information of a generic resource type GenericResource struct { Name string Region string @@ -34,6 +35,7 @@ func (g GenericResource) IsSupported() bool { } } +// GetIdentification returns the identification of the resource func (g GenericResource) GetIdentification() *resources.ResourceIdentification { return &resources.ResourceIdentification{ Name: g.Name, @@ -44,15 +46,18 @@ func (g GenericResource) GetIdentification() *resources.ResourceIdentification { } } +// GetAddress returns the address of the resource func (g GenericResource) GetAddress() string { return fmt.Sprintf("%v.%v", g.GetIdentification().ResourceType, g.GetIdentification().Name) } +// Storage is the struct that contains the storage of a resource type Storage struct { HddStorage decimal.Decimal SsdStorage decimal.Decimal } +// GetResource returns a GenericResource from an instance type func GetResource(instanceType string, zone string, provider providers.Provider) (GenericResource, error) { switch provider { case providers.GCP: @@ -69,7 +74,7 @@ func fromGCPMachineTypeToResource(region string, machineType gcp.MachineType) Ge Provider: providers.GCP, GPUTypes: machineType.GPUTypes, MemoryMb: machineType.MemoryMb, - CPUTypes: machineType.CpuTypes, + CPUTypes: machineType.CPUTypes, VCPUs: machineType.Vcpus, Storage: Storage{}, ReplicationFactor: 0, diff --git a/pkg/resources/ressource_test.go b/pkg/resources/ressource_test.go index bbb370f..83bbe76 100644 --- a/pkg/resources/ressource_test.go +++ b/pkg/resources/ressource_test.go @@ 
-1,9 +1,10 @@ package resources import ( - "github.com/carboniferio/carbonifer/pkg/providers" "reflect" "testing" + + "github.com/carboniferio/carbonifer/pkg/providers" ) func TestGenericResource_IsSupported(t *testing.T) { diff --git a/test/data/gcp_instances.json b/test/data/gcp_instances.json index de851f6..a50baa9 100644 --- a/test/data/gcp_instances.json +++ b/test/data/gcp_instances.json @@ -41,5 +41,18 @@ "cpuTypes": [ "Cascade Lake" ] + }, + "n1-standard-2": { + "name": "n1-standard-2", + "vcpus": 2, + "gpus": null, + "memoryMb": 7680, + "cpuTypes": [ + "Skylake", + "Broadwell", + "Haswell", + "Sandy Bridge", + "Ivy Bridge" + ] } } \ No newline at end of file diff --git a/test/terraform/gcp_1/main.tf b/test/terraform/gcp_1/main.tf index 117c74f..29b2a3a 100644 --- a/test/terraform/gcp_1/main.tf +++ b/test/terraform/gcp_1/main.tf @@ -44,7 +44,7 @@ resource "google_compute_instance" "first" { } guest_accelerator { - type = "nvidia-tesla-k80" + type = "nvidia-tesla-a100" count = 2 } } diff --git a/test/terraform/gcp_calling_module/provider.tf b/test/terraform/gcp_calling_module/provider.tf index 8e460cf..ef6d1ad 100644 --- a/test/terraform/gcp_calling_module/provider.tf +++ b/test/terraform/gcp_calling_module/provider.tf @@ -5,4 +5,3 @@ module "globals" { provider "google" { region = module.globals.common_region } - diff --git a/test/terraform/noResources/provider.tf b/test/terraform/noResources/provider.tf new file mode 100644 index 0000000..0e067d3 --- /dev/null +++ b/test/terraform/noResources/provider.tf @@ -0,0 +1,9 @@ + +provider "google" { + region = "europe-west9" +} + +data "google_client_config" "current" { + +} + diff --git a/test/terraform/planRaw/plan.tfplan b/test/terraform/planRaw/plan.tfplan index 4694870a4b76d2cb10e6f7d4411c3c9f56fe9012..b57ab481118ac390c3970027eb247e9c7c347449 100644 GIT binary patch delta 306 zcmbOuJ4co`z?+#xgn;zEJqNSuf(Yh<_B=PfceuoAsP;IK4t<7tmM%W2AYr(;LXS+ z!T@*3_LTpMX{6lP- zApYieY-x{b!(qF 
z;0~Gmj7J-*xC*ZUR`GORbFAVUd9AUE|L3*DD(=l^E(LK9D;on4umj;6Mh1piJRlwb DpM*pU