diff --git a/.github/workflows/docker-release.yml b/.github/workflows/docker-release.yml index 37c61f71..b69aaba9 100644 --- a/.github/workflows/docker-release.yml +++ b/.github/workflows/docker-release.yml @@ -29,18 +29,18 @@ jobs: echo "Version: $VERSION" - name: Login to GitHub Container Registry - uses: docker/login-action@v3 + uses: docker/login-action@v4 with: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - uses: depot/use-action@v1 + - uses: depot/use-action@v1.3.1 with: project: xnsnw3m20t - name: Build and push container - uses: depot/build-push-action@v1 + uses: depot/build-push-action@v1.17.0 id: build with: project: xnsnw3m20t diff --git a/.github/workflows/finalize-copybara-sync.yml b/.github/workflows/finalize-copybara-sync.yml index 62426bd6..070d3ff7 100644 --- a/.github/workflows/finalize-copybara-sync.yml +++ b/.github/workflows/finalize-copybara-sync.yml @@ -45,11 +45,13 @@ jobs: run: go mod tidy - name: Commit and push go mod tidy changes + env: + HEAD_BRANCH: ${{ github.ref_name }} run: | if ! 
git diff --quiet go.mod go.sum; then git add go.mod go.sum git commit -m "Run go mod tidy" - git push origin ${{ github.ref_name }} + git push origin "$HEAD_BRANCH" else echo "No changes from go mod tidy" fi @@ -81,6 +83,7 @@ jobs: AUTHOR_NAME: ${{ steps.author.outputs.name }} AUTHOR_EMAIL: ${{ steps.author.outputs.email }} GITHUB_USER: ${{ steps.author.outputs.github_user }} + HEAD_BRANCH: ${{ github.ref_name }} run: | # Build PR body PR_BODY="## Copybara Sync - Release ${VERSION} @@ -106,7 +109,7 @@ jobs: # Create the PR PR_URL=$(gh pr create \ --base main \ - --head "${{ github.ref_name }}" \ + --head "$HEAD_BRANCH" \ --title "Release ${VERSION}" \ --body "$PR_BODY") diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 04b7f572..4050aad0 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -50,7 +50,7 @@ jobs: cache: true - name: Run GoReleaser (publish) - uses: goreleaser/goreleaser-action@v6 + uses: goreleaser/goreleaser-action@v7 with: version: latest args: release --clean diff --git a/.github/workflows/tag-on-merge.yml b/.github/workflows/tag-on-merge.yml index 541619db..b2192dab 100644 --- a/.github/workflows/tag-on-merge.yml +++ b/.github/workflows/tag-on-merge.yml @@ -18,9 +18,10 @@ jobs: steps: - name: Extract version from branch name id: version + env: + BRANCH: ${{ github.event.pull_request.head.ref }} run: | # Extract v1.2.3 from copybara/v1.2.3 - BRANCH="${{ github.event.pull_request.head.ref }}" VERSION=$(echo "$BRANCH" | sed 's|copybara/||') echo "version=$VERSION" >> $GITHUB_OUTPUT echo "Extracted version: $VERSION" @@ -48,8 +49,8 @@ jobs: - name: Delete copybara branch env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + BRANCH: ${{ github.event.pull_request.head.ref }} run: | - BRANCH="${{ github.event.pull_request.head.ref }}" echo "Deleting branch: $BRANCH" git push origin --delete "$BRANCH" || echo "Branch may have already been deleted" diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 
926cb565..2fc26655 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -9,7 +9,7 @@ builds: - linux - windows ldflags: - - -s -w -X github.com/overmindtech/cli/tracing.version={{.Version}} + - -s -w -X github.com/overmindtech/cli/go/tracing.version={{.Version}} - binary: overmind id: overmind-macos env: @@ -17,7 +17,7 @@ builds: goos: - darwin ldflags: - - -s -w -X github.com/overmindtech/cli/tracing.version={{.Version}} + - -s -w -X github.com/overmindtech/cli/go/tracing.version={{.Version}} # For now we are going to disable signing MacOS packages. This works on Dylan's # person laptop, but we haven't worked out a way to get this set up in a github diff --git a/.terraform.lock.hcl b/.terraform.lock.hcl index 589f730a..fd321991 100644 --- a/.terraform.lock.hcl +++ b/.terraform.lock.hcl @@ -2,37 +2,37 @@ # Manual edits may be lost in future updates. provider "registry.terraform.io/hashicorp/aws" { - version = "6.32.1" + version = "6.35.1" constraints = ">= 4.56.0" hashes = [ - "h1:/kSj+4KeiYIJR4GZKUIp+NjaOSGPbEpoJFo+n8r21iQ=", - "h1:2vgwE6+ZCd7tLwQOb41OO0nLYAgV7ssIb8Xr9CdUupo=", - "h1:5tbI29RszzXinjHPzy5Qqp1ooS3/T+zLrjodyr0osJs=", - "h1:CAu73BoUtKnbgWP6oBs5pCTXL+Hfy8xc3sWZuZf1vEk=", - "h1:HYVnQr6ZXWVB/U3j/VuDZmn5fdzrCD6StyC3t7LM150=", - "h1:ONBRGZUR973/u9y3Yf2yxYGNHH4acyVm0r03iHxS4L8=", - "h1:Qhwre3rhX8AN+kAOiNjyS9uNtjlpK8yhwtoQPAJ2HyM=", - "h1:S9FhFACHbATLcz5I7dsM7KwUL94hdyfxj2AhHZ4rtKA=", - "h1:e8ls5XHmaRt5w4XVpWhon8BnmdaFqYtm+kIhIutyR4g=", - "h1:j691GxEePvwjhYV08mwgTLD/CiCG4YHdZOXL+gV6qt0=", - "h1:mdjVlnAux6Ddq2c+14yDmwT1PBK0t9D/v/y2sv3Pyu4=", - "h1:pIWlu/D0yFPM5dk37TWNaRUZZSNOs9MW7to8IP/F5TA=", - "h1:uHqBzBSaSuJrwsqcbF4kY5tW8hc2qAlVLAl7QdRrEmE=", - "h1:yqfHWALCdPdktH7iDa9gWKmxHeTl8r7Wu97JDeRO3tI=", - "zh:024d2cc116c8c83bb63b71623e3654109948791b250929449f4533b06678d574", - "zh:0ee944eb1c0b28957ad04541546ebac66f81b74ae811d20bcd7043d0313722e1", - "zh:43f1b6bcc2d6ba34dd4f02aab2ef3923281cf82455e608ac1ea493374dbb132d", - 
"zh:52e91c66c3d946d9d24ecf6684e23337abbe7e93a7e8d927f8b7cc69d096215e", - "zh:5d8030a02b61256fb6ee51efe70c1ddfc0d57b4dc0f25c621afddab81575a9c2", - "zh:67b25c8732af678af5772cf57bfb68937bdb535ef06f7f353202e272d843f52c", - "zh:6e846e85e55d7c49820410fb3db338e2d2adf19e3481558e3bec0d63b953c521", - "zh:8d4922a86a39cb2788c14f430008fcaf236b0023260439bc95cc7758d5b76f4a", + "h1:0/uXxSpL98lpRqjRhjAvvWZVnJZnbOehfAlTrcPXURI=", + "h1:3AfkMHiID/TK41i9ipxdUbSx3v4hjIePNEZR0fEuQJ4=", + "h1:JDzMZ25sVEhH1IfEvIOHSobkAG6zVR9XJheIo/1Rxcg=", + "h1:LXMHE13aDvLoetIJuf5sSgg4Aop5iBZN+NKHfbG5zDc=", + "h1:OWVRgvaFuQ/uIysY2FJpLBA2syuDy2riakYgRxu8Vfs=", + "h1:RaWReVCoriJ00TKG4aLdybf291yTrhkTebXAB7gOOYg=", + "h1:Rkp2NXMY5RwM6J9bmep0l98i5mGBZ2yMhC+9nNaslUc=", + "h1:ZXdXwOk/VKPMcYlqEDevz+jyj4zoZKEV8D5zXOt3Lts=", + "h1:bau+2wU1EyOYFYbVSr+ldecm/JsRWrH/EJ2rPlSIT2Y=", + "h1:hNOrSOTUfwctGIrrd2033JXCcmA+zt+eeY2SNkg0Aq8=", + "h1:mj5knyv94JKLLBTwlTEv5Yn4MDAWYRPaYkCbZFhRYdU=", + "h1:qegJgE+n90ruKoC7xx+as0s9JSO64pRvCtw5Bxs6bTE=", + "h1:tjis0/YUzxXTgD7zaDS/ZVNyOU8oysKvso7GTteFaYU=", + "h1:xD+5zPhF0ry3sutriARfFVIg5m38VwYt66RveI3aUyI=", + "zh:0a16d1b0ba9379e5c5295e6b3caa42f0b8ba6b9f0a7cc9dbe58c232cf995db2d", + "zh:4b2e69907a1a2c557e45ef590f9fd6187ab5bf90378346ba9f723535e49ce908", + "zh:56bdafda0d629e15dc3dd9275b54f1fb953e2e09a3bc1a34e027da9d03ea4893", + "zh:5b84e933989150249036f84faad221dce0daa9d3043ff24401547e18f00b121e", + "zh:70bac98c27a14cb2cedabd741a1f7f1bab074c127efdcf02b54dbcf0d03db3cc", + "zh:7184f48bd077eaf68e184fd44f97e2d971cb77c59a68aedb95a0f8dc01b134fe", + "zh:7367589ae8b584bfcd83c973f5003e15010a453349c017a0d2cca8772d4fcfd9", + "zh:7ec9699dee49dd31bbc2d0e50fa1fff451eee5c1d9fd59bca7412acb49ce6594", + "zh:92dd139b96977a64af0e976cd06e84921033678ab97550f1b687c0ea54a8e82c", "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", - "zh:9e3d4d1848fc6675c6bd88087188f229c4ec98b1a35de97c2697a0160fb76678", - "zh:b21c1b932c896c21988baac3b1cbc8b51843581b8fabf5e396952a329c9e6a12", - 
"zh:df8e5b1a2713880e2b3c489cc22ad3b14490e1702a1637273f91747bf091c071", - "zh:ec66785d40f7c04f138bb94fec55b8ddaae6fcc9cb25cc388989150bfaf2de4c", - "zh:f1ecb00fcfdb0c2aec3622549c023f469db401f395bc25bbddfe5cf8b51cd046", - "zh:fca78bf28897c8077130ce8d0f4d67900dbd77619adb1326bcd017ef421e5f1f", + "zh:9f2df575a5b010db60068668c48806595a3d617a2c0305035283fe8b72f07b19", + "zh:a4602b7602c75c8f726bdc7e706dc5c26736e47cc8381be01386aa8d8d998403", + "zh:bc25fefeeee10425df7aebfc21dc6532d19acdf03fa97b9e6d8c113adffd0a1d", + "zh:f445592040b5fc368a12e6edeffc951b2eb41e86413c4074638a13376e25a9cc", + "zh:ff43962a48bd8f85e17188736bbd3c145b6a1320bd8303221f6b4f9ec861e1e6", ] } diff --git a/README.md b/README.md index 4a75128a..e2f6c167 100644 --- a/README.md +++ b/README.md @@ -230,6 +230,14 @@ overmind --version changes you make with `overmind terraform apply`, so that you can be sure that your changes haven't had any unexpected downstream impact. +- `overmind knowledge list` + + View which knowledge files Overmind would discover from your current location. + Knowledge files in `.overmind/knowledge/` teach the AI investigator about your + infrastructure context, standards, and approved patterns. This command shows the + resolved knowledge directory path, valid files with their metadata, and any + validation warnings for invalid files. + ## Cloud Provider Support The CLI automatically discovers AWS and GCP providers from your Terraform configuration. 
diff --git a/aws-source/adapters/adapterhelpers_get_list_adapter_v2.go b/aws-source/adapters/adapterhelpers_get_list_adapter_v2.go index 05099a01..d4c662b6 100644 --- a/aws-source/adapters/adapterhelpers_get_list_adapter_v2.go +++ b/aws-source/adapters/adapterhelpers_get_list_adapter_v2.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "slices" "time" "github.com/overmindtech/cli/go/discovery" @@ -135,13 +136,7 @@ func (s *GetListAdapterV2[ListInput, ListOutput, AWSItem, ClientStruct, Options] return true } - for _, s := range s.Scopes() { - if s == scope { - return true - } - } - - return false + return slices.Contains(s.Scopes(), scope) } // Get retrieves an item from the adapter based on the provided scope, query, and diff --git a/aws-source/adapters/adapterhelpers_get_list_adapter_v2_test.go b/aws-source/adapters/adapterhelpers_get_list_adapter_v2_test.go index 57f81e07..7d8b4f65 100644 --- a/aws-source/adapters/adapterhelpers_get_list_adapter_v2_test.go +++ b/aws-source/adapters/adapterhelpers_get_list_adapter_v2_test.go @@ -261,7 +261,7 @@ func TestListFuncPaginatorBuilder(t *testing.T) { return output, nil }, ItemMapper: func(query *string, scope string, awsItem string) (*sdp.Item, error) { - attrs, _ := sdp.ToAttributes(map[string]interface{}{ + attrs, _ := sdp.ToAttributes(map[string]any{ "id": awsItem, }) return &sdp.Item{ diff --git a/aws-source/adapters/adapterhelpers_get_list_source.go b/aws-source/adapters/adapterhelpers_get_list_source.go index 35b5a739..2b50a44d 100644 --- a/aws-source/adapters/adapterhelpers_get_list_source.go +++ b/aws-source/adapters/adapterhelpers_get_list_source.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "slices" "time" "buf.build/go/protovalidate" @@ -108,13 +109,7 @@ func (s *GetListAdapter[AWSItem, ClientStruct, Options]) hasScope(scope string) return true } - for _, s := range s.Scopes() { - if s == scope { - return true - } - } - - return false + return slices.Contains(s.Scopes(), scope) } // Get retrieves 
an item from the adapter based on the provided scope, query, and diff --git a/aws-source/adapters/adapterhelpers_shared_tests.go b/aws-source/adapters/adapterhelpers_shared_tests.go index 8bf6cda0..da6bbd64 100644 --- a/aws-source/adapters/adapterhelpers_shared_tests.go +++ b/aws-source/adapters/adapterhelpers_shared_tests.go @@ -12,34 +12,6 @@ import ( "github.com/overmindtech/cli/go/sdp-go" ) -func PtrString(v string) *string { - return &v -} - -func PtrInt32(v int32) *int32 { - return &v -} - -func PtrInt64(v int64) *int64 { - return &v -} - -func PtrFloat32(v float32) *float32 { - return &v -} - -func PtrFloat64(v float64) *float64 { - return &v -} - -func PtrTime(v time.Time) *time.Time { - return &v -} - -func PtrBool(v bool) *bool { - return &v -} - type Subnet struct { ID *string CIDR string diff --git a/aws-source/adapters/adapterhelpers_util.go b/aws-source/adapters/adapterhelpers_util.go index c77ac4e4..1a2724b6 100644 --- a/aws-source/adapters/adapterhelpers_util.go +++ b/aws-source/adapters/adapterhelpers_util.go @@ -488,7 +488,7 @@ func GetAutoConfig(t *testing.T) (aws.Config, string, string) { // Converts an interface to SDP attributes using the `sdp.ToAttributesSorted` // function, and also allows the user to exclude certain top-level fields from // the resulting attributes -func ToAttributesWithExclude(i interface{}, exclusions ...string) (*sdp.ItemAttributes, error) { +func ToAttributesWithExclude(i any, exclusions ...string) (*sdp.ItemAttributes, error) { attrs, err := sdp.ToAttributesViaJson(i) if err != nil { return nil, err diff --git a/aws-source/adapters/apigateway-domain-name_test.go b/aws-source/adapters/apigateway-domain-name_test.go index 80798499..6e4243dc 100644 --- a/aws-source/adapters/apigateway-domain-name_test.go +++ b/aws-source/adapters/apigateway-domain-name_test.go @@ -43,21 +43,21 @@ import ( func TestDomainNameOutputMapper(t *testing.T) { domainName := &types.DomainName{ - CertificateArn: 
PtrString("arn:aws:acm:region:account-id:certificate/certificate-id"), - CertificateName: PtrString("certificate-name"), - CertificateUploadDate: PtrTime(time.Now()), - DistributionDomainName: PtrString("distribution-domain-name"), - DistributionHostedZoneId: PtrString("distribution-hosted-zone-id"), - DomainName: PtrString("domain-name"), + CertificateArn: new("arn:aws:acm:region:account-id:certificate/certificate-id"), + CertificateName: new("certificate-name"), + CertificateUploadDate: new(time.Now()), + DistributionDomainName: new("distribution-domain-name"), + DistributionHostedZoneId: new("distribution-hosted-zone-id"), + DomainName: new("domain-name"), DomainNameStatus: types.DomainNameStatusAvailable, - DomainNameStatusMessage: PtrString("status-message"), + DomainNameStatusMessage: new("status-message"), EndpointConfiguration: &types.EndpointConfiguration{Types: []types.EndpointType{types.EndpointTypeEdge}}, - MutualTlsAuthentication: &types.MutualTlsAuthentication{TruststoreUri: PtrString("truststore-uri")}, - OwnershipVerificationCertificateArn: PtrString("arn:aws:acm:region:account-id:certificate/ownership-verification-certificate-id"), - RegionalCertificateArn: PtrString("arn:aws:acm:region:account-id:certificate/regional-certificate-id"), - RegionalCertificateName: PtrString("regional-certificate-name"), - RegionalDomainName: PtrString("regional-domain-name"), - RegionalHostedZoneId: PtrString("regional-hosted-zone-id"), + MutualTlsAuthentication: &types.MutualTlsAuthentication{TruststoreUri: new("truststore-uri")}, + OwnershipVerificationCertificateArn: new("arn:aws:acm:region:account-id:certificate/ownership-verification-certificate-id"), + RegionalCertificateArn: new("arn:aws:acm:region:account-id:certificate/regional-certificate-id"), + RegionalCertificateName: new("regional-certificate-name"), + RegionalDomainName: new("regional-domain-name"), + RegionalHostedZoneId: new("regional-hosted-zone-id"), SecurityPolicy: types.SecurityPolicyTls12, Tags: 
map[string]string{"key": "value"}, } diff --git a/aws-source/adapters/apigateway-resource_test.go b/aws-source/adapters/apigateway-resource_test.go index 94352d27..93e18c8a 100644 --- a/aws-source/adapters/apigateway-resource_test.go +++ b/aws-source/adapters/apigateway-resource_test.go @@ -83,25 +83,25 @@ import ( func TestResourceOutputMapper(t *testing.T) { resource := &types.Resource{ - Id: PtrString("test-id"), - ParentId: PtrString("parent-id"), - Path: PtrString("/test-path"), - PathPart: PtrString("test-path-part"), + Id: new("test-id"), + ParentId: new("parent-id"), + Path: new("/test-path"), + PathPart: new("test-path-part"), ResourceMethods: map[string]types.Method{ "GET": { - ApiKeyRequired: PtrBool(true), + ApiKeyRequired: new(true), AuthorizationScopes: []string{"scope1", "scope2"}, - AuthorizationType: PtrString("NONE"), - AuthorizerId: PtrString("authorizer-id"), - HttpMethod: PtrString("GET"), + AuthorizationType: new("NONE"), + AuthorizerId: new("authorizer-id"), + HttpMethod: new("GET"), MethodIntegration: &types.Integration{ CacheKeyParameters: []string{"param1", "param2"}, - CacheNamespace: PtrString("namespace"), - ConnectionId: PtrString("connection-id"), + CacheNamespace: new("namespace"), + ConnectionId: new("connection-id"), ConnectionType: types.ConnectionTypeInternet, ContentHandling: types.ContentHandlingStrategyConvertToBinary, - Credentials: PtrString("credentials"), - HttpMethod: PtrString("POST"), + Credentials: new("credentials"), + HttpMethod: new("POST"), IntegrationResponses: map[string]types.IntegrationResponse{ "200": { ContentHandling: types.ContentHandlingStrategyConvertToText, @@ -111,11 +111,11 @@ func TestResourceOutputMapper(t *testing.T) { ResponseTemplates: map[string]string{ "template1": "value1", }, - SelectionPattern: PtrString("pattern"), - StatusCode: PtrString("200"), + SelectionPattern: new("pattern"), + StatusCode: new("200"), }, }, - PassthroughBehavior: PtrString("WHEN_NO_MATCH"), + PassthroughBehavior: 
new("WHEN_NO_MATCH"), RequestParameters: map[string]string{ "param1": "value1", }, @@ -127,7 +127,7 @@ func TestResourceOutputMapper(t *testing.T) { InsecureSkipVerification: false, }, Type: types.IntegrationTypeAwsProxy, - Uri: PtrString("uri"), + Uri: new("uri"), }, MethodResponses: map[string]types.MethodResponse{ "200": { @@ -137,17 +137,17 @@ func TestResourceOutputMapper(t *testing.T) { ResponseParameters: map[string]bool{ "param1": true, }, - StatusCode: PtrString("200"), + StatusCode: new("200"), }, }, - OperationName: PtrString("operation"), + OperationName: new("operation"), RequestModels: map[string]string{ "model1": "value1", }, RequestParameters: map[string]bool{ "param1": true, }, - RequestValidatorId: PtrString("validator-id"), + RequestValidatorId: new("validator-id"), }, }, } diff --git a/aws-source/adapters/apigateway-rest-api_test.go b/aws-source/adapters/apigateway-rest-api_test.go index 459fd95b..4f283380 100644 --- a/aws-source/adapters/apigateway-rest-api_test.go +++ b/aws-source/adapters/apigateway-rest-api_test.go @@ -39,22 +39,22 @@ func TestRestApiOutputMapper(t *testing.T) { output := &apigateway.GetRestApiOutput{ ApiKeySource: types.ApiKeySourceTypeHeader, BinaryMediaTypes: []string{"application/json"}, - CreatedDate: PtrTime(time.Now()), - Description: PtrString("Example API"), + CreatedDate: new(time.Now()), + Description: new("Example API"), DisableExecuteApiEndpoint: false, EndpointConfiguration: &types.EndpointConfiguration{ Types: []types.EndpointType{types.EndpointTypePrivate}, VpcEndpointIds: []string{"vpce-12345678"}, }, - Id: PtrString("abc123"), - MinimumCompressionSize: PtrInt32(1024), - Name: PtrString("ExampleAPI"), - Policy: PtrString("{\"Version\": \"2012-10-17\", \"Statement\": [{\"Effect\": \"Allow\", \"Principal\": \"*\", \"Action\": \"execute-api:Invoke\", \"Resource\": \"*\"}]}"), - RootResourceId: PtrString("root123"), + Id: new("abc123"), + MinimumCompressionSize: new(int32(1024)), + Name: new("ExampleAPI"), + 
Policy: new("{\"Version\": \"2012-10-17\", \"Statement\": [{\"Effect\": \"Allow\", \"Principal\": \"*\", \"Action\": \"execute-api:Invoke\", \"Resource\": \"*\"}]}"), + RootResourceId: new("root123"), Tags: map[string]string{ "env": "production", }, - Version: PtrString("v1"), + Version: new("v1"), Warnings: []string{"This is a warning"}, } diff --git a/aws-source/adapters/autoscaling-auto-scaling-group_test.go b/aws-source/adapters/autoscaling-auto-scaling-group_test.go index fb14825b..a17a8784 100644 --- a/aws-source/adapters/autoscaling-auto-scaling-group_test.go +++ b/aws-source/adapters/autoscaling-auto-scaling-group_test.go @@ -17,33 +17,33 @@ func TestAutoScalingGroupOutputMapper(t *testing.T) { output := autoscaling.DescribeAutoScalingGroupsOutput{ AutoScalingGroups: []types.AutoScalingGroup{ { - AutoScalingGroupName: PtrString("eks-default-20230117110031319900000013-96c2dfb1-a11b-b5e4-6efb-0fea7e22855c"), - AutoScalingGroupARN: PtrString("arn:aws:autoscaling:eu-west-2:944651592624:autoScalingGroup:1cbb0e22-818f-4d8b-8662-77f73d3713ca:autoScalingGroupName/eks-default-20230117110031319900000013-96c2dfb1-a11b-b5e4-6efb-0fea7e22855c"), + AutoScalingGroupName: new("eks-default-20230117110031319900000013-96c2dfb1-a11b-b5e4-6efb-0fea7e22855c"), + AutoScalingGroupARN: new("arn:aws:autoscaling:eu-west-2:944651592624:autoScalingGroup:1cbb0e22-818f-4d8b-8662-77f73d3713ca:autoScalingGroupName/eks-default-20230117110031319900000013-96c2dfb1-a11b-b5e4-6efb-0fea7e22855c"), MixedInstancesPolicy: &types.MixedInstancesPolicy{ LaunchTemplate: &types.LaunchTemplate{ LaunchTemplateSpecification: &types.LaunchTemplateSpecification{ - LaunchTemplateId: PtrString("lt-0174ff2b8909d0c75"), // link - LaunchTemplateName: PtrString("eks-96c2dfb1-a11b-b5e4-6efb-0fea7e22855c"), - Version: PtrString("1"), + LaunchTemplateId: new("lt-0174ff2b8909d0c75"), // link + LaunchTemplateName: new("eks-96c2dfb1-a11b-b5e4-6efb-0fea7e22855c"), + Version: new("1"), }, Overrides: 
[]types.LaunchTemplateOverrides{ { - InstanceType: PtrString("t3.large"), + InstanceType: new("t3.large"), }, }, }, InstancesDistribution: &types.InstancesDistribution{ - OnDemandAllocationStrategy: PtrString("prioritized"), - OnDemandBaseCapacity: PtrInt32(0), - OnDemandPercentageAboveBaseCapacity: PtrInt32(100), - SpotAllocationStrategy: PtrString("lowest-price"), - SpotInstancePools: PtrInt32(2), + OnDemandAllocationStrategy: new("prioritized"), + OnDemandBaseCapacity: new(int32(0)), + OnDemandPercentageAboveBaseCapacity: new(int32(100)), + SpotAllocationStrategy: new("lowest-price"), + SpotInstancePools: new(int32(2)), }, }, - MinSize: PtrInt32(1), - MaxSize: PtrInt32(3), - DesiredCapacity: PtrInt32(1), - DefaultCooldown: PtrInt32(300), + MinSize: new(int32(1)), + MaxSize: new(int32(3)), + DesiredCapacity: new(int32(1)), + DefaultCooldown: new(int32(300)), AvailabilityZones: []string{ // link "eu-west-2c", "eu-west-2a", @@ -53,62 +53,62 @@ func TestAutoScalingGroupOutputMapper(t *testing.T) { TargetGroupARNs: []string{ "arn:partition:service:region:account-id:resource-type/resource-id", // link }, - HealthCheckType: PtrString("EC2"), - HealthCheckGracePeriod: PtrInt32(15), + HealthCheckType: new("EC2"), + HealthCheckGracePeriod: new(int32(15)), Instances: []types.Instance{ { - InstanceId: PtrString("i-0be6c4fe789cb1b78"), // link - InstanceType: PtrString("t3.large"), - AvailabilityZone: PtrString("eu-west-2c"), + InstanceId: new("i-0be6c4fe789cb1b78"), // link + InstanceType: new("t3.large"), + AvailabilityZone: new("eu-west-2c"), LifecycleState: types.LifecycleStateInService, - HealthStatus: PtrString("Healthy"), + HealthStatus: new("Healthy"), LaunchTemplate: &types.LaunchTemplateSpecification{ - LaunchTemplateId: PtrString("lt-0174ff2b8909d0c75"), // Link - LaunchTemplateName: PtrString("eks-96c2dfb1-a11b-b5e4-6efb-0fea7e22855c"), - Version: PtrString("1"), + LaunchTemplateId: new("lt-0174ff2b8909d0c75"), // Link + LaunchTemplateName: 
new("eks-96c2dfb1-a11b-b5e4-6efb-0fea7e22855c"), + Version: new("1"), }, - ProtectedFromScaleIn: PtrBool(false), + ProtectedFromScaleIn: new(false), }, }, - CreatedTime: PtrTime(time.Now()), + CreatedTime: new(time.Now()), SuspendedProcesses: []types.SuspendedProcess{}, - VPCZoneIdentifier: PtrString("subnet-0e234bef35fc4a9e1,subnet-09d5f6fa75b0b4569,subnet-0960234bbc4edca03"), + VPCZoneIdentifier: new("subnet-0e234bef35fc4a9e1,subnet-09d5f6fa75b0b4569,subnet-0960234bbc4edca03"), EnabledMetrics: []types.EnabledMetric{}, Tags: []types.TagDescription{ { - ResourceId: PtrString("eks-default-20230117110031319900000013-96c2dfb1-a11b-b5e4-6efb-0fea7e22855c"), - ResourceType: PtrString("auto-scaling-group"), - Key: PtrString("eks:cluster-name"), - Value: PtrString("dogfood"), - PropagateAtLaunch: PtrBool(true), + ResourceId: new("eks-default-20230117110031319900000013-96c2dfb1-a11b-b5e4-6efb-0fea7e22855c"), + ResourceType: new("auto-scaling-group"), + Key: new("eks:cluster-name"), + Value: new("dogfood"), + PropagateAtLaunch: new(true), }, { - ResourceId: PtrString("eks-default-20230117110031319900000013-96c2dfb1-a11b-b5e4-6efb-0fea7e22855c"), - ResourceType: PtrString("auto-scaling-group"), - Key: PtrString("eks:nodegroup-name"), - Value: PtrString("default-20230117110031319900000013"), - PropagateAtLaunch: PtrBool(true), + ResourceId: new("eks-default-20230117110031319900000013-96c2dfb1-a11b-b5e4-6efb-0fea7e22855c"), + ResourceType: new("auto-scaling-group"), + Key: new("eks:nodegroup-name"), + Value: new("default-20230117110031319900000013"), + PropagateAtLaunch: new(true), }, { - ResourceId: PtrString("eks-default-20230117110031319900000013-96c2dfb1-a11b-b5e4-6efb-0fea7e22855c"), - ResourceType: PtrString("auto-scaling-group"), - Key: PtrString("k8s.io/cluster-autoscaler/dogfood"), - Value: PtrString("owned"), - PropagateAtLaunch: PtrBool(true), + ResourceId: new("eks-default-20230117110031319900000013-96c2dfb1-a11b-b5e4-6efb-0fea7e22855c"), + ResourceType: 
new("auto-scaling-group"), + Key: new("k8s.io/cluster-autoscaler/dogfood"), + Value: new("owned"), + PropagateAtLaunch: new(true), }, { - ResourceId: PtrString("eks-default-20230117110031319900000013-96c2dfb1-a11b-b5e4-6efb-0fea7e22855c"), - ResourceType: PtrString("auto-scaling-group"), - Key: PtrString("k8s.io/cluster-autoscaler/enabled"), - Value: PtrString("true"), - PropagateAtLaunch: PtrBool(true), + ResourceId: new("eks-default-20230117110031319900000013-96c2dfb1-a11b-b5e4-6efb-0fea7e22855c"), + ResourceType: new("auto-scaling-group"), + Key: new("k8s.io/cluster-autoscaler/enabled"), + Value: new("true"), + PropagateAtLaunch: new(true), }, { - ResourceId: PtrString("eks-default-20230117110031319900000013-96c2dfb1-a11b-b5e4-6efb-0fea7e22855c"), - ResourceType: PtrString("auto-scaling-group"), - Key: PtrString("kubernetes.io/cluster/dogfood"), - Value: PtrString("owned"), - PropagateAtLaunch: PtrBool(true), + ResourceId: new("eks-default-20230117110031319900000013-96c2dfb1-a11b-b5e4-6efb-0fea7e22855c"), + ResourceType: new("auto-scaling-group"), + Key: new("kubernetes.io/cluster/dogfood"), + Value: new("owned"), + PropagateAtLaunch: new(true), }, }, TerminationPolicies: []string{ @@ -116,36 +116,36 @@ func TestAutoScalingGroupOutputMapper(t *testing.T) { "OldestLaunchTemplate", "OldestInstance", }, - NewInstancesProtectedFromScaleIn: PtrBool(false), - ServiceLinkedRoleARN: PtrString("arn:aws:iam::944651592624:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling"), // link - CapacityRebalance: PtrBool(true), + NewInstancesProtectedFromScaleIn: new(false), + ServiceLinkedRoleARN: new("arn:aws:iam::944651592624:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling"), // link + CapacityRebalance: new(true), TrafficSources: []types.TrafficSourceIdentifier{ { - Identifier: PtrString("arn:partition:service:region:account-id:resource-type/resource-id"), // We will skip this for now since it's related to VPC lattice groups 
which are still in preview + Identifier: new("arn:partition:service:region:account-id:resource-type/resource-id"), // We will skip this for now since it's related to VPC lattice groups which are still in preview }, }, - Context: PtrString("foo"), - DefaultInstanceWarmup: PtrInt32(10), - DesiredCapacityType: PtrString("foo"), - LaunchConfigurationName: PtrString("launchConfig"), // link + Context: new("foo"), + DefaultInstanceWarmup: new(int32(10)), + DesiredCapacityType: new("foo"), + LaunchConfigurationName: new("launchConfig"), // link LaunchTemplate: &types.LaunchTemplateSpecification{ - LaunchTemplateId: PtrString("id"), // link - LaunchTemplateName: PtrString("launchTemplateName"), + LaunchTemplateId: new("id"), // link + LaunchTemplateName: new("launchTemplateName"), }, - MaxInstanceLifetime: PtrInt32(30), - PlacementGroup: PtrString("placementGroup"), // link (ec2) - PredictedCapacity: PtrInt32(1), - Status: PtrString("OK"), + MaxInstanceLifetime: new(int32(30)), + PlacementGroup: new("placementGroup"), // link (ec2) + PredictedCapacity: new(int32(1)), + Status: new("OK"), WarmPoolConfiguration: &types.WarmPoolConfiguration{ InstanceReusePolicy: &types.InstanceReusePolicy{ - ReuseOnScaleIn: PtrBool(true), + ReuseOnScaleIn: new(true), }, - MaxGroupPreparedCapacity: PtrInt32(1), - MinSize: PtrInt32(1), + MaxGroupPreparedCapacity: new(int32(1)), + MinSize: new(int32(1)), PoolState: types.WarmPoolStateHibernated, Status: types.WarmPoolStatusPendingDelete, }, - WarmPoolSize: PtrInt32(1), + WarmPoolSize: new(int32(1)), }, }, } diff --git a/aws-source/adapters/autoscaling-auto-scaling-policy_test.go b/aws-source/adapters/autoscaling-auto-scaling-policy_test.go index 70929e68..ea1afe02 100644 --- a/aws-source/adapters/autoscaling-auto-scaling-policy_test.go +++ b/aws-source/adapters/autoscaling-auto-scaling-policy_test.go @@ -16,86 +16,86 @@ func TestScalingPolicyOutputMapper(t *testing.T) { output := autoscaling.DescribePoliciesOutput{ ScalingPolicies: 
[]types.ScalingPolicy{ { - PolicyName: PtrString("scale-up-policy"), - PolicyARN: PtrString("arn:aws:autoscaling:us-east-1:123456789012:scalingPolicy:12345678-1234-1234-1234-123456789012:autoScalingGroupName/my-asg:policyName/scale-up-policy"), - AutoScalingGroupName: PtrString("my-asg"), - PolicyType: PtrString("TargetTrackingScaling"), - AdjustmentType: PtrString("ChangeInCapacity"), - MinAdjustmentMagnitude: PtrInt32(1), - ScalingAdjustment: PtrInt32(1), - Cooldown: PtrInt32(300), - MetricAggregationType: PtrString("Average"), - EstimatedInstanceWarmup: PtrInt32(300), - Enabled: PtrBool(true), + PolicyName: new("scale-up-policy"), + PolicyARN: new("arn:aws:autoscaling:us-east-1:123456789012:scalingPolicy:12345678-1234-1234-1234-123456789012:autoScalingGroupName/my-asg:policyName/scale-up-policy"), + AutoScalingGroupName: new("my-asg"), + PolicyType: new("TargetTrackingScaling"), + AdjustmentType: new("ChangeInCapacity"), + MinAdjustmentMagnitude: new(int32(1)), + ScalingAdjustment: new(int32(1)), + Cooldown: new(int32(300)), + MetricAggregationType: new("Average"), + EstimatedInstanceWarmup: new(int32(300)), + Enabled: new(true), TargetTrackingConfiguration: &types.TargetTrackingConfiguration{ PredefinedMetricSpecification: &types.PredefinedMetricSpecification{ PredefinedMetricType: types.MetricTypeALBRequestCountPerTarget, - ResourceLabel: PtrString("app/my-alb/778d41231b141a0f/targetgroup/my-alb-target-group/943f017f100becff"), + ResourceLabel: new("app/my-alb/778d41231b141a0f/targetgroup/my-alb-target-group/943f017f100becff"), }, - TargetValue: PtrFloat64(50.0), + TargetValue: new(50.0), }, Alarms: []types.Alarm{ { - AlarmName: PtrString("my-alarm-high"), - AlarmARN: PtrString("arn:aws:cloudwatch:us-east-1:123456789012:alarm:my-alarm-high"), + AlarmName: new("my-alarm-high"), + AlarmARN: new("arn:aws:cloudwatch:us-east-1:123456789012:alarm:my-alarm-high"), }, { - AlarmName: PtrString("my-alarm-low"), - AlarmARN: 
PtrString("arn:aws:cloudwatch:us-east-1:123456789012:alarm:my-alarm-low"), + AlarmName: new("my-alarm-low"), + AlarmARN: new("arn:aws:cloudwatch:us-east-1:123456789012:alarm:my-alarm-low"), }, }, }, { - PolicyName: PtrString("step-scaling-policy"), - PolicyARN: PtrString("arn:aws:autoscaling:us-east-1:123456789012:scalingPolicy:87654321-4321-4321-4321-210987654321:autoScalingGroupName/my-asg:policyName/step-scaling-policy"), - AutoScalingGroupName: PtrString("my-asg"), - PolicyType: PtrString("StepScaling"), - AdjustmentType: PtrString("PercentChangeInCapacity"), - MinAdjustmentMagnitude: PtrInt32(2), - MetricAggregationType: PtrString("Average"), - EstimatedInstanceWarmup: PtrInt32(60), - Enabled: PtrBool(true), + PolicyName: new("step-scaling-policy"), + PolicyARN: new("arn:aws:autoscaling:us-east-1:123456789012:scalingPolicy:87654321-4321-4321-4321-210987654321:autoScalingGroupName/my-asg:policyName/step-scaling-policy"), + AutoScalingGroupName: new("my-asg"), + PolicyType: new("StepScaling"), + AdjustmentType: new("PercentChangeInCapacity"), + MinAdjustmentMagnitude: new(int32(2)), + MetricAggregationType: new("Average"), + EstimatedInstanceWarmup: new(int32(60)), + Enabled: new(true), StepAdjustments: []types.StepAdjustment{ { - MetricIntervalLowerBound: PtrFloat64(0.0), - MetricIntervalUpperBound: PtrFloat64(10.0), - ScalingAdjustment: PtrInt32(10), + MetricIntervalLowerBound: new(0.0), + MetricIntervalUpperBound: new(10.0), + ScalingAdjustment: new(int32(10)), }, { - MetricIntervalLowerBound: PtrFloat64(10.0), - ScalingAdjustment: PtrInt32(20), + MetricIntervalLowerBound: new(10.0), + ScalingAdjustment: new(int32(20)), }, }, Alarms: []types.Alarm{ { - AlarmName: PtrString("step-alarm"), - AlarmARN: PtrString("arn:aws:cloudwatch:us-east-1:123456789012:alarm:step-alarm"), + AlarmName: new("step-alarm"), + AlarmARN: new("arn:aws:cloudwatch:us-east-1:123456789012:alarm:step-alarm"), }, }, }, { - PolicyName: PtrString("simple-scaling-policy"), - PolicyARN: 
PtrString("arn:aws:autoscaling:us-east-1:123456789012:scalingPolicy:11111111-2222-3333-4444-555555555555:autoScalingGroupName/another-asg:policyName/simple-scaling-policy"), - AutoScalingGroupName: PtrString("another-asg"), - PolicyType: PtrString("SimpleScaling"), - AdjustmentType: PtrString("ExactCapacity"), - ScalingAdjustment: PtrInt32(5), - Cooldown: PtrInt32(600), - Enabled: PtrBool(false), + PolicyName: new("simple-scaling-policy"), + PolicyARN: new("arn:aws:autoscaling:us-east-1:123456789012:scalingPolicy:11111111-2222-3333-4444-555555555555:autoScalingGroupName/another-asg:policyName/simple-scaling-policy"), + AutoScalingGroupName: new("another-asg"), + PolicyType: new("SimpleScaling"), + AdjustmentType: new("ExactCapacity"), + ScalingAdjustment: new(int32(5)), + Cooldown: new(int32(600)), + Enabled: new(false), }, { - PolicyName: PtrString("predictive-scaling-policy"), - PolicyARN: PtrString("arn:aws:autoscaling:us-east-1:123456789012:scalingPolicy:99999999-8888-7777-6666-555555555555:autoScalingGroupName/predictive-asg:policyName/predictive-scaling-policy"), - AutoScalingGroupName: PtrString("predictive-asg"), - PolicyType: PtrString("PredictiveScaling"), - Enabled: PtrBool(true), + PolicyName: new("predictive-scaling-policy"), + PolicyARN: new("arn:aws:autoscaling:us-east-1:123456789012:scalingPolicy:99999999-8888-7777-6666-555555555555:autoScalingGroupName/predictive-asg:policyName/predictive-scaling-policy"), + AutoScalingGroupName: new("predictive-asg"), + PolicyType: new("PredictiveScaling"), + Enabled: new(true), PredictiveScalingConfiguration: &types.PredictiveScalingConfiguration{ MetricSpecifications: []types.PredictiveScalingMetricSpecification{ { - TargetValue: PtrFloat64(40.0), + TargetValue: new(40.0), PredefinedMetricPairSpecification: &types.PredictiveScalingPredefinedMetricPair{ PredefinedMetricType: types.PredefinedMetricPairTypeALBRequestCount, - ResourceLabel: 
PtrString("app/predictive-alb/abc123def456/targetgroup/predictive-tg/789xyz"), + ResourceLabel: new("app/predictive-alb/abc123def456/targetgroup/predictive-tg/789xyz"), }, }, }, diff --git a/aws-source/adapters/cloudfront-cache-policy_test.go b/aws-source/adapters/cloudfront-cache-policy_test.go index 8380c496..e90c0d73 100644 --- a/aws-source/adapters/cloudfront-cache-policy_test.go +++ b/aws-source/adapters/cloudfront-cache-policy_test.go @@ -11,29 +11,29 @@ import ( ) var testCachePolicy = &types.CachePolicy{ - Id: PtrString("test-id"), - LastModifiedTime: PtrTime(time.Now()), + Id: new("test-id"), + LastModifiedTime: new(time.Now()), CachePolicyConfig: &types.CachePolicyConfig{ - MinTTL: PtrInt64(1), - Name: PtrString("test-name"), - Comment: PtrString("test-comment"), - DefaultTTL: PtrInt64(1), - MaxTTL: PtrInt64(1), + MinTTL: new(int64(1)), + Name: new("test-name"), + Comment: new("test-comment"), + DefaultTTL: new(int64(1)), + MaxTTL: new(int64(1)), ParametersInCacheKeyAndForwardedToOrigin: &types.ParametersInCacheKeyAndForwardedToOrigin{ CookiesConfig: &types.CachePolicyCookiesConfig{ CookieBehavior: types.CachePolicyCookieBehaviorAll, Cookies: &types.CookieNames{ - Quantity: PtrInt32(1), + Quantity: new(int32(1)), Items: []string{ "test-cookie", }, }, }, - EnableAcceptEncodingGzip: PtrBool(true), + EnableAcceptEncodingGzip: new(true), HeadersConfig: &types.CachePolicyHeadersConfig{ HeaderBehavior: types.CachePolicyHeaderBehaviorWhitelist, Headers: &types.Headers{ - Quantity: PtrInt32(1), + Quantity: new(int32(1)), Items: []string{ "test-header", }, @@ -42,13 +42,13 @@ var testCachePolicy = &types.CachePolicy{ QueryStringsConfig: &types.CachePolicyQueryStringsConfig{ QueryStringBehavior: types.CachePolicyQueryStringBehaviorWhitelist, QueryStrings: &types.QueryStringNames{ - Quantity: PtrInt32(1), + Quantity: new(int32(1)), Items: []string{ "test-query-string", }, }, }, - EnableAcceptEncodingBrotli: PtrBool(true), + EnableAcceptEncodingBrotli: new(true), }, 
}, } diff --git a/aws-source/adapters/cloudfront-continuous-deployment-policy_test.go b/aws-source/adapters/cloudfront-continuous-deployment-policy_test.go index 77c38358..1fe7ec00 100644 --- a/aws-source/adapters/cloudfront-continuous-deployment-policy_test.go +++ b/aws-source/adapters/cloudfront-continuous-deployment-policy_test.go @@ -11,12 +11,12 @@ import ( func TestContinuousDeploymentPolicyItemMapper(t *testing.T) { item, err := continuousDeploymentPolicyItemMapper("", "test", &types.ContinuousDeploymentPolicy{ - Id: PtrString("test-id"), - LastModifiedTime: PtrTime(time.Now()), + Id: new("test-id"), + LastModifiedTime: new(time.Now()), ContinuousDeploymentPolicyConfig: &types.ContinuousDeploymentPolicyConfig{ - Enabled: PtrBool(true), + Enabled: new(true), StagingDistributionDnsNames: &types.StagingDistributionDnsNames{ - Quantity: PtrInt32(1), + Quantity: new(int32(1)), Items: []string{ "staging.test.com", // link }, @@ -24,14 +24,14 @@ func TestContinuousDeploymentPolicyItemMapper(t *testing.T) { TrafficConfig: &types.TrafficConfig{ Type: types.ContinuousDeploymentPolicyTypeSingleWeight, SingleHeaderConfig: &types.ContinuousDeploymentSingleHeaderConfig{ - Header: PtrString("test-header"), - Value: PtrString("test-value"), + Header: new("test-header"), + Value: new("test-value"), }, SingleWeightConfig: &types.ContinuousDeploymentSingleWeightConfig{ - Weight: PtrFloat32(1), + Weight: new(float32(1)), SessionStickinessConfig: &types.SessionStickinessConfig{ - IdleTTL: PtrInt32(1), - MaximumTTL: PtrInt32(2), + IdleTTL: new(int32(1)), + MaximumTTL: new(int32(2)), }, }, }, diff --git a/aws-source/adapters/cloudfront-distribution_test.go b/aws-source/adapters/cloudfront-distribution_test.go index 20c713df..3fd3db2a 100644 --- a/aws-source/adapters/cloudfront-distribution_test.go +++ b/aws-source/adapters/cloudfront-distribution_test.go @@ -14,20 +14,20 @@ import ( func (t TestCloudFrontClient) GetDistribution(ctx context.Context, params 
*cloudfront.GetDistributionInput, optFns ...func(*cloudfront.Options)) (*cloudfront.GetDistributionOutput, error) { return &cloudfront.GetDistributionOutput{ Distribution: &types.Distribution{ - ARN: PtrString("arn:aws:cloudfront::123456789012:distribution/test-id"), - DomainName: PtrString("d111111abcdef8.cloudfront.net"), // link - Id: PtrString("test-id"), - InProgressInvalidationBatches: PtrInt32(1), - LastModifiedTime: PtrTime(time.Now()), - Status: PtrString("Deployed"), // health: https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-returned.html + ARN: new("arn:aws:cloudfront::123456789012:distribution/test-id"), + DomainName: new("d111111abcdef8.cloudfront.net"), // link + Id: new("test-id"), + InProgressInvalidationBatches: new(int32(1)), + LastModifiedTime: new(time.Now()), + Status: new("Deployed"), // health: https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-returned.html ActiveTrustedKeyGroups: &types.ActiveTrustedKeyGroups{ - Enabled: PtrBool(true), - Quantity: PtrInt32(1), + Enabled: new(true), + Quantity: new(int32(1)), Items: []types.KGKeyPairIds{ { - KeyGroupId: PtrString("key-group-1"), // link + KeyGroupId: new("key-group-1"), // link KeyPairIds: &types.KeyPairIds{ - Quantity: PtrInt32(1), + Quantity: new(int32(1)), Items: []string{ "123456789", }, @@ -36,13 +36,13 @@ func (t TestCloudFrontClient) GetDistribution(ctx context.Context, params *cloud }, }, ActiveTrustedSigners: &types.ActiveTrustedSigners{ - Enabled: PtrBool(true), - Quantity: PtrInt32(1), + Enabled: new(true), + Quantity: new(int32(1)), Items: []types.Signer{ { - AwsAccountNumber: PtrString("123456789"), + AwsAccountNumber: new("123456789"), KeyPairIds: &types.KeyPairIds{ - Quantity: PtrInt32(1), + Quantity: new(int32(1)), Items: []string{ "123456789", }, @@ -52,54 +52,54 @@ func (t TestCloudFrontClient) GetDistribution(ctx context.Context, params *cloud }, AliasICPRecordals: []types.AliasICPRecordal{ 
{ - CNAME: PtrString("something.foo.bar.com"), // link + CNAME: new("something.foo.bar.com"), // link ICPRecordalStatus: types.ICPRecordalStatusApproved, }, }, DistributionConfig: &types.DistributionConfig{ - CallerReference: PtrString("test-caller-reference"), - Comment: PtrString("test-comment"), - Enabled: PtrBool(true), + CallerReference: new("test-caller-reference"), + Comment: new("test-comment"), + Enabled: new(true), Aliases: &types.Aliases{ - Quantity: PtrInt32(1), + Quantity: new(int32(1)), Items: []string{ "www.example.com", // link }, }, - Staging: PtrBool(true), - ContinuousDeploymentPolicyId: PtrString("test-continuous-deployment-policy-id"), // link + Staging: new(true), + ContinuousDeploymentPolicyId: new("test-continuous-deployment-policy-id"), // link CacheBehaviors: &types.CacheBehaviors{ - Quantity: PtrInt32(1), + Quantity: new(int32(1)), Items: []types.CacheBehavior{ { - PathPattern: PtrString("/foo"), - TargetOriginId: PtrString("CustomOriginConfig"), + PathPattern: new("/foo"), + TargetOriginId: new("CustomOriginConfig"), ViewerProtocolPolicy: types.ViewerProtocolPolicyHttpsOnly, AllowedMethods: &types.AllowedMethods{ Items: []types.Method{ types.MethodGet, }, }, - CachePolicyId: PtrString("test-cache-policy-id"), // link - Compress: PtrBool(true), - DefaultTTL: PtrInt64(1), - FieldLevelEncryptionId: PtrString("test-field-level-encryption-id"), // link - MaxTTL: PtrInt64(1), - MinTTL: PtrInt64(1), - OriginRequestPolicyId: PtrString("test-origin-request-policy-id"), // link - RealtimeLogConfigArn: PtrString("arn:aws:logs:us-east-1:123456789012:realtime-log-config/test-id"), // link - ResponseHeadersPolicyId: PtrString("test-response-headers-policy-id"), // link - SmoothStreaming: PtrBool(true), + CachePolicyId: new("test-cache-policy-id"), // link + Compress: new(true), + DefaultTTL: new(int64(1)), + FieldLevelEncryptionId: new("test-field-level-encryption-id"), // link + MaxTTL: new(int64(1)), + MinTTL: new(int64(1)), + OriginRequestPolicyId: 
new("test-origin-request-policy-id"), // link + RealtimeLogConfigArn: new("arn:aws:logs:us-east-1:123456789012:realtime-log-config/test-id"), // link + ResponseHeadersPolicyId: new("test-response-headers-policy-id"), // link + SmoothStreaming: new(true), TrustedKeyGroups: &types.TrustedKeyGroups{ - Enabled: PtrBool(true), - Quantity: PtrInt32(1), + Enabled: new(true), + Quantity: new(int32(1)), Items: []string{ "key-group-1", // link }, }, TrustedSigners: &types.TrustedSigners{ - Enabled: PtrBool(true), - Quantity: PtrInt32(1), + Enabled: new(true), + Quantity: new(int32(1)), Items: []string{ "123456789", }, @@ -108,42 +108,42 @@ func (t TestCloudFrontClient) GetDistribution(ctx context.Context, params *cloud Cookies: &types.CookiePreference{ Forward: types.ItemSelectionWhitelist, WhitelistedNames: &types.CookieNames{ - Quantity: PtrInt32(1), + Quantity: new(int32(1)), Items: []string{ "cookie_123", }, }, }, - QueryString: PtrBool(true), + QueryString: new(true), Headers: &types.Headers{ - Quantity: PtrInt32(1), + Quantity: new(int32(1)), Items: []string{ "X-Customer-Header", }, }, QueryStringCacheKeys: &types.QueryStringCacheKeys{ - Quantity: PtrInt32(1), + Quantity: new(int32(1)), Items: []string{ "test-query-string-cache-key", }, }, }, FunctionAssociations: &types.FunctionAssociations{ - Quantity: PtrInt32(1), + Quantity: new(int32(1)), Items: []types.FunctionAssociation{ { EventType: types.EventTypeOriginRequest, - FunctionARN: PtrString("arn:aws:cloudfront::123412341234:function/1234"), // link + FunctionARN: new("arn:aws:cloudfront::123412341234:function/1234"), // link }, }, }, LambdaFunctionAssociations: &types.LambdaFunctionAssociations{ - Quantity: PtrInt32(1), + Quantity: new(int32(1)), Items: []types.LambdaFunctionAssociation{ { EventType: types.EventTypeOriginResponse, - LambdaFunctionARN: PtrString("arn:aws:lambda:us-east-1:123456789012:function:test-function"), // link - IncludeBody: PtrBool(true), + LambdaFunctionARN: 
new("arn:aws:lambda:us-east-1:123456789012:function:test-function"), // link + IncludeBody: new(true), }, }, }, @@ -153,107 +153,107 @@ func (t TestCloudFrontClient) GetDistribution(ctx context.Context, params *cloud Origins: &types.Origins{ Items: []types.Origin{ { - DomainName: PtrString("DOC-EXAMPLE-BUCKET.s3.us-west-2.amazonaws.com"), // link - Id: PtrString("CustomOriginConfig"), - ConnectionAttempts: PtrInt32(3), - ConnectionTimeout: PtrInt32(10), + DomainName: new("DOC-EXAMPLE-BUCKET.s3.us-west-2.amazonaws.com"), // link + Id: new("CustomOriginConfig"), + ConnectionAttempts: new(int32(3)), + ConnectionTimeout: new(int32(10)), CustomHeaders: &types.CustomHeaders{ - Quantity: PtrInt32(1), + Quantity: new(int32(1)), Items: []types.OriginCustomHeader{ { - HeaderName: PtrString("test-header-name"), - HeaderValue: PtrString("test-header-value"), + HeaderName: new("test-header-name"), + HeaderValue: new("test-header-value"), }, }, }, CustomOriginConfig: &types.CustomOriginConfig{ - HTTPPort: PtrInt32(80), - HTTPSPort: PtrInt32(443), + HTTPPort: new(int32(80)), + HTTPSPort: new(int32(443)), OriginProtocolPolicy: types.OriginProtocolPolicyMatchViewer, - OriginKeepaliveTimeout: PtrInt32(5), - OriginReadTimeout: PtrInt32(30), + OriginKeepaliveTimeout: new(int32(5)), + OriginReadTimeout: new(int32(30)), OriginSslProtocols: &types.OriginSslProtocols{ Items: types.SslProtocolSSLv3.Values(), }, }, - OriginAccessControlId: PtrString("test-origin-access-control-id"), // link - OriginPath: PtrString("/foo"), + OriginAccessControlId: new("test-origin-access-control-id"), // link + OriginPath: new("/foo"), OriginShield: &types.OriginShield{ - Enabled: PtrBool(true), - OriginShieldRegion: PtrString("eu-west-1"), + Enabled: new(true), + OriginShieldRegion: new("eu-west-1"), }, S3OriginConfig: &types.S3OriginConfig{ - OriginAccessIdentity: PtrString("test-origin-access-identity"), // link + OriginAccessIdentity: new("test-origin-access-identity"), // link }, }, }, }, 
DefaultCacheBehavior: &types.DefaultCacheBehavior{ - TargetOriginId: PtrString("CustomOriginConfig"), + TargetOriginId: new("CustomOriginConfig"), ViewerProtocolPolicy: types.ViewerProtocolPolicyHttpsOnly, - CachePolicyId: PtrString("test-cache-policy-id"), // link - Compress: PtrBool(true), - DefaultTTL: PtrInt64(1), - FieldLevelEncryptionId: PtrString("test-field-level-encryption-id"), // link - MaxTTL: PtrInt64(1), - MinTTL: PtrInt64(1), - OriginRequestPolicyId: PtrString("test-origin-request-policy-id"), // link - RealtimeLogConfigArn: PtrString("arn:aws:logs:us-east-1:123456789012:realtime-log-config/test-id"), // link - ResponseHeadersPolicyId: PtrString("test-response-headers-policy-id"), // link - SmoothStreaming: PtrBool(true), + CachePolicyId: new("test-cache-policy-id"), // link + Compress: new(true), + DefaultTTL: new(int64(1)), + FieldLevelEncryptionId: new("test-field-level-encryption-id"), // link + MaxTTL: new(int64(1)), + MinTTL: new(int64(1)), + OriginRequestPolicyId: new("test-origin-request-policy-id"), // link + RealtimeLogConfigArn: new("arn:aws:logs:us-east-1:123456789012:realtime-log-config/test-id"), // link + ResponseHeadersPolicyId: new("test-response-headers-policy-id"), // link + SmoothStreaming: new(true), ForwardedValues: &types.ForwardedValues{ Cookies: &types.CookiePreference{ Forward: types.ItemSelectionWhitelist, WhitelistedNames: &types.CookieNames{ - Quantity: PtrInt32(1), + Quantity: new(int32(1)), Items: []string{ "cooke_123", }, }, }, - QueryString: PtrBool(true), + QueryString: new(true), Headers: &types.Headers{ - Quantity: PtrInt32(1), + Quantity: new(int32(1)), Items: []string{ "X-Customer-Header", }, }, QueryStringCacheKeys: &types.QueryStringCacheKeys{ - Quantity: PtrInt32(1), + Quantity: new(int32(1)), Items: []string{ "test-query-string-cache-key", }, }, }, FunctionAssociations: &types.FunctionAssociations{ - Quantity: PtrInt32(1), + Quantity: new(int32(1)), Items: []types.FunctionAssociation{ { EventType: 
types.EventTypeViewerRequest, - FunctionARN: PtrString("arn:aws:cloudfront::123412341234:function/1234"), // link + FunctionARN: new("arn:aws:cloudfront::123412341234:function/1234"), // link }, }, }, LambdaFunctionAssociations: &types.LambdaFunctionAssociations{ - Quantity: PtrInt32(1), + Quantity: new(int32(1)), Items: []types.LambdaFunctionAssociation{ { EventType: types.EventTypeOriginRequest, - LambdaFunctionARN: PtrString("arn:aws:lambda:us-east-1:123456789012:function:test-function"), // link - IncludeBody: PtrBool(true), + LambdaFunctionARN: new("arn:aws:lambda:us-east-1:123456789012:function:test-function"), // link + IncludeBody: new(true), }, }, }, TrustedKeyGroups: &types.TrustedKeyGroups{ - Enabled: PtrBool(true), - Quantity: PtrInt32(1), + Enabled: new(true), + Quantity: new(int32(1)), Items: []string{ "key-group-1", // link }, }, TrustedSigners: &types.TrustedSigners{ - Enabled: PtrBool(true), - Quantity: PtrInt32(1), + Enabled: new(true), + Quantity: new(int32(1)), Items: []string{ "123456789", }, @@ -262,7 +262,7 @@ func (t TestCloudFrontClient) GetDistribution(ctx context.Context, params *cloud Items: []types.Method{ types.MethodGet, }, - Quantity: PtrInt32(1), + Quantity: new(int32(1)), CachedMethods: &types.CachedMethods{ Items: []types.Method{ types.MethodGet, @@ -271,27 +271,27 @@ func (t TestCloudFrontClient) GetDistribution(ctx context.Context, params *cloud }, }, CustomErrorResponses: &types.CustomErrorResponses{ - Quantity: PtrInt32(1), + Quantity: new(int32(1)), Items: []types.CustomErrorResponse{ { - ErrorCode: PtrInt32(404), - ErrorCachingMinTTL: PtrInt64(1), - ResponseCode: PtrString("200"), - ResponsePagePath: PtrString("/foo"), + ErrorCode: new(int32(404)), + ErrorCachingMinTTL: new(int64(1)), + ResponseCode: new("200"), + ResponsePagePath: new("/foo"), }, }, }, - DefaultRootObject: PtrString("index.html"), + DefaultRootObject: new("index.html"), HttpVersion: types.HttpVersionHttp11, - IsIPV6Enabled: PtrBool(true), + IsIPV6Enabled: 
new(true), Logging: &types.LoggingConfig{ - Bucket: PtrString("aws-cf-access-logs.s3.amazonaws.com"), // link - Enabled: PtrBool(true), - IncludeCookies: PtrBool(true), - Prefix: PtrString("test-prefix"), + Bucket: new("aws-cf-access-logs.s3.amazonaws.com"), // link + Enabled: new(true), + IncludeCookies: new(true), + Prefix: new("test-prefix"), }, OriginGroups: &types.OriginGroups{ - Quantity: PtrInt32(1), + Quantity: new(int32(1)), Items: []types.OriginGroup{ { FailoverCriteria: &types.OriginGroupFailoverCriteria{ @@ -299,15 +299,15 @@ func (t TestCloudFrontClient) GetDistribution(ctx context.Context, params *cloud Items: []int32{ 404, }, - Quantity: PtrInt32(1), + Quantity: new(int32(1)), }, }, - Id: PtrString("test-id"), + Id: new("test-id"), Members: &types.OriginGroupMembers{ - Quantity: PtrInt32(1), + Quantity: new(int32(1)), Items: []types.OriginGroupMember{ { - OriginId: PtrString("CustomOriginConfig"), + OriginId: new("CustomOriginConfig"), }, }, }, @@ -317,7 +317,7 @@ func (t TestCloudFrontClient) GetDistribution(ctx context.Context, params *cloud PriceClass: types.PriceClassPriceClass200, Restrictions: &types.Restrictions{ GeoRestriction: &types.GeoRestriction{ - Quantity: PtrInt32(1), + Quantity: new(int32(1)), RestrictionType: types.GeoRestrictionTypeWhitelist, Items: []string{ "US", @@ -325,16 +325,16 @@ func (t TestCloudFrontClient) GetDistribution(ctx context.Context, params *cloud }, }, ViewerCertificate: &types.ViewerCertificate{ - ACMCertificateArn: PtrString("arn:aws:acm:us-east-1:123456789012:certificate/test-id"), // link - Certificate: PtrString("test-certificate"), + ACMCertificateArn: new("arn:aws:acm:us-east-1:123456789012:certificate/test-id"), // link + Certificate: new("test-certificate"), CertificateSource: types.CertificateSourceAcm, - CloudFrontDefaultCertificate: PtrBool(true), - IAMCertificateId: PtrString("test-iam-certificate-id"), // link + CloudFrontDefaultCertificate: new(true), + IAMCertificateId: 
new("test-iam-certificate-id"), // link MinimumProtocolVersion: types.MinimumProtocolVersion(types.SslProtocolSSLv3), SSLSupportMethod: types.SSLSupportMethodSniOnly, }, // Note this can also be in the format: 473e64fd-f30b-4765-81a0-62ad96dd167a for WAF Classic - WebACLId: PtrString("arn:aws:wafv2:us-east-1:123456789012:global/webacl/ExampleWebACL/473e64fd-f30b-4765-81a0-62ad96dd167a"), // link + WebACLId: new("arn:aws:wafv2:us-east-1:123456789012:global/webacl/ExampleWebACL/473e64fd-f30b-4765-81a0-62ad96dd167a"), // link }, }, }, nil @@ -343,10 +343,10 @@ func (t TestCloudFrontClient) GetDistribution(ctx context.Context, params *cloud func (t TestCloudFrontClient) ListDistributions(ctx context.Context, params *cloudfront.ListDistributionsInput, optFns ...func(*cloudfront.Options)) (*cloudfront.ListDistributionsOutput, error) { return &cloudfront.ListDistributionsOutput{ DistributionList: &types.DistributionList{ - IsTruncated: PtrBool(false), + IsTruncated: new(false), Items: []types.DistributionSummary{ { - Id: PtrString("test-id"), + Id: new("test-id"), }, }, }, diff --git a/aws-source/adapters/cloudfront-function_test.go b/aws-source/adapters/cloudfront-function_test.go index 83f3fe9b..29c12b89 100644 --- a/aws-source/adapters/cloudfront-function_test.go +++ b/aws-source/adapters/cloudfront-function_test.go @@ -11,17 +11,17 @@ import ( func TestFunctionItemMapper(t *testing.T) { summary := types.FunctionSummary{ FunctionConfig: &types.FunctionConfig{ - Comment: PtrString("test-comment"), + Comment: new("test-comment"), Runtime: types.FunctionRuntimeCloudfrontJs20, }, FunctionMetadata: &types.FunctionMetadata{ - FunctionARN: PtrString("arn:aws:cloudfront::123456789012:function/test-function"), - LastModifiedTime: PtrTime(time.Now()), - CreatedTime: PtrTime(time.Now()), + FunctionARN: new("arn:aws:cloudfront::123456789012:function/test-function"), + LastModifiedTime: new(time.Now()), + CreatedTime: new(time.Now()), Stage: types.FunctionStageLive, }, - Name: 
PtrString("test-function"), - Status: PtrString("test-status"), + Name: new("test-function"), + Status: new("test-status"), } item, err := functionItemMapper("", "test", &summary) diff --git a/aws-source/adapters/cloudfront-key-group_test.go b/aws-source/adapters/cloudfront-key-group_test.go index c874fae0..ebf69b5e 100644 --- a/aws-source/adapters/cloudfront-key-group_test.go +++ b/aws-source/adapters/cloudfront-key-group_test.go @@ -10,15 +10,15 @@ import ( func TestKeyGroupItemMapper(t *testing.T) { group := types.KeyGroup{ - Id: PtrString("test-id"), + Id: new("test-id"), KeyGroupConfig: &types.KeyGroupConfig{ Items: []string{ "some-identity", }, - Name: PtrString("test-name"), - Comment: PtrString("test-comment"), + Name: new("test-name"), + Comment: new("test-comment"), }, - LastModifiedTime: PtrTime(time.Now()), + LastModifiedTime: new(time.Now()), } item, err := KeyGroupItemMapper("", "test", &group) diff --git a/aws-source/adapters/cloudfront-origin-access-control_test.go b/aws-source/adapters/cloudfront-origin-access-control_test.go index 25621cce..5c78db94 100644 --- a/aws-source/adapters/cloudfront-origin-access-control_test.go +++ b/aws-source/adapters/cloudfront-origin-access-control_test.go @@ -10,13 +10,13 @@ import ( func TestOriginAccessControlItemMapper(t *testing.T) { x := types.OriginAccessControl{ - Id: PtrString("test"), + Id: new("test"), OriginAccessControlConfig: &types.OriginAccessControlConfig{ - Name: PtrString("example-name"), + Name: new("example-name"), OriginAccessControlOriginType: types.OriginAccessControlOriginTypesS3, SigningBehavior: types.OriginAccessControlSigningBehaviorsAlways, SigningProtocol: types.OriginAccessControlSigningProtocolsSigv4, - Description: PtrString("example-description"), + Description: new("example-description"), }, } diff --git a/aws-source/adapters/cloudfront-origin-request-policy_test.go b/aws-source/adapters/cloudfront-origin-request-policy_test.go index 4458f998..0e5f639d 100644 --- 
a/aws-source/adapters/cloudfront-origin-request-policy_test.go +++ b/aws-source/adapters/cloudfront-origin-request-policy_test.go @@ -10,29 +10,29 @@ import ( func TestOriginRequestPolicyItemMapper(t *testing.T) { x := types.OriginRequestPolicy{ - Id: PtrString("test"), - LastModifiedTime: PtrTime(time.Now()), + Id: new("test"), + LastModifiedTime: new(time.Now()), OriginRequestPolicyConfig: &types.OriginRequestPolicyConfig{ - Name: PtrString("example-policy"), - Comment: PtrString("example comment"), + Name: new("example-policy"), + Comment: new("example comment"), QueryStringsConfig: &types.OriginRequestPolicyQueryStringsConfig{ QueryStringBehavior: types.OriginRequestPolicyQueryStringBehaviorAllExcept, QueryStrings: &types.QueryStringNames{ - Quantity: PtrInt32(1), + Quantity: new(int32(1)), Items: []string{"test"}, }, }, CookiesConfig: &types.OriginRequestPolicyCookiesConfig{ CookieBehavior: types.OriginRequestPolicyCookieBehaviorAll, Cookies: &types.CookieNames{ - Quantity: PtrInt32(1), + Quantity: new(int32(1)), Items: []string{"test"}, }, }, HeadersConfig: &types.OriginRequestPolicyHeadersConfig{ HeaderBehavior: types.OriginRequestPolicyHeaderBehaviorAllViewer, Headers: &types.Headers{ - Quantity: PtrInt32(1), + Quantity: new(int32(1)), Items: []string{"test"}, }, }, diff --git a/aws-source/adapters/cloudfront-realtime-log-config_test.go b/aws-source/adapters/cloudfront-realtime-log-config_test.go index 58ae1589..c5fb0587 100644 --- a/aws-source/adapters/cloudfront-realtime-log-config_test.go +++ b/aws-source/adapters/cloudfront-realtime-log-config_test.go @@ -11,15 +11,15 @@ import ( func TestRealtimeLogConfigsItemMapper(t *testing.T) { x := types.RealtimeLogConfig{ - Name: PtrString("test"), - SamplingRate: PtrInt64(100), - ARN: PtrString("arn:aws:cloudfront::123456789012:realtime-log-config/12345678-1234-1234-1234-123456789012"), + Name: new("test"), + SamplingRate: new(int64(100)), + ARN: 
new("arn:aws:cloudfront::123456789012:realtime-log-config/12345678-1234-1234-1234-123456789012"), EndPoints: []types.EndPoint{ { - StreamType: PtrString("Kinesis"), + StreamType: new("Kinesis"), KinesisStreamConfig: &types.KinesisStreamConfig{ - RoleARN: PtrString("arn:aws:iam::123456789012:role/CloudFront_Logger"), // link - StreamARN: PtrString("arn:aws:kinesis:us-east-1:123456789012:stream/cloudfront-logs"), // link + RoleARN: new("arn:aws:iam::123456789012:role/CloudFront_Logger"), // link + StreamARN: new("arn:aws:kinesis:us-east-1:123456789012:stream/cloudfront-logs"), // link }, }, }, diff --git a/aws-source/adapters/cloudfront-response-headers-policy_test.go b/aws-source/adapters/cloudfront-response-headers-policy_test.go index e188964a..2806b9a7 100644 --- a/aws-source/adapters/cloudfront-response-headers-policy_test.go +++ b/aws-source/adapters/cloudfront-response-headers-policy_test.go @@ -10,68 +10,68 @@ import ( func TestResponseHeadersPolicyItemMapper(t *testing.T) { x := types.ResponseHeadersPolicy{ - Id: PtrString("test"), - LastModifiedTime: PtrTime(time.Now()), + Id: new("test"), + LastModifiedTime: new(time.Now()), ResponseHeadersPolicyConfig: &types.ResponseHeadersPolicyConfig{ - Name: PtrString("example-policy"), - Comment: PtrString("example comment"), + Name: new("example-policy"), + Comment: new("example comment"), CorsConfig: &types.ResponseHeadersPolicyCorsConfig{ - AccessControlAllowCredentials: PtrBool(true), + AccessControlAllowCredentials: new(true), AccessControlAllowHeaders: &types.ResponseHeadersPolicyAccessControlAllowHeaders{ Items: []string{"X-Customer-Header"}, - Quantity: PtrInt32(1), + Quantity: new(int32(1)), }, }, CustomHeadersConfig: &types.ResponseHeadersPolicyCustomHeadersConfig{ - Quantity: PtrInt32(1), + Quantity: new(int32(1)), Items: []types.ResponseHeadersPolicyCustomHeader{ { - Header: PtrString("X-Customer-Header"), - Override: PtrBool(true), - Value: PtrString("test"), + Header: new("X-Customer-Header"), + 
Override: new(true), + Value: new("test"), }, }, }, RemoveHeadersConfig: &types.ResponseHeadersPolicyRemoveHeadersConfig{ - Quantity: PtrInt32(1), + Quantity: new(int32(1)), Items: []types.ResponseHeadersPolicyRemoveHeader{ { - Header: PtrString("X-Private-Header"), + Header: new("X-Private-Header"), }, }, }, SecurityHeadersConfig: &types.ResponseHeadersPolicySecurityHeadersConfig{ ContentSecurityPolicy: &types.ResponseHeadersPolicyContentSecurityPolicy{ - ContentSecurityPolicy: PtrString("default-src 'none';"), - Override: PtrBool(true), + ContentSecurityPolicy: new("default-src 'none';"), + Override: new(true), }, ContentTypeOptions: &types.ResponseHeadersPolicyContentTypeOptions{ - Override: PtrBool(true), + Override: new(true), }, FrameOptions: &types.ResponseHeadersPolicyFrameOptions{ FrameOption: types.FrameOptionsListDeny, - Override: PtrBool(true), + Override: new(true), }, ReferrerPolicy: &types.ResponseHeadersPolicyReferrerPolicy{ - Override: PtrBool(true), + Override: new(true), ReferrerPolicy: types.ReferrerPolicyListNoReferrer, }, StrictTransportSecurity: &types.ResponseHeadersPolicyStrictTransportSecurity{ - AccessControlMaxAgeSec: PtrInt32(86400), - Override: PtrBool(true), - IncludeSubdomains: PtrBool(true), - Preload: PtrBool(true), + AccessControlMaxAgeSec: new(int32(86400)), + Override: new(true), + IncludeSubdomains: new(true), + Preload: new(true), }, XSSProtection: &types.ResponseHeadersPolicyXSSProtection{ - Override: PtrBool(true), - Protection: PtrBool(true), - ModeBlock: PtrBool(true), - ReportUri: PtrString("https://example.com/report"), + Override: new(true), + Protection: new(true), + ModeBlock: new(true), + ReportUri: new("https://example.com/report"), }, }, ServerTimingHeadersConfig: &types.ResponseHeadersPolicyServerTimingHeadersConfig{ - Enabled: PtrBool(true), - SamplingRate: PtrFloat64(0.1), + Enabled: new(true), + SamplingRate: new(0.1), }, }, } diff --git a/aws-source/adapters/cloudfront-streaming-distribution_test.go 
b/aws-source/adapters/cloudfront-streaming-distribution_test.go index eaeb0502..e211c389 100644 --- a/aws-source/adapters/cloudfront-streaming-distribution_test.go +++ b/aws-source/adapters/cloudfront-streaming-distribution_test.go @@ -13,21 +13,21 @@ import ( func (t TestCloudFrontClient) GetStreamingDistribution(ctx context.Context, params *cloudfront.GetStreamingDistributionInput, optFns ...func(*cloudfront.Options)) (*cloudfront.GetStreamingDistributionOutput, error) { return &cloudfront.GetStreamingDistributionOutput{ - ETag: PtrString("E2QWRUHAPOMQZL"), + ETag: new("E2QWRUHAPOMQZL"), StreamingDistribution: &types.StreamingDistribution{ - ARN: PtrString("arn:aws:cloudfront::123456789012:streaming-distribution/EDFDVBD632BHDS5"), - DomainName: PtrString("d111111abcdef8.cloudfront.net"), // link - Id: PtrString("EDFDVBD632BHDS5"), - Status: PtrString("Deployed"), // health - LastModifiedTime: PtrTime(time.Now()), + ARN: new("arn:aws:cloudfront::123456789012:streaming-distribution/EDFDVBD632BHDS5"), + DomainName: new("d111111abcdef8.cloudfront.net"), // link + Id: new("EDFDVBD632BHDS5"), + Status: new("Deployed"), // health + LastModifiedTime: new(time.Now()), ActiveTrustedSigners: &types.ActiveTrustedSigners{ - Enabled: PtrBool(true), - Quantity: PtrInt32(1), + Enabled: new(true), + Quantity: new(int32(1)), Items: []types.Signer{ { - AwsAccountNumber: PtrString("123456789012"), + AwsAccountNumber: new("123456789012"), KeyPairIds: &types.KeyPairIds{ - Quantity: PtrInt32(1), + Quantity: new(int32(1)), Items: []string{ "APKAJDGKZRVEXAMPLE", }, @@ -36,30 +36,30 @@ func (t TestCloudFrontClient) GetStreamingDistribution(ctx context.Context, para }, }, StreamingDistributionConfig: &types.StreamingDistributionConfig{ - CallerReference: PtrString("test"), - Comment: PtrString("test"), - Enabled: PtrBool(true), + CallerReference: new("test"), + Comment: new("test"), + Enabled: new(true), S3Origin: &types.S3Origin{ - DomainName: PtrString("myawsbucket.s3.amazonaws.com"), // 
link - OriginAccessIdentity: PtrString("origin-access-identity/cloudfront/E127EXAMPLE51Z"), // link + DomainName: new("myawsbucket.s3.amazonaws.com"), // link + OriginAccessIdentity: new("origin-access-identity/cloudfront/E127EXAMPLE51Z"), // link }, TrustedSigners: &types.TrustedSigners{ - Enabled: PtrBool(true), - Quantity: PtrInt32(1), + Enabled: new(true), + Quantity: new(int32(1)), Items: []string{ "self", }, }, Aliases: &types.Aliases{ - Quantity: PtrInt32(1), + Quantity: new(int32(1)), Items: []string{ "example.com", // link }, }, Logging: &types.StreamingLoggingConfig{ - Bucket: PtrString("myawslogbucket.s3.amazonaws.com"), // link - Enabled: PtrBool(true), - Prefix: PtrString("myprefix"), + Bucket: new("myawslogbucket.s3.amazonaws.com"), // link + Enabled: new(true), + Prefix: new("myprefix"), }, PriceClass: types.PriceClassPriceClassAll, }, @@ -70,10 +70,10 @@ func (t TestCloudFrontClient) GetStreamingDistribution(ctx context.Context, para func (t TestCloudFrontClient) ListStreamingDistributions(ctx context.Context, params *cloudfront.ListStreamingDistributionsInput, optFns ...func(*cloudfront.Options)) (*cloudfront.ListStreamingDistributionsOutput, error) { return &cloudfront.ListStreamingDistributionsOutput{ StreamingDistributionList: &types.StreamingDistributionList{ - IsTruncated: PtrBool(false), + IsTruncated: new(false), Items: []types.StreamingDistributionSummary{ { - Id: PtrString("test-id"), + Id: new("test-id"), }, }, }, diff --git a/aws-source/adapters/cloudfront_test.go b/aws-source/adapters/cloudfront_test.go index 7486fe9d..3357e79d 100644 --- a/aws-source/adapters/cloudfront_test.go +++ b/aws-source/adapters/cloudfront_test.go @@ -13,8 +13,8 @@ func (c TestCloudFrontClient) ListTagsForResource(ctx context.Context, params *c Tags: &types.Tags{ Items: []types.Tag{ { - Key: PtrString("foo"), - Value: PtrString("bar"), + Key: new("foo"), + Value: new("bar"), }, }, }, diff --git a/aws-source/adapters/cloudwatch-alarm_test.go 
b/aws-source/adapters/cloudwatch-alarm_test.go index be179f7e..2af7a602 100644 --- a/aws-source/adapters/cloudwatch-alarm_test.go +++ b/aws-source/adapters/cloudwatch-alarm_test.go @@ -19,8 +19,8 @@ func (c testCloudwatchClient) ListTagsForResource(ctx context.Context, params *c return &cloudwatch.ListTagsForResourceOutput{ Tags: []types.Tag{ { - Key: PtrString("Name"), - Value: PtrString("example"), + Key: new("Name"), + Value: new("example"), }, }, }, nil @@ -38,11 +38,11 @@ func TestAlarmOutputMapper(t *testing.T) { output := &cloudwatch.DescribeAlarmsOutput{ MetricAlarms: []types.MetricAlarm{ { - AlarmName: PtrString("TargetTracking-table/dylan-tfstate-AlarmHigh-14069c4a-6dcc-48a2-bfe6-b5547c90c43d"), - AlarmArn: PtrString("arn:aws:cloudwatch:eu-west-2:052392120703:alarm:TargetTracking-table/dylan-tfstate-AlarmHigh-14069c4a-6dcc-48a2-bfe6-b5547c90c43d"), - AlarmDescription: PtrString("DO NOT EDIT OR DELETE. For TargetTrackingScaling policy arn:aws:autoscaling:eu-west-2:052392120703:scalingPolicy:32f3f053-dc75-46fa-9cd4-8e8c34c47b37:resource/dynamodb/table/dylan-tfstate:policyName/$dylan-tfstate-scaling-policy:createdBy/e5bd51d8-94a8-461e-a989-08f4d10b326b."), - AlarmConfigurationUpdatedTimestamp: PtrTime(time.Now()), - ActionsEnabled: PtrBool(true), + AlarmName: new("TargetTracking-table/dylan-tfstate-AlarmHigh-14069c4a-6dcc-48a2-bfe6-b5547c90c43d"), + AlarmArn: new("arn:aws:cloudwatch:eu-west-2:052392120703:alarm:TargetTracking-table/dylan-tfstate-AlarmHigh-14069c4a-6dcc-48a2-bfe6-b5547c90c43d"), + AlarmDescription: new("DO NOT EDIT OR DELETE. 
For TargetTrackingScaling policy arn:aws:autoscaling:eu-west-2:052392120703:scalingPolicy:32f3f053-dc75-46fa-9cd4-8e8c34c47b37:resource/dynamodb/table/dylan-tfstate:policyName/$dylan-tfstate-scaling-policy:createdBy/e5bd51d8-94a8-461e-a989-08f4d10b326b."), + AlarmConfigurationUpdatedTimestamp: new(time.Now()), + ActionsEnabled: new(true), OKActions: []string{ "arn:aws:autoscaling:eu-west-2:052392120703:scalingPolicy:32f3f053-dc75-46fa-9cd4-8e8c34c47b37:resource/dynamodb/table/dylan-tfstate:policyName/$dylan-tfstate-scaling-policy:createdBy/e5bd51d8-94a8-461e-a989-08f4d10b326b", }, @@ -53,32 +53,32 @@ func TestAlarmOutputMapper(t *testing.T) { "arn:aws:autoscaling:eu-west-2:052392120703:scalingPolicy:32f3f053-dc75-46fa-9cd4-8e8c34c47b37:resource/dynamodb/table/dylan-tfstate:policyName/$dylan-tfstate-scaling-policy:createdBy/e5bd51d8-94a8-461e-a989-08f4d10b326b", }, StateValue: types.StateValueOk, - StateReason: PtrString("Threshold Crossed: 2 datapoints [0.0 (09/01/23 14:02:00), 1.0 (09/01/23 14:01:00)] were not greater than the threshold (42.0)."), - StateReasonData: PtrString("{\"version\":\"1.0\",\"queryDate\":\"2023-01-09T14:07:25.504+0000\",\"startDate\":\"2023-01-09T14:01:00.000+0000\",\"statistic\":\"Sum\",\"period\":60,\"recentDatapoints\":[1.0,0.0],\"threshold\":42.0,\"evaluatedDatapoints\":[{\"timestamp\":\"2023-01-09T14:02:00.000+0000\",\"sampleCount\":1.0,\"value\":0.0}]}"), - StateUpdatedTimestamp: PtrTime(time.Now()), - MetricName: PtrString("ConsumedWriteCapacityUnits"), - Namespace: PtrString("AWS/DynamoDB"), + StateReason: new("Threshold Crossed: 2 datapoints [0.0 (09/01/23 14:02:00), 1.0 (09/01/23 14:01:00)] were not greater than the threshold (42.0)."), + StateReasonData: 
new("{\"version\":\"1.0\",\"queryDate\":\"2023-01-09T14:07:25.504+0000\",\"startDate\":\"2023-01-09T14:01:00.000+0000\",\"statistic\":\"Sum\",\"period\":60,\"recentDatapoints\":[1.0,0.0],\"threshold\":42.0,\"evaluatedDatapoints\":[{\"timestamp\":\"2023-01-09T14:02:00.000+0000\",\"sampleCount\":1.0,\"value\":0.0}]}"), + StateUpdatedTimestamp: new(time.Now()), + MetricName: new("ConsumedWriteCapacityUnits"), + Namespace: new("AWS/DynamoDB"), Statistic: types.StatisticSum, Dimensions: []types.Dimension{ { - Name: PtrString("TableName"), - Value: PtrString("dylan-tfstate"), + Name: new("TableName"), + Value: new("dylan-tfstate"), }, }, - Period: PtrInt32(60), - EvaluationPeriods: PtrInt32(2), - Threshold: PtrFloat64(42.0), + Period: new(int32(60)), + EvaluationPeriods: new(int32(2)), + Threshold: new(42.0), ComparisonOperator: types.ComparisonOperatorGreaterThanThreshold, - StateTransitionedTimestamp: PtrTime(time.Now()), + StateTransitionedTimestamp: new(time.Now()), }, }, CompositeAlarms: []types.CompositeAlarm{ { - AlarmName: PtrString("TargetTracking2-table/dylan-tfstate-AlarmHigh-14069c4a-6dcc-48a2-bfe6-b5547c90c43d"), - AlarmArn: PtrString("arn:aws:cloudwatch:eu-west-2:052392120703:alarm:TargetTracking2-table/dylan-tfstate-AlarmHigh-14069c4a-6dcc-48a2-bfe6-b5547c90c43d"), - AlarmDescription: PtrString("DO NOT EDIT OR DELETE. 
For TargetTrackingScaling policy arn:aws:autoscaling:eu-west-2:052392120703:scalingPolicy:32f3f053-dc75-46fa-9cd4-8e8c34c47b37:resource/dynamodb/table/dylan-tfstate:policyName/$dylan-tfstate-scaling-policy:createdBy/e5bd51d8-94a8-461e-a989-08f4d10b326b."), - AlarmConfigurationUpdatedTimestamp: PtrTime(time.Now()), - ActionsEnabled: PtrBool(true), + AlarmName: new("TargetTracking2-table/dylan-tfstate-AlarmHigh-14069c4a-6dcc-48a2-bfe6-b5547c90c43d"), + AlarmArn: new("arn:aws:cloudwatch:eu-west-2:052392120703:alarm:TargetTracking2-table/dylan-tfstate-AlarmHigh-14069c4a-6dcc-48a2-bfe6-b5547c90c43d"), + AlarmDescription: new("DO NOT EDIT OR DELETE. For TargetTrackingScaling policy arn:aws:autoscaling:eu-west-2:052392120703:scalingPolicy:32f3f053-dc75-46fa-9cd4-8e8c34c47b37:resource/dynamodb/table/dylan-tfstate:policyName/$dylan-tfstate-scaling-policy:createdBy/e5bd51d8-94a8-461e-a989-08f4d10b326b."), + AlarmConfigurationUpdatedTimestamp: new(time.Now()), + ActionsEnabled: new(true), OKActions: []string{ "arn:aws:autoscaling:eu-west-2:052392120703:scalingPolicy:32f3f053-dc75-46fa-9cd4-8e8c34c47b37:resource/dynamodb/table/dylan-tfstate:policyName/$dylan-tfstate-scaling-policy:createdBy/e5bd51d8-94a8-461e-a989-08f4d10b326b", }, @@ -89,17 +89,17 @@ func TestAlarmOutputMapper(t *testing.T) { "arn:aws:autoscaling:eu-west-2:052392120703:scalingPolicy:32f3f053-dc75-46fa-9cd4-8e8c34c47b37:resource/dynamodb/table/dylan-tfstate:policyName/$dylan-tfstate-scaling-policy:createdBy/e5bd51d8-94a8-461e-a989-08f4d10b326b", }, StateValue: types.StateValueOk, - StateReason: PtrString("Threshold Crossed: 2 datapoints [0.0 (09/01/23 14:02:00), 1.0 (09/01/23 14:01:00)] were not greater than the threshold (42.0)."), - StateReasonData: 
PtrString("{\"version\":\"1.0\",\"queryDate\":\"2023-01-09T14:07:25.504+0000\",\"startDate\":\"2023-01-09T14:01:00.000+0000\",\"statistic\":\"Sum\",\"period\":60,\"recentDatapoints\":[1.0,0.0],\"threshold\":42.0,\"evaluatedDatapoints\":[{\"timestamp\":\"2023-01-09T14:02:00.000+0000\",\"sampleCount\":1.0,\"value\":0.0}]}"), - StateUpdatedTimestamp: PtrTime(time.Now()), - StateTransitionedTimestamp: PtrTime(time.Now()), + StateReason: new("Threshold Crossed: 2 datapoints [0.0 (09/01/23 14:02:00), 1.0 (09/01/23 14:01:00)] were not greater than the threshold (42.0)."), + StateReasonData: new("{\"version\":\"1.0\",\"queryDate\":\"2023-01-09T14:07:25.504+0000\",\"startDate\":\"2023-01-09T14:01:00.000+0000\",\"statistic\":\"Sum\",\"period\":60,\"recentDatapoints\":[1.0,0.0],\"threshold\":42.0,\"evaluatedDatapoints\":[{\"timestamp\":\"2023-01-09T14:02:00.000+0000\",\"sampleCount\":1.0,\"value\":0.0}]}"), + StateUpdatedTimestamp: new(time.Now()), + StateTransitionedTimestamp: new(time.Now()), ActionsSuppressedBy: types.ActionsSuppressedByAlarm, - ActionsSuppressedReason: PtrString("Alarm is in INSUFFICIENT_DATA state"), + ActionsSuppressedReason: new("Alarm is in INSUFFICIENT_DATA state"), // link - ActionsSuppressor: PtrString("arn:aws:cloudwatch:eu-west-2:052392120703:alarm:TargetTracking2-table/dylan-tfstate-AlarmHigh-14069c4a-6dcc-48a2-bfe6-b5547c90c43d"), - ActionsSuppressorExtensionPeriod: PtrInt32(0), - ActionsSuppressorWaitPeriod: PtrInt32(0), - AlarmRule: PtrString("ALARM TargetTracking2-table/dylan-tfstate-AlarmHigh-14069c4a-6dcc-48a2-bfe6-b5547c90c43d"), + ActionsSuppressor: new("arn:aws:cloudwatch:eu-west-2:052392120703:alarm:TargetTracking2-table/dylan-tfstate-AlarmHigh-14069c4a-6dcc-48a2-bfe6-b5547c90c43d"), + ActionsSuppressorExtensionPeriod: new(int32(0)), + ActionsSuppressorWaitPeriod: new(int32(0)), + AlarmRule: new("ALARM TargetTracking2-table/dylan-tfstate-AlarmHigh-14069c4a-6dcc-48a2-bfe6-b5547c90c43d"), }, }, } @@ -178,12 +178,12 @@ func 
TestAlarmOutputMapperWithTagError(t *testing.T) { output := &cloudwatch.DescribeAlarmsOutput{ MetricAlarms: []types.MetricAlarm{ { - AlarmName: PtrString("api-51c748b4-cpu-credits-low"), - AlarmArn: PtrString("arn:aws:cloudwatch:eu-west-2:052392120703:alarm:api-51c748b4-cpu-credits-low"), - AlarmDescription: PtrString("CPU credits low alarm"), + AlarmName: new("api-51c748b4-cpu-credits-low"), + AlarmArn: new("arn:aws:cloudwatch:eu-west-2:052392120703:alarm:api-51c748b4-cpu-credits-low"), + AlarmDescription: new("CPU credits low alarm"), StateValue: types.StateValueOk, - MetricName: PtrString("CPUCreditBalance"), - Namespace: PtrString("AWS/EC2"), + MetricName: new("CPUCreditBalance"), + Namespace: new("AWS/EC2"), }, }, } diff --git a/aws-source/adapters/cloudwatch-instance-metric.go b/aws-source/adapters/cloudwatch-instance-metric.go index ef9a0e6a..c43b96a4 100644 --- a/aws-source/adapters/cloudwatch-instance-metric.go +++ b/aws-source/adapters/cloudwatch-instance-metric.go @@ -121,7 +121,7 @@ func formatMetricValue(metricName string, value float64) string { // metricOutputMapper converts CloudWatch GetMetricData output to an SDP item func metricOutputMapper(ctx context.Context, client CloudwatchMetricClient, scope string, instanceID string, output *cloudwatch.GetMetricDataOutput) (*sdp.Item, error) { // Build attributes map with instance ID - attrsMap := map[string]interface{}{ + attrsMap := map[string]any{ "InstanceId": instanceID, "PeriodMinutes": 15, "Statistic": "Average", diff --git a/aws-source/adapters/directconnect-connection_test.go b/aws-source/adapters/directconnect-connection_test.go index 86dc2d8a..31d546e6 100644 --- a/aws-source/adapters/directconnect-connection_test.go +++ b/aws-source/adapters/directconnect-connection_test.go @@ -15,26 +15,26 @@ func TestDirectconnectConnectionOutputMapper(t *testing.T) { output := &directconnect.DescribeConnectionsOutput{ Connections: []types.Connection{ { - AwsDeviceV2: PtrString("EqDC2-123h49s71dabc"), - 
AwsLogicalDeviceId: PtrString("device-1"), - Bandwidth: PtrString("1Gbps"), - ConnectionId: PtrString("dxcon-fguhmqlc"), - ConnectionName: PtrString("My_Connection"), + AwsDeviceV2: new("EqDC2-123h49s71dabc"), + AwsLogicalDeviceId: new("device-1"), + Bandwidth: new("1Gbps"), + ConnectionId: new("dxcon-fguhmqlc"), + ConnectionName: new("My_Connection"), ConnectionState: "down", - EncryptionMode: PtrString("must_encrypt"), + EncryptionMode: new("must_encrypt"), HasLogicalRedundancy: "unknown", - JumboFrameCapable: PtrBool(true), - LagId: PtrString("dxlag-ffrz71kw"), - LoaIssueTime: PtrTime(time.Now()), - Location: PtrString("EqDC2"), - Region: PtrString("us-east-1"), - ProviderName: PtrString("provider-1"), - OwnerAccount: PtrString("123456789012"), - PartnerName: PtrString("partner-1"), + JumboFrameCapable: new(true), + LagId: new("dxlag-ffrz71kw"), + LoaIssueTime: new(time.Now()), + Location: new("EqDC2"), + Region: new("us-east-1"), + ProviderName: new("provider-1"), + OwnerAccount: new("123456789012"), + PartnerName: new("partner-1"), Tags: []types.Tag{ { - Key: PtrString("foo"), - Value: PtrString("bar"), + Key: new("foo"), + Value: new("bar"), }, }, }, diff --git a/aws-source/adapters/directconnect-customer-metadata_test.go b/aws-source/adapters/directconnect-customer-metadata_test.go index f2375df6..616589f3 100644 --- a/aws-source/adapters/directconnect-customer-metadata_test.go +++ b/aws-source/adapters/directconnect-customer-metadata_test.go @@ -14,8 +14,8 @@ func TestCustomerMetadataOutputMapper(t *testing.T) { output := &directconnect.DescribeCustomerMetadataOutput{ Agreements: []types.CustomerAgreement{ { - AgreementName: PtrString("example-customer-agreement"), - Status: PtrString("signed"), + AgreementName: new("example-customer-agreement"), + Status: new("signed"), }, }, } diff --git a/aws-source/adapters/directconnect-direct-connect-gateway-association-proposal_test.go 
b/aws-source/adapters/directconnect-direct-connect-gateway-association-proposal_test.go index 942eff81..c540f6b7 100644 --- a/aws-source/adapters/directconnect-direct-connect-gateway-association-proposal_test.go +++ b/aws-source/adapters/directconnect-direct-connect-gateway-association-proposal_test.go @@ -17,27 +17,27 @@ func TestDirectConnectGatewayAssociationProposalOutputMapper(t *testing.T) { output := &directconnect.DescribeDirectConnectGatewayAssociationProposalsOutput{ DirectConnectGatewayAssociationProposals: []types.DirectConnectGatewayAssociationProposal{ { - ProposalId: PtrString("c2ede9b4-bbc6-4d33-923c-bc4feEXAMPLE"), - DirectConnectGatewayId: PtrString("5f294f92-bafb-4011-916d-9b0bexample"), - DirectConnectGatewayOwnerAccount: PtrString("123456789012"), + ProposalId: new("c2ede9b4-bbc6-4d33-923c-bc4feEXAMPLE"), + DirectConnectGatewayId: new("5f294f92-bafb-4011-916d-9b0bexample"), + DirectConnectGatewayOwnerAccount: new("123456789012"), ProposalState: types.DirectConnectGatewayAssociationProposalStateRequested, AssociatedGateway: &types.AssociatedGateway{ - Id: PtrString("tgw-02f776b1a7EXAMPLE"), + Id: new("tgw-02f776b1a7EXAMPLE"), Type: types.GatewayTypeTransitGateway, - OwnerAccount: PtrString("111122223333"), - Region: PtrString("us-east-1"), + OwnerAccount: new("111122223333"), + Region: new("us-east-1"), }, ExistingAllowedPrefixesToDirectConnectGateway: []types.RouteFilterPrefix{ { - Cidr: PtrString("192.168.2.0/30"), + Cidr: new("192.168.2.0/30"), }, { - Cidr: PtrString("192.168.1.0/30"), + Cidr: new("192.168.1.0/30"), }, }, RequestedAllowedPrefixesToDirectConnectGateway: []types.RouteFilterPrefix{ { - Cidr: PtrString("192.168.1.0/30"), + Cidr: new("192.168.1.0/30"), }, }, }, diff --git a/aws-source/adapters/directconnect-direct-connect-gateway-association_test.go b/aws-source/adapters/directconnect-direct-connect-gateway-association_test.go index 7fc04408..226eebef 100644 --- 
a/aws-source/adapters/directconnect-direct-connect-gateway-association_test.go +++ b/aws-source/adapters/directconnect-direct-connect-gateway-association_test.go @@ -16,10 +16,10 @@ func TestDirectConnectGatewayAssociationOutputMapper_Health_OK(t *testing.T) { DirectConnectGatewayAssociations: []types.DirectConnectGatewayAssociation{ { AssociationState: types.DirectConnectGatewayAssociationStateAssociating, - AssociationId: PtrString("cf68415c-f4ae-48f2-87a7-3b52cexample"), - VirtualGatewayOwnerAccount: PtrString("123456789012"), - DirectConnectGatewayId: PtrString("5f294f92-bafb-4011-916d-9b0bexample"), - VirtualGatewayId: PtrString("vgw-6efe725e"), + AssociationId: new("cf68415c-f4ae-48f2-87a7-3b52cexample"), + VirtualGatewayOwnerAccount: new("123456789012"), + DirectConnectGatewayId: new("5f294f92-bafb-4011-916d-9b0bexample"), + VirtualGatewayId: new("vgw-6efe725e"), }, }, } @@ -68,11 +68,11 @@ func TestDirectConnectGatewayAssociationOutputMapper_Health_Error(t *testing.T) DirectConnectGatewayAssociations: []types.DirectConnectGatewayAssociation{ { AssociationState: types.DirectConnectGatewayAssociationStateAssociating, - AssociationId: PtrString("cf68415c-f4ae-48f2-87a7-3b52cexample"), - VirtualGatewayOwnerAccount: PtrString("123456789012"), - DirectConnectGatewayId: PtrString("5f294f92-bafb-4011-916d-9b0bexample"), - VirtualGatewayId: PtrString("vgw-6efe725e"), - StateChangeError: PtrString("something went wrong"), + AssociationId: new("cf68415c-f4ae-48f2-87a7-3b52cexample"), + VirtualGatewayOwnerAccount: new("123456789012"), + DirectConnectGatewayId: new("5f294f92-bafb-4011-916d-9b0bexample"), + VirtualGatewayId: new("vgw-6efe725e"), + StateChangeError: new("something went wrong"), }, }, } diff --git a/aws-source/adapters/directconnect-direct-connect-gateway-attachment_test.go b/aws-source/adapters/directconnect-direct-connect-gateway-attachment_test.go index 096629a3..936b5a33 100644 --- 
a/aws-source/adapters/directconnect-direct-connect-gateway-attachment_test.go +++ b/aws-source/adapters/directconnect-direct-connect-gateway-attachment_test.go @@ -15,10 +15,10 @@ func TestDirectConnectGatewayAttachmentOutputMapper_Health_OK(t *testing.T) { output := &directconnect.DescribeDirectConnectGatewayAttachmentsOutput{ DirectConnectGatewayAttachments: []types.DirectConnectGatewayAttachment{ { - VirtualInterfaceOwnerAccount: PtrString("123456789012"), - VirtualInterfaceRegion: PtrString("us-east-2"), - VirtualInterfaceId: PtrString("dxvif-ffhhk74f"), - DirectConnectGatewayId: PtrString("cf68415c-f4ae-48f2-87a7-3b52cexample"), + VirtualInterfaceOwnerAccount: new("123456789012"), + VirtualInterfaceRegion: new("us-east-2"), + VirtualInterfaceId: new("dxvif-ffhhk74f"), + DirectConnectGatewayId: new("cf68415c-f4ae-48f2-87a7-3b52cexample"), AttachmentState: "detaching", }, }, @@ -67,12 +67,12 @@ func TestDirectConnectGatewayAttachmentOutputMapper_Health_Error(t *testing.T) { output := &directconnect.DescribeDirectConnectGatewayAttachmentsOutput{ DirectConnectGatewayAttachments: []types.DirectConnectGatewayAttachment{ { - VirtualInterfaceOwnerAccount: PtrString("123456789012"), - VirtualInterfaceRegion: PtrString("us-east-2"), - VirtualInterfaceId: PtrString("dxvif-ffhhk74f"), - DirectConnectGatewayId: PtrString("cf68415c-f4ae-48f2-87a7-3b52cexample"), + VirtualInterfaceOwnerAccount: new("123456789012"), + VirtualInterfaceRegion: new("us-east-2"), + VirtualInterfaceId: new("dxvif-ffhhk74f"), + DirectConnectGatewayId: new("cf68415c-f4ae-48f2-87a7-3b52cexample"), AttachmentState: "detaching", - StateChangeError: PtrString("error"), + StateChangeError: new("error"), }, }, } diff --git a/aws-source/adapters/directconnect-direct-connect-gateway_test.go b/aws-source/adapters/directconnect-direct-connect-gateway_test.go index 32ed2630..db876da3 100644 --- a/aws-source/adapters/directconnect-direct-connect-gateway_test.go +++ 
b/aws-source/adapters/directconnect-direct-connect-gateway_test.go @@ -15,10 +15,10 @@ func TestDirectConnectGatewayOutputMapper_Health_OK(t *testing.T) { output := &directconnect.DescribeDirectConnectGatewaysOutput{ DirectConnectGateways: []types.DirectConnectGateway{ { - AmazonSideAsn: PtrInt64(64512), - DirectConnectGatewayId: PtrString("cf68415c-f4ae-48f2-87a7-3b52cexample"), - OwnerAccount: PtrString("123456789012"), - DirectConnectGatewayName: PtrString("DxGateway2"), + AmazonSideAsn: new(int64(64512)), + DirectConnectGatewayId: new("cf68415c-f4ae-48f2-87a7-3b52cexample"), + OwnerAccount: new("123456789012"), + DirectConnectGatewayName: new("DxGateway2"), DirectConnectGatewayState: types.DirectConnectGatewayStateAvailable, }, }, @@ -48,12 +48,12 @@ func TestDirectConnectGatewayOutputMapper_Health_ERROR(t *testing.T) { output := &directconnect.DescribeDirectConnectGatewaysOutput{ DirectConnectGateways: []types.DirectConnectGateway{ { - AmazonSideAsn: PtrInt64(64512), - DirectConnectGatewayId: PtrString("cf68415c-f4ae-48f2-87a7-3b52cexample"), - OwnerAccount: PtrString("123456789012"), - DirectConnectGatewayName: PtrString("DxGateway2"), + AmazonSideAsn: new(int64(64512)), + DirectConnectGatewayId: new("cf68415c-f4ae-48f2-87a7-3b52cexample"), + OwnerAccount: new("123456789012"), + DirectConnectGatewayName: new("DxGateway2"), DirectConnectGatewayState: types.DirectConnectGatewayStateAvailable, - StateChangeError: PtrString("error"), + StateChangeError: new("error"), }, }, } diff --git a/aws-source/adapters/directconnect-hosted-connection_test.go b/aws-source/adapters/directconnect-hosted-connection_test.go index 6e869b45..59b3458a 100644 --- a/aws-source/adapters/directconnect-hosted-connection_test.go +++ b/aws-source/adapters/directconnect-hosted-connection_test.go @@ -15,26 +15,26 @@ func TestHostedConnectionOutputMapper(t *testing.T) { output := &directconnect.DescribeHostedConnectionsOutput{ Connections: []types.Connection{ { - AwsDeviceV2: 
PtrString("EqDC2-123h49s71dabc"), - AwsLogicalDeviceId: PtrString("device-1"), - Bandwidth: PtrString("1Gbps"), - ConnectionId: PtrString("dxcon-fguhmqlc"), - ConnectionName: PtrString("My_Connection"), + AwsDeviceV2: new("EqDC2-123h49s71dabc"), + AwsLogicalDeviceId: new("device-1"), + Bandwidth: new("1Gbps"), + ConnectionId: new("dxcon-fguhmqlc"), + ConnectionName: new("My_Connection"), ConnectionState: "down", - EncryptionMode: PtrString("must_encrypt"), + EncryptionMode: new("must_encrypt"), HasLogicalRedundancy: "unknown", - JumboFrameCapable: PtrBool(true), - LagId: PtrString("dxlag-ffrz71kw"), - LoaIssueTime: PtrTime(time.Now()), - Location: PtrString("EqDC2"), - Region: PtrString("us-east-1"), - ProviderName: PtrString("provider-1"), - OwnerAccount: PtrString("123456789012"), - PartnerName: PtrString("partner-1"), + JumboFrameCapable: new(true), + LagId: new("dxlag-ffrz71kw"), + LoaIssueTime: new(time.Now()), + Location: new("EqDC2"), + Region: new("us-east-1"), + ProviderName: new("provider-1"), + OwnerAccount: new("123456789012"), + PartnerName: new("partner-1"), Tags: []types.Tag{ { - Key: PtrString("foo"), - Value: PtrString("bar"), + Key: new("foo"), + Value: new("bar"), }, }, }, diff --git a/aws-source/adapters/directconnect-interconnect_test.go b/aws-source/adapters/directconnect-interconnect_test.go index 84f09a63..de3ccbdd 100644 --- a/aws-source/adapters/directconnect-interconnect_test.go +++ b/aws-source/adapters/directconnect-interconnect_test.go @@ -15,23 +15,23 @@ func TestInterconnectOutputMapper(t *testing.T) { output := &directconnect.DescribeInterconnectsOutput{ Interconnects: []types.Interconnect{ { - AwsDeviceV2: PtrString("EqDC2-123h49s71dabc"), - AwsLogicalDeviceId: PtrString("device-1"), - Bandwidth: PtrString("1Gbps"), + AwsDeviceV2: new("EqDC2-123h49s71dabc"), + AwsLogicalDeviceId: new("device-1"), + Bandwidth: new("1Gbps"), HasLogicalRedundancy: types.HasLogicalRedundancyUnknown, - InterconnectId: PtrString("dxcon-fguhmqlc"), - 
InterconnectName: PtrString("interconnect-1"), + InterconnectId: new("dxcon-fguhmqlc"), + InterconnectName: new("interconnect-1"), InterconnectState: types.InterconnectStateAvailable, - JumboFrameCapable: PtrBool(true), - LagId: PtrString("dxlag-ffrz71kw"), - LoaIssueTime: PtrTime(time.Now()), - Location: PtrString("EqDC2"), - Region: PtrString("us-east-1"), - ProviderName: PtrString("provider-1"), + JumboFrameCapable: new(true), + LagId: new("dxlag-ffrz71kw"), + LoaIssueTime: new(time.Now()), + Location: new("EqDC2"), + Region: new("us-east-1"), + ProviderName: new("provider-1"), Tags: []types.Tag{ { - Key: PtrString("foo"), - Value: PtrString("bar"), + Key: new("foo"), + Value: new("bar"), }, }, }, @@ -125,7 +125,7 @@ func TestInterconnectHealth(t *testing.T) { Interconnects: []types.Interconnect{ { InterconnectState: c.state, - LagId: PtrString("dxlag-fgsu9erb"), + LagId: new("dxlag-fgsu9erb"), }, }, } diff --git a/aws-source/adapters/directconnect-lag_test.go b/aws-source/adapters/directconnect-lag_test.go index 6632482b..125968f1 100644 --- a/aws-source/adapters/directconnect-lag_test.go +++ b/aws-source/adapters/directconnect-lag_test.go @@ -52,7 +52,7 @@ func TestLagHealth(t *testing.T) { Lags: []types.Lag{ { LagState: c.state, - LagId: PtrString("dxlag-fgsu9erb"), + LagId: new("dxlag-fgsu9erb"), }, }, } @@ -78,38 +78,38 @@ func TestLagOutputMapper(t *testing.T) { output := &directconnect.DescribeLagsOutput{ Lags: []types.Lag{ { - AwsDeviceV2: PtrString("EqDC2-19y7z3m17xpuz"), + AwsDeviceV2: new("EqDC2-19y7z3m17xpuz"), NumberOfConnections: int32(2), LagState: types.LagStateAvailable, - OwnerAccount: PtrString("123456789012"), - LagName: PtrString("DA-LAG"), + OwnerAccount: new("123456789012"), + LagName: new("DA-LAG"), Connections: []types.Connection{ { - OwnerAccount: PtrString("123456789012"), - ConnectionId: PtrString("dxcon-ffnikghc"), - LagId: PtrString("dxlag-fgsu9erb"), + OwnerAccount: new("123456789012"), + ConnectionId: new("dxcon-ffnikghc"), + 
LagId: new("dxlag-fgsu9erb"), ConnectionState: "requested", - Bandwidth: PtrString("10Gbps"), - Location: PtrString("EqDC2"), - ConnectionName: PtrString("Requested Connection 1 for Lag dxlag-fgsu9erb"), - Region: PtrString("us-east-1"), + Bandwidth: new("10Gbps"), + Location: new("EqDC2"), + ConnectionName: new("Requested Connection 1 for Lag dxlag-fgsu9erb"), + Region: new("us-east-1"), }, { - OwnerAccount: PtrString("123456789012"), - ConnectionId: PtrString("dxcon-fglgbdea"), - LagId: PtrString("dxlag-fgsu9erb"), + OwnerAccount: new("123456789012"), + ConnectionId: new("dxcon-fglgbdea"), + LagId: new("dxlag-fgsu9erb"), ConnectionState: "requested", - Bandwidth: PtrString("10Gbps"), - Location: PtrString("EqDC2"), - ConnectionName: PtrString("Requested Connection 2 for Lag dxlag-fgsu9erb"), - Region: PtrString("us-east-1"), + Bandwidth: new("10Gbps"), + Location: new("EqDC2"), + ConnectionName: new("Requested Connection 2 for Lag dxlag-fgsu9erb"), + Region: new("us-east-1"), }, }, - LagId: PtrString("dxlag-fgsu9erb"), + LagId: new("dxlag-fgsu9erb"), MinimumLinks: int32(0), - ConnectionsBandwidth: PtrString("10Gbps"), - Region: PtrString("us-east-1"), - Location: PtrString("EqDC2"), + ConnectionsBandwidth: new("10Gbps"), + Region: new("us-east-1"), + Location: new("EqDC2"), }, }, } diff --git a/aws-source/adapters/directconnect-location_test.go b/aws-source/adapters/directconnect-location_test.go index 9c18c9f6..e84e8e3b 100644 --- a/aws-source/adapters/directconnect-location_test.go +++ b/aws-source/adapters/directconnect-location_test.go @@ -17,9 +17,9 @@ func TestLocationOutputMapper(t *testing.T) { AvailableMacSecPortSpeeds: []string{"1 Gbps", "10 Gbps"}, AvailablePortSpeeds: []string{"50 Mbps", "100 Mbps", "1 Gbps", "10 Gbps"}, AvailableProviders: []string{"ProviderA", "ProviderB", "ProviderC"}, - LocationName: PtrString("NAP do Brasil, Barueri, Sao Paulo"), - LocationCode: PtrString("TNDB"), - Region: PtrString("us-east-1"), + LocationName: new("NAP do 
Brasil, Barueri, Sao Paulo"), + LocationCode: new("TNDB"), + Region: new("us-east-1"), }, }, } diff --git a/aws-source/adapters/directconnect-router-configuration_test.go b/aws-source/adapters/directconnect-router-configuration_test.go index 6a151e2d..c8b4ffb9 100644 --- a/aws-source/adapters/directconnect-router-configuration_test.go +++ b/aws-source/adapters/directconnect-router-configuration_test.go @@ -13,17 +13,17 @@ import ( func TestRouterConfigurationOutputMapper(t *testing.T) { output := &directconnect.DescribeRouterConfigurationOutput{ - CustomerRouterConfig: PtrString("some config"), + CustomerRouterConfig: new("some config"), Router: &types.RouterType{ - Platform: PtrString("2900 Series Routers"), - RouterTypeIdentifier: PtrString("CiscoSystemsInc-2900SeriesRouters-IOS124"), - Software: PtrString("IOS 12.4+"), - Vendor: PtrString("Cisco Systems, Inc."), - XsltTemplateName: PtrString("customer-router-cisco-generic.xslt"), - XsltTemplateNameForMacSec: PtrString(""), + Platform: new("2900 Series Routers"), + RouterTypeIdentifier: new("CiscoSystemsInc-2900SeriesRouters-IOS124"), + Software: new("IOS 12.4+"), + Vendor: new("Cisco Systems, Inc."), + XsltTemplateName: new("customer-router-cisco-generic.xslt"), + XsltTemplateNameForMacSec: new(""), }, - VirtualInterfaceId: PtrString("dxvif-ffhhk74f"), - VirtualInterfaceName: PtrString("PrivateVirtualInterface"), + VirtualInterfaceId: new("dxvif-ffhhk74f"), + VirtualInterfaceName: new("PrivateVirtualInterface"), } items, err := routerConfigurationOutputMapper(context.Background(), nil, "foo", nil, output) diff --git a/aws-source/adapters/directconnect-virtual-gateway_test.go b/aws-source/adapters/directconnect-virtual-gateway_test.go index c4003bb5..85245770 100644 --- a/aws-source/adapters/directconnect-virtual-gateway_test.go +++ b/aws-source/adapters/directconnect-virtual-gateway_test.go @@ -14,8 +14,8 @@ func TestVirtualGatewayOutputMapper(t *testing.T) { output := 
&directconnect.DescribeVirtualGatewaysOutput{ VirtualGateways: []types.VirtualGateway{ { - VirtualGatewayId: PtrString("cf68415c-f4ae-48f2-87a7-3b52cexample"), - VirtualGatewayState: PtrString("available"), + VirtualGatewayId: new("cf68415c-f4ae-48f2-87a7-3b52cexample"), + VirtualGatewayState: new("available"), }, }, } diff --git a/aws-source/adapters/directconnect-virtual-interface_test.go b/aws-source/adapters/directconnect-virtual-interface_test.go index 396737b2..474489c6 100644 --- a/aws-source/adapters/directconnect-virtual-interface_test.go +++ b/aws-source/adapters/directconnect-virtual-interface_test.go @@ -17,14 +17,14 @@ func TestVirtualInterfaceOutputMapper(t *testing.T) { output := &directconnect.DescribeVirtualInterfacesOutput{ VirtualInterfaces: []types.VirtualInterface{ { - VirtualInterfaceId: PtrString("dxvif-ffhhk74f"), - ConnectionId: PtrString("dxcon-fguhmqlc"), + VirtualInterfaceId: new("dxvif-ffhhk74f"), + ConnectionId: new("dxcon-fguhmqlc"), VirtualInterfaceState: "verifying", - CustomerAddress: PtrString("192.168.1.2/30"), - AmazonAddress: PtrString("192.168.1.1/30"), - VirtualInterfaceType: PtrString("private"), - VirtualInterfaceName: PtrString("PrivateVirtualInterface"), - DirectConnectGatewayId: PtrString("cf68415c-f4ae-48f2-87a7-3b52cexample"), + CustomerAddress: new("192.168.1.2/30"), + AmazonAddress: new("192.168.1.1/30"), + VirtualInterfaceType: new("private"), + VirtualInterfaceName: new("PrivateVirtualInterface"), + DirectConnectGatewayId: new("cf68415c-f4ae-48f2-87a7-3b52cexample"), }, }, } diff --git a/aws-source/adapters/dynamodb-backup_test.go b/aws-source/adapters/dynamodb-backup_test.go index 4b924a28..6046d1f8 100644 --- a/aws-source/adapters/dynamodb-backup_test.go +++ b/aws-source/adapters/dynamodb-backup_test.go @@ -15,43 +15,43 @@ func (t *DynamoDBTestClient) DescribeBackup(ctx context.Context, params *dynamod return &dynamodb.DescribeBackupOutput{ BackupDescription: &types.BackupDescription{ BackupDetails: 
&types.BackupDetails{ - BackupArn: PtrString("arn:aws:dynamodb:eu-west-1:052392120703:table/test2/backup/01673461724486-a6007753"), - BackupName: PtrString("test2-backup"), - BackupSizeBytes: PtrInt64(0), + BackupArn: new("arn:aws:dynamodb:eu-west-1:052392120703:table/test2/backup/01673461724486-a6007753"), + BackupName: new("test2-backup"), + BackupSizeBytes: new(int64(0)), BackupStatus: types.BackupStatusAvailable, BackupType: types.BackupTypeUser, - BackupCreationDateTime: PtrTime(time.Now()), + BackupCreationDateTime: new(time.Now()), }, SourceTableDetails: &types.SourceTableDetails{ - TableName: PtrString("test2"), // link - TableId: PtrString("12670f3b-8ca1-463b-b15e-f2e27eaf70b0"), - TableArn: PtrString("arn:aws:dynamodb:eu-west-1:052392120703:table/test2"), - TableSizeBytes: PtrInt64(0), + TableName: new("test2"), // link + TableId: new("12670f3b-8ca1-463b-b15e-f2e27eaf70b0"), + TableArn: new("arn:aws:dynamodb:eu-west-1:052392120703:table/test2"), + TableSizeBytes: new(int64(0)), KeySchema: []types.KeySchemaElement{ { - AttributeName: PtrString("ArtistId"), + AttributeName: new("ArtistId"), KeyType: types.KeyTypeHash, }, { - AttributeName: PtrString("Concert"), + AttributeName: new("Concert"), KeyType: types.KeyTypeRange, }, }, - TableCreationDateTime: PtrTime(time.Now()), + TableCreationDateTime: new(time.Now()), ProvisionedThroughput: &types.ProvisionedThroughput{ - ReadCapacityUnits: PtrInt64(5), - WriteCapacityUnits: PtrInt64(5), + ReadCapacityUnits: new(int64(5)), + WriteCapacityUnits: new(int64(5)), }, - ItemCount: PtrInt64(0), + ItemCount: new(int64(0)), BillingMode: types.BillingModeProvisioned, }, SourceTableFeatureDetails: &types.SourceTableFeatureDetails{ GlobalSecondaryIndexes: []types.GlobalSecondaryIndexInfo{ { - IndexName: PtrString("GSI"), + IndexName: new("GSI"), KeySchema: []types.KeySchemaElement{ { - AttributeName: PtrString("TicketSales"), + AttributeName: new("TicketSales"), KeyType: types.KeyTypeHash, }, }, @@ -59,8 +59,8 @@ func (t 
*DynamoDBTestClient) DescribeBackup(ctx context.Context, params *dynamod ProjectionType: types.ProjectionTypeKeysOnly, }, ProvisionedThroughput: &types.ProvisionedThroughput{ - ReadCapacityUnits: PtrInt64(5), - WriteCapacityUnits: PtrInt64(5), + ReadCapacityUnits: new(int64(5)), + WriteCapacityUnits: new(int64(5)), }, }, }, @@ -73,15 +73,15 @@ func (t *DynamoDBTestClient) ListBackups(ctx context.Context, params *dynamodb.L return &dynamodb.ListBackupsOutput{ BackupSummaries: []types.BackupSummary{ { - TableName: PtrString("test2"), - TableId: PtrString("12670f3b-8ca1-463b-b15e-f2e27eaf70b0"), - TableArn: PtrString("arn:aws:dynamodb:eu-west-1:052392120703:table/test2"), - BackupArn: PtrString("arn:aws:dynamodb:eu-west-1:052392120703:table/test2/backup/01673461724486-a6007753"), - BackupName: PtrString("test2-backup"), - BackupCreationDateTime: PtrTime(time.Now()), + TableName: new("test2"), + TableId: new("12670f3b-8ca1-463b-b15e-f2e27eaf70b0"), + TableArn: new("arn:aws:dynamodb:eu-west-1:052392120703:table/test2"), + BackupArn: new("arn:aws:dynamodb:eu-west-1:052392120703:table/test2/backup/01673461724486-a6007753"), + BackupName: new("test2-backup"), + BackupCreationDateTime: new(time.Now()), BackupStatus: types.BackupStatusAvailable, BackupType: types.BackupTypeUser, - BackupSizeBytes: PtrInt64(10), + BackupSizeBytes: new(int64(10)), }, }, }, nil diff --git a/aws-source/adapters/dynamodb-table_test.go b/aws-source/adapters/dynamodb-table_test.go index ecbbfadd..ac7924d9 100644 --- a/aws-source/adapters/dynamodb-table_test.go +++ b/aws-source/adapters/dynamodb-table_test.go @@ -16,46 +16,46 @@ func (t *DynamoDBTestClient) DescribeTable(context.Context, *dynamodb.DescribeTa Table: &types.TableDescription{ AttributeDefinitions: []types.AttributeDefinition{ { - AttributeName: PtrString("ArtistId"), + AttributeName: new("ArtistId"), AttributeType: types.ScalarAttributeTypeS, }, { - AttributeName: PtrString("Concert"), + AttributeName: new("Concert"), AttributeType: 
types.ScalarAttributeTypeS, }, { - AttributeName: PtrString("TicketSales"), + AttributeName: new("TicketSales"), AttributeType: types.ScalarAttributeTypeS, }, }, - TableName: PtrString("test-DDBTable-1X52D7BWAAB2H"), + TableName: new("test-DDBTable-1X52D7BWAAB2H"), KeySchema: []types.KeySchemaElement{ { - AttributeName: PtrString("ArtistId"), + AttributeName: new("ArtistId"), KeyType: types.KeyTypeHash, }, { - AttributeName: PtrString("Concert"), + AttributeName: new("Concert"), KeyType: types.KeyTypeRange, }, }, TableStatus: types.TableStatusActive, - CreationDateTime: PtrTime(time.Now()), + CreationDateTime: new(time.Now()), ProvisionedThroughput: &types.ProvisionedThroughputDescription{ - NumberOfDecreasesToday: PtrInt64(0), - ReadCapacityUnits: PtrInt64(5), - WriteCapacityUnits: PtrInt64(5), + NumberOfDecreasesToday: new(int64(0)), + ReadCapacityUnits: new(int64(5)), + WriteCapacityUnits: new(int64(5)), }, - TableSizeBytes: PtrInt64(0), - ItemCount: PtrInt64(0), - TableArn: PtrString("arn:aws:dynamodb:eu-west-1:052392120703:table/test-DDBTable-1X52D7BWAAB2H"), - TableId: PtrString("32ef65bf-d6f3-4508-a3db-f201df09e437"), + TableSizeBytes: new(int64(0)), + ItemCount: new(int64(0)), + TableArn: new("arn:aws:dynamodb:eu-west-1:052392120703:table/test-DDBTable-1X52D7BWAAB2H"), + TableId: new("32ef65bf-d6f3-4508-a3db-f201df09e437"), GlobalSecondaryIndexes: []types.GlobalSecondaryIndexDescription{ { - IndexName: PtrString("GSI"), + IndexName: new("GSI"), KeySchema: []types.KeySchemaElement{ { - AttributeName: PtrString("TicketSales"), + AttributeName: new("TicketSales"), KeyType: types.KeyTypeHash, }, }, @@ -64,35 +64,35 @@ func (t *DynamoDBTestClient) DescribeTable(context.Context, *dynamodb.DescribeTa }, IndexStatus: types.IndexStatusActive, ProvisionedThroughput: &types.ProvisionedThroughputDescription{ - NumberOfDecreasesToday: PtrInt64(0), - ReadCapacityUnits: PtrInt64(5), - WriteCapacityUnits: PtrInt64(5), + NumberOfDecreasesToday: new(int64(0)), + 
ReadCapacityUnits: new(int64(5)), + WriteCapacityUnits: new(int64(5)), }, - IndexSizeBytes: PtrInt64(0), - ItemCount: PtrInt64(0), - IndexArn: PtrString("arn:aws:dynamodb:eu-west-1:052392120703:table/test-DDBTable-1X52D7BWAAB2H/index/GSI"), // no link, t + IndexSizeBytes: new(int64(0)), + ItemCount: new(int64(0)), + IndexArn: new("arn:aws:dynamodb:eu-west-1:052392120703:table/test-DDBTable-1X52D7BWAAB2H/index/GSI"), // no link, t }, }, ArchivalSummary: &types.ArchivalSummary{ - ArchivalBackupArn: PtrString("arn:aws:backups:eu-west-1:052392120703:some-backup/one"), // link - ArchivalDateTime: PtrTime(time.Now()), - ArchivalReason: PtrString("fear"), + ArchivalBackupArn: new("arn:aws:backups:eu-west-1:052392120703:some-backup/one"), // link + ArchivalDateTime: new(time.Now()), + ArchivalReason: new("fear"), }, BillingModeSummary: &types.BillingModeSummary{ BillingMode: types.BillingModePayPerRequest, }, - GlobalTableVersion: PtrString("1"), - LatestStreamArn: PtrString("arn:aws:dynamodb:eu-west-1:052392120703:table/test-DDBTable-1X52D7BWAAB2H/stream/2023-01-11T16:53:02.371"), // This doesn't get linked because there is no more data to get - LatestStreamLabel: PtrString("2023-01-11T16:53:02.371"), + GlobalTableVersion: new("1"), + LatestStreamArn: new("arn:aws:dynamodb:eu-west-1:052392120703:table/test-DDBTable-1X52D7BWAAB2H/stream/2023-01-11T16:53:02.371"), // This doesn't get linked because there is no more data to get + LatestStreamLabel: new("2023-01-11T16:53:02.371"), LocalSecondaryIndexes: []types.LocalSecondaryIndexDescription{ { - IndexArn: PtrString("arn:aws:dynamodb:eu-west-1:052392120703:table/test-DDBTable-1X52D7BWAAB2H/index/GSX"), // no link - IndexName: PtrString("GSX"), - IndexSizeBytes: PtrInt64(29103), - ItemCount: PtrInt64(234234), + IndexArn: new("arn:aws:dynamodb:eu-west-1:052392120703:table/test-DDBTable-1X52D7BWAAB2H/index/GSX"), // no link + IndexName: new("GSX"), + IndexSizeBytes: new(int64(29103)), + ItemCount: new(int64(234234)), KeySchema: 
[]types.KeySchemaElement{ { - AttributeName: PtrString("TicketSales"), + AttributeName: new("TicketSales"), KeyType: types.KeyTypeHash, }, }, @@ -108,11 +108,11 @@ func (t *DynamoDBTestClient) DescribeTable(context.Context, *dynamodb.DescribeTa { GlobalSecondaryIndexes: []types.ReplicaGlobalSecondaryIndexDescription{ { - IndexName: PtrString("name"), + IndexName: new("name"), }, }, - KMSMasterKeyId: PtrString("keyID"), - RegionName: PtrString("eu-west-2"), // link + KMSMasterKeyId: new("keyID"), + RegionName: new("eu-west-2"), // link ReplicaStatus: types.ReplicaStatusActive, ReplicaTableClassSummary: &types.TableClassSummary{ TableClass: types.TableClassStandard, @@ -120,23 +120,23 @@ func (t *DynamoDBTestClient) DescribeTable(context.Context, *dynamodb.DescribeTa }, }, RestoreSummary: &types.RestoreSummary{ - RestoreDateTime: PtrTime(time.Now()), - RestoreInProgress: PtrBool(false), - SourceBackupArn: PtrString("arn:aws:backup:eu-west-1:052392120703:recovery-point:89d0f956-d3a6-42fd-abbd-7d397766bc7e"), // link - SourceTableArn: PtrString("arn:aws:dynamodb:eu-west-1:052392120703:table/test-DDBTable-1X52D7BWAAB2H"), // link + RestoreDateTime: new(time.Now()), + RestoreInProgress: new(false), + SourceBackupArn: new("arn:aws:backup:eu-west-1:052392120703:recovery-point:89d0f956-d3a6-42fd-abbd-7d397766bc7e"), // link + SourceTableArn: new("arn:aws:dynamodb:eu-west-1:052392120703:table/test-DDBTable-1X52D7BWAAB2H"), // link }, SSEDescription: &types.SSEDescription{ - InaccessibleEncryptionDateTime: PtrTime(time.Now()), - KMSMasterKeyArn: PtrString("arn:aws:service:region:account:type/id"), // link + InaccessibleEncryptionDateTime: new(time.Now()), + KMSMasterKeyArn: new("arn:aws:service:region:account:type/id"), // link SSEType: types.SSETypeAes256, Status: types.SSEStatusDisabling, }, StreamSpecification: &types.StreamSpecification{ - StreamEnabled: PtrBool(true), + StreamEnabled: new(true), StreamViewType: types.StreamViewTypeKeysOnly, }, TableClassSummary: 
&types.TableClassSummary{ - LastUpdateDateTime: PtrTime(time.Now()), + LastUpdateDateTime: new(time.Now()), TableClass: types.TableClassStandard, }, }, @@ -156,8 +156,8 @@ func (t *DynamoDBTestClient) DescribeKinesisStreamingDestination(ctx context.Con KinesisDataStreamDestinations: []types.KinesisDataStreamDestination{ { DestinationStatus: types.DestinationStatusActive, - DestinationStatusDescription: PtrString("description"), - StreamArn: PtrString("arn:aws:kinesis:eu-west-1:052392120703:stream/test"), + DestinationStatusDescription: new("description"), + StreamArn: new("arn:aws:kinesis:eu-west-1:052392120703:stream/test"), }, }, }, nil @@ -167,8 +167,8 @@ func (t *DynamoDBTestClient) ListTagsOfResource(context.Context, *dynamodb.ListT return &dynamodb.ListTagsOfResourceOutput{ Tags: []types.Tag{ { - Key: PtrString("key"), - Value: PtrString("value"), + Key: new("key"), + Value: new("value"), }, }, NextToken: nil, diff --git a/aws-source/adapters/ec2-address_test.go b/aws-source/adapters/ec2-address_test.go index 59262cc9..41a3bf7e 100644 --- a/aws-source/adapters/ec2-address_test.go +++ b/aws-source/adapters/ec2-address_test.go @@ -43,16 +43,16 @@ func TestAddressOutputMapper(t *testing.T) { output := ec2.DescribeAddressesOutput{ Addresses: []types.Address{ { - PublicIp: PtrString("3.11.82.6"), - AllocationId: PtrString("eipalloc-030a6f43bc6086267"), + PublicIp: new("3.11.82.6"), + AllocationId: new("eipalloc-030a6f43bc6086267"), Domain: types.DomainTypeVpc, - PublicIpv4Pool: PtrString("amazon"), - NetworkBorderGroup: PtrString("eu-west-2"), - InstanceId: PtrString("instance"), - CarrierIp: PtrString("3.11.82.7"), - CustomerOwnedIp: PtrString("3.11.82.8"), - NetworkInterfaceId: PtrString("foo"), - PrivateIpAddress: PtrString("3.11.82.9"), + PublicIpv4Pool: new("amazon"), + NetworkBorderGroup: new("eu-west-2"), + InstanceId: new("instance"), + CarrierIp: new("3.11.82.7"), + CustomerOwnedIp: new("3.11.82.8"), + NetworkInterfaceId: new("foo"), + PrivateIpAddress: 
new("3.11.82.9"), }, }, } diff --git a/aws-source/adapters/ec2-capacity-reservation-fleet_test.go b/aws-source/adapters/ec2-capacity-reservation-fleet_test.go index cb81a86f..358fac63 100644 --- a/aws-source/adapters/ec2-capacity-reservation-fleet_test.go +++ b/aws-source/adapters/ec2-capacity-reservation-fleet_test.go @@ -14,31 +14,31 @@ func TestCapacityReservationFleetOutputMapper(t *testing.T) { output := &ec2.DescribeCapacityReservationFleetsOutput{ CapacityReservationFleets: []types.CapacityReservationFleet{ { - AllocationStrategy: PtrString("prioritized"), - CapacityReservationFleetArn: PtrString("arn:aws:ec2:us-east-1:123456789012:capacity-reservation/fleet/crf-1234567890abcdef0"), - CapacityReservationFleetId: PtrString("crf-1234567890abcdef0"), - CreateTime: PtrTime(time.Now()), + AllocationStrategy: new("prioritized"), + CapacityReservationFleetArn: new("arn:aws:ec2:us-east-1:123456789012:capacity-reservation/fleet/crf-1234567890abcdef0"), + CapacityReservationFleetId: new("crf-1234567890abcdef0"), + CreateTime: new(time.Now()), EndDate: nil, InstanceMatchCriteria: types.FleetInstanceMatchCriteriaOpen, InstanceTypeSpecifications: []types.FleetCapacityReservation{ { - AvailabilityZone: PtrString("us-east-1a"), // link - AvailabilityZoneId: PtrString("use1-az1"), - CapacityReservationId: PtrString("cr-1234567890abcdef0"), // link - CreateDate: PtrTime(time.Now()), - EbsOptimized: PtrBool(true), - FulfilledCapacity: PtrFloat64(1), + AvailabilityZone: new("us-east-1a"), // link + AvailabilityZoneId: new("use1-az1"), + CapacityReservationId: new("cr-1234567890abcdef0"), // link + CreateDate: new(time.Now()), + EbsOptimized: new(true), + FulfilledCapacity: new(float64(1)), InstancePlatform: types.CapacityReservationInstancePlatformLinuxUnix, InstanceType: types.InstanceTypeA12xlarge, - Priority: PtrInt32(1), - TotalInstanceCount: PtrInt32(1), - Weight: PtrFloat64(1), + Priority: new(int32(1)), + TotalInstanceCount: new(int32(1)), + Weight: new(float64(1)), }, 
}, State: types.CapacityReservationFleetStateActive, // health Tenancy: types.FleetCapacityReservationTenancyDefault, - TotalFulfilledCapacity: PtrFloat64(1), - TotalTargetCapacity: PtrInt32(1), + TotalFulfilledCapacity: new(float64(1)), + TotalTargetCapacity: new(int32(1)), }, }, } diff --git a/aws-source/adapters/ec2-capacity-reservation_test.go b/aws-source/adapters/ec2-capacity-reservation_test.go index 94394fec..0f165949 100644 --- a/aws-source/adapters/ec2-capacity-reservation_test.go +++ b/aws-source/adapters/ec2-capacity-reservation_test.go @@ -15,30 +15,30 @@ func TestCapacityReservationOutputMapper(t *testing.T) { output := &ec2.DescribeCapacityReservationsOutput{ CapacityReservations: []types.CapacityReservation{ { - AvailabilityZone: PtrString("us-east-1a"), // links - AvailabilityZoneId: PtrString("use1-az1"), - AvailableInstanceCount: PtrInt32(1), - CapacityReservationArn: PtrString("arn:aws:ec2:us-east-1:123456789012:capacity-reservation/cr-1234567890abcdef0"), - CapacityReservationId: PtrString("cr-1234567890abcdef0"), - CapacityReservationFleetId: PtrString("crf-1234567890abcdef0"), // link - CreateDate: PtrTime(time.Now()), - EbsOptimized: PtrBool(true), + AvailabilityZone: new("us-east-1a"), // links + AvailabilityZoneId: new("use1-az1"), + AvailableInstanceCount: new(int32(1)), + CapacityReservationArn: new("arn:aws:ec2:us-east-1:123456789012:capacity-reservation/cr-1234567890abcdef0"), + CapacityReservationId: new("cr-1234567890abcdef0"), + CapacityReservationFleetId: new("crf-1234567890abcdef0"), // link + CreateDate: new(time.Now()), + EbsOptimized: new(true), EndDateType: types.EndDateTypeUnlimited, EndDate: nil, InstanceMatchCriteria: types.InstanceMatchCriteriaTargeted, InstancePlatform: types.CapacityReservationInstancePlatformLinuxUnix, - InstanceType: PtrString("t2.micro"), - OutpostArn: PtrString("arn:aws:ec2:us-east-1:123456789012:outpost/op-1234567890abcdef0"), // link - OwnerId: PtrString("123456789012"), - PlacementGroupArn: 
PtrString("arn:aws:ec2:us-east-1:123456789012:placement-group/pg-1234567890abcdef0"), // link - StartDate: PtrTime(time.Now()), + InstanceType: new("t2.micro"), + OutpostArn: new("arn:aws:ec2:us-east-1:123456789012:outpost/op-1234567890abcdef0"), // link + OwnerId: new("123456789012"), + PlacementGroupArn: new("arn:aws:ec2:us-east-1:123456789012:placement-group/pg-1234567890abcdef0"), // link + StartDate: new(time.Now()), State: types.CapacityReservationStateActive, Tenancy: types.CapacityReservationTenancyDefault, - TotalInstanceCount: PtrInt32(1), + TotalInstanceCount: new(int32(1)), CapacityAllocations: []types.CapacityAllocation{ { AllocationType: types.AllocationTypeUsed, - Count: PtrInt32(1), + Count: new(int32(1)), }, }, }, diff --git a/aws-source/adapters/ec2-egress-only-internet-gateway_test.go b/aws-source/adapters/ec2-egress-only-internet-gateway_test.go index 4f1362d0..1bcd11fe 100644 --- a/aws-source/adapters/ec2-egress-only-internet-gateway_test.go +++ b/aws-source/adapters/ec2-egress-only-internet-gateway_test.go @@ -46,10 +46,10 @@ func TestEgressOnlyInternetGatewayOutputMapper(t *testing.T) { Attachments: []types.InternetGatewayAttachment{ { State: types.AttachmentStatusAttached, - VpcId: PtrString("vpc-0d7892e00e573e701"), + VpcId: new("vpc-0d7892e00e573e701"), }, }, - EgressOnlyInternetGatewayId: PtrString("eigw-0ff50f360e066777a"), + EgressOnlyInternetGatewayId: new("eigw-0ff50f360e066777a"), }, }, } diff --git a/aws-source/adapters/ec2-iam-instance-profile-association_test.go b/aws-source/adapters/ec2-iam-instance-profile-association_test.go index a0573570..e60b665a 100644 --- a/aws-source/adapters/ec2-iam-instance-profile-association_test.go +++ b/aws-source/adapters/ec2-iam-instance-profile-association_test.go @@ -15,14 +15,14 @@ func TestIamInstanceProfileAssociationOutputMapper(t *testing.T) { output := ec2.DescribeIamInstanceProfileAssociationsOutput{ IamInstanceProfileAssociations: []types.IamInstanceProfileAssociation{ { - AssociationId: 
PtrString("eipassoc-1234567890abcdef0"), + AssociationId: new("eipassoc-1234567890abcdef0"), IamInstanceProfile: &types.IamInstanceProfile{ - Arn: PtrString("arn:aws:iam::123456789012:instance-profile/webserver"), // link - Id: PtrString("AIDACKCEVSQ6C2EXAMPLE"), + Arn: new("arn:aws:iam::123456789012:instance-profile/webserver"), // link + Id: new("AIDACKCEVSQ6C2EXAMPLE"), }, - InstanceId: PtrString("i-1234567890abcdef0"), // link + InstanceId: new("i-1234567890abcdef0"), // link State: types.IamInstanceProfileAssociationStateAssociated, - Timestamp: PtrTime(time.Now()), + Timestamp: new(time.Now()), }, }, } diff --git a/aws-source/adapters/ec2-image_test.go b/aws-source/adapters/ec2-image_test.go index eadd9d08..ebdb9e58 100644 --- a/aws-source/adapters/ec2-image_test.go +++ b/aws-source/adapters/ec2-image_test.go @@ -44,38 +44,38 @@ func TestImageOutputMapper(t *testing.T) { Images: []types.Image{ { Architecture: "x86_64", - CreationDate: PtrString("2022-12-16T19:37:36.000Z"), - ImageId: PtrString("ami-0ed3646be6ecd97c5"), - ImageLocation: PtrString("052392120703/test"), + CreationDate: new("2022-12-16T19:37:36.000Z"), + ImageId: new("ami-0ed3646be6ecd97c5"), + ImageLocation: new("052392120703/test"), ImageType: types.ImageTypeValuesMachine, - Public: PtrBool(false), - OwnerId: PtrString("052392120703"), - PlatformDetails: PtrString("Linux/UNIX"), - UsageOperation: PtrString("RunInstances"), + Public: new(false), + OwnerId: new("052392120703"), + PlatformDetails: new("Linux/UNIX"), + UsageOperation: new("RunInstances"), State: types.ImageStateAvailable, BlockDeviceMappings: []types.BlockDeviceMapping{ { - DeviceName: PtrString("/dev/xvda"), + DeviceName: new("/dev/xvda"), Ebs: &types.EbsBlockDevice{ - DeleteOnTermination: PtrBool(true), - SnapshotId: PtrString("snap-0efd796ecbd599f8d"), - VolumeSize: PtrInt32(8), + DeleteOnTermination: new(true), + SnapshotId: new("snap-0efd796ecbd599f8d"), + VolumeSize: new(int32(8)), VolumeType: types.VolumeTypeGp2, - 
Encrypted: PtrBool(false), + Encrypted: new(false), }, }, }, - EnaSupport: PtrBool(true), + EnaSupport: new(true), Hypervisor: types.HypervisorTypeXen, - Name: PtrString("test"), - RootDeviceName: PtrString("/dev/xvda"), + Name: new("test"), + RootDeviceName: new("/dev/xvda"), RootDeviceType: types.DeviceTypeEbs, - SriovNetSupport: PtrString("simple"), + SriovNetSupport: new("simple"), VirtualizationType: types.VirtualizationTypeHvm, Tags: []types.Tag{ { - Key: PtrString("Name"), - Value: PtrString("test"), + Key: new("Name"), + Value: new("test"), }, }, }, diff --git a/aws-source/adapters/ec2-instance-event-window_test.go b/aws-source/adapters/ec2-instance-event-window_test.go index e0826d43..3a2eb46d 100644 --- a/aws-source/adapters/ec2-instance-event-window_test.go +++ b/aws-source/adapters/ec2-instance-event-window_test.go @@ -51,14 +51,14 @@ func TestInstanceEventWindowOutputMapper(t *testing.T) { "instance", }, }, - CronExpression: PtrString("something"), - InstanceEventWindowId: PtrString("window-123"), - Name: PtrString("test"), + CronExpression: new("something"), + InstanceEventWindowId: new("window-123"), + Name: new("test"), State: types.InstanceEventWindowStateActive, TimeRanges: []types.InstanceEventWindowTimeRange{ { - StartHour: PtrInt32(1), - EndHour: PtrInt32(2), + StartHour: new(int32(1)), + EndHour: new(int32(2)), EndWeekDay: types.WeekDayFriday, StartWeekDay: types.WeekDayMonday, }, diff --git a/aws-source/adapters/ec2-instance-status_test.go b/aws-source/adapters/ec2-instance-status_test.go index f4ff907e..b593b8ed 100644 --- a/aws-source/adapters/ec2-instance-status_test.go +++ b/aws-source/adapters/ec2-instance-status_test.go @@ -43,10 +43,10 @@ func TestInstanceStatusOutputMapper(t *testing.T) { output := &ec2.DescribeInstanceStatusOutput{ InstanceStatuses: []types.InstanceStatus{ { - AvailabilityZone: PtrString("eu-west-2c"), // link - InstanceId: PtrString("i-022bdccde30270570"), // link + AvailabilityZone: new("eu-west-2c"), // link + 
InstanceId: new("i-022bdccde30270570"), // link InstanceState: &types.InstanceState{ - Code: PtrInt32(16), + Code: new(int32(16)), Name: types.InstanceStateNameRunning, }, InstanceStatus: &types.InstanceStatusSummary{ diff --git a/aws-source/adapters/ec2-instance_test.go b/aws-source/adapters/ec2-instance_test.go index 6fc7f5e9..601ed302 100644 --- a/aws-source/adapters/ec2-instance_test.go +++ b/aws-source/adapters/ec2-instance_test.go @@ -45,171 +45,171 @@ func TestInstanceOutputMapper(t *testing.T) { { Instances: []types.Instance{ { - AmiLaunchIndex: PtrInt32(0), - PublicIpAddress: PtrString("43.5.36.7"), - ImageId: PtrString("ami-04706e771f950937f"), - InstanceId: PtrString("i-04c7b2794f7bc3d6a"), + AmiLaunchIndex: new(int32(0)), + PublicIpAddress: new("43.5.36.7"), + ImageId: new("ami-04706e771f950937f"), + InstanceId: new("i-04c7b2794f7bc3d6a"), IamInstanceProfile: &types.IamInstanceProfile{ - Arn: PtrString("arn:aws:iam::052392120703:instance-profile/test"), - Id: PtrString("AIDAJQEAZVQ7Y2EYQ2Z6Q"), + Arn: new("arn:aws:iam::052392120703:instance-profile/test"), + Id: new("AIDAJQEAZVQ7Y2EYQ2Z6Q"), }, BootMode: types.BootModeValuesLegacyBios, CurrentInstanceBootMode: types.InstanceBootModeValuesLegacyBios, ElasticGpuAssociations: []types.ElasticGpuAssociation{ { - ElasticGpuAssociationId: PtrString("ega-0a1b2c3d4e5f6g7h8"), - ElasticGpuAssociationState: PtrString("associated"), - ElasticGpuAssociationTime: PtrString("now"), - ElasticGpuId: PtrString("egp-0a1b2c3d4e5f6g7h8"), + ElasticGpuAssociationId: new("ega-0a1b2c3d4e5f6g7h8"), + ElasticGpuAssociationState: new("associated"), + ElasticGpuAssociationTime: new("now"), + ElasticGpuId: new("egp-0a1b2c3d4e5f6g7h8"), }, }, - CapacityReservationId: PtrString("cr-0a1b2c3d4e5f6g7h8"), + CapacityReservationId: new("cr-0a1b2c3d4e5f6g7h8"), InstanceType: types.InstanceTypeT2Micro, ElasticInferenceAcceleratorAssociations: []types.ElasticInferenceAcceleratorAssociation{ { - ElasticInferenceAcceleratorArn: 
PtrString("arn:aws:elastic-inference:us-east-1:052392120703:accelerator/eia-0a1b2c3d4e5f6g7h8"), - ElasticInferenceAcceleratorAssociationId: PtrString("eiaa-0a1b2c3d4e5f6g7h8"), - ElasticInferenceAcceleratorAssociationState: PtrString("associated"), - ElasticInferenceAcceleratorAssociationTime: PtrTime(time.Now()), + ElasticInferenceAcceleratorArn: new("arn:aws:elastic-inference:us-east-1:052392120703:accelerator/eia-0a1b2c3d4e5f6g7h8"), + ElasticInferenceAcceleratorAssociationId: new("eiaa-0a1b2c3d4e5f6g7h8"), + ElasticInferenceAcceleratorAssociationState: new("associated"), + ElasticInferenceAcceleratorAssociationTime: new(time.Now()), }, }, InstanceLifecycle: types.InstanceLifecycleTypeScheduled, - Ipv6Address: PtrString("2001:db8:3333:4444:5555:6666:7777:8888"), - KeyName: PtrString("dylan.ratcliffe"), - KernelId: PtrString("aki-0a1b2c3d4e5f6g7h8"), + Ipv6Address: new("2001:db8:3333:4444:5555:6666:7777:8888"), + KeyName: new("dylan.ratcliffe"), + KernelId: new("aki-0a1b2c3d4e5f6g7h8"), Licenses: []types.LicenseConfiguration{ { - LicenseConfigurationArn: PtrString("arn:aws:license-manager:us-east-1:052392120703:license-configuration:lic-0a1b2c3d4e5f6g7h8"), + LicenseConfigurationArn: new("arn:aws:license-manager:us-east-1:052392120703:license-configuration:lic-0a1b2c3d4e5f6g7h8"), }, }, - OutpostArn: PtrString("arn:aws:outposts:us-east-1:052392120703:outpost/op-0a1b2c3d4e5f6g7h8"), + OutpostArn: new("arn:aws:outposts:us-east-1:052392120703:outpost/op-0a1b2c3d4e5f6g7h8"), Platform: types.PlatformValuesWindows, - RamdiskId: PtrString("ari-0a1b2c3d4e5f6g7h8"), - SpotInstanceRequestId: PtrString("sir-0a1b2c3d4e5f6g7h8"), - SriovNetSupport: PtrString("simple"), + RamdiskId: new("ari-0a1b2c3d4e5f6g7h8"), + SpotInstanceRequestId: new("sir-0a1b2c3d4e5f6g7h8"), + SriovNetSupport: new("simple"), StateReason: &types.StateReason{ - Code: PtrString("foo"), - Message: PtrString("bar"), + Code: new("foo"), + Message: new("bar"), }, - TpmSupport: PtrString("foo"), - LaunchTime: 
PtrTime(time.Now()), + TpmSupport: new("foo"), + LaunchTime: new(time.Now()), Monitoring: &types.Monitoring{ State: types.MonitoringStateDisabled, }, Placement: &types.Placement{ - AvailabilityZone: PtrString("eu-west-2c"), // link - GroupName: PtrString(""), - GroupId: PtrString("groupId"), + AvailabilityZone: new("eu-west-2c"), // link + GroupName: new(""), + GroupId: new("groupId"), Tenancy: types.TenancyDefault, }, - PrivateDnsName: PtrString("ip-172-31-95-79.eu-west-2.compute.internal"), - PrivateIpAddress: PtrString("172.31.95.79"), + PrivateDnsName: new("ip-172-31-95-79.eu-west-2.compute.internal"), + PrivateIpAddress: new("172.31.95.79"), ProductCodes: []types.ProductCode{}, - PublicDnsName: PtrString(""), + PublicDnsName: new(""), State: &types.InstanceState{ - Code: PtrInt32(16), + Code: new(int32(16)), Name: types.InstanceStateNameRunning, }, - StateTransitionReason: PtrString(""), - SubnetId: PtrString("subnet-0450a637af9984235"), - VpcId: PtrString("vpc-0d7892e00e573e701"), + StateTransitionReason: new(""), + SubnetId: new("subnet-0450a637af9984235"), + VpcId: new("vpc-0d7892e00e573e701"), Architecture: types.ArchitectureValuesX8664, BlockDeviceMappings: []types.InstanceBlockDeviceMapping{ { - DeviceName: PtrString("/dev/xvda"), + DeviceName: new("/dev/xvda"), Ebs: &types.EbsInstanceBlockDevice{ - AttachTime: PtrTime(time.Now()), - DeleteOnTermination: PtrBool(true), + AttachTime: new(time.Now()), + DeleteOnTermination: new(true), Status: types.AttachmentStatusAttached, - VolumeId: PtrString("vol-06c7211d9e79a355e"), + VolumeId: new("vol-06c7211d9e79a355e"), }, }, }, - ClientToken: PtrString("eafad400-29e0-4b5c-a0fc-ef74c77659c4"), - EbsOptimized: PtrBool(false), - EnaSupport: PtrBool(true), + ClientToken: new("eafad400-29e0-4b5c-a0fc-ef74c77659c4"), + EbsOptimized: new(false), + EnaSupport: new(true), Hypervisor: types.HypervisorTypeXen, NetworkInterfaces: []types.InstanceNetworkInterface{ { Attachment: &types.InstanceNetworkInterfaceAttachment{ - 
AttachTime: PtrTime(time.Now()), - AttachmentId: PtrString("eni-attach-02b19215d0dd9c7be"), - DeleteOnTermination: PtrBool(true), - DeviceIndex: PtrInt32(0), + AttachTime: new(time.Now()), + AttachmentId: new("eni-attach-02b19215d0dd9c7be"), + DeleteOnTermination: new(true), + DeviceIndex: new(int32(0)), Status: types.AttachmentStatusAttached, - NetworkCardIndex: PtrInt32(0), + NetworkCardIndex: new(int32(0)), }, - Description: PtrString(""), + Description: new(""), Groups: []types.GroupIdentifier{ { - GroupName: PtrString("default"), - GroupId: PtrString("sg-094e151c9fc5da181"), + GroupName: new("default"), + GroupId: new("sg-094e151c9fc5da181"), }, }, Ipv6Addresses: []types.InstanceIpv6Address{}, - MacAddress: PtrString("02:8c:61:38:6f:c2"), - NetworkInterfaceId: PtrString("eni-09711a69e6d511358"), - OwnerId: PtrString("052392120703"), - PrivateDnsName: PtrString("ip-172-31-95-79.eu-west-2.compute.internal"), - PrivateIpAddress: PtrString("172.31.95.79"), + MacAddress: new("02:8c:61:38:6f:c2"), + NetworkInterfaceId: new("eni-09711a69e6d511358"), + OwnerId: new("052392120703"), + PrivateDnsName: new("ip-172-31-95-79.eu-west-2.compute.internal"), + PrivateIpAddress: new("172.31.95.79"), PrivateIpAddresses: []types.InstancePrivateIpAddress{ { - Primary: PtrBool(true), - PrivateDnsName: PtrString("ip-172-31-95-79.eu-west-2.compute.internal"), - PrivateIpAddress: PtrString("172.31.95.79"), + Primary: new(true), + PrivateDnsName: new("ip-172-31-95-79.eu-west-2.compute.internal"), + PrivateIpAddress: new("172.31.95.79"), }, }, - SourceDestCheck: PtrBool(true), + SourceDestCheck: new(true), Status: types.NetworkInterfaceStatusInUse, - SubnetId: PtrString("subnet-0450a637af9984235"), - VpcId: PtrString("vpc-0d7892e00e573e701"), - InterfaceType: PtrString("interface"), + SubnetId: new("subnet-0450a637af9984235"), + VpcId: new("vpc-0d7892e00e573e701"), + InterfaceType: new("interface"), }, }, - RootDeviceName: PtrString("/dev/xvda"), + RootDeviceName: new("/dev/xvda"), 
RootDeviceType: types.DeviceTypeEbs, SecurityGroups: []types.GroupIdentifier{ { - GroupName: PtrString("default"), - GroupId: PtrString("sg-094e151c9fc5da181"), + GroupName: new("default"), + GroupId: new("sg-094e151c9fc5da181"), }, }, - SourceDestCheck: PtrBool(true), + SourceDestCheck: new(true), Tags: []types.Tag{ { - Key: PtrString("Name"), - Value: PtrString("test"), + Key: new("Name"), + Value: new("test"), }, }, VirtualizationType: types.VirtualizationTypeHvm, CpuOptions: &types.CpuOptions{ - CoreCount: PtrInt32(1), - ThreadsPerCore: PtrInt32(1), + CoreCount: new(int32(1)), + ThreadsPerCore: new(int32(1)), }, CapacityReservationSpecification: &types.CapacityReservationSpecificationResponse{ CapacityReservationPreference: types.CapacityReservationPreferenceOpen, }, HibernationOptions: &types.HibernationOptions{ - Configured: PtrBool(false), + Configured: new(false), }, MetadataOptions: &types.InstanceMetadataOptionsResponse{ State: types.InstanceMetadataOptionsStateApplied, HttpTokens: types.HttpTokensStateOptional, - HttpPutResponseHopLimit: PtrInt32(1), + HttpPutResponseHopLimit: new(int32(1)), HttpEndpoint: types.InstanceMetadataEndpointStateEnabled, HttpProtocolIpv6: types.InstanceMetadataProtocolStateDisabled, InstanceMetadataTags: types.InstanceMetadataTagsStateDisabled, }, EnclaveOptions: &types.EnclaveOptions{ - Enabled: PtrBool(false), + Enabled: new(false), }, - PlatformDetails: PtrString("Linux/UNIX"), - UsageOperation: PtrString("RunInstances"), - UsageOperationUpdateTime: PtrTime(time.Now()), + PlatformDetails: new("Linux/UNIX"), + UsageOperation: new("RunInstances"), + UsageOperationUpdateTime: new(time.Now()), PrivateDnsNameOptions: &types.PrivateDnsNameOptionsResponse{ HostnameType: types.HostnameTypeIpName, - EnableResourceNameDnsARecord: PtrBool(true), - EnableResourceNameDnsAAAARecord: PtrBool(false), + EnableResourceNameDnsARecord: new(true), + EnableResourceNameDnsAAAARecord: new(false), }, MaintenanceOptions: 
&types.InstanceMaintenanceOptions{ AutoRecovery: types.InstanceAutoRecoveryStateDefault, diff --git a/aws-source/adapters/ec2-internet-gateway_test.go b/aws-source/adapters/ec2-internet-gateway_test.go index 4c6888d5..a0b8213b 100644 --- a/aws-source/adapters/ec2-internet-gateway_test.go +++ b/aws-source/adapters/ec2-internet-gateway_test.go @@ -46,15 +46,15 @@ func TestInternetGatewayOutputMapper(t *testing.T) { Attachments: []types.InternetGatewayAttachment{ { State: types.AttachmentStatusAttached, - VpcId: PtrString("vpc-0d7892e00e573e701"), + VpcId: new("vpc-0d7892e00e573e701"), }, }, - InternetGatewayId: PtrString("igw-03809416c9e2fcb66"), - OwnerId: PtrString("052392120703"), + InternetGatewayId: new("igw-03809416c9e2fcb66"), + OwnerId: new("052392120703"), Tags: []types.Tag{ { - Key: PtrString("Name"), - Value: PtrString("test"), + Key: new("Name"), + Value: new("test"), }, }, }, diff --git a/aws-source/adapters/ec2-key-pair_test.go b/aws-source/adapters/ec2-key-pair_test.go index 780daf80..2cef251f 100644 --- a/aws-source/adapters/ec2-key-pair_test.go +++ b/aws-source/adapters/ec2-key-pair_test.go @@ -42,13 +42,13 @@ func TestKeyPairOutputMapper(t *testing.T) { output := &ec2.DescribeKeyPairsOutput{ KeyPairs: []types.KeyPairInfo{ { - KeyPairId: PtrString("key-04d7068d3a33bf9b2"), - KeyFingerprint: PtrString("df:73:bb:86:a7:cd:9e:18:16:10:50:79:fa:3b:4f:c7:1d:32:cf:58"), - KeyName: PtrString("dylan.ratcliffe"), + KeyPairId: new("key-04d7068d3a33bf9b2"), + KeyFingerprint: new("df:73:bb:86:a7:cd:9e:18:16:10:50:79:fa:3b:4f:c7:1d:32:cf:58"), + KeyName: new("dylan.ratcliffe"), KeyType: types.KeyTypeRsa, Tags: []types.Tag{}, - CreateTime: PtrTime(time.Now()), - PublicKey: PtrString("PUB"), + CreateTime: new(time.Now()), + PublicKey: new("PUB"), }, }, } diff --git a/aws-source/adapters/ec2-launch-template-version_test.go b/aws-source/adapters/ec2-launch-template-version_test.go index 32ac3182..3efb7b63 100644 --- 
a/aws-source/adapters/ec2-launch-template-version_test.go +++ b/aws-source/adapters/ec2-launch-template-version_test.go @@ -47,48 +47,48 @@ func TestLaunchTemplateVersionOutputMapper(t *testing.T) { output := &ec2.DescribeLaunchTemplateVersionsOutput{ LaunchTemplateVersions: []types.LaunchTemplateVersion{ { - LaunchTemplateId: PtrString("lt-015547202038ae102"), - LaunchTemplateName: PtrString("test"), - VersionNumber: PtrInt64(1), - CreateTime: PtrTime(time.Now()), - CreatedBy: PtrString("arn:aws:sts::052392120703:assumed-role/AWSReservedSSO_AWSAdministratorAccess_c1c3c9c54821c68a/dylan@overmind.tech"), - DefaultVersion: PtrBool(true), + LaunchTemplateId: new("lt-015547202038ae102"), + LaunchTemplateName: new("test"), + VersionNumber: new(int64(1)), + CreateTime: new(time.Now()), + CreatedBy: new("arn:aws:sts::052392120703:assumed-role/AWSReservedSSO_AWSAdministratorAccess_c1c3c9c54821c68a/dylan@overmind.tech"), + DefaultVersion: new(true), LaunchTemplateData: &types.ResponseLaunchTemplateData{ NetworkInterfaces: []types.LaunchTemplateInstanceNetworkInterfaceSpecification{ { Ipv6Addresses: []types.InstanceIpv6Address{ { - Ipv6Address: PtrString("ipv6"), + Ipv6Address: new("ipv6"), }, }, - NetworkInterfaceId: PtrString("networkInterface"), + NetworkInterfaceId: new("networkInterface"), PrivateIpAddresses: []types.PrivateIpAddressSpecification{ { - Primary: PtrBool(true), - PrivateIpAddress: PtrString("ip"), + Primary: new(true), + PrivateIpAddress: new("ip"), }, }, - SubnetId: PtrString("subnet"), - DeviceIndex: PtrInt32(0), + SubnetId: new("subnet"), + DeviceIndex: new(int32(0)), Groups: []string{ "sg-094e151c9fc5da181", }, }, }, - ImageId: PtrString("ami-084e8c05825742534"), + ImageId: new("ami-084e8c05825742534"), InstanceType: types.InstanceTypeT1Micro, - KeyName: PtrString("dylan.ratcliffe"), + KeyName: new("dylan.ratcliffe"), BlockDeviceMappings: []types.LaunchTemplateBlockDeviceMapping{ { Ebs: &types.LaunchTemplateEbsBlockDevice{ - SnapshotId: 
PtrString("snap"), + SnapshotId: new("snap"), }, }, }, CapacityReservationSpecification: &types.LaunchTemplateCapacityReservationSpecificationResponse{ CapacityReservationPreference: types.CapacityReservationPreferenceNone, CapacityReservationTarget: &types.CapacityReservationTargetResponse{ - CapacityReservationId: PtrString("cap"), + CapacityReservationId: new("cap"), }, }, CpuOptions: &types.LaunchTemplateCpuOptions{}, @@ -97,9 +97,9 @@ func TestLaunchTemplateVersionOutputMapper(t *testing.T) { EnclaveOptions: &types.LaunchTemplateEnclaveOptions{}, ElasticInferenceAccelerators: []types.LaunchTemplateElasticInferenceAcceleratorResponse{}, Placement: &types.LaunchTemplatePlacement{ - AvailabilityZone: PtrString("foo"), - GroupId: PtrString("placement"), - HostId: PtrString("host"), + AvailabilityZone: new("foo"), + GroupId: new("placement"), + HostId: new("host"), }, SecurityGroupIds: []string{ "secGroup", diff --git a/aws-source/adapters/ec2-launch-template_test.go b/aws-source/adapters/ec2-launch-template_test.go index afa43420..af5f58bb 100644 --- a/aws-source/adapters/ec2-launch-template_test.go +++ b/aws-source/adapters/ec2-launch-template_test.go @@ -42,12 +42,12 @@ func TestLaunchTemplateOutputMapper(t *testing.T) { output := &ec2.DescribeLaunchTemplatesOutput{ LaunchTemplates: []types.LaunchTemplate{ { - CreateTime: PtrTime(time.Now()), - CreatedBy: PtrString("me"), - DefaultVersionNumber: PtrInt64(1), - LatestVersionNumber: PtrInt64(10), - LaunchTemplateId: PtrString("id"), - LaunchTemplateName: PtrString("hello"), + CreateTime: new(time.Now()), + CreatedBy: new("me"), + DefaultVersionNumber: new(int64(1)), + LatestVersionNumber: new(int64(10)), + LaunchTemplateId: new("id"), + LaunchTemplateName: new("hello"), Tags: []types.Tag{}, }, }, diff --git a/aws-source/adapters/ec2-nat-gateway_test.go b/aws-source/adapters/ec2-nat-gateway_test.go index 88e739e5..2e5cf42d 100644 --- a/aws-source/adapters/ec2-nat-gateway_test.go +++ 
b/aws-source/adapters/ec2-nat-gateway_test.go @@ -43,47 +43,47 @@ func TestNatGatewayOutputMapper(t *testing.T) { output := &ec2.DescribeNatGatewaysOutput{ NatGateways: []types.NatGateway{ { - CreateTime: PtrTime(time.Now()), - DeleteTime: PtrTime(time.Now()), - FailureCode: PtrString("Gateway.NotAttached"), - FailureMessage: PtrString("Network vpc-0d7892e00e573e701 has no Internet gateway attached"), + CreateTime: new(time.Now()), + DeleteTime: new(time.Now()), + FailureCode: new("Gateway.NotAttached"), + FailureMessage: new("Network vpc-0d7892e00e573e701 has no Internet gateway attached"), NatGatewayAddresses: []types.NatGatewayAddress{ { - AllocationId: PtrString("eipalloc-000a9739291350592"), - NetworkInterfaceId: PtrString("eni-0c59532b8e10343ae"), - PrivateIp: PtrString("172.31.89.23"), + AllocationId: new("eipalloc-000a9739291350592"), + NetworkInterfaceId: new("eni-0c59532b8e10343ae"), + PrivateIp: new("172.31.89.23"), }, }, - NatGatewayId: PtrString("nat-0e4e73d7ac46af25e"), + NatGatewayId: new("nat-0e4e73d7ac46af25e"), State: types.NatGatewayStateFailed, - SubnetId: PtrString("subnet-0450a637af9984235"), - VpcId: PtrString("vpc-0d7892e00e573e701"), + SubnetId: new("subnet-0450a637af9984235"), + VpcId: new("vpc-0d7892e00e573e701"), Tags: []types.Tag{ { - Key: PtrString("Name"), - Value: PtrString("test"), + Key: new("Name"), + Value: new("test"), }, }, ConnectivityType: types.ConnectivityTypePublic, }, { - CreateTime: PtrTime(time.Now()), + CreateTime: new(time.Now()), NatGatewayAddresses: []types.NatGatewayAddress{ { - AllocationId: PtrString("eipalloc-000a9739291350592"), - NetworkInterfaceId: PtrString("eni-0b4652e6f2aa36d78"), - PrivateIp: PtrString("172.31.35.98"), - PublicIp: PtrString("18.170.133.9"), + AllocationId: new("eipalloc-000a9739291350592"), + NetworkInterfaceId: new("eni-0b4652e6f2aa36d78"), + PrivateIp: new("172.31.35.98"), + PublicIp: new("18.170.133.9"), }, }, - NatGatewayId: PtrString("nat-0e07f7530ef076766"), + NatGatewayId: 
new("nat-0e07f7530ef076766"), State: types.NatGatewayStateAvailable, - SubnetId: PtrString("subnet-0d8ae4b4e07647efa"), - VpcId: PtrString("vpc-0d7892e00e573e701"), + SubnetId: new("subnet-0d8ae4b4e07647efa"), + VpcId: new("vpc-0d7892e00e573e701"), Tags: []types.Tag{ { - Key: PtrString("Name"), - Value: PtrString("test"), + Key: new("Name"), + Value: new("test"), }, }, ConnectivityType: types.ConnectivityTypePublic, diff --git a/aws-source/adapters/ec2-network-acl_test.go b/aws-source/adapters/ec2-network-acl_test.go index fba190e0..4b34b1dc 100644 --- a/aws-source/adapters/ec2-network-acl_test.go +++ b/aws-source/adapters/ec2-network-acl_test.go @@ -45,42 +45,42 @@ func TestNetworkAclOutputMapper(t *testing.T) { { Associations: []types.NetworkAclAssociation{ { - NetworkAclAssociationId: PtrString("aclassoc-0f85f8b1fde0a5939"), - NetworkAclId: PtrString("acl-0a346e8e6f5a9ad91"), - SubnetId: PtrString("subnet-0450a637af9984235"), + NetworkAclAssociationId: new("aclassoc-0f85f8b1fde0a5939"), + NetworkAclId: new("acl-0a346e8e6f5a9ad91"), + SubnetId: new("subnet-0450a637af9984235"), }, { - NetworkAclAssociationId: PtrString("aclassoc-064b78003a2d309a4"), - NetworkAclId: PtrString("acl-0a346e8e6f5a9ad91"), - SubnetId: PtrString("subnet-06c0dea0437180c61"), + NetworkAclAssociationId: new("aclassoc-064b78003a2d309a4"), + NetworkAclId: new("acl-0a346e8e6f5a9ad91"), + SubnetId: new("subnet-06c0dea0437180c61"), }, { - NetworkAclAssociationId: PtrString("aclassoc-0575080579a7381f5"), - NetworkAclId: PtrString("acl-0a346e8e6f5a9ad91"), - SubnetId: PtrString("subnet-0d8ae4b4e07647efa"), + NetworkAclAssociationId: new("aclassoc-0575080579a7381f5"), + NetworkAclId: new("acl-0a346e8e6f5a9ad91"), + SubnetId: new("subnet-0d8ae4b4e07647efa"), }, }, Entries: []types.NetworkAclEntry{ { - CidrBlock: PtrString("0.0.0.0/0"), - Egress: PtrBool(true), - Protocol: PtrString("-1"), + CidrBlock: new("0.0.0.0/0"), + Egress: new(true), + Protocol: new("-1"), RuleAction: types.RuleActionAllow, - 
RuleNumber: PtrInt32(100), + RuleNumber: new(int32(100)), }, { - CidrBlock: PtrString("0.0.0.0/0"), - Egress: PtrBool(true), - Protocol: PtrString("-1"), + CidrBlock: new("0.0.0.0/0"), + Egress: new(true), + Protocol: new("-1"), RuleAction: types.RuleActionDeny, - RuleNumber: PtrInt32(32767), + RuleNumber: new(int32(32767)), }, }, - IsDefault: PtrBool(true), - NetworkAclId: PtrString("acl-0a346e8e6f5a9ad91"), + IsDefault: new(true), + NetworkAclId: new("acl-0a346e8e6f5a9ad91"), Tags: []types.Tag{}, - VpcId: PtrString("vpc-0d7892e00e573e701"), - OwnerId: PtrString("052392120703"), + VpcId: new("vpc-0d7892e00e573e701"), + OwnerId: new("052392120703"), }, }, } diff --git a/aws-source/adapters/ec2-network-interface-permission_test.go b/aws-source/adapters/ec2-network-interface-permission_test.go index 93104d99..7906de40 100644 --- a/aws-source/adapters/ec2-network-interface-permission_test.go +++ b/aws-source/adapters/ec2-network-interface-permission_test.go @@ -43,9 +43,9 @@ func TestNetworkInterfacePermissionOutputMapper(t *testing.T) { output := &ec2.DescribeNetworkInterfacePermissionsOutput{ NetworkInterfacePermissions: []types.NetworkInterfacePermission{ { - NetworkInterfacePermissionId: PtrString("eni-perm-0b6211455242c105e"), - NetworkInterfaceId: PtrString("eni-07f8f3d404036c833"), - AwsService: PtrString("routing.hyperplane.eu-west-2.amazonaws.com"), + NetworkInterfacePermissionId: new("eni-perm-0b6211455242c105e"), + NetworkInterfaceId: new("eni-07f8f3d404036c833"), + AwsService: new("routing.hyperplane.eu-west-2.amazonaws.com"), Permission: types.InterfacePermissionTypeInstanceAttach, PermissionState: &types.NetworkInterfacePermissionState{ State: types.NetworkInterfacePermissionStateCodeGranted, diff --git a/aws-source/adapters/ec2-network-interface_test.go b/aws-source/adapters/ec2-network-interface_test.go index 2a03331f..d3066cac 100644 --- a/aws-source/adapters/ec2-network-interface_test.go +++ b/aws-source/adapters/ec2-network-interface_test.go @@ 
-126,62 +126,62 @@ func TestNetworkInterfaceOutputMapper(t *testing.T) { NetworkInterfaces: []types.NetworkInterface{ { Association: &types.NetworkInterfaceAssociation{ - AllocationId: PtrString("eipalloc-000a9739291350592"), - AssociationId: PtrString("eipassoc-049cda1f947e5efe6"), - IpOwnerId: PtrString("052392120703"), - PublicDnsName: PtrString("ec2-18-170-133-9.eu-west-2.compute.amazonaws.com"), - PublicIp: PtrString("18.170.133.9"), + AllocationId: new("eipalloc-000a9739291350592"), + AssociationId: new("eipassoc-049cda1f947e5efe6"), + IpOwnerId: new("052392120703"), + PublicDnsName: new("ec2-18-170-133-9.eu-west-2.compute.amazonaws.com"), + PublicIp: new("18.170.133.9"), }, Attachment: &types.NetworkInterfaceAttachment{ - AttachmentId: PtrString("ela-attach-03e560efca8c9e5d8"), - DeleteOnTermination: PtrBool(false), - DeviceIndex: PtrInt32(1), - InstanceOwnerId: PtrString("amazon-aws"), + AttachmentId: new("ela-attach-03e560efca8c9e5d8"), + DeleteOnTermination: new(false), + DeviceIndex: new(int32(1)), + InstanceOwnerId: new("amazon-aws"), Status: types.AttachmentStatusAttached, - InstanceId: PtrString("foo"), + InstanceId: new("foo"), }, - AvailabilityZone: PtrString("eu-west-2b"), - Description: PtrString("Interface for NAT Gateway nat-0e07f7530ef076766"), + AvailabilityZone: new("eu-west-2b"), + Description: new("Interface for NAT Gateway nat-0e07f7530ef076766"), Groups: []types.GroupIdentifier{ { - GroupId: PtrString("group-123"), - GroupName: PtrString("something"), + GroupId: new("group-123"), + GroupName: new("something"), }, }, InterfaceType: types.NetworkInterfaceTypeNatGateway, Ipv6Addresses: []types.NetworkInterfaceIpv6Address{ { - Ipv6Address: PtrString("2001:db8:1234:0000:0000:0000:0000:0000"), + Ipv6Address: new("2001:db8:1234:0000:0000:0000:0000:0000"), }, }, - MacAddress: PtrString("0a:f4:55:b0:6c:be"), - NetworkInterfaceId: PtrString("eni-0b4652e6f2aa36d78"), - OwnerId: PtrString("052392120703"), - PrivateDnsName: 
PtrString("ip-172-31-35-98.eu-west-2.compute.internal"), - PrivateIpAddress: PtrString("172.31.35.98"), + MacAddress: new("0a:f4:55:b0:6c:be"), + NetworkInterfaceId: new("eni-0b4652e6f2aa36d78"), + OwnerId: new("052392120703"), + PrivateDnsName: new("ip-172-31-35-98.eu-west-2.compute.internal"), + PrivateIpAddress: new("172.31.35.98"), PrivateIpAddresses: []types.NetworkInterfacePrivateIpAddress{ { Association: &types.NetworkInterfaceAssociation{ - AllocationId: PtrString("eipalloc-000a9739291350592"), - AssociationId: PtrString("eipassoc-049cda1f947e5efe6"), - IpOwnerId: PtrString("052392120703"), - PublicDnsName: PtrString("ec2-18-170-133-9.eu-west-2.compute.amazonaws.com"), - PublicIp: PtrString("18.170.133.9"), - CarrierIp: PtrString("18.170.133.10"), - CustomerOwnedIp: PtrString("18.170.133.11"), + AllocationId: new("eipalloc-000a9739291350592"), + AssociationId: new("eipassoc-049cda1f947e5efe6"), + IpOwnerId: new("052392120703"), + PublicDnsName: new("ec2-18-170-133-9.eu-west-2.compute.amazonaws.com"), + PublicIp: new("18.170.133.9"), + CarrierIp: new("18.170.133.10"), + CustomerOwnedIp: new("18.170.133.11"), }, - Primary: PtrBool(true), - PrivateDnsName: PtrString("ip-172-31-35-98.eu-west-2.compute.internal"), - PrivateIpAddress: PtrString("172.31.35.98"), + Primary: new(true), + PrivateDnsName: new("ip-172-31-35-98.eu-west-2.compute.internal"), + PrivateIpAddress: new("172.31.35.98"), }, }, - RequesterId: PtrString("440527171281"), - RequesterManaged: PtrBool(true), - SourceDestCheck: PtrBool(false), + RequesterId: new("440527171281"), + RequesterManaged: new(true), + SourceDestCheck: new(false), Status: types.NetworkInterfaceStatusInUse, - SubnetId: PtrString("subnet-0d8ae4b4e07647efa"), + SubnetId: new("subnet-0d8ae4b4e07647efa"), TagSet: []types.Tag{}, - VpcId: PtrString("vpc-0d7892e00e573e701"), + VpcId: new("vpc-0d7892e00e573e701"), }, }, } diff --git a/aws-source/adapters/ec2-placement-group_test.go b/aws-source/adapters/ec2-placement-group_test.go 
index ca79deb5..ea72ca7f 100644 --- a/aws-source/adapters/ec2-placement-group_test.go +++ b/aws-source/adapters/ec2-placement-group_test.go @@ -42,13 +42,13 @@ func TestPlacementGroupOutputMapper(t *testing.T) { output := &ec2.DescribePlacementGroupsOutput{ PlacementGroups: []types.PlacementGroup{ { - GroupArn: PtrString("arn"), - GroupId: PtrString("id"), - GroupName: PtrString("name"), + GroupArn: new("arn"), + GroupId: new("id"), + GroupName: new("name"), SpreadLevel: types.SpreadLevelHost, State: types.PlacementGroupStateAvailable, Strategy: types.PlacementStrategyCluster, - PartitionCount: PtrInt32(1), + PartitionCount: new(int32(1)), Tags: []types.Tag{}, }, }, diff --git a/aws-source/adapters/ec2-reserved-instance_test.go b/aws-source/adapters/ec2-reserved-instance_test.go index 9a381de8..31450d4e 100644 --- a/aws-source/adapters/ec2-reserved-instance_test.go +++ b/aws-source/adapters/ec2-reserved-instance_test.go @@ -42,12 +42,12 @@ func TestReservedInstanceOutputMapper(t *testing.T) { output := &ec2.DescribeReservedInstancesOutput{ ReservedInstances: []types.ReservedInstances{ { - AvailabilityZone: PtrString("az"), + AvailabilityZone: new("az"), CurrencyCode: types.CurrencyCodeValuesUsd, - Duration: PtrInt64(100), - End: PtrTime(time.Now()), - FixedPrice: PtrFloat32(1.23), - InstanceCount: PtrInt32(1), + Duration: new(int64(100)), + End: new(time.Now()), + FixedPrice: new(float32(1.23)), + InstanceCount: new(int32(1)), InstanceTenancy: types.TenancyDedicated, InstanceType: types.InstanceTypeA14xlarge, OfferingClass: types.OfferingClassTypeConvertible, @@ -55,15 +55,15 @@ func TestReservedInstanceOutputMapper(t *testing.T) { ProductDescription: types.RIProductDescription("foo"), RecurringCharges: []types.RecurringCharge{ { - Amount: PtrFloat64(1.111), + Amount: new(1.111), Frequency: types.RecurringChargeFrequencyHourly, }, }, - ReservedInstancesId: PtrString("id"), + ReservedInstancesId: new("id"), Scope: types.ScopeAvailabilityZone, - Start: 
PtrTime(time.Now()), + Start: new(time.Now()), State: types.ReservedInstanceStateActive, - UsagePrice: PtrFloat32(99.00000001), + UsagePrice: new(float32(99.00000001)), }, }, } diff --git a/aws-source/adapters/ec2-route-table_test.go b/aws-source/adapters/ec2-route-table_test.go index 7d5d1875..96878600 100644 --- a/aws-source/adapters/ec2-route-table_test.go +++ b/aws-source/adapters/ec2-route-table_test.go @@ -45,11 +45,11 @@ func TestRouteTableOutputMapper(t *testing.T) { { Associations: []types.RouteTableAssociation{ { - Main: PtrBool(false), - RouteTableAssociationId: PtrString("rtbassoc-0aa1442039abff3db"), - RouteTableId: PtrString("rtb-00b1197fa95a6b35f"), - SubnetId: PtrString("subnet-06c0dea0437180c61"), - GatewayId: PtrString("ID"), + Main: new(false), + RouteTableAssociationId: new("rtbassoc-0aa1442039abff3db"), + RouteTableId: new("rtb-00b1197fa95a6b35f"), + SubnetId: new("subnet-06c0dea0437180c61"), + GatewayId: new("ID"), AssociationState: &types.RouteTableAssociationState{ State: types.RouteTableAssociationStateCodeAssociated, }, @@ -57,35 +57,35 @@ func TestRouteTableOutputMapper(t *testing.T) { }, PropagatingVgws: []types.PropagatingVgw{ { - GatewayId: PtrString("goo"), + GatewayId: new("goo"), }, }, - RouteTableId: PtrString("rtb-00b1197fa95a6b35f"), + RouteTableId: new("rtb-00b1197fa95a6b35f"), Routes: []types.Route{ { - DestinationCidrBlock: PtrString("172.31.0.0/16"), - GatewayId: PtrString("igw-12345"), + DestinationCidrBlock: new("172.31.0.0/16"), + GatewayId: new("igw-12345"), Origin: types.RouteOriginCreateRouteTable, State: types.RouteStateActive, }, { - DestinationPrefixListId: PtrString("pl-7ca54015"), - GatewayId: PtrString("vpce-09fcbac4dcf142db3"), + DestinationPrefixListId: new("pl-7ca54015"), + GatewayId: new("vpce-09fcbac4dcf142db3"), Origin: types.RouteOriginCreateRoute, State: types.RouteStateActive, - CarrierGatewayId: PtrString("id"), - EgressOnlyInternetGatewayId: PtrString("id"), - InstanceId: PtrString("id"), - 
InstanceOwnerId: PtrString("id"), - LocalGatewayId: PtrString("id"), - NatGatewayId: PtrString("id"), - NetworkInterfaceId: PtrString("id"), - TransitGatewayId: PtrString("id"), - VpcPeeringConnectionId: PtrString("id"), + CarrierGatewayId: new("id"), + EgressOnlyInternetGatewayId: new("id"), + InstanceId: new("id"), + InstanceOwnerId: new("id"), + LocalGatewayId: new("id"), + NatGatewayId: new("id"), + NetworkInterfaceId: new("id"), + TransitGatewayId: new("id"), + VpcPeeringConnectionId: new("id"), }, }, - VpcId: PtrString("vpc-0d7892e00e573e701"), - OwnerId: PtrString("052392120703"), + VpcId: new("vpc-0d7892e00e573e701"), + OwnerId: new("052392120703"), }, }, } diff --git a/aws-source/adapters/ec2-security-group-rule_test.go b/aws-source/adapters/ec2-security-group-rule_test.go index 95465917..5523c009 100644 --- a/aws-source/adapters/ec2-security-group-rule_test.go +++ b/aws-source/adapters/ec2-security-group-rule_test.go @@ -43,33 +43,33 @@ func TestSecurityGroupRuleOutputMapper(t *testing.T) { output := &ec2.DescribeSecurityGroupRulesOutput{ SecurityGroupRules: []types.SecurityGroupRule{ { - SecurityGroupRuleId: PtrString("sgr-0b0e42d1431e832bd"), - GroupId: PtrString("sg-0814766e46f201c22"), - GroupOwnerId: PtrString("052392120703"), - IsEgress: PtrBool(false), - IpProtocol: PtrString("tcp"), - FromPort: PtrInt32(2049), - ToPort: PtrInt32(2049), + SecurityGroupRuleId: new("sgr-0b0e42d1431e832bd"), + GroupId: new("sg-0814766e46f201c22"), + GroupOwnerId: new("052392120703"), + IsEgress: new(false), + IpProtocol: new("tcp"), + FromPort: new(int32(2049)), + ToPort: new(int32(2049)), ReferencedGroupInfo: &types.ReferencedSecurityGroup{ - GroupId: PtrString("sg-09371b4a54fe7ab38"), - UserId: PtrString("052392120703"), + GroupId: new("sg-09371b4a54fe7ab38"), + UserId: new("052392120703"), }, - Description: PtrString("Created by the LIW for EFS at 2022-12-16T19:14:27.033Z"), + Description: new("Created by the LIW for EFS at 2022-12-16T19:14:27.033Z"), Tags: 
[]types.Tag{}, }, { - SecurityGroupRuleId: PtrString("sgr-04b583a90b4fa4ada"), - GroupId: PtrString("sg-09371b4a54fe7ab38"), - GroupOwnerId: PtrString("052392120703"), - IsEgress: PtrBool(true), - IpProtocol: PtrString("tcp"), - FromPort: PtrInt32(2049), - ToPort: PtrInt32(2049), + SecurityGroupRuleId: new("sgr-04b583a90b4fa4ada"), + GroupId: new("sg-09371b4a54fe7ab38"), + GroupOwnerId: new("052392120703"), + IsEgress: new(true), + IpProtocol: new("tcp"), + FromPort: new(int32(2049)), + ToPort: new(int32(2049)), ReferencedGroupInfo: &types.ReferencedSecurityGroup{ - GroupId: PtrString("sg-0814766e46f201c22"), - UserId: PtrString("052392120703"), + GroupId: new("sg-0814766e46f201c22"), + UserId: new("052392120703"), }, - Description: PtrString("Created by the LIW for EFS at 2022-12-16T19:14:27.349Z"), + Description: new("Created by the LIW for EFS at 2022-12-16T19:14:27.349Z"), Tags: []types.Tag{}, }, }, diff --git a/aws-source/adapters/ec2-security-group.go b/aws-source/adapters/ec2-security-group.go index ee5db475..fe35ff1b 100644 --- a/aws-source/adapters/ec2-security-group.go +++ b/aws-source/adapters/ec2-security-group.go @@ -59,8 +59,7 @@ func securityGroupOutputMapper(_ context.Context, _ *ec2.Client, scope string, _ } // Network Interfaces using this security group - // This enables blast radius propagation from security groups to - // instances via their network interfaces + // Link to network interfaces using this security group so the graph and blast radius analysis can traverse to attached instances. 
if securityGroup.GroupId != nil { item.LinkedItemQueries = append(item.LinkedItemQueries, &sdp.LinkedItemQuery{ Query: &sdp.Query{ diff --git a/aws-source/adapters/ec2-security-group_test.go b/aws-source/adapters/ec2-security-group_test.go index ca1379a4..07f8f003 100644 --- a/aws-source/adapters/ec2-security-group_test.go +++ b/aws-source/adapters/ec2-security-group_test.go @@ -43,30 +43,30 @@ func TestSecurityGroupOutputMapper(t *testing.T) { output := &ec2.DescribeSecurityGroupsOutput{ SecurityGroups: []types.SecurityGroup{ { - Description: PtrString("default VPC security group"), - GroupName: PtrString("default"), + Description: new("default VPC security group"), + GroupName: new("default"), IpPermissions: []types.IpPermission{ { - IpProtocol: PtrString("-1"), + IpProtocol: new("-1"), IpRanges: []types.IpRange{}, Ipv6Ranges: []types.Ipv6Range{}, PrefixListIds: []types.PrefixListId{}, UserIdGroupPairs: []types.UserIdGroupPair{ { - GroupId: PtrString("sg-094e151c9fc5da181"), - UserId: PtrString("052392120704"), + GroupId: new("sg-094e151c9fc5da181"), + UserId: new("052392120704"), }, }, }, }, - OwnerId: PtrString("052392120703"), - GroupId: PtrString("sg-094e151c9fc5da181"), + OwnerId: new("052392120703"), + GroupId: new("sg-094e151c9fc5da181"), IpPermissionsEgress: []types.IpPermission{ { - IpProtocol: PtrString("-1"), + IpProtocol: new("-1"), IpRanges: []types.IpRange{ { - CidrIp: PtrString("0.0.0.0/0"), + CidrIp: new("0.0.0.0/0"), }, }, Ipv6Ranges: []types.Ipv6Range{}, @@ -74,7 +74,7 @@ func TestSecurityGroupOutputMapper(t *testing.T) { UserIdGroupPairs: []types.UserIdGroupPair{}, }, }, - VpcId: PtrString("vpc-0d7892e00e573e701"), + VpcId: new("vpc-0d7892e00e573e701"), }, }, } diff --git a/aws-source/adapters/ec2-snapshot_test.go b/aws-source/adapters/ec2-snapshot_test.go index bf485224..44af9400 100644 --- a/aws-source/adapters/ec2-snapshot_test.go +++ b/aws-source/adapters/ec2-snapshot_test.go @@ -43,23 +43,23 @@ func TestSnapshotOutputMapper(t *testing.T) { 
output := &ec2.DescribeSnapshotsOutput{ Snapshots: []types.Snapshot{ { - DataEncryptionKeyId: PtrString("ek"), - KmsKeyId: PtrString("key"), - SnapshotId: PtrString("id"), - Description: PtrString("foo"), - Encrypted: PtrBool(false), - OutpostArn: PtrString("something"), - OwnerAlias: PtrString("something"), - OwnerId: PtrString("owner"), - Progress: PtrString("50%"), - RestoreExpiryTime: PtrTime(time.Now()), - StartTime: PtrTime(time.Now()), + DataEncryptionKeyId: new("ek"), + KmsKeyId: new("key"), + SnapshotId: new("id"), + Description: new("foo"), + Encrypted: new(false), + OutpostArn: new("something"), + OwnerAlias: new("something"), + OwnerId: new("owner"), + Progress: new("50%"), + RestoreExpiryTime: new(time.Now()), + StartTime: new(time.Now()), State: types.SnapshotStatePending, - StateMessage: PtrString("pending"), + StateMessage: new("pending"), StorageTier: types.StorageTierArchive, Tags: []types.Tag{}, - VolumeId: PtrString("volumeId"), - VolumeSize: PtrInt32(1024), + VolumeId: new("volumeId"), + VolumeSize: new(int32(1024)), }, }, } diff --git a/aws-source/adapters/ec2-subnet_test.go b/aws-source/adapters/ec2-subnet_test.go index 693afacd..cf2a9341 100644 --- a/aws-source/adapters/ec2-subnet_test.go +++ b/aws-source/adapters/ec2-subnet_test.go @@ -43,36 +43,36 @@ func TestSubnetOutputMapper(t *testing.T) { output := &ec2.DescribeSubnetsOutput{ Subnets: []types.Subnet{ { - AvailabilityZone: PtrString("eu-west-2c"), - AvailabilityZoneId: PtrString("euw2-az1"), - AvailableIpAddressCount: PtrInt32(4091), - CidrBlock: PtrString("172.31.80.0/20"), - DefaultForAz: PtrBool(false), - MapPublicIpOnLaunch: PtrBool(false), - MapCustomerOwnedIpOnLaunch: PtrBool(false), + AvailabilityZone: new("eu-west-2c"), + AvailabilityZoneId: new("euw2-az1"), + AvailableIpAddressCount: new(int32(4091)), + CidrBlock: new("172.31.80.0/20"), + DefaultForAz: new(false), + MapPublicIpOnLaunch: new(false), + MapCustomerOwnedIpOnLaunch: new(false), State: types.SubnetStateAvailable, - 
SubnetId: PtrString("subnet-0450a637af9984235"), - VpcId: PtrString("vpc-0d7892e00e573e701"), - OwnerId: PtrString("052392120703"), - AssignIpv6AddressOnCreation: PtrBool(false), + SubnetId: new("subnet-0450a637af9984235"), + VpcId: new("vpc-0d7892e00e573e701"), + OwnerId: new("052392120703"), + AssignIpv6AddressOnCreation: new(false), Ipv6CidrBlockAssociationSet: []types.SubnetIpv6CidrBlockAssociation{ { - AssociationId: PtrString("id-1234"), - Ipv6CidrBlock: PtrString("something"), + AssociationId: new("id-1234"), + Ipv6CidrBlock: new("something"), Ipv6CidrBlockState: &types.SubnetCidrBlockState{ State: types.SubnetCidrBlockStateCodeAssociated, - StatusMessage: PtrString("something here"), + StatusMessage: new("something here"), }, }, }, Tags: []types.Tag{}, - SubnetArn: PtrString("arn:aws:ec2:eu-west-2:052392120703:subnet/subnet-0450a637af9984235"), - EnableDns64: PtrBool(false), - Ipv6Native: PtrBool(false), + SubnetArn: new("arn:aws:ec2:eu-west-2:052392120703:subnet/subnet-0450a637af9984235"), + EnableDns64: new(false), + Ipv6Native: new(false), PrivateDnsNameOptionsOnLaunch: &types.PrivateDnsNameOptionsOnLaunch{ HostnameType: types.HostnameTypeIpName, - EnableResourceNameDnsARecord: PtrBool(false), - EnableResourceNameDnsAAAARecord: PtrBool(false), + EnableResourceNameDnsARecord: new(false), + EnableResourceNameDnsAAAARecord: new(false), }, }, }, diff --git a/aws-source/adapters/ec2-transit-gateway-route-table-association_test.go b/aws-source/adapters/ec2-transit-gateway-route-table-association_test.go index b6447a84..fea112ec 100644 --- a/aws-source/adapters/ec2-transit-gateway-route-table-association_test.go +++ b/aws-source/adapters/ec2-transit-gateway-route-table-association_test.go @@ -33,8 +33,8 @@ func TestTransitGatewayRouteTableAssociationItemMapper(t *testing.T) { item := &transitGatewayRouteTableAssociationItem{ RouteTableID: "tgw-rtb-123", Association: types.TransitGatewayRouteTableAssociation{ - TransitGatewayAttachmentId: 
PtrString("tgw-attach-456"), - ResourceId: PtrString("vpc-abc"), + TransitGatewayAttachmentId: new("tgw-attach-456"), + ResourceId: new("vpc-abc"), ResourceType: types.TransitGatewayAttachmentResourceTypeVpc, State: types.TransitGatewayAssociationStateAssociated, }, diff --git a/aws-source/adapters/ec2-transit-gateway-route-table-propagation_test.go b/aws-source/adapters/ec2-transit-gateway-route-table-propagation_test.go index 1d2e6b9f..b4526c91 100644 --- a/aws-source/adapters/ec2-transit-gateway-route-table-propagation_test.go +++ b/aws-source/adapters/ec2-transit-gateway-route-table-propagation_test.go @@ -33,8 +33,8 @@ func TestTransitGatewayRouteTablePropagationItemMapper(t *testing.T) { item := &transitGatewayRouteTablePropagationItem{ RouteTableID: "tgw-rtb-123", Propagation: types.TransitGatewayRouteTablePropagation{ - TransitGatewayAttachmentId: PtrString("tgw-attach-456"), - ResourceId: PtrString("vpc-abc"), + TransitGatewayAttachmentId: new("tgw-attach-456"), + ResourceId: new("vpc-abc"), ResourceType: types.TransitGatewayAttachmentResourceTypeVpc, State: types.TransitGatewayPropagationStateEnabled, }, diff --git a/aws-source/adapters/ec2-transit-gateway-route-table_test.go b/aws-source/adapters/ec2-transit-gateway-route-table_test.go index 2bf596fe..4234d548 100644 --- a/aws-source/adapters/ec2-transit-gateway-route-table_test.go +++ b/aws-source/adapters/ec2-transit-gateway-route-table_test.go @@ -43,13 +43,13 @@ func TestTransitGatewayRouteTableOutputMapper(t *testing.T) { output := &ec2.DescribeTransitGatewayRouteTablesOutput{ TransitGatewayRouteTables: []types.TransitGatewayRouteTable{ { - TransitGatewayRouteTableId: PtrString("tgw-rtb-0123456789abcdef0"), - TransitGatewayId: PtrString("tgw-0abc123"), + TransitGatewayRouteTableId: new("tgw-rtb-0123456789abcdef0"), + TransitGatewayId: new("tgw-0abc123"), State: types.TransitGatewayRouteTableStateAvailable, - DefaultAssociationRouteTable: PtrBool(false), - DefaultPropagationRouteTable: 
PtrBool(false), + DefaultAssociationRouteTable: new(false), + DefaultPropagationRouteTable: new(false), Tags: []types.Tag{ - {Key: PtrString("Name"), Value: PtrString("my-route-table")}, + {Key: new("Name"), Value: new("my-route-table")}, }, }, }, diff --git a/aws-source/adapters/ec2-transit-gateway-route.go b/aws-source/adapters/ec2-transit-gateway-route.go index c7f10314..52fa50db 100644 --- a/aws-source/adapters/ec2-transit-gateway-route.go +++ b/aws-source/adapters/ec2-transit-gateway-route.go @@ -56,7 +56,7 @@ func parseRouteQuery(query string) (routeTableID, destination string, err error) // searchRoutesFilter returns a filter that returns all routes (active and blackhole). func searchRoutesFilter() []types.Filter { return []types.Filter{ - {Name: PtrString("state"), Values: []string{"active", "blackhole"}}, + {Name: new("state"), Values: []string{"active", "blackhole"}}, } } @@ -72,7 +72,7 @@ func getTransitGatewayRoute(ctx context.Context, client *ec2.Client, _, query st out, err := client.SearchTransitGatewayRoutes(ctx, &ec2.SearchTransitGatewayRoutesInput{ TransitGatewayRouteTableId: &routeTableID, Filters: searchRoutesFilter(), - MaxResults: PtrInt32(maxSearchRoutesResults), + MaxResults: new(int32(maxSearchRoutesResults)), }) if err != nil { return nil, err @@ -112,7 +112,7 @@ func listTransitGatewayRoutes(ctx context.Context, client *ec2.Client, _ string) routeOut, err := client.SearchTransitGatewayRoutes(ctx, &ec2.SearchTransitGatewayRoutesInput{ TransitGatewayRouteTableId: &rtID, Filters: searchRoutesFilter(), - MaxResults: PtrInt32(maxSearchRoutesResults), + MaxResults: new(int32(maxSearchRoutesResults)), }) if err != nil { return nil, err @@ -135,7 +135,7 @@ func searchTransitGatewayRoutes(ctx context.Context, client *ec2.Client, _, quer routeOut, err := client.SearchTransitGatewayRoutes(ctx, &ec2.SearchTransitGatewayRoutesInput{ TransitGatewayRouteTableId: &routeTableID, Filters: searchRoutesFilter(), - MaxResults: 
PtrInt32(maxSearchRoutesResults), + MaxResults: new(int32(maxSearchRoutesResults)), }) if err != nil { return nil, err @@ -285,11 +285,11 @@ var transitGatewayRouteAdapterMetadata = Metadata.Register(&sdp.AdapterMetadata{ Type: "ec2-transit-gateway-route", DescriptiveName: "Transit Gateway Route", SupportedQueryMethods: &sdp.AdapterSupportedQueryMethods{ - Get: true, - List: true, - Search: true, - GetDescription: "Get by TransitGatewayRouteTableId|Destination (CIDR or pl:PrefixListId)", - ListDescription: "List all transit gateway routes", + Get: true, + List: true, + Search: true, + GetDescription: "Get by TransitGatewayRouteTableId|Destination (CIDR or pl:PrefixListId)", + ListDescription: "List all transit gateway routes", SearchDescription: "Search by TransitGatewayRouteTableId to list routes for that route table", }, PotentialLinks: []string{"ec2-transit-gateway", "ec2-transit-gateway-route-table", "ec2-transit-gateway-route-table-association", "ec2-transit-gateway-attachment", "ec2-transit-gateway-route-table-announcement", "ec2-vpc", "ec2-vpn-connection", "ec2-managed-prefix-list", "directconnect-direct-connect-gateway"}, diff --git a/aws-source/adapters/ec2-transit-gateway-route_test.go b/aws-source/adapters/ec2-transit-gateway-route_test.go index 888edb22..b357f7cb 100644 --- a/aws-source/adapters/ec2-transit-gateway-route_test.go +++ b/aws-source/adapters/ec2-transit-gateway-route_test.go @@ -8,10 +8,10 @@ import ( ) func TestTransitGatewayRouteDestination(t *testing.T) { - if transitGatewayRouteDestination(&types.TransitGatewayRoute{DestinationCidrBlock: PtrString("10.0.0.0/16")}) != "10.0.0.0/16" { + if transitGatewayRouteDestination(&types.TransitGatewayRoute{DestinationCidrBlock: new("10.0.0.0/16")}) != "10.0.0.0/16" { t.Error("expected CIDR destination") } - if transitGatewayRouteDestination(&types.TransitGatewayRoute{PrefixListId: PtrString("pl-123")}) != "pl:pl-123" { + if transitGatewayRouteDestination(&types.TransitGatewayRoute{PrefixListId: 
new("pl-123")}) != "pl:pl-123" { t.Error("expected prefix list destination") } } @@ -42,7 +42,7 @@ func TestTransitGatewayRouteItemMapper(t *testing.T) { item := &transitGatewayRouteItem{ RouteTableID: "tgw-rtb-123", Route: types.TransitGatewayRoute{ - DestinationCidrBlock: PtrString("10.0.0.0/16"), + DestinationCidrBlock: new("10.0.0.0/16"), State: types.TransitGatewayRouteStateActive, Type: types.TransitGatewayRouteTypeStatic, }, diff --git a/aws-source/adapters/ec2-volume-status_test.go b/aws-source/adapters/ec2-volume-status_test.go index a5ac6cae..15943f0a 100644 --- a/aws-source/adapters/ec2-volume-status_test.go +++ b/aws-source/adapters/ec2-volume-status_test.go @@ -45,33 +45,33 @@ func TestVolumeStatusOutputMapper(t *testing.T) { { Actions: []types.VolumeStatusAction{ { - Code: PtrString("enable-volume-io"), - Description: PtrString("Enable volume I/O"), - EventId: PtrString("12"), - EventType: PtrString("io-enabled"), + Code: new("enable-volume-io"), + Description: new("Enable volume I/O"), + EventId: new("12"), + EventType: new("io-enabled"), }, }, - AvailabilityZone: PtrString("eu-west-2c"), + AvailabilityZone: new("eu-west-2c"), Events: []types.VolumeStatusEvent{ { - Description: PtrString("The volume is operating normally"), - EventId: PtrString("12"), - EventType: PtrString("io-enabled"), - InstanceId: PtrString("i-0667d3ca802741e30"), // link - NotAfter: PtrTime(time.Now()), - NotBefore: PtrTime(time.Now()), + Description: new("The volume is operating normally"), + EventId: new("12"), + EventType: new("io-enabled"), + InstanceId: new("i-0667d3ca802741e30"), // link + NotAfter: new(time.Now()), + NotBefore: new(time.Now()), }, }, - VolumeId: PtrString("vol-0a38796ac85e21c11"), // link + VolumeId: new("vol-0a38796ac85e21c11"), // link VolumeStatus: &types.VolumeStatusInfo{ Details: []types.VolumeStatusDetails{ { Name: types.VolumeStatusNameIoEnabled, - Status: PtrString("passed"), + Status: new("passed"), }, { Name: 
types.VolumeStatusNameIoPerformance, - Status: PtrString("not-applicable"), + Status: new("not-applicable"), }, }, Status: types.VolumeStatusInfoStatusOk, diff --git a/aws-source/adapters/ec2-volume_test.go b/aws-source/adapters/ec2-volume_test.go index 277c8dbb..46526bb2 100644 --- a/aws-source/adapters/ec2-volume_test.go +++ b/aws-source/adapters/ec2-volume_test.go @@ -45,24 +45,24 @@ func TestVolumeOutputMapper(t *testing.T) { { Attachments: []types.VolumeAttachment{ { - AttachTime: PtrTime(time.Now()), - Device: PtrString("/dev/sdb"), - InstanceId: PtrString("i-0667d3ca802741e30"), + AttachTime: new(time.Now()), + Device: new("/dev/sdb"), + InstanceId: new("i-0667d3ca802741e30"), State: types.VolumeAttachmentStateAttaching, - VolumeId: PtrString("vol-0eae6976b359d8825"), - DeleteOnTermination: PtrBool(false), + VolumeId: new("vol-0eae6976b359d8825"), + DeleteOnTermination: new(false), }, }, - AvailabilityZone: PtrString("eu-west-2c"), - CreateTime: PtrTime(time.Now()), - Encrypted: PtrBool(false), - Size: PtrInt32(8), + AvailabilityZone: new("eu-west-2c"), + CreateTime: new(time.Now()), + Encrypted: new(false), + Size: new(int32(8)), State: types.VolumeStateInUse, - VolumeId: PtrString("vol-0eae6976b359d8825"), - Iops: PtrInt32(3000), + VolumeId: new("vol-0eae6976b359d8825"), + Iops: new(int32(3000)), VolumeType: types.VolumeTypeGp3, - MultiAttachEnabled: PtrBool(false), - Throughput: PtrInt32(125), + MultiAttachEnabled: new(false), + Throughput: new(int32(125)), }, }, } diff --git a/aws-source/adapters/ec2-vpc-endpoint_test.go b/aws-source/adapters/ec2-vpc-endpoint_test.go index 8a471d13..c12a170d 100644 --- a/aws-source/adapters/ec2-vpc-endpoint_test.go +++ b/aws-source/adapters/ec2-vpc-endpoint_test.go @@ -31,13 +31,13 @@ func TestVpcEndpointOutputMapper(t *testing.T) { output := &ec2.DescribeVpcEndpointsOutput{ VpcEndpoints: []types.VpcEndpoint{ { - VpcEndpointId: PtrString("vpce-0d7892e00e573e701"), + VpcEndpointId: new("vpce-0d7892e00e573e701"), 
VpcEndpointType: types.VpcEndpointTypeInterface, - CreationTimestamp: PtrTime(time.Now()), - VpcId: PtrString("vpc-0d7892e00e573e701"), // link - ServiceName: PtrString("com.amazonaws.us-east-1.s3"), + CreationTimestamp: new(time.Now()), + VpcId: new("vpc-0d7892e00e573e701"), // link + ServiceName: new("com.amazonaws.us-east-1.s3"), State: types.StateAvailable, - PolicyDocument: PtrString("{\"Version\":\"2012-10-17\",\"Statement\":[{\"Action\":\"*\",\"Resource\":\"*\",\"Effect\":\"Allow\",\"Principal\":\"*\"},{\"Condition\":{\"StringNotEquals\":{\"aws:PrincipalAccount\":\"944651592624\"}},\"Action\":\"*\",\"Resource\":\"*\",\"Effect\":\"Deny\",\"Principal\":\"*\"}]}"), // parse this + PolicyDocument: new("{\"Version\":\"2012-10-17\",\"Statement\":[{\"Action\":\"*\",\"Resource\":\"*\",\"Effect\":\"Allow\",\"Principal\":\"*\"},{\"Condition\":{\"StringNotEquals\":{\"aws:PrincipalAccount\":\"944651592624\"}},\"Action\":\"*\",\"Resource\":\"*\",\"Effect\":\"Deny\",\"Principal\":\"*\"}]}"), // parse this RouteTableIds: []string{ "rtb-0d7892e00e573e701", // link }, @@ -46,35 +46,35 @@ func TestVpcEndpointOutputMapper(t *testing.T) { }, Groups: []types.SecurityGroupIdentifier{ { - GroupId: PtrString("sg-0d7892e00e573e701"), // link - GroupName: PtrString("default"), + GroupId: new("sg-0d7892e00e573e701"), // link + GroupName: new("default"), }, }, IpAddressType: types.IpAddressTypeIpv4, - PrivateDnsEnabled: PtrBool(true), - RequesterManaged: PtrBool(false), + PrivateDnsEnabled: new(true), + RequesterManaged: new(false), DnsEntries: []types.DnsEntry{ { - DnsName: PtrString("vpce-0d7892e00e573e701-123456789012.us-east-1.vpce.amazonaws.com"), // link - HostedZoneId: PtrString("Z2F56UZL2M1ACD"), // link + DnsName: new("vpce-0d7892e00e573e701-123456789012.us-east-1.vpce.amazonaws.com"), // link + HostedZoneId: new("Z2F56UZL2M1ACD"), // link }, }, DnsOptions: &types.DnsOptions{ DnsRecordIpType: types.DnsRecordIpTypeDualstack, - PrivateDnsOnlyForInboundResolverEndpoint: 
PtrBool(false), + PrivateDnsOnlyForInboundResolverEndpoint: new(false), }, LastError: &types.LastError{ - Code: PtrString("Client::ValidationException"), - Message: PtrString("The security group 'sg-0d7892e00e573e701' does not exist"), + Code: new("Client::ValidationException"), + Message: new("The security group 'sg-0d7892e00e573e701' does not exist"), }, NetworkInterfaceIds: []string{ "eni-0d7892e00e573e701", // link }, - OwnerId: PtrString("052392120703"), + OwnerId: new("052392120703"), Tags: []types.Tag{ { - Key: PtrString("Name"), - Value: PtrString("my-vpce"), + Key: new("Name"), + Value: new("my-vpce"), }, }, }, diff --git a/aws-source/adapters/ec2-vpc-peering-connection_test.go b/aws-source/adapters/ec2-vpc-peering-connection_test.go index 6b2b6da3..949f5179 100644 --- a/aws-source/adapters/ec2-vpc-peering-connection_test.go +++ b/aws-source/adapters/ec2-vpc-peering-connection_test.go @@ -15,48 +15,48 @@ func TestVpcPeeringConnectionOutputMapper(t *testing.T) { output := &ec2.DescribeVpcPeeringConnectionsOutput{ VpcPeeringConnections: []types.VpcPeeringConnection{ { - VpcPeeringConnectionId: PtrString("pcx-1234567890"), + VpcPeeringConnectionId: new("pcx-1234567890"), Status: &types.VpcPeeringConnectionStateReason{ Code: types.VpcPeeringConnectionStateReasonCodeActive, // health - Message: PtrString("message"), + Message: new("message"), }, AccepterVpcInfo: &types.VpcPeeringConnectionVpcInfo{ - CidrBlock: PtrString("10.0.0.1/24"), + CidrBlock: new("10.0.0.1/24"), CidrBlockSet: []types.CidrBlock{ { - CidrBlock: PtrString("10.0.2.1/24"), + CidrBlock: new("10.0.2.1/24"), }, }, Ipv6CidrBlockSet: []types.Ipv6CidrBlock{ { - Ipv6CidrBlock: PtrString("::/64"), + Ipv6CidrBlock: new("::/64"), }, }, - OwnerId: PtrString("123456789012"), - Region: PtrString("eu-west-2"), // link - VpcId: PtrString("vpc-1234567890"), // link + OwnerId: new("123456789012"), + Region: new("eu-west-2"), // link + VpcId: new("vpc-1234567890"), // link PeeringOptions: 
&types.VpcPeeringConnectionOptionsDescription{ - AllowDnsResolutionFromRemoteVpc: PtrBool(true), + AllowDnsResolutionFromRemoteVpc: new(true), }, }, RequesterVpcInfo: &types.VpcPeeringConnectionVpcInfo{ - CidrBlock: PtrString("10.0.0.1/24"), + CidrBlock: new("10.0.0.1/24"), CidrBlockSet: []types.CidrBlock{ { - CidrBlock: PtrString("10.0.2.1/24"), + CidrBlock: new("10.0.2.1/24"), }, }, Ipv6CidrBlockSet: []types.Ipv6CidrBlock{ { - Ipv6CidrBlock: PtrString("::/64"), + Ipv6CidrBlock: new("::/64"), }, }, - OwnerId: PtrString("987654321098"), + OwnerId: new("987654321098"), PeeringOptions: &types.VpcPeeringConnectionOptionsDescription{ - AllowDnsResolutionFromRemoteVpc: PtrBool(true), + AllowDnsResolutionFromRemoteVpc: new(true), }, - Region: PtrString("eu-west-5"), // link - VpcId: PtrString("vpc-9887654321"), // link + Region: new("eu-west-5"), // link + VpcId: new("vpc-9887654321"), // link }, }, }, diff --git a/aws-source/adapters/ec2-vpc_test.go b/aws-source/adapters/ec2-vpc_test.go index fea243cd..76c22809 100644 --- a/aws-source/adapters/ec2-vpc_test.go +++ b/aws-source/adapters/ec2-vpc_test.go @@ -42,38 +42,38 @@ func TestVpcOutputMapper(t *testing.T) { output := &ec2.DescribeVpcsOutput{ Vpcs: []types.Vpc{ { - CidrBlock: PtrString("172.31.0.0/16"), - DhcpOptionsId: PtrString("dopt-0959b838bf4a4c7b8"), + CidrBlock: new("172.31.0.0/16"), + DhcpOptionsId: new("dopt-0959b838bf4a4c7b8"), State: types.VpcStateAvailable, - VpcId: PtrString("vpc-0d7892e00e573e701"), - OwnerId: PtrString("052392120703"), + VpcId: new("vpc-0d7892e00e573e701"), + OwnerId: new("052392120703"), InstanceTenancy: types.TenancyDefault, CidrBlockAssociationSet: []types.VpcCidrBlockAssociation{ { - AssociationId: PtrString("vpc-cidr-assoc-0b77866f37f500af6"), - CidrBlock: PtrString("172.31.0.0/16"), + AssociationId: new("vpc-cidr-assoc-0b77866f37f500af6"), + CidrBlock: new("172.31.0.0/16"), CidrBlockState: &types.VpcCidrBlockState{ State: types.VpcCidrBlockStateCodeAssociated, }, }, }, - 
IsDefault: PtrBool(false), + IsDefault: new(false), Tags: []types.Tag{ { - Key: PtrString("aws:cloudformation:logical-id"), - Value: PtrString("VPC"), + Key: new("aws:cloudformation:logical-id"), + Value: new("VPC"), }, { - Key: PtrString("aws:cloudformation:stack-id"), - Value: PtrString("arn:aws:cloudformation:eu-west-2:052392120703:stack/StackSet-AWSControlTowerBP-VPC-ACCOUNT-FACTORY-V1-8c2a9348-a30c-4ac3-94c2-8279157c9243/ccde3240-7afa-11ed-81ff-02845d4c2702"), + Key: new("aws:cloudformation:stack-id"), + Value: new("arn:aws:cloudformation:eu-west-2:052392120703:stack/StackSet-AWSControlTowerBP-VPC-ACCOUNT-FACTORY-V1-8c2a9348-a30c-4ac3-94c2-8279157c9243/ccde3240-7afa-11ed-81ff-02845d4c2702"), }, { - Key: PtrString("aws:cloudformation:stack-name"), - Value: PtrString("StackSet-AWSControlTowerBP-VPC-ACCOUNT-FACTORY-V1-8c2a9348-a30c-4ac3-94c2-8279157c9243"), + Key: new("aws:cloudformation:stack-name"), + Value: new("StackSet-AWSControlTowerBP-VPC-ACCOUNT-FACTORY-V1-8c2a9348-a30c-4ac3-94c2-8279157c9243"), }, { - Key: PtrString("Name"), - Value: PtrString("aws-controltower-VPC"), + Key: new("Name"), + Value: new("aws-controltower-VPC"), }, }, }, diff --git a/aws-source/adapters/ecs-capacity-provider_test.go b/aws-source/adapters/ecs-capacity-provider_test.go index 77724230..41512cd3 100644 --- a/aws-source/adapters/ecs-capacity-provider_test.go +++ b/aws-source/adapters/ecs-capacity-provider_test.go @@ -17,42 +17,42 @@ func (t *ecsTestClient) DescribeCapacityProviders(ctx context.Context, params *e "": { CapacityProviders: []types.CapacityProvider{ { - CapacityProviderArn: PtrString("arn:aws:ecs:eu-west-2:052392120703:capacity-provider/FARGATE"), - Name: PtrString("FARGATE"), + CapacityProviderArn: new("arn:aws:ecs:eu-west-2:052392120703:capacity-provider/FARGATE"), + Name: new("FARGATE"), Status: types.CapacityProviderStatusActive, }, }, - NextToken: PtrString("one"), + NextToken: new("one"), }, "one": { CapacityProviders: []types.CapacityProvider{ { - 
CapacityProviderArn: PtrString("arn:aws:ecs:eu-west-2:052392120703:capacity-provider/FARGATE_SPOT"), - Name: PtrString("FARGATE_SPOT"), + CapacityProviderArn: new("arn:aws:ecs:eu-west-2:052392120703:capacity-provider/FARGATE_SPOT"), + Name: new("FARGATE_SPOT"), Status: types.CapacityProviderStatusActive, }, }, - NextToken: PtrString("two"), + NextToken: new("two"), }, "two": { CapacityProviders: []types.CapacityProvider{ { - CapacityProviderArn: PtrString("arn:aws:ecs:eu-west-2:052392120703:capacity-provider/test"), - Name: PtrString("test"), + CapacityProviderArn: new("arn:aws:ecs:eu-west-2:052392120703:capacity-provider/test"), + Name: new("test"), Status: types.CapacityProviderStatusActive, AutoScalingGroupProvider: &types.AutoScalingGroupProvider{ - AutoScalingGroupArn: PtrString("arn:aws:autoscaling:eu-west-2:052392120703:autoScalingGroup:9df90815-98c1-4136-a12a-90abef1c4e4e:autoScalingGroupName/ecs-test"), + AutoScalingGroupArn: new("arn:aws:autoscaling:eu-west-2:052392120703:autoScalingGroup:9df90815-98c1-4136-a12a-90abef1c4e4e:autoScalingGroupName/ecs-test"), ManagedScaling: &types.ManagedScaling{ Status: types.ManagedScalingStatusEnabled, - TargetCapacity: PtrInt32(80), - MinimumScalingStepSize: PtrInt32(1), - MaximumScalingStepSize: PtrInt32(10000), - InstanceWarmupPeriod: PtrInt32(300), + TargetCapacity: new(int32(80)), + MinimumScalingStepSize: new(int32(1)), + MaximumScalingStepSize: new(int32(10000)), + InstanceWarmupPeriod: new(int32(300)), }, ManagedTerminationProtection: types.ManagedTerminationProtectionDisabled, }, UpdateStatus: types.CapacityProviderUpdateStatusDeleteComplete, - UpdateStatusReason: PtrString("reason"), + UpdateStatusReason: new("reason"), }, }, }, @@ -76,22 +76,22 @@ func TestCapacityProviderOutputMapper(t *testing.T) { &ecs.DescribeCapacityProvidersOutput{ CapacityProviders: []types.CapacityProvider{ { - CapacityProviderArn: PtrString("arn:aws:ecs:eu-west-2:052392120703:capacity-provider/test"), - Name: PtrString("test"), + 
CapacityProviderArn: new("arn:aws:ecs:eu-west-2:052392120703:capacity-provider/test"), + Name: new("test"), Status: types.CapacityProviderStatusActive, AutoScalingGroupProvider: &types.AutoScalingGroupProvider{ - AutoScalingGroupArn: PtrString("arn:aws:autoscaling:eu-west-2:052392120703:autoScalingGroup:9df90815-98c1-4136-a12a-90abef1c4e4e:autoScalingGroupName/ecs-test"), + AutoScalingGroupArn: new("arn:aws:autoscaling:eu-west-2:052392120703:autoScalingGroup:9df90815-98c1-4136-a12a-90abef1c4e4e:autoScalingGroupName/ecs-test"), ManagedScaling: &types.ManagedScaling{ Status: types.ManagedScalingStatusEnabled, - TargetCapacity: PtrInt32(80), - MinimumScalingStepSize: PtrInt32(1), - MaximumScalingStepSize: PtrInt32(10000), - InstanceWarmupPeriod: PtrInt32(300), + TargetCapacity: new(int32(80)), + MinimumScalingStepSize: new(int32(1)), + MaximumScalingStepSize: new(int32(10000)), + InstanceWarmupPeriod: new(int32(300)), }, ManagedTerminationProtection: types.ManagedTerminationProtectionDisabled, }, UpdateStatus: types.CapacityProviderUpdateStatusDeleteComplete, - UpdateStatusReason: PtrString("reason"), + UpdateStatusReason: new("reason"), }, }, }, diff --git a/aws-source/adapters/ecs-cluster_test.go b/aws-source/adapters/ecs-cluster_test.go index 3ee97b51..316b1066 100644 --- a/aws-source/adapters/ecs-cluster_test.go +++ b/aws-source/adapters/ecs-cluster_test.go @@ -15,24 +15,24 @@ func (t *ecsTestClient) DescribeClusters(ctx context.Context, params *ecs.Descri return &ecs.DescribeClustersOutput{ Clusters: []types.Cluster{ { - ClusterArn: PtrString("arn:aws:ecs:eu-west-2:052392120703:cluster/default"), - ClusterName: PtrString("default"), - Status: PtrString("ACTIVE"), + ClusterArn: new("arn:aws:ecs:eu-west-2:052392120703:cluster/default"), + ClusterName: new("default"), + Status: new("ACTIVE"), RegisteredContainerInstancesCount: 0, RunningTasksCount: 1, PendingTasksCount: 0, ActiveServicesCount: 1, Statistics: []types.KeyValuePair{ { - Name: PtrString("key"), - Value: 
PtrString("value"), + Name: new("key"), + Value: new("value"), }, }, Tags: []types.Tag{}, Settings: []types.ClusterSetting{ { Name: types.ClusterSettingNameContainerInsights, - Value: PtrString("ENABLED"), + Value: new("ENABLED"), }, }, CapacityProviders: []string{ @@ -40,43 +40,43 @@ func (t *ecsTestClient) DescribeClusters(ctx context.Context, params *ecs.Descri }, DefaultCapacityProviderStrategy: []types.CapacityProviderStrategyItem{ { - CapacityProvider: PtrString("provider"), + CapacityProvider: new("provider"), Base: 10, Weight: 100, }, }, Attachments: []types.Attachment{ { - Id: PtrString("1c1f9cf4-461c-4072-aab2-e2dd346c53e1"), - Type: PtrString("as_policy"), - Status: PtrString("CREATED"), + Id: new("1c1f9cf4-461c-4072-aab2-e2dd346c53e1"), + Type: new("as_policy"), + Status: new("CREATED"), Details: []types.KeyValuePair{ { - Name: PtrString("capacityProviderName"), - Value: PtrString("test"), + Name: new("capacityProviderName"), + Value: new("test"), }, { - Name: PtrString("scalingPolicyName"), - Value: PtrString("ECSManagedAutoScalingPolicy-d2f110eb-20a6-4278-9c1c-47d98e21b1ed"), + Name: new("scalingPolicyName"), + Value: new("ECSManagedAutoScalingPolicy-d2f110eb-20a6-4278-9c1c-47d98e21b1ed"), }, }, }, }, - AttachmentsStatus: PtrString("UPDATE_COMPLETE"), + AttachmentsStatus: new("UPDATE_COMPLETE"), Configuration: &types.ClusterConfiguration{ ExecuteCommandConfiguration: &types.ExecuteCommandConfiguration{ - KmsKeyId: PtrString("id"), + KmsKeyId: new("id"), LogConfiguration: &types.ExecuteCommandLogConfiguration{ CloudWatchEncryptionEnabled: true, - CloudWatchLogGroupName: PtrString("cloud-watch-name"), - S3BucketName: PtrString("s3-name"), + CloudWatchLogGroupName: new("cloud-watch-name"), + S3BucketName: new("s3-name"), S3EncryptionEnabled: true, - S3KeyPrefix: PtrString("prod"), + S3KeyPrefix: new("prod"), }, }, }, ServiceConnectDefaults: &types.ClusterServiceConnectDefaults{ - Namespace: PtrString("prod"), + Namespace: new("prod"), }, }, }, diff --git 
a/aws-source/adapters/ecs-container-instance.go b/aws-source/adapters/ecs-container-instance.go index 021c1a3e..99255436 100644 --- a/aws-source/adapters/ecs-container-instance.go +++ b/aws-source/adapters/ecs-container-instance.go @@ -118,7 +118,7 @@ func NewECSContainerInstanceAdapter(client ECSClient, accountID string, region s Region: region, GetFunc: containerInstanceGetFunc, AdapterMetadata: containerInstanceAdapterMetadata, - cache: cache, + cache: cache, GetInputMapper: func(scope, query string) *ecs.DescribeContainerInstancesInput { // We are using a custom id of {clusterName}/{id} e.g. // ecs-template-ECSCluster-8nS0WOLbs3nZ/50e9bf71ed57450ca56293cc5a042886 @@ -144,7 +144,7 @@ func NewECSContainerInstanceAdapter(client ECSClient, accountID string, region s SearchInputMapper: func(scope, query string) (*ecs.ListContainerInstancesInput, error) { // Custom search by cluster return &ecs.ListContainerInstancesInput{ - Cluster: PtrString(query), + Cluster: new(query), }, nil }, ListFuncOutputMapper: containerInstanceListFuncOutputMapper, diff --git a/aws-source/adapters/ecs-container-instance_test.go b/aws-source/adapters/ecs-container-instance_test.go index 6a9b8632..bdc53b47 100644 --- a/aws-source/adapters/ecs-container-instance_test.go +++ b/aws-source/adapters/ecs-container-instance_test.go @@ -15,32 +15,32 @@ func (t *ecsTestClient) DescribeContainerInstances(ctx context.Context, params * return &ecs.DescribeContainerInstancesOutput{ ContainerInstances: []types.ContainerInstance{ { - ContainerInstanceArn: PtrString("arn:aws:ecs:eu-west-1:052392120703:container-instance/ecs-template-ECSCluster-8nS0WOLbs3nZ/50e9bf71ed57450ca56293cc5a042886"), - Ec2InstanceId: PtrString("i-0e778f25705bc0c84"), // link + ContainerInstanceArn: new("arn:aws:ecs:eu-west-1:052392120703:container-instance/ecs-template-ECSCluster-8nS0WOLbs3nZ/50e9bf71ed57450ca56293cc5a042886"), + Ec2InstanceId: new("i-0e778f25705bc0c84"), // link Version: 4, VersionInfo: &types.VersionInfo{ - 
AgentVersion: PtrString("1.47.0"), - AgentHash: PtrString("1489adfa"), - DockerVersion: PtrString("DockerVersion: 19.03.6-ce"), + AgentVersion: new("1.47.0"), + AgentHash: new("1489adfa"), + DockerVersion: new("DockerVersion: 19.03.6-ce"), }, RemainingResources: []types.Resource{ { - Name: PtrString("CPU"), - Type: PtrString("INTEGER"), + Name: new("CPU"), + Type: new("INTEGER"), DoubleValue: 0.0, LongValue: 0, IntegerValue: 2028, }, { - Name: PtrString("MEMORY"), - Type: PtrString("INTEGER"), + Name: new("MEMORY"), + Type: new("INTEGER"), DoubleValue: 0.0, LongValue: 0, IntegerValue: 7474, }, { - Name: PtrString("PORTS"), - Type: PtrString("STRINGSET"), + Name: new("PORTS"), + Type: new("STRINGSET"), DoubleValue: 0.0, LongValue: 0, IntegerValue: 0, @@ -53,8 +53,8 @@ func (t *ecsTestClient) DescribeContainerInstances(ctx context.Context, params * }, }, { - Name: PtrString("PORTS_UDP"), - Type: PtrString("STRINGSET"), + Name: new("PORTS_UDP"), + Type: new("STRINGSET"), DoubleValue: 0.0, LongValue: 0, IntegerValue: 0, @@ -63,22 +63,22 @@ func (t *ecsTestClient) DescribeContainerInstances(ctx context.Context, params * }, RegisteredResources: []types.Resource{ { - Name: PtrString("CPU"), - Type: PtrString("INTEGER"), + Name: new("CPU"), + Type: new("INTEGER"), DoubleValue: 0.0, LongValue: 0, IntegerValue: 2048, }, { - Name: PtrString("MEMORY"), - Type: PtrString("INTEGER"), + Name: new("MEMORY"), + Type: new("INTEGER"), DoubleValue: 0.0, LongValue: 0, IntegerValue: 7974, }, { - Name: PtrString("PORTS"), - Type: PtrString("STRINGSET"), + Name: new("PORTS"), + Type: new("STRINGSET"), DoubleValue: 0.0, LongValue: 0, IntegerValue: 0, @@ -91,223 +91,223 @@ func (t *ecsTestClient) DescribeContainerInstances(ctx context.Context, params * }, }, { - Name: PtrString("PORTS_UDP"), - Type: PtrString("STRINGSET"), + Name: new("PORTS_UDP"), + Type: new("STRINGSET"), DoubleValue: 0.0, LongValue: 0, IntegerValue: 0, StringSetValue: []string{}, }, }, - Status: PtrString("ACTIVE"), + 
Status: new("ACTIVE"), AgentConnected: true, RunningTasksCount: 1, PendingTasksCount: 0, Attributes: []types.Attribute{ { - Name: PtrString("ecs.capability.secrets.asm.environment-variables"), + Name: new("ecs.capability.secrets.asm.environment-variables"), }, { - Name: PtrString("ecs.capability.branch-cni-plugin-version"), - Value: PtrString("a21d3a41-"), + Name: new("ecs.capability.branch-cni-plugin-version"), + Value: new("a21d3a41-"), }, { - Name: PtrString("ecs.ami-id"), - Value: PtrString("ami-0c9ef930279337028"), + Name: new("ecs.ami-id"), + Value: new("ami-0c9ef930279337028"), }, { - Name: PtrString("ecs.capability.secrets.asm.bootstrap.log-driver"), + Name: new("ecs.capability.secrets.asm.bootstrap.log-driver"), }, { - Name: PtrString("ecs.capability.task-eia.optimized-cpu"), + Name: new("ecs.capability.task-eia.optimized-cpu"), }, { - Name: PtrString("com.amazonaws.ecs.capability.logging-driver.none"), + Name: new("com.amazonaws.ecs.capability.logging-driver.none"), }, { - Name: PtrString("ecs.capability.ecr-endpoint"), + Name: new("ecs.capability.ecr-endpoint"), }, { - Name: PtrString("ecs.capability.docker-plugin.local"), + Name: new("ecs.capability.docker-plugin.local"), }, { - Name: PtrString("ecs.capability.task-cpu-mem-limit"), + Name: new("ecs.capability.task-cpu-mem-limit"), }, { - Name: PtrString("ecs.capability.secrets.ssm.bootstrap.log-driver"), + Name: new("ecs.capability.secrets.ssm.bootstrap.log-driver"), }, { - Name: PtrString("ecs.capability.efsAuth"), + Name: new("ecs.capability.efsAuth"), }, { - Name: PtrString("ecs.capability.full-sync"), + Name: new("ecs.capability.full-sync"), }, { - Name: PtrString("com.amazonaws.ecs.capability.docker-remote-api.1.30"), + Name: new("com.amazonaws.ecs.capability.docker-remote-api.1.30"), }, { - Name: PtrString("com.amazonaws.ecs.capability.docker-remote-api.1.31"), + Name: new("com.amazonaws.ecs.capability.docker-remote-api.1.31"), }, { - Name: 
PtrString("com.amazonaws.ecs.capability.docker-remote-api.1.32"), + Name: new("com.amazonaws.ecs.capability.docker-remote-api.1.32"), }, { - Name: PtrString("com.amazonaws.ecs.capability.logging-driver.fluentd"), + Name: new("com.amazonaws.ecs.capability.logging-driver.fluentd"), }, { - Name: PtrString("ecs.capability.firelens.options.config.file"), + Name: new("ecs.capability.firelens.options.config.file"), }, { - Name: PtrString("ecs.availability-zone"), - Value: PtrString("eu-west-1a"), + Name: new("ecs.availability-zone"), + Value: new("eu-west-1a"), }, { - Name: PtrString("ecs.capability.aws-appmesh"), + Name: new("ecs.capability.aws-appmesh"), }, { - Name: PtrString("com.amazonaws.ecs.capability.logging-driver.awslogs"), + Name: new("com.amazonaws.ecs.capability.logging-driver.awslogs"), }, { - Name: PtrString("com.amazonaws.ecs.capability.docker-remote-api.1.24"), + Name: new("com.amazonaws.ecs.capability.docker-remote-api.1.24"), }, { - Name: PtrString("ecs.capability.task-eni-trunking"), + Name: new("ecs.capability.task-eni-trunking"), }, { - Name: PtrString("com.amazonaws.ecs.capability.docker-remote-api.1.25"), + Name: new("com.amazonaws.ecs.capability.docker-remote-api.1.25"), }, { - Name: PtrString("com.amazonaws.ecs.capability.docker-remote-api.1.26"), + Name: new("com.amazonaws.ecs.capability.docker-remote-api.1.26"), }, { - Name: PtrString("com.amazonaws.ecs.capability.docker-remote-api.1.27"), + Name: new("com.amazonaws.ecs.capability.docker-remote-api.1.27"), }, { - Name: PtrString("com.amazonaws.ecs.capability.privileged-container"), + Name: new("com.amazonaws.ecs.capability.privileged-container"), }, { - Name: PtrString("com.amazonaws.ecs.capability.docker-remote-api.1.28"), + Name: new("com.amazonaws.ecs.capability.docker-remote-api.1.28"), }, { - Name: PtrString("com.amazonaws.ecs.capability.docker-remote-api.1.29"), + Name: new("com.amazonaws.ecs.capability.docker-remote-api.1.29"), }, { - Name: PtrString("ecs.cpu-architecture"), - Value: 
PtrString("x86_64"), + Name: new("ecs.cpu-architecture"), + Value: new("x86_64"), }, { - Name: PtrString("com.amazonaws.ecs.capability.ecr-auth"), + Name: new("com.amazonaws.ecs.capability.ecr-auth"), }, { - Name: PtrString("ecs.capability.firelens.fluentbit"), + Name: new("ecs.capability.firelens.fluentbit"), }, { - Name: PtrString("com.amazonaws.ecs.capability.docker-remote-api.1.20"), + Name: new("com.amazonaws.ecs.capability.docker-remote-api.1.20"), }, { - Name: PtrString("ecs.os-type"), - Value: PtrString("linux"), + Name: new("ecs.os-type"), + Value: new("linux"), }, { - Name: PtrString("com.amazonaws.ecs.capability.docker-remote-api.1.21"), + Name: new("com.amazonaws.ecs.capability.docker-remote-api.1.21"), }, { - Name: PtrString("com.amazonaws.ecs.capability.docker-remote-api.1.22"), + Name: new("com.amazonaws.ecs.capability.docker-remote-api.1.22"), }, { - Name: PtrString("com.amazonaws.ecs.capability.docker-remote-api.1.23"), + Name: new("com.amazonaws.ecs.capability.docker-remote-api.1.23"), }, { - Name: PtrString("ecs.capability.task-eia"), + Name: new("ecs.capability.task-eia"), }, { - Name: PtrString("ecs.capability.private-registry-authentication.secretsmanager"), + Name: new("ecs.capability.private-registry-authentication.secretsmanager"), }, { - Name: PtrString("com.amazonaws.ecs.capability.logging-driver.syslog"), + Name: new("com.amazonaws.ecs.capability.logging-driver.syslog"), }, { - Name: PtrString("com.amazonaws.ecs.capability.logging-driver.awsfirelens"), + Name: new("com.amazonaws.ecs.capability.logging-driver.awsfirelens"), }, { - Name: PtrString("ecs.capability.firelens.options.config.s3"), + Name: new("ecs.capability.firelens.options.config.s3"), }, { - Name: PtrString("com.amazonaws.ecs.capability.logging-driver.json-file"), + Name: new("com.amazonaws.ecs.capability.logging-driver.json-file"), }, { - Name: PtrString("ecs.capability.execution-role-awslogs"), + Name: new("ecs.capability.execution-role-awslogs"), }, { - Name: 
PtrString("ecs.vpc-id"), - Value: PtrString("vpc-0e120717a7263de70"), + Name: new("ecs.vpc-id"), + Value: new("vpc-0e120717a7263de70"), }, { - Name: PtrString("com.amazonaws.ecs.capability.docker-remote-api.1.17"), + Name: new("com.amazonaws.ecs.capability.docker-remote-api.1.17"), }, { - Name: PtrString("com.amazonaws.ecs.capability.docker-remote-api.1.18"), + Name: new("com.amazonaws.ecs.capability.docker-remote-api.1.18"), }, { - Name: PtrString("com.amazonaws.ecs.capability.docker-remote-api.1.19"), + Name: new("com.amazonaws.ecs.capability.docker-remote-api.1.19"), }, { - Name: PtrString("ecs.capability.docker-plugin.amazon-ecs-volume-plugin"), + Name: new("ecs.capability.docker-plugin.amazon-ecs-volume-plugin"), }, { - Name: PtrString("ecs.capability.task-eni"), + Name: new("ecs.capability.task-eni"), }, { - Name: PtrString("ecs.capability.firelens.fluentd"), + Name: new("ecs.capability.firelens.fluentd"), }, { - Name: PtrString("ecs.capability.efs"), + Name: new("ecs.capability.efs"), }, { - Name: PtrString("ecs.capability.execution-role-ecr-pull"), + Name: new("ecs.capability.execution-role-ecr-pull"), }, { - Name: PtrString("ecs.capability.task-eni.ipv6"), + Name: new("ecs.capability.task-eni.ipv6"), }, { - Name: PtrString("ecs.capability.container-health-check"), + Name: new("ecs.capability.container-health-check"), }, { - Name: PtrString("ecs.subnet-id"), - Value: PtrString("subnet-0bfdb717a234c01b3"), + Name: new("ecs.subnet-id"), + Value: new("subnet-0bfdb717a234c01b3"), }, { - Name: PtrString("ecs.instance-type"), - Value: PtrString("t2.large"), + Name: new("ecs.instance-type"), + Value: new("t2.large"), }, { - Name: PtrString("com.amazonaws.ecs.capability.task-iam-role-network-host"), + Name: new("com.amazonaws.ecs.capability.task-iam-role-network-host"), }, { - Name: PtrString("ecs.capability.container-ordering"), + Name: new("ecs.capability.container-ordering"), }, { - Name: PtrString("ecs.capability.cni-plugin-version"), - Value: 
PtrString("55b2ae77-2020.09.0"), + Name: new("ecs.capability.cni-plugin-version"), + Value: new("55b2ae77-2020.09.0"), }, { - Name: PtrString("ecs.capability.env-files.s3"), + Name: new("ecs.capability.env-files.s3"), }, { - Name: PtrString("ecs.capability.pid-ipc-namespace-sharing"), + Name: new("ecs.capability.pid-ipc-namespace-sharing"), }, { - Name: PtrString("ecs.capability.secrets.ssm.environment-variables"), + Name: new("ecs.capability.secrets.ssm.environment-variables"), }, { - Name: PtrString("com.amazonaws.ecs.capability.task-iam-role"), + Name: new("com.amazonaws.ecs.capability.task-iam-role"), }, }, - RegisteredAt: PtrTime(time.Now()), + RegisteredAt: new(time.Now()), Attachments: []types.Attachment{}, // There is probably an opportunity for some links here but I don't have example data Tags: []types.Tag{}, AgentUpdateStatus: types.AgentUpdateStatusFailed, - CapacityProviderName: PtrString("name"), + CapacityProviderName: new("name"), HealthStatus: &types.ContainerInstanceHealthStatus{ OverallStatus: types.InstanceHealthCheckStateImpaired, }, diff --git a/aws-source/adapters/ecs-service.go b/aws-source/adapters/ecs-service.go index fcbbc3ff..66cf2fb8 100644 --- a/aws-source/adapters/ecs-service.go +++ b/aws-source/adapters/ecs-service.go @@ -304,7 +304,7 @@ func NewECSServiceAdapter(client ECSClient, accountID string, region string, cac GetFunc: serviceGetFunc, DisableList: true, AdapterMetadata: ecsServiceAdapterMetadata, - cache: cache, + cache: cache, GetInputMapper: func(scope, query string) *ecs.DescribeServicesInput { // We are using a custom id of {clusterName}/{id} e.g. 
// ecs-template-ECSCluster-8nS0WOLbs3nZ/ecs-template-service-i0mQKzkhDI2C @@ -329,7 +329,7 @@ func NewECSServiceAdapter(client ECSClient, accountID string, region string, cac SearchInputMapper: func(scope, query string) (*ecs.ListServicesInput, error) { // Custom search by cluster return &ecs.ListServicesInput{ - Cluster: PtrString(query), + Cluster: new(query), }, nil }, ListFuncOutputMapper: serviceListFuncOutputMapper, diff --git a/aws-source/adapters/ecs-service_test.go b/aws-source/adapters/ecs-service_test.go index b7e6683d..a6921d25 100644 --- a/aws-source/adapters/ecs-service_test.go +++ b/aws-source/adapters/ecs-service_test.go @@ -16,37 +16,37 @@ func (t *ecsTestClient) DescribeServices(ctx context.Context, params *ecs.Descri Failures: []types.Failure{}, Services: []types.Service{ { - ServiceArn: PtrString("arn:aws:ecs:eu-west-1:052392120703:service/ecs-template-ECSCluster-8nS0WOLbs3nZ/ecs-template-service-i0mQKzkhDI2C"), - ServiceName: PtrString("ecs-template-service-i0mQKzkhDI2C"), - ClusterArn: PtrString("arn:aws:ecs:eu-west-1:052392120703:cluster/ecs-template-ECSCluster-8nS0WOLbs3nZ"), // link + ServiceArn: new("arn:aws:ecs:eu-west-1:052392120703:service/ecs-template-ECSCluster-8nS0WOLbs3nZ/ecs-template-service-i0mQKzkhDI2C"), + ServiceName: new("ecs-template-service-i0mQKzkhDI2C"), + ClusterArn: new("arn:aws:ecs:eu-west-1:052392120703:cluster/ecs-template-ECSCluster-8nS0WOLbs3nZ"), // link LoadBalancers: []types.LoadBalancer{ { - TargetGroupArn: PtrString("arn:aws:elasticloadbalancing:eu-west-1:052392120703:targetgroup/ECSTG/0c44b1cdb3437902"), // link - ContainerName: PtrString("simple-app"), - ContainerPort: PtrInt32(80), + TargetGroupArn: new("arn:aws:elasticloadbalancing:eu-west-1:052392120703:targetgroup/ECSTG/0c44b1cdb3437902"), // link + ContainerName: new("simple-app"), + ContainerPort: new(int32(80)), }, }, ServiceRegistries: []types.ServiceRegistry{ { - ContainerName: PtrString("name"), - ContainerPort: PtrInt32(80), - Port: PtrInt32(80), - 
RegistryArn: PtrString("arn:aws:service:region:account:type:name"), // link + ContainerName: new("name"), + ContainerPort: new(int32(80)), + Port: new(int32(80)), + RegistryArn: new("arn:aws:service:region:account:type:name"), // link }, }, - Status: PtrString("ACTIVE"), + Status: new("ACTIVE"), DesiredCount: 1, RunningCount: 1, PendingCount: 0, LaunchType: types.LaunchTypeEc2, - TaskDefinition: PtrString("arn:aws:ecs:eu-west-1:052392120703:task-definition/ecs-template-ecs-demo-app:1"), // link + TaskDefinition: new("arn:aws:ecs:eu-west-1:052392120703:task-definition/ecs-template-ecs-demo-app:1"), // link DeploymentConfiguration: &types.DeploymentConfiguration{ DeploymentCircuitBreaker: &types.DeploymentCircuitBreaker{ Enable: false, Rollback: false, }, - MaximumPercent: PtrInt32(200), - MinimumHealthyPercent: PtrInt32(100), + MaximumPercent: new(int32(200)), + MinimumHealthyPercent: new(int32(100)), Alarms: &types.DeploymentAlarms{ AlarmNames: []string{ "foo", @@ -57,21 +57,21 @@ func (t *ecsTestClient) DescribeServices(ctx context.Context, params *ecs.Descri }, Deployments: []types.Deployment{ { - Id: PtrString("ecs-svc/6893472562508357546"), - Status: PtrString("PRIMARY"), - TaskDefinition: PtrString("arn:aws:ecs:eu-west-1:052392120703:task-definition/ecs-template-ecs-demo-app:1"), // link + Id: new("ecs-svc/6893472562508357546"), + Status: new("PRIMARY"), + TaskDefinition: new("arn:aws:ecs:eu-west-1:052392120703:task-definition/ecs-template-ecs-demo-app:1"), // link DesiredCount: 1, PendingCount: 0, RunningCount: 1, FailedTasks: 0, - CreatedAt: PtrTime(time.Now()), - UpdatedAt: PtrTime(time.Now()), + CreatedAt: new(time.Now()), + UpdatedAt: new(time.Now()), LaunchType: types.LaunchTypeEc2, RolloutState: types.DeploymentRolloutStateCompleted, - RolloutStateReason: PtrString("ECS deployment ecs-svc/6893472562508357546 completed."), + RolloutStateReason: new("ECS deployment ecs-svc/6893472562508357546 completed."), CapacityProviderStrategy: 
[]types.CapacityProviderStrategyItem{ { - CapacityProvider: PtrString("provider"), // link + CapacityProvider: new("provider"), // link Base: 10, Weight: 10, }, @@ -87,8 +87,8 @@ func (t *ecsTestClient) DescribeServices(ctx context.Context, params *ecs.Descri }, }, }, - PlatformFamily: PtrString("foo"), - PlatformVersion: PtrString("LATEST"), + PlatformFamily: new("foo"), + PlatformVersion: new("LATEST"), ServiceConnectConfiguration: &types.ServiceConnectConfiguration{ Enabled: true, LogConfiguration: &types.LogConfiguration{ @@ -96,19 +96,19 @@ func (t *ecsTestClient) DescribeServices(ctx context.Context, params *ecs.Descri Options: map[string]string{}, SecretOptions: []types.Secret{ { - Name: PtrString("something"), - ValueFrom: PtrString("somewhere"), + Name: new("something"), + ValueFrom: new("somewhere"), }, }, }, - Namespace: PtrString("namespace"), + Namespace: new("namespace"), Services: []types.ServiceConnectService{ { - PortName: PtrString("http"), + PortName: new("http"), ClientAliases: []types.ServiceConnectClientAlias{ { - Port: PtrInt32(80), - DnsName: PtrString("www.foo.com"), // link + Port: new(int32(80)), + DnsName: new("www.foo.com"), // link }, }, }, @@ -116,65 +116,65 @@ func (t *ecsTestClient) DescribeServices(ctx context.Context, params *ecs.Descri }, ServiceConnectResources: []types.ServiceConnectServiceResource{ { - DiscoveryArn: PtrString("arn:aws:service:region:account:layer:name:version"), // link - DiscoveryName: PtrString("name"), + DiscoveryArn: new("arn:aws:service:region:account:layer:name:version"), // link + DiscoveryName: new("name"), }, }, }, }, - RoleArn: PtrString("arn:aws:iam::052392120703:role/ecs-template-ECSServiceRole-1IL5CNMR1600J"), + RoleArn: new("arn:aws:iam::052392120703:role/ecs-template-ECSServiceRole-1IL5CNMR1600J"), Events: []types.ServiceEvent{ { - Id: PtrString("a727ef2a-8a38-4746-905e-b529c952edee"), - CreatedAt: PtrTime(time.Now()), - Message: PtrString("(service ecs-template-service-i0mQKzkhDI2C) has reached 
a steady state."), + Id: new("a727ef2a-8a38-4746-905e-b529c952edee"), + CreatedAt: new(time.Now()), + Message: new("(service ecs-template-service-i0mQKzkhDI2C) has reached a steady state."), }, { - Id: PtrString("69489991-f8ee-42a2-94f2-db8ffeda1ee7"), - CreatedAt: PtrTime(time.Now()), - Message: PtrString("(service ecs-template-service-i0mQKzkhDI2C) (deployment ecs-svc/6893472562508357546) deployment completed."), + Id: new("69489991-f8ee-42a2-94f2-db8ffeda1ee7"), + CreatedAt: new(time.Now()), + Message: new("(service ecs-template-service-i0mQKzkhDI2C) (deployment ecs-svc/6893472562508357546) deployment completed."), }, { - Id: PtrString("9ce65c4b-2993-477d-aa83-dbe98988f90b"), - CreatedAt: PtrTime(time.Now()), - Message: PtrString("(service ecs-template-service-i0mQKzkhDI2C) registered 1 targets in (target-group arn:aws:elasticloadbalancing:eu-west-1:052392120703:targetgroup/ECSTG/0c44b1cdb3437902)"), + Id: new("9ce65c4b-2993-477d-aa83-dbe98988f90b"), + CreatedAt: new(time.Now()), + Message: new("(service ecs-template-service-i0mQKzkhDI2C) registered 1 targets in (target-group arn:aws:elasticloadbalancing:eu-west-1:052392120703:targetgroup/ECSTG/0c44b1cdb3437902)"), }, { - Id: PtrString("753e988a-9fb9-4907-b801-5f67369bc0de"), - CreatedAt: PtrTime(time.Now()), - Message: PtrString("(service ecs-template-service-i0mQKzkhDI2C) has started 1 tasks: (task 53074e0156204f30a3cea97e7bf32d31)."), + Id: new("753e988a-9fb9-4907-b801-5f67369bc0de"), + CreatedAt: new(time.Now()), + Message: new("(service ecs-template-service-i0mQKzkhDI2C) has started 1 tasks: (task 53074e0156204f30a3cea97e7bf32d31)."), }, { - Id: PtrString("deb2400b-a776-4ebe-8c97-f94feef2b780"), - CreatedAt: PtrTime(time.Now()), - Message: PtrString("(service ecs-template-service-i0mQKzkhDI2C) was unable to place a task because no container instance met all of its requirements. Reason: No Container Instances were found in your cluster. 
For more information, see the Troubleshooting section of the Amazon ECS Developer Guide."), + Id: new("deb2400b-a776-4ebe-8c97-f94feef2b780"), + CreatedAt: new(time.Now()), + Message: new("(service ecs-template-service-i0mQKzkhDI2C) was unable to place a task because no container instance met all of its requirements. Reason: No Container Instances were found in your cluster. For more information, see the Troubleshooting section of the Amazon ECS Developer Guide."), }, }, - CreatedAt: PtrTime(time.Now()), + CreatedAt: new(time.Now()), PlacementConstraints: []types.PlacementConstraint{ { - Expression: PtrString("expression"), + Expression: new("expression"), Type: types.PlacementConstraintTypeDistinctInstance, }, }, PlacementStrategy: []types.PlacementStrategy{ { - Field: PtrString("field"), + Field: new("field"), Type: types.PlacementStrategyTypeSpread, }, }, - HealthCheckGracePeriodSeconds: PtrInt32(0), + HealthCheckGracePeriodSeconds: new(int32(0)), SchedulingStrategy: types.SchedulingStrategyReplica, DeploymentController: &types.DeploymentController{ Type: types.DeploymentControllerTypeEcs, }, - CreatedBy: PtrString("arn:aws:iam::052392120703:role/aws-reserved/sso.amazonaws.com/eu-west-2/AWSReservedSSO_AWSAdministratorAccess_c1c3c9c54821c68a"), + CreatedBy: new("arn:aws:iam::052392120703:role/aws-reserved/sso.amazonaws.com/eu-west-2/AWSReservedSSO_AWSAdministratorAccess_c1c3c9c54821c68a"), EnableECSManagedTags: false, PropagateTags: types.PropagateTagsNone, EnableExecuteCommand: false, CapacityProviderStrategy: []types.CapacityProviderStrategyItem{ { - CapacityProvider: PtrString("provider"), + CapacityProvider: new("provider"), Base: 10, Weight: 10, }, @@ -190,15 +190,15 @@ func (t *ecsTestClient) DescribeServices(ctx context.Context, params *ecs.Descri }, }, }, - PlatformFamily: PtrString("family"), - PlatformVersion: PtrString("LATEST"), + PlatformFamily: new("family"), + PlatformVersion: new("LATEST"), Tags: []types.Tag{}, TaskSets: []types.TaskSet{ // This 
seems to be able to return the *entire* task set, // which is redundant info. We should remove everything // other than the IDs { - Id: PtrString("id"), // link, then remove + Id: new("id"), // link, then remove }, }, }, diff --git a/aws-source/adapters/ecs-task-definition.go b/aws-source/adapters/ecs-task-definition.go index 41e691f6..5756feae 100644 --- a/aws-source/adapters/ecs-task-definition.go +++ b/aws-source/adapters/ecs-task-definition.go @@ -159,12 +159,12 @@ func NewECSTaskDefinitionAdapter(client ECSClient, accountID string, region stri GetFunc: taskDefinitionGetFunc, ListInput: &ecs.ListTaskDefinitionsInput{}, AdapterMetadata: taskDefinitionAdapterMetadata, - cache: cache, + cache: cache, GetInputMapper: func(scope, query string) *ecs.DescribeTaskDefinitionInput { // AWS actually supports "family:revision" format as an input here // so we can just push it in directly return &ecs.DescribeTaskDefinitionInput{ - TaskDefinition: PtrString(query), + TaskDefinition: new(query), } }, ListFuncPaginatorBuilder: func(client ECSClient, input *ecs.ListTaskDefinitionsInput) Paginator[*ecs.ListTaskDefinitionsOutput, *ecs.Options] { @@ -176,7 +176,7 @@ func NewECSTaskDefinitionAdapter(client ECSClient, accountID string, region stri for _, arn := range output.TaskDefinitionArns { if a, err := ParseARN(arn); err == nil { getInputs = append(getInputs, &ecs.DescribeTaskDefinitionInput{ - TaskDefinition: PtrString(a.ResourceID()), + TaskDefinition: new(a.ResourceID()), }) } } diff --git a/aws-source/adapters/ecs-task-definition_test.go b/aws-source/adapters/ecs-task-definition_test.go index 4b7ce3cb..e4b772f2 100644 --- a/aws-source/adapters/ecs-task-definition_test.go +++ b/aws-source/adapters/ecs-task-definition_test.go @@ -14,60 +14,60 @@ import ( func (t *ecsTestClient) DescribeTaskDefinition(ctx context.Context, params *ecs.DescribeTaskDefinitionInput, optFns ...func(*ecs.Options)) (*ecs.DescribeTaskDefinitionOutput, error) { return &ecs.DescribeTaskDefinitionOutput{ 
TaskDefinition: &types.TaskDefinition{ - TaskDefinitionArn: PtrString("arn:aws:ecs:eu-west-1:052392120703:task-definition/ecs-template-ecs-demo-app:1"), + TaskDefinitionArn: new("arn:aws:ecs:eu-west-1:052392120703:task-definition/ecs-template-ecs-demo-app:1"), ContainerDefinitions: []types.ContainerDefinition{ { - Name: PtrString("simple-app"), - Image: PtrString("httpd:2.4"), + Name: new("simple-app"), + Image: new("httpd:2.4"), Cpu: 10, - Memory: PtrInt32(300), + Memory: new(int32(300)), Links: []string{}, PortMappings: []types.PortMapping{ { - ContainerPort: PtrInt32(80), - HostPort: PtrInt32(0), + ContainerPort: new(int32(80)), + HostPort: new(int32(0)), Protocol: types.TransportProtocolTcp, AppProtocol: types.ApplicationProtocolHttp, }, }, - Essential: PtrBool(true), + Essential: new(true), EntryPoint: []string{}, Command: []string{}, Environment: []types.KeyValuePair{ { - Name: PtrString("DATABASE_SERVER"), - Value: PtrString("database01.my-company.com"), + Name: new("DATABASE_SERVER"), + Value: new("database01.my-company.com"), }, }, EnvironmentFiles: []types.EnvironmentFile{}, MountPoints: []types.MountPoint{ { - SourceVolume: PtrString("my-vol"), - ContainerPath: PtrString("/usr/local/apache2/htdocs"), - ReadOnly: PtrBool(false), + SourceVolume: new("my-vol"), + ContainerPath: new("/usr/local/apache2/htdocs"), + ReadOnly: new(false), }, }, VolumesFrom: []types.VolumeFrom{ { - SourceContainer: PtrString("container"), + SourceContainer: new("container"), }, }, Secrets: []types.Secret{ { - Name: PtrString("secrets-manager"), - ValueFrom: PtrString("arn:aws:secretsmanager:us-west-2:123456789012:secret:my-path/my-secret-name-1a2b3c"), // link + Name: new("secrets-manager"), + ValueFrom: new("arn:aws:secretsmanager:us-west-2:123456789012:secret:my-path/my-secret-name-1a2b3c"), // link }, { - Name: PtrString("ssm"), - ValueFrom: PtrString("arn:aws:ssm:us-east-2:123456789012:parameter/prod-123"), // link + Name: new("ssm"), + ValueFrom: 
new("arn:aws:ssm:us-east-2:123456789012:parameter/prod-123"), // link }, }, DnsServers: []string{}, DnsSearchDomains: []string{}, ExtraHosts: []types.HostEntry{ { - Hostname: PtrString("host"), - IpAddress: PtrString("127.0.0.1"), + Hostname: new("host"), + IpAddress: new("127.0.0.1"), }, }, DockerSecurityOptions: []string{}, @@ -82,43 +82,43 @@ func (t *ecsTestClient) DescribeTaskDefinition(ctx context.Context, params *ecs. }, SecretOptions: []types.Secret{ { - Name: PtrString("secrets-manager"), - ValueFrom: PtrString("arn:aws:secretsmanager:us-west-2:123456789012:secret:my-path/my-secret-name-1a2b3c"), // link + Name: new("secrets-manager"), + ValueFrom: new("arn:aws:secretsmanager:us-west-2:123456789012:secret:my-path/my-secret-name-1a2b3c"), // link }, { - Name: PtrString("ssm"), - ValueFrom: PtrString("arn:aws:ssm:us-east-2:123456789012:parameter/prod-123"), // link + Name: new("ssm"), + ValueFrom: new("arn:aws:ssm:us-east-2:123456789012:parameter/prod-123"), // link }, }, }, SystemControls: []types.SystemControl{}, DependsOn: []types.ContainerDependency{}, - DisableNetworking: PtrBool(false), + DisableNetworking: new(false), FirelensConfiguration: &types.FirelensConfiguration{ Type: types.FirelensConfigurationTypeFluentd, Options: map[string]string{}, }, HealthCheck: &types.HealthCheck{}, - Hostname: PtrString("hostname"), - Interactive: PtrBool(false), + Hostname: new("hostname"), + Interactive: new(false), LinuxParameters: &types.LinuxParameters{}, - MemoryReservation: PtrInt32(100), - Privileged: PtrBool(false), - PseudoTerminal: PtrBool(false), - ReadonlyRootFilesystem: PtrBool(false), + MemoryReservation: new(int32(100)), + Privileged: new(false), + PseudoTerminal: new(false), + ReadonlyRootFilesystem: new(false), RepositoryCredentials: &types.RepositoryCredentials{}, // Skipping the link here for now, if you need it, add it in a PR ResourceRequirements: []types.ResourceRequirement{}, - StartTimeout: PtrInt32(1), - StopTimeout: PtrInt32(1), - User: 
PtrString("foo"), - WorkingDirectory: PtrString("/"), + StartTimeout: new(int32(1)), + StopTimeout: new(int32(1)), + User: new("foo"), + WorkingDirectory: new("/"), }, { - Name: PtrString("busybox"), - Image: PtrString("busybox"), + Name: new("busybox"), + Image: new("busybox"), Cpu: 10, - Memory: PtrInt32(200), - Essential: PtrBool(false), + Memory: new(int32(200)), + Essential: new(false), EntryPoint: []string{ "sh", "-c", @@ -128,7 +128,7 @@ func (t *ecsTestClient) DescribeTaskDefinition(ctx context.Context, params *ecs. }, VolumesFrom: []types.VolumeFrom{ { - SourceContainer: PtrString("simple-app"), + SourceContainer: new("simple-app"), }, }, DockerLabels: map[string]string{}, @@ -142,29 +142,29 @@ func (t *ecsTestClient) DescribeTaskDefinition(ctx context.Context, params *ecs. }, }, }, - Family: PtrString("ecs-template-ecs-demo-app"), + Family: new("ecs-template-ecs-demo-app"), Revision: 1, Volumes: []types.Volume{ { - Name: PtrString("my-vol"), + Name: new("my-vol"), Host: &types.HostVolumeProperties{ - SourcePath: PtrString("/"), + SourcePath: new("/"), }, }, }, Status: types.TaskDefinitionStatusActive, RequiresAttributes: []types.Attribute{ { - Name: PtrString("com.amazonaws.ecs.capability.logging-driver.awslogs"), + Name: new("com.amazonaws.ecs.capability.logging-driver.awslogs"), }, { - Name: PtrString("com.amazonaws.ecs.capability.docker-remote-api.1.19"), + Name: new("com.amazonaws.ecs.capability.docker-remote-api.1.19"), }, { - Name: PtrString("com.amazonaws.ecs.capability.docker-remote-api.1.17"), + Name: new("com.amazonaws.ecs.capability.docker-remote-api.1.17"), }, { - Name: PtrString("com.amazonaws.ecs.capability.docker-remote-api.1.18"), + Name: new("com.amazonaws.ecs.capability.docker-remote-api.1.18"), }, }, PlacementConstraints: []types.TaskDefinitionPlacementConstraint{}, @@ -172,17 +172,17 @@ func (t *ecsTestClient) DescribeTaskDefinition(ctx context.Context, params *ecs. 
"EXTERNAL", "EC2", }, - RegisteredAt: PtrTime(time.Now()), - RegisteredBy: PtrString("arn:aws:sts::052392120703:assumed-role/AWSReservedSSO_AWSAdministratorAccess_c1c3c9c54821c68a/dylan@overmind.tech"), - Cpu: PtrString("cpu"), - DeregisteredAt: PtrTime(time.Now()), + RegisteredAt: new(time.Now()), + RegisteredBy: new("arn:aws:sts::052392120703:assumed-role/AWSReservedSSO_AWSAdministratorAccess_c1c3c9c54821c68a/dylan@overmind.tech"), + Cpu: new("cpu"), + DeregisteredAt: new(time.Now()), EphemeralStorage: &types.EphemeralStorage{ SizeInGiB: 1, }, - ExecutionRoleArn: PtrString("arn:aws:iam:us-east-2:123456789012:role/foo"), // link + ExecutionRoleArn: new("arn:aws:iam:us-east-2:123456789012:role/foo"), // link InferenceAccelerators: []types.InferenceAccelerator{}, IpcMode: types.IpcModeHost, - Memory: PtrString("memory"), + Memory: new("memory"), NetworkMode: types.NetworkModeAwsvpc, PidMode: types.PidModeHost, ProxyConfiguration: nil, @@ -191,7 +191,7 @@ func (t *ecsTestClient) DescribeTaskDefinition(ctx context.Context, params *ecs. 
CpuArchitecture: types.CPUArchitectureX8664, OperatingSystemFamily: types.OSFamilyLinux, }, - TaskRoleArn: PtrString("arn:aws:iam:us-east-2:123456789012:role/bar"), // link + TaskRoleArn: new("arn:aws:iam:us-east-2:123456789012:role/bar"), // link }, }, nil } @@ -206,7 +206,7 @@ func (t *ecsTestClient) ListTaskDefinitions(context.Context, *ecs.ListTaskDefini func TestTaskDefinitionGetFunc(t *testing.T) { item, err := taskDefinitionGetFunc(context.Background(), &ecsTestClient{}, "foo", &ecs.DescribeTaskDefinitionInput{ - TaskDefinition: PtrString("ecs-template-ecs-demo-app:1"), + TaskDefinition: new("ecs-template-ecs-demo-app:1"), }) if err != nil { diff --git a/aws-source/adapters/ecs-task.go b/aws-source/adapters/ecs-task.go index b301f1a7..69cf56ee 100644 --- a/aws-source/adapters/ecs-task.go +++ b/aws-source/adapters/ecs-task.go @@ -165,7 +165,7 @@ func taskGetInputMapper(scope, query string) *ecs.DescribeTasksInput { Tasks: []string{ sections[1], }, - Cluster: PtrString(sections[0]), + Cluster: new(sections[0]), Include: TaskIncludeFields, } } @@ -203,14 +203,14 @@ func NewECSTaskAdapter(client ECSClient, accountID string, region string, cache Region: region, GetFunc: taskGetFunc, AdapterMetadata: ecsTaskAdapterMetadata, - cache: cache, + cache: cache, ListInput: &ecs.ListTasksInput{}, GetInputMapper: taskGetInputMapper, DisableList: true, SearchInputMapper: func(scope, query string) (*ecs.ListTasksInput, error) { // Search by cluster return &ecs.ListTasksInput{ - Cluster: PtrString(query), + Cluster: new(query), }, nil }, ListFuncPaginatorBuilder: func(client ECSClient, input *ecs.ListTasksInput) Paginator[*ecs.ListTasksOutput, *ecs.Options] { diff --git a/aws-source/adapters/ecs-task_test.go b/aws-source/adapters/ecs-task_test.go index e0e8b1b0..9179758e 100644 --- a/aws-source/adapters/ecs-task_test.go +++ b/aws-source/adapters/ecs-task_test.go @@ -17,90 +17,90 @@ func (t *ecsTestClient) DescribeTasks(ctx context.Context, params *ecs.DescribeT { Attachments: 
[]types.Attachment{ { - Id: PtrString("id"), // link? - Status: PtrString("OK"), - Type: PtrString("ElasticNetworkInterface"), + Id: new("id"), // link? + Status: new("OK"), + Type: new("ElasticNetworkInterface"), }, }, Attributes: []types.Attribute{ { - Name: PtrString("ecs.cpu-architecture"), - Value: PtrString("x86_64"), + Name: new("ecs.cpu-architecture"), + Value: new("x86_64"), }, }, - AvailabilityZone: PtrString("eu-west-1c"), - ClusterArn: PtrString("arn:aws:ecs:eu-west-1:052392120703:cluster/test-ECSCluster-Bt4SqcM3CURk"), // link + AvailabilityZone: new("eu-west-1c"), + ClusterArn: new("arn:aws:ecs:eu-west-1:052392120703:cluster/test-ECSCluster-Bt4SqcM3CURk"), // link Connectivity: types.ConnectivityConnected, - ConnectivityAt: PtrTime(time.Now()), - ContainerInstanceArn: PtrString("arn:aws:ecs:eu-west-1:052392120703:container-instance/test-ECSCluster-Bt4SqcM3CURk/4b5c1d7dbb6746b38ada1b97b1866f6a"), // link + ConnectivityAt: new(time.Now()), + ContainerInstanceArn: new("arn:aws:ecs:eu-west-1:052392120703:container-instance/test-ECSCluster-Bt4SqcM3CURk/4b5c1d7dbb6746b38ada1b97b1866f6a"), // link Containers: []types.Container{ { - ContainerArn: PtrString("arn:aws:ecs:eu-west-1:052392120703:container/test-ECSCluster-Bt4SqcM3CURk/2ffd7ed376c841bcb0e6795ddb6e72e2/39a3ede1-1b28-472e-967a-d87d691f65e0"), - TaskArn: PtrString("arn:aws:ecs:eu-west-1:052392120703:task/test-ECSCluster-Bt4SqcM3CURk/2ffd7ed376c841bcb0e6795ddb6e72e2"), - Name: PtrString("busybox"), - Image: PtrString("busybox"), - RuntimeId: PtrString("7c158f5c2711416cbb6e653ad90997346489c9722c59d1115ad2121dd040748e"), - LastStatus: PtrString("RUNNING"), + ContainerArn: new("arn:aws:ecs:eu-west-1:052392120703:container/test-ECSCluster-Bt4SqcM3CURk/2ffd7ed376c841bcb0e6795ddb6e72e2/39a3ede1-1b28-472e-967a-d87d691f65e0"), + TaskArn: new("arn:aws:ecs:eu-west-1:052392120703:task/test-ECSCluster-Bt4SqcM3CURk/2ffd7ed376c841bcb0e6795ddb6e72e2"), + Name: new("busybox"), + Image: new("busybox"), + RuntimeId: 
new("7c158f5c2711416cbb6e653ad90997346489c9722c59d1115ad2121dd040748e"), + LastStatus: new("RUNNING"), NetworkBindings: []types.NetworkBinding{}, NetworkInterfaces: []types.NetworkInterface{}, HealthStatus: types.HealthStatusUnknown, - Cpu: PtrString("10"), - Memory: PtrString("200"), + Cpu: new("10"), + Memory: new("200"), }, { - ContainerArn: PtrString("arn:aws:ecs:eu-west-1:052392120703:container/test-ECSCluster-Bt4SqcM3CURk/2ffd7ed376c841bcb0e6795ddb6e72e2/8f3db814-6b39-4cc0-9d0a-a7d5702175eb"), - TaskArn: PtrString("arn:aws:ecs:eu-west-1:052392120703:task/test-ECSCluster-Bt4SqcM3CURk/2ffd7ed376c841bcb0e6795ddb6e72e2"), - Name: PtrString("simple-app"), - Image: PtrString("httpd:2.4"), - RuntimeId: PtrString("7316b64efb397cececce7cc5f39c6d48ab454f904cc80009aef5ed01ebdb1333"), - LastStatus: PtrString("RUNNING"), + ContainerArn: new("arn:aws:ecs:eu-west-1:052392120703:container/test-ECSCluster-Bt4SqcM3CURk/2ffd7ed376c841bcb0e6795ddb6e72e2/8f3db814-6b39-4cc0-9d0a-a7d5702175eb"), + TaskArn: new("arn:aws:ecs:eu-west-1:052392120703:task/test-ECSCluster-Bt4SqcM3CURk/2ffd7ed376c841bcb0e6795ddb6e72e2"), + Name: new("simple-app"), + Image: new("httpd:2.4"), + RuntimeId: new("7316b64efb397cececce7cc5f39c6d48ab454f904cc80009aef5ed01ebdb1333"), + LastStatus: new("RUNNING"), NetworkBindings: []types.NetworkBinding{ { - BindIP: PtrString("0.0.0.0"), // Link? NetworkSocket? - ContainerPort: PtrInt32(80), - HostPort: PtrInt32(32768), + BindIP: new("0.0.0.0"), // Link? NetworkSocket? 
+ ContainerPort: new(int32(80)), + HostPort: new(int32(32768)), Protocol: types.TransportProtocolTcp, }, }, NetworkInterfaces: []types.NetworkInterface{ { - AttachmentId: PtrString("attachmentId"), - Ipv6Address: PtrString("2001:db8:3333:4444:5555:6666:7777:8888"), // link - PrivateIpv4Address: PtrString("10.0.0.1"), // link + AttachmentId: new("attachmentId"), + Ipv6Address: new("2001:db8:3333:4444:5555:6666:7777:8888"), // link + PrivateIpv4Address: new("10.0.0.1"), // link }, }, HealthStatus: types.HealthStatusUnknown, - Cpu: PtrString("10"), - Memory: PtrString("300"), + Cpu: new("10"), + Memory: new("300"), }, }, - Cpu: PtrString("20"), - CreatedAt: PtrTime(time.Now()), - DesiredStatus: PtrString("RUNNING"), + Cpu: new("20"), + CreatedAt: new(time.Now()), + DesiredStatus: new("RUNNING"), EnableExecuteCommand: false, - Group: PtrString("service:test-service-lszmaXSqRKuF"), + Group: new("service:test-service-lszmaXSqRKuF"), HealthStatus: types.HealthStatusUnknown, - LastStatus: PtrString("RUNNING"), + LastStatus: new("RUNNING"), LaunchType: types.LaunchTypeEc2, - Memory: PtrString("500"), + Memory: new("500"), Overrides: &types.TaskOverride{ ContainerOverrides: []types.ContainerOverride{ { - Name: PtrString("busybox"), + Name: new("busybox"), }, { - Name: PtrString("simple-app"), + Name: new("simple-app"), }, }, InferenceAcceleratorOverrides: []types.InferenceAcceleratorOverride{}, }, - PullStartedAt: PtrTime(time.Now()), - PullStoppedAt: PtrTime(time.Now()), - StartedAt: PtrTime(time.Now()), - StartedBy: PtrString("ecs-svc/0710912874193920929"), + PullStartedAt: new(time.Now()), + PullStoppedAt: new(time.Now()), + StartedAt: new(time.Now()), + StartedBy: new("ecs-svc/0710912874193920929"), Tags: []types.Tag{}, - TaskArn: PtrString("arn:aws:ecs:eu-west-1:052392120703:task/test-ECSCluster-Bt4SqcM3CURk/2ffd7ed376c841bcb0e6795ddb6e72e2"), - TaskDefinitionArn: PtrString("arn:aws:ecs:eu-west-1:052392120703:task-definition/test-ecs-demo-app:1"), // link + TaskArn: 
new("arn:aws:ecs:eu-west-1:052392120703:task/test-ECSCluster-Bt4SqcM3CURk/2ffd7ed376c841bcb0e6795ddb6e72e2"), + TaskDefinitionArn: new("arn:aws:ecs:eu-west-1:052392120703:task-definition/test-ecs-demo-app:1"), // link Version: 3, EphemeralStorage: &types.EphemeralStorage{ SizeInGiB: 1, diff --git a/aws-source/adapters/efs-access-point_test.go b/aws-source/adapters/efs-access-point_test.go index 2a47b2a6..0c29c7c9 100644 --- a/aws-source/adapters/efs-access-point_test.go +++ b/aws-source/adapters/efs-access-point_test.go @@ -15,32 +15,32 @@ func TestAccessPointOutputMapper(t *testing.T) { output := &efs.DescribeAccessPointsOutput{ AccessPoints: []types.AccessPointDescription{ { - AccessPointArn: PtrString("arn:aws:elasticfilesystem:eu-west-2:944651592624:access-point/fsap-073b1534eafbc5ee2"), - AccessPointId: PtrString("fsap-073b1534eafbc5ee2"), - ClientToken: PtrString("pvc-66e4418c-edf5-4a0e-9834-5945598d51fe"), - FileSystemId: PtrString("fs-0c6f2f41e957f42a9"), + AccessPointArn: new("arn:aws:elasticfilesystem:eu-west-2:944651592624:access-point/fsap-073b1534eafbc5ee2"), + AccessPointId: new("fsap-073b1534eafbc5ee2"), + ClientToken: new("pvc-66e4418c-edf5-4a0e-9834-5945598d51fe"), + FileSystemId: new("fs-0c6f2f41e957f42a9"), LifeCycleState: types.LifeCycleStateAvailable, - Name: PtrString("example access point"), - OwnerId: PtrString("944651592624"), + Name: new("example access point"), + OwnerId: new("944651592624"), PosixUser: &types.PosixUser{ - Gid: PtrInt64(1000), - Uid: PtrInt64(1000), + Gid: new(int64(1000)), + Uid: new(int64(1000)), SecondaryGids: []int64{ 1002, }, }, RootDirectory: &types.RootDirectory{ CreationInfo: &types.CreationInfo{ - OwnerGid: PtrInt64(1000), - OwnerUid: PtrInt64(1000), - Permissions: PtrString("700"), + OwnerGid: new(int64(1000)), + OwnerUid: new(int64(1000)), + Permissions: new("700"), }, - Path: PtrString("/etc/foo"), + Path: new("/etc/foo"), }, Tags: []types.Tag{ { - Key: PtrString("Name"), - Value: PtrString("example access 
point"), + Key: new("Name"), + Value: new("example access point"), }, }, }, diff --git a/aws-source/adapters/efs-backup-policy_test.go b/aws-source/adapters/efs-backup-policy_test.go index 7cb04558..560bb827 100644 --- a/aws-source/adapters/efs-backup-policy_test.go +++ b/aws-source/adapters/efs-backup-policy_test.go @@ -16,7 +16,7 @@ func TestBackupPolicyOutputMapper(t *testing.T) { } items, err := BackupPolicyOutputMapper(context.Background(), nil, "foo", &efs.DescribeBackupPolicyInput{ - FileSystemId: PtrString("fs-1234"), + FileSystemId: new("fs-1234"), }, output) if err != nil { diff --git a/aws-source/adapters/efs-file-system_test.go b/aws-source/adapters/efs-file-system_test.go index 6b0831dd..11fd47e8 100644 --- a/aws-source/adapters/efs-file-system_test.go +++ b/aws-source/adapters/efs-file-system_test.go @@ -15,32 +15,32 @@ func TestFileSystemOutputMapper(t *testing.T) { output := &efs.DescribeFileSystemsOutput{ FileSystems: []types.FileSystemDescription{ { - CreationTime: PtrTime(time.Now()), - CreationToken: PtrString("TOKEN"), - FileSystemId: PtrString("fs-1231123123"), + CreationTime: new(time.Now()), + CreationToken: new("TOKEN"), + FileSystemId: new("fs-1231123123"), LifeCycleState: types.LifeCycleStateAvailable, NumberOfMountTargets: 10, - OwnerId: PtrString("944651592624"), + OwnerId: new("944651592624"), PerformanceMode: types.PerformanceModeGeneralPurpose, SizeInBytes: &types.FileSystemSize{ Value: 1024, - Timestamp: PtrTime(time.Now()), - ValueInIA: PtrInt64(2048), - ValueInStandard: PtrInt64(128), + Timestamp: new(time.Now()), + ValueInIA: new(int64(2048)), + ValueInStandard: new(int64(128)), }, Tags: []types.Tag{ { - Key: PtrString("foo"), - Value: PtrString("bar"), + Key: new("foo"), + Value: new("bar"), }, }, - AvailabilityZoneId: PtrString("use1-az1"), - AvailabilityZoneName: PtrString("us-east-1"), - Encrypted: PtrBool(true), - FileSystemArn: PtrString("arn:aws:elasticfilesystem:eu-west-2:944651592624:file-system/fs-0c6f2f41e957f42a9"), - 
KmsKeyId: PtrString("arn:aws:kms:eu-west-2:944651592624:key/be76a6fa-d307-41c2-a4e3-cbfba2440747"), - Name: PtrString("test"), - ProvisionedThroughputInMibps: PtrFloat64(64), + AvailabilityZoneId: new("use1-az1"), + AvailabilityZoneName: new("us-east-1"), + Encrypted: new(true), + FileSystemArn: new("arn:aws:elasticfilesystem:eu-west-2:944651592624:file-system/fs-0c6f2f41e957f42a9"), + KmsKeyId: new("arn:aws:kms:eu-west-2:944651592624:key/be76a6fa-d307-41c2-a4e3-cbfba2440747"), + Name: new("test"), + ProvisionedThroughputInMibps: new(float64(64)), ThroughputMode: types.ThroughputModeBursting, }, }, diff --git a/aws-source/adapters/efs-mount-target_test.go b/aws-source/adapters/efs-mount-target_test.go index 30885f3b..7732ed1b 100644 --- a/aws-source/adapters/efs-mount-target_test.go +++ b/aws-source/adapters/efs-mount-target_test.go @@ -14,16 +14,16 @@ func TestMountTargetOutputMapper(t *testing.T) { output := &efs.DescribeMountTargetsOutput{ MountTargets: []types.MountTargetDescription{ { - FileSystemId: PtrString("fs-1234567890"), + FileSystemId: new("fs-1234567890"), LifeCycleState: types.LifeCycleStateAvailable, - MountTargetId: PtrString("fsmt-01e86506d8165e43f"), - SubnetId: PtrString("subnet-1234567"), - AvailabilityZoneId: PtrString("use1-az1"), - AvailabilityZoneName: PtrString("us-east-1"), - IpAddress: PtrString("10.230.43.1"), - NetworkInterfaceId: PtrString("eni-2345"), - OwnerId: PtrString("234234"), - VpcId: PtrString("vpc-23452345235"), + MountTargetId: new("fsmt-01e86506d8165e43f"), + SubnetId: new("subnet-1234567"), + AvailabilityZoneId: new("use1-az1"), + AvailabilityZoneName: new("us-east-1"), + IpAddress: new("10.230.43.1"), + NetworkInterfaceId: new("eni-2345"), + OwnerId: new("234234"), + VpcId: new("vpc-23452345235"), }, }, } diff --git a/aws-source/adapters/efs-replication-configuration_test.go b/aws-source/adapters/efs-replication-configuration_test.go index d3cf233c..d9c37d49 100644 --- 
a/aws-source/adapters/efs-replication-configuration_test.go +++ b/aws-source/adapters/efs-replication-configuration_test.go @@ -13,25 +13,25 @@ func TestReplicationConfigurationOutputMapper(t *testing.T) { output := &efs.DescribeReplicationConfigurationsOutput{ Replications: []types.ReplicationConfigurationDescription{ { - CreationTime: PtrTime(time.Now()), + CreationTime: new(time.Now()), Destinations: []types.Destination{ { - FileSystemId: PtrString("fs-12345678"), - Region: PtrString("eu-west-1"), + FileSystemId: new("fs-12345678"), + Region: new("eu-west-1"), Status: types.ReplicationStatusEnabled, - LastReplicatedTimestamp: PtrTime(time.Now()), + LastReplicatedTimestamp: new(time.Now()), }, { - FileSystemId: PtrString("fs-98765432"), - Region: PtrString("us-west-2"), + FileSystemId: new("fs-98765432"), + Region: new("us-west-2"), Status: types.ReplicationStatusError, - LastReplicatedTimestamp: PtrTime(time.Now()), + LastReplicatedTimestamp: new(time.Now()), }, }, - OriginalSourceFileSystemArn: PtrString("arn:aws:elasticfilesystem:eu-west-2:944651592624:file-system/fs-0c6f2f41e957f42a9"), - SourceFileSystemArn: PtrString("arn:aws:elasticfilesystem:eu-west-2:944651592624:file-system/fs-0c6f2f41e957f42a9"), - SourceFileSystemId: PtrString("fs-748927493"), - SourceFileSystemRegion: PtrString("us-east-1"), + OriginalSourceFileSystemArn: new("arn:aws:elasticfilesystem:eu-west-2:944651592624:file-system/fs-0c6f2f41e957f42a9"), + SourceFileSystemArn: new("arn:aws:elasticfilesystem:eu-west-2:944651592624:file-system/fs-0c6f2f41e957f42a9"), + SourceFileSystemId: new("fs-748927493"), + SourceFileSystemRegion: new("us-east-1"), }, }, } diff --git a/aws-source/adapters/eks-addon_test.go b/aws-source/adapters/eks-addon_test.go index dacefe45..c0988fa1 100644 --- a/aws-source/adapters/eks-addon_test.go +++ b/aws-source/adapters/eks-addon_test.go @@ -13,24 +13,24 @@ import ( var AddonTestClient = EKSTestClient{ DescribeAddonOutput: &eks.DescribeAddonOutput{ Addon: 
&types.Addon{ - AddonName: PtrString("aws-ebs-csi-driver"), - ClusterName: PtrString("dylan"), + AddonName: new("aws-ebs-csi-driver"), + ClusterName: new("dylan"), Status: types.AddonStatusActive, - AddonVersion: PtrString("v1.13.0-eksbuild.3"), - ConfigurationValues: PtrString("values"), + AddonVersion: new("v1.13.0-eksbuild.3"), + ConfigurationValues: new("values"), MarketplaceInformation: &types.MarketplaceInformation{ - ProductId: PtrString("id"), - ProductUrl: PtrString("url"), + ProductId: new("id"), + ProductUrl: new("url"), }, - Publisher: PtrString("publisher"), - Owner: PtrString("owner"), + Publisher: new("publisher"), + Owner: new("owner"), Health: &types.AddonHealth{ Issues: []types.AddonIssue{}, }, - AddonArn: PtrString("arn:aws:eks:eu-west-2:801795385023:addon/dylan/aws-ebs-csi-driver/a2c29d0e-72c4-a702-7887-2f739f4fc189"), - CreatedAt: PtrTime(time.Now()), - ModifiedAt: PtrTime(time.Now()), - ServiceAccountRoleArn: PtrString("arn:aws:iam::801795385023:role/eks-csi-dylan"), + AddonArn: new("arn:aws:eks:eu-west-2:801795385023:addon/dylan/aws-ebs-csi-driver/a2c29d0e-72c4-a702-7887-2f739f4fc189"), + CreatedAt: new(time.Now()), + ModifiedAt: new(time.Now()), + ServiceAccountRoleArn: new("arn:aws:iam::801795385023:role/eks-csi-dylan"), }, }, } diff --git a/aws-source/adapters/eks-cluster_test.go b/aws-source/adapters/eks-cluster_test.go index 80dd9d3f..82232c1b 100644 --- a/aws-source/adapters/eks-cluster_test.go +++ b/aws-source/adapters/eks-cluster_test.go @@ -14,31 +14,31 @@ import ( var ClusterClient = EKSTestClient{ DescribeClusterOutput: &eks.DescribeClusterOutput{ Cluster: &types.Cluster{ - Name: PtrString("dylan"), - Arn: PtrString("arn:aws:eks:eu-west-2:801795385023:cluster/dylan"), - CreatedAt: PtrTime(time.Now()), - Version: PtrString("1.24"), - Endpoint: PtrString("https://00D3FF4CC48CBAA9BBC070DAA80BD251.gr7.eu-west-2.eks.amazonaws.com"), - RoleArn: PtrString("arn:aws:iam::801795385023:role/dylan-cluster-20221222134106992100000001"), - 
ClientRequestToken: PtrString("token"), + Name: new("dylan"), + Arn: new("arn:aws:eks:eu-west-2:801795385023:cluster/dylan"), + CreatedAt: new(time.Now()), + Version: new("1.24"), + Endpoint: new("https://00D3FF4CC48CBAA9BBC070DAA80BD251.gr7.eu-west-2.eks.amazonaws.com"), + RoleArn: new("arn:aws:iam::801795385023:role/dylan-cluster-20221222134106992100000001"), + ClientRequestToken: new("token"), ConnectorConfig: &types.ConnectorConfigResponse{ - ActivationCode: PtrString("code"), - ActivationExpiry: PtrTime(time.Now()), - ActivationId: PtrString("id"), - Provider: PtrString("provider"), - RoleArn: PtrString("arn:aws:iam::801795385023:role/dylan-cluster-20221222134106992100000002"), + ActivationCode: new("code"), + ActivationExpiry: new(time.Now()), + ActivationId: new("id"), + Provider: new("provider"), + RoleArn: new("arn:aws:iam::801795385023:role/dylan-cluster-20221222134106992100000002"), }, Health: &types.ClusterHealth{ Issues: []types.ClusterIssue{}, }, - Id: PtrString("id"), + Id: new("id"), OutpostConfig: &types.OutpostConfigResponse{ - ControlPlaneInstanceType: PtrString("type"), + ControlPlaneInstanceType: new("type"), OutpostArns: []string{ "arn1", }, ControlPlanePlacement: &types.ControlPlanePlacementResponse{ - GroupName: PtrString("groupName"), + GroupName: new("groupName"), }, }, ResourcesVpcConfig: &types.VpcConfigResponse{ @@ -50,8 +50,8 @@ var ClusterClient = EKSTestClient{ SecurityGroupIds: []string{ "sg-0bf38eb7e14777399", }, - ClusterSecurityGroupId: PtrString("sg-08df96f08566d4dda"), - VpcId: PtrString("vpc-0c9152ce7ed2b7305"), + ClusterSecurityGroupId: new("sg-08df96f08566d4dda"), + VpcId: new("vpc-0c9152ce7ed2b7305"), EndpointPublicAccess: true, EndpointPrivateAccess: true, PublicAccessCidrs: []string{ @@ -59,9 +59,9 @@ var ClusterClient = EKSTestClient{ }, }, KubernetesNetworkConfig: &types.KubernetesNetworkConfigResponse{ - ServiceIpv4Cidr: PtrString("172.20.0.0/16"), + ServiceIpv4Cidr: new("172.20.0.0/16"), IpFamily: types.IpFamilyIpv4, 
- ServiceIpv6Cidr: PtrString("ipv6cidr"), + ServiceIpv6Cidr: new("ipv6cidr"), }, Logging: &types.Logging{ ClusterLogging: []types.LogSetup{ @@ -72,26 +72,26 @@ var ClusterClient = EKSTestClient{ "controllerManager", "scheduler", }, - Enabled: PtrBool(true), + Enabled: new(true), }, { Types: []types.LogType{ "audit", }, - Enabled: PtrBool(false), + Enabled: new(false), }, }, }, Identity: &types.Identity{ Oidc: &types.OIDC{ - Issuer: PtrString("https://oidc.eks.eu-west-2.amazonaws.com/id/00D3FF4CC48CBAA9BBC070DAA80BD251"), + Issuer: new("https://oidc.eks.eu-west-2.amazonaws.com/id/00D3FF4CC48CBAA9BBC070DAA80BD251"), }, }, Status: types.ClusterStatusActive, CertificateAuthority: &types.Certificate{ - Data: PtrString("LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeU1USXlNakV6TkRZME5Gb1hEVE15TVRJeE9URXpORFkwTkZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTC9tCkN6b25QdUZIUXM1a0xudzdCeXMrak9pNWJscEVCN2RhZUYvQzZqaEVTbkcwdVBVRjVWSFUzbmRyZHRKelBaemQKenM4U1pEMzRsKytGWmw0NFQrYWRqMGFYanpmZ0NTeFo4K0MvaWJUOWIzck5jWU9ZZ3FYT1lXc2JVYmpBSjRadgpnakFqdEl3dTBvUHNYT0JSZU5KTDlhRkl6VFFIcy9QL1hONWI5eGRlSHhwOXN4cnlEREYxQVNuQkxwajduUHMrCmgyNUtvd0hQV1luekV6WVd1T3NZbDQ2RjZacHh4aVhya2hnOGozckR4dXRWZGMvQVBFaVhUdHh3OU9CMjFDMkwKK1VpanpxS2RrZm5idVEvOHF0TTRqbFVGTkgzUG03STlkTEdIMTBTOFdhQkhpODNRMklCd3c0eE5RZ04xNC91dgpXWFZOWkxmM1EwbElkdmtxaCtrQ0F3RUFBYU5aTUZjd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZCa2wvVEJwNVNyMFJrVEk2V1dMVkR4MVdZYUxNQlVHQTFVZEVRUU8KTUF5Q0NtdDFZbVZ5Ym1WMFpYTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBQ0FCVWtZUWZSQXlRRFVsc2todgp2NTRZN3lFQ1lUSG00OWVtMWoyV2hyN0JPdXdlUkU4M3g1b0NhWEtjK2tMemlvOEVvY2hxOWN1a1FEYm1KNkpoCmRhUUlyaFFwaG5PMHZSd290YXlhWjdlV2IwTm50WmNxN1ZmNkp5ZU5CR3Y1NTJGdlNNcGprWnh0UXVpTTJ5TXoKbjJWWmtxMzJPb0RjTmxCMERhRVBCSjlIM2ZnbG1qcGdWL0NHZFdMNG1wNEpkb3VPNTFtNkJBMm1ET2JWYzh4VgppNFJIWE9KNG9hSGFTd1B6MHBu
QUxabkJoUnpxV0Q1cGlycVlucjBxSlFDamJDWXF1TmJTU3d4c2JMYVFjanNFCjhiUXk0aGxXaEJNWno3UldOeDg1UTBZSjhWNEhKdXVCZ09MaVg1REFtNDZIbndWUy95MHJyN2JTWThoTXErM2QKTmtrPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="), + Data: new("LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeU1USXlNakV6TkRZME5Gb1hEVE15TVRJeE9URXpORFkwTkZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTC9tCkN6b25QdUZIUXM1a0xudzdCeXMrak9pNWJscEVCN2RhZUYvQzZqaEVTbkcwdVBVRjVWSFUzbmRyZHRKelBaemQKenM4U1pEMzRsKytGWmw0NFQrYWRqMGFYanpmZ0NTeFo4K0MvaWJUOWIzck5jWU9ZZ3FYT1lXc2JVYmpBSjRadgpnakFqdEl3dTBvUHNYT0JSZU5KTDlhRkl6VFFIcy9QL1hONWI5eGRlSHhwOXN4cnlEREYxQVNuQkxwajduUHMrCmgyNUtvd0hQV1luekV6WVd1T3NZbDQ2RjZacHh4aVhya2hnOGozckR4dXRWZGMvQVBFaVhUdHh3OU9CMjFDMkwKK1VpanpxS2RrZm5idVEvOHF0TTRqbFVGTkgzUG03STlkTEdIMTBTOFdhQkhpODNRMklCd3c0eE5RZ04xNC91dgpXWFZOWkxmM1EwbElkdmtxaCtrQ0F3RUFBYU5aTUZjd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZCa2wvVEJwNVNyMFJrVEk2V1dMVkR4MVdZYUxNQlVHQTFVZEVRUU8KTUF5Q0NtdDFZbVZ5Ym1WMFpYTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBQ0FCVWtZUWZSQXlRRFVsc2todgp2NTRZN3lFQ1lUSG00OWVtMWoyV2hyN0JPdXdlUkU4M3g1b0NhWEtjK2tMemlvOEVvY2hxOWN1a1FEYm1KNkpoCmRhUUlyaFFwaG5PMHZSd290YXlhWjdlV2IwTm50WmNxN1ZmNkp5ZU5CR3Y1NTJGdlNNcGprWnh0UXVpTTJ5TXoKbjJWWmtxMzJPb0RjTmxCMERhRVBCSjlIM2ZnbG1qcGdWL0NHZFdMNG1wNEpkb3VPNTFtNkJBMm1ET2JWYzh4VgppNFJIWE9KNG9hSGFTd1B6MHBuQUxabkJoUnpxV0Q1cGlycVlucjBxSlFDamJDWXF1TmJTU3d4c2JMYVFjanNFCjhiUXk0aGxXaEJNWno3UldOeDg1UTBZSjhWNEhKdXVCZ09MaVg1REFtNDZIbndWUy95MHJyN2JTWThoTXErM2QKTmtrPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg=="), }, - PlatformVersion: PtrString("eks.3"), + PlatformVersion: new("eks.3"), Tags: map[string]string{}, EncryptionConfig: []types.EncryptionConfig{ { @@ -99,7 +99,7 @@ var ClusterClient = EKSTestClient{ "secrets", }, Provider: &types.Provider{ - KeyArn: 
PtrString("arn:aws:kms:eu-west-2:801795385023:key/3a478539-9717-4c20-83a5-19989154dc32"), + KeyArn: new("arn:aws:kms:eu-west-2:801795385023:key/3a478539-9717-4c20-83a5-19989154dc32"), }, }, }, diff --git a/aws-source/adapters/eks-fargate-profile_test.go b/aws-source/adapters/eks-fargate-profile_test.go index 6d6cf7db..65215362 100644 --- a/aws-source/adapters/eks-fargate-profile_test.go +++ b/aws-source/adapters/eks-fargate-profile_test.go @@ -14,15 +14,15 @@ import ( var FargateTestClient = EKSTestClient{ DescribeFargateProfileOutput: &eks.DescribeFargateProfileOutput{ FargateProfile: &types.FargateProfile{ - ClusterName: PtrString("cluster"), - CreatedAt: PtrTime(time.Now()), - FargateProfileArn: PtrString("arn:partition:service:region:account-id:resource-type/resource-id"), - FargateProfileName: PtrString("name"), - PodExecutionRoleArn: PtrString("arn:partition:service::account-id:resource-type/resource-id"), + ClusterName: new("cluster"), + CreatedAt: new(time.Now()), + FargateProfileArn: new("arn:partition:service:region:account-id:resource-type/resource-id"), + FargateProfileName: new("name"), + PodExecutionRoleArn: new("arn:partition:service::account-id:resource-type/resource-id"), Selectors: []types.FargateProfileSelector{ { Labels: map[string]string{}, - Namespace: PtrString("namespace"), + Namespace: new("namespace"), }, }, Status: types.FargateProfileStatusActive, diff --git a/aws-source/adapters/eks-nodegroup_test.go b/aws-source/adapters/eks-nodegroup_test.go index c630f329..f2d9424b 100644 --- a/aws-source/adapters/eks-nodegroup_test.go +++ b/aws-source/adapters/eks-nodegroup_test.go @@ -14,26 +14,26 @@ import ( var NodeGroupClient = EKSTestClient{ DescribeNodegroupOutput: &eks.DescribeNodegroupOutput{ Nodegroup: &types.Nodegroup{ - NodegroupName: PtrString("default-2022122213523169820000001f"), - NodegroupArn: PtrString("arn:aws:eks:eu-west-2:801795385023:nodegroup/dylan/default-2022122213523169820000001f/98c29d0d-b22a-aaa3-445e-ebf71d43f67c"), - 
ClusterName: PtrString("dylan"), - Version: PtrString("1.24"), - ReleaseVersion: PtrString("1.24.7-20221112"), - CreatedAt: PtrTime(time.Now()), - ModifiedAt: PtrTime(time.Now()), + NodegroupName: new("default-2022122213523169820000001f"), + NodegroupArn: new("arn:aws:eks:eu-west-2:801795385023:nodegroup/dylan/default-2022122213523169820000001f/98c29d0d-b22a-aaa3-445e-ebf71d43f67c"), + ClusterName: new("dylan"), + Version: new("1.24"), + ReleaseVersion: new("1.24.7-20221112"), + CreatedAt: new(time.Now()), + ModifiedAt: new(time.Now()), Status: types.NodegroupStatusActive, CapacityType: types.CapacityTypesOnDemand, - DiskSize: PtrInt32(100), + DiskSize: new(int32(100)), RemoteAccess: &types.RemoteAccessConfig{ - Ec2SshKey: PtrString("key"), // link + Ec2SshKey: new("key"), // link SourceSecurityGroups: []string{ "sg1", // link }, }, ScalingConfig: &types.NodegroupScalingConfig{ - MinSize: PtrInt32(1), - MaxSize: PtrInt32(3), - DesiredSize: PtrInt32(1), + MinSize: new(int32(1)), + MaxSize: new(int32(3)), + DesiredSize: new(int32(1)), }, InstanceTypes: []string{ "T3large", @@ -42,33 +42,33 @@ var NodeGroupClient = EKSTestClient{ "subnet0d1fabfe6794b5543", // link }, AmiType: types.AMITypesAl2Arm64, - NodeRole: PtrString("arn:aws:iam::801795385023:role/default-eks-node-group-20221222134106992000000003"), + NodeRole: new("arn:aws:iam::801795385023:role/default-eks-node-group-20221222134106992000000003"), Labels: map[string]string{}, Taints: []types.Taint{ { Effect: types.TaintEffectNoSchedule, - Key: PtrString("key"), - Value: PtrString("value"), + Key: new("key"), + Value: new("value"), }, }, Resources: &types.NodegroupResources{ AutoScalingGroups: []types.AutoScalingGroup{ { - Name: PtrString("eks-default-2022122213523169820000001f-98c29d0d-b22a-aaa3-445e-ebf71d43f67c"), // link + Name: new("eks-default-2022122213523169820000001f-98c29d0d-b22a-aaa3-445e-ebf71d43f67c"), // link }, }, - RemoteAccessSecurityGroup: PtrString("sg2"), // link + RemoteAccessSecurityGroup: 
new("sg2"), // link }, Health: &types.NodegroupHealth{ Issues: []types.Issue{}, }, UpdateConfig: &types.NodegroupUpdateConfig{ - MaxUnavailablePercentage: PtrInt32(33), + MaxUnavailablePercentage: new(int32(33)), }, LaunchTemplate: &types.LaunchTemplateSpecification{ - Name: PtrString("default-2022122213523100410000001d"), // link - Version: PtrString("1"), - Id: PtrString("lt-097e994ce7e14fcdc"), + Name: new("default-2022122213523100410000001d"), // link + Version: new("1"), + Id: new("lt-097e994ce7e14fcdc"), }, Tags: map[string]string{}, }, diff --git a/aws-source/adapters/elb-instance-health_test.go b/aws-source/adapters/elb-instance-health_test.go index de83a9cc..e4375456 100644 --- a/aws-source/adapters/elb-instance-health_test.go +++ b/aws-source/adapters/elb-instance-health_test.go @@ -15,10 +15,10 @@ func TestInstanceHealthOutputMapper(t *testing.T) { output := elb.DescribeInstanceHealthOutput{ InstanceStates: []types.InstanceState{ { - InstanceId: PtrString("i-0337802d908b4a81e"), // link - State: PtrString("InService"), - ReasonCode: PtrString("N/A"), - Description: PtrString("N/A"), + InstanceId: new("i-0337802d908b4a81e"), // link + State: new("InService"), + ReasonCode: new("N/A"), + Description: new("N/A"), }, }, } diff --git a/aws-source/adapters/elb-load-balancer_test.go b/aws-source/adapters/elb-load-balancer_test.go index e54a56c9..aea8fb63 100644 --- a/aws-source/adapters/elb-load-balancer_test.go +++ b/aws-source/adapters/elb-load-balancer_test.go @@ -17,11 +17,11 @@ func (m mockElbClient) DescribeTags(ctx context.Context, params *elb.DescribeTag return &elb.DescribeTagsOutput{ TagDescriptions: []types.TagDescription{ { - LoadBalancerName: PtrString("a8c3c8851f0df43fda89797c8e941a91"), + LoadBalancerName: new("a8c3c8851f0df43fda89797c8e941a91"), Tags: []types.Tag{ { - Key: PtrString("foo"), - Value: PtrString("bar"), + Key: new("foo"), + Value: new("bar"), }, }, }, @@ -37,35 +37,35 @@ func TestELBv2LoadBalancerOutputMapper(t *testing.T) { output 
:= &elb.DescribeLoadBalancersOutput{ LoadBalancerDescriptions: []types.LoadBalancerDescription{ { - LoadBalancerName: PtrString("a8c3c8851f0df43fda89797c8e941a91"), - DNSName: PtrString("a8c3c8851f0df43fda89797c8e941a91-182843316.eu-west-2.elb.amazonaws.com"), // link - CanonicalHostedZoneName: PtrString("a8c3c8851f0df43fda89797c8e941a91-182843316.eu-west-2.elb.amazonaws.com"), // link - CanonicalHostedZoneNameID: PtrString("ZHURV8PSTC4K8"), // link + LoadBalancerName: new("a8c3c8851f0df43fda89797c8e941a91"), + DNSName: new("a8c3c8851f0df43fda89797c8e941a91-182843316.eu-west-2.elb.amazonaws.com"), // link + CanonicalHostedZoneName: new("a8c3c8851f0df43fda89797c8e941a91-182843316.eu-west-2.elb.amazonaws.com"), // link + CanonicalHostedZoneNameID: new("ZHURV8PSTC4K8"), // link ListenerDescriptions: []types.ListenerDescription{ { Listener: &types.Listener{ - Protocol: PtrString("TCP"), + Protocol: new("TCP"), LoadBalancerPort: 7687, - InstanceProtocol: PtrString("TCP"), - InstancePort: PtrInt32(30133), + InstanceProtocol: new("TCP"), + InstancePort: new(int32(30133)), }, PolicyNames: []string{}, }, { Listener: &types.Listener{ - Protocol: PtrString("TCP"), + Protocol: new("TCP"), LoadBalancerPort: 7473, - InstanceProtocol: PtrString("TCP"), - InstancePort: PtrInt32(31459), + InstanceProtocol: new("TCP"), + InstancePort: new(int32(31459)), }, PolicyNames: []string{}, }, { Listener: &types.Listener{ - Protocol: PtrString("TCP"), + Protocol: new("TCP"), LoadBalancerPort: 7474, - InstanceProtocol: PtrString("TCP"), - InstancePort: PtrInt32(30761), + InstanceProtocol: new("TCP"), + InstancePort: new(int32(30761)), }, PolicyNames: []string{}, }, @@ -73,21 +73,21 @@ func TestELBv2LoadBalancerOutputMapper(t *testing.T) { Policies: &types.Policies{ AppCookieStickinessPolicies: []types.AppCookieStickinessPolicy{ { - CookieName: PtrString("foo"), - PolicyName: PtrString("policy"), + CookieName: new("foo"), + PolicyName: new("policy"), }, }, LBCookieStickinessPolicies: 
[]types.LBCookieStickinessPolicy{ { - CookieExpirationPeriod: PtrInt64(10), - PolicyName: PtrString("name"), + CookieExpirationPeriod: new(int64(10)), + PolicyName: new("name"), }, }, OtherPolicies: []string{}, }, BackendServerDescriptions: []types.BackendServerDescription{ { - InstancePort: PtrInt32(443), + InstancePort: new(int32(443)), PolicyNames: []string{}, }, }, @@ -101,28 +101,28 @@ func TestELBv2LoadBalancerOutputMapper(t *testing.T) { "subnet09d5f6fa75b0b4569", "subnet0e234bef35fc4a9e1", }, - VPCId: PtrString("vpc-0c72199250cd479ea"), // link + VPCId: new("vpc-0c72199250cd479ea"), // link Instances: []types.Instance{ { - InstanceId: PtrString("i-0337802d908b4a81e"), // link *2 to ec2-instance and health + InstanceId: new("i-0337802d908b4a81e"), // link *2 to ec2-instance and health }, }, HealthCheck: &types.HealthCheck{ - Target: PtrString("HTTP:31151/healthz"), - Interval: PtrInt32(10), - Timeout: PtrInt32(5), - UnhealthyThreshold: PtrInt32(6), - HealthyThreshold: PtrInt32(2), + Target: new("HTTP:31151/healthz"), + Interval: new(int32(10)), + Timeout: new(int32(5)), + UnhealthyThreshold: new(int32(6)), + HealthyThreshold: new(int32(2)), }, SourceSecurityGroup: &types.SourceSecurityGroup{ - OwnerAlias: PtrString("944651592624"), - GroupName: PtrString("k8s-elb-a8c3c8851f0df43fda89797c8e941a91"), // link + OwnerAlias: new("944651592624"), + GroupName: new("k8s-elb-a8c3c8851f0df43fda89797c8e941a91"), // link }, SecurityGroups: []string{ "sg097e3cfdfc6d53b77", // link }, - CreatedTime: PtrTime(time.Now()), - Scheme: PtrString("internet-facing"), + CreatedTime: new(time.Now()), + Scheme: new("internet-facing"), }, }, } diff --git a/aws-source/adapters/elbv2-listener.go b/aws-source/adapters/elbv2-listener.go index a6950275..59bfbcbd 100644 --- a/aws-source/adapters/elbv2-listener.go +++ b/aws-source/adapters/elbv2-listener.go @@ -37,9 +37,9 @@ func listenerOutputMapper(ctx context.Context, client elbv2Client, scope string, sha := 
base64.URLEncoding.EncodeToString(h.Sum(nil)) if len(sha) > 12 { - action.AuthenticateOidcConfig.ClientSecret = PtrString(fmt.Sprintf("REDACTED (Version: %v)", sha[:11])) + action.AuthenticateOidcConfig.ClientSecret = new(fmt.Sprintf("REDACTED (Version: %v)", sha[:11])) } else { - action.AuthenticateOidcConfig.ClientSecret = PtrString("[REDACTED]") + action.AuthenticateOidcConfig.ClientSecret = new("[REDACTED]") } } } diff --git a/aws-source/adapters/elbv2-listener_test.go b/aws-source/adapters/elbv2-listener_test.go index 01a3eb6c..f7b0ec6a 100644 --- a/aws-source/adapters/elbv2-listener_test.go +++ b/aws-source/adapters/elbv2-listener_test.go @@ -14,17 +14,17 @@ func TestListenerOutputMapper(t *testing.T) { output := elbv2.DescribeListenersOutput{ Listeners: []types.Listener{ { - ListenerArn: PtrString("arn:aws:elasticloadbalancing:eu-west-2:944651592624:listener/app/ingress/1bf10920c5bd199d/9d28f512be129134"), - LoadBalancerArn: PtrString("arn:aws:elasticloadbalancing:eu-west-2:944651592624:loadbalancer/app/ingress/1bf10920c5bd199d"), // link - Port: PtrInt32(443), + ListenerArn: new("arn:aws:elasticloadbalancing:eu-west-2:944651592624:listener/app/ingress/1bf10920c5bd199d/9d28f512be129134"), + LoadBalancerArn: new("arn:aws:elasticloadbalancing:eu-west-2:944651592624:loadbalancer/app/ingress/1bf10920c5bd199d"), // link + Port: new(int32(443)), Protocol: types.ProtocolEnumHttps, Certificates: []types.Certificate{ { - CertificateArn: PtrString("arn:aws:acm:eu-west-2:944651592624:certificate/acd84d34-fb78-4411-bd8a-43684a3477c5"), // link - IsDefault: PtrBool(true), + CertificateArn: new("arn:aws:acm:eu-west-2:944651592624:certificate/acd84d34-fb78-4411-bd8a-43684a3477c5"), // link + IsDefault: new(true), }, }, - SslPolicy: PtrString("ELBSecurityPolicy-2016-08"), + SslPolicy: new("ELBSecurityPolicy-2016-08"), AlpnPolicy: []string{ "policy1", }, diff --git a/aws-source/adapters/elbv2-load-balancer_test.go b/aws-source/adapters/elbv2-load-balancer_test.go index 
ed37a0a7..765889e7 100644 --- a/aws-source/adapters/elbv2-load-balancer_test.go +++ b/aws-source/adapters/elbv2-load-balancer_test.go @@ -15,38 +15,38 @@ func TestLoadBalancerOutputMapper(t *testing.T) { output := elbv2.DescribeLoadBalancersOutput{ LoadBalancers: []types.LoadBalancer{ { - LoadBalancerArn: PtrString("arn:aws:elasticloadbalancing:eu-west-2:944651592624:loadbalancer/app/ingress/1bf10920c5bd199d"), - DNSName: PtrString("ingress-1285969159.eu-west-2.elb.amazonaws.com"), // link - CanonicalHostedZoneId: PtrString("ZHURV8PSTC4K8"), // link - CreatedTime: PtrTime(time.Now()), - LoadBalancerName: PtrString("ingress"), + LoadBalancerArn: new("arn:aws:elasticloadbalancing:eu-west-2:944651592624:loadbalancer/app/ingress/1bf10920c5bd199d"), + DNSName: new("ingress-1285969159.eu-west-2.elb.amazonaws.com"), // link + CanonicalHostedZoneId: new("ZHURV8PSTC4K8"), // link + CreatedTime: new(time.Now()), + LoadBalancerName: new("ingress"), Scheme: types.LoadBalancerSchemeEnumInternetFacing, - VpcId: PtrString("vpc-0c72199250cd479ea"), // link + VpcId: new("vpc-0c72199250cd479ea"), // link State: &types.LoadBalancerState{ Code: types.LoadBalancerStateEnumActive, - Reason: PtrString("reason"), + Reason: new("reason"), }, Type: types.LoadBalancerTypeEnumApplication, AvailabilityZones: []types.AvailabilityZone{ { - ZoneName: PtrString("eu-west-2b"), // link - SubnetId: PtrString("subnet-0960234bbc4edca03"), // link + ZoneName: new("eu-west-2b"), // link + SubnetId: new("subnet-0960234bbc4edca03"), // link LoadBalancerAddresses: []types.LoadBalancerAddress{ { - AllocationId: PtrString("allocation-id"), // link? - IPv6Address: PtrString(":::1"), // link - IpAddress: PtrString("1.1.1.1"), // link - PrivateIPv4Address: PtrString("10.0.0.1"), // link + AllocationId: new("allocation-id"), // link? 
+ IPv6Address: new(":::1"), // link + IpAddress: new("1.1.1.1"), // link + PrivateIPv4Address: new("10.0.0.1"), // link }, }, - OutpostId: PtrString("outpost-id"), + OutpostId: new("outpost-id"), }, }, SecurityGroups: []string{ "sg-0b21edc8578ea3f93", // link }, IpAddressType: types.IpAddressTypeIpv4, - CustomerOwnedIpv4Pool: PtrString("ipv4-pool"), // link + CustomerOwnedIpv4Pool: new("ipv4-pool"), // link }, }, } diff --git a/aws-source/adapters/elbv2-rule_test.go b/aws-source/adapters/elbv2-rule_test.go index 1cdd7024..7cc3d3bb 100644 --- a/aws-source/adapters/elbv2-rule_test.go +++ b/aws-source/adapters/elbv2-rule_test.go @@ -18,11 +18,11 @@ func TestRuleOutputMapper(t *testing.T) { output := elbv2.DescribeRulesOutput{ Rules: []types.Rule{ { - RuleArn: PtrString("arn:aws:elasticloadbalancing:eu-west-2:944651592624:listener-rule/app/ingress/1bf10920c5bd199d/9d28f512be129134/0f73a74d21b008f7"), - Priority: PtrString("1"), + RuleArn: new("arn:aws:elasticloadbalancing:eu-west-2:944651592624:listener-rule/app/ingress/1bf10920c5bd199d/9d28f512be129134/0f73a74d21b008f7"), + Priority: new("1"), Conditions: []types.RuleCondition{ { - Field: PtrString("path-pattern"), + Field: new("path-pattern"), Values: []string{ "/api/gateway", }, @@ -37,7 +37,7 @@ func TestRuleOutputMapper(t *testing.T) { }, }, HttpHeaderConfig: &types.HttpHeaderConditionConfig{ - HttpHeaderName: PtrString("SOMETHING"), + HttpHeaderName: new("SOMETHING"), Values: []string{ "foo", }, @@ -50,8 +50,8 @@ func TestRuleOutputMapper(t *testing.T) { QueryStringConfig: &types.QueryStringConditionConfig{ Values: []types.QueryStringKeyValuePair{ { - Key: PtrString("foo"), - Value: PtrString("bar"), + Key: new("foo"), + Value: new("bar"), }, }, }, @@ -65,7 +65,7 @@ func TestRuleOutputMapper(t *testing.T) { Actions: []types.Action{ // Tested in actions.go }, - IsDefault: PtrBool(false), + IsDefault: new(false), }, }, } diff --git a/aws-source/adapters/elbv2-target-group_test.go 
b/aws-source/adapters/elbv2-target-group_test.go index 44ec62ba..816ddddf 100644 --- a/aws-source/adapters/elbv2-target-group_test.go +++ b/aws-source/adapters/elbv2-target-group_test.go @@ -16,28 +16,28 @@ func TestTargetGroupOutputMapper(t *testing.T) { output := elbv2.DescribeTargetGroupsOutput{ TargetGroups: []types.TargetGroup{ { - TargetGroupArn: PtrString("arn:aws:elasticloadbalancing:eu-west-2:944651592624:targetgroup/k8s-default-apiserve-d87e8f7010/559d207158e41222"), - TargetGroupName: PtrString("k8s-default-apiserve-d87e8f7010"), + TargetGroupArn: new("arn:aws:elasticloadbalancing:eu-west-2:944651592624:targetgroup/k8s-default-apiserve-d87e8f7010/559d207158e41222"), + TargetGroupName: new("k8s-default-apiserve-d87e8f7010"), Protocol: types.ProtocolEnumHttp, - Port: PtrInt32(8080), - VpcId: PtrString("vpc-0c72199250cd479ea"), // link + Port: new(int32(8080)), + VpcId: new("vpc-0c72199250cd479ea"), // link HealthCheckProtocol: types.ProtocolEnumHttp, - HealthCheckPort: PtrString("traffic-port"), - HealthCheckEnabled: PtrBool(true), - HealthCheckIntervalSeconds: PtrInt32(10), - HealthCheckTimeoutSeconds: PtrInt32(10), - HealthyThresholdCount: PtrInt32(10), - UnhealthyThresholdCount: PtrInt32(10), - HealthCheckPath: PtrString("/"), + HealthCheckPort: new("traffic-port"), + HealthCheckEnabled: new(true), + HealthCheckIntervalSeconds: new(int32(10)), + HealthCheckTimeoutSeconds: new(int32(10)), + HealthyThresholdCount: new(int32(10)), + UnhealthyThresholdCount: new(int32(10)), + HealthCheckPath: new("/"), Matcher: &types.Matcher{ - HttpCode: PtrString("200"), - GrpcCode: PtrString("code"), + HttpCode: new("200"), + GrpcCode: new("code"), }, LoadBalancerArns: []string{ "arn:aws:elasticloadbalancing:eu-west-2:944651592624:loadbalancer/app/ingress/1bf10920c5bd199d", // link }, TargetType: types.TargetTypeEnumIp, - ProtocolVersion: PtrString("HTTP1"), + ProtocolVersion: new("HTTP1"), IpAddressType: types.TargetGroupIpAddressTypeEnumIpv4, }, }, diff --git 
a/aws-source/adapters/elbv2-target-health_test.go b/aws-source/adapters/elbv2-target-health_test.go index c15e8a62..2d884172 100644 --- a/aws-source/adapters/elbv2-target-health_test.go +++ b/aws-source/adapters/elbv2-target-health_test.go @@ -15,61 +15,61 @@ func TestTargetHealthOutputMapper(t *testing.T) { TargetHealthDescriptions: []types.TargetHealthDescription{ { Target: &types.TargetDescription{ - Id: PtrString("10.0.6.64"), // link - Port: PtrInt32(8080), - AvailabilityZone: PtrString("eu-west-2c"), + Id: new("10.0.6.64"), // link + Port: new(int32(8080)), + AvailabilityZone: new("eu-west-2c"), }, - HealthCheckPort: PtrString("8080"), + HealthCheckPort: new("8080"), TargetHealth: &types.TargetHealth{ State: types.TargetHealthStateEnumHealthy, Reason: types.TargetHealthReasonEnumDeregistrationInProgress, - Description: PtrString("Health checks failed with these codes: [404]"), + Description: new("Health checks failed with these codes: [404]"), }, }, { Target: &types.TargetDescription{ - Id: PtrString("arn:aws:elasticloadbalancing:eu-west-2:944651592624:loadbalancer/app/ingress/1bf10920c5bd199d"), // link - Port: PtrInt32(8080), - AvailabilityZone: PtrString("eu-west-2c"), + Id: new("arn:aws:elasticloadbalancing:eu-west-2:944651592624:loadbalancer/app/ingress/1bf10920c5bd199d"), // link + Port: new(int32(8080)), + AvailabilityZone: new("eu-west-2c"), }, - HealthCheckPort: PtrString("8080"), + HealthCheckPort: new("8080"), TargetHealth: &types.TargetHealth{ State: types.TargetHealthStateEnumHealthy, Reason: types.TargetHealthReasonEnumDeregistrationInProgress, - Description: PtrString("Health checks failed with these codes: [404]"), + Description: new("Health checks failed with these codes: [404]"), }, }, { Target: &types.TargetDescription{ - Id: PtrString("i-foo"), // link - Port: PtrInt32(8080), - AvailabilityZone: PtrString("eu-west-2c"), + Id: new("i-foo"), // link + Port: new(int32(8080)), + AvailabilityZone: new("eu-west-2c"), }, - HealthCheckPort: 
PtrString("8080"), + HealthCheckPort: new("8080"), TargetHealth: &types.TargetHealth{ State: types.TargetHealthStateEnumHealthy, Reason: types.TargetHealthReasonEnumDeregistrationInProgress, - Description: PtrString("Health checks failed with these codes: [404]"), + Description: new("Health checks failed with these codes: [404]"), }, }, { Target: &types.TargetDescription{ - Id: PtrString("arn:aws:lambda:eu-west-2:944651592624:function/foobar"), // link - Port: PtrInt32(8080), - AvailabilityZone: PtrString("eu-west-2c"), + Id: new("arn:aws:lambda:eu-west-2:944651592624:function/foobar"), // link + Port: new(int32(8080)), + AvailabilityZone: new("eu-west-2c"), }, - HealthCheckPort: PtrString("8080"), + HealthCheckPort: new("8080"), TargetHealth: &types.TargetHealth{ State: types.TargetHealthStateEnumHealthy, Reason: types.TargetHealthReasonEnumDeregistrationInProgress, - Description: PtrString("Health checks failed with these codes: [404]"), + Description: new("Health checks failed with these codes: [404]"), }, }, }, } items, err := targetHealthOutputMapper(context.Background(), nil, "foo", &elbv2.DescribeTargetHealthInput{ - TargetGroupArn: PtrString("arn:aws:elasticloadbalancing:eu-west-2:944651592624:targetgroup/k8s-default-apiserve-d87e8f7010/559d207158e41222"), + TargetGroupArn: new("arn:aws:elasticloadbalancing:eu-west-2:944651592624:targetgroup/k8s-default-apiserve-d87e8f7010/559d207158e41222"), }, &output) if err != nil { @@ -167,8 +167,8 @@ func TestTargetHealthUniqueID(t *testing.T) { id := TargetHealthUniqueID{ TargetGroupArn: "arn:aws:elasticloadbalancing:eu-west-2:944651592624:targetgroup/k8s-default-apiserve-d87e8f7010/559d207158e41222", Id: "10.0.0.1", - AvailabilityZone: PtrString("eu-west-2"), - Port: PtrInt32(8080), + AvailabilityZone: new("eu-west-2"), + Port: new(int32(8080)), } expected := "arn:aws:elasticloadbalancing:eu-west-2:944651592624:targetgroup/k8s-default-apiserve-d87e8f7010/559d207158e41222|10.0.0.1|eu-west-2|8080" @@ -192,7 +192,7 @@ 
func TestTargetHealthUniqueID(t *testing.T) { id := TargetHealthUniqueID{ TargetGroupArn: "arn:aws:elasticloadbalancing:eu-west-2:944651592624:targetgroup/k8s-default-apiserve-d87e8f7010/559d207158e41222", Id: "arn:partition:service:region:account-id:resource-type:resource-id", - Port: PtrInt32(8080), + Port: new(int32(8080)), } expected := "arn:aws:elasticloadbalancing:eu-west-2:944651592624:targetgroup/k8s-default-apiserve-d87e8f7010/559d207158e41222|arn:partition:service:region:account-id:resource-type:resource-id||8080" diff --git a/aws-source/adapters/elbv2_test.go b/aws-source/adapters/elbv2_test.go index 6079768a..1a1f991e 100644 --- a/aws-source/adapters/elbv2_test.go +++ b/aws-source/adapters/elbv2_test.go @@ -19,8 +19,8 @@ func (m mockElbv2Client) DescribeTags(ctx context.Context, params *elbv2.Describ ResourceArn: &arn, Tags: []types.Tag{ { - Key: PtrString("foo"), - Value: PtrString("bar"), + Key: new("foo"), + Value: new("bar"), }, }, }) @@ -50,59 +50,59 @@ func (m mockElbv2Client) DescribeTargetGroups(ctx context.Context, params *elbv2 func TestActionToRequests(t *testing.T) { action := types.Action{ Type: types.ActionTypeEnumFixedResponse, - Order: PtrInt32(1), + Order: new(int32(1)), FixedResponseConfig: &types.FixedResponseActionConfig{ - StatusCode: PtrString("404"), - ContentType: PtrString("text/plain"), - MessageBody: PtrString("not found"), + StatusCode: new("404"), + ContentType: new("text/plain"), + MessageBody: new("not found"), }, AuthenticateCognitoConfig: &types.AuthenticateCognitoActionConfig{ - UserPoolArn: PtrString("arn:partition:service:region:account-id:resource-type:resource-id"), // link - UserPoolClientId: PtrString("clientID"), - UserPoolDomain: PtrString("domain.com"), + UserPoolArn: new("arn:partition:service:region:account-id:resource-type:resource-id"), // link + UserPoolClientId: new("clientID"), + UserPoolDomain: new("domain.com"), AuthenticationRequestExtraParams: map[string]string{ "foo": "bar", }, 
OnUnauthenticatedRequest: types.AuthenticateCognitoActionConditionalBehaviorEnumAuthenticate, - Scope: PtrString("foo"), - SessionCookieName: PtrString("cookie"), - SessionTimeout: PtrInt64(10), + Scope: new("foo"), + SessionCookieName: new("cookie"), + SessionTimeout: new(int64(10)), }, AuthenticateOidcConfig: &types.AuthenticateOidcActionConfig{ - AuthorizationEndpoint: PtrString("https://auth.somewhere.com/app1"), // link - ClientId: PtrString("CLIENT-ID"), - Issuer: PtrString("Someone"), - TokenEndpoint: PtrString("https://auth.somewhere.com/app1/tokens"), // link - UserInfoEndpoint: PtrString("https://auth.somewhere.com/app1/users"), // link + AuthorizationEndpoint: new("https://auth.somewhere.com/app1"), // link + ClientId: new("CLIENT-ID"), + Issuer: new("Someone"), + TokenEndpoint: new("https://auth.somewhere.com/app1/tokens"), // link + UserInfoEndpoint: new("https://auth.somewhere.com/app1/users"), // link AuthenticationRequestExtraParams: map[string]string{}, - ClientSecret: PtrString("secret"), // Redact + ClientSecret: new("secret"), // Redact OnUnauthenticatedRequest: types.AuthenticateOidcActionConditionalBehaviorEnumAllow, - Scope: PtrString("foo"), - SessionCookieName: PtrString("cookie"), - SessionTimeout: PtrInt64(10), - UseExistingClientSecret: PtrBool(true), + Scope: new("foo"), + SessionCookieName: new("cookie"), + SessionTimeout: new(int64(10)), + UseExistingClientSecret: new(true), }, ForwardConfig: &types.ForwardActionConfig{ TargetGroupStickinessConfig: &types.TargetGroupStickinessConfig{ - DurationSeconds: PtrInt32(10), - Enabled: PtrBool(true), + DurationSeconds: new(int32(10)), + Enabled: new(true), }, TargetGroups: []types.TargetGroupTuple{ { - TargetGroupArn: PtrString("arn:partition:service:region:account-id:resource-type:resource-id1"), // link - Weight: PtrInt32(1), + TargetGroupArn: new("arn:partition:service:region:account-id:resource-type:resource-id1"), // link + Weight: new(int32(1)), }, }, }, RedirectConfig: 
&types.RedirectActionConfig{ StatusCode: types.RedirectActionStatusCodeEnumHttp302, - Host: PtrString("somewhere.else.com"), // combine and link - Path: PtrString("/login"), // combine and link - Port: PtrString("8080"), // combine and link - Protocol: PtrString("https"), // combine and link - Query: PtrString("foo=bar"), // combine and link + Host: new("somewhere.else.com"), // combine and link + Path: new("/login"), // combine and link + Port: new("8080"), // combine and link + Protocol: new("https"), // combine and link + Query: new("foo=bar"), // combine and link }, - TargetGroupArn: PtrString("arn:partition:service:region:account-id:resource-type:resource-id2"), // link + TargetGroupArn: new("arn:partition:service:region:account-id:resource-type:resource-id2"), // link } item := sdp.Item{ diff --git a/aws-source/adapters/iam-group_test.go b/aws-source/adapters/iam-group_test.go index 2ad67320..5778aa10 100644 --- a/aws-source/adapters/iam-group_test.go +++ b/aws-source/adapters/iam-group_test.go @@ -12,11 +12,11 @@ import ( func TestGroupItemMapper(t *testing.T) { zone := types.Group{ - Path: PtrString("/"), - GroupName: PtrString("power-users"), - GroupId: PtrString("AGPA3VLV2U27T6SSLJMDS"), - Arn: PtrString("arn:aws:iam::801795385023:group/power-users"), - CreateDate: PtrTime(time.Now()), + Path: new("/"), + GroupName: new("power-users"), + GroupId: new("AGPA3VLV2U27T6SSLJMDS"), + Arn: new("arn:aws:iam::801795385023:group/power-users"), + CreateDate: new(time.Now()), } item, err := groupItemMapper(nil, "foo", &zone) diff --git a/aws-source/adapters/iam-instance-profile_test.go b/aws-source/adapters/iam-instance-profile_test.go index 50ade39e..92a78897 100644 --- a/aws-source/adapters/iam-instance-profile_test.go +++ b/aws-source/adapters/iam-instance-profile_test.go @@ -12,28 +12,28 @@ import ( func TestInstanceProfileItemMapper(t *testing.T) { profile := types.InstanceProfile{ - Arn: PtrString("arn:aws:iam::123456789012:instance-profile/webserver"), - 
CreateDate: PtrTime(time.Now()), - InstanceProfileId: PtrString("AIDACKCEVSQ6C2EXAMPLE"), - InstanceProfileName: PtrString("webserver"), - Path: PtrString("/"), + Arn: new("arn:aws:iam::123456789012:instance-profile/webserver"), + CreateDate: new(time.Now()), + InstanceProfileId: new("AIDACKCEVSQ6C2EXAMPLE"), + InstanceProfileName: new("webserver"), + Path: new("/"), Roles: []types.Role{ { - Arn: PtrString("arn:aws:iam::123456789012:role/webserver"), // link - CreateDate: PtrTime(time.Now()), - Path: PtrString("/"), - RoleId: PtrString("AIDACKCEVSQ6C2EXAMPLE"), - RoleName: PtrString("webserver"), - AssumeRolePolicyDocument: PtrString(`{}`), - Description: PtrString("Allows EC2 instances to call AWS services on your behalf."), - MaxSessionDuration: PtrInt32(3600), + Arn: new("arn:aws:iam::123456789012:role/webserver"), // link + CreateDate: new(time.Now()), + Path: new("/"), + RoleId: new("AIDACKCEVSQ6C2EXAMPLE"), + RoleName: new("webserver"), + AssumeRolePolicyDocument: new(`{}`), + Description: new("Allows EC2 instances to call AWS services on your behalf."), + MaxSessionDuration: new(int32(3600)), PermissionsBoundary: &types.AttachedPermissionsBoundary{ - PermissionsBoundaryArn: PtrString("arn:aws:iam::123456789012:policy/XCompanyBoundaries"), // link + PermissionsBoundaryArn: new("arn:aws:iam::123456789012:policy/XCompanyBoundaries"), // link PermissionsBoundaryType: types.PermissionsBoundaryAttachmentTypePolicy, }, RoleLastUsed: &types.RoleLastUsed{ - LastUsedDate: PtrTime(time.Now()), - Region: PtrString("us-east-1"), + LastUsedDate: new(time.Now()), + Region: new("us-east-1"), }, }, }, diff --git a/aws-source/adapters/iam-policy.go b/aws-source/adapters/iam-policy.go index 5a48ad15..0383d6e0 100644 --- a/aws-source/adapters/iam-policy.go +++ b/aws-source/adapters/iam-policy.go @@ -39,7 +39,7 @@ func policyGetFunc(ctx context.Context, client IAMClient, scope, query string) ( }, } out, err := client.GetPolicy(ctx, &iam.GetPolicyInput{ - PolicyArn: 
PtrString(a.String()), + PolicyArn: new(a.String()), }) if err != nil { return nil, err diff --git a/aws-source/adapters/iam-policy_test.go b/aws-source/adapters/iam-policy_test.go index a9b85490..f2e4e593 100644 --- a/aws-source/adapters/iam-policy_test.go +++ b/aws-source/adapters/iam-policy_test.go @@ -17,16 +17,16 @@ import ( func (t *TestIAMClient) GetPolicy(ctx context.Context, params *iam.GetPolicyInput, optFns ...func(*iam.Options)) (*iam.GetPolicyOutput, error) { return &iam.GetPolicyOutput{ Policy: &types.Policy{ - PolicyName: PtrString("AWSControlTowerStackSetRolePolicy"), - PolicyId: PtrString("ANPA3VLV2U277MP54R2OV"), - Arn: PtrString("arn:aws:iam::801795385023:policy/service-role/AWSControlTowerStackSetRolePolicy"), - Path: PtrString("/service-role/"), - DefaultVersionId: PtrString("v1"), - AttachmentCount: PtrInt32(1), - PermissionsBoundaryUsageCount: PtrInt32(0), + PolicyName: new("AWSControlTowerStackSetRolePolicy"), + PolicyId: new("ANPA3VLV2U277MP54R2OV"), + Arn: new("arn:aws:iam::801795385023:policy/service-role/AWSControlTowerStackSetRolePolicy"), + Path: new("/service-role/"), + DefaultVersionId: new("v1"), + AttachmentCount: new(int32(1)), + PermissionsBoundaryUsageCount: new(int32(0)), IsAttachable: true, - CreateDate: PtrTime(time.Now()), - UpdateDate: PtrTime(time.Now()), + CreateDate: new(time.Now()), + UpdateDate: new(time.Now()), }, }, nil } @@ -35,20 +35,20 @@ func (t *TestIAMClient) ListEntitiesForPolicy(context.Context, *iam.ListEntities return &iam.ListEntitiesForPolicyOutput{ PolicyGroups: []types.PolicyGroup{ { - GroupId: PtrString("groupId"), - GroupName: PtrString("groupName"), + GroupId: new("groupId"), + GroupName: new("groupName"), }, }, PolicyRoles: []types.PolicyRole{ { - RoleId: PtrString("roleId"), - RoleName: PtrString("roleName"), + RoleId: new("roleId"), + RoleName: new("roleName"), }, }, PolicyUsers: []types.PolicyUser{ { - UserId: PtrString("userId"), - UserName: PtrString("userName"), + UserId: new("userId"), + 
UserName: new("userName"), }, }, }, nil @@ -58,28 +58,28 @@ func (t *TestIAMClient) ListPolicies(context.Context, *iam.ListPoliciesInput, .. return &iam.ListPoliciesOutput{ Policies: []types.Policy{ { - PolicyName: PtrString("AWSControlTowerAdminPolicy"), - PolicyId: PtrString("ANPA3VLV2U2745H37HTHN"), - Arn: PtrString("arn:aws:iam::801795385023:policy/service-role/AWSControlTowerAdminPolicy"), - Path: PtrString("/service-role/"), - DefaultVersionId: PtrString("v1"), - AttachmentCount: PtrInt32(1), - PermissionsBoundaryUsageCount: PtrInt32(0), + PolicyName: new("AWSControlTowerAdminPolicy"), + PolicyId: new("ANPA3VLV2U2745H37HTHN"), + Arn: new("arn:aws:iam::801795385023:policy/service-role/AWSControlTowerAdminPolicy"), + Path: new("/service-role/"), + DefaultVersionId: new("v1"), + AttachmentCount: new(int32(1)), + PermissionsBoundaryUsageCount: new(int32(0)), IsAttachable: true, - CreateDate: PtrTime(time.Now()), - UpdateDate: PtrTime(time.Now()), + CreateDate: new(time.Now()), + UpdateDate: new(time.Now()), }, { - PolicyName: PtrString("AWSControlTowerCloudTrailRolePolicy"), - PolicyId: PtrString("ANPA3VLV2U27UOP7KSM6I"), - Arn: PtrString("arn:aws:iam::801795385023:policy/service-role/AWSControlTowerCloudTrailRolePolicy"), - Path: PtrString("/service-role/"), - DefaultVersionId: PtrString("v1"), - AttachmentCount: PtrInt32(1), - PermissionsBoundaryUsageCount: PtrInt32(0), + PolicyName: new("AWSControlTowerCloudTrailRolePolicy"), + PolicyId: new("ANPA3VLV2U27UOP7KSM6I"), + Arn: new("arn:aws:iam::801795385023:policy/service-role/AWSControlTowerCloudTrailRolePolicy"), + Path: new("/service-role/"), + DefaultVersionId: new("v1"), + AttachmentCount: new(int32(1)), + PermissionsBoundaryUsageCount: new(int32(0)), IsAttachable: true, - CreateDate: PtrTime(time.Now()), - UpdateDate: PtrTime(time.Now()), + CreateDate: new(time.Now()), + UpdateDate: new(time.Now()), }, }, }, nil @@ -89,8 +89,8 @@ func (t *TestIAMClient) ListPolicyTags(ctx context.Context, params 
*iam.ListPoli return &iam.ListPolicyTagsOutput{ Tags: []types.Tag{ { - Key: PtrString("foo"), - Value: PtrString("foo"), + Key: new("foo"), + Value: new("foo"), }, }, }, nil @@ -208,7 +208,7 @@ func TestPolicyGetFunc(t *testing.T) { func TestPolicyListTagsFunc(t *testing.T) { tags, err := policyListTagsFunc(context.Background(), &PolicyDetails{ Policy: &types.Policy{ - Arn: PtrString("arn:aws:iam::801795385023:policy/service-role/AWSControlTowerAdminPolicy"), + Arn: new("arn:aws:iam::801795385023:policy/service-role/AWSControlTowerAdminPolicy"), }, }, &TestIAMClient{}) if err != nil { @@ -223,33 +223,33 @@ func TestPolicyListTagsFunc(t *testing.T) { func TestPolicyItemMapper(t *testing.T) { details := &PolicyDetails{ Policy: &types.Policy{ - PolicyName: PtrString("AWSControlTowerAdminPolicy"), - PolicyId: PtrString("ANPA3VLV2U2745H37HTHN"), - Arn: PtrString("arn:aws:iam::801795385023:policy/service-role/AWSControlTowerAdminPolicy"), - Path: PtrString("/service-role/"), - DefaultVersionId: PtrString("v1"), - AttachmentCount: PtrInt32(1), - PermissionsBoundaryUsageCount: PtrInt32(0), + PolicyName: new("AWSControlTowerAdminPolicy"), + PolicyId: new("ANPA3VLV2U2745H37HTHN"), + Arn: new("arn:aws:iam::801795385023:policy/service-role/AWSControlTowerAdminPolicy"), + Path: new("/service-role/"), + DefaultVersionId: new("v1"), + AttachmentCount: new(int32(1)), + PermissionsBoundaryUsageCount: new(int32(0)), IsAttachable: true, - CreateDate: PtrTime(time.Now()), - UpdateDate: PtrTime(time.Now()), + CreateDate: new(time.Now()), + UpdateDate: new(time.Now()), }, PolicyGroups: []types.PolicyGroup{ { - GroupId: PtrString("groupId"), - GroupName: PtrString("groupName"), + GroupId: new("groupId"), + GroupName: new("groupName"), }, }, PolicyRoles: []types.PolicyRole{ { - RoleId: PtrString("roleId"), - RoleName: PtrString("roleName"), + RoleId: new("roleId"), + RoleName: new("roleName"), }, }, PolicyUsers: []types.PolicyUser{ { - UserId: PtrString("userId"), - UserName: 
PtrString("userName"), + UserId: new("userId"), + UserName: new("userName"), }, }, } diff --git a/aws-source/adapters/iam-role_test.go b/aws-source/adapters/iam-role_test.go index e08d1456..aada9e72 100644 --- a/aws-source/adapters/iam-role_test.go +++ b/aws-source/adapters/iam-role_test.go @@ -19,12 +19,12 @@ import ( func (t *TestIAMClient) GetRole(ctx context.Context, params *iam.GetRoleInput, optFns ...func(*iam.Options)) (*iam.GetRoleOutput, error) { return &iam.GetRoleOutput{ Role: &types.Role{ - Path: PtrString("/service-role/"), - RoleName: PtrString("AWSControlTowerConfigAggregatorRoleForOrganizations"), - RoleId: PtrString("AROA3VLV2U27YSTBFCGCJ"), - Arn: PtrString("arn:aws:iam::801795385023:role/service-role/AWSControlTowerConfigAggregatorRoleForOrganizations"), - CreateDate: PtrTime(time.Now()), - AssumeRolePolicyDocument: PtrString(`{ + Path: new("/service-role/"), + RoleName: new("AWSControlTowerConfigAggregatorRoleForOrganizations"), + RoleId: new("AROA3VLV2U27YSTBFCGCJ"), + Arn: new("arn:aws:iam::801795385023:role/service-role/AWSControlTowerConfigAggregatorRoleForOrganizations"), + CreateDate: new(time.Now()), + AssumeRolePolicyDocument: new(`{ "Version": "2012-10-17", "Statement": [ { @@ -36,7 +36,7 @@ func (t *TestIAMClient) GetRole(ctx context.Context, params *iam.GetRoleInput, o } ] }`), - MaxSessionDuration: PtrInt32(3600), + MaxSessionDuration: new(int32(3600)), }, }, nil } @@ -54,12 +54,12 @@ func (t *TestIAMClient) ListRoles(context.Context, *iam.ListRolesInput, ...func( return &iam.ListRolesOutput{ Roles: []types.Role{ { - Path: PtrString("/service-role/"), - RoleName: PtrString("AWSControlTowerConfigAggregatorRoleForOrganizations"), - RoleId: PtrString("AROA3VLV2U27YSTBFCGCJ"), - Arn: PtrString("arn:aws:iam::801795385023:role/service-role/AWSControlTowerConfigAggregatorRoleForOrganizations"), - CreateDate: PtrTime(time.Now()), - AssumeRolePolicyDocument: PtrString(`{ + Path: new("/service-role/"), + RoleName: 
new("AWSControlTowerConfigAggregatorRoleForOrganizations"), + RoleId: new("AROA3VLV2U27YSTBFCGCJ"), + Arn: new("arn:aws:iam::801795385023:role/service-role/AWSControlTowerConfigAggregatorRoleForOrganizations"), + CreateDate: new(time.Now()), + AssumeRolePolicyDocument: new(`{ "Version": "2012-10-17", "Statement": [ { @@ -71,7 +71,7 @@ func (t *TestIAMClient) ListRoles(context.Context, *iam.ListRolesInput, ...func( } ] }`), - MaxSessionDuration: PtrInt32(3600), + MaxSessionDuration: new(int32(3600)), }, }, }, nil @@ -81,8 +81,8 @@ func (t *TestIAMClient) ListRoleTags(ctx context.Context, params *iam.ListRoleTa return &iam.ListRoleTagsOutput{ Tags: []types.Tag{ { - Key: PtrString("foo"), - Value: PtrString("bar"), + Key: new("foo"), + Value: new("bar"), }, }, }, nil @@ -91,7 +91,7 @@ func (t *TestIAMClient) ListRoleTags(ctx context.Context, params *iam.ListRoleTa func (t *TestIAMClient) GetRolePolicy(ctx context.Context, params *iam.GetRolePolicyInput, optFns ...func(*iam.Options)) (*iam.GetRolePolicyOutput, error) { return &iam.GetRolePolicyOutput{ PolicyName: params.PolicyName, - PolicyDocument: PtrString(`{ + PolicyDocument: new(`{ "Version": "2012-10-17", "Statement": [ { @@ -110,12 +110,12 @@ func (t *TestIAMClient) ListAttachedRolePolicies(ctx context.Context, params *ia return &iam.ListAttachedRolePoliciesOutput{ AttachedPolicies: []types.AttachedPolicy{ { - PolicyArn: PtrString("arn:aws:iam::aws:policy/AdministratorAccess"), - PolicyName: PtrString("AdministratorAccess"), + PolicyArn: new("arn:aws:iam::aws:policy/AdministratorAccess"), + PolicyName: new("AdministratorAccess"), }, { - PolicyArn: PtrString("arn:aws:iam::aws:policy/AmazonS3FullAccess"), - PolicyName: PtrString("AmazonS3FullAccess"), + PolicyArn: new("arn:aws:iam::aws:policy/AmazonS3FullAccess"), + PolicyName: new("AmazonS3FullAccess"), }, }, }, nil @@ -160,7 +160,7 @@ func TestRoleListFunc(t *testing.T) { func TestRoleListTagsFunc(t *testing.T) { tags, err := 
roleListTagsFunc(context.Background(), &RoleDetails{ Role: &types.Role{ - Arn: PtrString("arn:aws:iam::801795385023:role/service-role/AWSControlTowerConfigAggregatorRoleForOrganizations"), + Arn: new("arn:aws:iam::801795385023:role/service-role/AWSControlTowerConfigAggregatorRoleForOrganizations"), }, }, &TestIAMClient{}) if err != nil { @@ -193,21 +193,21 @@ func TestRoleItemMapper(t *testing.T) { role := RoleDetails{ Role: &types.Role{ - Path: PtrString("/service-role/"), - RoleName: PtrString("AWSControlTowerConfigAggregatorRoleForOrganizations"), - RoleId: PtrString("AROA3VLV2U27YSTBFCGCJ"), - Arn: PtrString("arn:aws:iam::801795385023:role/service-role/AWSControlTowerConfigAggregatorRoleForOrganizations"), - CreateDate: PtrTime(time.Now()), - AssumeRolePolicyDocument: PtrString(`%7B%22Version%22%3A%222012-10-17%22%2C%22Statement%22%3A%5B%7B%22Effect%22%3A%22Allow%22%2C%22Principal%22%3A%7B%22Service%22%3A%22config.amazonaws.com%22%7D%2C%22Action%22%3A%22sts%3AAssumeRole%22%7D%5D%7D`), - MaxSessionDuration: PtrInt32(3600), - Description: PtrString("description"), + Path: new("/service-role/"), + RoleName: new("AWSControlTowerConfigAggregatorRoleForOrganizations"), + RoleId: new("AROA3VLV2U27YSTBFCGCJ"), + Arn: new("arn:aws:iam::801795385023:role/service-role/AWSControlTowerConfigAggregatorRoleForOrganizations"), + CreateDate: new(time.Now()), + AssumeRolePolicyDocument: new(`%7B%22Version%22%3A%222012-10-17%22%2C%22Statement%22%3A%5B%7B%22Effect%22%3A%22Allow%22%2C%22Principal%22%3A%7B%22Service%22%3A%22config.amazonaws.com%22%7D%2C%22Action%22%3A%22sts%3AAssumeRole%22%7D%5D%7D`), + MaxSessionDuration: new(int32(3600)), + Description: new("description"), PermissionsBoundary: &types.AttachedPermissionsBoundary{ - PermissionsBoundaryArn: PtrString("arn:aws:iam::801795385023:role/service-role/AWSControlTowerConfigAggregatorRoleForOrganizations"), + PermissionsBoundaryArn: 
new("arn:aws:iam::801795385023:role/service-role/AWSControlTowerConfigAggregatorRoleForOrganizations"), PermissionsBoundaryType: types.PermissionsBoundaryAttachmentTypePolicy, }, RoleLastUsed: &types.RoleLastUsed{ - LastUsedDate: PtrTime(time.Now()), - Region: PtrString("us-east-2"), + LastUsedDate: new(time.Now()), + Region: new("us-east-2"), }, }, EmbeddedPolicies: []embeddedPolicy{ @@ -218,8 +218,8 @@ func TestRoleItemMapper(t *testing.T) { }, AttachedPolicies: []types.AttachedPolicy{ { - PolicyArn: PtrString("arn:aws:iam::aws:policy/AdministratorAccess"), - PolicyName: PtrString("AdministratorAccess"), + PolicyArn: new("arn:aws:iam::aws:policy/AdministratorAccess"), + PolicyName: new("AdministratorAccess"), }, }, } diff --git a/aws-source/adapters/iam-user_test.go b/aws-source/adapters/iam-user_test.go index 66c6c8af..8860fcd3 100644 --- a/aws-source/adapters/iam-user_test.go +++ b/aws-source/adapters/iam-user_test.go @@ -21,7 +21,7 @@ func (t *TestIAMClient) ListGroupsForUser(ctx context.Context, params *iam.ListG marker := params.Marker if marker == nil { - marker = PtrString("0") + marker = new("0") } // Get the current page @@ -34,17 +34,17 @@ func (t *TestIAMClient) ListGroupsForUser(ctx context.Context, params *iam.ListG isTruncated = false marker = nil } else { - marker = PtrString(fmt.Sprint(markerInt)) + marker = new(fmt.Sprint(markerInt)) } return &iam.ListGroupsForUserOutput{ Groups: []types.Group{ { - Arn: PtrString("arn:aws:iam::801795385023:Group/something"), - CreateDate: PtrTime(time.Now()), - GroupId: PtrString("id"), - GroupName: PtrString(fmt.Sprintf("group-%v", marker)), - Path: PtrString("/"), + Arn: new("arn:aws:iam::801795385023:Group/something"), + CreateDate: new(time.Now()), + GroupId: new("id"), + GroupName: new(fmt.Sprintf("group-%v", marker)), + Path: new("/"), }, }, IsTruncated: isTruncated, @@ -55,11 +55,11 @@ func (t *TestIAMClient) ListGroupsForUser(ctx context.Context, params *iam.ListG func (t *TestIAMClient) GetUser(ctx 
context.Context, params *iam.GetUserInput, optFns ...func(*iam.Options)) (*iam.GetUserOutput, error) { return &iam.GetUserOutput{ User: &types.User{ - Path: PtrString("/"), - UserName: PtrString("power-users"), - UserId: PtrString("AGPA3VLV2U27T6SSLJMDS"), - Arn: PtrString("arn:aws:iam::801795385023:User/power-users"), - CreateDate: PtrTime(time.Now()), + Path: new("/"), + UserName: new("power-users"), + UserId: new("AGPA3VLV2U27T6SSLJMDS"), + Arn: new("arn:aws:iam::801795385023:User/power-users"), + CreateDate: new(time.Now()), }, }, nil } @@ -69,7 +69,7 @@ func (t *TestIAMClient) ListUsers(ctx context.Context, params *iam.ListUsersInpu marker := params.Marker if marker == nil { - marker = PtrString("0") + marker = new("0") } // Get the current page @@ -82,17 +82,17 @@ func (t *TestIAMClient) ListUsers(ctx context.Context, params *iam.ListUsersInpu isTruncated = false marker = nil } else { - marker = PtrString(fmt.Sprint(markerInt)) + marker = new(fmt.Sprint(markerInt)) } return &iam.ListUsersOutput{ Users: []types.User{ { - Path: PtrString("/"), - UserName: PtrString(fmt.Sprintf("user-%v", marker)), - UserId: PtrString("AGPA3VLV2U27T6SSLJMDS"), - Arn: PtrString("arn:aws:iam::801795385023:User/power-users"), - CreateDate: PtrTime(time.Now()), + Path: new("/"), + UserName: new(fmt.Sprintf("user-%v", marker)), + UserId: new("AGPA3VLV2U27T6SSLJMDS"), + Arn: new("arn:aws:iam::801795385023:User/power-users"), + CreateDate: new(time.Now()), }, }, IsTruncated: isTruncated, @@ -104,8 +104,8 @@ func (t *TestIAMClient) ListUserTags(context.Context, *iam.ListUserTagsInput, .. return &iam.ListUserTagsOutput{ Tags: []types.Tag{ { - Key: PtrString("foo"), - Value: PtrString("bar"), + Key: new("foo"), + Value: new("bar"), }, }, IsTruncated: false, @@ -114,7 +114,7 @@ func (t *TestIAMClient) ListUserTags(context.Context, *iam.ListUserTagsInput, .. 
} func TestGetUserGroups(t *testing.T) { - groups, err := getUserGroups(context.Background(), &TestIAMClient{}, PtrString("foo")) + groups, err := getUserGroups(context.Background(), &TestIAMClient{}, new("foo")) if err != nil { t.Error(err) } @@ -168,7 +168,7 @@ func TestUserListFunc(t *testing.T) { func TestUserListTagsFunc(t *testing.T) { tags, err := userListTagsFunc(context.Background(), &UserDetails{ User: &types.User{ - UserName: PtrString("foo"), + UserName: new("foo"), }, }, &TestIAMClient{}) if err != nil { @@ -183,19 +183,19 @@ func TestUserListTagsFunc(t *testing.T) { func TestUserItemMapper(t *testing.T) { details := UserDetails{ User: &types.User{ - Path: PtrString("/"), - UserName: PtrString("power-users"), - UserId: PtrString("AGPA3VLV2U27T6SSLJMDS"), - Arn: PtrString("arn:aws:iam::801795385023:User/power-users"), - CreateDate: PtrTime(time.Now()), + Path: new("/"), + UserName: new("power-users"), + UserId: new("AGPA3VLV2U27T6SSLJMDS"), + Arn: new("arn:aws:iam::801795385023:User/power-users"), + CreateDate: new(time.Now()), }, UserGroups: []types.Group{ { - Arn: PtrString("arn:aws:iam::801795385023:Group/something"), - CreateDate: PtrTime(time.Now()), - GroupId: PtrString("id"), - GroupName: PtrString("name"), - Path: PtrString("/"), + Arn: new("arn:aws:iam::801795385023:Group/something"), + CreateDate: new(time.Now()), + GroupId: new("id"), + GroupName: new("name"), + Path: new("/"), }, }, } diff --git a/aws-source/adapters/integration/apigateway/create.go b/aws-source/adapters/integration/apigateway/create.go index 429d2c68..a152466c 100644 --- a/aws-source/adapters/integration/apigateway/create.go +++ b/aws-source/adapters/integration/apigateway/create.go @@ -9,7 +9,6 @@ import ( "github.com/aws/aws-sdk-go-v2/service/apigateway" "github.com/aws/aws-sdk-go-v2/service/apigateway/types" - "github.com/overmindtech/cli/aws-source/adapters" "github.com/overmindtech/cli/aws-source/adapters/integration" ) @@ -30,8 +29,8 @@ func createRestAPI(ctx 
context.Context, logger *slog.Logger, client *apigateway. } result, err := client.CreateRestApi(ctx, &apigateway.CreateRestApiInput{ - Name: adapters.PtrString(integration.ResourceName(integration.APIGateway, restAPISrc, testID)), - Description: adapters.PtrString("Test Rest API"), + Name: new(integration.ResourceName(integration.APIGateway, restAPISrc, testID)), + Description: new("Test Rest API"), Tags: resourceTags(restAPISrc, testID), }) if err != nil { @@ -60,7 +59,7 @@ func createResource(ctx context.Context, logger *slog.Logger, client *apigateway result, err := client.CreateResource(ctx, &apigateway.CreateResourceInput{ RestApiId: restAPIID, ParentId: parentID, - PathPart: adapters.PtrString(cleanPath(path)), + PathPart: new(cleanPath(path)), }) if err != nil { return nil, err @@ -97,8 +96,8 @@ func createMethod(ctx context.Context, logger *slog.Logger, client *apigateway.C _, err = client.PutMethod(ctx, &apigateway.PutMethodInput{ RestApiId: restAPIID, ResourceId: resourceID, - HttpMethod: adapters.PtrString(method), - AuthorizationType: adapters.PtrString("NONE"), + HttpMethod: new(method), + AuthorizationType: new("NONE"), }) if err != nil { return err @@ -126,8 +125,8 @@ func createMethodResponse(ctx context.Context, logger *slog.Logger, client *apig _, err = client.PutMethodResponse(ctx, &apigateway.PutMethodResponseInput{ RestApiId: restAPIID, ResourceId: resourceID, - HttpMethod: adapters.PtrString(method), - StatusCode: adapters.PtrString(statusCode), + HttpMethod: new(method), + StatusCode: new(statusCode), ResponseModels: map[string]string{ "application/json": "Empty", }, @@ -161,7 +160,7 @@ func createIntegration(ctx context.Context, logger *slog.Logger, client *apigate _, err = client.PutIntegration(ctx, &apigateway.PutIntegrationInput{ RestApiId: restAPIID, ResourceId: resourceID, - HttpMethod: adapters.PtrString(method), + HttpMethod: new(method), Type: "MOCK", }) if err != nil { @@ -188,7 +187,7 @@ func createAPIKey(ctx context.Context, 
logger *slog.Logger, client *apigateway.C } _, err = client.CreateApiKey(ctx, &apigateway.CreateApiKeyInput{ - Name: adapters.PtrString(integration.ResourceName(integration.APIGateway, apiKeySrc, testID)), + Name: new(integration.ResourceName(integration.APIGateway, apiKeySrc, testID)), Tags: resourceTags(apiKeySrc, testID), Enabled: true, }) @@ -218,10 +217,10 @@ func createAuthorizer(ctx context.Context, logger *slog.Logger, client *apigatew identitySource := "method.request.header.Authorization" _, err = client.CreateAuthorizer(ctx, &apigateway.CreateAuthorizerInput{ RestApiId: &restAPIID, - Name: adapters.PtrString(integration.ResourceName(integration.APIGateway, authorizerSrc, testID)), + Name: new(integration.ResourceName(integration.APIGateway, authorizerSrc, testID)), Type: types.AuthorizerTypeToken, IdentitySource: &identitySource, - AuthorizerUri: adapters.PtrString("arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/arn:aws:lambda:us-east-1:123456789012:function:auth-function/invocations"), + AuthorizerUri: new("arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/arn:aws:lambda:us-east-1:123456789012:function:auth-function/invocations"), }) if err != nil { return err @@ -248,7 +247,7 @@ func createDeployment(ctx context.Context, logger *slog.Logger, client *apigatew resp, err := client.CreateDeployment(ctx, &apigateway.CreateDeploymentInput{ RestApiId: &restAPIID, - Description: adapters.PtrString("test-deployment"), + Description: new("test-deployment"), }) if err != nil { return nil, err @@ -307,8 +306,8 @@ func createModel(ctx context.Context, logger *slog.Logger, client *apigateway.Cl _, err = client.CreateModel(ctx, &apigateway.CreateModelInput{ RestApiId: &restAPIID, Name: &modelName, - Schema: adapters.PtrString("{}"), - ContentType: adapters.PtrString("application/json"), + Schema: new("{}"), + ContentType: new("application/json"), }) if err != nil { return err diff --git a/aws-source/adapters/integration/apigateway/delete.go 
b/aws-source/adapters/integration/apigateway/delete.go index c41b285b..e5ec7f11 100644 --- a/aws-source/adapters/integration/apigateway/delete.go +++ b/aws-source/adapters/integration/apigateway/delete.go @@ -4,13 +4,11 @@ import ( "context" "github.com/aws/aws-sdk-go-v2/service/apigateway" - - "github.com/overmindtech/cli/aws-source/adapters" ) func deleteRestAPI(ctx context.Context, client *apigateway.Client, restAPIID string) error { _, err := client.DeleteRestApi(ctx, &apigateway.DeleteRestApiInput{ - RestApiId: adapters.PtrString(restAPIID), + RestApiId: new(restAPIID), }) return err diff --git a/aws-source/adapters/integration/ec2-transit-gateway/setup.go b/aws-source/adapters/integration/ec2-transit-gateway/setup.go index ec86afd7..a32305ef 100644 --- a/aws-source/adapters/integration/ec2-transit-gateway/setup.go +++ b/aws-source/adapters/integration/ec2-transit-gateway/setup.go @@ -19,7 +19,7 @@ const integrationTestName = "integration-test" // Package-level state set by Setup and used by tests and Teardown. 
var ( - createdTransitGatewayID string + createdTransitGatewayID string createdRouteTableID string createdVpcID string createdSubnetID string @@ -43,14 +43,14 @@ func Setup(t *testing.T) { func setup(ctx context.Context, logger *slog.Logger, client *ec2.Client) error { out, err := client.CreateTransitGateway(ctx, &ec2.CreateTransitGatewayInput{ - Description: ptr("Overmind " + integrationTestName), + Description: new("Overmind " + integrationTestName), TagSpecifications: []types.TagSpecification{ { ResourceType: types.ResourceTypeTransitGateway, Tags: []types.Tag{ - {Key: ptr(integration.TagTestKey), Value: ptr(integration.TagTestValue)}, - {Key: ptr(integration.TagTestIDKey), Value: ptr(integrationTestName)}, - {Key: ptr("Name"), Value: ptr(integrationTestName)}, + {Key: new(integration.TagTestKey), Value: new(integration.TagTestValue)}, + {Key: new(integration.TagTestIDKey), Value: new(integrationTestName)}, + {Key: new("Name"), Value: new(integrationTestName)}, }, }, }, @@ -99,7 +99,7 @@ func setup(ctx context.Context, logger *slog.Logger, client *ec2.Client) error { // Resolve default route table for this TGW (needed for attachment and static route). rtOut, err := client.DescribeTransitGatewayRouteTables(ctx, &ec2.DescribeTransitGatewayRouteTablesInput{ Filters: []types.Filter{ - {Name: ptr("transit-gateway-id"), Values: []string{tgwID}}, + {Name: new("transit-gateway-id"), Values: []string{tgwID}}, }, }) if err != nil { @@ -118,14 +118,14 @@ func setup(ctx context.Context, logger *slog.Logger, client *ec2.Client) error { // Create VPC and subnet so we can create a VPC attachment (association + propagation + route target). 
vpcOut, err := client.CreateVpc(ctx, &ec2.CreateVpcInput{ - CidrBlock: ptr("10.99.0.0/16"), + CidrBlock: new("10.99.0.0/16"), TagSpecifications: []types.TagSpecification{ { ResourceType: types.ResourceTypeVpc, Tags: []types.Tag{ - {Key: ptr(integration.TagTestKey), Value: ptr(integration.TagTestValue)}, - {Key: ptr(integration.TagTestIDKey), Value: ptr(integrationTestName)}, - {Key: ptr("Name"), Value: ptr(integrationTestName)}, + {Key: new(integration.TagTestKey), Value: new(integration.TagTestValue)}, + {Key: new(integration.TagTestIDKey), Value: new(integrationTestName)}, + {Key: new("Name"), Value: new(integrationTestName)}, }, }, }, @@ -142,7 +142,7 @@ func setup(ctx context.Context, logger *slog.Logger, client *ec2.Client) error { // Pick one AZ for the subnet. azOut, err := client.DescribeAvailabilityZones(ctx, &ec2.DescribeAvailabilityZonesInput{ Filters: []types.Filter{ - {Name: ptr("state"), Values: []string{"available"}}, + {Name: new("state"), Values: []string{"available"}}, }, }) if err != nil || len(azOut.AvailabilityZones) == 0 { @@ -152,15 +152,15 @@ func setup(ctx context.Context, logger *slog.Logger, client *ec2.Client) error { subOut, err := client.CreateSubnet(ctx, &ec2.CreateSubnetInput{ VpcId: &createdVpcID, - CidrBlock: ptr("10.99.1.0/24"), + CidrBlock: new("10.99.1.0/24"), AvailabilityZone: az, TagSpecifications: []types.TagSpecification{ { ResourceType: types.ResourceTypeSubnet, Tags: []types.Tag{ - {Key: ptr(integration.TagTestKey), Value: ptr(integration.TagTestValue)}, - {Key: ptr(integration.TagTestIDKey), Value: ptr(integrationTestName)}, - {Key: ptr("Name"), Value: ptr(integrationTestName)}, + {Key: new(integration.TagTestKey), Value: new(integration.TagTestValue)}, + {Key: new(integration.TagTestIDKey), Value: new(integrationTestName)}, + {Key: new("Name"), Value: new(integrationTestName)}, }, }, }, @@ -182,9 +182,9 @@ func setup(ctx context.Context, logger *slog.Logger, client *ec2.Client) error { { ResourceType: 
types.ResourceTypeTransitGatewayAttachment, Tags: []types.Tag{ - {Key: ptr(integration.TagTestKey), Value: ptr(integration.TagTestValue)}, - {Key: ptr(integration.TagTestIDKey), Value: ptr(integrationTestName)}, - {Key: ptr("Name"), Value: ptr(integrationTestName)}, + {Key: new(integration.TagTestKey), Value: new(integration.TagTestValue)}, + {Key: new(integration.TagTestIDKey), Value: new(integrationTestName)}, + {Key: new("Name"), Value: new(integrationTestName)}, }, }, }, @@ -239,5 +239,3 @@ func setup(ctx context.Context, logger *slog.Logger, client *ec2.Client) error { return nil } - -func ptr(s string) *string { return &s } diff --git a/aws-source/adapters/integration/ec2-transit-gateway/teardown.go b/aws-source/adapters/integration/ec2-transit-gateway/teardown.go index 61e94270..fd73d71d 100644 --- a/aws-source/adapters/integration/ec2-transit-gateway/teardown.go +++ b/aws-source/adapters/integration/ec2-transit-gateway/teardown.go @@ -15,8 +15,8 @@ import ( // integrationTestTagFilters returns filters to discover resources created by this suite. func integrationTestTagFilters() []types.Filter { return []types.Filter{ - {Name: ptr("tag:" + integration.TagTestKey), Values: []string{integration.TagTestValue}}, - {Name: ptr("tag:" + integration.TagTestIDKey), Values: []string{integrationTestName}}, + {Name: new("tag:" + integration.TagTestKey), Values: []string{integration.TagTestValue}}, + {Name: new("tag:" + integration.TagTestIDKey), Values: []string{integrationTestName}}, } } @@ -81,7 +81,7 @@ func teardown(ctx context.Context, logger *slog.Logger, client *ec2.Client) erro // Resolve default route table and delete our static route. 
rtOut, err := client.DescribeTransitGatewayRouteTables(ctx, &ec2.DescribeTransitGatewayRouteTablesInput{ - Filters: []types.Filter{{Name: ptr("transit-gateway-id"), Values: []string{tgwID}}}, + Filters: []types.Filter{{Name: new("transit-gateway-id"), Values: []string{tgwID}}}, }) if err != nil { return err @@ -103,7 +103,7 @@ func teardown(ctx context.Context, logger *slog.Logger, client *ec2.Client) erro // List VPC attachments for this TGW and delete each. attachOut, err := client.DescribeTransitGatewayVpcAttachments(ctx, &ec2.DescribeTransitGatewayVpcAttachmentsInput{ - Filters: []types.Filter{{Name: ptr("transit-gateway-id"), Values: []string{tgwID}}}, + Filters: []types.Filter{{Name: new("transit-gateway-id"), Values: []string{tgwID}}}, }) if err != nil { return err diff --git a/aws-source/adapters/integration/kms/create.go b/aws-source/adapters/integration/kms/create.go index 9f994485..642b18ad 100644 --- a/aws-source/adapters/integration/kms/create.go +++ b/aws-source/adapters/integration/kms/create.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "log/slog" + "slices" "github.com/aws/aws-sdk-go-v2/service/kms" "github.com/aws/aws-sdk-go-v2/service/kms/types" @@ -48,11 +49,9 @@ func createAlias(ctx context.Context, logger *slog.Logger, client *kms.Client, k } } - for _, aName := range aliasNames { - if aName == aliasName { - logger.InfoContext(ctx, "KMS alias already exists", "alias", aliasName, "keyID", keyID) - return nil - } + if slices.Contains(aliasNames, aliasName) { + logger.InfoContext(ctx, "KMS alias already exists", "alias", aliasName, "keyID", keyID) + return nil } _, err = client.CreateAlias(ctx, &kms.CreateAliasInput{ diff --git a/aws-source/adapters/integration/kms/kms_test.go b/aws-source/adapters/integration/kms/kms_test.go index 750a7730..626c33db 100644 --- a/aws-source/adapters/integration/kms/kms_test.go +++ b/aws-source/adapters/integration/kms/kms_test.go @@ -148,7 +148,7 @@ func KMS(t *testing.T) { } // Get the alias for this key - var 
aliasUniqueAttributeValue interface{} + var aliasUniqueAttributeValue any for _, alias := range sdpListAliases { // Check if the alias is for the key diff --git a/aws-source/adapters/integration/ssm/main_test.go b/aws-source/adapters/integration/ssm/main_test.go index ab13915c..81492ce8 100644 --- a/aws-source/adapters/integration/ssm/main_test.go +++ b/aws-source/adapters/integration/ssm/main_test.go @@ -184,10 +184,7 @@ func TestIntegrationSSM(t *testing.T) { // Delete parameters in batches of 100 for i := 0; i < len(output.Parameters); i += 100 { - end := i + 100 - if end > len(output.Parameters) { - end = len(output.Parameters) - } + end := min(i+100, len(output.Parameters)) batch := output.Parameters[i:end] names := make([]string, len(batch)) diff --git a/aws-source/adapters/kms-alias_test.go b/aws-source/adapters/kms-alias_test.go index 3e99443a..e4e86ba1 100644 --- a/aws-source/adapters/kms-alias_test.go +++ b/aws-source/adapters/kms-alias_test.go @@ -15,11 +15,11 @@ func TestAliasOutputMapper(t *testing.T) { output := &kms.ListAliasesOutput{ Aliases: []types.AliasListEntry{ { - AliasName: PtrString("alias/test-key"), - TargetKeyId: PtrString("cf68415c-f4ae-48f2-87a7-3b52ce"), - AliasArn: PtrString("arn:aws:kms:us-west-2:123456789012:alias/test-key"), - CreationDate: PtrTime(time.Now()), - LastUpdatedDate: PtrTime(time.Now()), + AliasName: new("alias/test-key"), + TargetKeyId: new("cf68415c-f4ae-48f2-87a7-3b52ce"), + AliasArn: new("arn:aws:kms:us-west-2:123456789012:alias/test-key"), + CreationDate: new(time.Now()), + LastUpdatedDate: new(time.Now()), }, }, } diff --git a/aws-source/adapters/kms-custom-key-store_test.go b/aws-source/adapters/kms-custom-key-store_test.go index 19c2d834..849b740d 100644 --- a/aws-source/adapters/kms-custom-key-store_test.go +++ b/aws-source/adapters/kms-custom-key-store_test.go @@ -17,12 +17,12 @@ func TestCustomKeyStoreOutputMapper(t *testing.T) { output := &kms.DescribeCustomKeyStoresOutput{ CustomKeyStores: 
[]types.CustomKeyStoresListEntry{ { - CustomKeyStoreId: PtrString("custom-key-store-1"), - CreationDate: PtrTime(time.Now()), - CloudHsmClusterId: PtrString("cloud-hsm-cluster-1"), + CustomKeyStoreId: new("custom-key-store-1"), + CreationDate: new(time.Now()), + CloudHsmClusterId: new("cloud-hsm-cluster-1"), ConnectionState: types.ConnectionStateTypeConnected, - TrustAnchorCertificate: PtrString("-----BEGIN CERTIFICATE-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwJ1z\n-----END CERTIFICATE-----"), - CustomKeyStoreName: PtrString("key-store-1"), + TrustAnchorCertificate: new("-----BEGIN CERTIFICATE-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwJ1z\n-----END CERTIFICATE-----"), + CustomKeyStoreName: new("key-store-1"), }, }, } @@ -82,7 +82,7 @@ func TestHealthState(t *testing.T) { output: &kms.DescribeCustomKeyStoresOutput{ CustomKeyStores: []types.CustomKeyStoresListEntry{ { - CustomKeyStoreId: PtrString("custom-key-store-1"), + CustomKeyStoreId: new("custom-key-store-1"), ConnectionState: types.ConnectionStateTypeConnected, }, }, @@ -94,7 +94,7 @@ func TestHealthState(t *testing.T) { output: &kms.DescribeCustomKeyStoresOutput{ CustomKeyStores: []types.CustomKeyStoresListEntry{ { - CustomKeyStoreId: PtrString("custom-key-store-1"), + CustomKeyStoreId: new("custom-key-store-1"), ConnectionState: types.ConnectionStateTypeConnecting, }, }, @@ -106,7 +106,7 @@ func TestHealthState(t *testing.T) { output: &kms.DescribeCustomKeyStoresOutput{ CustomKeyStores: []types.CustomKeyStoresListEntry{ { - CustomKeyStoreId: PtrString("custom-key-store-1"), + CustomKeyStoreId: new("custom-key-store-1"), ConnectionState: types.ConnectionStateTypeDisconnected, }, }, @@ -118,7 +118,7 @@ func TestHealthState(t *testing.T) { output: &kms.DescribeCustomKeyStoresOutput{ CustomKeyStores: []types.CustomKeyStoresListEntry{ { - CustomKeyStoreId: PtrString("custom-key-store-1"), + CustomKeyStoreId: new("custom-key-store-1"), ConnectionState: types.ConnectionStateTypeFailed, }, }, @@ 
-130,7 +130,7 @@ func TestHealthState(t *testing.T) { output: &kms.DescribeCustomKeyStoresOutput{ CustomKeyStores: []types.CustomKeyStoresListEntry{ { - CustomKeyStoreId: PtrString("custom-key-store-1"), + CustomKeyStoreId: new("custom-key-store-1"), ConnectionState: "unknown-state", }, }, diff --git a/aws-source/adapters/kms-grant.go b/aws-source/adapters/kms-grant.go index 2d3e90cc..1804cdb9 100644 --- a/aws-source/adapters/kms-grant.go +++ b/aws-source/adapters/kms-grant.go @@ -173,7 +173,7 @@ func NewKMSGrantAdapter(client *kms.Client, accountID string, region string, cac AccountID: accountID, Region: region, AdapterMetadata: grantAdapterMetadata, - cache: cache, + cache: cache, DescribeFunc: func(ctx context.Context, client *kms.Client, input *kms.ListGrantsInput) (*kms.ListGrantsOutput, error) { return client.ListGrants(ctx, input) }, @@ -189,8 +189,8 @@ func NewKMSGrantAdapter(client *kms.Client, accountID string, region string, cac } return &kms.ListGrantsInput{ - KeyId: &tmp[0], // keyID - GrantId: PtrString(strings.Join(tmp[1:], "/")), // grantId + KeyId: &tmp[0], // keyID + GrantId: new(strings.Join(tmp[1:], "/")), // grantId }, nil }, UseListForGet: true, diff --git a/aws-source/adapters/kms-grant_test.go b/aws-source/adapters/kms-grant_test.go index c2f8dbf9..d824cd10 100644 --- a/aws-source/adapters/kms-grant_test.go +++ b/aws-source/adapters/kms-grant_test.go @@ -130,14 +130,14 @@ func TestGrantOutputMapper(t *testing.T) { "aws:dynamodb:tableName": "Services", }, }, - IssuingAccount: PtrString("arn:aws:iam::123456789012:root"), - Name: PtrString("8276b9a6-6cf0-46f1-b2f0-7993a7f8c89a"), + IssuingAccount: new("arn:aws:iam::123456789012:root"), + Name: new("8276b9a6-6cf0-46f1-b2f0-7993a7f8c89a"), Operations: []types.GrantOperation{"Decrypt", "Encrypt", "GenerateDataKey", "ReEncryptFrom", "ReEncryptTo", "RetireGrant", "DescribeKey"}, - GrantId: PtrString("1667b97d27cf748cf05b487217dd4179526c949d14fb3903858e25193253fe59"), - KeyId: 
PtrString("arn:aws:kms:us-west-2:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab"), - RetiringPrincipal: PtrString("arn:aws:iam::account:role/role-name-with-path"), - GranteePrincipal: PtrString("arn:aws:iam::account:user/user-name-with-path"), - CreationDate: PtrTime(time.Now()), + GrantId: new("1667b97d27cf748cf05b487217dd4179526c949d14fb3903858e25193253fe59"), + KeyId: new("arn:aws:kms:us-west-2:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab"), + RetiringPrincipal: new("arn:aws:iam::account:role/role-name-with-path"), + GranteePrincipal: new("arn:aws:iam::account:user/user-name-with-path"), + CreationDate: new(time.Now()), }, }, } @@ -197,15 +197,15 @@ func TestGrantOutputMapperWithServicePrincipal(t *testing.T) { "aws:dynamodb:tableName": "Services", }, }, - IssuingAccount: PtrString("arn:aws:iam::123456789012:root"), - Name: PtrString("8276b9a6-6cf0-46f1-b2f0-7993a7f8c89a"), + IssuingAccount: new("arn:aws:iam::123456789012:root"), + Name: new("8276b9a6-6cf0-46f1-b2f0-7993a7f8c89a"), Operations: []types.GrantOperation{"Decrypt", "Encrypt"}, - GrantId: PtrString("1667b97d27cf748cf05b487217dd4179526c949d14fb3903858e25193253fe59"), - KeyId: PtrString("arn:aws:kms:us-west-2:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab"), + GrantId: new("1667b97d27cf748cf05b487217dd4179526c949d14fb3903858e25193253fe59"), + KeyId: new("arn:aws:kms:us-west-2:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab"), // These are service principals, not ARNs - they should be skipped - RetiringPrincipal: PtrString("dynamodb.us-west-2.amazonaws.com"), - GranteePrincipal: PtrString("rds.eu-west-2.amazonaws.com"), - CreationDate: PtrTime(time.Now()), + RetiringPrincipal: new("dynamodb.us-west-2.amazonaws.com"), + GranteePrincipal: new("rds.eu-west-2.amazonaws.com"), + CreationDate: new(time.Now()), }, }, } diff --git a/aws-source/adapters/kms-key-policy_test.go b/aws-source/adapters/kms-key-policy_test.go index be46f433..f0b98a2e 100644 --- 
a/aws-source/adapters/kms-key-policy_test.go +++ b/aws-source/adapters/kms-key-policy_test.go @@ -43,7 +43,7 @@ type mockKeyPolicyClient struct{} func (m *mockKeyPolicyClient) GetKeyPolicy(ctx context.Context, params *kms.GetKeyPolicyInput, optFns ...func(*kms.Options)) (*kms.GetKeyPolicyOutput, error) { return &kms.GetKeyPolicyOutput{ - Policy: PtrString(`{ + Policy: new(`{ "Version" : "2012-10-17", "Id" : "key-default-1", "Statement" : [ @@ -67,7 +67,7 @@ func (m *mockKeyPolicyClient) GetKeyPolicy(ctx context.Context, params *kms.GetK } ] }`), - PolicyName: PtrString("default"), + PolicyName: new("default"), }, nil } @@ -82,7 +82,7 @@ func TestGetKeyPolicyFunc(t *testing.T) { cli := &mockKeyPolicyClient{} item, err := getKeyPolicyFunc(ctx, cli, "scope", &kms.GetKeyPolicyInput{ - KeyId: PtrString("1234abcd-12ab-34cd-56ef-1234567890ab"), + KeyId: new("1234abcd-12ab-34cd-56ef-1234567890ab"), }) if err != nil { t.Fatal(err) diff --git a/aws-source/adapters/kms-key_test.go b/aws-source/adapters/kms-key_test.go index 9927110e..03e0f887 100644 --- a/aws-source/adapters/kms-key_test.go +++ b/aws-source/adapters/kms-key_test.go @@ -15,12 +15,12 @@ type kmsTestClient struct{} func (t kmsTestClient) DescribeKey(ctx context.Context, params *kms.DescribeKeyInput, optFns ...func(*kms.Options)) (*kms.DescribeKeyOutput, error) { return &kms.DescribeKeyOutput{ KeyMetadata: &types.KeyMetadata{ - AWSAccountId: PtrString("846764612917"), - KeyId: PtrString("b8a9477d-836c-491f-857e-07937918959b"), - Arn: PtrString("arn:aws:kms:us-west-2:846764612917:key/b8a9477d-836c-491f-857e-07937918959b"), - CreationDate: PtrTime(time.Date(2017, 6, 30, 21, 44, 32, 140000000, time.UTC)), + AWSAccountId: new("846764612917"), + KeyId: new("b8a9477d-836c-491f-857e-07937918959b"), + Arn: new("arn:aws:kms:us-west-2:846764612917:key/b8a9477d-836c-491f-857e-07937918959b"), + CreationDate: new(time.Date(2017, 6, 30, 21, 44, 32, 140000000, time.UTC)), Enabled: true, - Description: PtrString("Default KMS key 
that protects my S3 objects when no other key is defined"), + Description: new("Default KMS key that protects my S3 objects when no other key is defined"), KeyUsage: types.KeyUsageTypeEncryptDecrypt, KeyState: types.KeyStateEnabled, Origin: types.OriginTypeAwsKms, @@ -37,16 +37,16 @@ func (t kmsTestClient) ListKeys(context.Context, *kms.ListKeysInput, ...func(*km return &kms.ListKeysOutput{ Keys: []types.KeyListEntry{ { - KeyArn: PtrString("arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"), - KeyId: PtrString("1234abcd-12ab-34cd-56ef-1234567890ab"), + KeyArn: new("arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"), + KeyId: new("1234abcd-12ab-34cd-56ef-1234567890ab"), }, { - KeyArn: PtrString("arn:aws:kms:us-west-2:111122223333:key/0987dcba-09fe-87dc-65ba-ab0987654321"), - KeyId: PtrString("0987dcba-09fe-87dc-65ba-ab0987654321"), + KeyArn: new("arn:aws:kms:us-west-2:111122223333:key/0987dcba-09fe-87dc-65ba-ab0987654321"), + KeyId: new("0987dcba-09fe-87dc-65ba-ab0987654321"), }, { - KeyArn: PtrString("arn:aws:kms:us-east-2:111122223333:key/1a2b3c4d-5e6f-1a2b-3c4d-5e6f1a2b3c4d"), - KeyId: PtrString("1a2b3c4d-5e6f-1a2b-3c4d-5e6f1a2b3c4d"), + KeyArn: new("arn:aws:kms:us-east-2:111122223333:key/1a2b3c4d-5e6f-1a2b-3c4d-5e6f1a2b3c4d"), + KeyId: new("1a2b3c4d-5e6f-1a2b-3c4d-5e6f1a2b3c4d"), }, }, }, nil @@ -56,16 +56,16 @@ func (t kmsTestClient) ListResourceTags(context.Context, *kms.ListResourceTagsIn return &kms.ListResourceTagsOutput{ Tags: []types.Tag{ { - TagKey: PtrString("Dept"), - TagValue: PtrString("IT"), + TagKey: new("Dept"), + TagValue: new("IT"), }, { - TagKey: PtrString("Purpose"), - TagValue: PtrString("Test"), + TagKey: new("Purpose"), + TagValue: new("Test"), }, { - TagKey: PtrString("Name"), - TagValue: PtrString("Test"), + TagKey: new("Name"), + TagValue: new("Test"), }, }, }, nil @@ -76,7 +76,7 @@ func TestKMSGetFunc(t *testing.T) { cli := kmsTestClient{} item, err := kmsKeyGetFunc(ctx, cli, 
"scope", &kms.DescribeKeyInput{ - KeyId: PtrString("1234abcd-12ab-34cd-56ef-1234567890ab"), + KeyId: new("1234abcd-12ab-34cd-56ef-1234567890ab"), }) if err != nil { t.Fatal(err) diff --git a/aws-source/adapters/lambda-event-source-mapping_test.go b/aws-source/adapters/lambda-event-source-mapping_test.go index 9a9206ad..0959b810 100644 --- a/aws-source/adapters/lambda-event-source-mapping_test.go +++ b/aws-source/adapters/lambda-event-source-mapping_test.go @@ -15,22 +15,22 @@ type TestLambdaEventSourceMappingClient struct{} func (t *TestLambdaEventSourceMappingClient) ListEventSourceMappings(ctx context.Context, params *lambda.ListEventSourceMappingsInput, optFns ...func(*lambda.Options)) (*lambda.ListEventSourceMappingsOutput, error) { allMappings := []types.EventSourceMappingConfiguration{ { - UUID: stringPtr("test-uuid-1"), - FunctionArn: stringPtr("arn:aws:lambda:us-east-1:123456789012:function:test-function"), - EventSourceArn: stringPtr("arn:aws:sqs:us-east-1:123456789012:test-queue"), - State: stringPtr("Enabled"), + UUID: new("test-uuid-1"), + FunctionArn: new("arn:aws:lambda:us-east-1:123456789012:function:test-function"), + EventSourceArn: new("arn:aws:sqs:us-east-1:123456789012:test-queue"), + State: new("Enabled"), }, { - UUID: stringPtr("test-uuid-2"), - FunctionArn: stringPtr("arn:aws:lambda:us-east-1:123456789012:function:test-function-2"), - EventSourceArn: stringPtr("arn:aws:dynamodb:us-east-1:123456789012:table/test-table"), - State: stringPtr("Creating"), + UUID: new("test-uuid-2"), + FunctionArn: new("arn:aws:lambda:us-east-1:123456789012:function:test-function-2"), + EventSourceArn: new("arn:aws:dynamodb:us-east-1:123456789012:table/test-table"), + State: new("Creating"), }, { - UUID: stringPtr("test-uuid-3"), - FunctionArn: stringPtr("arn:aws:lambda:us-east-1:123456789012:function:test-function-3"), - EventSourceArn: stringPtr("arn:aws:rds:us-east-1:123456789012:cluster:test-docdb-cluster"), - State: stringPtr("Enabled"), + UUID: 
new("test-uuid-3"), + FunctionArn: new("arn:aws:lambda:us-east-1:123456789012:function:test-function-3"), + EventSourceArn: new("arn:aws:rds:us-east-1:123456789012:cluster:test-docdb-cluster"), + State: new("Enabled"), }, } @@ -60,34 +60,30 @@ func (t *TestLambdaEventSourceMappingClient) GetEventSourceMapping(ctx context.C switch *params.UUID { case "test-uuid-1": return &lambda.GetEventSourceMappingOutput{ - UUID: stringPtr("test-uuid-1"), - FunctionArn: stringPtr("arn:aws:lambda:us-east-1:123456789012:function:test-function"), - EventSourceArn: stringPtr("arn:aws:sqs:us-east-1:123456789012:test-queue"), - State: stringPtr("Enabled"), + UUID: new("test-uuid-1"), + FunctionArn: new("arn:aws:lambda:us-east-1:123456789012:function:test-function"), + EventSourceArn: new("arn:aws:sqs:us-east-1:123456789012:test-queue"), + State: new("Enabled"), }, nil case "test-uuid-2": return &lambda.GetEventSourceMappingOutput{ - UUID: stringPtr("test-uuid-2"), - FunctionArn: stringPtr("arn:aws:lambda:us-east-1:123456789012:function:test-function-2"), - EventSourceArn: stringPtr("arn:aws:dynamodb:us-east-1:123456789012:table/test-table"), - State: stringPtr("Creating"), + UUID: new("test-uuid-2"), + FunctionArn: new("arn:aws:lambda:us-east-1:123456789012:function:test-function-2"), + EventSourceArn: new("arn:aws:dynamodb:us-east-1:123456789012:table/test-table"), + State: new("Creating"), }, nil case "test-uuid-3": return &lambda.GetEventSourceMappingOutput{ - UUID: stringPtr("test-uuid-3"), - FunctionArn: stringPtr("arn:aws:lambda:us-east-1:123456789012:function:test-function-3"), - EventSourceArn: stringPtr("arn:aws:rds:us-east-1:123456789012:cluster:test-docdb-cluster"), - State: stringPtr("Enabled"), + UUID: new("test-uuid-3"), + FunctionArn: new("arn:aws:lambda:us-east-1:123456789012:function:test-function-3"), + EventSourceArn: new("arn:aws:rds:us-east-1:123456789012:cluster:test-docdb-cluster"), + State: new("Enabled"), }, nil default: return nil, 
&types.ResourceNotFoundException{} } } -func stringPtr(s string) *string { - return &s -} - func TestLambdaEventSourceMappingAdapter(t *testing.T) { adapter := NewLambdaEventSourceMappingAdapter(&TestLambdaEventSourceMappingClient{}, "123456789012", "us-east-1", sdpcache.NewNoOpCache()) @@ -150,10 +146,10 @@ func TestLambdaEventSourceMappingItemMapper(t *testing.T) { // Test mapping with SQS event source awsItem := &types.EventSourceMappingConfiguration{ - UUID: stringPtr("test-uuid-1"), - FunctionArn: stringPtr("arn:aws:lambda:us-east-1:123456789012:function:test-function"), - EventSourceArn: stringPtr("arn:aws:sqs:us-east-1:123456789012:test-queue"), - State: stringPtr("Enabled"), + UUID: new("test-uuid-1"), + FunctionArn: new("arn:aws:lambda:us-east-1:123456789012:function:test-function"), + EventSourceArn: new("arn:aws:sqs:us-east-1:123456789012:test-queue"), + State: new("Enabled"), } item, err := adapter.ItemMapper("test-uuid-1", "123456789012.us-east-1", awsItem) @@ -213,10 +209,10 @@ func TestLambdaEventSourceMappingItemMapperWithDynamoDB(t *testing.T) { // Test mapping with DynamoDB event source awsItem := &types.EventSourceMappingConfiguration{ - UUID: stringPtr("test-uuid-2"), - FunctionArn: stringPtr("arn:aws:lambda:us-east-1:123456789012:function:test-function-2"), - EventSourceArn: stringPtr("arn:aws:dynamodb:us-east-1:123456789012:table/test-table"), - State: stringPtr("Creating"), + UUID: new("test-uuid-2"), + FunctionArn: new("arn:aws:lambda:us-east-1:123456789012:function:test-function-2"), + EventSourceArn: new("arn:aws:dynamodb:us-east-1:123456789012:table/test-table"), + State: new("Creating"), } item, err := adapter.ItemMapper("test-uuid-2", "123456789012.us-east-1", awsItem) @@ -246,10 +242,10 @@ func TestLambdaEventSourceMappingItemMapperWithRDS(t *testing.T) { // Test mapping with RDS/DocumentDB event source awsItem := &types.EventSourceMappingConfiguration{ - UUID: stringPtr("test-uuid-3"), - FunctionArn: 
stringPtr("arn:aws:lambda:us-east-1:123456789012:function:test-function-3"), - EventSourceArn: stringPtr("arn:aws:rds:us-east-1:123456789012:cluster:test-docdb-cluster"), - State: stringPtr("Enabled"), + UUID: new("test-uuid-3"), + FunctionArn: new("arn:aws:lambda:us-east-1:123456789012:function:test-function-3"), + EventSourceArn: new("arn:aws:rds:us-east-1:123456789012:cluster:test-docdb-cluster"), + State: new("Enabled"), } item, err := adapter.ItemMapper("test-uuid-3", "123456789012.us-east-1", awsItem) diff --git a/aws-source/adapters/lambda-function_test.go b/aws-source/adapters/lambda-function_test.go index 8a444c81..df130bcc 100644 --- a/aws-source/adapters/lambda-function_test.go +++ b/aws-source/adapters/lambda-function_test.go @@ -12,18 +12,18 @@ import ( ) var testFuncConfig = &types.FunctionConfiguration{ - FunctionName: PtrString("aws-controltower-NotificationForwarder"), - FunctionArn: PtrString("arn:aws:lambda:eu-west-2:052392120703:function:aws-controltower-NotificationForwarder"), + FunctionName: new("aws-controltower-NotificationForwarder"), + FunctionArn: new("arn:aws:lambda:eu-west-2:052392120703:function:aws-controltower-NotificationForwarder"), Runtime: types.RuntimePython39, - Role: PtrString("arn:aws:iam::052392120703:role/aws-controltower-ForwardSnsNotificationRole"), // link - Handler: PtrString("index.lambda_handler"), + Role: new("arn:aws:iam::052392120703:role/aws-controltower-ForwardSnsNotificationRole"), // link + Handler: new("index.lambda_handler"), CodeSize: 473, - Description: PtrString("SNS message forwarding function for aggregating account notifications."), - Timeout: PtrInt32(60), - MemorySize: PtrInt32(128), - LastModified: PtrString("2022-12-13T15:22:48.157+0000"), - CodeSha256: PtrString("3zU7iYiZektHRaog6qOFvv34ggadB56rd/UMjnYms6A="), - Version: PtrString("$LATEST"), + Description: new("SNS message forwarding function for aggregating account notifications."), + Timeout: new(int32(60)), + MemorySize: new(int32(128)), + 
LastModified: new("2022-12-13T15:22:48.157+0000"), + CodeSha256: new("3zU7iYiZektHRaog6qOFvv34ggadB56rd/UMjnYms6A="), + Version: new("$LATEST"), Environment: &types.EnvironmentResponse{ Variables: map[string]string{ "sns_arn": "arn:aws:sns:eu-west-2:347195421325:aws-controltower-AggregateSecurityNotifications", @@ -32,7 +32,7 @@ var testFuncConfig = &types.FunctionConfiguration{ TracingConfig: &types.TracingConfigResponse{ Mode: types.TracingModePassThrough, }, - RevisionId: PtrString("b00dd2e6-eec3-48b0-abf1-f84406e00a3e"), + RevisionId: new("b00dd2e6-eec3-48b0-abf1-f84406e00a3e"), State: types.StateActive, LastUpdateStatus: types.LastUpdateStatusSuccessful, PackageType: types.PackageTypeZip, @@ -40,47 +40,47 @@ var testFuncConfig = &types.FunctionConfiguration{ types.ArchitectureX8664, }, EphemeralStorage: &types.EphemeralStorage{ - Size: PtrInt32(512), + Size: new(int32(512)), }, DeadLetterConfig: &types.DeadLetterConfig{ - TargetArn: PtrString("arn:aws:sns:us-east-2:444455556666:MyTopic"), // links + TargetArn: new("arn:aws:sns:us-east-2:444455556666:MyTopic"), // links }, FileSystemConfigs: []types.FileSystemConfig{ { - Arn: PtrString("arn:aws:service:region:account:type/id"), // links - LocalMountPath: PtrString("/config"), + Arn: new("arn:aws:service:region:account:type/id"), // links + LocalMountPath: new("/config"), }, }, ImageConfigResponse: &types.ImageConfigResponse{ Error: &types.ImageConfigError{ - ErrorCode: PtrString("500"), - Message: PtrString("borked"), + ErrorCode: new("500"), + Message: new("borked"), }, ImageConfig: &types.ImageConfig{ Command: []string{"echo", "foo"}, EntryPoint: []string{"/bin"}, - WorkingDirectory: PtrString("/"), + WorkingDirectory: new("/"), }, }, - KMSKeyArn: PtrString("arn:aws:service:region:account:type/id"), // link - LastUpdateStatusReason: PtrString("reason"), + KMSKeyArn: new("arn:aws:service:region:account:type/id"), // link + LastUpdateStatusReason: new("reason"), LastUpdateStatusReasonCode: 
types.LastUpdateStatusReasonCodeDisabledKMSKey, Layers: []types.Layer{ { - Arn: PtrString("arn:aws:service:region:account:layer:name:version"), // link + Arn: new("arn:aws:service:region:account:layer:name:version"), // link CodeSize: 128, - SigningJobArn: PtrString("arn:aws:service:region:account:type/id"), // link - SigningProfileVersionArn: PtrString("arn:aws:service:region:account:type/id"), // link + SigningJobArn: new("arn:aws:service:region:account:type/id"), // link + SigningProfileVersionArn: new("arn:aws:service:region:account:type/id"), // link }, }, - MasterArn: PtrString("arn:aws:service:region:account:type/id"), // link - SigningJobArn: PtrString("arn:aws:service:region:account:type/id"), // link - SigningProfileVersionArn: PtrString("arn:aws:service:region:account:type/id"), // link + MasterArn: new("arn:aws:service:region:account:type/id"), // link + SigningJobArn: new("arn:aws:service:region:account:type/id"), // link + SigningProfileVersionArn: new("arn:aws:service:region:account:type/id"), // link SnapStart: &types.SnapStartResponse{ ApplyOn: types.SnapStartApplyOnPublishedVersions, OptimizationStatus: types.SnapStartOptimizationStatusOn, }, - StateReason: PtrString("reason"), + StateReason: new("reason"), StateReasonCode: types.StateReasonCodeCreating, VpcConfig: &types.VpcConfigResponse{ SecurityGroupIds: []string{ @@ -89,15 +89,15 @@ var testFuncConfig = &types.FunctionConfiguration{ SubnetIds: []string{ "id", // link }, - VpcId: PtrString("id"), // link + VpcId: new("id"), // link }, } var testFuncCode = &types.FunctionCodeLocation{ - RepositoryType: PtrString("S3"), - Location: PtrString("https://awslambda-eu-west-2-tasks.s3.eu-west-2.amazonaws.com/snapshots/052392120703/aws-controltower-NotificationForwarder-bcea303b-7721-4cf0-b8db-7a0e6dca76dd?versionId=3Lk06tjGEoY451GYYupIohtTV96CkVKC&X-Amz-Security-Token=IQoJb3JpZ2l&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Etc=etcetcetc"), // link - ImageUri: PtrString("https://foo"), // link - 
ResolvedImageUri: PtrString("https://foo"), // link + RepositoryType: new("S3"), + Location: new("https://awslambda-eu-west-2-tasks.s3.eu-west-2.amazonaws.com/snapshots/052392120703/aws-controltower-NotificationForwarder-bcea303b-7721-4cf0-b8db-7a0e6dca76dd?versionId=3Lk06tjGEoY451GYYupIohtTV96CkVKC&X-Amz-Security-Token=IQoJb3JpZ2l&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Etc=etcetcetc"), // link + ImageUri: new("https://foo"), // link + ResolvedImageUri: new("https://foo"), // link } func (t *TestLambdaClient) GetFunction(ctx context.Context, params *lambda.GetFunctionInput, optFns ...func(*lambda.Options)) (*lambda.GetFunctionOutput, error) { @@ -118,16 +118,16 @@ func (t *TestLambdaClient) ListFunctionEventInvokeConfigs(context.Context, *lamb { DestinationConfig: &types.DestinationConfig{ OnFailure: &types.OnFailure{ - Destination: PtrString("arn:aws:events:region:account:event-bus/event-bus-name"), // link + Destination: new("arn:aws:events:region:account:event-bus/event-bus-name"), // link }, OnSuccess: &types.OnSuccess{ - Destination: PtrString("arn:aws:events:region:account:event-bus/event-bus-name"), // link + Destination: new("arn:aws:events:region:account:event-bus/event-bus-name"), // link }, }, - FunctionArn: PtrString("arn:aws:service:region:account:type/id"), - LastModified: PtrTime(time.Now()), - MaximumEventAgeInSeconds: PtrInt32(10), - MaximumRetryAttempts: PtrInt32(20), + FunctionArn: new("arn:aws:service:region:account:type/id"), + LastModified: new(time.Now()), + MaximumEventAgeInSeconds: new(int32(10)), + MaximumRetryAttempts: new(int32(20)), }, }, }, nil @@ -138,17 +138,17 @@ func (t *TestLambdaClient) ListFunctionUrlConfigs(context.Context, *lambda.ListF FunctionUrlConfigs: []types.FunctionUrlConfig{ { AuthType: types.FunctionUrlAuthTypeNone, - CreationTime: PtrString("recently"), - FunctionArn: PtrString("arn:aws:service:region:account:type/id"), - FunctionUrl: PtrString("https://bar"), // link - LastModifiedTime: PtrString("recently"), + 
CreationTime: new("recently"), + FunctionArn: new("arn:aws:service:region:account:type/id"), + FunctionUrl: new("https://bar"), // link + LastModifiedTime: new("recently"), Cors: &types.Cors{ - AllowCredentials: PtrBool(true), + AllowCredentials: new(true), AllowHeaders: []string{"X-Forwarded-For"}, AllowMethods: []string{"GET"}, AllowOrigins: []string{"https://bar"}, ExposeHeaders: []string{"X-Authentication"}, - MaxAge: PtrInt32(10), + MaxAge: new(int32(10)), }, }, }, diff --git a/aws-source/adapters/lambda-layer-version.go b/aws-source/adapters/lambda-layer-version.go index a62e6d71..aa505049 100644 --- a/aws-source/adapters/lambda-layer-version.go +++ b/aws-source/adapters/lambda-layer-version.go @@ -28,7 +28,7 @@ func layerVersionGetInputMapper(scope, query string) *lambda.GetLayerVersionInpu return &lambda.GetLayerVersionInput{ LayerName: &name, - VersionNumber: PtrInt64(int64(versionInt)), + VersionNumber: new(int64(versionInt)), } } @@ -107,7 +107,7 @@ func NewLambdaLayerVersionAdapter(client LambdaClient, accountID string, region GetFunc: layerVersionGetFunc, ListInput: &lambda.ListLayerVersionsInput{}, AdapterMetadata: layerVersionAdapterMetadata, - cache: cache, + cache: cache, ListFuncOutputMapper: func(output *lambda.ListLayerVersionsOutput, input *lambda.ListLayerVersionsInput) ([]*lambda.GetLayerVersionInput, error) { return []*lambda.GetLayerVersionInput{}, nil }, diff --git a/aws-source/adapters/lambda-layer-version_test.go b/aws-source/adapters/lambda-layer-version_test.go index 15d1f253..ccf6785b 100644 --- a/aws-source/adapters/lambda-layer-version_test.go +++ b/aws-source/adapters/lambda-layer-version_test.go @@ -62,17 +62,17 @@ func (t *TestLambdaClient) GetLayerVersion(ctx context.Context, params *lambda.G types.RuntimeDotnet6, }, Content: &types.LayerVersionContentOutput{ - CodeSha256: PtrString("sha"), + CodeSha256: new("sha"), CodeSize: 100, - Location: PtrString("somewhere"), - SigningJobArn: 
PtrString("arn:aws:service:region:account:type/id"), - SigningProfileVersionArn: PtrString("arn:aws:service:region:account:type/id"), + Location: new("somewhere"), + SigningJobArn: new("arn:aws:service:region:account:type/id"), + SigningProfileVersionArn: new("arn:aws:service:region:account:type/id"), }, - CreatedDate: PtrString("YYYY-MM-DDThh:mm:ss.sTZD"), - Description: PtrString("description"), - LayerArn: PtrString("arn:aws:service:region:account:type/id"), - LayerVersionArn: PtrString("arn:aws:service:region:account:type/id"), - LicenseInfo: PtrString("info"), + CreatedDate: new("YYYY-MM-DDThh:mm:ss.sTZD"), + Description: new("description"), + LayerArn: new("arn:aws:service:region:account:type/id"), + LayerVersionArn: new("arn:aws:service:region:account:type/id"), + LicenseInfo: new("info"), Version: *params.VersionNumber, }, nil } @@ -83,8 +83,8 @@ func (t *TestLambdaClient) ListLayerVersions(context.Context, *lambda.ListLayerV func TestLayerVersionGetFunc(t *testing.T) { item, err := layerVersionGetFunc(context.Background(), &TestLambdaClient{}, "foo", &lambda.GetLayerVersionInput{ - LayerName: PtrString("layer"), - VersionNumber: PtrInt64(999), + LayerName: new("layer"), + VersionNumber: new(int64(999)), }) if err != nil { diff --git a/aws-source/adapters/lambda-layer_test.go b/aws-source/adapters/lambda-layer_test.go index d402bdd1..0bf3d3db 100644 --- a/aws-source/adapters/lambda-layer_test.go +++ b/aws-source/adapters/lambda-layer_test.go @@ -19,14 +19,14 @@ func TestLayerItemMapper(t *testing.T) { CompatibleRuntimes: []types.Runtime{ types.RuntimeJava11, }, - CreatedDate: PtrString("2018-11-27T15:10:45.123+0000"), - Description: PtrString("description"), - LayerVersionArn: PtrString("arn:aws:service:region:account:type/id"), - LicenseInfo: PtrString("info"), + CreatedDate: new("2018-11-27T15:10:45.123+0000"), + Description: new("description"), + LayerVersionArn: new("arn:aws:service:region:account:type/id"), + LicenseInfo: new("info"), Version: 10, }, - 
LayerArn: PtrString("arn:aws:service:region:account:type/id"), - LayerName: PtrString("name"), + LayerArn: new("arn:aws:service:region:account:type/id"), + LayerName: new("name"), } item, err := layerItemMapper("", "foo", &layer) diff --git a/aws-source/adapters/lambda.go b/aws-source/adapters/lambda.go index 6d9ed511..30fe81a9 100644 --- a/aws-source/adapters/lambda.go +++ b/aws-source/adapters/lambda.go @@ -30,8 +30,8 @@ type PolicyDocument struct { // PolicyStatement defines a statement in a policy document. type PolicyStatement struct { Action string - Principal Principal `json:",omitempty"` - Condition Condition `json:",omitempty"` + Principal Principal + Condition Condition } type Principal struct { @@ -39,8 +39,8 @@ type Principal struct { } type Condition struct { - ArnLike ArnLikeCondition `json:",omitempty"` - StringEquals StringEqualsCondition `json:",omitempty"` + ArnLike ArnLikeCondition + StringEquals StringEqualsCondition } type StringEqualsCondition struct { diff --git a/aws-source/adapters/network-firewall-firewall-policy_test.go b/aws-source/adapters/network-firewall-firewall-policy_test.go index 092aab28..89e4a01a 100644 --- a/aws-source/adapters/network-firewall-firewall-policy_test.go +++ b/aws-source/adapters/network-firewall-firewall-policy_test.go @@ -13,23 +13,23 @@ func (c testNetworkFirewallClient) DescribeFirewallPolicy(ctx context.Context, p now := time.Now() return &networkfirewall.DescribeFirewallPolicyOutput{ FirewallPolicyResponse: &types.FirewallPolicyResponse{ - FirewallPolicyArn: PtrString("arn:aws:network-firewall:us-east-1:123456789012:stateless-rulegroup/aws-network-firewall-DefaultStatelessRuleGroup-1J3Z3W2ZQXV3"), - FirewallPolicyId: PtrString("test"), - FirewallPolicyName: PtrString("test"), - ConsumedStatefulRuleCapacity: PtrInt32(1), - ConsumedStatelessRuleCapacity: PtrInt32(1), - Description: PtrString("test"), + FirewallPolicyArn: 
new("arn:aws:network-firewall:us-east-1:123456789012:stateless-rulegroup/aws-network-firewall-DefaultStatelessRuleGroup-1J3Z3W2ZQXV3"), + FirewallPolicyId: new("test"), + FirewallPolicyName: new("test"), + ConsumedStatefulRuleCapacity: new(int32(1)), + ConsumedStatelessRuleCapacity: new(int32(1)), + Description: new("test"), EncryptionConfiguration: &types.EncryptionConfiguration{ Type: types.EncryptionTypeAwsOwnedKmsKey, - KeyId: PtrString("arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012"), // link (this can be an ARN or ID) + KeyId: new("arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012"), // link (this can be an ARN or ID) }, FirewallPolicyStatus: types.ResourceStatusActive, // health LastModifiedTime: &now, - NumberOfAssociations: PtrInt32(1), + NumberOfAssociations: new(int32(1)), Tags: []types.Tag{ { - Key: PtrString("test"), - Value: PtrString("test"), + Key: new("test"), + Value: new("test"), }, }, }, @@ -50,11 +50,11 @@ func (c testNetworkFirewallClient) DescribeFirewallPolicy(ctx context.Context, p }, StatefulRuleGroupReferences: []types.StatefulRuleGroupReference{ { - ResourceArn: PtrString("arn:aws:network-firewall:us-east-1:123456789012:stateful-rulegroup/aws-network-firewall-DefaultStatefulRuleGroup-1J3Z3W2ZQXV3"), // link + ResourceArn: new("arn:aws:network-firewall:us-east-1:123456789012:stateful-rulegroup/aws-network-firewall-DefaultStatefulRuleGroup-1J3Z3W2ZQXV3"), // link Override: &types.StatefulRuleGroupOverride{ Action: types.OverrideActionDropToAlert, }, - Priority: PtrInt32(1), + Priority: new(int32(1)), }, }, StatelessCustomActions: []types.CustomAction{ @@ -64,16 +64,16 @@ func (c testNetworkFirewallClient) DescribeFirewallPolicy(ctx context.Context, p Dimensions: []types.Dimension{}, }, }, - ActionName: PtrString("test"), + ActionName: new("test"), }, }, StatelessRuleGroupReferences: []types.StatelessRuleGroupReference{ { - Priority: PtrInt32(1), - ResourceArn: 
PtrString("arn:aws:network-firewall:us-east-1:123456789012:stateless-rulegroup/aws-network-firewall-DefaultStatelessRuleGroup-1J3Z3W2ZQXV3"), // link + Priority: new(int32(1)), + ResourceArn: new("arn:aws:network-firewall:us-east-1:123456789012:stateless-rulegroup/aws-network-firewall-DefaultStatelessRuleGroup-1J3Z3W2ZQXV3"), // link }, }, - TLSInspectionConfigurationArn: PtrString("arn:aws:network-firewall:us-east-1:123456789012:tls-inspection-configuration/aws-network-firewall-DefaultTlsInspectionConfiguration-1J3Z3W2ZQXV3"), // link + TLSInspectionConfigurationArn: new("arn:aws:network-firewall:us-east-1:123456789012:tls-inspection-configuration/aws-network-firewall-DefaultTlsInspectionConfiguration-1J3Z3W2ZQXV3"), // link }, }, nil } @@ -82,7 +82,7 @@ func (c testNetworkFirewallClient) ListFirewallPolicies(context.Context, *networ return &networkfirewall.ListFirewallPoliciesOutput{ FirewallPolicies: []types.FirewallPolicyMetadata{ { - Arn: PtrString("arn:aws:network-firewall:us-east-1:123456789012:stateless-rulegroup/aws-network-firewall-DefaultStatelessRuleGroup-1J3Z3W2ZQXV3"), + Arn: new("arn:aws:network-firewall:us-east-1:123456789012:stateless-rulegroup/aws-network-firewall-DefaultStatelessRuleGroup-1J3Z3W2ZQXV3"), }, }, }, nil diff --git a/aws-source/adapters/network-firewall-firewall_test.go b/aws-source/adapters/network-firewall-firewall_test.go index 1092e319..8506ecfa 100644 --- a/aws-source/adapters/network-firewall-firewall_test.go +++ b/aws-source/adapters/network-firewall-firewall_test.go @@ -13,29 +13,29 @@ import ( func (c testNetworkFirewallClient) DescribeFirewall(ctx context.Context, params *networkfirewall.DescribeFirewallInput, optFns ...func(*networkfirewall.Options)) (*networkfirewall.DescribeFirewallOutput, error) { return &networkfirewall.DescribeFirewallOutput{ Firewall: &types.Firewall{ - FirewallId: PtrString("test"), - FirewallPolicyArn: 
PtrString("arn:aws:network-firewall:us-east-1:123456789012:stateless-rulegroup/aws-network-firewall-DefaultStatelessRuleGroup-1J3Z3W2ZQXV3"), // link + FirewallId: new("test"), + FirewallPolicyArn: new("arn:aws:network-firewall:us-east-1:123456789012:stateless-rulegroup/aws-network-firewall-DefaultStatelessRuleGroup-1J3Z3W2ZQXV3"), // link SubnetMappings: []types.SubnetMapping{ { - SubnetId: PtrString("subnet-12345678901234567"), // link + SubnetId: new("subnet-12345678901234567"), // link IPAddressType: types.IPAddressTypeIpv4, }, }, - VpcId: PtrString("vpc-12345678901234567"), // link + VpcId: new("vpc-12345678901234567"), // link DeleteProtection: false, - Description: PtrString("test"), + Description: new("test"), EncryptionConfiguration: &types.EncryptionConfiguration{ Type: types.EncryptionTypeAwsOwnedKmsKey, - KeyId: PtrString("arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012"), // link (this can be an ARN or ID) + KeyId: new("arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012"), // link (this can be an ARN or ID) }, - FirewallArn: PtrString("arn:aws:network-firewall:us-east-1:123456789012:firewall/aws-network-firewall-DefaultFirewall-1J3Z3W2ZQXV3"), - FirewallName: PtrString("test"), + FirewallArn: new("arn:aws:network-firewall:us-east-1:123456789012:firewall/aws-network-firewall-DefaultFirewall-1J3Z3W2ZQXV3"), + FirewallName: new("test"), FirewallPolicyChangeProtection: false, SubnetChangeProtection: false, Tags: []types.Tag{ { - Key: PtrString("test"), - Value: PtrString("test"), + Key: new("test"), + Value: new("test"), }, }, }, @@ -44,22 +44,22 @@ func (c testNetworkFirewallClient) DescribeFirewall(ctx context.Context, params Status: types.FirewallStatusValueDeleting, CapacityUsageSummary: &types.CapacityUsageSummary{ CIDRs: &types.CIDRSummary{ - AvailableCIDRCount: PtrInt32(1), + AvailableCIDRCount: new(int32(1)), IPSetReferences: map[string]types.IPSetMetadata{ "test": { - ResolvedCIDRCount: 
PtrInt32(1), + ResolvedCIDRCount: new(int32(1)), }, }, - UtilizedCIDRCount: PtrInt32(1), + UtilizedCIDRCount: new(int32(1)), }, }, SyncStates: map[string]types.SyncState{ "test": { Attachment: &types.Attachment{ - EndpointId: PtrString("test"), + EndpointId: new("test"), Status: types.AttachmentStatusCreating, - StatusMessage: PtrString("test"), - SubnetId: PtrString("test"), // link, + StatusMessage: new("test"), + SubnetId: new("test"), // link, }, }, }, @@ -69,7 +69,7 @@ func (c testNetworkFirewallClient) DescribeFirewall(ctx context.Context, params func (c testNetworkFirewallClient) DescribeLoggingConfiguration(ctx context.Context, params *networkfirewall.DescribeLoggingConfigurationInput, optFns ...func(*networkfirewall.Options)) (*networkfirewall.DescribeLoggingConfigurationOutput, error) { return &networkfirewall.DescribeLoggingConfigurationOutput{ - FirewallArn: PtrString("arn:aws:network-firewall:us-east-1:123456789012:firewall/aws-network-firewall-DefaultFirewall-1J3Z3W2ZQXV3"), + FirewallArn: new("arn:aws:network-firewall:us-east-1:123456789012:firewall/aws-network-firewall-DefaultFirewall-1J3Z3W2ZQXV3"), LoggingConfiguration: &types.LoggingConfiguration{ LogDestinationConfigs: []types.LogDestinationConfig{ { @@ -101,7 +101,7 @@ func (c testNetworkFirewallClient) DescribeLoggingConfiguration(ctx context.Cont func (c testNetworkFirewallClient) DescribeResourcePolicy(ctx context.Context, params *networkfirewall.DescribeResourcePolicyInput, optFns ...func(*networkfirewall.Options)) (*networkfirewall.DescribeResourcePolicyOutput, error) { return &networkfirewall.DescribeResourcePolicyOutput{ - Policy: PtrString("test"), // link + Policy: new("test"), // link }, nil } @@ -109,7 +109,7 @@ func (c testNetworkFirewallClient) ListFirewalls(context.Context, *networkfirewa return &networkfirewall.ListFirewallsOutput{ Firewalls: []types.FirewallMetadata{ { - FirewallArn: 
PtrString("arn:aws:network-firewall:us-east-1:123456789012:firewall/aws-network-firewall-DefaultFirewall-1J3Z3W2ZQXV3"), + FirewallArn: new("arn:aws:network-firewall:us-east-1:123456789012:firewall/aws-network-firewall-DefaultFirewall-1J3Z3W2ZQXV3"), }, }, }, nil diff --git a/aws-source/adapters/network-firewall-rule-group_test.go b/aws-source/adapters/network-firewall-rule-group_test.go index c5dd65a6..afd83434 100644 --- a/aws-source/adapters/network-firewall-rule-group_test.go +++ b/aws-source/adapters/network-firewall-rule-group_test.go @@ -14,37 +14,37 @@ func (c testNetworkFirewallClient) DescribeRuleGroup(ctx context.Context, params return &networkfirewall.DescribeRuleGroupOutput{ RuleGroupResponse: &types.RuleGroupResponse{ - RuleGroupArn: PtrString("arn:aws:network-firewall:us-east-1:123456789012:stateless-rulegroup/aws-network-firewall-DefaultStatelessRuleGroup-1J3Z3W2ZQXV3"), - RuleGroupId: PtrString("test"), - RuleGroupName: PtrString("test"), + RuleGroupArn: new("arn:aws:network-firewall:us-east-1:123456789012:stateless-rulegroup/aws-network-firewall-DefaultStatelessRuleGroup-1J3Z3W2ZQXV3"), + RuleGroupId: new("test"), + RuleGroupName: new("test"), AnalysisResults: []types.AnalysisResult{ { - AnalysisDetail: PtrString("test"), + AnalysisDetail: new("test"), IdentifiedRuleIds: []string{ "test", }, IdentifiedType: types.IdentifiedTypeStatelessRuleContainsTcpFlags, }, }, - Capacity: PtrInt32(1), - ConsumedCapacity: PtrInt32(1), - Description: PtrString("test"), + Capacity: new(int32(1)), + ConsumedCapacity: new(int32(1)), + Description: new("test"), EncryptionConfiguration: &types.EncryptionConfiguration{ Type: types.EncryptionTypeAwsOwnedKmsKey, - KeyId: PtrString("arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012"), // link (this can be an ARN or ID) + KeyId: new("arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012"), // link (this can be an ARN or ID) }, LastModifiedTime: &now, - NumberOfAssociations: 
PtrInt32(1), - RuleGroupStatus: types.ResourceStatusActive, // health - SnsTopic: PtrString("arn:aws:sns:us-east-1:123456789012:aws-network-firewall-DefaultStatelessRuleGroup-1J3Z3W2ZQXV3"), // link + NumberOfAssociations: new(int32(1)), + RuleGroupStatus: types.ResourceStatusActive, // health + SnsTopic: new("arn:aws:sns:us-east-1:123456789012:aws-network-firewall-DefaultStatelessRuleGroup-1J3Z3W2ZQXV3"), // link SourceMetadata: &types.SourceMetadata{ - SourceArn: PtrString("arn:aws:network-firewall:us-east-1:123456789012:firewall/aws-network-firewall-DefaultFirewall-1J3Z3W2ZQXV3"), // link - SourceUpdateToken: PtrString("test"), + SourceArn: new("arn:aws:network-firewall:us-east-1:123456789012:firewall/aws-network-firewall-DefaultFirewall-1J3Z3W2ZQXV3"), // link + SourceUpdateToken: new("test"), }, Tags: []types.Tag{ { - Key: PtrString("test"), - Value: PtrString("test"), + Key: new("test"), + Value: new("test"), }, }, Type: types.RuleGroupTypeStateless, @@ -60,24 +60,24 @@ func (c testNetworkFirewallClient) DescribeRuleGroup(ctx context.Context, params "foo.bar.com", // link }, }, - RulesString: PtrString("test"), + RulesString: new("test"), StatefulRules: []types.StatefulRule{ { Action: types.StatefulActionAlert, Header: &types.Header{ - Destination: PtrString("1.1.1.1"), - DestinationPort: PtrString("8080"), + Destination: new("1.1.1.1"), + DestinationPort: new("8080"), Direction: types.StatefulRuleDirectionForward, Protocol: types.StatefulRuleProtocolDcerpc, - Source: PtrString("test"), - SourcePort: PtrString("8080"), + Source: new("test"), + SourcePort: new("8080"), }, }, }, StatelessRulesAndCustomActions: &types.StatelessRulesAndCustomActions{ StatelessRules: []types.StatelessRule{ { - Priority: PtrInt32(1), + Priority: new(int32(1)), RuleDefinition: &types.RuleDefinition{ Actions: []string{}, MatchAttributes: &types.MatchAttributes{ @@ -89,7 +89,7 @@ func (c testNetworkFirewallClient) DescribeRuleGroup(ctx context.Context, params }, Destinations: 
[]types.Address{ { - AddressDefinition: PtrString("1.1.1.1/1"), + AddressDefinition: new("1.1.1.1/1"), }, }, Protocols: []int32{1}, @@ -120,12 +120,12 @@ func (c testNetworkFirewallClient) DescribeRuleGroup(ctx context.Context, params PublishMetricAction: &types.PublishMetricAction{ Dimensions: []types.Dimension{ { - Value: PtrString("test"), + Value: new("test"), }, }, }, }, - ActionName: PtrString("test"), + ActionName: new("test"), }, }, }, @@ -138,7 +138,7 @@ func (c testNetworkFirewallClient) ListRuleGroups(ctx context.Context, params *n return &networkfirewall.ListRuleGroupsOutput{ RuleGroups: []types.RuleGroupMetadata{ { - Arn: PtrString("arn:aws:network-firewall:us-east-1:123456789012:stateless-rulegroup/aws-network-firewall-DefaultStatelessRuleGroup-1J3Z3W2ZQXV3"), + Arn: new("arn:aws:network-firewall:us-east-1:123456789012:stateless-rulegroup/aws-network-firewall-DefaultStatelessRuleGroup-1J3Z3W2ZQXV3"), }, }, }, nil diff --git a/aws-source/adapters/network-firewall-tls-inspection-configuration_test.go b/aws-source/adapters/network-firewall-tls-inspection-configuration_test.go index 1ff37cdb..4b2333dd 100644 --- a/aws-source/adapters/network-firewall-tls-inspection-configuration_test.go +++ b/aws-source/adapters/network-firewall-tls-inspection-configuration_test.go @@ -13,42 +13,42 @@ func (c testNetworkFirewallClient) DescribeTLSInspectionConfiguration(ctx contex now := time.Now() return &networkfirewall.DescribeTLSInspectionConfigurationOutput{ TLSInspectionConfigurationResponse: &types.TLSInspectionConfigurationResponse{ - TLSInspectionConfigurationArn: PtrString("arn:aws:network-firewall:us-east-1:123456789012:tls-inspection-configuration/aws-network-firewall-DefaultTLSInspectionConfiguration-1J3Z3W2ZQXV3"), - TLSInspectionConfigurationId: PtrString("test"), - TLSInspectionConfigurationName: PtrString("test"), + TLSInspectionConfigurationArn: 
new("arn:aws:network-firewall:us-east-1:123456789012:tls-inspection-configuration/aws-network-firewall-DefaultTLSInspectionConfiguration-1J3Z3W2ZQXV3"), + TLSInspectionConfigurationId: new("test"), + TLSInspectionConfigurationName: new("test"), CertificateAuthority: &types.TlsCertificateData{ - CertificateArn: PtrString("arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012"), // link - CertificateSerial: PtrString("test"), - Status: PtrString("OK"), - StatusMessage: PtrString("test"), + CertificateArn: new("arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012"), // link + CertificateSerial: new("test"), + Status: new("OK"), + StatusMessage: new("test"), }, Certificates: []types.TlsCertificateData{ { - CertificateArn: PtrString("arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012"), // link - CertificateSerial: PtrString("test"), - Status: PtrString("OK"), - StatusMessage: PtrString("test"), + CertificateArn: new("arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012"), // link + CertificateSerial: new("test"), + Status: new("OK"), + StatusMessage: new("test"), }, }, - Description: PtrString("test"), + Description: new("test"), EncryptionConfiguration: &types.EncryptionConfiguration{ Type: types.EncryptionTypeAwsOwnedKmsKey, - KeyId: PtrString("arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012"), // link (this can be an ARN or ID) + KeyId: new("arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012"), // link (this can be an ARN or ID) }, LastModifiedTime: &now, - NumberOfAssociations: PtrInt32(1), + NumberOfAssociations: new(int32(1)), TLSInspectionConfigurationStatus: types.ResourceStatusActive, // health Tags: []types.Tag{ { - Key: PtrString("test"), - Value: PtrString("test"), + Key: new("test"), + Value: new("test"), }, }, }, TLSInspectionConfiguration: &types.TLSInspectionConfiguration{ 
ServerCertificateConfigurations: []types.ServerCertificateConfiguration{ { - CertificateAuthorityArn: PtrString("arn:aws:acm:us-east-1:123456789012:certificate-authority/12345678-1234-1234-1234-123456789012"), // link + CertificateAuthorityArn: new("arn:aws:acm:us-east-1:123456789012:certificate-authority/12345678-1234-1234-1234-123456789012"), // link CheckCertificateRevocationStatus: &types.CheckCertificateRevocationStatusActions{ RevokedStatusAction: types.RevocationCheckActionPass, UnknownStatusAction: types.RevocationCheckActionPass, @@ -63,7 +63,7 @@ func (c testNetworkFirewallClient) DescribeTLSInspectionConfiguration(ctx contex }, Destinations: []types.Address{ { - AddressDefinition: PtrString("test"), + AddressDefinition: new("test"), }, }, Protocols: []int32{1}, @@ -75,14 +75,14 @@ func (c testNetworkFirewallClient) DescribeTLSInspectionConfiguration(ctx contex }, Sources: []types.Address{ { - AddressDefinition: PtrString("test"), + AddressDefinition: new("test"), }, }, }, }, ServerCertificates: []types.ServerCertificate{ { - ResourceArn: PtrString("arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012"), // link + ResourceArn: new("arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012"), // link }, }, }, @@ -95,7 +95,7 @@ func (c testNetworkFirewallClient) ListTLSInspectionConfigurations(ctx context.C return &networkfirewall.ListTLSInspectionConfigurationsOutput{ TLSInspectionConfigurations: []types.TLSInspectionConfigurationMetadata{ { - Arn: PtrString("arn:aws:network-firewall:us-east-1:123456789012:tls-inspection-configuration/aws-network-firewall-DefaultTLSInspectionConfiguration-1J3Z3W2ZQXV3"), + Arn: new("arn:aws:network-firewall:us-east-1:123456789012:tls-inspection-configuration/aws-network-firewall-DefaultTLSInspectionConfiguration-1J3Z3W2ZQXV3"), }, }, }, nil diff --git a/aws-source/adapters/networkmanager-connect-attachment_test.go 
b/aws-source/adapters/networkmanager-connect-attachment_test.go index 7e9479fe..b136dfc5 100644 --- a/aws-source/adapters/networkmanager-connect-attachment_test.go +++ b/aws-source/adapters/networkmanager-connect-attachment_test.go @@ -12,9 +12,9 @@ func TestConnectAttachmentItemMapper(t *testing.T) { scope := "123456789012.eu-west-2" item, err := connectAttachmentItemMapper("", scope, &types.ConnectAttachment{ Attachment: &types.Attachment{ - AttachmentId: PtrString("att-1"), - CoreNetworkId: PtrString("cn-1"), - CoreNetworkArn: PtrString("arn:aws:networkmanager:eu-west-2:123456789012:core-network/cn-1"), + AttachmentId: new("att-1"), + CoreNetworkId: new("cn-1"), + CoreNetworkArn: new("arn:aws:networkmanager:eu-west-2:123456789012:core-network/cn-1"), }, }) if err != nil { diff --git a/aws-source/adapters/networkmanager-connect-peer-association_test.go b/aws-source/adapters/networkmanager-connect-peer-association_test.go index 501a4c56..dfa333e3 100644 --- a/aws-source/adapters/networkmanager-connect-peer-association_test.go +++ b/aws-source/adapters/networkmanager-connect-peer-association_test.go @@ -14,10 +14,10 @@ func TestConnectPeerAssociationsOutputMapper(t *testing.T) { output := networkmanager.GetConnectPeerAssociationsOutput{ ConnectPeerAssociations: []types.ConnectPeerAssociation{ { - ConnectPeerId: PtrString("cp-1"), - DeviceId: PtrString("dvc-1"), - GlobalNetworkId: PtrString("default"), - LinkId: PtrString("link-1"), + ConnectPeerId: new("cp-1"), + DeviceId: new("dvc-1"), + GlobalNetworkId: new("default"), + LinkId: new("link-1"), }, }, } diff --git a/aws-source/adapters/networkmanager-connect-peer_test.go b/aws-source/adapters/networkmanager-connect-peer_test.go index c653214d..7f11e080 100644 --- a/aws-source/adapters/networkmanager-connect-peer_test.go +++ b/aws-source/adapters/networkmanager-connect-peer_test.go @@ -16,21 +16,21 @@ func (n NetworkManagerTestClient) GetConnectPeer(ctx context.Context, params *ne Configuration: 
&types.ConnectPeerConfiguration{ BgpConfigurations: []types.ConnectPeerBgpConfiguration{ { - CoreNetworkAddress: PtrString("1.4.2.4"), // link - CoreNetworkAsn: PtrInt64(64512), // link - PeerAddress: PtrString("123.123.123.123"), // link - PeerAsn: PtrInt64(64513), // link + CoreNetworkAddress: new("1.4.2.4"), // link + CoreNetworkAsn: new(int64(64512)), // link + PeerAddress: new("123.123.123.123"), // link + PeerAsn: new(int64(64513)), // link }, }, - CoreNetworkAddress: PtrString("1.1.1.3"), // link - PeerAddress: PtrString("1.1.1.45"), // link + CoreNetworkAddress: new("1.1.1.3"), // link + PeerAddress: new("1.1.1.45"), // link }, - ConnectAttachmentId: PtrString("ca-1"), // link - ConnectPeerId: PtrString("cp-1"), - CoreNetworkId: PtrString("cn-1"), // link - EdgeLocation: PtrString("us-west-2"), + ConnectAttachmentId: new("ca-1"), // link + ConnectPeerId: new("cp-1"), + CoreNetworkId: new("cn-1"), // link + EdgeLocation: new("us-west-2"), State: types.ConnectPeerStateAvailable, - SubnetArn: PtrString("arn:aws:ec2:us-west-2:123456789012:subnet/subnet-1"), // link + SubnetArn: new("arn:aws:ec2:us-west-2:123456789012:subnet/subnet-1"), // link }, }, nil } diff --git a/aws-source/adapters/networkmanager-connection_test.go b/aws-source/adapters/networkmanager-connection_test.go index a2cf5f1d..957ff9fb 100644 --- a/aws-source/adapters/networkmanager-connection_test.go +++ b/aws-source/adapters/networkmanager-connection_test.go @@ -15,12 +15,12 @@ func TestConnectionOutputMapper(t *testing.T) { output := networkmanager.GetConnectionsOutput{ Connections: []types.Connection{ { - GlobalNetworkId: PtrString("default"), - ConnectionId: PtrString("conn-1"), - DeviceId: PtrString("dvc-1"), - ConnectedDeviceId: PtrString("dvc-2"), - LinkId: PtrString("link-1"), - ConnectedLinkId: PtrString("link-2"), + GlobalNetworkId: new("default"), + ConnectionId: new("conn-1"), + DeviceId: new("dvc-1"), + ConnectedDeviceId: new("dvc-2"), + LinkId: new("link-1"), + ConnectedLinkId: 
new("link-2"), }, }, } @@ -103,7 +103,7 @@ func TestConnectionInputMapperSearch(t *testing.T) { name: "Valid networkmanager-connection ARN", query: "arn:aws:networkmanager::123456789012:device/global-network-0d47f6t230mz46dy4/connection-07f6fd08867abc123", expectedInput: &networkmanager.GetConnectionsInput{ - GlobalNetworkId: PtrString("global-network-0d47f6t230mz46dy4"), + GlobalNetworkId: new("global-network-0d47f6t230mz46dy4"), ConnectionIds: []string{"connection-07f6fd08867abc123"}, }, expectError: false, @@ -112,8 +112,8 @@ func TestConnectionInputMapperSearch(t *testing.T) { name: "Valid networkmanager-device ARN", query: "arn:aws:networkmanager::123456789012:device/global-network-01231231231231231/device-07f6fd08867abc123", expectedInput: &networkmanager.GetConnectionsInput{ - GlobalNetworkId: PtrString("global-network-01231231231231231"), - DeviceId: PtrString("device-07f6fd08867abc123"), + GlobalNetworkId: new("global-network-01231231231231231"), + DeviceId: new("device-07f6fd08867abc123"), }, expectError: false, }, @@ -121,7 +121,7 @@ func TestConnectionInputMapperSearch(t *testing.T) { name: "Global Network ID only", query: "global-network-123456789", expectedInput: &networkmanager.GetConnectionsInput{ - GlobalNetworkId: PtrString("global-network-123456789"), + GlobalNetworkId: new("global-network-123456789"), }, expectError: false, }, @@ -129,8 +129,8 @@ func TestConnectionInputMapperSearch(t *testing.T) { name: "Global Network ID and Device ID", query: "global-network-123456789|device-987654321", expectedInput: &networkmanager.GetConnectionsInput{ - GlobalNetworkId: PtrString("global-network-123456789"), - DeviceId: PtrString("device-987654321"), + GlobalNetworkId: new("global-network-123456789"), + DeviceId: new("device-987654321"), }, expectError: false, }, diff --git a/aws-source/adapters/networkmanager-core-network-policy_test.go b/aws-source/adapters/networkmanager-core-network-policy_test.go index b0bd39c5..0307808b 100644 --- 
a/aws-source/adapters/networkmanager-core-network-policy_test.go +++ b/aws-source/adapters/networkmanager-core-network-policy_test.go @@ -11,8 +11,8 @@ func TestCoreNetworkPolicyItemMapper(t *testing.T) { scope := "123456789012.eu-west-2" item, err := coreNetworkPolicyItemMapper("", scope, &types.CoreNetworkPolicy{ - CoreNetworkId: PtrString("cn-1"), - PolicyVersionId: PtrInt32(1), + CoreNetworkId: new("cn-1"), + PolicyVersionId: new(int32(1)), }) if err != nil { t.Error(err) diff --git a/aws-source/adapters/networkmanager-core-network_test.go b/aws-source/adapters/networkmanager-core-network_test.go index 9283b49d..aa43722a 100644 --- a/aws-source/adapters/networkmanager-core-network_test.go +++ b/aws-source/adapters/networkmanager-core-network_test.go @@ -13,21 +13,21 @@ import ( func (n NetworkManagerTestClient) GetCoreNetwork(ctx context.Context, params *networkmanager.GetCoreNetworkInput, optFns ...func(*networkmanager.Options)) (*networkmanager.GetCoreNetworkOutput, error) { return &networkmanager.GetCoreNetworkOutput{ CoreNetwork: &types.CoreNetwork{ - CoreNetworkArn: PtrString("arn:aws:networkmanager:us-west-2:123456789012:core-network/cn-1"), - CoreNetworkId: PtrString("cn-1"), - GlobalNetworkId: PtrString("default"), - Description: PtrString("core network description"), + CoreNetworkArn: new("arn:aws:networkmanager:us-west-2:123456789012:core-network/cn-1"), + CoreNetworkId: new("cn-1"), + GlobalNetworkId: new("default"), + Description: new("core network description"), State: types.CoreNetworkStateAvailable, Edges: []types.CoreNetworkEdge{ { - Asn: PtrInt64(64512), // link - EdgeLocation: PtrString("us-west-2"), + Asn: new(int64(64512)), // link + EdgeLocation: new("us-west-2"), }, }, Segments: []types.CoreNetworkSegment{ { EdgeLocations: []string{"us-west-2"}, - Name: PtrString("segment-1"), + Name: new("segment-1"), }, }, }, diff --git a/aws-source/adapters/networkmanager-device_test.go b/aws-source/adapters/networkmanager-device_test.go index 
190278c3..7c800872 100644 --- a/aws-source/adapters/networkmanager-device_test.go +++ b/aws-source/adapters/networkmanager-device_test.go @@ -15,10 +15,10 @@ func TestDeviceOutputMapper(t *testing.T) { output := networkmanager.GetDevicesOutput{ Devices: []types.Device{ { - DeviceId: PtrString("dvc-1"), - GlobalNetworkId: PtrString("default"), - SiteId: PtrString("site-1"), - DeviceArn: PtrString("arn:aws:networkmanager:us-west-2:123456789012:device/dvc-1"), + DeviceId: new("dvc-1"), + GlobalNetworkId: new("default"), + SiteId: new("site-1"), + DeviceArn: new("arn:aws:networkmanager:us-west-2:123456789012:device/dvc-1"), }, }, } @@ -101,7 +101,7 @@ func TestDeviceInputMapperSearch(t *testing.T) { name: "Valid networkmanager-device ARN", query: "arn:aws:networkmanager::123456789012:device/global-network-01231231231231231/device-07f6fd08867abc123", expectedInput: &networkmanager.GetDevicesInput{ - GlobalNetworkId: PtrString("global-network-01231231231231231"), + GlobalNetworkId: new("global-network-01231231231231231"), DeviceIds: []string{"device-07f6fd08867abc123"}, }, expectError: false, @@ -110,7 +110,7 @@ func TestDeviceInputMapperSearch(t *testing.T) { name: "Global Network ID only", query: "global-network-123456789", expectedInput: &networkmanager.GetDevicesInput{ - GlobalNetworkId: PtrString("global-network-123456789"), + GlobalNetworkId: new("global-network-123456789"), }, expectError: false, }, @@ -118,8 +118,8 @@ func TestDeviceInputMapperSearch(t *testing.T) { name: "Global Network ID and Site ID", query: "global-network-123456789|site-987654321", expectedInput: &networkmanager.GetDevicesInput{ - GlobalNetworkId: PtrString("global-network-123456789"), - SiteId: PtrString("site-987654321"), + GlobalNetworkId: new("global-network-123456789"), + SiteId: new("site-987654321"), }, expectError: false, }, diff --git a/aws-source/adapters/networkmanager-global-network_test.go b/aws-source/adapters/networkmanager-global-network_test.go index 8407c7b7..795e4f9b 
100644 --- a/aws-source/adapters/networkmanager-global-network_test.go +++ b/aws-source/adapters/networkmanager-global-network_test.go @@ -14,8 +14,8 @@ func TestGlobalNetworkOutputMapper(t *testing.T) { output := networkmanager.DescribeGlobalNetworksOutput{ GlobalNetworks: []types.GlobalNetwork{ { - GlobalNetworkArn: PtrString("arn:aws:networkmanager:eu-west-2:052392120703:networkmanager/global-network/default"), - GlobalNetworkId: PtrString("default"), + GlobalNetworkArn: new("arn:aws:networkmanager:eu-west-2:052392120703:networkmanager/global-network/default"), + GlobalNetworkId: new("default"), }, }, } diff --git a/aws-source/adapters/networkmanager-link-association_test.go b/aws-source/adapters/networkmanager-link-association_test.go index cb74c060..7f9c21d6 100644 --- a/aws-source/adapters/networkmanager-link-association_test.go +++ b/aws-source/adapters/networkmanager-link-association_test.go @@ -14,9 +14,9 @@ func TestLinkAssociationOutputMapper(t *testing.T) { output := networkmanager.GetLinkAssociationsOutput{ LinkAssociations: []types.LinkAssociation{ { - LinkId: PtrString("link-1"), - GlobalNetworkId: PtrString("default"), - DeviceId: PtrString("dvc-1"), + LinkId: new("link-1"), + GlobalNetworkId: new("default"), + DeviceId: new("dvc-1"), }, }, } diff --git a/aws-source/adapters/networkmanager-link_test.go b/aws-source/adapters/networkmanager-link_test.go index 363d6838..c9b21afd 100644 --- a/aws-source/adapters/networkmanager-link_test.go +++ b/aws-source/adapters/networkmanager-link_test.go @@ -15,10 +15,10 @@ func TestLinkOutputMapper(t *testing.T) { output := networkmanager.GetLinksOutput{ Links: []types.Link{ { - LinkId: PtrString("link-1"), - GlobalNetworkId: PtrString("default"), - SiteId: PtrString("site-1"), - LinkArn: PtrString("arn:aws:networkmanager:us-west-2:123456789012:link/link-1"), + LinkId: new("link-1"), + GlobalNetworkId: new("default"), + SiteId: new("site-1"), + LinkArn: 
new("arn:aws:networkmanager:us-west-2:123456789012:link/link-1"), }, }, } @@ -95,7 +95,7 @@ func TestLinkInputMapperSearch(t *testing.T) { name: "Valid networkmanager-link ARN", query: "arn:aws:networkmanager::123456789012:link/global-network-01231231231231231/link-11112222aaaabbbb1", expectedInput: &networkmanager.GetLinksInput{ - GlobalNetworkId: PtrString("global-network-01231231231231231"), + GlobalNetworkId: new("global-network-01231231231231231"), LinkIds: []string{"link-11112222aaaabbbb1"}, }, expectError: false, @@ -104,7 +104,7 @@ func TestLinkInputMapperSearch(t *testing.T) { name: "Global Network ID only", query: "global-network-123456789", expectedInput: &networkmanager.GetLinksInput{ - GlobalNetworkId: PtrString("global-network-123456789"), + GlobalNetworkId: new("global-network-123456789"), }, expectError: false, }, @@ -112,8 +112,8 @@ func TestLinkInputMapperSearch(t *testing.T) { name: "Global Network ID and Site ID", query: "global-network-123456789|site-987654321", expectedInput: &networkmanager.GetLinksInput{ - GlobalNetworkId: PtrString("global-network-123456789"), - SiteId: PtrString("site-987654321"), + GlobalNetworkId: new("global-network-123456789"), + SiteId: new("site-987654321"), }, expectError: false, }, diff --git a/aws-source/adapters/networkmanager-network-resource-relationship.go b/aws-source/adapters/networkmanager-network-resource-relationship.go index cd6019cb..65e41a4d 100644 --- a/aws-source/adapters/networkmanager-network-resource-relationship.go +++ b/aws-source/adapters/networkmanager-network-resource-relationship.go @@ -43,7 +43,7 @@ func networkResourceRelationshipOutputMapper(_ context.Context, _ *networkmanage hasher.Write([]byte(toArn.String())) sha := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) - attrs, err := sdp.ToAttributes(map[string]interface{}{ + attrs, err := sdp.ToAttributes(map[string]any{ "Hash": sha, "From": fromArn.String(), "To": toArn.String(), @@ -197,7 +197,7 @@ func 
NewNetworkManagerNetworkResourceRelationshipsAdapter(client *networkmanager Region: region, ItemType: "networkmanager-network-resource-relationship", AdapterMetadata: networkResourceRelationshipAdapterMetadata, - cache: cache, + cache: cache, OutputMapper: networkResourceRelationshipOutputMapper, DescribeFunc: func(ctx context.Context, client *networkmanager.Client, input *networkmanager.GetNetworkResourceRelationshipsInput) (*networkmanager.GetNetworkResourceRelationshipsOutput, error) { return client.GetNetworkResourceRelationships(ctx, input) diff --git a/aws-source/adapters/networkmanager-network-resource-relationship_test.go b/aws-source/adapters/networkmanager-network-resource-relationship_test.go index 78dd910c..f3efdc03 100644 --- a/aws-source/adapters/networkmanager-network-resource-relationship_test.go +++ b/aws-source/adapters/networkmanager-network-resource-relationship_test.go @@ -21,72 +21,72 @@ func TestNetworkResourceRelationshipOutputMapper(t *testing.T) { { name: "ok, one entity", input: networkmanager.GetNetworkResourceRelationshipsInput{ - GlobalNetworkId: PtrString("default"), + GlobalNetworkId: new("default"), }, output: networkmanager.GetNetworkResourceRelationshipsOutput{ Relationships: []types.Relationship{ // connection, device { - From: PtrString("arn:aws:networkmanager:us-west-2:123456789012:connection/conn-1"), - To: PtrString("arn:aws:networkmanager:us-west-2:123456789012:device/d-1"), + From: new("arn:aws:networkmanager:us-west-2:123456789012:connection/conn-1"), + To: new("arn:aws:networkmanager:us-west-2:123456789012:device/d-1"), }, { - To: PtrString("arn:aws:networkmanager:us-west-2:123456789012:connection/conn-1"), - From: PtrString("arn:aws:networkmanager:us-west-2:123456789012:device/d-1"), + To: new("arn:aws:networkmanager:us-west-2:123456789012:connection/conn-1"), + From: new("arn:aws:networkmanager:us-west-2:123456789012:device/d-1"), }, // link, site { - From: 
PtrString("arn:aws:networkmanager:us-west-2:123456789012:link/link-1"), - To: PtrString("arn:aws:networkmanager:us-west-2:123456789012:site/site-1"), + From: new("arn:aws:networkmanager:us-west-2:123456789012:link/link-1"), + To: new("arn:aws:networkmanager:us-west-2:123456789012:site/site-1"), }, { - To: PtrString("arn:aws:networkmanager:us-west-2:123456789012:link/link-1"), - From: PtrString("arn:aws:networkmanager:us-west-2:123456789012:site/site-1"), + To: new("arn:aws:networkmanager:us-west-2:123456789012:link/link-1"), + From: new("arn:aws:networkmanager:us-west-2:123456789012:site/site-1"), }, // directconnect-connection, directconnect-direct-connect-gateway { - From: PtrString("arn:aws:directconnect:us-west-2:123456789012:connection/dxconn-1"), - To: PtrString("arn:aws:directconnect:us-west-2:123456789012:direct-connect-gateway/gw-1"), + From: new("arn:aws:directconnect:us-west-2:123456789012:connection/dxconn-1"), + To: new("arn:aws:directconnect:us-west-2:123456789012:direct-connect-gateway/gw-1"), }, { - To: PtrString("arn:aws:directconnect:us-west-2:123456789012:connection/dxconn-1"), - From: PtrString("arn:aws:directconnect:us-west-2:123456789012:direct-connect-gateway/gw-1"), + To: new("arn:aws:directconnect:us-west-2:123456789012:connection/dxconn-1"), + From: new("arn:aws:directconnect:us-west-2:123456789012:direct-connect-gateway/gw-1"), }, // directconnect-virtual-interface, ec2-customer-gateway { - From: PtrString("arn:aws:directconnect:us-west-2:123456789012:virtual-interface/vif-1"), - To: PtrString("arn:aws:ec2:us-west-2:123456789012:customer-gateway/gw-1"), + From: new("arn:aws:directconnect:us-west-2:123456789012:virtual-interface/vif-1"), + To: new("arn:aws:ec2:us-west-2:123456789012:customer-gateway/gw-1"), }, { - To: PtrString("arn:aws:directconnect:us-west-2:123456789012:virtual-interface/vif-1"), - From: PtrString("arn:aws:ec2:us-west-2:123456789012:customer-gateway/gw-1"), + To: 
new("arn:aws:directconnect:us-west-2:123456789012:virtual-interface/vif-1"), + From: new("arn:aws:ec2:us-west-2:123456789012:customer-gateway/gw-1"), }, // ec2-transit-gateway, ec2-transit-gateway-attachment { - From: PtrString("arn:aws:ec2:us-east-2:986543144159:transit-gateway/tgw-06910e97a1fbdf66a"), - To: PtrString("arn:aws:ec2:us-west-2:123456789012:transit-gateway-attachment/tgwa-1"), + From: new("arn:aws:ec2:us-east-2:986543144159:transit-gateway/tgw-06910e97a1fbdf66a"), + To: new("arn:aws:ec2:us-west-2:123456789012:transit-gateway-attachment/tgwa-1"), }, { - To: PtrString("arn:aws:ec2:us-east-2:986543144159:transit-gateway/tgw-06910e97a1fbdf66a"), - From: PtrString("arn:aws:ec2:us-west-2:123456789012:transit-gateway-attachment/tgwa-1"), + To: new("arn:aws:ec2:us-east-2:986543144159:transit-gateway/tgw-06910e97a1fbdf66a"), + From: new("arn:aws:ec2:us-west-2:123456789012:transit-gateway-attachment/tgwa-1"), }, // ec2-transit-gateway-route-table, ec2-transit-gateway-connect-peer { - From: PtrString("arn:aws:ec2:us-west-2:123456789012:transit-gateway-connect-peer/tgw-cnp-1"), - To: PtrString("arn:aws:ec2:us-east-2:986543144159:transit-gateway-route-table/tgw-rtb-043b7b4c0db1e4833"), + From: new("arn:aws:ec2:us-west-2:123456789012:transit-gateway-connect-peer/tgw-cnp-1"), + To: new("arn:aws:ec2:us-east-2:986543144159:transit-gateway-route-table/tgw-rtb-043b7b4c0db1e4833"), }, { - To: PtrString("arn:aws:ec2:us-west-2:123456789012:transit-gateway-connect-peer/tgw-cnp-1"), - From: PtrString("arn:aws:ec2:us-east-2:986543144159:transit-gateway-route-table/tgw-rtb-043b7b4c0db1e4833"), + To: new("arn:aws:ec2:us-west-2:123456789012:transit-gateway-connect-peer/tgw-cnp-1"), + From: new("arn:aws:ec2:us-east-2:986543144159:transit-gateway-route-table/tgw-rtb-043b7b4c0db1e4833"), }, // connection, ec2-vpn-connection { - From: PtrString("arn:aws:networkmanager:us-west-2:123456789012:connection/conn-1"), - To: 
PtrString("arn:aws:ec2:us-west-2:123456789012:vpn-connection/conn-1"), + From: new("arn:aws:networkmanager:us-west-2:123456789012:connection/conn-1"), + To: new("arn:aws:ec2:us-west-2:123456789012:vpn-connection/conn-1"), }, { - To: PtrString("arn:aws:networkmanager:us-west-2:123456789012:connection/conn-1"), - From: PtrString("arn:aws:ec2:us-west-2:123456789012:vpn-connection/conn-1"), + To: new("arn:aws:networkmanager:us-west-2:123456789012:connection/conn-1"), + From: new("arn:aws:ec2:us-west-2:123456789012:vpn-connection/conn-1"), }, }, }, diff --git a/aws-source/adapters/networkmanager-site-to-site-vpn-attachment_test.go b/aws-source/adapters/networkmanager-site-to-site-vpn-attachment_test.go index 473d8e11..e8a4e46b 100644 --- a/aws-source/adapters/networkmanager-site-to-site-vpn-attachment_test.go +++ b/aws-source/adapters/networkmanager-site-to-site-vpn-attachment_test.go @@ -20,11 +20,11 @@ func TestSiteToSiteVpnAttachmentOutputMapper(t *testing.T) { name: "ok", item: &types.SiteToSiteVpnAttachment{ Attachment: &types.Attachment{ - AttachmentId: PtrString("stsa-1"), - CoreNetworkId: PtrString("cn-1"), + AttachmentId: new("stsa-1"), + CoreNetworkId: new("cn-1"), State: types.AttachmentStateAvailable, }, - VpnConnectionArn: PtrString("arn:aws:ec2:us-west-2:123456789012:vpn-connection/vpn-1234"), + VpnConnectionArn: new("arn:aws:ec2:us-west-2:123456789012:vpn-connection/vpn-1234"), }, expectedHealth: sdp.Health_HEALTH_OK, expectedAttr: "stsa-1", diff --git a/aws-source/adapters/networkmanager-site_test.go b/aws-source/adapters/networkmanager-site_test.go index 9c2e885f..72f2e90a 100644 --- a/aws-source/adapters/networkmanager-site_test.go +++ b/aws-source/adapters/networkmanager-site_test.go @@ -15,8 +15,8 @@ func TestSiteOutputMapper(t *testing.T) { output := networkmanager.GetSitesOutput{ Sites: []types.Site{ { - SiteId: PtrString("site1"), - GlobalNetworkId: PtrString("default"), + SiteId: new("site1"), + GlobalNetworkId: new("default"), }, }, } @@ -87,7 
+87,7 @@ func TestSiteInputMapperSearch(t *testing.T) { name: "Valid networkmanager-site ARN", query: "arn:aws:networkmanager::123456789012:site/global-network-01231231231231231/site-444555aaabbb11223", expectedInput: &networkmanager.GetSitesInput{ - GlobalNetworkId: PtrString("global-network-01231231231231231"), + GlobalNetworkId: new("global-network-01231231231231231"), SiteIds: []string{"site-444555aaabbb11223"}, }, expectError: false, @@ -96,7 +96,7 @@ func TestSiteInputMapperSearch(t *testing.T) { name: "Global Network ID (backward compatibility)", query: "global-network-123456789", expectedInput: &networkmanager.GetSitesInput{ - GlobalNetworkId: PtrString("global-network-123456789"), + GlobalNetworkId: new("global-network-123456789"), }, expectError: false, }, diff --git a/aws-source/adapters/networkmanager-transit-gateway-connect-peer-association_test.go b/aws-source/adapters/networkmanager-transit-gateway-connect-peer-association_test.go index 6db8a6c8..0f8a181b 100644 --- a/aws-source/adapters/networkmanager-transit-gateway-connect-peer-association_test.go +++ b/aws-source/adapters/networkmanager-transit-gateway-connect-peer-association_test.go @@ -24,11 +24,11 @@ func TestTransitGatewayConnectPeerAssociationsOutputMapper(t *testing.T) { out: networkmanager.GetTransitGatewayConnectPeerAssociationsOutput{ TransitGatewayConnectPeerAssociations: []types.TransitGatewayConnectPeerAssociation{ { - GlobalNetworkId: PtrString("default"), - TransitGatewayConnectPeerArn: PtrString("arn:aws:ec2:us-west-2:123456789012:transit-gateway-connect-peer-association/tgw-1234"), + GlobalNetworkId: new("default"), + TransitGatewayConnectPeerArn: new("arn:aws:ec2:us-west-2:123456789012:transit-gateway-connect-peer-association/tgw-1234"), State: types.TransitGatewayConnectPeerAssociationStateAvailable, - DeviceId: PtrString("device-1"), - LinkId: PtrString("link-1"), + DeviceId: new("device-1"), + LinkId: new("link-1"), }, }, }, diff --git 
a/aws-source/adapters/networkmanager-transit-gateway-peering_test.go b/aws-source/adapters/networkmanager-transit-gateway-peering_test.go index 03c8fd28..a09916df 100644 --- a/aws-source/adapters/networkmanager-transit-gateway-peering_test.go +++ b/aws-source/adapters/networkmanager-transit-gateway-peering_test.go @@ -20,12 +20,12 @@ func TestTransitGatewayPeeringOutputMapper(t *testing.T) { name: "ok", item: &types.TransitGatewayPeering{ Peering: &types.Peering{ - PeeringId: PtrString("tgp-1"), - CoreNetworkId: PtrString("cn-1"), + PeeringId: new("tgp-1"), + CoreNetworkId: new("cn-1"), State: types.PeeringStateAvailable, }, - TransitGatewayArn: PtrString("arn:aws:ec2:us-west-2:123456789012:transit-gateway/tgw-1234"), - TransitGatewayPeeringAttachmentId: PtrString("gpa-1"), + TransitGatewayArn: new("arn:aws:ec2:us-west-2:123456789012:transit-gateway/tgw-1234"), + TransitGatewayPeeringAttachmentId: new("gpa-1"), }, expectedHealth: sdp.Health_HEALTH_OK, expectedAttr: "tgp-1", diff --git a/aws-source/adapters/networkmanager-transit-gateway-registration_test.go b/aws-source/adapters/networkmanager-transit-gateway-registration_test.go index 2b0fbeed..bbdd99f7 100644 --- a/aws-source/adapters/networkmanager-transit-gateway-registration_test.go +++ b/aws-source/adapters/networkmanager-transit-gateway-registration_test.go @@ -23,8 +23,8 @@ func TestTransitGatewayRegistrationOutputMapper(t *testing.T) { out: networkmanager.GetTransitGatewayRegistrationsOutput{ TransitGatewayRegistrations: []types.TransitGatewayRegistration{ { - GlobalNetworkId: PtrString("default"), - TransitGatewayArn: PtrString("arn:aws:ec2:us-west-2:123456789012:transit-gateway/tgw-1234"), + GlobalNetworkId: new("default"), + TransitGatewayArn: new("arn:aws:ec2:us-west-2:123456789012:transit-gateway/tgw-1234"), State: &types.TransitGatewayRegistrationStateReason{ Code: types.TransitGatewayRegistrationStateAvailable, }, @@ -52,8 +52,8 @@ func TestTransitGatewayRegistrationOutputMapper(t *testing.T) { out: 
networkmanager.GetTransitGatewayRegistrationsOutput{ TransitGatewayRegistrations: []types.TransitGatewayRegistration{ { - GlobalNetworkId: PtrString("default"), - TransitGatewayArn: PtrString("arn:aws:ec2:us-west-2:123456789012:transit-gateway/tgw-1234"), + GlobalNetworkId: new("default"), + TransitGatewayArn: new("arn:aws:ec2:us-west-2:123456789012:transit-gateway/tgw-1234"), State: &types.TransitGatewayRegistrationStateReason{ Code: types.TransitGatewayRegistrationStateDeleting, }, diff --git a/aws-source/adapters/networkmanager-transit-gateway-route-table-attachment_test.go b/aws-source/adapters/networkmanager-transit-gateway-route-table-attachment_test.go index 92ada429..db9e6658 100644 --- a/aws-source/adapters/networkmanager-transit-gateway-route-table-attachment_test.go +++ b/aws-source/adapters/networkmanager-transit-gateway-route-table-attachment_test.go @@ -19,11 +19,11 @@ func TestTransitGatewayRouteTableAttachmentItemMapper(t *testing.T) { name: "ok", input: types.TransitGatewayRouteTableAttachment{ Attachment: &types.Attachment{ - AttachmentId: PtrString("attachment1"), - CoreNetworkId: PtrString("corenetwork1"), + AttachmentId: new("attachment1"), + CoreNetworkId: new("corenetwork1"), }, - TransitGatewayRouteTableArn: PtrString("arn:aws:ec2:us-west-2:123456789012:transit-gateway-route-table/tgw-rtb-9876543210123456"), - PeeringId: PtrString("peer1"), + TransitGatewayRouteTableArn: new("arn:aws:ec2:us-west-2:123456789012:transit-gateway-route-table/tgw-rtb-9876543210123456"), + PeeringId: new("peer1"), }, expectedAttr: "attachment1", tests: QueryTests{ @@ -51,8 +51,8 @@ func TestTransitGatewayRouteTableAttachmentItemMapper(t *testing.T) { name: "missing ec2-transit-gateway-route-table", input: types.TransitGatewayRouteTableAttachment{ Attachment: &types.Attachment{ - AttachmentId: PtrString("attachment1"), - CoreNetworkId: PtrString("corenetwork1"), + AttachmentId: new("attachment1"), + CoreNetworkId: new("corenetwork1"), }, }, expectedAttr: 
"attachment1", @@ -69,10 +69,10 @@ func TestTransitGatewayRouteTableAttachmentItemMapper(t *testing.T) { name: "invalid ec2-transit-gateway-route-table", input: types.TransitGatewayRouteTableAttachment{ Attachment: &types.Attachment{ - AttachmentId: PtrString("attachment1"), - CoreNetworkId: PtrString("corenetwork1"), + AttachmentId: new("attachment1"), + CoreNetworkId: new("corenetwork1"), }, - TransitGatewayRouteTableArn: PtrString("arn:aws:ec2:us-west-2:123456789012:transit-gateway-route-table-tgw-rtb-9876543210123456"), + TransitGatewayRouteTableArn: new("arn:aws:ec2:us-west-2:123456789012:transit-gateway-route-table-tgw-rtb-9876543210123456"), }, expectedAttr: "attachment1", tests: QueryTests{ diff --git a/aws-source/adapters/networkmanager-vpc-attachment_test.go b/aws-source/adapters/networkmanager-vpc-attachment_test.go index 0e9baa94..026a4696 100644 --- a/aws-source/adapters/networkmanager-vpc-attachment_test.go +++ b/aws-source/adapters/networkmanager-vpc-attachment_test.go @@ -10,8 +10,8 @@ import ( func TestVPCAttachmentItemMapper(t *testing.T) { input := types.VpcAttachment{ Attachment: &types.Attachment{ - AttachmentId: PtrString("attachment1"), - CoreNetworkId: PtrString("corenetwork1"), + AttachmentId: new("attachment1"), + CoreNetworkId: new("corenetwork1"), }, } scope := "123456789012.eu-west-2" diff --git a/aws-source/adapters/rds-db-cluster-parameter-group_test.go b/aws-source/adapters/rds-db-cluster-parameter-group_test.go index e02ff713..7bbc1b96 100644 --- a/aws-source/adapters/rds-db-cluster-parameter-group_test.go +++ b/aws-source/adapters/rds-db-cluster-parameter-group_test.go @@ -11,60 +11,60 @@ import ( func TestDBClusterParameterGroupOutputMapper(t *testing.T) { group := ClusterParameterGroup{ DBClusterParameterGroup: types.DBClusterParameterGroup{ - DBClusterParameterGroupName: PtrString("default.aurora-mysql5.7"), - DBParameterGroupFamily: PtrString("aurora-mysql5.7"), - Description: PtrString("Default cluster parameter group for 
aurora-mysql5.7"), - DBClusterParameterGroupArn: PtrString("arn:aws:rds:eu-west-1:052392120703:cluster-pg:default.aurora-mysql5.7"), + DBClusterParameterGroupName: new("default.aurora-mysql5.7"), + DBParameterGroupFamily: new("aurora-mysql5.7"), + Description: new("Default cluster parameter group for aurora-mysql5.7"), + DBClusterParameterGroupArn: new("arn:aws:rds:eu-west-1:052392120703:cluster-pg:default.aurora-mysql5.7"), }, Parameters: []types.Parameter{ { - ParameterName: PtrString("activate_all_roles_on_login"), - ParameterValue: PtrString("0"), - Description: PtrString("Automatically set all granted roles as active after the user has authenticated successfully."), - Source: PtrString("engine-default"), - ApplyType: PtrString("dynamic"), - DataType: PtrString("boolean"), - AllowedValues: PtrString("0,1"), - IsModifiable: PtrBool(true), + ParameterName: new("activate_all_roles_on_login"), + ParameterValue: new("0"), + Description: new("Automatically set all granted roles as active after the user has authenticated successfully."), + Source: new("engine-default"), + ApplyType: new("dynamic"), + DataType: new("boolean"), + AllowedValues: new("0,1"), + IsModifiable: new(true), ApplyMethod: types.ApplyMethodPendingReboot, SupportedEngineModes: []string{ "provisioned", }, }, { - ParameterName: PtrString("allow-suspicious-udfs"), - Description: PtrString("Controls whether user-defined functions that have only an xxx symbol for the main function can be loaded"), - Source: PtrString("engine-default"), - ApplyType: PtrString("static"), - DataType: PtrString("boolean"), - AllowedValues: PtrString("0,1"), - IsModifiable: PtrBool(false), + ParameterName: new("allow-suspicious-udfs"), + Description: new("Controls whether user-defined functions that have only an xxx symbol for the main function can be loaded"), + Source: new("engine-default"), + ApplyType: new("static"), + DataType: new("boolean"), + AllowedValues: new("0,1"), + IsModifiable: new(false), ApplyMethod: 
types.ApplyMethodPendingReboot, SupportedEngineModes: []string{ "provisioned", }, }, { - ParameterName: PtrString("aurora_binlog_replication_max_yield_seconds"), - Description: PtrString("Controls the number of seconds that binary log dump thread waits up to for the current binlog file to be filled by transactions. This wait period avoids contention that can arise from replicating each binlog event individually."), - Source: PtrString("engine-default"), - ApplyType: PtrString("dynamic"), - DataType: PtrString("integer"), - AllowedValues: PtrString("0-36000"), - IsModifiable: PtrBool(true), + ParameterName: new("aurora_binlog_replication_max_yield_seconds"), + Description: new("Controls the number of seconds that binary log dump thread waits up to for the current binlog file to be filled by transactions. This wait period avoids contention that can arise from replicating each binlog event individually."), + Source: new("engine-default"), + ApplyType: new("dynamic"), + DataType: new("integer"), + AllowedValues: new("0-36000"), + IsModifiable: new(true), ApplyMethod: types.ApplyMethodPendingReboot, SupportedEngineModes: []string{ "provisioned", }, }, { - ParameterName: PtrString("aurora_enable_staggered_replica_restart"), - Description: PtrString("Allow Aurora replicas to follow a staggered restart schedule to increase cluster availability."), - Source: PtrString("system"), - ApplyType: PtrString("dynamic"), - DataType: PtrString("boolean"), - AllowedValues: PtrString("0,1"), - IsModifiable: PtrBool(true), + ParameterName: new("aurora_enable_staggered_replica_restart"), + Description: new("Allow Aurora replicas to follow a staggered restart schedule to increase cluster availability."), + Source: new("system"), + ApplyType: new("dynamic"), + DataType: new("boolean"), + AllowedValues: new("0,1"), + IsModifiable: new(true), ApplyMethod: types.ApplyMethodImmediate, SupportedEngineModes: []string{ "provisioned", diff --git a/aws-source/adapters/rds-db-cluster_test.go 
b/aws-source/adapters/rds-db-cluster_test.go index 898fdfdc..6ce34ed7 100644 --- a/aws-source/adapters/rds-db-cluster_test.go +++ b/aws-source/adapters/rds-db-cluster_test.go @@ -15,115 +15,115 @@ func TestDBClusterOutputMapper(t *testing.T) { output := rds.DescribeDBClustersOutput{ DBClusters: []types.DBCluster{ { - AllocatedStorage: PtrInt32(100), + AllocatedStorage: new(int32(100)), AvailabilityZones: []string{ "eu-west-2c", // link }, - BackupRetentionPeriod: PtrInt32(7), - DBClusterIdentifier: PtrString("database-2"), - DBClusterParameterGroup: PtrString("default.postgres13"), - DBSubnetGroup: PtrString("default-vpc-0d7892e00e573e701"), // link - Status: PtrString("available"), - EarliestRestorableTime: PtrTime(time.Now()), - Endpoint: PtrString("database-2.cluster-camcztjohmlj.eu-west-2.rds.amazonaws.com"), // link - ReaderEndpoint: PtrString("database-2.cluster-ro-camcztjohmlj.eu-west-2.rds.amazonaws.com"), // link - MultiAZ: PtrBool(true), - Engine: PtrString("postgres"), - EngineVersion: PtrString("13.7"), - LatestRestorableTime: PtrTime(time.Now()), - Port: PtrInt32(5432), // link - MasterUsername: PtrString("postgres"), - PreferredBackupWindow: PtrString("04:48-05:18"), - PreferredMaintenanceWindow: PtrString("fri:04:05-fri:04:35"), + BackupRetentionPeriod: new(int32(7)), + DBClusterIdentifier: new("database-2"), + DBClusterParameterGroup: new("default.postgres13"), + DBSubnetGroup: new("default-vpc-0d7892e00e573e701"), // link + Status: new("available"), + EarliestRestorableTime: new(time.Now()), + Endpoint: new("database-2.cluster-camcztjohmlj.eu-west-2.rds.amazonaws.com"), // link + ReaderEndpoint: new("database-2.cluster-ro-camcztjohmlj.eu-west-2.rds.amazonaws.com"), // link + MultiAZ: new(true), + Engine: new("postgres"), + EngineVersion: new("13.7"), + LatestRestorableTime: new(time.Now()), + Port: new(int32(5432)), // link + MasterUsername: new("postgres"), + PreferredBackupWindow: new("04:48-05:18"), + PreferredMaintenanceWindow: 
new("fri:04:05-fri:04:35"), ReadReplicaIdentifiers: []string{ "arn:aws:rds:eu-west-1:052392120703:cluster:read-replica", // link }, DBClusterMembers: []types.DBClusterMember{ { - DBInstanceIdentifier: PtrString("database-2-instance-3"), // link - IsClusterWriter: PtrBool(false), - DBClusterParameterGroupStatus: PtrString("in-sync"), - PromotionTier: PtrInt32(1), + DBInstanceIdentifier: new("database-2-instance-3"), // link + IsClusterWriter: new(false), + DBClusterParameterGroupStatus: new("in-sync"), + PromotionTier: new(int32(1)), }, }, VpcSecurityGroups: []types.VpcSecurityGroupMembership{ { - VpcSecurityGroupId: PtrString("sg-094e151c9fc5da181"), // link - Status: PtrString("active"), + VpcSecurityGroupId: new("sg-094e151c9fc5da181"), // link + Status: new("active"), }, }, - HostedZoneId: PtrString("Z1TTGA775OQIYO"), // link - StorageEncrypted: PtrBool(true), - KmsKeyId: PtrString("arn:aws:kms:eu-west-2:052392120703:key/9653cbdd-1590-464a-8456-67389cef6933"), // link - DbClusterResourceId: PtrString("cluster-2EW4PDVN7F7V57CUJPYOEAA74M"), - DBClusterArn: PtrString("arn:aws:rds:eu-west-2:052392120703:cluster:database-2"), - IAMDatabaseAuthenticationEnabled: PtrBool(false), - ClusterCreateTime: PtrTime(time.Now()), - EngineMode: PtrString("provisioned"), - DeletionProtection: PtrBool(false), - HttpEndpointEnabled: PtrBool(false), + HostedZoneId: new("Z1TTGA775OQIYO"), // link + StorageEncrypted: new(true), + KmsKeyId: new("arn:aws:kms:eu-west-2:052392120703:key/9653cbdd-1590-464a-8456-67389cef6933"), // link + DbClusterResourceId: new("cluster-2EW4PDVN7F7V57CUJPYOEAA74M"), + DBClusterArn: new("arn:aws:rds:eu-west-2:052392120703:cluster:database-2"), + IAMDatabaseAuthenticationEnabled: new(false), + ClusterCreateTime: new(time.Now()), + EngineMode: new("provisioned"), + DeletionProtection: new(false), + HttpEndpointEnabled: new(false), ActivityStreamStatus: types.ActivityStreamStatusStopped, - CopyTagsToSnapshot: PtrBool(false), - CrossAccountClone: PtrBool(false), 
+ CopyTagsToSnapshot: new(false), + CrossAccountClone: new(false), DomainMemberships: []types.DomainMembership{}, TagList: []types.Tag{}, - DBClusterInstanceClass: PtrString("db.m5d.large"), - StorageType: PtrString("io1"), - Iops: PtrInt32(1000), - PubliclyAccessible: PtrBool(true), - AutoMinorVersionUpgrade: PtrBool(true), - MonitoringInterval: PtrInt32(0), - PerformanceInsightsEnabled: PtrBool(false), - NetworkType: PtrString("IPV4"), - ActivityStreamKinesisStreamName: PtrString("aws-rds-das-db-AB1CDEFG23GHIJK4LMNOPQRST"), // link - ActivityStreamKmsKeyId: PtrString("ab12345e-1111-2bc3-12a3-ab1cd12345e"), // Not linking at the moment because there are too many possible formats. If you want to change this, submit a PR + DBClusterInstanceClass: new("db.m5d.large"), + StorageType: new("io1"), + Iops: new(int32(1000)), + PubliclyAccessible: new(true), + AutoMinorVersionUpgrade: new(true), + MonitoringInterval: new(int32(0)), + PerformanceInsightsEnabled: new(false), + NetworkType: new("IPV4"), + ActivityStreamKinesisStreamName: new("aws-rds-das-db-AB1CDEFG23GHIJK4LMNOPQRST"), // link + ActivityStreamKmsKeyId: new("ab12345e-1111-2bc3-12a3-ab1cd12345e"), // Not linking at the moment because there are too many possible formats. 
If you want to change this, submit a PR ActivityStreamMode: types.ActivityStreamModeAsync, - AutomaticRestartTime: PtrTime(time.Now()), + AutomaticRestartTime: new(time.Now()), AssociatedRoles: []types.DBClusterRole{}, // EC2 classic roles, ignore - BacktrackConsumedChangeRecords: PtrInt64(1), - BacktrackWindow: PtrInt64(2), - Capacity: PtrInt32(2), - CharacterSetName: PtrString("english"), - CloneGroupId: PtrString("id"), + BacktrackConsumedChangeRecords: new(int64(1)), + BacktrackWindow: new(int64(2)), + Capacity: new(int32(2)), + CharacterSetName: new("english"), + CloneGroupId: new("id"), CustomEndpoints: []string{ "endpoint1", // link dns }, DBClusterOptionGroupMemberships: []types.DBClusterOptionGroupStatus{ { - DBClusterOptionGroupName: PtrString("optionGroupName"), // link - Status: PtrString("good"), + DBClusterOptionGroupName: new("optionGroupName"), // link + Status: new("good"), }, }, - DBSystemId: PtrString("systemId"), - DatabaseName: PtrString("databaseName"), - EarliestBacktrackTime: PtrTime(time.Now()), + DBSystemId: new("systemId"), + DatabaseName: new("databaseName"), + EarliestBacktrackTime: new(time.Now()), EnabledCloudwatchLogsExports: []string{ "logExport1", }, - GlobalWriteForwardingRequested: PtrBool(true), + GlobalWriteForwardingRequested: new(true), GlobalWriteForwardingStatus: types.WriteForwardingStatusDisabled, MasterUserSecret: &types.MasterUserSecret{ - KmsKeyId: PtrString("arn:aws:kms:eu-west-2:052392120703:key/something"), // link - SecretArn: PtrString("arn:aws:service:region:account:type/id"), // link - SecretStatus: PtrString("okay"), + KmsKeyId: new("arn:aws:kms:eu-west-2:052392120703:key/something"), // link + SecretArn: new("arn:aws:service:region:account:type/id"), // link + SecretStatus: new("okay"), }, - MonitoringRoleArn: PtrString("arn:aws:service:region:account:type/id"), // link + MonitoringRoleArn: new("arn:aws:service:region:account:type/id"), // link PendingModifiedValues: &types.ClusterPendingModifiedValues{}, - 
PercentProgress: PtrString("99"), - PerformanceInsightsKMSKeyId: PtrString("arn:aws:service:region:account:type/id"), // link, assuming it's an ARN - PerformanceInsightsRetentionPeriod: PtrInt32(99), - ReplicationSourceIdentifier: PtrString("arn:aws:rds:eu-west-2:052392120703:cluster:database-1"), // link + PercentProgress: new("99"), + PerformanceInsightsKMSKeyId: new("arn:aws:service:region:account:type/id"), // link, assuming it's an ARN + PerformanceInsightsRetentionPeriod: new(int32(99)), + ReplicationSourceIdentifier: new("arn:aws:rds:eu-west-2:052392120703:cluster:database-1"), // link ScalingConfigurationInfo: &types.ScalingConfigurationInfo{ - AutoPause: PtrBool(true), - MaxCapacity: PtrInt32(10), - MinCapacity: PtrInt32(1), - SecondsBeforeTimeout: PtrInt32(10), - SecondsUntilAutoPause: PtrInt32(10), - TimeoutAction: PtrString("error"), + AutoPause: new(true), + MaxCapacity: new(int32(10)), + MinCapacity: new(int32(1)), + SecondsBeforeTimeout: new(int32(10)), + SecondsUntilAutoPause: new(int32(10)), + TimeoutAction: new("error"), }, ServerlessV2ScalingConfiguration: &types.ServerlessV2ScalingConfigurationInfo{ - MaxCapacity: PtrFloat64(10), - MinCapacity: PtrFloat64(1), + MaxCapacity: new(float64(10)), + MinCapacity: new(float64(1)), }, }, }, diff --git a/aws-source/adapters/rds-db-instance_test.go b/aws-source/adapters/rds-db-instance_test.go index 8ccd1176..0894bb00 100644 --- a/aws-source/adapters/rds-db-instance_test.go +++ b/aws-source/adapters/rds-db-instance_test.go @@ -15,156 +15,156 @@ func TestDBInstanceOutputMapper(t *testing.T) { output := &rds.DescribeDBInstancesOutput{ DBInstances: []types.DBInstance{ { - DBInstanceIdentifier: PtrString("database-1-instance-1"), - DBInstanceClass: PtrString("db.r6g.large"), - Engine: PtrString("aurora-mysql"), - DBInstanceStatus: PtrString("available"), - MasterUsername: PtrString("admin"), + DBInstanceIdentifier: new("database-1-instance-1"), + DBInstanceClass: new("db.r6g.large"), + Engine: 
new("aurora-mysql"), + DBInstanceStatus: new("available"), + MasterUsername: new("admin"), Endpoint: &types.Endpoint{ - Address: PtrString("database-1-instance-1.camcztjohmlj.eu-west-2.rds.amazonaws.com"), // link - Port: PtrInt32(3306), // link - HostedZoneId: PtrString("Z1TTGA775OQIYO"), // link + Address: new("database-1-instance-1.camcztjohmlj.eu-west-2.rds.amazonaws.com"), // link + Port: new(int32(3306)), // link + HostedZoneId: new("Z1TTGA775OQIYO"), // link }, - AllocatedStorage: PtrInt32(1), - InstanceCreateTime: PtrTime(time.Now()), - PreferredBackupWindow: PtrString("00:05-00:35"), - BackupRetentionPeriod: PtrInt32(1), + AllocatedStorage: new(int32(1)), + InstanceCreateTime: new(time.Now()), + PreferredBackupWindow: new("00:05-00:35"), + BackupRetentionPeriod: new(int32(1)), DBSecurityGroups: []types.DBSecurityGroupMembership{ { - DBSecurityGroupName: PtrString("name"), // This is EC2Classic only so we're skipping this + DBSecurityGroupName: new("name"), // This is EC2Classic only so we're skipping this }, }, VpcSecurityGroups: []types.VpcSecurityGroupMembership{ { - VpcSecurityGroupId: PtrString("sg-094e151c9fc5da181"), // link - Status: PtrString("active"), + VpcSecurityGroupId: new("sg-094e151c9fc5da181"), // link + Status: new("active"), }, }, DBParameterGroups: []types.DBParameterGroupStatus{ { - DBParameterGroupName: PtrString("default.aurora-mysql8.0"), // link - ParameterApplyStatus: PtrString("in-sync"), + DBParameterGroupName: new("default.aurora-mysql8.0"), // link + ParameterApplyStatus: new("in-sync"), }, }, - AvailabilityZone: PtrString("eu-west-2a"), // link + AvailabilityZone: new("eu-west-2a"), // link DBSubnetGroup: &types.DBSubnetGroup{ - DBSubnetGroupName: PtrString("default-vpc-0d7892e00e573e701"), // link - DBSubnetGroupDescription: PtrString("Created from the RDS Management Console"), - VpcId: PtrString("vpc-0d7892e00e573e701"), // link - SubnetGroupStatus: PtrString("Complete"), + DBSubnetGroupName: 
new("default-vpc-0d7892e00e573e701"), // link + DBSubnetGroupDescription: new("Created from the RDS Management Console"), + VpcId: new("vpc-0d7892e00e573e701"), // link + SubnetGroupStatus: new("Complete"), Subnets: []types.Subnet{ { - SubnetIdentifier: PtrString("subnet-0d8ae4b4e07647efa"), // lnk + SubnetIdentifier: new("subnet-0d8ae4b4e07647efa"), // lnk SubnetAvailabilityZone: &types.AvailabilityZone{ - Name: PtrString("eu-west-2b"), + Name: new("eu-west-2b"), }, SubnetOutpost: &types.Outpost{ - Arn: PtrString("arn:aws:service:region:account:type/id"), // link + Arn: new("arn:aws:service:region:account:type/id"), // link }, - SubnetStatus: PtrString("Active"), + SubnetStatus: new("Active"), }, }, }, - PreferredMaintenanceWindow: PtrString("fri:04:49-fri:05:19"), + PreferredMaintenanceWindow: new("fri:04:49-fri:05:19"), PendingModifiedValues: &types.PendingModifiedValues{}, - MultiAZ: PtrBool(false), - EngineVersion: PtrString("8.0.mysql_aurora.3.02.0"), - AutoMinorVersionUpgrade: PtrBool(true), + MultiAZ: new(false), + EngineVersion: new("8.0.mysql_aurora.3.02.0"), + AutoMinorVersionUpgrade: new(true), ReadReplicaDBInstanceIdentifiers: []string{ "read", }, - LicenseModel: PtrString("general-public-license"), + LicenseModel: new("general-public-license"), OptionGroupMemberships: []types.OptionGroupMembership{ { - OptionGroupName: PtrString("default:aurora-mysql-8-0"), - Status: PtrString("in-sync"), + OptionGroupName: new("default:aurora-mysql-8-0"), + Status: new("in-sync"), }, }, - PubliclyAccessible: PtrBool(false), - StorageType: PtrString("aurora"), - DbInstancePort: PtrInt32(0), - DBClusterIdentifier: PtrString("database-1"), // link - StorageEncrypted: PtrBool(true), - KmsKeyId: PtrString("arn:aws:kms:eu-west-2:052392120703:key/9653cbdd-1590-464a-8456-67389cef6933"), // link - DbiResourceId: PtrString("db-ET7CE5D5TQTK7MXNJGJNFQD52E"), - CACertificateIdentifier: PtrString("rds-ca-2019"), + PubliclyAccessible: new(false), + StorageType: new("aurora"), + 
DbInstancePort: new(int32(0)), + DBClusterIdentifier: new("database-1"), // link + StorageEncrypted: new(true), + KmsKeyId: new("arn:aws:kms:eu-west-2:052392120703:key/9653cbdd-1590-464a-8456-67389cef6933"), // link + DbiResourceId: new("db-ET7CE5D5TQTK7MXNJGJNFQD52E"), + CACertificateIdentifier: new("rds-ca-2019"), DomainMemberships: []types.DomainMembership{ { - Domain: PtrString("domain"), - FQDN: PtrString("fqdn"), - IAMRoleName: PtrString("role"), - Status: PtrString("enrolled"), + Domain: new("domain"), + FQDN: new("fqdn"), + IAMRoleName: new("role"), + Status: new("enrolled"), }, }, - CopyTagsToSnapshot: PtrBool(false), - MonitoringInterval: PtrInt32(60), - EnhancedMonitoringResourceArn: PtrString("arn:aws:logs:eu-west-2:052392120703:log-group:RDSOSMetrics:log-stream:db-ET7CE5D5TQTK7MXNJGJNFQD52E"), // link - MonitoringRoleArn: PtrString("arn:aws:iam::052392120703:role/rds-monitoring-role"), // link - PromotionTier: PtrInt32(1), - DBInstanceArn: PtrString("arn:aws:rds:eu-west-2:052392120703:db:database-1-instance-1"), - IAMDatabaseAuthenticationEnabled: PtrBool(false), - PerformanceInsightsEnabled: PtrBool(true), - PerformanceInsightsKMSKeyId: PtrString("arn:aws:kms:eu-west-2:052392120703:key/9653cbdd-1590-464a-8456-67389cef6933"), // link - PerformanceInsightsRetentionPeriod: PtrInt32(7), - DeletionProtection: PtrBool(false), + CopyTagsToSnapshot: new(false), + MonitoringInterval: new(int32(60)), + EnhancedMonitoringResourceArn: new("arn:aws:logs:eu-west-2:052392120703:log-group:RDSOSMetrics:log-stream:db-ET7CE5D5TQTK7MXNJGJNFQD52E"), // link + MonitoringRoleArn: new("arn:aws:iam::052392120703:role/rds-monitoring-role"), // link + PromotionTier: new(int32(1)), + DBInstanceArn: new("arn:aws:rds:eu-west-2:052392120703:db:database-1-instance-1"), + IAMDatabaseAuthenticationEnabled: new(false), + PerformanceInsightsEnabled: new(true), + PerformanceInsightsKMSKeyId: new("arn:aws:kms:eu-west-2:052392120703:key/9653cbdd-1590-464a-8456-67389cef6933"), // link + 
PerformanceInsightsRetentionPeriod: new(int32(7)), + DeletionProtection: new(false), AssociatedRoles: []types.DBInstanceRole{ { - FeatureName: PtrString("something"), - RoleArn: PtrString("arn:aws:service:region:account:type/id"), // link - Status: PtrString("associated"), + FeatureName: new("something"), + RoleArn: new("arn:aws:service:region:account:type/id"), // link + Status: new("associated"), }, }, TagList: []types.Tag{}, - CustomerOwnedIpEnabled: PtrBool(false), - BackupTarget: PtrString("region"), - NetworkType: PtrString("IPV4"), - StorageThroughput: PtrInt32(0), - ActivityStreamEngineNativeAuditFieldsIncluded: PtrBool(true), - ActivityStreamKinesisStreamName: PtrString("aws-rds-das-db-AB1CDEFG23GHIJK4LMNOPQRST"), // link - ActivityStreamKmsKeyId: PtrString("ab12345e-1111-2bc3-12a3-ab1cd12345e"), // Not linking at the moment because there are too many possible formats. If you want to change this, submit a PR + CustomerOwnedIpEnabled: new(false), + BackupTarget: new("region"), + NetworkType: new("IPV4"), + StorageThroughput: new(int32(0)), + ActivityStreamEngineNativeAuditFieldsIncluded: new(true), + ActivityStreamKinesisStreamName: new("aws-rds-das-db-AB1CDEFG23GHIJK4LMNOPQRST"), // link + ActivityStreamKmsKeyId: new("ab12345e-1111-2bc3-12a3-ab1cd12345e"), // Not linking at the moment because there are too many possible formats. 
If you want to change this, submit a PR ActivityStreamMode: types.ActivityStreamModeAsync, ActivityStreamPolicyStatus: types.ActivityStreamPolicyStatusLocked, ActivityStreamStatus: types.ActivityStreamStatusStarted, - AutomaticRestartTime: PtrTime(time.Now()), + AutomaticRestartTime: new(time.Now()), AutomationMode: types.AutomationModeAllPaused, - AwsBackupRecoveryPointArn: PtrString("arn:aws:service:region:account:type/id"), // link + AwsBackupRecoveryPointArn: new("arn:aws:service:region:account:type/id"), // link CertificateDetails: &types.CertificateDetails{ - CAIdentifier: PtrString("id"), - ValidTill: PtrTime(time.Now()), + CAIdentifier: new("id"), + ValidTill: new(time.Now()), }, - CharacterSetName: PtrString("something"), - CustomIamInstanceProfile: PtrString("arn:aws:service:region:account:type/id"), // link? + CharacterSetName: new("something"), + CustomIamInstanceProfile: new("arn:aws:service:region:account:type/id"), // link? DBInstanceAutomatedBackupsReplications: []types.DBInstanceAutomatedBackupsReplication{ { - DBInstanceAutomatedBackupsArn: PtrString("arn:aws:service:region:account:type/id"), // link + DBInstanceAutomatedBackupsArn: new("arn:aws:service:region:account:type/id"), // link }, }, - DBName: PtrString("name"), - DBSystemId: PtrString("id"), + DBName: new("name"), + DBSystemId: new("id"), EnabledCloudwatchLogsExports: []string{}, - Iops: PtrInt32(10), - LatestRestorableTime: PtrTime(time.Now()), + Iops: new(int32(10)), + LatestRestorableTime: new(time.Now()), ListenerEndpoint: &types.Endpoint{ - Address: PtrString("foo.bar.com"), // link - HostedZoneId: PtrString("id"), // link - Port: PtrInt32(5432), // link + Address: new("foo.bar.com"), // link + HostedZoneId: new("id"), // link + Port: new(int32(5432)), // link }, MasterUserSecret: &types.MasterUserSecret{ - KmsKeyId: PtrString("id"), // link - SecretArn: PtrString("arn:aws:service:region:account:type/id"), // link - SecretStatus: PtrString("okay"), + KmsKeyId: new("id"), // link + 
SecretArn: new("arn:aws:service:region:account:type/id"), // link + SecretStatus: new("okay"), }, - MaxAllocatedStorage: PtrInt32(10), - NcharCharacterSetName: PtrString("english"), + MaxAllocatedStorage: new(int32(10)), + NcharCharacterSetName: new("english"), ProcessorFeatures: []types.ProcessorFeature{}, ReadReplicaDBClusterIdentifiers: []string{}, - ReadReplicaSourceDBInstanceIdentifier: PtrString("id"), + ReadReplicaSourceDBInstanceIdentifier: new("id"), ReplicaMode: types.ReplicaModeMounted, - ResumeFullAutomationModeTime: PtrTime(time.Now()), - SecondaryAvailabilityZone: PtrString("eu-west-1"), // link + ResumeFullAutomationModeTime: new(time.Now()), + SecondaryAvailabilityZone: new("eu-west-1"), // link StatusInfos: []types.DBInstanceStatusInfo{}, - TdeCredentialArn: PtrString("arn:aws:service:region:account:type/id"), // I don't have a good example for this so skipping for now. PR if required - Timezone: PtrString("GB"), + TdeCredentialArn: new("arn:aws:service:region:account:type/id"), // I don't have a good example for this so skipping for now. 
PR if required + Timezone: new("GB"), }, }, } diff --git a/aws-source/adapters/rds-db-parameter-group_test.go b/aws-source/adapters/rds-db-parameter-group_test.go index e07c9cc9..24c7606a 100644 --- a/aws-source/adapters/rds-db-parameter-group_test.go +++ b/aws-source/adapters/rds-db-parameter-group_test.go @@ -11,51 +11,51 @@ import ( func TestDBParameterGroupOutputMapper(t *testing.T) { group := ParameterGroup{ DBParameterGroup: types.DBParameterGroup{ - DBParameterGroupName: PtrString("default.aurora-mysql5.7"), - DBParameterGroupFamily: PtrString("aurora-mysql5.7"), - Description: PtrString("Default parameter group for aurora-mysql5.7"), - DBParameterGroupArn: PtrString("arn:aws:rds:eu-west-1:052392120703:pg:default.aurora-mysql5.7"), + DBParameterGroupName: new("default.aurora-mysql5.7"), + DBParameterGroupFamily: new("aurora-mysql5.7"), + Description: new("Default parameter group for aurora-mysql5.7"), + DBParameterGroupArn: new("arn:aws:rds:eu-west-1:052392120703:pg:default.aurora-mysql5.7"), }, Parameters: []types.Parameter{ { - ParameterName: PtrString("activate_all_roles_on_login"), - ParameterValue: PtrString("0"), - Description: PtrString("Automatically set all granted roles as active after the user has authenticated successfully."), - Source: PtrString("engine-default"), - ApplyType: PtrString("dynamic"), - DataType: PtrString("boolean"), - AllowedValues: PtrString("0,1"), - IsModifiable: PtrBool(true), + ParameterName: new("activate_all_roles_on_login"), + ParameterValue: new("0"), + Description: new("Automatically set all granted roles as active after the user has authenticated successfully."), + Source: new("engine-default"), + ApplyType: new("dynamic"), + DataType: new("boolean"), + AllowedValues: new("0,1"), + IsModifiable: new(true), ApplyMethod: types.ApplyMethodPendingReboot, }, { - ParameterName: PtrString("allow-suspicious-udfs"), - Description: PtrString("Controls whether user-defined functions that have only an xxx symbol for the main 
function can be loaded"), - Source: PtrString("engine-default"), - ApplyType: PtrString("static"), - DataType: PtrString("boolean"), - AllowedValues: PtrString("0,1"), - IsModifiable: PtrBool(false), + ParameterName: new("allow-suspicious-udfs"), + Description: new("Controls whether user-defined functions that have only an xxx symbol for the main function can be loaded"), + Source: new("engine-default"), + ApplyType: new("static"), + DataType: new("boolean"), + AllowedValues: new("0,1"), + IsModifiable: new(false), ApplyMethod: types.ApplyMethodPendingReboot, }, { - ParameterName: PtrString("aurora_parallel_query"), - Description: PtrString("This parameter can be used to enable and disable Aurora Parallel Query."), - Source: PtrString("engine-default"), - ApplyType: PtrString("dynamic"), - DataType: PtrString("boolean"), - AllowedValues: PtrString("0,1"), - IsModifiable: PtrBool(true), + ParameterName: new("aurora_parallel_query"), + Description: new("This parameter can be used to enable and disable Aurora Parallel Query."), + Source: new("engine-default"), + ApplyType: new("dynamic"), + DataType: new("boolean"), + AllowedValues: new("0,1"), + IsModifiable: new(true), ApplyMethod: types.ApplyMethodPendingReboot, }, { - ParameterName: PtrString("autocommit"), - Description: PtrString("Sets the autocommit mode"), - Source: PtrString("engine-default"), - ApplyType: PtrString("dynamic"), - DataType: PtrString("boolean"), - AllowedValues: PtrString("0,1"), - IsModifiable: PtrBool(true), + ParameterName: new("autocommit"), + Description: new("Sets the autocommit mode"), + Source: new("engine-default"), + ApplyType: new("dynamic"), + DataType: new("boolean"), + AllowedValues: new("0,1"), + IsModifiable: new(true), ApplyMethod: types.ApplyMethodPendingReboot, }, }, diff --git a/aws-source/adapters/rds-db-subnet-group_test.go b/aws-source/adapters/rds-db-subnet-group_test.go index 7b7db1dc..fad5cc36 100644 --- a/aws-source/adapters/rds-db-subnet-group_test.go +++ 
b/aws-source/adapters/rds-db-subnet-group_test.go @@ -15,23 +15,23 @@ func TestDBSubnetGroupOutputMapper(t *testing.T) { output := rds.DescribeDBSubnetGroupsOutput{ DBSubnetGroups: []types.DBSubnetGroup{ { - DBSubnetGroupName: PtrString("default-vpc-0d7892e00e573e701"), - DBSubnetGroupDescription: PtrString("Created from the RDS Management Console"), - VpcId: PtrString("vpc-0d7892e00e573e701"), // link - SubnetGroupStatus: PtrString("Complete"), + DBSubnetGroupName: new("default-vpc-0d7892e00e573e701"), + DBSubnetGroupDescription: new("Created from the RDS Management Console"), + VpcId: new("vpc-0d7892e00e573e701"), // link + SubnetGroupStatus: new("Complete"), Subnets: []types.Subnet{ { - SubnetIdentifier: PtrString("subnet-0450a637af9984235"), // link + SubnetIdentifier: new("subnet-0450a637af9984235"), // link SubnetAvailabilityZone: &types.AvailabilityZone{ - Name: PtrString("eu-west-2c"), // link + Name: new("eu-west-2c"), // link }, SubnetOutpost: &types.Outpost{ - Arn: PtrString("arn:aws:service:region:account:type/id"), // link + Arn: new("arn:aws:service:region:account:type/id"), // link }, - SubnetStatus: PtrString("Active"), + SubnetStatus: new("Active"), }, }, - DBSubnetGroupArn: PtrString("arn:aws:rds:eu-west-2:052392120703:subgrp:default-vpc-0d7892e00e573e701"), + DBSubnetGroupArn: new("arn:aws:rds:eu-west-2:052392120703:subgrp:default-vpc-0d7892e00e573e701"), SupportedNetworkTypes: []string{ "IPV4", }, diff --git a/aws-source/adapters/rds-option-group_test.go b/aws-source/adapters/rds-option-group_test.go index 53fc62a2..fa493476 100644 --- a/aws-source/adapters/rds-option-group_test.go +++ b/aws-source/adapters/rds-option-group_test.go @@ -12,13 +12,13 @@ func TestOptionGroupOutputMapper(t *testing.T) { output := rds.DescribeOptionGroupsOutput{ OptionGroupsList: []types.OptionGroup{ { - OptionGroupName: PtrString("default:aurora-mysql-8-0"), - OptionGroupDescription: PtrString("Default option group for aurora-mysql 8.0"), - EngineName: 
PtrString("aurora-mysql"), - MajorEngineVersion: PtrString("8.0"), + OptionGroupName: new("default:aurora-mysql-8-0"), + OptionGroupDescription: new("Default option group for aurora-mysql 8.0"), + EngineName: new("aurora-mysql"), + MajorEngineVersion: new("8.0"), Options: []types.Option{}, - AllowsVpcAndNonVpcInstanceMemberships: PtrBool(true), - OptionGroupArn: PtrString("arn:aws:rds:eu-west-2:052392120703:og:default:aurora-mysql-8-0"), + AllowsVpcAndNonVpcInstanceMemberships: new(true), + OptionGroupArn: new("arn:aws:rds:eu-west-2:052392120703:og:default:aurora-mysql-8-0"), }, }, } diff --git a/aws-source/adapters/rds.go b/aws-source/adapters/rds.go index f7aaff05..47dcfb33 100644 --- a/aws-source/adapters/rds.go +++ b/aws-source/adapters/rds.go @@ -33,8 +33,8 @@ func (m mockRdsClient) ListTagsForResource(ctx context.Context, params *rds.List return &rds.ListTagsForResourceOutput{ TagList: []types.Tag{ { - Key: PtrString("key"), - Value: PtrString("value"), + Key: new("key"), + Value: new("value"), }, }, }, nil diff --git a/aws-source/adapters/route53-health-check_test.go b/aws-source/adapters/route53-health-check_test.go index 35939600..91e98f33 100644 --- a/aws-source/adapters/route53-health-check_test.go +++ b/aws-source/adapters/route53-health-check_test.go @@ -12,37 +12,37 @@ import ( func TestHealthCheckItemMapper(t *testing.T) { hc := HealthCheck{ HealthCheck: types.HealthCheck{ - Id: PtrString("d7ce5d72-6d1f-4147-8246-d0ca3fb505d6"), - CallerReference: PtrString("85d56b3f-873c-498b-a2dd-554ec13c5289"), + Id: new("d7ce5d72-6d1f-4147-8246-d0ca3fb505d6"), + CallerReference: new("85d56b3f-873c-498b-a2dd-554ec13c5289"), HealthCheckConfig: &types.HealthCheckConfig{ - IPAddress: PtrString("1.1.1.1"), - Port: PtrInt32(443), + IPAddress: new("1.1.1.1"), + Port: new(int32(443)), Type: types.HealthCheckTypeHttps, - FullyQualifiedDomainName: PtrString("one.one.one.one"), - RequestInterval: PtrInt32(30), - FailureThreshold: PtrInt32(3), - MeasureLatency: 
PtrBool(false), - Inverted: PtrBool(false), - Disabled: PtrBool(false), - EnableSNI: PtrBool(true), + FullyQualifiedDomainName: new("one.one.one.one"), + RequestInterval: new(int32(30)), + FailureThreshold: new(int32(3)), + MeasureLatency: new(false), + Inverted: new(false), + Disabled: new(false), + EnableSNI: new(true), }, - HealthCheckVersion: PtrInt64(1), + HealthCheckVersion: new(int64(1)), }, HealthCheckObservations: []types.HealthCheckObservation{ { Region: types.HealthCheckRegionApNortheast1, - IPAddress: PtrString("15.177.62.21"), + IPAddress: new("15.177.62.21"), StatusReport: &types.StatusReport{ - Status: PtrString("Success: HTTP Status Code 200, OK"), - CheckedTime: PtrTime(time.Now()), + Status: new("Success: HTTP Status Code 200, OK"), + CheckedTime: new(time.Now()), }, }, { Region: types.HealthCheckRegionEuWest1, - IPAddress: PtrString("15.177.10.21"), + IPAddress: new("15.177.10.21"), StatusReport: &types.StatusReport{ - Status: PtrString("Failure: Connection timed out. The endpoint or the internet connection is down, or requests are being blocked by your firewall. See https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-router-firewall-rules.html"), - CheckedTime: PtrTime(time.Now()), + Status: new("Failure: Connection timed out. The endpoint or the internet connection is down, or requests are being blocked by your firewall. 
See https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-router-firewall-rules.html"), + CheckedTime: new(time.Now()), }, }, }, diff --git a/aws-source/adapters/route53-hosted-zone_test.go b/aws-source/adapters/route53-hosted-zone_test.go index da48d7de..11f35f1d 100644 --- a/aws-source/adapters/route53-hosted-zone_test.go +++ b/aws-source/adapters/route53-hosted-zone_test.go @@ -11,17 +11,17 @@ import ( func TestHostedZoneItemMapper(t *testing.T) { zone := types.HostedZone{ - Id: PtrString("/hostedzone/Z08416862SZP5DJXIDB29"), - Name: PtrString("overmind-demo.com."), - CallerReference: PtrString("RISWorkflow-RD:144d3779-1574-42bf-9e75-f309838ea0a1"), + Id: new("/hostedzone/Z08416862SZP5DJXIDB29"), + Name: new("overmind-demo.com."), + CallerReference: new("RISWorkflow-RD:144d3779-1574-42bf-9e75-f309838ea0a1"), Config: &types.HostedZoneConfig{ - Comment: PtrString("HostedZone created by Route53 Registrar"), + Comment: new("HostedZone created by Route53 Registrar"), PrivateZone: false, }, - ResourceRecordSetCount: PtrInt64(3), + ResourceRecordSetCount: new(int64(3)), LinkedService: &types.LinkedService{ - Description: PtrString("service description"), - ServicePrincipal: PtrString("principal"), + Description: new("service description"), + ServicePrincipal: new("principal"), }, } diff --git a/aws-source/adapters/route53-resource-record-set_test.go b/aws-source/adapters/route53-resource-record-set_test.go index d18c15a9..8a12ec22 100644 --- a/aws-source/adapters/route53-resource-record-set_test.go +++ b/aws-source/adapters/route53-resource-record-set_test.go @@ -15,50 +15,50 @@ import ( func TestResourceRecordSetItemMapper(t *testing.T) { recordSet := types.ResourceRecordSet{ - Name: PtrString("overmind-demo.com."), + Name: new("overmind-demo.com."), Type: types.RRTypeNs, - TTL: PtrInt64(172800), + TTL: new(int64(172800)), GeoProximityLocation: &types.GeoProximityLocation{ - AWSRegion: PtrString("us-east-1"), - Bias: PtrInt32(100), + AWSRegion: 
new("us-east-1"), + Bias: new(int32(100)), Coordinates: &types.Coordinates{}, - LocalZoneGroup: PtrString("group"), + LocalZoneGroup: new("group"), }, ResourceRecords: []types.ResourceRecord{ { - Value: PtrString("ns-1673.awsdns-17.co.uk."), // link + Value: new("ns-1673.awsdns-17.co.uk."), // link }, { - Value: PtrString("ns-1505.awsdns-60.org."), // link + Value: new("ns-1505.awsdns-60.org."), // link }, { - Value: PtrString("ns-955.awsdns-55.net."), // link + Value: new("ns-955.awsdns-55.net."), // link }, { - Value: PtrString("ns-276.awsdns-34.com."), // link + Value: new("ns-276.awsdns-34.com."), // link }, }, AliasTarget: &types.AliasTarget{ - DNSName: PtrString("foo.bar.com"), // link + DNSName: new("foo.bar.com"), // link EvaluateTargetHealth: true, - HostedZoneId: PtrString("id"), + HostedZoneId: new("id"), }, CidrRoutingConfig: &types.CidrRoutingConfig{ - CollectionId: PtrString("id"), - LocationName: PtrString("somewhere"), + CollectionId: new("id"), + LocationName: new("somewhere"), }, Failover: types.ResourceRecordSetFailoverPrimary, GeoLocation: &types.GeoLocation{ - ContinentCode: PtrString("GB"), - CountryCode: PtrString("GB"), - SubdivisionCode: PtrString("ENG"), + ContinentCode: new("GB"), + CountryCode: new("GB"), + SubdivisionCode: new("ENG"), }, - HealthCheckId: PtrString("id"), // link - MultiValueAnswer: PtrBool(true), + HealthCheckId: new("id"), // link + MultiValueAnswer: new(true), Region: types.ResourceRecordSetRegionApEast1, - SetIdentifier: PtrString("identifier"), - TrafficPolicyInstanceId: PtrString("id"), - Weight: PtrInt64(100), + SetIdentifier: new("identifier"), + TrafficPolicyInstanceId: new("id"), + Weight: new(int64(100)), } item, err := resourceRecordSetItemMapper("", "foo", &recordSet) diff --git a/aws-source/adapters/s3.go b/aws-source/adapters/s3.go index 2e187d5e..c4cef606 100644 --- a/aws-source/adapters/s3.go +++ b/aws-source/adapters/s3.go @@ -214,7 +214,7 @@ func getImpl(ctx context.Context, cache sdpcache.Cache, 
client S3Client, scope s var wg sync.WaitGroup var err error - bucketName := PtrString(query) + bucketName := new(query) location, err = client.GetBucketLocation(ctx, &s3.GetBucketLocationInput{ Bucket: bucketName, diff --git a/aws-source/adapters/s3_test.go b/aws-source/adapters/s3_test.go index 574c87be..42682f47 100644 --- a/aws-source/adapters/s3_test.go +++ b/aws-source/adapters/s3_test.go @@ -160,8 +160,8 @@ func TestS3SourceCaching(t *testing.T) { } var owner = types.Owner{ - DisplayName: PtrString("dylan"), - ID: PtrString("id"), + DisplayName: new("dylan"), + ID: new("id"), } // TestS3Client A client that returns example data @@ -171,8 +171,8 @@ func (t TestS3Client) ListBuckets(ctx context.Context, params *s3.ListBucketsInp return &s3.ListBucketsOutput{ Buckets: []types.Bucket{ { - CreationDate: PtrTime(time.Now()), - Name: PtrString("foo"), + CreationDate: new(time.Now()), + Name: new("foo"), }, }, Owner: &owner, @@ -185,10 +185,10 @@ func (t TestS3Client) GetBucketAcl(ctx context.Context, params *s3.GetBucketAclI { Grantee: &types.Grantee{ Type: types.TypeAmazonCustomerByEmail, - DisplayName: PtrString("dylan"), - EmailAddress: PtrString("dylan@company.com"), - ID: PtrString("id"), - URI: PtrString("uri"), + DisplayName: new("dylan"), + EmailAddress: new("dylan@company.com"), + ID: new("id"), + URI: new("uri"), }, }, }, @@ -199,15 +199,15 @@ func (t TestS3Client) GetBucketAcl(ctx context.Context, params *s3.GetBucketAclI func (t TestS3Client) GetBucketAnalyticsConfiguration(ctx context.Context, params *s3.GetBucketAnalyticsConfigurationInput, optFns ...func(*s3.Options)) (*s3.GetBucketAnalyticsConfigurationOutput, error) { return &s3.GetBucketAnalyticsConfigurationOutput{ AnalyticsConfiguration: &types.AnalyticsConfiguration{ - Id: PtrString("id"), + Id: new("id"), StorageClassAnalysis: &types.StorageClassAnalysis{ DataExport: &types.StorageClassAnalysisDataExport{ Destination: &types.AnalyticsExportDestination{ S3BucketDestination: 
&types.AnalyticsS3BucketDestination{ - Bucket: PtrString("arn:aws:s3:::amzn-s3-demo-bucket"), + Bucket: new("arn:aws:s3:::amzn-s3-demo-bucket"), Format: types.AnalyticsS3ExportFileFormatCsv, - BucketAccountId: PtrString("id"), - Prefix: PtrString("pre"), + BucketAccountId: new("id"), + Prefix: new("pre"), }, }, OutputSchemaVersion: types.StorageClassAnalysisSchemaVersionV1, @@ -233,8 +233,8 @@ func (t TestS3Client) GetBucketCors(ctx context.Context, params *s3.GetBucketCor ExposeHeaders: []string{ "foo", }, - ID: PtrString("id"), - MaxAgeSeconds: PtrInt32(10), + ID: new("id"), + MaxAgeSeconds: new(int32(10)), }, }, }, nil @@ -247,9 +247,9 @@ func (t TestS3Client) GetBucketEncryption(ctx context.Context, params *s3.GetBuc { ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{ SSEAlgorithm: types.ServerSideEncryptionAes256, - KMSMasterKeyID: PtrString("id"), + KMSMasterKeyID: new("id"), }, - BucketKeyEnabled: PtrBool(true), + BucketKeyEnabled: new(true), }, }, }, @@ -259,12 +259,12 @@ func (t TestS3Client) GetBucketEncryption(ctx context.Context, params *s3.GetBuc func (t TestS3Client) GetBucketIntelligentTieringConfiguration(ctx context.Context, params *s3.GetBucketIntelligentTieringConfigurationInput, optFns ...func(*s3.Options)) (*s3.GetBucketIntelligentTieringConfigurationOutput, error) { return &s3.GetBucketIntelligentTieringConfigurationOutput{ IntelligentTieringConfiguration: &types.IntelligentTieringConfiguration{ - Id: PtrString("id"), + Id: new("id"), Status: types.IntelligentTieringStatusEnabled, Tierings: []types.Tiering{ { AccessTier: types.IntelligentTieringAccessTierDeepArchiveAccess, - Days: PtrInt32(100), + Days: new(int32(100)), }, }, Filter: &types.IntelligentTieringFilter{}, @@ -277,20 +277,20 @@ func (t TestS3Client) GetBucketInventoryConfiguration(ctx context.Context, param InventoryConfiguration: &types.InventoryConfiguration{ Destination: &types.InventoryDestination{ S3BucketDestination: 
&types.InventoryS3BucketDestination{ - Bucket: PtrString("arn:aws:s3:::amzn-s3-demo-bucket"), + Bucket: new("arn:aws:s3:::amzn-s3-demo-bucket"), Format: types.InventoryFormatCsv, - AccountId: PtrString("id"), + AccountId: new("id"), Encryption: &types.InventoryEncryption{ SSEKMS: &types.SSEKMS{ - KeyId: PtrString("key"), + KeyId: new("key"), }, }, - Prefix: PtrString("pre"), + Prefix: new("pre"), }, }, - Id: PtrString("id"), + Id: new("id"), IncludedObjectVersions: types.InventoryIncludedObjectVersionsAll, - IsEnabled: PtrBool(true), + IsEnabled: new(true), Schedule: &types.InventorySchedule{ Frequency: types.InventoryFrequencyDaily, }, @@ -304,30 +304,30 @@ func (t TestS3Client) GetBucketLifecycleConfiguration(ctx context.Context, param { Status: types.ExpirationStatusEnabled, AbortIncompleteMultipartUpload: &types.AbortIncompleteMultipartUpload{ - DaysAfterInitiation: PtrInt32(1), + DaysAfterInitiation: new(int32(1)), }, Expiration: &types.LifecycleExpiration{ - Date: PtrTime(time.Now()), - Days: PtrInt32(3), - ExpiredObjectDeleteMarker: PtrBool(true), + Date: new(time.Now()), + Days: new(int32(3)), + ExpiredObjectDeleteMarker: new(true), }, - ID: PtrString("id"), + ID: new("id"), NoncurrentVersionExpiration: &types.NoncurrentVersionExpiration{ - NewerNoncurrentVersions: PtrInt32(3), - NoncurrentDays: PtrInt32(1), + NewerNoncurrentVersions: new(int32(3)), + NoncurrentDays: new(int32(1)), }, NoncurrentVersionTransitions: []types.NoncurrentVersionTransition{ { - NewerNoncurrentVersions: PtrInt32(1), - NoncurrentDays: PtrInt32(1), + NewerNoncurrentVersions: new(int32(1)), + NoncurrentDays: new(int32(1)), StorageClass: types.TransitionStorageClassGlacierIr, }, }, - Prefix: PtrString("pre"), + Prefix: new("pre"), Transitions: []types.Transition{ { - Date: PtrTime(time.Now()), - Days: PtrInt32(12), + Date: new(time.Now()), + Days: new(int32(12)), StorageClass: types.TransitionStorageClassGlacierIr, }, }, @@ -345,13 +345,13 @@ func (t TestS3Client) GetBucketLocation(ctx 
context.Context, params *s3.GetBucke func (t TestS3Client) GetBucketLogging(ctx context.Context, params *s3.GetBucketLoggingInput, optFns ...func(*s3.Options)) (*s3.GetBucketLoggingOutput, error) { return &s3.GetBucketLoggingOutput{ LoggingEnabled: &types.LoggingEnabled{ - TargetBucket: PtrString("bucket"), - TargetPrefix: PtrString("pre"), + TargetBucket: new("bucket"), + TargetPrefix: new("pre"), TargetGrants: []types.TargetGrant{ { Grantee: &types.Grantee{ Type: types.TypeGroup, - ID: PtrString("id"), + ID: new("id"), }, }, }, @@ -362,7 +362,7 @@ func (t TestS3Client) GetBucketLogging(ctx context.Context, params *s3.GetBucket func (t TestS3Client) GetBucketMetricsConfiguration(ctx context.Context, params *s3.GetBucketMetricsConfigurationInput, optFns ...func(*s3.Options)) (*s3.GetBucketMetricsConfigurationOutput, error) { return &s3.GetBucketMetricsConfigurationOutput{ MetricsConfiguration: &types.MetricsConfiguration{ - Id: PtrString("id"), + Id: new("id"), }, }, nil } @@ -372,43 +372,43 @@ func (t TestS3Client) GetBucketNotificationConfiguration(ctx context.Context, pa LambdaFunctionConfigurations: []types.LambdaFunctionConfiguration{ { Events: []types.Event{}, - LambdaFunctionArn: PtrString("arn:partition:service:region:account-id:resource-type:resource-id"), - Id: PtrString("id"), + LambdaFunctionArn: new("arn:partition:service:region:account-id:resource-type:resource-id"), + Id: new("id"), }, }, EventBridgeConfiguration: &types.EventBridgeConfiguration{}, QueueConfigurations: []types.QueueConfiguration{ { Events: []types.Event{}, - QueueArn: PtrString("arn:partition:service:region:account-id:resource-type:resource-id"), + QueueArn: new("arn:partition:service:region:account-id:resource-type:resource-id"), Filter: &types.NotificationConfigurationFilter{ Key: &types.S3KeyFilter{ FilterRules: []types.FilterRule{ { Name: types.FilterRuleNamePrefix, - Value: PtrString("foo"), + Value: new("foo"), }, }, }, }, - Id: PtrString("id"), + Id: new("id"), }, }, 
TopicConfigurations: []types.TopicConfiguration{ { Events: []types.Event{}, - TopicArn: PtrString("arn:partition:service:region:account-id:resource-type:resource-id"), + TopicArn: new("arn:partition:service:region:account-id:resource-type:resource-id"), Filter: &types.NotificationConfigurationFilter{ Key: &types.S3KeyFilter{ FilterRules: []types.FilterRule{ { Name: types.FilterRuleNameSuffix, - Value: PtrString("fix"), + Value: new("fix"), }, }, }, }, - Id: PtrString("id"), + Id: new("id"), }, }, }, nil @@ -428,14 +428,14 @@ func (t TestS3Client) GetBucketOwnershipControls(ctx context.Context, params *s3 func (t TestS3Client) GetBucketPolicy(ctx context.Context, params *s3.GetBucketPolicyInput, optFns ...func(*s3.Options)) (*s3.GetBucketPolicyOutput, error) { return &s3.GetBucketPolicyOutput{ - Policy: PtrString("policy"), + Policy: new("policy"), }, nil } func (t TestS3Client) GetBucketPolicyStatus(ctx context.Context, params *s3.GetBucketPolicyStatusInput, optFns ...func(*s3.Options)) (*s3.GetBucketPolicyStatusOutput, error) { return &s3.GetBucketPolicyStatusOutput{ PolicyStatus: &types.PolicyStatus{ - IsPublic: PtrBool(true), + IsPublic: new(true), }, }, nil } @@ -443,28 +443,28 @@ func (t TestS3Client) GetBucketPolicyStatus(ctx context.Context, params *s3.GetB func (t TestS3Client) GetBucketReplication(ctx context.Context, params *s3.GetBucketReplicationInput, optFns ...func(*s3.Options)) (*s3.GetBucketReplicationOutput, error) { return &s3.GetBucketReplicationOutput{ ReplicationConfiguration: &types.ReplicationConfiguration{ - Role: PtrString("role"), + Role: new("role"), Rules: []types.ReplicationRule{ { Destination: &types.Destination{ - Bucket: PtrString("bucket"), + Bucket: new("bucket"), AccessControlTranslation: &types.AccessControlTranslation{ Owner: types.OwnerOverrideDestination, }, - Account: PtrString("account"), + Account: new("account"), EncryptionConfiguration: &types.EncryptionConfiguration{ - ReplicaKmsKeyID: PtrString("keyId"), + 
ReplicaKmsKeyID: new("keyId"), }, Metrics: &types.Metrics{ Status: types.MetricsStatusEnabled, EventThreshold: &types.ReplicationTimeValue{ - Minutes: PtrInt32(1), + Minutes: new(int32(1)), }, }, ReplicationTime: &types.ReplicationTime{ Status: types.ReplicationTimeStatusEnabled, Time: &types.ReplicationTimeValue{ - Minutes: PtrInt32(1), + Minutes: new(int32(1)), }, }, StorageClass: types.StorageClassGlacier, @@ -497,23 +497,23 @@ func (t TestS3Client) GetBucketVersioning(ctx context.Context, params *s3.GetBuc func (t TestS3Client) GetBucketWebsite(ctx context.Context, params *s3.GetBucketWebsiteInput, optFns ...func(*s3.Options)) (*s3.GetBucketWebsiteOutput, error) { return &s3.GetBucketWebsiteOutput{ ErrorDocument: &types.ErrorDocument{ - Key: PtrString("key"), + Key: new("key"), }, IndexDocument: &types.IndexDocument{ - Suffix: PtrString("html"), + Suffix: new("html"), }, RedirectAllRequestsTo: &types.RedirectAllRequestsTo{ - HostName: PtrString("hostname"), + HostName: new("hostname"), Protocol: types.ProtocolHttps, }, RoutingRules: []types.RoutingRule{ { Redirect: &types.Redirect{ - HostName: PtrString("hostname"), - HttpRedirectCode: PtrString("303"), + HostName: new("hostname"), + HttpRedirectCode: new("303"), Protocol: types.ProtocolHttp, - ReplaceKeyPrefixWith: PtrString("pre"), - ReplaceKeyWith: PtrString("key"), + ReplaceKeyPrefixWith: new("pre"), + ReplaceKeyWith: new("key"), }, }, }, diff --git a/aws-source/adapters/sns-data-protection-policy.go b/aws-source/adapters/sns-data-protection-policy.go index 5d54efe7..3fc23916 100644 --- a/aws-source/adapters/sns-data-protection-policy.go +++ b/aws-source/adapters/sns-data-protection-policy.go @@ -28,7 +28,7 @@ func getDataProtectionPolicyFunc(ctx context.Context, client dataProtectionPolic } // ResourceArn is the topic ARN that the policy is associated with - attr := map[string]interface{}{ + attr := map[string]any{ "TopicArn": *input.ResourceArn, } @@ -64,7 +64,7 @@ func 
NewSNSDataProtectionPolicyAdapter(client dataProtectionPolicyClient, accoun Region: region, DisableList: true, AdapterMetadata: dataProtectionPolicyAdapterMetadata, - cache: cache, + cache: cache, GetInputMapper: func(scope, query string) *sns.GetDataProtectionPolicyInput { return &sns.GetDataProtectionPolicyInput{ ResourceArn: &query, diff --git a/aws-source/adapters/sns-data-protection-policy_test.go b/aws-source/adapters/sns-data-protection-policy_test.go index 3e3fe366..1066d7a9 100644 --- a/aws-source/adapters/sns-data-protection-policy_test.go +++ b/aws-source/adapters/sns-data-protection-policy_test.go @@ -13,7 +13,7 @@ type mockDataProtectionPolicyClient struct{} func (m mockDataProtectionPolicyClient) GetDataProtectionPolicy(ctx context.Context, params *sns.GetDataProtectionPolicyInput, optFns ...func(*sns.Options)) (*sns.GetDataProtectionPolicyOutput, error) { return &sns.GetDataProtectionPolicyOutput{ - DataProtectionPolicy: PtrString("{\"Name\":\"data_protection_policy\",\"Description\":\"Example data protection policy\",\"Version\":\"2021-06-01\",\"Statement\":[{\"DataDirection\":\"Inbound\",\"Principal\":[\"*\"],\"DataIdentifier\":[\"arn:aws:dataprotection::aws:data-identifier/CreditCardNumber\"],\"Operation\":{\"Deny\":{}}}]}"), + DataProtectionPolicy: new("{\"Name\":\"data_protection_policy\",\"Description\":\"Example data protection policy\",\"Version\":\"2021-06-01\",\"Statement\":[{\"DataDirection\":\"Inbound\",\"Principal\":[\"*\"],\"DataIdentifier\":[\"arn:aws:dataprotection::aws:data-identifier/CreditCardNumber\"],\"Operation\":{\"Deny\":{}}}]}"), }, nil } @@ -22,7 +22,7 @@ func TestGetDataProtectionPolicyFunc(t *testing.T) { cli := &mockDataProtectionPolicyClient{} item, err := getDataProtectionPolicyFunc(ctx, cli, "scope", &sns.GetDataProtectionPolicyInput{ - ResourceArn: PtrString("arn:aws:sns:us-east-1:123456789012:mytopic"), + ResourceArn: new("arn:aws:sns:us-east-1:123456789012:mytopic"), }) if err != nil { t.Fatal(err) diff --git 
a/aws-source/adapters/sns-endpoint_test.go b/aws-source/adapters/sns-endpoint_test.go index b26f5b65..8b40aded 100644 --- a/aws-source/adapters/sns-endpoint_test.go +++ b/aws-source/adapters/sns-endpoint_test.go @@ -44,7 +44,7 @@ func TestGetEndpointFunc(t *testing.T) { cli := &mockEndpointClient{} item, err := getEndpointFunc(ctx, cli, "scope", &sns.GetEndpointAttributesInput{ - EndpointArn: PtrString("arn:aws:sns:us-west-2:123456789012:endpoint/GCM/MyApplication/12345678-abcd-9012-efgh-345678901234"), + EndpointArn: new("arn:aws:sns:us-west-2:123456789012:endpoint/GCM/MyApplication/12345678-abcd-9012-efgh-345678901234"), }) if err != nil { t.Fatal(err) diff --git a/aws-source/adapters/sns-platform-application_test.go b/aws-source/adapters/sns-platform-application_test.go index f7fffbd6..fb821785 100644 --- a/aws-source/adapters/sns-platform-application_test.go +++ b/aws-source/adapters/sns-platform-application_test.go @@ -15,8 +15,8 @@ type mockPlatformApplicationClient struct{} func (m mockPlatformApplicationClient) ListTagsForResource(ctx context.Context, input *sns.ListTagsForResourceInput, f ...func(*sns.Options)) (*sns.ListTagsForResourceOutput, error) { return &sns.ListTagsForResourceOutput{ Tags: []types.Tag{ - {Key: PtrString("tag1"), Value: PtrString("value1")}, - {Key: PtrString("tag2"), Value: PtrString("value2")}, + {Key: new("tag1"), Value: new("value1")}, + {Key: new("tag2"), Value: new("value2")}, }, }, nil } @@ -34,14 +34,14 @@ func (m mockPlatformApplicationClient) ListPlatformApplications(ctx context.Cont return &sns.ListPlatformApplicationsOutput{ PlatformApplications: []types.PlatformApplication{ { - PlatformApplicationArn: PtrString("arn:aws:sns:us-west-2:123456789012:app/ADM/MyApplication"), + PlatformApplicationArn: new("arn:aws:sns:us-west-2:123456789012:app/ADM/MyApplication"), Attributes: map[string]string{ "SuccessFeedbackSampleRate": "100", "Enabled": "true", }, }, { - PlatformApplicationArn: 
PtrString("arn:aws:sns:us-west-2:123456789012:app/MPNS/MyOtherApplication"), + PlatformApplicationArn: new("arn:aws:sns:us-west-2:123456789012:app/MPNS/MyOtherApplication"), Attributes: map[string]string{ "SuccessFeedbackSampleRate": "100", "Enabled": "true", @@ -56,7 +56,7 @@ func TestGetPlatformApplicationFunc(t *testing.T) { cli := mockPlatformApplicationClient{} item, err := getPlatformApplicationFunc(ctx, cli, "scope", &sns.GetPlatformApplicationAttributesInput{ - PlatformApplicationArn: PtrString("arn:aws:sns:us-west-2:123456789012:my-topic"), + PlatformApplicationArn: new("arn:aws:sns:us-west-2:123456789012:my-topic"), }) if err != nil { t.Fatal(err) diff --git a/aws-source/adapters/sns-subscription_test.go b/aws-source/adapters/sns-subscription_test.go index d83d419f..bd8af04f 100644 --- a/aws-source/adapters/sns-subscription_test.go +++ b/aws-source/adapters/sns-subscription_test.go @@ -29,11 +29,11 @@ func (t snsTestClient) ListSubscriptions(context.Context, *sns.ListSubscriptions return &sns.ListSubscriptionsOutput{ Subscriptions: []types.Subscription{ { - Owner: PtrString("123456789012"), - Endpoint: PtrString("my-email@example.com"), - Protocol: PtrString("email"), - TopicArn: PtrString("arn:aws:sns:us-west-2:123456789012:my-topic"), - SubscriptionArn: PtrString("arn:aws:sns:us-west-2:123456789012:my-topic:8a21d249-4329-4871-acc6-7be709c6ea7f"), + Owner: new("123456789012"), + Endpoint: new("my-email@example.com"), + Protocol: new("email"), + TopicArn: new("arn:aws:sns:us-west-2:123456789012:my-topic"), + SubscriptionArn: new("arn:aws:sns:us-west-2:123456789012:my-topic:8a21d249-4329-4871-acc6-7be709c6ea7f"), }, }, }, nil @@ -42,8 +42,8 @@ func (t snsTestClient) ListSubscriptions(context.Context, *sns.ListSubscriptions func (t snsTestClient) ListTagsForResource(context.Context, *sns.ListTagsForResourceInput, ...func(*sns.Options)) (*sns.ListTagsForResourceOutput, error) { return &sns.ListTagsForResourceOutput{ Tags: []types.Tag{ - {Key: 
PtrString("tag1"), Value: PtrString("value1")}, - {Key: PtrString("tag2"), Value: PtrString("value2")}, + {Key: new("tag1"), Value: new("value1")}, + {Key: new("tag2"), Value: new("value2")}, }, }, nil } @@ -53,7 +53,7 @@ func TestSNSGetFunc(t *testing.T) { cli := snsTestClient{} item, err := getSubsFunc(ctx, cli, "scope", &sns.GetSubscriptionAttributesInput{ - SubscriptionArn: PtrString("arn:aws:sns:us-west-2:123456789012:my-topic:8a21d249-4329-4871-acc6-7be709c6ea7f"), + SubscriptionArn: new("arn:aws:sns:us-west-2:123456789012:my-topic:8a21d249-4329-4871-acc6-7be709c6ea7f"), }) if err != nil { t.Fatal(err) diff --git a/aws-source/adapters/sns-topic_test.go b/aws-source/adapters/sns-topic_test.go index 57f344de..d0208bf0 100644 --- a/aws-source/adapters/sns-topic_test.go +++ b/aws-source/adapters/sns-topic_test.go @@ -30,7 +30,7 @@ func (t testTopicClient) ListTopics(context.Context, *sns.ListTopicsInput, ...fu return &sns.ListTopicsOutput{ Topics: []types.Topic{ { - TopicArn: PtrString("arn:aws:sns:us-west-2:123456789012:my-topic"), + TopicArn: new("arn:aws:sns:us-west-2:123456789012:my-topic"), }, }, }, nil @@ -39,8 +39,8 @@ func (t testTopicClient) ListTopics(context.Context, *sns.ListTopicsInput, ...fu func (t testTopicClient) ListTagsForResource(context.Context, *sns.ListTagsForResourceInput, ...func(*sns.Options)) (*sns.ListTagsForResourceOutput, error) { return &sns.ListTagsForResourceOutput{ Tags: []types.Tag{ - {Key: PtrString("tag1"), Value: PtrString("value1")}, - {Key: PtrString("tag2"), Value: PtrString("value2")}, + {Key: new("tag1"), Value: new("value1")}, + {Key: new("tag2"), Value: new("value2")}, }, }, nil } @@ -50,7 +50,7 @@ func TestGetTopicFunc(t *testing.T) { cli := testTopicClient{} item, err := getTopicFunc(ctx, cli, "scope", &sns.GetTopicAttributesInput{ - TopicArn: PtrString("arn:aws:sns:us-west-2:123456789012:my-topic"), + TopicArn: new("arn:aws:sns:us-west-2:123456789012:my-topic"), }) if err != nil { t.Fatal(err) diff --git 
a/aws-source/adapters/sqs-queue.go b/aws-source/adapters/sqs-queue.go index 7283bf7d..71ade1e4 100644 --- a/aws-source/adapters/sqs-queue.go +++ b/aws-source/adapters/sqs-queue.go @@ -98,7 +98,7 @@ func sqsQueueSearchInputMapper(scope string, query string) (*sqs.GetQueueAttribu } return &sqs.GetQueueAttributesInput{ - QueueUrl: PtrString(fmt.Sprintf("https://sqs.%s.%s/%s/%s", arn.Region, GetPartitionDNSSuffix(arn.Partition), arn.AccountID, arn.Resource)), + QueueUrl: new(fmt.Sprintf("https://sqs.%s.%s/%s/%s", arn.Region, GetPartitionDNSSuffix(arn.Partition), arn.AccountID, arn.Resource)), AttributeNames: []types.QueueAttributeName{"All"}, }, nil } @@ -111,7 +111,7 @@ func NewSQSQueueAdapter(client sqsClient, accountID string, region string, cache Region: region, ListInput: &sqs.ListQueuesInput{}, AdapterMetadata: sqsQueueAdapterMetadata, - cache: cache, + cache: cache, GetInputMapper: func(scope, query string) *sqs.GetQueueAttributesInput { return &sqs.GetQueueAttributesInput{ QueueUrl: &query, diff --git a/aws-source/adapters/sqs-queue_test.go b/aws-source/adapters/sqs-queue_test.go index 11524d97..449769fd 100644 --- a/aws-source/adapters/sqs-queue_test.go +++ b/aws-source/adapters/sqs-queue_test.go @@ -54,7 +54,7 @@ func TestGetFunc(t *testing.T) { cli := testClient{} item, err := getFunc(ctx, cli, "scope", &sqs.GetQueueAttributesInput{ - QueueUrl: PtrString("https://sqs.us-west-2.amazonaws.com/123456789012/MyQueue"), + QueueUrl: new("https://sqs.us-west-2.amazonaws.com/123456789012/MyQueue"), }) if err != nil { t.Fatal(err) diff --git a/aws-source/adapters/ssm-parameter.go b/aws-source/adapters/ssm-parameter.go index 0294d5a3..434c4d3f 100644 --- a/aws-source/adapters/ssm-parameter.go +++ b/aws-source/adapters/ssm-parameter.go @@ -186,7 +186,7 @@ func ssmParameterOutputMapper(ctx context.Context, client ssmClient, scope strin if parameter.Type != types.ParameterTypeSecureString { request := &ssm.GetParameterInput{ Name: parameter.Name, - WithDecryption: 
PtrBool(false), // let's be double sure we don't get any secrets + WithDecryption: new(false), // let's be double sure we don't get any secrets } paramResp, err := client.GetParameter(ctx, request) if err != nil { @@ -231,8 +231,8 @@ func NewSSMParameterAdapter(client ssmClient, accountID string, region string, c return &ssm.DescribeParametersInput{ ParameterFilters: []types.ParameterStringFilter{ { - Key: PtrString("Name"), - Option: PtrString("Equals"), + Key: new("Name"), + Option: new("Equals"), Values: []string{query}, }, }, diff --git a/aws-source/build/package/Dockerfile b/aws-source/build/package/Dockerfile index 5ea83d03..e6cd5153 100644 --- a/aws-source/build/package/Dockerfile +++ b/aws-source/build/package/Dockerfile @@ -1,5 +1,5 @@ # Build the source binary -FROM golang:1.25-alpine AS builder +FROM golang:1.26-alpine AS builder ARG TARGETOS ARG TARGETARCH ARG BUILD_VERSION diff --git a/aws-source/module/provider/.github/workflows/finalize-copybara-sync.yml b/aws-source/module/provider/.github/workflows/finalize-copybara-sync.yml index 3a59a73a..f4bab3a0 100644 --- a/aws-source/module/provider/.github/workflows/finalize-copybara-sync.yml +++ b/aws-source/module/provider/.github/workflows/finalize-copybara-sync.yml @@ -42,11 +42,13 @@ jobs: run: go mod tidy - name: Commit and push go mod tidy changes + env: + HEAD_BRANCH: ${{ github.ref_name }} run: | if ! 
git diff --quiet go.mod go.sum; then git add go.mod go.sum git commit -m "Run go mod tidy" - git push origin ${{ github.ref_name }} + git push origin "$HEAD_BRANCH" else echo "No changes from go mod tidy" fi @@ -73,6 +75,7 @@ jobs: AUTHOR_NAME: ${{ steps.author.outputs.name }} AUTHOR_EMAIL: ${{ steps.author.outputs.email }} GITHUB_USER: ${{ steps.author.outputs.github_user }} + HEAD_BRANCH: ${{ github.ref_name }} run: | PR_BODY="## Copybara Sync - Release ${VERSION} @@ -97,7 +100,7 @@ jobs: PR_URL=$(gh pr create \ --base main \ - --head "${{ github.ref_name }}" \ + --head "$HEAD_BRANCH" \ --title "Release ${VERSION}" \ --body "$PR_BODY") diff --git a/aws-source/module/provider/.github/workflows/release.yml b/aws-source/module/provider/.github/workflows/release.yml index 130f5746..ae76b5a4 100644 --- a/aws-source/module/provider/.github/workflows/release.yml +++ b/aws-source/module/provider/.github/workflows/release.yml @@ -31,7 +31,7 @@ jobs: GPG_FINGERPRINT: 'op://global/Terraform Provider GPG Key/fingerprint' - name: Import GPG key - uses: crazy-max/ghaction-import-gpg@v6 + uses: crazy-max/ghaction-import-gpg@v7 id: import_gpg with: gpg_private_key: ${{ env.GPG_PRIVATE_KEY }} @@ -43,7 +43,7 @@ jobs: go-version-file: go.mod - name: Run GoReleaser - uses: goreleaser/goreleaser-action@v6 + uses: goreleaser/goreleaser-action@v7 with: version: latest args: release --clean diff --git a/aws-source/module/provider/.github/workflows/tag-on-merge.yml b/aws-source/module/provider/.github/workflows/tag-on-merge.yml index 800ccb96..44dc9c22 100644 --- a/aws-source/module/provider/.github/workflows/tag-on-merge.yml +++ b/aws-source/module/provider/.github/workflows/tag-on-merge.yml @@ -17,8 +17,9 @@ jobs: steps: - name: Extract version from branch name id: version + env: + BRANCH: ${{ github.event.pull_request.head.ref }} run: | - BRANCH="${{ github.event.pull_request.head.ref }}" VERSION=$(echo "$BRANCH" | sed 's|copybara/||') echo "version=$VERSION" >> $GITHUB_OUTPUT echo 
"Extracted version: $VERSION" @@ -46,7 +47,7 @@ jobs: - name: Delete copybara branch env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + BRANCH: ${{ github.event.pull_request.head.ref }} run: | - BRANCH="${{ github.event.pull_request.head.ref }}" echo "Deleting branch: $BRANCH" git push origin --delete "$BRANCH" || echo "Branch may have already been deleted" diff --git a/aws-source/module/terraform/.github/workflows/finalize-copybara-sync.yml b/aws-source/module/terraform/.github/workflows/finalize-copybara-sync.yml index e6c0f5e0..f0d30412 100644 --- a/aws-source/module/terraform/.github/workflows/finalize-copybara-sync.yml +++ b/aws-source/module/terraform/.github/workflows/finalize-copybara-sync.yml @@ -50,6 +50,7 @@ jobs: AUTHOR_NAME: ${{ steps.author.outputs.name }} AUTHOR_EMAIL: ${{ steps.author.outputs.email }} GITHUB_USER: ${{ steps.author.outputs.github_user }} + HEAD_BRANCH: ${{ github.ref_name }} run: | PR_BODY="## Copybara Sync - Release ${VERSION} @@ -69,7 +70,7 @@ jobs: PR_URL=$(gh pr create \ --base main \ - --head "${{ github.ref_name }}" \ + --head "$HEAD_BRANCH" \ --title "Release ${VERSION}" \ --body "$PR_BODY") diff --git a/aws-source/module/terraform/.github/workflows/tag-on-merge.yml b/aws-source/module/terraform/.github/workflows/tag-on-merge.yml index 800ccb96..44dc9c22 100644 --- a/aws-source/module/terraform/.github/workflows/tag-on-merge.yml +++ b/aws-source/module/terraform/.github/workflows/tag-on-merge.yml @@ -17,8 +17,9 @@ jobs: steps: - name: Extract version from branch name id: version + env: + BRANCH: ${{ github.event.pull_request.head.ref }} run: | - BRANCH="${{ github.event.pull_request.head.ref }}" VERSION=$(echo "$BRANCH" | sed 's|copybara/||') echo "version=$VERSION" >> $GITHUB_OUTPUT echo "Extracted version: $VERSION" @@ -46,7 +47,7 @@ jobs: - name: Delete copybara branch env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + BRANCH: ${{ github.event.pull_request.head.ref }} run: | - BRANCH="${{ github.event.pull_request.head.ref }}" echo 
"Deleting branch: $BRANCH" git push origin --delete "$BRANCH" || echo "Branch may have already been deleted" diff --git a/cmd/changes_submit_plan.go b/cmd/changes_submit_plan.go index e1eb5035..d3d8171d 100644 --- a/cmd/changes_submit_plan.go +++ b/cmd/changes_submit_plan.go @@ -52,7 +52,7 @@ func changeTitle(ctx context.Context, arg string) string { return arg } - describeBytes, err := exec.CommandContext(ctx, "git", "describe", "--long").Output() + describeBytes, err := exec.CommandContext(ctx, "git", "describe", "--long").Output() //nolint:gosec // G702: all arguments are hardcoded string literals; no user input reaches this command describe := strings.TrimSpace(string(describeBytes)) if err != nil { log.WithError(err).Trace("failed to run 'git describe' for default title") @@ -289,7 +289,8 @@ func SubmitPlan(cmd *cobra.Command, args []string) error { } // Discover and convert knowledge files - sdpKnowledge := knowledge.DiscoverAndConvert(ctx, ".overmind/knowledge/") + knowledgeDir := knowledge.FindKnowledgeDir(".") + sdpKnowledge := knowledge.DiscoverAndConvert(ctx, knowledgeDir) _, err = client.StartChangeAnalysis(ctx, &connect.Request[sdp.StartChangeAnalysisRequest]{ Msg: &sdp.StartChangeAnalysisRequest{ diff --git a/cmd/explore.go b/cmd/explore.go index 20df5072..76711032 100644 --- a/cmd/explore.go +++ b/cmd/explore.go @@ -104,6 +104,7 @@ func StartLocalSources(ctx context.Context, oi sdp.OvermindInstance, token *oaut snapshotSpinner.Fail(fmt.Sprintf("Failed to initialize snapshot source adapters: %v", err)) return func() {}, fmt.Errorf("failed to initialize snapshot source adapters: %w", err) } + snapshotEngine.MarkAdaptersInitialized() err = snapshotEngine.Start(ctx) if err != nil { snapshotSpinner.Fail(fmt.Sprintf("Failed to start snapshot source engine: %v", err)) @@ -182,6 +183,7 @@ func StartLocalSources(ctx context.Context, oi sdp.OvermindInstance, token *oaut return nil, fmt.Errorf("failed to initialize stdlib source adapters: %w", err) } // todo: 
pass in context with timeout to abort timely and allow Ctrl-C to work + stdlibEngine.MarkAdaptersInitialized() err = stdlibEngine.Start(ctx) if err != nil { stdlibSpinner.Fail("Failed to start stdlib source engine") @@ -280,6 +282,7 @@ func StartLocalSources(ctx context.Context, oi sdp.OvermindInstance, token *oaut return nil, fmt.Errorf("failed to initialize AWS source adapters: %w", err) } + awsEngine.MarkAdaptersInitialized() err = awsEngine.Start(ctx) if err != nil { awsSpinner.Fail("Failed to start AWS source engine") @@ -389,6 +392,7 @@ func StartLocalSources(ctx context.Context, oi sdp.OvermindInstance, token *oaut continue // Skip this engine but continue with others } + gcpEngine.MarkAdaptersInitialized() err = gcpEngine.Start(ctx) if err != nil { if gcpConfig == nil { @@ -535,6 +539,7 @@ func StartLocalSources(ctx context.Context, oi sdp.OvermindInstance, token *oaut continue // Skip this engine but continue with others } + azureEngine.MarkAdaptersInitialized() err = azureEngine.Start(ctx) if err != nil { statusArea.Println(fmt.Sprintf("Failed to start Azure source for subscription %s: %s", azureConfig.SubscriptionID, err.Error())) diff --git a/cmd/knowledge.go b/cmd/knowledge.go new file mode 100644 index 00000000..b0e6285a --- /dev/null +++ b/cmd/knowledge.go @@ -0,0 +1,24 @@ +package cmd + +import ( + "github.com/spf13/cobra" +) + +// knowledgeCmd represents the knowledge command +var knowledgeCmd = &cobra.Command{ + Use: "knowledge", + GroupID: "iac", + Short: "Manage tribal knowledge files used for change analysis", + Long: `Knowledge files in .overmind/knowledge/ help Overmind understand your infrastructure +context, giving better change analysis and risk assessment. 
+ +The 'list' subcommand shows which knowledge files Overmind would discover from your +current location, using the same logic as 'overmind terraform plan'.`, + Run: func(cmd *cobra.Command, args []string) { + _ = cmd.Help() + }, +} + +func init() { + rootCmd.AddCommand(knowledgeCmd) +} diff --git a/cmd/knowledge_list.go b/cmd/knowledge_list.go new file mode 100644 index 00000000..44bdfc47 --- /dev/null +++ b/cmd/knowledge_list.go @@ -0,0 +1,111 @@ +package cmd + +import ( + "errors" + "fmt" + "strings" + + "github.com/overmindtech/pterm" + "github.com/overmindtech/cli/knowledge" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +// ErrInvalidKnowledgeFiles is returned when one or more knowledge files are invalid/skipped. +// Used so "knowledge list" can exit non-zero in CI when invalid files are found. +var ErrInvalidKnowledgeFiles = errors.New("invalid knowledge files found") + +// knowledgeListCmd represents the knowledge list command +var knowledgeListCmd = &cobra.Command{ + Use: "list", + Short: "Lists knowledge files that would be used from the current location", + PreRun: PreRunSetup, + RunE: KnowledgeList, +} + +func KnowledgeList(cmd *cobra.Command, args []string) error { + startDir := viper.GetString("dir") + output, err := renderKnowledgeList(startDir) + fmt.Print(output) + if err != nil { + return err + } + return nil +} + +// renderKnowledgeList handles the knowledge list logic and returns formatted output. +// This is separated from the command for testability. 
+func renderKnowledgeList(startDir string) (string, error) { + var output strings.Builder + + knowledgeDir := knowledge.FindKnowledgeDir(startDir) + + if knowledgeDir == "" { + output.WriteString(pterm.Info.Sprint("No .overmind/knowledge/ directory found from current location\n\n")) + output.WriteString("Knowledge files help Overmind understand your infrastructure context.\n") + output.WriteString("Create a .overmind/knowledge/ directory to add knowledge files.\n") + output.WriteString("Without knowledge files, 'terraform plan' will proceed with standard analysis.\n") + return output.String(), nil + } + + files, warnings := knowledge.Discover(knowledgeDir) + + // Show resolved directory + output.WriteString(pterm.Info.Sprintf("Knowledge directory: %s\n\n", knowledgeDir)) + + // Show valid files + if len(files) > 0 { + output.WriteString(pterm.DefaultHeader.Sprint("Valid Knowledge Files") + "\n\n") + + // Create table data + tableData := pterm.TableData{ + {"Name", "Description", "File Path"}, + } + + for _, f := range files { + tableData = append(tableData, []string{ + f.Name, + truncateDescription(f.Description, 60), + f.FileName, + }) + } + + table, err := pterm.DefaultTable.WithHasHeader().WithData(tableData).Srender() + if err != nil { + return "", fmt.Errorf("failed to render table: %w", err) + } + output.WriteString(table) + output.WriteString("\n") + } else if len(warnings) == 0 { + output.WriteString(pterm.Info.Sprint("No knowledge files found\n\n")) + } + + // Show warnings + if len(warnings) > 0 { + output.WriteString(pterm.DefaultHeader.Sprint("Invalid/Skipped Files") + "\n\n") + + for _, w := range warnings { + output.WriteString(pterm.Warning.Sprintf(" %s\n", w.Path)) + fmt.Fprintf(&output, " Reason: %s\n", w.Reason) + } + output.WriteString("\n") + return output.String(), fmt.Errorf("%w (%d file(s))", ErrInvalidKnowledgeFiles, len(warnings)) + } + + return output.String(), nil +} + +// truncateDescription truncates a description to maxLen characters, 
adding "..." if truncated +func truncateDescription(desc string, maxLen int) string { + if len(desc) <= maxLen { + return desc + } + return desc[:maxLen-3] + "..." +} + +func init() { + knowledgeCmd.AddCommand(knowledgeListCmd) + + knowledgeListCmd.Flags().String("dir", ".", "Directory to start searching from") + knowledgeListCmd.Flags().MarkHidden("dir") //nolint:errcheck // not possible to error +} diff --git a/cmd/knowledge_list_test.go b/cmd/knowledge_list_test.go new file mode 100644 index 00000000..35c12a0d --- /dev/null +++ b/cmd/knowledge_list_test.go @@ -0,0 +1,399 @@ +package cmd + +import ( + "errors" + "os" + "path/filepath" + "strings" + "testing" +) + +func TestRenderKnowledgeList_NoKnowledgeDir(t *testing.T) { + dir := t.TempDir() + + output, err := renderKnowledgeList(dir) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if !strings.Contains(output, "No .overmind/knowledge/ directory found") { + t.Errorf("expected message about no directory found, got: %s", output) + } + if !strings.Contains(output, "Create a .overmind/knowledge/ directory") { + t.Errorf("expected helpful message about creating directory, got: %s", output) + } + if !strings.Contains(output, "terraform plan") { + t.Errorf("expected reference to terraform plan, got: %s", output) + } +} + +func TestRenderKnowledgeList_EmptyKnowledgeDir(t *testing.T) { + dir := t.TempDir() + knowledgeDir := filepath.Join(dir, ".overmind", "knowledge") + err := os.MkdirAll(knowledgeDir, 0755) + if err != nil { + t.Fatal(err) + } + + output, err := renderKnowledgeList(dir) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if !strings.Contains(output, "Knowledge directory:") { + t.Errorf("expected resolved directory message, got: %s", output) + } + if !strings.Contains(output, knowledgeDir) { + t.Errorf("expected directory path %s in output, got: %s", knowledgeDir, output) + } + if !strings.Contains(output, "No knowledge files found") { + t.Errorf("expected 'No knowledge 
files found' message, got: %s", output) + } +} + +func TestRenderKnowledgeList_ValidFiles(t *testing.T) { + dir := t.TempDir() + knowledgeDir := filepath.Join(dir, ".overmind", "knowledge") + err := os.MkdirAll(knowledgeDir, 0755) + if err != nil { + t.Fatal(err) + } + + // Create valid knowledge files + writeTestFile(t, filepath.Join(knowledgeDir, "aws-s3.md"), `--- +name: aws-s3-security +description: Security best practices for S3 buckets +--- +# AWS S3 Security +Content here. +`) + + subdir := filepath.Join(knowledgeDir, "cloud") + err = os.Mkdir(subdir, 0755) + if err != nil { + t.Fatal(err) + } + writeTestFile(t, filepath.Join(subdir, "gcp.md"), `--- +name: gcp-compute +description: GCP Compute Engine guidelines +--- +# GCP Compute +Content here. +`) + + output, err := renderKnowledgeList(dir) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + // Check for resolved directory + if !strings.Contains(output, "Knowledge directory:") { + t.Errorf("expected resolved directory message, got: %s", output) + } + if !strings.Contains(output, knowledgeDir) { + t.Errorf("expected directory path in output, got: %s", output) + } + + // Check for header + if !strings.Contains(output, "Valid Knowledge Files") { + t.Errorf("expected 'Valid Knowledge Files' header, got: %s", output) + } + + // Check for first file details + if !strings.Contains(output, "aws-s3-security") { + t.Errorf("expected file name 'aws-s3-security', got: %s", output) + } + if !strings.Contains(output, "Security best practices for S3 buckets") { + t.Errorf("expected file description, got: %s", output) + } + if !strings.Contains(output, "aws-s3.md") { + t.Errorf("expected file path 'aws-s3.md', got: %s", output) + } + + // Check for second file details + if !strings.Contains(output, "gcp-compute") { + t.Errorf("expected file name 'gcp-compute', got: %s", output) + } + if !strings.Contains(output, "GCP Compute Engine guidelines") { + t.Errorf("expected file description, got: %s", output) + } + 
if !strings.Contains(output, filepath.Join("cloud", "gcp.md")) { + t.Errorf("expected file path 'cloud/gcp.md', got: %s", output) + } +} + +func TestRenderKnowledgeList_InvalidFiles(t *testing.T) { + dir := t.TempDir() + knowledgeDir := filepath.Join(dir, ".overmind", "knowledge") + err := os.MkdirAll(knowledgeDir, 0755) + if err != nil { + t.Fatal(err) + } + + // Create valid file + writeTestFile(t, filepath.Join(knowledgeDir, "valid.md"), `--- +name: valid-file +description: A valid knowledge file +--- +Content here. +`) + + // Create invalid file (missing frontmatter) + writeTestFile(t, filepath.Join(knowledgeDir, "invalid.md"), `# No frontmatter +This file is missing frontmatter. +`) + + output, err := renderKnowledgeList(dir) + if err == nil { + t.Fatal("expected error when invalid files present, got nil") + } + if !errors.Is(err, ErrInvalidKnowledgeFiles) { + t.Errorf("expected ErrInvalidKnowledgeFiles, got: %v", err) + } + + // Check for valid file + if !strings.Contains(output, "Valid Knowledge Files") { + t.Errorf("expected 'Valid Knowledge Files' header, got: %s", output) + } + if !strings.Contains(output, "valid-file") { + t.Errorf("expected valid file name, got: %s", output) + } + + // Check for warnings section + if !strings.Contains(output, "Invalid/Skipped Files") { + t.Errorf("expected 'Invalid/Skipped Files' header, got: %s", output) + } + if !strings.Contains(output, "invalid.md") { + t.Errorf("expected invalid file path in warnings, got: %s", output) + } + if !strings.Contains(output, "Reason:") { + t.Errorf("expected reason in warnings, got: %s", output) + } +} + +func TestRenderKnowledgeList_OnlyInvalidFiles(t *testing.T) { + dir := t.TempDir() + knowledgeDir := filepath.Join(dir, ".overmind", "knowledge") + err := os.MkdirAll(knowledgeDir, 0755) + if err != nil { + t.Fatal(err) + } + + // Create only invalid files + writeTestFile(t, filepath.Join(knowledgeDir, "bad1.md"), `# No frontmatter`) + writeTestFile(t, filepath.Join(knowledgeDir, 
"bad2.md"), `--- +name: invalid name with spaces +description: This has an invalid name +--- +Content. +`) + + output, err := renderKnowledgeList(dir) + if err == nil { + t.Fatal("expected error when only invalid files present, got nil") + } + if !errors.Is(err, ErrInvalidKnowledgeFiles) { + t.Errorf("expected ErrInvalidKnowledgeFiles, got: %v", err) + } + + // Should NOT have valid files section + if strings.Contains(output, "Valid Knowledge Files") { + t.Errorf("should not have 'Valid Knowledge Files' header when all files are invalid, got: %s", output) + } + + // Should have warnings + if !strings.Contains(output, "Invalid/Skipped Files") { + t.Errorf("expected 'Invalid/Skipped Files' header, got: %s", output) + } + if !strings.Contains(output, "bad1.md") { + t.Errorf("expected bad1.md in warnings, got: %s", output) + } + if !strings.Contains(output, "bad2.md") { + t.Errorf("expected bad2.md in warnings, got: %s", output) + } +} + +func TestRenderKnowledgeList_SubdirectoryUsesLocal(t *testing.T) { + dir := t.TempDir() + + // Create parent knowledge dir + parentKnowledgeDir := filepath.Join(dir, ".overmind", "knowledge") + err := os.MkdirAll(parentKnowledgeDir, 0755) + if err != nil { + t.Fatal(err) + } + writeTestFile(t, filepath.Join(parentKnowledgeDir, "parent.md"), `--- +name: parent-file +description: Parent knowledge file +--- +Content. +`) + + // Create subdirectory with its own knowledge dir + childDir := filepath.Join(dir, "child") + childKnowledgeDir := filepath.Join(childDir, ".overmind", "knowledge") + err = os.MkdirAll(childKnowledgeDir, 0755) + if err != nil { + t.Fatal(err) + } + writeTestFile(t, filepath.Join(childKnowledgeDir, "child.md"), `--- +name: child-file +description: Child knowledge file +--- +Content. 
+`) + + output, err := renderKnowledgeList(childDir) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + // Should use child knowledge dir + if !strings.Contains(output, childKnowledgeDir) { + t.Errorf("expected child knowledge dir %s, got: %s", childKnowledgeDir, output) + } + if strings.Contains(output, parentKnowledgeDir) { + t.Errorf("should not mention parent knowledge dir, got: %s", output) + } + + // Should show child file, not parent file + if !strings.Contains(output, "child-file") { + t.Errorf("expected child file, got: %s", output) + } + if strings.Contains(output, "parent-file") { + t.Errorf("should not show parent file, got: %s", output) + } +} + +func TestRenderKnowledgeList_SubdirectoryUsesParent(t *testing.T) { + dir := t.TempDir() + + // Create parent knowledge dir + parentKnowledgeDir := filepath.Join(dir, ".overmind", "knowledge") + err := os.MkdirAll(parentKnowledgeDir, 0755) + if err != nil { + t.Fatal(err) + } + writeTestFile(t, filepath.Join(parentKnowledgeDir, "parent.md"), `--- +name: parent-file +description: Parent knowledge file +--- +Content. 
+`) + + // Create subdirectory WITHOUT its own knowledge dir + childDir := filepath.Join(dir, "child") + err = os.Mkdir(childDir, 0755) + if err != nil { + t.Fatal(err) + } + + output, err := renderKnowledgeList(childDir) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + // Should use parent knowledge dir + if !strings.Contains(output, parentKnowledgeDir) { + t.Errorf("expected parent knowledge dir %s, got: %s", parentKnowledgeDir, output) + } + + // Should show parent file + if !strings.Contains(output, "parent-file") { + t.Errorf("expected parent file, got: %s", output) + } +} + +func TestRenderKnowledgeList_StopsAtGitBoundary(t *testing.T) { + dir := t.TempDir() + + // Create outer directory with knowledge (outside git repo) + outerKnowledgeDir := filepath.Join(dir, ".overmind", "knowledge") + err := os.MkdirAll(outerKnowledgeDir, 0755) + if err != nil { + t.Fatal(err) + } + writeTestFile(t, filepath.Join(outerKnowledgeDir, "outer.md"), `--- +name: outer-file +description: Knowledge file outside git repo +--- +Content. 
+`) + + // Create a git repo subdirectory + repoDir := filepath.Join(dir, "my-repo") + repoGitDir := filepath.Join(repoDir, ".git") + err = os.MkdirAll(repoGitDir, 0755) + if err != nil { + t.Fatal(err) + } + + // Create a workspace dir inside the repo (without its own knowledge) + workspaceDir := filepath.Join(repoDir, "workspace") + err = os.Mkdir(workspaceDir, 0755) + if err != nil { + t.Fatal(err) + } + + output, err := renderKnowledgeList(workspaceDir) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + // Should NOT find outer knowledge dir (stops at .git boundary) + if !strings.Contains(output, "No .overmind/knowledge/ directory found") { + t.Errorf("expected no knowledge dir found (should stop at .git), got: %s", output) + } + if strings.Contains(output, "outer-file") { + t.Errorf("should not find knowledge from outside git repo, got: %s", output) + } +} + +func TestTruncateDescription(t *testing.T) { + tests := []struct { + name string + desc string + maxLen int + expected string + }{ + { + name: "short description", + desc: "Short", + maxLen: 20, + expected: "Short", + }, + { + name: "exact length", + desc: "Exactly twenty char", + maxLen: 20, + expected: "Exactly twenty char", + }, + { + name: "needs truncation", + desc: "This is a very long description that needs to be truncated", + maxLen: 20, + expected: "This is a very lo...", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := truncateDescription(tt.desc, tt.maxLen) + if result != tt.expected { + t.Errorf("expected %q, got %q", tt.expected, result) + } + if len(result) > tt.maxLen { + t.Errorf("result length %d exceeds maxLen %d", len(result), tt.maxLen) + } + }) + } +} + +// Helper function for tests +func writeTestFile(t *testing.T, path, content string) { + t.Helper() + err := os.WriteFile(path, []byte(content), 0644) + if err != nil { + t.Fatalf("failed to write file %s: %v", path, err) + } +} diff --git a/cmd/pterm.go b/cmd/pterm.go index 
3818ec37..0b533d76 100644 --- a/cmd/pterm.go +++ b/cmd/pterm.go @@ -42,7 +42,7 @@ func PTermSetup() { // disrupting bubbletea rendering (and potentially getting overwritten). // Otherwise, when TEABUG is set, log to a file. if len(os.Getenv("TEABUG")) > 0 { - f, err := os.OpenFile("teabug.log", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o600) //nolint:gomnd + f, err := os.OpenFile("teabug.log", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o600) if err != nil { fmt.Println("fatal:", err) os.Exit(1) @@ -151,7 +151,7 @@ func RunRevlinkWarmup(ctx context.Context, oi sdp.OvermindInstance, postPlanPrin } func RunPlan(ctx context.Context, args []string) error { - c := exec.CommandContext(ctx, "terraform", args...) + c := exec.CommandContext(ctx, "terraform", args...) //nolint:gosec // G702: args are CLI arguments from the local user who invoked this command; this tool runs on the user's own machine // remove go's default process cancel behaviour, so that terraform has a // chance to gracefully shutdown when ^C is pressed. Otherwise the @@ -180,7 +180,7 @@ func RunPlan(ctx context.Context, args []string) error { } func RunApply(ctx context.Context, args []string) error { - c := exec.CommandContext(ctx, "terraform", args...) + c := exec.CommandContext(ctx, "terraform", args...) //nolint:gosec // G702: args are CLI arguments from the local user who invoked this command; this tool runs on the user's own machine // remove go's default process cancel behaviour, so that terraform has a // chance to gracefully shutdown when ^C is pressed. 
Otherwise the diff --git a/cmd/terraform_plan.go b/cmd/terraform_plan.go index 8a3d343e..0bf762af 100644 --- a/cmd/terraform_plan.go +++ b/cmd/terraform_plan.go @@ -12,7 +12,7 @@ import ( "time" "connectrpc.com/connect" - lipgloss "github.com/charmbracelet/lipgloss/v2" + lipgloss "charm.land/lipgloss/v2" "github.com/google/uuid" "github.com/muesli/reflow/wordwrap" "github.com/overmindtech/pterm" @@ -115,7 +115,7 @@ func TerraformPlanImpl(ctx context.Context, cmd *cobra.Command, oi sdp.OvermindI // Convert provided plan into JSON for easier parsing /////////////////////////////////////////////////////////////////// - tfPlanJsonCmd := exec.CommandContext(ctx, "terraform", "show", "-json", planFile) + tfPlanJsonCmd := exec.CommandContext(ctx, "terraform", "show", "-json", planFile) //nolint:gosec // G702: "terraform", "show", "-json" are hardcoded; planFile is from the local user's CLI -out flag tfPlanJsonCmd.Stderr = multi.NewWriter() // send output through PTerm; is usually empty @@ -181,7 +181,7 @@ func TerraformPlanImpl(ctx context.Context, cmd *cobra.Command, oi sdp.OvermindI } line := printer.Sprintf("%v (%v)", mapping.TerraformName, mapping.Message) - _, err = fmt.Fprintf(resourceExtractionResults, " %v\n", line) + _, err = fmt.Fprintf(resourceExtractionResults, " %v\n", line) //nolint:gosec // G203: resourceExtractionResults is a pterm.MultiPrinter writer (terminal UI), not an http.ResponseWriter; no XSS vector if err != nil { return fmt.Errorf("error writing to resource extraction results: %w", err) } @@ -229,7 +229,7 @@ func TerraformPlanImpl(ctx context.Context, cmd *cobra.Command, oi sdp.OvermindI } title := changeTitle(ctx, viper.GetString("title")) - tfPlanTextCmd := exec.CommandContext(ctx, "terraform", "show", planFile) + tfPlanTextCmd := exec.CommandContext(ctx, "terraform", "show", planFile) //nolint:gosec // G702: "terraform" and "show" are hardcoded; planFile is from the local user's CLI -out flag tfPlanTextCmd.Stderr = multi.NewWriter() // send 
output through PTerm; is usually empty @@ -316,7 +316,8 @@ func TerraformPlanImpl(ctx context.Context, cmd *cobra.Command, oi sdp.OvermindI log.WithField("change", changeUuid).Debug("Uploading planned changes") // Discover and convert knowledge files - sdpKnowledge := knowledge.DiscoverAndConvert(ctx, ".overmind/knowledge/") + knowledgeDir := knowledge.FindKnowledgeDir(".") + sdpKnowledge := knowledge.DiscoverAndConvert(ctx, knowledgeDir) _, err = client.StartChangeAnalysis(ctx, &connect.Request[sdp.StartChangeAnalysisRequest]{ Msg: &sdp.StartChangeAnalysisRequest{ @@ -459,7 +460,7 @@ retryLoop: // getTicketLinkFromPlan reads the plan file to create a unique hash to identify this change func getTicketLinkFromPlan(planFile string) (string, error) { - plan, err := os.ReadFile(planFile) + plan, err := os.ReadFile(planFile) //nolint:gosec // G703: planFile is from the local user's CLI args; reading their chosen file is the intended behavior of this CLI tool if err != nil { return "", fmt.Errorf("failed to read plan file (%v): %w", planFile, err) } diff --git a/cmd/theme.go b/cmd/theme.go index 5758f0a4..bdb915eb 100644 --- a/cmd/theme.go +++ b/cmd/theme.go @@ -8,7 +8,7 @@ import ( "github.com/charmbracelet/glamour" "github.com/charmbracelet/glamour/ansi" - lipgloss "github.com/charmbracelet/lipgloss/v2" + lipgloss "charm.land/lipgloss/v2" ) // constrain the maximum terminal width to avoid readability issues with too @@ -103,21 +103,21 @@ func MarkdownStyle() ansi.StyleConfig { BlockSuffix: "\n", Color: getHex(ColorPalette.LabelBase), }, - Indent: ptrUint(2), + Indent: new(uint(2)), }, BlockQuote: ansi.StyleBlock{ StylePrimitive: ansi.StylePrimitive{ - Italic: ptrBool(true), + Italic: new(true), }, - Indent: ptrUint(1), - IndentToken: ptrString("│ "), + Indent: new(uint(1)), + IndentToken: new("│ "), }, List: ansi.StyleList{ LevelIndent: 2, }, Heading: ansi.StyleBlock{ StylePrimitive: ansi.StylePrimitive{ - Bold: ptrBool(true), + Bold: new(true), Color: 
getHex(ColorPalette.LabelTitle), BlockSuffix: "\n", }, @@ -146,17 +146,17 @@ func MarkdownStyle() ansi.StyleConfig { H6: ansi.StyleBlock{ StylePrimitive: ansi.StylePrimitive{ Prefix: "###### ", - Bold: ptrBool(false), + Bold: new(false), }, }, Strikethrough: ansi.StylePrimitive{ - CrossedOut: ptrBool(true), + CrossedOut: new(true), }, Emph: ansi.StylePrimitive{ - Italic: ptrBool(true), + Italic: new(true), }, Strong: ansi.StylePrimitive{ - Bold: ptrBool(true), + Bold: new(true), }, HorizontalRule: ansi.StylePrimitive{ Color: getHex(ColorPalette.LabelBase), @@ -174,16 +174,16 @@ func MarkdownStyle() ansi.StyleConfig { }, Link: ansi.StylePrimitive{ Color: getHex(ColorPalette.LabelLink), - Underline: ptrBool(true), + Underline: new(true), BlockPrefix: "(", BlockSuffix: ")", }, LinkText: ansi.StylePrimitive{ - Bold: ptrBool(true), + Bold: new(true), }, Image: ansi.StylePrimitive{ Color: getHex(ColorPalette.LabelLink), - Underline: ptrBool(true), + Underline: new(true), BlockPrefix: "(", BlockSuffix: ")", }, @@ -192,14 +192,14 @@ func MarkdownStyle() ansi.StyleConfig { }, CodeBlock: ansi.StyleCodeBlock{ StyleBlock: ansi.StyleBlock{ - Margin: ptrUint(4), + Margin: new(uint(4)), }, Theme: "solarized-light", }, Table: ansi.StyleTable{ - CenterSeparator: ptrString("┼"), - ColumnSeparator: ptrString("│"), - RowSeparator: ptrString("─"), + CenterSeparator: new("┼"), + ColumnSeparator: new("│"), + RowSeparator: new("─"), }, DefinitionDescription: ansi.StylePrimitive{ BlockPrefix: "\n🠶 ", @@ -270,16 +270,6 @@ func IndentSymbol() string { return " " } -func ptrBool(b bool) *bool { - return &b -} -func ptrUint(u uint) *uint { - return &u -} -func ptrString(s string) *string { - return &s -} - func getHex(c color.Color) *string { r, g, b, _ := c.RGBA() // RGBA returns values in 0-65535, convert to 0-255 diff --git a/cmd/version_check.go b/cmd/version_check.go index 1be29d5f..62ac39fd 100644 --- a/cmd/version_check.go +++ b/cmd/version_check.go @@ -55,7 +55,7 @@ func 
checkVersion(ctx context.Context, currentVersion string) (latestVersion str req.Header.Set("User-Agent", fmt.Sprintf("overmind-cli/%s", currentVersion)) req.Header.Set("Accept", "application/vnd.github.v3+json") - resp, err := client.Do(req) + resp, err := client.Do(req) //nolint:gosec // G704: URL is the hardcoded constant githubReleasesURL; no user input reaches the request URL if err != nil { log.WithError(err).Debug("Failed to check for CLI updates") return "", false diff --git a/docs.overmind.tech/docs/sources/aws/account_settings.png b/docs.overmind.tech/docs/sources/aws/account_settings.png index a244a522..c7e184cb 100644 Binary files a/docs.overmind.tech/docs/sources/aws/account_settings.png and b/docs.overmind.tech/docs/sources/aws/account_settings.png differ diff --git a/docs.overmind.tech/docs/sources/aws/aws_source_settings.png b/docs.overmind.tech/docs/sources/aws/aws_source_settings.png index f8f38128..b6eca5e4 100644 Binary files a/docs.overmind.tech/docs/sources/aws/aws_source_settings.png and b/docs.overmind.tech/docs/sources/aws/aws_source_settings.png differ diff --git a/docs.overmind.tech/docs/sources/aws/configuration.md b/docs.overmind.tech/docs/sources/aws/configuration.md index 9d2c115a..329d3763 100644 --- a/docs.overmind.tech/docs/sources/aws/configuration.md +++ b/docs.overmind.tech/docs/sources/aws/configuration.md @@ -10,15 +10,15 @@ To be able to analyse and discover your infrastructure, Overmind requires read-o ## Configure a Managed Source -To create an AWS source, open [settings](https://app.overmind.tech/settings) by clicking your profile picture in the top right of the screen, then clicking Account Settings, then [Sources](https://app.overmind.tech/settings/sources) +To create an AWS source, open [Settings](https://app.overmind.tech/settings) by clicking your avatar in the sidebar, then navigating to [Sources](https://app.overmind.tech/settings/sources). 
-![Screenshot of the "User settings" menu, showing the first steps to take: Click "Account Settings"](./account_settings.png) +![User settings menu in the sidebar](./account_settings.png) -Then click Add Source > AWS. +Click **Add source** and select **AWS**. -![Screenshot of the sources subsection of the Overmind settings with the Add Source > AWS button highlighted](./aws_source_settings.png) +![Sources settings page with Add source popover](./aws_source_settings.png) -Then, use "Deploy with AWS CloudFormation" to be taken to the AWS console. You may need to sign in and reload the page. With the results from the CloudFormation deployment, choose a name for your source (e.g. "prod") and fill in "Region" and "AWSTargetRoleARN". +Use "Deploy with AWS CloudFormation" to be taken to the AWS console. You may need to sign in and reload the page. With the results from the CloudFormation deployment, choose a name for your source (e.g. "prod") and fill in "Region" and "AWSTargetRoleARN". ![Screenshot of the "Add AWS Source" dialogue, showing tabs for automatic and manual setup. The automatic setup pane is selected. There is explanation text and input fields for Source name, Region and AWSTargetRoleARN.](./configure-aws.png) @@ -111,7 +111,7 @@ At this point the permissions are complete, the last step is to copy the ARN of ## Check your sources -After you have configured a source, it'll show up in the [Source Settings](https://app.overmind.tech/changes?settings=1&activeTab=sources). There you can check that the source is healthy. +After you have configured a source, it'll show up in [Settings › Sources](https://app.overmind.tech/settings/sources). There you can check that the source is healthy. 
## Explore your new data diff --git a/docs.overmind.tech/docs/sources/aws/configure-aws.png b/docs.overmind.tech/docs/sources/aws/configure-aws.png index e2548560..c88e146f 100644 Binary files a/docs.overmind.tech/docs/sources/aws/configure-aws.png and b/docs.overmind.tech/docs/sources/aws/configure-aws.png differ diff --git a/docs.overmind.tech/docs/sources/aws/data/ec2-transit-gateway-route-table-association.json b/docs.overmind.tech/docs/sources/aws/data/ec2-transit-gateway-route-table-association.json index 295f0267..a056602b 100644 --- a/docs.overmind.tech/docs/sources/aws/data/ec2-transit-gateway-route-table-association.json +++ b/docs.overmind.tech/docs/sources/aws/data/ec2-transit-gateway-route-table-association.json @@ -1 +1,25 @@ -{"type":"ec2-transit-gateway-route-table-association","category":3,"potentialLinks":["ec2-transit-gateway-route-table","ec2-transit-gateway-attachment","ec2-vpc","ec2-vpn-connection","directconnect-direct-connect-gateway"],"descriptiveName":"Transit Gateway Route Table Association","supportedQueryMethods":{"get":true,"getDescription":"Get by TransitGatewayRouteTableId|TransitGatewayAttachmentId","list":true,"listDescription":"List all route table associations","search":true,"searchDescription":"Search by TransitGatewayRouteTableId to list associations for that route table"},"terraformMappings":[{"terraformQueryMap":"aws_ec2_transit_gateway_route_table_association.id"}]} +{ + "type": "ec2-transit-gateway-route-table-association", + "category": 3, + "potentialLinks": [ + "ec2-transit-gateway-route-table", + "ec2-transit-gateway-attachment", + "ec2-vpc", + "ec2-vpn-connection", + "directconnect-direct-connect-gateway" + ], + "descriptiveName": "Transit Gateway Route Table Association", + "supportedQueryMethods": { + "get": true, + "getDescription": "Get by TransitGatewayRouteTableId|TransitGatewayAttachmentId", + "list": true, + "listDescription": "List all route table associations", + "search": true, + "searchDescription": "Search 
by TransitGatewayRouteTableId to list associations for that route table" + }, + "terraformMappings": [ + { + "terraformQueryMap": "aws_ec2_transit_gateway_route_table_association.id" + } + ] +} diff --git a/docs.overmind.tech/docs/sources/aws/data/ec2-transit-gateway-route-table-propagation.json b/docs.overmind.tech/docs/sources/aws/data/ec2-transit-gateway-route-table-propagation.json index f7f37b4d..90825ea7 100644 --- a/docs.overmind.tech/docs/sources/aws/data/ec2-transit-gateway-route-table-propagation.json +++ b/docs.overmind.tech/docs/sources/aws/data/ec2-transit-gateway-route-table-propagation.json @@ -1 +1,26 @@ -{"type":"ec2-transit-gateway-route-table-propagation","category":3,"potentialLinks":["ec2-transit-gateway-route-table","ec2-transit-gateway-route-table-association","ec2-transit-gateway-attachment","ec2-vpc","ec2-vpn-connection","directconnect-direct-connect-gateway"],"descriptiveName":"Transit Gateway Route Table Propagation","supportedQueryMethods":{"get":true,"getDescription":"Get by TransitGatewayRouteTableId|TransitGatewayAttachmentId","list":true,"listDescription":"List all route table propagations","search":true,"searchDescription":"Search by TransitGatewayRouteTableId to list propagations for that route table"},"terraformMappings":[{"terraformQueryMap":"aws_ec2_transit_gateway_route_table_propagation.id"}]} +{ + "type": "ec2-transit-gateway-route-table-propagation", + "category": 3, + "potentialLinks": [ + "ec2-transit-gateway-route-table", + "ec2-transit-gateway-route-table-association", + "ec2-transit-gateway-attachment", + "ec2-vpc", + "ec2-vpn-connection", + "directconnect-direct-connect-gateway" + ], + "descriptiveName": "Transit Gateway Route Table Propagation", + "supportedQueryMethods": { + "get": true, + "getDescription": "Get by TransitGatewayRouteTableId|TransitGatewayAttachmentId", + "list": true, + "listDescription": "List all route table propagations", + "search": true, + "searchDescription": "Search by 
TransitGatewayRouteTableId to list propagations for that route table" + }, + "terraformMappings": [ + { + "terraformQueryMap": "aws_ec2_transit_gateway_route_table_propagation.id" + } + ] +} diff --git a/docs.overmind.tech/docs/sources/aws/data/ec2-transit-gateway-route-table.json b/docs.overmind.tech/docs/sources/aws/data/ec2-transit-gateway-route-table.json index d0dad238..32375b58 100644 --- a/docs.overmind.tech/docs/sources/aws/data/ec2-transit-gateway-route-table.json +++ b/docs.overmind.tech/docs/sources/aws/data/ec2-transit-gateway-route-table.json @@ -1 +1,22 @@ -{"type":"ec2-transit-gateway-route-table","category":3,"potentialLinks":["ec2-transit-gateway","ec2-transit-gateway-route-table-association","ec2-transit-gateway-route-table-propagation","ec2-transit-gateway-route"],"descriptiveName":"Transit Gateway Route Table","supportedQueryMethods":{"get":true,"getDescription":"Get a transit gateway route table by ID","list":true,"listDescription":"List all transit gateway route tables","search":true,"searchDescription":"Search transit gateway route tables by ARN"},"terraformMappings":[{"terraformQueryMap":"aws_ec2_transit_gateway_route_table.id"}]} +{ + "type": "ec2-transit-gateway-route-table", + "category": 3, + "potentialLinks": [ + "ec2-transit-gateway", + "ec2-transit-gateway-route-table-association", + "ec2-transit-gateway-route-table-propagation", + "ec2-transit-gateway-route" + ], + "descriptiveName": "Transit Gateway Route Table", + "supportedQueryMethods": { + "get": true, + "getDescription": "Get a transit gateway route table by ID", + "list": true, + "listDescription": "List all transit gateway route tables", + "search": true, + "searchDescription": "Search transit gateway route tables by ARN" + }, + "terraformMappings": [ + { "terraformQueryMap": "aws_ec2_transit_gateway_route_table.id" } + ] +} diff --git a/docs.overmind.tech/docs/sources/aws/data/ec2-transit-gateway-route.json 
b/docs.overmind.tech/docs/sources/aws/data/ec2-transit-gateway-route.json index 01c80f1b..7936217f 100644 --- a/docs.overmind.tech/docs/sources/aws/data/ec2-transit-gateway-route.json +++ b/docs.overmind.tech/docs/sources/aws/data/ec2-transit-gateway-route.json @@ -1 +1,26 @@ -{"type":"ec2-transit-gateway-route","category":3,"potentialLinks":["ec2-transit-gateway-route-table","ec2-transit-gateway-route-table-association","ec2-transit-gateway-attachment","ec2-transit-gateway-route-table-announcement","ec2-vpc","ec2-vpn-connection","ec2-managed-prefix-list","directconnect-direct-connect-gateway"],"descriptiveName":"Transit Gateway Route","supportedQueryMethods":{"get":true,"getDescription":"Get by TransitGatewayRouteTableId|Destination (CIDR or pl:PrefixListId)","list":true,"listDescription":"List all transit gateway routes","search":true,"searchDescription":"Search by TransitGatewayRouteTableId to list routes for that route table"},"terraformMappings":[{"terraformQueryMap":"aws_ec2_transit_gateway_route.id"}]} +{ + "type": "ec2-transit-gateway-route", + "category": 3, + "potentialLinks": [ + "ec2-transit-gateway-route-table", + "ec2-transit-gateway-route-table-association", + "ec2-transit-gateway-attachment", + "ec2-transit-gateway-route-table-announcement", + "ec2-vpc", + "ec2-vpn-connection", + "ec2-managed-prefix-list", + "directconnect-direct-connect-gateway" + ], + "descriptiveName": "Transit Gateway Route", + "supportedQueryMethods": { + "get": true, + "getDescription": "Get by TransitGatewayRouteTableId|Destination (CIDR or pl:PrefixListId)", + "list": true, + "listDescription": "List all transit gateway routes", + "search": true, + "searchDescription": "Search by TransitGatewayRouteTableId to list routes for that route table" + }, + "terraformMappings": [ + { "terraformQueryMap": "aws_ec2_transit_gateway_route.id" } + ] +} diff --git a/docs.overmind.tech/docs/sources/aws/terraform.md b/docs.overmind.tech/docs/sources/aws/terraform.md index e976c988..9cbe206a 
100644 --- a/docs.overmind.tech/docs/sources/aws/terraform.md +++ b/docs.overmind.tech/docs/sources/aws/terraform.md @@ -1,9 +1,9 @@ --- -title: Configure with Terraform +title: Configure with Terraform / OpenTofu sidebar_position: 2 --- -The [Overmind Terraform module](https://registry.terraform.io/modules/overmindtech/aws-source/overmind) configures an AWS account for Overmind infrastructure discovery in a single `terraform apply`. It creates an IAM role with a read-only policy, sets up the trust relationship, and registers the source with Overmind's API. The module is fully compatible with [OpenTofu](https://opentofu.org/). +The Overmind Terraform module configures an AWS account for Overmind infrastructure discovery in a single `terraform apply` (or `tofu apply`). It creates an IAM role with a read-only policy, sets up the trust relationship, and registers the source with Overmind's API. The module and provider are available on both the [Terraform Registry](https://registry.terraform.io/modules/overmindtech/aws-source/overmind) and the [OpenTofu Registry](https://search.opentofu.org/module/overmindtech/aws-source/overmind). 
## Prerequisites @@ -44,6 +44,15 @@ terraform plan terraform apply ``` +Or with OpenTofu: + +```bash +export OVERMIND_API_KEY="your-api-key" +tofu init +tofu plan +tofu apply +``` + ## Authentication ### Overmind Provider @@ -111,19 +120,19 @@ module "overmind_staging" { ## Inputs -| Name | Description | Type | Default | Required | -| --- | --- | --- | --- | --- | -| `name` | Descriptive name for the source in Overmind | `string` | n/a | yes | -| `regions` | AWS regions to discover (defaults to all non-opt-in regions) | `list(string)` | All 17 standard regions | no | -| `role_name` | Name for the IAM role created in this account | `string` | `"overmind-read-only"` | no | -| `tags` | Additional tags to apply to IAM resources | `map(string)` | `{}` | no | +| Name | Description | Type | Default | Required | +| ----------- | ------------------------------------------------------------ | -------------- | ----------------------- | -------- | +| `name` | Descriptive name for the source in Overmind | `string` | n/a | yes | +| `regions` | AWS regions to discover (defaults to all non-opt-in regions) | `list(string)` | All 17 standard regions | no | +| `role_name` | Name for the IAM role created in this account | `string` | `"overmind-read-only"` | no | +| `tags` | Additional tags to apply to IAM resources | `map(string)` | `{}` | no | ## Outputs -| Name | Description | -| --- | --- | -| `role_arn` | ARN of the created IAM role | -| `source_id` | UUID of the Overmind source | +| Name | Description | +| ------------- | -------------------------------------------- | +| `role_arn` | ARN of the created IAM role | +| `source_id` | UUID of the Overmind source | | `external_id` | AWS STS external ID used in the trust policy | ## Importing Existing Sources @@ -157,7 +166,7 @@ After `terraform apply` completes: ## Registry Links - **Terraform Registry**: [overmindtech/overmind provider](https://registry.terraform.io/providers/overmindtech/overmind/latest) | [overmindtech/aws-source 
module](https://registry.terraform.io/modules/overmindtech/aws-source/overmind/latest) -- **OpenTofu Registry**: coming soon +- **OpenTofu Registry**: [overmindtech/overmind provider](https://search.opentofu.org/provider/overmindtech/overmind) | [overmindtech/aws-source module](https://search.opentofu.org/module/overmindtech/aws-source/overmind) ## Troubleshooting diff --git a/docs.overmind.tech/docs/sources/aws/update-to-pod-identity.md b/docs.overmind.tech/docs/sources/aws/update-to-pod-identity.md index 64c40288..ae30c276 100644 --- a/docs.overmind.tech/docs/sources/aws/update-to-pod-identity.md +++ b/docs.overmind.tech/docs/sources/aws/update-to-pod-identity.md @@ -26,11 +26,11 @@ You can check if your IAM role needs updating by looking at the version tag: 3. Click on the role and go to the **Tags** tab 4. Look for the `overmind.version` tag -| Version Tag | Status | -|-------------|--------| -| `2025-12-01` or later | ✅ Up to date | +| Version Tag | Status | +| ----------------------- | ------------------ | +| `2025-12-01` or later | ✅ Up to date | | `2023-03-14` or earlier | ⚠️ Update required | -| No tag | ⚠️ Update required | +| No tag | ⚠️ Update required | ## Update Instructions @@ -62,6 +62,7 @@ Look for a stack named "Overmind" or "OvermindDevelopment" in the region where y :::info Finding the CloudFormation Template URL To get the latest CloudFormation template URL: + 1. Go to [Overmind Settings > Sources](https://app.overmind.tech/settings/sources) 2. Click **Add Source > AWS** 3. 
Right-click the "Deploy" button and copy the link - the URL contains the `templateURL` parameter @@ -96,11 +97,11 @@ Search for and select your Overmind role (usually named "Overmind" or the name y ```json { - "Effect": "Allow", - "Principal": { - "AWS": "arn:aws:iam::944651592624:root" - }, - "Action": "sts:TagSession" + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam::944651592624:root" + }, + "Action": "sts:TagSession" } ``` @@ -108,32 +109,32 @@ Your complete trust policy should look like this: ```json { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "AWS": "arn:aws:iam::944651592624:root" - }, - "Action": "sts:AssumeRole", - "Condition": { - "StringEquals": { - "sts:ExternalId": "YOUR-EXTERNAL-ID-HERE" - } - } - }, - { - "Effect": "Allow", - "Principal": { - "AWS": "arn:aws:iam::944651592624:root" - }, - "Action": "sts:TagSession" + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam::944651592624:root" + }, + "Action": "sts:AssumeRole", + "Condition": { + "StringEquals": { + "sts:ExternalId": "YOUR-EXTERNAL-ID-HERE" } - ] + } + }, + { + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam::944651592624:root" + }, + "Action": "sts:TagSession" + } + ] } ``` -4. Click **"Update policy"** +1. Click **"Update policy"** #### Step 4: Update the Version Tag (Optional) @@ -149,6 +150,7 @@ To help track the version of your role configuration: After updating, your existing AWS sources will continue to work without interruption. The enhanced security features will be automatically enabled within the next few minutes. You can verify the update was successful by: + 1. Checking that your source shows a green status in [Overmind Settings > Sources](https://app.overmind.tech/settings/sources) 2. 
Verifying the role's `overmind.version` tag shows `2025-12-01` or later diff --git a/docs.overmind.tech/docs/sources/azure/_category_.json b/docs.overmind.tech/docs/sources/azure/_category_.json new file mode 100644 index 00000000..ccab81aa --- /dev/null +++ b/docs.overmind.tech/docs/sources/azure/_category_.json @@ -0,0 +1,9 @@ +{ + "label": "Azure", + "position": 4, + "collapsed": true, + "link": { + "type": "generated-index", + "description": "How to integrate your Azure subscription." + } +} diff --git a/docs.overmind.tech/docs/sources/azure/configuration.md b/docs.overmind.tech/docs/sources/azure/configuration.md new file mode 100644 index 00000000..d0cf0ca9 --- /dev/null +++ b/docs.overmind.tech/docs/sources/azure/configuration.md @@ -0,0 +1,59 @@ +--- +title: Azure Configuration +sidebar_position: 1 +--- + +# Azure Configuration + +## Overview + +Overmind's Azure infrastructure discovery provides visibility into your Microsoft Azure resources through secure, read-only access. Overmind uses an Azure AD App Registration with federated credentials (workload identity) when running the source for you—no client secrets are stored or entered in the UI. + +To connect an Azure source, you need a **Name** (friendly label in Overmind), **Subscription ID**, **Tenant ID**, and **Client ID**. Overmind only ever requests read-only access (minimum **Reader** role on the subscription). + +## Prerequisites + +- **Azure subscription**: An active subscription you want to discover. +- **Azure AD App Registration**: An app registered in Azure AD with at least **Reader** role on the subscription (used for workload identity; no client secret is required in the Overmind UI). +- **Permissions**: Ability to create an App Registration and assign roles in the subscription (e.g. Owner or User Access Administrator). + +## Where to get the IDs + +You need three values from Azure. All are GUIDs. 
+ +### Subscription ID + +- **Azure Portal:** In the portal, go to **Cost Management + Billing** → **Subscriptions** (or see [View subscriptions in the Azure portal](https://learn.microsoft.com/en-us/azure/cost-management-billing/manage/view-all-accounts)), select your subscription, and copy **Subscription ID**. +- **Azure CLI:** Run `az account show --query id -o tsv` (after `az login` and, if needed, `az account set --subscription "your-subscription-name-or-id"`). + +### Tenant ID + +- **Azure Portal:** See [Find your Azure AD tenant ID](https://learn.microsoft.com/en-us/azure/active-directory/fundamentals/active-directory-how-to-find-tenant) — in the portal, go to **Azure Active Directory** → **Overview** and copy **Tenant ID**. +- **Azure CLI:** Run `az account show --query tenantId -o tsv`. + +### Client ID (Application ID) + +- **Azure Portal:** See [Register an application](https://learn.microsoft.com/en-us/azure/active-directory/develop/quickstart-register-app) — in **Azure Active Directory** → **App registrations**, select your app (or create one) and copy **Application (client) ID**. +- **If you create a service principal via CLI:** The **appId** in the command output is your Client ID. + +Your app must have at least **Reader** on the subscription. For Overmind’s managed source we use federated credentials (workload identity), so you do **not** need to create or paste a client secret in Overmind. + +For detailed setup (e.g. App Registration, role assignment, federated credentials), see [Microsoft’s documentation on registering an application](https://learn.microsoft.com/en-us/azure/active-directory/develop/quickstart-register-app) and [Reader role](https://learn.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#reader). + +## Add an Azure source in Overmind + +1. In Overmind, go to **Settings** (profile menu) → **Sources** → **Add source** → **Azure**. +2. Enter a **Name** (e.g. 
"Production Azure") so you can identify the source in Overmind. +3. Enter **Subscription ID**, **Tenant ID**, and **Client ID** using the values from [Where to get the IDs](#where-to-get-the-ids) above. +4. (Optional) **Regions:** Select specific Azure regions to limit discovery. If you leave this empty, Overmind discovers resources in all regions in the subscription. +5. Click **Create source**. + +The source will appear in your Sources list. Once the connection is established, its status will show as healthy and you can use it in Explore and change analysis. + +## Check your sources + +After you have configured a source, it will appear under [Settings → Sources](https://app.overmind.tech/settings/sources). There you can confirm the source is healthy and view its details (Source UUID, Subscription ID, Tenant ID, Client ID, and Regions). + +## Explore your data + +Once your Azure source is healthy, go to the [Explore page](https://app.overmind.tech/explore) to browse your Azure resources and their relationships. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-batch-prediction-job.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-batch-prediction-job.md index 58334249..fae56aff 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-batch-prediction-job.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-batch-prediction-job.md @@ -3,7 +3,8 @@ title: GCP Ai Platform Batch Prediction Job sidebar_label: gcp-ai-platform-batch-prediction-job --- -A GCP AI Platform (Vertex AI) Batch Prediction Job is a managed job that runs a trained model against a large, static dataset to generate predictions asynchronously. It allows you to score data stored in Cloud Storage or BigQuery and write the results back to either service, without having to manage your own compute infrastructure. 
For full details see the official documentation: https://docs.cloud.google.com/vertex-ai/docs/predictions/get-batch-predictions +A **Batch Prediction Job** in Google Cloud’s AI Platform (now part of Vertex AI) lets you run large-scale, asynchronous inference on a saved Machine Learning model. Instead of serving predictions request-by-request, you supply a dataset stored in Cloud Storage or BigQuery and the service spins up the necessary compute, distributes the workload, writes the predictions to your chosen destination, and then shuts itself down. This is ideal for one-off or periodic scoring of very large datasets. +Official documentation: https://cloud.google.com/vertex-ai/docs/predictions/batch-predictions ## Supported Methods @@ -13,22 +14,30 @@ A GCP AI Platform (Vertex AI) Batch Prediction Job is a managed job that runs a ## Possible Links +### [`gcp-ai-platform-endpoint`](/sources/gcp/Types/gcp-ai-platform-endpoint) + +A Batch Prediction Job can read from a Model that is already deployed to an Endpoint; when that is the case the job records the Endpoint name it referenced, creating this link. + ### [`gcp-ai-platform-model`](/sources/gcp/Types/gcp-ai-platform-model) -The batch prediction job references a trained model that provides the prediction logic. The job cannot run without specifying this model. +Every Batch Prediction Job must specify the Model it will use for inference. The job stores the fully-qualified model resource name, creating a direct dependency on this Model. ### [`gcp-big-query-table`](/sources/gcp/Types/gcp-big-query-table) -Input data for a batch prediction can come from a BigQuery table, and the job can also write the prediction results to another BigQuery table. +The job may take its input instances from a BigQuery table or write its prediction outputs to one. When either the source or destination is a BigQuery table, that table is linked to the job. 
### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) -Customer-managed encryption keys (CMEK) from Cloud KMS may be attached to the job to encrypt its output artefacts stored in Cloud Storage or BigQuery. +If customer-managed encryption keys (CMEK) are chosen, the Batch Prediction Job references the CryptoKey that encrypts the job metadata and any intermediate files, producing this link. + +### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) + +When the job is configured for private service access, it is attached to a specific VPC network for egress. That VPC network is therefore related to, and linked from, the job. ### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) -The job is executed under a specific IAM service account, which grants it permissions to read inputs, write outputs, and access the model. +The Batch Prediction Job executes under a user-specified or default service account, which needs permission to read the model and the input data and to write outputs. That execution identity is linked here. ### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) -Cloud Storage buckets are commonly used to supply the input files (in JSONL or CSV) and/or to store the prediction output files produced by the batch job. +Cloud Storage buckets are commonly used both for the input artefacts (CSV/JSON/TFRecord files) and for the output prediction files. Any bucket mentioned in the job’s specification is linked to the job. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-custom-job.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-custom-job.md index 4a4c1935..510763fa 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-custom-job.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-custom-job.md @@ -3,7 +3,7 @@ title: GCP Ai Platform Custom Job sidebar_label: gcp-ai-platform-custom-job --- -A GCP AI Platform Custom Job (now part of Vertex AI) is a fully-managed training workload that runs user-supplied code inside one or more container images on Google Cloud infrastructure. It allows you to specify machine types, accelerators, networking and encryption settings, then orchestrates the provisioning, execution and clean-up of the training cluster. Custom Jobs are typically used when pre-built AutoML options are insufficient and you need complete control over your training loop. +A Vertex AI / AI Platform Custom Job represents an ad-hoc machine-learning workload that you want Google Cloud to run on managed infrastructure. By pointing the job at a custom container image or a Python package, you can execute training, hyper-parameter tuning or batch-processing logic with fine-grained control over machine types, accelerators, networking and encryption. The job definition is submitted to the `projects.locations.customJobs` API and Google Cloud provisions the required compute, streams logs, stores artefacts and tears the resources down once the job finishes. 
Official documentation: https://cloud.google.com/vertex-ai/docs/training/create-custom-job ## Supported Methods @@ -16,20 +16,24 @@ Official documentation: https://cloud.google.com/vertex-ai/docs/training/create- ### [`gcp-ai-platform-model`](/sources/gcp/Types/gcp-ai-platform-model) -A successful Custom Job can optionally upload the trained artefacts as a Vertex AI Model resource; if that happens, the job will reference (and be referenced by) the resulting `gcp-ai-platform-model`. +A successful Custom Job can optionally call `model.upload()` or configure `model_to_upload`, causing Vertex AI to register a `Model` resource containing the trained artefacts. Overmind links the job to the resulting `gcp-ai-platform-model` so you can trace how the model was produced. + +### [`gcp-artifact-registry-docker-image`](/sources/gcp/Types/gcp-artifact-registry-docker-image) + +Custom Jobs usually run inside user-supplied container images. When the image is stored in Artifact Registry, Overmind records a link between the job and the specific `gcp-artifact-registry-docker-image` it pulled, making it easy to audit code and dependency provenance. ### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) -Custom Jobs support customer-managed encryption keys (CMEK). When a CMEK is specified, the job resource, its logs and any artefacts it creates are encrypted with the referenced `gcp-cloud-kms-crypto-key`. +If you enable customer-managed encryption keys (CMEK) for the job, Google Cloud encrypts logs, checkpoints and model files with the specified KMS key. The job therefore references a `gcp-cloud-kms-crypto-key`, which Overmind surfaces to highlight encryption dependencies and key-rotation risks. ### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) -You can run Custom Jobs inside a specific VPC network to reach private data sources or to avoid egress to the public internet. In that case the job is linked to the chosen `gcp-compute-network`. 
+Custom Jobs can be configured to run on a private VPC network (VPC-SC or VPC-hosted training). In that case the job is associated with the chosen `gcp-compute-network`, allowing Overmind to show ingress/egress paths and potential network exposure. ### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) -Execution of a Custom Job occurs under a user-specified service account, which determines the permissions the training containers possess. The job therefore has a direct relationship to a `gcp-iam-service-account`. +Vertex AI executes the workload under a user-specified or default service account. The job’s permissions—and hence its ability to read data, write artefacts or call other Google APIs—are determined by this `gcp-iam-service-account`. Overmind links them to flag overly-privileged identities. ### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) -Training code commonly reads data from, and writes checkpoints or model artefacts to, Cloud Storage. The buckets used for staging, input or output will be surfaced as linked `gcp-storage-bucket` resources. +Training data, intermediate checkpoints and exported models are commonly read from or written to Cloud Storage. The Custom Job specifies bucket URIs (e.g., `gs://my-dataset/*`, `gs://my-model-output/`). Overmind connects the job to each referenced `gcp-storage-bucket` so you can assess data residency and access controls. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-endpoint.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-endpoint.md index bcbe74fe..6867c939 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-endpoint.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-endpoint.md @@ -3,8 +3,8 @@ title: GCP Ai Platform Endpoint sidebar_label: gcp-ai-platform-endpoint --- -A Vertex AI (formerly AI Platform) **Endpoint** is a regional resource that serves as an entry-point for online prediction requests in Google Cloud. One or more trained **Models** can be deployed to an Endpoint, after which client applications invoke the Endpoint’s HTTPS URL (or Private Service Connect address) to obtain real-time predictions. The resource stores configuration such as traffic splitting between models, logging settings, encryption settings and the VPC network to be used for private access. -Official documentation: https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.endpoints +A **Google Cloud AI Platform Endpoint** (now part of Vertex AI) is a regional, fully-managed HTTPS entry point that receives online prediction requests and routes them to one or more deployed models. Endpoints let you perform low-latency, autoscaled inference, apply access controls, add request/response logging and attach monitoring jobs. +Official documentation: https://cloud.google.com/vertex-ai/docs/predictions/getting-predictions#deploy_model_to_endpoint ## Supported Methods @@ -16,20 +16,24 @@ Official documentation: https://cloud.google.com/vertex-ai/docs/reference/rest/v ### [`gcp-ai-platform-model`](/sources/gcp/Types/gcp-ai-platform-model) -An Endpoint may contain one or many `deployedModel` blocks, each of which references a separate Model resource. Overmind links the Endpoint to every Model that is currently deployed or that has traffic allocated to it. 
+An Endpoint hosts one or more _DeployedModels_, each of which references a standalone AI Platform/Vertex AI Model resource. The link shows which models are currently deployed to, or have traffic routed through, the endpoint. ### [`gcp-ai-platform-model-deployment-monitoring-job`](/sources/gcp/Types/gcp-ai-platform-model-deployment-monitoring-job) -If model-deployment monitoring has been enabled, the monitoring job resource records statistics and drift detection for a specific Endpoint. Overmind links the Endpoint to all monitoring jobs that target it. +If data-drift or prediction-quality monitoring has been enabled, a Model Deployment Monitoring Job is attached to the endpoint. This relationship identifies the monitoring configuration that observes traffic on the endpoint. ### [`gcp-big-query-table`](/sources/gcp/Types/gcp-big-query-table) -Prediction logging and monitoring can be configured to write request/response data into BigQuery tables. Those tables are therefore linked to the Endpoint that produced the records. +Prediction request and response payloads can be logged to a BigQuery table when logging is enabled on the endpoint. The link indicates which table is used as the logging sink for the endpoint’s traffic. ### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) -Endpoints can be created with a Customer-Managed Encryption Key (CMEK) via the `encryptionSpec.kmsKeyName` field. Overmind links the Endpoint to the specific Cloud KMS CryptoKey it uses for at-rest encryption. +Customer-managed encryption keys (CMEK) from Cloud KMS can be specified to encrypt endpoint resources at rest. This link reveals the KMS key protecting the endpoint and its deployed models. ### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) -When an Endpoint is set up for private predictions, it must specify a VPC network (`network` field) that will be used for Private Service Connect. 
This creates a relationship between the Endpoint and the referenced Compute Network. +Endpoints can be configured for private service access, allowing prediction traffic to stay within a specified VPC network. The relationship points to the Compute Network that provides the private connectivity for the endpoint. + +### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) + +Each deployed model on an endpoint runs under a service account whose permissions govern access to other GCP resources (e.g., storage buckets, KMS keys). The link shows which IAM service account is associated with the endpoint’s runtime. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-model-deployment-monitoring-job.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-model-deployment-monitoring-job.md index 69cb6b26..a5178ca8 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-model-deployment-monitoring-job.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-model-deployment-monitoring-job.md @@ -3,9 +3,8 @@ title: GCP Ai Platform Model Deployment Monitoring Job sidebar_label: gcp-ai-platform-model-deployment-monitoring-job --- -A Model Deployment Monitoring Job in Vertex AI (formerly AI Platform) performs continuous evaluation of a model that has been deployed to an endpoint. The job collects prediction requests and responses, analyses them for data drift, feature skew, and other anomalies, and can raise alerts when thresholds are exceeded. This enables teams to detect issues in production models early and take corrective action before business impact occurs. 
- -Official documentation: https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.modelDeploymentMonitoringJobs +Google Cloud’s Model Deployment Monitoring Job is a managed Vertex AI (formerly AI Platform) service that continuously analyses a deployed model’s predictions to detect data drift, prediction drift and skew between training and online data. A job is attached to one or more deployed models on an Endpoint and periodically samples incoming predictions, calculates statistics, raises alerts and writes monitoring reports to BigQuery or Cloud Storage. +Official documentation: https://cloud.google.com/vertex-ai/docs/model-monitoring/overview ## Supported Methods @@ -17,16 +16,24 @@ Official documentation: https://cloud.google.com/vertex-ai/docs/reference/rest/v ### [`gcp-ai-platform-endpoint`](/sources/gcp/Types/gcp-ai-platform-endpoint) -A Model Deployment Monitoring Job is always attached to a specific Vertex AI endpoint; it monitors one or more model deployments that live on that endpoint. The link represents the `endpoint` field inside the job resource. +The monitoring job is created against a specific Endpoint; it inspects the request/response traffic that the Endpoint receives for the deployed model versions. ### [`gcp-ai-platform-model`](/sources/gcp/Types/gcp-ai-platform-model) -Within `modelDeploymentMonitoringObjectiveConfigs`, the job specifies the deployed model(s) it should watch. This link captures that relationship between the monitoring job and the underlying Vertex AI model resources. +Each job’s `modelDeploymentMonitoringObjectiveConfigs` identifies the Model (or model version) whose predictions are being monitored for drift or skew. + +### [`gcp-big-query-table`](/sources/gcp/Types/gcp-big-query-table) + +If BigQuery is chosen as the analysis destination, the job writes sampled prediction data and computed statistics into a BigQuery table referenced by this link. 
### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) -If the job is created with `encryptionSpec`, it uses a customer-managed Cloud KMS key to encrypt monitoring logs and metadata. The linked Crypto Key represents that key. +The `encryptionSpec.kmsKeyName` field can point to a customer-managed KMS key that encrypts all monitoring artefacts produced by the job. ### [`gcp-monitoring-notification-channel`](/sources/gcp/Types/gcp-monitoring-notification-channel) -Alerting for drift or skew relies on Cloud Monitoring notification channels listed in the job’s `alertConfig.notificationChannels`. This link connects the monitoring job to those channels so users can trace how alerts will be delivered. +Alerting rules created by the job use Cloud Monitoring notification channels (e-mail, Pub/Sub, SMS, etc.) to notify operators when drift thresholds are breached. + +### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) + +When Cloud Storage is selected, the job stores prediction samples, intermediate files and final monitoring reports in a user-provided bucket. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-model.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-model.md index 6e685c19..ad28f59f 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-model.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-model.md @@ -3,8 +3,7 @@ title: GCP Ai Platform Model sidebar_label: gcp-ai-platform-model --- -A **GCP AI Platform Model** (now part of Vertex AI) is a top-level resource that represents a machine-learning model and its metadata. It groups together one or more model versions (or “Model resources” in Vertex AI terminology), defines the serving container, encryption settings and access controls, and can be deployed to online prediction endpoints or used by batch prediction jobs. 
-For full details, see the official documentation: https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models +A GCP AI Platform Model (now part of Vertex AI) is a logical container that holds the metadata and artefacts required to serve machine-learning predictions. A model record points to one or more model versions or container images, the Cloud Storage location of the trained parameters, and optional encryption settings. Models are deployed to Endpoints for online prediction or used directly in batch/streaming inference jobs. Official documentation: https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.models ## Supported Methods @@ -16,16 +15,20 @@ For full details, see the official documentation: https://cloud.google.com/verte ### [`gcp-ai-platform-endpoint`](/sources/gcp/Types/gcp-ai-platform-endpoint) -An AI Platform Model can be deployed to one or more endpoints. When Overmind detects that a model has been deployed, it links the model to the corresponding `gcp-ai-platform-endpoint` resource so that you can see where the model is serving traffic. +A model is deployed to one or more Endpoints. The link shows where this model is currently serving traffic or could be routed for prediction. ### [`gcp-ai-platform-pipeline-job`](/sources/gcp/Types/gcp-ai-platform-pipeline-job) -Vertex AI Pipeline Jobs often produce models as artefacts at the end of a training pipeline. Overmind links a `gcp-ai-platform-pipeline-job` to the `gcp-ai-platform-model` it created (or updated) so you can trace the provenance of a model back to the pipeline run that generated it. +Training or transformation Pipeline Jobs often create or update Model resources; linking them highlights which automated workflow produced the model and therefore which code/data lineage applies. ### [`gcp-artifact-registry-docker-image`](/sources/gcp/Types/gcp-artifact-registry-docker-image) -Models use a container image for prediction service. 
If that container image is stored in Artifact Registry, Overmind establishes a link between the model and the `gcp-artifact-registry-docker-image` representing the serving container. This highlights dependencies on specific container images and versions. +If the model is served via a custom prediction container, the Model record references a Docker image stored in Artifact Registry. This link surfaces that underlying image and its associated vulnerabilities. ### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) -If Customer-Managed Encryption Keys (CMEK) are enabled for the model, the model resource references the Cloud KMS Crypto Key used to encrypt the model data at rest. Overmind links the model to the `gcp-cloud-kms-crypto-key` to surface encryption dependencies and potential key-rotation risks. +Models can be protected with customer-managed encryption keys (CMEK). Overmind links the model to the specific KMS key to expose encryption scope and key rotation risks. + +### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) + +The model’s artefacts (e.g., SavedModel, scikit-learn pickle, PyTorch state) reside in a Cloud Storage bucket referenced by `artifactUri`. Linking to the bucket reveals data-at-rest location and its IAM policy. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-pipeline-job.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-pipeline-job.md index d15fdf9f..23aab5ae 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-pipeline-job.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-ai-platform-pipeline-job.md @@ -3,8 +3,8 @@ title: GCP Ai Platform Pipeline Job sidebar_label: gcp-ai-platform-pipeline-job --- -A **GCP AI Platform Pipeline Job** (now part of Vertex AI Pipelines) represents a managed execution of a Kubeflow pipeline on Google Cloud. 
It orchestrates a series of container-based tasks—such as data preprocessing, model training, and deployment—into a reproducible workflow that runs on Google-managed infrastructure. Each job stores its metadata, intermediate artefacts and logs in Google-hosted services, and can be monitored, retried or version-controlled through the Vertex AI console or API. -For full details, see the official documentation: [Vertex AI Pipelines – Run pipeline jobs](https://docs.cloud.google.com/vertex-ai/docs/pipelines/run-pipeline). +A GCP AI Platform Pipeline Job (now part of Vertex AI Pipelines) represents a single execution of a machine-learning workflow defined in a Kubeflow/Vertex AI pipeline. The job orchestrates a directed acyclic graph (DAG) of pipeline components such as data preparation, model training and evaluation, and optionally deployment. Each run is stored as a resource that tracks the DAG definition, runtime parameters, execution state, logs and metadata. +Official documentation: https://cloud.google.com/vertex-ai/docs/pipelines/introduction ## Supported Methods @@ -16,16 +16,16 @@ For full details, see the official documentation: [Vertex AI Pipelines – Run p ### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) -A pipeline job can be configured to use customer-managed encryption keys (CMEK) so that all intermediate artefacts and metadata produced by the pipeline are encrypted with a specific Cloud KMS crypto key. Overmind therefore surfaces a link to the `gcp-cloud-kms-crypto-key` that protects the job’s resources. +If the pipeline job is configured to use customer-managed encryption keys (CMEK), the key referenced here encrypts pipeline artefacts such as metadata, intermediate files and model checkpoints. ### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) -Pipeline components often run on GKE clusters or custom training/serving services that are attached to a VPC network. 
When a job specifies a `network` or `privateClusterConfig`, Overmind links the job to the corresponding `gcp-compute-network`, highlighting network-level exposure or egress restrictions that may affect the pipeline. +Pipeline components that run in custom training containers or Dataflow/Dataproc jobs may be attached to a specific VPC network to control egress, ingress and private service access. The pipeline job therefore has an implicit or explicit relationship with the VPC network used at execution time. ### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) -Every pipeline job executes under a service account whose IAM permissions determine which Google Cloud resources the job can access (e.g. storage buckets, BigQuery datasets). Overmind connects the job to that `gcp-iam-service-account` so that permission scopes and potential privilege escalations can be inspected. +The pipeline job executes under a service account which grants it permissions to create and manage downstream resources (e.g. training jobs, storage objects, BigQuery datasets). Overmind links the job to the service account that appears in its runtime configuration. ### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) -Pipeline jobs read from and write to Cloud Storage for dataset ingestion, model artefact output and pipeline metadata storage. Any bucket referenced in the job’s `pipeline_root`, component arguments or logging configuration is linked here, allowing visibility into data residency, ACLs and lifecycle policies relevant to the pipeline’s operation. +Vertex AI Pipelines store pipeline definitions, intermediate artefacts, and output models in Cloud Storage. A pipeline job will reference one or more buckets for source code, artefacts and logging, so Overmind creates links to each bucket it touches. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-artifact-registry-docker-image.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-artifact-registry-docker-image.md index ce46b462..b346f80f 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-artifact-registry-docker-image.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-artifact-registry-docker-image.md @@ -3,8 +3,8 @@ title: GCP Artifact Registry Docker Image sidebar_label: gcp-artifact-registry-docker-image --- -A GCP Artifact Registry Docker Image resource represents a single immutable image stored in Google Cloud’s Artifact Registry. It contains metadata such as the image digest, tags, size and creation timestamp, and can be queried to understand exactly which layers and versions are about to be deployed. Managing this resource allows you to verify provenance, scan for vulnerabilities and enforce policies before the image ever reaches production. -For a full description of the REST resource, see Google’s official documentation: https://cloud.google.com/artifact-registry/docs/reference/rest/v1/projects.locations.repositories.dockerImages +A GCP Artifact Registry Docker Image represents a single container image stored within Google Cloud Artifact Registry. Artifact Registry is Google Cloud’s fully-managed, secure, and scalable repository service that allows teams to store, manage and secure their build artefacts, including Docker container images. Each Docker image is identified by its path in the form `projects/{project}/locations/{location}/repositories/{repository}/dockerImages/{image}` and can hold multiple tags and versions. Managing images through Artifact Registry enables fine-grained IAM permissions, vulnerability scanning, and seamless integration with Cloud Build and Cloud Run. 
+For more information, see the official documentation: https://cloud.google.com/artifact-registry/docs/docker **Terraform Mappings:** diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-query-data-transfer-transfer-config.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-query-data-transfer-transfer-config.md index 14a1cebc..c7dc4bae 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-query-data-transfer-transfer-config.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-query-data-transfer-transfer-config.md @@ -3,8 +3,8 @@ title: GCP Big Query Data Transfer Transfer Config sidebar_label: gcp-big-query-data-transfer-transfer-config --- -The BigQuery Data Transfer Service Transfer Config defines a scheduled data-transfer job in Google Cloud. It specifies where the data comes from (for example Google Ads, YouTube or an external Cloud Storage bucket), the destination BigQuery dataset, the refresh window, schedule, run-options, encryption settings and notification preferences. In essence, it is the canonical object that tells BigQuery Data Transfer Service what to move, when to move it and how to handle the resulting tables. -Official documentation: https://docs.cloud.google.com/bigquery/docs/working-with-transfers +A BigQuery Data Transfer transfer configuration defines the schedule, destination dataset and credentials that the BigQuery Data Transfer Service will use to load data from a supported SaaS application, Google service or external data source into BigQuery. Each configuration specifies when transfers should run, the parameters required by the source system and, optionally, Pub/Sub notification settings and Cloud KMS encryption keys. 
+For a full description of the resource see the Google Cloud documentation: https://cloud.google.com/bigquery/docs/reference/datatransfer/rest/v1/projects.locations.transferConfigs **Terraform Mappings:** @@ -20,12 +20,16 @@ Official documentation: https://docs.cloud.google.com/bigquery/docs/working-with ### [`gcp-big-query-dataset`](/sources/gcp/Types/gcp-big-query-dataset) -The transfer config’s `destinationDatasetId` points to the BigQuery dataset that will receive the imported data, so the config depends on – and is intrinsically linked to – that dataset. +The transfer configuration writes its imported data into a specific BigQuery dataset; the dataset’s identifier is stored in the configuration’s `destinationDatasetId` field. Overmind therefore links the config to the dataset that will receive the transferred data. ### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) -If customer-managed encryption is enabled, the transfer config references a Cloud KMS CryptoKey that is used to encrypt the tables created by the transfer, creating a dependency on the key. +If the destination dataset is protected with customer-managed encryption keys (CMEK), the transfer runs inherit that key. Consequently, the configuration is indirectly associated with the Cloud KMS crypto key that encrypts the loaded tables, allowing Overmind to surface encryption-related risks. + +### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) + +Transfers execute using a dedicated service account (`project-number@gcp-sa-bigquerydt.iam.gserviceaccount.com`) or, in some cases, a user-provided service account. The configuration stores this principal, and appropriate IAM roles must be granted. Overmind links the transfer config to the service account to assess permission scopes. 
### [`gcp-pub-sub-topic`](/sources/gcp/Types/gcp-pub-sub-topic) -Through the `notificationPubsubTopic` field, the transfer config can publish status and error messages about individual transfer runs to a Pub/Sub topic, establishing an outgoing link to that topic. +A transfer configuration can be set to publish run status notifications to a Pub/Sub topic specified in its `notificationPubsubTopic` field. Overmind links the configuration to that topic so that message-flow and permissions between the two resources can be evaluated. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-query-dataset.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-query-dataset.md index cccc66c4..41435289 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-query-dataset.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-query-dataset.md @@ -3,12 +3,14 @@ title: GCP Big Query Dataset sidebar_label: gcp-big-query-dataset --- -A BigQuery Dataset is a top-level container that holds BigQuery tables, views, models and routines, and defines the geographic location where that data is stored. It also acts as the unit for access control, default encryption configuration and data lifecycle policies. -For full details see the Google Cloud documentation: https://cloud.google.com/bigquery/docs/datasets +A Google Cloud BigQuery Dataset is a logical container that holds tables, views, routines (stored procedures and functions) and metadata, and defines the geographic location where the underlying data is stored. Datasets also act as the administrative boundary for access-control policies and encryption configuration. 
For a full description, see the official documentation: https://cloud.google.com/bigquery/docs/datasets-intro **Terrafrom Mappings:** - `google_bigquery_dataset.dataset_id` +- `google_bigquery_dataset_iam_binding.dataset_id` +- `google_bigquery_dataset_iam_member.dataset_id` +- `google_bigquery_dataset_iam_policy.dataset_id` ## Supported Methods @@ -20,20 +22,20 @@ For full details see the Google Cloud documentation: https://cloud.google.com/bi ### [`gcp-big-query-dataset`](/sources/gcp/Types/gcp-big-query-dataset) -A dataset can reference other datasets via authorised views or cross-dataset access entries. Those referenced datasets will be linked to the current item. +Datasets can reference, copy from or authorise access to other BigQuery datasets, so Overmind may surface links where cross-dataset operations or shared access exist. -### [`gcp-big-query-model`](/sources/gcp/Types/gcp-big-query-model) +### [`gcp-big-query-routine`](/sources/gcp/Types/gcp-big-query-routine) -Every BigQuery ML model belongs to exactly one dataset. All models whose `dataset_id` matches this dataset will be linked. +Every BigQuery routine (stored procedure or user-defined function) resides inside a specific dataset; therefore routines are children of the current dataset. ### [`gcp-big-query-table`](/sources/gcp/Types/gcp-big-query-table) -Tables and views are stored inside a dataset. All tables whose `dataset_id` equals this dataset will be linked. +Tables and views are stored within a dataset. All tables that belong to this dataset will be linked here. ### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) -If the dataset is encrypted with a customer-managed key, the KMS Crypto Key used for default encryption will be linked here. +If customer-managed encryption is enabled, the dataset (and everything inside it) may be encrypted with a specific Cloud KMS crypto key. This link shows which key is in use. 
### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) -Service accounts that appear in the dataset’s IAM policy (for example as editors, owners, readers or custom roles) will be linked to show who can access or manage the dataset. +Access to a dataset is granted via IAM, often to service accounts. Linked service accounts represent principals that have explicit permissions on the dataset. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-query-model.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-query-model.md deleted file mode 100644 index d570b152..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-query-model.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: GCP Big Query Model -sidebar_label: gcp-big-query-model ---- - -A BigQuery Model is a logical resource that stores the metadata and artefacts produced by BigQuery ML when you train a machine-learning model. It lives inside a BigQuery dataset and can subsequently be queried, evaluated, exported or further trained. For a full description see the official Google Cloud documentation: https://cloud.google.com/bigquery/docs/reference/rest/v2/models - -## Supported Methods - -- `GET`: Get GCP Big Query Model by "gcp-big-query-dataset-id|gcp-big-query-model-id" -- ~~`LIST`~~ -- `SEARCH`: Search for GCP Big Query Model by "gcp-big-query-model-id" - -## Possible Links - -### [`gcp-big-query-dataset`](/sources/gcp/Types/gcp-big-query-dataset) - -Each model is contained within exactly one BigQuery dataset. The link represents this parent–child relationship and allows Overmind to surface the impact of changes to the dataset on the model. - -### [`gcp-big-query-table`](/sources/gcp/Types/gcp-big-query-table) - -A model is usually trained from, and may reference, one or more BigQuery tables (for example, the training, validation and prediction input tables). This link lets Overmind trace how alterations to those tables could affect the model’s behaviour or validity. 
- -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) - -If customer-managed encryption keys (CMEK) are enabled, the model’s data is encrypted with a Cloud KMS crypto-key. Linking the model to the crypto-key allows Overmind to assess the consequences of key rotation, deletion or permission changes on the model’s availability. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-query-routine.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-query-routine.md index b3534ec1..62ddb34a 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-query-routine.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-query-routine.md @@ -3,11 +3,12 @@ title: GCP Big Query Routine sidebar_label: gcp-big-query-routine --- -A BigQuery Routine represents a user-defined piece of reusable logic—such as a stored procedure or user-defined function—that is stored inside a BigQuery dataset and can be invoked from SQL. Routines let teams encapsulate data-processing logic, share it across queries, and manage it with version control and Infrastructure-as-Code tools. For a full description of the capabilities and configuration options, see the Google Cloud documentation on routines (https://cloud.google.com/bigquery/docs/routines-intro). +A BigQuery Routine is a reusable piece of SQL or JavaScript logic—such as a stored procedure, user-defined function (UDF), or table-valued function—stored inside a BigQuery dataset. Routines let you encapsulate complex transformations, calculations, or business rules and call them from queries just like native BigQuery functions. They can reference other BigQuery objects (tables, views, models, etc.) and may be version-controlled and secured independently of the data they operate on. 
+Official documentation: https://cloud.google.com/bigquery/docs/reference/rest/v2/routines **Terrafrom Mappings:** -- `google_bigquery_routine.routine_id` +- `google_bigquery_routine.id` ## Supported Methods @@ -19,4 +20,8 @@ A BigQuery Routine represents a user-defined piece of reusable logic—such as a ### [`gcp-big-query-dataset`](/sources/gcp/Types/gcp-big-query-dataset) -A routine is defined within a specific BigQuery dataset; the link shows the parent dataset that contains the routine. +A routine is always contained within exactly one BigQuery dataset. The link lets you trace from a routine to its parent dataset to understand data location, access controls, and retention policies that also apply to the routine. + +### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) + +If a routine’s SQL references an external table backed by Cloud Storage, or if the routine loads/stages data via the `LOAD DATA` or `EXPORT DATA` statements, the routine implicitly depends on the corresponding Cloud Storage bucket. This link surfaces that dependency so you can assess the impact of bucket-level permissions and lifecycle rules on the routine’s execution. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-query-table.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-query-table.md index 77f8bde5..5b9487fc 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-query-table.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-query-table.md @@ -3,11 +3,15 @@ title: GCP Big Query Table sidebar_label: gcp-big-query-table --- -A BigQuery table is the fundamental unit of storage in Google Cloud BigQuery. It holds the rows of structured data that analysts query using SQL, and it defines the schema, partitioning, clustering, and encryption settings that govern how that data is stored and accessed. 
For a full description see the Google Cloud documentation: https://cloud.google.com/bigquery/docs/tables +A BigQuery table is the fundamental storage unit inside Google Cloud BigQuery. It holds the actual rows of structured data that can be queried with SQL, shared, exported or used to build materialised views and machine-learning models. Tables live inside a dataset, can be partitioned or clustered, and may be encrypted either with Google-managed keys or customer-managed keys stored in Cloud KMS. They can also act as logical wrappers around external data held in Cloud Storage. +Official documentation: https://cloud.google.com/bigquery/docs/tables **Terrafrom Mappings:** - `google_bigquery_table.id` +- `google_bigquery_table_iam_binding.dataset_id` +- `google_bigquery_table_iam_member.dataset_id` +- `google_bigquery_table_iam_policy.dataset_id` ## Supported Methods @@ -19,8 +23,16 @@ A BigQuery table is the fundamental unit of storage in Google Cloud BigQuery. It ### [`gcp-big-query-dataset`](/sources/gcp/Types/gcp-big-query-dataset) -Every BigQuery table is contained within exactly one dataset. This link represents that parent–child relationship, enabling Overmind to trace from a table back to the dataset that organises and administers it. +The dataset is the immediate parent container of the table; every table must belong to exactly one dataset and inherits default encryption, location and IAM settings from it. + +### [`gcp-big-query-table`](/sources/gcp/Types/gcp-big-query-table) + +BigQuery tables can reference, copy from, or be copied to other tables (for example when creating snapshots, clones, views with explicit table references or COPY jobs). Such relationships are captured as links between table resources. ### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) -If a BigQuery table is encrypted with a customer-managed encryption key (CMEK), this link points to the specific Cloud KMS crypto key in use. 
It allows Overmind to surface risks associated with key rotation, permissions, or key deletion that could affect the table’s availability or compliance posture. +If the table (or its parent dataset) is configured to use customer-managed encryption, it points to the Cloud KMS CryptoKey that protects the data at rest. + +### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) + +An external BigQuery table may use objects stored in a Cloud Storage bucket as its underlying data source; in that case the table is linked to the bucket holding those objects. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-app-profile.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-app-profile.md index 76c988b0..ab874cf5 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-app-profile.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-app-profile.md @@ -3,8 +3,8 @@ title: GCP Big Table Admin App Profile sidebar_label: gcp-big-table-admin-app-profile --- -A Bigtable **App Profile** is a logical wrapper that tells Cloud Bigtable _how_ an application’s traffic should be routed, which clusters it can use, and what fail-over behaviour to apply. By creating multiple app profiles you can isolate workloads, direct different applications to specific clusters, or enable multi-cluster routing for higher availability. -For an in-depth explanation see the official documentation: https://cloud.google.com/bigtable/docs/app-profiles +A Bigtable **App Profile** is a logical configuration that tells Google Cloud Bigtable how client traffic for a particular application should be routed to one or more clusters within an instance. It lets you choose between single-cluster routing (for the lowest latency within a specific region) or multi-cluster routing (for higher availability across several regions) and also defines the consistency model that the application will see. 
Because app profiles govern the path that live data takes, mis-configuration can lead to increased latency, unexpected fail-over behaviour, or cross-region egress costs. +Official documentation: https://cloud.google.com/bigtable/docs/app-profiles **Terrafrom Mappings:** @@ -20,8 +20,8 @@ For an in-depth explanation see the official documentation: https://cloud.google ### [`gcp-big-table-admin-cluster`](/sources/gcp/Types/gcp-big-table-admin-cluster) -Every app profile specifies one or more clusters that client traffic may reach. Therefore an App Profile is directly linked to the Bigtable Cluster(s) it can route requests to. +An App Profile points client traffic towards one or more specific clusters. Each routing policy within the profile references the cluster identifiers defined by `gcp-big-table-admin-cluster`. Observing this link lets you see which clusters will receive traffic from the application and assess redundancy or regional placement risks. ### [`gcp-big-table-admin-instance`](/sources/gcp/Types/gcp-big-table-admin-instance) -An App Profile always belongs to exactly one Bigtable Instance; it cannot exist outside that instance’s administrative scope. +Every App Profile exists inside a single Bigtable instance. Linking to `gcp-big-table-admin-instance` shows the broader configuration—such as replication settings and all clusters—that frames the context in which the App Profile operates. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-backup.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-backup.md index 58520f6c..b7cdf6a7 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-backup.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-backup.md @@ -3,8 +3,8 @@ title: GCP Big Table Admin Backup sidebar_label: gcp-big-table-admin-backup --- -A Cloud Bigtable Backup is a point-in-time copy of a Bigtable table that is managed by the Bigtable Admin API. 
It allows you to protect data against accidental deletion or corruption and to restore the table later, either in the same cluster or in a different one within the same instance. Each backup is stored in a specific cluster, retains the table’s schema and data as they existed at the moment the backup was taken, and can be kept for a user-defined retention period. -Official documentation: https://docs.cloud.google.com/bigtable/docs/backups +A Cloud Bigtable Admin Backup represents a point-in-time copy of a single Bigtable table that is stored within the same Bigtable cluster for a user-defined retention period. Back-ups allow you to restore data that has been deleted or corrupted without replaying your entire write history, and they can also be copied to other regions for disaster-recovery purposes. The resource is created, managed and deleted through the Cloud Bigtable Admin API. +Official documentation: https://cloud.google.com/bigtable/docs/backups ## Supported Methods @@ -16,12 +16,16 @@ Official documentation: https://docs.cloud.google.com/bigtable/docs/backups ### [`gcp-big-table-admin-backup`](/sources/gcp/Types/gcp-big-table-admin-backup) -The current item represents the Backup resource itself, containing metadata such as name, creation time, size, expiration time and the source table it protects. +If the current backup is used as the source for a cross-cluster copy, or if multiple back-ups are chained through copy operations, Overmind links the related `gcp-big-table-admin-backup` resources together so you can trace provenance and inheritance of data. ### [`gcp-big-table-admin-cluster`](/sources/gcp/Types/gcp-big-table-admin-cluster) -Each backup is physically stored in exactly one Bigtable cluster; this link shows the parent cluster that owns and stores the backup. +Every backup is physically stored in the Bigtable cluster where it was created. 
The backup therefore links to its parent `gcp-big-table-admin-cluster`, enabling you to understand locality, storage costs and the failure domain that may affect both the cluster and its back-ups. ### [`gcp-big-table-admin-table`](/sources/gcp/Types/gcp-big-table-admin-table) -A backup is created from a specific table; this link identifies that source table and allows you to see which tables can be restored from the backup. +A backup is a snapshot of a specific Bigtable table at the moment the backup was taken. This link points back to that source `gcp-big-table-admin-table`, allowing you to see which dataset the backup protects and to assess the impact of schema or data changes. + +### [`gcp-cloud-kms-crypto-key-version`](/sources/gcp/Types/gcp-cloud-kms-crypto-key-version) + +When customer-managed encryption (CMEK) is enabled, the backup’s data is encrypted with a particular Cloud KMS key version. Linking to `gcp-cloud-kms-crypto-key-version` lets you audit encryption lineage and verify that the correct key material is being used for protecting the backup. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-cluster.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-cluster.md index c0a71a23..8160b874 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-cluster.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-cluster.md @@ -3,8 +3,8 @@ title: GCP Big Table Admin Cluster sidebar_label: gcp-big-table-admin-cluster --- -A Cloud Bigtable cluster represents the set of serving and storage resources that handle all reads and writes for a Cloud Bigtable instance. Each cluster belongs to a single instance, lives in one Google Cloud zone, and is configured with a certain number of nodes and a specific storage type (SSD or HDD). Clusters can be added or removed to provide high availability, geographic redundancy, or additional throughput. 
With Overmind you can surface mis-configurations such as a single-zone deployment, inadequate node counts, or missing encryption settings before your change reaches production. -Official Google documentation: https://cloud.google.com/bigtable/docs/overview#clusters +A GCP Bigtable Admin Cluster resource represents the configuration of a single cluster that belongs to a Cloud Bigtable instance. The cluster defines the geographic location where data is stored, the number and type of serving nodes, the storage type (HDD or SSD), autoscaling settings, and any customer-managed encryption keys (CMEK) that protect the data. It is managed through the Cloud Bigtable Admin API, which allows you to create, update, or delete clusters programmatically. +For further details, see Google’s official documentation: https://cloud.google.com/bigtable/docs/instances-clusters-nodes ## Supported Methods @@ -16,8 +16,8 @@ Official Google documentation: https://cloud.google.com/bigtable/docs/overview#c ### [`gcp-big-table-admin-instance`](/sources/gcp/Types/gcp-big-table-admin-instance) -Every cluster is a child resource of a Cloud Bigtable instance. Overmind links the cluster back to its parent instance so you can see which database workloads will be affected if you modify or delete the cluster. +A cluster is always a child of a Bigtable instance. This link represents the parent–child relationship: the instance contains one or more clusters, and every cluster must reference its parent instance. ### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) -When customer-managed encryption keys (CMEK) are enabled for a Bigtable cluster, the cluster references a Cloud KMS crypto key. Overmind creates a link to that key so you can verify the key’s status, rotation schedule, and IAM policy before deploying changes to the cluster. 
+If Customer-Managed Encryption Keys (CMEK) are enabled, the cluster’s encryption configuration points to the Cloud KMS CryptoKey that is used to encrypt data at rest. This link captures that dependency between the cluster and the key. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-instance.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-instance.md index 7c8d7a88..6b010f4f 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-instance.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-instance.md @@ -3,13 +3,14 @@ title: GCP Big Table Admin Instance sidebar_label: gcp-big-table-admin-instance --- -Google Cloud Bigtable is Google’s fully managed, scalable NoSQL database service. -A Bigtable _instance_ is the administrative parent resource that defines the geographic placement, replication strategy, encryption settings and service-level configuration for the tables that will live inside it. Every instance contains one or more clusters, and each cluster in turn contains the nodes that serve user data. Creating or modifying an instance therefore determines where and how your Bigtable data will be stored and replicated. -For further details, refer to the official Google Cloud documentation: https://cloud.google.com/bigtable/docs/instances-clusters-nodes +Cloud Bigtable instances are the top-level administrative containers for all tables and data stored in Bigtable. An instance defines the service tier (production or development), the geographic placement of data through its clusters, and provides the entry point for IAM policy management, encryption settings, labelling and more. 
For a detailed overview of instances, see the official Google Cloud documentation: https://cloud.google.com/bigtable/docs/instances-clusters-nodes **Terrafrom Mappings:** - `google_bigtable_instance.name` +- `google_bigtable_instance_iam_binding.instance` +- `google_bigtable_instance_iam_member.instance` +- `google_bigtable_instance_iam_policy.instance` ## Supported Methods @@ -21,4 +22,4 @@ For further details, refer to the official Google Cloud documentation: https://c ### [`gcp-big-table-admin-cluster`](/sources/gcp/Types/gcp-big-table-admin-cluster) -A Bigtable Admin Instance is the parent of one or more Bigtable Admin Clusters. Each cluster resource belongs to exactly one instance, inheriting its replication and localisation settings. When Overmind discovers or updates a gcp-big-table-admin-instance, it follows this relationship to enumerate the gcp-big-table-admin-cluster resources that compose the instance’s underlying serving infrastructure. +Every Bigtable instance is composed of one or more clusters. A `gcp-big-table-admin-cluster` represents the individual cluster resources that reside within, and are owned by, a given Bigtable instance. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-table.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-table.md index a2196b32..62f6a263 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-table.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-big-table-admin-table.md @@ -3,11 +3,14 @@ title: GCP Big Table Admin Table sidebar_label: gcp-big-table-admin-table --- -Google Cloud Bigtable tables are the primary data containers inside a Bigtable instance. A table holds rows of schemaless, wide-column data that can scale to petabytes while maintaining low-latency access. 
The Admin Table resource represents the configuration and lifecycle metadata for a table (for example, column families, garbage-collection rules, encryption settings and replication state). For a detailed explanation see the official documentation: https://docs.cloud.google.com/bigtable/docs/reference/admin/rpc. +Google Cloud Bigtable is a scalable NoSQL database service for large analytical and operational workloads. A Bigtable **table** is the primary data container within an instance, organised into rows and column families. The Bigtable Admin API allows you to create, configure, list, and delete tables, as well as manage their IAM policies and column–family schemas. Full details can be found in the official documentation: https://cloud.google.com/bigtable/docs/reference/admin/rest **Terrafrom Mappings:** - `google_bigtable_table.id` +- `google_bigtable_table_iam_binding.instance_name` +- `google_bigtable_table_iam_member.instance_name` +- `google_bigtable_table_iam_policy.instance_name` ## Supported Methods @@ -19,12 +22,12 @@ Google Cloud Bigtable tables are the primary data containers inside a Bigtable i ### [`gcp-big-table-admin-backup`](/sources/gcp/Types/gcp-big-table-admin-backup) -A backup is a point-in-time snapshot that is created from a specific table. From a table resource you can enumerate the backups that protect it, or follow a backup back to the source table from which it was taken. +A Bigtable table can have one or more backups. Overmind links a table to its related `gcp-big-table-admin-backup` resources, making it easy to assess how backup configurations might be impacted by changes to the table. ### [`gcp-big-table-admin-instance`](/sources/gcp/Types/gcp-big-table-admin-instance) -Every table belongs to exactly one Bigtable instance. The instance is the parent container that defines the clusters, replication topology and IAM policy under which the table operates. +Every table is created inside a single Bigtable instance. 
This link shows the parent `gcp-big-table-admin-instance` that owns the table so you can understand instance-level settings (such as clusters and IAM) that may affect the table. ### [`gcp-big-table-admin-table`](/sources/gcp/Types/gcp-big-table-admin-table) -Tables of the same type within the same project or instance can be cross-referenced for comparison, migration or restore operations (for example, when restoring a backup into a new table). Overmind links tables to other tables so you can trace relationships such as clone targets, restore destinations or sibling tables in the same instance. +Tables may reference each other indirectly through IAM policies or schema design. Overmind links tables to other tables when such relationships are detected, allowing you to trace dependencies across multiple Bigtable tables within or across instances. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-certificate-manager-certificate.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-certificate-manager-certificate.md new file mode 100644 index 00000000..3623be9f --- /dev/null +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-certificate-manager-certificate.md @@ -0,0 +1,16 @@ +--- +title: GCP Certificate Manager Certificate +sidebar_label: gcp-certificate-manager-certificate +--- + +A **GCP Certificate Manager Certificate** represents an SSL/TLS certificate that is stored and managed by Google Cloud Certificate Manager. Certificates configured here can be Google-managed (automatically provisioned and renewed by Google) or self-managed (imported by the user) and can be attached to load balancers, Cloud CDN, or other Google Cloud resources to provide encrypted connections. Managing certificates through Certificate Manager centralises lifecycle operations such as issuance, rotation and revocation, reducing operational overhead and the risk of serving expired certificates. 
For full details, see the official documentation: https://cloud.google.com/certificate-manager/docs + +**Terraform Mappings:** + +- `google_certificate_manager_certificate.id` + +## Supported Methods + +- `GET`: Get GCP Certificate Manager Certificate by "gcp-certificate-manager-certificate-location|gcp-certificate-manager-certificate-name" +- ~~`LIST`~~ +- `SEARCH`: Search for GCP Certificate Manager Certificate by "gcp-certificate-manager-certificate-location" diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-billing-billing-info.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-billing-billing-info.md index 59bf138c..08b81211 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-billing-billing-info.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-billing-billing-info.md @@ -3,9 +3,14 @@ title: GCP Cloud Billing Billing Info sidebar_label: gcp-cloud-billing-billing-info --- -`gcp-cloud-billing-billing-info` represents a Google Cloud **ProjectBillingInfo** resource, i.e. the object that records which Cloud Billing Account a particular GCP project is attached to and whether billing is currently enabled. -Knowing which Billing Account is used – and whether charges can actually accrue – is often vital when assessing the financial risk of a new deployment. -Official documentation: https://cloud.google.com/billing/docs/reference/rest/v1/projects/getBillingInfo +The **Cloud Billing – Billing Info** resource represents the billing configuration that is attached to an individual Google Cloud project. +For a given project it records which Cloud Billing Account is linked, whether billing is currently enabled, and other metadata that controls how usage costs are charged. +The resource is surfaced by the Cloud Billing API endpoint +`cloudbilling.googleapis.com/v1/projects/{projectId}/billingInfo`.
+Full details are available in the official Google documentation: +https://cloud.google.com/billing/docs/reference/rest/v1/projects/getBillingInfo + +Knowing the contents of this object allows Overmind to determine, for example, whether a project is running with an unexpectedly disabled billing account or whether it is tied to the correct cost centre before a deployment is made. ## Supported Methods @@ -17,4 +22,5 @@ Official documentation: https://cloud.google.com/billing/docs/reference/rest/v1/ ### [`gcp-cloud-resource-manager-project`](/sources/gcp/Types/gcp-cloud-resource-manager-project) -Every ProjectBillingInfo belongs to exactly one Cloud project. Overmind therefore links the `gcp-cloud-billing-billing-info` item to the corresponding `gcp-cloud-resource-manager-project` item, allowing you to trace billing-account associations back to the project that will generate the spend. +Every Billing Info object belongs to exactly one Cloud Resource Manager Project. +Overmind creates a link from `gcp-cloud-billing-billing-info` → `gcp-cloud-resource-manager-project` so that users can trace the billing configuration back to the workload and other resources that live inside the same project. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-build-build.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-build-build.md index 0ecba466..3ff463e4 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-build-build.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-build-build.md @@ -3,8 +3,8 @@ title: GCP Cloud Build Build sidebar_label: gcp-cloud-build-build --- -A GCP Cloud Build Build represents a single execution of Google Cloud Build, Google’s fully-managed continuous integration and delivery service. A build encapsulates the series of build steps, source code location, build artefacts, substitutions and metadata that are executed within an isolated builder environment. 
Each build is uniquely identified by its `name` (formatted as `projects/{projectId}/builds/{buildId}`) and records status, timing information, logs location and any images or other artefacts produced. -For full details see the official documentation: https://cloud.google.com/build/docs/api/reference/rest/v1/projects.builds +A **Cloud Build Build** represents a single execution of Google Cloud Build, Google Cloud’s CI/CD service. Each build contains one or more build steps (Docker containers) that run in sequence or in parallel to compile code, run tests, or package and deploy artefacts. Metadata recorded on the build includes its source, substitutions, images, logs, secrets used, time-stamps, and overall status. +See the official documentation for full details: https://cloud.google.com/build/docs/api/reference/rest/v1/projects.builds ## Supported Methods @@ -16,16 +16,24 @@ For full details see the official documentation: https://cloud.google.com/build/ ### [`gcp-artifact-registry-docker-image`](/sources/gcp/Types/gcp-artifact-registry-docker-image) -If the build definition contains a step that builds and pushes a Docker image, the resulting image is usually pushed to Artifact Registry. The build therefore produces — and is linked to — one or more `gcp-artifact-registry-docker-image` resources representing the images it published. +A build often produces container images and pushes them to Artifact Registry. Overmind links the build to every `gcp-artifact-registry-docker-image` whose digest or tag is declared in the build’s `images` field. + +### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) + +Builds can be configured to decrypt secrets with Cloud KMS. If the build specification references a KMS key (for example in `secretEnv`), Overmind records a link to the corresponding `gcp-cloud-kms-crypto-key`. 
### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) -Every Cloud Build execution runs under a specific IAM service account (commonly the project-level Cloud Build service account or a custom account) which grants it permissions to fetch source, write logs and push artefacts. The build is thus associated with the `gcp-iam-service-account` used during its execution. +Cloud Build runs under a service account (`serviceAccount` field). The build is therefore linked to the `gcp-iam-service-account` that actually executes the build steps and accesses other resources. ### [`gcp-logging-bucket`](/sources/gcp/Types/gcp-logging-bucket) -Cloud Build streams build logs to Cloud Logging; organisations often route these logs into dedicated Logging buckets for retention or analysis. When such routing is configured, the build’s log entries will appear in (and therefore relate to) the relevant `gcp-logging-bucket`. +Build logs are written to Cloud Logging and can be routed into a custom logging bucket. If log sink routing points the build’s logs to a specific `gcp-logging-bucket`, Overmind associates the two objects. + +### [`gcp-secret-manager-secret`](/sources/gcp/Types/gcp-secret-manager-secret) + +Secrets injected into build steps via `secretEnv` or `availableSecrets` are stored in Secret Manager. A link is created between the build and every `gcp-secret-manager-secret` it consumes. ### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) -Source code for a build can be fetched from a Cloud Storage bucket, and build logs or artefact archives can also be stored in buckets created by Cloud Build (e.g. `gs://{projectId}_cloudbuild`). Consequently, a build may read from or write to one or more `gcp-storage-bucket` resources. +Cloud Build can pull its source from a Cloud Storage bucket and write build logs or artefacts back to buckets (e.g. via the `logsBucket` or `artifacts` fields). These buckets appear as related `gcp-storage-bucket` resources. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-functions-function.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-functions-function.md index 119b1da2..07856620 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-functions-function.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-functions-function.md @@ -3,7 +3,7 @@ title: GCP Cloud Functions Function sidebar_label: gcp-cloud-functions-function --- -A Google Cloud Functions Function is a serverless, event-driven compute resource that executes user-supplied code in response to HTTP requests or a wide range of Google Cloud events. Because Google Cloud manages the underlying infrastructure, you only specify the code, runtime, memory, timeout, trigger and IAM policy, and you are billed solely for the resources actually consumed while the function is running. For more detail, see Google’s official documentation: https://cloud.google.com/functions/docs/concepts/overview. +Google Cloud Functions is a server-less execution environment that lets you run event-driven code without provisioning or managing servers. A “Function” is the deployed piece of code together with its configuration (runtime, memory/CPU limits, environment variables, ingress/egress settings, triggers and IAM bindings). Documentation: https://cloud.google.com/functions/docs ## Supported Methods @@ -15,20 +15,20 @@ A Google Cloud Functions Function is a serverless, event-driven compute resource ### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) -If Customer-Managed Encryption Keys (CMEK) are enabled, the function’s source code, environment variables or secret volumes are encrypted with a Cloud KMS CryptoKey. Overmind links the function to any CryptoKey that protects its assets so you can assess key rotation or deletion risks. 
+A function can reference a Cloud KMS crypto key to decrypt secrets or to use Customer-Managed Encryption Keys (CMEK) for its source code stored in Cloud Storage. Overmind therefore links the function to any KMS keys it is authorised to use. ### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) -Every Cloud Function runs as an IAM Service Account. The permissions granted to this account define what the function can read or modify at runtime. Overmind links the function to its execution service account, allowing you to evaluate privilege levels and potential lateral-movement paths. +Each Cloud Function executes as a service account, and other service accounts may be granted permission to invoke or manage it. Overmind links the function to the runtime service account and to any caller or admin accounts discovered in its IAM policy. ### [`gcp-pub-sub-topic`](/sources/gcp/Types/gcp-pub-sub-topic) -A function can be triggered by a Pub/Sub topic or publish messages to one. Overmind records these relationships so you can see which topics will invoke the function and what downstream systems might be affected if the function misbehaves. +Pub/Sub topics are commonly used as event triggers. When a function is configured to fire on messages published to a topic, Overmind records a link between the function and that topic. ### [`gcp-run-service`](/sources/gcp/Types/gcp-run-service) -Second-generation Cloud Functions are deployed on Cloud Run. Overmind links the function to the underlying Cloud Run Service, exposing additional configuration such as VPC connectors, ingress settings and revision history that may introduce risk. +Second-generation Cloud Functions are built and deployed as Cloud Run services under the hood. Overmind links the function to the underlying Cloud Run service so you can trace configuration and runtime dependencies. 
### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) -Cloud Functions often interact with Cloud Storage: source code may be stored in a staging bucket, and functions can be triggered by bucket events (e.g., object creation). Overmind links the function to any associated buckets, helping you identify data-exfiltration risks and unintended public access. +Cloud Storage buckets can be both event sources (object create/delete triggers) and repositories for a function’s source code during deployment. Overmind links the function to any bucket that serves as a trigger or holds its source archive. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-kms-crypto-key-version.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-kms-crypto-key-version.md index 64bb5960..ef5e8169 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-kms-crypto-key-version.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-kms-crypto-key-version.md @@ -1,30 +1,22 @@ --- -title: GCP Cloud KMS Crypto Key Version +title: GCP Cloud Kms Crypto Key Version sidebar_label: gcp-cloud-kms-crypto-key-version --- -A CryptoKeyVersion represents an individual cryptographic key and its associated key material within a Cloud KMS CryptoKey. An ENABLED version can be used for cryptographic operations. Each CryptoKey can have multiple versions, allowing for key rotation. For security reasons, the raw cryptographic key material can never be viewed or exported - it can only be used to encrypt, decrypt, or sign data when an authorized user or application invokes Cloud KMS. For more information, refer to the [official documentation](https://docs.cloud.google.com/kms/docs/key-states). +A **Cloud KMS CryptoKeyVersion** is an immutable representation of a single piece of key material managed by Google Cloud Key Management Service. Each CryptoKey can have many versions, allowing you to rotate key material without changing the logical key that your workloads use. 
A version holds state (e.g., `ENABLED`, `DISABLED`, `DESTROYED`), an algorithm specification (RSA, AES-GCM, etc.), and lifecycle metadata such as creation and destruction timestamps. See the official Google documentation for full details: https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions -**Terraform Mappings:** +**Terraform Mappings:** - `google_kms_crypto_key_version.id` ## Supported Methods -- `GET`: Get GCP Cloud KMS Crypto Key Version by "location|keyRing|cryptoKey|version" +- `GET`: Get GCP Cloud Kms Crypto Key Version by "gcp-cloud-kms-key-ring-location|gcp-cloud-kms-key-ring-name|gcp-cloud-kms-crypto-key-name|gcp-cloud-kms-crypto-key-version-version" - ~~`LIST`~~ -- `SEARCH`: Search for GCP Cloud KMS Crypto Key Versions by "location|keyRing|cryptoKey" (returns all versions of the specified CryptoKey) +- `SEARCH`: Search for GCP Cloud Kms Crypto Key Version by "gcp-cloud-kms-key-ring-location|gcp-cloud-kms-key-ring-name|gcp-cloud-kms-crypto-key-name" ## Possible Links ### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) -A CryptoKeyVersion belongs to exactly one parent CryptoKey. The parent CryptoKey contains the version's configuration and purpose. Deleting the parent CryptoKey will delete all of its CryptoKeyVersions, but deleting a CryptoKeyVersion does not affect the parent key. - -### `gcp-cloudkms-importjob` - -If the key material was imported (rather than generated by KMS), the CryptoKeyVersion references the ImportJob that was used for the import operation. The ImportJob contains metadata about how the key material was imported. Deleting the ImportJob after a successful import does not affect the CryptoKeyVersion. - -### `gcp-cloudkms-ekmconnection` - -For CryptoKeyVersions with EXTERNAL_VPC protection level, the version links to an EKM (External Key Manager) connection that manages the external key material. 
This is used when keys are stored and operated on in an external key management system rather than within Google Cloud KMS. +A CryptoKeyVersion is always a child of a CryptoKey. The `gcp-cloud-kms-crypto-key` resource represents the logical key, while the current item represents a particular version of that key’s material. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-kms-crypto-key.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-kms-crypto-key.md index 2813c0fb..25af555c 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-kms-crypto-key.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-kms-crypto-key.md @@ -3,9 +3,13 @@ title: GCP Cloud Kms Crypto Key sidebar_label: gcp-cloud-kms-crypto-key --- -A Google Cloud KMS Crypto Key is a logical key resource that performs cryptographic operations such as encryption/de-encryption, signing, and message authentication. Each Crypto Key sits inside a Key Ring, which in turn lives in a specific GCP location (region). The key material for a Crypto Key can be rotated, versioned, and protected by Cloud KMS or by customer-managed hardware security modules, and it is referenced by other Google Cloud services whenever those services need to encrypt or sign data on your behalf. +A **Cloud KMS CryptoKey** is the logical resource in Google Cloud that represents a single cryptographic key and its primary metadata. It defines the algorithm, purpose (encryption/decryption, signing/verification, MAC, etc.), rotation schedule, and IAM policy for the key. Each CryptoKey lives inside a Key Ring, can have multiple immutable versions, and is used by Google-managed services (or your own applications) to perform cryptographic operations. 
Official documentation: https://cloud.google.com/kms/docs/object-hierarchy#key +**Terraform Mappings:** + +- `google_kms_crypto_key.id` + ## Supported Methods - `GET`: Get GCP Cloud Kms Crypto Key by "gcp-cloud-kms-key-ring-location|gcp-cloud-kms-key-ring-name|gcp-cloud-kms-crypto-key-name" @@ -14,6 +18,10 @@ Official documentation: https://cloud.google.com/kms/docs/object-hierarchy#key ## Possible Links +### [`gcp-cloud-kms-crypto-key-version`](/sources/gcp/Types/gcp-cloud-kms-crypto-key-version) + +A CryptoKey is the parent of one or more CryptoKeyVersions. Each version contains the actual key material and its own state (enabled, disabled, destroyed, etc.). Overmind links to these versions so you can inspect individual key material lifecycles and detect risks such as disabled or scheduled-for-destruction versions. + ### [`gcp-cloud-kms-key-ring`](/sources/gcp/Types/gcp-cloud-kms-key-ring) -A Crypto Key is always a child resource of a Key Ring. The `gcp-cloud-kms-key-ring` link allows Overmind to trace from the key to its parent container, establishing the hierarchical relationship needed to understand inheritance of IAM policies, location constraints, and aggregated risk. +Every CryptoKey resides within a Key Ring, which provides a namespace and location boundary. This link shows the Key Ring that owns the CryptoKey, allowing you to trace location-specific compliance requirements or IAM inheritance issues. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-kms-key-ring.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-kms-key-ring.md index 5dd85555..9893a1d2 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-kms-key-ring.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-kms-key-ring.md @@ -3,20 +3,21 @@ title: GCP Cloud Kms Key Ring sidebar_label: gcp-cloud-kms-key-ring --- -A Cloud KMS Key Ring is a logical container used to group related customer-managed encryption keys within Google Cloud’s Key Management Service (KMS). All Crypto Keys created inside the same Key Ring share the same geographic location, and access control can be applied at the Key Ring level to govern every key it contains. For more information, refer to the [official documentation](https://cloud.google.com/kms/docs/create-key-ring). +A **Cloud KMS Key Ring** is a top-level container within Google Cloud KMS that groups one or more CryptoKeys in a specific GCP location (region). It acts as both an organisational unit and an IAM boundary: all CryptoKeys inside a Key Ring inherit the same location and share the same access-control policies. Creating a Key Ring is an irreversible, free operation and is a prerequisite for creating any CryptoKeys. +For full details, see the official documentation: https://cloud.google.com/kms/docs/object-hierarchy#key_rings **Terrafrom Mappings:** -- `google_kms_key_ring.name` +- `google_kms_key_ring.id` ## Supported Methods - `GET`: Get GCP Cloud Kms Key Ring by "gcp-cloud-kms-key-ring-location|gcp-cloud-kms-key-ring-name" -- `LIST`: List all GCP Cloud Kms Key Rings across all locations in the project +- `LIST`: List all GCP Cloud Kms Key Ring items - `SEARCH`: Search for GCP Cloud Kms Key Ring by "gcp-cloud-kms-key-ring-location" ## Possible Links ### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) -A Key Ring is the direct parent of one or more Crypto Keys. 
Every Crypto Key resource must belong to exactly one Key Ring, so Overmind creates this link to allow navigation from the Key Ring to all the keys it contains (and vice-versa), making it easier to assess the full cryptographic surface associated with a given deployment. +Each CryptoKey belongs to exactly one Key Ring. Linking a Key Ring to its child `gcp-cloud-kms-crypto-key` items lets Overmind surface all encryption keys that share the same location and IAM policy, making it easier to assess the blast radius of any permission or configuration changes applied to the Key Ring. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-resource-manager-project.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-resource-manager-project.md index 19130df9..492a056e 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-resource-manager-project.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-resource-manager-project.md @@ -3,15 +3,8 @@ title: GCP Cloud Resource Manager Project sidebar_label: gcp-cloud-resource-manager-project --- -A **Google Cloud Platform (GCP) Project** is the fundamental organising entity managed by the Cloud Resource Manager service. Every GCP workload—whether it is a single virtual machine or a complex, multi-region Kubernetes deployment—must reside inside a Project. The Project acts as a logical container for: - -- All GCP resources (compute, storage, networking, databases, etc.) -- Identity and Access Management (IAM) policies -- Billing configuration -- Quotas and limits -- Metadata such as labels and organisation/folder hierarchy - -Because policies and billing are enforced at the Project level, understanding the state of a Project is critical when assessing deployment risk. 
For detailed information, refer to the official Google documentation: https://cloud.google.com/resource-manager/docs/creating-managing-projects +A Google Cloud Resource Manager Project represents the fundamental organisational unit within Google Cloud Platform (GCP). Every compute, storage or networking asset you create must live inside a Project, which in turn sits under a Folder or Organisation node. Projects provide isolated boundaries for Identity and Access Management (IAM), quotas, billing, API enablement and lifecycle operations such as creation, update, suspension and deletion. By modelling Projects, Overmind can surface risks linked to mis-scoped IAM roles, neglected billing settings or interactions with other resources _before_ any change is pushed to production. +Official documentation: https://cloud.google.com/resource-manager/docs/creating-managing-projects ## Supported Methods diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-resource-manager-tag-value.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-resource-manager-tag-value.md index bcf456a7..2d62d471 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-resource-manager-tag-value.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-cloud-resource-manager-tag-value.md @@ -3,7 +3,8 @@ title: GCP Cloud Resource Manager Tag Value sidebar_label: gcp-cloud-resource-manager-tag-value --- -A Tag Value is the value component of Google Cloud’s hierarchical tagging system, which allows you to attach fine-grained, policy-aware metadata to resources. Each Tag Value sits under a Tag Key and, together, the pair forms a tag that can be propagated across projects and folders within an organisation. Tags enable centralised governance, cost allocation, and conditional access control through IAM and Org Policy. 
For full details, see the official Google Cloud documentation: https://cloud.google.com/resource-manager/docs/tags/tags-creating-and-managing#tag-values +A GCP Cloud Resource Manager **Tag Value** is the second layer in Google Cloud’s new tagging hierarchy, sitting beneath a Tag Key and above the individual resources to which it is applied. Together, Tag Keys and Tag Values allow administrators to attach fine-grained, organisation-wide metadata to projects, folders and individual cloud resources, enabling consistent policy enforcement, cost allocation, automation and reporting across an estate. Each Tag Value represents a specific, permitted value for a given Tag Key (e.g. Tag Key `environment` may have Tag Values `production`, `staging`, `test`). +For a full description of Tag Values and how they fit into the tagging system, refer to Google’s documentation: https://cloud.google.com/resource-manager/reference/rest/v3/tagValues. **Terrafrom Mappings:** diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-address.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-address.md index b9b328cd..bc91e016 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-address.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-address.md @@ -3,8 +3,7 @@ title: GCP Compute Address sidebar_label: gcp-compute-address --- -A GCP Compute Address is a statically-reserved IPv4 or IPv6 address that can be assigned to Compute Engine resources such as virtual machine instances, forwarding rules, VPN gateways and load-balancers. Reserving the address stops it from changing when the attached resource is restarted and allows the address to be re-used on other resources later. Addresses may be global (for external HTTP(S) load-balancers) or regional (for most other use-cases), and internal addresses can be tied to a specific VPC network and sub-network. 
-For full details see the official documentation: https://docs.cloud.google.com/compute/docs/reference/rest/v1/addresses +A GCP Compute Address is a reserved, static IP address that can be either regional (tied to a specific region and VPC network) or global (usable by global load-balancing resources). Once reserved, the address can be attached to forwarding rules, virtual machine (VM) instances, Cloud NAT configurations and other networking resources, ensuring its IP does not change even if the underlying resource is recreated. See the official documentation for full details: https://cloud.google.com/compute/docs/ip-addresses/reserve-static-external-ip-address. **Terrafrom Mappings:** @@ -20,12 +19,32 @@ For full details see the official documentation: https://docs.cloud.google.com/c ### [`gcp-compute-address`](/sources/gcp/Types/gcp-compute-address) -A self-link that allows Overmind to relate this address to other instances of the same type (for example, distinguishing between regional and global addresses with identical names). +Static addresses rarely reference one another directly, but Overmind may surface links where an address is used as a reference target (for example, when one resource releases and another takes ownership of the same address). + +### [`gcp-compute-forwarding-rule`](/sources/gcp/Types/gcp-compute-forwarding-rule) + +Regional forwarding rules for Network Load Balancers or protocol forwarding can be configured with a specific static IP. The forwarding rule’s `IPAddress` field points to the Compute Address. + +### [`gcp-compute-global-forwarding-rule`](/sources/gcp/Types/gcp-compute-global-forwarding-rule) + +Global forwarding rules, used by HTTP(S), SSL, or TCP Proxy load balancers, reference a global static IP address. The global forwarding rule therefore links back to the associated Compute Address. 
+ +### [`gcp-compute-instance`](/sources/gcp/Types/gcp-compute-instance) + +A VM instance’s network interface may be assigned a reserved external or internal IP. If an instance uses a static IP, the instance resource contains a link to the corresponding Compute Address. ### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) -Internal (private) addresses are reserved within a specific VPC network, so an address will be linked to the `gcp-compute-network` that owns the IP range from which it is allocated. +Internal (private) static addresses are always allocated within a specific VPC network. The Compute Address resource stores the ID of the network from which the IP is taken, creating a link to the Network. + +### [`gcp-compute-public-delegated-prefix`](/sources/gcp/Types/gcp-compute-public-delegated-prefix) + +When you own a public delegated prefix, you can allocate individual static addresses from that range. Each resulting Compute Address records the delegated prefix it belongs to. + +### [`gcp-compute-router`](/sources/gcp/Types/gcp-compute-router) + +Cloud NAT configurations on a Cloud Router can consume one or more reserved external IP addresses. The router’s NAT config lists the Compute Addresses being used, forming a link. ### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) -When an internal address is scoped to a particular sub-network, Overmind records this dependency by linking the address to the corresponding `gcp-compute-subnetwork`. +For regional internal addresses you must specify the subnetwork (IP range) to allocate from. The Compute Address therefore references, and is linked to, the Subnetwork resource. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-autoscaler.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-autoscaler.md index 7d468e6d..2ee81ac6 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-autoscaler.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-autoscaler.md @@ -3,8 +3,7 @@ title: GCP Compute Autoscaler sidebar_label: gcp-compute-autoscaler --- -The Google Cloud Compute Autoscaler is a regional or zonal resource that automatically adds or removes VM instances from a Managed Instance Group in response to workload demand. By scaling on CPU utilisation, load-balancing capacity, Cloud Monitoring metrics, or pre-defined schedules, it helps keep applications responsive while keeping infrastructure spending under control. -For detailed information, consult the official documentation: https://cloud.google.com/compute/docs/autoscaler +A GCP Compute Autoscaler is a zonal or regional resource that automatically adds or removes VM instances from a managed instance group to keep your application running at the desired performance level and cost. Scaling decisions can be driven by policies based on average CPU utilisation, HTTP load-balancing capacity, Cloud Monitoring metrics, schedules, or per-instance utilisation. Full details can be found in the official documentation: https://cloud.google.com/compute/docs/autoscaler **Terrafrom Mappings:** @@ -15,3 +14,9 @@ For detailed information, consult the official documentation: https://cloud.goog - `GET`: Get GCP Compute Autoscaler by "gcp-compute-autoscaler-name" - `LIST`: List all GCP Compute Autoscaler items - ~~`SEARCH`~~ + +## Possible Links + +### [`gcp-compute-instance-group-manager`](/sources/gcp/Types/gcp-compute-instance-group-manager) + +Every autoscaler is attached to exactly one managed instance group; in the GCP API this relationship is expressed through the `target` field, which points to the relevant `instanceGroupManager` resource. 
Following this link in Overmind reveals which VM instances the autoscaler is responsible for scaling. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-backend-service.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-backend-service.md index f295c81b..b29c676f 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-backend-service.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-backend-service.md @@ -3,14 +3,13 @@ title: GCP Compute Backend Service sidebar_label: gcp-compute-backend-service --- -A GCP Compute Backend Service is the central configuration object that tells a Google Cloud load balancer where and how to send traffic. -It groups one or more back-end targets (for example instance groups, zonal NEG or serverless NEG), specifies the load-balancing scheme (internal or external), session affinity, health checks, protocol, timeout and (optionally) Cloud Armor security policies. -Because almost every Google Cloud load-balancing product routes traffic through a backend service, it is a critical part of any production deployment. -Official documentation: https://cloud.google.com/compute/docs/reference/rest/v1/backendServices +A Compute Backend Service defines how Google Cloud Load Balancers distribute traffic to one or more back-end targets (Instance Groups, Network Endpoint Groups, or serverless workloads). It specifies the load-balancing algorithm, session affinity, capacity controls, health checks, time-outs, protocol and (optionally) a Cloud Armor security policy. Backend services exist as either regional or global resources, depending on the load balancer type. 
+For full details see the official Google Cloud documentation: https://cloud.google.com/load-balancing/docs/backend-service **Terrafrom Mappings:** - `google_compute_backend_service.name` +- `google_compute_region_backend_service.name` ## Supported Methods @@ -20,10 +19,26 @@ Official documentation: https://cloud.google.com/compute/docs/reference/rest/v1/ ## Possible Links +### [`gcp-compute-health-check`](/sources/gcp/Types/gcp-compute-health-check) + +A backend service is required to reference one or more Health Checks. These determine the health of each backend target and whether traffic should be sent to it. + +### [`gcp-compute-instance`](/sources/gcp/Types/gcp-compute-instance) + +Individual VM instances receive traffic indirectly through a backend service when they belong to an instance group or unmanaged instance list that the backend service uses. + +### [`gcp-compute-instance-group`](/sources/gcp/Types/gcp-compute-instance-group) + +Managed or unmanaged Instance Groups are the most common type of backend that a backend service points to. The group’s VMs are the actual targets for load-balanced traffic. + ### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) -A backend service implicitly belongs to the same VPC network as the back-end resources (instance groups or NEGs) it references. Consequently, the service’s reachability, IP ranges and firewall posture are constrained by that network, so Overmind creates a link to the corresponding `gcp-compute-network` to surface these dependencies. +Backends referenced by a backend service must reside in a specific VPC network; therefore the backend service is effectively bound to that network and its associated subnets and firewall rules. 
+ +### [`gcp-compute-network-endpoint-group`](/sources/gcp/Types/gcp-compute-network-endpoint-group) + +Network Endpoint Groups (NEGs) can be configured as backends of a backend service to route traffic to endpoints such as containers, serverless services, or on-premises resources. ### [`gcp-compute-security-policy`](/sources/gcp/Types/gcp-compute-security-policy) -If Cloud Armor is enabled, the backend service contains a direct reference to a `securityPolicy`. This link allows Overmind to show how web-application-firewall rules and rate-limiting policies are applied to traffic flowing through the backend service. +A backend service can optionally attach a Cloud Armor Security Policy to enforce L7 firewall rules, rate limiting, and other protective measures on incoming traffic before it reaches the backends. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-disk.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-disk.md index 6b34dc83..4d61d32c 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-disk.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-disk.md @@ -3,8 +3,7 @@ title: GCP Compute Disk sidebar_label: gcp-compute-disk --- -A GCP Compute Disk is a durable, high-performance block-storage volume that can be attached to one or more Compute Engine virtual machine instances. Persistent disks can act as boot devices or as additional data volumes, are automatically replicated within a zone or region, and can be backed up through snapshots or turned into custom images for rapid redeployment. -For full details see the official Google Cloud documentation: https://cloud.google.com/compute/docs/disks +A GCP Compute Disk—formally known as a Persistent Disk—is block-level storage that can be attached to Google Compute Engine virtual machine (VM) instances. 
Disks may be zonal or regional, support features such as snapshots, replication, and Customer-Managed Encryption Keys (CMEK), and can be resized or detached without data loss. Official documentation: https://cloud.google.com/compute/docs/disks **Terrafrom Mappings:** @@ -18,22 +17,30 @@ For full details see the official Google Cloud documentation: https://cloud.goog ## Possible Links +### [`gcp-cloud-kms-crypto-key-version`](/sources/gcp/Types/gcp-cloud-kms-crypto-key-version) + +Indicates the specific Cloud KMS key version used when the disk is encrypted with a customer-managed encryption key. + ### [`gcp-compute-disk`](/sources/gcp/Types/gcp-compute-disk) -This link appears when one persistent disk has been cloned or recreated from another (for example, using the `--source-disk` flag), allowing Overmind to follow ancestry or duplication chains between disks. +For regional or replicated disks, the resource records the relationship to its source or replica peer disk. ### [`gcp-compute-image`](/sources/gcp/Types/gcp-compute-image) -A custom image may have been created from the current disk, or conversely the disk may have been created from an image. Overmind records this link so you can see which images depend on, or are the origin of, a particular disk. +Shows the image from which the disk was created, or images that have been built from this disk. ### [`gcp-compute-instance`](/sources/gcp/Types/gcp-compute-instance) -Virtual machine instances to which the disk is attached (either as a boot disk or as an additional mounted volume) are linked here. This allows you to view the blast-radius of any change to the disk in terms of running workloads. +Lists the VM instances to which the disk is currently attached or has been attached historically. 
### [`gcp-compute-instant-snapshot`](/sources/gcp/Types/gcp-compute-instant-snapshot) -If an instant snapshot has been taken from the disk, or if the disk has been created from an instant snapshot, Overmind records the relationship via this link. +Captures the association between the disk and any instant snapshots taken for rapid backup or restore operations. ### [`gcp-compute-snapshot`](/sources/gcp/Types/gcp-compute-snapshot) -Standard persistent disk snapshots derived from the disk, or snapshots that were used to create the disk, are linked here, enabling traceability between long-term backups and the live volume. +Represents traditional snapshots for the disk, enabling point-in-time recovery or disk cloning. + +### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) + +If disk snapshots or images are exported to Cloud Storage, this link records the destination bucket holding those exports. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-external-vpn-gateway.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-external-vpn-gateway.md index 5877253d..353fc407 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-external-vpn-gateway.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-external-vpn-gateway.md @@ -3,8 +3,8 @@ title: GCP Compute External Vpn Gateway sidebar_label: gcp-compute-external-vpn-gateway --- -A GCP Compute External VPN Gateway represents a VPN gateway device that resides outside of Google Cloud—typically an on-premises firewall, router or a third-party cloud appliance. In High-Availability VPN (HA VPN) configurations it is used to describe the peer gateway so that Cloud Router and HA VPN tunnels can be created and managed declaratively. Each external gateway resource records the device’s public IP addresses and routing style, allowing Google Cloud to treat the remote endpoint as a first-class object and to validate or reference it from other VPN and network resources. 
-For full details, see the official Google documentation: https://cloud.google.com/sdk/gcloud/reference/compute/external-vpn-gateways +A **Compute External VPN Gateway** is a Google Cloud resource that represents a customer-managed VPN appliance that resides outside of Google’s network (for example, in an on-premises data centre or another cloud). By defining one or more external interface IP addresses and an associated redundancy type, it tells Cloud VPN (HA VPN or Classic VPN) where to terminate its tunnels. In other words, the resource is the “remote end” of a Cloud VPN connection, allowing Google Cloud to establish secure IPsec tunnels to external infrastructure. +For further details, see the official documentation: https://cloud.google.com/compute/docs/reference/rest/v1/externalVpnGateways **Terrafrom Mappings:** diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-firewall.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-firewall.md index 074bee1a..e970ca4b 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-firewall.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-firewall.md @@ -3,7 +3,8 @@ title: GCP Compute Firewall sidebar_label: gcp-compute-firewall --- -A GCP Compute Firewall is a set of rules that control incoming and outgoing network traffic to Virtual Machine (VM) instances within a Google Cloud Virtual Private Cloud (VPC) network. Each rule defines whether specific connections (identified by protocol, port, source, destination and direction) are allowed or denied, thereby providing network-level security and segmentation for workloads running on Google Cloud. +A Google Cloud VPC firewall rule controls inbound and outbound traffic to and from the virtual machine (VM) instances that are attached to a particular VPC network. Each rule specifies a direction, priority, action (allow or deny), protocol and port list, and a target (network tags or service accounts). 
Rules are stateful and are evaluated before traffic reaches any instance, allowing you to centrally enforce network security policy across your workloads. +Official documentation: https://cloud.google.com/vpc/docs/firewalls **Terrafrom Mappings:** @@ -13,14 +14,18 @@ A GCP Compute Firewall is a set of rules that control incoming and outgoing netw - `GET`: Get a gcp-compute-firewall by its "name" - `LIST`: List all gcp-compute-firewall -- ~~`SEARCH`~~ +- `SEARCH`: Search for firewalls by network tag. The query is a plain network tag name. ## Possible Links +### [`gcp-compute-instance`](/sources/gcp/Types/gcp-compute-instance) + +Firewall rules apply to VM instances that match their target criteria (network tags or service accounts). Therefore, an instance is linked to the firewall rules that currently govern the traffic it may send or receive. + ### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) -A firewall rule is always created inside a single VPC network; that network determines the scope within which the rule is evaluated. Overmind therefore links a gcp-compute-firewall to the gcp-compute-network that owns it. +Every firewall rule is created within a specific VPC network. The rule only affects resources that are attached to that network, so it is linked to its parent network resource. ### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) -Firewall rules can specify target or source service accounts, allowing traffic to be filtered based on the workload identity running on a VM. Overmind links the firewall rule to any gcp-iam-service-account referenced in its `target_service_accounts` or `source_service_accounts` fields. +Firewall rules can target VM instances by the service account they are running as. When a rule uses the `target_service_accounts` field, it is related to those IAM service accounts. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-forwarding-rule.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-forwarding-rule.md index 9d622a78..39d030b3 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-forwarding-rule.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-forwarding-rule.md @@ -3,8 +3,8 @@ title: GCP Compute Forwarding Rule sidebar_label: gcp-compute-forwarding-rule --- -A GCP Compute Forwarding Rule defines how incoming packets are directed within Google Cloud. It associates an IP address, protocol and port range with a specific target—such as a load-balancer target proxy, VPN gateway, or, for certain internal load-balancer variants, a backend service—so that traffic is forwarded correctly. Forwarding rules can be global or regional and, when internal, are bound to a particular VPC network (and optionally a subnetwork) to control the scope of traffic distribution. -For full details see the official documentation: https://docs.cloud.google.com/load-balancing/docs/forwarding-rule-concepts +A GCP Compute Forwarding Rule defines how incoming packets are handled within Google Cloud. It binds an IP address, protocol and (optionally) port range to a specific target resource such as a backend service, target proxy or target pool. Forwarding rules underpin both external and internal load-balancing solutions and can be either regional or global in scope. +For full details see the official documentation: https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts. **Terrafrom Mappings:** @@ -20,12 +20,32 @@ For full details see the official documentation: https://docs.cloud.google.com/l ### [`gcp-compute-backend-service`](/sources/gcp/Types/gcp-compute-backend-service) -For certain internal load balancers (e.g. Internal TCP/UDP Load Balancer), the forwarding rule points directly to a backend service. 
Overmind records this as a link so that any risk identified on the backend service can be surfaced when assessing the forwarding rule. +The forwarding rule may specify a backend service as its target (for example, when configuring an Internal TCP/UDP Load Balancer). + +### [`gcp-compute-forwarding-rule`](/sources/gcp/Types/gcp-compute-forwarding-rule) + +This represents the same forwarding-rule resource; Overmind links to it so that self-references or associations between global and regional rules can be tracked. ### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) -An internal forwarding rule is created inside a specific VPC network; the rule determines how traffic is routed within that network. Linking the forwarding rule to its VPC allows Overmind to trace network-level misconfigurations that could affect traffic flow. +For internal forwarding rules, the rule is created inside a specific VPC network; the link identifies that parent network. + +### [`gcp-compute-public-delegated-prefix`](/sources/gcp/Types/gcp-compute-public-delegated-prefix) + +If the rule’s IP address is allocated from a delegated public prefix, it will be linked to that prefix to show the allocation source. ### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) -When a regional internal forwarding rule is restricted to a particular subnetwork, the subnetwork is explicitly referenced. This link lets Overmind evaluate subnet-level controls (such as secondary ranges and IAM bindings) in the context of the forwarding rule’s traffic path. +Internal forwarding rules also reference the subnetwork from which their internal IP address is drawn. + +### [`gcp-compute-target-http-proxy`](/sources/gcp/Types/gcp-compute-target-http-proxy) + +External HTTP Load Balancer forwarding rules target an HTTP proxy, so the rule links to the relevant `target-http-proxy` resource. 
+ +### [`gcp-compute-target-https-proxy`](/sources/gcp/Types/gcp-compute-target-https-proxy) + +External HTTPS Load Balancer forwarding rules target an HTTPS proxy; this link identifies that proxy. + +### [`gcp-compute-target-pool`](/sources/gcp/Types/gcp-compute-target-pool) + +Legacy Network Load Balancer forwarding rules can point directly to a target pool; the link shows which pool receives the traffic. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-global-address.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-global-address.md index 85788cc6..ffbe9cb8 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-global-address.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-global-address.md @@ -3,8 +3,8 @@ title: GCP Compute Global Address sidebar_label: gcp-compute-global-address --- -A Compute Global Address is a static, reserved IP address that is accessible from any Google Cloud region. It can be either external (public) or internal, and is typically used by globally distributed resources such as HTTP(S) load balancers, Cloud Run services, or global internal load balancers. Once reserved, the address can be bound to forwarding rules or other network endpoints, ensuring that the same IP is advertised worldwide. -For full details, see the official documentation: https://cloud.google.com/compute/docs/ip-addresses/reserve-static-external-ip-address#global_addresses +A **Compute Global Address** in Google Cloud Platform is a statically-reserved IP address that is reachable from, or usable across, all regions. It can be external (used, for example, by a global HTTP(S) load balancer) or internal (used by regional resources that require a routable, private global IP). Reserving the address ensures it does not change while it is in use, and allows it to be assigned to resources at creation time or later. 
+Official documentation: https://cloud.google.com/compute/docs/ip-addresses/reserve-static-external-ip-address **Terrafrom Mappings:** @@ -20,4 +20,12 @@ For full details, see the official documentation: https://cloud.google.com/compu ### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) -Global internal addresses must be created within a specific VPC network, and the `network` attribute on the address points to that VPC. Overmind therefore links a gcp-compute-global-address to the corresponding gcp-compute-network so that you can understand which network context the IP address belongs to and assess any related risks. +A global address may be bound to a specific VPC network when it is reserved as an internal global IP. Overmind links the address to the `gcp-compute-network` so you can see in which network the address is routable and assess overlapping CIDR or routing risks. + +### [`gcp-compute-public-delegated-prefix`](/sources/gcp/Types/gcp-compute-public-delegated-prefix) + +If the address is carved out of a public delegated prefix that your project controls, Overmind links it to that `gcp-compute-public-delegated-prefix` to show the parent block and enable checks for exhaustion or mis-allocation. + +### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) + +For internal global addresses that are further scoped to a particular subnetwork, Overmind establishes a link to the `gcp-compute-subnetwork` so you can trace which subnet’s routing table and firewall rules apply to traffic destined for the address. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-global-forwarding-rule.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-global-forwarding-rule.md index fae8d4a0..4f56abf5 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-global-forwarding-rule.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-global-forwarding-rule.md @@ -3,8 +3,7 @@ title: GCP Compute Global Forwarding Rule sidebar_label: gcp-compute-global-forwarding-rule --- -A Google Compute Engine **Global Forwarding Rule** represents the externally-visible IP address and port(s) that receive traffic for a global load balancer. It defines where packets that enter on a particular protocol/port combination should be sent, pointing them at a target proxy (for HTTP(S), SSL or TCP Proxy load balancers) or target VPN gateway. In the case of Internal Global Load Balancing it may also specify the VPC network and subnetwork that own the virtual IP address. In short, the forwarding rule is the public (or internal) entry-point that maps client traffic to the load balancer’s control plane. -Official documentation: https://cloud.google.com/compute/docs/reference/rest/v1/globalForwardingRules +A Google Cloud Compute Global Forwarding Rule defines a single anycast virtual IP address that routes incoming traffic at the global level to a specified target (such as an HTTP(S) proxy, SSL proxy or TCP proxy) or, for internal load balancing, directly to a backend service. It is the entry-point resource for most external HTTP(S) and proxy load balancers and for internal global load balancers. 
For full details see the Google Cloud documentation: https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts **Terrafrom Mappings:** @@ -20,12 +19,16 @@ Official documentation: https://cloud.google.com/compute/docs/reference/rest/v1/ ### [`gcp-compute-backend-service`](/sources/gcp/Types/gcp-compute-backend-service) -A global forwarding rule ultimately delivers traffic to one or more backend services via a chain of resources (target proxy → URL map → backend service). Overmind surfaces this indirect relationship so that you can trace the path from the exposed IP address all the way to the workloads that will handle the request. +When the forwarding rule is created for an internal global load balancer, it references a backend service directly; the rule’s traffic is delivered to the backends listed in that service. Analysing this link lets Overmind trace traffic paths from the VIP to the actual instances or endpoints. ### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) -When the forwarding rule is used for internal global load balancing, it contains a `network` field that points to the VPC network that owns the virtual IP address. This link allows Overmind to show which network the listener lives in and what other resources share that network. +Internal global forwarding rules must be attached to a specific VPC network. Linking to the network resource reveals which project-wide connectivity domain the VIP belongs to and helps surface risks such as unintended exposure to peered networks. ### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) -Similar to the network link, internal forwarding rules may reference a specific `subnetwork`. Overmind records this connection so you can identify the exact IP range and region in which the internal load balancer’s virtual IP is allocated. +If the forwarding rule is internal, it is scoped to a particular subnetwork. 
Understanding this relationship identifies the IP range in which the virtual IP lives and highlights segmentation or overlapping-CIDR issues. + +### [`gcp-compute-target-http-proxy`](/sources/gcp/Types/gcp-compute-target-http-proxy) + +For external HTTP(S), SSL or TCP proxy load balancers, the forwarding rule points to a target proxy resource. The proxy terminates the client connection before forwarding to backend services. Linking these resources enables Overmind to trace configuration chains and detect misconfigurations such as SSL policy mismatches or missing backends. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-health-check.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-health-check.md index 61efaaff..b94f59de 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-health-check.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-health-check.md @@ -3,12 +3,13 @@ title: GCP Compute Health Check sidebar_label: gcp-compute-health-check --- -A **GCP Compute Health Check** is a monitored probe that periodically tests the reachability and responsiveness of Google Cloud resources—such as VM instances, managed instance groups, or back-ends behind a load balancer—and reports their health status. These checks allow Google Cloud’s load balancers and auto-healing mechanisms to route traffic only to healthy instances, improving service reliability and availability. You can configure different protocols (HTTP, HTTPS, TCP, SSL, or HTTP/2), thresholds, and time-outs to suit your workload’s requirements. -For full details, see the official documentation: https://cloud.google.com/load-balancing/docs/health-checks +A GCP Compute Health Check is a Google Cloud resource that periodically probes virtual machine instances or endpoints to decide whether they are fit to receive production traffic. 
The check runs from the Google-managed control plane using protocols such as TCP, SSL, HTTP(S), HTTP/2 or gRPC, and compares the response to thresholds you configure (e.g. response code, timeout, healthy/unhealthy counts). Backend services, target pools and managed instance groups use the resulting health status to route requests only to healthy instances and to trigger autoscaling or fail-over behaviour. Health checks come in global and regional flavours, aligning with global and regional load balancers respectively. +Official documentation: https://cloud.google.com/load-balancing/docs/health-checks **Terrafrom Mappings:** - `google_compute_health_check.name` +- `google_compute_region_health_check.name` ## Supported Methods diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-http-health-check.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-http-health-check.md index 095d2e51..987b860c 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-http-health-check.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-http-health-check.md @@ -3,9 +3,8 @@ title: GCP Compute Http Health Check sidebar_label: gcp-compute-http-health-check --- -A **Google Cloud Compute HTTP Health Check** is a legacy, regional health-check resource that periodically issues HTTP `GET` requests to a specified path on your instances or load-balanced back-ends. If an instance responds with an acceptable status code (e.g. `200–299`) within the configured timeout for the required number of consecutive probes, it is marked healthy; otherwise, it is marked unhealthy. Load balancers and target pools use this signal to route traffic only to healthy instances, helping to maintain application availability. -Google now recommends the newer, unified _Health Check_ resource for most use-cases, but HTTP Health Checks remain fully supported and are still encountered in many estates. 
-For full details, see the official documentation: https://cloud.google.com/compute/docs/reference/rest/v1/httpHealthChecks +A GCP Compute HTTP Health Check is a globally scoped resource that periodically sends HTTP requests to a specified port and path on your instances or endpoints to verify that they are responding correctly. Load balancers, managed instance groups and other Google Cloud services use the results of these checks to decide whether traffic should be routed to a given backend. Each check can be customised with parameters such as the request path, host header, check interval, timeout, and healthy/unhealthy thresholds. +For further details see the official documentation: https://cloud.google.com/compute/docs/load-balancing/health-checks#http-health-checks **Terrafrom Mappings:** diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-image.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-image.md index 77040607..9ccc9158 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-image.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-image.md @@ -3,8 +3,8 @@ title: GCP Compute Image sidebar_label: gcp-compute-image --- -A GCP Compute Image represents a bootable disk image in Google Compute Engine. Images capture the contents of a virtual machine’s root volume (operating system, installed packages, configuration files, etc.) and act as the template from which new persistent disks and VM instances are created. Teams use images to standardise the base operating-system layer across their fleet, speed up instance provisioning, and ensure consistency between environments. Modifying or deleting an image can therefore have an immediate impact on every workload that references it, including instance templates and managed instance groups. 
-Official documentation: https://cloud.google.com/compute/docs/images +A Google Cloud Compute Image is a read-only template that contains a boot disk configuration (including the operating system and any installed software) which can be used to create new persistent disks or VM instances. Images may be publicly provided by Google, published by third-party vendors, or built privately within your own project. They support features such as image families, deprecation, and customer-managed encryption keys (CMEK). +For full details see the official documentation: https://cloud.google.com/compute/docs/images **Terrafrom Mappings:** @@ -14,4 +14,34 @@ Official documentation: https://cloud.google.com/compute/docs/images - `GET`: Get GCP Compute Image by "gcp-compute-image-name" - `LIST`: List all GCP Compute Image items -- ~~`SEARCH`~~ +- `SEARCH`: Search for GCP Compute Image by "gcp-compute-image-family" + +## Possible Links + +### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) + +If the image is protected with a customer-managed encryption key (CMEK), Overmind links the image to the Cloud KMS Crypto Key that encrypts its contents. + +### [`gcp-cloud-kms-crypto-key-version`](/sources/gcp/Types/gcp-cloud-kms-crypto-key-version) + +When CMEK protection specifies an explicit key version, the image is linked to that exact Crypto Key Version so you can trace roll-overs or revocations that might affect instance bootability. + +### [`gcp-compute-disk`](/sources/gcp/Types/gcp-compute-disk) + +Images can be created from existing persistent disks, and new disks can be created from an image. Overmind therefore links images to the disks that serve as their source or to the disks that have been instantiated from them. 
+ +### [`gcp-compute-image`](/sources/gcp/Types/gcp-compute-image) + +Images belonging to the same image family or derived from one another (for example, when rolling a new version) are cross-linked so you can understand upgrade paths and deprecations within a family. + +### [`gcp-compute-snapshot`](/sources/gcp/Types/gcp-compute-snapshot) + +An image may be built from one or more snapshots of a disk, and snapshots can be exported from an image. Overmind links images to the snapshots that contributed to, or were generated from, them. + +### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) + +Access to create, deprecate or use an image is controlled through IAM roles. Overmind shows the service accounts that have permissions on the image, helping you assess who can launch VMs from it. + +### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) + +During import or export operations, raw disk files are stored in Cloud Storage. Overmind links an image to the Storage Buckets that hosted its source or export objects, enabling you to trace data residency and clean-up unused artefacts. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instance-group-manager.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instance-group-manager.md index bbe86356..c9e3bf24 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instance-group-manager.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instance-group-manager.md @@ -3,7 +3,8 @@ title: GCP Compute Instance Group Manager sidebar_label: gcp-compute-instance-group-manager --- -A Google Cloud Compute Instance Group Manager is the control plane object that creates and maintains a Managed Instance Group (MIG). It provisions Virtual Machine (VM) instances from an Instance Template, keeps their number in line with the desired size, and automatically repairs or replaces unhealthy VMs to ensure uniformity across the group. 
In effect, it is the resource that makes a MIG self-healing and declarative. For full details see the official documentation: https://docs.cloud.google.com/compute/docs/reference/rest/v1/instanceGroupManagers. +A Compute Instance Group Manager (IGM) is the control plane object for a Managed Instance Group in Google Cloud Platform. It is responsible for creating, deleting, and maintaining a homogeneous fleet of Compute Engine virtual machines according to a declarative configuration such as target size, instance template and update policy. Because the manager continually reconciles the group’s actual state with the desired state, it underpins features like rolling updates, auto-healing and autoscaling. +Official documentation: https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances **Terrafrom Mappings:** @@ -19,16 +20,20 @@ A Google Cloud Compute Instance Group Manager is the control plane object that c ### [`gcp-compute-autoscaler`](/sources/gcp/Types/gcp-compute-autoscaler) -An autoscaler resource can reference a particular Instance Group Manager and adjust the group’s target size according to load metrics. When a link exists, Overmind shows which autoscaler is controlling the scaling behaviour of the MIG managed by this Instance Group Manager. +An Autoscaler resource can target a Managed Instance Group via its Instance Group Manager, dynamically increasing or decreasing the group’s size based on utilisation metrics or schedules. + +### [`gcp-compute-health-check`](/sources/gcp/Types/gcp-compute-health-check) + +Within an auto-healing policy the Instance Group Manager references one or more Health Check resources to decide when individual instances should be recreated. ### [`gcp-compute-instance-group`](/sources/gcp/Types/gcp-compute-instance-group) -The Instance Group Manager owns and controls a specific managed instance group. 
This link reveals the underlying Instance Group object that represents the collection of VMs created by the manager. +The Instance Group Manager encapsulates and manages an underlying (managed) Instance Group resource that represents the actual collection of VM instances. ### [`gcp-compute-instance-template`](/sources/gcp/Types/gcp-compute-instance-template) -Every Instance Group Manager specifies an Instance Template that defines the configuration of the VMs it will create (machine type, disks, metadata, etc.). Overmind links the manager to its template so you can trace configuration drift risks back to the source template. +The manager uses an Instance Template to define the configuration (machine type, disks, metadata, etc.) of every VM it creates in the group. ### [`gcp-compute-target-pool`](/sources/gcp/Types/gcp-compute-target-pool) -When using legacy network load balancers, an Instance Group Manager may add its instances to one or more Target Pools. This link identifies the load-balancing back-ends that depend on the instances generated by the manager, helping to surface blast-radius considerations for networking changes. +For legacy network load balancing, an Instance Group Manager can be configured to automatically add or remove its instances from a Target Pool, enabling them to receive traffic from a forwarding rule. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instance-group.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instance-group.md index ee62276d..5b45fd96 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instance-group.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instance-group.md @@ -3,8 +3,8 @@ title: GCP Compute Instance Group sidebar_label: gcp-compute-instance-group --- -A Google Cloud Compute Instance Group is a logical collection of virtual machine (VM) instances that you manage as a single entity. 
Instance groups can be either managed (where the group is tied to an instance template and can perform auto-healing, autoscaling and rolling updates) or unmanaged (a simple grouping of individually created VMs). They are commonly used to distribute traffic across identical instances and to simplify operational tasks such as scaling and updates. -For an in-depth explanation, refer to the official documentation: https://cloud.google.com/compute/docs/instance-groups +A Google Cloud Compute Instance Group is a logical collection of Virtual Machine (VM) instances running on Google Compute Engine that are treated as a single entity for deployment, scaling and load-balancing purposes. Instance groups can be managed (all VMs created from a common template and automatically kept in the desired size/state) or unmanaged (a user-assembled set of individual VMs). They are commonly used behind load balancers to provide highly available, horizontally scalable services. +For full details see the official Google Cloud documentation: https://cloud.google.com/compute/docs/instance-groups **Terrafrom Mappings:** @@ -20,8 +20,8 @@ For an in-depth explanation, refer to the official documentation: https://cloud. ### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) -Each VM contained in the instance group is attached to a specific VPC network. Consequently, the instance group inherits a dependency on that GCP Compute Network; changes to the network (e.g., firewall rules, routing) can directly impact the availability or behaviour of all instances in the group. +Every VM in an Instance Group must be attached to a VPC network. Overmind therefore links a Compute Instance Group to the Compute Network that provides its underlying connectivity, enabling you to trace how network-level policies or mis-configurations might affect the availability of the workload hosted by the group. 
### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) -Within its parent VPC network, every instance is placed in a particular subnetwork. Therefore, the instance group is transitively linked to the associated GCP Compute Subnetwork. Subnetwork configuration—such as IP ranges or regional placement—affects how the grouped instances communicate internally and with external resources. +Within a given VPC network, all VMs in the Instance Group reside in a specific subnetwork. Overmind links the Instance Group to that Subnetwork so you can understand IP address allocation, regional placement and any subnet-specific firewall rules that could impact the instances. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instance-template.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instance-template.md index fd742bd5..a109bd06 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instance-template.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instance-template.md @@ -3,7 +3,7 @@ title: GCP Compute Instance Template sidebar_label: gcp-compute-instance-template --- -A Compute Engine instance template is a reusable blueprint that captures almost all of the configuration needed to launch a Virtual Machine (VM) instance in Google Cloud: machine type, boot image, attached disks, network interfaces, metadata, service accounts, shielded-VM options and more. Templates allow you to create individual VM instances consistently or serve as the basis for managed instance groups that can scale automatically. +A Google Cloud Compute Instance Template is a reusable description of the properties required to create a virtual machine (VM) instance. It encapsulates details such as machine type, boot image, disks, network interfaces, metadata, tags, and service-account settings. 
Once defined, the template can be used by users, managed instance groups, autoscalers, or other automation to create identically configured VMs at scale. Official documentation: https://cloud.google.com/compute/docs/instance-templates **Terrafrom Mappings:** @@ -14,50 +14,58 @@ Official documentation: https://cloud.google.com/compute/docs/instance-templates - `GET`: Get a gcp-compute-instance-template by its "name" - `LIST`: List all gcp-compute-instance-template -- ~~`SEARCH`~~ +- `SEARCH`: Search for instance templates by network tag. The query is a plain network tag name. ## Possible Links ### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) -If customer-managed encryption keys (CMEK) are specified in the template, they reference a Cloud KMS crypto-key that will be used to encrypt the boot or data disks of any VM created from the template. +An instance template can reference a customer-managed encryption key (CMEK) from Cloud KMS to encrypt the persistent disks defined in the template. ### [`gcp-compute-disk`](/sources/gcp/Types/gcp-compute-disk) -The template can define additional persistent disks to be auto-created and attached, or it can attach existing disks in read-only or read-write mode. +Boot and additional persistent disks are specified inside the template. Any disk image or snapshot expanded into an actual persistent disk at instance-creation time will appear as a linked compute-disk resource. + +### [`gcp-compute-firewall`](/sources/gcp/Types/gcp-compute-firewall) + +The network tags set in the template are used by VMs launched from it. Firewall rules that target those tags therefore become effective for every instance derived from the template. ### [`gcp-compute-image`](/sources/gcp/Types/gcp-compute-image) -The boot disk section of the template points to a Compute Engine image that is cloned each time a new VM is launched. +The template’s boot disk references a specific compute image (public, custom, or shared). 
This image is the source from which the VM’s root filesystem is created. ### [`gcp-compute-instance`](/sources/gcp/Types/gcp-compute-instance) -When a user or an autoscaler instantiates the template, it materialises as one or more Compute Engine instances that inherit every property defined in the template. +When a VM is launched using this template—either manually or by a managed instance group—the resulting resource is a compute-instance that maintains a provenance link back to the template. ### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) -Every network interface defined in the template must belong to a VPC network, so the template contains links to the relevant network resources. +Each network interface declared in the template must point to a VPC network, establishing the connectivity context for all future instances based on the template. ### [`gcp-compute-node-group`](/sources/gcp/Types/gcp-compute-node-group) -If the template targets sole-tenant nodes, it can specify a node group affinity so that all created VMs land on a particular node group. +If node affinity is configured in the template, instances created from it will attempt to schedule onto the specified sole-tenant node group. ### [`gcp-compute-reservation`](/sources/gcp/Types/gcp-compute-reservation) -Templates may be configured to consume capacity from an existing reservation, ensuring launched VMs fit within reserved resources. +A template can include reservation affinity, causing newly created VMs to consume capacity from a specific Compute Engine reservation. + +### [`gcp-compute-route`](/sources/gcp/Types/gcp-compute-route) + +Although routes are defined at the network level, all VMs derived from the template inherit those routes through their attached network, so routing behaviour is indirectly influenced by the template. 
### [`gcp-compute-security-policy`](/sources/gcp/Types/gcp-compute-security-policy) -Tags or service-account settings in the template can cause the resulting instances to match Cloud Armor security policies applied at the project or network level. +If instances launched from the template are later attached to backend services that use Cloud Armor security policies, their traffic will be evaluated against those policies; tracing the link helps assess exposure. ### [`gcp-compute-snapshot`](/sources/gcp/Types/gcp-compute-snapshot) -Instead of an image, the template can build new disks from a snapshot, linking the template to that snapshot resource. +The template may specify a source snapshot instead of an image for one or more disks, resulting in disks that are restored from those snapshots at VM creation time. ### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) -For networks that are in auto or custom subnet mode, the template points to the exact subnetwork each NIC should join. +For each network interface, the template can identify a specific subnetwork, dictating the IP range from which the instance will draw its primary internal address. ### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) -The template includes a service account and its OAuth scopes; the created VMs will assume that service account’s identity and permissions. +A service account can be attached in the template so that every VM started from it runs with the same IAM identity and associated OAuth scopes. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instance.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instance.md index ffa3f837..a945f36a 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instance.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instance.md @@ -3,7 +3,7 @@ title: GCP Compute Instance sidebar_label: gcp-compute-instance --- -A GCP Compute Instance is a virtual machine (VM) hosted on Google Cloud’s Compute Engine service. It provides configurable CPU, memory, storage and operating-system options, enabling you to run anything from small test services to large-scale production workloads. Instances can be created from public images or custom images, can have one or more network interfaces, and can attach multiple persistent or ephemeral disks. For full details see the official documentation: https://cloud.google.com/compute/docs/instances +A Google Cloud Compute Engine instance is a virtual machine (VM) that runs on Google’s infrastructure. It provides configurable CPU, memory, disk and network resources so you can run workloads in a scalable, on-demand manner. For full details see the official documentation: https://cloud.google.com/compute/docs/instances. **Terrafrom Mappings:** @@ -13,18 +13,54 @@ A GCP Compute Instance is a virtual machine (VM) hosted on Google Cloud’s Comp - `GET`: Get GCP Compute Instance by "gcp-compute-instance-name" - `LIST`: List all GCP Compute Instance items -- ~~`SEARCH`~~ +- `SEARCH`: Search for GCP Compute Instance by "gcp-compute-instance-networkTag" ## Possible Links +### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) + +If the instance’s boot or data disks are encrypted with customer-managed encryption keys (CMEK), it references a Cloud KMS crypto key. 
+ +### [`gcp-cloud-kms-crypto-key-version`](/sources/gcp/Types/gcp-cloud-kms-crypto-key-version) + +A specific version of the KMS key may be recorded when CMEK encryption is enabled on the instance’s disks. + ### [`gcp-compute-disk`](/sources/gcp/Types/gcp-compute-disk) -A Compute Instance normally boots from and/or mounts one or more persistent disks. Overmind links an instance to every `gcp-compute-disk` that is attached to it so you can assess the impact of changes to those disks on the VM. +Boot and additional persistent disks are attached to the instance; these disks back the VM’s storage. + +### [`gcp-compute-firewall`](/sources/gcp/Types/gcp-compute-firewall) + +Firewall rules that target the instance’s network tags or service account control inbound and outbound traffic for the VM. + +### [`gcp-compute-image`](/sources/gcp/Types/gcp-compute-image) + +The instance’s boot disk is created from a Compute Engine image, capturing the operating system and initial state. + +### [`gcp-compute-instance-group-manager`](/sources/gcp/Types/gcp-compute-instance-group-manager) + +When the VM is part of a managed instance group (MIG), the group manager is responsible for creating, deleting and updating the instance. + +### [`gcp-compute-instance-template`](/sources/gcp/Types/gcp-compute-instance-template) + +Instances launched via a template inherit machine type, disks, metadata and network settings defined in that template. ### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) -Every network interface on a Compute Instance is connected to a VPC network. Overmind records this relationship to show how altering a `gcp-compute-network` (for example, changing routing or firewall rules) could affect the instance’s connectivity. +Every network interface on the instance is connected to a VPC network, determining the VM’s reachable address space. 
+ +### [`gcp-compute-route`](/sources/gcp/Types/gcp-compute-route) + +Routes in the attached VPC network dictate how the instance’s traffic is forwarded; some routes may apply only to instances with specific tags. + +### [`gcp-compute-snapshot`](/sources/gcp/Types/gcp-compute-snapshot) + +Snapshots can be taken from the instance’s persistent disks for backup or cloning purposes, creating a link between the VM and its snapshots. ### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) -Within a VPC network, an interface resides in a specific subnetwork. Overmind links the instance to its `gcp-compute-subnetwork` so you can evaluate risks related to IP ranges, regional availability or subnet-level security policies that might influence the VM. +Each network interface is placed within a subnetwork, assigning the instance its internal IP range and regional scope. + +### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) + +An optional service account is attached to the instance, granting it IAM-scoped credentials to access Google APIs. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instant-snapshot.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instant-snapshot.md index b26e582d..82699609 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instant-snapshot.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-instant-snapshot.md @@ -3,8 +3,8 @@ title: GCP Compute Instant Snapshot sidebar_label: gcp-compute-instant-snapshot --- -A GCP Compute Instant Snapshot is a point-in-time, crash-consistent copy of a persistent disk that is captured almost immediately, irrespective of the size of the disk. It is stored in the same region as the source disk and is intended for rapid backup, testing, or disaster-recovery scenarios where minimal creation time is essential. 
Instant snapshots are ephemeral by design (they are automatically deleted after seven days unless converted to a regular snapshot) and incur lower network egress because the data never leaves the region. -For full details, refer to the official documentation: https://cloud.google.com/compute/docs/reference/rest/v1/instantSnapshots +A GCP Compute Instant Snapshot is a point-in-time, crash-consistent copy of a Compute Engine persistent disk that is created almost instantaneously, permitting rapid backup, cloning, and disaster-recovery workflows. Instant snapshots can be used to restore a disk to the exact state it was in when the snapshot was taken or to create new disks that replicate that state. They differ from traditional snapshots primarily in the speed at which they are taken and restored. +Official documentation: https://cloud.google.com/compute/docs/disks/instant-snapshots **Terrafrom Mappings:** @@ -20,4 +20,4 @@ For full details, refer to the official documentation: https://cloud.google.com/ ### [`gcp-compute-disk`](/sources/gcp/Types/gcp-compute-disk) -An Instant Snapshot is created from a persistent disk. The snapshot’s `source_disk` field references the original `gcp-compute-disk`, and any restore or promotion operation will require access to that underlying disk or its region. +An instant snapshot is always sourced from an existing Compute Engine persistent disk. Therefore, each `gcp-compute-instant-snapshot` has a direct parent–child relationship with the `gcp-compute-disk` it captures, and Overmind links the snapshot back to the originating disk to surface dependency and recovery paths. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-machine-image.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-machine-image.md index a8df0527..5cb79bfb 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-machine-image.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-machine-image.md @@ -3,8 +3,7 @@ title: GCP Compute Machine Image sidebar_label: gcp-compute-machine-image --- -A Google Cloud Compute Engine **Machine Image** is a first-class resource that stores all the information required to recreate one or more identical virtual machine instances, including boot and data disks, instance metadata, machine type, service accounts, and network interface definitions. Machine images make it easy to version-control complete VM templates and roll them out across projects or organisations. -Official documentation: https://cloud.google.com/compute/docs/machine-images +A Google Cloud Compute Machine Image is a first-class resource that captures the full state of a virtual machine at a point in time, including all attached disks, metadata, instance properties, service-accounts, and network configuration. It can be used to recreate identical VMs quickly or share a golden template across projects and organisations. See the official documentation for full details: https://cloud.google.com/compute/docs/machine-images **Terrafrom Mappings:** @@ -18,18 +17,34 @@ Official documentation: https://cloud.google.com/compute/docs/machine-images ## Possible Links +### [`gcp-cloud-kms-crypto-key-version`](/sources/gcp/Types/gcp-cloud-kms-crypto-key-version) + +A machine image may be protected with customer-managed encryption keys (CMEK); when this option is used it references the specific Cloud KMS Crypto Key Version that encrypts the image data. + ### [`gcp-compute-disk`](/sources/gcp/Types/gcp-compute-disk) -The machine image contains snapshots of every persistent disk that was attached to the source VM. 
Linking a machine image to its underlying disks allows Overmind to surface risks such as outdated disk encryption keys or insufficient replication settings. +The boot disk and any additional data disks attached to the source instance are incorporated into the machine image. When a new instance is created from the machine image, new persistent disks are instantiated from these definitions. + +### [`gcp-compute-image`](/sources/gcp/Types/gcp-compute-image) + +Within a machine image the boot disk is ultimately based on a Compute Image. Thus the machine image indirectly depends on, and records, the image that was used to build the source VM. ### [`gcp-compute-instance`](/sources/gcp/Types/gcp-compute-instance) -A machine image is normally created from, or used to instantiate, Compute Engine instances. Tracking this relationship lets you see which VMs were the origin of the image and which new VMs will inherit its configuration or vulnerabilities. +A machine image is created from a source Compute Instance and can in turn be used to launch new instances that replicate the captured configuration. ### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) -Network interface settings embedded in the machine image reference specific VPC networks. Connecting the image to those networks helps identify issues like deprecated network configurations that new VMs would inherit. +Network interface settings, including the VPC network IDs, are stored in the machine image so that any VM instantiated from it can attach to the same or equivalent networks. + +### [`gcp-compute-snapshot`](/sources/gcp/Types/gcp-compute-snapshot) + +Internally, Google Cloud may use snapshots of the instance’s disks when building the machine image. Conversely, users can export disks from a machine image as individual snapshots. ### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) -Each network interface in the machine image also specifies a subnetwork. 
Mapping this linkage highlights potential problems such as subnet IP exhaustion or mismatched IAM policies that could affect any instance launched from the image. +The machine image stores the exact subnetwork configuration of each NIC, allowing recreated VMs to provision themselves in the same subnetworks. + +### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) + +Service accounts attached to the source instance are recorded in the machine image; any VM launched from the image inherits those service account bindings unless overridden. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-network-endpoint-group.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-network-endpoint-group.md index 4377abfc..818ed55f 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-network-endpoint-group.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-network-endpoint-group.md @@ -3,7 +3,7 @@ title: GCP Compute Network Endpoint Group sidebar_label: gcp-compute-network-endpoint-group --- -A Google Cloud Compute Network Endpoint Group (NEG) is a collection of network endpoints—VM NICs, IP and port pairs, or fully-managed serverless targets such as Cloud Run and Cloud Functions—that you treat as a single backend for Google Cloud Load Balancing. By grouping endpoints into a NEG you can precisely steer traffic, perform health-checking, and scale back-end capacity without exposing individual resources. See the official documentation for full details: https://cloud.google.com/load-balancing/docs/negs/. +A Google Cloud Platform Compute Network Endpoint Group (NEG) is a collection of network endpoints—such as VM NICs, container pods, Cloud Run services, or Cloud Functions—that can be treated as a single backend target by Load Balancing and Service Directory. NEGs give fine-grained control over which exact endpoints receive traffic and allow serverless or hybrid back-ends to participate in layer-4/7 load balancing. 
See the official documentation for full details: https://cloud.google.com/load-balancing/docs/negs. **Terrafrom Mappings:** @@ -19,16 +19,16 @@ A Google Cloud Compute Network Endpoint Group (NEG) is a collection of network e ### [`gcp-cloud-functions-function`](/sources/gcp/Types/gcp-cloud-functions-function) -Serverless NEGs can reference a Cloud Functions function as their target, allowing the function to serve as a backend to an HTTP(S) load balancer. Overmind links a NEG to the Cloud Functions function it fronts. +A serverless NEG can reference a specific Cloud Function. Overmind therefore links the NEG to the underlying `gcp-cloud-functions-function` it represents, showing which function will receive traffic through the load balancer. ### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) -A VM-based or hybrid NEG is created inside a specific VPC network; all its endpoints must belong to that network. Overmind therefore relates the NEG to the corresponding `gcp-compute-network`. +Zonal and regional NEGs are created inside a particular VPC network. The link indicates the network context in which the endpoints exist, helping to surface routing and firewall considerations. ### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) -For regional VM NEGs, each endpoint is an interface on a VM residing in a particular subnetwork. Overmind surfaces this dependency by linking the NEG to each associated `gcp-compute-subnetwork`. +When a NEG is scoped to a subnetwork (for example for VM or GKE pod endpoints), Overmind links it to that subnetwork so you can trace how traffic enters specific IP ranges. ### [`gcp-run-service`](/sources/gcp/Types/gcp-run-service) -When a Cloud Run service is exposed through an external HTTP(S) load balancer, Google automatically creates a serverless NEG representing that service. Overmind links the NEG back to its originating `gcp-run-service`. +Serverless NEGs can point to Cloud Run services. 
This link shows which `gcp-run-service` is exposed through the NEG and subsequently through any HTTP(S) load balancer. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-network.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-network.md index 368d7337..64c63197 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-network.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-network.md @@ -3,8 +3,7 @@ title: GCP Compute Network sidebar_label: gcp-compute-network --- -A Google Cloud VPC (Virtual Private Cloud) network is a global, logically-isolated network that spans all regions within a Google Cloud project. It defines the IP address space, routing tables, firewall rules and connectivity options (for example, VPN, Cloud Interconnect and peering) for the resources that are attached to it. Each VPC network can contain one or more regional subnetworks that allocate IP addresses to individual resources. -For a full description see the official Google Cloud documentation: https://cloud.google.com/vpc/docs/vpc. +A Google Cloud Platform (GCP) Compute Network—commonly called a Virtual Private Cloud (VPC) network—provides the fundamental isolation and IP address space in which all other networking resources (subnetworks, routes, firewall rules, VPNs, etc.) are created. It is a global resource that spans all regions in a project, allowing workloads to communicate securely inside Google’s backbone and to the internet where required. For a full description see the official documentation: https://cloud.google.com/vpc/docs/vpc **Terrafrom Mappings:** @@ -20,8 +19,8 @@ For a full description see the official Google Cloud documentation: https://clou ### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) -A gcp-compute-network can be linked to another gcp-compute-network when the two are connected using VPC Network Peering. 
This relationship allows traffic to flow privately between the two VPC networks and is modelled in Overmind as a link between the respective network resources. +A Compute Network can be peered with, or shared to, another Compute Network. Overmind records these peer or shared-VPC relationships by linking one `gcp-compute-network` item to the other(s). ### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) -Each gcp-compute-network contains one or more gcp-compute-subnetwork resources. Overmind links a network to all of its subnetworks to show the hierarchy and to surface any risks that originate in the subnetwork configuration. +Every subnetwork is created inside exactly one VPC network. Overmind therefore links each `gcp-compute-subnetwork` back to its parent `gcp-compute-network`, and conversely shows the network’s collection of subnetworks. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-node-group.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-node-group.md index a5493f53..68faa42b 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-node-group.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-node-group.md @@ -3,8 +3,7 @@ title: GCP Compute Node Group sidebar_label: gcp-compute-node-group --- -A **Google Cloud Compute Node Group** is a logical grouping of one or more sole-tenant nodes – dedicated physical Compute Engine servers that are exclusively reserved for your projects. Node groups let you manage the life-cycle, scheduling policies and placement of these nodes as a single resource. They are typically used when you need hardware isolation for licensing or security reasons, or when you require predictable performance unaffected by noisy neighbours. Each node in the group is created from a Node Template that defines the machine type, CPU platform, labels and maintenance behaviour for the nodes. 
-Official documentation: https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes +A GCP Compute Node Group is a managed collection of sole-tenant nodes that are all created from the same node template. These groups allow you to provision and administer dedicated physical servers for your Compute Engine virtual machines, giving you fine-grained control over workload isolation, hardware affinity, licensing, and maintenance windows. For a detailed explanation, see the official Google Cloud documentation: https://cloud.google.com/compute/docs/nodes. **Terrafrom Mappings:** diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-node-template.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-node-template.md new file mode 100644 index 00000000..b44b560a --- /dev/null +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-node-template.md @@ -0,0 +1,23 @@ +--- +title: GCP Compute Node Template +sidebar_label: gcp-compute-node-template +--- + +A GCP Compute Node Template is a reusable description of the hardware configuration and host maintenance policies that will be applied to one or more Sole-Tenant Nodes in Google Cloud. The template specifies attributes such as CPU platform, virtual CPU count, memory, node affinity labels, and automatic restart behaviour. When you later create a Node Group, the group references a single Node Template, ensuring that every node in the group is created with an identical shape. 
+For a full specification of the resource, see the official Google Cloud documentation: https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes + +**Terraform Mappings:** + +- `google_compute_node_template.name` + +## Supported Methods + +- `GET`: Get GCP Compute Node Template by "gcp-compute-node-template-name" +- `LIST`: List all GCP Compute Node Template items +- ~~`SEARCH`~~ + +## Possible Links + +### [`gcp-compute-node-group`](/sources/gcp/Types/gcp-compute-node-group) + +A GCP Compute Node Group consumes a single Node Template. Overmind creates a link from a node group back to the template it references so that you can assess how changes to the template (for example, switching CPU platforms) will affect every node that belongs to the group. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-project.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-project.md index 11c1ab06..776fa9dd 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-project.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-project.md @@ -3,8 +3,18 @@ title: GCP Compute Project sidebar_label: gcp-compute-project --- -A Google Cloud project is the top-level, logical container for every resource you create in Google Cloud. It stores metadata such as billing configuration, IAM policy, APIs that are enabled, default network settings and quotas, and it provides an isolated namespace for resource names. In the context of Compute Engine, the project determines which VM instances, disks, firewalls and other compute resources can interact, and it is the unit against which most permissions and quotas are enforced. -Official documentation: https://cloud.google.com/resource-manager/docs/creating-managing-projects +A Google Cloud Project is the fundamental organisational unit in Google Cloud Platform. 
It acts as a logical container for all your Google Cloud resources, identity and access management (IAM) policies, APIs, quotas and billing information. Every resource – from virtual machines to service accounts – is created in exactly one project, and project-level settings (such as audit logging, labels and network host project status) govern how those resources operate. See the official documentation for full details: https://cloud.google.com/resource-manager/docs/creating-managing-projects + +**Terraform Mappings:** + +- `google_project.project_id` +- `google_compute_shared_vpc_host_project.project` +- `google_compute_shared_vpc_service_project.service_project` +- `google_compute_shared_vpc_service_project.host_project` +- `google_project_iam_binding.project` +- `google_project_iam_member.project` +- `google_project_iam_policy.project` +- `google_project_iam_audit_config.project` ## Supported Methods @@ -16,8 +26,8 @@ Official documentation: https://cloud.google.com/resource-manager/docs/creating- ### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) -Every service account is created inside a single project and inherits that project’s IAM policy unless overridden. Overmind links a `gcp-compute-project` to the `gcp-iam-service-account` resources it owns so that you can trace how credentials and permissions propagate within the project. +Service accounts are identities that live inside a project. Overmind links a gcp-iam-service-account to its parent gcp-compute-project to show which project owns and governs the credentials and IAM permissions of that service account. ### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) -Cloud Storage buckets live inside a project and consume that project’s quotas and billing account. Linking a `gcp-compute-project` to its `gcp-storage-bucket` resources lets you see which data stores are affected by changes to project-wide settings such as IAM roles or organisation policies. 
+Every Cloud Storage bucket is created within a specific project. Overmind establishes a link from a gcp-storage-bucket back to its gcp-compute-project so you can trace ownership, billing and IAM inheritance for the bucket. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-public-delegated-prefix.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-public-delegated-prefix.md index 32dd6369..79c4a254 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-public-delegated-prefix.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-public-delegated-prefix.md @@ -3,8 +3,8 @@ title: GCP Compute Public Delegated Prefix sidebar_label: gcp-compute-public-delegated-prefix --- -A Google Cloud Compute Public Delegated Prefix represents a block of publicly-routable IPv4 or IPv6 addresses that Google has reserved and delegated to your project in a given region. Once the prefix exists you can further subdivide it into smaller delegated prefixes or assign individual addresses to resources such as VM instances, forwarding rules, or load balancers. Public Delegated Prefixes enable you to bring your own IP space, ensure predictable address allocation and control how traffic enters your network. -Official documentation: https://docs.cloud.google.com/vpc/docs/create-pdp +A Public Delegated Prefix is a regional IPv4 or IPv6 address range that you reserve from Google Cloud and can then subdivide and delegate to other projects, VPC networks, or Private Service Connect service attachments. It allows you to keep ownership of the parent prefix while giving consumers controlled use of sub-prefixes, simplifying address management and avoiding manual peering or routing configurations. 
+For full details, see the official documentation: https://cloud.google.com/vpc/docs/create-pdp **Terrafrom Mappings:** @@ -20,8 +20,8 @@ Official documentation: https://docs.cloud.google.com/vpc/docs/create-pdp ### [`gcp-cloud-resource-manager-project`](/sources/gcp/Types/gcp-cloud-resource-manager-project) -A Public Delegated Prefix is created within, and therefore belongs to, a specific Cloud Resource Manager project. The project provides billing, IAM, and quota context for the prefix. +This prefix belongs to and is created within a specific Google Cloud project; the link points from the Public Delegated Prefix to its parent project. ### [`gcp-compute-public-delegated-prefix`](/sources/gcp/Types/gcp-compute-public-delegated-prefix) -A Public Delegated Prefix can itself be the parent of smaller delegated prefixes; these child prefixes are represented by additional `gcp-compute-public-delegated-prefix` resources that reference the parent block. +A parent Public Delegated Prefix can be linked to child delegated sub-prefixes (or vice-versa) to represent hierarchy and inheritance of the IP space. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-region-backend-service.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-region-backend-service.md deleted file mode 100644 index 4c64e0d0..00000000 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-region-backend-service.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: GCP Compute Region Backend Service -sidebar_label: gcp-compute-region-backend-service ---- - -A **GCP Compute Region Backend Service** is a regional load-balancing resource that defines how traffic is distributed to one or more back-end targets (such as Managed Instance Groups or Network Endpoint Groups) that all live in the same Google Cloud region. 
The service specifies settings such as the load-balancing protocol (HTTP, HTTPS, TCP, SSL etc.), session affinity, connection draining, health checks, fail-over behaviour and (optionally) Cloud Armor security policies. Regional backend services are used by Internal HTTP(S) Load Balancers, Internal TCP/UDP Load Balancers and several other Google Cloud load-balancing products. -Official documentation: https://cloud.google.com/compute/docs/reference/rest/v1/regionBackendServices - -**Terrafrom Mappings:** - -- `google_compute_region_backend_service.name` - -## Supported Methods - -- `GET`: Get GCP Compute Region Backend Service by "gcp-compute-region-backend-service-name" -- `LIST`: List all GCP Compute Region Backend Service items -- ~~`SEARCH`~~ - -## Possible Links - -### [`gcp-compute-instance-group`](/sources/gcp/Types/gcp-compute-instance-group) - -A region backend service lists one or more Managed Instance Groups (or unmanaged instance groups) as its back-ends; the load balancer distributes traffic across the VMs contained in these instance groups. - -### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) - -For internal load balancing, the region backend service is tied to a specific VPC network. All back-ends must reside in subnets that belong to this network and traffic from the forwarding rule is delivered through it. - -### [`gcp-compute-security-policy`](/sources/gcp/Types/gcp-compute-security-policy) - -A backend service can optionally reference a Cloud Armor security policy. When attached, that policy governs and filters incoming requests before they reach the back-end targets. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-region-commitment.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-region-commitment.md index 345b2e62..385724cc 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-region-commitment.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-region-commitment.md @@ -3,7 +3,7 @@ title: GCP Compute Region Commitment sidebar_label: gcp-compute-region-commitment --- -A GCP Compute Region Commitment is an agreement in which you purchase a predefined amount of vCPU, memory or GPU capacity in a specific region for a fixed term (one or three years) in return for a reduced hourly price. Commitments are applied automatically to matching usage within the chosen region, helping to lower running costs while guaranteeing a baseline level of capacity. For a detailed explanation of the feature, see the official documentation: https://docs.cloud.google.com/compute/docs/reference/rest/v1/regionCommitments/list. +A Compute Region Commitment in Google Cloud Platform (GCP) represents a contractual agreement to purchase a certain amount of vCPU, memory, GPUs or local SSD capacity within a specific region for one or three years. In exchange for this up-front commitment, you receive a discounted hourly rate for the covered resources, regardless of whether the capacity is actually in use. Commitments are created per-project and per-region, and the discount automatically applies to any eligible VM instances running in that region. For full details see the official documentation: https://cloud.google.com/compute/docs/instances/signing-up-committed-use-discounts **Terrafrom Mappings:** @@ -19,4 +19,4 @@ A GCP Compute Region Commitment is an agreement in which you purchase a predefin ### [`gcp-compute-reservation`](/sources/gcp/Types/gcp-compute-reservation) -A region commitment can be consumed by one or more compute reservations in the same region. 
When a reservation launches virtual machine instances, the resources they use are first drawn from any applicable commitments so that the discounted commitment pricing is applied automatically. +Reservations and commitments often work together: a reservation guarantees that capacity is available, while a commitment provides a discount for that capacity. When Overmind discovers a region commitment it links it to any compute reservations in the same project and region so you can see both the cost commitment and the capacity guarantee in one place. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-regional-instance-group-manager.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-regional-instance-group-manager.md new file mode 100644 index 00000000..f291a1ce --- /dev/null +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-regional-instance-group-manager.md @@ -0,0 +1,39 @@ +--- +title: GCP Compute Regional Instance Group Manager +sidebar_label: gcp-compute-regional-instance-group-manager +--- + +A Google Cloud Compute Regional Instance Group Manager (RIGM) is a control plane resource that creates, deletes, updates and monitors a homogeneous set of virtual machine (VM) instances that are distributed across two or more zones within the same region. By using a RIGM you gain automated rolling updates, proactive auto-healing and the ability to spread workload across zones for higher availability. 
+Official documentation: https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances#regional + +**Terraform Mappings:** + +- `google_compute_region_instance_group_manager.name` + +## Supported Methods + +- `GET`: Get GCP Compute Regional Instance Group Manager by "gcp-compute-regional-instance-group-manager-name" +- `LIST`: List all GCP Compute Regional Instance Group Manager items +- ~~`SEARCH`~~ + +## Possible Links + +### [`gcp-compute-autoscaler`](/sources/gcp/Types/gcp-compute-autoscaler) + +A regional instance group manager can be linked to an Autoscaler resource that dynamically adjusts the number of VM instances in the managed group based on load, schedules or custom metrics. + +### [`gcp-compute-health-check`](/sources/gcp/Types/gcp-compute-health-check) + +Health checks are referenced by the RIGM to perform auto-healing; instances that fail the configured health check are recreated automatically. + +### [`gcp-compute-instance-group`](/sources/gcp/Types/gcp-compute-instance-group) + +The RIGM creates and controls a Regional Managed Instance Group. This underlying instance group is where the actual VM instances live and where traffic is balanced. + +### [`gcp-compute-instance-template`](/sources/gcp/Types/gcp-compute-instance-template) + +Every RIGM points to an Instance Template that defines the machine type, boot disk, metadata and other properties used when new VM instances are instantiated. + +### [`gcp-compute-target-pool`](/sources/gcp/Types/gcp-compute-target-pool) + +For legacy network load balancing, a RIGM can register its instances with a Target Pool so that traffic from a network load balancer is distributed across the managed instances. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-reservation.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-reservation.md index 446cca00..1fe5fe3e 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-reservation.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-reservation.md @@ -3,7 +3,7 @@ title: GCP Compute Reservation sidebar_label: gcp-compute-reservation --- -A GCP Compute Reservation is a zonal reservation of Compute Engine capacity that guarantees the availability of a specific machine type (and, optionally, attached GPUs, local SSDs, etc.) for when you later launch virtual machine (VM) instances. By pre-allocating vCPU and memory resources, reservations help you avoid capacity-related scheduling failures in busy zones and can be shared across projects inside the same organisation if desired. See the official documentation for full details: https://docs.cloud.google.com/compute/docs/instances/reservations-overview. +A GCP Compute Reservation is a zonal capacity-planning resource that lets you pre-allocate Compute Engine virtual machine capacity so that it is always available when your workloads need it. By creating a reservation you can guarantee that the required number and type of vCPUs, memory and accelerators are held for your project in a particular zone, avoiding scheduling failures during peaks or regional outages. For full details, see the official Google Cloud documentation: https://cloud.google.com/compute/docs/instances/reserving-zonal-resources **Terrafrom Mappings:** @@ -19,4 +19,4 @@ A GCP Compute Reservation is a zonal reservation of Compute Engine capacity that ### [`gcp-compute-region-commitment`](/sources/gcp/Types/gcp-compute-region-commitment) -Capacity held by a reservation counts against any existing regional commitment in the same region. 
By linking a reservation to its corresponding `gcp-compute-region-commitment`, you can see whether the reserved resources are already discounted or whether additional commitments may be required. +Reservations guarantee capacity, while regional commitments provide committed-use discounts for that capacity. A reservation created in a zone may be covered by, or contribute to the utilisation of, a regional commitment in the same region, so analysing the commitment alongside the reservation reveals both availability and cost-optimisation aspects of the deployment. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-route.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-route.md index 3cbe6af4..e4031983 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-route.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-route.md @@ -3,8 +3,8 @@ title: GCP Compute Route sidebar_label: gcp-compute-route --- -A GCP Compute Route is an entry in the routing table of a Google Cloud VPC network that determines how packets are forwarded from its subnets. Each route specifies a destination CIDR block and a next hop (for example, an instance, VPN tunnel, gateway, or peered network). Custom routes can be created to direct traffic through specific appliances, across VPNs, or towards on-premises networks, while system-generated routes provide default Internet and subnet behaviour. -See the official documentation for full details: https://cloud.google.com/vpc/docs/routes +A **GCP Compute Route** is a routing rule attached to a Google Cloud Virtual Private Cloud (VPC) network that determines how packets are forwarded from instances towards their destinations. Each route contains a destination CIDR block and a single next-hop target, such as an instance, VPN tunnel, gateway or internal load-balancer forwarding rule. Routes can be either system-generated (e.g. 
subnet and peering routes) or user-defined to control custom traffic flows, enforce security boundaries or implement hybrid-connectivity scenarios. +Official documentation: https://cloud.google.com/vpc/docs/routes **Terrafrom Mappings:** @@ -14,18 +14,22 @@ See the official documentation for full details: https://cloud.google.com/vpc/do - `GET`: Get a gcp-compute-route by its "name" - `LIST`: List all gcp-compute-route -- ~~`SEARCH`~~ +- `SEARCH`: Search for routes by network tag. The query is a plain network tag name. ## Possible Links +### [`gcp-compute-forwarding-rule`](/sources/gcp/Types/gcp-compute-forwarding-rule) + +A route may specify an internal TCP/UDP load balancer (ILB) forwarding rule as its `nextHopIlb`, so the route is linked to the forwarding rule that receives the traffic. + ### [`gcp-compute-instance`](/sources/gcp/Types/gcp-compute-instance) -If `next_hop_instance` is set, the route forwards matching traffic to the specified VM instance. Overmind therefore links the route to that Compute Instance, as deleting or modifying the instance will break the route. +When `nextHopInstance` is used, the route points to a specific Compute Engine instance that acts as a gateway. Instances are therefore linked as potential next hops for the route. ### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) -Every route belongs to exactly one VPC network, referenced in the `network` field. The network’s routing table is the context in which the route operates, so Overmind links the route to its parent network. +Every route is created inside exactly one VPC network, referenced by the `network` field. The relationship ties the route to the network whose traffic it influences. ### [`gcp-compute-vpn-tunnel`](/sources/gcp/Types/gcp-compute-vpn-tunnel) -When `next_hop_vpn_tunnel` is used, the route sends traffic into a specific VPN tunnel. 
This dependency is captured by linking the route to the corresponding Compute VPN Tunnel, since changes to the tunnel affect the route’s viability. +If `nextHopVpnTunnel` is set, the route forwards matching traffic into a Cloud VPN tunnel. The route is consequently linked to the VPN tunnel resource it targets. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-router.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-router.md index 107de7f2..fcb42c21 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-router.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-router.md @@ -3,8 +3,7 @@ title: GCP Compute Router sidebar_label: gcp-compute-router --- -A Google Cloud **Compute Router** is a regional, fully distributed control-plane resource that learns and exchanges dynamic routes between your Virtual Private Cloud (VPC) network and on-premises or partner networks. It implements the Border Gateway Protocol (BGP) on your behalf, allowing Cloud VPN tunnels and Cloud Interconnect attachments (VLANs) to advertise and receive custom routes without manual updates. Compute Routers are attached to a specific VPC network and region, but they propagate learned routes across the entire VPC through Google’s global backbone. -For a comprehensive overview, refer to the official Google Cloud documentation: https://cloud.google.com/network-connectivity/docs/router/how-to/creating-routers +A Google Cloud Compute Router is a fully distributed and managed Border Gateway Protocol (BGP) routing service that dynamically exchanges routes between your Virtual Private Cloud (VPC) network and on-premises or cloud networks connected via VPN or Cloud Interconnect. By advertising only the necessary prefixes, it enables highly available, scalable, and policy-driven traffic engineering without the need to run or maintain your own routing appliances. 
See the official documentation for full details: https://cloud.google.com/network-connectivity/docs/router **Terrafrom Mappings:** @@ -20,12 +19,12 @@ For a comprehensive overview, refer to the official Google Cloud documentation: ### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) -Every Compute Router is created inside a particular VPC network; the router exchanges routes on behalf of that network. Therefore, a gcp-compute-router will always have an owning gcp-compute-network. +A Compute Router is created inside a specific VPC network and advertises routes for that network; therefore it is directly linked to the gcp-compute-network resource in which it resides. ### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) -Subnets define the IP ranges that the Compute Router ultimately advertises (or learns routes for) within the VPC. Routes learned or propagated by the router directly affect traffic flowing to and from gcp-compute-subnetwork resources. +Subnets within the parent VPC network can have their routes propagated or learned via the Compute Router, especially when using dynamic routing modes; this establishes an indirect but important relationship with each gcp-compute-subnetwork. ### [`gcp-compute-vpn-tunnel`](/sources/gcp/Types/gcp-compute-vpn-tunnel) -Compute Routers terminate the BGP sessions used by Cloud VPN (HA VPN) tunnels. Each gcp-compute-vpn-tunnel can be configured to peer with a Compute Router interface, enabling dynamic route exchange between the tunnel and the VPC. +When Cloud VPN is configured in dynamic mode, the VPN tunnel relies on a Compute Router to exchange BGP routes with the peer gateway, making the tunnel dependent on, and logically linked to, the corresponding gcp-compute-vpn-tunnel resource. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-security-policy.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-security-policy.md index e4013647..60cb8cec 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-security-policy.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-security-policy.md @@ -3,9 +3,8 @@ title: GCP Compute Security Policy sidebar_label: gcp-compute-security-policy --- -A GCP Compute Security Policy represents a Cloud Armor security policy. It contains an ordered set of layer-7 filtering rules that allow, deny, or rate-limit traffic directed at a load balancer or backend service. By attaching a security policy you can enforce web-application-firewall (WAF) protections, mitigate DDoS attacks, and define custom match conditions—all without changing your application code. Overmind ingests these resources so you can understand how proposed changes will affect the exposure and resilience of your workloads before you deploy them. - -For full details see the official Google Cloud documentation: https://cloud.google.com/armor/docs/security-policy-concepts +A GCP Compute Security Policy represents a Google Cloud Armor security policy that you configure to protect your applications and services from malicious or unwanted traffic. Each policy is made up of an ordered list of rules that allow, deny, or rate-limit requests based on layer-3/4 characteristics or custom layer-7 expressions. Security policies can be associated with external Application Load Balancers, Cloud CDN, and other HTTP(S)-based backend services, enabling centralised, declarative control over inbound traffic behaviour. 
+For full details, see the official Google documentation: https://cloud.google.com/compute/docs/reference/rest/v1/securityPolicies **Terrafrom Mappings:** diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-snapshot.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-snapshot.md index 2dba8fa9..bc72cc89 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-snapshot.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-snapshot.md @@ -3,8 +3,8 @@ title: GCP Compute Snapshot sidebar_label: gcp-compute-snapshot --- -A GCP Compute Snapshot is a point-in-time, incremental backup of a Compute Engine persistent disk. Snapshots allow you to restore data following accidental deletion, corruption, or regional outage, and can also be used to create new disks in the same or a different project/region. Because snapshots are incremental, only the blocks that have changed since the last snapshot are stored, reducing cost and network egress. Snapshots can be scheduled, encrypted with customer-managed keys, and shared across projects through Cloud Storage-backed snapshot storage. -Official documentation: https://cloud.google.com/compute/docs/disks/snapshots +A **GCP Compute Snapshot** is a point-in-time, incremental backup of a Compute Engine persistent or regional disk. Snapshots can be stored in multiple regions, encrypted with customer-managed keys, and used to create new disks, thereby providing a simple mechanism for backup, disaster recovery and environment cloning. 
+Official documentation: https://cloud.google.com/compute/docs/disks/create-snapshots **Terrafrom Mappings:** @@ -18,10 +18,14 @@ Official documentation: https://cloud.google.com/compute/docs/disks/snapshots ## Possible Links +### [`gcp-cloud-kms-crypto-key-version`](/sources/gcp/Types/gcp-cloud-kms-crypto-key-version) + +If the snapshot is encrypted with a customer-managed encryption key (CMEK), it references the specific Cloud KMS CryptoKeyVersion that holds the key material. Overmind links the snapshot to that key version so you can trace encryption dependencies and confirm key rotation policies. + ### [`gcp-compute-disk`](/sources/gcp/Types/gcp-compute-disk) -A snapshot is created from a specific persistent disk; the link lets you trace a snapshot back to the disk it protects, or discover all snapshots derived from that disk. +Every snapshot originates from a source disk. This link shows which Compute Engine disk (zonal or regional) was used to create the snapshot, letting you assess blast radius and recovery workflows. ### [`gcp-compute-instant-snapshot`](/sources/gcp/Types/gcp-compute-instant-snapshot) -An instant snapshot can later be converted into a standard snapshot, or serve as an intermediary during a snapshot operation. This link shows lineage between an instant snapshot and the resulting persistent snapshot resource. +An instant snapshot is a fast, crash-consistent capture that can later be converted into a regular snapshot. When such a conversion occurs, Overmind links the resulting standard snapshot to its originating instant snapshot, giving visibility into the lineage of your backups. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-ssl-certificate.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-ssl-certificate.md index 350c08be..bcdd663c 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-ssl-certificate.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-ssl-certificate.md @@ -3,8 +3,7 @@ title: GCP Compute Ssl Certificate sidebar_label: gcp-compute-ssl-certificate --- -A GCP Compute SSL Certificate is a regional resource that stores the public and private key material required to terminate TLS for Google Cloud load balancers and proxy targets. Once created, the certificate can be attached to target HTTPS proxies (for external HTTP(S) Load Balancing) or target SSL proxies (for SSL Proxy Load Balancing) so that incoming connections can be securely encrypted in transit. Certificate data is provided by the user (self-managed) and can later be rotated or deleted as required. -For full details see the Google Cloud documentation: https://cloud.google.com/compute/docs/reference/rest/v1/sslCertificates +A **Google Compute SSL Certificate** represents an SSL certificate resource that can be attached to Google Cloud load-balancers to provide encrypted (HTTPS or SSL proxy) traffic termination. It stores the public certificate and its corresponding private key, enabling Compute Engine and Cloud Load Balancing to serve traffic securely on the specified domains. Certificates can be self-managed (you upload the PEM-encoded certificate and key) or Google-managed (Google provisions and renews them automatically). Full details are available in the official documentation: [Google Compute Engine – SSL certificates](https://cloud.google.com/compute/docs/reference/rest/v1/sslCertificates). 
**Terrafrom Mappings:** diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-ssl-policy.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-ssl-policy.md index 020ac502..f0b5c098 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-ssl-policy.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-ssl-policy.md @@ -3,8 +3,8 @@ title: GCP Compute Ssl Policy sidebar_label: gcp-compute-ssl-policy --- -Google Cloud SSL policies allow you to define which TLS protocol versions and cipher suites can be used when clients negotiate secure connections with Google Cloud load balancers. By attaching an SSL policy to an HTTPS, SSL, or TCP proxy load balancer, you can enforce modern cryptographic standards, disable deprecated protocols, or maintain compatibility with legacy clients, thereby controlling the security posture of your services. Overmind can surface potential risks—such as the continued availability of weak ciphers—before you deploy. -For more information, see the official Google Cloud documentation: [SSL policies overview](https://cloud.google.com/compute/docs/reference/rest/v1/sslPolicies/get). +A Google Cloud Compute **SSL Policy** specifies the minimum TLS protocol version and the set of supported cipher suites that HTTPS or SSL-proxy load balancers are allowed to use when negotiating SSL/TLS with clients. By attaching an SSL Policy to a target HTTPS proxy or target SSL proxy, you can enforce stronger security standards, ensure compliance, and gradually deprecate outdated encryption algorithms without disrupting traffic. +For detailed information, refer to the official Google Cloud documentation: https://cloud.google.com/load-balancing/docs/ssl-policies-concepts. 
**Terrafrom Mappings:** diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-subnetwork.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-subnetwork.md index 00e54145..229d73bf 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-subnetwork.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-subnetwork.md @@ -3,7 +3,7 @@ title: GCP Compute Subnetwork sidebar_label: gcp-compute-subnetwork --- -A GCP Compute Subnetwork is a regional segment of a Virtual Private Cloud (VPC) network that defines an IP address range from which resources such as VM instances, GKE nodes, and internal load balancers receive their internal IP addresses. Each subnetwork is bound to a single region, can be configured for automatic or custom IP allocation, and supports features such as Private Google Access and flow logs. For full details see the official Google Cloud documentation: https://cloud.google.com/vpc/docs/subnets +A GCP Compute Subnetwork is a regional, layer-3 virtual network segment that belongs to a single Google Cloud VPC network. It defines an internal RFC 1918 IP address range (primary and optional secondary ranges) from which VM instances, containers and other resources receive their internal IPs. Within each subnetwork you can enable or disable Private Google Access, set flow-log export settings, IPv6 configurations, and control access through firewall rules inherited from the parent VPC. For a comprehensive overview refer to the official documentation: https://cloud.google.com/vpc/docs/subnets. **Terrafrom Mappings:** @@ -19,4 +19,8 @@ A GCP Compute Subnetwork is a regional segment of a Virtual Private Cloud (VPC) ### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) -Every subnetwork is a child resource of a VPC network. The `gcp-compute-network` item represents that parent VPC; a single network can contain multiple subnetworks, while each subnetwork is associated with exactly one network. 
+Every subnetwork is created inside exactly one VPC network. This link represents that parent–child relationship, allowing Overmind to show which VPC a particular subnetwork belongs to and, conversely, to enumerate all subnetworks within a given VPC. + +### [`gcp-compute-public-delegated-prefix`](/sources/gcp/Types/gcp-compute-public-delegated-prefix) + +A public delegated prefix can be assigned to a subnetwork so that resources inside the subnet can use public IPv4 addresses from that prefix. This link highlights which delegated prefixes are associated with, or routed through, the subnetwork, helping users trace external IP allocations and their exposure. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-target-http-proxy.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-target-http-proxy.md index 980032c0..612ebcb2 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-target-http-proxy.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-target-http-proxy.md @@ -3,8 +3,8 @@ title: GCP Compute Target Http Proxy sidebar_label: gcp-compute-target-http-proxy --- -A Google Cloud Compute Target HTTP Proxy acts as the intermediary between a forwarding rule and your defined URL map. When an incoming request reaches the load balancer, the proxy evaluates the host and path rules in the URL map and then forwards the request to the selected backend service. In essence, it is the control point that translates external client traffic into internal service calls, supporting features such as global anycast IPs, health-checking, and intelligent request routing for high-availability web applications. -For further information, see the official documentation: https://cloud.google.com/compute/docs/reference/rest/v1/targetHttpProxies +A **GCP Compute Target HTTP Proxy** routes incoming HTTP requests to the appropriate backend service based on rules defined in a URL map. 
It terminates the client connection, consults the associated `google_compute_url_map`, and then forwards traffic to the selected backend (for example, a backend service or serverless NEG). Target HTTP proxies are a key component of Google Cloud external HTTP(S) Load Balancing. +See the official documentation for full details: https://cloud.google.com/load-balancing/docs/target-proxies#target_http_proxy **Terrafrom Mappings:** @@ -15,3 +15,9 @@ For further information, see the official documentation: https://cloud.google.co - `GET`: Get a gcp-compute-target-http-proxy by its "name" - `LIST`: List all gcp-compute-target-http-proxy - ~~`SEARCH`~~ + +## Possible Links + +### [`gcp-compute-url-map`](/sources/gcp/Types/gcp-compute-url-map) + +A Target HTTP Proxy must reference exactly one URL map. Overmind uses this link to trace from the proxy to the URL map that defines its routing rules, enabling you to understand and surface any risks associated with misconfigured path matchers or backend services. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-target-https-proxy.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-target-https-proxy.md index 1f9a673c..09ffa4b7 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-target-https-proxy.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-target-https-proxy.md @@ -3,8 +3,8 @@ title: GCP Compute Target Https Proxy sidebar_label: gcp-compute-target-https-proxy --- -A **Target HTTPS Proxy** is a global Google Cloud resource that terminates incoming HTTPS traffic and forwards the decrypted requests to the appropriate backend service according to a referenced URL map. It is a central component of the External HTTP(S) Load Balancer, holding one or more SSL certificates that are presented to clients during the TLS handshake and optionally enforcing an SSL policy that dictates the allowed protocol versions and cipher suites. 
-Official documentation: https://docs.cloud.google.com/sdk/gcloud/reference/compute/target-https-proxies +A **Target HTTPS Proxy** is a global Google Cloud resource that terminates incoming HTTPS connections at the edge of Google’s network, presents one or more SSL certificates, and then forwards the decrypted requests to the appropriate backend service according to a URL map. In essence, it is the control point that binds SSL certificates, SSL policies, and URL maps together to enable HTTPS traffic on an External HTTP(S) Load Balancer. +For full details see the official documentation: https://cloud.google.com/compute/docs/reference/rest/v1/targetHttpsProxies **Terrafrom Mappings:** @@ -20,12 +20,12 @@ Official documentation: https://docs.cloud.google.com/sdk/gcloud/reference/compu ### [`gcp-compute-ssl-certificate`](/sources/gcp/Types/gcp-compute-ssl-certificate) -The proxy references one or more SSL certificates that are served to clients when they initiate an HTTPS connection. These certificates are specified in the `ssl_certificates` field of the target HTTPS proxy. +A Target HTTPS Proxy references one or more SSL certificates that it presents to clients during the TLS handshake. Overmind links these certificates so you can track which certificate is in use and assess expiry or misconfiguration risks. ### [`gcp-compute-ssl-policy`](/sources/gcp/Types/gcp-compute-ssl-policy) -An optional SSL policy can be attached to the proxy to control minimum TLS versions, allowed cipher suites, and other security settings. The policy is linked through the `ssl_policy` attribute. +An optional SSL policy can be attached to a Target HTTPS Proxy to enforce minimum TLS versions and cipher suites. Overmind exposes this link to highlight the security posture enforced on the proxy. 
### [`gcp-compute-url-map`](/sources/gcp/Types/gcp-compute-url-map) -Each target HTTPS proxy must reference exactly one URL map, which defines the routing rules that determine which backend service receives each request after SSL/TLS termination. +Every Target HTTPS Proxy must point to exactly one URL map, which defines how incoming requests are routed to backend services. Overmind links the URL map so you can trace the full request path and evaluate routing risks before deployment. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-target-pool.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-target-pool.md index b7b49d1b..1ee2983a 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-target-pool.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-target-pool.md @@ -3,7 +3,7 @@ title: GCP Compute Target Pool sidebar_label: gcp-compute-target-pool --- -A Compute Target Pool is a regional resource that groups multiple VM instances so they can receive incoming traffic from legacy network TCP load balancers or be used as failover targets for forwarding rules. Target pools can also be linked to one or more Health Checks to determine the availability of their member instances. Official documentation: https://docs.cloud.google.com/load-balancing/docs/target-pools +A Google Cloud Compute Target Pool is a regional grouping of VM instances that acts as the backend for the legacy TCP/UDP network load balancer. The pool defines which instances receive traffic, the optional session-affinity policy, the associated health checks that determine instance health, and an optional fail-over target pool for backup. 
See the official documentation for full details: https://cloud.google.com/compute/docs/reference/rest/v1/targetPools **Terrafrom Mappings:** @@ -19,12 +19,12 @@ A Compute Target Pool is a regional resource that groups multiple VM instances s ### [`gcp-compute-health-check`](/sources/gcp/Types/gcp-compute-health-check) -A target pool may reference one or more Health Checks. These checks are executed against each instance in the pool to decide whether the instance should receive traffic. Overmind links a target pool to any health check resources it is configured to use. +A target pool may reference one or more health checks through its `healthChecks` field. These health checks are used by Google Cloud to probe the instances in the pool and decide whether traffic should be sent to a particular VM. ### [`gcp-compute-instance`](/sources/gcp/Types/gcp-compute-instance) -Member virtual machines are registered in the target pool. Overmind establishes links from the target pool to every compute instance that is currently part of the pool. +Each target pool contains a list of VM instances (`instances` field) that will receive load-balanced traffic. Overmind links the pool to every instance it contains. ### [`gcp-compute-target-pool`](/sources/gcp/Types/gcp-compute-target-pool) -Target pools can appear as dependencies of other target pools in scenarios such as cross-region failover configurations. Overmind represents these intra-type relationships with links between the relevant target pool resources. +A target pool can specify another target pool as its `backupPool` to provide fail-over capacity, and it can itself be referenced as a backup by other pools. Overmind surfaces these peer-to-peer relationships between target pools. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-url-map.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-url-map.md index 40025ed3..339a1581 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-url-map.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-url-map.md @@ -3,8 +3,8 @@ title: GCP Compute Url Map sidebar_label: gcp-compute-url-map --- -A Google Cloud Platform (GCP) Compute URL Map is the routing table used by an External or Internal HTTP(S) Load Balancer. It evaluates the host and path of each incoming request and, according to the host rules and path matchers you configure, forwards that request to the appropriate backend service or backend bucket. In other words, the URL map determines “which traffic goes where” once it reaches the load balancer, making it a critical part of any web-facing deployment. -Official documentation: https://cloud.google.com/compute/docs/reference/rest/v1/urlMaps +A Google Cloud Platform (GCP) Compute URL Map is a routing table used by HTTP(S) load balancers to decide where an incoming request should be sent. It matches the request’s host name and URL path to a set of rules and then forwards the traffic to the appropriate backend service or backend bucket. URL Maps make it possible to implement advanced traffic-management patterns such as domain-based and path-based routing, default fall-back targets, and traffic migration between versions of a service. +Official documentation: https://cloud.google.com/load-balancing/docs/url-map-concepts **Terrafrom Mappings:** @@ -20,4 +20,4 @@ Official documentation: https://cloud.google.com/compute/docs/reference/rest/v1/ ### [`gcp-compute-backend-service`](/sources/gcp/Types/gcp-compute-backend-service) -Each URL map references one or more backend services in its path-matcher rules. 
Overmind therefore creates outbound links from a `gcp-compute-url-map` to every `gcp-compute-backend-service` that might receive traffic, allowing you to trace the full request path and identify downstream risks. +A URL Map points to one or more backend services as its routing targets. Each rule in the map specifies which `gcp-compute-backend-service` should receive the traffic that matches the rule’s host and path conditions. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-vpn-gateway.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-vpn-gateway.md index b51f2540..9e892355 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-vpn-gateway.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-vpn-gateway.md @@ -3,8 +3,8 @@ title: GCP Compute Vpn Gateway sidebar_label: gcp-compute-vpn-gateway --- -A Google Cloud Compute VPN Gateway (specifically, the High-Availability VPN Gateway) provides a managed, highly available IPsec VPN endpoint that allows encrypted traffic to flow between a Google Cloud Virtual Private Cloud (VPC) network and an on-premises network or another cloud provider. By deploying a VPN Gateway you can create site-to-site tunnels that automatically scale their throughput and offer automatic fail-over across two interfaces in different zones within the same region. -For full details see the official documentation: https://cloud.google.com/network-connectivity/docs/vpn/concepts/overview +A GCP Compute High-Availability (HA) VPN Gateway is a regional resource that provides secure, encrypted IPsec tunnels between a Google Cloud Virtual Private Cloud (VPC) network and peer networks (on-premises data centres, other clouds, or different GCP projects). The gateway offers redundancy by using two external interfaces, each of which can establish a pair of active tunnels, ensuring traffic continues to flow even during maintenance events or failures. 
Because the gateway is tightly coupled to a specific VPC network and region, it influences routing, firewall behaviour and overall network reachability. +See the official Google Cloud documentation for full details: https://cloud.google.com/network-connectivity/docs/vpn/concepts/overview **Terrafrom Mappings:** @@ -20,4 +20,4 @@ For full details see the official documentation: https://cloud.google.com/networ ### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) -An HA VPN Gateway is created inside, and tightly bound to, a specific VPC network and region. It inherits the network’s subnet routes and advertises them across its VPN tunnels, and all incoming VPN traffic is delivered into that network. +Each HA VPN Gateway is created inside a single VPC network. Linking the gateway to its `gcp-compute-network` allows Overmind to trace which IP ranges, routes and firewall rules may be affected by the gateway’s tunnels, and to evaluate the blast radius of any proposed changes to either resource. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-vpn-tunnel.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-vpn-tunnel.md index ddd9908b..af31b75c 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-vpn-tunnel.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-compute-vpn-tunnel.md @@ -3,8 +3,8 @@ title: GCP Compute Vpn Tunnel sidebar_label: gcp-compute-vpn-tunnel --- -A **GCP Compute VPN Tunnel** represents a single IPSec tunnel that is part of a Cloud VPN connection. It contains the parameters needed to establish and maintain the encrypted link – peer IP address, shared secret, IKE version, traffic selectors, and the attachment to either a Classic VPN gateway or an HA VPN gateway. In most deployments two or more tunnels are created for redundancy. 
-For the full specification see the official Google documentation: https://cloud.google.com/compute/docs/reference/rest/v1/vpnTunnels +A Compute VPN Tunnel is the logical link that carries encrypted IPsec traffic between Google Cloud and another network. It is created on top of a Google Cloud VPN Gateway and points at a peer gateway, defining parameters such as IKE version, shared secrets and traffic selectors. Each tunnel secures packets as they traverse the public Internet, allowing workloads in a VPC network to communicate privately with on-premises resources, other clouds, or additional GCP projects. +Official documentation: https://cloud.google.com/compute/docs/reference/rest/v1/vpnTunnels **Terrafrom Mappings:** @@ -20,12 +20,12 @@ For the full specification see the official Google documentation: https://cloud. ### [`gcp-compute-external-vpn-gateway`](/sources/gcp/Types/gcp-compute-external-vpn-gateway) -When the tunnel terminates on equipment outside Google Cloud, the `externalVpnGateway` field is set. This creates a relationship between the VPN tunnel and the corresponding External VPN Gateway resource. +A VPN tunnel targets an External VPN Gateway when its peer endpoint resides outside Google Cloud. The tunnel resource holds the reference that binds the Google side of the connection to the defined external gateway interface. ### [`gcp-compute-router`](/sources/gcp/Types/gcp-compute-router) -If dynamic routing is enabled (HA VPN or dynamic Classic VPN), the tunnel is attached to a Cloud Router, which advertises and learns routes via BGP. The `router` field therefore links the VPN tunnel to a specific Cloud Router. +For dynamic (BGP) routing, a VPN tunnel is attached to a Cloud Router. The router exchanges routes with the peer across the tunnel, advertising VPC prefixes and learning remote prefixes. 
### [`gcp-compute-vpn-gateway`](/sources/gcp/Types/gcp-compute-vpn-gateway) -Every tunnel belongs to a Google-managed VPN gateway (`targetVpnGateway` for Classic VPN or `vpnGateway` for HA VPN). This link captures that parent-child relationship, allowing Overmind to evaluate the impact of gateway changes on its tunnels. +Every VPN tunnel is created on a specific VPN Gateway (Classic or HA). The gateway provides the Google Cloud termination point, while the tunnel specifies the individual encrypted session parameters. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-container-cluster.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-container-cluster.md index eed12aa9..155018b9 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-container-cluster.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-container-cluster.md @@ -3,8 +3,8 @@ title: GCP Container Cluster sidebar_label: gcp-container-cluster --- -Google Kubernetes Engine (GKE) Container Clusters provide managed Kubernetes control-planes and node infrastructure on Google Cloud Platform. A cluster groups together one or more node pools running containerised workloads, and exposes both the Kubernetes API server and optional add-ons such as Cloud Monitoring, Cloud Logging, Workload Identity and Binary Authorisation. -For a full description of the service see the official Google documentation: https://cloud.google.com/kubernetes-engine/docs +Google Kubernetes Engine (GKE) Container Clusters provide fully-managed Kubernetes control planes running on Google Cloud. A cluster groups the Kubernetes control plane and the worker nodes that run your containerised workloads, and exposes a single API endpoint for deployment and management. Clusters can be regional or zonal, support autoscaling, automatic upgrades and many advanced networking, security and observability features. 
+Official documentation: https://cloud.google.com/kubernetes-engine/docs/concepts/kubernetes-engine-overview **Terrafrom Mappings:** @@ -18,30 +18,38 @@ For a full description of the service see the official Google documentation: htt ## Possible Links +### [`gcp-big-query-dataset`](/sources/gcp/Types/gcp-big-query-dataset) + +GKE can export usage metering and cost allocation data, as well as logs via Cloud Logging sinks, to a BigQuery dataset. When a cluster is configured for resource usage metering, it is linked to the destination dataset. + ### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) -A cluster can be configured to encrypt Kubernetes secrets and etcd data at rest using a customer-managed Cloud KMS crypto key. When customer-managed encryption is enabled, the cluster stores the resource ID of the key that protects its control-plane data, creating a link between the cluster and the KMS crypto key. +Clusters may use a customer-managed encryption key (CMEK) from Cloud KMS to encrypt Kubernetes Secrets and other etcd data at rest. The CMEK key configured for a cluster or for its persistent disks is therefore related. + +### [`gcp-cloud-kms-crypto-key-version`](/sources/gcp/Types/gcp-cloud-kms-crypto-key-version) + +A specific key version is referenced by the cluster for CMEK encryption. Rotating the key version affects the cluster’s data-at-rest encryption. ### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) -Every GKE cluster is deployed into a VPC network. All control-plane and node traffic flows inside this network, and the cluster stores the name of the network it belongs to, creating a relationship with the corresponding gcp-compute-network resource. +Every cluster is deployed into a VPC network; all control-plane and node traffic flows across this network. The network selected during cluster creation is linked here. 
### [`gcp-compute-node-group`](/sources/gcp/Types/gcp-compute-node-group) -If a node pool is configured to run on sole-tenant nodes, GKE provisions or attaches to Compute Engine node groups for placement. The cluster will therefore reference any node groups used by its node pools. +If the cluster uses sole-tenant nodes or node auto-provisioning, the underlying Compute Engine Node Groups that host GKE nodes are related to the cluster. ### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) -Within the chosen VPC, a cluster is attached to one or more subnetworks to allocate IP ranges for nodes, pods and services. The subnetwork resource(s) appear in the cluster’s configuration and are linked to the cluster. +Clusters (and their node pools) are placed in one or more subnets within the VPC for pod and service IP ranges. These subnetworks are therefore linked to the cluster. ### [`gcp-container-node-pool`](/sources/gcp/Types/gcp-container-node-pool) -A cluster is composed of one or more node pools that provide the actual worker nodes. Each node pool references its parent cluster, and the cluster maintains a list of all associated node pools. +A cluster contains one or more node pools that define the configuration of its worker nodes (machine type, autoscaling settings, etc.). Each node pool resource is directly associated with its parent cluster. ### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) -GKE uses service accounts for both the control-plane (Google-managed) and the nodes (user-specified or default). Additionally, Workload Identity maps Kubernetes service accounts to IAM service accounts. Any service account configured for node pools, Workload Identity or authorised networks will be linked to the cluster. +GKE uses IAM service accounts for the control plane, node VMs and workload identity. Service accounts granted to the cluster (e.g., Google APIs service agent, node service account) are linked. 
### [`gcp-pub-sub-topic`](/sources/gcp/Types/gcp-pub-sub-topic) -Audit logs and event streams originating from a GKE cluster can be exported via Logging sinks to Pub/Sub topics for downstream processing. When such a sink targets a Pub/Sub topic, the cluster indirectly references that topic, creating a link captured by Overmind. +Cluster audit logs, events or notifications can be exported to a Pub/Sub topic (e.g., via Log Sinks or Notification Channels). Any topic configured as a destination for the cluster is related here. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-container-node-pool.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-container-node-pool.md index dff4c40d..a0190d92 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-container-node-pool.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-container-node-pool.md @@ -3,7 +3,8 @@ title: GCP Container Node Pool sidebar_label: gcp-container-node-pool --- -A Google Cloud Platform (GCP) Container Node Pool is a logical grouping of worker nodes within a Google Kubernetes Engine (GKE) cluster. All nodes in a pool share the same configuration (machine type, disk size, metadata, labels, etc.) and are managed as a single unit for operations such as upgrades, autoscaling and maintenance. Node pools allow you to mix and match node types inside a single cluster, enabling workload-specific optimisation, cost control and security hardening. +Google Kubernetes Engine (GKE) runs worker nodes in groups called _node pools_. +Each pool defines the machine type, disk configuration, Kubernetes version and other attributes for the virtual machines that will back your workloads, and can be scaled or upgraded independently from the rest of the cluster. 
Official documentation: https://cloud.google.com/kubernetes-engine/docs/concepts/node-pools **Terrafrom Mappings:** @@ -20,16 +21,28 @@ Official documentation: https://cloud.google.com/kubernetes-engine/docs/concepts ### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) -A node pool can be configured to use a Cloud KMS CryptoKey for at-rest encryption of node boot disks or customer-managed encryption keys (CMEK) for GKE secrets. Overmind links the node pool to the KMS key that protects its data, allowing you to trace encryption dependencies. +When customer-managed encryption keys (CMEK) are enabled for node disks, the node pool stores a reference to the Cloud KMS crypto key that encrypts each node’s boot and attached data volumes. + +### [`gcp-compute-instance-group-manager`](/sources/gcp/Types/gcp-compute-instance-group-manager) + +Every node pool is implemented as a regional or zonal Managed Instance Group (MIG) that GKE creates and controls; the Instance Group Manager handles the lifecycle of the virtual machines that make up the pool. + +### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) + +Nodes launched by the pool are attached to a specific VPC network (and its associated routes and firewall rules), so the pool maintains a link to the Compute Network used by the cluster. ### [`gcp-compute-node-group`](/sources/gcp/Types/gcp-compute-node-group) -When a node pool is created on sole-tenant nodes, GKE provisions the underlying Compute Engine Node Group that hosts those VMs. Linking highlights which Node Group provides the physical tenancy for the pool’s nodes. +If the node pool is configured to run on sole-tenant nodes, it will reference the Compute Node Group that represents the underlying dedicated hosts reserved for those nodes. 
+ +### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) + +The pool records the particular subnetwork into which its nodes are placed, controlling the IP range from which node addresses are allocated. ### [`gcp-container-cluster`](/sources/gcp/Types/gcp-container-cluster) -Every node pool belongs to exactly one GKE cluster. This parent-child relationship is surfaced so you can quickly navigate from a pool to its cluster and understand cluster-level configuration and risk. +A node pool is a child resource of a GKE cluster; this link identifies the parent `gcp-container-cluster` that owns and orchestrates the pool. ### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) -Each VM in a node pool runs as an IAM service account (often the “default” compute service account or a custom node service account). Overmind links the pool to that service account to expose permissions granted to workloads running on the nodes. +Each node runs with a Google service account that provides credentials for pulling container images, writing logs, and calling Google APIs. The pool stores a reference to that IAM Service Account. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataform-repository.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataform-repository.md index dccaedcb..b4d0c48b 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataform-repository.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataform-repository.md @@ -3,8 +3,8 @@ title: GCP Dataform Repository sidebar_label: gcp-dataform-repository --- -A GCP Dataform Repository is the top-level, version-controlled container that stores all the SQL workflow code, configuration files and commit history used by Dataform in Google Cloud. It functions much like a Git repository, allowing data teams to develop, test and deploy BigQuery pipelines through branches, pull requests and releases. 
Repositories live under a specific project and location and can be connected to Cloud Source Repositories or external Git providers. -Official documentation: https://cloud.google.com/dataform/docs/repositories +A Google Cloud Dataform Repository represents the source-controlled codebase that defines your Dataform workflows. It stores SQLX files, declarations and configuration that Dataform uses to build, test and deploy transformations in BigQuery. A repository can point to an internal workspace or to an external Git repository and may reference service accounts, Secret Manager secrets and customer-managed encryption keys. +Official documentation: https://cloud.google.com/dataform/reference/rest **Terrafrom Mappings:** @@ -20,12 +20,16 @@ Official documentation: https://cloud.google.com/dataform/docs/repositories ### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) -If Customer-Managed Encryption Keys (CMEK) are enabled for the repository, it contains a reference to the Cloud KMS crypto key that encrypts its metadata. Overmind follows this link to verify key existence, rotation policy and wider blast radius. +A repository can be configured with a customer-managed encryption key (`kms_key_name`) to encrypt its metadata and compiled artefacts, creating a dependency on the corresponding Cloud KMS crypto-key. + +### [`gcp-cloud-kms-crypto-key-version`](/sources/gcp/Types/gcp-cloud-kms-crypto-key-version) + +If CMEK is enabled, the repository points to a specific crypto-key version that is actually used for encryption; rotating or disabling that version will affect the repository. ### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) -Dataform executes queries and workflow steps using a service account specified in the repository or workspace settings. Linking to the IAM service account lets Overmind trace which identities can act on behalf of the repository and assess permission risks. 
+Dataform uses a service account to fetch code from remote Git repositories and to execute compilation and workflow tasks; the repository stores the e-mail address of that service account. ### [`gcp-secret-manager-secret`](/sources/gcp/Types/gcp-secret-manager-secret) -A repository may reference secrets (such as connection strings or API tokens) stored in Secret Manager via environment variables or workflow configurations. Overmind links to these secrets to ensure they exist, are properly protected and are not about to be rotated or deleted. +When a repository is linked to an external Git provider, the authentication token is stored in Secret Manager. The field `authentication_token_secret_version` references the secret (and version) that holds the token. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataplex-aspect-type.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataplex-aspect-type.md index af6be780..5261705d 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataplex-aspect-type.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataplex-aspect-type.md @@ -3,8 +3,8 @@ title: GCP Dataplex Aspect Type sidebar_label: gcp-dataplex-aspect-type --- -A Google Cloud Dataplex Aspect Type is a reusable template that describes the structure and semantics of a particular piece of metadata—an _aspect_—that can later be attached to Dataplex assets, entries, or partitions. By defining aspect types centrally, an organisation can guarantee that the same metadata schema (for example, “Personally Identifiable Information classification” or “Data-quality score”) is applied consistently across lakes, zones, and assets, thereby strengthening governance, lineage, and discovery capabilities. 
-For further details, see the official Dataplex REST reference: https://cloud.google.com/dataplex/docs/reference/rest/v1/projects.locations.aspectTypes +A Dataplex Aspect Type is a top-level resource within Google Cloud Dataplex’s metadata service that defines the structure of a metadata “aspect” – a reusable schema describing a set of attributes you want to attach to data assets (for example, data quality scores or business classifications). Once an aspect type is created, individual assets such as tables, files or columns can be annotated with concrete “aspects” that conform to that schema, ensuring consistent, centrally-governed metadata across your lake. +For further details see the official API reference: https://cloud.google.com/dataplex/docs/reference/rest/v1/projects.locations.aspectTypes **Terrafrom Mappings:** diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataplex-data-scan.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataplex-data-scan.md index a9caa00a..dbcd9070 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataplex-data-scan.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataplex-data-scan.md @@ -3,7 +3,8 @@ title: GCP Dataplex Data Scan sidebar_label: gcp-dataplex-data-scan --- -A Dataplex Data Scan is a managed resource that schedules and executes automated profiling or data-quality checks over data held in Google Cloud Platform (GCP) storage systems such as Cloud Storage and BigQuery. The scan stores its configuration, execution history and results, allowing teams to understand the structure, completeness and validity of their datasets before those datasets are used downstream. 
Full details can be found in the official Google Cloud documentation: https://docs.cloud.google.com/dataplex/docs/use-data-profiling +A GCP Dataplex Data Scan is a first-class Dataplex resource that encapsulates the configuration and schedule for profiling data or validating data-quality rules against a registered asset such as a BigQuery table or files held in Cloud Storage. Each scan lives in a specific Google Cloud location and records its execution history, metrics and detected issues, allowing teams to understand data health before downstream workloads rely on it. +For full details see the official REST reference: https://cloud.google.com/dataplex/docs/reference/rest/v1/projects.locations.dataScans **Terrafrom Mappings:** @@ -17,6 +18,10 @@ A Dataplex Data Scan is a managed resource that schedules and executes automated ## Possible Links +### [`gcp-big-query-table`](/sources/gcp/Types/gcp-big-query-table) + +A Dataplex Data Scan may target a BigQuery table as its data source; linking the scan to the table lets Overmind trace quality findings back to the exact table that will be affected by the deployment. + ### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) -A Dataplex Data Scan can target objects stored in a Cloud Storage bucket for profiling or quality validation. Therefore, Overmind links the scan resource to the bucket that contains the underlying data being analysed, enabling a complete view of the data-quality pipeline and its dependencies. +When the data asset under review is a set of files stored in Cloud Storage, Dataplex references the underlying bucket. Linking the scan to the bucket reveals how changes to bucket configuration or contents could influence upcoming scan results. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataplex-entry-group.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataplex-entry-group.md index 04d5e81f..ffcc1c6b 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataplex-entry-group.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataplex-entry-group.md @@ -3,8 +3,8 @@ title: GCP Dataplex Entry Group sidebar_label: gcp-dataplex-entry-group --- -A Dataplex Entry Group is a logical container in Google Cloud that lives in the Data Catalog service and is used by Dataplex to organise metadata about datasets, tables and other data assets. By grouping related Data Catalog entries together, Entry Groups enable consistent discovery, governance and lineage tracking across lakes, zones and projects. Each Entry Group is created in a specific project and location and can be referenced by Dataplex jobs, policies and fine-grained IAM settings. -For full details see Google’s REST reference: https://cloud.google.com/data-catalog/docs/reference/rest/v1/projects.locations.entryGroups +A Dataplex Entry Group is a logical container that holds one or more metadata entries within Google Cloud’s unified Data Catalog. By grouping related entries together, it helps data stewards organise, secure and search metadata that describe the underlying data assets managed by Dataplex (such as tables, files or streams). Each Entry Group lives in a specific project and location and can be granted IAM permissions independently, allowing fine-grained access control over the metadata it contains. 
+Official documentation: https://cloud.google.com/data-catalog/docs/reference/rest/v1/projects.locations.entryGroups **Terrafrom Mappings:** diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataproc-autoscaling-policy.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataproc-autoscaling-policy.md index 9532af2f..f897865f 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataproc-autoscaling-policy.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataproc-autoscaling-policy.md @@ -3,9 +3,10 @@ title: GCP Dataproc Autoscaling Policy sidebar_label: gcp-dataproc-autoscaling-policy --- -A Google Cloud Dataproc Autoscaling Policy defines how a Dataproc cluster should automatically grow or shrink its worker and secondary-worker (pre-emptible) node groups in response to load. Policies specify minimum and maximum instance counts, cooldown periods, and scaling rules based on YARN memory or CPU utilisation, allowing clusters to meet workload demand while controlling cost. Once created at the project or region level, a policy can be referenced by any Dataproc cluster in that location. For more detail see the official documentation: https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling. +A GCP Dataproc Autoscaling Policy defines the rules that Google Cloud Dataproc uses to automatically add or remove worker nodes from a Dataproc cluster in response to workload demand. By specifying target utilisation levels, cooldown periods, graceful decommissioning time-outs and per-node billing settings, the policy ensures that clusters expand to meet spikes in processing requirements and shrink when demand falls, optimising both performance and cost. +For a full description of each field and the underlying API, see the official Google Cloud documentation: https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.autoscalingPolicies. 
-**Terrafrom Mappings:** +**Terraform Mappings:** - `google_dataproc_autoscaling_policy.name` diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataproc-cluster.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataproc-cluster.md index 23805167..82fba59e 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataproc-cluster.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-dataproc-cluster.md @@ -3,7 +3,7 @@ title: GCP Dataproc Cluster sidebar_label: gcp-dataproc-cluster --- -A Google Cloud Dataproc Cluster is a managed cluster of Compute Engine virtual machines that runs open-source data-processing frameworks such as Apache Spark, Apache Hadoop, Presto and Trino. Dataproc handles the provisioning, configuration and ongoing management of the cluster, allowing you to submit jobs or create ephemeral clusters on demand while paying only for the compute you use. For full feature details see the official documentation: https://docs.cloud.google.com/dataproc/docs/concepts/overview. +A Google Cloud Dataproc Cluster is a managed group of Compute Engine virtual machines configured to run big-data workloads such as Apache Hadoop, Spark, Hive and Presto. Dataproc abstracts away the operational overhead of provisioning, configuring and scaling the underlying infrastructure, allowing you to launch fully-featured clusters in minutes and shut them down just as quickly. See the official documentation for full details: https://cloud.google.com/dataproc/docs/concepts/overview **Terrafrom Mappings:** @@ -19,36 +19,48 @@ A Google Cloud Dataproc Cluster is a managed cluster of Compute Engine virtual m ### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) -A Dataproc cluster can be configured to use a customer-managed encryption key (CMEK) from Cloud KMS to encrypt the persistent disks attached to its nodes as well as the cluster’s Cloud Storage staging bucket. 
+If customer-managed encryption keys (CMEK) are enabled, a Dataproc Cluster references a Cloud KMS Crypto Key to encrypt the persistent disks attached to its virtual machines. ### [`gcp-compute-image`](/sources/gcp/Types/gcp-compute-image) -Each Dataproc cluster is built from a specific Dataproc image (e.g., `2.1-debian11`). The image determines the operating system and the versions of Hadoop, Spark and other components installed on the VM instances. +Each node in a Dataproc Cluster boots from a specific Compute Engine image (e.g., a Dataproc-prebuilt image or a custom image), so the cluster has a dependency on that image. ### [`gcp-compute-instance-group-manager`](/sources/gcp/Types/gcp-compute-instance-group-manager) -Behind the scenes Dataproc creates managed instance groups for the primary, secondary and optional pre-emptible worker node pools. These MIGs handle instance creation, health-checking and replacement. +Dataproc automatically creates Managed Instance Groups (MIGs) for the primary, worker and optional secondary-worker node pools; these MIGs are children of the Dataproc Cluster. ### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) -The cluster’s VMs are attached to a specific VPC network, determining their routability and ability to reach other Google Cloud services or on-premises systems. +The cluster’s VMs are attached to a particular VPC network, dictating their reachability, firewall rules and routing behaviour. ### [`gcp-compute-node-group`](/sources/gcp/Types/gcp-compute-node-group) -If you run Dataproc on sole-tenant nodes, the cluster associates each VM with a Compute Node Group to guarantee dedicated physical hardware. +If the cluster is deployed on sole-tenant nodes, it is associated with a Compute Node Group that provides dedicated hardware isolation. 
### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) -Within the chosen VPC, the cluster can be pinned to a particular subnetwork to control IP address ranges, firewall rules and routing. +Within the selected VPC, the Dataproc Cluster attaches its instances to a specific subnetwork where IP addressing, Private Google Access and regional placement are defined. + +### [`gcp-container-cluster`](/sources/gcp/Types/gcp-container-cluster) + +For Dataproc on GKE deployments, the Dataproc Cluster is layered on top of an existing Google Kubernetes Engine cluster, creating a parent–child relationship. + +### [`gcp-container-node-pool`](/sources/gcp/Types/gcp-container-node-pool) + +When running Dataproc on GKE, the workloads execute on one or more GKE node pools; the Dataproc service references these node pools for capacity. ### [`gcp-dataproc-autoscaling-policy`](/sources/gcp/Types/gcp-dataproc-autoscaling-policy) -Clusters may reference an Autoscaling Policy that automatically adds or removes worker nodes based on YARN or Spark metrics, optimising performance and cost. +A Dataproc Cluster can be bound to an Autoscaling Policy that dynamically adjusts the number of worker nodes based on workload metrics. + +### [`gcp-dataproc-cluster`](/sources/gcp/Types/gcp-dataproc-cluster) + +Clusters can reference other clusters as templates or in workflows that orchestrate multiple clusters; Overmind represents these peer or predecessor relationships with a self-link. ### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) -Every Dataproc node runs under a Compute Engine service account. This account’s IAM roles determine the cluster’s permission to read/write Cloud Storage, publish metrics, access BigQuery, etc. +The VMs within the cluster run under one or more IAM Service Accounts that grant them permissions to access other Google Cloud services. 
### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) -Dataproc uses Cloud Storage buckets for staging job files, storing cluster logs and optionally as a default HDFS replacement via the `gcs://` connector. The cluster therefore references one or more buckets during its lifecycle. +During creation, the cluster specifies Cloud Storage buckets for staging, temp and log output, making those buckets upstream dependencies. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-dns-managed-zone.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-dns-managed-zone.md index 7f296db9..f3a4f130 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-dns-managed-zone.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-dns-managed-zone.md @@ -3,8 +3,8 @@ title: GCP Dns Managed Zone sidebar_label: gcp-dns-managed-zone --- -A Cloud DNS Managed Zone is a logical container within Google Cloud that holds the DNS records for a particular namespace (for example, `example.com`). Each managed zone is served by a set of authoritative name servers and can be either public (resolvable on the public internet) or private (resolvable only from selected VPC networks). Managed zones let you create, update, and delete DNS resource-record sets using the Cloud DNS API, gcloud CLI, or Terraform. -For full details see Google’s documentation: https://docs.cloud.google.com/dns/docs/zones +A Google Cloud DNS Managed Zone is a logical container for DNS resource records that share the same DNS name suffix. Managed zones can be configured as public (resolvable from the internet) or private (resolvable only from one or more selected VPC networks). They are the fundamental unit that Cloud DNS uses to host, serve and manage authoritative DNS data for your domains. 
+Official documentation: https://cloud.google.com/dns/docs/zones **Terrafrom Mappings:** @@ -20,8 +20,8 @@ For full details see Google’s documentation: https://docs.cloud.google.com/dns ### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) -Private managed zones can be attached to one or more VPC networks. When such a link exists, DNS queries originating from resources inside the referenced `gcp-compute-network` are resolved using the records defined in the managed zone. Overmind surfaces this relationship to show which networks will be affected by changes to the zone’s records or visibility settings. +Private managed zones are explicitly linked to one or more VPC networks. The association determines which networks can resolve the zone’s records, so an Overmind relationship helps surface reachability and leakage risks between a DNS zone and the networks that consume it. ### [`gcp-container-cluster`](/sources/gcp/Types/gcp-container-cluster) -Google Kubernetes Engine may automatically create or rely on Cloud DNS managed zones for features such as service discovery, Cloud DNS-based Pod/Service FQDN resolution, or workload identity federation. Linking a `gcp-dns-managed-zone` to a `gcp-container-cluster` allows Overmind to highlight how DNS adjustments could impact cluster-internal name resolution or ingress behaviour for that cluster. +GKE clusters frequently create or rely on Cloud DNS managed zones for service discovery and in-cluster load-balancing (e.g., when Cloud DNS for GKE is enabled). Mapping a cluster to its managed zones reveals dependencies that affect name resolution, cross-cluster communication and potential namespace conflicts. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-essential-contacts-contact.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-essential-contacts-contact.md index d3797d1d..96d94d99 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-essential-contacts-contact.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-essential-contacts-contact.md @@ -3,8 +3,8 @@ title: GCP Essential Contacts Contact sidebar_label: gcp-essential-contacts-contact --- -Google Cloud’s Essential Contacts service allows an organisation to register one or more e-mail addresses that will receive important operational and security notifications about a project, folder, or organisation. A “contact” resource represents a single recipient and records the e-mail address, preferred language and notification categories that the person should receive. More than one contact can be added so that the right teams are informed whenever Google issues mandatory or time-sensitive messages. -For a full description of the resource and its fields, refer to the official documentation: https://cloud.google.com/resource-manager/docs/managing-notification-contacts +A **Google Cloud Essential Contact** represents an email address or Google Group that Google Cloud will use to send important notifications about incidents, security issues, and other critical updates for a project, folder, or organisation. Each contact is stored under a parent resource (e.g. `projects/123456789`, `folders/987654321`, or `organizations/555555555`) and can be categorised by notification types such as `SECURITY`, `TECHNICAL`, or `LEGAL`. 
+For further details, refer to the official Google Cloud documentation: https://cloud.google.com/resource-manager/docs/reference/essentialcontacts/rest **Terrafrom Mappings:** diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-file-instance.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-file-instance.md index f9a224ba..4f4693ee 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-file-instance.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-file-instance.md @@ -3,8 +3,8 @@ title: GCP File Instance sidebar_label: gcp-file-instance --- -A GCP Filestore instance is a fully-managed network file system that provides high-performance, scalable Network File System (NFS) shares to Google Cloud workloads. It allows you to mount POSIX-compliant file storage from Compute Engine VMs, GKE clusters and other services without having to provision or manage the underlying storage infrastructure yourself. Each instance resides in a specific region and VPC network, exposes one or more IP addresses, and can be encrypted with either Google-managed or customer-managed keys. -For full details, refer to the official documentation: https://cloud.google.com/filestore/docs. +A GCP File Instance represents a Cloud Filestore instance – a managed network file storage appliance that provides an NFSv3 or NFSv4-compatible file share, typically used by GKE clusters or Compute Engine VMs that require shared, POSIX-compliant storage. Each instance is created in a specific GCP region and zone, connected to a VPC network, and exposes one or more file shares over a private RFC-1918 address. Instances can be customised for capacity and performance tiers, and may optionally use customer-managed encryption keys (CMEK) for data-at-rest encryption. 
+Official documentation: https://cloud.google.com/filestore/docs/overview **Terrafrom Mappings:** @@ -20,8 +20,8 @@ For full details, refer to the official documentation: https://cloud.google.com/ ### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) -A Filestore instance can be configured to use a customer-managed encryption key (CMEK) stored in Cloud KMS. When CMEK is enabled, the instance has a direct dependency on the specified `gcp-cloud-kms-crypto-key`, and loss or revocation of that key will render the file share inaccessible. +A Filestore instance can be encrypted with a customer-managed Cloud KMS key (CMEK). The link shows which KMS Crypto Key is protecting the data-at-rest of this storage appliance. ### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) -Every Filestore instance is attached to a single VPC network and is reachable through an internal IP address range that you specify. This link represents the network in which the instance’s NFS endpoints are published and through which client traffic must flow. +Filestore instances are deployed into and reachable through a specific VPC network. This link identifies the Compute Network whose subnet provides the private IP addresses through which clients access the file share. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-iam-role.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-iam-role.md index 0867be52..f4ffc0ed 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-iam-role.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-iam-role.md @@ -3,8 +3,7 @@ title: GCP Iam Role sidebar_label: gcp-iam-role --- -Google Cloud Identity and Access Management (IAM) roles are collections of granular permissions that you grant to principals—such as users, groups or service accounts—so they can interact with Google Cloud resources. 
Roles come in three varieties (basic, predefined and custom) and are the chief mechanism for enforcing the principle of least privilege across your estate. Overmind represents each IAM role as an individual resource, enabling you to surface the blast-radius of creating, modifying or deleting a role before you commit the change. -For further details, refer to the official Google Cloud documentation: https://cloud.google.com/iam/docs/understanding-roles +A **Google Cloud IAM Role** is a logical grouping of one or more IAM permissions that can be granted to principals (users, service accounts, groups or Google Workspace domains) to control their access to Google Cloud resources. Roles come in three flavours—basic, predefined and custom—allowing organisations to strike a balance between least-privilege access and administrative convenience. For a full explanation see the Google Cloud documentation: https://cloud.google.com/iam/docs/understanding-roles ## Supported Methods diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-iam-service-account-key.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-iam-service-account-key.md index 79313f2f..a2e0d84e 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-iam-service-account-key.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-iam-service-account-key.md @@ -3,8 +3,8 @@ title: GCP Iam Service Account Key sidebar_label: gcp-iam-service-account-key --- -A GCP IAM Service Account Key is a cryptographic key-pair that allows code or users outside Google Cloud to authenticate as a specific service account. Each key consists of a public key stored by Google and a private key material that can be downloaded once and should be stored securely. Because anyone in possession of the private key can act with all the permissions of the associated service account, these keys are highly sensitive and should be rotated or disabled when no longer required. 
-For full details, see the official documentation: https://cloud.google.com/iam/docs/creating-managing-service-account-keys +A GCP IAM Service Account Key is a cryptographic key-pair (private and public) that is bound to a specific IAM service account. Possessing the private half of the key allows a workload or user to authenticate to Google Cloud APIs as that service account, making the key one of the most sensitive objects in any Google Cloud environment. Keys can be user-managed or Google-managed, rotated, disabled or deleted, and each service account can hold up to ten user-managed keys at a time. Mis-management of these keys can lead to credential leakage and unauthorised access. +Official documentation: https://cloud.google.com/iam/docs/creating-managing-service-account-keys **Terrafrom Mappings:** @@ -20,4 +20,4 @@ For full details, see the official documentation: https://cloud.google.com/iam/d ### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) -Every Service Account Key is attached to exactly one Service Account; this link allows you to trace which principal will be able to use the key and to evaluate the permissions that could be exercised if the key were compromised. +A Service Account Key is always subordinate to, and uniquely associated with, a single IAM service account. Overmind links the key back to its parent service account so you can trace which workload the key belongs to, understand the permissions it inherits, and assess the blast radius should the key be compromised. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-iam-service-account.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-iam-service-account.md index 92c34a8d..2fa7bf68 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-iam-service-account.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-iam-service-account.md @@ -3,7 +3,8 @@ title: GCP Iam Service Account sidebar_label: gcp-iam-service-account --- -A GCP IAM Service Account is a non-human identity that represents a workload such as a VM, Cloud Function or CI/CD pipeline. It can be granted IAM roles and used to obtain access tokens for calling Google Cloud APIs, allowing software to authenticate securely without relying on end-user credentials. Each service account lives inside a single project (or, less commonly, an organisation or folder) and can be equipped with one or more private keys for external use. See the official documentation for further details: [Google Cloud – Service Accounts](https://cloud.google.com/iam/docs/service-accounts). +A GCP IAM Service Account is a special kind of Google identity that an application or VM instance uses to make authorised calls to Google Cloud APIs, rather than an end-user. Each service account is identified by an email‐style string (e.g. `my-sa@project-id.iam.gserviceaccount.com`) and a stable numeric `unique_id`. Service accounts can be granted IAM roles, can own resources, and may have one or more cryptographic keys used for authentication. +For full details see the official documentation: https://cloud.google.com/iam/docs/service-accounts **Terrafrom Mappings:** @@ -20,8 +21,8 @@ A GCP IAM Service Account is a non-human identity that represents a workload suc ### [`gcp-cloud-resource-manager-project`](/sources/gcp/Types/gcp-cloud-resource-manager-project) -Every service account is created within exactly one Cloud Resource Manager project. 
Overmind links the service account to its parent project so that you can trace inheritance of IAM policies and understand the blast radius of changes to either resource. +Every service account is created inside a single Cloud Resource Manager project. This link lets you navigate from the service account to the project that owns it, revealing project-level policies and context. ### [`gcp-iam-service-account-key`](/sources/gcp/Types/gcp-iam-service-account-key) -A service account may have multiple keys (managed by Google or user-managed). These keys allow external systems to impersonate the service account. Overmind enumerates and links all keys associated with a service account, helping you identify stale or over-privileged credentials. +Service account keys are cryptographic credentials associated with a service account. This link lists all keys (active, disabled or expired) that belong to the current service account, allowing you to audit key rotation and exposure risks. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-logging-bucket.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-logging-bucket.md index 9c101db9..8b2a3d9b 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-logging-bucket.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-logging-bucket.md @@ -3,8 +3,8 @@ title: GCP Logging Bucket sidebar_label: gcp-logging-bucket --- -A GCP Logging Bucket is a regional or multi-regional storage container within Cloud Logging that holds log entries for long-term retention, analysis and export. Buckets allow you to isolate logs by project, folder or organisation, set individual retention periods, and apply fine-grained IAM policies. They can be configured for customer-managed encryption and for log routing between projects or across the organisation. 
-For full details see the Google Cloud documentation: https://cloud.google.com/logging/docs/storage#buckets +A GCP Logging Bucket is a regional or multi-regional storage container managed by Cloud Logging that stores log entries routed from one or more Google Cloud projects, folders or organisations. Buckets provide fine-grained control over where logs are kept, how long they are retained, and which encryption keys protect them. Log buckets behave similarly to Cloud Storage buckets, but are optimised for log data and are accessed through the Cloud Logging API rather than through Cloud Storage. +See the official documentation for full details: https://cloud.google.com/logging/docs/storage ## Supported Methods @@ -16,8 +16,12 @@ For full details see the Google Cloud documentation: https://cloud.google.com/lo ### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) -A logging bucket can be encrypted with a customer-managed encryption key (CMEK). When CMEK is enabled, the bucket stores the full resource name of the Cloud KMS crypto key that protects the log data, creating a dependency on that `gcp-cloud-kms-crypto-key` resource. +A logging bucket can be configured to use customer-managed encryption keys (CMEK). When CMEK is enabled, the bucket references a Cloud KMS Crypto Key that holds the symmetric key material used to encrypt and decrypt the stored log entries. + +### [`gcp-cloud-kms-crypto-key-version`](/sources/gcp/Types/gcp-cloud-kms-crypto-key-version) + +If CMEK is active, the bucket also keeps track of the specific key version that is currently in use. This link represents the exact Crypto Key Version providing encryption for the bucket at a given point in time. ### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) -Writing, reading and routing logs rely on service accounts such as the Log Router and Google-managed writer accounts. 
These accounts appear in the bucket’s IAM policy and permissions, so the bucket is linked to the corresponding `gcp-iam-service-account` resources. +Cloud Logging uses service accounts to write, read or route logs into a bucket. The bucket’s IAM policy may grant `roles/logging.bucketWriter` or `roles/logging.viewer` to particular service accounts, and the Log Router’s reserved service account must have permission to encrypt data when CMEK is enabled. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-logging-link.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-logging-link.md index e5e10117..9ecd6a83 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-logging-link.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-logging-link.md @@ -3,11 +3,12 @@ title: GCP Logging Link sidebar_label: gcp-logging-link --- -A GCP Logging Link is a Cloud Logging resource that connects a Log Bucket to an external analytics destination, currently a BigQuery dataset. Once the link is created, every entry that is written to the bucket is replicated to the linked BigQuery dataset in near real time, letting you query your logs with standard BigQuery SQL without having to configure or manage a separate Log Router sink. -Logging Links are created under the path -`projects/{project}/locations/{location}/buckets/{bucket}/links/{link}` and inherit the life-cycle and IAM policies of their parent bucket. They are regional, can optionally back-fill historical log data at creation time, and can be updated or deleted independently of the bucket or dataset. +A GCP Logging Link is a Cloud Logging resource that continuously streams the log entries stored in a specific Log Bucket into an external BigQuery dataset. By configuring a link you enable near-real-time analytics of your logs with BigQuery without the need for manual exports or scheduled jobs. 
Links are created under the path -For more information see the official documentation: https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.locations.buckets.links +`projects/{project}/locations/{location}/buckets/{bucket}/links/{link}` (equivalent paths exist under folders, organisations and billing accounts) + +and each link specifies the destination BigQuery dataset, IAM writer identity, and lifecycle state. +For further details see Google’s official documentation: https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.locations.buckets.links ## Supported Methods @@ -19,8 +20,8 @@ For more information see the official documentation: https://cloud.google.com/lo ### [`gcp-big-query-dataset`](/sources/gcp/Types/gcp-big-query-dataset) -A Logging Link points to the BigQuery dataset that serves as the analytics destination. The linked `gcp-big-query-dataset` receives a continuous copy of the logs contained in the parent bucket. +A logging link targets exactly one BigQuery dataset; Overmind establishes this edge so you can trace which dataset is receiving log entries from the bucket. ### [`gcp-logging-bucket`](/sources/gcp/Types/gcp-logging-bucket) -Every Logging Link is defined inside a specific `gcp-logging-bucket`. The bucket is the source of the log entries that are streamed to the linked BigQuery dataset. +The logging link is defined inside a specific Log Bucket; this relationship lets you see which buckets are sending their logs onwards and to which destinations. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-logging-saved-query.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-logging-saved-query.md index 0502885d..7802591d 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-logging-saved-query.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-logging-saved-query.md @@ -3,8 +3,8 @@ title: GCP Logging Saved Query sidebar_label: gcp-logging-saved-query --- -A GCP Logging Saved Query is a reusable, shareable filter definition for Google Cloud Logging (Logs Explorer). 
It stores the log filter expression, as well as optional display preferences and metadata, so that complex queries can be rerun or shared without having to rewrite the filter each time. Saved queries can be created at the project, folder, billing-account or organisation level and are particularly useful for operational run-books, incident response and dashboards. -Official documentation: https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.locations.savedQueries +A GCP Logging Saved Query is a reusable, named log query that is stored in Google Cloud Logging’s Logs Explorer. It contains the filter expression (written in the Logging query language), any configured time-range presets and display options, allowing teams to quickly rerun common searches, share queries across projects, and use them as the basis for dashboards, log-based metrics or alerting policies. Because Saved Queries are resources in their own right, they can be created, read, updated and deleted through the Cloud Logging API, and are uniquely identified by the combination of the Google Cloud location and the query name. +Official documentation: https://cloud.google.com/logging/docs/view/building-queries ## Supported Methods diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-logging-sink.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-logging-sink.md index b1e20cc0..e6bf2157 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-logging-sink.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-logging-sink.md @@ -3,8 +3,8 @@ title: GCP Logging Sink sidebar_label: gcp-logging-sink --- -A GCP Logging Sink is an export rule within Google Cloud Logging that continuously routes selected log entries to a destination such as BigQuery, Cloud Storage, Pub/Sub or another Logging bucket. Sinks allow you to retain logs for longer, perform analytics, or trigger near-real-time workflows outside Cloud Logging. 
Each sink is defined by three core elements: a filter that selects which log entries to export, a destination, and an IAM service account that is granted permission to write to that destination. -For full details see the official documentation: https://cloud.google.com/logging/docs/export/configure_export +A Logging Sink in Google Cloud Platform (GCP) is a routing rule that selects log entries with a user-defined filter and exports them to a chosen destination such as BigQuery, Cloud Storage, Pub/Sub, or another Cloud Logging bucket. Sinks are the building blocks of GCP’s Log Router and are used to retain, analyse or stream logs outside of the originating project, folder or organisation. +Official documentation: https://cloud.google.com/logging/docs/export ## Supported Methods @@ -16,16 +16,20 @@ For full details see the official documentation: https://cloud.google.com/loggin ### [`gcp-big-query-dataset`](/sources/gcp/Types/gcp-big-query-dataset) -If the sink’s destination is set to a BigQuery dataset, Overmind will create a link from the sink to that `gcp-big-query-dataset` resource because the sink writes log rows directly into the dataset’s `_TABLE_SUFFIX` sharded tables. +If the sink’s destination is a BigQuery table, it must reference a BigQuery dataset where the tables will be created and written to. The dataset therefore appears as a child dependency of the logging sink. + +### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) + +Every sink is assigned a writer_identity, which is an IAM service account that needs permission to write into the chosen destination. The sink’s correct operation depends on this service account having the required roles on the target resource. ### [`gcp-logging-bucket`](/sources/gcp/Types/gcp-logging-bucket) -A sink can either originate from a Logging bucket (when the sink is scoped to that bucket) or target a Logging bucket in another project or billing account. 
Overmind therefore links the sink to the relevant `gcp-logging-bucket` to show where logs are pulled from or pushed to. +A sink can route logs to another Cloud Logging bucket (including aggregated buckets at the folder or organisation level). In this case the sink targets, and must have write access to, the specified logging bucket. ### [`gcp-pub-sub-topic`](/sources/gcp/Types/gcp-pub-sub-topic) -When a sink exports logs to Pub/Sub, it references a specific topic. Overmind links the sink to the corresponding `gcp-pub-sub-topic` so that users can trace event-driven pipelines or alerting mechanisms that rely on those published log messages. +When the destination is Pub/Sub, the sink exports each matching log entry as a message on a particular topic. The topic therefore represents an external linkage for onward streaming or event-driven processing. ### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) -If the sink is configured to deliver logs to Cloud Storage, the destination bucket appears as a linked `gcp-storage-bucket`. This highlights where log files are archived and the IAM relationship required for the sink’s writer identity to upload objects. +For archival purposes a sink may export logs to a Cloud Storage bucket. The bucket must exist and grant the sink’s writer service account permission to create objects, making the storage bucket a direct dependency of the sink. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-monitoring-alert-policy.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-monitoring-alert-policy.md index 3521f688..f5ca12a8 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-monitoring-alert-policy.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-monitoring-alert-policy.md @@ -3,8 +3,7 @@ title: GCP Monitoring Alert Policy sidebar_label: gcp-monitoring-alert-policy --- -A GCP Monitoring Alert Policy defines the conditions under which Google Cloud Monitoring should raise an alert and the actions that should be taken when those conditions are met. It lets you specify metrics to watch, threshold values, duration, notification channels, documentation for responders, and incident autoclose behaviour. Alert policies are a core part of Google Cloud’s observability suite, helping operations teams detect and respond to issues before they affect end-users. -For full details, see the official documentation: https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.alertPolicies#AlertPolicy +A Google Cloud Monitoring Alert Policy is a configuration object that defines the conditions under which Cloud Monitoring should create an incident, how incidents are grouped, and which notification channels should be used to inform operators. Alert policies enable proactive observation of metrics, logs and uptime checks across Google Cloud services so that you can respond quickly to anomalies. For more detail see the official Google Cloud documentation: [Create and manage alerting policies](https://cloud.google.com/monitoring/alerts). **Terrafrom Mappings:** @@ -20,4 +19,4 @@ For full details, see the official documentation: https://cloud.google.com/monit ### [`gcp-monitoring-notification-channel`](/sources/gcp/Types/gcp-monitoring-notification-channel) -An alert policy can reference one or more Notification Channels. 
These channels determine where alerts are delivered (e-mail, SMS, Pub/Sub, PagerDuty, etc.). Overmind therefore creates a link from each gcp-monitoring-alert-policy to the gcp-monitoring-notification-channel resources it targets, allowing you to understand which channels will be invoked when a policy fires. +An alert policy can reference one or more notification channels so that, when its conditions are met, Cloud Monitoring sends notifications (e-mails, webhooks, SMS, etc.) through the linked gcp-monitoring-notification-channels. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-monitoring-custom-dashboard.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-monitoring-custom-dashboard.md index 2e1aaa0f..98225cf8 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-monitoring-custom-dashboard.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-monitoring-custom-dashboard.md @@ -3,8 +3,8 @@ title: GCP Monitoring Custom Dashboard sidebar_label: gcp-monitoring-custom-dashboard --- -A Google Cloud Monitoring Custom Dashboard is a user-defined workspace in which you can visualise metrics, logs-based metrics and alerting information collected from your Google Cloud resources and external services. By assembling charts, heatmaps, and scorecards that matter to your organisation, a custom dashboard lets you observe the real-time health and historical behaviour of your workloads, share operational insights with your team, and troubleshoot incidents more quickly. Dashboards are created and managed through the Cloud Monitoring API, the Google Cloud console, or declaratively via Terraform. -Official documentation: https://cloud.google.com/monitoring/api/ref_v3/rest/v1/projects.dashboards#Dashboard +A GCP Monitoring Custom Dashboard is a user-defined collection of charts and widgets that presents metrics, logs, and alerts for resources running in Google Cloud or on-premises. 
It allows platform teams to visualise performance, capacity, and health in a single view that can be shared across projects. Custom dashboards are managed through Cloud Monitoring and can be created or modified via the Google Cloud Console, the Cloud Monitoring API, or infrastructure-as-code tools such as Terraform. +For full details, see the official documentation: https://cloud.google.com/monitoring/charts/dashboards **Terrafrom Mappings:** diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-monitoring-notification-channel.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-monitoring-notification-channel.md index f29eba8b..fab21d69 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-monitoring-notification-channel.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-monitoring-notification-channel.md @@ -3,8 +3,7 @@ title: GCP Monitoring Notification Channel sidebar_label: gcp-monitoring-notification-channel --- -A Google Cloud Monitoring Notification Channel is a resource that specifies where and how alerting notifications are delivered from Cloud Monitoring. Channels can point to many target types – e-mail, SMS, mobile push, Slack, PagerDuty, Pub/Sub, webhooks and more – and each channel stores the parameters (addresses, tokens, templates, etc.) required to reach that destination. Alerting policies reference one or more notification channels so that, when a policy is triggered, Cloud Monitoring automatically sends the message to the configured recipients. -For a full description of the resource and its schema, see the official Google Cloud documentation: https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.notificationChannels. +A **Google Cloud Monitoring Notification Channel** specifies where and how Cloud Monitoring delivers alert notifications—for example via email, SMS, Cloud Pub/Sub, Slack or PagerDuty. Each channel stores the configuration necessary for a particular medium (address, webhook URL, Pub/Sub topic name, etc.) 
and can be referenced by one or more alerting policies. For full details, see the official Google documentation: https://cloud.google.com/monitoring/support/notification-options **Terrafrom Mappings:** @@ -15,3 +14,9 @@ For a full description of the resource and its schema, see the official Google C - `GET`: Get a gcp-monitoring-notification-channel by its "name" - `LIST`: List all gcp-monitoring-notification-channel - `SEARCH`: Search by full resource name: projects/[project]/notificationChannels/[notificationChannel] (used for terraform mapping). + +## Possible Links + +### [`gcp-pub-sub-topic`](/sources/gcp/Types/gcp-pub-sub-topic) + +If the notification channel’s `type` is `pubsub`, the channel references a specific Cloud Pub/Sub topic where alert messages are published. Overmind therefore links the notification channel to the corresponding `gcp-pub-sub-topic` resource so that you can trace how alerts propagate into event-driven workflows or downstream systems. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-orgpolicy-policy.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-orgpolicy-policy.md index c9c6788c..9639146b 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-orgpolicy-policy.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-orgpolicy-policy.md @@ -3,8 +3,8 @@ title: GCP Orgpolicy Policy sidebar_label: gcp-orgpolicy-policy --- -An Organisation Policy in Google Cloud Platform (GCP) lets administrators enforce or relax specific constraints on GCP resources across the organisation, folder, or project hierarchy. Each policy represents the chosen configuration for a single constraint (for example, restricting service account key creation or limiting the set of permitted VM regions) on a single resource node. By querying an Org Policy, Overmind can reveal whether pending changes will violate existing security or governance rules before deployment. 
-Official documentation: https://cloud.google.com/resource-manager/docs/organization-policy/org-policy-constraints +An Organisation Policy (orgpolicy) in Google Cloud is a resource that applies a constraint to part of the resource hierarchy (organisation, folder, or project). It allows administrators to enforce governance rules—such as restricting the regions in which resources may be created, blocking the use of certain services, or mandating specific network configurations—before workloads are deployed. +For full details see Google’s official documentation: https://cloud.google.com/resource-manager/docs/organization-policy/overview **Terrafrom Mappings:** @@ -15,3 +15,9 @@ Official documentation: https://cloud.google.com/resource-manager/docs/organizat - `GET`: Get a gcp-orgpolicy-policy by its "name" - `LIST`: List all gcp-orgpolicy-policy - `SEARCH`: Search with the full policy name: projects/[project]/policies/[constraint] (used for terraform mapping). + +## Possible Links + +### [`gcp-cloud-resource-manager-project`](/sources/gcp/Types/gcp-cloud-resource-manager-project) + +A project is one of the resource hierarchy levels to which an Organisation Policy can be attached. Each gcp-orgpolicy-policy documented here is therefore linked to the gcp-cloud-resource-manager-project that the policy governs. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-pub-sub-subscription.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-pub-sub-subscription.md index 16c2256e..4b358d98 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-pub-sub-subscription.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-pub-sub-subscription.md @@ -3,12 +3,14 @@ title: GCP Pub Sub Subscription sidebar_label: gcp-pub-sub-subscription --- -A Google Cloud Pub/Sub subscription represents a named endpoint that receives messages that are published to a specific Pub/Sub topic. 
Subscribers pull (or are pushed) messages from the subscription, acknowledge them, and thereby remove them from the backlog. Subscriptions can be configured for pull or push delivery, control message-retention, enforce acknowledgement deadlines, use filtering, dead-letter topics or BigQuery/Cloud Storage sinks. -For full details see the official documentation: https://cloud.google.com/pubsub/docs/subscriber +A Google Cloud Pub/Sub subscription represents a stream of messages delivered from a single Pub/Sub topic to a consumer application. Each subscription defines how, where and for how long messages are retained, whether the delivery is push or pull, any filters or dead-letter policies, and the IAM principals that are allowed to read from it. Official documentation can be found at Google Cloud – Pub/Sub Subscriptions: https://cloud.google.com/pubsub/docs/subscription-overview **Terrafrom Mappings:** - `google_pubsub_subscription.name` +- `google_pubsub_subscription_iam_binding.subscription` +- `google_pubsub_subscription_iam_member.subscription` +- `google_pubsub_subscription_iam_policy.subscription` ## Supported Methods @@ -20,16 +22,20 @@ For full details see the official documentation: https://cloud.google.com/pubsub ### [`gcp-big-query-table`](/sources/gcp/Types/gcp-big-query-table) -A subscription can be of type “BigQuery subscription”, in which case Pub/Sub automatically streams all received messages into the linked BigQuery table. Overmind therefore links the subscription to the destination `gcp-big-query-table` so that you can see where your data will land. +Pub/Sub can deliver messages directly into BigQuery by means of a BigQuery subscription. When such an integration is configured, the subscription is linked to the destination BigQuery table. 
+ +### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) + +Service accounts are granted roles such as `roles/pubsub.subscriber` on the subscription so that applications can pull or acknowledge messages, or so that Pub/Sub can impersonate them for push deliveries. These IAM bindings create a relationship between the subscription and the service accounts. ### [`gcp-pub-sub-subscription`](/sources/gcp/Types/gcp-pub-sub-subscription) -Multiple subscriptions may exist on the same topic or share common dead-letter topics and filters. Overmind links related subscriptions together so you can understand fan-out patterns or duplicated consumption paths for the same data. +Multiple subscriptions can point at the same topic, or one subscription may forward undelivered messages to another subscription via a dead-letter topic. Overmind shows these peer or chained subscriptions as related items. ### [`gcp-pub-sub-topic`](/sources/gcp/Types/gcp-pub-sub-topic) -Every subscription is attached to exactly one topic, from which it receives messages. This parent–child relationship is surfaced by Overmind via a direct link to the source `gcp-pub-sub-topic`. +Every subscription is attached to exactly one topic. All messages published to that topic are made available to the subscription, making the topic the primary upstream dependency. ### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) -Cloud Storage buckets can emit object-change notifications to Pub/Sub topics. Subscriptions that listen to such topics are therefore operationally coupled to the originating bucket. Overmind links the subscription to the relevant `gcp-storage-bucket` so you can trace the flow of change events from storage to message consumers. +Cloud Storage buckets can emit object-change notifications to a Pub/Sub topic. 
If the subscription listens to such a topic, it is indirectly linked to the bucket that generated the events, allowing you to trace the flow from storage changes to message consumption. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-pub-sub-topic.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-pub-sub-topic.md index 92968f0f..9434dd26 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-pub-sub-topic.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-pub-sub-topic.md @@ -3,11 +3,15 @@ title: GCP Pub Sub Topic sidebar_label: gcp-pub-sub-topic --- -A Google Cloud Pub/Sub Topic is a named message stream into which publishers send messages and from which subscribers receive them. Topics act as the core distribution point in the Pub/Sub service, decoupling producers and consumers and enabling asynchronous, scalable communication between systems. For full details see the official documentation: https://docs.cloud.google.com/pubsub/docs/create-topic. +A **Cloud Pub/Sub Topic** is a named message channel in Google Cloud Platform that receives messages from publishers and delivers them to subscribers. Topics decouple senders and receivers, allowing highly-scalable, asynchronous communication between services. Every message published to a topic is retained for the duration of its acknowledgement window and can be encrypted with a customer-managed key. +For comprehensive information, see the official documentation: https://cloud.google.com/pubsub/docs/create-topic. **Terrafrom Mappings:** - `google_pubsub_topic.name` +- `google_pubsub_topic_iam_binding.topic` +- `google_pubsub_topic_iam_member.topic` +- `google_pubsub_topic_iam_policy.topic` ## Supported Methods @@ -19,8 +23,12 @@ A Google Cloud Pub/Sub Topic is a named message stream into which publishers sen ### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) -A Pub/Sub Topic can be encrypted with a customer-managed Cloud KMS key. 
When such a key is specified, the topic will hold a reference to the corresponding `gcp-cloud-kms-crypto-key`, and Overmind will surface this dependency so you can assess the impact of key rotation or removal. +A Pub/Sub topic may be encrypted using a customer-managed encryption key (CMEK). When CMEK is enabled, the topic resource holds a reference to the Cloud KMS Crypto Key that protects message data at rest. + +### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) + +Access to publish or subscribe is controlled through IAM roles that are granted to service accounts on the topic. The topic’s IAM policy therefore links it to any service account that has roles such as `roles/pubsub.publisher` or `roles/pubsub.subscriber` on the resource. ### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) -Cloud Storage buckets can be configured to send event notifications to a Pub/Sub Topic (for example, when objects are created or deleted). Overmind links the bucket to the topic so you can understand which storage resources rely on the topic and evaluate the blast radius of changes to either side. +Cloud Storage buckets can be configured to send change notifications to a Pub/Sub topic (for example, object create or delete events). In such configurations, the bucket acts as a publisher, and the topic appears as a dependent destination for bucket event notifications. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-redis-instance.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-redis-instance.md index fe0ec384..ef6259bf 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-redis-instance.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-redis-instance.md @@ -3,8 +3,7 @@ title: GCP Redis Instance sidebar_label: gcp-redis-instance --- -Cloud Memorystore for Redis provides a fully managed, in-memory, open-source Redis service on Google Cloud. 
It is commonly used for low-latency caching, session management, real-time analytics and message brokering. When you create an instance Google handles provisioning, patching, monitoring, fail-over and, if requested, TLS encryption and customer-managed encryption keys (CMEK). -More information can be found in the official documentation: https://cloud.google.com/memorystore/docs/redis +A GCP Redis Instance is a fully managed, in-memory data store provided by Cloud Memorystore for Redis. It offers a drop-in, highly available Redis service that handles provisioning, patching, scaling, monitoring and automatic fail-over, allowing you to use Redis as a cache or primary database without managing the underlying infrastructure yourself. See the official documentation for details: https://cloud.google.com/memorystore/docs/redis **Terrafrom Mappings:** @@ -20,12 +19,12 @@ More information can be found in the official documentation: https://cloud.googl ### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) -If CMEK is enabled, the Redis instance is encrypted at rest using a Cloud KMS CryptoKey. Overmind links the instance to the crypto key so you can trace data-at-rest encryption dependencies and evaluate key rotation or IAM policies. +If Customer-Managed Encryption Keys (CMEK) are enabled for the Redis instance, the data at rest is encrypted with a Cloud KMS Crypto Key. The Redis instance therefore depends on — and is cryptographically linked to — the specified `gcp-cloud-kms-crypto-key`. ### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) -Each Redis instance is created inside a specific VPC network and subnet. Linking to the compute network allows you to understand network reachability, firewall rules and peering arrangements that could affect the instance. +A Redis instance is deployed inside a specific VPC network and is reachable only via an internal IP address in that network. 
Consequently, each instance is associated with a `gcp-compute-network`, which determines its connectivity and firewall boundaries. ### [`gcp-compute-ssl-certificate`](/sources/gcp/Types/gcp-compute-ssl-certificate) -When TLS is enabled, Redis serves Google-managed certificates under the hood. Overmind associates these certificates (represented as Compute SSL Certificate resources) so that certificate expiry and chain of trust can be audited alongside the Redis service. +When TLS is enabled for a Redis instance, it can reference a Compute Engine SSL certificate resource to present during encrypted client connections. The `gcp-compute-ssl-certificate` therefore represents the server certificate used to secure traffic to the Redis instance. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-run-revision.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-run-revision.md index 5368eede..1e7b424b 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-run-revision.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-run-revision.md @@ -3,7 +3,8 @@ title: GCP Run Revision sidebar_label: gcp-run-revision --- -A Cloud Run Revision represents an immutable snapshot of the code and configuration that Cloud Run executes. Every time you deploy a new container image or change the runtime configuration of a Cloud Run Service, a new Revision is created and given a unique name. The Revision stores details such as the container image reference, environment variables, scaling limits, traffic settings, networking options and the service account under which the workload runs. Official documentation: https://docs.cloud.google.com/run/docs/managing/revisions +A Cloud Run **Revision** is an immutable snapshot of a Cloud Run Service configuration at a particular point in time. Each time you deploy new code or change configuration, Cloud Run automatically creates a new revision and routes traffic according to your settings. 
A revision defines the container image to run, environment variables, resource limits, networking options, service account, secret mounts and more. Once created, a revision can never be modified – you can only create a new one. +Official documentation: https://cloud.google.com/run/docs/reference/rest/v1/namespaces.revisions ## Supported Methods @@ -15,32 +16,36 @@ A Cloud Run Revision represents an immutable snapshot of the code and configurat ### [`gcp-artifact-registry-docker-image`](/sources/gcp/Types/gcp-artifact-registry-docker-image) -The Revision’s `container.image` field points to a Docker image that is normally stored in Artifact Registry (or the older Container Registry). Overmind therefore links the Revision to the exact image digest it deploys, so you can see what code is really running. +The container image specified in the revision is often stored in Artifact Registry. The revision therefore has a **uses-image** relationship with the referenced Docker image. ### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) -If the Revision mounts secrets or other resources that are encrypted with Cloud KMS, those crypto-keys are surfaced as links. This helps you understand which keys would be required to decrypt data at runtime. +If the revision is configured with a customer-managed encryption key (CMEK) for encrypted secrets or volumes, it will reference the corresponding Cloud KMS Crypto Key. ### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) -When a Revision is configured with a Serverless VPC Connector or egress settings that reference a particular VPC network, the corresponding `compute.network` is linked. This reveals the network perimeter through which outbound traffic may flow. +When a revision is set up to use Serverless VPC Access, it connects to a specific VPC network, creating a **connects-to-network** relationship. 
### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) -Similarly, a Revision may target a specific sub-network (for example `vpcAccess.connectorSubnetwork`). Overmind links the Revision to that `compute.subnetwork` so you can trace which CIDR ranges and routes apply. +The Serverless VPC Access connector used by the revision is attached to a particular subnetwork, so the revision is indirectly linked to that subnetwork. ### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) -Each Revision runs with an IAM service account specified in its `serviceAccountName` field. Linking to the service account lets you inspect the permissions that the workload inherits. +Each revision runs with an IAM service account whose permissions govern outbound calls and resource access. The revision therefore **runs-as** the referenced service account. ### [`gcp-run-service`](/sources/gcp/Types/gcp-run-service) -A Revision belongs to exactly one Cloud Run Service. The link to the parent Service shows the traffic allocation, routing configuration and other higher-level settings that govern how the Revision is invoked. +The revision is a child resource of a Cloud Run Service. All traffic routing and lifecycle events are managed at the service level. + +### [`gcp-secret-manager-secret`](/sources/gcp/Types/gcp-secret-manager-secret) + +Environment variables or mounted volumes in the revision can pull values from Secret Manager. This establishes a **consumes-secret** relationship. ### [`gcp-sql-admin-instance`](/sources/gcp/Types/gcp-sql-admin-instance) -If the Revision’s metadata includes Cloud SQL connection strings (via the `cloudSqlInstances` setting), Overmind links to the referenced Cloud SQL instances, making database dependencies explicit. +If the revision defines Cloud SQL connections, it will list one or more Cloud SQL instances it can connect to through the Cloud SQL proxy. 
### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) -Revisions can mount Cloud Storage buckets using Cloud Storage FUSE volumes or reference buckets through environment variables. When such configuration is detected, the corresponding buckets are linked so you can assess data-at-rest exposure. +A revision may read from or write to Cloud Storage buckets (for example for static assets or generated files) when granted the appropriate IAM permissions, creating a potential dependency on those buckets. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-run-service.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-run-service.md index deff181d..6620ad19 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-run-service.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-run-service.md @@ -3,7 +3,8 @@ title: GCP Run Service sidebar_label: gcp-run-service --- -Cloud Run Service is a fully-managed container execution environment that lets you run stateless HTTP containers on demand within Google Cloud. A Service represents the top-level Cloud Run resource, providing a stable URL, traffic splitting, configuration, and revision management for your containerised workload. For full details see the Google Cloud documentation: https://cloud.google.com/run/docs/reference/rest/v2/projects.locations.services +Google Cloud Run Service is a fully-managed compute platform that automatically scales stateless containers on demand. A Service represents the user-facing abstraction of your application, managing one or more immutable Revisions of a container image and routing traffic to them. It provides configuration for networking, environment variables, secrets, concurrency, autoscaling and identity. 
+Official documentation: https://cloud.google.com/run/docs **Terrafrom Mappings:** @@ -19,36 +20,36 @@ Cloud Run Service is a fully-managed container execution environment that lets y ### [`gcp-artifact-registry-docker-image`](/sources/gcp/Types/gcp-artifact-registry-docker-image) -A Cloud Run Service pulls its container image from Artifact Registry (or Container Registry). The linked `gcp-artifact-registry-docker-image` represents the specific image digest or tag referenced in the Service spec. +A Cloud Run Service deploys one specific container image; most commonly this image is stored in Artifact Registry. The link shows which image version the Service’s active Revision is based on. ### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) -If the Service’s container image or any attached Secret Manager secret is encrypted with a customer-managed encryption key (CMEK), the Cloud Run Service will be linked to the corresponding `gcp-cloud-kms-crypto-key`. +If the Service uses customer-managed encryption keys (CMEK) for at-rest encryption of logs, volumes or secrets, it will reference a Cloud KMS Crypto Key. ### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) -When a Cloud Run Service is configured with a Serverless VPC Access connector, it attaches to a VPC network to reach private resources. That network is represented here as a `gcp-compute-network`. +When the Service is configured with a VPC connector for egress or to reach private resources, it ultimately attaches to a specific Compute Network. ### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) -The Serverless VPC Access connector also lives on a particular subnetwork. The Cloud Run Service therefore relates to the `gcp-compute-subnetwork` used for outbound traffic. +The VPC connector also targets a concrete Subnetwork; this link identifies the precise subnet through which the Service’s traffic is routed. 
### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) -Every Cloud Run Service executes with an identity (the “service account” set in the Service’s `executionEnvironment` or `serviceAccount`). This runtime identity is captured as a link to `gcp-iam-service-account`. +A Cloud Run Service runs with a dedicated runtime identity. This Service Account is used for accessing other Google Cloud resources and defines the permissions available to the container. ### [`gcp-run-revision`](/sources/gcp/Types/gcp-run-revision) -Each deployment of a Cloud Run Service creates an immutable Revision. The Service maintains traffic routing rules among its Revisions, so it links to one or more `gcp-run-revision` resources. +Each update to configuration or container image creates a new Revision. The Service points traffic to one or more of these Revisions; the link maps the parent-child relationship. ### [`gcp-secret-manager-secret`](/sources/gcp/Types/gcp-secret-manager-secret) -Environment variables or mounted volumes can reference secrets stored in Secret Manager. Any such secret referenced by the Service or its Revisions appears as a `gcp-secret-manager-secret` link. +Environment variables or mounted volumes in the Service can be sourced from Secret Manager. Linked secrets indicate which sensitive values are injected at runtime. ### [`gcp-sql-admin-instance`](/sources/gcp/Types/gcp-sql-admin-instance) -If the Service includes a Cloud SQL connection string (via the `cloudsql-instances` annotation), Overmind records a relationship to the corresponding `gcp-sql-admin-instance`. +If Cloud SQL connections are configured via the Cloud SQL Auth Proxy side-car or built-in integration, the Service will reference one or more Cloud SQL instances. ### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) -Cloud Run Services may interact with Cloud Storage—for example, by having a URL environment variable or event trigger configuration. 
Where such a bucket name is detected in the Service configuration, it is linked here as `gcp-storage-bucket`. +The Service may access files in Cloud Storage for static assets or as mounted volumes (Cloud Run volumes). Buckets listed here are those explicitly referenced by environment variables, IAM permissions or volume mounts. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-secret-manager-secret.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-secret-manager-secret.md index be765812..6c8018f7 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-secret-manager-secret.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-secret-manager-secret.md @@ -3,7 +3,8 @@ title: GCP Secret Manager Secret sidebar_label: gcp-secret-manager-secret --- -A Google Cloud Secret Manager Secret is the logical container for sensitive data such as API keys, passwords and certificates stored in Secret Manager. The secret resource defines metadata and access-control policies, while one or more numbered “versions” hold the actual payload, enabling safe rotation and roll-back. Secrets are encrypted at rest with Google-managed keys by default, or with a user-supplied Cloud KMS key, and access is governed through IAM. For further information see the official documentation: https://cloud.google.com/secret-manager/docs +A Secret in Google Cloud Secret Manager is a secure, version-controlled container for sensitive data such as passwords, API keys, certificates, or any arbitrary text or binary payload. Each Secret holds one or more Secret Versions, allowing you to rotate or roll back the underlying data without changing the resource identifier that your applications refer to. Secrets are encrypted at rest with Google-managed keys by default, or you can supply a customer-managed Cloud KMS key. You can also configure Pub/Sub notifications to be emitted whenever a new version is added or other lifecycle events occur. 
+For full details see the official documentation: https://cloud.google.com/secret-manager/docs **Terrafrom Mappings:** @@ -19,8 +20,8 @@ A Google Cloud Secret Manager Secret is the logical container for sensitive data ### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) -If a customer-managed encryption key (CMEK) has been configured for this secret, the secret’s `kms_key_name` field will reference a Cloud KMS Crypto Key. Overmind surfaces that link so that you can trace how the secret is encrypted and assess key-management risks. +If a Secret is configured to use customer-managed encryption (CMEK), it references a Cloud KMS Crypto Key that performs the envelope encryption of all Secret Versions. Compromise or mis-configuration of the referenced KMS key directly affects the confidentiality and availability of the Secret’s payloads. ### [`gcp-pub-sub-topic`](/sources/gcp/Types/gcp-pub-sub-topic) -Secret Manager can be set to publish notifications (e.g. when a new secret version is added or destroyed) to a Pub/Sub topic. When such a notification configuration exists, the secret will link to the relevant Pub/Sub topic, allowing you to review who can subscribe to, or forward, these events. +Secret Manager can publish events—such as the creation of a new Secret Version—to a Pub/Sub topic. This enables automated workflows like triggering Cloud Functions for secret rotation or auditing. The Secret therefore holds an optional link to any Pub/Sub topic configured for such notifications. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-security-center-management-security-center-service.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-security-center-management-security-center-service.md index 32c701bf..25501a0c 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-security-center-management-security-center-service.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-security-center-management-security-center-service.md @@ -3,11 +3,18 @@ title: GCP Security Center Management Security Center Service sidebar_label: gcp-security-center-management-security-center-service --- -A Security Center Service resource represents the activation and configuration of Google Cloud Security Command Center (SCC) for a particular location (for example `europe-west2`) within a project, folder, or organisation. It records whether SCC is enabled, the current service tier (Standard, Premium, or Enterprise), and other operational metadata such as activation time and billing status. Administrators use this resource to programme-matically enable or disable SCC, upgrade or downgrade the service tier, and verify the health of the service across all regions. -Official documentation: https://cloud.google.com/security-command-center/docs/reference/security-center-management/rest/v1/folders.locations.securityCenterServices#SecurityCenterService +The **Security Center Service** resource represents the configuration of Security Command Center (SCC) for a particular Google Cloud location. +Each instance of this resource indicates that SCC is running in the specified region and records the service‐wide settings that govern how findings are ingested, stored and surfaced. 
+Official documentation: https://cloud.google.com/security-command-center/docs/reference/security-center-management/rest/v1/projects.locations.securityCenterServices/list ## Supported Methods - `GET`: Get a gcp-security-center-management-security-center-service by its "locations|securityCenterServices" - ~~`LIST`~~ - `SEARCH`: Search Security Center services in a location. Use the format "location". + +## Possible Links + +### [`gcp-cloud-resource-manager-project`](/sources/gcp/Types/gcp-cloud-resource-manager-project) + +A Security Center Service exists **inside** a specific Google Cloud project – the project determines billing, IAM policies and the scope of resources that SCC monitors. The Overmind link lets you pivot from the project to every Security Center Service it has enabled (and vice-versa), helping you see which projects have security monitoring active in each region. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-service-directory-endpoint.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-service-directory-endpoint.md index bb6b4f89..76976f02 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-service-directory-endpoint.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-service-directory-endpoint.md @@ -3,7 +3,7 @@ title: GCP Service Directory Endpoint sidebar_label: gcp-service-directory-endpoint --- -A Service Directory Endpoint represents a concrete network destination that backs a Service Directory Service inside Google Cloud. Each endpoint records the IP address, port and (optionally) metadata that client workloads use to discover and call the service. Endpoints are created inside a hierarchy of **Project → Location → Namespace → Service → Endpoint** and are resolved at run-time through Service Directory’s DNS or HTTP APIs, allowing producers to register instances and consumers to discover them without hard-coding addresses. 
+A **Service Directory Endpoint** represents a concrete network endpoint (host/IP address and port) that implements a Service Directory service within a namespace and location. Clients resolve a service and obtain one or more endpoints in order to make network calls. Endpoints can carry arbitrary key-value metadata and may point at instances running inside a VPC, on-premises, or in another cloud. Official documentation: https://cloud.google.com/service-directory/docs/reference/rest/v1/projects.locations.namespaces.services.endpoints **Terrafrom Mappings:** @@ -20,4 +20,4 @@ Official documentation: https://cloud.google.com/service-directory/docs/referenc ### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) -Each endpoint is associated with a specific VPC network; the `network` field determines from which network the endpoint can be reached and which clients can resolve it. When Overmind discovers a Service Directory Endpoint, it links the item to the corresponding gcp-compute-network so you can trace service discovery issues back to network configuration or segmentation problems. +A Service Directory endpoint’s address usually resides within a VPC network. Linking an endpoint to its `gcp-compute-network` resource lets you trace which network the IP belongs to, ensuring that connectivity policies (firewalls, routes, private service access, etc.) permit clients to reach the service before deployment. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-service-usage-service.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-service-usage-service.md index eb94c112..64ceb440 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-service-usage-service.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-service-usage-service.md @@ -3,9 +3,8 @@ title: GCP Service Usage Service sidebar_label: gcp-service-usage-service --- -Represents an individual Google Cloud API or service (for example, `pubsub.googleapis.com`, `compute.googleapis.com`) that can be enabled or disabled within a project or folder via the Service Usage API. -It holds metadata such as the service’s name, state (ENABLED, DISABLED, etc.), configuration and any consumer-specific settings. Managing this resource controls whether dependent resources in the project are allowed to operate. -Official documentation: https://cloud.google.com/service-usage/docs/overview +A **Service Usage Service** represents an individual Google-managed API or service (e.g. `compute.googleapis.com`, `pubsub.googleapis.com`) and its enablement state inside a single GCP project. By querying this resource you can determine whether a particular service is currently enabled, disabled, or in another transitional state for that project, which is critical for understanding if downstream resources can be created successfully. +Official documentation: https://cloud.google.com/service-usage/docs/reference/rest/v1/services ## Supported Methods @@ -15,6 +14,10 @@ Official documentation: https://cloud.google.com/service-usage/docs/overview ## Possible Links +### [`gcp-cloud-resource-manager-project`](/sources/gcp/Types/gcp-cloud-resource-manager-project) + +Every Service Usage Service exists **within** a single Cloud Resource Manager project. The project acts as the parent container and dictates billing, IAM policies and quota that apply to the service. 
+ ### [`gcp-pub-sub-topic`](/sources/gcp/Types/gcp-pub-sub-topic) -A Pub/Sub topic can only exist and function if the `pubsub.googleapis.com` service is ENABLED in the same project. Overmind links a `gcp-service-usage-service` whose name is `pubsub.googleapis.com` to all `gcp-pub-sub-topic` resources in that project so that you can assess the blast radius of disabling the API. +A Pub/Sub topic can only be created or used if the **`pubsub.googleapis.com`** Service Usage Service is enabled in the same project. Overmind links the topic back to its enabling service so you can quickly spot configuration drift or missing API enablement that would prevent deployment. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-spanner-database.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-spanner-database.md index 7206a904..76d315a0 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-spanner-database.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-spanner-database.md @@ -3,9 +3,7 @@ title: GCP Spanner Database sidebar_label: gcp-spanner-database --- -Google Cloud Spanner is Google Cloud’s fully-managed, horizontally-scalable, relational database service. -A Spanner **database** is the logical container that holds your tables, schema objects and data inside a Spanner instance. Each database inherits the instance’s compute and storage configuration and can be encrypted either with Google-managed keys or with a customer-managed key (CMEK). -For an overview of the service see the official documentation: https://cloud.google.com/spanner/docs/overview +A GCP Spanner Database is a logically isolated collection of relational data that lives inside a Cloud Spanner instance. It contains the schema (tables, indexes, views) and the data itself, and it inherits the instance’s compute and storage resources. 
Cloud Spanner provides global consistency, horizontal scalability and automatic replication, making the database suitable for mission-critical, globally distributed workloads. Official documentation: https://cloud.google.com/spanner/docs **Terrafrom Mappings:** @@ -21,8 +19,16 @@ For an overview of the service see the official documentation: https://cloud.goo ### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) -A Spanner database can be encrypted with a customer-managed encryption key (CMEK) stored in Cloud KMS. When CMEK is enabled, the database resource is linked to the specific `gcp-cloud-kms-crypto-key` that provides its encryption. +A Spanner database can be encrypted with a customer-managed encryption key (CMEK) stored in Cloud KMS. Overmind links the database to the KMS Crypto Key that protects its data at rest. + +### [`gcp-cloud-kms-crypto-key-version`](/sources/gcp/Types/gcp-cloud-kms-crypto-key-version) + +When CMEK is enabled, Spanner actually uses a specific version of the KMS key. This link shows the exact key version currently in use so you can track key rotation and ensure compliance. + +### [`gcp-spanner-database`](/sources/gcp/Types/gcp-spanner-database) + +Spanner databases may reference one another through backups, clones or restores. Overmind records these relationships (e.g., a database restored from another) to expose any dependency chain between databases. ### [`gcp-spanner-instance`](/sources/gcp/Types/gcp-spanner-instance) -Every Spanner database lives inside a Spanner instance. The database inherits performance characteristics and regional configuration from its parent `gcp-spanner-instance`, making this a direct parent–child relationship. +Every Spanner database belongs to a single Spanner instance. This link lets you traverse from the database to the parent instance to understand the compute resources, regional configuration and IAM policies that ultimately govern the database. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-spanner-instance.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-spanner-instance.md index b3820f65..67c03c71 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-spanner-instance.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-spanner-instance.md @@ -3,8 +3,8 @@ title: GCP Spanner Instance sidebar_label: gcp-spanner-instance --- -A **Cloud Spanner instance** is the top-level container for Cloud Spanner resources in Google Cloud. It specifies the geographic placement of the underlying nodes, the amount of compute capacity allocated (measured in processing units), and the instance’s name and labels. All Cloud Spanner databases and their data live inside an instance, and the instance’s configuration determines their availability and latency characteristics. -For full details see the Google Cloud documentation: https://cloud.google.com/spanner/docs/instances +A **Cloud Spanner instance** is the top-level container that defines the geographical placement, compute capacity and billing context for one or more Cloud Spanner databases. When you create an instance you choose an instance configuration (regional or multi-regional) and allocate compute in the form of nodes or processing units; all databases created within the instance inherit this configuration and capacity. Google manages replication, automatic fail-over and online scaling transparently within the boundaries of the instance. +For full details see the official documentation: https://cloud.google.com/spanner/docs/instances **Terrafrom Mappings:** @@ -20,4 +20,4 @@ For full details see the Google Cloud documentation: https://cloud.google.com/sp ### [`gcp-spanner-database`](/sources/gcp/Types/gcp-spanner-database) -A Cloud Spanner instance can host one or more Cloud Spanner databases. 
Each `gcp-spanner-database` discovered by Overmind will therefore be linked to the `gcp-spanner-instance` that contains it, allowing you to see which databases would be affected by changes to, or deletion of, the parent instance. +Each Cloud Spanner instance can contain multiple Cloud Spanner databases. The `gcp-spanner-database` resource is therefore a child of the `gcp-spanner-instance`; enumerating databases or assessing their risks starts with traversing from the parent instance to its associated databases. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-sql-admin-backup-run.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-sql-admin-backup-run.md index 7192c072..b5ce83cf 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-sql-admin-backup-run.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-sql-admin-backup-run.md @@ -3,7 +3,8 @@ title: GCP Sql Admin Backup Run sidebar_label: gcp-sql-admin-backup-run --- -A GCP SQL Admin Backup Run represents an individual on-demand or automatically-scheduled backup created for a Cloud SQL instance. Each backup run records metadata such as its status, start and end times, location, encryption information and size. Backup runs are managed through the Cloud SQL Admin API and can be listed, retrieved or deleted by project administrators. For full details see Google’s documentation: https://cloud.google.com/sql/docs/mysql/admin-api/rest/v1/backupRuns +A **Cloud SQL Backup Run** represents a single on-demand or automated backup operation for a Cloud SQL instance. It records when the backup was initiated, its status, size, location, encryption information and other metadata. Backup runs allow administrators to restore an instance to a previous state or to clone data into a new instance. 
+Official documentation: https://cloud.google.com/sql/docs/mysql/admin-api/rest/v1/backupRuns ## Supported Methods @@ -15,8 +16,12 @@ A GCP SQL Admin Backup Run represents an individual on-demand or automatically-s ### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) -If a Cloud SQL instance is configured with customer-managed encryption keys (CMEK), the backup run is encrypted with the specified KMS CryptoKey. The backup run therefore references the CryptoKey used for encryption. +If Customer-Managed Encryption Keys (CMEK) are enabled for the instance, the backup run is encrypted with a Cloud KMS Crypto Key. This link points to the parent key that protects the specific key version used for the backup. + +### [`gcp-cloud-kms-crypto-key-version`](/sources/gcp/Types/gcp-cloud-kms-crypto-key-version) + +The `encryptionInfo` block inside the backup run references the exact Cloud KMS Crypto Key Version that encrypted the backup file. This relationship lets you trace which key version must be available to decrypt or restore the backup. ### [`gcp-sql-admin-instance`](/sources/gcp/Types/gcp-sql-admin-instance) -Every backup run belongs to exactly one Cloud SQL instance; the instance is the parent resource under which the backup run is created. +Every backup run belongs to a single Cloud SQL instance. This link connects the backup run to its parent instance so you can see which database the backup protects and assess the impact of restoring or deleting it. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-sql-admin-backup.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-sql-admin-backup.md index 3c686d99..ab2ee094 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-sql-admin-backup.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-sql-admin-backup.md @@ -3,9 +3,8 @@ title: GCP Sql Admin Backup sidebar_label: gcp-sql-admin-backup --- -A **GCP Sql Admin Backup** represents the backup configuration that protects a Cloud SQL instance. -The object contains the settings that determine when and how Google Cloud takes automatic or on-demand snapshots of the instance, including the backup window, retention period, and (when Customer-Managed Encryption Keys are used) the CryptoKey that encrypts the resulting files. -For a detailed description of Cloud SQL backups see the official documentation: https://cloud.google.com/sql/docs/mysql/backup-recovery/backups. +A **Cloud SQL backup** represents a point-in-time copy of the data stored in a Cloud SQL instance. Backups are created automatically on a schedule you define or manually on demand, and are retained in Google-managed Cloud Storage where they can later be used to restore the originating instance or clone a new one. Backups may be encrypted either with Google-managed keys or with a customer-managed encryption key (CMEK) from Cloud KMS. +See the official documentation for details: https://cloud.google.com/sql/docs/mysql/backup-recovery/backups ## Supported Methods @@ -17,12 +16,16 @@ For a detailed description of Cloud SQL backups see the official documentation: ### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) -If the backup is encrypted with a Customer-Managed Encryption Key (CMEK), Overmind links the backup to the `gcp-cloud-kms-crypto-key` that holds the key material. Analysing this relationship lets you verify that the key exists, is in the correct state, and has the appropriate IAM policy. 
+If CMEK encryption is enabled for the Cloud SQL instance, the backup is encrypted with a specific Cloud KMS CryptoKey. This link shows which key secures the backup data at rest. -### [`gcp-sql-admin-backup-run`](/sources/gcp/Types/gcp-sql-admin-backup-run) +### [`gcp-cloud-kms-crypto-key-version`](/sources/gcp/Types/gcp-cloud-kms-crypto-key-version) -Every time the backup configuration is executed it produces a Backup Run. This link connects the configuration to those individual `gcp-sql-admin-backup-run` objects, allowing you to trace whether recent runs succeeded and to inspect metadata such as the size and status of each run. +The actual ciphertext is tied to a particular CryptoKey **version**. Linking to the key version lets you see exactly which rotation of the key was used when the backup was taken. + +### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) + +Although backups are stored out-of-band, they are associated with the same VPC network(s) as the Cloud SQL instance that produced them. This link helps trace network-level access policies that apply when a backup is restored to an instance using private IP. ### [`gcp-sql-admin-instance`](/sources/gcp/Types/gcp-sql-admin-instance) -The backup configuration belongs to a specific Cloud SQL instance. This link points from the backup resource to the parent `gcp-sql-admin-instance`, helping you understand which database workload the backup protects and enabling dependency traversal from the instance to its safety mechanisms. +Every backup is generated from, and can be restored to, a specific Cloud SQL instance. This link identifies the parent instance, allowing you to evaluate how instance configuration (e.g. region, database version) affects backup usability and risk. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-sql-admin-instance.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-sql-admin-instance.md index 86a0993c..c4167965 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-sql-admin-instance.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-sql-admin-instance.md @@ -3,7 +3,7 @@ title: GCP Sql Admin Instance sidebar_label: gcp-sql-admin-instance --- -A GCP SQL Admin Instance represents a managed Cloud SQL database instance in Google Cloud Platform. It encapsulates the configuration of the database engine (MySQL, PostgreSQL or SQL Server), machine tier, storage, high-availability settings, networking and encryption options. The resource is managed through the Cloud SQL Admin API, which is documented here: https://cloud.google.com/sql/docs/mysql/admin-api/. Creating or modifying an instance via Terraform, the Cloud Console or gcloud ultimately results in API calls against this object. +A Google Cloud SQL Admin Instance represents a fully-managed relational database instance running on Google Cloud. It encapsulates the configuration for engines such as MySQL, PostgreSQL, or SQL Server, including CPU and memory sizing, version, storage, networking and encryption settings. For full details see the official documentation: https://cloud.google.com/sql/docs/introduction. **Terrafrom Mappings:** @@ -19,24 +19,28 @@ A GCP SQL Admin Instance represents a managed Cloud SQL database instance in Goo ### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) -If Customer-Managed Encryption Keys (CMEK) are enabled for the instance, the instance is encrypted with a specific Cloud KMS Crypto Key. Overmind links the instance to the `gcp-cloud-kms-crypto-key` that provides its disk-level encryption key. +Linked when the instance is encrypted with a Customer-Managed Encryption Key (CMEK); the instance stores the resource ID of the Cloud KMS crypto key it uses for data-at-rest encryption. 
### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) -When an instance is configured for private IP or has authorised networks for public IP access, it attaches to one or more VPC networks. Overmind therefore links the instance to the `gcp-compute-network` resources that define those VPCs. +Appears when the instance is configured with a private IP address. The instance is reachable through a Private Service Connection residing inside a specific VPC network. + +### [`gcp-compute-subnetwork`](/sources/gcp/Types/gcp-compute-subnetwork) + +If private IP is enabled, the instance is bound to a particular subnetwork from which it obtains its internal IP and through which it exposes its endpoints. ### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) -Cloud SQL automatically creates or uses service accounts to perform backups, replication and other administrative tasks. The instance is linked to the `gcp-iam-service-account` identities that act on its behalf, allowing you to trace permissions and potential privilege escalation paths. +Cloud SQL creates or uses a service account to perform administrative actions such as backup, replication and interaction with other Google Cloud services; this link surfaces that service account. ### [`gcp-sql-admin-backup-run`](/sources/gcp/Types/gcp-sql-admin-backup-run) -Each automated or on-demand backup of an instance is represented by a Backup Run resource. Overmind links every `gcp-sql-admin-backup-run` to the parent instance so you can see the full backup history and retention compliance. +Each successful or scheduled backup run is a child of an instance. The link shows all backup-run resources that belong to the current database instance. ### [`gcp-sql-admin-instance`](/sources/gcp/Types/gcp-sql-admin-instance) -Instances may reference other instances when configured for read replicas, high-availability failover or cloning. 
Overmind links an instance to any peer `gcp-sql-admin-instance` that serves as its primary, replica or clone source/target. +An instance can reference another instance as its read replica or as the source for cloning. This self-link captures those primary/replica relationships. ### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) -Cloud SQL supports import/export of SQL dump files and automatic log exports to Cloud Storage. The instance is linked to any `gcp-storage-bucket` that it reads from or writes to during these operations, revealing data-exfiltration or retention risks. +Imports, exports and point-in-time backups can read from or write to Cloud Storage. The instance therefore maintains references to buckets used for these operations. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-storage-bucket-iam-policy.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-storage-bucket-iam-policy.md new file mode 100644 index 00000000..c919de54 --- /dev/null +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-storage-bucket-iam-policy.md @@ -0,0 +1,36 @@ +--- +title: GCP Storage Bucket Iam Policy +sidebar_label: gcp-storage-bucket-iam-policy +--- + +A **Storage Bucket IAM policy** defines who (principals) can perform which actions (roles/permissions) on a specific Cloud Storage bucket. It is the fine-grained access-control object that sits on top of a bucket and overrides or complements broader project-level IAM settings. 
For full details, see the Google Cloud documentation: https://cloud.google.com/storage/docs/access-control/iam + +**Terraform Mappings:** + +- `google_storage_bucket_iam_binding.bucket` +- `google_storage_bucket_iam_member.bucket` +- `google_storage_bucket_iam_policy.bucket` + +## Supported Methods + +- `GET`: Get GCP Storage Bucket Iam Policy by "gcp-storage-bucket-iam-policy-bucket" +- ~~`LIST`~~ +- `SEARCH`: Search for GCP Storage Bucket Iam Policy by "gcp-storage-bucket-iam-policy-bucket" + +## Possible Links + +### [`gcp-compute-project`](/sources/gcp/Types/gcp-compute-project) + +The bucket IAM policy is scoped within a single GCP project; therefore every policy item is linked back to the project that owns the bucket. + +### [`gcp-iam-role`](/sources/gcp/Types/gcp-iam-role) + +Each binding inside the policy references one or more IAM roles that grant permissions; this link shows which predefined or custom roles are in use. + +### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) + +Service accounts are common principals in bucket policies. Linking reveals which service accounts have been granted access and with what privileges. + +### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) + +The IAM policy is attached to and governs a specific Cloud Storage bucket; this link connects the policy object to the underlying bucket resource. diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-storage-bucket.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-storage-bucket.md index a4b4edde..2efbb013 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-storage-bucket.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-storage-bucket.md @@ -3,11 +3,15 @@ title: GCP Storage Bucket sidebar_label: gcp-storage-bucket --- -A GCP Storage Bucket is a logical container in Google Cloud Storage that holds your objects (blobs).
Buckets provide globally-unique namespaces, configurable lifecycle policies, access controls, versioning, and encryption options, allowing organisations to store and serve unstructured data such as backups, media files, or static web assets. See the official documentation for full details: https://cloud.google.com/storage/docs/key-terms#buckets +A Google Cloud Storage Bucket is a globally-unique container used to store, organise and serve objects (files) in Google Cloud Storage. Buckets provide configuration points for data location, access control, lifecycle management, encryption and logging. They are the fundamental resource for object storage workloads such as static website hosting, backup, or data lakes. +For full details see the official documentation: https://cloud.google.com/storage/docs/buckets **Terrafrom Mappings:** - `google_storage_bucket.name` +- `google_storage_bucket_iam_binding.bucket` +- `google_storage_bucket_iam_member.bucket` +- `google_storage_bucket_iam_policy.bucket` ## Supported Methods @@ -17,14 +21,18 @@ A GCP Storage Bucket is a logical container in Google Cloud Storage that holds y ## Possible Links +### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) + +A bucket may be encrypted with a customer-managed encryption key (CMEK) that resides in Cloud KMS. The bucket’s encryption configuration therefore references the corresponding `gcp-cloud-kms-crypto-key`. + ### [`gcp-compute-network`](/sources/gcp/Types/gcp-compute-network) -Instances and other compute resources that run inside a VPC network often read from or write to a Storage Bucket. Additionally, when Private Google Access or VPC Service Controls are enabled, the bucket’s accessibility is governed by the associated compute network, creating a security dependency between the two resources. +When VPC Service Controls or Private Google Access are used, access between a Compute Network and a Storage Bucket is constrained or allowed based on network settings. 
Log sinks from VPC flow logs can also target a Storage Bucket, creating a relationship between the bucket and the originating `gcp-compute-network`. ### [`gcp-logging-bucket`](/sources/gcp/Types/gcp-logging-bucket) -Audit logs for a Storage Bucket can be routed into a Cloud Logging bucket, and Logging buckets can export their contents to a Storage Bucket. Either configuration establishes a link whereby changes to the Storage Bucket may affect log retention and compliance. +Cloud Logging can route logs from a Logging Bucket to Cloud Storage for long-term retention or auditing. If such a sink targets this Storage Bucket, the bucket becomes linked to the source `gcp-logging-bucket`. -### [`gcp-cloud-kms-crypto-key`](/sources/gcp/Types/gcp-cloud-kms-crypto-key) +### [`gcp-storage-bucket-iam-policy`](/sources/gcp/Types/gcp-storage-bucket-iam-policy) -A Storage Bucket can be configured to use Customer-Managed Encryption Keys (CMEK). When this option is enabled, the bucket references a Cloud KMS CryptoKey for data-at-rest encryption, making the bucket’s availability and security reliant on the referenced key’s state and permissions. +Every Storage Bucket has an IAM policy that defines who can read, write or administer it. That policy is exposed as a separate `gcp-storage-bucket-iam-policy` object, which is directly attached to this bucket. 
diff --git a/docs.overmind.tech/docs/sources/gcp/Types/gcp-storage-transfer-transfer-job.md b/docs.overmind.tech/docs/sources/gcp/Types/gcp-storage-transfer-transfer-job.md index 01476fc4..6a2b63ff 100644 --- a/docs.overmind.tech/docs/sources/gcp/Types/gcp-storage-transfer-transfer-job.md +++ b/docs.overmind.tech/docs/sources/gcp/Types/gcp-storage-transfer-transfer-job.md @@ -3,7 +3,8 @@ title: GCP Storage Transfer Transfer Job sidebar_label: gcp-storage-transfer-transfer-job --- -A Storage Transfer Service Job represents a scheduled or on-demand operation that copies data between cloud storage systems or from on-premises sources into Google Cloud Storage. A job defines source and destination locations, transfer options (such as whether to delete objects after transfer), scheduling, and optional notifications. For full details see the official Google documentation: https://cloud.google.com/storage-transfer/docs/overview +Google Cloud Storage Transfer Service enables you to copy or synchronise data between Cloud Storage buckets, on-premises file systems and external cloud providers. A Storage Transfer **transfer job** is the top-level resource that defines where data should be copied from, where it should be copied to, the schedule on which the copy should run, and options such as delete or overwrite rules. +Official documentation: https://cloud.google.com/storage-transfer/docs/create-transfers **Terrafrom Mappings:** @@ -19,16 +20,20 @@ A Storage Transfer Service Job represents a scheduled or on-demand operation tha ### [`gcp-iam-service-account`](/sources/gcp/Types/gcp-iam-service-account) -Storage Transfer Service creates and utilises a dedicated service account to read from the source and write to the destination. The transfer job must have the correct IAM roles granted on this service account, making the two resources inherently linked. 
+The transfer job runs under a Google-managed or user-specified IAM service account, which needs roles such as `Storage Object Admin` on the destination bucket and, when applicable, permissions to access the source. ### [`gcp-pub-sub-subscription`](/sources/gcp/Types/gcp-pub-sub-subscription) -If transfer job notifications are configured, the Storage Transfer Service publishes messages to a Pub/Sub topic. A subscription attached to that topic receives the events, so a job that emits notifications will be related to the downstream subscriptions. +If event notifications are enabled, a Pub/Sub subscription can pull the messages that the transfer job publishes when it starts, completes, or encounters errors. ### [`gcp-pub-sub-topic`](/sources/gcp/Types/gcp-pub-sub-topic) -The transfer job can be configured to send success, failure, or progress notifications to a specific Pub/Sub topic. That topic therefore has a direct relationship with the job. +A transfer job can be configured with a Pub/Sub topic as its notification destination so that operational events are published for downstream processing or alerting. + +### [`gcp-secret-manager-secret`](/sources/gcp/Types/gcp-secret-manager-secret) + +When transferring from external providers such as AWS S3 or Azure Blob Storage, the access keys and credentials are often stored in Secret Manager secrets, which the transfer job references to authenticate to the source. ### [`gcp-storage-bucket`](/sources/gcp/Types/gcp-storage-bucket) -Buckets are commonly used as both sources and destinations for transfer jobs. Any bucket referenced in the `transferSpec` of a job (either as a source or destination) is linked to that job. +Every transfer job specifies at least one Cloud Storage bucket as a source and/or destination; therefore it has direct relationships to the buckets involved in the data copy. 
diff --git a/docs.overmind.tech/docs/sources/gcp/configuration.md b/docs.overmind.tech/docs/sources/gcp/configuration.md index 450d3b11..4d56efcc 100644 --- a/docs.overmind.tech/docs/sources/gcp/configuration.md +++ b/docs.overmind.tech/docs/sources/gcp/configuration.md @@ -142,7 +142,7 @@ Permissions can be applied at any level of the GCP resource hierarchy and are in **Direct Access:** -``` +```text Your GCP Organization/Folder/Project └─ Overmind Service Account └─ Granted: Viewer roles (+ custom role for project-level) @@ -151,7 +151,7 @@ Your GCP Organization/Folder/Project **Service Account Impersonation:** -``` +```text Your GCP Organization/Folder/Project ├─ Your Service Account │ └─ Granted: Viewer roles (+ custom role for project-level) @@ -267,9 +267,11 @@ Re-run the setup script or check for organization-level policies restricting ser 1. Verify regional configuration matches where your resources exist 2. For project-level parents, check that required GCP APIs are enabled: + ```bash gcloud services list --enabled --project=YOUR_PROJECT_ID ``` + 3. For organization or folder-level parents, verify that you have the necessary permissions to list projects and that child projects have the required APIs enabled 4. 
Some resources may require additional permissions at different levels of the hierarchy @@ -391,7 +393,7 @@ Here are all the predefined GCP roles that Overmind requires, plus the custom ro | Role | Purpose | | --------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `roles/browser` | **Required:** List projects and navigate resource hierarchy [GCP Docs](https://cloud.google.com/iam/docs/understanding-roles#browser) | +| `roles/browser` | **Required:** List projects and navigate resource hierarchy [GCP Docs](https://cloud.google.com/iam/docs/understanding-roles#browser) | | `roles/aiplatform.viewer` | AI Platform resource discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/aiplatform#aiplatform.viewer) | | `roles/artifactregistry.reader` | Artifact Registry repository discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/artifactregistry#artifactregistry.reader) | | `roles/bigquery.metadataViewer` | BigQuery metadata discovery [GCP Docs](https://cloud.google.com/iam/docs/roles-permissions/bigquery#bigquery.metadataViewer) | @@ -434,6 +436,6 @@ Here are all the predefined GCP roles that Overmind requires, plus the custom ro | ------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `projects/{PROJECT_ID}/roles/overmindCustomRole` | Custom role for additional BigQuery and Spanner permissions **Permissions:** `bigquery.transfers.get` - BigQuery transfer configuration discovery, `spanner.databases.get` - Spanner database detail discovery, `spanner.databases.list` - Spanner database enumeration | -All predefined roles provide 
read-only access and are sourced from Google Cloud's [predefined roles documentation](https://cloud.google.com/iam/docs/understanding-roles#predefined). +All predefined roles provide read-only access and are sourced from Google Cloud's [predefined roles documentation](https://cloud.google.com/iam/docs/understanding-roles#predefined). **Project-Level Restrictions:** Some roles (`roles/iam.roleViewer` and `roles/iam.serviceAccountViewer`) can only be granted at the project level in GCP. When configuring at the organization or folder level, these roles are automatically excluded. The custom role is also only created and assigned when using a project-level parent (e.g., `projects/my-project`). diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-ai-platform-batch-prediction-job.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-ai-platform-batch-prediction-job.json index 967b9b4b..14ebbab7 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-ai-platform-batch-prediction-job.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-ai-platform-batch-prediction-job.json @@ -2,9 +2,11 @@ "type": "gcp-ai-platform-batch-prediction-job", "category": 8, "potentialLinks": [ + "gcp-ai-platform-endpoint", "gcp-ai-platform-model", "gcp-big-query-table", "gcp-cloud-kms-crypto-key", + "gcp-compute-network", "gcp-iam-service-account", "gcp-storage-bucket" ], diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-ai-platform-custom-job.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-ai-platform-custom-job.json index 22003f70..6301d3ec 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-ai-platform-custom-job.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-ai-platform-custom-job.json @@ -3,6 +3,7 @@ "category": 8, "potentialLinks": [ "gcp-ai-platform-model", + "gcp-artifact-registry-docker-image", "gcp-cloud-kms-crypto-key", "gcp-compute-network", "gcp-iam-service-account", diff --git 
a/docs.overmind.tech/docs/sources/gcp/data/gcp-ai-platform-endpoint.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-ai-platform-endpoint.json index 67f77a9c..ff51ee9d 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-ai-platform-endpoint.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-ai-platform-endpoint.json @@ -6,7 +6,8 @@ "gcp-ai-platform-model-deployment-monitoring-job", "gcp-big-query-table", "gcp-cloud-kms-crypto-key", - "gcp-compute-network" + "gcp-compute-network", + "gcp-iam-service-account" ], "descriptiveName": "GCP Ai Platform Endpoint", "supportedQueryMethods": { diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-ai-platform-model-deployment-monitoring-job.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-ai-platform-model-deployment-monitoring-job.json index 5d1e6d04..7de24b4a 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-ai-platform-model-deployment-monitoring-job.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-ai-platform-model-deployment-monitoring-job.json @@ -4,8 +4,10 @@ "potentialLinks": [ "gcp-ai-platform-endpoint", "gcp-ai-platform-model", + "gcp-big-query-table", "gcp-cloud-kms-crypto-key", - "gcp-monitoring-notification-channel" + "gcp-monitoring-notification-channel", + "gcp-storage-bucket" ], "descriptiveName": "GCP Ai Platform Model Deployment Monitoring Job", "supportedQueryMethods": { diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-ai-platform-model.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-ai-platform-model.json index a0ccd633..2b6f499f 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-ai-platform-model.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-ai-platform-model.json @@ -5,7 +5,8 @@ "gcp-ai-platform-endpoint", "gcp-ai-platform-pipeline-job", "gcp-artifact-registry-docker-image", - "gcp-cloud-kms-crypto-key" + "gcp-cloud-kms-crypto-key", + "gcp-storage-bucket" ], "descriptiveName": "GCP Ai Platform Model", "supportedQueryMethods": { diff --git 
a/docs.overmind.tech/docs/sources/gcp/data/gcp-big-query-data-transfer-transfer-config.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-big-query-data-transfer-transfer-config.json index fa7b6a88..db2af391 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-big-query-data-transfer-transfer-config.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-big-query-data-transfer-transfer-config.json @@ -4,6 +4,7 @@ "potentialLinks": [ "gcp-big-query-dataset", "gcp-cloud-kms-crypto-key", + "gcp-iam-service-account", "gcp-pub-sub-topic" ], "descriptiveName": "GCP Big Query Data Transfer Transfer Config", diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-big-query-dataset.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-big-query-dataset.json index 3efc5539..d9a0685a 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-big-query-dataset.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-big-query-dataset.json @@ -3,7 +3,6 @@ "category": 6, "potentialLinks": [ "gcp-big-query-dataset", - "gcp-big-query-model", "gcp-big-query-routine", "gcp-big-query-table", "gcp-cloud-kms-crypto-key", @@ -19,6 +18,15 @@ "terraformMappings": [ { "terraformQueryMap": "google_bigquery_dataset.dataset_id" + }, + { + "terraformQueryMap": "google_bigquery_dataset_iam_binding.dataset_id" + }, + { + "terraformQueryMap": "google_bigquery_dataset_iam_member.dataset_id" + }, + { + "terraformQueryMap": "google_bigquery_dataset_iam_policy.dataset_id" } ] } diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-big-query-model.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-big-query-model.json deleted file mode 100644 index be1751c4..00000000 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-big-query-model.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "type": "gcp-big-query-model", - "category": 6, - "potentialLinks": [ - "gcp-big-query-dataset", - "gcp-big-query-table", - "gcp-cloud-kms-crypto-key" - ], - "descriptiveName": "GCP Big Query Model", - 
"supportedQueryMethods": { - "get": true, - "getDescription": "Get GCP Big Query Model by \"gcp-big-query-dataset-id|gcp-big-query-model-id\"", - "search": true, - "searchDescription": "Search for GCP Big Query Model by \"gcp-big-query-model-id\"" - } -} diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-big-query-routine.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-big-query-routine.json index b9704125..76adabc7 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-big-query-routine.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-big-query-routine.json @@ -1,7 +1,7 @@ { "type": "gcp-big-query-routine", "category": 6, - "potentialLinks": ["gcp-big-query-dataset"], + "potentialLinks": ["gcp-big-query-dataset", "gcp-storage-bucket"], "descriptiveName": "GCP Big Query Routine", "supportedQueryMethods": { "get": true, @@ -11,7 +11,8 @@ }, "terraformMappings": [ { - "terraformQueryMap": "google_bigquery_routine.routine_id" + "terraformMethod": 2, + "terraformQueryMap": "google_bigquery_routine.id" } ] } diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-big-query-table.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-big-query-table.json index 6c9b01c6..973275de 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-big-query-table.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-big-query-table.json @@ -1,7 +1,12 @@ { "type": "gcp-big-query-table", "category": 6, - "potentialLinks": ["gcp-big-query-dataset", "gcp-cloud-kms-crypto-key"], + "potentialLinks": [ + "gcp-big-query-dataset", + "gcp-big-query-table", + "gcp-cloud-kms-crypto-key", + "gcp-storage-bucket" + ], "descriptiveName": "GCP Big Query Table", "supportedQueryMethods": { "get": true, @@ -11,7 +16,20 @@ }, "terraformMappings": [ { + "terraformMethod": 2, "terraformQueryMap": "google_bigquery_table.id" + }, + { + "terraformMethod": 2, + "terraformQueryMap": "google_bigquery_table_iam_binding.dataset_id" + }, + { + "terraformMethod": 2, + "terraformQueryMap": 
"google_bigquery_table_iam_member.dataset_id" + }, + { + "terraformMethod": 2, + "terraformQueryMap": "google_bigquery_table_iam_policy.dataset_id" } ] } diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-big-table-admin-backup.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-big-table-admin-backup.json index 3f8518c8..30d81630 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-big-table-admin-backup.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-big-table-admin-backup.json @@ -3,7 +3,8 @@ "potentialLinks": [ "gcp-big-table-admin-backup", "gcp-big-table-admin-cluster", - "gcp-big-table-admin-table" + "gcp-big-table-admin-table", + "gcp-cloud-kms-crypto-key-version" ], "descriptiveName": "GCP Big Table Admin Backup", "supportedQueryMethods": { diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-big-table-admin-instance.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-big-table-admin-instance.json index fabaff46..aca434bd 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-big-table-admin-instance.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-big-table-admin-instance.json @@ -12,6 +12,15 @@ "terraformMappings": [ { "terraformQueryMap": "google_bigtable_instance.name" + }, + { + "terraformQueryMap": "google_bigtable_instance_iam_binding.instance" + }, + { + "terraformQueryMap": "google_bigtable_instance_iam_member.instance" + }, + { + "terraformQueryMap": "google_bigtable_instance_iam_policy.instance" } ] } diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-big-table-admin-table.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-big-table-admin-table.json index f490a956..7816c24b 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-big-table-admin-table.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-big-table-admin-table.json @@ -17,6 +17,18 @@ { "terraformMethod": 2, "terraformQueryMap": "google_bigtable_table.id" + }, + { + "terraformMethod": 2, + "terraformQueryMap": 
"google_bigtable_table_iam_binding.instance_name" + }, + { + "terraformMethod": 2, + "terraformQueryMap": "google_bigtable_table_iam_member.instance_name" + }, + { + "terraformMethod": 2, + "terraformQueryMap": "google_bigtable_table_iam_policy.instance_name" } ] } diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-certificate-manager-certificate.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-certificate-manager-certificate.json new file mode 100644 index 00000000..ebaf5d4c --- /dev/null +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-certificate-manager-certificate.json @@ -0,0 +1,17 @@ +{ + "type": "gcp-certificate-manager-certificate", + "category": 4, + "descriptiveName": "GCP Certificate Manager Certificate", + "supportedQueryMethods": { + "get": true, + "getDescription": "Get GCP Certificate Manager Certificate by \"gcp-certificate-manager-certificate-location|gcp-certificate-manager-certificate-name\"", + "search": true, + "searchDescription": "Search for GCP Certificate Manager Certificate by \"gcp-certificate-manager-certificate-location\"" + }, + "terraformMappings": [ + { + "terraformMethod": 2, + "terraformQueryMap": "google_certificate_manager_certificate.id" + } + ] +} diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-cloud-build-build.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-cloud-build-build.json index 234cabf0..e2c3657a 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-cloud-build-build.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-cloud-build-build.json @@ -3,8 +3,10 @@ "category": 7, "potentialLinks": [ "gcp-artifact-registry-docker-image", + "gcp-cloud-kms-crypto-key", "gcp-iam-service-account", "gcp-logging-bucket", + "gcp-secret-manager-secret", "gcp-storage-bucket" ], "descriptiveName": "GCP Cloud Build Build", diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-cloud-kms-crypto-key-version.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-cloud-kms-crypto-key-version.json new file 
mode 100644 index 00000000..a7a95898 --- /dev/null +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-cloud-kms-crypto-key-version.json @@ -0,0 +1,18 @@ +{ + "type": "gcp-cloud-kms-crypto-key-version", + "category": 4, + "potentialLinks": ["gcp-cloud-kms-crypto-key"], + "descriptiveName": "GCP Cloud Kms Crypto Key Version", + "supportedQueryMethods": { + "get": true, + "getDescription": "Get GCP Cloud Kms Crypto Key Version by \"gcp-cloud-kms-key-ring-location|gcp-cloud-kms-key-ring-name|gcp-cloud-kms-crypto-key-name|gcp-cloud-kms-crypto-key-version-version\"", + "search": true, + "searchDescription": "Search for GCP Cloud Kms Crypto Key Version by \"gcp-cloud-kms-key-ring-location|gcp-cloud-kms-key-ring-name|gcp-cloud-kms-crypto-key-name\"" + }, + "terraformMappings": [ + { + "terraformMethod": 2, + "terraformQueryMap": "google_kms_crypto_key_version.id" + } + ] +} diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-cloud-kms-crypto-key.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-cloud-kms-crypto-key.json index c28ec0de..f8eb5e0c 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-cloud-kms-crypto-key.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-cloud-kms-crypto-key.json @@ -1,12 +1,21 @@ { "type": "gcp-cloud-kms-crypto-key", "category": 4, - "potentialLinks": ["gcp-cloud-kms-key-ring"], + "potentialLinks": [ + "gcp-cloud-kms-crypto-key-version", + "gcp-cloud-kms-key-ring" + ], "descriptiveName": "GCP Cloud Kms Crypto Key", "supportedQueryMethods": { "get": true, "getDescription": "Get GCP Cloud Kms Crypto Key by \"gcp-cloud-kms-key-ring-location|gcp-cloud-kms-key-ring-name|gcp-cloud-kms-crypto-key-name\"", "search": true, "searchDescription": "Search for GCP Cloud Kms Crypto Key by \"gcp-cloud-kms-key-ring-location|gcp-cloud-kms-key-ring-name\"" - } + }, + "terraformMappings": [ + { + "terraformMethod": 2, + "terraformQueryMap": "google_kms_crypto_key.id" + } + ] } diff --git 
a/docs.overmind.tech/docs/sources/gcp/data/gcp-cloud-kms-key-ring.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-cloud-kms-key-ring.json index 6eeaa47c..76173476 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-cloud-kms-key-ring.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-cloud-kms-key-ring.json @@ -6,12 +6,15 @@ "supportedQueryMethods": { "get": true, "getDescription": "Get GCP Cloud Kms Key Ring by \"gcp-cloud-kms-key-ring-location|gcp-cloud-kms-key-ring-name\"", + "list": true, + "listDescription": "List all GCP Cloud Kms Key Ring items", "search": true, "searchDescription": "Search for GCP Cloud Kms Key Ring by \"gcp-cloud-kms-key-ring-location\"" }, "terraformMappings": [ { - "terraformQueryMap": "google_kms_key_ring.name" + "terraformMethod": 2, + "terraformQueryMap": "google_kms_key_ring.id" } ] } diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-address.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-address.json index 53ebe1ef..0d1ecaa1 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-address.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-address.json @@ -3,7 +3,12 @@ "category": 3, "potentialLinks": [ "gcp-compute-address", + "gcp-compute-forwarding-rule", + "gcp-compute-global-forwarding-rule", + "gcp-compute-instance", "gcp-compute-network", + "gcp-compute-public-delegated-prefix", + "gcp-compute-router", "gcp-compute-subnetwork" ], "descriptiveName": "GCP Compute Address", diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-backend-service.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-backend-service.json index c103843e..7d40a584 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-backend-service.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-backend-service.json @@ -1,7 +1,14 @@ { "type": "gcp-compute-backend-service", "category": 1, - "potentialLinks": ["gcp-compute-network", 
"gcp-compute-security-policy"], + "potentialLinks": [ + "gcp-compute-health-check", + "gcp-compute-instance", + "gcp-compute-instance-group", + "gcp-compute-network", + "gcp-compute-network-endpoint-group", + "gcp-compute-security-policy" + ], "descriptiveName": "GCP Compute Backend Service", "supportedQueryMethods": { "get": true, @@ -12,6 +19,9 @@ "terraformMappings": [ { "terraformQueryMap": "google_compute_backend_service.name" + }, + { + "terraformQueryMap": "google_compute_region_backend_service.name" } ] } diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-disk.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-disk.json index 435b2c8c..9f916ca9 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-disk.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-disk.json @@ -2,11 +2,13 @@ "type": "gcp-compute-disk", "category": 2, "potentialLinks": [ + "gcp-cloud-kms-crypto-key-version", "gcp-compute-disk", "gcp-compute-image", "gcp-compute-instance", "gcp-compute-instant-snapshot", - "gcp-compute-snapshot" + "gcp-compute-snapshot", + "gcp-storage-bucket" ], "descriptiveName": "GCP Compute Disk", "supportedQueryMethods": { diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-firewall.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-firewall.json index 6dd11c29..6cdeae3f 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-firewall.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-firewall.json @@ -1,13 +1,19 @@ { "type": "gcp-compute-firewall", "category": 3, - "potentialLinks": ["gcp-compute-network", "gcp-iam-service-account"], + "potentialLinks": [ + "gcp-compute-instance", + "gcp-compute-network", + "gcp-iam-service-account" + ], "descriptiveName": "GCP Compute Firewall", "supportedQueryMethods": { "get": true, "getDescription": "Get a gcp-compute-firewall by its \"name\"", "list": true, - "listDescription": "List all gcp-compute-firewall" + "listDescription": 
"List all gcp-compute-firewall", + "search": true, + "searchDescription": "Search for firewalls by network tag. The query is a plain network tag name." }, "terraformMappings": [ { diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-forwarding-rule.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-forwarding-rule.json index bd2ce7de..c4ef38d6 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-forwarding-rule.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-forwarding-rule.json @@ -3,8 +3,13 @@ "category": 3, "potentialLinks": [ "gcp-compute-backend-service", + "gcp-compute-forwarding-rule", "gcp-compute-network", - "gcp-compute-subnetwork" + "gcp-compute-public-delegated-prefix", + "gcp-compute-subnetwork", + "gcp-compute-target-http-proxy", + "gcp-compute-target-https-proxy", + "gcp-compute-target-pool" ], "descriptiveName": "GCP Compute Forwarding Rule", "supportedQueryMethods": { diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-global-address.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-global-address.json index 84db32de..84a97610 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-global-address.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-global-address.json @@ -1,7 +1,11 @@ { "type": "gcp-compute-global-address", "category": 3, - "potentialLinks": ["gcp-compute-network"], + "potentialLinks": [ + "gcp-compute-network", + "gcp-compute-public-delegated-prefix", + "gcp-compute-subnetwork" + ], "descriptiveName": "GCP Compute Global Address", "supportedQueryMethods": { "get": true, diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-global-forwarding-rule.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-global-forwarding-rule.json index 28c37fc0..2700eba7 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-global-forwarding-rule.json +++ 
b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-global-forwarding-rule.json @@ -4,7 +4,8 @@ "potentialLinks": [ "gcp-compute-backend-service", "gcp-compute-network", - "gcp-compute-subnetwork" + "gcp-compute-subnetwork", + "gcp-compute-target-http-proxy" ], "descriptiveName": "GCP Compute Global Forwarding Rule", "supportedQueryMethods": { diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-health-check.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-health-check.json index beda2c95..6f36f867 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-health-check.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-health-check.json @@ -11,6 +11,9 @@ "terraformMappings": [ { "terraformQueryMap": "google_compute_health_check.name" + }, + { + "terraformQueryMap": "google_compute_region_health_check.name" } ] } diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-image.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-image.json index acf4c7a2..427ff58a 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-image.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-image.json @@ -1,12 +1,23 @@ { "type": "gcp-compute-image", "category": 1, + "potentialLinks": [ + "gcp-cloud-kms-crypto-key", + "gcp-cloud-kms-crypto-key-version", + "gcp-compute-disk", + "gcp-compute-image", + "gcp-compute-snapshot", + "gcp-iam-service-account", + "gcp-storage-bucket" + ], "descriptiveName": "GCP Compute Image", "supportedQueryMethods": { "get": true, "getDescription": "Get GCP Compute Image by \"gcp-compute-image-name\"", "list": true, - "listDescription": "List all GCP Compute Image items" + "listDescription": "List all GCP Compute Image items", + "search": true, + "searchDescription": "Search for GCP Compute Image by \"gcp-compute-image-family\"" }, "terraformMappings": [ { diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-instance-group-manager.json 
b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-instance-group-manager.json index bc80be40..566c1414 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-instance-group-manager.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-instance-group-manager.json @@ -3,6 +3,7 @@ "category": 1, "potentialLinks": [ "gcp-compute-autoscaler", + "gcp-compute-health-check", "gcp-compute-instance-group", "gcp-compute-instance-template", "gcp-compute-target-pool" diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-instance-template.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-instance-template.json index b56bff5c..5b335a19 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-instance-template.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-instance-template.json @@ -4,11 +4,13 @@ "potentialLinks": [ "gcp-cloud-kms-crypto-key", "gcp-compute-disk", + "gcp-compute-firewall", "gcp-compute-image", "gcp-compute-instance", "gcp-compute-network", "gcp-compute-node-group", "gcp-compute-reservation", + "gcp-compute-route", "gcp-compute-security-policy", "gcp-compute-snapshot", "gcp-compute-subnetwork", @@ -19,7 +21,9 @@ "get": true, "getDescription": "Get a gcp-compute-instance-template by its \"name\"", "list": true, - "listDescription": "List all gcp-compute-instance-template" + "listDescription": "List all gcp-compute-instance-template", + "search": true, + "searchDescription": "Search for instance templates by network tag. The query is a plain network tag name." 
}, "terraformMappings": [ { diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-instance.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-instance.json index b4700f94..b734dbe4 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-instance.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-instance.json @@ -2,16 +2,27 @@ "type": "gcp-compute-instance", "category": 1, "potentialLinks": [ + "gcp-cloud-kms-crypto-key", + "gcp-cloud-kms-crypto-key-version", "gcp-compute-disk", + "gcp-compute-firewall", + "gcp-compute-image", + "gcp-compute-instance-group-manager", + "gcp-compute-instance-template", "gcp-compute-network", - "gcp-compute-subnetwork" + "gcp-compute-route", + "gcp-compute-snapshot", + "gcp-compute-subnetwork", + "gcp-iam-service-account" ], "descriptiveName": "GCP Compute Instance", "supportedQueryMethods": { "get": true, "getDescription": "Get GCP Compute Instance by \"gcp-compute-instance-name\"", "list": true, - "listDescription": "List all GCP Compute Instance items" + "listDescription": "List all GCP Compute Instance items", + "search": true, + "searchDescription": "Search for GCP Compute Instance by \"gcp-compute-instance-networkTag\"" }, "terraformMappings": [ { diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-machine-image.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-machine-image.json index 72970664..a34228ff 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-machine-image.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-machine-image.json @@ -2,10 +2,14 @@ "type": "gcp-compute-machine-image", "category": 1, "potentialLinks": [ + "gcp-cloud-kms-crypto-key-version", "gcp-compute-disk", + "gcp-compute-image", "gcp-compute-instance", "gcp-compute-network", - "gcp-compute-subnetwork" + "gcp-compute-snapshot", + "gcp-compute-subnetwork", + "gcp-iam-service-account" ], "descriptiveName": "GCP Compute Machine Image", "supportedQueryMethods": { 
diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-node-group.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-node-group.json index 73f3c8c0..aa2b9ac3 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-node-group.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-node-group.json @@ -1,6 +1,7 @@ { "type": "gcp-compute-node-group", "category": 1, + "potentialLinks": ["gcp-compute-node-template"], "descriptiveName": "GCP Compute Node Group", "supportedQueryMethods": { "get": true, diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-node-template.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-node-template.json new file mode 100644 index 00000000..cb5ddf88 --- /dev/null +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-node-template.json @@ -0,0 +1,17 @@ +{ + "type": "gcp-compute-node-template", + "category": 7, + "potentialLinks": ["gcp-compute-node-group"], + "descriptiveName": "GCP Compute Node Template", + "supportedQueryMethods": { + "get": true, + "getDescription": "Get GCP Compute Node Template by \"gcp-compute-node-template-name\"", + "list": true, + "listDescription": "List all GCP Compute Node Template items" + }, + "terraformMappings": [ + { + "terraformQueryMap": "google_compute_node_template.name" + } + ] +} diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-project.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-project.json index b6c4fd2b..b0ecba12 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-project.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-project.json @@ -6,5 +6,31 @@ "supportedQueryMethods": { "get": true, "getDescription": "Get a gcp-compute-project by its \"name\"" - } + }, + "terraformMappings": [ + { + "terraformQueryMap": "google_project.project_id" + }, + { + "terraformQueryMap": "google_compute_shared_vpc_host_project.project" + }, + { + "terraformQueryMap": 
"google_compute_shared_vpc_service_project.service_project" + }, + { + "terraformQueryMap": "google_compute_shared_vpc_service_project.host_project" + }, + { + "terraformQueryMap": "google_project_iam_binding.project" + }, + { + "terraformQueryMap": "google_project_iam_member.project" + }, + { + "terraformQueryMap": "google_project_iam_policy.project" + }, + { + "terraformQueryMap": "google_project_iam_audit_config.project" + } + ] } diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-region-backend-service.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-region-backend-service.json deleted file mode 100644 index 81449263..00000000 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-region-backend-service.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "type": "gcp-compute-region-backend-service", - "category": 1, - "potentialLinks": [ - "gcp-compute-instance-group", - "gcp-compute-network", - "gcp-compute-security-policy" - ], - "descriptiveName": "GCP Compute Region Backend Service", - "supportedQueryMethods": { - "get": true, - "getDescription": "Get GCP Compute Region Backend Service by \"gcp-compute-region-backend-service-name\"", - "list": true, - "listDescription": "List all GCP Compute Region Backend Service items" - }, - "terraformMappings": [ - { - "terraformQueryMap": "google_compute_region_backend_service.name" - } - ] -} diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-regional-instance-group-manager.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-regional-instance-group-manager.json new file mode 100644 index 00000000..4dd43820 --- /dev/null +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-regional-instance-group-manager.json @@ -0,0 +1,23 @@ +{ + "type": "gcp-compute-regional-instance-group-manager", + "category": 1, + "potentialLinks": [ + "gcp-compute-autoscaler", + "gcp-compute-health-check", + "gcp-compute-instance-group", + "gcp-compute-instance-template", + 
"gcp-compute-target-pool" + ], + "descriptiveName": "GCP Compute Regional Instance Group Manager", + "supportedQueryMethods": { + "get": true, + "getDescription": "Get GCP Compute Regional Instance Group Manager by \"gcp-compute-regional-instance-group-manager-name\"", + "list": true, + "listDescription": "List all GCP Compute Regional Instance Group Manager items" + }, + "terraformMappings": [ + { + "terraformQueryMap": "google_compute_region_instance_group_manager.name" + } + ] +} diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-reservation.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-reservation.json index 6ea4f52b..b8946169 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-reservation.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-reservation.json @@ -1,9 +1,7 @@ { "type": "gcp-compute-reservation", "category": 1, - "potentialLinks": [ - "gcp-compute-region-commitment" - ], + "potentialLinks": ["gcp-compute-region-commitment"], "descriptiveName": "GCP Compute Reservation", "supportedQueryMethods": { "get": true, diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-route.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-route.json index 5a2ebefc..6f15881d 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-route.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-route.json @@ -2,6 +2,7 @@ "type": "gcp-compute-route", "category": 3, "potentialLinks": [ + "gcp-compute-forwarding-rule", "gcp-compute-instance", "gcp-compute-network", "gcp-compute-vpn-tunnel" @@ -11,7 +12,9 @@ "get": true, "getDescription": "Get a gcp-compute-route by its \"name\"", "list": true, - "listDescription": "List all gcp-compute-route" + "listDescription": "List all gcp-compute-route", + "search": true, + "searchDescription": "Search for routes by network tag. The query is a plain network tag name." 
}, "terraformMappings": [ { diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-snapshot.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-snapshot.json index 5b42d808..aafae65c 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-snapshot.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-snapshot.json @@ -1,7 +1,11 @@ { "type": "gcp-compute-snapshot", "category": 2, - "potentialLinks": ["gcp-compute-disk", "gcp-compute-instant-snapshot"], + "potentialLinks": [ + "gcp-cloud-kms-crypto-key-version", + "gcp-compute-disk", + "gcp-compute-instant-snapshot" + ], "descriptiveName": "GCP Compute Snapshot", "supportedQueryMethods": { "get": true, diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-subnetwork.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-subnetwork.json index 2af0ff8e..2e2e09e5 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-subnetwork.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-compute-subnetwork.json @@ -1,7 +1,10 @@ { "type": "gcp-compute-subnetwork", "category": 3, - "potentialLinks": ["gcp-compute-network"], + "potentialLinks": [ + "gcp-compute-network", + "gcp-compute-public-delegated-prefix" + ], "descriptiveName": "GCP Compute Subnetwork", "supportedQueryMethods": { "get": true, diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-container-cluster.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-container-cluster.json index 75607613..a981a63f 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-container-cluster.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-container-cluster.json @@ -2,7 +2,9 @@ "type": "gcp-container-cluster", "category": 1, "potentialLinks": [ + "gcp-big-query-dataset", "gcp-cloud-kms-crypto-key", + "gcp-cloud-kms-crypto-key-version", "gcp-compute-network", "gcp-compute-node-group", "gcp-compute-subnetwork", diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-container-node-pool.json 
b/docs.overmind.tech/docs/sources/gcp/data/gcp-container-node-pool.json index d14c9319..10bebbea 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-container-node-pool.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-container-node-pool.json @@ -3,7 +3,10 @@ "category": 1, "potentialLinks": [ "gcp-cloud-kms-crypto-key", + "gcp-compute-instance-group-manager", + "gcp-compute-network", "gcp-compute-node-group", + "gcp-compute-subnetwork", "gcp-container-cluster", "gcp-iam-service-account" ], diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-dataform-repository.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-dataform-repository.json index 0513d8c6..c83c8dc1 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-dataform-repository.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-dataform-repository.json @@ -3,6 +3,7 @@ "category": 6, "potentialLinks": [ "gcp-cloud-kms-crypto-key", + "gcp-cloud-kms-crypto-key-version", "gcp-iam-service-account", "gcp-secret-manager-secret" ], diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-dataplex-data-scan.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-dataplex-data-scan.json index 99ae75d0..0f0e2d11 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-dataplex-data-scan.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-dataplex-data-scan.json @@ -1,7 +1,7 @@ { "type": "gcp-dataplex-data-scan", "category": 5, - "potentialLinks": ["gcp-storage-bucket"], + "potentialLinks": ["gcp-big-query-table", "gcp-storage-bucket"], "descriptiveName": "GCP Dataplex Data Scan", "supportedQueryMethods": { "get": true, diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-dataproc-cluster.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-dataproc-cluster.json index d1f19d26..ffe69314 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-dataproc-cluster.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-dataproc-cluster.json @@ -8,7 +8,10 @@ "gcp-compute-network", 
"gcp-compute-node-group", "gcp-compute-subnetwork", + "gcp-container-cluster", + "gcp-container-node-pool", "gcp-dataproc-autoscaling-policy", + "gcp-dataproc-cluster", "gcp-iam-service-account", "gcp-storage-bucket" ], diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-iam-service-account-key.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-iam-service-account-key.json index 07a30c6a..c75970c7 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-iam-service-account-key.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-iam-service-account-key.json @@ -11,6 +11,7 @@ }, "terraformMappings": [ { + "terraformMethod": 2, "terraformQueryMap": "google_service_account_key.id" } ] diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-logging-bucket.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-logging-bucket.json index e63a32f8..0610c01f 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-logging-bucket.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-logging-bucket.json @@ -1,7 +1,11 @@ { "type": "gcp-logging-bucket", "category": 5, - "potentialLinks": ["gcp-cloud-kms-crypto-key", "gcp-iam-service-account"], + "potentialLinks": [ + "gcp-cloud-kms-crypto-key", + "gcp-cloud-kms-crypto-key-version", + "gcp-iam-service-account" + ], "descriptiveName": "GCP Logging Bucket", "supportedQueryMethods": { "get": true, diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-logging-sink.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-logging-sink.json index 013bcc3b..281a1984 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-logging-sink.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-logging-sink.json @@ -3,6 +3,7 @@ "category": 7, "potentialLinks": [ "gcp-big-query-dataset", + "gcp-iam-service-account", "gcp-logging-bucket", "gcp-pub-sub-topic", "gcp-storage-bucket" diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-monitoring-notification-channel.json 
b/docs.overmind.tech/docs/sources/gcp/data/gcp-monitoring-notification-channel.json index 9da3e095..04d9ca96 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-monitoring-notification-channel.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-monitoring-notification-channel.json @@ -1,6 +1,7 @@ { "type": "gcp-monitoring-notification-channel", "category": 5, + "potentialLinks": ["gcp-pub-sub-topic"], "descriptiveName": "GCP Monitoring Notification Channel", "supportedQueryMethods": { "get": true, diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-orgpolicy-policy.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-orgpolicy-policy.json index 0126fcb8..e45f413f 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-orgpolicy-policy.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-orgpolicy-policy.json @@ -1,6 +1,7 @@ { "type": "gcp-orgpolicy-policy", "category": 7, + "potentialLinks": ["gcp-cloud-resource-manager-project"], "descriptiveName": "GCP Orgpolicy Policy", "supportedQueryMethods": { "get": true, diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-pub-sub-subscription.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-pub-sub-subscription.json index 35abc174..8ffa0e1a 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-pub-sub-subscription.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-pub-sub-subscription.json @@ -3,6 +3,7 @@ "category": 7, "potentialLinks": [ "gcp-big-query-table", + "gcp-iam-service-account", "gcp-pub-sub-subscription", "gcp-pub-sub-topic", "gcp-storage-bucket" @@ -17,6 +18,15 @@ "terraformMappings": [ { "terraformQueryMap": "google_pubsub_subscription.name" + }, + { + "terraformQueryMap": "google_pubsub_subscription_iam_binding.subscription" + }, + { + "terraformQueryMap": "google_pubsub_subscription_iam_member.subscription" + }, + { + "terraformQueryMap": "google_pubsub_subscription_iam_policy.subscription" } ] } diff --git 
a/docs.overmind.tech/docs/sources/gcp/data/gcp-pub-sub-topic.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-pub-sub-topic.json index 7f8a34b0..4d18093e 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-pub-sub-topic.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-pub-sub-topic.json @@ -1,7 +1,11 @@ { "type": "gcp-pub-sub-topic", "category": 7, - "potentialLinks": ["gcp-cloud-kms-crypto-key", "gcp-storage-bucket"], + "potentialLinks": [ + "gcp-cloud-kms-crypto-key", + "gcp-iam-service-account", + "gcp-storage-bucket" + ], "descriptiveName": "GCP Pub Sub Topic", "supportedQueryMethods": { "get": true, @@ -12,6 +16,15 @@ "terraformMappings": [ { "terraformQueryMap": "google_pubsub_topic.name" + }, + { + "terraformQueryMap": "google_pubsub_topic_iam_binding.topic" + }, + { + "terraformQueryMap": "google_pubsub_topic_iam_member.topic" + }, + { + "terraformQueryMap": "google_pubsub_topic_iam_policy.topic" } ] } diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-run-revision.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-run-revision.json index 44d783b7..ee50a137 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-run-revision.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-run-revision.json @@ -8,6 +8,7 @@ "gcp-compute-subnetwork", "gcp-iam-service-account", "gcp-run-service", + "gcp-secret-manager-secret", "gcp-sql-admin-instance", "gcp-storage-bucket" ], diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-security-center-management-security-center-service.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-security-center-management-security-center-service.json index f5c6408f..997fcdc2 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-security-center-management-security-center-service.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-security-center-management-security-center-service.json @@ -1,6 +1,7 @@ { "type": "gcp-security-center-management-security-center-service", "category": 4, + 
"potentialLinks": ["gcp-cloud-resource-manager-project"], "descriptiveName": "GCP Security Center Management Security Center Service", "supportedQueryMethods": { "get": true, diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-service-usage-service.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-service-usage-service.json index 8c15f219..215f3451 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-service-usage-service.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-service-usage-service.json @@ -1,7 +1,7 @@ { "type": "gcp-service-usage-service", "category": 7, - "potentialLinks": ["gcp-pub-sub-topic"], + "potentialLinks": ["gcp-cloud-resource-manager-project", "gcp-pub-sub-topic"], "descriptiveName": "GCP Service Usage Service", "supportedQueryMethods": { "get": true, diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-spanner-database.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-spanner-database.json index f333a2dc..8392d7e6 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-spanner-database.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-spanner-database.json @@ -1,7 +1,12 @@ { "type": "gcp-spanner-database", "category": 6, - "potentialLinks": ["gcp-cloud-kms-crypto-key", "gcp-spanner-instance"], + "potentialLinks": [ + "gcp-cloud-kms-crypto-key", + "gcp-cloud-kms-crypto-key-version", + "gcp-spanner-database", + "gcp-spanner-instance" + ], "descriptiveName": "GCP Spanner Database", "supportedQueryMethods": { "get": true, diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-sql-admin-backup-run.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-sql-admin-backup-run.json index 1071747e..5ad4bb97 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-sql-admin-backup-run.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-sql-admin-backup-run.json @@ -1,7 +1,11 @@ { "type": "gcp-sql-admin-backup-run", "category": 6, - "potentialLinks": ["gcp-cloud-kms-crypto-key", "gcp-sql-admin-instance"], + 
"potentialLinks": [ + "gcp-cloud-kms-crypto-key", + "gcp-cloud-kms-crypto-key-version", + "gcp-sql-admin-instance" + ], "descriptiveName": "GCP Sql Admin Backup Run", "supportedQueryMethods": { "get": true, diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-sql-admin-backup.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-sql-admin-backup.json index 474f8076..989e4229 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-sql-admin-backup.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-sql-admin-backup.json @@ -3,7 +3,8 @@ "category": 6, "potentialLinks": [ "gcp-cloud-kms-crypto-key", - "gcp-sql-admin-backup-run", + "gcp-cloud-kms-crypto-key-version", + "gcp-compute-network", "gcp-sql-admin-instance" ], "descriptiveName": "GCP Sql Admin Backup", diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-sql-admin-instance.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-sql-admin-instance.json index 16d72392..c884f959 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-sql-admin-instance.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-sql-admin-instance.json @@ -4,6 +4,7 @@ "potentialLinks": [ "gcp-cloud-kms-crypto-key", "gcp-compute-network", + "gcp-compute-subnetwork", "gcp-iam-service-account", "gcp-sql-admin-backup-run", "gcp-sql-admin-instance", diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-storage-bucket-iam-policy.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-storage-bucket-iam-policy.json new file mode 100644 index 00000000..82939923 --- /dev/null +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-storage-bucket-iam-policy.json @@ -0,0 +1,28 @@ +{ + "type": "gcp-storage-bucket-iam-policy", + "category": 4, + "potentialLinks": [ + "gcp-compute-project", + "gcp-iam-role", + "gcp-iam-service-account", + "gcp-storage-bucket" + ], + "descriptiveName": "GCP Storage Bucket Iam Policy", + "supportedQueryMethods": { + "get": true, + "getDescription": "Get GCP Storage Bucket Iam Policy by 
\"gcp-storage-bucket-iam-policy-bucket\"", + "search": true, + "searchDescription": "Search for GCP Storage Bucket Iam Policy by \"gcp-storage-bucket-iam-policy-bucket\"" + }, + "terraformMappings": [ + { + "terraformQueryMap": "google_storage_bucket_iam_binding.bucket" + }, + { + "terraformQueryMap": "google_storage_bucket_iam_member.bucket" + }, + { + "terraformQueryMap": "google_storage_bucket_iam_policy.bucket" + } + ] +} diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-storage-bucket.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-storage-bucket.json index 95730649..e9db4d9e 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-storage-bucket.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-storage-bucket.json @@ -4,7 +4,8 @@ "potentialLinks": [ "gcp-cloud-kms-crypto-key", "gcp-compute-network", - "gcp-logging-bucket" + "gcp-logging-bucket", + "gcp-storage-bucket-iam-policy" ], "descriptiveName": "GCP Storage Bucket", "supportedQueryMethods": { @@ -16,6 +17,15 @@ "terraformMappings": [ { "terraformQueryMap": "google_storage_bucket.name" + }, + { + "terraformQueryMap": "google_storage_bucket_iam_binding.bucket" + }, + { + "terraformQueryMap": "google_storage_bucket_iam_member.bucket" + }, + { + "terraformQueryMap": "google_storage_bucket_iam_policy.bucket" } ] } diff --git a/docs.overmind.tech/docs/sources/gcp/data/gcp-storage-transfer-transfer-job.json b/docs.overmind.tech/docs/sources/gcp/data/gcp-storage-transfer-transfer-job.json index 2b3bf896..945d03b6 100644 --- a/docs.overmind.tech/docs/sources/gcp/data/gcp-storage-transfer-transfer-job.json +++ b/docs.overmind.tech/docs/sources/gcp/data/gcp-storage-transfer-transfer-job.json @@ -5,6 +5,7 @@ "gcp-iam-service-account", "gcp-pub-sub-subscription", "gcp-pub-sub-topic", + "gcp-secret-manager-secret", "gcp-storage-bucket" ], "descriptiveName": "GCP Storage Transfer Transfer Job", diff --git a/docs.overmind.tech/docs/sources/k8s/Types/EndpointSlice.md 
b/docs.overmind.tech/docs/sources/k8s/Types/EndpointSlice.md index c8322f36..9c9338aa 100644 --- a/docs.overmind.tech/docs/sources/k8s/Types/EndpointSlice.md +++ b/docs.overmind.tech/docs/sources/k8s/Types/EndpointSlice.md @@ -34,3 +34,7 @@ When Kubernetes populates cluster DNS (e.g. `my-service.my-namespace.svc.cluster ### [`ip`](/sources/aws/Types/networkmanager-network-resource-relationship) EndpointSlices store one or more IPv4/IPv6 addresses for each endpoint. These addresses are linked so that you can follow a path from a Service to the raw IPs that will be contacted, helping to assess network-level reachability and risk. + +### [`Service`](/sources/k8s/Types/Service) + +Every EndpointSlice carries a `kubernetes.io/service-name` label identifying the Service it belongs to. Overmind reads this label and links the EndpointSlice back to its parent Service, completing the bidirectional relationship in the infrastructure graph. diff --git a/docs.overmind.tech/docs/sources/k8s/Types/Service.md b/docs.overmind.tech/docs/sources/k8s/Types/Service.md index 039fe307..48ad1297 100644 --- a/docs.overmind.tech/docs/sources/k8s/Types/Service.md +++ b/docs.overmind.tech/docs/sources/k8s/Types/Service.md @@ -30,3 +30,11 @@ Each Service is assigned one or more IP addresses (ClusterIP, ExternalIP, LoadBa ### [`dns`](/sources/stdlib/Types/dns) Kubernetes automatically registers DNS records for every Service (e.g., `my-service.my-namespace.svc.cluster.local`). Overmind links Services to their corresponding DNS entries so you can trace name resolution to the backing workloads. + +### [`Endpoints`](/sources/k8s/Types/Endpoints) + +Each Service creates a corresponding Endpoints object with the same name that lists the IP addresses of the backing Pods. Overmind links Services to their Endpoints so you can see which addresses are currently active. This uses the legacy `core/v1` API and works on all Kubernetes versions. 
+ +### [`EndpointSlice`](/sources/k8s/Types/EndpointSlice) + +Modern Kubernetes clusters create EndpointSlices (labelled with `kubernetes.io/service-name`) as the scalable replacement for Endpoints. Overmind searches for EndpointSlices matching the Service name so you can trace from a Service to the network endpoints that back it on newer clusters. diff --git a/docs.overmind.tech/docs/sources/k8s/account_settings.png b/docs.overmind.tech/docs/sources/k8s/account_settings.png index 4e50b499..c7e184cb 100644 Binary files a/docs.overmind.tech/docs/sources/k8s/account_settings.png and b/docs.overmind.tech/docs/sources/k8s/account_settings.png differ diff --git a/docs.overmind.tech/docs/sources/k8s/api_key.png b/docs.overmind.tech/docs/sources/k8s/api_key.png index 0d72300e..9fa0623c 100644 Binary files a/docs.overmind.tech/docs/sources/k8s/api_key.png and b/docs.overmind.tech/docs/sources/k8s/api_key.png differ diff --git a/docs.overmind.tech/docs/sources/k8s/configuration.md b/docs.overmind.tech/docs/sources/k8s/configuration.md index 7df113af..cb30ca63 100644 --- a/docs.overmind.tech/docs/sources/k8s/configuration.md +++ b/docs.overmind.tech/docs/sources/k8s/configuration.md @@ -11,10 +11,10 @@ sidebar_position: 1 ## Installation -Create an API Key with `request:receive` scope in Overmind under Account settings > API Keys +Create an API Key with `request:receive` scope in Overmind under **Settings › API Keys**. 
-![account settings](account_settings.png) -![api key](api_key.png) +![User settings menu in the sidebar](account_settings.png) +![API Keys settings page](api_key.png) Install the source into your Kubernetes cluster using Helm: diff --git a/docs.overmind.tech/docs/sources/k8s/data/EndpointSlice.json b/docs.overmind.tech/docs/sources/k8s/data/EndpointSlice.json index a8a9ef2a..1f41c53e 100644 --- a/docs.overmind.tech/docs/sources/k8s/data/EndpointSlice.json +++ b/docs.overmind.tech/docs/sources/k8s/data/EndpointSlice.json @@ -1,7 +1,7 @@ { "type": "EndpointSlice", "category": 3, - "potentialLinks": ["Node", "Pod", "dns", "ip"], + "potentialLinks": ["Node", "Pod", "dns", "ip", "Service"], "descriptiveName": "Endpoint Slice", "supportedQueryMethods": { "get": true, diff --git a/docs.overmind.tech/docs/sources/k8s/data/Service.json b/docs.overmind.tech/docs/sources/k8s/data/Service.json index 4f22397c..5bc032c9 100644 --- a/docs.overmind.tech/docs/sources/k8s/data/Service.json +++ b/docs.overmind.tech/docs/sources/k8s/data/Service.json @@ -1,7 +1,7 @@ { "type": "Service", "category": 3, - "potentialLinks": ["Pod", "ip", "dns", "Endpoint"], + "potentialLinks": ["Pod", "ip", "dns", "Endpoints", "EndpointSlice"], "descriptiveName": "Service", "supportedQueryMethods": { "get": true, diff --git a/go.mod b/go.mod index 94be0a7f..51b9b4a6 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/overmindtech/cli -go 1.25.1 +go 1.26.0 replace github.com/anthropics/anthropic-sdk-go => github.com/anthropics/anthropic-sdk-go v0.2.0-alpha.4 @@ -11,21 +11,22 @@ replace github.com/google/cel-go => github.com/google/cel-go v0.22.1 require ( atomicgo.dev/keyboard v0.2.9 buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.11-20260209202127-80ab13bee0bf.1 - buf.build/go/protovalidate v1.1.2 - cloud.google.com/go/aiplatform v1.116.0 - cloud.google.com/go/auth v0.18.1 - cloud.google.com/go/bigquery v1.73.1 + buf.build/go/protovalidate v1.1.3 + charm.land/lipgloss/v2 
v2.0.0 + cloud.google.com/go/aiplatform v1.119.0 + cloud.google.com/go/auth v0.18.2 + cloud.google.com/go/bigquery v1.74.0 cloud.google.com/go/bigtable v1.42.0 cloud.google.com/go/certificatemanager v1.9.6 - cloud.google.com/go/compute v1.54.0 + cloud.google.com/go/compute v1.56.0 cloud.google.com/go/compute/metadata v0.9.0 // indirect cloud.google.com/go/container v1.46.0 cloud.google.com/go/dataplex v1.28.0 - cloud.google.com/go/dataproc/v2 v2.15.0 + cloud.google.com/go/dataproc/v2 v2.16.0 cloud.google.com/go/filestore v1.10.3 cloud.google.com/go/functions v1.19.7 cloud.google.com/go/iam v1.5.3 - cloud.google.com/go/kms v1.25.0 + cloud.google.com/go/kms v1.26.0 cloud.google.com/go/logging v1.13.2 cloud.google.com/go/monitoring v1.24.3 cloud.google.com/go/networksecurity v0.11.0 @@ -42,55 +43,56 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v3 v3.0.0-beta.2 - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/batch/armbatch/v3 v3.0.1 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/batch/armbatch/v4 v4.0.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7 v7.3.0 - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos v1.0.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos/v3 v3.4.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dns/armdns v1.2.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault/v2 v2.0.1 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi v1.3.0 - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8 v8.0.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9 v9.0.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresqlflexibleservers/v5 v5.0.0 + 
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns v1.3.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2 v2.1.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v3 v3.0.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/sql/armsql/v2 v2.0.0-beta.7 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage/v3 v3.0.0 github.com/Masterminds/semver/v3 v3.4.0 github.com/MrAlias/otel-schema-utils v0.4.0-alpha github.com/auth0/go-jwt-middleware/v2 v2.3.1 - github.com/aws/aws-sdk-go-v2 v1.41.1 - github.com/aws/aws-sdk-go-v2/config v1.32.7 - github.com/aws/aws-sdk-go-v2/credentials v1.19.7 - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 - github.com/aws/aws-sdk-go-v2/service/apigateway v1.38.4 - github.com/aws/aws-sdk-go-v2/service/autoscaling v1.64.0 - github.com/aws/aws-sdk-go-v2/service/cloudfront v1.60.0 - github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.53.1 - github.com/aws/aws-sdk-go-v2/service/directconnect v1.38.11 - github.com/aws/aws-sdk-go-v2/service/dynamodb v1.55.0 - github.com/aws/aws-sdk-go-v2/service/ec2 v1.288.0 - github.com/aws/aws-sdk-go-v2/service/ecs v1.71.0 - github.com/aws/aws-sdk-go-v2/service/efs v1.41.10 - github.com/aws/aws-sdk-go-v2/service/eks v1.80.0 - github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.33.19 - github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.54.6 - github.com/aws/aws-sdk-go-v2/service/iam v1.53.2 - github.com/aws/aws-sdk-go-v2/service/kms v1.49.5 - github.com/aws/aws-sdk-go-v2/service/lambda v1.88.0 - github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.59.3 - github.com/aws/aws-sdk-go-v2/service/networkmanager v1.41.4 - github.com/aws/aws-sdk-go-v2/service/rds v1.115.0 - github.com/aws/aws-sdk-go-v2/service/route53 v1.62.1 - github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0 - github.com/aws/aws-sdk-go-v2/service/sns v1.39.11 - github.com/aws/aws-sdk-go-v2/service/sqs 
v1.42.21 - github.com/aws/aws-sdk-go-v2/service/ssm v1.67.8 - github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 - github.com/aws/smithy-go v1.24.0 + github.com/aws/aws-sdk-go-v2 v1.41.3 + github.com/aws/aws-sdk-go-v2/config v1.32.11 + github.com/aws/aws-sdk-go-v2/credentials v1.19.11 + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.19 + github.com/aws/aws-sdk-go-v2/service/apigateway v1.38.6 + github.com/aws/aws-sdk-go-v2/service/autoscaling v1.64.2 + github.com/aws/aws-sdk-go-v2/service/cloudfront v1.60.2 + github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.55.1 + github.com/aws/aws-sdk-go-v2/service/directconnect v1.38.13 + github.com/aws/aws-sdk-go-v2/service/dynamodb v1.56.1 + github.com/aws/aws-sdk-go-v2/service/ec2 v1.294.0 + github.com/aws/aws-sdk-go-v2/service/ecs v1.73.1 + github.com/aws/aws-sdk-go-v2/service/efs v1.41.12 + github.com/aws/aws-sdk-go-v2/service/eks v1.80.2 + github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.33.21 + github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.54.8 + github.com/aws/aws-sdk-go-v2/service/iam v1.53.4 + github.com/aws/aws-sdk-go-v2/service/kms v1.50.2 + github.com/aws/aws-sdk-go-v2/service/lambda v1.88.2 + github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.59.5 + github.com/aws/aws-sdk-go-v2/service/networkmanager v1.41.6 + github.com/aws/aws-sdk-go-v2/service/rds v1.116.2 + github.com/aws/aws-sdk-go-v2/service/route53 v1.62.3 + github.com/aws/aws-sdk-go-v2/service/s3 v1.96.4 + github.com/aws/aws-sdk-go-v2/service/sns v1.39.13 + github.com/aws/aws-sdk-go-v2/service/sqs v1.42.23 + github.com/aws/aws-sdk-go-v2/service/ssm v1.68.2 + github.com/aws/aws-sdk-go-v2/service/sts v1.41.8 + github.com/aws/smithy-go v1.24.2 github.com/cenkalti/backoff/v5 v5.0.3 github.com/charmbracelet/glamour v0.10.0 - github.com/charmbracelet/lipgloss/v2 v2.0.0-beta.3 github.com/coder/websocket v1.8.14 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/getsentry/sentry-go v0.42.0 
+ github.com/getsentry/sentry-go v0.43.0 github.com/go-jose/go-jose/v4 v4.1.3 github.com/google/btree v1.1.3 github.com/google/uuid v1.6.0 @@ -98,18 +100,18 @@ require ( github.com/goombaio/namegenerator v0.0.0-20181006234301-989e774b106e github.com/hashicorp/go-retryablehttp v0.7.8 github.com/hashicorp/hcl/v2 v2.24.0 - github.com/hashicorp/terraform-config-inspect v0.0.0-20260210152655-f4be3ba97d94 - github.com/hashicorp/terraform-plugin-framework v1.17.0 - github.com/hashicorp/terraform-plugin-go v0.29.0 + github.com/hashicorp/terraform-config-inspect v0.0.0-20260224005459-813a97530220 + github.com/hashicorp/terraform-plugin-framework v1.18.0 + github.com/hashicorp/terraform-plugin-go v0.30.0 github.com/hashicorp/terraform-plugin-testing v1.14.0 github.com/jedib0t/go-pretty/v6 v6.7.8 - github.com/micahhausler/aws-iam-policy v0.4.2 + github.com/micahhausler/aws-iam-policy v0.4.4 github.com/miekg/dns v1.1.72 github.com/mitchellh/go-homedir v1.1.0 github.com/muesli/reflow v0.3.0 github.com/nats-io/jwt/v2 v2.8.0 github.com/nats-io/nats-server/v2 v2.12.4 - github.com/nats-io/nats.go v1.48.0 + github.com/nats-io/nats.go v1.49.0 github.com/nats-io/nkeys v0.4.15 github.com/onsi/ginkgo/v2 v2.28.1 // indirect github.com/onsi/gomega v1.39.1 // indirect @@ -125,35 +127,34 @@ require ( github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31 github.com/uptrace/opentelemetry-go-extra/otellogrus v0.3.2 github.com/xiam/dig v0.0.0-20191116195832-893b5fb5093b - github.com/zclconf/go-cty v1.17.0 + github.com/zclconf/go-cty v1.18.0 go.etcd.io/bbolt v1.4.3 go.opentelemetry.io/contrib/detectors/aws/ec2/v2 v2.0.0-20250901115419-474a7992e57c - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 - go.opentelemetry.io/otel v1.40.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.40.0 - go.opentelemetry.io/otel/sdk v1.40.0 - 
go.opentelemetry.io/otel/trace v1.40.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.66.0 + go.opentelemetry.io/otel v1.41.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.41.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.41.0 + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.41.0 + go.opentelemetry.io/otel/sdk v1.41.0 + go.opentelemetry.io/otel/trace v1.41.0 go.uber.org/automaxprocs v1.6.0 go.uber.org/goleak v1.3.0 go.uber.org/mock v0.6.0 - golang.org/x/net v0.50.0 + golang.org/x/net v0.51.0 golang.org/x/oauth2 v0.35.0 golang.org/x/sync v0.19.0 golang.org/x/text v0.34.0 gonum.org/v1/gonum v0.17.0 - google.golang.org/api v0.266.0 + google.golang.org/api v0.269.0 google.golang.org/genproto v0.0.0-20260128011058-8636f8732409 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 - google.golang.org/grpc v1.79.1 + google.golang.org/genproto/googleapis/rpc v0.0.0-20260226221140-a57be14db171 + google.golang.org/grpc v1.79.2 google.golang.org/protobuf v1.36.11 gopkg.in/ini.v1 v1.67.1 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.35.1 - k8s.io/apimachinery v0.35.1 - k8s.io/client-go v0.35.1 - k8s.io/utils v0.0.0-20260210185600-b8788abfbbc2 + k8s.io/api v0.35.2 + k8s.io/apimachinery v0.35.2 + k8s.io/client-go v0.35.2 sigs.k8s.io/kind v0.31.0 ) @@ -166,6 +167,7 @@ require ( cloud.google.com/go/auth/oauth2adapt v0.2.8 cloud.google.com/go/longrunning v0.8.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v3 v3.1.1 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 // indirect github.com/BurntSushi/toml v1.4.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 // indirect @@ -180,29 +182,34 @@ require ( github.com/antlr4-go/antlr/v4 v4.13.1 // indirect github.com/apache/arrow/go/v15 v15.0.2 // indirect 
github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.17 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 // indirect - github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.6 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.19 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.19 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.5 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.20 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.6 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.11 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.19 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.19 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.19 // indirect + github.com/aws/aws-sdk-go-v2/service/signin v1.0.7 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.30.12 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.16 // indirect 
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/aymerick/douceur v0.2.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/charmbracelet/colorprofile v0.3.1 // indirect + github.com/charmbracelet/colorprofile v0.4.2 // indirect github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834 // indirect; being pulled by glamour, this will be resolved in https://github.com/charmbracelet/glamour/pull/408 - github.com/charmbracelet/x/ansi v0.8.0 // indirect - github.com/charmbracelet/x/cellbuf v0.0.13 // indirect + github.com/charmbracelet/ultraviolet v0.0.0-20251205161215-1948445e3318 // indirect + github.com/charmbracelet/x/ansi v0.11.6 // indirect + github.com/charmbracelet/x/cellbuf v0.0.15 // indirect github.com/charmbracelet/x/exp/slice v0.0.0-20250417172821-98fd948af1b1 // indirect - github.com/charmbracelet/x/term v0.2.1 // indirect - github.com/cloudflare/circl v1.6.1 // indirect + github.com/charmbracelet/x/term v0.2.2 // indirect + github.com/charmbracelet/x/termios v0.1.1 // indirect + github.com/charmbracelet/x/windows v0.2.2 // indirect + github.com/clipperhouse/displaywidth v0.11.0 // indirect + github.com/clipperhouse/uax29/v2 v2.7.0 // indirect + github.com/cloudflare/circl v1.6.3 // indirect github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 // indirect github.com/containerd/console v1.0.4 // indirect github.com/dlclark/regexp2 v1.11.5 // indirect @@ -210,7 +217,7 @@ require ( github.com/envoyproxy/go-control-plane/envoy v1.36.0 // indirect github.com/envoyproxy/protoc-gen-validate v1.3.0 // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect - github.com/fatih/color v1.16.0 // indirect + github.com/fatih/color v1.18.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect github.com/fxamacker/cbor/v2 v2.9.0 // indirect @@ -221,18 +228,18 @@ require ( github.com/go-openapi/swag v0.23.1 // indirect github.com/go-viper/mapstructure/v2 
v2.4.0 // indirect github.com/goccy/go-json v0.10.5 // indirect - github.com/golang-jwt/jwt/v5 v5.3.0 // indirect + github.com/golang-jwt/jwt/v5 v5.3.1 // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/cel-go v0.26.1 // indirect + github.com/google/cel-go v0.27.0 // indirect github.com/google/flatbuffers v23.5.26+incompatible // indirect github.com/google/gnostic-models v0.7.0 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/go-tpm v0.9.8 // indirect github.com/google/s2a-go v0.1.9 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.11 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.12 // indirect github.com/gookit/color v1.5.4 // indirect github.com/gorilla/css v1.0.1 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-checkpoint v0.5.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect @@ -259,11 +266,11 @@ require ( github.com/klauspost/cpuid/v2 v2.2.8 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/lithammer/fuzzysearch v1.1.8 // indirect - github.com/lucasb-eyer/go-colorful v1.2.0 // indirect + github.com/lucasb-eyer/go-colorful v1.3.0 // indirect github.com/mailru/easyjson v0.9.0 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-runewidth v0.0.16 // indirect + github.com/mattn/go-runewidth v0.0.19 // indirect github.com/microcosm-cc/bluemonday v1.0.27 // indirect github.com/minio/highwayhash v1.0.4-0.20251030100505-070ab1a87a76 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect @@ -306,15 +313,15 @@ require ( go.opentelemetry.io/contrib/detectors/gcp v1.39.0 // indirect 
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 // indirect go.opentelemetry.io/otel/log v0.11.0 // indirect - go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.41.0 // indirect go.opentelemetry.io/otel/schema v0.0.12 // indirect - go.opentelemetry.io/otel/sdk/metric v1.40.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.41.0 // indirect go.opentelemetry.io/proto/otlp v1.9.0 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/crypto v0.48.0 // indirect golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect - golang.org/x/mod v0.32.0 // indirect + golang.org/x/mod v0.33.0 // indirect golang.org/x/sys v0.41.0 // indirect golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2 // indirect golang.org/x/term v0.40.0 // indirect @@ -322,12 +329,13 @@ require ( golang.org/x/tools v0.41.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 // indirect gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect + k8s.io/utils v0.0.0-20260210185600-b8788abfbbc2 // indirect sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect sigs.k8s.io/randfill v1.0.0 // indirect sigs.k8s.io/structured-merge-diff/v6 v6.3.2 // indirect diff --git a/go.sum b/go.sum index 15504935..ebb34705 100644 --- a/go.sum +++ b/go.sum @@ -10,26 +10,28 @@ atomicgo.dev/schedule v0.1.0 h1:nTthAbhZS5YZmgYbb2+DH8uQIZcTlIrd4eYr3UQxEjs= atomicgo.dev/schedule v0.1.0/go.mod h1:xeUa3oAkiuHYh8bKiQBRojqAMq3PXXbJujjb0hw8pEU= 
buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.11-20260209202127-80ab13bee0bf.1 h1:PMmTMyvHScV9Mn8wc6ASge9uRcHy0jtqPd+fM35LmsQ= buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.11-20260209202127-80ab13bee0bf.1/go.mod h1:tvtbpgaVXZX4g6Pn+AnzFycuRK3MOz5HJfEGeEllXYM= -buf.build/go/protovalidate v1.1.2 h1:83vYHoY8f34hB8MeitGaYE3CGVPFxwdEUuskh5qQpA0= -buf.build/go/protovalidate v1.1.2/go.mod h1:Ez3z+w4c+wG+EpW8ovgZaZPnPl2XVF6kaxgcv1NG/QE= +buf.build/go/protovalidate v1.1.3 h1:m2GVEgQWd7rk+vIoAZ+f0ygGjvQTuqPQapBBdcpWVPE= +buf.build/go/protovalidate v1.1.3/go.mod h1:9XIuohWz+kj+9JVn3WQneHA5LZP50mjvneZMnbLkiIE= cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4= cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4= +charm.land/lipgloss/v2 v2.0.0 h1:sd8N/B3x892oiOjFfBQdXBQp3cAkvjGaU5TvVZC3ivo= +charm.land/lipgloss/v2 v2.0.0/go.mod h1:w6SnmsBFBmEFBodiEDurGS/sdUY/u1+v72DqUzc6J14= cloud.google.com/go v0.123.0 h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE= cloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU= -cloud.google.com/go/aiplatform v1.116.0 h1:Qc8tv4DD6IbQfDKDd1Hu2qeGeYxTKTeZ7GH0vQrLAm8= -cloud.google.com/go/aiplatform v1.116.0/go.mod h1:AdvoUUSXh9ykwEazibd3Fj6OUGrIiZwvZrvm4j5OdkU= -cloud.google.com/go/auth v0.18.1 h1:IwTEx92GFUo2pJ6Qea0EU3zYvKnTAeRCODxfA/G5UWs= -cloud.google.com/go/auth v0.18.1/go.mod h1:GfTYoS9G3CWpRA3Va9doKN9mjPGRS+v41jmZAhBzbrA= +cloud.google.com/go/aiplatform v1.119.0 h1:Fum1ighlxsmwbmaf0nhuMDebcKJkpx2mgmd1YcyXaYY= +cloud.google.com/go/aiplatform v1.119.0/go.mod h1:27DcZJbaxFntewF6O0HojDE1B8JQOGKYopNjwoICFdI= +cloud.google.com/go/auth v0.18.2 h1:+Nbt5Ev0xEqxlNjd6c+yYUeosQ5TtEUaNcN/3FozlaM= +cloud.google.com/go/auth v0.18.2/go.mod h1:xD+oY7gcahcu7G2SG2DsBerfFxgPAJz17zz2joOFF3M= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod 
h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= -cloud.google.com/go/bigquery v1.73.1 h1:v//GZwdhtmCbZ87rOnxz7pectOGFS1GNRvrGTvLzka4= -cloud.google.com/go/bigquery v1.73.1/go.mod h1:KSLx1mKP/yGiA8U+ohSrqZM1WknUnjZAxHAQZ51/b1k= +cloud.google.com/go/bigquery v1.74.0 h1:Q6bAMv+eyvufOpIrfrYxhM46qq1D3ZQTdgUDQqKS+n8= +cloud.google.com/go/bigquery v1.74.0/go.mod h1:iViO7Cx3A/cRKcHNRsHB3yqGAMInFBswrE9Pxazsc90= cloud.google.com/go/bigtable v1.42.0 h1:SREvT4jLhJQZXUjsLmFs/1SMQJ+rKEj1cJuPE9liQs8= cloud.google.com/go/bigtable v1.42.0/go.mod h1:oZ30nofVB6/UYGg7lBwGLWSea7NZUvw/WvBBgLY07xU= cloud.google.com/go/certificatemanager v1.9.6 h1:v5X8X+THKrS9OFZb6k0GRDP1WQxLXTdMko7OInBliw4= cloud.google.com/go/certificatemanager v1.9.6/go.mod h1:vWogV874jKZkSRDFCMM3r7wqybv8WXs3XhyNff6o/Zo= -cloud.google.com/go/compute v1.54.0 h1:4CKmnpO+40z44bKG5bdcKxQ7ocNpRtOc9SCLLUzze1w= -cloud.google.com/go/compute v1.54.0/go.mod h1:RfBj0L1x/pIM84BrzNX2V21oEv16EKRPBiTcBRRH1Ww= +cloud.google.com/go/compute v1.56.0 h1:e8xch/mR0tJoUBj3nhNb96+MOQ1JGVGB+rBfVzWEU5I= +cloud.google.com/go/compute v1.56.0/go.mod h1:fMFC0mRv+fW2ISg7M3tpDfpZ+kkrHpC/ImNFRCYiNK0= cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= cloud.google.com/go/container v1.46.0 h1:xX94Lo3xrS5OkdMWKvpEVAbBwjN9uleVv6vOi02fL4s= @@ -38,16 +40,16 @@ cloud.google.com/go/datacatalog v1.26.1 h1:bCRKA8uSQN8wGW3Tw0gwko4E9a64GRmbW1nCb cloud.google.com/go/datacatalog v1.26.1/go.mod h1:2Qcq8vsHNxMDgjgadRFmFG47Y+uuIVsyEGUrlrKEdrg= cloud.google.com/go/dataplex v1.28.0 h1:rROI3iqMVI9nXT701ULoFRETQVAOAPC3mPSWFDxXFl0= cloud.google.com/go/dataplex v1.28.0/go.mod h1:VB+xlYJiJ5kreonXsa2cHPj0A3CfPh/mgiHG4JFhbUA= -cloud.google.com/go/dataproc/v2 v2.15.0 h1:I/Yux/d8uaxf3W+d59kolGTOc52+VZaL6RzJw7oDOeg= -cloud.google.com/go/dataproc/v2 v2.15.0/go.mod h1:tSdkodShfzrrUNPDVEL6MdH9/mIEvp/Z9s9PBdbsZg8= +cloud.google.com/go/dataproc/v2 
v2.16.0 h1:0g2hnjlQ8SQTnNeu+Bqqa61QPssfSZF3t+9ldRmx+VQ= +cloud.google.com/go/dataproc/v2 v2.16.0/go.mod h1:HlzFg8k1SK+bJN3Zsy2z5g6OZS1D4DYiDUgJtF0gJnE= cloud.google.com/go/filestore v1.10.3 h1:3KZifUVTqGhNNv6MLeONYth1HjlVM4vDhaH+xrdPljU= cloud.google.com/go/filestore v1.10.3/go.mod h1:94ZGyLTx9j+aWKozPQ6Wbq1DuImie/L/HIdGMshtwac= cloud.google.com/go/functions v1.19.7 h1:7LcOD18euIVGRUPaeCmgO6vfWSLNIsi6STWRQcdANG8= cloud.google.com/go/functions v1.19.7/go.mod h1:xbcKfS7GoIcaXr2FSwmtn9NXal1JR4TV6iYZlgXffwA= cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc= cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU= -cloud.google.com/go/kms v1.25.0 h1:gVqvGGUmz0nYCmtoxWmdc1wli2L1apgP8U4fghPGSbQ= -cloud.google.com/go/kms v1.25.0/go.mod h1:XIdHkzfj0bUO3E+LvwPg+oc7s58/Ns8Nd8Sdtljihbk= +cloud.google.com/go/kms v1.26.0 h1:cK9mN2cf+9V63D3H1f6koxTatWy39aTI/hCjz1I+adU= +cloud.google.com/go/kms v1.26.0/go.mod h1:pHKOdFJm63hxBsiPkYtowZPltu9dW0MWvBa6IA4HM58= cloud.google.com/go/logging v1.13.2 h1:qqlHCBvieJT9Cdq4QqYx1KPadCQ2noD4FK02eNqHAjA= cloud.google.com/go/logging v1.13.2/go.mod h1:zaybliM3yun1J8mU2dVQ1/qDzjbOqEijZCn6hSBtKak= cloud.google.com/go/longrunning v0.8.0 h1:LiKK77J3bx5gDLi4SMViHixjD2ohlkwBi+mKA7EhfW8= @@ -90,30 +92,36 @@ github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDo github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v3 v3.0.0-beta.2 h1:qiir/pptnHqp6hV8QwV+IExYIf6cPsXBfUDUXQ27t2Y= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v3 v3.0.0-beta.2/go.mod h1:jVRrRDLCOuif95HDYC23ADTMlvahB7tMdl519m9Iyjc= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/batch/armbatch/v3 v3.0.1 h1:6aObZUybvkz7Sm2d/GxgsZ+0hbhA0RC5p+81aAxQ/Po= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/batch/armbatch/v3 
v3.0.1/go.mod h1:kz6cfDXtcUJWUjLKSlXW+oBqtWovK648UYJDZYtAZ3g= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/batch/armbatch/v4 v4.0.0 h1:KBRoKIQlg79mFK5LRndDGPrCDGRl2xyFr/vG8afLGys= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/batch/armbatch/v4 v4.0.0/go.mod h1:w+PG/dv/phWHlE3OIKWa4CAITETZ52D8qznRGMbduPA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7 v7.3.0 h1:nyxugFxG2uhbMeJVCFFuD2j9wu+6KgeabITdINraQsE= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7 v7.3.0/go.mod h1:e4RAYykLIz73CF52KhSooo4whZGXvXrD09m0jkgnWiU= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos v1.0.0 h1:Fv8iibGn1eSw0lt2V3cTsuokBEnOP+M//n8OiMcCgTM= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos v1.0.0/go.mod h1:Qpe/qN9d5IQ7WPtTXMRCd6+BWTnhi3sxXVys6oJ5Vho= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos/v3 v3.4.0 h1:+EhRnIOLvffCvUMUfP+MgOp6PrtN1d6xt94DZtrC3lA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos/v3 v3.4.0/go.mod h1:Bb7kqorvA2acMCNFac+2ldoQWi7QrcMdH+9Gg9C7fSM= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dns/armdns v1.2.0 h1:lpOxwrQ919lCZoNCd69rVt8u1eLZuMORrGXqy8sNf3c= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dns/armdns v1.2.0/go.mod h1:fSvRkb8d26z9dbL40Uf/OO6Vo9iExtZK3D0ulRV+8M0= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v3 v3.1.0 h1:2qsIIvxVT+uE6yrNldntJKlLRgxGbZ85kgtz5SNBhMw= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v3 v3.1.0/go.mod h1:AW8VEadnhw9xox+VaVd9sP7NjzOAnaZBLRH6Tq3cJ38= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v3 v3.1.1 h1:1kpY4qe+BGAH2ykv4baVSqyx+AY5VjXeJ15SldlU6hs= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v3 v3.1.1/go.mod h1:nT6cWpWdUt+g81yuKmjeYPUtI73Ak3yQIT4PVVsCEEQ= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault/v2 v2.0.1 
h1:nFZ7AvJqTpWobmnZlprsK6GucrByFsXWB+DwkhRxM9I= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault/v2 v2.0.1/go.mod h1:ZNiswYTEPuQ/D+mHxONII+FeHHNNVQlJ5IUG88opjS0= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.2.0 h1:akP6VpxJGgQRpDR1P462piz/8OhYLRCreDj48AyNabc= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.2.0/go.mod h1:8wzvopPfyZYPaQUoKW87Zfdul7jmJMDfp/k7YY3oJyA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi v1.3.0 h1:L7G3dExHBgUxsO3qpTGhk/P2dgnYyW48yn7AO33Tbek= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi v1.3.0/go.mod h1:Ms6gYEy0+A2knfKrwdatsggTXYA2+ICKug8w7STorFw= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8 v8.0.0 h1:7QO7GhGat25QEYL4h607O9zNNTUlAv8PbSesW6Ol5Gg= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8 v8.0.0/go.mod h1:mCqeYzwyjn/pw0JVqHJMIzfUQJrlcV0YjTg5b0NK+F0= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9 v9.0.0 h1:CbHDMVJhcJSmXenq+UDWyIjumzVkZIb5pVUGzsCok5M= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9 v9.0.0/go.mod h1:raqbEXrok4aycS74XoU6p9Hne1dliAFpHLizlp+qJoM= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresqlflexibleservers/v5 v5.0.0 h1:S7K+MLPEYe+g9AX9dLKldBpYV03bPl7zeDaWhiNDqqs= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresqlflexibleservers/v5 v5.0.0/go.mod h1:EHRrmrnS2Q8fB3+DE30TTk04JLqjui5ZJEF7eMVQ2/M= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns v1.3.0 h1:yzrctSl9GMIQ5lHu7jc8olOsGjWDCsBpJhWqfGa/YIM= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns v1.3.0/go.mod h1:GE4m0rnnfwLGX0Y9A9A25Zx5N/90jneT5ABevqzhuFQ= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armdeployments v0.2.0 h1:bYq3jfB2x36hslKMHyge3+esWzROtJNk/4dCjsKlrl4= 
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armdeployments v0.2.0/go.mod h1:fewgRjNVE84QVVh798sIMFb7gPXPp7NmnekGnboSnXk= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 h1:Dd+RhdJn0OTtVGaeDLZpcumkIVCtA/3/Fo42+eoYvVM= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0/go.mod h1:5kakwfW5CjC9KK+Q4wjXAg+ShuIm2mBMua0ZFj2C8PE= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2 v2.1.0 h1:seyVIpxalxYmfjoo8MB4rRzWaobMG+KJ2+MAUrEvDGU= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2 v2.1.0/go.mod h1:M3QD7IyKZBaC4uAKjitTOSOXdcPC6JS1A9oOW3hYjbQ= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v3 v3.0.1 h1:guyQA4b8XB2sbJZXzUnOF9mn0WDBv/ZT7me9wTipKtE= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v3 v3.0.1/go.mod h1:8h8yhzh9o+0HeSIhUxYny+rEQajScrfIpNktvgYG3Q8= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/sql/armsql/v2 v2.0.0-beta.7 h1:SLsVdG/8T65poVMw5ZJtI/dUL7iIwvbkq+koqmWdmu8= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/sql/armsql/v2 v2.0.0-beta.7/go.mod h1:l9kSL5eB+KdZ2aovhkUYwyZE7oQwTEqVCxnpNKChi1U= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage/v3 v3.0.0 h1:tqGq5xt/rNU57Eb52rf6bvrNWoKPSwLDVUQrJnF4C5U= @@ -173,94 +181,94 @@ github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmms github.com/atomicgo/cursor v0.0.1/go.mod h1:cBON2QmmrysudxNBFthvMtN32r3jxVRIvzkUiF/RuIk= github.com/auth0/go-jwt-middleware/v2 v2.3.1 h1:lbDyWE9aLydb3zrank+Gufb9qGJN9u//7EbJK07pRrw= github.com/auth0/go-jwt-middleware/v2 v2.3.1/go.mod h1:mqVr0gdB5zuaFyQFWMJH/c/2hehNjbYUD4i8Dpyf+Hc= -github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU= -github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 
h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4/go.mod h1:IOAPF6oT9KCsceNTvvYMNHy0+kMF8akOjeDvPENWxp4= -github.com/aws/aws-sdk-go-v2/config v1.32.7 h1:vxUyWGUwmkQ2g19n7JY/9YL8MfAIl7bTesIUykECXmY= -github.com/aws/aws-sdk-go-v2/config v1.32.7/go.mod h1:2/Qm5vKUU/r7Y+zUk/Ptt2MDAEKAfUtKc1+3U1Mo3oY= -github.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUTvZKeLx4tH6PGQc8= -github.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17 h1:JqcdRG//czea7Ppjb+g/n4o8i/R50aTBHkA7vu0lK+k= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17/go.mod h1:CO+WeGmIdj/MlPel2KwID9Gt7CNq4M65HUfBW97liM0= -github.com/aws/aws-sdk-go-v2/service/apigateway v1.38.4 h1:V8gcFwJPP3eXZXpeui+p97JmO7WtCkQlEAHrE6Kyt0k= -github.com/aws/aws-sdk-go-v2/service/apigateway v1.38.4/go.mod h1:iJF5UdwkFue/YuUGCFsCCdT3SBMUx0s+h5TNi0Sz+qg= -github.com/aws/aws-sdk-go-v2/service/autoscaling v1.64.0 h1:s92jPptCu97RNwU1yF3jD4ahLZrQ0QkUIvrn464rQ2A= 
-github.com/aws/aws-sdk-go-v2/service/autoscaling v1.64.0/go.mod h1:8O5Pj92iNpfw/Fa7WdHbn6YiEjDoVdutz+9PGRNoP3Y= -github.com/aws/aws-sdk-go-v2/service/cloudfront v1.60.0 h1:RUQqU9L1LnFJ+9t5hsSB7GI6dVvJDCnG4WgRlDeHK6E= -github.com/aws/aws-sdk-go-v2/service/cloudfront v1.60.0/go.mod h1:9Hd/cqshF4zl13KGLkWtRfITbvKR6m6FZHwhL2BYDSY= -github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.53.1 h1:ElB5x0nrBHgQs+XcpQ1XJpSJzMFCq6fDTpT6WQCWOtQ= -github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.53.1/go.mod h1:Cj+LUEvAU073qB2jInKV6Y0nvHX0k7bL7KAga9zZ3jw= -github.com/aws/aws-sdk-go-v2/service/directconnect v1.38.11 h1:3+DkKJAq5VVqPNu3eT6j0UchZDjDsNeqFNAqsomMPDc= -github.com/aws/aws-sdk-go-v2/service/directconnect v1.38.11/go.mod h1:DNG3VkdVy874VMHH46ekGsD3nq6D4tyDV3HIOuVoouM= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.55.0 h1:CyYoeHWjVSGimzMhlL0Z4l5gLCa++ccnRJKrsaNssxE= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.55.0/go.mod h1:ctEsEHY2vFQc6i4KU07q4n68v7BAmTbujv2Y+z8+hQY= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.288.0 h1:cRu1CgKDK0qYNJRZBWaktwGZ6fvcFiKZm1Huzesc47s= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.288.0/go.mod h1:Uy+C+Sc58jozdoL1McQr8bDsEvNFx+/nBY+vpO1HVUY= -github.com/aws/aws-sdk-go-v2/service/ecs v1.71.0 h1:MzP/ElwTpINq+hS80ZQz4epKVnUTlz8Sz+P/AFORCKM= -github.com/aws/aws-sdk-go-v2/service/ecs v1.71.0/go.mod h1:pMlGFDpHoLTJOIZHGdJOAWmi+xeIlQXuFTuQxs1epYE= -github.com/aws/aws-sdk-go-v2/service/efs v1.41.10 h1:7ixaaFyZ8xXJWPcK3qQKFf1k1HgME9rtCY7S6Unih8I= -github.com/aws/aws-sdk-go-v2/service/efs v1.41.10/go.mod h1:QwCUd/L5/HX4s/uWt3LPEOwQb/AYE4OyMGB8SL9/W4Y= -github.com/aws/aws-sdk-go-v2/service/eks v1.80.0 h1:moQGV8cPbVTN7r2Xte1Mybku35QDePSJEd3onYVmBtY= -github.com/aws/aws-sdk-go-v2/service/eks v1.80.0/go.mod h1:Qg678m+87sCuJhcsZojenz8mblYG+Tq86V4m3hjVz0s= -github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.33.19 h1:ybEda2mkkX2o8NadXZBtcO9tgmW9cTQgeVSjypNsAy0= -github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.33.19/go.mod 
h1:RiMytGvN4azx4yLM0Kn3bX/XO9dLxj+eG72Smy+vNzI= -github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.54.6 h1:fQR1aeZKaiPkNPya0JMy2nhsoqoSgIWc3/QTiTiL1K0= -github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.54.6/go.mod h1:oJRLDix51wqBDlP9dv+blFkvvf7HESolQz5cdhdmV4A= -github.com/aws/aws-sdk-go-v2/service/iam v1.53.2 h1:62G6btFUwAa5uR5iPlnlNVAM0zJSLbWgDfKOfUC7oW4= -github.com/aws/aws-sdk-go-v2/service/iam v1.53.2/go.mod h1:av9clChrbZbJ5E21msSsiT2oghl2BJHfQGhCkXmhyu8= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8 h1:Z5EiPIzXKewUQK0QTMkutjiaPVeVYXX7KIqhXu/0fXs= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8/go.mod h1:FsTpJtvC4U1fyDXk7c71XoDv3HlRm8V3NiYLeYLh5YE= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.17 h1:Nhx/OYX+ukejm9t/MkWI8sucnsiroNYNGb5ddI9ungQ= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.17/go.mod h1:AjmK8JWnlAevq1b1NBtv5oQVG4iqnYXUufdgol+q9wg= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 h1:RuNSMoozM8oXlgLG/n6WLaFGoea7/CddrCfIiSA+xdY= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17/go.mod h1:F2xxQ9TZz5gDWsclCtPQscGpP0VUOc8RqgFM3vDENmU= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 h1:bGeHBsGZx0Dvu/eJC0Lh9adJa3M1xREcndxLNZlve2U= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17/go.mod h1:dcW24lbU0CzHusTE8LLHhRLI42ejmINN8Lcr22bwh/g= -github.com/aws/aws-sdk-go-v2/service/kms v1.49.5 h1:DKibav4XF66XSeaXcrn9GlWGHos6D/vJ4r7jsK7z5CE= -github.com/aws/aws-sdk-go-v2/service/kms v1.49.5/go.mod h1:1SdcmEGUEQE1mrU2sIgeHtcMSxHuybhPvuEPANzIDfI= -github.com/aws/aws-sdk-go-v2/service/lambda v1.88.0 h1:u66DMbJWDFXs9458RAHNtq2d0gyqcZFV4mzRwfjM358= 
-github.com/aws/aws-sdk-go-v2/service/lambda v1.88.0/go.mod h1:ogjbkxFgFOjG3dYFQ8irC92gQfpfMDcy1RDKNSZWXNU= -github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.59.3 h1:Fobn9IdJv8lgpGv5BYR5m3sFwlMctKgKE9rMRKVKpIQ= -github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.59.3/go.mod h1:1Yhak+i7rIt8Yq2lWViNXI4zoMufmqqjR89vNwgzafw= -github.com/aws/aws-sdk-go-v2/service/networkmanager v1.41.4 h1:J38JaWrNRBxSU/nrrC92/jqGVl07RAdGXM9GvwtdQqE= -github.com/aws/aws-sdk-go-v2/service/networkmanager v1.41.4/go.mod h1:vdT+5yxPXmxzJ8ETFpajcjce/eUViRAG58SPtZyHoGA= -github.com/aws/aws-sdk-go-v2/service/rds v1.115.0 h1:oNl6YghOtxu3MiFk1tQ86QlrYMIEJazGUDbBCg9nxLA= -github.com/aws/aws-sdk-go-v2/service/rds v1.115.0/go.mod h1:JBRYWpz5oXQtHgQC+X8LX9lh0FBCwRHJlWEIT+TTLaE= -github.com/aws/aws-sdk-go-v2/service/route53 v1.62.1 h1:1jIdwWOulae7bBLIgB36OZ0DINACb1wxM6wdGlx4eHE= -github.com/aws/aws-sdk-go-v2/service/route53 v1.62.1/go.mod h1:tE2zGlMIlxWv+7Otap7ctRp3qeKqtnja7DZguj3Vu/Y= -github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0 h1:oeu8VPlOre74lBA/PMhxa5vewaMIMmILM+RraSyB8KA= -github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0/go.mod h1:5jggDlZ2CLQhwJBiZJb4vfk4f0GxWdEDruWKEJ1xOdo= -github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4fpmpyD9wYG1vfSu+Y= -github.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M= -github.com/aws/aws-sdk-go-v2/service/sns v1.39.11 h1:Ke7RS0NuP9Xwk31prXYcFGA1Qfn8QmNWcxyjKPcXZdc= -github.com/aws/aws-sdk-go-v2/service/sns v1.39.11/go.mod h1:hdZDKzao0PBfJJygT7T92x2uVcWc/htqlhrjFIjnHDM= -github.com/aws/aws-sdk-go-v2/service/sqs v1.42.21 h1:Oa0IhwDLVrcBHDlNo1aosG4CxO4HyvzDV5xUWqWcBc0= -github.com/aws/aws-sdk-go-v2/service/sqs v1.42.21/go.mod h1:t98Ssq+qtXKXl2SFtaSkuT6X42FSM//fnO6sfq5RqGM= -github.com/aws/aws-sdk-go-v2/service/ssm v1.67.8 h1:31Llf5VfrZ78YvYs7sWcS7L2m3waikzRc6q1nYenVS4= -github.com/aws/aws-sdk-go-v2/service/ssm v1.67.8/go.mod h1:/jgaDlU1UImoxTxhRNxXHvBAPqPZQ8oCjcPbbkR6kac= 
-github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk= -github.com/aws/aws-sdk-go-v2/service/sso v1.30.9/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 h1:gd84Omyu9JLriJVCbGApcLzVR3XtmC4ZDPcAI6Ftvds= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo= -github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ= -github.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ= -github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk= -github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= +github.com/aws/aws-sdk-go-v2 v1.41.3 h1:4kQ/fa22KjDt13QCy1+bYADvdgcxpfH18f0zP542kZA= +github.com/aws/aws-sdk-go-v2 v1.41.3/go.mod h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.6 h1:N4lRUXZpZ1KVEUn6hxtco/1d2lgYhNn1fHkkl8WhlyQ= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.6/go.mod h1:lyw7GFp3qENLh7kwzf7iMzAxDn+NzjXEAGjKS2UOKqI= +github.com/aws/aws-sdk-go-v2/config v1.32.11 h1:ftxI5sgz8jZkckuUHXfC/wMUc8u3fG1vQS0plr2F2Zs= +github.com/aws/aws-sdk-go-v2/config v1.32.11/go.mod h1:twF11+6ps9aNRKEDimksp923o44w/Thk9+8YIlzWMmo= +github.com/aws/aws-sdk-go-v2/credentials v1.19.11 h1:NdV8cwCcAXrCWyxArt58BrvZJ9pZ9Fhf9w6Uh5W3Uyc= +github.com/aws/aws-sdk-go-v2/credentials v1.19.11/go.mod h1:30yY2zqkMPdrvxBqzI9xQCM+WrlrZKSOpSJEsylVU+8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.19 h1:INUvJxmhdEbVulJYHI061k4TVuS3jzzthNvjqvVvTKM= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.19/go.mod h1:FpZN2QISLdEBWkayloda+sZjVJL+e9Gl0k1SyTgcswU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.19 h1:/sECfyq2JTifMI2JPyZ4bdRN77zJmr6SrS1eL3augIA= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.19/go.mod 
h1:dMf8A5oAqr9/oxOfLkC/c2LU/uMcALP0Rgn2BD5LWn0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.19 h1:AWeJMk33GTBf6J20XJe6qZoRSJo0WfUhsMdUKhoODXE= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.19/go.mod h1:+GWrYoaAsV7/4pNHpwh1kiNLXkKaSoppxQq9lbH8Ejw= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.5 h1:clHU5fm//kWS1C2HgtgWxfQbFbx4b6rx+5jzhgX9HrI= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.5/go.mod h1:O3h0IK87yXci+kg6flUKzJnWeziQUKciKrLjcatSNcY= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.20 h1:qi3e/dmpdONhj1RyIZdi6DKKpDXS5Lb8ftr3p7cyHJc= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.20/go.mod h1:V1K+TeJVD5JOk3D9e5tsX2KUdL7BlB+FV6cBhdobN8c= +github.com/aws/aws-sdk-go-v2/service/apigateway v1.38.6 h1:dzd86UudvxJ1c6z/o+hHh7ZhkoBrh81XYz/M11zwQYI= +github.com/aws/aws-sdk-go-v2/service/apigateway v1.38.6/go.mod h1:jWmyEnBPJdt+RaHSRzZDKp3HyyzjOofGp4+xXY503Do= +github.com/aws/aws-sdk-go-v2/service/autoscaling v1.64.2 h1:pzFtdV2DArJul6aM3+WiWjUQ63IzrSnSbvBr8FAokt4= +github.com/aws/aws-sdk-go-v2/service/autoscaling v1.64.2/go.mod h1:8xQlcle6cf4R66HrXbiahORXakWpLlvJXoiGae5BlIc= +github.com/aws/aws-sdk-go-v2/service/cloudfront v1.60.2 h1:+5lijyTp+IoU5oh6rL3374yEkaPeFnaes+b4WWUQC2I= +github.com/aws/aws-sdk-go-v2/service/cloudfront v1.60.2/go.mod h1:Ndq7ECdcXc8jmE4WPhl409BdAAWW6jrirMFgliMxMtU= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.55.1 h1:s+ZS2lmYFeCISy20RkSerTmfMIzxlevj4LyWNuE3cfY= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.55.1/go.mod h1:xXUsqpyas4oCIPxrKoCeqvyvFBLEYSohybRVV0bHq9A= +github.com/aws/aws-sdk-go-v2/service/directconnect v1.38.13 h1:nUrVaHNZ82u7H3012w+gqscrbFjVLfSFWvbgeP7+J90= +github.com/aws/aws-sdk-go-v2/service/directconnect v1.38.13/go.mod h1:DTuhYIuDsUBwdSHh6Dg8NNRq7CCeVPI8w0D/ZXQiE40= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.56.1 h1:EkW4NqA2mwCkL7YCDYh6OpA/bCMhKYbZgpRHt2FD2Ow= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.56.1/go.mod h1:OQp5333OH1IjmJmJpTU4IwoaOoCMnDrThg0zIx169rE= 
+github.com/aws/aws-sdk-go-v2/service/ec2 v1.294.0 h1:776KnBqePBBR6zEDi0bUIHXzUBOISa2WgAKEgckUF8M= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.294.0/go.mod h1:rB577GvkmJADVOFGY8/j9sPv/ewcsEtQNsd9Lrn7Zx0= +github.com/aws/aws-sdk-go-v2/service/ecs v1.73.1 h1:TSmcWx+RzhGJrPNoFkuqANafJQ7xY3W2UBg6ShN3ae8= +github.com/aws/aws-sdk-go-v2/service/ecs v1.73.1/go.mod h1:KWILGx+bRowcGyJU/va2Ift48c658blP5e1qvldnIRE= +github.com/aws/aws-sdk-go-v2/service/efs v1.41.12 h1:YZXW11dESIf6CNhMG2ICZonCkzKBaGLuFamSJTYV5g0= +github.com/aws/aws-sdk-go-v2/service/efs v1.41.12/go.mod h1:+rjniKD0YQAmjiDNJvLodKXn1vXWwMpctrr/M4zm1V4= +github.com/aws/aws-sdk-go-v2/service/eks v1.80.2 h1:+FLU7+D9AW9ZMQIg4YjIN/nTJV0A2TIB2f+ovZXqAdU= +github.com/aws/aws-sdk-go-v2/service/eks v1.80.2/go.mod h1:nx52u/3RVDWkOcrAchYgt7CXkrd03A6Gvzi0trtMFjQ= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.33.21 h1:VriOdPKF8YrkMpnT76ZwA2LXk5aBInOfuzN14QGTOJc= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.33.21/go.mod h1:sp4Mz5YUnYCvIkGNEcdEPp+DuHqquEZYXyIuKXuHzig= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.54.8 h1:xUwbqWhKASQsigeQfeBjhbm6dAP1EeTulHnNSYv5Xfc= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.54.8/go.mod h1:sQoz/dTooY3kCkNNGxVLTS7EacLA0qXUaK4BkpMjGOc= +github.com/aws/aws-sdk-go-v2/service/iam v1.53.4 h1:FUWGS7m97SYL0bk9Kb+Q4bVpcSrKOHNiIbEXIRFTRW4= +github.com/aws/aws-sdk-go-v2/service/iam v1.53.4/go.mod h1:seDE466zJ4haVuAVcRk+yIH4DWb3s6cqt3Od8GxnGAA= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.6 h1:XAq62tBTJP/85lFD5oqOOe7YYgWxY9LvWq8plyDvDVg= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.6/go.mod h1:x0nZssQ3qZSnIcePWLvcoFisRXJzcTVvYpAAdYX8+GI= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.11 h1:BYf7XNsJMzl4mObARUBUib+j2tf0U//JAAtTnYqvqCw= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.11/go.mod h1:aEUS4WrNk/+FxkBZZa7tVgp4pGH+kFGW40Y8rCPqt5g= 
+github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.19 h1:jdCj9vbCXwzTcIJX+MVd2UdssFhRJFTrWlPZwZB8Hpk= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.19/go.mod h1:Dgg2d5WGRr7YB8JJsELskBxLUhgwWppXPwlvmuQKhbc= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.19 h1:X1Tow7suZk9UCJHE1Iw9GMZJJl0dAnKXXP1NaSDHwmw= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.19/go.mod h1:/rARO8psX+4sfjUQXp5LLifjUt8DuATZ31WptNJTyQA= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.19 h1:JnQeStZvPHFHeyky/7LbMlyQjUa+jIBj36OlWm0pzIk= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.19/go.mod h1:HGyasyHvYdFQeJhvDHfH7HXkHh57htcJGKDZ+7z+I24= +github.com/aws/aws-sdk-go-v2/service/kms v1.50.2 h1:UOHOXigIzDRaEU03CBQcZ5uW7FNC7E+vwfhsQWXl5RQ= +github.com/aws/aws-sdk-go-v2/service/kms v1.50.2/go.mod h1:nAa5gmcmAmjXN3tGuhPSHLXFeWv+7nzKhjZzh8F7MH0= +github.com/aws/aws-sdk-go-v2/service/lambda v1.88.2 h1:j+IFEtr7aykD6jJRE86kv/+TgN1UK90LudBuz2bjjYw= +github.com/aws/aws-sdk-go-v2/service/lambda v1.88.2/go.mod h1:IDvS3hFp41ZJTByY7BO8PNgQkPNeQDjJfU/0cHJ2V4o= +github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.59.5 h1:atVRUNiG3hrpntduj0OExYB31F59zr+eavoAecVNMhQ= +github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.59.5/go.mod h1:Lr/sslNngRPyPo2FeWkEo02t9f/CjkzSIeR0MqRh8ao= +github.com/aws/aws-sdk-go-v2/service/networkmanager v1.41.6 h1:G6+LJP+mxaLuM65jEwpgOcef2fmGSyr92U0zjE988A0= +github.com/aws/aws-sdk-go-v2/service/networkmanager v1.41.6/go.mod h1:GIy6ofSymO4ZAPKlYhb6Na4sXqsJrmPZPP/NyheE0rk= +github.com/aws/aws-sdk-go-v2/service/rds v1.116.2 h1:KQLPCn9BWXW0Y8DyzEokbTF9HOiOQoR77Eu9GKcjBWU= +github.com/aws/aws-sdk-go-v2/service/rds v1.116.2/go.mod h1:aPw0arz1e+cZUbF4LU7ZMYB1ZSYsJKi/tsAq9wADfeE= +github.com/aws/aws-sdk-go-v2/service/route53 v1.62.3 h1:JRPXnIr0WwFsSHBmuCvT/uh0Vgys+crvwkOghbJEqi8= +github.com/aws/aws-sdk-go-v2/service/route53 v1.62.3/go.mod 
h1:DHddp7OO4bY467WVCqWBzk5+aEWn7vqYkap7UigJzGk= +github.com/aws/aws-sdk-go-v2/service/s3 v1.96.4 h1:4ExZyubQ6LQQVuF2Qp9OsfEvsTdAWh5Gfwf6PgIdLdk= +github.com/aws/aws-sdk-go-v2/service/s3 v1.96.4/go.mod h1:NF3JcMGOiARAss1ld3WGORCw71+4ExDD2cbbdKS5PpA= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.7 h1:Y2cAXlClHsXkkOvWZFXATr34b0hxxloeQu/pAZz2row= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.7/go.mod h1:idzZ7gmDeqeNrSPkdbtMp9qWMgcBwykA7P7Rzh5DXVU= +github.com/aws/aws-sdk-go-v2/service/sns v1.39.13 h1:8xP94tDzFpgwIOsusGiEFHPaqrpckDojoErk/ZFZTio= +github.com/aws/aws-sdk-go-v2/service/sns v1.39.13/go.mod h1:RwF6Xnba8PlINxJUQq1IAWeon6IglvqsnhNqV8QsQjk= +github.com/aws/aws-sdk-go-v2/service/sqs v1.42.23 h1:Rw3+8VaLH0jozccNR52bSvCPYtkiQeNn576l7HCHvL0= +github.com/aws/aws-sdk-go-v2/service/sqs v1.42.23/go.mod h1:MdjRkQEd2EUOiifYnkg/6f1NGtZSN3dFOLNByzufXok= +github.com/aws/aws-sdk-go-v2/service/ssm v1.68.2 h1:idKv7B7NjmTDd05YHQYMMEFNeD0rWxs/kVX4lsjEiDo= +github.com/aws/aws-sdk-go-v2/service/ssm v1.68.2/go.mod h1:1NiL45h4A60CO/hu/UdNyG5AD3VEsdpaQx1l5KtpurA= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.12 h1:iSsvB9EtQ09YrsmIc44Heqlx5ByGErqhPK1ZQLppias= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.12/go.mod h1:fEWYKTRGoZNl8tZ77i61/ccwOMJdGxwOhWCkp6TXAr0= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.16 h1:EnUdUqRP1CNzt2DkV67tJx6XDN4xlfBFm+bzeNOQVb0= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.16/go.mod h1:Jic/xv0Rq/pFNCh3WwpH4BEqdbSAl+IyHro8LbibHD8= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.8 h1:XQTQTF75vnug2TXS8m7CVJfC2nniYPZnO1D4Np761Oo= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.8/go.mod h1:Xgx+PR1NUOjNmQY+tRMnouRp83JRM8pRMw/vCaVhPkI= +github.com/aws/smithy-go v1.24.2 h1:FzA3bu/nt/vDvmnkg+R8Xl46gmzEDam6mZ1hzmwXFng= +github.com/aws/smithy-go v1.24.2/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= github.com/aymanbagabas/go-osc52/v2 
v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= -github.com/aymanbagabas/go-udiff v0.2.0 h1:TK0fH4MteXUDspT88n8CKzvK0X9O2xu9yQjWpi6yML8= -github.com/aymanbagabas/go-udiff v0.2.0/go.mod h1:RE4Ex0qsGkTAJoQdQQCA0uG+nAzJO/pI/QwceO5fgrA= +github.com/aymanbagabas/go-udiff v0.4.0 h1:TKnLPh7IbnizJIBKFWa9mKayRUBQ9Kh1BPCk6w2PnYM= +github.com/aymanbagabas/go-udiff v0.4.0/go.mod h1:0L9PGwj20lrtmEMeyw4WKJ/TMyDtvAoK9bf2u/mNo3w= github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk= github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= github.com/brianvoe/gofakeit/v6 v6.28.0 h1:Xib46XXuQfmlLS2EXRuJpqcw8St6qSZz75OUo0tgAW4= @@ -271,26 +279,34 @@ github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1x github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/charmbracelet/colorprofile v0.3.1 h1:k8dTHMd7fgw4bnFd7jXTLZrSU/CQrKnL3m+AxCzDz40= -github.com/charmbracelet/colorprofile v0.3.1/go.mod h1:/GkGusxNs8VB/RSOh3fu0TJmQ4ICMMPApIIVn0KszZ0= +github.com/charmbracelet/colorprofile v0.4.2 h1:BdSNuMjRbotnxHSfxy+PCSa4xAmz7szw70ktAtWRYrY= +github.com/charmbracelet/colorprofile v0.4.2/go.mod h1:0rTi81QpwDElInthtrQ6Ni7cG0sDtwAd4C4le060fT8= github.com/charmbracelet/glamour v0.10.0 h1:MtZvfwsYCx8jEPFJm3rIBFIMZUfUJ765oX8V6kXldcY= github.com/charmbracelet/glamour v0.10.0/go.mod h1:f+uf+I/ChNmqo087elLnVdCiVgjSKWuXa/l6NU2ndYk= github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834 h1:ZR7e0ro+SZZiIZD7msJyA+NjkCNNavuiPBLgerbOziE= github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834/go.mod h1:aKC/t2arECF6rNOnaKaVU6y4t4ZeHQzqfxedE/VkVhA= -github.com/charmbracelet/lipgloss/v2 v2.0.0-beta.3 h1:W6DpZX6zSkZr0iFq6JVh1vItLoxfYtNlaxOJtWp8Kis= 
-github.com/charmbracelet/lipgloss/v2 v2.0.0-beta.3/go.mod h1:65HTtKURcv/ict9ZQhr6zT84JqIjMcJbyrZYHHKNfKA= -github.com/charmbracelet/x/ansi v0.8.0 h1:9GTq3xq9caJW8ZrBTe0LIe2fvfLR/bYXKTx2llXn7xE= -github.com/charmbracelet/x/ansi v0.8.0/go.mod h1:wdYl/ONOLHLIVmQaxbIYEC/cRKOQyjTkowiI4blgS9Q= -github.com/charmbracelet/x/cellbuf v0.0.13 h1:/KBBKHuVRbq1lYx5BzEHBAFBP8VcQzJejZ/IA3iR28k= -github.com/charmbracelet/x/cellbuf v0.0.13/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs= -github.com/charmbracelet/x/exp/golden v0.0.0-20240806155701-69247e0abc2a h1:G99klV19u0QnhiizODirwVksQB91TJKV/UaTnACcG30= -github.com/charmbracelet/x/exp/golden v0.0.0-20240806155701-69247e0abc2a/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U= +github.com/charmbracelet/ultraviolet v0.0.0-20251205161215-1948445e3318 h1:OqDqxQZliC7C8adA7KjelW3OjtAxREfeHkNcd66wpeI= +github.com/charmbracelet/ultraviolet v0.0.0-20251205161215-1948445e3318/go.mod h1:Y6kE2GzHfkyQQVCSL9r2hwokSrIlHGzZG+71+wDYSZI= +github.com/charmbracelet/x/ansi v0.11.6 h1:GhV21SiDz/45W9AnV2R61xZMRri5NlLnl6CVF7ihZW8= +github.com/charmbracelet/x/ansi v0.11.6/go.mod h1:2JNYLgQUsyqaiLovhU2Rv/pb8r6ydXKS3NIttu3VGZQ= +github.com/charmbracelet/x/cellbuf v0.0.15 h1:ur3pZy0o6z/R7EylET877CBxaiE1Sp1GMxoFPAIztPI= +github.com/charmbracelet/x/cellbuf v0.0.15/go.mod h1:J1YVbR7MUuEGIFPCaaZ96KDl5NoS0DAWkskup+mOY+Q= +github.com/charmbracelet/x/exp/golden v0.0.0-20250806222409-83e3a29d542f h1:pk6gmGpCE7F3FcjaOEKYriCvpmIN4+6OS/RD0vm4uIA= +github.com/charmbracelet/x/exp/golden v0.0.0-20250806222409-83e3a29d542f/go.mod h1:IfZAMTHB6XkZSeXUqriemErjAWCCzT0LwjKFYCZyw0I= github.com/charmbracelet/x/exp/slice v0.0.0-20250417172821-98fd948af1b1 h1:8fUBSeb8wmOWD0ToP8AJFhUCYrmR3aj/sLECrLGM0TI= github.com/charmbracelet/x/exp/slice v0.0.0-20250417172821-98fd948af1b1/go.mod h1:B3UgsnsBZS/eX42BlaNiJkD1pPOUa+oF1IYC6Yd2CEU= -github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ= -github.com/charmbracelet/x/term v0.2.1/go.mod 
h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg= -github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= -github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= +github.com/charmbracelet/x/term v0.2.2 h1:xVRT/S2ZcKdhhOuSP4t5cLi5o+JxklsoEObBSgfgZRk= +github.com/charmbracelet/x/term v0.2.2/go.mod h1:kF8CY5RddLWrsgVwpw4kAa6TESp6EB5y3uxGLeCqzAI= +github.com/charmbracelet/x/termios v0.1.1 h1:o3Q2bT8eqzGnGPOYheoYS8eEleT5ZVNYNy8JawjaNZY= +github.com/charmbracelet/x/termios v0.1.1/go.mod h1:rB7fnv1TgOPOyyKRJ9o+AsTU/vK5WHJ2ivHeut/Pcwo= +github.com/charmbracelet/x/windows v0.2.2 h1:IofanmuvaxnKHuV04sC0eBy/smG6kIKrWG2/jYn2GuM= +github.com/charmbracelet/x/windows v0.2.2/go.mod h1:/8XtdKZzedat74NQFn0NGlGL4soHB0YQZrETF96h75k= +github.com/clipperhouse/displaywidth v0.11.0 h1:lBc6kY44VFw+TDx4I8opi/EtL9m20WSEFgwIwO+UVM8= +github.com/clipperhouse/displaywidth v0.11.0/go.mod h1:bkrFNkf81G8HyVqmKGxsPufD3JhNl3dSqnGhOoSD/o0= +github.com/clipperhouse/uax29/v2 v2.7.0 h1:+gs4oBZ2gPfVrKPthwbMzWZDaAFPGYK72F0NJv2v7Vk= +github.com/clipperhouse/uax29/v2 v2.7.0/go.mod h1:EFJ2TJMRUaplDxHKj1qAEhCtQPW2tJSwu5BF98AuoVM= +github.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg8= +github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4= github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 h1:6xNmx7iTtyBRev0+D/Tv1FZd4SCg8axKApyNyRsAt/w= github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5/go.mod h1:KdCmV+x/BuvyMxRnYBlmVaq4OLiKW6iRQfvC62cvdkI= github.com/coder/websocket v1.8.14 h1:9L0p0iKiNOibykf283eHkKUHHrpG7f65OE3BhhO7v9g= @@ -322,8 +338,8 @@ github.com/envoyproxy/protoc-gen-validate v1.3.0/go.mod h1:HvYl7zwPa5mffgyeTUHA9 github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= github.com/fatih/color v1.13.0/go.mod 
h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= -github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= @@ -332,8 +348,8 @@ github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= -github.com/getsentry/sentry-go v0.42.0 h1:eeFMACuZTbUQf90RE8dE4tXeSe4CZyfvR1MBL7RLEt8= -github.com/getsentry/sentry-go v0.42.0/go.mod h1:eRXCoh3uvmjQLY6qu63BjUZnaBu5L5WhMV1RwYO8W5s= +github.com/getsentry/sentry-go v0.43.0 h1:XbXLpFicpo8HmBDaInk7dum18G9KSLcjZiyUKS+hLW4= +github.com/getsentry/sentry-go v0.43.0/go.mod h1:XDotiNZbgf5U8bPDUAfvcFmOnMQQceESxyKaObSssW0= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= @@ -363,8 +379,8 @@ github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9L github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= github.com/goccy/go-json v0.10.5/go.mod 
h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= -github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= -github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= +github.com/golang-jwt/jwt/v5 v5.3.1 h1:kYf81DTWFe7t+1VvL7eS+jKFVWaUnK9cB1qbwn63YCY= +github.com/golang-jwt/jwt/v5 v5.3.1/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -397,8 +413,8 @@ github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaU github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.11 h1:vAe81Msw+8tKUxi2Dqh/NZMz7475yUvmRIkXr4oN2ao= -github.com/googleapis/enterprise-certificate-proxy v0.3.11/go.mod h1:RFV7MUdlb7AgEq2v7FmMCfeSMCllAzWxFgRdusoGks8= +github.com/googleapis/enterprise-certificate-proxy v0.3.12 h1:Fg+zsqzYEs1ZnvmcztTYxhgCBsx3eEhEwQ1W/lHq/sQ= +github.com/googleapis/enterprise-certificate-proxy v0.3.12/go.mod h1:vqVt9yG9480NtzREnTlmGSBmFrA+bzb0yl0TxoBQXOg= github.com/googleapis/gax-go/v2 v2.17.0 h1:RksgfBpxqff0EZkDWYuz9q/uWsTVz+kf43LsZ1J6SMc= github.com/googleapis/gax-go/v2 v2.17.0/go.mod h1:mzaqghpQp4JDh3HvADwrat+6M3MOIDp5YKHhb9PAgDY= github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ= @@ -409,8 +425,8 @@ github.com/goombaio/namegenerator v0.0.0-20181006234301-989e774b106e h1:XmA6L9IP github.com/goombaio/namegenerator v0.0.0-20181006234301-989e774b106e/go.mod 
h1:AFIo+02s+12CEg8Gzz9kzhCbmbq6JcKNrhHffCGA9z4= github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8= github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 h1:X+2YciYSxvMQK0UZ7sg45ZVabVZBeBuvMkmuI2V3Fak= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7/go.mod h1:lW34nIZuQ8UDPdkon5fmfp2l3+ZkQ2me/+oecHYLOII= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 h1:HWRh5R2+9EifMyIHV7ZV+MIZqgz+PMpZ14Jynv3O2Zs= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0/go.mod h1:JfhWUomR1baixubs02l85lZYYOm7LV6om4ceouMv45c= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -442,16 +458,16 @@ github.com/hashicorp/hcl/v2 v2.24.0 h1:2QJdZ454DSsYGoaE6QheQZjtKZSUs9Nh2izTWiwQx github.com/hashicorp/hcl/v2 v2.24.0/go.mod h1:oGoO1FIQYfn/AgyOhlg9qLC6/nOJPX3qGbkZpYAcqfM= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/terraform-config-inspect v0.0.0-20260210152655-f4be3ba97d94 h1:p+oHuSCXvfFBFAejlPswDa7i5fi3r3+03jeW9mJs4qM= -github.com/hashicorp/terraform-config-inspect v0.0.0-20260210152655-f4be3ba97d94/go.mod h1:Gz/z9Hbn+4KSp8A2FBtNszfLSdT2Tn/uAKGuVqqWmDI= +github.com/hashicorp/terraform-config-inspect v0.0.0-20260224005459-813a97530220 h1:v0h6j7IMgA24b8aWG5+d6WStIP9G8e/p0DKK3Bmk7YQ= +github.com/hashicorp/terraform-config-inspect v0.0.0-20260224005459-813a97530220/go.mod h1:Gz/z9Hbn+4KSp8A2FBtNszfLSdT2Tn/uAKGuVqqWmDI= github.com/hashicorp/terraform-exec v0.24.0 h1:mL0xlk9H5g2bn0pPF6JQZk5YlByqSqrO5VoaNtAf8OE= github.com/hashicorp/terraform-exec v0.24.0/go.mod h1:lluc/rDYfAhYdslLJQg3J0oDqo88oGQAdHR+wDqFvo4= 
github.com/hashicorp/terraform-json v0.27.2 h1:BwGuzM6iUPqf9JYM/Z4AF1OJ5VVJEEzoKST/tRDBJKU= github.com/hashicorp/terraform-json v0.27.2/go.mod h1:GzPLJ1PLdUG5xL6xn1OXWIjteQRT2CNT9o/6A9mi9hE= -github.com/hashicorp/terraform-plugin-framework v1.17.0 h1:JdX50CFrYcYFY31gkmitAEAzLKoBgsK+iaJjDC8OexY= -github.com/hashicorp/terraform-plugin-framework v1.17.0/go.mod h1:4OUXKdHNosX+ys6rLgVlgklfxN3WHR5VHSOABeS/BM0= -github.com/hashicorp/terraform-plugin-go v0.29.0 h1:1nXKl/nSpaYIUBU1IG/EsDOX0vv+9JxAltQyDMpq5mU= -github.com/hashicorp/terraform-plugin-go v0.29.0/go.mod h1:vYZbIyvxyy0FWSmDHChCqKvI40cFTDGSb3D8D70i9GM= +github.com/hashicorp/terraform-plugin-framework v1.18.0 h1:Xy6OfqSTZfAAKXSlJ810lYvuQvYkOpSUoNMQ9l2L1RA= +github.com/hashicorp/terraform-plugin-framework v1.18.0/go.mod h1:eeFIf68PME+kenJeqSrIcpHhYQK0TOyv7ocKdN4Z35E= +github.com/hashicorp/terraform-plugin-go v0.30.0 h1:VmEiD0n/ewxbvV5VI/bYwNtlSEAXtHaZlSnyUUuQK6k= +github.com/hashicorp/terraform-plugin-go v0.30.0/go.mod h1:8d523ORAW8OHgA9e8JKg0ezL3XUO84H0A25o4NY/jRo= github.com/hashicorp/terraform-plugin-log v0.10.0 h1:eu2kW6/QBVdN4P3Ju2WiB2W3ObjkAsyfBsL3Wh1fj3g= github.com/hashicorp/terraform-plugin-log v0.10.0/go.mod h1:/9RR5Cv2aAbrqcTSdNmY1NRHP4E3ekrXRGjqORpXyB0= github.com/hashicorp/terraform-plugin-sdk/v2 v2.38.1 h1:mlAq/OrMlg04IuJT7NpefI1wwtdpWudnEmjuQs04t/4= @@ -502,25 +518,24 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lithammer/fuzzysearch v1.1.8 h1:/HIuJnjHuXS8bKaiTMeeDlW2/AyIWk2brx1V8LFgLN4= github.com/lithammer/fuzzysearch v1.1.8/go.mod h1:IdqeyBClc3FFqSzYq/MXESsS4S0FsZ5ajtkr5xPLts4= -github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= -github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= +github.com/lucasb-eyer/go-colorful v1.3.0 h1:2/yBRLdWBZKrf7gB40FoiKfAWYQ0lqNcbuQwVHXptag= 
+github.com/lucasb-eyer/go-colorful v1.3.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= -github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/micahhausler/aws-iam-policy v0.4.2 h1:HF7bERLnpqEmffV9/wTT4jZ7TbSNVk0JbpXo1Cj3up0= -github.com/micahhausler/aws-iam-policy v0.4.2/go.mod h1:Ojgst9ZFn+VEEJpqtuw/LxVGqEf2+hwWBlkYWvF/XWM= +github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw= +github.com/mattn/go-runewidth v0.0.19/go.mod 
h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs= +github.com/micahhausler/aws-iam-policy v0.4.4 h1:1aMhJ+0CkvUJ8HGN1chX+noXHs8uvGLkD7xIBeYd31c= +github.com/micahhausler/aws-iam-policy v0.4.4/go.mod h1:H+yWljTu4XWJjNJJYgrPUai0AUTGNHc8pumkN57/foI= github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwXFM08ygZfk= github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA= github.com/miekg/dns v1.1.72 h1:vhmr+TF2A3tuoGNkLDFK9zi36F2LS+hKTRW0Uf8kbzI= @@ -557,8 +572,8 @@ github.com/nats-io/jwt/v2 v2.8.0 h1:K7uzyz50+yGZDO5o772eRE7atlcSEENpL7P+b74JV1g= github.com/nats-io/jwt/v2 v2.8.0/go.mod h1:me11pOkwObtcBNR8AiMrUbtVOUGkqYjMQZ6jnSdVUIA= github.com/nats-io/nats-server/v2 v2.12.4 h1:ZnT10v2LU2Xcoiy8ek9X6Se4YG8EuMfIfvAEuFVx1Ts= github.com/nats-io/nats-server/v2 v2.12.4/go.mod h1:5MCp/pqm5SEfsvVZ31ll1088ZTwEUdvRX1Hmh/mTTDg= -github.com/nats-io/nats.go v1.48.0 h1:pSFyXApG+yWU/TgbKCjmm5K4wrHu86231/w84qRVR+U= -github.com/nats-io/nats.go v1.48.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g= +github.com/nats-io/nats.go v1.49.0 h1:yh/WvY59gXqYpgl33ZI+XoVPKyut/IcEaqtsiuTJpoE= +github.com/nats-io/nats.go v1.49.0/go.mod h1:fDCn3mN5cY8HooHwE2ukiLb4p4G4ImmzvXyJt+tGwdw= github.com/nats-io/nkeys v0.4.15 h1:JACV5jRVO9V856KOapQ7x+EY8Jo3qw1vJt/9Jpwzkk4= github.com/nats-io/nkeys v0.4.15/go.mod h1:CpMchTXC9fxA5zrMo4KpySxNjiDVvr8ANOSZdiNfUrs= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= @@ -689,8 +704,8 @@ github.com/yuin/goldmark v1.7.10 h1:S+LrtBjRmqMac2UdtB6yyCEJm+UILZ2fefI4p7o0QpI= github.com/yuin/goldmark v1.7.10/go.mod h1:ip/1k0VRfGynBgxOz0yCqHrbZXhcjxyuS66Brc7iBKg= github.com/yuin/goldmark-emoji v1.0.5 h1:EMVWyCGPlXJfUXBXpuMu+ii3TIaxbVBnEX9uaDC4cIk= github.com/yuin/goldmark-emoji v1.0.5/go.mod h1:tTkZEbwu5wkPmgTcitqddVxY9osFZiavD+r4AzQrh1U= -github.com/zclconf/go-cty v1.17.0 h1:seZvECve6XX4tmnvRzWtJNHdscMtYEx5R7bnnVyd/d0= -github.com/zclconf/go-cty v1.17.0/go.mod 
h1:wqFzcImaLTI6A5HfsRwB0nj5n0MRZFwmey8YoFPPs3U= +github.com/zclconf/go-cty v1.18.0 h1:pJ8+HNI4gFoyRNqVE37wWbJWVw43BZczFo7KUoRczaA= +github.com/zclconf/go-cty v1.18.0/go.mod h1:qpnV6EDNgC1sns/AleL1fvatHw72j+S+nS+MJ+T2CSg= github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= @@ -707,30 +722,30 @@ go.opentelemetry.io/contrib/detectors/gcp v1.39.0 h1:kWRNZMsfBHZ+uHjiH4y7Etn2FK2 go.opentelemetry.io/contrib/detectors/gcp v1.39.0/go.mod h1:t/OGqzHBa5v6RHZwrDBJ2OirWc+4q/w2fTbLZwAKjTk= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0= -go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= -go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 h1:QKdN8ly8zEMrByybbQgv8cWBcdAarwmIPZ6FThrWXJs= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0/go.mod h1:bTdK1nhqF76qiPoCCdyFIV+N/sRHYXYCTQc+3VCi3MI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 h1:wVZXIWjQSeSmMoxF74LzAnpVQOAFDo3pPji9Y4SOFKc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0/go.mod h1:khvBS2IggMFNwZK/6lEeHg/W57h/IX6J4URh57fuI40= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.66.0 
h1:PnV4kVnw0zOmwwFkAzCN5O07fw1YOIQor120zrh0AVo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.66.0/go.mod h1:ofAwF4uinaf8SXdVzzbL4OsxJ3VfeEg3f/F6CeF49/Y= +go.opentelemetry.io/otel v1.41.0 h1:YlEwVsGAlCvczDILpUXpIpPSL/VPugt7zHThEMLce1c= +go.opentelemetry.io/otel v1.41.0/go.mod h1:Yt4UwgEKeT05QbLwbyHXEwhnjxNO6D8L5PQP51/46dE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.41.0 h1:ao6Oe+wSebTlQ1OEht7jlYTzQKE+pnx/iNywFvTbuuI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.41.0/go.mod h1:u3T6vz0gh/NVzgDgiwkgLxpsSF6PaPmo2il0apGJbls= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.41.0 h1:inYW9ZhgqiDqh6BioM7DVHHzEGVq76Db5897WLGZ5Go= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.41.0/go.mod h1:Izur+Wt8gClgMJqO/cZ8wdeeMryJ/xxiOVgFSSfpDTY= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.39.0 h1:5gn2urDL/FBnK8OkCfD1j3/ER79rUuTYmCvlXBKeYL8= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.39.0/go.mod h1:0fBG6ZJxhqByfFZDwSwpZGzJU671HkwpWaNe2t4VUPI= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.40.0 h1:MzfofMZN8ulNqobCmCAVbqVL5syHw+eB2qPRkCMA/fQ= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.40.0/go.mod h1:E73G9UFtKRXrxhBsHtG00TB5WxX57lpsQzogDkqBTz8= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.41.0 h1:61oRQmYGMW7pXmFjPg1Muy84ndqMxQ6SH2L8fBG8fSY= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.41.0/go.mod h1:c0z2ubK4RQL+kSDuuFu9WnuXimObon3IiKjJf4NACvU= go.opentelemetry.io/otel/log v0.11.0 h1:c24Hrlk5WJ8JWcwbQxdBqxZdOK7PcP/LFtOtwpDTe3Y= go.opentelemetry.io/otel/log v0.11.0/go.mod h1:U/sxQ83FPmT29trrifhQg+Zj2lo1/IPN1PF6RTFqdwc= -go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= -go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/metric v1.41.0 h1:rFnDcs4gRzBcsO9tS8LCpgR0dxg4aaxWlJxCno7JlTQ= +go.opentelemetry.io/otel/metric 
v1.41.0/go.mod h1:xPvCwd9pU0VN8tPZYzDZV/BMj9CM9vs00GuBjeKhJps= go.opentelemetry.io/otel/schema v0.0.12 h1:X8NKrwH07Oe9SJruY/D1XmwHrb6D2+qrLs2POlZX7F4= go.opentelemetry.io/otel/schema v0.0.12/go.mod h1:+w+Q7DdGfykSNi+UU9GAQz5/rtYND6FkBJUWUXzZb0M= -go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= -go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= -go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= -go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= -go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= -go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +go.opentelemetry.io/otel/sdk v1.41.0 h1:YPIEXKmiAwkGl3Gu1huk1aYWwtpRLeskpV+wPisxBp8= +go.opentelemetry.io/otel/sdk v1.41.0/go.mod h1:ahFdU0G5y8IxglBf0QBJXgSe7agzjE4GiTJ6HT9ud90= +go.opentelemetry.io/otel/sdk/metric v1.41.0 h1:siZQIYBAUd1rlIWQT2uCxWJxcCO7q3TriaMlf08rXw8= +go.opentelemetry.io/otel/sdk/metric v1.41.0/go.mod h1:HNBuSvT7ROaGtGI50ArdRLUnvRTRGniSUZbxiWxSO8Y= +go.opentelemetry.io/otel/trace v1.41.0 h1:Vbk2co6bhj8L59ZJ6/xFTskY+tGAbOnCtQGVVa9TIN0= +go.opentelemetry.io/otel/trace v1.41.0/go.mod h1:U1NU4ULCoxeDKc09yCWdWe+3QoyweJcISEVa1RBzOis= go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A= go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= @@ -751,15 +766,15 @@ golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2 golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod 
h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c= -golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU= +golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= +golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60= -golang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM= +golang.org/x/net v0.51.0 h1:94R/GTO7mt3/4wIKpcR5gkGmRLOuE/2hNGeWq/GBIFo= +golang.org/x/net v0.51.0/go.mod h1:aamm+2QF5ogm02fjy5Bb7CQ0WMt1/WVM7FtyaTLlA9Y= golang.org/x/oauth2 v0.35.0 h1:Mv2mzuHuZuY2+bkyWXIHMfhNdJAdwW3FuWeCPYN5GVQ= golang.org/x/oauth2 v0.35.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -782,7 +797,6 @@ golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -820,19 +834,19 @@ golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhS golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4= gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E= -google.golang.org/api v0.266.0 h1:hco+oNCf9y7DmLeAtHJi/uBAY7n/7XC9mZPxu1ROiyk= -google.golang.org/api v0.266.0/go.mod h1:Jzc0+ZfLnyvXma3UtaTl023TdhZu6OMBP9tJ+0EmFD0= +google.golang.org/api v0.269.0 h1:qDrTOxKUQ/P0MveH6a7vZ+DNHxJQjtGm/uvdbdGXCQg= +google.golang.org/api v0.269.0/go.mod h1:N8Wpcu23Tlccl0zSHEkcAZQKDLdquxK+l9r2LkwAauE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20260128011058-8636f8732409 h1:VQZ/yAbAtjkHgH80teYd2em3xtIkkHd7ZhqfH2N9CsM= google.golang.org/genproto v0.0.0-20260128011058-8636f8732409/go.mod h1:rxKD3IEILWEu3P44seeNOAwZN4SaoKaQ/2eTg4mM6EM= -google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20 h1:7ei4lp52gK1uSejlA8AZl5AJjeLUOHBQscRQZUgAcu0= -google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20/go.mod h1:ZdbssH/1SOVnjnDlXzxDHK2MCidiqXtbYccJNzNYPEE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 h1:mWPCjDEyshlQYzBpMNHaEof6UX1PmHcaUODUywQ0uac= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= -google.golang.org/grpc v1.79.1 
h1:zGhSi45ODB9/p3VAawt9a+O/MULLl9dpizzNNpq7flY= -google.golang.org/grpc v1.79.1/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= +google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 h1:JLQynH/LBHfCTSbDWl+py8C+Rg/k1OVH3xfcaiANuF0= +google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:kSJwQxqmFXeo79zOmbrALdflXQeAYcUbgS7PbpMknCY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260226221140-a57be14db171 h1:ggcbiqK8WWh6l1dnltU4BgWGIGo+EVYxCaAPih/zQXQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260226221140-a57be14db171/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= +google.golang.org/grpc v1.79.2 h1:fRMD94s2tITpyJGtBBn7MkMseNpOZU8ZxgC3MMBaXRU= +google.golang.org/grpc v1.79.2/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= @@ -858,12 +872,12 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.35.1 h1:0PO/1FhlK/EQNVK5+txc4FuhQibV25VLSdLMmGpDE/Q= -k8s.io/api v0.35.1/go.mod h1:28uR9xlXWml9eT0uaGo6y71xK86JBELShLy4wR1XtxM= -k8s.io/apimachinery v0.35.1 h1:yxO6gV555P1YV0SANtnTjXYfiivaTPvCTKX6w6qdDsU= -k8s.io/apimachinery v0.35.1/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= -k8s.io/client-go v0.35.1 h1:+eSfZHwuo/I19PaSxqumjqZ9l5XiTEKbIaJ+j1wLcLM= -k8s.io/client-go v0.35.1/go.mod h1:1p1KxDt3a0ruRfc/pG4qT/3oHmUj1AhSHEcxNSGg+OA= +k8s.io/api v0.35.2 h1:tW7mWc2RpxW7HS4CoRXhtYHSzme1PN1UjGHJ1bdrtdw= +k8s.io/api 
v0.35.2/go.mod h1:7AJfqGoAZcwSFhOjcGM7WV05QxMMgUaChNfLTXDRE60= +k8s.io/apimachinery v0.35.2 h1:NqsM/mmZA7sHW02JZ9RTtk3wInRgbVxL8MPfzSANAK8= +k8s.io/apimachinery v0.35.2/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= +k8s.io/client-go v0.35.2 h1:YUfPefdGJA4aljDdayAXkc98DnPkIetMl4PrKX97W9o= +k8s.io/client-go v0.35.2/go.mod h1:4QqEwh4oQpeK8AaefZ0jwTFJw/9kIjdQi0jpKeYvz7g= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE= diff --git a/go/auth/auth.go b/go/auth/auth.go index 9b7e2ce2..21f37347 100644 --- a/go/auth/auth.go +++ b/go/auth/auth.go @@ -70,7 +70,7 @@ type ClientCredentialsConfig struct { // The ClientID of the application that we'll be authenticating as ClientID string // ClientSecret that corresponds to the ClientID - ClientSecret string + ClientSecret string //nolint:gosec // G101 (hardcoded secret): config field name, not a credential value; never JSON-marshaled into logs or responses } type TokenSourceOptionsFunc func(*clientcredentials.Config) @@ -128,7 +128,7 @@ func (flowConfig ClientCredentialsConfig) TokenSource(ctx context.Context, oAuth type Auth0Config struct { Domain string ClientID string - ClientSecret string + ClientSecret string //nolint:gosec // G101 (hardcoded secret): config field name, not a credential value; populated from env vars and only used in OAuth token exchange Audience string } @@ -298,7 +298,7 @@ func (n *natsTokenClient) Sign(in []byte) ([]byte, error) { // tokens type APIKeyTokenSource struct { // The API Key to use to authenticate to the Overmind API - ApiKey string + ApiKey string //nolint:gosec // G101 (hardcoded secret): config field name, not a credential value; only passed to API key exchange endpoint token *oauth2.Token apiKeyClient sdpconnect.ApiKeyServiceClient } diff --git a/go/auth/middleware.go 
b/go/auth/middleware.go index 5a806c55..0241af4a 100644 --- a/go/auth/middleware.go +++ b/go/auth/middleware.go @@ -7,6 +7,7 @@ import ( "net/http" "net/url" "regexp" + "slices" "strings" "time" @@ -483,13 +484,7 @@ type CustomClaims struct { // HasScope checks whether our claims have a specific scope. func (c CustomClaims) HasScope(expectedScope string) bool { result := strings.Split(c.Scope, " ") - for i := range result { - if result[i] == expectedScope { - return true - } - } - - return false + return slices.Contains(result, expectedScope) } // Validate does nothing for this example, but we need diff --git a/go/auth/middleware_test.go b/go/auth/middleware_test.go index 0e18fd26..b0231711 100644 --- a/go/auth/middleware_test.go +++ b/go/auth/middleware_test.go @@ -122,8 +122,7 @@ func TestNewAuthMiddleware(t *testing.T) { t.Fatal(err) } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() jwksURL := server.Start(ctx) @@ -699,8 +698,7 @@ func TestConnectErrorHandling(t *testing.T) { t.Fatal(err) } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() jwksURL := server.Start(ctx) diff --git a/go/auth/nats.go b/go/auth/nats.go index 40c0aade..cb8a8eaf 100644 --- a/go/auth/nats.go +++ b/go/auth/nats.go @@ -224,10 +224,7 @@ func (o NATSOptions) Connect() (sdp.EncodedConnection, error) { triesLeft-- } // Log a non-negative value: 0 means unlimited retries (NumRetries < 0) - logTriesLeft := triesLeft - if logTriesLeft < 0 { - logTriesLeft = 0 - } + logTriesLeft := max(triesLeft, 0) lf := log.Fields{ "servers": servers, "triesLeft": logTriesLeft, diff --git a/go/discovery/cmd.go b/go/discovery/cmd.go index c2b4fe7f..7ad794a7 100644 --- a/go/discovery/cmd.go +++ b/go/discovery/cmd.go @@ -206,7 +206,7 @@ func MapFromEngineConfig(ec *EngineConfig) map[string]any { sourceAccessToken = "[REDACTED]" } - return map[string]interface{}{ + return map[string]any{ "engine-type": ec.EngineType, 
"version": ec.Version, "source-name": ec.SourceName, diff --git a/go/discovery/doc.go b/go/discovery/doc.go index ab24ca1e..8d3d355f 100644 --- a/go/discovery/doc.go +++ b/go/discovery/doc.go @@ -14,6 +14,17 @@ // 6. Adapter init — use InitialiseAdapters (blocks until success or ctx cancelled) for retryable init, or SetInitError for single-attempt // 7. Wait for SIGTERM, then Stop() // +// # Readiness gating +// +// The engine defaults to "not ready" until adapters are initialized. Both +// ReadinessHealthCheck (the /healthz/ready HTTP probe) and SendHeartbeat report +// an error while adaptersInitialized is false. This prevents Kubernetes from +// routing traffic to a pod that has no adapters registered. +// +// InitialiseAdapters calls MarkAdaptersInitialized automatically on success. +// Sources that do their own initialization (without InitialiseAdapters) must +// call MarkAdaptersInitialized explicitly after adding adapters. +// // # Error handling // // Fatal errors (caller must return or exit): EngineConfigFromViper, NewEngine, Start. @@ -29,5 +40,5 @@ // do not retry. Transient adapter init errors (e.g. upstream API temporarily // unavailable) should use InitialiseAdapters, which retries with backoff. // -// See SetInitError and InitialiseAdapters for details and examples. +// See SetInitError, MarkAdaptersInitialized, and InitialiseAdapters for details and examples. package discovery diff --git a/go/discovery/engine.go b/go/discovery/engine.go index 1b64388e..e5a1dfbb 100644 --- a/go/discovery/engine.go +++ b/go/discovery/engine.go @@ -8,6 +8,7 @@ import ( "slices" "strings" "sync" + "sync/atomic" "time" "connectrpc.com/connect" @@ -61,7 +62,7 @@ type EngineConfig struct { // The 'ovm_*' API key to use to authenticate to the Overmind API. 
// This and 'SourceAccessToken' are mutually exclusive - ApiKey string // The API key to use to authenticate to the Overmind API" + ApiKey string //nolint:gosec // G101 (hardcoded secret): config field name, not a credential value; populated from CLI flags/env vars // Static token passed to the source to authenticate. SourceAccessToken string // The access token to use to authenticate to the source SourceAccessTokenType string // The type of token to use to authenticate the source for managed sources @@ -161,6 +162,13 @@ type Engine struct { // CrashLoopBackOff so customers can diagnose and fix configuration issues. initError error initErrorMutex sync.RWMutex + + // adaptersInitialized tracks whether adapters have been successfully registered. + // Defaults to false; set to true by InitialiseAdapters on success or manually + // via MarkAdaptersInitialized for sources that don't use InitialiseAdapters. + // ReadinessHealthCheck and SendHeartbeat both check this flag so that a source + // cannot report healthy before it can actually serve queries. + adaptersInitialized atomic.Bool } func NewEngine(engineConfig *EngineConfig) (*Engine, error) { @@ -501,6 +509,10 @@ func (e *Engine) ReadinessHealthCheck(ctx context.Context) error { attribute.String("ovm.healthcheck.type", "readiness"), ) + if !e.AreAdaptersInitialized() { + return errors.New("adapters not yet initialized") + } + // Check for persistent initialization errors first if initErr := e.GetInitError(); initErr != nil { return fmt.Errorf("source initialization failed: %w", initErr) @@ -764,6 +776,19 @@ func (e *Engine) GetInitError() error { return e.initError } +// MarkAdaptersInitialized records that adapters have been successfully registered +// and the source is ready to serve queries. This is called automatically by +// InitialiseAdapters on success. Sources that do their own initialization +// (without InitialiseAdapters) must call this explicitly after adding adapters. 
+func (e *Engine) MarkAdaptersInitialized() { + e.adaptersInitialized.Store(true) +} + +// AreAdaptersInitialized reports whether adapters have been successfully registered. +func (e *Engine) AreAdaptersInitialized() bool { + return e.adaptersInitialized.Load() +} + // InitialiseAdapters retries initFn with exponential backoff (capped at // 5 minutes) until it succeeds or ctx is cancelled. It blocks the caller. // @@ -807,6 +832,7 @@ func (e *Engine) InitialiseAdapters(ctx context.Context, initFn func(ctx context // Clear any previous init error before the heartbeat so the // API/UI immediately sees the healthy status. e.SetInitError(nil) + e.MarkAdaptersInitialized() } // Send heartbeat regardless of outcome so the API/UI reflects current status diff --git a/go/discovery/engine_initerror_test.go b/go/discovery/engine_initerror_test.go index 26acfa34..71d35309 100644 --- a/go/discovery/engine_initerror_test.go +++ b/go/discovery/engine_initerror_test.go @@ -89,13 +89,11 @@ func TestInitErrorConcurrentAccess(t *testing.T) { // Readers for range 10 { - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { for range iterations { _ = e.GetInitError() } - }() + }) } wg.Wait() @@ -126,6 +124,9 @@ func TestReadinessHealthCheckWithInitError(t *testing.T) { t.Fatalf("failed to create engine: %v", err) } + // Mark adapters initialized so we're only testing initError behavior + e.MarkAdaptersInitialized() + ctx := context.Background() // Readiness should pass when no init error @@ -178,6 +179,9 @@ func TestSendHeartbeatWithInitError(t *testing.T) { t.Fatalf("failed to create engine: %v", err) } + // Mark adapters initialized so we're only testing initError behavior + e.MarkAdaptersInitialized() + ctx := context.Background() // Send heartbeat with init error @@ -223,6 +227,9 @@ func TestSendHeartbeatWithInitErrorAndCustomError(t *testing.T) { t.Fatalf("failed to create engine: %v", err) } + // Mark adapters initialized so we're only testing initError + custom error 
behavior + e.MarkAdaptersInitialized() + ctx := context.Background() // Set init error and send heartbeat with custom error @@ -286,6 +293,9 @@ func TestInitialiseAdapters_Success(t *testing.T) { if err := e.GetInitError(); err != nil { t.Errorf("expected init error to be cleared after success, got: %v", err) } + if !e.AreAdaptersInitialized() { + t.Error("expected adaptersInitialized to be true after successful InitialiseAdapters") + } } func TestInitialiseAdapters_RetryThenSuccess(t *testing.T) { @@ -363,3 +373,200 @@ func TestInitialiseAdapters_ContextCancelled(t *testing.T) { t.Error("expected init error to be set after context cancellation with failures") } } + +func TestReadinessFailsBeforeInitialization(t *testing.T) { + ec := &EngineConfig{ + EngineType: "test", + SourceName: "test-source", + HeartbeatOptions: &HeartbeatOptions{ + ReadinessCheck: func(ctx context.Context) error { + return nil + }, + }, + } + + e, err := NewEngine(ec) + if err != nil { + t.Fatalf("failed to create engine: %v", err) + } + + ctx := context.Background() + + err = e.ReadinessHealthCheck(ctx) + if err == nil { + t.Fatal("expected readiness to fail before adapters initialized, got nil") + } + if !strings.Contains(err.Error(), "adapters not yet initialized") { + t.Errorf("expected error to contain 'adapters not yet initialized', got: %v", err) + } +} + +func TestReadinessPassesAfterInitialization(t *testing.T) { + ec := &EngineConfig{ + EngineType: "test", + SourceName: "test-source", + HeartbeatOptions: &HeartbeatOptions{ + ReadinessCheck: func(ctx context.Context) error { + return nil + }, + }, + } + + e, err := NewEngine(ec) + if err != nil { + t.Fatalf("failed to create engine: %v", err) + } + + e.MarkAdaptersInitialized() + + ctx := context.Background() + + if err := e.ReadinessHealthCheck(ctx); err != nil { + t.Errorf("expected readiness to pass after MarkAdaptersInitialized, got: %v", err) + } +} + +func TestHeartbeatIncludesUninitializedError(t *testing.T) { + requests := 
make(chan *connect.Request[sdp.SubmitSourceHeartbeatRequest], 10) + responses := make(chan *connect.Response[sdp.SubmitSourceHeartbeatResponse], 10) + + ec := &EngineConfig{ + EngineType: "test", + SourceName: "test-source", + HeartbeatOptions: &HeartbeatOptions{ + ManagementClient: testHeartbeatClient{ + Requests: requests, + Responses: responses, + }, + Frequency: 0, + }, + } + + e, err := NewEngine(ec) + if err != nil { + t.Fatalf("failed to create engine: %v", err) + } + + // Do NOT call MarkAdaptersInitialized -- engine is freshly created + + responses <- &connect.Response[sdp.SubmitSourceHeartbeatResponse]{ + Msg: &sdp.SubmitSourceHeartbeatResponse{}, + } + + ctx := context.Background() + err = e.SendHeartbeat(ctx, nil) + if err != nil { + t.Fatalf("expected SendHeartbeat to succeed, got: %v", err) + } + + req := <-requests + if req.Msg.GetError() == "" { + t.Fatal("expected heartbeat to include error before initialization, got empty string") + } + if !strings.Contains(req.Msg.GetError(), "adapters not yet initialized") { + t.Errorf("expected heartbeat error to contain 'adapters not yet initialized', got: %q", req.Msg.GetError()) + } +} + +func TestHeartbeatClearsAfterInitialization(t *testing.T) { + requests := make(chan *connect.Request[sdp.SubmitSourceHeartbeatRequest], 10) + responses := make(chan *connect.Response[sdp.SubmitSourceHeartbeatResponse], 10) + + ec := &EngineConfig{ + EngineType: "test", + SourceName: "test-source", + HeartbeatOptions: &HeartbeatOptions{ + ManagementClient: testHeartbeatClient{ + Requests: requests, + Responses: responses, + }, + Frequency: 0, + }, + } + + e, err := NewEngine(ec) + if err != nil { + t.Fatalf("failed to create engine: %v", err) + } + + e.MarkAdaptersInitialized() + + responses <- &connect.Response[sdp.SubmitSourceHeartbeatResponse]{ + Msg: &sdp.SubmitSourceHeartbeatResponse{}, + } + + ctx := context.Background() + err = e.SendHeartbeat(ctx, nil) + if err != nil { + t.Fatalf("expected SendHeartbeat to succeed, 
got: %v", err) + } + + req := <-requests + if req.Msg.GetError() != "" { + t.Errorf("expected heartbeat to have no error after initialization, got: %q", req.Msg.GetError()) + } +} + +func TestInitialiseAdapters_SetsInitializedFlag(t *testing.T) { + ec := &EngineConfig{ + EngineType: "test", + SourceName: "test-source", + HeartbeatOptions: &HeartbeatOptions{ + Frequency: 0, + }, + } + e, err := NewEngine(ec) + if err != nil { + t.Fatalf("failed to create engine: %v", err) + } + + if e.AreAdaptersInitialized() { + t.Fatal("expected adaptersInitialized to be false on new engine") + } + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + e.InitialiseAdapters(ctx, func(ctx context.Context) error { + return nil + }) + + if !e.AreAdaptersInitialized() { + t.Error("expected adaptersInitialized to be true after InitialiseAdapters success") + } +} + +func TestInitialiseAdapters_DoesNotSetFlagOnFailure(t *testing.T) { + ec := &EngineConfig{ + EngineType: "test", + SourceName: "test-source", + HeartbeatOptions: &HeartbeatOptions{ + Frequency: 0, + }, + } + e, err := NewEngine(ec) + if err != nil { + t.Fatalf("failed to create engine: %v", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + time.AfterFunc(500*time.Millisecond, cancel) + + done := make(chan struct{}) + go func() { + e.InitialiseAdapters(ctx, func(ctx context.Context) error { + return errors.New("always fails") + }) + close(done) + }() + + select { + case <-done: + case <-time.After(5 * time.Second): + t.Fatal("InitialiseAdapters did not return after context cancellation") + } + + if e.AreAdaptersInitialized() { + t.Error("expected adaptersInitialized to remain false when init always fails") + } +} diff --git a/go/discovery/enginerequests.go b/go/discovery/enginerequests.go index 3b04bf82..c7f8ac00 100644 --- a/go/discovery/enginerequests.go +++ b/go/discovery/enginerequests.go @@ -1,9 +1,14 @@ package discovery import ( + "bytes" "context" "errors" 
"fmt" + "regexp" + "runtime" + "runtime/pprof" + "strings" "sync" "sync/atomic" "time" @@ -16,6 +21,7 @@ import ( "github.com/sourcegraph/conc/pool" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" + "golang.org/x/sync/singleflight" "google.golang.org/protobuf/types/known/timestamppb" ) @@ -154,8 +160,51 @@ func (e *Engine) HandleQuery(ctx context.Context, query *sdp.Query) { } } -var listExecutionPoolCount atomic.Int32 -var getExecutionPoolCount atomic.Int32 +var ( + goroutineProfileGroup singleflight.Group + + // Compiled once; used by compactGoroutineProfile to strip noise from + // pprof debug=1 output while keeping it human-readable. + profileAddrList = regexp.MustCompile(` @ (?:0x[0-9a-f]+ ?)+`) + profileHexAddr = regexp.MustCompile(`#\t0x[0-9a-f]+\t`) + profileFuncOffset = regexp.MustCompile(`\+0x[0-9a-f]+`) + profileVersion = regexp.MustCompile(`@v[0-9]+\.[0-9]+\.[0-9]+[-\w.]*`) +) + +// compactGoroutineProfile removes noise from a pprof debug=1 goroutine dump +// without losing readability. Typical compression is ~50%, effectively doubling +// how much fits in the Honeycomb 49 KB string attribute limit. +func compactGoroutineProfile(s string) string { + s = strings.ReplaceAll(s, "github.com/overmindtech/workspace/", "g.c/o/w/") + s = strings.ReplaceAll(s, "github.com/", "g.c/") + s = profileAddrList.ReplaceAllString(s, "") // "32257 @ 0x9484c ..." → "32257" + s = profileHexAddr.ReplaceAllString(s, "#\t") // "#\t0xaeda7b\tfoo" → "#\tfoo" + s = profileFuncOffset.ReplaceAllString(s, "") // "Execute+0x4cb" → "Execute" + s = profileVersion.ReplaceAllString(s, "") // "@v1.49.0" → "" + return s +} + +// captureGoroutineSummary returns a compacted goroutine profile (pprof debug=1) +// truncated to maxBytes, deduplicated via singleflight. When many ExecuteQuery +// goroutines hit the stuck timeout simultaneously, only one runs the +// (stop-the-world) pprof capture; the rest share its result. 
+func captureGoroutineSummary(maxBytes int) string { + v, _, _ := goroutineProfileGroup.Do("goroutine-profile", func() (any, error) { + var buf bytes.Buffer + _ = pprof.Lookup("goroutine").WriteTo(&buf, 1) + s := compactGoroutineProfile(buf.String()) + if len(s) > maxBytes { + s = s[:maxBytes-20] + "\n...[truncated]..." + } + return s, nil + }) + return v.(string) +} + +var ( + listExecutionPoolCount atomic.Int32 + getExecutionPoolCount atomic.Int32 +) // ExecuteQuery Executes a single Query and returns the results without any // linking. Will return an error if the Query couldn't be run. @@ -196,6 +245,8 @@ func (e *Engine) ExecuteQuery(ctx context.Context, query *sdp.Query, responses c // Overall MaxParallelExecutions evaluation is handled by e.executionPool wg := sync.WaitGroup{} expandedMutex := sync.RWMutex{} + totalQueries := len(expanded) + var poolWaitMaxNs atomic.Int64 expandedMutex.RLock() for q, adapter := range expanded { wg.Add(1) @@ -220,8 +271,16 @@ func (e *Engine) ExecuteQuery(ctx context.Context, query *sdp.Query, responses c attribute.Int("ovm.discovery.listExecutionPoolCount", int(listExecutionPoolCount.Load())), attribute.Int("ovm.discovery.getExecutionPoolCount", int(getExecutionPoolCount.Load())), ) + poolSubmitTime := time.Now() p.Go(func() { defer tracing.LogRecoverToReturn(ctx, "ExecuteQuery inner") + waitNs := time.Since(poolSubmitTime).Nanoseconds() + for { + old := poolWaitMaxNs.Load() + if waitNs <= old || poolWaitMaxNs.CompareAndSwap(old, waitNs) { + break + } + } defer func() { // Mark the work as done. 
This happens before we start // waiting on `expandedMutex` below, to ensure that the @@ -284,8 +343,21 @@ func (e *Engine) ExecuteQuery(ctx context.Context, query *sdp.Query, responses c return case <-time.After(longRunningAdaptersTimeout): // If we're here, then the wait group didn't finish in time + goroutineSummary := captureGoroutineSummary(48_000) expandedMutex.RLock() + span.AddEvent("waitgroup.stuck", trace.WithAttributes( + attribute.Int("ovm.stuck.goroutineCount", runtime.NumGoroutine()), + attribute.Int("ovm.stuck.totalQueries", totalQueries), + attribute.Int("ovm.stuck.remainingQueries", len(expanded)), + attribute.String("ovm.stuck.goroutineProfile", goroutineSummary), + )) for q, adapter := range expanded { + span.AddEvent("waitgroup.stuck.adapter", trace.WithAttributes( + attribute.String("ovm.stuck.adapter", adapter.Name()), + attribute.String("ovm.stuck.type", q.GetType()), + attribute.String("ovm.stuck.scope", q.GetScope()), + attribute.String("ovm.stuck.method", q.GetMethod().String()), + )) // There is a honeycomb trigger for this message: // // https://ui.honeycomb.io/overmind/environments/prod/datasets/kubernetes-metrics/triggers/saWNAnCAXNb @@ -310,6 +382,10 @@ func (e *Engine) ExecuteQuery(ctx context.Context, query *sdp.Query, responses c }() } + span.SetAttributes( + attribute.Float64("ovm.discovery.poolWaitMaxMs", float64(poolWaitMaxNs.Load())/1e6), + ) + // If the context is cancelled, return that error if ctx.Err() != nil { return ctx.Err() @@ -344,6 +420,7 @@ func (e *Engine) Execute(ctx context.Context, q *sdp.Query, adapter Adapter, res // rather run the List first, populate the cache, then have the Get just // grab the value from the cache. 
To this end we use a GetListMutex to allow // a List to block all subsequent Get queries until it is done + mutexWaitStart := time.Now() switch q.GetMethod() { case sdp.QueryMethod_GET: e.gfm.GetLock(q.GetScope(), q.GetType()) @@ -355,6 +432,10 @@ func (e *Engine) Execute(ctx context.Context, q *sdp.Query, adapter Adapter, res // We don't need to lock for a search since they are independent and // will only ever have a cache hit if the query is identical } + span.SetAttributes( + attribute.Float64("ovm.discovery.mutexWaitMs", float64(time.Since(mutexWaitStart).Milliseconds())), + attribute.String("ovm.discovery.mutexKey", q.GetScope()+"."+q.GetType()), + ) // Ensure that the span is closed when the context is done. This is based on // the assumption that some adapters may not respect the context deadline and @@ -377,6 +458,8 @@ func (e *Engine) Execute(ctx context.Context, q *sdp.Query, adapter Adapter, res // are passed back to the caller var numItems atomic.Int32 var numErrs atomic.Int32 + var channelSendMaxNs atomic.Int64 + var channelSendTotalNs atomic.Int64 var itemHandler ItemHandler = func(item *sdp.Item) { if item == nil { return @@ -384,6 +467,7 @@ func (e *Engine) Execute(ctx context.Context, q *sdp.Query, adapter Adapter, res if err := item.Validate(); err != nil { span.RecordError(err) + sendStart := time.Now() responses <- sdp.NewQueryResponseFromError(&sdp.QueryError{ UUID: q.GetUUID(), ErrorType: sdp.QueryError_OTHER, @@ -392,6 +476,14 @@ func (e *Engine) Execute(ctx context.Context, q *sdp.Query, adapter Adapter, res ResponderName: e.EngineConfig.SourceName, ItemType: q.GetType(), }) + sendNs := time.Since(sendStart).Nanoseconds() + channelSendTotalNs.Add(sendNs) + for { + old := channelSendMaxNs.Load() + if sendNs <= old || channelSendMaxNs.CompareAndSwap(old, sendNs) { + break + } + } return } @@ -409,7 +501,16 @@ func (e *Engine) Execute(ctx context.Context, q *sdp.Query, adapter Adapter, res // Send the item back to the caller numItems.Add(1) + 
sendStart := time.Now() responses <- sdp.NewQueryResponseFromItem(item) + sendNs := time.Since(sendStart).Nanoseconds() + channelSendTotalNs.Add(sendNs) + for { + old := channelSendMaxNs.Load() + if sendNs <= old || channelSendMaxNs.CompareAndSwap(old, sendNs) { + break + } + } } var errHandler ErrHandler = func(err error) { if err == nil { @@ -423,7 +524,16 @@ func (e *Engine) Execute(ctx context.Context, q *sdp.Query, adapter Adapter, res // Send the error back to the caller numErrs.Add(1) + sendStart := time.Now() responses <- queryResponseFromError(err, q, adapter, e.EngineConfig.SourceName) + sendNs := time.Since(sendStart).Nanoseconds() + channelSendTotalNs.Add(sendNs) + for { + old := channelSendMaxNs.Load() + if sendNs <= old || channelSendMaxNs.CompareAndSwap(old, sendNs) { + break + } + } } stream := NewQueryResultStream(itemHandler, errHandler) @@ -506,6 +616,8 @@ func (e *Engine) Execute(ctx context.Context, q *sdp.Query, adapter Adapter, res span.SetAttributes( attribute.Int("ovm.adapter.numItems", int(numItems.Load())), attribute.Int("ovm.adapter.numErrors", int(numErrs.Load())), + attribute.Float64("ovm.discovery.channelSendMaxMs", float64(channelSendMaxNs.Load())/1e6), + attribute.Float64("ovm.discovery.channelSendTotalMs", float64(channelSendTotalNs.Load())/1e6), ) } diff --git a/go/discovery/getfindmutex_test.go b/go/discovery/getfindmutex_test.go index ff7b4916..e854f5e4 100644 --- a/go/discovery/getfindmutex_test.go +++ b/go/discovery/getfindmutex_test.go @@ -149,14 +149,11 @@ func TestGetLock(t *testing.T) { }() - actionWG.Add(1) - - go func() { + actionWG.Go(func() { for action := range actionChan { order = append(order, action) } - actionWG.Done() - }() + }) go func(t *testing.T) { wg.Wait() diff --git a/go/discovery/heartbeat.go b/go/discovery/heartbeat.go index 0ae6a862..0a9ab3dd 100644 --- a/go/discovery/heartbeat.go +++ b/go/discovery/heartbeat.go @@ -62,6 +62,10 @@ func (e *Engine) SendHeartbeat(ctx context.Context, customErr error) 
error { allErrors = append(allErrors, initErr) } + if !e.AreAdaptersInitialized() { + allErrors = append(allErrors, errors.New("adapters not yet initialized")) + } + // Check adapter readiness (ReadinessCheck) - with timeout to prevent hanging if e.EngineConfig.HeartbeatOptions.ReadinessCheck != nil { // Add timeout for readiness checks to prevent hanging heartbeats diff --git a/go/discovery/heartbeat_test.go b/go/discovery/heartbeat_test.go index 94e55915..a2c1adc7 100644 --- a/go/discovery/heartbeat_test.go +++ b/go/discovery/heartbeat_test.go @@ -46,6 +46,7 @@ func TestHeartbeats(t *testing.T) { HeartbeatOptions: &heartbeatOptions, } e, _ := NewEngine(&ec) + e.MarkAdaptersInitialized() if err := e.AddAdapters( &TestAdapter{ diff --git a/go/discovery/performance_test.go b/go/discovery/performance_test.go index fd521c71..2b9075aa 100644 --- a/go/discovery/performance_test.go +++ b/go/discovery/performance_test.go @@ -42,7 +42,7 @@ func (s *SlowAdapter) Hidden() bool { func (s *SlowAdapter) Get(ctx context.Context, scope string, query string, ignoreCache bool) (*sdp.Item, error) { end := time.Now().Add(s.QueryDuration) - attributes, _ := sdp.ToAttributes(map[string]interface{}{ + attributes, _ := sdp.ToAttributes(map[string]any{ "name": query, }) diff --git a/go/discovery/querytracker.go b/go/discovery/querytracker.go index 4cb30c97..e1755f94 100644 --- a/go/discovery/querytracker.go +++ b/go/discovery/querytracker.go @@ -3,6 +3,7 @@ package discovery import ( "context" "errors" + "time" "github.com/overmindtech/cli/go/sdp-go" "github.com/overmindtech/cli/go/tracing" @@ -63,9 +64,20 @@ func (qt *QueryTracker) Execute(ctx context.Context) ([]*sdp.Item, []*sdp.Edge, }(errChan) // Process the responses as they come in + var natsPublishMaxNs int64 + var natsPublishTotalNs int64 + var natsPublishCount int + for response := range responses { if qt.Query.Subject() != "" && qt.Engine.natsConnection != nil { + publishStart := time.Now() err := 
qt.Engine.natsConnection.Publish(ctx, qt.Query.Subject(), response) + publishNs := time.Since(publishStart).Nanoseconds() + natsPublishTotalNs += publishNs + natsPublishCount++ + if publishNs > natsPublishMaxNs { + natsPublishMaxNs = publishNs + } if err != nil { span.RecordError(err) log.WithError(err).Error("Response publishing error") @@ -82,6 +94,12 @@ func (qt *QueryTracker) Execute(ctx context.Context) ([]*sdp.Item, []*sdp.Edge, } } + span.SetAttributes( + attribute.Float64("ovm.nats.publishMaxMs", float64(natsPublishMaxNs)/1e6), + attribute.Float64("ovm.nats.publishTotalMs", float64(natsPublishTotalNs)/1e6), + attribute.Int("ovm.nats.publishCount", natsPublishCount), + ) + // Get the result of the execution err := <-errChan if err != nil { diff --git a/go/discovery/querytracker_test.go b/go/discovery/querytracker_test.go index 338e29f8..7d4a8d0f 100644 --- a/go/discovery/querytracker_test.go +++ b/go/discovery/querytracker_test.go @@ -272,11 +272,9 @@ func TestCancel(t *testing.T) { var wg sync.WaitGroup var err error - wg.Add(1) - go func() { + wg.Go(func() { items, edges, _, err = qt.Execute(context.Background()) - wg.Done() - }() + }) // Give it some time to populate the cancelFunc time.Sleep(100 * time.Millisecond) diff --git a/go/sdp-go/changes.pb.go b/go/sdp-go/changes.pb.go index 9a4130f2..51259dff 100644 --- a/go/sdp-go/changes.pb.go +++ b/go/sdp-go/changes.pb.go @@ -3645,8 +3645,15 @@ type ItemDiff struct { After *Item `protobuf:"bytes,4,opt,name=after,proto3" json:"after,omitempty"` // A summary of how often the GUN's have had similar changes for individual attributes along with planned and unplanned changes ModificationSummary string `protobuf:"bytes,5,opt,name=modificationSummary,proto3" json:"modificationSummary,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + // Reference to the live infrastructure item this diff was mapped to via + // LLM mapping. 
Only set when the mapped item differs from the plan item + // (i.e., the plan resource type has no static mapping and the LLM found + // a matching live item of a different type). The frontend uses this to + // draw a synthetic edge in the blast radius graph connecting the plan + // item node to the mapped live item node. + MappedItemRef *Reference `protobuf:"bytes,6,opt,name=mappedItemRef,proto3,oneof" json:"mappedItemRef,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ItemDiff) Reset() { @@ -3714,6 +3721,13 @@ func (x *ItemDiff) GetModificationSummary() string { return "" } +func (x *ItemDiff) GetMappedItemRef() *Reference { + if x != nil { + return x.MappedItemRef + } + return nil +} + type EnrichedTags struct { state protoimpl.MessageState `protogen:"open.v1"` TagValue map[string]*TagValue `protobuf:"bytes,18,rep,name=tagValue,proto3" json:"tagValue,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` @@ -6736,15 +6750,18 @@ const file_changes_proto_rawDesc = "" + "\x04item\x18\x01 \x01(\v2\n" + ".ReferenceR\x04item\x12/\n" + "\x06status\x18\x04 \x01(\x0e2\x17.changes.ItemDiffStatusR\x06status\x12)\n" + - "\vhealthAfter\x18\x05 \x01(\x0e2\a.HealthR\vhealthAfter\"\xd7\x01\n" + + "\vhealthAfter\x18\x05 \x01(\x0e2\a.HealthR\vhealthAfter\"\xa0\x02\n" + "\bItemDiff\x12#\n" + "\x04item\x18\x01 \x01(\v2\n" + ".ReferenceH\x00R\x04item\x88\x01\x01\x12/\n" + "\x06status\x18\x02 \x01(\x0e2\x17.changes.ItemDiffStatusR\x06status\x12\x1d\n" + "\x06before\x18\x03 \x01(\v2\x05.ItemR\x06before\x12\x1b\n" + "\x05after\x18\x04 \x01(\v2\x05.ItemR\x05after\x120\n" + - "\x13modificationSummary\x18\x05 \x01(\tR\x13modificationSummaryB\a\n" + - "\x05_item\"\x9f\x01\n" + + "\x13modificationSummary\x18\x05 \x01(\tR\x13modificationSummary\x125\n" + + "\rmappedItemRef\x18\x06 \x01(\v2\n" + + ".ReferenceH\x01R\rmappedItemRef\x88\x01\x01B\a\n" + + "\x05_itemB\x10\n" + + "\x0e_mappedItemRef\"\x9f\x01\n" + 
"\fEnrichedTags\x12?\n" + "\btagValue\x18\x12 \x03(\v2#.changes.EnrichedTags.TagValueEntryR\btagValue\x1aN\n" + "\rTagValueEntry\x12\x10\n" + @@ -7264,120 +7281,121 @@ var file_changes_proto_depIdxs = []int32{ 4, // 64: changes.ItemDiff.status:type_name -> changes.ItemDiffStatus 123, // 65: changes.ItemDiff.before:type_name -> Item 123, // 66: changes.ItemDiff.after:type_name -> Item - 107, // 67: changes.EnrichedTags.tagValue:type_name -> changes.EnrichedTags.TagValueEntry - 65, // 68: changes.TagValue.userTagValue:type_name -> changes.UserTagValue - 66, // 69: changes.TagValue.autoTagValue:type_name -> changes.AutoTagValue - 6, // 70: changes.Label.type:type_name -> changes.LabelType - 7, // 71: changes.ChangeSummary.status:type_name -> changes.ChangeStatus - 111, // 72: changes.ChangeSummary.createdAt:type_name -> google.protobuf.Timestamp - 108, // 73: changes.ChangeSummary.tags:type_name -> changes.ChangeSummary.TagsEntry - 63, // 74: changes.ChangeSummary.enrichedTags:type_name -> changes.EnrichedTags - 67, // 75: changes.ChangeSummary.labels:type_name -> changes.Label - 72, // 76: changes.ChangeSummary.githubChangeInfo:type_name -> changes.GithubChangeInfo - 70, // 77: changes.Change.metadata:type_name -> changes.ChangeMetadata - 71, // 78: changes.Change.properties:type_name -> changes.ChangeProperties - 111, // 79: changes.ChangeMetadata.createdAt:type_name -> google.protobuf.Timestamp - 111, // 80: changes.ChangeMetadata.updatedAt:type_name -> google.protobuf.Timestamp - 7, // 81: changes.ChangeMetadata.status:type_name -> changes.ChangeStatus - 109, // 82: changes.ChangeMetadata.UnknownHealthChange:type_name -> changes.ChangeMetadata.HealthChange - 109, // 83: changes.ChangeMetadata.OkHealthChange:type_name -> changes.ChangeMetadata.HealthChange - 109, // 84: changes.ChangeMetadata.WarningHealthChange:type_name -> changes.ChangeMetadata.HealthChange - 109, // 85: changes.ChangeMetadata.ErrorHealthChange:type_name -> changes.ChangeMetadata.HealthChange - 
109, // 86: changes.ChangeMetadata.PendingHealthChange:type_name -> changes.ChangeMetadata.HealthChange - 72, // 87: changes.ChangeMetadata.githubChangeInfo:type_name -> changes.GithubChangeInfo - 104, // 88: changes.ChangeMetadata.changeAnalysisStatus:type_name -> changes.ChangeAnalysisStatus - 62, // 89: changes.ChangeProperties.plannedChanges:type_name -> changes.ItemDiff - 110, // 90: changes.ChangeProperties.tags:type_name -> changes.ChangeProperties.TagsEntry - 63, // 91: changes.ChangeProperties.enrichedTags:type_name -> changes.EnrichedTags - 67, // 92: changes.ChangeProperties.labels:type_name -> changes.Label - 69, // 93: changes.ListChangesResponse.changes:type_name -> changes.Change - 7, // 94: changes.ListChangesByStatusRequest.status:type_name -> changes.ChangeStatus - 69, // 95: changes.ListChangesByStatusResponse.changes:type_name -> changes.Change - 71, // 96: changes.CreateChangeRequest.properties:type_name -> changes.ChangeProperties - 69, // 97: changes.CreateChangeResponse.change:type_name -> changes.Change - 5, // 98: changes.GetChangeSummaryRequest.changeOutputFormat:type_name -> changes.ChangeOutputFormat - 10, // 99: changes.GetChangeSummaryRequest.riskSeverityFilter:type_name -> changes.Risk.Severity - 5, // 100: changes.GetChangeSignalsRequest.changeOutputFormat:type_name -> changes.ChangeOutputFormat - 69, // 101: changes.GetChangeResponse.change:type_name -> changes.Change - 104, // 102: changes.ChangeRiskMetadata.changeAnalysisStatus:type_name -> changes.ChangeAnalysisStatus - 103, // 103: changes.ChangeRiskMetadata.risks:type_name -> changes.Risk - 87, // 104: changes.GetChangeRisksResponse.changeRiskMetadata:type_name -> changes.ChangeRiskMetadata - 71, // 105: changes.UpdateChangeRequest.properties:type_name -> changes.ChangeProperties - 69, // 106: changes.UpdateChangeResponse.change:type_name -> changes.Change - 69, // 107: changes.ListChangesBySnapshotUUIDResponse.changes:type_name -> changes.Change - 8, // 108: 
changes.StartChangeResponse.state:type_name -> changes.StartChangeResponse.State - 9, // 109: changes.EndChangeResponse.state:type_name -> changes.EndChangeResponse.State - 10, // 110: changes.Risk.severity:type_name -> changes.Risk.Severity - 121, // 111: changes.Risk.relatedItems:type_name -> Reference - 11, // 112: changes.ChangeAnalysisStatus.status:type_name -> changes.ChangeAnalysisStatus.Status - 64, // 113: changes.EnrichedTags.TagValueEntry.value:type_name -> changes.TagValue - 73, // 114: changes.ChangesService.ListChanges:input_type -> changes.ListChangesRequest - 75, // 115: changes.ChangesService.ListChangesByStatus:input_type -> changes.ListChangesByStatusRequest - 77, // 116: changes.ChangesService.CreateChange:input_type -> changes.CreateChangeRequest - 79, // 117: changes.ChangesService.GetChange:input_type -> changes.GetChangeRequest - 80, // 118: changes.ChangesService.GetChangeByTicketLink:input_type -> changes.GetChangeByTicketLinkRequest - 81, // 119: changes.ChangesService.GetChangeSummary:input_type -> changes.GetChangeSummaryRequest - 34, // 120: changes.ChangesService.GetChangeTimelineV2:input_type -> changes.GetChangeTimelineV2Request - 86, // 121: changes.ChangesService.GetChangeRisks:input_type -> changes.GetChangeRisksRequest - 89, // 122: changes.ChangesService.UpdateChange:input_type -> changes.UpdateChangeRequest - 91, // 123: changes.ChangesService.DeleteChange:input_type -> changes.DeleteChangeRequest - 92, // 124: changes.ChangesService.ListChangesBySnapshotUUID:input_type -> changes.ListChangesBySnapshotUUIDRequest - 95, // 125: changes.ChangesService.RefreshState:input_type -> changes.RefreshStateRequest - 97, // 126: changes.ChangesService.StartChange:input_type -> changes.StartChangeRequest - 99, // 127: changes.ChangesService.EndChange:input_type -> changes.EndChangeRequest - 97, // 128: changes.ChangesService.StartChangeSimple:input_type -> changes.StartChangeRequest - 99, // 129: 
changes.ChangesService.EndChangeSimple:input_type -> changes.EndChangeRequest - 56, // 130: changes.ChangesService.ListHomeChanges:input_type -> changes.ListHomeChangesRequest - 54, // 131: changes.ChangesService.StartChangeAnalysis:input_type -> changes.StartChangeAnalysisRequest - 51, // 132: changes.ChangesService.ListChangingItemsSummary:input_type -> changes.ListChangingItemsSummaryRequest - 49, // 133: changes.ChangesService.GetDiff:input_type -> changes.GetDiffRequest - 59, // 134: changes.ChangesService.PopulateChangeFilters:input_type -> changes.PopulateChangeFiltersRequest - 105, // 135: changes.ChangesService.GenerateRiskFix:input_type -> changes.GenerateRiskFixRequest - 31, // 136: changes.ChangesService.GetHypothesesDetails:input_type -> changes.GetHypothesesDetailsRequest - 83, // 137: changes.ChangesService.GetChangeSignals:input_type -> changes.GetChangeSignalsRequest - 15, // 138: changes.LabelService.ListLabelRules:input_type -> changes.ListLabelRulesRequest - 17, // 139: changes.LabelService.CreateLabelRule:input_type -> changes.CreateLabelRuleRequest - 19, // 140: changes.LabelService.GetLabelRule:input_type -> changes.GetLabelRuleRequest - 21, // 141: changes.LabelService.UpdateLabelRule:input_type -> changes.UpdateLabelRuleRequest - 23, // 142: changes.LabelService.DeleteLabelRule:input_type -> changes.DeleteLabelRuleRequest - 25, // 143: changes.LabelService.TestLabelRule:input_type -> changes.TestLabelRuleRequest - 27, // 144: changes.LabelService.ReapplyLabelRuleInTimeRange:input_type -> changes.ReapplyLabelRuleInTimeRangeRequest - 74, // 145: changes.ChangesService.ListChanges:output_type -> changes.ListChangesResponse - 76, // 146: changes.ChangesService.ListChangesByStatus:output_type -> changes.ListChangesByStatusResponse - 78, // 147: changes.ChangesService.CreateChange:output_type -> changes.CreateChangeResponse - 85, // 148: changes.ChangesService.GetChange:output_type -> changes.GetChangeResponse - 85, // 149: 
changes.ChangesService.GetChangeByTicketLink:output_type -> changes.GetChangeResponse - 82, // 150: changes.ChangesService.GetChangeSummary:output_type -> changes.GetChangeSummaryResponse - 35, // 151: changes.ChangesService.GetChangeTimelineV2:output_type -> changes.GetChangeTimelineV2Response - 88, // 152: changes.ChangesService.GetChangeRisks:output_type -> changes.GetChangeRisksResponse - 90, // 153: changes.ChangesService.UpdateChange:output_type -> changes.UpdateChangeResponse - 94, // 154: changes.ChangesService.DeleteChange:output_type -> changes.DeleteChangeResponse - 93, // 155: changes.ChangesService.ListChangesBySnapshotUUID:output_type -> changes.ListChangesBySnapshotUUIDResponse - 96, // 156: changes.ChangesService.RefreshState:output_type -> changes.RefreshStateResponse - 98, // 157: changes.ChangesService.StartChange:output_type -> changes.StartChangeResponse - 100, // 158: changes.ChangesService.EndChange:output_type -> changes.EndChangeResponse - 101, // 159: changes.ChangesService.StartChangeSimple:output_type -> changes.StartChangeSimpleResponse - 102, // 160: changes.ChangesService.EndChangeSimple:output_type -> changes.EndChangeSimpleResponse - 58, // 161: changes.ChangesService.ListHomeChanges:output_type -> changes.ListHomeChangesResponse - 55, // 162: changes.ChangesService.StartChangeAnalysis:output_type -> changes.StartChangeAnalysisResponse - 52, // 163: changes.ChangesService.ListChangingItemsSummary:output_type -> changes.ListChangingItemsSummaryResponse - 50, // 164: changes.ChangesService.GetDiff:output_type -> changes.GetDiffResponse - 60, // 165: changes.ChangesService.PopulateChangeFilters:output_type -> changes.PopulateChangeFiltersResponse - 106, // 166: changes.ChangesService.GenerateRiskFix:output_type -> changes.GenerateRiskFixResponse - 32, // 167: changes.ChangesService.GetHypothesesDetails:output_type -> changes.GetHypothesesDetailsResponse - 84, // 168: changes.ChangesService.GetChangeSignals:output_type -> 
changes.GetChangeSignalsResponse - 16, // 169: changes.LabelService.ListLabelRules:output_type -> changes.ListLabelRulesResponse - 18, // 170: changes.LabelService.CreateLabelRule:output_type -> changes.CreateLabelRuleResponse - 20, // 171: changes.LabelService.GetLabelRule:output_type -> changes.GetLabelRuleResponse - 22, // 172: changes.LabelService.UpdateLabelRule:output_type -> changes.UpdateLabelRuleResponse - 24, // 173: changes.LabelService.DeleteLabelRule:output_type -> changes.DeleteLabelRuleResponse - 26, // 174: changes.LabelService.TestLabelRule:output_type -> changes.TestLabelRuleResponse - 28, // 175: changes.LabelService.ReapplyLabelRuleInTimeRange:output_type -> changes.ReapplyLabelRuleInTimeRangeResponse - 145, // [145:176] is the sub-list for method output_type - 114, // [114:145] is the sub-list for method input_type - 114, // [114:114] is the sub-list for extension type_name - 114, // [114:114] is the sub-list for extension extendee - 0, // [0:114] is the sub-list for field type_name + 121, // 67: changes.ItemDiff.mappedItemRef:type_name -> Reference + 107, // 68: changes.EnrichedTags.tagValue:type_name -> changes.EnrichedTags.TagValueEntry + 65, // 69: changes.TagValue.userTagValue:type_name -> changes.UserTagValue + 66, // 70: changes.TagValue.autoTagValue:type_name -> changes.AutoTagValue + 6, // 71: changes.Label.type:type_name -> changes.LabelType + 7, // 72: changes.ChangeSummary.status:type_name -> changes.ChangeStatus + 111, // 73: changes.ChangeSummary.createdAt:type_name -> google.protobuf.Timestamp + 108, // 74: changes.ChangeSummary.tags:type_name -> changes.ChangeSummary.TagsEntry + 63, // 75: changes.ChangeSummary.enrichedTags:type_name -> changes.EnrichedTags + 67, // 76: changes.ChangeSummary.labels:type_name -> changes.Label + 72, // 77: changes.ChangeSummary.githubChangeInfo:type_name -> changes.GithubChangeInfo + 70, // 78: changes.Change.metadata:type_name -> changes.ChangeMetadata + 71, // 79: 
changes.Change.properties:type_name -> changes.ChangeProperties + 111, // 80: changes.ChangeMetadata.createdAt:type_name -> google.protobuf.Timestamp + 111, // 81: changes.ChangeMetadata.updatedAt:type_name -> google.protobuf.Timestamp + 7, // 82: changes.ChangeMetadata.status:type_name -> changes.ChangeStatus + 109, // 83: changes.ChangeMetadata.UnknownHealthChange:type_name -> changes.ChangeMetadata.HealthChange + 109, // 84: changes.ChangeMetadata.OkHealthChange:type_name -> changes.ChangeMetadata.HealthChange + 109, // 85: changes.ChangeMetadata.WarningHealthChange:type_name -> changes.ChangeMetadata.HealthChange + 109, // 86: changes.ChangeMetadata.ErrorHealthChange:type_name -> changes.ChangeMetadata.HealthChange + 109, // 87: changes.ChangeMetadata.PendingHealthChange:type_name -> changes.ChangeMetadata.HealthChange + 72, // 88: changes.ChangeMetadata.githubChangeInfo:type_name -> changes.GithubChangeInfo + 104, // 89: changes.ChangeMetadata.changeAnalysisStatus:type_name -> changes.ChangeAnalysisStatus + 62, // 90: changes.ChangeProperties.plannedChanges:type_name -> changes.ItemDiff + 110, // 91: changes.ChangeProperties.tags:type_name -> changes.ChangeProperties.TagsEntry + 63, // 92: changes.ChangeProperties.enrichedTags:type_name -> changes.EnrichedTags + 67, // 93: changes.ChangeProperties.labels:type_name -> changes.Label + 69, // 94: changes.ListChangesResponse.changes:type_name -> changes.Change + 7, // 95: changes.ListChangesByStatusRequest.status:type_name -> changes.ChangeStatus + 69, // 96: changes.ListChangesByStatusResponse.changes:type_name -> changes.Change + 71, // 97: changes.CreateChangeRequest.properties:type_name -> changes.ChangeProperties + 69, // 98: changes.CreateChangeResponse.change:type_name -> changes.Change + 5, // 99: changes.GetChangeSummaryRequest.changeOutputFormat:type_name -> changes.ChangeOutputFormat + 10, // 100: changes.GetChangeSummaryRequest.riskSeverityFilter:type_name -> changes.Risk.Severity + 5, // 101: 
changes.GetChangeSignalsRequest.changeOutputFormat:type_name -> changes.ChangeOutputFormat + 69, // 102: changes.GetChangeResponse.change:type_name -> changes.Change + 104, // 103: changes.ChangeRiskMetadata.changeAnalysisStatus:type_name -> changes.ChangeAnalysisStatus + 103, // 104: changes.ChangeRiskMetadata.risks:type_name -> changes.Risk + 87, // 105: changes.GetChangeRisksResponse.changeRiskMetadata:type_name -> changes.ChangeRiskMetadata + 71, // 106: changes.UpdateChangeRequest.properties:type_name -> changes.ChangeProperties + 69, // 107: changes.UpdateChangeResponse.change:type_name -> changes.Change + 69, // 108: changes.ListChangesBySnapshotUUIDResponse.changes:type_name -> changes.Change + 8, // 109: changes.StartChangeResponse.state:type_name -> changes.StartChangeResponse.State + 9, // 110: changes.EndChangeResponse.state:type_name -> changes.EndChangeResponse.State + 10, // 111: changes.Risk.severity:type_name -> changes.Risk.Severity + 121, // 112: changes.Risk.relatedItems:type_name -> Reference + 11, // 113: changes.ChangeAnalysisStatus.status:type_name -> changes.ChangeAnalysisStatus.Status + 64, // 114: changes.EnrichedTags.TagValueEntry.value:type_name -> changes.TagValue + 73, // 115: changes.ChangesService.ListChanges:input_type -> changes.ListChangesRequest + 75, // 116: changes.ChangesService.ListChangesByStatus:input_type -> changes.ListChangesByStatusRequest + 77, // 117: changes.ChangesService.CreateChange:input_type -> changes.CreateChangeRequest + 79, // 118: changes.ChangesService.GetChange:input_type -> changes.GetChangeRequest + 80, // 119: changes.ChangesService.GetChangeByTicketLink:input_type -> changes.GetChangeByTicketLinkRequest + 81, // 120: changes.ChangesService.GetChangeSummary:input_type -> changes.GetChangeSummaryRequest + 34, // 121: changes.ChangesService.GetChangeTimelineV2:input_type -> changes.GetChangeTimelineV2Request + 86, // 122: changes.ChangesService.GetChangeRisks:input_type -> changes.GetChangeRisksRequest 
+ 89, // 123: changes.ChangesService.UpdateChange:input_type -> changes.UpdateChangeRequest + 91, // 124: changes.ChangesService.DeleteChange:input_type -> changes.DeleteChangeRequest + 92, // 125: changes.ChangesService.ListChangesBySnapshotUUID:input_type -> changes.ListChangesBySnapshotUUIDRequest + 95, // 126: changes.ChangesService.RefreshState:input_type -> changes.RefreshStateRequest + 97, // 127: changes.ChangesService.StartChange:input_type -> changes.StartChangeRequest + 99, // 128: changes.ChangesService.EndChange:input_type -> changes.EndChangeRequest + 97, // 129: changes.ChangesService.StartChangeSimple:input_type -> changes.StartChangeRequest + 99, // 130: changes.ChangesService.EndChangeSimple:input_type -> changes.EndChangeRequest + 56, // 131: changes.ChangesService.ListHomeChanges:input_type -> changes.ListHomeChangesRequest + 54, // 132: changes.ChangesService.StartChangeAnalysis:input_type -> changes.StartChangeAnalysisRequest + 51, // 133: changes.ChangesService.ListChangingItemsSummary:input_type -> changes.ListChangingItemsSummaryRequest + 49, // 134: changes.ChangesService.GetDiff:input_type -> changes.GetDiffRequest + 59, // 135: changes.ChangesService.PopulateChangeFilters:input_type -> changes.PopulateChangeFiltersRequest + 105, // 136: changes.ChangesService.GenerateRiskFix:input_type -> changes.GenerateRiskFixRequest + 31, // 137: changes.ChangesService.GetHypothesesDetails:input_type -> changes.GetHypothesesDetailsRequest + 83, // 138: changes.ChangesService.GetChangeSignals:input_type -> changes.GetChangeSignalsRequest + 15, // 139: changes.LabelService.ListLabelRules:input_type -> changes.ListLabelRulesRequest + 17, // 140: changes.LabelService.CreateLabelRule:input_type -> changes.CreateLabelRuleRequest + 19, // 141: changes.LabelService.GetLabelRule:input_type -> changes.GetLabelRuleRequest + 21, // 142: changes.LabelService.UpdateLabelRule:input_type -> changes.UpdateLabelRuleRequest + 23, // 143: 
changes.LabelService.DeleteLabelRule:input_type -> changes.DeleteLabelRuleRequest + 25, // 144: changes.LabelService.TestLabelRule:input_type -> changes.TestLabelRuleRequest + 27, // 145: changes.LabelService.ReapplyLabelRuleInTimeRange:input_type -> changes.ReapplyLabelRuleInTimeRangeRequest + 74, // 146: changes.ChangesService.ListChanges:output_type -> changes.ListChangesResponse + 76, // 147: changes.ChangesService.ListChangesByStatus:output_type -> changes.ListChangesByStatusResponse + 78, // 148: changes.ChangesService.CreateChange:output_type -> changes.CreateChangeResponse + 85, // 149: changes.ChangesService.GetChange:output_type -> changes.GetChangeResponse + 85, // 150: changes.ChangesService.GetChangeByTicketLink:output_type -> changes.GetChangeResponse + 82, // 151: changes.ChangesService.GetChangeSummary:output_type -> changes.GetChangeSummaryResponse + 35, // 152: changes.ChangesService.GetChangeTimelineV2:output_type -> changes.GetChangeTimelineV2Response + 88, // 153: changes.ChangesService.GetChangeRisks:output_type -> changes.GetChangeRisksResponse + 90, // 154: changes.ChangesService.UpdateChange:output_type -> changes.UpdateChangeResponse + 94, // 155: changes.ChangesService.DeleteChange:output_type -> changes.DeleteChangeResponse + 93, // 156: changes.ChangesService.ListChangesBySnapshotUUID:output_type -> changes.ListChangesBySnapshotUUIDResponse + 96, // 157: changes.ChangesService.RefreshState:output_type -> changes.RefreshStateResponse + 98, // 158: changes.ChangesService.StartChange:output_type -> changes.StartChangeResponse + 100, // 159: changes.ChangesService.EndChange:output_type -> changes.EndChangeResponse + 101, // 160: changes.ChangesService.StartChangeSimple:output_type -> changes.StartChangeSimpleResponse + 102, // 161: changes.ChangesService.EndChangeSimple:output_type -> changes.EndChangeSimpleResponse + 58, // 162: changes.ChangesService.ListHomeChanges:output_type -> changes.ListHomeChangesResponse + 55, // 163: 
changes.ChangesService.StartChangeAnalysis:output_type -> changes.StartChangeAnalysisResponse + 52, // 164: changes.ChangesService.ListChangingItemsSummary:output_type -> changes.ListChangingItemsSummaryResponse + 50, // 165: changes.ChangesService.GetDiff:output_type -> changes.GetDiffResponse + 60, // 166: changes.ChangesService.PopulateChangeFilters:output_type -> changes.PopulateChangeFiltersResponse + 106, // 167: changes.ChangesService.GenerateRiskFix:output_type -> changes.GenerateRiskFixResponse + 32, // 168: changes.ChangesService.GetHypothesesDetails:output_type -> changes.GetHypothesesDetailsResponse + 84, // 169: changes.ChangesService.GetChangeSignals:output_type -> changes.GetChangeSignalsResponse + 16, // 170: changes.LabelService.ListLabelRules:output_type -> changes.ListLabelRulesResponse + 18, // 171: changes.LabelService.CreateLabelRule:output_type -> changes.CreateLabelRuleResponse + 20, // 172: changes.LabelService.GetLabelRule:output_type -> changes.GetLabelRuleResponse + 22, // 173: changes.LabelService.UpdateLabelRule:output_type -> changes.UpdateLabelRuleResponse + 24, // 174: changes.LabelService.DeleteLabelRule:output_type -> changes.DeleteLabelRuleResponse + 26, // 175: changes.LabelService.TestLabelRule:output_type -> changes.TestLabelRuleResponse + 28, // 176: changes.LabelService.ReapplyLabelRuleInTimeRange:output_type -> changes.ReapplyLabelRuleInTimeRangeResponse + 146, // [146:177] is the sub-list for method output_type + 115, // [115:146] is the sub-list for method input_type + 115, // [115:115] is the sub-list for extension type_name + 115, // [115:115] is the sub-list for extension extendee + 0, // [0:115] is the sub-list for field type_name } func init() { file_changes_proto_init() } diff --git a/go/sdp-go/connection.go b/go/sdp-go/connection.go index f125afc0..5ade42b1 100644 --- a/go/sdp-go/connection.go +++ b/go/sdp-go/connection.go @@ -10,7 +10,6 @@ import ( "go.opentelemetry.io/otel/attribute" 
"go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" - "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/proto" ) @@ -60,8 +59,7 @@ func recordMessage(ctx context.Context, name, subj, typ, msg string) { } func (ec *EncodedConnectionImpl) Publish(ctx context.Context, subj string, m proto.Message) error { - // TODO: protojson.Format is pretty expensive, replace with summarized data - recordMessage(ctx, "Publish", subj, fmt.Sprint(reflect.TypeOf(m)), protojson.Format(m)) + recordMessage(ctx, "Publish", subj, fmt.Sprint(reflect.TypeOf(m)), fmt.Sprintf("%d bytes", proto.Size(m))) data, err := proto.Marshal(m) if err != nil { @@ -77,8 +75,7 @@ func (ec *EncodedConnectionImpl) Publish(ctx context.Context, subj string, m pro } func (ec *EncodedConnectionImpl) PublishRequest(ctx context.Context, subj, replyTo string, m proto.Message) error { - // TODO: protojson.Format is pretty expensive, replace with summarized data - recordMessage(ctx, "Publish", subj, fmt.Sprint(reflect.TypeOf(m)), protojson.Format(m)) + recordMessage(ctx, "Publish", subj, fmt.Sprint(reflect.TypeOf(m)), fmt.Sprintf("%d bytes", proto.Size(m))) data, err := proto.Marshal(m) if err != nil { @@ -166,7 +163,7 @@ func Unmarshal(ctx context.Context, b []byte, m proto.Message) error { return err } - recordMessage(ctx, "Unmarshal", "unknown", fmt.Sprint(reflect.TypeOf(m)), protojson.Format(m)) + recordMessage(ctx, "Unmarshal", "unknown", fmt.Sprint(reflect.TypeOf(m)), fmt.Sprintf("%d bytes", proto.Size(m))) return nil } diff --git a/go/sdp-go/instance_detect.go b/go/sdp-go/instance_detect.go index 2da3ac9e..9f4ef8a5 100644 --- a/go/sdp-go/instance_detect.go +++ b/go/sdp-go/instance_detect.go @@ -58,7 +58,7 @@ func NewOvermindInstance(ctx context.Context, app string) (OvermindInstance, err } req = req.WithContext(ctx) - res, err := tracing.HTTPClient().Do(req) + res, err := tracing.HTTPClient().Do(req) //nolint:gosec // G107 (SSRF): URL is built from the app base URL (CLI 
config) + hardcoded path /api/public/instance-data if err != nil { return OvermindInstance{}, fmt.Errorf("could not fetch instance-data: %w", err) } diff --git a/go/sdp-go/items.go b/go/sdp-go/items.go index 49f7f59b..5f91e1e0 100644 --- a/go/sdp-go/items.go +++ b/go/sdp-go/items.go @@ -23,7 +23,7 @@ const WILDCARD = "*" // UniqueAttributeValue returns the value of whatever the Unique Attribute is // for this item. This will then be converted to a string and returned func (i *Item) UniqueAttributeValue() string { - var value interface{} + var value any var err error value, err = i.GetAttributes().Get(i.GetUniqueAttribute()) @@ -65,7 +65,7 @@ func (i *Item) GloballyUniqueName() string { // Hash Returns a 12 character hash for the item. This is likely but not // guaranteed to be unique. The hash is calculated using the GloballyUniqueName func (i *Item) Hash() string { - return HashSum(([]byte(fmt.Sprint(i.GloballyUniqueName())))) + return HashSum((fmt.Append(nil, i.GloballyUniqueName()))) } // IsEqual compares two Edges for equality by checking the From reference @@ -78,7 +78,7 @@ func (e *Edge) IsEqual(other *Edge) bool { // Hash Returns a 12 character hash for the item. This is likely but not // guaranteed to be unique. The hash is calculated using the GloballyUniqueName func (r *Reference) Hash() string { - return HashSum(([]byte(fmt.Sprint(r.GloballyUniqueName())))) + return HashSum((fmt.Append(nil, r.GloballyUniqueName()))) } // GloballyUniqueName Returns a string that defines the Item globally. This a @@ -172,17 +172,17 @@ func (r *Reference) ToQuery() *Query { // Get Returns the value of a given attribute by name. If the attribute is // a nested hash, nested values can be referenced using dot notation e.g. 
// location.country -func (a *ItemAttributes) Get(name string) (interface{}, error) { - var result interface{} +func (a *ItemAttributes) Get(name string) (any, error) { + var result any // Start at the beginning of the map, we will then traverse down as required result = a.GetAttrStruct().AsMap() - for _, section := range strings.Split(name, ".") { + for section := range strings.SplitSeq(name, ".") { // Check that the data we're using is in the supported format - var m map[string]interface{} + var m map[string]any - m, isMap := result.(map[string]interface{}) + m, isMap := result.(map[string]any) if !isMap { return nil, fmt.Errorf("attribute %v not found", name) @@ -203,7 +203,7 @@ func (a *ItemAttributes) Get(name string) (interface{}, error) { // Set sets an attribute. Values are converted to structpb versions and an error // will be returned if this fails. Note that this does *not* yet support // dot notation e.g. location.country -func (a *ItemAttributes) Set(name string, value interface{}) error { +func (a *ItemAttributes) Set(name string, value any) error { // Check to make sure that the pointer is not nil if a == nil { return errors.New("Set called on nil pointer") @@ -409,7 +409,7 @@ func AddDefaultTransforms(customTransforms TransformMap) TransformMap { // // Note that you need to use `AddDefaultTransforms(TransformMap) TransformMap` // to get sensible default transformations. 
-func ToAttributesCustom(m map[string]interface{}, sort bool, customTransforms TransformMap) (*ItemAttributes, error) { +func ToAttributesCustom(m map[string]any, sort bool, customTransforms TransformMap) (*ItemAttributes, error) { return toAttributes(m, sort, customTransforms) } @@ -417,16 +417,16 @@ func ToAttributesCustom(m map[string]interface{}, sort bool, customTransforms Tr // slices alphabetically.This should be used when the item doesn't contain array // attributes that are explicitly sorted, especially if these are sometimes // returned in a different order -func ToAttributesSorted(m map[string]interface{}) (*ItemAttributes, error) { +func ToAttributesSorted(m map[string]any) (*ItemAttributes, error) { return toAttributes(m, true, DefaultTransforms) } // ToAttributes Converts a map[string]interface{} to an ItemAttributes object -func ToAttributes(m map[string]interface{}) (*ItemAttributes, error) { +func ToAttributes(m map[string]any) (*ItemAttributes, error) { return toAttributes(m, false, DefaultTransforms) } -func toAttributes(m map[string]interface{}, sort bool, customTransforms TransformMap) (*ItemAttributes, error) { +func toAttributes(m map[string]any, sort bool, customTransforms TransformMap) (*ItemAttributes, error) { if m == nil { return nil, nil } @@ -457,13 +457,13 @@ func toAttributes(m map[string]interface{}, sort bool, customTransforms Transfor // ToAttributesViaJson Converts any struct to a set of attributes by marshalling // to JSON and then back again. 
This is less performant than ToAttributes() but // does save work when copying large structs to attributes in their entirety -func ToAttributesViaJson(v interface{}) (*ItemAttributes, error) { +func ToAttributesViaJson(v any) (*ItemAttributes, error) { b, err := json.Marshal(v) if err != nil { return nil, err } - var m map[string]interface{} + var m map[string]any err = json.Unmarshal(b, &m) if err != nil { @@ -475,7 +475,7 @@ func ToAttributesViaJson(v interface{}) (*ItemAttributes, error) { // A function that transforms one data type into another that is compatible with // protobuf. This is used to convert things like time.Time into a string -type TransformFunc func(interface{}) interface{} +type TransformFunc func(any) any // A map of types to transform functions type TransformMap map[reflect.Type]TransformFunc @@ -483,11 +483,11 @@ type TransformMap map[reflect.Type]TransformFunc // The default transforms that are used when converting to attributes var DefaultTransforms = TransformMap{ // Time should be in RFC3339Nano format i.e. 2006-01-02T15:04:05.999999999Z07:00 - reflect.TypeOf(time.Time{}): func(i interface{}) interface{} { + reflect.TypeFor[time.Time](): func(i any) any { return i.(time.Time).Format(time.RFC3339Nano) }, // Duration should be in string format - reflect.TypeOf(time.Duration(0)): func(i interface{}) interface{} { + reflect.TypeFor[time.Duration](): func(i any) any { return i.(time.Duration).String() }, } @@ -515,7 +515,7 @@ var DefaultTransforms = TransformMap{ // function does its best to example the available data type to ensure that as // long as the data can in theory be represented by a protobuf struct, the // conversion will work. 
-func sanitizeInterface(i interface{}, sortArrays bool, customTransforms TransformMap) interface{} { +func sanitizeInterface(i any, sortArrays bool, customTransforms TransformMap) any { if i == nil { return nil } @@ -571,9 +571,9 @@ func sanitizeInterface(i interface{}, sortArrays bool, customTransforms Transfor // conversion on that // returnSlice Returns the array in the format that protobuf can deal with - var returnSlice []interface{} + var returnSlice []any - returnSlice = make([]interface{}, v.Len()) + returnSlice = make([]any, v.Len()) for i := range v.Len() { returnSlice[i] = sanitizeInterface(v.Index(i).Interface(), sortArrays, customTransforms) @@ -585,9 +585,9 @@ func sanitizeInterface(i interface{}, sortArrays bool, customTransforms Transfor return returnSlice case reflect.Map: - var returnMap map[string]interface{} + var returnMap map[string]any - returnMap = make(map[string]interface{}) + returnMap = make(map[string]any) for _, mapKey := range v.MapKeys() { // Convert the key to a string @@ -602,9 +602,9 @@ func sanitizeInterface(i interface{}, sortArrays bool, customTransforms Transfor case reflect.Struct: // In the case of a struct we basically want to turn it into a // map[string]interface{} - var returnMap map[string]interface{} + var returnMap map[string]any - returnMap = make(map[string]interface{}) + returnMap = make(map[string]any) // Range over fields n := t.NumField() @@ -629,7 +629,7 @@ func sanitizeInterface(i interface{}, sortArrays bool, customTransforms Transfor } return sanitizeInterface(returnMap, sortArrays, customTransforms) - case reflect.Ptr: + case reflect.Pointer: // Get the zero value for this field zero := reflect.Zero(t) @@ -648,7 +648,7 @@ func sanitizeInterface(i interface{}, sortArrays bool, customTransforms Transfor // Sorts an interface slice by converting each item to a string and sorting // these strings -func sortInterfaceArray(input []interface{}) { +func sortInterfaceArray(input []any) { sort.Slice(input, func(i, j 
int) bool { return fmt.Sprint(input[i]) < fmt.Sprint(input[j]) }) diff --git a/go/sdp-go/items_test.go b/go/sdp-go/items_test.go index 666f3c69..a85f4ed0 100644 --- a/go/sdp-go/items_test.go +++ b/go/sdp-go/items_test.go @@ -16,7 +16,7 @@ import ( type ToAttributesTest struct { Name string - Input map[string]interface{} + Input map[string]any } type CustomString string @@ -29,24 +29,24 @@ var Bool1 CustomBool = false var NilPointerBool *bool type CustomStruct struct { - Foo string `json:",omitempty"` - Bar string `json:",omitempty"` - Baz string `json:",omitempty"` - Time time.Time `json:",omitempty"` + Foo string `json:",omitempty"` + Bar string `json:",omitempty"` + Baz string `json:",omitempty"` + Time time.Time Duration time.Duration `json:",omitempty"` } var ToAttributesTests = []ToAttributesTest{ { Name: "Basic strings map", - Input: map[string]interface{}{ + Input: map[string]any{ "firstName": "Dylan", "lastName": "Ratcliffe", }, }, { Name: "Arrays map", - Input: map[string]interface{}{ + Input: map[string]any{ "empty": []string{}, "single-level": []string{ "one", @@ -66,7 +66,7 @@ var ToAttributesTests = []ToAttributesTest{ }, { Name: "Nested strings maps", - Input: map[string]interface{}{ + Input: map[string]any{ "strings map": map[string]string{ "foo": "bar", }, @@ -74,7 +74,7 @@ var ToAttributesTests = []ToAttributesTest{ }, { Name: "Nested integer map", - Input: map[string]interface{}{ + Input: map[string]any{ "numbers map": map[string]int{ "one": 1, "two": 2, @@ -83,7 +83,7 @@ var ToAttributesTests = []ToAttributesTest{ }, { Name: "Nested string-array map", - Input: map[string]interface{}{ + Input: map[string]any{ "arrays map": map[string][]string{ "dogs": { "pug", @@ -94,7 +94,7 @@ var ToAttributesTests = []ToAttributesTest{ }, { Name: "Nested non-string keys map", - Input: map[string]interface{}{ + Input: map[string]any{ "non-string keys": map[int]string{ 1: "one", 2: "two", @@ -104,21 +104,21 @@ var ToAttributesTests = []ToAttributesTest{ }, { Name: 
"Composite types", - Input: map[string]interface{}{ + Input: map[string]any{ "underlying string": Dylan, "underlying bool": Bool1, }, }, { Name: "Pointers", - Input: map[string]interface{}{ + Input: map[string]any{ "pointer bool": &Bool1, "pointer string": &Dylan, }, }, { Name: "structs", - Input: map[string]interface{}{ + Input: map[string]any{ "named struct": CustomStruct{ Foo: "foo", Bar: "bar", @@ -134,7 +134,7 @@ var ToAttributesTests = []ToAttributesTest{ }, { Name: "Zero-value structs", - Input: map[string]interface{}{ + Input: map[string]any{ "something": CustomStruct{ Foo: "yes", Time: time.Now(), @@ -180,8 +180,8 @@ func TestToAttributes(t *testing.T) { t.Fatal(err) } - var input map[string]interface{} - var output map[string]interface{} + var input map[string]any + var output map[string]any err = json.Unmarshal(inputBytes, &input) @@ -208,7 +208,7 @@ func TestToAttributes(t *testing.T) { } func TestDefaultTransformMap(t *testing.T) { - input := map[string]interface{}{ + input := map[string]any{ // Use a duration "hour": 1 * time.Hour, } @@ -236,8 +236,8 @@ func TestCustomTransforms(t *testing.T) { Value string } - data := map[string]interface{}{ - "user": map[string]interface{}{ + data := map[string]any{ + "user": map[string]any{ "name": "Hunter", "password": Secret{ Value: "hunter2", @@ -246,7 +246,7 @@ func TestCustomTransforms(t *testing.T) { } attributes, err := ToAttributesCustom(data, true, TransformMap{ - reflect.TypeOf(Secret{}): func(i interface{}) interface{} { + reflect.TypeFor[Secret](): func(i any) any { // Remove it return "REDACTED" }, @@ -262,7 +262,7 @@ func TestCustomTransforms(t *testing.T) { t.Fatal(err) } - userMap, ok := user.(map[string]interface{}) + userMap, ok := user.(map[string]any) if !ok { t.Fatalf("Expected user to be a map, got %T", user) @@ -280,7 +280,7 @@ func TestCustomTransforms(t *testing.T) { Bar string } - data := map[string]interface{}{ + data := map[string]any{ "something": Something{ Foo: "foo", Bar: "bar", @@ 
-288,7 +288,7 @@ func TestCustomTransforms(t *testing.T) { } attributes, err := ToAttributesCustom(data, true, TransformMap{ - reflect.TypeOf(Something{}): func(i interface{}) interface{} { + reflect.TypeFor[Something](): func(i any) any { something := i.(Something) return map[string]string{ @@ -308,7 +308,7 @@ func TestCustomTransforms(t *testing.T) { t.Fatal(err) } - somethingMap, ok := something.(map[string]interface{}) + somethingMap, ok := something.(map[string]any) if !ok { t.Fatalf("Expected something to be a map, got %T", something) @@ -328,7 +328,7 @@ func TestCustomTransforms(t *testing.T) { Bar string } - data := map[string]interface{}{ + data := map[string]any{ "something": Something{ Foo: "foo", Bar: "bar", @@ -337,7 +337,7 @@ func TestCustomTransforms(t *testing.T) { } _, err := ToAttributesCustom(data, true, TransformMap{ - reflect.TypeOf(Something{}): func(i interface{}) interface{} { + reflect.TypeFor[Something](): func(i any) any { return nil }, }) @@ -349,7 +349,7 @@ func TestCustomTransforms(t *testing.T) { } func TestCopy(t *testing.T) { - exampleAttributes, err := ToAttributes(map[string]interface{}{ + exampleAttributes, err := ToAttributes(map[string]any{ "name": "Dylan", "friend": "Mike", "age": 27, @@ -472,8 +472,8 @@ func AssertItemsEqual(itemA *Item, itemB *Item, t *testing.T) { t.Error("UniqueAttribute did not match") } - var nameA interface{} - var nameB interface{} + var nameA any + var nameB any var err error nameA, err = itemA.GetAttributes().Get("name") @@ -643,9 +643,9 @@ func TestToAttributesViaJson(t *testing.T) { } func TestAttributesGet(t *testing.T) { - mapData := map[string]interface{}{ + mapData := map[string]any{ "foo": "bar", - "nest": map[string]interface{}{ + "nest": map[string]any{ "nest2": map[string]string{ "nest3": "nestValue", }, @@ -668,9 +668,9 @@ func TestAttributesGet(t *testing.T) { } func TestAttributesSet(t *testing.T) { - mapData := map[string]interface{}{ + mapData := map[string]any{ "foo": "bar", - "nest": 
map[string]interface{}{ + "nest": map[string]any{ "nest2": map[string]string{ "nest3": "nestValue", }, diff --git a/go/sdp-go/link_extract.go b/go/sdp-go/link_extract.go index 9f03b99f..8041e61b 100644 --- a/go/sdp-go/link_extract.go +++ b/go/sdp-go/link_extract.go @@ -28,8 +28,8 @@ func ExtractLinksFromAttributes(attributes *ItemAttributes) []*LinkedItemQuery { // converts it to a set of ItemAttributes via the `ToAttributes` function. This // uses reflection. `ExtractLinksFromAttributes` is more efficient if you have // the attributes already in the correct format. -func ExtractLinksFrom(anything interface{}) ([]*LinkedItemQuery, error) { - attributes, err := ToAttributes(map[string]interface{}{ +func ExtractLinksFrom(anything any) ([]*LinkedItemQuery, error) { + attributes, err := ToAttributes(map[string]any{ "": anything, }) if err != nil { diff --git a/go/sdp-go/link_extract_test.go b/go/sdp-go/link_extract_test.go index 0314b0f4..4b2af566 100644 --- a/go/sdp-go/link_extract_test.go +++ b/go/sdp-go/link_extract_test.go @@ -7,7 +7,7 @@ import ( ) // Create a very large set of attributes for the benchmark -func createTestData() (*ItemAttributes, interface{}) { +func createTestData() (*ItemAttributes, any) { yamlString := `--- creationTimestamp: 2024-07-09T11:16:31Z data: @@ -414,7 +414,7 @@ taskArn: arn:aws:ecs:eu-west-2:123456789:task/example-tfc/ded4f8eebe4144ddb9a93a version: 5 ` - mapData := make(map[string]interface{}) + mapData := make(map[string]any) _ = yaml.Unmarshal([]byte(yamlString), &mapData) attrs, _ := ToAttributes(mapData) @@ -616,7 +616,7 @@ func TestExtractLinksFromAttributes(t *testing.T) { func TestExtractLinksFrom(t *testing.T) { tests := []struct { Name string - Object interface{} + Object any ExpectedQueries []string }{ { @@ -677,8 +677,8 @@ func TestExtractLinksFrom(t *testing.T) { func TestExtractLinksFromConfigMapData(t *testing.T) { // Test ConfigMap data with S3 bucket ARN - configMapData := map[string]interface{}{ - "data": 
map[string]interface{}{ + configMapData := map[string]any{ + "data": map[string]any{ "S3_BUCKET_ARN": "arn:aws:s3:::example-bucket-name", "S3_BUCKET_NAME": "example-bucket-name", }, @@ -761,7 +761,7 @@ func TestS3BucketARNTypeDetection(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - queries, err := ExtractLinksFrom(map[string]interface{}{ + queries, err := ExtractLinksFrom(map[string]any{ "arn": tt.arn, }) if err != nil { diff --git a/go/sdp-go/progress.go b/go/sdp-go/progress.go index f53c5094..d4d8607a 100644 --- a/go/sdp-go/progress.go +++ b/go/sdp-go/progress.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "math/rand/v2" "sync" "sync/atomic" "time" @@ -18,8 +19,9 @@ import ( ) // DefaultResponseInterval is the default period of time within which responses -// are sent (5 seconds) -const DefaultResponseInterval = (5 * time.Second) +// are sent (30 seconds). Jitter of +/-10% is applied per tick to prevent a +// thundering herd when many concurrent queries start simultaneously. +const DefaultResponseInterval = (30 * time.Second) // DefaultStartTimeout is the default period of time to wait for the first // response on a query. If no response is received in this time, the query will @@ -100,19 +102,24 @@ func (rs *ResponseSender) Start(ctx context.Context, ec EncodedConnection, respo if ec == nil { return } - tick := time.NewTicker(rs.ResponseInterval) - defer tick.Stop() + + // Apply +/-10% uniform random jitter per tick to prevent a thundering + // herd when many ResponseSenders start near-simultaneously. 
+ tenth := rs.ResponseInterval / 10 + base := rs.ResponseInterval - tenth + jitterRange := 2 * tenth for { - var err error + jitter := time.Duration(rand.Int64N(int64(jitterRange))) //nolint:gosec // jitter does not need cryptographic randomness + delay := base + jitter select { case <-rs.monitorKill: return case <-ctx.Done(): return - case <-tick.C: - err = rs.connection.Publish( + case <-time.After(delay): + err := rs.connection.Publish( ctx, rs.ResponseSubject, &QueryResponse{ResponseType: &QueryResponse_Response{Response: &resp}}, diff --git a/go/sdp-go/progress_test.go b/go/sdp-go/progress_test.go index 02c8adfd..e0d9a533 100644 --- a/go/sdp-go/progress_test.go +++ b/go/sdp-go/progress_test.go @@ -495,9 +495,7 @@ func TestQueryProgressParallel(t *testing.T) { var wg sync.WaitGroup for i := 0; i != 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { // Test the initial response sq.handleQueryResponse(ctx, &QueryResponse{ ResponseType: &QueryResponse_Response{ @@ -509,7 +507,7 @@ func TestQueryProgressParallel(t *testing.T) { }, }, }) - }() + }) } wg.Wait() @@ -637,8 +635,7 @@ func TestRogueResponder(t *testing.T) { // Create our rogue responder that doesn't cancel when it should ticker := time.NewTicker(5 * time.Second) - tickerCtx, tickerCancel := context.WithCancel(context.Background()) - defer tickerCancel() + tickerCtx := t.Context() defer ticker.Stop() go func() { diff --git a/go/sdp-go/proto_clone_test.go b/go/sdp-go/proto_clone_test.go index 863c8f7d..546104e1 100644 --- a/go/sdp-go/proto_clone_test.go +++ b/go/sdp-go/proto_clone_test.go @@ -79,7 +79,7 @@ func TestProtoCloneReplacesCustomCopy(t *testing.T) { } // Add attributes - attrs, err := ToAttributes(map[string]interface{}{ + attrs, err := ToAttributes(map[string]any{ "name": "test-item", "port": 8080, }) diff --git a/go/sdp-go/sdpws/client.go b/go/sdp-go/sdpws/client.go index 7c7f62cf..f862824a 100644 --- a/go/sdp-go/sdpws/client.go +++ b/go/sdp-go/sdpws/client.go @@ -64,9 +64,9 
@@ type Client struct { // receiveCtx is the context for the receive goroutine // receiveCancel cancels the receive context // receiveDone signals when receive has finished - receiveCtx context.Context - receiveCancel context.CancelFunc - receiveDone sync.WaitGroup + receiveCtx context.Context + receiveCancel context.CancelFunc + receiveDone sync.WaitGroup } // Dial connects to the given URL and returns a new Client. Pass nil as handler @@ -123,11 +123,9 @@ func dialImpl(ctx context.Context, u string, httpClient *http.Client, handler Ga // Create a dedicated context for receive() that we can cancel independently c.receiveCtx, c.receiveCancel = context.WithCancel(ctx) - c.receiveDone.Add(1) - go func() { - defer c.receiveDone.Done() + c.receiveDone.Go(func() { c.receive(c.receiveCtx) - }() + }) return c, nil } diff --git a/go/sdp-go/sdpws/client_test.go b/go/sdp-go/sdpws/client_test.go index 57fdd5dd..c3e3a460 100644 --- a/go/sdp-go/sdpws/client_test.go +++ b/go/sdp-go/sdpws/client_test.go @@ -7,6 +7,7 @@ import ( "net/http" "net/http/httptest" "os" + "slices" "sync" "testing" "time" @@ -20,12 +21,7 @@ import ( // Helper function to check if a slice contains a string func contains(slice []string, item string) bool { - for _, s := range slice { - if s == item { - return true - } - } - return false + return slices.Contains(slice, item) } // TestServer is a test server for the websocket client. 
Note that this can only @@ -643,19 +639,15 @@ func TestClient(t *testing.T) { var wg sync.WaitGroup results := make([]result, 2) - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { items, err := c.QueryOne(ctx, query1) results[0] = result{items: items, err: err} - }() + }) - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { items, err := c.QueryOne(ctx, query2) results[1] = result{items: items, err: err} - }() + }) wg.Wait() @@ -797,19 +789,15 @@ func TestClient(t *testing.T) { resultsA := make([]result, 1) resultsB := make([]result, 1) - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { items, err := c.QueryOne(ctx, queryA) resultsA[0] = result{items: items, err: err} - }() + }) - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { items, err := c.QueryOne(ctx, queryB) resultsB[0] = result{items: items, err: err} - }() + }) wg.Wait() @@ -914,9 +902,7 @@ func TestRaceConditionOnClose(t *testing.T) { // Start a simulated receive() goroutine that will call postRequestChan // This simulates the real receive() behavior where it processes messages // and calls postRequestChan() until the context is cancelled - c.receiveDone.Add(1) - go func() { - defer c.receiveDone.Done() + c.receiveDone.Go(func() { // Simulate receive() processing messages and calling postRequestChan // It will be cancelled by Close() and should stop before channels are closed for i := range 1000 { @@ -939,19 +925,17 @@ func TestRaceConditionOnClose(t *testing.T) { }() time.Sleep(time.Nanosecond) } - }() + }) // Start a goroutine that calls Close() concurrently // Close() will cancel the receive context, wait for receive() to finish, // and then close channels. This ensures receive() stops before channels are closed. 
- wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { // Wait a tiny bit to let some postRequestChan calls start time.Sleep(time.Microsecond * 10) // Use Close() which properly cancels receive context and waits before closing channels _ = c.Close(ctx) - }() + }) // Wait for all goroutines to complete wg.Wait() diff --git a/go/sdp-go/test_utils.go b/go/sdp-go/test_utils.go index 0ca45d10..fbb4c1ec 100644 --- a/go/sdp-go/test_utils.go +++ b/go/sdp-go/test_utils.go @@ -15,7 +15,7 @@ import ( type ResponseMessage struct { Subject string - V interface{} + V any } // TestConnection Used to mock a NATS connection for testing @@ -140,7 +140,7 @@ func (r *TestConnection) subjectToRegexp(subject string) *regexp.Regexp { func (t *TestConnection) RequestMsg(ctx context.Context, msg *nats.Msg) (*nats.Msg, error) { replySubject := randSeq(10) msg.Reply = replySubject - replies := make(chan interface{}, 128) + replies := make(chan any, 128) // Subscribe to the reply subject _, err := t.Subscribe(replySubject, func(msg *nats.Msg) { diff --git a/go/sdpcache/bolt_cache.go b/go/sdpcache/bolt_cache.go index 9d740310..de81b8d6 100644 --- a/go/sdpcache/bolt_cache.go +++ b/go/sdpcache/bolt_cache.go @@ -148,7 +148,7 @@ func parseExpiryKey(key []byte) (time.Time, SSTHash, []byte, error) { } expiryNanoUint := binary.BigEndian.Uint64(key[0:8]) - expiryNano := int64(expiryNanoUint) + expiryNano := int64(expiryNanoUint) //nolint:gosec // G115 (overflow): guarded by underflow check on lines 153-155 that clamps to zero // Check for overflow when converting uint64 to int64 if expiryNano < 0 && expiryNanoUint > 0 { expiryNano = 0 @@ -157,13 +157,13 @@ func parseExpiryKey(key []byte) (time.Time, SSTHash, []byte, error) { // Find the separators rest := key[9:] // skip the first separator - sepIdx := bytes.IndexByte(rest, '|') - if sepIdx < 0 { + before, after, ok := bytes.Cut(rest, []byte{'|'}) + if !ok { return time.Time{}, "", nil, errors.New("invalid expiry key format") } - sstHash := 
SSTHash(rest[:sepIdx]) - entryKey := rest[sepIdx+1:] + sstHash := SSTHash(before) + entryKey := after return expiry, sstHash, entryKey, nil } @@ -294,7 +294,7 @@ func (c *BoltCache) loadDeletedBytes() error { data := meta.Get(deletedBytesKey) if len(data) == 8 { deletedBytesUint := binary.BigEndian.Uint64(data) - deletedBytes := int64(deletedBytesUint) + deletedBytes := int64(deletedBytesUint) //nolint:gosec // G115 (overflow): guarded by underflow check on lines 299-301 that clamps to zero // Check for overflow when converting uint64 to int64 if deletedBytes < 0 && deletedBytesUint > 0 { deletedBytes = 0 @@ -402,12 +402,12 @@ func (c *BoltCache) CloseAndDestroy() error { // Get the file path before closing path := c.db.Path() - + // Close the database if err := c.db.Close(); err != nil { return err } - + // Delete the cache file return os.Remove(path) } @@ -674,7 +674,13 @@ func (c *BoltCache) Lookup(ctx context.Context, srcName string, method sdp.Query return true, ck, items, nil, noopDone } -// Search performs a lower-level search using a CacheKey. +// Search performs a lower-level search using a CacheKey, bypassing pendingWork +// deduplication. This is used by ShardedCache to do raw reads on individual shards. +func (c *BoltCache) Search(ctx context.Context, ck CacheKey) ([]*sdp.Item, error) { + return c.search(ctx, ck) +} + +// search performs a lower-level search using a CacheKey. // If ctx contains a span, detailed timing metrics will be added as span attributes. 
func (c *BoltCache) search(ctx context.Context, ck CacheKey) ([]*sdp.Item, error) { if c == nil { diff --git a/go/sdpcache/cache.go b/go/sdpcache/cache.go index d6089394..ceea187b 100644 --- a/go/sdpcache/cache.go +++ b/go/sdpcache/cache.go @@ -5,12 +5,10 @@ import ( "crypto/sha256" "errors" "fmt" - "os" "strings" "sync" "time" - "github.com/getsentry/sentry-go" "github.com/google/btree" "github.com/overmindtech/cli/go/sdp-go" log "github.com/sirupsen/logrus" @@ -316,38 +314,11 @@ func NewMemoryCache() *MemoryCache { } } -// NewCache creates a new cache. This function returns a Cache interface. -// Currently, it returns a file-based implementation. The passed context will be -// used to start the purger. +// NewCache creates a new cache. This function returns a Cache interface backed +// by a ShardedCache (N independent BoltDB files) for write concurrency. +// The passed context will be used to start the purger. func NewCache(ctx context.Context) Cache { - tmpFile, err := os.CreateTemp("", "sdpcache-*.db") - // close the file so bbolt can open it, but keep the file on disk. 
We don't - // need to check for errors since we're not using the file - _ = tmpFile.Close() - - if err != nil { - sentry.CaptureException(err) - log.WithError(err).Error("Failed to create temporary file for BoltCache, using memory cache instead") - cache := NewMemoryCache() - cache.StartPurger(ctx) - return cache - } - cache, err := NewBoltCache( - tmpFile.Name(), - WithMinWaitTime(30*time.Second), - // allocate 1GB of disk space for the cache (with 1GB additional for compaction temp file) - WithCompactThreshold(1*1024*1024*1024), - ) - if err != nil { - sentry.CaptureException(err) - log.WithError(err).Error("Failed to create BoltCache, using memory cache instead") - _ = os.Remove(tmpFile.Name()) - cache := NewMemoryCache() - cache.StartPurger(ctx) - return cache - } - cache.StartPurger(ctx) - return cache + return newShardedCacheForProduction(ctx) } func newExpiryIndex() *btree.BTreeG[*CachedResult] { diff --git a/go/sdpcache/cache_stuck_test.go b/go/sdpcache/cache_stuck_test.go index b5118e18..38681940 100644 --- a/go/sdpcache/cache_stuck_test.go +++ b/go/sdpcache/cache_stuck_test.go @@ -37,9 +37,7 @@ func TestListErrorWithProperCleanup(t *testing.T) { // First goroutine: Gets cache miss, simulates work that errors, // and properly calls StoreError to cache the error - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { <-startBarrier hit, ck, _, _, done := cache.Lookup(ctx, sst.SourceName, method, sst.Scope, sst.Type, query, false) @@ -60,12 +58,10 @@ func TestListErrorWithProperCleanup(t *testing.T) { } cache.StoreError(ctx, err, 1*time.Hour, ck) t.Log("First goroutine: properly called StoreError") - }() + }) // Second goroutine: Should get cached error immediately - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { <-startBarrier // Small delay to ensure first goroutine starts first @@ -87,7 +83,7 @@ func TestListErrorWithProperCleanup(t *testing.T) { t.Error("second goroutine: expected cached error") } t.Logf("Second goroutine: got cached 
error after %v", secondCallDuration) - }() + }) // Release all goroutines close(startBarrier) @@ -128,9 +124,7 @@ func TestListErrorWithProperDone(t *testing.T) { // First goroutine: Gets cache miss, simulates work that errors, // and PROPERLY calls the done function - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { <-startBarrier hit, _, _, _, done := cache.Lookup(ctx, sst.SourceName, method, sst.Scope, sst.Type, query, false) @@ -147,12 +141,10 @@ func TestListErrorWithProperDone(t *testing.T) { // CORRECT BEHAVIOR: Call done to release resources done() t.Log("First goroutine: properly called done()") - }() + }) // Second goroutine: Should receive cache miss quickly (not block) - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { <-startBarrier // Small delay to ensure first goroutine starts first @@ -168,7 +160,7 @@ func TestListErrorWithProperDone(t *testing.T) { } t.Logf("Second goroutine: got cache miss after %v", secondCallDuration) - }() + }) // Release all goroutines close(startBarrier) @@ -220,9 +212,7 @@ func TestListErrorWithStoreError(t *testing.T) { // First goroutine: Gets cache miss, simulates work that errors, // and PROPERLY calls StoreError - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { <-startBarrier hit, ck, _, _, done := cache.Lookup(ctx, sst.SourceName, method, sst.Scope, sst.Type, query, false) @@ -239,12 +229,10 @@ func TestListErrorWithStoreError(t *testing.T) { // CORRECT BEHAVIOR: Store the error so other callers can get it cache.StoreError(ctx, expectedError, 10*time.Second, ck) t.Log("First goroutine: properly called StoreError") - }() + }) // Second goroutine: Should receive the cached error - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { <-startBarrier // Small delay to ensure first goroutine starts first @@ -262,7 +250,7 @@ func TestListErrorWithStoreError(t *testing.T) { } t.Logf("Second goroutine: got result after %v", secondCallDuration) - }() + }) // Release all goroutines 
close(startBarrier) @@ -314,9 +302,7 @@ func TestListReturnsEmptyButNoStore(t *testing.T) { var secondCallDuration time.Duration // First goroutine: LIST returns 0 items, completes without storing - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { <-startBarrier hit, ck, _, _, done := cache.Lookup(ctx, sst.SourceName, method, sst.Scope, sst.Type, query, false) @@ -340,12 +326,10 @@ func TestListReturnsEmptyButNoStore(t *testing.T) { } t.Log("First goroutine: completed work but stored nothing") - }() + }) // Second goroutine: Should get cache miss - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { <-startBarrier // Small delay to ensure first goroutine starts first @@ -357,7 +341,7 @@ func TestListReturnsEmptyButNoStore(t *testing.T) { secondCallDuration = time.Since(start) t.Logf("Second goroutine: hit=%v, duration=%v", secondCallHit, secondCallDuration) - }() + }) // Release all goroutines close(startBarrier) diff --git a/go/sdpcache/cache_test.go b/go/sdpcache/cache_test.go index 8003557f..b8bf5326 100644 --- a/go/sdpcache/cache_test.go +++ b/go/sdpcache/cache_test.go @@ -14,13 +14,15 @@ import ( ) // testSearch is a helper function that calls the internal search method -// on either MemoryCache or BoltCache implementations for testing purposes +// on either MemoryCache, BoltCache, or ShardedCache implementations for testing purposes func testSearch(ctx context.Context, cache Cache, ck CacheKey) ([]*sdp.Item, error) { switch c := cache.(type) { case *MemoryCache: return c.search(ctx, ck) case *BoltCache: return c.search(ctx, ck) + case *ShardedCache: + return c.searchByKey(ctx, ck) default: return nil, fmt.Errorf("unsupported cache type for search: %T", cache) } @@ -37,16 +39,29 @@ func cacheImplementations(tb testing.TB) []struct { factory func() Cache }{ {"MemoryCache", func() Cache { return NewMemoryCache() }}, - {"BoltCache", func() Cache { - c, err := NewBoltCache(filepath.Join(tb.TempDir(), "cache.db")) - if err != nil { - 
tb.Fatalf("failed to create BoltCache: %v", err) - } - tb.Cleanup(func() { - _ = c.CloseAndDestroy() - }) - return c - }}, + {"BoltCache", func() Cache { + c, err := NewBoltCache(filepath.Join(tb.TempDir(), "cache.db")) + if err != nil { + tb.Fatalf("failed to create BoltCache: %v", err) + } + tb.Cleanup(func() { + _ = c.CloseAndDestroy() + }) + return c + }}, + {"ShardedCache", func() Cache { + c, err := NewShardedCache( + filepath.Join(tb.TempDir(), "shards"), + DefaultShardCount, + ) + if err != nil { + tb.Fatalf("failed to create ShardedCache: %v", err) + } + tb.Cleanup(func() { + _ = c.CloseAndDestroy() + }) + return c + }}, } } @@ -862,7 +877,7 @@ func TestMultipleItemsSameSST(t *testing.T) { uav := fmt.Sprintf("item%d", i) // Set the item's unique attribute value to match the CacheKey - attrs := make(map[string]interface{}) + attrs := make(map[string]any) if item.GetAttributes() != nil && item.GetAttributes().GetAttrStruct() != nil { for k, v := range item.GetAttributes().GetAttrStruct().GetFields() { attrs[k] = v @@ -1030,22 +1045,18 @@ func TestMemoryCacheConcurrent(t *testing.T) { numParallel := 1_000 for range numParallel { - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { // Store the item item := GenerateRandomItem() ck := CacheKeyFromQuery(item.GetMetadata().GetSourceQuery(), item.GetMetadata().GetSourceName()) cache.StoreItem(ctx, item, 100*time.Millisecond, ck) - wg.Add(1) // Create a goroutine to also delete in parallel - go func() { - defer wg.Done() + wg.Go(func() { cache.Delete(ck) - }() - }() + }) + }) } wg.Wait() @@ -1145,9 +1156,7 @@ func TestMemoryCacheLookupDeduplicationCompleteWithoutStore(t *testing.T) { numWaiters := 3 // First goroutine: starts work and completes without storing anything - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { <-startBarrier hit, ck, _, _, done := cache.Lookup(ctx, sst.SourceName, method, sst.Scope, sst.Type, query, false) @@ -1162,13 +1171,11 @@ func 
TestMemoryCacheLookupDeduplicationCompleteWithoutStore(t *testing.T) { // Complete without storing anything - triggers ErrCacheNotFound on re-check cache.pending.Complete(ck.String()) - }() + }) // Waiter goroutines for range numWaiters { - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { <-startBarrier time.Sleep(10 * time.Millisecond) @@ -1179,7 +1186,7 @@ func TestMemoryCacheLookupDeduplicationCompleteWithoutStore(t *testing.T) { waiterMu.Lock() waiterHits = append(waiterHits, hit) waiterMu.Unlock() - }() + }) } close(startBarrier) @@ -1484,13 +1491,11 @@ func TestBoltCacheConcurrentCloseAndDestroy(t *testing.T) { // Launch concurrent read/write operations for range numOperations { - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { item := GenerateRandomItem() ck := CacheKeyFromQuery(item.GetMetadata().GetSourceQuery(), item.GetMetadata().GetSourceName()) cache.StoreItem(ctx, item, 10*time.Second, ck) - }() + }) } // Wait a bit to let operations start @@ -1498,14 +1503,12 @@ func TestBoltCacheConcurrentCloseAndDestroy(t *testing.T) { // Close and destroy while operations are in flight // The compaction lock should serialize this properly - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { err := cache.CloseAndDestroy() if err != nil { t.Logf("CloseAndDestroy returned error: %v", err) } - }() + }) // Wait for all operations to complete wg.Wait() @@ -1753,9 +1756,7 @@ func TestBoltCacheLookupDeduplicationTimeout(t *testing.T) { startBarrier := make(chan struct{}) // First goroutine: does the work but takes a long time - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { <-startBarrier hit, ck, _, _, done := cache.Lookup(ctx, sst.SourceName, method, sst.Scope, sst.Type, query, false) @@ -1773,13 +1774,11 @@ func TestBoltCacheLookupDeduplicationTimeout(t *testing.T) { item.Scope = sst.Scope item.Type = sst.Type cache.StoreItem(ctx, item, 10*time.Second, ck) - }() + }) // Second goroutine: should timeout waiting var secondHit bool 
- wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { <-startBarrier // Small delay to ensure first goroutine starts first @@ -1792,7 +1791,7 @@ func TestBoltCacheLookupDeduplicationTimeout(t *testing.T) { hit, _, _, _, done := cache.Lookup(shortCtx, sst.SourceName, method, sst.Scope, sst.Type, query, false) defer done() secondHit = hit - }() + }) // Release all goroutines close(startBarrier) @@ -1840,9 +1839,7 @@ func TestBoltCacheLookupDeduplicationError(t *testing.T) { numWaiters := 5 // First goroutine: does the work and stores an error - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { <-startBarrier hit, ck, _, _, done := cache.Lookup(ctx, sst.SourceName, method, sst.Scope, sst.Type, query, false) @@ -1857,13 +1854,11 @@ func TestBoltCacheLookupDeduplicationError(t *testing.T) { // Store the error cache.StoreError(ctx, expectedError, 10*time.Second, ck) - }() + }) // Waiter goroutines: should receive the error for range numWaiters { - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { <-startBarrier // Small delay to ensure first goroutine starts first @@ -1877,7 +1872,7 @@ func TestBoltCacheLookupDeduplicationError(t *testing.T) { waiterErrors = append(waiterErrors, qErr) } waiterMu.Unlock() - }() + }) } // Release all goroutines @@ -1924,9 +1919,7 @@ func TestBoltCacheLookupDeduplicationCancel(t *testing.T) { numWaiters := 3 // First goroutine: starts work but then calls done() without storing anything - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { <-startBarrier hit, _, _, _, done := cache.Lookup(ctx, sst.SourceName, method, sst.Scope, sst.Type, query, false) @@ -1939,13 +1932,11 @@ func TestBoltCacheLookupDeduplicationCancel(t *testing.T) { // Simulate work that fails - done the pending work time.Sleep(50 * time.Millisecond) done() - }() + }) // Waiter goroutines for range numWaiters { - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { <-startBarrier // Small delay to ensure first goroutine starts first @@ 
-1957,7 +1948,7 @@ func TestBoltCacheLookupDeduplicationCancel(t *testing.T) { waiterMu.Lock() waiterHits = append(waiterHits, hit) waiterMu.Unlock() - }() + }) } // Release all goroutines @@ -2002,9 +1993,7 @@ func TestBoltCacheLookupDeduplicationCompleteWithoutStore(t *testing.T) { // First goroutine: starts work and completes without storing anything // This simulates a LIST query that returns 0 items - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { <-startBarrier hit, ck, _, _, done := cache.Lookup(ctx, sst.SourceName, method, sst.Scope, sst.Type, query, false) @@ -2020,13 +2009,11 @@ func TestBoltCacheLookupDeduplicationCompleteWithoutStore(t *testing.T) { // Complete without storing anything - no items, no error // This triggers the ErrCacheNotFound path in waiters' re-check cache.pending.Complete(ck.String()) - }() + }) // Waiter goroutines for range numWaiters { - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { <-startBarrier // Small delay to ensure first goroutine starts first @@ -2038,7 +2025,7 @@ func TestBoltCacheLookupDeduplicationCompleteWithoutStore(t *testing.T) { waiterMu.Lock() waiterHits = append(waiterHits, hit) waiterMu.Unlock() - }() + }) } // Release all goroutines @@ -2105,11 +2092,9 @@ func TestPendingWorkUnit(t *testing.T) { var wg sync.WaitGroup var waitOk bool - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { waitOk = pw.Wait(ctx, entry) - }() + }) // Give waiter time to start waiting time.Sleep(10 * time.Millisecond) @@ -2135,11 +2120,9 @@ func TestPendingWorkUnit(t *testing.T) { var wg sync.WaitGroup var waitOk bool - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { waitOk = pw.Wait(ctx, entry) - }() + }) // Give waiter time to start waiting time.Sleep(10 * time.Millisecond) diff --git a/go/sdpcache/item_generator_test.go b/go/sdpcache/item_generator_test.go index 602a5c8b..2729a5cd 100644 --- a/go/sdpcache/item_generator_test.go +++ b/go/sdpcache/item_generator_test.go @@ -44,7 +44,7 @@ 
const MaxLinkedItemQueries = 10 // GenerateRandomItem Generates a random item and the tags for this item. The // tags include the name, type and a tag called "all" with a value of "all" func GenerateRandomItem() *sdp.Item { - attrs := make(map[string]interface{}) + attrs := make(map[string]any) name := randSeq(rand.Intn(MaxAttributeValueLength)) typ := Types[rand.Intn(len(Types))] diff --git a/go/sdpcache/sharded_cache.go b/go/sdpcache/sharded_cache.go new file mode 100644 index 00000000..6d9b30c3 --- /dev/null +++ b/go/sdpcache/sharded_cache.go @@ -0,0 +1,511 @@ +package sdpcache + +import ( + "context" + "errors" + "fmt" + "hash/fnv" + "os" + "path/filepath" + "sync" + "time" + + "github.com/getsentry/sentry-go" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/tracing" + log "github.com/sirupsen/logrus" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// DefaultShardCount is the number of independent BoltDB shards. 17 is prime +// (avoids hash collision clustering) and distributes ~345 stdlib goroutines to +// ~20 per shard, making BoltDB's single-writer lock no longer a bottleneck. +const DefaultShardCount = 17 + +// ShardedCache implements the Cache interface by distributing entries across N +// independent BoltCache instances. Shard selection uses FNV-32a of the item +// identity (SSTHash + UniqueAttributeValue), so writes within a single adapter +// type (e.g. DNS in stdlib) spread evenly across all shards. +// +// GET queries route to exactly one shard. LIST/SEARCH queries fan out to all +// shards in parallel and merge results. pendingWork deduplication lives at the +// ShardedCache level to prevent duplicate API calls across the fan-out. +type ShardedCache struct { + shards []*BoltCache + dir string + + // pendingWork lives at the ShardedCache level so that deduplication spans + // the entire cache, not individual shards. 
+ pending *pendingWork +} + +var _ Cache = (*ShardedCache)(nil) + +// NewShardedCache creates N BoltCache instances in dir (shard-00.db through +// shard-{N-1}.db) using goroutine fan-out to avoid N× startup latency. +func NewShardedCache(dir string, shardCount int, opts ...BoltCacheOption) (*ShardedCache, error) { + if shardCount <= 0 { + return nil, fmt.Errorf("shard count must be positive, got %d", shardCount) + } + + if err := os.MkdirAll(dir, 0o755); err != nil { + return nil, fmt.Errorf("failed to create shard directory: %w", err) + } + + shards := make([]*BoltCache, shardCount) + errs := make([]error, shardCount) + + var wg sync.WaitGroup + for i := range shardCount { + wg.Add(1) + go func(idx int) { + defer wg.Done() + path := filepath.Join(dir, fmt.Sprintf("shard-%02d.db", idx)) + c, err := NewBoltCache(path, opts...) + if err != nil { + errs[idx] = fmt.Errorf("shard %d: %w", idx, err) + return + } + shards[idx] = c + }(i) + } + wg.Wait() + + // If any shard failed, close the ones that succeeded and return the error. + for _, err := range errs { + if err != nil { + for _, s := range shards { + if s != nil { + _ = s.CloseAndDestroy() + } + } + return nil, err + } + } + + return &ShardedCache{ + shards: shards, + dir: dir, + pending: newPendingWork(), + }, nil +} + +// shardFor returns the shard index for a given item identity. +func (sc *ShardedCache) shardFor(sstHash SSTHash, uav string) int { + h := fnv.New32a() + _, _ = h.Write([]byte(sstHash)) + _, _ = h.Write([]byte(uav)) + return int(h.Sum32()) % len(sc.shards) +} + +// Lookup performs a cache lookup, routing GET queries to a single shard and +// LIST/SEARCH queries to all shards via parallel fan-out. 
+func (sc *ShardedCache) Lookup(ctx context.Context, srcName string, method sdp.QueryMethod, scope string, typ string, query string, ignoreCache bool) (bool, CacheKey, []*sdp.Item, *sdp.QueryError, func()) { + ctx, span := tracing.Tracer().Start(ctx, "ShardedCache.Lookup", + trace.WithAttributes( + attribute.String("ovm.cache.sourceName", srcName), + attribute.String("ovm.cache.method", method.String()), + attribute.String("ovm.cache.scope", scope), + attribute.String("ovm.cache.type", typ), + attribute.String("ovm.cache.query", query), + attribute.Bool("ovm.cache.ignoreCache", ignoreCache), + attribute.Int("ovm.cache.shardCount", len(sc.shards)), + ), + ) + defer span.End() + + ck := CacheKeyFromParts(srcName, method, scope, typ, query) + + if ignoreCache { + span.SetAttributes( + attribute.String("ovm.cache.result", "ignore cache"), + attribute.Bool("ovm.cache.hit", false), + ) + return false, ck, nil, nil, noopDone + } + + items, err := sc.searchByKey(ctx, ck) + + if err != nil { + var qErr *sdp.QueryError + if errors.Is(err, ErrCacheNotFound) { + shouldWork, entry := sc.pending.StartWork(ck.String()) + if shouldWork { + span.SetAttributes( + attribute.String("ovm.cache.result", "cache miss"), + attribute.Bool("ovm.cache.hit", false), + attribute.Bool("ovm.cache.workPending", false), + ) + return false, ck, nil, nil, sc.createDoneFunc(ck) + } + + pendingWaitStart := time.Now() + ok := sc.pending.Wait(ctx, entry) + pendingWaitDuration := time.Since(pendingWaitStart) + span.SetAttributes( + attribute.Float64("ovm.cache.pendingWaitDuration_ms", float64(pendingWaitDuration.Milliseconds())), + attribute.Bool("ovm.cache.pendingWaitSuccess", ok), + ) + + if !ok { + span.SetAttributes( + attribute.String("ovm.cache.result", "pending work cancelled or timeout"), + attribute.Bool("ovm.cache.hit", false), + ) + return false, ck, nil, nil, noopDone + } + + items, recheckErr := sc.searchByKey(ctx, ck) + if recheckErr != nil { + if errors.Is(recheckErr, ErrCacheNotFound) { + 
span.SetAttributes( + attribute.String("ovm.cache.result", "pending work completed but cache still empty"), + attribute.Bool("ovm.cache.hit", false), + ) + return false, ck, nil, nil, noopDone + } + var recheckQErr *sdp.QueryError + if errors.As(recheckErr, &recheckQErr) { + span.SetAttributes( + attribute.String("ovm.cache.result", "cache hit from pending work: error"), + attribute.Bool("ovm.cache.hit", true), + ) + return true, ck, nil, recheckQErr, noopDone + } + span.SetAttributes( + attribute.String("ovm.cache.result", "unexpected error on re-check"), + attribute.Bool("ovm.cache.hit", false), + ) + return false, ck, nil, nil, noopDone + } + + span.SetAttributes( + attribute.String("ovm.cache.result", "cache hit from pending work"), + attribute.Int("ovm.cache.numItems", len(items)), + attribute.Bool("ovm.cache.hit", true), + ) + return true, ck, items, nil, noopDone + } else if errors.As(err, &qErr) { + if qErr.GetErrorType() == sdp.QueryError_NOTFOUND { + span.SetAttributes(attribute.String("ovm.cache.result", "cache hit: item not found")) + } else { + span.SetAttributes( + attribute.String("ovm.cache.result", "cache hit: QueryError"), + attribute.String("ovm.cache.error", err.Error()), + ) + } + span.SetAttributes(attribute.Bool("ovm.cache.hit", true)) + return true, ck, nil, qErr, noopDone + } else { + qErr = &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: err.Error(), + Scope: scope, + SourceName: srcName, + ItemType: typ, + } + span.SetAttributes( + attribute.String("ovm.cache.error", err.Error()), + attribute.String("ovm.cache.result", "cache hit: unknown QueryError"), + attribute.Bool("ovm.cache.hit", true), + ) + return true, ck, nil, qErr, noopDone + } + } + + if method == sdp.QueryMethod_GET { + if len(items) < 2 { + span.SetAttributes( + attribute.String("ovm.cache.result", "cache hit: 1 item"), + attribute.Int("ovm.cache.numItems", len(items)), + attribute.Bool("ovm.cache.hit", true), + ) + return true, ck, items, nil, noopDone + 
} + span.SetAttributes( + attribute.String("ovm.cache.result", "cache returned >1 value, purging and continuing"), + attribute.Int("ovm.cache.numItems", len(items)), + attribute.Bool("ovm.cache.hit", false), + ) + sc.Delete(ck) + return false, ck, nil, nil, noopDone + } + + span.SetAttributes( + attribute.String("ovm.cache.result", "cache hit: multiple items"), + attribute.Int("ovm.cache.numItems", len(items)), + attribute.Bool("ovm.cache.hit", true), + ) + return true, ck, items, nil, noopDone +} + +// searchByKey routes GET queries to a single shard and LIST/SEARCH/unspecified +// queries to all shards via fan-out. +func (sc *ShardedCache) searchByKey(ctx context.Context, ck CacheKey) ([]*sdp.Item, error) { + span := trace.SpanFromContext(ctx) + + if ck.UniqueAttributeValue != nil { + idx := sc.shardFor(ck.SST.Hash(), *ck.UniqueAttributeValue) + span.SetAttributes( + attribute.Int("ovm.cache.shardIndex", idx), + attribute.Bool("ovm.cache.fanOut", false), + ) + return sc.shards[idx].Search(ctx, ck) + } + + return sc.searchAll(ctx, ck) +} + +// searchAll fans out a search to all shards in parallel and merges results. 
+func (sc *ShardedCache) searchAll(ctx context.Context, ck CacheKey) ([]*sdp.Item, error) { + span := trace.SpanFromContext(ctx) + span.SetAttributes(attribute.Bool("ovm.cache.fanOut", true)) + + type result struct { + items []*sdp.Item + err error + dur time.Duration + } + results := make([]result, len(sc.shards)) + + var wg sync.WaitGroup + for i, shard := range sc.shards { + wg.Add(1) + go func(i int, shard *BoltCache) { + defer wg.Done() + start := time.Now() + items, err := shard.Search(ctx, ck) + results[i] = result{items: items, err: err, dur: time.Since(start)} + }(i, shard) + } + wg.Wait() + + var ( + allItems []*sdp.Item + maxDur time.Duration + shardsWithResult int + firstErr error + allNotFound = true + ) + + for _, r := range results { + if r.dur > maxDur { + maxDur = r.dur + } + if r.err != nil { + if errors.Is(r.err, ErrCacheNotFound) { + continue + } + allNotFound = false + if firstErr == nil { + firstErr = r.err + } + continue + } + allNotFound = false + if len(r.items) > 0 { + shardsWithResult++ + allItems = append(allItems, r.items...) + } + } + + span.SetAttributes( + attribute.Float64("ovm.cache.fanOutMaxMs", float64(maxDur.Milliseconds())), + attribute.Int("ovm.cache.shardsWithResults", shardsWithResult), + ) + + if firstErr != nil { + return nil, firstErr + } + + if allNotFound { + return nil, ErrCacheNotFound + } + + return allItems, nil +} + +// StoreItem routes the item to one shard based on its UniqueAttributeValue. 
+func (sc *ShardedCache) StoreItem(ctx context.Context, item *sdp.Item, duration time.Duration, ck CacheKey) { + if item == nil { + return + } + + sstHash := ck.SST.Hash() + uav := item.UniqueAttributeValue() + idx := sc.shardFor(sstHash, uav) + + span := trace.SpanFromContext(ctx) + span.SetAttributes(attribute.Int("ovm.cache.shardIndex", idx)) + + sc.shards[idx].StoreItem(ctx, item, duration, ck) +} + +// StoreError routes the error based on the CacheKey: +// - GET errors (UniqueAttributeValue set) go to the same shard a GET Lookup would query. +// - LIST/SEARCH errors go to shard 0 as a deterministic default; fan-out reads will find them. +func (sc *ShardedCache) StoreError(ctx context.Context, err error, duration time.Duration, ck CacheKey) { + if err == nil { + return + } + + var idx int + if ck.UniqueAttributeValue != nil { + idx = sc.shardFor(ck.SST.Hash(), *ck.UniqueAttributeValue) + } + + span := trace.SpanFromContext(ctx) + span.SetAttributes(attribute.Int("ovm.cache.shardIndex", idx)) + + sc.shards[idx].StoreError(ctx, err, duration, ck) +} + +// Delete fans out to all shards. +func (sc *ShardedCache) Delete(ck CacheKey) { + var wg sync.WaitGroup + for _, shard := range sc.shards { + wg.Add(1) + go func(s *BoltCache) { + defer wg.Done() + s.Delete(ck) + }(shard) + } + wg.Wait() +} + +// Clear fans out to all shards. +func (sc *ShardedCache) Clear() { + var wg sync.WaitGroup + for _, shard := range sc.shards { + wg.Add(1) + go func(s *BoltCache) { + defer wg.Done() + s.Clear() + }(shard) + } + wg.Wait() +} + +// Purge fans out to all shards in parallel and aggregates PurgeStats. +// TimeTaken reflects wall-clock time of the parallel fan-out, not the sum of +// per-shard durations. 
+func (sc *ShardedCache) Purge(ctx context.Context, before time.Time) PurgeStats { + type result struct { + stats PurgeStats + } + results := make([]result, len(sc.shards)) + + start := time.Now() + + var wg sync.WaitGroup + for i, shard := range sc.shards { + wg.Add(1) + go func(i int, s *BoltCache) { + defer wg.Done() + results[i] = result{stats: s.Purge(ctx, before)} + }(i, shard) + } + wg.Wait() + + combined := PurgeStats{ + TimeTaken: time.Since(start), + } + for _, r := range results { + combined.NumPurged += r.stats.NumPurged + if r.stats.NextExpiry != nil { + if combined.NextExpiry == nil || r.stats.NextExpiry.Before(*combined.NextExpiry) { + combined.NextExpiry = r.stats.NextExpiry + } + } + } + return combined +} + +// GetMinWaitTime returns the minimum wait time from the first shard. +func (sc *ShardedCache) GetMinWaitTime() time.Duration { + if len(sc.shards) == 0 { + return 0 + } + return sc.shards[0].GetMinWaitTime() +} + +// StartPurger starts a purger on each shard independently. +func (sc *ShardedCache) StartPurger(ctx context.Context) { + for _, shard := range sc.shards { + shard.StartPurger(ctx) + } +} + +// CloseAndDestroy closes and destroys all shard files in parallel, then removes +// the shard directory. +func (sc *ShardedCache) CloseAndDestroy() error { + errs := make([]error, len(sc.shards)) + + var wg sync.WaitGroup + for i, shard := range sc.shards { + wg.Add(1) + go func(i int, s *BoltCache) { + defer wg.Done() + errs[i] = s.CloseAndDestroy() + }(i, shard) + } + wg.Wait() + + for _, err := range errs { + if err != nil { + return err + } + } + + return os.RemoveAll(sc.dir) +} + +// createDoneFunc returns a done function that calls pending.Complete for the +// given cache key. Safe to call multiple times (idempotent via sync.Once). 
+func (sc *ShardedCache) createDoneFunc(ck CacheKey) func() { + if sc == nil || sc.pending == nil { + return noopDone + } + key := ck.String() + var once sync.Once + return func() { + once.Do(func() { + sc.pending.Complete(key) + }) + } +} + +// newShardedCacheForProduction is used by NewCache to create a production +// ShardedCache with appropriate defaults. It logs and falls back to MemoryCache +// on failure. +func newShardedCacheForProduction(ctx context.Context) Cache { + dir, err := os.MkdirTemp("", "sdpcache-shards-*") + if err != nil { + sentry.CaptureException(err) + log.WithError(err).Error("Failed to create temp dir for ShardedCache, using memory cache instead") + cache := NewMemoryCache() + cache.StartPurger(ctx) + return cache + } + + perShardThreshold := int64(1*1024*1024*1024) / int64(DefaultShardCount) + + cache, err := NewShardedCache( + dir, + DefaultShardCount, + WithMinWaitTime(30*time.Second), + WithCompactThreshold(perShardThreshold), + ) + if err != nil { + sentry.CaptureException(err) + log.WithError(err).Error("Failed to create ShardedCache, using memory cache instead") + _ = os.RemoveAll(dir) + memCache := NewMemoryCache() + memCache.StartPurger(ctx) + return memCache + } + + cache.StartPurger(ctx) + return cache +} diff --git a/go/sdpcache/sharded_cache_test.go b/go/sdpcache/sharded_cache_test.go new file mode 100644 index 00000000..fd656161 --- /dev/null +++ b/go/sdpcache/sharded_cache_test.go @@ -0,0 +1,659 @@ +package sdpcache + +import ( + "context" + "fmt" + "math" + "os" + "path/filepath" + "sync" + "testing" + "time" + + "github.com/overmindtech/cli/go/sdp-go" +) + +func TestShardDistributionUniformity(t *testing.T) { + dir := filepath.Join(t.TempDir(), "shards") + cache, err := NewShardedCache(dir, DefaultShardCount) + if err != nil { + t.Fatalf("failed to create ShardedCache: %v", err) + } + defer func() { _ = cache.CloseAndDestroy() }() + + ctx := t.Context() + numItems := 1000 + + // Use the same SST for all items so they share 
the same BoltDB SST bucket. + // Different UAVs cause items to distribute across shards via shardFor(). + sst := SST{SourceName: "test-source", Scope: "scope", Type: "type"} + method := sdp.QueryMethod_LIST + ck := CacheKey{SST: sst, Method: &method} + + for i := range numItems { + item := GenerateRandomItem() + item.Scope = sst.Scope + item.Type = sst.Type + item.Metadata.SourceName = sst.SourceName + + attrs := make(map[string]any) + attrs["name"] = fmt.Sprintf("item-%d", i) + attributes, _ := sdp.ToAttributes(attrs) + item.Attributes = attributes + + cache.StoreItem(ctx, item, 10*time.Second, ck) + } + + // Count items per shard by searching each shard with the common SST + counts := make([]int, DefaultShardCount) + for i, shard := range cache.shards { + items, searchErr := shard.Search(ctx, ck) + if searchErr == nil { + counts[i] = len(items) + } + } + + totalFound := 0 + for _, c := range counts { + totalFound += c + } + + if totalFound != numItems { + t.Errorf("expected %d total items across shards, got %d", numItems, totalFound) + } + + // Verify distribution is reasonably uniform: no shard should have more than + // 3× the expected average (very loose bound to avoid flaky tests). 
+ expected := float64(numItems) / float64(DefaultShardCount) + for i, c := range counts { + if float64(c) > expected*3 { + t.Errorf("shard %d has %d items, expected roughly %.0f (3× threshold: %.0f)", i, c, expected, expected*3) + } + } + + // Chi-squared test for uniformity (p < 0.001 threshold) + var chiSq float64 + for _, c := range counts { + diff := float64(c) - expected + chiSq += (diff * diff) / expected + } + // Critical value for df=16, p=0.001 is ~39.25 + if chiSq > 39.25 { + t.Errorf("chi-squared %.2f exceeds critical value 39.25 (df=16, p=0.001), distribution may be non-uniform: %v", chiSq, counts) + } +} + +func TestShardedCacheGETRoutesToCorrectShard(t *testing.T) { + dir := filepath.Join(t.TempDir(), "shards") + cache, err := NewShardedCache(dir, DefaultShardCount) + if err != nil { + t.Fatalf("failed to create ShardedCache: %v", err) + } + defer func() { _ = cache.CloseAndDestroy() }() + + ctx := t.Context() + sst := SST{SourceName: "test", Scope: "scope", Type: "type"} + method := sdp.QueryMethod_GET + + item := GenerateRandomItem() + item.Scope = sst.Scope + item.Type = sst.Type + item.Metadata.SourceName = sst.SourceName + + uav := item.UniqueAttributeValue() + ck := CacheKey{SST: sst, Method: &method, UniqueAttributeValue: &uav} + cache.StoreItem(ctx, item, 10*time.Second, ck) + + // Verify the item lands on the expected shard + expectedShard := cache.shardFor(sst.Hash(), uav) + items, err := cache.shards[expectedShard].Search(ctx, ck) + if err != nil { + t.Fatalf("expected item on shard %d, got error: %v", expectedShard, err) + } + if len(items) != 1 { + t.Fatalf("expected 1 item on shard %d, got %d", expectedShard, len(items)) + } + + // Verify Lookup returns the item + hit, _, cachedItems, qErr, done := cache.Lookup(ctx, sst.SourceName, method, sst.Scope, sst.Type, uav, false) + defer done() + if qErr != nil { + t.Fatalf("unexpected error: %v", qErr) + } + if !hit { + t.Fatal("expected cache hit") + } + if len(cachedItems) != 1 { + 
t.Fatalf("expected 1 item, got %d", len(cachedItems)) + } +} + +func TestShardedCacheLISTFanOutMerge(t *testing.T) { + dir := filepath.Join(t.TempDir(), "shards") + cache, err := NewShardedCache(dir, DefaultShardCount) + if err != nil { + t.Fatalf("failed to create ShardedCache: %v", err) + } + defer func() { _ = cache.CloseAndDestroy() }() + + ctx := t.Context() + sst := SST{SourceName: "test", Scope: "scope", Type: "type"} + method := sdp.QueryMethod_LIST + ck := CacheKey{SST: sst, Method: &method} + + // Store items that should land on different shards + numItems := 50 + for i := range numItems { + item := GenerateRandomItem() + item.Scope = sst.Scope + item.Type = sst.Type + item.Metadata.SourceName = sst.SourceName + + attrs := make(map[string]any) + attrs["name"] = fmt.Sprintf("item-%d", i) + attributes, _ := sdp.ToAttributes(attrs) + item.Attributes = attributes + + cache.StoreItem(ctx, item, 10*time.Second, ck) + } + + // LIST should fan out and return all items + hit, _, items, qErr, done := cache.Lookup(ctx, sst.SourceName, method, sst.Scope, sst.Type, "", false) + defer done() + if qErr != nil { + t.Fatalf("unexpected error: %v", qErr) + } + if !hit { + t.Fatal("expected cache hit") + } + if len(items) != numItems { + t.Errorf("expected %d items from LIST fan-out, got %d", numItems, len(items)) + } +} + +func TestShardedCacheCrossShardLIST(t *testing.T) { + dir := filepath.Join(t.TempDir(), "shards") + // Use a small shard count for easier verification + cache, err := NewShardedCache(dir, 3) + if err != nil { + t.Fatalf("failed to create ShardedCache: %v", err) + } + defer func() { _ = cache.CloseAndDestroy() }() + + ctx := t.Context() + sst := SST{SourceName: "test", Scope: "scope", Type: "type"} + method := sdp.QueryMethod_LIST + ck := CacheKey{SST: sst, Method: &method} + + // Store enough items that at least 2 shards get items + storedNames := make(map[string]bool) + for i := range 30 { + item := GenerateRandomItem() + item.Scope = sst.Scope + 
item.Type = sst.Type + item.Metadata.SourceName = sst.SourceName + + name := fmt.Sprintf("cross-shard-%d", i) + attrs := make(map[string]any) + attrs["name"] = name + attributes, _ := sdp.ToAttributes(attrs) + item.Attributes = attributes + + cache.StoreItem(ctx, item, 10*time.Second, ck) + storedNames[name] = true + } + + // Count items per shard + shardsWithItems := 0 + for _, shard := range cache.shards { + items, err := shard.Search(ctx, ck) + if err == nil && len(items) > 0 { + shardsWithItems++ + } + } + + if shardsWithItems < 2 { + t.Errorf("expected items on at least 2 shards, got %d", shardsWithItems) + } + + // LIST fan-out should return all items regardless of shard + items, err := cache.searchAll(ctx, ck) + if err != nil { + t.Fatalf("searchAll failed: %v", err) + } + if len(items) != 30 { + t.Errorf("expected 30 items from fan-out, got %d", len(items)) + } +} + +func TestShardedCachePendingWorkDeduplication(t *testing.T) { + dir := filepath.Join(t.TempDir(), "shards") + cache, err := NewShardedCache(dir, DefaultShardCount) + if err != nil { + t.Fatalf("failed to create ShardedCache: %v", err) + } + defer func() { _ = cache.CloseAndDestroy() }() + + ctx := t.Context() + sst := SST{SourceName: "dedup-test", Scope: "scope", Type: "type"} + method := sdp.QueryMethod_LIST + + var workCount int32 + var mu sync.Mutex + var wg sync.WaitGroup + + numGoroutines := 10 + results := make([]struct { + hit bool + items []*sdp.Item + }, numGoroutines) + + startBarrier := make(chan struct{}) + + for i := range numGoroutines { + wg.Add(1) + go func(idx int) { + defer wg.Done() + <-startBarrier + + hit, ck, items, _, done := cache.Lookup(ctx, sst.SourceName, method, sst.Scope, sst.Type, "", false) + defer done() + + if !hit { + mu.Lock() + workCount++ + mu.Unlock() + + time.Sleep(50 * time.Millisecond) + + item := GenerateRandomItem() + item.Scope = sst.Scope + item.Type = sst.Type + item.Metadata.SourceName = sst.SourceName + + cache.StoreItem(ctx, item, 10*time.Second, 
ck) + hit, _, items, _, done = cache.Lookup(ctx, sst.SourceName, method, sst.Scope, sst.Type, "", false) + defer done() + } + + results[idx] = struct { + hit bool + items []*sdp.Item + }{hit, items} + }(i) + } + + close(startBarrier) + wg.Wait() + + if workCount != 1 { + t.Errorf("expected exactly 1 goroutine to do work, got %d", workCount) + } + + for i, r := range results { + if !r.hit { + t.Errorf("goroutine %d: expected cache hit after dedup, got miss", i) + } + if len(r.items) != 1 { + t.Errorf("goroutine %d: expected 1 item, got %d", i, len(r.items)) + } + } +} + +func TestShardedCacheCloseAndDestroy(t *testing.T) { + dir := filepath.Join(t.TempDir(), "shards") + cache, err := NewShardedCache(dir, DefaultShardCount) + if err != nil { + t.Fatalf("failed to create ShardedCache: %v", err) + } + + ctx := t.Context() + item := GenerateRandomItem() + ck := CacheKeyFromQuery(item.GetMetadata().GetSourceQuery(), item.GetMetadata().GetSourceName()) + cache.StoreItem(ctx, item, 10*time.Second, ck) + + // Verify shard files exist + entries, err := os.ReadDir(dir) + if err != nil { + t.Fatalf("failed to read shard directory: %v", err) + } + if len(entries) != DefaultShardCount { + t.Errorf("expected %d shard files, got %d", DefaultShardCount, len(entries)) + } + + // Close and destroy + if err := cache.CloseAndDestroy(); err != nil { + t.Fatalf("CloseAndDestroy failed: %v", err) + } + + // Verify the directory is removed + if _, err := os.Stat(dir); !os.IsNotExist(err) { + t.Error("shard directory should be removed after CloseAndDestroy") + } +} + +func BenchmarkShardedCacheVsSingleBoltCache(b *testing.B) { + implementations := []struct { + name string + factory func(b *testing.B) Cache + }{ + {"BoltCache", func(b *testing.B) Cache { + c, err := NewBoltCache(filepath.Join(b.TempDir(), "cache.db")) + if err != nil { + b.Fatalf("failed to create BoltCache: %v", err) + } + b.Cleanup(func() { _ = c.CloseAndDestroy() }) + return c + }}, + {"ShardedCache", func(b *testing.B) 
Cache { + c, err := NewShardedCache( + filepath.Join(b.TempDir(), "shards"), + DefaultShardCount, + ) + if err != nil { + b.Fatalf("failed to create ShardedCache: %v", err) + } + b.Cleanup(func() { _ = c.CloseAndDestroy() }) + return c + }}, + } + + for _, impl := range implementations { + b.Run(impl.name+"/ConcurrentWrite", func(b *testing.B) { + cache := impl.factory(b) + ctx := context.Background() + + sst := SST{SourceName: "bench", Scope: "scope", Type: "type"} + method := sdp.QueryMethod_LIST + ck := CacheKey{SST: sst, Method: &method} + + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + item := GenerateRandomItem() + item.Scope = sst.Scope + item.Type = sst.Type + item.Metadata.SourceName = sst.SourceName + cache.StoreItem(ctx, item, 10*time.Second, ck) + } + }) + }) + } +} + +func TestShardedCacheShardForDeterminism(t *testing.T) { + dir := filepath.Join(t.TempDir(), "shards") + cache, err := NewShardedCache(dir, DefaultShardCount) + if err != nil { + t.Fatalf("failed to create ShardedCache: %v", err) + } + defer func() { _ = cache.CloseAndDestroy() }() + + sst := SST{SourceName: "test", Scope: "scope", Type: "type"} + sstHash := sst.Hash() + + // Same input should always produce the same shard + for range 100 { + idx1 := cache.shardFor(sstHash, "my-unique-value") + idx2 := cache.shardFor(sstHash, "my-unique-value") + if idx1 != idx2 { + t.Fatalf("shardFor is not deterministic: got %d and %d", idx1, idx2) + } + } + + // Different UAVs should produce different shards (at least some of the time) + shardsSeen := make(map[int]bool) + for i := range 100 { + idx := cache.shardFor(sstHash, fmt.Sprintf("value-%d", i)) + shardsSeen[idx] = true + } + if len(shardsSeen) < 2 { + t.Error("expected different UAVs to hash to different shards") + } +} + +func TestShardedCacheErrorRouting(t *testing.T) { + dir := filepath.Join(t.TempDir(), "shards") + cache, err := NewShardedCache(dir, DefaultShardCount) + if err != nil { + t.Fatalf("failed to 
create ShardedCache: %v", err) + } + defer func() { _ = cache.CloseAndDestroy() }() + + ctx := t.Context() + + t.Run("GET error routes to same shard as GET lookup", func(t *testing.T) { + sst := SST{SourceName: "err-test", Scope: "scope", Type: "type"} + method := sdp.QueryMethod_GET + uav := "my-item" + ck := CacheKey{SST: sst, Method: &method, UniqueAttributeValue: &uav} + + qErr := &sdp.QueryError{ + ErrorType: sdp.QueryError_NOTFOUND, + ErrorString: "not found", + } + cache.StoreError(ctx, qErr, 10*time.Second, ck) + + // Lookup should find the error + hit, _, _, returnedErr, done := cache.Lookup(ctx, sst.SourceName, method, sst.Scope, sst.Type, uav, false) + defer done() + if !hit { + t.Fatal("expected cache hit for stored error") + } + if returnedErr == nil { + t.Fatal("expected error to be returned") + } + if returnedErr.GetErrorType() != sdp.QueryError_NOTFOUND { + t.Errorf("expected NOTFOUND, got %v", returnedErr.GetErrorType()) + } + }) + + t.Run("LIST error routes to shard 0 and is found via fan-out", func(t *testing.T) { + sst := SST{SourceName: "list-err-test", Scope: "scope", Type: "type"} + method := sdp.QueryMethod_LIST + ck := CacheKey{SST: sst, Method: &method} + + qErr := &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "list failed", + } + cache.StoreError(ctx, qErr, 10*time.Second, ck) + + // LIST lookup fans out, should find the error on shard 0 + hit, _, _, returnedErr, done := cache.Lookup(ctx, sst.SourceName, method, sst.Scope, sst.Type, "", false) + defer done() + if !hit { + t.Fatal("expected cache hit for stored LIST error") + } + if returnedErr == nil { + t.Fatal("expected error to be returned") + } + if returnedErr.GetErrorType() != sdp.QueryError_OTHER { + t.Errorf("expected OTHER, got %v", returnedErr.GetErrorType()) + } + }) +} + +func TestShardedCacheNewCacheFallback(t *testing.T) { + ctx := t.Context() + cache := NewCache(ctx) + + if cache == nil { + t.Fatal("NewCache returned nil") + } + + // Should be a 
ShardedCache in normal operation + if _, ok := cache.(*ShardedCache); !ok { + t.Logf("NewCache returned %T (may be MemoryCache if ShardedCache creation failed)", cache) + } + + // Basic operation test + item := GenerateRandomItem() + ck := CacheKeyFromQuery(item.GetMetadata().GetSourceQuery(), item.GetMetadata().GetSourceName()) + cache.StoreItem(ctx, item, 10*time.Second, ck) + + hit, _, items, qErr, done := cache.Lookup(ctx, + item.GetMetadata().GetSourceName(), + sdp.QueryMethod_GET, + item.GetScope(), + item.GetType(), + item.UniqueAttributeValue(), + false, + ) + defer done() + if qErr != nil { + t.Fatalf("unexpected error: %v", qErr) + } + if !hit { + t.Fatal("expected cache hit") + } + if len(items) != 1 { + t.Fatalf("expected 1 item, got %d", len(items)) + } +} + +func TestShardedCacheCompactThresholdScaling(t *testing.T) { + dir := filepath.Join(t.TempDir(), "shards") + + parentThreshold := int64(1 * 1024 * 1024 * 1024) // 1GB + perShardThreshold := parentThreshold / int64(DefaultShardCount) + + cache, err := NewShardedCache(dir, DefaultShardCount, + WithCompactThreshold(perShardThreshold), + ) + if err != nil { + t.Fatalf("failed to create ShardedCache: %v", err) + } + defer func() { _ = cache.CloseAndDestroy() }() + + expectedPerShard := parentThreshold / int64(DefaultShardCount) + for i, shard := range cache.shards { + if shard.CompactThreshold != expectedPerShard { + t.Errorf("shard %d: expected CompactThreshold %d, got %d", i, expectedPerShard, shard.CompactThreshold) + } + } +} + +func TestShardedCacheInvalidShardCount(t *testing.T) { + dir := filepath.Join(t.TempDir(), "shards") + + _, err := NewShardedCache(dir, 0) + if err == nil { + t.Error("expected error for shard count 0") + } + + _, err = NewShardedCache(dir, -1) + if err == nil { + t.Error("expected error for negative shard count") + } +} + +func TestShardedCacheConcurrentWriteThroughput(t *testing.T) { + dir := filepath.Join(t.TempDir(), "shards") + cache, err := NewShardedCache(dir, 
DefaultShardCount) + if err != nil { + t.Fatalf("failed to create ShardedCache: %v", err) + } + defer func() { _ = cache.CloseAndDestroy() }() + + ctx := t.Context() + sst := SST{SourceName: "concurrent", Scope: "scope", Type: "type"} + method := sdp.QueryMethod_LIST + ck := CacheKey{SST: sst, Method: &method} + + var wg sync.WaitGroup + numParallel := 100 + + for i := range numParallel { + idx := i + wg.Go(func() { + item := GenerateRandomItem() + item.Scope = sst.Scope + item.Type = sst.Type + item.Metadata.SourceName = sst.SourceName + + attrs := make(map[string]any) + attrs["name"] = fmt.Sprintf("concurrent-item-%d", idx) + attributes, _ := sdp.ToAttributes(attrs) + item.Attributes = attributes + + cache.StoreItem(ctx, item, 10*time.Second, ck) + }) + } + + wg.Wait() + + items, searchErr := cache.searchAll(ctx, ck) + if searchErr != nil { + t.Fatalf("searchAll failed: %v", searchErr) + } + if len(items) != numParallel { + t.Errorf("expected %d items, got %d", numParallel, len(items)) + } +} + +func TestShardedCachePurgeAggregation(t *testing.T) { + dir := filepath.Join(t.TempDir(), "shards") + cache, err := NewShardedCache(dir, 3) // Small count for easier verification + if err != nil { + t.Fatalf("failed to create ShardedCache: %v", err) + } + defer func() { _ = cache.CloseAndDestroy() }() + + ctx := t.Context() + sst := SST{SourceName: "purge", Scope: "scope", Type: "type"} + method := sdp.QueryMethod_LIST + ck := CacheKey{SST: sst, Method: &method} + + // Store items with short expiry + for range 10 { + item := GenerateRandomItem() + item.Scope = sst.Scope + item.Type = sst.Type + item.Metadata.SourceName = sst.SourceName + cache.StoreItem(ctx, item, 100*time.Millisecond, ck) + } + + // Wait for expiry + time.Sleep(200 * time.Millisecond) + + // Purge and check aggregated stats + stats := cache.Purge(ctx, time.Now()) + if stats.NumPurged != 10 { + t.Errorf("expected 10 items purged, got %d", stats.NumPurged) + } +} + +// TestShardedCacheShardForBounds 
verifies that shardFor always returns a valid +// index in [0, shardCount). +func TestShardedCacheShardForBounds(t *testing.T) { + dir := filepath.Join(t.TempDir(), "shards") + cache, err := NewShardedCache(dir, DefaultShardCount) + if err != nil { + t.Fatalf("failed to create ShardedCache: %v", err) + } + defer func() { _ = cache.CloseAndDestroy() }() + + for i := range 10000 { + idx := cache.shardFor(SSTHash(fmt.Sprintf("hash-%d", i)), fmt.Sprintf("uav-%d", i)) + if idx < 0 || idx >= DefaultShardCount { + t.Fatalf("shardFor returned out-of-bounds index %d for shard count %d", idx, DefaultShardCount) + } + } +} + +// TestShardedCacheFNV32aOverflow verifies that the FNV-32a hash mod operation +// works correctly with uint32 values close to math.MaxUint32. +func TestShardedCacheFNV32aOverflow(t *testing.T) { + dir := filepath.Join(t.TempDir(), "shards") + cache, err := NewShardedCache(dir, DefaultShardCount) + if err != nil { + t.Fatalf("failed to create ShardedCache: %v", err) + } + defer func() { _ = cache.CloseAndDestroy() }() + + // These are just strings; the test verifies no panic from the modulo arithmetic + _ = cache.shardFor(SSTHash(fmt.Sprintf("%d", math.MaxUint32)), "test") + _ = cache.shardFor(SSTHash(""), "") + _ = cache.shardFor(SSTHash("a"), "b") +} diff --git a/go/tracing/deferlog.go b/go/tracing/deferlog.go index b2fb04c7..b1717551 100644 --- a/go/tracing/deferlog.go +++ b/go/tracing/deferlog.go @@ -56,7 +56,7 @@ func LogRecoverToExit(ctx context.Context, loc string) { os.Exit(1) } -func HandleError(ctx context.Context, loc string, err interface{}, stack string) { +func HandleError(ctx context.Context, loc string, err any, stack string) { msg := fmt.Sprintf("unhandled panic in %v, exiting: %v", loc, err) hub := sentry.CurrentHub() diff --git a/go/tracing/main.go b/go/tracing/main.go index 013b65e3..6bb88933 100644 --- a/go/tracing/main.go +++ b/go/tracing/main.go @@ -298,10 +298,6 @@ func NewOvermindSampler() *OvermindSampler { SampleRate: 200, 
ShouldSample: UserAgentMatcher("ELB-HealthChecker/2.0", "kube-probe/1.27+"), }, - { - SampleRate: 10, - ShouldSample: SpanNameMatcher("pool.acquire"), - }, } // Pre-allocate samplers for each rule @@ -338,13 +334,6 @@ func UserAgentMatcher(userAgents ...string) func(sdktrace.SamplingParameters) bo } } -// SpanNameMatcher returns a function that matches specific span names -func SpanNameMatcher(spanNames ...string) func(sdktrace.SamplingParameters) bool { - return func(parameters sdktrace.SamplingParameters) bool { - return slices.Contains(spanNames, parameters.Name) - } -} - // ShouldSample evaluates rules in order and returns the first matching decision func (o *OvermindSampler) ShouldSample(parameters sdktrace.SamplingParameters) sdktrace.SamplingResult { for i, rule := range o.rules { diff --git a/k8s-source/adapters/endpoints.go b/k8s-source/adapters/endpoints.go index ef90ec11..9fa04764 100644 --- a/k8s-source/adapters/endpoints.go +++ b/k8s-source/adapters/endpoints.go @@ -1,3 +1,13 @@ +// This adapter uses the deprecated core/v1.Endpoints API intentionally. +// +// We use the latest K8s SDK version but balance that against supporting as many +// Kubernetes versions as possible. Older clusters may not have the +// discoveryv1.EndpointSlice API, so we retain this adapter for backward +// compatibility. The staticcheck lint exceptions below are therefore expected +// and acceptable. When the SDK eventually drops support for v1.Endpoints we +// will need to split out version-specific builds of the k8s-source. 
+ +//nolint:staticcheck // See note at top of file package adapters import ( @@ -8,7 +18,7 @@ import ( "k8s.io/client-go/kubernetes" ) -func EndpointsExtractor(resource *v1.Endpoints, scope string) ([]*sdp.LinkedItemQuery, error) { //nolint:staticcheck +func EndpointsExtractor(resource *v1.Endpoints, scope string) ([]*sdp.LinkedItemQuery, error) { queries := make([]*sdp.LinkedItemQuery, 0) sd, err := ParseScope(scope, true) @@ -62,15 +72,15 @@ func EndpointsExtractor(resource *v1.Endpoints, scope string) ([]*sdp.LinkedItem } func newEndpointsAdapter(cs *kubernetes.Clientset, cluster string, namespaces []string, cache sdpcache.Cache) discovery.ListableAdapter { - return &KubeTypeAdapter[*v1.Endpoints, *v1.EndpointsList]{ //nolint:staticcheck + return &KubeTypeAdapter[*v1.Endpoints, *v1.EndpointsList]{ ClusterName: cluster, Namespaces: namespaces, TypeName: "Endpoints", - NamespacedInterfaceBuilder: func(namespace string) ItemInterface[*v1.Endpoints, *v1.EndpointsList] { //nolint:staticcheck + NamespacedInterfaceBuilder: func(namespace string) ItemInterface[*v1.Endpoints, *v1.EndpointsList] { return cs.CoreV1().Endpoints(namespace) }, - ListExtractor: func(list *v1.EndpointsList) ([]*v1.Endpoints, error) { //nolint:staticcheck - extracted := make([]*v1.Endpoints, len(list.Items)) //nolint:staticcheck + ListExtractor: func(list *v1.EndpointsList) ([]*v1.Endpoints, error) { + extracted := make([]*v1.Endpoints, len(list.Items)) for i := range list.Items { extracted[i] = &list.Items[i] diff --git a/k8s-source/adapters/endpointslice.go b/k8s-source/adapters/endpointslice.go index b2fc027e..9ac3ffa5 100644 --- a/k8s-source/adapters/endpointslice.go +++ b/k8s-source/adapters/endpointslice.go @@ -20,6 +20,17 @@ func endpointSliceExtractor(resource *v1.EndpointSlice, scope string) ([]*sdp.Li return nil, err } + if serviceName, ok := resource.Labels["kubernetes.io/service-name"]; ok && serviceName != "" { + queries = append(queries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + 
Type: "Service", + Method: sdp.QueryMethod_GET, + Query: serviceName, + Scope: scope, + }, + }) + } + for _, endpoint := range resource.Endpoints { if endpoint.Hostname != nil { queries = append(queries, &sdp.LinkedItemQuery{ @@ -102,7 +113,7 @@ var endpointSliceAdapterMetadata = Metadata.Register(&sdp.AdapterMetadata{ Type: "EndpointSlice", DescriptiveName: "Endpoint Slice", Category: sdp.AdapterCategory_ADAPTER_CATEGORY_NETWORK, - PotentialLinks: []string{"Node", "Pod", "dns", "ip"}, + PotentialLinks: []string{"Node", "Pod", "dns", "ip", "Service"}, SupportedQueryMethods: DefaultSupportedQueryMethods("EndpointSlice"), TerraformMappings: []*sdp.TerraformMapping{ { diff --git a/k8s-source/adapters/endpointslice_test.go b/k8s-source/adapters/endpointslice_test.go index e1d99776..c9c2e0ed 100644 --- a/k8s-source/adapters/endpointslice_test.go +++ b/k8s-source/adapters/endpointslice_test.go @@ -59,6 +59,12 @@ func TestEndpointSliceAdapter(t *testing.T) { GetScope: sd.String(), SetupYAML: endpointSliceYAML, GetQueryTests: QueryTests{ + { + ExpectedType: "Service", + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "endpointslice-service", + ExpectedScope: sd.String(), + }, { ExpectedQueryMatches: regexp.MustCompile(`^10\.`), ExpectedType: "ip", diff --git a/k8s-source/adapters/generic_source.go b/k8s-source/adapters/generic_source.go index 6f331f59..9a139961 100644 --- a/k8s-source/adapters/generic_source.go +++ b/k8s-source/adapters/generic_source.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "slices" "time" "github.com/overmindtech/cli/go/sdp-go" @@ -312,13 +313,7 @@ var ignoredMetadataFields = []string{ } func ignored(key string) bool { - for _, ignoredKey := range ignoredMetadataFields { - if key == ignoredKey { - return true - } - } - - return false + return slices.Contains(ignoredMetadataFields, key) } // resourcesToItems Converts a slice of resources to a slice of items @@ -360,7 +355,7 @@ func (s *KubeTypeAdapter[Resource, ResourceList]) 
resourceToItem(resource Resour // Promote the metadata to the top level if metadata, err := attributes.Get("metadata"); err == nil { // Cast to a type we can iterate over - if metadataMap, ok := metadata.(map[string]interface{}); ok { + if metadataMap, ok := metadata.(map[string]any); ok { for key, value := range metadataMap { // Check that the key isn't in the ignored list if !ignored(key) { diff --git a/k8s-source/adapters/service.go b/k8s-source/adapters/service.go index 6c49ed96..462da57a 100644 --- a/k8s-source/adapters/service.go +++ b/k8s-source/adapters/service.go @@ -60,16 +60,28 @@ func serviceExtractor(resource *v1.Service, scope string) ([]*sdp.LinkedItemQuer }) } - // Services also generate an endpoint with the same name + // Services generate an Endpoints object with the same name (older K8s API) queries = append(queries, &sdp.LinkedItemQuery{ Query: &sdp.Query{ - Type: "Endpoint", + Type: "Endpoints", Method: sdp.QueryMethod_GET, Query: resource.Name, Scope: scope, }, }) + // Modern K8s clusters also create EndpointSlices labelled with the service name + queries = append(queries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: "EndpointSlice", + Method: sdp.QueryMethod_SEARCH, + Query: ListOptionsToQuery(&metaV1.ListOptions{ + LabelSelector: "kubernetes.io/service-name=" + resource.Name, + }), + Scope: scope, + }, + }) + for _, ingress := range resource.Status.LoadBalancer.Ingress { if ingress.IP != "" { queries = append(queries, &sdp.LinkedItemQuery{ @@ -124,7 +136,7 @@ var serviceAdapterMetadata = Metadata.Register(&sdp.AdapterMetadata{ Type: "Service", DescriptiveName: "Service", Category: sdp.AdapterCategory_ADAPTER_CATEGORY_NETWORK, - PotentialLinks: []string{"Pod", "ip", "dns", "Endpoint"}, + PotentialLinks: []string{"Pod", "ip", "dns", "Endpoints", "EndpointSlice"}, SupportedQueryMethods: DefaultSupportedQueryMethods("Service"), TerraformMappings: []*sdp.TerraformMapping{ { diff --git a/k8s-source/adapters/service_test.go 
b/k8s-source/adapters/service_test.go index d2fa89e1..a655550f 100644 --- a/k8s-source/adapters/service_test.go +++ b/k8s-source/adapters/service_test.go @@ -66,11 +66,17 @@ func TestServiceAdapter(t *testing.T) { ExpectedQueryMatches: regexp.MustCompile(`app=service-test`), }, { - ExpectedType: "Endpoint", + ExpectedType: "Endpoints", ExpectedMethod: sdp.QueryMethod_GET, ExpectedQuery: "service-test-service", ExpectedScope: sd.String(), }, + { + ExpectedType: "EndpointSlice", + ExpectedMethod: sdp.QueryMethod_SEARCH, + ExpectedScope: sd.String(), + ExpectedQueryMatches: regexp.MustCompile(`kubernetes\.io/service-name=service-test-service`), + }, { ExpectedType: "dns", ExpectedMethod: sdp.QueryMethod_SEARCH, diff --git a/k8s-source/build/package/Dockerfile b/k8s-source/build/package/Dockerfile index c9f3c6a8..f579cf84 100644 --- a/k8s-source/build/package/Dockerfile +++ b/k8s-source/build/package/Dockerfile @@ -1,5 +1,5 @@ # Build the source binary -FROM golang:1.25-alpine AS builder +FROM golang:1.26-alpine AS builder ARG TARGETOS ARG TARGETARCH ARG BUILD_VERSION diff --git a/knowledge/discover.go b/knowledge/discover.go index 5e92dbf6..d0468d65 100644 --- a/knowledge/discover.go +++ b/knowledge/discover.go @@ -45,6 +45,32 @@ const ( maxFileSize = 10 * 1024 * 1024 // 10MB ) +// FindKnowledgeDir walks up from startDir looking for a .overmind/knowledge/ +// directory. Returns the absolute path if found, or empty string if not. +// Stops at the repository root (.git boundary) or filesystem root to avoid +// picking up knowledge files from unrelated parent projects. 
+func FindKnowledgeDir(startDir string) string { + dir, err := filepath.Abs(startDir) + if err != nil { + return "" + } + for { + candidate := filepath.Join(dir, ".overmind", "knowledge") + if info, err := os.Stat(candidate); err == nil && info.IsDir() { + return candidate + } + if _, err := os.Stat(filepath.Join(dir, ".git")); err == nil { + break + } + parent := filepath.Dir(dir) + if parent == dir { + break + } + dir = parent + } + return "" +} + // Discover walks the knowledge directory and discovers all valid knowledge files // Returns valid files and any warnings encountered during discovery func Discover(knowledgeDir string) ([]KnowledgeFile, []Warning) { @@ -147,14 +173,14 @@ func processFile(path, relPath string) (*KnowledgeFile, *Warning) { Reason: fmt.Sprintf("cannot stat file: %v", err), } } - + if fileInfo.Size() > maxFileSize { return nil, &Warning{ Path: relPath, Reason: fmt.Sprintf("file size %d bytes exceeds maximum allowed size of %d bytes", fileInfo.Size(), maxFileSize), } } - + // Read file content content, err := os.ReadFile(path) if err != nil { @@ -213,7 +239,7 @@ func parseFrontmatter(content string) (string, string, string, error) { // Find the closing delimiter remaining := content[startIdx:] - + // Handle edge case: empty frontmatter where second --- is immediately after first if strings.HasPrefix(remaining, "---\n") || strings.HasPrefix(remaining, "---\r\n") { bodyStartIdx := startIdx + 4 // "---\n" @@ -221,16 +247,16 @@ func parseFrontmatter(content string) (string, string, string, error) { bodyStartIdx = startIdx + 5 // "---\r\n" } body := strings.TrimLeft(content[bodyStartIdx:], "\n\r") - + // Empty frontmatter will result in empty name/description which will fail validation var fm frontmatter return fm.Name, fm.Description, body, nil } - + // Find closing delimiter and track which type we found var endIdx int var closingDelimLen int - + // Try CRLF first (more specific), then LF endIdx = strings.Index(remaining, "\n---\r\n") if 
endIdx != -1 { @@ -268,10 +294,7 @@ func parseFrontmatter(content string) (string, string, string, error) { } // Extract body using the correct offset for the delimiter type found - bodyStartIdx := startIdx + endIdx + closingDelimLen - if bodyStartIdx > len(content) { - bodyStartIdx = len(content) - } + bodyStartIdx := min(startIdx+endIdx+closingDelimLen, len(content)) body := strings.TrimLeft(content[bodyStartIdx:], "\n\r") // Trim whitespace from name and description as per validation @@ -316,13 +339,17 @@ func validateDescription(description string) error { // This is a convenience function that combines discovery, warning logging, and conversion // to reduce code duplication across commands. func DiscoverAndConvert(ctx context.Context, knowledgeDir string) []*sdp.Knowledge { + if knowledgeDir != "" { + log.WithContext(ctx).WithField("knowledgeDir", knowledgeDir).Debug("Resolved knowledge directory") + } + knowledgeFiles, warnings := Discover(knowledgeDir) - + // Log warnings for _, w := range warnings { log.WithContext(ctx).Warnf("Warning: skipping knowledge file %q: %s", w.Path, w.Reason) } - + // Convert to SDP Knowledge messages sdpKnowledge := make([]*sdp.Knowledge, len(knowledgeFiles)) for i, kf := range knowledgeFiles { @@ -333,11 +360,11 @@ func DiscoverAndConvert(ctx context.Context, knowledgeDir string) []*sdp.Knowled FileName: kf.FileName, } } - + // Log when knowledge files are loaded if len(knowledgeFiles) > 0 { log.WithContext(ctx).WithField("knowledgeCount", len(knowledgeFiles)).Info("Loaded knowledge files") } - + return sdpKnowledge } diff --git a/knowledge/discover_test.go b/knowledge/discover_test.go index 2e8d21b0..1c953e04 100644 --- a/knowledge/discover_test.go +++ b/knowledge/discover_test.go @@ -651,6 +651,144 @@ M } } +// FindKnowledgeDir tests + +func TestFindKnowledgeDir_InCWD(t *testing.T) { + root := t.TempDir() + knowledgeDir := filepath.Join(root, ".overmind", "knowledge") + if err := os.MkdirAll(knowledgeDir, 0755); err != nil { + 
t.Fatal(err) + } + + result := FindKnowledgeDir(root) + + if result != knowledgeDir { + t.Errorf("expected %q, got %q", knowledgeDir, result) + } +} + +func TestFindKnowledgeDir_InParent(t *testing.T) { + root := t.TempDir() + knowledgeDir := filepath.Join(root, ".overmind", "knowledge") + if err := os.MkdirAll(knowledgeDir, 0755); err != nil { + t.Fatal(err) + } + childDir := filepath.Join(root, "environments", "prod") + if err := os.MkdirAll(childDir, 0755); err != nil { + t.Fatal(err) + } + + result := FindKnowledgeDir(childDir) + + if result != knowledgeDir { + t.Errorf("expected %q, got %q", knowledgeDir, result) + } +} + +func TestFindKnowledgeDir_InGrandparent(t *testing.T) { + root := t.TempDir() + knowledgeDir := filepath.Join(root, ".overmind", "knowledge") + if err := os.MkdirAll(knowledgeDir, 0755); err != nil { + t.Fatal(err) + } + deepDir := filepath.Join(root, "a", "b", "c") + if err := os.MkdirAll(deepDir, 0755); err != nil { + t.Fatal(err) + } + + result := FindKnowledgeDir(deepDir) + + if result != knowledgeDir { + t.Errorf("expected %q, got %q", knowledgeDir, result) + } +} + +func TestFindKnowledgeDir_StopsAtGitBoundary(t *testing.T) { + root := t.TempDir() + // Knowledge above the git boundary -- should NOT be found + knowledgeDir := filepath.Join(root, ".overmind", "knowledge") + if err := os.MkdirAll(knowledgeDir, 0755); err != nil { + t.Fatal(err) + } + // Git repo is a subdirectory + repoDir := filepath.Join(root, "my-repo") + if err := os.MkdirAll(filepath.Join(repoDir, ".git"), 0755); err != nil { + t.Fatal(err) + } + workDir := filepath.Join(repoDir, "environments", "prod") + if err := os.MkdirAll(workDir, 0755); err != nil { + t.Fatal(err) + } + + result := FindKnowledgeDir(workDir) + + if result != "" { + t.Errorf("expected empty string (should not escape .git boundary), got %q", result) + } +} + +func TestFindKnowledgeDir_CWDTakesPriority(t *testing.T) { + root := t.TempDir() + // Knowledge at root + rootKnowledge := 
filepath.Join(root, ".overmind", "knowledge") + if err := os.MkdirAll(rootKnowledge, 0755); err != nil { + t.Fatal(err) + } + // Knowledge also in subdirectory + childDir := filepath.Join(root, "sub") + childKnowledge := filepath.Join(childDir, ".overmind", "knowledge") + if err := os.MkdirAll(childKnowledge, 0755); err != nil { + t.Fatal(err) + } + + result := FindKnowledgeDir(childDir) + + if result != childKnowledge { + t.Errorf("expected CWD knowledge %q to take priority, got %q", childKnowledge, result) + } +} + +func TestFindKnowledgeDir_NotFoundAnywhere(t *testing.T) { + root := t.TempDir() + workDir := filepath.Join(root, "some", "dir") + if err := os.MkdirAll(workDir, 0755); err != nil { + t.Fatal(err) + } + // Place .git at root to create a boundary + if err := os.MkdirAll(filepath.Join(root, ".git"), 0755); err != nil { + t.Fatal(err) + } + + result := FindKnowledgeDir(workDir) + + if result != "" { + t.Errorf("expected empty string, got %q", result) + } +} + +func TestFindKnowledgeDir_GitBoundaryWithKnowledge(t *testing.T) { + root := t.TempDir() + // .git and .overmind/knowledge at the same level + if err := os.MkdirAll(filepath.Join(root, ".git"), 0755); err != nil { + t.Fatal(err) + } + knowledgeDir := filepath.Join(root, ".overmind", "knowledge") + if err := os.MkdirAll(knowledgeDir, 0755); err != nil { + t.Fatal(err) + } + workDir := filepath.Join(root, "environments", "prod") + if err := os.MkdirAll(workDir, 0755); err != nil { + t.Fatal(err) + } + + result := FindKnowledgeDir(workDir) + + // Should find knowledge at repo root before the .git stop triggers + if result != knowledgeDir { + t.Errorf("expected %q, got %q", knowledgeDir, result) + } +} + // Helper functions func writeFile(t *testing.T, path, content string) { diff --git a/sources/azure/build/package/Dockerfile b/sources/azure/build/package/Dockerfile index 3b607dc5..1fc2d532 100644 --- a/sources/azure/build/package/Dockerfile +++ b/sources/azure/build/package/Dockerfile @@ -1,5 +1,5 @@ 
# Build the source binary -FROM golang:1.25-alpine AS builder +FROM golang:1.26-alpine AS builder ARG TARGETOS ARG TARGETARCH ARG BUILD_VERSION diff --git a/sources/azure/clients/application-gateways-client.go b/sources/azure/clients/application-gateways-client.go index e755f3dd..6ba0d285 100644 --- a/sources/azure/clients/application-gateways-client.go +++ b/sources/azure/clients/application-gateways-client.go @@ -3,7 +3,7 @@ package clients import ( "context" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" ) //go:generate mockgen -destination=../shared/mocks/mock_application_gateways_client.go -package=mocks -source=application-gateways-client.go diff --git a/sources/azure/clients/application-security-groups-client.go b/sources/azure/clients/application-security-groups-client.go new file mode 100644 index 00000000..18fcdb09 --- /dev/null +++ b/sources/azure/clients/application-security-groups-client.go @@ -0,0 +1,35 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" +) + +//go:generate mockgen -destination=../shared/mocks/mock_application_security_groups_client.go -package=mocks -source=application-security-groups-client.go + +// ApplicationSecurityGroupsPager is a type alias for the generic Pager interface with application security group response type. +type ApplicationSecurityGroupsPager = Pager[armnetwork.ApplicationSecurityGroupsClientListResponse] + +// ApplicationSecurityGroupsClient is an interface for interacting with Azure application security groups. 
+type ApplicationSecurityGroupsClient interface { + Get(ctx context.Context, resourceGroupName string, applicationSecurityGroupName string, options *armnetwork.ApplicationSecurityGroupsClientGetOptions) (armnetwork.ApplicationSecurityGroupsClientGetResponse, error) + NewListPager(resourceGroupName string, options *armnetwork.ApplicationSecurityGroupsClientListOptions) ApplicationSecurityGroupsPager +} + +type applicationSecurityGroupsClient struct { + client *armnetwork.ApplicationSecurityGroupsClient +} + +func (c *applicationSecurityGroupsClient) Get(ctx context.Context, resourceGroupName string, applicationSecurityGroupName string, options *armnetwork.ApplicationSecurityGroupsClientGetOptions) (armnetwork.ApplicationSecurityGroupsClientGetResponse, error) { + return c.client.Get(ctx, resourceGroupName, applicationSecurityGroupName, options) +} + +func (c *applicationSecurityGroupsClient) NewListPager(resourceGroupName string, options *armnetwork.ApplicationSecurityGroupsClientListOptions) ApplicationSecurityGroupsPager { + return c.client.NewListPager(resourceGroupName, options) +} + +// NewApplicationSecurityGroupsClient creates a new ApplicationSecurityGroupsClient from the Azure SDK client. 
+func NewApplicationSecurityGroupsClient(client *armnetwork.ApplicationSecurityGroupsClient) ApplicationSecurityGroupsClient { + return &applicationSecurityGroupsClient{client: client} +} diff --git a/sources/azure/clients/batch-accounts-client.go b/sources/azure/clients/batch-accounts-client.go index 3c41029f..524ab924 100644 --- a/sources/azure/clients/batch-accounts-client.go +++ b/sources/azure/clients/batch-accounts-client.go @@ -3,7 +3,7 @@ package clients import ( "context" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/batch/armbatch/v3" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/batch/armbatch/v4" ) //go:generate mockgen -destination=../shared/mocks/mock_batch_accounts_client.go -package=mocks -source=batch-accounts-client.go diff --git a/sources/azure/clients/batch-application-client.go b/sources/azure/clients/batch-application-client.go new file mode 100644 index 00000000..f57ebb6d --- /dev/null +++ b/sources/azure/clients/batch-application-client.go @@ -0,0 +1,35 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/batch/armbatch/v4" +) + +//go:generate mockgen -destination=../shared/mocks/mock_batch_application_client.go -package=mocks -source=batch-application-client.go + +// BatchApplicationsPager is a type alias for the generic Pager interface with batch application response type. 
+type BatchApplicationsPager = Pager[armbatch.ApplicationClientListResponse] + +// BatchApplicationsClient is an interface for interacting with Azure Batch applications +type BatchApplicationsClient interface { + Get(ctx context.Context, resourceGroupName string, accountName string, applicationName string) (armbatch.ApplicationClientGetResponse, error) + List(ctx context.Context, resourceGroupName string, accountName string) BatchApplicationsPager +} + +type batchApplicationsClient struct { + client *armbatch.ApplicationClient +} + +func (c *batchApplicationsClient) Get(ctx context.Context, resourceGroupName string, accountName string, applicationName string) (armbatch.ApplicationClientGetResponse, error) { + return c.client.Get(ctx, resourceGroupName, accountName, applicationName, nil) +} + +func (c *batchApplicationsClient) List(ctx context.Context, resourceGroupName string, accountName string) BatchApplicationsPager { + return c.client.NewListPager(resourceGroupName, accountName, nil) +} + +// NewBatchApplicationsClient creates a new BatchApplicationsClient from the Azure SDK client +func NewBatchApplicationsClient(client *armbatch.ApplicationClient) BatchApplicationsClient { + return &batchApplicationsClient{client: client} +} diff --git a/sources/azure/clients/batch-pool-client.go b/sources/azure/clients/batch-pool-client.go new file mode 100644 index 00000000..bc7320b3 --- /dev/null +++ b/sources/azure/clients/batch-pool-client.go @@ -0,0 +1,35 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/batch/armbatch/v4" +) + +//go:generate mockgen -destination=../shared/mocks/mock_batch_pool_client.go -package=mocks -source=batch-pool-client.go + +// BatchPoolsPager is a type alias for the generic Pager interface with batch pool response type. 
+type BatchPoolsPager = Pager[armbatch.PoolClientListByBatchAccountResponse] + +// BatchPoolsClient is an interface for interacting with Azure Batch pools (child of Batch account). +type BatchPoolsClient interface { + Get(ctx context.Context, resourceGroupName string, accountName string, poolName string) (armbatch.PoolClientGetResponse, error) + ListByBatchAccount(ctx context.Context, resourceGroupName string, accountName string) BatchPoolsPager +} + +type batchPoolsClient struct { + client *armbatch.PoolClient +} + +func (c *batchPoolsClient) Get(ctx context.Context, resourceGroupName string, accountName string, poolName string) (armbatch.PoolClientGetResponse, error) { + return c.client.Get(ctx, resourceGroupName, accountName, poolName, nil) +} + +func (c *batchPoolsClient) ListByBatchAccount(ctx context.Context, resourceGroupName string, accountName string) BatchPoolsPager { + return c.client.NewListByBatchAccountPager(resourceGroupName, accountName, nil) +} + +// NewBatchPoolsClient creates a new BatchPoolsClient from the Azure SDK client. +func NewBatchPoolsClient(client *armbatch.PoolClient) BatchPoolsClient { + return &batchPoolsClient{client: client} +} diff --git a/sources/azure/clients/capacity-reservations-client.go b/sources/azure/clients/capacity-reservations-client.go new file mode 100644 index 00000000..cec35b63 --- /dev/null +++ b/sources/azure/clients/capacity-reservations-client.go @@ -0,0 +1,35 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" +) + +//go:generate mockgen -destination=../shared/mocks/mock_capacity_reservations_client.go -package=mocks -source=capacity-reservations-client.go + +// CapacityReservationsPager is a type alias for the generic Pager interface with capacity reservations list response type. 
+type CapacityReservationsPager = Pager[armcompute.CapacityReservationsClientListByCapacityReservationGroupResponse] + +// CapacityReservationsClient is an interface for interacting with Azure capacity reservations +type CapacityReservationsClient interface { + NewListByCapacityReservationGroupPager(resourceGroupName string, capacityReservationGroupName string, options *armcompute.CapacityReservationsClientListByCapacityReservationGroupOptions) CapacityReservationsPager + Get(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, options *armcompute.CapacityReservationsClientGetOptions) (armcompute.CapacityReservationsClientGetResponse, error) +} + +type capacityReservationsClient struct { + client *armcompute.CapacityReservationsClient +} + +func (c *capacityReservationsClient) NewListByCapacityReservationGroupPager(resourceGroupName string, capacityReservationGroupName string, options *armcompute.CapacityReservationsClientListByCapacityReservationGroupOptions) CapacityReservationsPager { + return c.client.NewListByCapacityReservationGroupPager(resourceGroupName, capacityReservationGroupName, options) +} + +func (c *capacityReservationsClient) Get(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, options *armcompute.CapacityReservationsClientGetOptions) (armcompute.CapacityReservationsClientGetResponse, error) { + return c.client.Get(ctx, resourceGroupName, capacityReservationGroupName, capacityReservationName, options) +} + +// NewCapacityReservationsClient creates a new CapacityReservationsClient from the Azure SDK client +func NewCapacityReservationsClient(client *armcompute.CapacityReservationsClient) CapacityReservationsClient { + return &capacityReservationsClient{client: client} +} diff --git a/sources/azure/clients/dbforpostgresql-flexible-server-private-endpoint-connection-client.go 
b/sources/azure/clients/dbforpostgresql-flexible-server-private-endpoint-connection-client.go new file mode 100644 index 00000000..cfccd927 --- /dev/null +++ b/sources/azure/clients/dbforpostgresql-flexible-server-private-endpoint-connection-client.go @@ -0,0 +1,35 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresqlflexibleservers/v5" +) + +//go:generate mockgen -destination=../shared/mocks/mock_dbforpostgresql_flexible_server_private_endpoint_connection_client.go -package=mocks -source=dbforpostgresql-flexible-server-private-endpoint-connection-client.go + +// DBforPostgreSQLFlexibleServerPrivateEndpointConnectionsPager is a type alias for the generic Pager interface with PostgreSQL flexible server private endpoint connection list response type. +type DBforPostgreSQLFlexibleServerPrivateEndpointConnectionsPager = Pager[armpostgresqlflexibleservers.PrivateEndpointConnectionsClientListByServerResponse] + +// DBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient is an interface for interacting with Azure DB for PostgreSQL flexible server private endpoint connections. 
+type DBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient interface { + Get(ctx context.Context, resourceGroupName string, serverName string, privateEndpointConnectionName string) (armpostgresqlflexibleservers.PrivateEndpointConnectionsClientGetResponse, error) + ListByServer(ctx context.Context, resourceGroupName string, serverName string) DBforPostgreSQLFlexibleServerPrivateEndpointConnectionsPager +} + +type dbforpostgresqlFlexibleServerPrivateEndpointConnectionsClient struct { + client *armpostgresqlflexibleservers.PrivateEndpointConnectionsClient +} + +func (c *dbforpostgresqlFlexibleServerPrivateEndpointConnectionsClient) Get(ctx context.Context, resourceGroupName string, serverName string, privateEndpointConnectionName string) (armpostgresqlflexibleservers.PrivateEndpointConnectionsClientGetResponse, error) { + return c.client.Get(ctx, resourceGroupName, serverName, privateEndpointConnectionName, nil) +} + +func (c *dbforpostgresqlFlexibleServerPrivateEndpointConnectionsClient) ListByServer(ctx context.Context, resourceGroupName string, serverName string) DBforPostgreSQLFlexibleServerPrivateEndpointConnectionsPager { + return c.client.NewListByServerPager(resourceGroupName, serverName, nil) +} + +// NewDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient creates a new DBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient from the Azure SDK client. 
+func NewDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient(client *armpostgresqlflexibleservers.PrivateEndpointConnectionsClient) DBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient { + return &dbforpostgresqlFlexibleServerPrivateEndpointConnectionsClient{client: client} +} diff --git a/sources/azure/clients/ddos-protection-plans-client.go b/sources/azure/clients/ddos-protection-plans-client.go new file mode 100644 index 00000000..426ef05c --- /dev/null +++ b/sources/azure/clients/ddos-protection-plans-client.go @@ -0,0 +1,35 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" +) + +//go:generate mockgen -destination=../shared/mocks/mock_ddos_protection_plans_client.go -package=mocks -source=ddos-protection-plans-client.go + +// DdosProtectionPlansPager is a type alias for the generic Pager interface with DDoS protection plan list response type. +type DdosProtectionPlansPager = Pager[armnetwork.DdosProtectionPlansClientListByResourceGroupResponse] + +// DdosProtectionPlansClient is an interface for interacting with Azure DDoS protection plans. 
+type DdosProtectionPlansClient interface { + Get(ctx context.Context, resourceGroupName string, ddosProtectionPlanName string, options *armnetwork.DdosProtectionPlansClientGetOptions) (armnetwork.DdosProtectionPlansClientGetResponse, error) + NewListByResourceGroupPager(resourceGroupName string, options *armnetwork.DdosProtectionPlansClientListByResourceGroupOptions) DdosProtectionPlansPager +} + +type ddosProtectionPlansClient struct { + client *armnetwork.DdosProtectionPlansClient +} + +func (c *ddosProtectionPlansClient) Get(ctx context.Context, resourceGroupName string, ddosProtectionPlanName string, options *armnetwork.DdosProtectionPlansClientGetOptions) (armnetwork.DdosProtectionPlansClientGetResponse, error) { + return c.client.Get(ctx, resourceGroupName, ddosProtectionPlanName, options) +} + +func (c *ddosProtectionPlansClient) NewListByResourceGroupPager(resourceGroupName string, options *armnetwork.DdosProtectionPlansClientListByResourceGroupOptions) DdosProtectionPlansPager { + return c.client.NewListByResourceGroupPager(resourceGroupName, options) +} + +// NewDdosProtectionPlansClient creates a new DdosProtectionPlansClient from the Azure SDK client. +func NewDdosProtectionPlansClient(client *armnetwork.DdosProtectionPlansClient) DdosProtectionPlansClient { + return &ddosProtectionPlansClient{client: client} +} diff --git a/sources/azure/clients/dedicated-hosts-client.go b/sources/azure/clients/dedicated-hosts-client.go new file mode 100644 index 00000000..e82bf6e9 --- /dev/null +++ b/sources/azure/clients/dedicated-hosts-client.go @@ -0,0 +1,35 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" +) + +//go:generate mockgen -destination=../shared/mocks/mock_dedicated_hosts_client.go -package=mocks -source=dedicated-hosts-client.go + +// DedicatedHostsPager is a type alias for the generic Pager interface with dedicated hosts list response type. 
+type DedicatedHostsPager = Pager[armcompute.DedicatedHostsClientListByHostGroupResponse] + +// DedicatedHostsClient is an interface for interacting with Azure dedicated hosts +type DedicatedHostsClient interface { + NewListByHostGroupPager(resourceGroupName string, hostGroupName string, options *armcompute.DedicatedHostsClientListByHostGroupOptions) DedicatedHostsPager + Get(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, options *armcompute.DedicatedHostsClientGetOptions) (armcompute.DedicatedHostsClientGetResponse, error) +} + +type dedicatedHostsClient struct { + client *armcompute.DedicatedHostsClient +} + +func (c *dedicatedHostsClient) NewListByHostGroupPager(resourceGroupName string, hostGroupName string, options *armcompute.DedicatedHostsClientListByHostGroupOptions) DedicatedHostsPager { + return c.client.NewListByHostGroupPager(resourceGroupName, hostGroupName, options) +} + +func (c *dedicatedHostsClient) Get(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, options *armcompute.DedicatedHostsClientGetOptions) (armcompute.DedicatedHostsClientGetResponse, error) { + return c.client.Get(ctx, resourceGroupName, hostGroupName, hostName, options) +} + +// NewDedicatedHostsClient creates a new DedicatedHostsClient from the Azure SDK client +func NewDedicatedHostsClient(client *armcompute.DedicatedHostsClient) DedicatedHostsClient { + return &dedicatedHostsClient{client: client} +} diff --git a/sources/azure/clients/documentdb-database-accounts-client.go b/sources/azure/clients/documentdb-database-accounts-client.go index 2c6ce075..6fd66c2e 100644 --- a/sources/azure/clients/documentdb-database-accounts-client.go +++ b/sources/azure/clients/documentdb-database-accounts-client.go @@ -3,7 +3,7 @@ package clients import ( "context" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos/v3" ) 
//go:generate mockgen -destination=../shared/mocks/mock_documentdb_database_accounts_client.go -package=mocks -source=documentdb-database-accounts-client.go diff --git a/sources/azure/clients/documentdb-private-endpoint-connection-client.go b/sources/azure/clients/documentdb-private-endpoint-connection-client.go new file mode 100644 index 00000000..6fe021f0 --- /dev/null +++ b/sources/azure/clients/documentdb-private-endpoint-connection-client.go @@ -0,0 +1,35 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos/v3" +) + +//go:generate mockgen -destination=../shared/mocks/mock_documentdb_private_endpoint_connection_client.go -package=mocks -source=documentdb-private-endpoint-connection-client.go + +// DocumentDBPrivateEndpointConnectionsPager is a type alias for the generic Pager interface with Cosmos DB private endpoint connection list response type. +type DocumentDBPrivateEndpointConnectionsPager = Pager[armcosmos.PrivateEndpointConnectionsClientListByDatabaseAccountResponse] + +// DocumentDBPrivateEndpointConnectionsClient is an interface for interacting with Azure Cosmos DB (DocumentDB) database account private endpoint connections. 
+type DocumentDBPrivateEndpointConnectionsClient interface { + Get(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string) (armcosmos.PrivateEndpointConnectionsClientGetResponse, error) + ListByDatabaseAccount(ctx context.Context, resourceGroupName string, accountName string) DocumentDBPrivateEndpointConnectionsPager +} + +type documentDBPrivateEndpointConnectionsClient struct { + client *armcosmos.PrivateEndpointConnectionsClient +} + +func (c *documentDBPrivateEndpointConnectionsClient) Get(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string) (armcosmos.PrivateEndpointConnectionsClientGetResponse, error) { + return c.client.Get(ctx, resourceGroupName, accountName, privateEndpointConnectionName, nil) +} + +func (c *documentDBPrivateEndpointConnectionsClient) ListByDatabaseAccount(ctx context.Context, resourceGroupName string, accountName string) DocumentDBPrivateEndpointConnectionsPager { + return c.client.NewListByDatabaseAccountPager(resourceGroupName, accountName, nil) +} + +// NewDocumentDBPrivateEndpointConnectionsClient creates a new DocumentDBPrivateEndpointConnectionsClient from the Azure SDK client. 
+func NewDocumentDBPrivateEndpointConnectionsClient(client *armcosmos.PrivateEndpointConnectionsClient) DocumentDBPrivateEndpointConnectionsClient { + return &documentDBPrivateEndpointConnectionsClient{client: client} +} diff --git a/sources/azure/clients/encryption-scopes-client.go b/sources/azure/clients/encryption-scopes-client.go new file mode 100644 index 00000000..bd730639 --- /dev/null +++ b/sources/azure/clients/encryption-scopes-client.go @@ -0,0 +1,35 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage/v3" +) + +//go:generate mockgen -destination=../shared/mocks/mock_encryption_scopes_client.go -package=mocks -source=encryption-scopes-client.go + +// EncryptionScopesPager is a type alias for the generic Pager interface with encryption scope list response type. +type EncryptionScopesPager = Pager[armstorage.EncryptionScopesClientListResponse] + +// EncryptionScopesClient is an interface for interacting with Azure storage encryption scopes +type EncryptionScopesClient interface { + Get(ctx context.Context, resourceGroupName string, accountName string, encryptionScopeName string) (armstorage.EncryptionScopesClientGetResponse, error) + List(ctx context.Context, resourceGroupName string, accountName string) EncryptionScopesPager +} + +type encryptionScopesClient struct { + client *armstorage.EncryptionScopesClient +} + +func (c *encryptionScopesClient) Get(ctx context.Context, resourceGroupName string, accountName string, encryptionScopeName string) (armstorage.EncryptionScopesClientGetResponse, error) { + return c.client.Get(ctx, resourceGroupName, accountName, encryptionScopeName, nil) +} + +func (c *encryptionScopesClient) List(ctx context.Context, resourceGroupName string, accountName string) EncryptionScopesPager { + return c.client.NewListPager(resourceGroupName, accountName, nil) +} + +// NewEncryptionScopesClient creates a new EncryptionScopesClient from the Azure SDK client +func 
NewEncryptionScopesClient(client *armstorage.EncryptionScopesClient) EncryptionScopesClient { + return &encryptionScopesClient{client: client} +} diff --git a/sources/azure/clients/gallery-applications-client.go b/sources/azure/clients/gallery-applications-client.go new file mode 100644 index 00000000..b7194f54 --- /dev/null +++ b/sources/azure/clients/gallery-applications-client.go @@ -0,0 +1,35 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" +) + +//go:generate mockgen -destination=../shared/mocks/mock_gallery_applications_client.go -package=mocks -source=gallery-applications-client.go + +// GalleryApplicationsPager is a type alias for the generic Pager interface with gallery application response type. +type GalleryApplicationsPager = Pager[armcompute.GalleryApplicationsClientListByGalleryResponse] + +// GalleryApplicationsClient is an interface for interacting with Azure gallery applications +type GalleryApplicationsClient interface { + NewListByGalleryPager(resourceGroupName string, galleryName string, options *armcompute.GalleryApplicationsClientListByGalleryOptions) GalleryApplicationsPager + Get(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, options *armcompute.GalleryApplicationsClientGetOptions) (armcompute.GalleryApplicationsClientGetResponse, error) +} + +type galleryApplicationsClient struct { + client *armcompute.GalleryApplicationsClient +} + +func (c *galleryApplicationsClient) NewListByGalleryPager(resourceGroupName string, galleryName string, options *armcompute.GalleryApplicationsClientListByGalleryOptions) GalleryApplicationsPager { + return c.client.NewListByGalleryPager(resourceGroupName, galleryName, options) +} + +func (c *galleryApplicationsClient) Get(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, options *armcompute.GalleryApplicationsClientGetOptions) 
(armcompute.GalleryApplicationsClientGetResponse, error) { + return c.client.Get(ctx, resourceGroupName, galleryName, galleryApplicationName, options) +} + +// NewGalleryApplicationsClient creates a new GalleryApplicationsClient from the Azure SDK client +func NewGalleryApplicationsClient(client *armcompute.GalleryApplicationsClient) GalleryApplicationsClient { + return &galleryApplicationsClient{client: client} +} diff --git a/sources/azure/clients/keyvault-key-client.go b/sources/azure/clients/keyvault-key-client.go new file mode 100644 index 00000000..7f829917 --- /dev/null +++ b/sources/azure/clients/keyvault-key-client.go @@ -0,0 +1,35 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault/v2" +) + +//go:generate mockgen -destination=../shared/mocks/mock_keyvault_key_client.go -package=mocks -source=keyvault-key-client.go + +// KeysPager is a type alias for the generic Pager interface with keys response type. +type KeysPager = Pager[armkeyvault.KeysClientListResponse] + +// KeysClient is an interface for interacting with Azure Key Vault keys +type KeysClient interface { + NewListPager(resourceGroupName string, vaultName string, options *armkeyvault.KeysClientListOptions) KeysPager + Get(ctx context.Context, resourceGroupName string, vaultName string, keyName string, options *armkeyvault.KeysClientGetOptions) (armkeyvault.KeysClientGetResponse, error) +} + +type keysClient struct { + client *armkeyvault.KeysClient +} + +func (c *keysClient) NewListPager(resourceGroupName string, vaultName string, options *armkeyvault.KeysClientListOptions) KeysPager { + return c.client.NewListPager(resourceGroupName, vaultName, options) +} + +func (c *keysClient) Get(ctx context.Context, resourceGroupName string, vaultName string, keyName string, options *armkeyvault.KeysClientGetOptions) (armkeyvault.KeysClientGetResponse, error) { + return c.client.Get(ctx, resourceGroupName, vaultName, keyName, options) +} 
+ +// NewKeysClient creates a new KeysClient from the Azure SDK client +func NewKeysClient(client *armkeyvault.KeysClient) KeysClient { + return &keysClient{client: client} +} diff --git a/sources/azure/clients/keyvault-managed-hsm-private-endpoint-connection-client.go b/sources/azure/clients/keyvault-managed-hsm-private-endpoint-connection-client.go new file mode 100644 index 00000000..0fef287c --- /dev/null +++ b/sources/azure/clients/keyvault-managed-hsm-private-endpoint-connection-client.go @@ -0,0 +1,35 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault/v2" +) + +//go:generate mockgen -destination=../shared/mocks/mock_keyvault_managed_hsm_private_endpoint_connection_client.go -package=mocks -source=keyvault-managed-hsm-private-endpoint-connection-client.go + +// KeyVaultManagedHSMPrivateEndpointConnectionsPager is a type alias for the generic Pager interface with MHSM private endpoint connection list response type. +type KeyVaultManagedHSMPrivateEndpointConnectionsPager = Pager[armkeyvault.MHSMPrivateEndpointConnectionsClientListByResourceResponse] + +// KeyVaultManagedHSMPrivateEndpointConnectionsClient is an interface for interacting with Azure Key Vault Managed HSM private endpoint connections. 
+type KeyVaultManagedHSMPrivateEndpointConnectionsClient interface { + Get(ctx context.Context, resourceGroupName string, hsmName string, privateEndpointConnectionName string) (armkeyvault.MHSMPrivateEndpointConnectionsClientGetResponse, error) + ListByResource(ctx context.Context, resourceGroupName string, hsmName string) KeyVaultManagedHSMPrivateEndpointConnectionsPager +} + +type keyvaultManagedHSMPrivateEndpointConnectionsClient struct { + client *armkeyvault.MHSMPrivateEndpointConnectionsClient +} + +func (c *keyvaultManagedHSMPrivateEndpointConnectionsClient) Get(ctx context.Context, resourceGroupName string, hsmName string, privateEndpointConnectionName string) (armkeyvault.MHSMPrivateEndpointConnectionsClientGetResponse, error) { + return c.client.Get(ctx, resourceGroupName, hsmName, privateEndpointConnectionName, nil) +} + +func (c *keyvaultManagedHSMPrivateEndpointConnectionsClient) ListByResource(ctx context.Context, resourceGroupName string, hsmName string) KeyVaultManagedHSMPrivateEndpointConnectionsPager { + return c.client.NewListByResourcePager(resourceGroupName, hsmName, nil) +} + +// NewKeyVaultManagedHSMPrivateEndpointConnectionsClient creates a new KeyVaultManagedHSMPrivateEndpointConnectionsClient from the Azure SDK client. 
+func NewKeyVaultManagedHSMPrivateEndpointConnectionsClient(client *armkeyvault.MHSMPrivateEndpointConnectionsClient) KeyVaultManagedHSMPrivateEndpointConnectionsClient { + return &keyvaultManagedHSMPrivateEndpointConnectionsClient{client: client} +} diff --git a/sources/azure/clients/load-balancers-client.go b/sources/azure/clients/load-balancers-client.go index 2ba33b1e..2f3e5df3 100644 --- a/sources/azure/clients/load-balancers-client.go +++ b/sources/azure/clients/load-balancers-client.go @@ -3,7 +3,7 @@ package clients import ( "context" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" ) //go:generate mockgen -destination=../shared/mocks/mock_load_balancers_client.go -package=mocks -source=load-balancers-client.go diff --git a/sources/azure/clients/nat-gateways-client.go b/sources/azure/clients/nat-gateways-client.go new file mode 100644 index 00000000..635f50ed --- /dev/null +++ b/sources/azure/clients/nat-gateways-client.go @@ -0,0 +1,35 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" +) + +//go:generate mockgen -destination=../shared/mocks/mock_nat_gateways_client.go -package=mocks -source=nat-gateways-client.go + +// NatGatewaysPager is a type alias for the generic Pager interface with NAT gateway list response type. +type NatGatewaysPager = Pager[armnetwork.NatGatewaysClientListResponse] + +// NatGatewaysClient is an interface for interacting with Azure NAT gateways. 
+type NatGatewaysClient interface { + Get(ctx context.Context, resourceGroupName string, natGatewayName string, options *armnetwork.NatGatewaysClientGetOptions) (armnetwork.NatGatewaysClientGetResponse, error) + NewListPager(resourceGroupName string, options *armnetwork.NatGatewaysClientListOptions) NatGatewaysPager +} + +type natGatewaysClient struct { + client *armnetwork.NatGatewaysClient +} + +func (c *natGatewaysClient) Get(ctx context.Context, resourceGroupName string, natGatewayName string, options *armnetwork.NatGatewaysClientGetOptions) (armnetwork.NatGatewaysClientGetResponse, error) { + return c.client.Get(ctx, resourceGroupName, natGatewayName, options) +} + +func (c *natGatewaysClient) NewListPager(resourceGroupName string, options *armnetwork.NatGatewaysClientListOptions) NatGatewaysPager { + return c.client.NewListPager(resourceGroupName, options) +} + +// NewNatGatewaysClient creates a new NatGatewaysClient from the Azure SDK client. +func NewNatGatewaysClient(client *armnetwork.NatGatewaysClient) NatGatewaysClient { + return &natGatewaysClient{client: client} +} diff --git a/sources/azure/clients/network-interfaces-client.go b/sources/azure/clients/network-interfaces-client.go index cbc8963c..ad9a50a3 100644 --- a/sources/azure/clients/network-interfaces-client.go +++ b/sources/azure/clients/network-interfaces-client.go @@ -3,7 +3,7 @@ package clients import ( "context" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" ) //go:generate mockgen -destination=../shared/mocks/mock_network_interfaces_client.go -package=mocks -source=network-interfaces-client.go diff --git a/sources/azure/clients/network-private-endpoint-client.go b/sources/azure/clients/network-private-endpoint-client.go new file mode 100644 index 00000000..701eaa83 --- /dev/null +++ b/sources/azure/clients/network-private-endpoint-client.go @@ -0,0 +1,35 @@ +package clients + 
+import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" +) + +//go:generate mockgen -destination=../shared/mocks/mock_network_private_endpoint_client.go -package=mocks -source=network-private-endpoint-client.go + +// PrivateEndpointsPager is a type alias for the generic Pager interface with private endpoint response type. +type PrivateEndpointsPager = Pager[armnetwork.PrivateEndpointsClientListResponse] + +// PrivateEndpointsClient is an interface for interacting with Azure private endpoints. +type PrivateEndpointsClient interface { + Get(ctx context.Context, resourceGroupName string, privateEndpointName string) (armnetwork.PrivateEndpointsClientGetResponse, error) + List(resourceGroupName string) PrivateEndpointsPager +} + +type privateEndpointsClient struct { + client *armnetwork.PrivateEndpointsClient +} + +func (c *privateEndpointsClient) Get(ctx context.Context, resourceGroupName string, privateEndpointName string) (armnetwork.PrivateEndpointsClientGetResponse, error) { + return c.client.Get(ctx, resourceGroupName, privateEndpointName, nil) +} + +func (c *privateEndpointsClient) List(resourceGroupName string) PrivateEndpointsPager { + return c.client.NewListPager(resourceGroupName, nil) +} + +// NewPrivateEndpointsClient creates a new PrivateEndpointsClient from the Azure SDK client. 
+func NewPrivateEndpointsClient(client *armnetwork.PrivateEndpointsClient) PrivateEndpointsClient { + return &privateEndpointsClient{client: client} +} diff --git a/sources/azure/clients/network-security-groups-client.go b/sources/azure/clients/network-security-groups-client.go index 7dc9df0e..fac29fc0 100644 --- a/sources/azure/clients/network-security-groups-client.go +++ b/sources/azure/clients/network-security-groups-client.go @@ -3,7 +3,7 @@ package clients import ( "context" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" ) //go:generate mockgen -destination=../shared/mocks/mock_network_security_groups_client.go -package=mocks -source=network-security-groups-client.go diff --git a/sources/azure/clients/postgresql-flexible-server-firewall-rule-client.go b/sources/azure/clients/postgresql-flexible-server-firewall-rule-client.go new file mode 100644 index 00000000..599dfdcd --- /dev/null +++ b/sources/azure/clients/postgresql-flexible-server-firewall-rule-client.go @@ -0,0 +1,35 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresqlflexibleservers/v5" +) + +//go:generate mockgen -destination=../shared/mocks/mock_postgresql_flexible_server_firewall_rule_client.go -package=mocks -source=postgresql-flexible-server-firewall-rule-client.go + +// PostgreSQLFlexibleServerFirewallRulePager is a type alias for the generic Pager interface with PostgreSQL flexible server firewall rule response type. +type PostgreSQLFlexibleServerFirewallRulePager = Pager[armpostgresqlflexibleservers.FirewallRulesClientListByServerResponse] + +// PostgreSQLFlexibleServerFirewallRuleClient is an interface for interacting with Azure PostgreSQL flexible server firewall rules. 
+type PostgreSQLFlexibleServerFirewallRuleClient interface { + ListByServer(ctx context.Context, resourceGroupName string, serverName string) PostgreSQLFlexibleServerFirewallRulePager + Get(ctx context.Context, resourceGroupName string, serverName string, firewallRuleName string) (armpostgresqlflexibleservers.FirewallRulesClientGetResponse, error) +} + +type postgresqlFlexibleServerFirewallRuleClient struct { + client *armpostgresqlflexibleservers.FirewallRulesClient +} + +func (a *postgresqlFlexibleServerFirewallRuleClient) ListByServer(ctx context.Context, resourceGroupName string, serverName string) PostgreSQLFlexibleServerFirewallRulePager { + return a.client.NewListByServerPager(resourceGroupName, serverName, nil) +} + +func (a *postgresqlFlexibleServerFirewallRuleClient) Get(ctx context.Context, resourceGroupName string, serverName string, firewallRuleName string) (armpostgresqlflexibleservers.FirewallRulesClientGetResponse, error) { + return a.client.Get(ctx, resourceGroupName, serverName, firewallRuleName, nil) +} + +// NewPostgreSQLFlexibleServerFirewallRuleClient creates a new PostgreSQLFlexibleServerFirewallRuleClient from the Azure SDK client. 
+func NewPostgreSQLFlexibleServerFirewallRuleClient(client *armpostgresqlflexibleservers.FirewallRulesClient) PostgreSQLFlexibleServerFirewallRuleClient { + return &postgresqlFlexibleServerFirewallRuleClient{client: client} +} diff --git a/sources/azure/clients/private-dns-zones-client.go b/sources/azure/clients/private-dns-zones-client.go new file mode 100644 index 00000000..4ac42a61 --- /dev/null +++ b/sources/azure/clients/private-dns-zones-client.go @@ -0,0 +1,35 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns" +) + +//go:generate mockgen -destination=../shared/mocks/mock_private_dns_zones_client.go -package=mocks -source=private-dns-zones-client.go + +// PrivateDNSZonesPager is a type alias for the generic Pager interface with private zone response type. +type PrivateDNSZonesPager = Pager[armprivatedns.PrivateZonesClientListByResourceGroupResponse] + +// PrivateDNSZonesClient is an interface for interacting with Azure Private DNS zones. 
+type PrivateDNSZonesClient interface { + NewListByResourceGroupPager(resourceGroupName string, options *armprivatedns.PrivateZonesClientListByResourceGroupOptions) PrivateDNSZonesPager + Get(ctx context.Context, resourceGroupName string, privateZoneName string, options *armprivatedns.PrivateZonesClientGetOptions) (armprivatedns.PrivateZonesClientGetResponse, error) +} + +type privateDNSZonesClient struct { + client *armprivatedns.PrivateZonesClient +} + +func (c *privateDNSZonesClient) NewListByResourceGroupPager(resourceGroupName string, options *armprivatedns.PrivateZonesClientListByResourceGroupOptions) PrivateDNSZonesPager { + return c.client.NewListByResourceGroupPager(resourceGroupName, options) +} + +func (c *privateDNSZonesClient) Get(ctx context.Context, resourceGroupName string, privateZoneName string, options *armprivatedns.PrivateZonesClientGetOptions) (armprivatedns.PrivateZonesClientGetResponse, error) { + return c.client.Get(ctx, resourceGroupName, privateZoneName, options) +} + +// NewPrivateDNSZonesClient creates a new PrivateDNSZonesClient from the Azure SDK client. 
+func NewPrivateDNSZonesClient(client *armprivatedns.PrivateZonesClient) PrivateDNSZonesClient { + return &privateDNSZonesClient{client: client} +} diff --git a/sources/azure/clients/public-ip-addresses.go b/sources/azure/clients/public-ip-addresses.go index c5b29958..a43e9b06 100644 --- a/sources/azure/clients/public-ip-addresses.go +++ b/sources/azure/clients/public-ip-addresses.go @@ -3,7 +3,7 @@ package clients import ( "context" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" ) //go:generate mockgen -destination=../shared/mocks/mock_public_ip_addresses_client.go -package=mocks -source=public-ip-addresses.go diff --git a/sources/azure/clients/public-ip-prefixes-client.go b/sources/azure/clients/public-ip-prefixes-client.go new file mode 100644 index 00000000..092639a2 --- /dev/null +++ b/sources/azure/clients/public-ip-prefixes-client.go @@ -0,0 +1,35 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" +) + +//go:generate mockgen -destination=../shared/mocks/mock_public_ip_prefixes_client.go -package=mocks -source=public-ip-prefixes-client.go + +// PublicIPPrefixesPager is a type alias for the generic Pager interface with public IP prefix response type. +type PublicIPPrefixesPager = Pager[armnetwork.PublicIPPrefixesClientListResponse] + +// PublicIPPrefixesClient is an interface for interacting with Azure public IP prefixes. 
+type PublicIPPrefixesClient interface { + Get(ctx context.Context, resourceGroupName string, publicIPPrefixName string, options *armnetwork.PublicIPPrefixesClientGetOptions) (armnetwork.PublicIPPrefixesClientGetResponse, error) + NewListPager(resourceGroupName string, options *armnetwork.PublicIPPrefixesClientListOptions) PublicIPPrefixesPager +} + +type publicIPPrefixesClient struct { + client *armnetwork.PublicIPPrefixesClient +} + +func (c *publicIPPrefixesClient) Get(ctx context.Context, resourceGroupName string, publicIPPrefixName string, options *armnetwork.PublicIPPrefixesClientGetOptions) (armnetwork.PublicIPPrefixesClientGetResponse, error) { + return c.client.Get(ctx, resourceGroupName, publicIPPrefixName, options) +} + +func (c *publicIPPrefixesClient) NewListPager(resourceGroupName string, options *armnetwork.PublicIPPrefixesClientListOptions) PublicIPPrefixesPager { + return c.client.NewListPager(resourceGroupName, options) +} + +// NewPublicIPPrefixesClient creates a new PublicIPPrefixesClient from the Azure SDK client. +func NewPublicIPPrefixesClient(client *armnetwork.PublicIPPrefixesClient) PublicIPPrefixesClient { + return &publicIPPrefixesClient{client: client} +} diff --git a/sources/azure/clients/record-sets-client.go b/sources/azure/clients/record-sets-client.go new file mode 100644 index 00000000..e54b996c --- /dev/null +++ b/sources/azure/clients/record-sets-client.go @@ -0,0 +1,35 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dns/armdns" +) + +//go:generate mockgen -destination=../shared/mocks/mock_record_sets_client.go -package=mocks -source=record-sets-client.go + +// RecordSetsPager is a type alias for the generic Pager interface with record sets list response type. 
+type RecordSetsPager = Pager[armdns.RecordSetsClientListAllByDNSZoneResponse] + +// RecordSetsClient is an interface for interacting with Azure DNS record sets +type RecordSetsClient interface { + Get(ctx context.Context, resourceGroupName string, zoneName string, relativeRecordSetName string, recordType armdns.RecordType, options *armdns.RecordSetsClientGetOptions) (armdns.RecordSetsClientGetResponse, error) + NewListAllByDNSZonePager(resourceGroupName string, zoneName string, options *armdns.RecordSetsClientListAllByDNSZoneOptions) RecordSetsPager +} + +type recordSetsClient struct { + client *armdns.RecordSetsClient +} + +func (c *recordSetsClient) Get(ctx context.Context, resourceGroupName string, zoneName string, relativeRecordSetName string, recordType armdns.RecordType, options *armdns.RecordSetsClientGetOptions) (armdns.RecordSetsClientGetResponse, error) { + return c.client.Get(ctx, resourceGroupName, zoneName, relativeRecordSetName, recordType, options) +} + +func (c *recordSetsClient) NewListAllByDNSZonePager(resourceGroupName string, zoneName string, options *armdns.RecordSetsClientListAllByDNSZoneOptions) RecordSetsPager { + return c.client.NewListAllByDNSZonePager(resourceGroupName, zoneName, options) +} + +// NewRecordSetsClient creates a new RecordSetsClient from the Azure SDK client +func NewRecordSetsClient(client *armdns.RecordSetsClient) RecordSetsClient { + return &recordSetsClient{client: client} +} diff --git a/sources/azure/clients/route-tables-client.go b/sources/azure/clients/route-tables-client.go index 2708b3c3..5686be77 100644 --- a/sources/azure/clients/route-tables-client.go +++ b/sources/azure/clients/route-tables-client.go @@ -3,7 +3,7 @@ package clients import ( "context" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" ) //go:generate mockgen -destination=../shared/mocks/mock_route_tables_client.go -package=mocks 
-source=route-tables-client.go diff --git a/sources/azure/clients/routes-client.go b/sources/azure/clients/routes-client.go new file mode 100644 index 00000000..7a23239d --- /dev/null +++ b/sources/azure/clients/routes-client.go @@ -0,0 +1,35 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" +) + +//go:generate mockgen -destination=../shared/mocks/mock_routes_client.go -package=mocks -source=routes-client.go + +// RoutesPager is a type alias for the generic Pager interface with routes list response type. +type RoutesPager = Pager[armnetwork.RoutesClientListResponse] + +// RoutesClient is an interface for interacting with Azure routes (child of route table). +type RoutesClient interface { + Get(ctx context.Context, resourceGroupName string, routeTableName string, routeName string, options *armnetwork.RoutesClientGetOptions) (armnetwork.RoutesClientGetResponse, error) + NewListPager(resourceGroupName string, routeTableName string, options *armnetwork.RoutesClientListOptions) RoutesPager +} + +type routesClient struct { + client *armnetwork.RoutesClient +} + +func (a *routesClient) Get(ctx context.Context, resourceGroupName string, routeTableName string, routeName string, options *armnetwork.RoutesClientGetOptions) (armnetwork.RoutesClientGetResponse, error) { + return a.client.Get(ctx, resourceGroupName, routeTableName, routeName, options) +} + +func (a *routesClient) NewListPager(resourceGroupName string, routeTableName string, options *armnetwork.RoutesClientListOptions) RoutesPager { + return a.client.NewListPager(resourceGroupName, routeTableName, options) +} + +// NewRoutesClient creates a new RoutesClient from the Azure SDK client. 
+func NewRoutesClient(client *armnetwork.RoutesClient) RoutesClient { + return &routesClient{client: client} +} diff --git a/sources/azure/clients/security-rules-client.go b/sources/azure/clients/security-rules-client.go new file mode 100644 index 00000000..1a68f0d2 --- /dev/null +++ b/sources/azure/clients/security-rules-client.go @@ -0,0 +1,35 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" +) + +//go:generate mockgen -destination=../shared/mocks/mock_security_rules_client.go -package=mocks -source=security-rules-client.go + +// SecurityRulesPager is a type alias for the generic Pager interface with security rules list response type. +type SecurityRulesPager = Pager[armnetwork.SecurityRulesClientListResponse] + +// SecurityRulesClient is an interface for interacting with Azure NSG security rules (child of network security group). +type SecurityRulesClient interface { + Get(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, securityRuleName string, options *armnetwork.SecurityRulesClientGetOptions) (armnetwork.SecurityRulesClientGetResponse, error) + NewListPager(resourceGroupName string, networkSecurityGroupName string, options *armnetwork.SecurityRulesClientListOptions) SecurityRulesPager +} + +type securityRulesClient struct { + client *armnetwork.SecurityRulesClient +} + +func (a *securityRulesClient) Get(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, securityRuleName string, options *armnetwork.SecurityRulesClientGetOptions) (armnetwork.SecurityRulesClientGetResponse, error) { + return a.client.Get(ctx, resourceGroupName, networkSecurityGroupName, securityRuleName, options) +} + +func (a *securityRulesClient) NewListPager(resourceGroupName string, networkSecurityGroupName string, options *armnetwork.SecurityRulesClientListOptions) SecurityRulesPager { + return a.client.NewListPager(resourceGroupName, 
networkSecurityGroupName, options) +} + +// NewSecurityRulesClient creates a new SecurityRulesClient from the Azure SDK client. +func NewSecurityRulesClient(client *armnetwork.SecurityRulesClient) SecurityRulesClient { + return &securityRulesClient{client: client} +} diff --git a/sources/azure/clients/sql-elastic-pool-client.go b/sources/azure/clients/sql-elastic-pool-client.go new file mode 100644 index 00000000..3bb367cd --- /dev/null +++ b/sources/azure/clients/sql-elastic-pool-client.go @@ -0,0 +1,35 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/sql/armsql/v2" +) + +//go:generate mockgen -destination=../shared/mocks/mock_sql_elastic_pool_client.go -package=mocks -source=sql-elastic-pool-client.go + +// SqlElasticPoolPager is a type alias for the generic Pager interface with SQL elastic pool list response type. +type SqlElasticPoolPager = Pager[armsql.ElasticPoolsClientListByServerResponse] + +// SqlElasticPoolClient is an interface for interacting with Azure SQL elastic pools. +type SqlElasticPoolClient interface { + ListByServer(ctx context.Context, resourceGroupName string, serverName string) SqlElasticPoolPager + Get(ctx context.Context, resourceGroupName string, serverName string, elasticPoolName string) (armsql.ElasticPoolsClientGetResponse, error) +} + +type sqlElasticPoolClient struct { + client *armsql.ElasticPoolsClient +} + +func (a *sqlElasticPoolClient) ListByServer(ctx context.Context, resourceGroupName string, serverName string) SqlElasticPoolPager { + return a.client.NewListByServerPager(resourceGroupName, serverName, nil) +} + +func (a *sqlElasticPoolClient) Get(ctx context.Context, resourceGroupName string, serverName string, elasticPoolName string) (armsql.ElasticPoolsClientGetResponse, error) { + return a.client.Get(ctx, resourceGroupName, serverName, elasticPoolName, nil) +} + +// NewSqlElasticPoolClient creates a new SqlElasticPoolClient from the Azure SDK client. 
+func NewSqlElasticPoolClient(client *armsql.ElasticPoolsClient) SqlElasticPoolClient { + return &sqlElasticPoolClient{client: client} +} diff --git a/sources/azure/clients/sql-server-firewall-rule-client.go b/sources/azure/clients/sql-server-firewall-rule-client.go new file mode 100644 index 00000000..aa7a8d30 --- /dev/null +++ b/sources/azure/clients/sql-server-firewall-rule-client.go @@ -0,0 +1,35 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/sql/armsql/v2" +) + +//go:generate mockgen -destination=../shared/mocks/mock_sql_server_firewall_rule_client.go -package=mocks -source=sql-server-firewall-rule-client.go + +// SqlServerFirewallRulePager is a type alias for the generic Pager interface with SQL server firewall rule response type. +type SqlServerFirewallRulePager = Pager[armsql.FirewallRulesClientListByServerResponse] + +// SqlServerFirewallRuleClient is an interface for interacting with Azure SQL server firewall rules. +type SqlServerFirewallRuleClient interface { + ListByServer(ctx context.Context, resourceGroupName string, serverName string) SqlServerFirewallRulePager + Get(ctx context.Context, resourceGroupName string, serverName string, firewallRuleName string) (armsql.FirewallRulesClientGetResponse, error) +} + +type sqlServerFirewallRuleClient struct { + client *armsql.FirewallRulesClient +} + +func (a *sqlServerFirewallRuleClient) ListByServer(ctx context.Context, resourceGroupName string, serverName string) SqlServerFirewallRulePager { + return a.client.NewListByServerPager(resourceGroupName, serverName, nil) +} + +func (a *sqlServerFirewallRuleClient) Get(ctx context.Context, resourceGroupName string, serverName string, firewallRuleName string) (armsql.FirewallRulesClientGetResponse, error) { + return a.client.Get(ctx, resourceGroupName, serverName, firewallRuleName, nil) +} + +// NewSqlServerFirewallRuleClient creates a new SqlServerFirewallRuleClient from the Azure SDK client. 
+func NewSqlServerFirewallRuleClient(client *armsql.FirewallRulesClient) SqlServerFirewallRuleClient { + return &sqlServerFirewallRuleClient{client: client} +} diff --git a/sources/azure/clients/sql-server-private-endpoint-connection-client.go b/sources/azure/clients/sql-server-private-endpoint-connection-client.go new file mode 100644 index 00000000..f262a7fc --- /dev/null +++ b/sources/azure/clients/sql-server-private-endpoint-connection-client.go @@ -0,0 +1,35 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/sql/armsql/v2" +) + +//go:generate mockgen -destination=../shared/mocks/mock_sql_server_private_endpoint_connection_client.go -package=mocks -source=sql-server-private-endpoint-connection-client.go + +// SQLServerPrivateEndpointConnectionsPager is a type alias for the generic Pager interface with SQL server private endpoint connection list response type. +type SQLServerPrivateEndpointConnectionsPager = Pager[armsql.PrivateEndpointConnectionsClientListByServerResponse] + +// SQLServerPrivateEndpointConnectionsClient is an interface for interacting with Azure SQL server private endpoint connections. 
+type SQLServerPrivateEndpointConnectionsClient interface { + Get(ctx context.Context, resourceGroupName string, serverName string, privateEndpointConnectionName string) (armsql.PrivateEndpointConnectionsClientGetResponse, error) + ListByServer(ctx context.Context, resourceGroupName string, serverName string) SQLServerPrivateEndpointConnectionsPager +} + +type sqlServerPrivateEndpointConnectionsClient struct { + client *armsql.PrivateEndpointConnectionsClient +} + +func (c *sqlServerPrivateEndpointConnectionsClient) Get(ctx context.Context, resourceGroupName string, serverName string, privateEndpointConnectionName string) (armsql.PrivateEndpointConnectionsClientGetResponse, error) { + return c.client.Get(ctx, resourceGroupName, serverName, privateEndpointConnectionName, nil) +} + +func (c *sqlServerPrivateEndpointConnectionsClient) ListByServer(ctx context.Context, resourceGroupName string, serverName string) SQLServerPrivateEndpointConnectionsPager { + return c.client.NewListByServerPager(resourceGroupName, serverName, nil) +} + +// NewSQLServerPrivateEndpointConnectionsClient creates a new SQLServerPrivateEndpointConnectionsClient from the Azure SDK client. 
+func NewSQLServerPrivateEndpointConnectionsClient(client *armsql.PrivateEndpointConnectionsClient) SQLServerPrivateEndpointConnectionsClient { + return &sqlServerPrivateEndpointConnectionsClient{client: client} +} diff --git a/sources/azure/clients/sql-server-virtual-network-rule-client.go b/sources/azure/clients/sql-server-virtual-network-rule-client.go new file mode 100644 index 00000000..71ffb2ec --- /dev/null +++ b/sources/azure/clients/sql-server-virtual-network-rule-client.go @@ -0,0 +1,35 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/sql/armsql/v2" +) + +//go:generate mockgen -destination=../shared/mocks/mock_sql_server_virtual_network_rule_client.go -package=mocks -source=sql-server-virtual-network-rule-client.go + +// SqlServerVirtualNetworkRulePager is a type alias for the generic Pager interface with SQL server virtual network rule list response type. +type SqlServerVirtualNetworkRulePager = Pager[armsql.VirtualNetworkRulesClientListByServerResponse] + +// SqlServerVirtualNetworkRuleClient is an interface for interacting with Azure SQL server virtual network rules. 
+type SqlServerVirtualNetworkRuleClient interface { + ListByServer(ctx context.Context, resourceGroupName string, serverName string) SqlServerVirtualNetworkRulePager + Get(ctx context.Context, resourceGroupName string, serverName string, virtualNetworkRuleName string) (armsql.VirtualNetworkRulesClientGetResponse, error) +} + +type sqlServerVirtualNetworkRuleClient struct { + client *armsql.VirtualNetworkRulesClient +} + +func (a *sqlServerVirtualNetworkRuleClient) ListByServer(ctx context.Context, resourceGroupName string, serverName string) SqlServerVirtualNetworkRulePager { + return a.client.NewListByServerPager(resourceGroupName, serverName, nil) +} + +func (a *sqlServerVirtualNetworkRuleClient) Get(ctx context.Context, resourceGroupName string, serverName string, virtualNetworkRuleName string) (armsql.VirtualNetworkRulesClientGetResponse, error) { + return a.client.Get(ctx, resourceGroupName, serverName, virtualNetworkRuleName, nil) +} + +// NewSqlServerVirtualNetworkRuleClient creates a new SqlServerVirtualNetworkRuleClient from the Azure SDK client. 
+func NewSqlServerVirtualNetworkRuleClient(client *armsql.VirtualNetworkRulesClient) SqlServerVirtualNetworkRuleClient { + return &sqlServerVirtualNetworkRuleClient{client: client} +} diff --git a/sources/azure/clients/storage-private-endpoint-connection-client.go b/sources/azure/clients/storage-private-endpoint-connection-client.go new file mode 100644 index 00000000..d591473a --- /dev/null +++ b/sources/azure/clients/storage-private-endpoint-connection-client.go @@ -0,0 +1,35 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage/v3" +) + +//go:generate mockgen -destination=../shared/mocks/mock_storage_private_endpoint_connection_client.go -package=mocks -source=storage-private-endpoint-connection-client.go + +// PrivateEndpointConnectionsPager is a type alias for the generic Pager interface with storage private endpoint connection list response type. +type PrivateEndpointConnectionsPager = Pager[armstorage.PrivateEndpointConnectionsClientListResponse] + +// StoragePrivateEndpointConnectionsClient is an interface for interacting with Azure storage account private endpoint connections. 
+type StoragePrivateEndpointConnectionsClient interface { + Get(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string) (armstorage.PrivateEndpointConnectionsClientGetResponse, error) + List(ctx context.Context, resourceGroupName string, accountName string) PrivateEndpointConnectionsPager +} + +type storagePrivateEndpointConnectionsClient struct { + client *armstorage.PrivateEndpointConnectionsClient +} + +func (c *storagePrivateEndpointConnectionsClient) Get(ctx context.Context, resourceGroupName string, accountName string, privateEndpointConnectionName string) (armstorage.PrivateEndpointConnectionsClientGetResponse, error) { + return c.client.Get(ctx, resourceGroupName, accountName, privateEndpointConnectionName, nil) +} + +func (c *storagePrivateEndpointConnectionsClient) List(ctx context.Context, resourceGroupName string, accountName string) PrivateEndpointConnectionsPager { + return c.client.NewListPager(resourceGroupName, accountName, nil) +} + +// NewStoragePrivateEndpointConnectionsClient creates a new StoragePrivateEndpointConnectionsClient from the Azure SDK client. +func NewStoragePrivateEndpointConnectionsClient(client *armstorage.PrivateEndpointConnectionsClient) StoragePrivateEndpointConnectionsClient { + return &storagePrivateEndpointConnectionsClient{client: client} +} diff --git a/sources/azure/clients/subnets-client.go b/sources/azure/clients/subnets-client.go new file mode 100644 index 00000000..385e83e0 --- /dev/null +++ b/sources/azure/clients/subnets-client.go @@ -0,0 +1,35 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" +) + +//go:generate mockgen -destination=../shared/mocks/mock_subnets_client.go -package=mocks -source=subnets-client.go + +// SubnetsPager is a type alias for the generic Pager interface with subnet list response type. 
+type SubnetsPager = Pager[armnetwork.SubnetsClientListResponse] + +// SubnetsClient is an interface for interacting with Azure virtual network subnets. +type SubnetsClient interface { + Get(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, options *armnetwork.SubnetsClientGetOptions) (armnetwork.SubnetsClientGetResponse, error) + NewListPager(resourceGroupName string, virtualNetworkName string, options *armnetwork.SubnetsClientListOptions) SubnetsPager +} + +type subnetsClientAdapter struct { + client *armnetwork.SubnetsClient +} + +func (a *subnetsClientAdapter) Get(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, options *armnetwork.SubnetsClientGetOptions) (armnetwork.SubnetsClientGetResponse, error) { + return a.client.Get(ctx, resourceGroupName, virtualNetworkName, subnetName, options) +} + +func (a *subnetsClientAdapter) NewListPager(resourceGroupName string, virtualNetworkName string, options *armnetwork.SubnetsClientListOptions) SubnetsPager { + return a.client.NewListPager(resourceGroupName, virtualNetworkName, options) +} + +// NewSubnetsClient creates a new SubnetsClient from the Azure SDK client. 
+func NewSubnetsClient(client *armnetwork.SubnetsClient) SubnetsClient { + return &subnetsClientAdapter{client: client} +} diff --git a/sources/azure/clients/virtual-network-gateways-client.go b/sources/azure/clients/virtual-network-gateways-client.go new file mode 100644 index 00000000..56401d1b --- /dev/null +++ b/sources/azure/clients/virtual-network-gateways-client.go @@ -0,0 +1,35 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" +) + +//go:generate mockgen -destination=../shared/mocks/mock_virtual_network_gateways_client.go -package=mocks -source=virtual-network-gateways-client.go + +// VirtualNetworkGatewaysPager is a type alias for the generic Pager interface with virtual network gateway list response type. +type VirtualNetworkGatewaysPager = Pager[armnetwork.VirtualNetworkGatewaysClientListResponse] + +// VirtualNetworkGatewaysClient is an interface for interacting with Azure virtual network gateways. +type VirtualNetworkGatewaysClient interface { + Get(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, options *armnetwork.VirtualNetworkGatewaysClientGetOptions) (armnetwork.VirtualNetworkGatewaysClientGetResponse, error) + NewListPager(resourceGroupName string, options *armnetwork.VirtualNetworkGatewaysClientListOptions) VirtualNetworkGatewaysPager +} + +type virtualNetworkGatewaysClient struct { + client *armnetwork.VirtualNetworkGatewaysClient +} + +func (c *virtualNetworkGatewaysClient) Get(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, options *armnetwork.VirtualNetworkGatewaysClientGetOptions) (armnetwork.VirtualNetworkGatewaysClientGetResponse, error) { + return c.client.Get(ctx, resourceGroupName, virtualNetworkGatewayName, options) +} + +func (c *virtualNetworkGatewaysClient) NewListPager(resourceGroupName string, options *armnetwork.VirtualNetworkGatewaysClientListOptions) VirtualNetworkGatewaysPager { + 
return c.client.NewListPager(resourceGroupName, options) +} + +// NewVirtualNetworkGatewaysClient creates a new VirtualNetworkGatewaysClient from the Azure SDK client. +func NewVirtualNetworkGatewaysClient(client *armnetwork.VirtualNetworkGatewaysClient) VirtualNetworkGatewaysClient { + return &virtualNetworkGatewaysClient{client: client} +} diff --git a/sources/azure/clients/virtual-network-peerings-client.go b/sources/azure/clients/virtual-network-peerings-client.go new file mode 100644 index 00000000..7bc7029d --- /dev/null +++ b/sources/azure/clients/virtual-network-peerings-client.go @@ -0,0 +1,35 @@ +package clients + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" +) + +//go:generate mockgen -destination=../shared/mocks/mock_virtual_network_peerings_client.go -package=mocks -source=virtual-network-peerings-client.go + +// VirtualNetworkPeeringsPager is a type alias for the generic Pager interface with virtual network peerings list response type. +type VirtualNetworkPeeringsPager = Pager[armnetwork.VirtualNetworkPeeringsClientListResponse] + +// VirtualNetworkPeeringsClient is an interface for interacting with Azure virtual network peerings. 
+type VirtualNetworkPeeringsClient interface { + Get(ctx context.Context, resourceGroupName string, virtualNetworkName string, peeringName string, options *armnetwork.VirtualNetworkPeeringsClientGetOptions) (armnetwork.VirtualNetworkPeeringsClientGetResponse, error) + NewListPager(resourceGroupName string, virtualNetworkName string, options *armnetwork.VirtualNetworkPeeringsClientListOptions) VirtualNetworkPeeringsPager +} + +type virtualNetworkPeeringsClientAdapter struct { + client *armnetwork.VirtualNetworkPeeringsClient +} + +func (a *virtualNetworkPeeringsClientAdapter) Get(ctx context.Context, resourceGroupName string, virtualNetworkName string, peeringName string, options *armnetwork.VirtualNetworkPeeringsClientGetOptions) (armnetwork.VirtualNetworkPeeringsClientGetResponse, error) { + return a.client.Get(ctx, resourceGroupName, virtualNetworkName, peeringName, options) +} + +func (a *virtualNetworkPeeringsClientAdapter) NewListPager(resourceGroupName string, virtualNetworkName string, options *armnetwork.VirtualNetworkPeeringsClientListOptions) VirtualNetworkPeeringsPager { + return a.client.NewListPager(resourceGroupName, virtualNetworkName, options) +} + +// NewVirtualNetworkPeeringsClient creates a new VirtualNetworkPeeringsClient from the Azure SDK client. 
+func NewVirtualNetworkPeeringsClient(client *armnetwork.VirtualNetworkPeeringsClient) VirtualNetworkPeeringsClient { + return &virtualNetworkPeeringsClientAdapter{client: client} +} diff --git a/sources/azure/clients/virtual-networks-client.go b/sources/azure/clients/virtual-networks-client.go index 07e99710..4b8616f6 100644 --- a/sources/azure/clients/virtual-networks-client.go +++ b/sources/azure/clients/virtual-networks-client.go @@ -3,7 +3,7 @@ package clients import ( "context" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" ) //go:generate mockgen -destination=../shared/mocks/mock_virtual_networks_client.go -package=mocks -source=virtual-networks-client.go diff --git a/sources/azure/integration-tests/authorization-role-assignment_test.go b/sources/azure/integration-tests/authorization-role-assignment_test.go index b4548094..300d5588 100644 --- a/sources/azure/integration-tests/authorization-role-assignment_test.go +++ b/sources/azure/integration-tests/authorization-role-assignment_test.go @@ -15,7 +15,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" "github.com/google/uuid" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -378,8 +377,8 @@ func createRoleAssignment(ctx context.Context, client *armauthorization.RoleAssi parameters := armauthorization.RoleAssignmentCreateParameters{ Properties: &armauthorization.RoleAssignmentProperties{ - PrincipalID: ptr.To(principalID), - RoleDefinitionID: ptr.To(roleDefinitionID), + PrincipalID: new(principalID), + RoleDefinitionID: new(roleDefinitionID), }, } diff --git a/sources/azure/integration-tests/batch-batch-accounts_test.go b/sources/azure/integration-tests/batch-batch-accounts_test.go index 4aea0a93..4a2c02a2 100644 --- 
a/sources/azure/integration-tests/batch-batch-accounts_test.go +++ b/sources/azure/integration-tests/batch-batch-accounts_test.go @@ -12,11 +12,10 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/batch/armbatch/v3" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/batch/armbatch/v4" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage/v3" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -320,16 +319,16 @@ func createBatchAccount(ctx context.Context, client *armbatch.AccountClient, res // Create the batch account poller, err := client.BeginCreate(ctx, resourceGroupName, accountName, armbatch.AccountCreateParameters{ - Location: ptr.To(location), + Location: new(location), Properties: &armbatch.AccountCreateProperties{ AutoStorage: &armbatch.AutoStorageBaseProperties{ - StorageAccountID: ptr.To(storageAccountID), + StorageAccountID: new(storageAccountID), }, - PoolAllocationMode: ptr.To(armbatch.PoolAllocationModeBatchService), + PoolAllocationMode: new(armbatch.PoolAllocationModeBatchService), }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "test": ptr.To("batch-account"), + "purpose": new("overmind-integration-tests"), + "test": new("batch-account"), }, }, nil) if err != nil { diff --git a/sources/azure/integration-tests/compute-availability-set_test.go b/sources/azure/integration-tests/compute-availability-set_test.go index ffc52e92..76f6f232 100644 --- a/sources/azure/integration-tests/compute-availability-set_test.go +++ b/sources/azure/integration-tests/compute-availability-set_test.go @@ -11,10 +11,9 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" - 
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -379,16 +378,16 @@ func createAvailabilitySet(ctx context.Context, client *armcompute.AvailabilityS // Create the availability set resp, err := client.CreateOrUpdate(ctx, resourceGroupName, avSetName, armcompute.AvailabilitySet{ - Location: ptr.To(location), + Location: new(location), Properties: &armcompute.AvailabilitySetProperties{ - PlatformFaultDomainCount: ptr.To[int32](2), - PlatformUpdateDomainCount: ptr.To[int32](2), + PlatformFaultDomainCount: new(int32(2)), + PlatformUpdateDomainCount: new(int32(2)), ProximityPlacementGroup: nil, // Optional - not setting for this test VirtualMachines: nil, // Will be populated when VMs are added }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "test": ptr.To("compute-availability-set"), + "purpose": new("overmind-integration-tests"), + "test": new("compute-availability-set"), }, }, nil) if err != nil { @@ -473,22 +472,22 @@ func createVirtualNetworkForAVSet(ctx context.Context, client *armnetwork.Virtua // Create the VNet poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, vnetName, armnetwork.VirtualNetwork{ - Location: ptr.To(location), + Location: new(location), Properties: &armnetwork.VirtualNetworkPropertiesFormat{ AddressSpace: &armnetwork.AddressSpace{ - AddressPrefixes: []*string{ptr.To("10.2.0.0/16")}, + AddressPrefixes: []*string{new("10.2.0.0/16")}, }, Subnets: []*armnetwork.Subnet{ { - Name: ptr.To(integrationTestSubnetForAVSetName), + Name: new(integrationTestSubnetForAVSetName), Properties: &armnetwork.SubnetPropertiesFormat{ - AddressPrefix: ptr.To("10.2.0.0/24"), + AddressPrefix: 
new("10.2.0.0/24"), }, }, }, }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), + "purpose": new("overmind-integration-tests"), }, }, nil) if err != nil { @@ -515,22 +514,22 @@ func createNetworkInterfaceForAVSet(ctx context.Context, client *armnetwork.Inte // Create the NIC poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, nicName, armnetwork.Interface{ - Location: ptr.To(location), + Location: new(location), Properties: &armnetwork.InterfacePropertiesFormat{ IPConfigurations: []*armnetwork.InterfaceIPConfiguration{ { - Name: ptr.To("ipconfig1"), + Name: new("ipconfig1"), Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{ Subnet: &armnetwork.Subnet{ - ID: ptr.To(subnetID), + ID: new(subnetID), }, - PrivateIPAllocationMethod: ptr.To(armnetwork.IPAllocationMethodDynamic), + PrivateIPAllocationMethod: new(armnetwork.IPAllocationMethodDynamic), }, }, }, }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), + "purpose": new("overmind-integration-tests"), }, }, nil) if err != nil { @@ -567,54 +566,54 @@ func createVirtualMachineWithAvailabilitySet(ctx context.Context, client *armcom // Create the VM poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, vmName, armcompute.VirtualMachine{ - Location: ptr.To(location), + Location: new(location), Properties: &armcompute.VirtualMachineProperties{ HardwareProfile: &armcompute.HardwareProfile{ // Use Standard_D2ps_v5 - ARM-based VM with good availability in westus2 - VMSize: ptr.To(armcompute.VirtualMachineSizeTypes("Standard_D2ps_v5")), + VMSize: new(armcompute.VirtualMachineSizeTypes("Standard_D2ps_v5")), }, StorageProfile: &armcompute.StorageProfile{ ImageReference: &armcompute.ImageReference{ - Publisher: ptr.To("Canonical"), - Offer: ptr.To("0001-com-ubuntu-server-jammy"), - SKU: ptr.To("22_04-lts-arm64"), // ARM64 image for ARM-based VM - Version: ptr.To("latest"), + Publisher: new("Canonical"), + Offer: 
new("0001-com-ubuntu-server-jammy"), + SKU: new("22_04-lts-arm64"), // ARM64 image for ARM-based VM + Version: new("latest"), }, OSDisk: &armcompute.OSDisk{ - Name: ptr.To(fmt.Sprintf("%s-osdisk", vmName)), - CreateOption: ptr.To(armcompute.DiskCreateOptionTypesFromImage), + Name: new(fmt.Sprintf("%s-osdisk", vmName)), + CreateOption: new(armcompute.DiskCreateOptionTypesFromImage), ManagedDisk: &armcompute.ManagedDiskParameters{ - StorageAccountType: ptr.To(armcompute.StorageAccountTypesStandardLRS), + StorageAccountType: new(armcompute.StorageAccountTypesStandardLRS), }, - DeleteOption: ptr.To(armcompute.DiskDeleteOptionTypesDelete), + DeleteOption: new(armcompute.DiskDeleteOptionTypesDelete), }, }, OSProfile: &armcompute.OSProfile{ - ComputerName: ptr.To(vmName), - AdminUsername: ptr.To("azureuser"), + ComputerName: new(vmName), + AdminUsername: new("azureuser"), // Use password authentication for integration tests (simpler than SSH keys) - AdminPassword: ptr.To("OvmIntegTest2024!"), + AdminPassword: new("OvmIntegTest2024!"), LinuxConfiguration: &armcompute.LinuxConfiguration{ - DisablePasswordAuthentication: ptr.To(false), + DisablePasswordAuthentication: new(false), }, }, NetworkProfile: &armcompute.NetworkProfile{ NetworkInterfaces: []*armcompute.NetworkInterfaceReference{ { - ID: ptr.To(nicID), + ID: new(nicID), Properties: &armcompute.NetworkInterfaceReferenceProperties{ - Primary: ptr.To(true), + Primary: new(true), }, }, }, }, AvailabilitySet: &armcompute.SubResource{ - ID: ptr.To(availabilitySetID), + ID: new(availabilitySetID), }, }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "test": ptr.To("compute-availability-set"), + "purpose": new("overmind-integration-tests"), + "test": new("compute-availability-set"), }, }, nil) if err != nil { diff --git a/sources/azure/integration-tests/compute-capacity-reservation-group_test.go b/sources/azure/integration-tests/compute-capacity-reservation-group_test.go index 
9509e715..cd48cf18 100644 --- a/sources/azure/integration-tests/compute-capacity-reservation-group_test.go +++ b/sources/azure/integration-tests/compute-capacity-reservation-group_test.go @@ -12,7 +12,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -247,10 +246,10 @@ func createCapacityReservationGroup(ctx context.Context, client *armcompute.Capa } _, err = client.CreateOrUpdate(ctx, resourceGroupName, groupName, armcompute.CapacityReservationGroup{ - Location: ptr.To(location), + Location: new(location), Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "test": ptr.To("compute-capacity-reservation-group"), + "purpose": new("overmind-integration-tests"), + "test": new("compute-capacity-reservation-group"), }, }, nil) if err != nil { diff --git a/sources/azure/integration-tests/compute-dedicated-host-group_test.go b/sources/azure/integration-tests/compute-dedicated-host-group_test.go index 89e1d475..6bdd96e7 100644 --- a/sources/azure/integration-tests/compute-dedicated-host-group_test.go +++ b/sources/azure/integration-tests/compute-dedicated-host-group_test.go @@ -12,7 +12,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -247,13 +246,13 @@ func createDedicatedHostGroup(ctx context.Context, client *armcompute.DedicatedH } _, err = client.CreateOrUpdate(ctx, resourceGroupName, hostGroupName, armcompute.DedicatedHostGroup{ - Location: ptr.To(location), + Location: new(location), Properties: 
&armcompute.DedicatedHostGroupProperties{ - PlatformFaultDomainCount: ptr.To[int32](1), + PlatformFaultDomainCount: new(int32(1)), }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "test": ptr.To("compute-dedicated-host-group"), + "purpose": new("overmind-integration-tests"), + "test": new("compute-dedicated-host-group"), }, }, nil) if err != nil { diff --git a/sources/azure/integration-tests/compute-disk-access_test.go b/sources/azure/integration-tests/compute-disk-access_test.go index bea69ff7..32550fa2 100644 --- a/sources/azure/integration-tests/compute-disk-access_test.go +++ b/sources/azure/integration-tests/compute-disk-access_test.go @@ -13,7 +13,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -261,10 +260,10 @@ func createDiskAccess(ctx context.Context, client *armcompute.DiskAccessesClient } poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, diskAccessName, armcompute.DiskAccess{ - Location: ptr.To(location), + Location: new(location), Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "test": ptr.To("compute-disk-access"), + "purpose": new("overmind-integration-tests"), + "test": new("compute-disk-access"), }, }, nil) if err != nil { diff --git a/sources/azure/integration-tests/compute-disk-encryption-set_test.go b/sources/azure/integration-tests/compute-disk-encryption-set_test.go index 4278e7b6..e86b55a2 100644 --- a/sources/azure/integration-tests/compute-disk-encryption-set_test.go +++ b/sources/azure/integration-tests/compute-disk-encryption-set_test.go @@ -15,7 +15,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi" 
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -358,26 +358,26 @@ func createDiskEncryptionSet(ctx context.Context, client *armcompute.DiskEncrypt // New DES creation. des := armcompute.DiskEncryptionSet{ - Location: ptr.To(location), + Location: to.Ptr(location), Identity: &armcompute.EncryptionSetIdentity{ - Type: ptr.To(armcompute.DiskEncryptionSetIdentityTypeUserAssigned), + Type: to.Ptr(armcompute.DiskEncryptionSetIdentityTypeUserAssigned), UserAssignedIdentities: map[string]*armcompute.UserAssignedIdentitiesValue{ userAssignedIdentityResourceID: &armcompute.UserAssignedIdentitiesValue{}, }, }, Properties: &armcompute.EncryptionSetProperties{ - EncryptionType: ptr.To(armcompute.DiskEncryptionSetTypeEncryptionAtRestWithCustomerKey), + EncryptionType: to.Ptr(armcompute.DiskEncryptionSetTypeEncryptionAtRestWithCustomerKey), ActiveKey: &armcompute.KeyForDiskEncryptionSet{ - KeyURL: ptr.To(keyURL), + KeyURL: to.Ptr(keyURL), SourceVault: &armcompute.SourceVault{ - ID: ptr.To(vaultID), + ID: to.Ptr(vaultID), }, }, - RotationToLatestKeyVersionEnabled: ptr.To(false), + RotationToLatestKeyVersionEnabled: to.Ptr(false), }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "test": ptr.To("compute-disk-encryption-set"), + "purpose": to.Ptr("overmind-integration-tests"), + "test": to.Ptr("compute-disk-encryption-set"), }, } diff --git a/sources/azure/integration-tests/compute-disk_test.go b/sources/azure/integration-tests/compute-disk_test.go index bd47dad0..81049955 100644 --- a/sources/azure/integration-tests/compute-disk_test.go +++ b/sources/azure/integration-tests/compute-disk_test.go @@ -13,7 +13,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" log
"github.com/sirupsen/logrus" - "k8s.io/utils/ptr" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -289,19 +289,19 @@ func createDisk(ctx context.Context, client *armcompute.DisksClient, resourceGro // Create an empty disk (DiskCreateOptionEmpty) // This is the simplest type of disk to create for testing poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, diskName, armcompute.Disk{ - Location: ptr.To(location), + Location: to.Ptr(location), Properties: &armcompute.DiskProperties{ CreationData: &armcompute.CreationData{ - CreateOption: ptr.To(armcompute.DiskCreateOptionEmpty), + CreateOption: to.Ptr(armcompute.DiskCreateOptionEmpty), }, - DiskSizeGB: ptr.To[int32](10), // 10 GB disk + DiskSizeGB: to.Ptr(int32(10)), // 10 GB disk }, SKU: &armcompute.DiskSKU{ - Name: ptr.To(armcompute.DiskStorageAccountTypesStandardLRS), + Name: to.Ptr(armcompute.DiskStorageAccountTypesStandardLRS), }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "test": ptr.To("compute-disk"), + "purpose": to.Ptr("overmind-integration-tests"), + "test": to.Ptr("compute-disk"), }, }, nil) if err != nil { diff --git a/sources/azure/integration-tests/compute-image_test.go b/sources/azure/integration-tests/compute-image_test.go index 78ceef2c..7b311b35 100644 --- a/sources/azure/integration-tests/compute-image_test.go +++ b/sources/azure/integration-tests/compute-image_test.go @@ -13,7 +13,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -336,22 +336,22 @@ func createImage(ctx context.Context, client *armcompute.ImagesClient, resourceG // Create an image from a managed disk poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, imageName, armcompute.Image{ - Location:
ptr.To(location), + Location: to.Ptr(location), Properties: &armcompute.ImageProperties{ - HyperVGeneration: ptr.To(armcompute.HyperVGenerationTypesV1), + HyperVGeneration: to.Ptr(armcompute.HyperVGenerationTypesV1), StorageProfile: &armcompute.ImageStorageProfile{ OSDisk: &armcompute.ImageOSDisk{ ManagedDisk: &armcompute.SubResource{ - ID: ptr.To(sourceDiskID), + ID: to.Ptr(sourceDiskID), }, - OSState: ptr.To(armcompute.OperatingSystemStateTypesGeneralized), - OSType: ptr.To(armcompute.OperatingSystemTypesLinux), + OSState: to.Ptr(armcompute.OperatingSystemStateTypesGeneralized), + OSType: to.Ptr(armcompute.OperatingSystemTypesLinux), }, }, }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "test": ptr.To("compute-image"), + "purpose": to.Ptr("overmind-integration-tests"), + "test": to.Ptr("compute-image"), }, }, nil) if err != nil { diff --git a/sources/azure/integration-tests/compute-proximity-placement-group_test.go b/sources/azure/integration-tests/compute-proximity-placement-group_test.go index 7280cf75..7c4cfdb8 100644 --- a/sources/azure/integration-tests/compute-proximity-placement-group_test.go +++ b/sources/azure/integration-tests/compute-proximity-placement-group_test.go @@ -13,7 +13,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -261,13 +261,13 @@ func createProximityPlacementGroup(ctx context.Context, client *armcompute.Proxi } resp, err := client.CreateOrUpdate(ctx, resourceGroupName, ppgName, armcompute.ProximityPlacementGroup{ - Location: ptr.To(location), + Location: to.Ptr(location), Properties: &armcompute.ProximityPlacementGroupProperties{ - ProximityPlacementGroupType: ptr.To(armcompute.ProximityPlacementGroupTypeStandard), + ProximityPlacementGroupType:
to.Ptr(armcompute.ProximityPlacementGroupTypeStandard), }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "test": ptr.To("compute-proximity-placement-group"), + "purpose": to.Ptr("overmind-integration-tests"), + "test": to.Ptr("compute-proximity-placement-group"), }, }, nil) if err != nil { diff --git a/sources/azure/integration-tests/compute-snapshot_test.go b/sources/azure/integration-tests/compute-snapshot_test.go index c6388e86..a3f24439 100644 --- a/sources/azure/integration-tests/compute-snapshot_test.go +++ b/sources/azure/integration-tests/compute-snapshot_test.go @@ -13,7 +13,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -328,16 +328,16 @@ func createSnapshot(ctx context.Context, client *armcompute.SnapshotsClient, res } poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, snapshotName, armcompute.Snapshot{ - Location: ptr.To(location), + Location: to.Ptr(location), Properties: &armcompute.SnapshotProperties{ CreationData: &armcompute.CreationData{ - CreateOption: ptr.To(armcompute.DiskCreateOptionCopy), - SourceResourceID: ptr.To(sourceDiskID), + CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), + SourceResourceID: to.Ptr(sourceDiskID), }, }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "test": ptr.To("compute-snapshot"), + "purpose": to.Ptr("overmind-integration-tests"), + "test": to.Ptr("compute-snapshot"), }, }, nil) if err != nil { diff --git a/sources/azure/integration-tests/compute-virtual-machine-extension_test.go b/sources/azure/integration-tests/compute-virtual-machine-extension_test.go index 1c89a6df..d2fdc25c 100644 --- a/sources/azure/integration-tests/compute-virtual-machine-extension_test.go +++
b/sources/azure/integration-tests/compute-virtual-machine-extension_test.go @@ -11,10 +11,10 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -336,23 +336,23 @@ func createVirtualNetworkForExtension(ctx context.Context, client *armnetwork.Vi // Create the VNet poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, vnetName, armnetwork.VirtualNetwork{ - Location: ptr.To(location), + Location: to.Ptr(location), Properties: &armnetwork.VirtualNetworkPropertiesFormat{ AddressSpace: &armnetwork.AddressSpace{ - AddressPrefixes: []*string{ptr.To("10.2.0.0/16")}, + AddressPrefixes: []*string{to.Ptr("10.2.0.0/16")}, }, Subnets: []*armnetwork.Subnet{ { - Name: ptr.To(integrationTestExtensionSubnetName), + Name: to.Ptr(integrationTestExtensionSubnetName), Properties: &armnetwork.SubnetPropertiesFormat{ - AddressPrefix: ptr.To("10.2.0.0/24"), + AddressPrefix: to.Ptr("10.2.0.0/24"), }, }, }, }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "test": ptr.To("compute-virtual-machine-extension"), + "purpose": to.Ptr("overmind-integration-tests"), + "test": to.Ptr("compute-virtual-machine-extension"), }, }, nil) if err != nil { @@ -379,23 +379,23 @@ func createNetworkInterfaceForExtension(ctx context.Context, client *armnetwork.
// Create the NIC poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, nicName, armnetwork.Interface{ - Location: ptr.To(location), + Location: to.Ptr(location), Properties: &armnetwork.InterfacePropertiesFormat{ IPConfigurations: []*armnetwork.InterfaceIPConfiguration{ { - Name: ptr.To("ipconfig1"), + Name: to.Ptr("ipconfig1"), Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{ Subnet: &armnetwork.Subnet{ - ID: ptr.To(subnetID), + ID: to.Ptr(subnetID), }, - PrivateIPAllocationMethod: ptr.To(armnetwork.IPAllocationMethodDynamic), + PrivateIPAllocationMethod: to.Ptr(armnetwork.IPAllocationMethodDynamic), }, }, }, }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "test": ptr.To("compute-virtual-machine-extension"), + "purpose": to.Ptr("overmind-integration-tests"), + "test": to.Ptr("compute-virtual-machine-extension"), }, }, nil) if err != nil { @@ -432,51 +432,51 @@ func createVirtualMachineForExtension(ctx context.Context, client *armcompute.Vi // Create the VM poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, vmName, armcompute.VirtualMachine{ - Location: ptr.To(location), + Location: to.Ptr(location), Properties: &armcompute.VirtualMachineProperties{ HardwareProfile: &armcompute.HardwareProfile{ // Use Standard_D2ps_v5 - ARM-based VM with good availability in westus2 - VMSize: ptr.To(armcompute.VirtualMachineSizeTypes("Standard_D2ps_v5")), + VMSize: to.Ptr(armcompute.VirtualMachineSizeTypes("Standard_D2ps_v5")), }, StorageProfile: &armcompute.StorageProfile{ ImageReference: &armcompute.ImageReference{ - Publisher: ptr.To("Canonical"), - Offer: ptr.To("0001-com-ubuntu-server-jammy"), - SKU: ptr.To("22_04-lts-arm64"), // ARM64 image for ARM-based VM - Version: ptr.To("latest"), + Publisher: to.Ptr("Canonical"), + Offer: to.Ptr("0001-com-ubuntu-server-jammy"), + SKU: to.Ptr("22_04-lts-arm64"), // ARM64 image for ARM-based VM + Version: to.Ptr("latest"), }, OSDisk: &armcompute.OSDisk{ - Name: ptr.To(fmt.Sprintf("%s-osdisk",
vmName)), - CreateOption: ptr.To(armcompute.DiskCreateOptionTypesFromImage), + Name: to.Ptr(fmt.Sprintf("%s-osdisk", vmName)), + CreateOption: to.Ptr(armcompute.DiskCreateOptionTypesFromImage), ManagedDisk: &armcompute.ManagedDiskParameters{ - StorageAccountType: ptr.To(armcompute.StorageAccountTypesStandardLRS), + StorageAccountType: to.Ptr(armcompute.StorageAccountTypesStandardLRS), }, - DeleteOption: ptr.To(armcompute.DiskDeleteOptionTypesDelete), + DeleteOption: to.Ptr(armcompute.DiskDeleteOptionTypesDelete), }, }, OSProfile: &armcompute.OSProfile{ - ComputerName: ptr.To(vmName), - AdminUsername: ptr.To("azureuser"), + ComputerName: to.Ptr(vmName), + AdminUsername: to.Ptr("azureuser"), // Use password authentication for integration tests (simpler than SSH keys) - AdminPassword: ptr.To("OvmIntegTest2024!"), + AdminPassword: to.Ptr("OvmIntegTest2024!"), LinuxConfiguration: &armcompute.LinuxConfiguration{ - DisablePasswordAuthentication: ptr.To(false), + DisablePasswordAuthentication: to.Ptr(false), }, }, NetworkProfile: &armcompute.NetworkProfile{ NetworkInterfaces: []*armcompute.NetworkInterfaceReference{ { - ID: ptr.To(nicID), + ID: to.Ptr(nicID), Properties: &armcompute.NetworkInterfaceReferenceProperties{ - Primary: ptr.To(true), + Primary: to.Ptr(true), }, }, }, }, }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "test": ptr.To("compute-virtual-machine-extension"), + "purpose": to.Ptr("overmind-integration-tests"), + "test": to.Ptr("compute-virtual-machine-extension"), }, }, nil) if err != nil { @@ -563,18 +563,18 @@ func createVirtualMachineExtension(ctx context.Context, client *armcompute.Virtu // Create the extension with CustomScript extension // Reference: https://learn.microsoft.com/en-us/rest/api/compute/virtual-machine-extensions/create-or-update?view=rest-compute-2025-04-01&tabs=HTTP poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, vmName, extensionName, armcompute.VirtualMachineExtension{ - Location: ptr.To(location), + Location:
to.Ptr(location), Properties: &armcompute.VirtualMachineExtensionProperties{ - Publisher: ptr.To("Microsoft.Azure.Extensions"), - Type: ptr.To("CustomScript"), - TypeHandlerVersion: ptr.To("2.1"), - Settings: map[string]interface{}{ + Publisher: to.Ptr("Microsoft.Azure.Extensions"), + Type: to.Ptr("CustomScript"), + TypeHandlerVersion: to.Ptr("2.1"), + Settings: map[string]any{ "commandToExecute": "echo 'Hello from Overmind integration test'", }, }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "test": ptr.To("compute-virtual-machine-extension"), + "purpose": to.Ptr("overmind-integration-tests"), + "test": to.Ptr("compute-virtual-machine-extension"), }, }, nil) if err != nil { @@ -663,7 +663,7 @@ func deleteVirtualMachineExtension(ctx context.Context, client *armcompute.Virtu func deleteVirtualMachineForExtension(ctx context.Context, client *armcompute.VirtualMachinesClient, resourceGroupName, vmName string) error { // Use forceDeletion to speed up cleanup poller, err := client.BeginDelete(ctx, resourceGroupName, vmName, &armcompute.VirtualMachinesClientBeginDeleteOptions{ - ForceDeletion: ptr.To(true), + ForceDeletion: to.Ptr(true), }) if err != nil { var respErr *azcore.ResponseError diff --git a/sources/azure/integration-tests/compute-virtual-machine-run-command_test.go b/sources/azure/integration-tests/compute-virtual-machine-run-command_test.go index e887ac32..4cb7cd8f 100644 --- a/sources/azure/integration-tests/compute-virtual-machine-run-command_test.go +++ b/sources/azure/integration-tests/compute-virtual-machine-run-command_test.go @@ -11,10 +11,10 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" log
"github.com/sirupsen/logrus" - "k8s.io/utils/ptr" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -336,23 +336,23 @@ func createVirtualNetworkForRunCommand(ctx context.Context, client *armnetwork.V // Create the VNet poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, vnetName, armnetwork.VirtualNetwork{ - Location: ptr.To(location), + Location: to.Ptr(location), Properties: &armnetwork.VirtualNetworkPropertiesFormat{ AddressSpace: &armnetwork.AddressSpace{ - AddressPrefixes: []*string{ptr.To("10.1.0.0/16")}, + AddressPrefixes: []*string{to.Ptr("10.1.0.0/16")}, }, Subnets: []*armnetwork.Subnet{ { - Name: ptr.To(integrationTestRunCommandSubnetName), + Name: to.Ptr(integrationTestRunCommandSubnetName), Properties: &armnetwork.SubnetPropertiesFormat{ - AddressPrefix: ptr.To("10.1.0.0/24"), + AddressPrefix: to.Ptr("10.1.0.0/24"), }, }, }, }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "test": ptr.To("compute-virtual-machine-run-command"), + "purpose": to.Ptr("overmind-integration-tests"), + "test": to.Ptr("compute-virtual-machine-run-command"), }, }, nil) if err != nil { @@ -379,23 +379,23 @@ func createNetworkInterfaceForRunCommand(ctx context.Context, client *armnetwork // Create the NIC poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, nicName, armnetwork.Interface{ - Location: ptr.To(location), + Location: to.Ptr(location), Properties: &armnetwork.InterfacePropertiesFormat{ IPConfigurations: []*armnetwork.InterfaceIPConfiguration{ { - Name: ptr.To("ipconfig1"), + Name: to.Ptr("ipconfig1"), Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{ Subnet: &armnetwork.Subnet{ - ID: ptr.To(subnetID), + ID: to.Ptr(subnetID), }, - PrivateIPAllocationMethod: ptr.To(armnetwork.IPAllocationMethodDynamic), + PrivateIPAllocationMethod: to.Ptr(armnetwork.IPAllocationMethodDynamic), }, }, }, }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "test":
ptr.To("compute-virtual-machine-run-command"), + "purpose": to.Ptr("overmind-integration-tests"), + "test": to.Ptr("compute-virtual-machine-run-command"), }, }, nil) if err != nil { @@ -432,51 +432,51 @@ func createVirtualMachineForRunCommand(ctx context.Context, client *armcompute.V // Create the VM poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, vmName, armcompute.VirtualMachine{ - Location: ptr.To(location), + Location: to.Ptr(location), Properties: &armcompute.VirtualMachineProperties{ HardwareProfile: &armcompute.HardwareProfile{ // Use Standard_D2ps_v5 - ARM-based VM with good availability in westus2 - VMSize: ptr.To(armcompute.VirtualMachineSizeTypes("Standard_D2ps_v5")), + VMSize: to.Ptr(armcompute.VirtualMachineSizeTypes("Standard_D2ps_v5")), }, StorageProfile: &armcompute.StorageProfile{ ImageReference: &armcompute.ImageReference{ - Publisher: ptr.To("Canonical"), - Offer: ptr.To("0001-com-ubuntu-server-jammy"), - SKU: ptr.To("22_04-lts-arm64"), // ARM64 image for ARM-based VM - Version: ptr.To("latest"), + Publisher: to.Ptr("Canonical"), + Offer: to.Ptr("0001-com-ubuntu-server-jammy"), + SKU: to.Ptr("22_04-lts-arm64"), // ARM64 image for ARM-based VM + Version: to.Ptr("latest"), }, OSDisk: &armcompute.OSDisk{ - Name: ptr.To(fmt.Sprintf("%s-osdisk", vmName)), - CreateOption: ptr.To(armcompute.DiskCreateOptionTypesFromImage), + Name: to.Ptr(fmt.Sprintf("%s-osdisk", vmName)), + CreateOption: to.Ptr(armcompute.DiskCreateOptionTypesFromImage), ManagedDisk: &armcompute.ManagedDiskParameters{ - StorageAccountType: ptr.To(armcompute.StorageAccountTypesStandardLRS), + StorageAccountType: to.Ptr(armcompute.StorageAccountTypesStandardLRS), }, - DeleteOption: ptr.To(armcompute.DiskDeleteOptionTypesDelete), + DeleteOption: to.Ptr(armcompute.DiskDeleteOptionTypesDelete), }, }, OSProfile: &armcompute.OSProfile{ - ComputerName: ptr.To(vmName), - AdminUsername: ptr.To("azureuser"), + ComputerName: to.Ptr(vmName), + AdminUsername: to.Ptr("azureuser"), // Use password authentication for
integration tests (simpler than SSH keys) - AdminPassword: ptr.To("OvmIntegTest2024!"), + AdminPassword: to.Ptr("OvmIntegTest2024!"), LinuxConfiguration: &armcompute.LinuxConfiguration{ - DisablePasswordAuthentication: ptr.To(false), + DisablePasswordAuthentication: to.Ptr(false), }, }, NetworkProfile: &armcompute.NetworkProfile{ NetworkInterfaces: []*armcompute.NetworkInterfaceReference{ { - ID: ptr.To(nicID), + ID: to.Ptr(nicID), Properties: &armcompute.NetworkInterfaceReferenceProperties{ - Primary: ptr.To(true), + Primary: to.Ptr(true), }, }, }, }, }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "test": ptr.To("compute-virtual-machine-run-command"), + "purpose": to.Ptr("overmind-integration-tests"), + "test": to.Ptr("compute-virtual-machine-run-command"), }, }, nil) if err != nil { @@ -563,18 +563,18 @@ func createVirtualMachineRunCommand(ctx context.Context, client *armcompute.Virt // Create the run command with a simple shell script // Reference: https://learn.microsoft.com/en-us/rest/api/compute/virtual-machine-run-commands/create-or-update?view=rest-compute-2025-04-01&tabs=HTTP poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, vmName, runCommandName, armcompute.VirtualMachineRunCommand{ - Location: ptr.To(location), + Location: to.Ptr(location), Properties: &armcompute.VirtualMachineRunCommandProperties{ Source: &armcompute.VirtualMachineRunCommandScriptSource{ - Script: ptr.To("#!/bin/bash\necho 'Hello from Overmind integration test'\n"), + Script: to.Ptr("#!/bin/bash\necho 'Hello from Overmind integration test'\n"), }, - AsyncExecution: ptr.To(false), - RunAsUser: ptr.To("azureuser"), - TimeoutInSeconds: ptr.To[int32](3600), + AsyncExecution: to.Ptr(false), + RunAsUser: to.Ptr("azureuser"), + TimeoutInSeconds: to.Ptr(int32(3600)), }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "test": ptr.To("compute-virtual-machine-run-command"), + "purpose": to.Ptr("overmind-integration-tests"), + "test":
to.Ptr("compute-virtual-machine-run-command"), }, }, nil) if err != nil { @@ -663,7 +663,7 @@ func deleteVirtualMachineRunCommand(ctx context.Context, client *armcompute.Virt func deleteVirtualMachineForRunCommand(ctx context.Context, client *armcompute.VirtualMachinesClient, resourceGroupName, vmName string) error { // Use forceDeletion to speed up cleanup poller, err := client.BeginDelete(ctx, resourceGroupName, vmName, &armcompute.VirtualMachinesClientBeginDeleteOptions{ - ForceDeletion: ptr.To(true), + ForceDeletion: to.Ptr(true), }) if err != nil { var respErr *azcore.ResponseError diff --git a/sources/azure/integration-tests/compute-virtual-machine-scale-set_test.go b/sources/azure/integration-tests/compute-virtual-machine-scale-set_test.go index f71515fc..3a79bec0 100644 --- a/sources/azure/integration-tests/compute-virtual-machine-scale-set_test.go +++ b/sources/azure/integration-tests/compute-virtual-machine-scale-set_test.go @@ -12,10 +12,10 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -342,22 +342,22 @@ func createVirtualNetworkForVMSS(ctx context.Context, client *armnetwork.Virtual // Create the VNet poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, vnetName, armnetwork.VirtualNetwork{ - Location: ptr.To(location), + Location: to.Ptr(location), Properties: &armnetwork.VirtualNetworkPropertiesFormat{ AddressSpace: &armnetwork.AddressSpace{ - AddressPrefixes: []*string{ptr.To("10.1.0.0/16")}, + AddressPrefixes: []*string{to.Ptr("10.1.0.0/16")}, }, Subnets: []*armnetwork.Subnet{ { - Name:
ptr.To(integrationTestVMSSSubnetName), + Name: to.Ptr(integrationTestVMSSSubnetName), Properties: &armnetwork.SubnetPropertiesFormat{ - AddressPrefix: ptr.To("10.1.0.0/24"), + AddressPrefix: to.Ptr("10.1.0.0/24"), }, }, }, }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), + "purpose": to.Ptr("overmind-integration-tests"), }, }, nil) if err != nil { @@ -408,53 +408,53 @@ func createVirtualMachineScaleSet(ctx context.Context, client *armcompute.Virtua // Create the VMSS poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, vmssName, armcompute.VirtualMachineScaleSet{ - Location: ptr.To(location), + Location: to.Ptr(location), SKU: &armcompute.SKU{ - Name: ptr.To("Standard_B1s"), // Burstable B-series VM - cheaper and more widely available - Tier: ptr.To("Standard"), - Capacity: ptr.To[int64](1), // Start with 1 instance for testing + Name: to.Ptr("Standard_B1s"), // Burstable B-series VM - cheaper and more widely available + Tier: to.Ptr("Standard"), + Capacity: to.Ptr(int64(1)), // Start with 1 instance for testing }, Properties: &armcompute.VirtualMachineScaleSetProperties{ UpgradePolicy: &armcompute.UpgradePolicy{ - Mode: ptr.To(armcompute.UpgradeModeManual), + Mode: to.Ptr(armcompute.UpgradeModeManual), }, VirtualMachineProfile: &armcompute.VirtualMachineScaleSetVMProfile{ OSProfile: &armcompute.VirtualMachineScaleSetOSProfile{ - ComputerNamePrefix: ptr.To(vmssName), - AdminUsername: ptr.To("azureuser"), - AdminPassword: ptr.To("OvmIntegTest2024!"), + ComputerNamePrefix: to.Ptr(vmssName), + AdminUsername: to.Ptr("azureuser"), + AdminPassword: to.Ptr("OvmIntegTest2024!"), LinuxConfiguration: &armcompute.LinuxConfiguration{ - DisablePasswordAuthentication: ptr.To(false), + DisablePasswordAuthentication: to.Ptr(false), }, }, StorageProfile: &armcompute.VirtualMachineScaleSetStorageProfile{ ImageReference: &armcompute.ImageReference{ - Publisher: ptr.To("Canonical"), - Offer: ptr.To("0001-com-ubuntu-server-jammy"), - SKU: ptr.To("22_04-lts"), // x64 image for
B-series VM - Version: ptr.To("latest"), + Publisher: to.Ptr("Canonical"), + Offer: to.Ptr("0001-com-ubuntu-server-jammy"), + SKU: to.Ptr("22_04-lts"), // x64 image for B-series VM + Version: to.Ptr("latest"), }, OSDisk: &armcompute.VirtualMachineScaleSetOSDisk{ - CreateOption: ptr.To(armcompute.DiskCreateOptionTypesFromImage), + CreateOption: to.Ptr(armcompute.DiskCreateOptionTypesFromImage), ManagedDisk: &armcompute.VirtualMachineScaleSetManagedDiskParameters{ - StorageAccountType: ptr.To(armcompute.StorageAccountTypesStandardLRS), + StorageAccountType: to.Ptr(armcompute.StorageAccountTypesStandardLRS), }, }, }, NetworkProfile: &armcompute.VirtualMachineScaleSetNetworkProfile{ NetworkInterfaceConfigurations: []*armcompute.VirtualMachineScaleSetNetworkConfiguration{ { - Name: ptr.To("vmss-nic-config"), + Name: to.Ptr("vmss-nic-config"), Properties: &armcompute.VirtualMachineScaleSetNetworkConfigurationProperties{ - Primary: ptr.To(true), + Primary: to.Ptr(true), IPConfigurations: []*armcompute.VirtualMachineScaleSetIPConfiguration{ { - Name: ptr.To("ipconfig1"), + Name: to.Ptr("ipconfig1"), Properties: &armcompute.VirtualMachineScaleSetIPConfigurationProperties{ Subnet: &armcompute.APIEntityReference{ - ID: ptr.To(subnetID), + ID: to.Ptr(subnetID), }, - Primary: ptr.To(true), + Primary: to.Ptr(true), }, }, }, @@ -465,8 +465,8 @@ func createVirtualMachineScaleSet(ctx context.Context, client *armcompute.Virtua }, }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "test": ptr.To("compute-virtual-machine-scale-set"), + "purpose": to.Ptr("overmind-integration-tests"), + "test": to.Ptr("compute-virtual-machine-scale-set"), }, }, nil) if err != nil { @@ -485,53 +485,53 @@ func createVirtualMachineScaleSet(ctx context.Context, client *armcompute.Virtua // Retry creation retryPoller, retryErr := client.BeginCreateOrUpdate(ctx, resourceGroupName, vmssName, armcompute.VirtualMachineScaleSet{ - Location: ptr.To(location), + Location: to.Ptr(location), SKU: &armcompute.SKU{ - Name:
ptr.To("Standard_B1s"), - Tier: ptr.To("Standard"), - Capacity: ptr.To[int64](1), + Name: to.Ptr("Standard_B1s"), + Tier: to.Ptr("Standard"), + Capacity: to.Ptr(int64(1)), }, Properties: &armcompute.VirtualMachineScaleSetProperties{ UpgradePolicy: &armcompute.UpgradePolicy{ - Mode: ptr.To(armcompute.UpgradeModeManual), + Mode: to.Ptr(armcompute.UpgradeModeManual), }, VirtualMachineProfile: &armcompute.VirtualMachineScaleSetVMProfile{ OSProfile: &armcompute.VirtualMachineScaleSetOSProfile{ - ComputerNamePrefix: ptr.To(vmssName), - AdminUsername: ptr.To("azureuser"), - AdminPassword: ptr.To("OvmIntegTest2024!"), + ComputerNamePrefix: to.Ptr(vmssName), + AdminUsername: to.Ptr("azureuser"), + AdminPassword: to.Ptr("OvmIntegTest2024!"), LinuxConfiguration: &armcompute.LinuxConfiguration{ - DisablePasswordAuthentication: ptr.To(false), + DisablePasswordAuthentication: to.Ptr(false), }, }, StorageProfile: &armcompute.VirtualMachineScaleSetStorageProfile{ ImageReference: &armcompute.ImageReference{ - Publisher: ptr.To("Canonical"), - Offer: ptr.To("0001-com-ubuntu-server-jammy"), - SKU: ptr.To("22_04-lts"), - Version: ptr.To("latest"), + Publisher: to.Ptr("Canonical"), + Offer: to.Ptr("0001-com-ubuntu-server-jammy"), + SKU: to.Ptr("22_04-lts"), + Version: to.Ptr("latest"), }, OSDisk: &armcompute.VirtualMachineScaleSetOSDisk{ - CreateOption: ptr.To(armcompute.DiskCreateOptionTypesFromImage), + CreateOption: to.Ptr(armcompute.DiskCreateOptionTypesFromImage), ManagedDisk: &armcompute.VirtualMachineScaleSetManagedDiskParameters{ - StorageAccountType: ptr.To(armcompute.StorageAccountTypesStandardLRS), + StorageAccountType: to.Ptr(armcompute.StorageAccountTypesStandardLRS), }, }, }, NetworkProfile: &armcompute.VirtualMachineScaleSetNetworkProfile{ NetworkInterfaceConfigurations: []*armcompute.VirtualMachineScaleSetNetworkConfiguration{ { - Name: ptr.To("vmss-nic-config"), + Name: to.Ptr("vmss-nic-config"), Properties: &armcompute.VirtualMachineScaleSetNetworkConfigurationProperties{ - Primary: ptr.To(true), + Primary:
to.Ptr(true), IPConfigurations: []*armcompute.VirtualMachineScaleSetIPConfiguration{ { - Name: ptr.To("ipconfig1"), + Name: to.Ptr("ipconfig1"), Properties: &armcompute.VirtualMachineScaleSetIPConfigurationProperties{ Subnet: &armcompute.APIEntityReference{ - ID: ptr.To(subnetID), + ID: to.Ptr(subnetID), }, - Primary: ptr.To(true), + Primary: to.Ptr(true), }, }, }, @@ -542,8 +542,8 @@ func createVirtualMachineScaleSet(ctx context.Context, client *armcompute.Virtua }, }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "test": ptr.To("compute-virtual-machine-scale-set"), + "purpose": to.Ptr("overmind-integration-tests"), + "test": to.Ptr("compute-virtual-machine-scale-set"), }, }, nil) if retryErr != nil { @@ -680,7 +680,7 @@ func waitForVMSSAvailable(ctx context.Context, client *armcompute.VirtualMachine func deleteVirtualMachineScaleSet(ctx context.Context, client *armcompute.VirtualMachineScaleSetsClient, resourceGroupName, vmssName string) error { // Use forceDeletion to speed up cleanup poller, err := client.BeginDelete(ctx, resourceGroupName, vmssName, &armcompute.VirtualMachineScaleSetsClientBeginDeleteOptions{ - ForceDeletion: ptr.To(true), + ForceDeletion: to.Ptr(true), }) if err != nil { var respErr *azcore.ResponseError diff --git a/sources/azure/integration-tests/compute-virtual-machine_test.go b/sources/azure/integration-tests/compute-virtual-machine_test.go index 2ef716e8..dfb76466 100644 --- a/sources/azure/integration-tests/compute-virtual-machine_test.go +++ b/sources/azure/integration-tests/compute-virtual-machine_test.go @@ -11,10 +11,10 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" log
"github.com/sirupsen/logrus" - "k8s.io/utils/ptr" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -307,22 +307,22 @@ func createVirtualNetwork(ctx context.Context, client *armnetwork.VirtualNetwork // Create the VNet poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, vnetName, armnetwork.VirtualNetwork{ - Location: ptr.To(location), + Location: to.Ptr(location), Properties: &armnetwork.VirtualNetworkPropertiesFormat{ AddressSpace: &armnetwork.AddressSpace{ - AddressPrefixes: []*string{ptr.To("10.0.0.0/16")}, + AddressPrefixes: []*string{to.Ptr("10.0.0.0/16")}, }, Subnets: []*armnetwork.Subnet{ { - Name: ptr.To(integrationTestSubnetName), + Name: to.Ptr(integrationTestSubnetName), Properties: &armnetwork.SubnetPropertiesFormat{ - AddressPrefix: ptr.To("10.0.0.0/24"), + AddressPrefix: to.Ptr("10.0.0.0/24"), }, }, }, }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), + "purpose": to.Ptr("overmind-integration-tests"), }, }, nil) if err != nil { @@ -349,22 +349,22 @@ func createNetworkInterface(ctx context.Context, client *armnetwork.InterfacesCl // Create the NIC poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, nicName, armnetwork.Interface{ - Location: ptr.To(location), + Location: to.Ptr(location), Properties: &armnetwork.InterfacePropertiesFormat{ IPConfigurations: []*armnetwork.InterfaceIPConfiguration{ { - Name: ptr.To("ipconfig1"), + Name: to.Ptr("ipconfig1"), Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{ Subnet: &armnetwork.Subnet{ - ID: ptr.To(subnetID), + ID: to.Ptr(subnetID), }, - PrivateIPAllocationMethod: ptr.To(armnetwork.IPAllocationMethodDynamic), + PrivateIPAllocationMethod: to.Ptr(armnetwork.IPAllocationMethodDynamic), }, }, }, }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), + "purpose": to.Ptr("overmind-integration-tests"), }, }, nil) if err != nil { @@ -401,51 +401,51 @@ func createVirtualMachine(ctx context.Context, client
*armcompute.VirtualMachine // Create the VM poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, vmName, armcompute.VirtualMachine{ - Location: ptr.To(location), + Location: new(location), Properties: &armcompute.VirtualMachineProperties{ HardwareProfile: &armcompute.HardwareProfile{ // Use Standard_D2ps_v5 - ARM-based VM with good availability in westus2 - VMSize: ptr.To(armcompute.VirtualMachineSizeTypes("Standard_D2ps_v5")), + VMSize: new(armcompute.VirtualMachineSizeTypes("Standard_D2ps_v5")), }, StorageProfile: &armcompute.StorageProfile{ ImageReference: &armcompute.ImageReference{ - Publisher: ptr.To("Canonical"), - Offer: ptr.To("0001-com-ubuntu-server-jammy"), - SKU: ptr.To("22_04-lts-arm64"), // ARM64 image for ARM-based VM - Version: ptr.To("latest"), + Publisher: new("Canonical"), + Offer: new("0001-com-ubuntu-server-jammy"), + SKU: new("22_04-lts-arm64"), // ARM64 image for ARM-based VM + Version: new("latest"), }, OSDisk: &armcompute.OSDisk{ - Name: ptr.To(fmt.Sprintf("%s-osdisk", vmName)), - CreateOption: ptr.To(armcompute.DiskCreateOptionTypesFromImage), + Name: new(fmt.Sprintf("%s-osdisk", vmName)), + CreateOption: new(armcompute.DiskCreateOptionTypesFromImage), ManagedDisk: &armcompute.ManagedDiskParameters{ - StorageAccountType: ptr.To(armcompute.StorageAccountTypesStandardLRS), + StorageAccountType: new(armcompute.StorageAccountTypesStandardLRS), }, - DeleteOption: ptr.To(armcompute.DiskDeleteOptionTypesDelete), + DeleteOption: new(armcompute.DiskDeleteOptionTypesDelete), }, }, OSProfile: &armcompute.OSProfile{ - ComputerName: ptr.To(vmName), - AdminUsername: ptr.To("azureuser"), + ComputerName: new(vmName), + AdminUsername: new("azureuser"), // Use password authentication for integration tests (simpler than SSH keys) - AdminPassword: ptr.To("OvmIntegTest2024!"), + AdminPassword: new("OvmIntegTest2024!"), LinuxConfiguration: &armcompute.LinuxConfiguration{ - DisablePasswordAuthentication: ptr.To(false), + 
DisablePasswordAuthentication: new(false), }, }, NetworkProfile: &armcompute.NetworkProfile{ NetworkInterfaces: []*armcompute.NetworkInterfaceReference{ { - ID: ptr.To(nicID), + ID: new(nicID), Properties: &armcompute.NetworkInterfaceReferenceProperties{ - Primary: ptr.To(true), + Primary: new(true), }, }, }, }, }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "test": ptr.To("compute-virtual-machine"), + "purpose": new("overmind-integration-tests"), + "test": new("compute-virtual-machine"), }, }, nil) if err != nil { @@ -525,7 +524,7 @@ func waitForVMAvailable(ctx context.Context, client *armcompute.VirtualMachinesC func deleteVirtualMachine(ctx context.Context, client *armcompute.VirtualMachinesClient, resourceGroupName, vmName string) error { // Use forceDeletion to speed up cleanup poller, err := client.BeginDelete(ctx, resourceGroupName, vmName, &armcompute.VirtualMachinesClientBeginDeleteOptions{ - ForceDeletion: ptr.To(true), + ForceDeletion: new(true), }) if err != nil { var respErr *azcore.ResponseError diff --git a/sources/azure/integration-tests/dbforpostgresql-database_test.go b/sources/azure/integration-tests/dbforpostgresql-database_test.go index e2efc7ed..2b685d94 100644 --- a/sources/azure/integration-tests/dbforpostgresql-database_test.go +++ b/sources/azure/integration-tests/dbforpostgresql-database_test.go @@ -15,7 +15,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresqlflexibleservers/v5" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -309,23 +308,23 @@ func createPostgreSQLFlexibleServer(ctx context.Context, client *armpostgresqlfl // Create the PostgreSQL Flexible Server // Using Burstable tier for cost-effective testing poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, serverName, 
armpostgresqlflexibleservers.Server{ - Location: ptr.To(location), + Location: new(location), Properties: &armpostgresqlflexibleservers.ServerProperties{ - AdministratorLogin: ptr.To(adminLogin), - AdministratorLoginPassword: ptr.To(adminPassword), - Version: ptr.To(armpostgresqlflexibleservers.PostgresMajorVersion("14")), - Storage: &armpostgresqlflexibleservers.Storage{StorageSizeGB: ptr.To[int32](32)}, - Backup: &armpostgresqlflexibleservers.Backup{BackupRetentionDays: ptr.To[int32](7), GeoRedundantBackup: ptr.To(armpostgresqlflexibleservers.GeographicallyRedundantBackupDisabled)}, - Network: &armpostgresqlflexibleservers.Network{PublicNetworkAccess: ptr.To(armpostgresqlflexibleservers.ServerPublicNetworkAccessStateEnabled)}, + AdministratorLogin: new(adminLogin), + AdministratorLoginPassword: new(adminPassword), + Version: new(armpostgresqlflexibleservers.PostgresMajorVersion("14")), + Storage: &armpostgresqlflexibleservers.Storage{StorageSizeGB: new(int32(32))}, + Backup: &armpostgresqlflexibleservers.Backup{BackupRetentionDays: new(int32(7)), GeoRedundantBackup: new(armpostgresqlflexibleservers.GeographicallyRedundantBackupDisabled)}, + Network: &armpostgresqlflexibleservers.Network{PublicNetworkAccess: new(armpostgresqlflexibleservers.ServerPublicNetworkAccessStateEnabled)}, HighAvailability: nil, // High availability disabled by not setting it }, SKU: &armpostgresqlflexibleservers.SKU{ - Name: ptr.To("Standard_B1ms"), // Burstable tier, 1 vCore, 2GB RAM - Tier: ptr.To(armpostgresqlflexibleservers.SKUTierBurstable), + Name: new("Standard_B1ms"), // Burstable tier, 1 vCore, 2GB RAM + Tier: new(armpostgresqlflexibleservers.SKUTierBurstable), }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "test": ptr.To("dbforpostgresql-database"), + "purpose": new("overmind-integration-tests"), + "test": new("dbforpostgresql-database"), }, }, nil) if err != nil { @@ -407,8 +406,8 @@ func createPostgreSQLDatabase(ctx context.Context, client 
*armpostgresqlflexible // Create the PostgreSQL database poller, err := client.BeginCreate(ctx, resourceGroupName, serverName, databaseName, armpostgresqlflexibleservers.Database{ Properties: &armpostgresqlflexibleservers.DatabaseProperties{ - Charset: ptr.To("UTF8"), - Collation: ptr.To("en_US.utf8"), + Charset: new("UTF8"), + Collation: new("en_US.utf8"), }, }, nil) if err != nil { diff --git a/sources/azure/integration-tests/documentdb-database-accounts_test.go b/sources/azure/integration-tests/documentdb-database-accounts_test.go index 6d59dbb4..8b8eb3e3 100644 --- a/sources/azure/integration-tests/documentdb-database-accounts_test.go +++ b/sources/azure/integration-tests/documentdb-database-accounts_test.go @@ -10,10 +10,9 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos/v3" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdpcache" @@ -189,24 +188,24 @@ func createCosmosDBAccount(ctx context.Context, client *armcosmos.DatabaseAccoun // Create the Cosmos DB account // Using SQL API as the default, which is the most common poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, accountName, armcosmos.DatabaseAccountCreateUpdateParameters{ - Location: ptr.To(location), - Kind: ptr.To(armcosmos.DatabaseAccountKindGlobalDocumentDB), + Location: new(location), + Kind: new(armcosmos.DatabaseAccountKindGlobalDocumentDB), Properties: &armcosmos.DatabaseAccountCreateUpdateProperties{ - DatabaseAccountOfferType: ptr.To("Standard"), + DatabaseAccountOfferType: new("Standard"), Locations: []*armcosmos.Location{ { - LocationName: ptr.To(location), - FailoverPriority: ptr.To[int32](0), - IsZoneRedundant: ptr.To(false), + LocationName: 
new(location), + FailoverPriority: new(int32(0)), + IsZoneRedundant: new(false), }, }, ConsistencyPolicy: &armcosmos.ConsistencyPolicy{ - DefaultConsistencyLevel: ptr.To(armcosmos.DefaultConsistencyLevelSession), + DefaultConsistencyLevel: new(armcosmos.DefaultConsistencyLevelSession), }, }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "test": ptr.To("documentdb-database-accounts"), + "purpose": new("overmind-integration-tests"), + "test": new("documentdb-database-accounts"), }, }, nil) if err != nil { diff --git a/sources/azure/integration-tests/helpers_test.go b/sources/azure/integration-tests/helpers_test.go index 8c27e165..7690f315 100644 --- a/sources/azure/integration-tests/helpers_test.go +++ b/sources/azure/integration-tests/helpers_test.go @@ -6,7 +6,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" ) // Shared constants for integration tests @@ -26,10 +25,10 @@ func createResourceGroup(ctx context.Context, client *armresources.ResourceGroup // Create the resource group _, err = client.CreateOrUpdate(ctx, resourceGroupName, armresources.ResourceGroup{ - Location: ptr.To(location), + Location: new(location), Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "managed": ptr.To("true"), + "purpose": new("overmind-integration-tests"), + "managed": new("true"), }, }, nil) if err != nil { diff --git a/sources/azure/integration-tests/keyvault-vault_test.go b/sources/azure/integration-tests/keyvault-vault_test.go index 187756c2..614e9dfb 100644 --- a/sources/azure/integration-tests/keyvault-vault_test.go +++ b/sources/azure/integration-tests/keyvault-vault_test.go @@ -13,7 +13,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault/v2" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" 
"github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdpcache" @@ -212,20 +211,20 @@ func createKeyVault(ctx context.Context, client *armkeyvault.VaultsClient, resou // Key Vault names must be globally unique and 3-24 characters // They can only contain alphanumeric characters and hyphens params := armkeyvault.VaultCreateOrUpdateParameters{ - Location: ptr.To(location), + Location: new(location), Properties: &armkeyvault.VaultProperties{ - TenantID: ptr.To(tenantID), + TenantID: new(tenantID), SKU: &armkeyvault.SKU{ - Family: ptr.To(armkeyvault.SKUFamilyA), - Name: ptr.To(armkeyvault.SKUNameStandard), + Family: new(armkeyvault.SKUFamilyA), + Name: new(armkeyvault.SKUNameStandard), }, AccessPolicies: []*armkeyvault.AccessPolicyEntry{ // For integration tests, we create with minimal configuration. }, }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "test": ptr.To("keyvault-vault"), + "purpose": new("overmind-integration-tests"), + "test": new("keyvault-vault"), }, } diff --git a/sources/azure/integration-tests/managedidentity-user-assigned-identity_test.go b/sources/azure/integration-tests/managedidentity-user-assigned-identity_test.go index a622263a..6330e078 100644 --- a/sources/azure/integration-tests/managedidentity-user-assigned-identity_test.go +++ b/sources/azure/integration-tests/managedidentity-user-assigned-identity_test.go @@ -13,7 +13,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -255,10 +254,10 @@ func createUserAssignedIdentity(ctx context.Context, client *armmsi.UserAssigned // Create the User Assigned Identity resp, err := client.CreateOrUpdate(ctx, resourceGroupName, identityName, armmsi.Identity{ - Location: ptr.To(location), + Location: 
new(location), Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "test": ptr.To("managedidentity-user-assigned-identity"), + "purpose": new("overmind-integration-tests"), + "test": new("managedidentity-user-assigned-identity"), }, }, nil) if err != nil { diff --git a/sources/azure/integration-tests/network-application-gateway_test.go b/sources/azure/integration-tests/network-application-gateway_test.go index 7525eee2..bbcbbefa 100644 --- a/sources/azure/integration-tests/network-application-gateway_test.go +++ b/sources/azure/integration-tests/network-application-gateway_test.go @@ -10,10 +10,9 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -374,14 +373,14 @@ func createVirtualNetworkForAG(ctx context.Context, client *armnetwork.VirtualNe // Create the VNet poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, vnetName, armnetwork.VirtualNetwork{ - Location: ptr.To(location), + Location: new(location), Properties: &armnetwork.VirtualNetworkPropertiesFormat{ AddressSpace: &armnetwork.AddressSpace{ - AddressPrefixes: []*string{ptr.To("10.3.0.0/16")}, + AddressPrefixes: []*string{new("10.3.0.0/16")}, }, }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), + "purpose": new("overmind-integration-tests"), }, }, nil) if err != nil { @@ -410,7 +409,7 @@ func createAGSubnet(ctx context.Context, client *armnetwork.SubnetsClient, resou // Create the subnet with /24 address space for Application Gateway poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, vnetName, subnetName, armnetwork.Subnet{ 
Properties: &armnetwork.SubnetPropertiesFormat{ - AddressPrefix: ptr.To("10.3.0.0/24"), + AddressPrefix: new("10.3.0.0/24"), }, }, nil) if err != nil { @@ -463,16 +462,16 @@ func createPublicIPForAG(ctx context.Context, client *armnetwork.PublicIPAddress // Create the public IP address with Standard SKU (required for Application Gateway v2) poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, publicIPName, armnetwork.PublicIPAddress{ - Location: ptr.To(location), + Location: new(location), Properties: &armnetwork.PublicIPAddressPropertiesFormat{ - PublicIPAllocationMethod: ptr.To(armnetwork.IPAllocationMethodStatic), - PublicIPAddressVersion: ptr.To(armnetwork.IPVersionIPv4), + PublicIPAllocationMethod: new(armnetwork.IPAllocationMethodStatic), + PublicIPAddressVersion: new(armnetwork.IPVersionIPv4), }, SKU: &armnetwork.PublicIPAddressSKU{ - Name: ptr.To(armnetwork.PublicIPAddressSKUNameStandard), + Name: new(armnetwork.PublicIPAddressSKUNameStandard), }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), + "purpose": new("overmind-integration-tests"), }, }, nil) if err != nil { diff --git a/sources/azure/integration-tests/network-load-balancer_test.go b/sources/azure/integration-tests/network-load-balancer_test.go index b6cd87ba..3746285b 100644 --- a/sources/azure/integration-tests/network-load-balancer_test.go +++ b/sources/azure/integration-tests/network-load-balancer_test.go @@ -9,10 +9,9 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdpcache" @@ -369,22 +368,22 @@ func createVirtualNetworkForLB(ctx context.Context, client 
*armnetwork.VirtualNe // Create the VNet poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, vnetName, armnetwork.VirtualNetwork{ - Location: ptr.To(location), + Location: new(location), Properties: &armnetwork.VirtualNetworkPropertiesFormat{ AddressSpace: &armnetwork.AddressSpace{ - AddressPrefixes: []*string{ptr.To("10.2.0.0/16")}, + AddressPrefixes: []*string{new("10.2.0.0/16")}, }, Subnets: []*armnetwork.Subnet{ { - Name: ptr.To(integrationTestSubnetNameForLB), + Name: new(integrationTestSubnetNameForLB), Properties: &armnetwork.SubnetPropertiesFormat{ - AddressPrefix: ptr.To("10.2.0.0/24"), + AddressPrefix: new("10.2.0.0/24"), }, }, }, }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), + "purpose": new("overmind-integration-tests"), }, }, nil) if err != nil { @@ -432,16 +431,16 @@ func createPublicIPForLB(ctx context.Context, client *armnetwork.PublicIPAddress // Create the public IP address poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, publicIPName, armnetwork.PublicIPAddress{ - Location: ptr.To(location), + Location: new(location), Properties: &armnetwork.PublicIPAddressPropertiesFormat{ - PublicIPAllocationMethod: ptr.To(armnetwork.IPAllocationMethodStatic), - PublicIPAddressVersion: ptr.To(armnetwork.IPVersionIPv4), + PublicIPAllocationMethod: new(armnetwork.IPAllocationMethodStatic), + PublicIPAddressVersion: new(armnetwork.IPVersionIPv4), }, SKU: &armnetwork.PublicIPAddressSKU{ - Name: ptr.To(armnetwork.PublicIPAddressSKUNameStandard), + Name: new(armnetwork.PublicIPAddressSKUNameStandard), }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), + "purpose": new("overmind-integration-tests"), }, }, nil) if err != nil { @@ -489,47 +488,47 @@ func createPublicLoadBalancer(ctx context.Context, client *armnetwork.LoadBalanc // Create the public load balancer poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, lbName, armnetwork.LoadBalancer{ - Location: 
ptr.To(location), + Location: new(location), Properties: &armnetwork.LoadBalancerPropertiesFormat{ FrontendIPConfigurations: []*armnetwork.FrontendIPConfiguration{ { - Name: ptr.To("frontend-ip-config-public"), + Name: new("frontend-ip-config-public"), Properties: &armnetwork.FrontendIPConfigurationPropertiesFormat{ PublicIPAddress: &armnetwork.PublicIPAddress{ - ID: ptr.To(publicIPID), + ID: new(publicIPID), }, }, }, }, BackendAddressPools: []*armnetwork.BackendAddressPool{ { - Name: ptr.To("backend-pool"), + Name: new("backend-pool"), }, }, LoadBalancingRules: []*armnetwork.LoadBalancingRule{ { - Name: ptr.To("lb-rule"), + Name: new("lb-rule"), Properties: &armnetwork.LoadBalancingRulePropertiesFormat{ FrontendIPConfiguration: &armnetwork.SubResource{ - ID: ptr.To(fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/frontendIPConfigurations/frontend-ip-config-public", os.Getenv("AZURE_SUBSCRIPTION_ID"), resourceGroupName, lbName)), + ID: new(fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/frontendIPConfigurations/frontend-ip-config-public", os.Getenv("AZURE_SUBSCRIPTION_ID"), resourceGroupName, lbName)), }, BackendAddressPool: &armnetwork.SubResource{ - ID: ptr.To(fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/backendAddressPools/backend-pool", os.Getenv("AZURE_SUBSCRIPTION_ID"), resourceGroupName, lbName)), + ID: new(fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/backendAddressPools/backend-pool", os.Getenv("AZURE_SUBSCRIPTION_ID"), resourceGroupName, lbName)), }, - Protocol: ptr.To(armnetwork.TransportProtocolTCP), - FrontendPort: ptr.To[int32](80), - BackendPort: ptr.To[int32](80), - EnableFloatingIP: ptr.To(false), - IdleTimeoutInMinutes: ptr.To[int32](4), + Protocol: new(armnetwork.TransportProtocolTCP), + FrontendPort: new(int32(80)), + BackendPort: new(int32(80)), + 
EnableFloatingIP: new(false), + IdleTimeoutInMinutes: new(int32(4)), }, }, }, }, SKU: &armnetwork.LoadBalancerSKU{ - Name: ptr.To(armnetwork.LoadBalancerSKUNameStandard), + Name: new(armnetwork.LoadBalancerSKUNameStandard), }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), + "purpose": new("overmind-integration-tests"), }, }, nil) if err != nil { @@ -556,49 +555,49 @@ func createInternalLoadBalancer(ctx context.Context, client *armnetwork.LoadBala // Create the internal load balancer poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, lbName, armnetwork.LoadBalancer{ - Location: ptr.To(location), + Location: new(location), Properties: &armnetwork.LoadBalancerPropertiesFormat{ FrontendIPConfigurations: []*armnetwork.FrontendIPConfiguration{ { - Name: ptr.To("frontend-ip-config-internal"), + Name: new("frontend-ip-config-internal"), Properties: &armnetwork.FrontendIPConfigurationPropertiesFormat{ Subnet: &armnetwork.Subnet{ - ID: ptr.To(subnetID), + ID: new(subnetID), }, - PrivateIPAddress: ptr.To("10.2.0.5"), - PrivateIPAllocationMethod: ptr.To(armnetwork.IPAllocationMethodStatic), + PrivateIPAddress: new("10.2.0.5"), + PrivateIPAllocationMethod: new(armnetwork.IPAllocationMethodStatic), }, }, }, BackendAddressPools: []*armnetwork.BackendAddressPool{ { - Name: ptr.To("backend-pool"), + Name: new("backend-pool"), }, }, LoadBalancingRules: []*armnetwork.LoadBalancingRule{ { - Name: ptr.To("lb-rule"), + Name: new("lb-rule"), Properties: &armnetwork.LoadBalancingRulePropertiesFormat{ FrontendIPConfiguration: &armnetwork.SubResource{ - ID: ptr.To(fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/frontendIPConfigurations/frontend-ip-config-internal", os.Getenv("AZURE_SUBSCRIPTION_ID"), resourceGroupName, lbName)), + ID: new(fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/frontendIPConfigurations/frontend-ip-config-internal", 
os.Getenv("AZURE_SUBSCRIPTION_ID"), resourceGroupName, lbName)), }, BackendAddressPool: &armnetwork.SubResource{ - ID: ptr.To(fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/backendAddressPools/backend-pool", os.Getenv("AZURE_SUBSCRIPTION_ID"), resourceGroupName, lbName)), + ID: new(fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/loadBalancers/%s/backendAddressPools/backend-pool", os.Getenv("AZURE_SUBSCRIPTION_ID"), resourceGroupName, lbName)), }, - Protocol: ptr.To(armnetwork.TransportProtocolTCP), - FrontendPort: ptr.To[int32](80), - BackendPort: ptr.To[int32](80), - EnableFloatingIP: ptr.To(false), - IdleTimeoutInMinutes: ptr.To[int32](4), + Protocol: new(armnetwork.TransportProtocolTCP), + FrontendPort: new(int32(80)), + BackendPort: new(int32(80)), + EnableFloatingIP: new(false), + IdleTimeoutInMinutes: new(int32(4)), }, }, }, }, SKU: &armnetwork.LoadBalancerSKU{ - Name: ptr.To(armnetwork.LoadBalancerSKUNameStandard), + Name: new(armnetwork.LoadBalancerSKUNameStandard), }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), + "purpose": new("overmind-integration-tests"), }, }, nil) if err != nil { diff --git a/sources/azure/integration-tests/network-network-interface_test.go b/sources/azure/integration-tests/network-network-interface_test.go index a496409b..c0aaf9e0 100644 --- a/sources/azure/integration-tests/network-network-interface_test.go +++ b/sources/azure/integration-tests/network-network-interface_test.go @@ -9,10 +9,9 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" 
"github.com/overmindtech/cli/go/sdpcache" @@ -253,22 +252,22 @@ func createVirtualNetworkForNIC(ctx context.Context, client *armnetwork.VirtualN // Create the VNet poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, vnetName, armnetwork.VirtualNetwork{ - Location: ptr.To(location), + Location: new(location), Properties: &armnetwork.VirtualNetworkPropertiesFormat{ AddressSpace: &armnetwork.AddressSpace{ - AddressPrefixes: []*string{ptr.To("10.1.0.0/16")}, + AddressPrefixes: []*string{new("10.1.0.0/16")}, }, Subnets: []*armnetwork.Subnet{ { - Name: ptr.To(integrationTestSubnetNameForNIC), + Name: new(integrationTestSubnetNameForNIC), Properties: &armnetwork.SubnetPropertiesFormat{ - AddressPrefix: ptr.To("10.1.0.0/24"), + AddressPrefix: new("10.1.0.0/24"), }, }, }, }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), + "purpose": new("overmind-integration-tests"), }, }, nil) if err != nil { diff --git a/sources/azure/integration-tests/network-network-security-group_test.go b/sources/azure/integration-tests/network-network-security-group_test.go index 4618167b..75a1fcce 100644 --- a/sources/azure/integration-tests/network-network-security-group_test.go +++ b/sources/azure/integration-tests/network-network-security-group_test.go @@ -10,10 +10,9 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -318,27 +317,27 @@ func createNetworkSecurityGroup(ctx context.Context, client *armnetwork.Security // Create a basic network security group with a sample security rule // This creates an NSG with a default allow rule for testing poller, err := 
client.BeginCreateOrUpdate(ctx, resourceGroupName, nsgName, armnetwork.SecurityGroup{ - Location: ptr.To(location), + Location: new(location), Properties: &armnetwork.SecurityGroupPropertiesFormat{ SecurityRules: []*armnetwork.SecurityRule{ { - Name: ptr.To("AllowSSH"), + Name: new("AllowSSH"), Properties: &armnetwork.SecurityRulePropertiesFormat{ - Protocol: ptr.To(armnetwork.SecurityRuleProtocolTCP), - SourcePortRange: ptr.To("*"), - DestinationPortRange: ptr.To("22"), - SourceAddressPrefix: ptr.To("*"), - DestinationAddressPrefix: ptr.To("*"), - Access: ptr.To(armnetwork.SecurityRuleAccessAllow), - Priority: ptr.To[int32](1000), - Direction: ptr.To(armnetwork.SecurityRuleDirectionInbound), + Protocol: new(armnetwork.SecurityRuleProtocolTCP), + SourcePortRange: new("*"), + DestinationPortRange: new("22"), + SourceAddressPrefix: new("*"), + DestinationAddressPrefix: new("*"), + Access: new(armnetwork.SecurityRuleAccessAllow), + Priority: new(int32(1000)), + Direction: new(armnetwork.SecurityRuleDirectionInbound), }, }, }, }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "test": ptr.To("network-network-security-group"), + "purpose": new("overmind-integration-tests"), + "test": new("network-network-security-group"), }, }, nil) if err != nil { diff --git a/sources/azure/integration-tests/network-public-ip-address_test.go b/sources/azure/integration-tests/network-public-ip-address_test.go index 9fea6d49..9a5f29c4 100644 --- a/sources/azure/integration-tests/network-public-ip-address_test.go +++ b/sources/azure/integration-tests/network-public-ip-address_test.go @@ -10,10 +10,9 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" 
"github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -296,22 +295,22 @@ func createVirtualNetworkForPIP(ctx context.Context, client *armnetwork.VirtualN // Create the VNet poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, vnetName, armnetwork.VirtualNetwork{ - Location: ptr.To(location), + Location: new(location), Properties: &armnetwork.VirtualNetworkPropertiesFormat{ AddressSpace: &armnetwork.AddressSpace{ - AddressPrefixes: []*string{ptr.To("10.2.0.0/16")}, + AddressPrefixes: []*string{new("10.2.0.0/16")}, }, Subnets: []*armnetwork.Subnet{ { - Name: ptr.To(integrationTestSubnetNameForPIP), + Name: new(integrationTestSubnetNameForPIP), Properties: &armnetwork.SubnetPropertiesFormat{ - AddressPrefix: ptr.To("10.2.0.0/24"), + AddressPrefix: new("10.2.0.0/24"), }, }, }, }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), + "purpose": new("overmind-integration-tests"), }, }, nil) if err != nil { @@ -359,17 +358,17 @@ func createPublicIPAddress(ctx context.Context, client *armnetwork.PublicIPAddre // Create the public IP address poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, publicIPName, armnetwork.PublicIPAddress{ - Location: ptr.To(location), + Location: new(location), Properties: &armnetwork.PublicIPAddressPropertiesFormat{ - PublicIPAddressVersion: ptr.To(armnetwork.IPVersionIPv4), - PublicIPAllocationMethod: ptr.To(armnetwork.IPAllocationMethodStatic), + PublicIPAddressVersion: new(armnetwork.IPVersionIPv4), + PublicIPAllocationMethod: new(armnetwork.IPAllocationMethodStatic), }, SKU: &armnetwork.PublicIPAddressSKU{ - Name: ptr.To(armnetwork.PublicIPAddressSKUNameStandard), + Name: new(armnetwork.PublicIPAddressSKUNameStandard), }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "test": ptr.To("network-public-ip-address"), + "purpose": new("overmind-integration-tests"), + "test": new("network-public-ip-address"), }, }, nil) if err != nil 
{ @@ -463,26 +462,26 @@ func createNetworkInterfaceWithPublicIP(ctx context.Context, client *armnetwork. // Create the NIC poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, nicName, armnetwork.Interface{ - Location: ptr.To(location), + Location: new(location), Properties: &armnetwork.InterfacePropertiesFormat{ IPConfigurations: []*armnetwork.InterfaceIPConfiguration{ { - Name: ptr.To("ipconfig1"), + Name: new("ipconfig1"), Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{ Subnet: &armnetwork.Subnet{ - ID: ptr.To(subnetID), + ID: new(subnetID), }, PublicIPAddress: &armnetwork.PublicIPAddress{ - ID: ptr.To(publicIPID), + ID: new(publicIPID), }, - PrivateIPAllocationMethod: ptr.To(armnetwork.IPAllocationMethodDynamic), + PrivateIPAllocationMethod: new(armnetwork.IPAllocationMethodDynamic), }, }, }, }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "test": ptr.To("network-public-ip-address"), + "purpose": new("overmind-integration-tests"), + "test": new("network-public-ip-address"), }, }, nil) if err != nil { diff --git a/sources/azure/integration-tests/network-route-table_test.go b/sources/azure/integration-tests/network-route-table_test.go index 1de639d7..e788388b 100644 --- a/sources/azure/integration-tests/network-route-table_test.go +++ b/sources/azure/integration-tests/network-route-table_test.go @@ -10,10 +10,9 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -328,13 +327,13 @@ func createRouteTable(ctx context.Context, client *armnetwork.RouteTablesClient, // Create a basic route table poller, err := 
client.BeginCreateOrUpdate(ctx, resourceGroupName, routeTableName, armnetwork.RouteTable{ - Location: ptr.To(location), + Location: new(location), Properties: &armnetwork.RouteTablePropertiesFormat{ // Routes will be added separately as child resources }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "test": ptr.To("network-route-table"), + "purpose": new("overmind-integration-tests"), + "test": new("network-route-table"), }, }, nil) if err != nil { @@ -433,9 +432,9 @@ func createRoute(ctx context.Context, client *armnetwork.RoutesClient, resourceG // This creates a route that will link to a NetworkIP poller, err := client.BeginCreateOrUpdate(ctx, resourceGroupName, routeTableName, routeName, armnetwork.Route{ Properties: &armnetwork.RoutePropertiesFormat{ - AddressPrefix: ptr.To("10.0.0.0/8"), - NextHopType: ptr.To(armnetwork.RouteNextHopTypeVirtualAppliance), - NextHopIPAddress: ptr.To("10.0.0.1"), // This will create a link to stdlib.NetworkIP + AddressPrefix: new("10.0.0.0/8"), + NextHopType: new(armnetwork.RouteNextHopTypeVirtualAppliance), + NextHopIPAddress: new("10.0.0.1"), // This will create a link to stdlib.NetworkIP }, }, nil) if err != nil { diff --git a/sources/azure/integration-tests/network-virtual-network_test.go b/sources/azure/integration-tests/network-virtual-network_test.go index 9a831b64..7a155c81 100644 --- a/sources/azure/integration-tests/network-virtual-network_test.go +++ b/sources/azure/integration-tests/network-virtual-network_test.go @@ -5,7 +5,7 @@ import ( "os" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" log "github.com/sirupsen/logrus" diff --git a/sources/azure/integration-tests/network-zone_test.go b/sources/azure/integration-tests/network-zone_test.go index cd2e363d..4a69d93a 100644 --- 
a/sources/azure/integration-tests/network-zone_test.go +++ b/sources/azure/integration-tests/network-zone_test.go @@ -13,7 +13,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dns/armdns" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -344,13 +343,13 @@ func createDNSZone(ctx context.Context, client *armdns.ZonesClient, resourceGrou // Create the DNS zone resp, err := client.CreateOrUpdate(ctx, resourceGroupName, zoneName, armdns.Zone{ - Location: ptr.To(location), + Location: new(location), Properties: &armdns.ZoneProperties{ - ZoneType: ptr.To(armdns.ZoneTypePublic), + ZoneType: new(armdns.ZoneTypePublic), }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "managed": ptr.To("true"), + "purpose": new("overmind-integration-tests"), + "managed": new("true"), }, }, nil) if err != nil { diff --git a/sources/azure/integration-tests/sql-database_test.go b/sources/azure/integration-tests/sql-database_test.go index be46d5a0..3b7b72ee 100644 --- a/sources/azure/integration-tests/sql-database_test.go +++ b/sources/azure/integration-tests/sql-database_test.go @@ -15,7 +15,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/sql/armsql/v2" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -299,7 +298,7 @@ func createSQLServer(ctx context.Context, client *armsql.ServersClient, resource } var respErr *azcore.ResponseError - if err != nil && !errors.As(err, &respErr) { + if !errors.As(err, &respErr) { // Some other error occurred return fmt.Errorf("failed to check if SQL server exists: %w", err) } @@ -321,15 +320,15 @@ func createSQLServer(ctx context.Context, client 
*armsql.ServersClient, resource } poller, err := client.BeginCreateOrUpdate(ctx, resourceGroup, serverName, armsql.Server{ - Location: ptr.To(location), + Location: new(location), Properties: &armsql.ServerProperties{ - AdministratorLogin: ptr.To(adminLogin), - AdministratorLoginPassword: ptr.To(adminPassword), - Version: ptr.To("12.0"), + AdministratorLogin: new(adminLogin), + AdministratorLoginPassword: new(adminPassword), + Version: new("12.0"), }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "managed": ptr.To("true"), + "purpose": new("overmind-integration-tests"), + "managed": new("true"), }, }, nil) if err != nil { @@ -372,7 +371,7 @@ func createSQLDatabase(ctx context.Context, client *armsql.DatabasesClient, reso } var respErr *azcore.ResponseError - if err != nil && !errors.As(err, &respErr) { + if !errors.As(err, &respErr) { // Some other error occurred return fmt.Errorf("failed to check if SQL database exists: %w", err) } @@ -386,13 +385,13 @@ func createSQLDatabase(ctx context.Context, client *armsql.DatabasesClient, reso // Create the SQL database // Using Basic tier for integration tests (cheaper) poller, err := client.BeginCreateOrUpdate(ctx, resourceGroup, serverName, databaseName, armsql.Database{ - Location: ptr.To(location), + Location: new(location), Properties: &armsql.DatabaseProperties{ - RequestedServiceObjectiveName: ptr.To("Basic"), + RequestedServiceObjectiveName: new("Basic"), }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "managed": ptr.To("true"), + "purpose": new("overmind-integration-tests"), + "managed": new("true"), }, }, nil) if err != nil { diff --git a/sources/azure/integration-tests/storage-blob-container_test.go b/sources/azure/integration-tests/storage-blob-container_test.go index 0ff9b17d..ffda244e 100644 --- a/sources/azure/integration-tests/storage-blob-container_test.go +++ b/sources/azure/integration-tests/storage-blob-container_test.go @@ -15,7 +15,6 @@ 
import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage/v3" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdpcache" @@ -275,17 +274,17 @@ func createStorageAccount(ctx context.Context, client *armstorage.AccountsClient // Create the storage account poller, err := client.BeginCreate(ctx, resourceGroupName, accountName, armstorage.AccountCreateParameters{ - Location: ptr.To(location), - Kind: ptr.To(armstorage.KindStorageV2), + Location: new(location), + Kind: new(armstorage.KindStorageV2), SKU: &armstorage.SKU{ - Name: ptr.To(armstorage.SKUNameStandardLRS), + Name: new(armstorage.SKUNameStandardLRS), }, Properties: &armstorage.AccountPropertiesCreateParameters{ - AccessTier: ptr.To(armstorage.AccessTierHot), + AccessTier: new(armstorage.AccessTierHot), }, Tags: map[string]*string{ - "purpose": ptr.To("overmind-integration-tests"), - "test": ptr.To("storage-blob-container"), + "purpose": new("overmind-integration-tests"), + "test": new("storage-blob-container"), }, }, nil) if err != nil { @@ -372,7 +371,7 @@ func createBlobContainer(ctx context.Context, client *armstorage.BlobContainersC // Create the blob container resp, err := client.Create(ctx, resourceGroupName, accountName, containerName, armstorage.BlobContainer{ ContainerProperties: &armstorage.ContainerProperties{ - PublicAccess: ptr.To(armstorage.PublicAccessNone), + PublicAccess: new(armstorage.PublicAccessNone), }, }, nil) if err != nil { diff --git a/sources/azure/integration-tests/storage-fileshare_test.go b/sources/azure/integration-tests/storage-fileshare_test.go index d9a5a992..092e41d3 100644 --- a/sources/azure/integration-tests/storage-fileshare_test.go +++ b/sources/azure/integration-tests/storage-fileshare_test.go @@ -12,7 +12,6 @@ import ( 
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage/v3" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdpcache" @@ -249,7 +248,7 @@ func createFileShare(ctx context.Context, client *armstorage.FileSharesClient, r // File shares require a quota (size in GB) resp, err := client.Create(ctx, resourceGroupName, accountName, shareName, armstorage.FileShare{ FileShareProperties: &armstorage.FileShareProperties{ - ShareQuota: ptr.To(int32(1)), // 1GB minimum quota + ShareQuota: new(int32(1)), // 1GB minimum quota }, }, nil) if err != nil { diff --git a/sources/azure/manual/README.md b/sources/azure/manual/README.md index ee64ddcc..6834ec72 100644 --- a/sources/azure/manual/README.md +++ b/sources/azure/manual/README.md @@ -32,6 +32,10 @@ This directory contains manually implemented Azure adapters that cannot be gener - Network Security Groups: Referenced through network interfaces - Requires manual parsing and conditional linking based on the resource ID format and provider namespace +**Network Private DNS Zone** (`network-private-dns-zone.go`): +- Discovers Azure Private DNS Zones via `armprivatedns`; uses `MultiResourceGroupBase` and list-by-resource-group pager +- Links zone name to stdlib DNS for resolution; health from provisioning state + ## Implementation Guidelines ### For Detailed Implementation Rules @@ -54,7 +58,7 @@ Refer to the [cursor rules](.cursor/rules/azure-manual-adapter-creation.mdc) for 3. **Handle Complex Resource Linking**: - Parse Azure resource IDs to extract resource names and types - Extract resource identifiers from Azure resource manager format - - Create appropriate linked item queries with correct blast propagation + - Create appropriate linked item queries 4. 
**Include Comprehensive Tests**: - Unit tests for all methods @@ -85,7 +89,7 @@ When reviewing PRs for manual adapters, ensure: ### ✅ Linked Item Queries - [ ] Example values in tests match actual Azure resource formats - [ ] Scopes for linked item queries are correct (verify with linked resource documentation) -- [ ] Blast propagation rules are appropriate for resource relationships +- [ ] Linked item queries are appropriately defined - [ ] All possible resource references are handled (no missing cases) ### ✅ Documentation and References diff --git a/sources/azure/manual/adapters.go b/sources/azure/manual/adapters.go index ba2172fa..aaeba7cb 100644 --- a/sources/azure/manual/adapters.go +++ b/sources/azure/manual/adapters.go @@ -6,13 +6,14 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azidentity" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v3" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/batch/armbatch/v3" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/batch/armbatch/v4" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos/v3" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dns/armdns" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault/v2" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresqlflexibleservers/v5" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/v2" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/sql/armsql/v2" @@ 
-106,11 +107,31 @@ func Adapters(ctx context.Context, subscriptionID string, regions []string, cred return nil, fmt.Errorf("failed to create tables client: %w", err) } + encryptionScopesClient, err := armstorage.NewEncryptionScopesClient(subscriptionID, cred, nil) + if err != nil { + return nil, fmt.Errorf("failed to create encryption scopes client: %w", err) + } + + privateEndpointConnectionsClient, err := armstorage.NewPrivateEndpointConnectionsClient(subscriptionID, cred, nil) + if err != nil { + return nil, fmt.Errorf("failed to create private endpoint connections client: %w", err) + } + virtualNetworksClient, err := armnetwork.NewVirtualNetworksClient(subscriptionID, cred, nil) if err != nil { return nil, fmt.Errorf("failed to create virtual networks client: %w", err) } + subnetsClient, err := armnetwork.NewSubnetsClient(subscriptionID, cred, nil) + if err != nil { + return nil, fmt.Errorf("failed to create subnets client: %w", err) + } + + virtualNetworkPeeringsClient, err := armnetwork.NewVirtualNetworkPeeringsClient(subscriptionID, cred, nil) + if err != nil { + return nil, fmt.Errorf("failed to create virtual network peerings client: %w", err) + } + networkInterfacesClient, err := armnetwork.NewInterfacesClient(subscriptionID, cred, nil) if err != nil { return nil, fmt.Errorf("failed to create network interfaces client: %w", err) @@ -126,6 +147,11 @@ func Adapters(ctx context.Context, subscriptionID string, regions []string, cred return nil, fmt.Errorf("failed to create document db database accounts client: %w", err) } + documentDBPrivateEndpointConnectionsClient, err := armcosmos.NewPrivateEndpointConnectionsClient(subscriptionID, cred, nil) + if err != nil { + return nil, fmt.Errorf("failed to create document db private endpoint connections client: %w", err) + } + keyVaultsClient, err := armkeyvault.NewVaultsClient(subscriptionID, cred, nil) if err != nil { return nil, fmt.Errorf("failed to create key vaults client: %w", err) @@ -141,16 +167,41 @@ func 
Adapters(ctx context.Context, subscriptionID string, regions []string, cred return nil, fmt.Errorf("failed to create public ip addresses client: %w", err) } + publicIPPrefixesClient, err := armnetwork.NewPublicIPPrefixesClient(subscriptionID, cred, nil) + if err != nil { + return nil, fmt.Errorf("failed to create public ip prefixes client: %w", err) + } + + ddosProtectionPlansClient, err := armnetwork.NewDdosProtectionPlansClient(subscriptionID, cred, nil) + if err != nil { + return nil, fmt.Errorf("failed to create DDoS protection plans client: %w", err) + } + loadBalancersClient, err := armnetwork.NewLoadBalancersClient(subscriptionID, cred, nil) if err != nil { return nil, fmt.Errorf("failed to create load balancers client: %w", err) } + privateEndpointsClient, err := armnetwork.NewPrivateEndpointsClient(subscriptionID, cred, nil) + if err != nil { + return nil, fmt.Errorf("failed to create private endpoints client: %w", err) + } + batchAccountsClient, err := armbatch.NewAccountClient(subscriptionID, cred, nil) if err != nil { return nil, fmt.Errorf("failed to create batch accounts client: %w", err) } + batchApplicationClient, err := armbatch.NewApplicationClient(subscriptionID, cred, nil) + if err != nil { + return nil, fmt.Errorf("failed to create batch application client: %w", err) + } + + batchPoolClient, err := armbatch.NewPoolClient(subscriptionID, cred, nil) + if err != nil { + return nil, fmt.Errorf("failed to create batch pool client: %w", err) + } + virtualMachineScaleSetsClient, err := armcompute.NewVirtualMachineScaleSetsClient(subscriptionID, cred, nil) if err != nil { return nil, fmt.Errorf("failed to create virtual machine scale sets client: %w", err) @@ -175,31 +226,96 @@ func Adapters(ctx context.Context, subscriptionID string, regions []string, cred return nil, fmt.Errorf("failed to create route tables client: %w", err) } + routesClient, err := armnetwork.NewRoutesClient(subscriptionID, cred, nil) + if err != nil { + return nil, 
fmt.Errorf("failed to create routes client: %w", err) + } + + securityRulesClient, err := armnetwork.NewSecurityRulesClient(subscriptionID, cred, nil) + if err != nil { + return nil, fmt.Errorf("failed to create security rules client: %w", err) + } + applicationGatewaysClient, err := armnetwork.NewApplicationGatewaysClient(subscriptionID, cred, nil) if err != nil { return nil, fmt.Errorf("failed to create application gateways client: %w", err) } + applicationSecurityGroupsClient, err := armnetwork.NewApplicationSecurityGroupsClient(subscriptionID, cred, nil) + if err != nil { + return nil, fmt.Errorf("failed to create application security groups client: %w", err) + } + + virtualNetworkGatewaysClient, err := armnetwork.NewVirtualNetworkGatewaysClient(subscriptionID, cred, nil) + if err != nil { + return nil, fmt.Errorf("failed to create virtual network gateways client: %w", err) + } + + natGatewaysClient, err := armnetwork.NewNatGatewaysClient(subscriptionID, cred, nil) + if err != nil { + return nil, fmt.Errorf("failed to create nat gateways client: %w", err) + } + managedHSMsClient, err := armkeyvault.NewManagedHsmsClient(subscriptionID, cred, nil) if err != nil { return nil, fmt.Errorf("failed to create managed hsms client: %w", err) } + mhsmPrivateEndpointConnectionsClient, err := armkeyvault.NewMHSMPrivateEndpointConnectionsClient(subscriptionID, cred, nil) + if err != nil { + return nil, fmt.Errorf("failed to create MHSM private endpoint connections client: %w", err) + } + sqlServersClient, err := armsql.NewServersClient(subscriptionID, cred, nil) if err != nil { return nil, fmt.Errorf("failed to create sql servers client: %w", err) } + sqlFirewallRulesClient, err := armsql.NewFirewallRulesClient(subscriptionID, cred, nil) + if err != nil { + return nil, fmt.Errorf("failed to create sql firewall rules client: %w", err) + } + + sqlVirtualNetworkRulesClient, err := armsql.NewVirtualNetworkRulesClient(subscriptionID, cred, nil) + if err != nil { + return nil, 
fmt.Errorf("failed to create sql virtual network rules client: %w", err) + } + + sqlElasticPoolsClient, err := armsql.NewElasticPoolsClient(subscriptionID, cred, nil) + if err != nil { + return nil, fmt.Errorf("failed to create sql elastic pools client: %w", err) + } + + sqlPrivateEndpointConnectionsClient, err := armsql.NewPrivateEndpointConnectionsClient(subscriptionID, cred, nil) + if err != nil { + return nil, fmt.Errorf("failed to create sql private endpoint connections client: %w", err) + } + postgresqlFlexibleServersClient, err := armpostgresqlflexibleservers.NewServersClient(subscriptionID, cred, nil) if err != nil { return nil, fmt.Errorf("failed to create postgresql flexible servers client: %w", err) } + postgresqlFirewallRulesClient, err := armpostgresqlflexibleservers.NewFirewallRulesClient(subscriptionID, cred, nil) + if err != nil { + return nil, fmt.Errorf("failed to create postgresql firewall rules client: %w", err) + } + + postgresqlPrivateEndpointConnectionsClient, err := armpostgresqlflexibleservers.NewPrivateEndpointConnectionsClient(subscriptionID, cred, nil) + if err != nil { + return nil, fmt.Errorf("failed to create postgresql flexible server private endpoint connections client: %w", err) + } + secretsClient, err := armkeyvault.NewSecretsClient(subscriptionID, cred, nil) if err != nil { return nil, fmt.Errorf("failed to create secrets client: %w", err) } + keysClient, err := armkeyvault.NewKeysClient(subscriptionID, cred, nil) + if err != nil { + return nil, fmt.Errorf("failed to create keys client: %w", err) + } + userAssignedIdentitiesClient, err := armmsi.NewUserAssignedIdentitiesClient(subscriptionID, cred, nil) if err != nil { return nil, fmt.Errorf("failed to create user assigned identities client: %w", err) @@ -238,6 +354,14 @@ func Adapters(ctx context.Context, subscriptionID string, regions []string, cred if err != nil { return nil, fmt.Errorf("failed to create zones client: %w", err) } + recordSetsClient, err := 
armdns.NewRecordSetsClient(subscriptionID, cred, nil) + if err != nil { + return nil, fmt.Errorf("failed to create record sets client: %w", err) + } + privateDNSZonesClient, err := armprivatedns.NewPrivateZonesClient(subscriptionID, cred, nil) + if err != nil { + return nil, fmt.Errorf("failed to create private DNS zones client: %w", err) + } diskAccessesClient, err := armcompute.NewDiskAccessesClient(subscriptionID, cred, nil) if err != nil { return nil, fmt.Errorf("failed to create disk accesses client: %w", err) @@ -248,16 +372,31 @@ func Adapters(ctx context.Context, subscriptionID string, regions []string, cred return nil, fmt.Errorf("failed to create dedicated host groups client: %w", err) } + dedicatedHostsClient, err := armcompute.NewDedicatedHostsClient(subscriptionID, cred, nil) + if err != nil { + return nil, fmt.Errorf("failed to create dedicated hosts client: %w", err) + } + capacityReservationGroupsClient, err := armcompute.NewCapacityReservationGroupsClient(subscriptionID, cred, nil) if err != nil { return nil, fmt.Errorf("failed to create capacity reservation groups client: %w", err) } + capacityReservationsClient, err := armcompute.NewCapacityReservationsClient(subscriptionID, cred, nil) + if err != nil { + return nil, fmt.Errorf("failed to create capacity reservations client: %w", err) + } + galleryApplicationVersionsClient, err := armcompute.NewGalleryApplicationVersionsClient(subscriptionID, cred, nil) if err != nil { return nil, fmt.Errorf("failed to create gallery application versions client: %w", err) } + galleryApplicationsClient, err := armcompute.NewGalleryApplicationsClient(subscriptionID, cred, nil) + if err != nil { + return nil, fmt.Errorf("failed to create gallery applications client: %w", err) + } + galleryImagesClient, err := armcompute.NewGalleryImagesClient(subscriptionID, cred, nil) if err != nil { return nil, fmt.Errorf("failed to create gallery images client: %w", err) @@ -305,10 +444,26 @@ func Adapters(ctx context.Context, 
subscriptionID string, regions []string, cred clients.NewTablesClient(tablesClient), resourceGroupScopes, ), cache), + sources.WrapperToAdapter(NewStorageEncryptionScope( + clients.NewEncryptionScopesClient(encryptionScopesClient), + resourceGroupScopes, + ), cache), + sources.WrapperToAdapter(NewStoragePrivateEndpointConnection( + clients.NewStoragePrivateEndpointConnectionsClient(privateEndpointConnectionsClient), + resourceGroupScopes, + ), cache), sources.WrapperToAdapter(NewNetworkVirtualNetwork( clients.NewVirtualNetworksClient(virtualNetworksClient), resourceGroupScopes, ), cache), + sources.WrapperToAdapter(NewNetworkSubnet( + clients.NewSubnetsClient(subnetsClient), + resourceGroupScopes, + ), cache), + sources.WrapperToAdapter(NewNetworkVirtualNetworkPeering( + clients.NewVirtualNetworkPeeringsClient(virtualNetworkPeeringsClient), + resourceGroupScopes, + ), cache), sources.WrapperToAdapter(NewNetworkNetworkInterface( clients.NewNetworkInterfacesClient(networkInterfacesClient), resourceGroupScopes, @@ -317,10 +472,30 @@ func Adapters(ctx context.Context, subscriptionID string, regions []string, cred clients.NewSqlDatabasesClient(sqlDatabasesClient), resourceGroupScopes, ), cache), + sources.WrapperToAdapter(NewSqlElasticPool( + clients.NewSqlElasticPoolClient(sqlElasticPoolsClient), + resourceGroupScopes, + ), cache), + sources.WrapperToAdapter(NewSqlServerFirewallRule( + clients.NewSqlServerFirewallRuleClient(sqlFirewallRulesClient), + resourceGroupScopes, + ), cache), + sources.WrapperToAdapter(NewSqlServerVirtualNetworkRule( + clients.NewSqlServerVirtualNetworkRuleClient(sqlVirtualNetworkRulesClient), + resourceGroupScopes, + ), cache), + sources.WrapperToAdapter(NewSQLServerPrivateEndpointConnection( + clients.NewSQLServerPrivateEndpointConnectionsClient(sqlPrivateEndpointConnectionsClient), + resourceGroupScopes, + ), cache), sources.WrapperToAdapter(NewDocumentDBDatabaseAccounts( 
clients.NewDocumentDBDatabaseAccountsClient(documentDBDatabaseAccountsClient), resourceGroupScopes, ), cache), + sources.WrapperToAdapter(NewDocumentDBPrivateEndpointConnection( + clients.NewDocumentDBPrivateEndpointConnectionsClient(documentDBPrivateEndpointConnectionsClient), + resourceGroupScopes, + ), cache), sources.WrapperToAdapter(NewKeyVaultVault( clients.NewVaultsClient(keyVaultsClient), resourceGroupScopes, @@ -329,6 +504,10 @@ func Adapters(ctx context.Context, subscriptionID string, regions []string, cred clients.NewManagedHSMsClient(managedHSMsClient), resourceGroupScopes, ), cache), + sources.WrapperToAdapter(NewKeyVaultManagedHSMPrivateEndpointConnection( + clients.NewKeyVaultManagedHSMPrivateEndpointConnectionsClient(mhsmPrivateEndpointConnectionsClient), + resourceGroupScopes, + ), cache), sources.WrapperToAdapter(NewDBforPostgreSQLDatabase( clients.NewPostgreSQLDatabasesClient(postgreSQLDatabasesClient), resourceGroupScopes, @@ -337,18 +516,46 @@ func Adapters(ctx context.Context, subscriptionID string, regions []string, cred clients.NewPublicIPAddressesClient(publicIPAddressesClient), resourceGroupScopes, ), cache), + sources.WrapperToAdapter(NewNetworkPublicIPPrefix( + clients.NewPublicIPPrefixesClient(publicIPPrefixesClient), + resourceGroupScopes, + ), cache), + sources.WrapperToAdapter(NewNetworkDdosProtectionPlan( + clients.NewDdosProtectionPlansClient(ddosProtectionPlansClient), + resourceGroupScopes, + ), cache), sources.WrapperToAdapter(NewNetworkLoadBalancer( clients.NewLoadBalancersClient(loadBalancersClient), resourceGroupScopes, ), cache), + sources.WrapperToAdapter(NewNetworkPrivateEndpoint( + clients.NewPrivateEndpointsClient(privateEndpointsClient), + resourceGroupScopes, + ), cache), sources.WrapperToAdapter(NewNetworkZone( clients.NewZonesClient(zonesClient), resourceGroupScopes, ), cache), + sources.WrapperToAdapter(NewNetworkPrivateDNSZone( + clients.NewPrivateDNSZonesClient(privateDNSZonesClient), + resourceGroupScopes, + ), 
cache), + sources.WrapperToAdapter(NewNetworkDNSRecordSet( + clients.NewRecordSetsClient(recordSetsClient), + resourceGroupScopes, + ), cache), sources.WrapperToAdapter(NewBatchAccount( clients.NewBatchAccountsClient(batchAccountsClient), resourceGroupScopes, ), cache), + sources.WrapperToAdapter(NewBatchBatchApplication( + clients.NewBatchApplicationsClient(batchApplicationClient), + resourceGroupScopes, + ), cache), + sources.WrapperToAdapter(NewBatchBatchPool( + clients.NewBatchPoolsClient(batchPoolClient), + resourceGroupScopes, + ), cache), sources.WrapperToAdapter(NewComputeVirtualMachineScaleSet( clients.NewVirtualMachineScaleSetsClient(virtualMachineScaleSetsClient), resourceGroupScopes, @@ -365,14 +572,34 @@ func Adapters(ctx context.Context, subscriptionID string, regions []string, cred clients.NewNetworkSecurityGroupsClient(networkSecurityGroupsClient), resourceGroupScopes, ), cache), + sources.WrapperToAdapter(NewNetworkApplicationSecurityGroup( + clients.NewApplicationSecurityGroupsClient(applicationSecurityGroupsClient), + resourceGroupScopes, + ), cache), sources.WrapperToAdapter(NewNetworkRouteTable( clients.NewRouteTablesClient(routeTablesClient), resourceGroupScopes, ), cache), + sources.WrapperToAdapter(NewNetworkRoute( + clients.NewRoutesClient(routesClient), + resourceGroupScopes, + ), cache), + sources.WrapperToAdapter(NewNetworkSecurityRule( + clients.NewSecurityRulesClient(securityRulesClient), + resourceGroupScopes, + ), cache), sources.WrapperToAdapter(NewNetworkApplicationGateway( clients.NewApplicationGatewaysClient(applicationGatewaysClient), resourceGroupScopes, ), cache), + sources.WrapperToAdapter(NewNetworkVirtualNetworkGateway( + clients.NewVirtualNetworkGatewaysClient(virtualNetworkGatewaysClient), + resourceGroupScopes, + ), cache), + sources.WrapperToAdapter(NewNetworkNatGateway( + clients.NewNatGatewaysClient(natGatewaysClient), + resourceGroupScopes, + ), cache), sources.WrapperToAdapter(NewSqlServer( 
clients.NewSqlServersClient(sqlServersClient), resourceGroupScopes, @@ -381,10 +608,22 @@ func Adapters(ctx context.Context, subscriptionID string, regions []string, cred clients.NewPostgreSQLFlexibleServersClient(postgresqlFlexibleServersClient), resourceGroupScopes, ), cache), + sources.WrapperToAdapter(NewDBforPostgreSQLFlexibleServerFirewallRule( + clients.NewPostgreSQLFlexibleServerFirewallRuleClient(postgresqlFirewallRulesClient), + resourceGroupScopes, + ), cache), + sources.WrapperToAdapter(NewDBforPostgreSQLFlexibleServerPrivateEndpointConnection( + clients.NewDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient(postgresqlPrivateEndpointConnectionsClient), + resourceGroupScopes, + ), cache), sources.WrapperToAdapter(NewKeyVaultSecret( clients.NewSecretsClient(secretsClient), resourceGroupScopes, ), cache), + sources.WrapperToAdapter(NewKeyVaultKey( + clients.NewKeysClient(keysClient), + resourceGroupScopes, + ), cache), sources.WrapperToAdapter(NewManagedIdentityUserAssignedIdentity( clients.NewUserAssignedIdentitiesClient(userAssignedIdentitiesClient), resourceGroupScopes, @@ -421,14 +660,26 @@ func Adapters(ctx context.Context, subscriptionID string, regions []string, cred clients.NewDedicatedHostGroupsClient(dedicatedHostGroupsClient), resourceGroupScopes, ), cache), + sources.WrapperToAdapter(NewComputeDedicatedHost( + clients.NewDedicatedHostsClient(dedicatedHostsClient), + resourceGroupScopes, + ), cache), sources.WrapperToAdapter(NewComputeCapacityReservationGroup( clients.NewCapacityReservationGroupsClient(capacityReservationGroupsClient), resourceGroupScopes, ), cache), + sources.WrapperToAdapter(NewComputeCapacityReservation( + clients.NewCapacityReservationsClient(capacityReservationsClient), + resourceGroupScopes, + ), cache), sources.WrapperToAdapter(NewComputeGalleryApplicationVersion( clients.NewGalleryApplicationVersionsClient(galleryApplicationVersionsClient), resourceGroupScopes, ), cache), + 
sources.WrapperToAdapter(NewComputeGalleryApplication( + clients.NewGalleryApplicationsClient(galleryApplicationsClient), + resourceGroupScopes, + ), cache), sources.WrapperToAdapter(NewComputeGallery( clients.NewGalleriesClient(galleriesClient), resourceGroupScopes, @@ -441,7 +692,7 @@ func Adapters(ctx context.Context, subscriptionID string, regions []string, cred clients.NewSnapshotsClient(snapshotsClient), resourceGroupScopes, ), cache), - ) + ) } // Subscription-scoped adapters (not resource-group-scoped) @@ -470,26 +721,48 @@ func Adapters(ctx context.Context, subscriptionID string, regions []string, cred sources.WrapperToAdapter(NewStorageFileShare(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewStorageQueues(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewStorageTable(nil, placeholderResourceGroupScopes), noOpCache), + sources.WrapperToAdapter(NewStorageEncryptionScope(nil, placeholderResourceGroupScopes), noOpCache), + sources.WrapperToAdapter(NewStoragePrivateEndpointConnection(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewNetworkVirtualNetwork(nil, placeholderResourceGroupScopes), noOpCache), + sources.WrapperToAdapter(NewNetworkSubnet(nil, placeholderResourceGroupScopes), noOpCache), + sources.WrapperToAdapter(NewNetworkVirtualNetworkPeering(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewNetworkNetworkInterface(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewSqlDatabase(nil, placeholderResourceGroupScopes), noOpCache), + sources.WrapperToAdapter(NewSqlServerFirewallRule(nil, placeholderResourceGroupScopes), noOpCache), + sources.WrapperToAdapter(NewSqlServerVirtualNetworkRule(nil, placeholderResourceGroupScopes), noOpCache), + sources.WrapperToAdapter(NewSQLServerPrivateEndpointConnection(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewDocumentDBDatabaseAccounts(nil, 
placeholderResourceGroupScopes), noOpCache), + sources.WrapperToAdapter(NewDocumentDBPrivateEndpointConnection(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewKeyVaultVault(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewKeyVaultManagedHSM(nil, placeholderResourceGroupScopes), noOpCache), + sources.WrapperToAdapter(NewKeyVaultManagedHSMPrivateEndpointConnection(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewDBforPostgreSQLDatabase(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewNetworkPublicIPAddress(nil, placeholderResourceGroupScopes), noOpCache), + sources.WrapperToAdapter(NewNetworkPublicIPPrefix(nil, placeholderResourceGroupScopes), noOpCache), + sources.WrapperToAdapter(NewNetworkDdosProtectionPlan(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewNetworkLoadBalancer(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewNetworkZone(nil, placeholderResourceGroupScopes), noOpCache), + sources.WrapperToAdapter(NewNetworkPrivateDNSZone(nil, placeholderResourceGroupScopes), noOpCache), + sources.WrapperToAdapter(NewNetworkDNSRecordSet(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewBatchAccount(nil, placeholderResourceGroupScopes), noOpCache), + sources.WrapperToAdapter(NewBatchBatchApplication(nil, placeholderResourceGroupScopes), noOpCache), + sources.WrapperToAdapter(NewBatchBatchPool(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewComputeVirtualMachineScaleSet(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewComputeAvailabilitySet(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewComputeDisk(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewNetworkNetworkSecurityGroup(nil, placeholderResourceGroupScopes), noOpCache), + 
sources.WrapperToAdapter(NewNetworkApplicationSecurityGroup(nil, placeholderResourceGroupScopes), noOpCache), + sources.WrapperToAdapter(NewNetworkSecurityRule(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewNetworkRouteTable(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewNetworkApplicationGateway(nil, placeholderResourceGroupScopes), noOpCache), + sources.WrapperToAdapter(NewNetworkVirtualNetworkGateway(nil, placeholderResourceGroupScopes), noOpCache), + sources.WrapperToAdapter(NewNetworkNatGateway(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewSqlServer(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewDBforPostgreSQLFlexibleServer(nil, placeholderResourceGroupScopes), noOpCache), + sources.WrapperToAdapter(NewDBforPostgreSQLFlexibleServerFirewallRule(nil, placeholderResourceGroupScopes), noOpCache), + sources.WrapperToAdapter(NewDBforPostgreSQLFlexibleServerPrivateEndpointConnection(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewKeyVaultSecret(nil, placeholderResourceGroupScopes), noOpCache), + sources.WrapperToAdapter(NewKeyVaultKey(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewManagedIdentityUserAssignedIdentity(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewAuthorizationRoleAssignment(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewComputeDiskEncryptionSet(nil, placeholderResourceGroupScopes), noOpCache), @@ -499,12 +772,16 @@ func Adapters(ctx context.Context, subscriptionID string, regions []string, cred sources.WrapperToAdapter(NewComputeProximityPlacementGroup(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewComputeDiskAccess(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewComputeDedicatedHostGroup(nil, placeholderResourceGroupScopes), noOpCache), + 
sources.WrapperToAdapter(NewComputeDedicatedHost(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewComputeCapacityReservationGroup(nil, placeholderResourceGroupScopes), noOpCache), + sources.WrapperToAdapter(NewComputeCapacityReservation(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewComputeGalleryApplicationVersion(nil, placeholderResourceGroupScopes), noOpCache), + sources.WrapperToAdapter(NewComputeGalleryApplication(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewComputeGallery(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewComputeGalleryImage(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewComputeSnapshot(nil, placeholderResourceGroupScopes), noOpCache), sources.WrapperToAdapter(NewComputeSharedGalleryImage(nil, subscriptionID), noOpCache), + sources.WrapperToAdapter(NewNetworkPrivateEndpoint(nil, placeholderResourceGroupScopes), noOpCache), ) _ = regions diff --git a/sources/azure/manual/authorization-role-assignment_test.go b/sources/azure/manual/authorization-role-assignment_test.go index ae5c5f92..7a7fec01 100644 --- a/sources/azure/manual/authorization-role-assignment_test.go +++ b/sources/azure/manual/authorization-role-assignment_test.go @@ -6,7 +6,6 @@ import ( "reflect" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v3" "go.uber.org/mock/gomock" @@ -142,7 +141,7 @@ func TestAuthorizationRoleAssignment(t *testing.T) { roleAssignment := &armauthorization.RoleAssignment{ Name: nil, // Role assignment with nil name should cause error Properties: &armauthorization.RoleAssignmentProperties{ - Scope: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg"), + Scope: new("/subscriptions/test-subscription/resourceGroups/test-rg"), }, } @@ -271,7 +270,7 @@ func TestAuthorizationRoleAssignment(t *testing.T) 
{ roleAssignment2 := &armauthorization.RoleAssignment{ Name: nil, // Role assignment with nil name should cause error Properties: &armauthorization.RoleAssignmentProperties{ - Scope: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg"), + Scope: new("/subscriptions/test-subscription/resourceGroups/test-rg"), }, } @@ -457,7 +456,7 @@ func TestAuthorizationRoleAssignment(t *testing.T) { wrapper := manual.NewAuthorizationRoleAssignment(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) // Use interface assertion to access PredefinedRole method - if roleInterface, ok := interface{}(wrapper).(interface{ PredefinedRole() string }); ok { + if roleInterface, ok := any(wrapper).(interface{ PredefinedRole() string }); ok { role := roleInterface.PredefinedRole() if role != "Reader" { t.Errorf("Expected PredefinedRole to be 'Reader', got %s", role) @@ -472,7 +471,7 @@ func TestAuthorizationRoleAssignment(t *testing.T) { roleAssignment := createAzureRoleAssignment(roleAssignmentName, "/subscriptions/test-subscription/resourceGroups/test-rg") // Add delegated managed identity resource ID delegatedIdentityID := "/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-identity" - roleAssignment.Properties.DelegatedManagedIdentityResourceID = to.Ptr(delegatedIdentityID) + roleAssignment.Properties.DelegatedManagedIdentityResourceID = new(delegatedIdentityID) mockClient := mocks.NewMockRoleAssignmentsClient(ctrl) azureScope := "/subscriptions/test-subscription/resourceGroups/test-rg" @@ -540,7 +539,7 @@ func (m *MockRoleAssignmentsPager) More() bool { func (mr *MockRoleAssignmentsPagerMockRecorder) More() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "More", reflect.TypeOf((*MockRoleAssignmentsPager)(nil).More)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "More", reflect.TypeFor[func() 
bool]()) } func (m *MockRoleAssignmentsPager) NextPage(ctx context.Context) (armauthorization.RoleAssignmentsClientListForResourceGroupResponse, error) { @@ -551,21 +550,21 @@ func (m *MockRoleAssignmentsPager) NextPage(ctx context.Context) (armauthorizati return ret0, ret1 } -func (mr *MockRoleAssignmentsPagerMockRecorder) NextPage(ctx interface{}) *gomock.Call { +func (mr *MockRoleAssignmentsPagerMockRecorder) NextPage(ctx any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NextPage", reflect.TypeOf((*MockRoleAssignmentsPager)(nil).NextPage), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NextPage", reflect.TypeFor[func(ctx context.Context) (armauthorization.RoleAssignmentsClientListForResourceGroupResponse, error)](), ctx) } // createAzureRoleAssignment creates a mock Azure role assignment for testing func createAzureRoleAssignment(roleAssignmentName, scope string) *armauthorization.RoleAssignment { return &armauthorization.RoleAssignment{ - Name: to.Ptr(roleAssignmentName), - Type: to.Ptr("Microsoft.Authorization/roleAssignments"), - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Authorization/roleAssignments/" + roleAssignmentName), + Name: new(roleAssignmentName), + Type: new("Microsoft.Authorization/roleAssignments"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Authorization/roleAssignments/" + roleAssignmentName), Properties: &armauthorization.RoleAssignmentProperties{ - Scope: to.Ptr(scope), - RoleDefinitionID: to.Ptr("/subscriptions/test-subscription/providers/Microsoft.Authorization/roleDefinitions/b24988ac-6180-42a0-ab88-20f7382dd24c"), - PrincipalID: to.Ptr("00000000-0000-0000-0000-000000000000"), + Scope: new(scope), + RoleDefinitionID: new("/subscriptions/test-subscription/providers/Microsoft.Authorization/roleDefinitions/b24988ac-6180-42a0-ab88-20f7382dd24c"), + PrincipalID: 
new("00000000-0000-0000-0000-000000000000"), }, } } diff --git a/sources/azure/manual/batch-batch-accounts.go b/sources/azure/manual/batch-batch-accounts.go index 5dda3207..b0e2383c 100644 --- a/sources/azure/manual/batch-batch-accounts.go +++ b/sources/azure/manual/batch-batch-accounts.go @@ -4,7 +4,7 @@ import ( "context" "errors" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/batch/armbatch/v3" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/batch/armbatch/v4" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" "github.com/overmindtech/cli/go/sdpcache" diff --git a/sources/azure/manual/batch-batch-accounts_test.go b/sources/azure/manual/batch-batch-accounts_test.go index 8ecab3af..0d37f1a2 100644 --- a/sources/azure/manual/batch-batch-accounts_test.go +++ b/sources/azure/manual/batch-batch-accounts_test.go @@ -6,8 +6,7 @@ import ( "testing" "time" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/batch/armbatch/v3" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/batch/armbatch/v4" "go.uber.org/mock/gomock" "github.com/overmindtech/cli/go/discovery" @@ -446,37 +445,37 @@ func createAzureBatchAccount(accountName, provisioningState, subscriptionID, res nodeIdentityID := "/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-node-identity" return &armbatch.Account{ - Name: to.Ptr(accountName), - Location: to.Ptr("eastus"), + Name: new(accountName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), - "project": to.Ptr("testing"), + "env": new("test"), + "project": new("testing"), }, Properties: &armbatch.AccountProperties{ - ProvisioningState: (*armbatch.ProvisioningState)(to.Ptr(provisioningState)), + ProvisioningState: (*armbatch.ProvisioningState)(new(provisioningState)), AutoStorage: &armbatch.AutoStorageProperties{ - StorageAccountID: 
to.Ptr(storageAccountID), - LastKeySync: to.Ptr(time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)), + StorageAccountID: new(storageAccountID), + LastKeySync: new(time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)), NodeIdentityReference: &armbatch.ComputeNodeIdentityReference{ - ResourceID: to.Ptr(nodeIdentityID), + ResourceID: new(nodeIdentityID), }, }, KeyVaultReference: &armbatch.KeyVaultReference{ - ID: to.Ptr(keyVaultID), - URL: to.Ptr("https://test-keyvault.vault.azure.net/"), + ID: new(keyVaultID), + URL: new("https://test-keyvault.vault.azure.net/"), }, PrivateEndpointConnections: []*armbatch.PrivateEndpointConnection{ { Properties: &armbatch.PrivateEndpointConnectionProperties{ PrivateEndpoint: &armbatch.PrivateEndpoint{ - ID: to.Ptr(privateEndpointID), + ID: new(privateEndpointID), }, }, }, }, }, Identity: &armbatch.AccountIdentity{ - Type: (*armbatch.ResourceIdentityType)(to.Ptr(armbatch.ResourceIdentityTypeUserAssigned)), + Type: (*armbatch.ResourceIdentityType)(new(armbatch.ResourceIdentityTypeUserAssigned)), UserAssignedIdentities: map[string]*armbatch.UserAssignedIdentities{ identityID: {}, }, @@ -494,16 +493,16 @@ func createAzureBatchAccountWithCrossRGResources( storageAccountID := "/subscriptions/" + otherSubscriptionID + "/resourceGroups/" + otherResourceGroup + "/providers/Microsoft.Storage/storageAccounts/test-storage-account" return &armbatch.Account{ - Name: to.Ptr(accountName), - Location: to.Ptr("eastus"), + Name: new(accountName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armbatch.AccountProperties{ - ProvisioningState: (*armbatch.ProvisioningState)(to.Ptr(provisioningState)), + ProvisioningState: (*armbatch.ProvisioningState)(new(provisioningState)), AutoStorage: &armbatch.AutoStorageProperties{ - StorageAccountID: to.Ptr(storageAccountID), - LastKeySync: to.Ptr(time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)), + StorageAccountID: new(storageAccountID), + LastKeySync: 
new(time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)), }, }, } diff --git a/sources/azure/manual/batch-batch-application.go b/sources/azure/manual/batch-batch-application.go new file mode 100644 index 00000000..a551e962 --- /dev/null +++ b/sources/azure/manual/batch-batch-application.go @@ -0,0 +1,232 @@ +package manual + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/batch/armbatch/v4" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" +) + +var BatchBatchApplicationLookupByName = shared.NewItemTypeLookup("name", azureshared.BatchBatchApplication) + +type batchBatchApplicationWrapper struct { + client clients.BatchApplicationsClient + *azureshared.MultiResourceGroupBase +} + +// NewBatchBatchApplication returns a SearchableWrapper for Azure Batch applications (child of Batch account). 
+func NewBatchBatchApplication(client clients.BatchApplicationsClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.SearchableWrapper { + return &batchBatchApplicationWrapper{ + client: client, + MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_COMPUTE_APPLICATION, + azureshared.BatchBatchApplication, + ), + } +} + +func (b batchBatchApplicationWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 2 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Get requires 2 query parts: accountName and applicationName", + Scope: scope, + ItemType: b.Type(), + } + } + accountName := queryParts[0] + applicationName := queryParts[1] + + rgScope, err := b.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, b.Type()) + } + resp, err := b.client.Get(ctx, rgScope.ResourceGroup, accountName, applicationName) + if err != nil { + return nil, azureshared.QueryError(err, scope, b.Type()) + } + + return b.azureApplicationToSDPItem(&resp.Application, accountName, applicationName, scope) +} + +func (b batchBatchApplicationWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + BatchAccountLookupByName, + BatchBatchApplicationLookupByName, + } +} + +func (b batchBatchApplicationWrapper) Search(ctx context.Context, scope string, queryParts ...string) ([]*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 1 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Search requires 1 query part: accountName", + Scope: scope, + ItemType: b.Type(), + } + } + accountName := queryParts[0] + + rgScope, err := b.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, b.Type()) + } + pager := b.client.List(ctx, rgScope.ResourceGroup, accountName) + + var items 
[]*sdp.Item + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, azureshared.QueryError(err, scope, b.Type()) + } + + for _, app := range page.Value { + if app == nil || app.Name == nil { + continue + } + item, sdpErr := b.azureApplicationToSDPItem(app, accountName, *app.Name, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + + return items, nil +} + +func (b batchBatchApplicationWrapper) SearchStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string, queryParts ...string) { + if len(queryParts) < 1 { + stream.SendError(azureshared.QueryError(errors.New("Search requires 1 query part: accountName"), scope, b.Type())) + return + } + accountName := queryParts[0] + + rgScope, err := b.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, b.Type())) + return + } + pager := b.client.List(ctx, rgScope.ResourceGroup, accountName) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, b.Type())) + return + } + for _, app := range page.Value { + if app == nil || app.Name == nil { + continue + } + item, sdpErr := b.azureApplicationToSDPItem(app, accountName, *app.Name, scope) + if sdpErr != nil { + stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) + stream.SendItem(item) + } + } +} + +func (b batchBatchApplicationWrapper) SearchLookups() []sources.ItemTypeLookups { + return []sources.ItemTypeLookups{ + { + BatchAccountLookupByName, + }, + } +} + +func (b batchBatchApplicationWrapper) azureApplicationToSDPItem(app *armbatch.Application, accountName, applicationName, scope string) (*sdp.Item, *sdp.QueryError) { + if app.Name == nil { + return nil, azureshared.QueryError(errors.New("application name is nil"), scope, b.Type()) + } + attributes, err := 
shared.ToAttributesWithExclude(app, "tags") + if err != nil { + return nil, azureshared.QueryError(err, scope, b.Type()) + } + + if err := attributes.Set("uniqueAttr", shared.CompositeLookupKey(accountName, applicationName)); err != nil { + return nil, azureshared.QueryError(err, scope, b.Type()) + } + + sdpItem := &sdp.Item{ + Type: azureshared.BatchBatchApplication.String(), + UniqueAttribute: "uniqueAttr", + Attributes: attributes, + Scope: scope, + Tags: azureshared.ConvertAzureTags(app.Tags), + LinkedItemQueries: []*sdp.LinkedItemQuery{}, + } + + // Link to parent Batch Account + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.BatchBatchAccount.String(), + Method: sdp.QueryMethod_GET, + Query: accountName, + Scope: scope, + }, + }) + + // Link to Application Packages (child resource under this application) + // Packages are listed under /batchAccounts/{account}/applications/{app}/versions + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.BatchBatchApplicationPackage.String(), + Method: sdp.QueryMethod_SEARCH, + Query: shared.CompositeLookupKey(accountName, applicationName), + Scope: scope, + }, + }) + + // Link to default version application package when set (GET to specific child resource) + if app.Properties != nil && app.Properties.DefaultVersion != nil && *app.Properties.DefaultVersion != "" { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.BatchBatchApplicationPackage.String(), + Method: sdp.QueryMethod_GET, + Query: shared.CompositeLookupKey(accountName, applicationName, *app.Properties.DefaultVersion), + Scope: scope, + }, + }) + } + + return sdpItem, nil +} + +func (b batchBatchApplicationWrapper) PotentialLinks() map[shared.ItemType]bool { + return map[shared.ItemType]bool{ + azureshared.BatchBatchAccount: true, + 
azureshared.BatchBatchApplicationPackage: true, + } +} + +// ref: https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/batch_application +func (b batchBatchApplicationWrapper) TerraformMappings() []*sdp.TerraformMapping { + return []*sdp.TerraformMapping{ + { + TerraformMethod: sdp.QueryMethod_SEARCH, + TerraformQueryMap: "azurerm_batch_application.id", + }, + } +} + +// ref: https://learn.microsoft.com/en-us/azure/role-based-access-control/permissions/compute +func (b batchBatchApplicationWrapper) IAMPermissions() []string { + return []string{ + "Microsoft.Batch/batchAccounts/applications/read", + } +} + +func (b batchBatchApplicationWrapper) PredefinedRole() string { + return "Azure Batch Account Reader" +} diff --git a/sources/azure/manual/batch-batch-application_test.go b/sources/azure/manual/batch-batch-application_test.go new file mode 100644 index 00000000..32f38ee2 --- /dev/null +++ b/sources/azure/manual/batch-batch-application_test.go @@ -0,0 +1,254 @@ +package manual_test + +import ( + "context" + "errors" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/batch/armbatch/v4" + "go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + "github.com/overmindtech/cli/sources/shared" +) + +// mockBatchApplicationsPager is a mock implementation of BatchApplicationsPager. 
+type mockBatchApplicationsPager struct { + pages []armbatch.ApplicationClientListResponse + index int +} + +func (m *mockBatchApplicationsPager) More() bool { + return m.index < len(m.pages) +} + +func (m *mockBatchApplicationsPager) NextPage(ctx context.Context) (armbatch.ApplicationClientListResponse, error) { + if m.index >= len(m.pages) { + return armbatch.ApplicationClientListResponse{}, errors.New("no more pages") + } + page := m.pages[m.index] + m.index++ + return page, nil +} + +// errorBatchApplicationsPager is a mock pager that always returns an error. +type errorBatchApplicationsPager struct{} + +func (e *errorBatchApplicationsPager) More() bool { + return true +} + +func (e *errorBatchApplicationsPager) NextPage(ctx context.Context) (armbatch.ApplicationClientListResponse, error) { + return armbatch.ApplicationClientListResponse{}, errors.New("pager error") +} + +// testBatchApplicationsClient wraps the mock and injects a pager from List(). +type testBatchApplicationsClient struct { + *mocks.MockBatchApplicationsClient + pager clients.BatchApplicationsPager +} + +func (t *testBatchApplicationsClient) List(ctx context.Context, resourceGroupName, accountName string) clients.BatchApplicationsPager { + if t.pager != nil { + return t.pager + } + return t.MockBatchApplicationsClient.List(ctx, resourceGroupName, accountName) +} + +func createAzureBatchApplication(name string) *armbatch.Application { + allowUpdates := true + return &armbatch.Application{ + ID: new("/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Batch/batchAccounts/acc/applications/" + name), + Name: new(name), + Type: new("Microsoft.Batch/batchAccounts/applications"), + Properties: &armbatch.ApplicationProperties{ + DisplayName: new("Test application " + name), + AllowUpdates: &allowUpdates, + }, + Tags: map[string]*string{"env": new("test")}, + } +} + +func TestBatchBatchApplication(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() 
+ + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + scope := subscriptionID + "." + resourceGroup + accountName := "test-batch-account" + applicationName := "test-app" + + t.Run("Get", func(t *testing.T) { + app := createAzureBatchApplication(applicationName) + + mockClient := mocks.NewMockBatchApplicationsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, accountName, applicationName).Return( + armbatch.ApplicationClientGetResponse{ + Application: *app, + }, nil) + + wrapper := manual.NewBatchBatchApplication(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(accountName, applicationName) + sdpItem, qErr := adapter.Get(ctx, scope, query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.BatchBatchApplication.String() { + t.Errorf("Expected type %s, got %s", azureshared.BatchBatchApplication.String(), sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", sdpItem.GetUniqueAttribute()) + } + + expectedUnique := shared.CompositeLookupKey(accountName, applicationName) + if sdpItem.UniqueAttributeValue() != expectedUnique { + t.Errorf("Expected unique attribute value %s, got %s", expectedUnique, sdpItem.UniqueAttributeValue()) + } + + if sdpItem.GetScope() != scope { + t.Errorf("Expected scope %s, got %s", scope, sdpItem.GetScope()) + } + + if err := sdpItem.Validate(); err != nil { + t.Fatalf("Expected valid item, got: %v", err) + } + + t.Run("StaticTests", func(t *testing.T) { + queryTests := shared.QueryTests{ + {ExpectedType: azureshared.BatchBatchAccount.String(), ExpectedMethod: sdp.QueryMethod_GET, ExpectedQuery: accountName, ExpectedScope: scope}, + {ExpectedType: azureshared.BatchBatchApplicationPackage.String(), 
ExpectedMethod: sdp.QueryMethod_SEARCH, ExpectedQuery: shared.CompositeLookupKey(accountName, applicationName), ExpectedScope: scope}, + } + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("Get_InvalidQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockBatchApplicationsClient(ctrl) + wrapper := manual.NewBatchBatchApplication(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, scope, accountName, true) + if qErr == nil { + t.Error("Expected error when Get with insufficient query parts, but got nil") + } + }) + + t.Run("Get_ClientError", func(t *testing.T) { + expectedErr := errors.New("application not found") + mockClient := mocks.NewMockBatchApplicationsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, accountName, "nonexistent").Return( + armbatch.ApplicationClientGetResponse{}, expectedErr) + + wrapper := manual.NewBatchBatchApplication(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(accountName, "nonexistent") + _, qErr := adapter.Get(ctx, scope, query, true) + if qErr == nil { + t.Error("Expected error when client returns error, but got nil") + } + }) + + t.Run("Search", func(t *testing.T) { + app1 := createAzureBatchApplication("app-1") + app2 := createAzureBatchApplication("app-2") + + mockClient := mocks.NewMockBatchApplicationsClient(ctrl) + pages := []armbatch.ApplicationClientListResponse{ + { + ListApplicationsResult: armbatch.ListApplicationsResult{ + Value: []*armbatch.Application{app1, app2}, + }, + }, + } + mockPager := &mockBatchApplicationsPager{pages: pages} + testClient := &testBatchApplicationsClient{ + MockBatchApplicationsClient: mockClient, + pager: mockPager, + 
} + + wrapper := manual.NewBatchBatchApplication(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, scope, accountName, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 2 { + t.Fatalf("Expected 2 items, got: %d", len(sdpItems)) + } + + for _, item := range sdpItems { + if err := item.Validate(); err != nil { + t.Errorf("Expected valid item, got: %v", err) + } + } + }) + + t.Run("Search_InvalidQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockBatchApplicationsClient(ctrl) + wrapper := manual.NewBatchBatchApplication(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + _, qErr := wrapper.Search(ctx, scope) + if qErr == nil { + t.Error("Expected error when Search with no query parts, but got nil") + } + }) + + t.Run("Search_PagerError", func(t *testing.T) { + mockClient := mocks.NewMockBatchApplicationsClient(ctrl) + errorPager := &errorBatchApplicationsPager{} + testClient := &testBatchApplicationsClient{ + MockBatchApplicationsClient: mockClient, + pager: errorPager, + } + + wrapper := manual.NewBatchBatchApplication(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + _, qErr := wrapper.Search(ctx, scope, accountName) + if qErr == nil { + t.Error("Expected error when pager returns error, but got nil") + } + }) + + t.Run("PotentialLinks", func(t *testing.T) { + mockClient := mocks.NewMockBatchApplicationsClient(ctrl) + wrapper := manual.NewBatchBatchApplication(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + links := 
wrapper.PotentialLinks() + if !links[azureshared.BatchBatchAccount] { + t.Error("PotentialLinks() should include BatchBatchAccount") + } + if !links[azureshared.BatchBatchApplicationPackage] { + t.Error("PotentialLinks() should include BatchBatchApplicationPackage") + } + }) + + t.Run("ImplementsSearchableAdapter", func(t *testing.T) { + mockClient := mocks.NewMockBatchApplicationsClient(ctrl) + wrapper := manual.NewBatchBatchApplication(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Error("Adapter should implement SearchableAdapter interface") + } + }) +} diff --git a/sources/azure/manual/batch-batch-pool.go b/sources/azure/manual/batch-batch-pool.go new file mode 100644 index 00000000..fd473fa9 --- /dev/null +++ b/sources/azure/manual/batch-batch-pool.go @@ -0,0 +1,706 @@ +package manual + +import ( + "context" + "errors" + "fmt" + "net" + "net/url" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/batch/armbatch/v4" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" + "github.com/overmindtech/cli/sources/stdlib" +) + +var BatchBatchPoolLookupByName = shared.NewItemTypeLookup("name", azureshared.BatchBatchPool) + +type batchBatchPoolWrapper struct { + client clients.BatchPoolsClient + *azureshared.MultiResourceGroupBase +} + +// NewBatchBatchPool returns a SearchableWrapper for Azure Batch pools (child of Batch account). 
+func NewBatchBatchPool(client clients.BatchPoolsClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.SearchableWrapper { + return &batchBatchPoolWrapper{ + client: client, + MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_COMPUTE_APPLICATION, + azureshared.BatchBatchPool, + ), + } +} + +func (b batchBatchPoolWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 2 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Get requires 2 query parts: accountName and poolName", + Scope: scope, + ItemType: b.Type(), + } + } + accountName := queryParts[0] + poolName := queryParts[1] + + rgScope, err := b.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, b.Type()) + } + resp, err := b.client.Get(ctx, rgScope.ResourceGroup, accountName, poolName) + if err != nil { + return nil, azureshared.QueryError(err, scope, b.Type()) + } + + return b.azurePoolToSDPItem(&resp.Pool, accountName, poolName, scope) +} + +func (b batchBatchPoolWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + BatchAccountLookupByName, + BatchBatchPoolLookupByName, + } +} + +func (b batchBatchPoolWrapper) Search(ctx context.Context, scope string, queryParts ...string) ([]*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 1 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Search requires 1 query part: accountName", + Scope: scope, + ItemType: b.Type(), + } + } + accountName := queryParts[0] + + rgScope, err := b.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, b.Type()) + } + pager := b.client.ListByBatchAccount(ctx, rgScope.ResourceGroup, accountName) + + var items []*sdp.Item + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil 
{ + return nil, azureshared.QueryError(err, scope, b.Type()) + } + + for _, pool := range page.Value { + if pool == nil || pool.Name == nil { + continue + } + item, sdpErr := b.azurePoolToSDPItem(pool, accountName, *pool.Name, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + + return items, nil +} + +func (b batchBatchPoolWrapper) SearchStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string, queryParts ...string) { + if len(queryParts) < 1 { + stream.SendError(azureshared.QueryError(errors.New("Search requires 1 query part: accountName"), scope, b.Type())) + return + } + accountName := queryParts[0] + + rgScope, err := b.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, b.Type())) + return + } + pager := b.client.ListByBatchAccount(ctx, rgScope.ResourceGroup, accountName) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, b.Type())) + return + } + for _, pool := range page.Value { + if pool == nil || pool.Name == nil { + continue + } + item, sdpErr := b.azurePoolToSDPItem(pool, accountName, *pool.Name, scope) + if sdpErr != nil { + stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) + stream.SendItem(item) + } + } +} + +func (b batchBatchPoolWrapper) SearchLookups() []sources.ItemTypeLookups { + return []sources.ItemTypeLookups{ + { + BatchAccountLookupByName, + }, + } +} + +func (b batchBatchPoolWrapper) azurePoolToSDPItem(pool *armbatch.Pool, accountName, poolName, scope string) (*sdp.Item, *sdp.QueryError) { + if pool.Name == nil { + return nil, azureshared.QueryError(errors.New("pool name is nil"), scope, b.Type()) + } + attributes, err := shared.ToAttributesWithExclude(pool, "tags") + if err != nil { + return nil, azureshared.QueryError(err, scope, b.Type()) + 
} + + if err := attributes.Set("uniqueAttr", shared.CompositeLookupKey(accountName, poolName)); err != nil { + return nil, azureshared.QueryError(err, scope, b.Type()) + } + + sdpItem := &sdp.Item{ + Type: azureshared.BatchBatchPool.String(), + UniqueAttribute: "uniqueAttr", + Attributes: attributes, + Scope: scope, + Tags: azureshared.ConvertAzureTags(pool.Tags), + LinkedItemQueries: []*sdp.LinkedItemQuery{}, + } + + // Link to parent Batch Account + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.BatchBatchAccount.String(), + Method: sdp.QueryMethod_GET, + Query: accountName, + Scope: scope, + }, + }) + + // Link to public IPs when NetworkConfiguration.PublicIPAddressConfiguration.IPAddressIDs is set + if pool.Properties != nil && pool.Properties.NetworkConfiguration != nil && pool.Properties.NetworkConfiguration.PublicIPAddressConfiguration != nil { + for _, ipIDPtr := range pool.Properties.NetworkConfiguration.PublicIPAddressConfiguration.IPAddressIDs { + if ipIDPtr == nil || *ipIDPtr == "" { + continue + } + ipName := azureshared.ExtractResourceName(*ipIDPtr) + if ipName == "" { + continue + } + linkedScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(*ipIDPtr); extractedScope != "" { + linkedScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkPublicIPAddress.String(), + Method: sdp.QueryMethod_GET, + Query: ipName, + Scope: linkedScope, + }, + }) + } + } + + // Link to Subnet when NetworkConfiguration.SubnetID is set + if pool.Properties != nil && pool.Properties.NetworkConfiguration != nil && pool.Properties.NetworkConfiguration.SubnetID != nil { + subnetID := *pool.Properties.NetworkConfiguration.SubnetID + scopeParams := azureshared.ExtractPathParamsFromResourceID(subnetID, []string{"subscriptions", "resourceGroups"}) + subnetParams := 
azureshared.ExtractPathParamsFromResourceID(subnetID, []string{"virtualNetworks", "subnets"}) + if len(scopeParams) >= 2 && len(subnetParams) >= 2 { + subnetScope := fmt.Sprintf("%s.%s", scopeParams[0], scopeParams[1]) + vnetName := subnetParams[0] + subnetName := subnetParams[1] + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkSubnet.String(), + Method: sdp.QueryMethod_GET, + Query: shared.CompositeLookupKey(vnetName, subnetName), + Scope: subnetScope, + }, + }) + } + } + + // Link to user-assigned managed identities from Identity.UserAssignedIdentities map keys (resource IDs) + if pool.Identity != nil && pool.Identity.UserAssignedIdentities != nil { + for identityResourceID := range pool.Identity.UserAssignedIdentities { + if identityResourceID == "" { + continue + } + identityName := azureshared.ExtractResourceName(identityResourceID) + if identityName == "" { + continue + } + linkedScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(identityResourceID); extractedScope != "" { + linkedScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.ManagedIdentityUserAssignedIdentity.String(), + Method: sdp.QueryMethod_GET, + Query: identityName, + Scope: linkedScope, + }, + }) + } + } + + // Link to application packages referenced by the pool (Properties.ApplicationPackages) + // ID can be .../batchAccounts/{account}/applications/{app}/versions/{version} (specific version) + // or .../applications/{app} (default version); when default, use pkgRef.Version as fallback. 
+ if pool.Properties != nil && pool.Properties.ApplicationPackages != nil { + for _, pkgRef := range pool.Properties.ApplicationPackages { + if pkgRef == nil || pkgRef.ID == nil || *pkgRef.ID == "" { + continue + } + var pkgAccountName, appName, version string + params := azureshared.ExtractPathParamsFromResourceID(*pkgRef.ID, []string{"batchAccounts", "applications", "versions"}) + if len(params) >= 3 { + pkgAccountName, appName, version = params[0], params[1], params[2] + } else { + paramsApp := azureshared.ExtractPathParamsFromResourceID(*pkgRef.ID, []string{"batchAccounts", "applications"}) + if len(paramsApp) < 2 { + continue + } + pkgAccountName, appName = paramsApp[0], paramsApp[1] + if pkgRef.Version != nil && *pkgRef.Version != "" { + version = *pkgRef.Version + } else { + // Default version reference with no Version field: cannot form GET (adapter needs account|app|version) + continue + } + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.BatchBatchApplicationPackage.String(), + Method: sdp.QueryMethod_GET, + Query: shared.CompositeLookupKey(pkgAccountName, appName, version), + Scope: scope, + }, + }) + } + } + + // Note: armbatch v4 removed Certificates from PoolProperties; certificate refs are no longer linked from pools. 
+ + // Link to storage accounts and IP/DNS from MountConfiguration + seenIPs := make(map[string]struct{}) + seenDNS := make(map[string]struct{}) + if pool.Properties != nil && pool.Properties.MountConfiguration != nil { + for _, mount := range pool.Properties.MountConfiguration { + if mount == nil { + continue + } + if mount.AzureBlobFileSystemConfiguration != nil { + blobCfg := mount.AzureBlobFileSystemConfiguration + if blobCfg.AccountName != nil && *blobCfg.AccountName != "" { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.StorageAccount.String(), + Method: sdp.QueryMethod_GET, + Query: *blobCfg.AccountName, + Scope: scope, + }, + }) + } + if blobCfg.AccountName != nil && *blobCfg.AccountName != "" && blobCfg.ContainerName != nil && *blobCfg.ContainerName != "" { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.StorageBlobContainer.String(), + Method: sdp.QueryMethod_GET, + Query: shared.CompositeLookupKey(*blobCfg.AccountName, *blobCfg.ContainerName), + Scope: scope, + }, + }) + } + if blobCfg.IdentityReference != nil && blobCfg.IdentityReference.ResourceID != nil && *blobCfg.IdentityReference.ResourceID != "" { + identityResourceID := *blobCfg.IdentityReference.ResourceID + identityName := azureshared.ExtractResourceName(identityResourceID) + if identityName != "" { + linkedScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(identityResourceID); extractedScope != "" { + linkedScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.ManagedIdentityUserAssignedIdentity.String(), + Method: sdp.QueryMethod_GET, + Query: identityName, + Scope: linkedScope, + }, + }) + } + } + } + if mount.AzureFileShareConfiguration != nil { + if mount.AzureFileShareConfiguration.AccountName != nil && 
*mount.AzureFileShareConfiguration.AccountName != "" { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.StorageAccount.String(), + Method: sdp.QueryMethod_GET, + Query: *mount.AzureFileShareConfiguration.AccountName, + Scope: scope, + }, + }) + } + if mount.AzureFileShareConfiguration.AzureFileURL != nil && *mount.AzureFileShareConfiguration.AzureFileURL != "" { + AppendURILinks(&sdpItem.LinkedItemQueries, *mount.AzureFileShareConfiguration.AzureFileURL, seenDNS, seenIPs) + } + } + if mount.CifsMountConfiguration != nil && mount.CifsMountConfiguration.Source != nil && *mount.CifsMountConfiguration.Source != "" { + appendMountSourceHostLink(&sdpItem.LinkedItemQueries, *mount.CifsMountConfiguration.Source, seenIPs, seenDNS) + } + if mount.NfsMountConfiguration != nil && mount.NfsMountConfiguration.Source != nil && *mount.NfsMountConfiguration.Source != "" { + appendMountSourceHostLink(&sdpItem.LinkedItemQueries, *mount.NfsMountConfiguration.Source, seenIPs, seenDNS) + } + } + } + + // Link to image reference from DeploymentConfiguration.VirtualMachineConfiguration.ImageReference + // (custom image, shared gallery image, or community gallery image) + if pool.Properties != nil && pool.Properties.DeploymentConfiguration != nil && + pool.Properties.DeploymentConfiguration.VirtualMachineConfiguration != nil { + imageRef := pool.Properties.DeploymentConfiguration.VirtualMachineConfiguration.ImageReference + if imageRef != nil { + // ImageReference.ID: custom image or gallery image version path + if imageRef.ID != nil && *imageRef.ID != "" { + imageID := *imageRef.ID + if strings.Contains(imageID, "/galleries/") && strings.Contains(imageID, "/images/") && strings.Contains(imageID, "/versions/") { + params := azureshared.ExtractPathParamsFromResourceID(imageID, []string{"galleries", "images", "versions"}) + if len(params) == 3 { + galleryName, imageName, versionName := params[0], params[1], 
params[2] + linkScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(imageID); extractedScope != "" { + linkScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.ComputeSharedGalleryImage.String(), + Method: sdp.QueryMethod_GET, + Query: shared.CompositeLookupKey(galleryName, imageName, versionName), + Scope: linkScope, + }, + }) + } + } else if strings.Contains(imageID, "/images/") { + imageName := azureshared.ExtractResourceName(imageID) + if imageName != "" { + linkScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(imageID); extractedScope != "" { + linkScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.ComputeImage.String(), + Method: sdp.QueryMethod_GET, + Query: imageName, + Scope: linkScope, + }, + }) + } + } + } + // SharedGalleryImageID (path: .../sharedGalleries/{name}/images/{name}/versions/{name}) + if imageRef.SharedGalleryImageID != nil && *imageRef.SharedGalleryImageID != "" { + sharedGalleryImageID := *imageRef.SharedGalleryImageID + parts := azureshared.ExtractPathParamsFromResourceID(sharedGalleryImageID, []string{"sharedGalleries", "images", "versions"}) + if len(parts) >= 3 { + galleryName, imageName, version := parts[0], parts[1], parts[2] + linkScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(sharedGalleryImageID); extractedScope != "" { + linkScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.ComputeSharedGalleryImage.String(), + Method: sdp.QueryMethod_GET, + Query: shared.CompositeLookupKey(galleryName, imageName, version), + Scope: linkScope, + }, + }) + } + } + // CommunityGalleryImageID + if imageRef.CommunityGalleryImageID != nil && 
*imageRef.CommunityGalleryImageID != "" { + communityGalleryImageID := *imageRef.CommunityGalleryImageID + parts := azureshared.ExtractPathParamsFromResourceID(communityGalleryImageID, []string{"CommunityGalleries", "Images", "Versions"}) + if len(parts) >= 3 { + communityGalleryName, imageName, version := parts[0], parts[1], parts[2] + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.ComputeCommunityGalleryImage.String(), + Method: sdp.QueryMethod_GET, + Query: shared.CompositeLookupKey(communityGalleryName, imageName, version), + Scope: scope, + }, + }) + } + } + } + // Container registries (RegistryServer → DNS link; IdentityReference → managed identity link) + vmConfig := pool.Properties.DeploymentConfiguration.VirtualMachineConfiguration + if vmConfig.ContainerConfiguration != nil && vmConfig.ContainerConfiguration.ContainerRegistries != nil { + for _, reg := range vmConfig.ContainerConfiguration.ContainerRegistries { + if reg == nil { + continue + } + if reg.RegistryServer != nil && *reg.RegistryServer != "" { + host := strings.TrimSpace(*reg.RegistryServer) + if host != "" { + if net.ParseIP(host) != nil { + if _, seen := seenIPs[host]; !seen { + seenIPs[host] = struct{}{} + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkIP.String(), + Method: sdp.QueryMethod_GET, + Query: host, + Scope: "global", + }, + }) + } + } else { + if _, seen := seenDNS[host]; !seen { + seenDNS[host] = struct{}{} + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkDNS.String(), + Method: sdp.QueryMethod_SEARCH, + Query: host, + Scope: "global", + }, + }) + } + } + } + } + if reg.IdentityReference != nil && reg.IdentityReference.ResourceID != nil && *reg.IdentityReference.ResourceID != "" { + identityResourceID := *reg.IdentityReference.ResourceID + 
identityName := azureshared.ExtractResourceName(identityResourceID) + if identityName != "" { + linkedScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(identityResourceID); extractedScope != "" { + linkedScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.ManagedIdentityUserAssignedIdentity.String(), + Method: sdp.QueryMethod_GET, + Query: identityName, + Scope: linkedScope, + }, + }) + } + } + } + } + } + + // StartTask: ResourceFiles (HTTPUrl, StorageContainerURL → URI links; IdentityReference → managed identity), ContainerSettings.Registry (RegistryServer → DNS; IdentityReference → managed identity) + if pool.Properties != nil && pool.Properties.StartTask != nil { + startTask := pool.Properties.StartTask + if startTask.ResourceFiles != nil { + for _, rf := range startTask.ResourceFiles { + if rf == nil { + continue + } + if rf.HTTPURL != nil && *rf.HTTPURL != "" { + AppendURILinks(&sdpItem.LinkedItemQueries, *rf.HTTPURL, seenDNS, seenIPs) + } + if rf.StorageContainerURL != nil && *rf.StorageContainerURL != "" { + AppendURILinks(&sdpItem.LinkedItemQueries, *rf.StorageContainerURL, seenDNS, seenIPs) + } + if rf.IdentityReference != nil && rf.IdentityReference.ResourceID != nil && *rf.IdentityReference.ResourceID != "" { + identityResourceID := *rf.IdentityReference.ResourceID + identityName := azureshared.ExtractResourceName(identityResourceID) + if identityName != "" { + linkedScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(identityResourceID); extractedScope != "" { + linkedScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.ManagedIdentityUserAssignedIdentity.String(), + Method: sdp.QueryMethod_GET, + Query: identityName, + Scope: linkedScope, + }, + }) + } + } + } + } + if startTask.ContainerSettings != 
nil && startTask.ContainerSettings.Registry != nil { + reg := startTask.ContainerSettings.Registry + if reg.RegistryServer != nil && *reg.RegistryServer != "" { + host := strings.TrimSpace(*reg.RegistryServer) + if host != "" { + if net.ParseIP(host) != nil { + if _, seen := seenIPs[host]; !seen { + seenIPs[host] = struct{}{} + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkIP.String(), + Method: sdp.QueryMethod_GET, + Query: host, + Scope: "global", + }, + }) + } + } else { + if _, seen := seenDNS[host]; !seen { + seenDNS[host] = struct{}{} + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkDNS.String(), + Method: sdp.QueryMethod_SEARCH, + Query: host, + Scope: "global", + }, + }) + } + } + } + } + if reg.IdentityReference != nil && reg.IdentityReference.ResourceID != nil && *reg.IdentityReference.ResourceID != "" { + identityResourceID := *reg.IdentityReference.ResourceID + identityName := azureshared.ExtractResourceName(identityResourceID) + if identityName != "" { + linkedScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(identityResourceID); extractedScope != "" { + linkedScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.ManagedIdentityUserAssignedIdentity.String(), + Method: sdp.QueryMethod_GET, + Query: identityName, + Scope: linkedScope, + }, + }) + } + } + } + } + + // Map provisioning state to health + if pool.Properties != nil && pool.Properties.ProvisioningState != nil { + switch *pool.Properties.ProvisioningState { + case armbatch.PoolProvisioningStateSucceeded: + sdpItem.Health = sdp.Health_HEALTH_OK.Enum() + case armbatch.PoolProvisioningStateDeleting: + sdpItem.Health = sdp.Health_HEALTH_PENDING.Enum() + default: + sdpItem.Health = sdp.Health_HEALTH_UNKNOWN.Enum() + 
} + } + + return sdpItem, nil +} + +func (b batchBatchPoolWrapper) PotentialLinks() map[shared.ItemType]bool { + return map[shared.ItemType]bool{ + azureshared.BatchBatchAccount: true, + azureshared.NetworkSubnet: true, + azureshared.NetworkPublicIPAddress: true, + azureshared.ManagedIdentityUserAssignedIdentity: true, + azureshared.BatchBatchApplicationPackage: true, + azureshared.StorageAccount: true, + azureshared.StorageBlobContainer: true, + azureshared.ComputeImage: true, + azureshared.ComputeSharedGalleryImage: true, + azureshared.ComputeCommunityGalleryImage: true, + stdlib.NetworkIP: true, + stdlib.NetworkDNS: true, + stdlib.NetworkHTTP: true, + } +} + +// ref: https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/batch_pool +func (b batchBatchPoolWrapper) TerraformMappings() []*sdp.TerraformMapping { + return []*sdp.TerraformMapping{ + { + TerraformMethod: sdp.QueryMethod_SEARCH, + TerraformQueryMap: "azurerm_batch_pool.id", + }, + } +} + +// ref: https://learn.microsoft.com/en-us/azure/role-based-access-control/permissions/compute +func (b batchBatchPoolWrapper) IAMPermissions() []string { + return []string{ + "Microsoft.Batch/batchAccounts/pools/read", + } +} + +func (b batchBatchPoolWrapper) PredefinedRole() string { + return "Azure Batch Account Reader" +} + +// appendMountSourceHostLink extracts a host from a CIFS or NFS mount source (e.g. "\\server\share", "nfs://host/path", or "192.168.1.1") and appends a NetworkIP or NetworkDNS linked query with deduplication. 
+func appendMountSourceHostLink(queries *[]*sdp.LinkedItemQuery, source string, seenIPs, seenDNS map[string]struct{}) { + if source == "" { + return + } + var host string + if after, ok := strings.CutPrefix(source, "\\\\"); ok { + // UNC path: \\server\share + rest := after + if before, _, ok := strings.Cut(rest, "\\"); ok { + host = before + } else { + host = rest + } + } else if strings.Contains(source, "://") { + u, err := url.Parse(source) + if err != nil || u.Host == "" { + return + } + host = u.Hostname() + } else { + // NFS format: host:/path (e.g. 192.168.1.1:/vol1) — split on ":/" so host has no trailing colon + if before, _, ok0 := strings.Cut(source, ":/"); ok0 { + host = before + } else if idx := strings.IndexAny(source, "/\\"); idx >= 0 { + host = source[:idx] + } else { + host = source + } + } + host = strings.TrimSpace(host) + if host == "" { + return + } + if net.ParseIP(host) != nil { + if _, seen := seenIPs[host]; !seen { + seenIPs[host] = struct{}{} + *queries = append(*queries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkIP.String(), + Method: sdp.QueryMethod_GET, + Query: host, + Scope: "global", + }, + }) + } + } else { + if _, seen := seenDNS[host]; !seen { + seenDNS[host] = struct{}{} + *queries = append(*queries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkDNS.String(), + Method: sdp.QueryMethod_SEARCH, + Query: host, + Scope: "global", + }, + }) + } + } +} diff --git a/sources/azure/manual/batch-batch-pool_test.go b/sources/azure/manual/batch-batch-pool_test.go new file mode 100644 index 00000000..93648924 --- /dev/null +++ b/sources/azure/manual/batch-batch-pool_test.go @@ -0,0 +1,272 @@ +package manual_test + +import ( + "context" + "errors" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/batch/armbatch/v4" + "go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + 
"github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + "github.com/overmindtech/cli/sources/shared" + "github.com/overmindtech/cli/sources/stdlib" +) + +type mockBatchPoolsPager struct { + pages []armbatch.PoolClientListByBatchAccountResponse + index int +} + +func (m *mockBatchPoolsPager) More() bool { + return m.index < len(m.pages) +} + +func (m *mockBatchPoolsPager) NextPage(ctx context.Context) (armbatch.PoolClientListByBatchAccountResponse, error) { + if m.index >= len(m.pages) { + return armbatch.PoolClientListByBatchAccountResponse{}, errors.New("no more pages") + } + page := m.pages[m.index] + m.index++ + return page, nil +} + +type errorBatchPoolsPager struct{} + +func (e *errorBatchPoolsPager) More() bool { + return true +} + +func (e *errorBatchPoolsPager) NextPage(ctx context.Context) (armbatch.PoolClientListByBatchAccountResponse, error) { + return armbatch.PoolClientListByBatchAccountResponse{}, errors.New("pager error") +} + +type testBatchPoolsClient struct { + *mocks.MockBatchPoolsClient + pager clients.BatchPoolsPager +} + +func (t *testBatchPoolsClient) ListByBatchAccount(ctx context.Context, resourceGroupName, accountName string) clients.BatchPoolsPager { + if t.pager != nil { + return t.pager + } + return t.MockBatchPoolsClient.ListByBatchAccount(ctx, resourceGroupName, accountName) +} + +func createAzureBatchPool(name string) *armbatch.Pool { + state := armbatch.PoolProvisioningStateSucceeded + return &armbatch.Pool{ + ID: new("/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Batch/batchAccounts/acc/pools/" + name), + Name: new(name), + Type: new("Microsoft.Batch/batchAccounts/pools"), + Properties: &armbatch.PoolProperties{ + VMSize: new("Standard_D2s_v3"), + ProvisioningState: &state, + }, + Tags: 
map[string]*string{"env": new("test")}, + } +} + +func TestBatchBatchPool(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + scope := subscriptionID + "." + resourceGroup + accountName := "test-batch-account" + poolName := "test-pool" + + t.Run("Get", func(t *testing.T) { + pool := createAzureBatchPool(poolName) + + mockClient := mocks.NewMockBatchPoolsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, accountName, poolName).Return( + armbatch.PoolClientGetResponse{ + Pool: *pool, + }, nil) + + wrapper := manual.NewBatchBatchPool(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(accountName, poolName) + sdpItem, qErr := adapter.Get(ctx, scope, query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.BatchBatchPool.String() { + t.Errorf("Expected type %s, got %s", azureshared.BatchBatchPool.String(), sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", sdpItem.GetUniqueAttribute()) + } + + expectedUnique := shared.CompositeLookupKey(accountName, poolName) + if sdpItem.UniqueAttributeValue() != expectedUnique { + t.Errorf("Expected unique attribute value %s, got %s", expectedUnique, sdpItem.UniqueAttributeValue()) + } + + if sdpItem.GetScope() != scope { + t.Errorf("Expected scope %s, got %s", scope, sdpItem.GetScope()) + } + + if err := sdpItem.Validate(); err != nil { + t.Fatalf("Expected valid item, got: %v", err) + } + + t.Run("StaticTests", func(t *testing.T) { + queryTests := shared.QueryTests{ + {ExpectedType: azureshared.BatchBatchAccount.String(), ExpectedMethod: sdp.QueryMethod_GET, ExpectedQuery: accountName, 
ExpectedScope: scope}, + } + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("Get_InvalidQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockBatchPoolsClient(ctrl) + wrapper := manual.NewBatchBatchPool(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, scope, accountName, true) + if qErr == nil { + t.Error("Expected error when Get with insufficient query parts, but got nil") + } + }) + + t.Run("Get_ClientError", func(t *testing.T) { + expectedErr := errors.New("pool not found") + mockClient := mocks.NewMockBatchPoolsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, accountName, "nonexistent").Return( + armbatch.PoolClientGetResponse{}, expectedErr) + + wrapper := manual.NewBatchBatchPool(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(accountName, "nonexistent") + _, qErr := adapter.Get(ctx, scope, query, true) + if qErr == nil { + t.Error("Expected error when client returns error, but got nil") + } + }) + + t.Run("Search", func(t *testing.T) { + pool1 := createAzureBatchPool("pool-1") + pool2 := createAzureBatchPool("pool-2") + + mockClient := mocks.NewMockBatchPoolsClient(ctrl) + pages := []armbatch.PoolClientListByBatchAccountResponse{ + { + ListPoolsResult: armbatch.ListPoolsResult{ + Value: []*armbatch.Pool{pool1, pool2}, + }, + }, + } + mockPager := &mockBatchPoolsPager{pages: pages} + testClient := &testBatchPoolsClient{ + MockBatchPoolsClient: mockClient, + pager: mockPager, + } + + wrapper := manual.NewBatchBatchPool(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, 
sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, scope, accountName, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 2 { + t.Fatalf("Expected 2 items, got: %d", len(sdpItems)) + } + + for _, item := range sdpItems { + if err := item.Validate(); err != nil { + t.Errorf("Expected valid item, got: %v", err) + } + } + }) + + t.Run("Search_InvalidQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockBatchPoolsClient(ctrl) + wrapper := manual.NewBatchBatchPool(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + _, qErr := wrapper.Search(ctx, scope) + if qErr == nil { + t.Error("Expected error when Search with no query parts, but got nil") + } + }) + + t.Run("Search_PagerError", func(t *testing.T) { + mockClient := mocks.NewMockBatchPoolsClient(ctrl) + errorPager := &errorBatchPoolsPager{} + testClient := &testBatchPoolsClient{ + MockBatchPoolsClient: mockClient, + pager: errorPager, + } + + wrapper := manual.NewBatchBatchPool(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + _, qErr := wrapper.Search(ctx, scope, accountName) + if qErr == nil { + t.Error("Expected error when pager returns error, but got nil") + } + }) + + t.Run("PotentialLinks", func(t *testing.T) { + mockClient := mocks.NewMockBatchPoolsClient(ctrl) + wrapper := manual.NewBatchBatchPool(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + links := wrapper.PotentialLinks() + if !links[azureshared.BatchBatchAccount] { + t.Error("PotentialLinks() should include BatchBatchAccount") + } + if !links[azureshared.NetworkSubnet] { + t.Error("PotentialLinks() should include NetworkSubnet") + } + if 
!links[azureshared.ManagedIdentityUserAssignedIdentity] { + t.Error("PotentialLinks() should include ManagedIdentityUserAssignedIdentity") + } + if !links[azureshared.BatchBatchApplicationPackage] { + t.Error("PotentialLinks() should include BatchBatchApplicationPackage") + } + if !links[azureshared.NetworkPublicIPAddress] { + t.Error("PotentialLinks() should include NetworkPublicIPAddress") + } + if !links[azureshared.StorageAccount] { + t.Error("PotentialLinks() should include StorageAccount") + } + if !links[stdlib.NetworkIP] { + t.Error("PotentialLinks() should include stdlib.NetworkIP") + } + if !links[stdlib.NetworkDNS] { + t.Error("PotentialLinks() should include stdlib.NetworkDNS") + } + if !links[stdlib.NetworkHTTP] { + t.Error("PotentialLinks() should include stdlib.NetworkHTTP") + } + }) + + t.Run("ImplementsSearchableAdapter", func(t *testing.T) { + mockClient := mocks.NewMockBatchPoolsClient(ctrl) + wrapper := manual.NewBatchBatchPool(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Error("Adapter should implement SearchableAdapter interface") + } + }) +} diff --git a/sources/azure/manual/compute-availability-set_test.go b/sources/azure/manual/compute-availability-set_test.go index 12b22270..dfcae477 100644 --- a/sources/azure/manual/compute-availability-set_test.go +++ b/sources/azure/manual/compute-availability-set_test.go @@ -6,7 +6,6 @@ import ( "sync" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "go.uber.org/mock/gomock" @@ -251,9 +250,9 @@ func TestComputeAvailabilitySet(t *testing.T) { avSet1 := createAzureAvailabilitySet("test-avset-1") avSetNilName := &armcompute.AvailabilitySet{ Name: nil, // nil name should be skipped - Location: to.Ptr("eastus"), + 
Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, } @@ -324,24 +323,24 @@ func TestComputeAvailabilitySet(t *testing.T) { // createAzureAvailabilitySet creates a mock Azure Availability Set for testing func createAzureAvailabilitySet(avSetName string) *armcompute.AvailabilitySet { return &armcompute.AvailabilitySet{ - Name: to.Ptr(avSetName), - Location: to.Ptr("eastus"), + Name: new(avSetName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), - "project": to.Ptr("testing"), + "env": new("test"), + "project": new("testing"), }, Properties: &armcompute.AvailabilitySetProperties{ - PlatformFaultDomainCount: to.Ptr(int32(2)), - PlatformUpdateDomainCount: to.Ptr(int32(5)), + PlatformFaultDomainCount: new(int32(2)), + PlatformUpdateDomainCount: new(int32(5)), ProximityPlacementGroup: &armcompute.SubResource{ - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Compute/proximityPlacementGroups/test-ppg"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Compute/proximityPlacementGroups/test-ppg"), }, VirtualMachines: []*armcompute.SubResource{ { - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Compute/virtualMachines/test-vm-1"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Compute/virtualMachines/test-vm-1"), }, { - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Compute/virtualMachines/test-vm-2"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Compute/virtualMachines/test-vm-2"), }, }, }, @@ -352,20 +351,20 @@ func createAzureAvailabilitySet(avSetName string) *armcompute.AvailabilitySet { // with links to resources in different resource groups func createAzureAvailabilitySetWithCrossResourceGroupLinks(avSetName, subscriptionID string) 
*armcompute.AvailabilitySet { return &armcompute.AvailabilitySet{ - Name: to.Ptr(avSetName), - Location: to.Ptr("eastus"), + Name: new(avSetName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armcompute.AvailabilitySetProperties{ - PlatformFaultDomainCount: to.Ptr(int32(2)), - PlatformUpdateDomainCount: to.Ptr(int32(5)), + PlatformFaultDomainCount: new(int32(2)), + PlatformUpdateDomainCount: new(int32(5)), ProximityPlacementGroup: &armcompute.SubResource{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/other-rg/providers/Microsoft.Compute/proximityPlacementGroups/test-ppg"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/other-rg/providers/Microsoft.Compute/proximityPlacementGroups/test-ppg"), }, VirtualMachines: []*armcompute.SubResource{ { - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/vm-rg/providers/Microsoft.Compute/virtualMachines/test-vm"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/vm-rg/providers/Microsoft.Compute/virtualMachines/test-vm"), }, }, }, @@ -375,14 +374,14 @@ func createAzureAvailabilitySetWithCrossResourceGroupLinks(avSetName, subscripti // createAzureAvailabilitySetWithoutLinks creates a mock Availability Set without any linked resources func createAzureAvailabilitySetWithoutLinks(avSetName string) *armcompute.AvailabilitySet { return &armcompute.AvailabilitySet{ - Name: to.Ptr(avSetName), - Location: to.Ptr("eastus"), + Name: new(avSetName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armcompute.AvailabilitySetProperties{ - PlatformFaultDomainCount: to.Ptr(int32(2)), - PlatformUpdateDomainCount: to.Ptr(int32(5)), + PlatformFaultDomainCount: new(int32(2)), + PlatformUpdateDomainCount: new(int32(5)), // No ProximityPlacementGroup // No VirtualMachines }, diff --git 
a/sources/azure/manual/compute-capacity-reservation-group_test.go b/sources/azure/manual/compute-capacity-reservation-group_test.go index fa22c505..6dfb123f 100644 --- a/sources/azure/manual/compute-capacity-reservation-group_test.go +++ b/sources/azure/manual/compute-capacity-reservation-group_test.go @@ -6,7 +6,6 @@ import ( "sync" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "go.uber.org/mock/gomock" @@ -239,9 +238,9 @@ func TestComputeCapacityReservationGroup(t *testing.T) { crg1 := createAzureCapacityReservationGroup("test-crg-1") crgNilName := &armcompute.CapacityReservationGroup{ Name: nil, - Location: to.Ptr("eastus"), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armcompute.CapacityReservationGroupProperties{}, } @@ -332,11 +331,11 @@ func capacityReservationGroupListOptions() *armcompute.CapacityReservationGroups // createAzureCapacityReservationGroup creates a mock Azure Capacity Reservation Group for testing. 
func createAzureCapacityReservationGroup(groupName string) *armcompute.CapacityReservationGroup { return &armcompute.CapacityReservationGroup{ - Name: to.Ptr(groupName), - Location: to.Ptr("eastus"), + Name: new(groupName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), - "project": to.Ptr("testing"), + "env": new("test"), + "project": new("testing"), }, Properties: &armcompute.CapacityReservationGroupProperties{}, } @@ -347,20 +346,20 @@ func createAzureCapacityReservationGroupWithLinks(groupName, subscriptionID, res reservations := make([]*armcompute.SubResourceReadOnly, 0, len(reservationNames)) for _, name := range reservationNames { reservations = append(reservations, &armcompute.SubResourceReadOnly{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/capacityReservationGroups/" + groupName + "/capacityReservations/" + name), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/capacityReservationGroups/" + groupName + "/capacityReservations/" + name), }) } vms := make([]*armcompute.SubResourceReadOnly, 0, len(vmNames)) for _, name := range vmNames { vms = append(vms, &armcompute.SubResourceReadOnly{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/virtualMachines/" + name), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/virtualMachines/" + name), }) } return &armcompute.CapacityReservationGroup{ - Name: to.Ptr(groupName), - Location: to.Ptr("eastus"), + Name: new(groupName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armcompute.CapacityReservationGroupProperties{ CapacityReservations: reservations, diff --git a/sources/azure/manual/compute-capacity-reservation.go 
b/sources/azure/manual/compute-capacity-reservation.go new file mode 100644 index 00000000..697853bb --- /dev/null +++ b/sources/azure/manual/compute-capacity-reservation.go @@ -0,0 +1,287 @@ +package manual + +import ( + "context" + "errors" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" +) + +var ComputeCapacityReservationLookupByName = shared.NewItemTypeLookup("name", azureshared.ComputeCapacityReservation) + +type computeCapacityReservationWrapper struct { + client clients.CapacityReservationsClient + *azureshared.MultiResourceGroupBase +} + +func NewComputeCapacityReservation(client clients.CapacityReservationsClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.SearchableWrapper { + return &computeCapacityReservationWrapper{ + client: client, + MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_COMPUTE_APPLICATION, + azureshared.ComputeCapacityReservation, + ), + } +} + +func capacityReservationGetOptions() *armcompute.CapacityReservationsClientGetOptions { + expand := armcompute.CapacityReservationInstanceViewTypesInstanceView + return &armcompute.CapacityReservationsClientGetOptions{ + Expand: &expand, + } +} + +// ref: https://learn.microsoft.com/en-us/rest/api/compute/capacity-reservations/get?view=rest-compute-2025-04-01&tabs=HTTP +func (c *computeCapacityReservationWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) != 2 { + return nil, azureshared.QueryError(errors.New("queryParts must be exactly 2: capacity 
reservation group name and capacity reservation name"), scope, c.Type()) + } + groupName := queryParts[0] + if groupName == "" { + return nil, azureshared.QueryError(errors.New("capacity reservation group name cannot be empty"), scope, c.Type()) + } + reservationName := queryParts[1] + if reservationName == "" { + return nil, azureshared.QueryError(errors.New("capacity reservation name cannot be empty"), scope, c.Type()) + } + + rgScope, err := c.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + resp, err := c.client.Get(ctx, rgScope.ResourceGroup, groupName, reservationName, capacityReservationGetOptions()) + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + return c.azureCapacityReservationToSDPItem(&resp.CapacityReservation, groupName, scope) +} + +// ref: https://learn.microsoft.com/en-us/rest/api/compute/capacity-reservations/list-by-capacity-reservation-group?view=rest-compute-2025-04-01&tabs=HTTP +func (c *computeCapacityReservationWrapper) Search(ctx context.Context, scope string, queryParts ...string) ([]*sdp.Item, *sdp.QueryError) { + if len(queryParts) != 1 { + return nil, azureshared.QueryError(errors.New("queryParts must be exactly 1: capacity reservation group name"), scope, c.Type()) + } + groupName := queryParts[0] + if groupName == "" { + return nil, azureshared.QueryError(errors.New("capacity reservation group name cannot be empty"), scope, c.Type()) + } + + rgScope, err := c.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + pager := c.client.NewListByCapacityReservationGroupPager(rgScope.ResourceGroup, groupName, nil) + + var items []*sdp.Item + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + for _, res := range page.Value { + if res == nil || res.Name == nil { + continue + } + item, sdpErr := 
c.azureCapacityReservationToSDPItem(res, groupName, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + return items, nil +} + +func (c *computeCapacityReservationWrapper) SearchStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string, queryParts ...string) { + if len(queryParts) != 1 { + stream.SendError(azureshared.QueryError(errors.New("queryParts must be exactly 1: capacity reservation group name"), scope, c.Type())) + return + } + groupName := queryParts[0] + if groupName == "" { + stream.SendError(azureshared.QueryError(errors.New("capacity reservation group name cannot be empty"), scope, c.Type())) + return + } + + rgScope, err := c.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, c.Type())) + return + } + + pager := c.client.NewListByCapacityReservationGroupPager(rgScope.ResourceGroup, groupName, nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, c.Type())) + return + } + for _, res := range page.Value { + if res == nil || res.Name == nil { + continue + } + item, sdpErr := c.azureCapacityReservationToSDPItem(res, groupName, scope) + if sdpErr != nil { + stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) + stream.SendItem(item) + } + } +} + +func (c *computeCapacityReservationWrapper) azureCapacityReservationToSDPItem(res *armcompute.CapacityReservation, groupName, scope string) (*sdp.Item, *sdp.QueryError) { + attributes, err := shared.ToAttributesWithExclude(res, "tags") + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + + if res.Name == nil { + return nil, azureshared.QueryError(errors.New("capacity reservation name is nil"), scope, c.Type()) + } + reservationName := *res.Name + if reservationName == "" { + return nil, 
azureshared.QueryError(errors.New("capacity reservation name cannot be empty"), scope, c.Type()) + } + if err := attributes.Set("uniqueAttr", shared.CompositeLookupKey(groupName, reservationName)); err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + + linkedItemQueries := make([]*sdp.LinkedItemQuery, 0) + + // Parent: capacity reservation group + linkedItemQueries = append(linkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.ComputeCapacityReservationGroup.String(), + Method: sdp.QueryMethod_GET, + Query: groupName, + Scope: scope, + }, + }) + + // VMs associated with this capacity reservation + if res.Properties != nil && res.Properties.VirtualMachinesAssociated != nil { + for _, vmRef := range res.Properties.VirtualMachinesAssociated { + if vmRef == nil || vmRef.ID == nil || *vmRef.ID == "" { + continue + } + vmName := azureshared.ExtractResourceName(*vmRef.ID) + if vmName == "" { + continue + } + vmScope := scope + if linkScope := azureshared.ExtractScopeFromResourceID(*vmRef.ID); linkScope != "" { + vmScope = linkScope + } + linkedItemQueries = append(linkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.ComputeVirtualMachine.String(), + Method: sdp.QueryMethod_GET, + Query: vmName, + Scope: vmScope, + }, + }) + } + } + + // VMs physically allocated to this capacity reservation (from instance view; only populated when Get uses $expand=instanceView) + if res.Properties != nil && res.Properties.InstanceView != nil && res.Properties.InstanceView.UtilizationInfo != nil && res.Properties.InstanceView.UtilizationInfo.VirtualMachinesAllocated != nil { + for _, vmRef := range res.Properties.InstanceView.UtilizationInfo.VirtualMachinesAllocated { + if vmRef == nil || vmRef.ID == nil || *vmRef.ID == "" { + continue + } + vmName := azureshared.ExtractResourceName(*vmRef.ID) + if vmName == "" { + continue + } + vmScope := scope + if linkScope := 
azureshared.ExtractScopeFromResourceID(*vmRef.ID); linkScope != "" { + vmScope = linkScope + } + linkedItemQueries = append(linkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.ComputeVirtualMachine.String(), + Method: sdp.QueryMethod_GET, + Query: vmName, + Scope: vmScope, + }, + }) + } + } + + sdpItem := &sdp.Item{ + Type: azureshared.ComputeCapacityReservation.String(), + UniqueAttribute: "uniqueAttr", + Attributes: attributes, + Scope: scope, + Tags: azureshared.ConvertAzureTags(res.Tags), + LinkedItemQueries: linkedItemQueries, + } + + // Health status from ProvisioningState + if res.Properties != nil && res.Properties.ProvisioningState != nil { + state := strings.ToLower(*res.Properties.ProvisioningState) + switch state { + case "succeeded": + sdpItem.Health = sdp.Health_HEALTH_OK.Enum() + case "creating", "updating", "deleting": + sdpItem.Health = sdp.Health_HEALTH_PENDING.Enum() + case "failed", "canceled": + sdpItem.Health = sdp.Health_HEALTH_ERROR.Enum() + } + } + + return sdpItem, nil +} + +func (c *computeCapacityReservationWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + ComputeCapacityReservationGroupLookupByName, + ComputeCapacityReservationLookupByName, + } +} + +func (c *computeCapacityReservationWrapper) SearchLookups() []sources.ItemTypeLookups { + return []sources.ItemTypeLookups{ + { + ComputeCapacityReservationGroupLookupByName, + }, + } +} + +func (c *computeCapacityReservationWrapper) PotentialLinks() map[shared.ItemType]bool { + return map[shared.ItemType]bool{ + azureshared.ComputeCapacityReservationGroup: true, + azureshared.ComputeVirtualMachine: true, + } +} + +// ref: https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/capacity_reservation +func (c *computeCapacityReservationWrapper) TerraformMappings() []*sdp.TerraformMapping { + return []*sdp.TerraformMapping{ + { + TerraformMethod: sdp.QueryMethod_SEARCH, + TerraformQueryMap: 
"azurerm_capacity_reservation.id", + }, + } +} + +func (c *computeCapacityReservationWrapper) IAMPermissions() []string { + return []string{ + "Microsoft.Compute/capacityReservationGroups/capacityReservations/read", + } +} + +func (c *computeCapacityReservationWrapper) PredefinedRole() string { + return "Reader" +} diff --git a/sources/azure/manual/compute-capacity-reservation_test.go b/sources/azure/manual/compute-capacity-reservation_test.go new file mode 100644 index 00000000..515e7285 --- /dev/null +++ b/sources/azure/manual/compute-capacity-reservation_test.go @@ -0,0 +1,346 @@ +package manual + +import ( + "context" + "errors" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" + "go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + "github.com/overmindtech/cli/sources/shared" +) + +func createAzureCapacityReservation(reservationName, groupName string) *armcompute.CapacityReservation { + return &armcompute.CapacityReservation{ + ID: new("/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/capacityReservationGroups/" + groupName + "/capacityReservations/" + reservationName), + Name: new(reservationName), + Type: new("Microsoft.Compute/capacityReservationGroups/capacityReservations"), + Location: new("eastus"), + Tags: map[string]*string{"env": new("test")}, + SKU: &armcompute.SKU{ + Name: new("Standard_D2s_v3"), + Capacity: new(int64(1)), + }, + Properties: &armcompute.CapacityReservationProperties{ + ProvisioningState: new("Succeeded"), + }, + } +} + +func createAzureCapacityReservationWithVMs(reservationName, groupName, subscriptionID, resourceGroup string, vmNames ...string) 
*armcompute.CapacityReservation { + vms := make([]*armcompute.SubResourceReadOnly, 0, len(vmNames)) + for _, vmName := range vmNames { + vms = append(vms, &armcompute.SubResourceReadOnly{ + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/virtualMachines/" + vmName), + }) + } + return &armcompute.CapacityReservation{ + ID: new("/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/capacityReservationGroups/" + groupName + "/capacityReservations/" + reservationName), + Name: new(reservationName), + Type: new("Microsoft.Compute/capacityReservationGroups/capacityReservations"), + Location: new("eastus"), + Tags: map[string]*string{"env": new("test")}, + SKU: &armcompute.SKU{ + Name: new("Standard_D2s_v3"), + Capacity: new(int64(1)), + }, + Properties: &armcompute.CapacityReservationProperties{ + ProvisioningState: new("Succeeded"), + VirtualMachinesAssociated: vms, + }, + } +} + +type mockCapacityReservationsPager struct { + items []*armcompute.CapacityReservation + index int +} + +func (m *mockCapacityReservationsPager) More() bool { + return m.index < len(m.items) +} + +func (m *mockCapacityReservationsPager) NextPage(ctx context.Context) (armcompute.CapacityReservationsClientListByCapacityReservationGroupResponse, error) { + if m.index >= len(m.items) { + return armcompute.CapacityReservationsClientListByCapacityReservationGroupResponse{ + CapacityReservationListResult: armcompute.CapacityReservationListResult{ + Value: []*armcompute.CapacityReservation{}, + }, + }, nil + } + item := m.items[m.index] + m.index++ + return armcompute.CapacityReservationsClientListByCapacityReservationGroupResponse{ + CapacityReservationListResult: armcompute.CapacityReservationListResult{ + Value: []*armcompute.CapacityReservation{item}, + }, + }, nil +} + +type errorCapacityReservationsPager struct{} + +func (e *errorCapacityReservationsPager) More() bool { + return true +} + +func (e 
*errorCapacityReservationsPager) NextPage(ctx context.Context) (armcompute.CapacityReservationsClientListByCapacityReservationGroupResponse, error) { + return armcompute.CapacityReservationsClientListByCapacityReservationGroupResponse{}, errors.New("pager error") +} + +type testCapacityReservationsClient struct { + *mocks.MockCapacityReservationsClient + pager clients.CapacityReservationsPager +} + +func (t *testCapacityReservationsClient) NewListByCapacityReservationGroupPager(resourceGroupName string, capacityReservationGroupName string, options *armcompute.CapacityReservationsClientListByCapacityReservationGroupOptions) clients.CapacityReservationsPager { + if t.pager != nil { + return t.pager + } + return t.MockCapacityReservationsClient.NewListByCapacityReservationGroupPager(resourceGroupName, capacityReservationGroupName, options) +} + +func TestComputeCapacityReservation(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + scope := subscriptionID + "." 
+ resourceGroup + groupName := "test-crg" + reservationName := "test-reservation" + + t.Run("Get", func(t *testing.T) { + res := createAzureCapacityReservation(reservationName, groupName) + + mockClient := mocks.NewMockCapacityReservationsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, groupName, reservationName, gomock.Eq(capacityReservationGetOptions())).Return( + armcompute.CapacityReservationsClientGetResponse{ + CapacityReservation: *res, + }, nil) + + wrapper := NewComputeCapacityReservation(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(groupName, reservationName) + sdpItem, qErr := adapter.Get(ctx, scope, query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.ComputeCapacityReservation.String() { + t.Errorf("Expected type %s, got %s", azureshared.ComputeCapacityReservation.String(), sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", sdpItem.GetUniqueAttribute()) + } + + expectedUnique := shared.CompositeLookupKey(groupName, reservationName) + if sdpItem.UniqueAttributeValue() != expectedUnique { + t.Errorf("Expected unique attribute value %s, got %s", expectedUnique, sdpItem.UniqueAttributeValue()) + } + + if sdpItem.GetTags()["env"] != "test" { + t.Errorf("Expected tag env=test, got: %v", sdpItem.GetTags()["env"]) + } + + t.Run("StaticTests", func(t *testing.T) { + queryTests := shared.QueryTests{ + {ExpectedType: azureshared.ComputeCapacityReservationGroup.String(), ExpectedMethod: sdp.QueryMethod_GET, ExpectedQuery: groupName, ExpectedScope: scope}, + } + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("Get_WithVMLinks", func(t *testing.T) { + res := 
createAzureCapacityReservationWithVMs(reservationName, groupName, subscriptionID, resourceGroup, "vm-1", "vm-2") + + mockClient := mocks.NewMockCapacityReservationsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, groupName, reservationName, gomock.Eq(capacityReservationGetOptions())).Return( + armcompute.CapacityReservationsClientGetResponse{ + CapacityReservation: *res, + }, nil) + + wrapper := NewComputeCapacityReservation(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(groupName, reservationName) + sdpItem, qErr := adapter.Get(ctx, scope, query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + queryTests := shared.QueryTests{ + {ExpectedType: azureshared.ComputeCapacityReservationGroup.String(), ExpectedMethod: sdp.QueryMethod_GET, ExpectedQuery: groupName, ExpectedScope: scope}, + {ExpectedType: azureshared.ComputeVirtualMachine.String(), ExpectedMethod: sdp.QueryMethod_GET, ExpectedQuery: "vm-1", ExpectedScope: scope}, + {ExpectedType: azureshared.ComputeVirtualMachine.String(), ExpectedMethod: sdp.QueryMethod_GET, ExpectedQuery: "vm-2", ExpectedScope: scope}, + } + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + + t.Run("Get_InvalidQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockCapacityReservationsClient(ctrl) + wrapper := NewComputeCapacityReservation(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, scope, groupName, true) + if qErr == nil { + t.Error("Expected error when Get with wrong number of query parts, but got nil") + } + }) + + t.Run("Get_EmptyGroupName", func(t *testing.T) { + mockClient := mocks.NewMockCapacityReservationsClient(ctrl) + 
wrapper := NewComputeCapacityReservation(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey("", reservationName) + _, qErr := adapter.Get(ctx, scope, query, true) + if qErr == nil { + t.Error("Expected error when capacity reservation group name is empty, but got nil") + } + }) + + t.Run("Get_EmptyReservationName", func(t *testing.T) { + mockClient := mocks.NewMockCapacityReservationsClient(ctrl) + wrapper := NewComputeCapacityReservation(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(groupName, "") + _, qErr := adapter.Get(ctx, scope, query, true) + if qErr == nil { + t.Error("Expected error when capacity reservation name is empty, but got nil") + } + }) + + t.Run("Get_ClientError", func(t *testing.T) { + expectedErr := errors.New("reservation not found") + mockClient := mocks.NewMockCapacityReservationsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, groupName, "nonexistent", gomock.Eq(capacityReservationGetOptions())).Return( + armcompute.CapacityReservationsClientGetResponse{}, expectedErr) + + wrapper := NewComputeCapacityReservation(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(groupName, "nonexistent") + _, qErr := adapter.Get(ctx, scope, query, true) + if qErr == nil { + t.Error("Expected error when client returns error, but got nil") + } + }) + + t.Run("Search", func(t *testing.T) { + res1 := createAzureCapacityReservation("res-1", groupName) + res2 := createAzureCapacityReservation("res-2", groupName) + + mockClient := 
mocks.NewMockCapacityReservationsClient(ctrl) + pager := &mockCapacityReservationsPager{ + items: []*armcompute.CapacityReservation{res1, res2}, + } + testClient := &testCapacityReservationsClient{ + MockCapacityReservationsClient: mockClient, + pager: pager, + } + + wrapper := NewComputeCapacityReservation(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, scope, groupName, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 2 { + t.Fatalf("Expected 2 items, got: %d", len(sdpItems)) + } + + for _, item := range sdpItems { + if err := item.Validate(); err != nil { + t.Errorf("Expected valid item, got: %v", err) + } + } + }) + + t.Run("Search_InvalidQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockCapacityReservationsClient(ctrl) + wrapper := NewComputeCapacityReservation(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + _, qErr := wrapper.Search(ctx, scope, groupName, reservationName) + if qErr == nil { + t.Error("Expected error when Search with wrong number of query parts, but got nil") + } + }) + + t.Run("Search_EmptyGroupName", func(t *testing.T) { + mockClient := mocks.NewMockCapacityReservationsClient(ctrl) + wrapper := NewComputeCapacityReservation(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + _, qErr := wrapper.Search(ctx, scope, "") + if qErr == nil { + t.Error("Expected error when capacity reservation group name is empty, but got nil") + } + }) + + t.Run("Search_PagerError", func(t *testing.T) { + mockClient := mocks.NewMockCapacityReservationsClient(ctrl) + 
errorPager := &errorCapacityReservationsPager{} + testClient := &testCapacityReservationsClient{ + MockCapacityReservationsClient: mockClient, + pager: errorPager, + } + + wrapper := NewComputeCapacityReservation(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + _, err := searchable.Search(ctx, scope, groupName, true) + if err == nil { + t.Error("Expected error when pager returns error, but got nil") + } + }) + + t.Run("PotentialLinks", func(t *testing.T) { + mockClient := mocks.NewMockCapacityReservationsClient(ctrl) + wrapper := NewComputeCapacityReservation(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + links := wrapper.PotentialLinks() + expected := map[shared.ItemType]bool{ + azureshared.ComputeCapacityReservationGroup: true, + azureshared.ComputeVirtualMachine: true, + } + for itemType, want := range expected { + if got := links[itemType]; got != want { + t.Errorf("PotentialLinks()[%v] = %v, want %v", itemType, got, want) + } + } + }) + + t.Run("ImplementsSearchableAdapter", func(t *testing.T) { + mockClient := mocks.NewMockCapacityReservationsClient(ctrl) + wrapper := NewComputeCapacityReservation(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Error("Adapter should implement SearchableAdapter interface") + } + }) +} diff --git a/sources/azure/manual/compute-dedicated-host-group_test.go b/sources/azure/manual/compute-dedicated-host-group_test.go index 5b24242f..605de8e2 100644 --- 
a/sources/azure/manual/compute-dedicated-host-group_test.go +++ b/sources/azure/manual/compute-dedicated-host-group_test.go @@ -6,7 +6,6 @@ import ( "sync" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "go.uber.org/mock/gomock" @@ -225,12 +224,12 @@ func TestComputeDedicatedHostGroup(t *testing.T) { hostGroup1 := createAzureDedicatedHostGroup("test-host-group-1") hostGroupNilName := &armcompute.DedicatedHostGroup{ Name: nil, - Location: to.Ptr("eastus"), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armcompute.DedicatedHostGroupProperties{ - PlatformFaultDomainCount: to.Ptr(int32(2)), + PlatformFaultDomainCount: new(int32(2)), }, } @@ -306,15 +305,15 @@ func TestComputeDedicatedHostGroup(t *testing.T) { // createAzureDedicatedHostGroup creates a mock Azure Dedicated Host Group for testing. func createAzureDedicatedHostGroup(hostGroupName string) *armcompute.DedicatedHostGroup { return &armcompute.DedicatedHostGroup{ - Name: to.Ptr(hostGroupName), - Location: to.Ptr("eastus"), + Name: new(hostGroupName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), - "project": to.Ptr("testing"), + "env": new("test"), + "project": new("testing"), }, Properties: &armcompute.DedicatedHostGroupProperties{ - PlatformFaultDomainCount: to.Ptr(int32(2)), - SupportAutomaticPlacement: to.Ptr(false), + PlatformFaultDomainCount: new(int32(2)), + SupportAutomaticPlacement: new(false), AdditionalCapabilities: nil, Hosts: nil, InstanceView: nil, @@ -327,17 +326,17 @@ func createAzureDedicatedHostGroupWithHosts(hostGroupName, subscriptionID, resou hosts := make([]*armcompute.SubResourceReadOnly, 0, len(hostNames)) for _, name := range hostNames { hosts = append(hosts, &armcompute.SubResourceReadOnly{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + 
"/providers/Microsoft.Compute/hostGroups/" + hostGroupName + "/hosts/" + name), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/hostGroups/" + hostGroupName + "/hosts/" + name), }) } return &armcompute.DedicatedHostGroup{ - Name: to.Ptr(hostGroupName), - Location: to.Ptr("eastus"), + Name: new(hostGroupName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armcompute.DedicatedHostGroupProperties{ - PlatformFaultDomainCount: to.Ptr(int32(2)), + PlatformFaultDomainCount: new(int32(2)), Hosts: hosts, }, } diff --git a/sources/azure/manual/compute-dedicated-host.go b/sources/azure/manual/compute-dedicated-host.go new file mode 100644 index 00000000..2f27444a --- /dev/null +++ b/sources/azure/manual/compute-dedicated-host.go @@ -0,0 +1,255 @@ +package manual + +import ( + "context" + "errors" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" +) + +var ComputeDedicatedHostLookupByName = shared.NewItemTypeLookup("name", azureshared.ComputeDedicatedHost) + +type computeDedicatedHostWrapper struct { + client clients.DedicatedHostsClient + *azureshared.MultiResourceGroupBase +} + +func NewComputeDedicatedHost(client clients.DedicatedHostsClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.SearchableWrapper { + return &computeDedicatedHostWrapper{ + client: client, + MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_COMPUTE_APPLICATION, + azureshared.ComputeDedicatedHost, + ), + 
} +} + +// ref: https://learn.microsoft.com/en-us/rest/api/compute/dedicated-hosts/get?view=rest-compute-2025-04-01&tabs=HTTP +func (c *computeDedicatedHostWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) != 2 { + return nil, azureshared.QueryError(errors.New("queryParts must be exactly 2: dedicated host group name and dedicated host name"), scope, c.Type()) + } + hostGroupName := queryParts[0] + if hostGroupName == "" { + return nil, azureshared.QueryError(errors.New("dedicated host group name cannot be empty"), scope, c.Type()) + } + hostName := queryParts[1] + if hostName == "" { + return nil, azureshared.QueryError(errors.New("dedicated host name cannot be empty"), scope, c.Type()) + } + + rgScope, err := c.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + resp, err := c.client.Get(ctx, rgScope.ResourceGroup, hostGroupName, hostName, nil) + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + return c.azureDedicatedHostToSDPItem(&resp.DedicatedHost, hostGroupName, scope) +} + +// ref: https://learn.microsoft.com/en-us/rest/api/compute/dedicated-hosts/list-by-host-group?view=rest-compute-2025-04-01&tabs=HTTP +func (c *computeDedicatedHostWrapper) Search(ctx context.Context, scope string, queryParts ...string) ([]*sdp.Item, *sdp.QueryError) { + if len(queryParts) != 1 { + return nil, azureshared.QueryError(errors.New("queryParts must be exactly 1: dedicated host group name"), scope, c.Type()) + } + hostGroupName := queryParts[0] + if hostGroupName == "" { + return nil, azureshared.QueryError(errors.New("dedicated host group name cannot be empty"), scope, c.Type()) + } + + rgScope, err := c.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + pager := c.client.NewListByHostGroupPager(rgScope.ResourceGroup, hostGroupName, nil) + + var 
items []*sdp.Item + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + for _, host := range page.Value { + if host == nil || host.Name == nil { + continue + } + item, sdpErr := c.azureDedicatedHostToSDPItem(host, hostGroupName, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + return items, nil +} + +func (c *computeDedicatedHostWrapper) SearchStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string, queryParts ...string) { + if len(queryParts) != 1 { + stream.SendError(azureshared.QueryError(errors.New("queryParts must be exactly 1: dedicated host group name"), scope, c.Type())) + return + } + hostGroupName := queryParts[0] + if hostGroupName == "" { + stream.SendError(azureshared.QueryError(errors.New("dedicated host group name cannot be empty"), scope, c.Type())) + return + } + + rgScope, err := c.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, c.Type())) + return + } + + pager := c.client.NewListByHostGroupPager(rgScope.ResourceGroup, hostGroupName, nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, c.Type())) + return + } + for _, host := range page.Value { + if host == nil || host.Name == nil { + continue + } + item, sdpErr := c.azureDedicatedHostToSDPItem(host, hostGroupName, scope) + if sdpErr != nil { + stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) + stream.SendItem(item) + } + } +} + +func (c *computeDedicatedHostWrapper) azureDedicatedHostToSDPItem(host *armcompute.DedicatedHost, hostGroupName, scope string) (*sdp.Item, *sdp.QueryError) { + attributes, err := shared.ToAttributesWithExclude(host, "tags") + if err != nil { + return nil, 
azureshared.QueryError(err, scope, c.Type()) + } + + if host.Name == nil { + return nil, azureshared.QueryError(errors.New("dedicated host name is nil"), scope, c.Type()) + } + hostName := *host.Name + if hostName == "" { + return nil, azureshared.QueryError(errors.New("dedicated host name cannot be empty"), scope, c.Type()) + } + if err := attributes.Set("uniqueAttr", shared.CompositeLookupKey(hostGroupName, hostName)); err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + + linkedItemQueries := make([]*sdp.LinkedItemQuery, 0) + + // Parent: dedicated host group + linkedItemQueries = append(linkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.ComputeDedicatedHostGroup.String(), + Method: sdp.QueryMethod_GET, + Query: hostGroupName, + Scope: scope, + }, + }) + + // VMs deployed on this dedicated host + if host.Properties != nil && host.Properties.VirtualMachines != nil { + for _, vmRef := range host.Properties.VirtualMachines { + if vmRef == nil || vmRef.ID == nil || *vmRef.ID == "" { + continue + } + vmName := azureshared.ExtractResourceName(*vmRef.ID) + if vmName == "" { + continue + } + vmScope := scope + if linkScope := azureshared.ExtractScopeFromResourceID(*vmRef.ID); linkScope != "" { + vmScope = linkScope + } + linkedItemQueries = append(linkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.ComputeVirtualMachine.String(), + Method: sdp.QueryMethod_GET, + Query: vmName, + Scope: vmScope, + }, + }) + } + } + + sdpItem := &sdp.Item{ + Type: azureshared.ComputeDedicatedHost.String(), + UniqueAttribute: "uniqueAttr", + Attributes: attributes, + Scope: scope, + Tags: azureshared.ConvertAzureTags(host.Tags), + LinkedItemQueries: linkedItemQueries, + } + + // Health status from ProvisioningState + if host.Properties != nil && host.Properties.ProvisioningState != nil { + state := strings.ToLower(*host.Properties.ProvisioningState) + switch state { + case "succeeded": + sdpItem.Health = 
sdp.Health_HEALTH_OK.Enum() + case "creating", "updating", "deleting": + sdpItem.Health = sdp.Health_HEALTH_PENDING.Enum() + case "failed", "canceled": + sdpItem.Health = sdp.Health_HEALTH_ERROR.Enum() + } + } + + return sdpItem, nil +} + +func (c *computeDedicatedHostWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + ComputeDedicatedHostGroupLookupByName, + ComputeDedicatedHostLookupByName, + } +} + +func (c *computeDedicatedHostWrapper) SearchLookups() []sources.ItemTypeLookups { + return []sources.ItemTypeLookups{ + { + ComputeDedicatedHostGroupLookupByName, + }, + } +} + +func (c *computeDedicatedHostWrapper) PotentialLinks() map[shared.ItemType]bool { + return map[shared.ItemType]bool{ + azureshared.ComputeDedicatedHostGroup: true, + azureshared.ComputeVirtualMachine: true, + } +} + +// ref: https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/dedicated_host +func (c *computeDedicatedHostWrapper) TerraformMappings() []*sdp.TerraformMapping { + return []*sdp.TerraformMapping{ + { + TerraformMethod: sdp.QueryMethod_SEARCH, + TerraformQueryMap: "azurerm_dedicated_host.id", + }, + } +} + +func (c *computeDedicatedHostWrapper) IAMPermissions() []string { + return []string{ + "Microsoft.Compute/hostGroups/hosts/read", + } +} + +func (c *computeDedicatedHostWrapper) PredefinedRole() string { + return "Reader" +} diff --git a/sources/azure/manual/compute-dedicated-host_test.go b/sources/azure/manual/compute-dedicated-host_test.go new file mode 100644 index 00000000..b3f1204b --- /dev/null +++ b/sources/azure/manual/compute-dedicated-host_test.go @@ -0,0 +1,346 @@ +package manual + +import ( + "context" + "errors" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" + "go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + 
"github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + "github.com/overmindtech/cli/sources/shared" +) + +func createAzureDedicatedHost(hostName, hostGroupName string) *armcompute.DedicatedHost { + return &armcompute.DedicatedHost{ + ID: new("/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/hostGroups/" + hostGroupName + "/hosts/" + hostName), + Name: new(hostName), + Type: new("Microsoft.Compute/hostGroups/hosts"), + Location: new("eastus"), + Tags: map[string]*string{"env": new("test")}, + SKU: &armcompute.SKU{ + Name: new("DSv3-Type1"), + }, + Properties: &armcompute.DedicatedHostProperties{ + PlatformFaultDomain: new(int32(0)), + ProvisioningState: new("Succeeded"), + }, + } +} + +func createAzureDedicatedHostWithVMs(hostName, hostGroupName, subscriptionID, resourceGroup string, vmNames ...string) *armcompute.DedicatedHost { + vms := make([]*armcompute.SubResourceReadOnly, 0, len(vmNames)) + for _, vmName := range vmNames { + vms = append(vms, &armcompute.SubResourceReadOnly{ + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/virtualMachines/" + vmName), + }) + } + return &armcompute.DedicatedHost{ + ID: new("/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/hostGroups/" + hostGroupName + "/hosts/" + hostName), + Name: new(hostName), + Type: new("Microsoft.Compute/hostGroups/hosts"), + Location: new("eastus"), + Tags: map[string]*string{"env": new("test")}, + SKU: &armcompute.SKU{ + Name: new("DSv3-Type1"), + }, + Properties: &armcompute.DedicatedHostProperties{ + PlatformFaultDomain: new(int32(0)), + ProvisioningState: new("Succeeded"), + VirtualMachines: vms, + }, + } +} + +type mockDedicatedHostsPager struct { + items []*armcompute.DedicatedHost + index int +} + +func (m *mockDedicatedHostsPager) More() bool { + return m.index < len(m.items) +} 
+ +func (m *mockDedicatedHostsPager) NextPage(ctx context.Context) (armcompute.DedicatedHostsClientListByHostGroupResponse, error) { + if m.index >= len(m.items) { + return armcompute.DedicatedHostsClientListByHostGroupResponse{ + DedicatedHostListResult: armcompute.DedicatedHostListResult{ + Value: []*armcompute.DedicatedHost{}, + }, + }, nil + } + item := m.items[m.index] + m.index++ + return armcompute.DedicatedHostsClientListByHostGroupResponse{ + DedicatedHostListResult: armcompute.DedicatedHostListResult{ + Value: []*armcompute.DedicatedHost{item}, + }, + }, nil +} + +type errorDedicatedHostsPager struct{} + +func (e *errorDedicatedHostsPager) More() bool { + return true +} + +func (e *errorDedicatedHostsPager) NextPage(ctx context.Context) (armcompute.DedicatedHostsClientListByHostGroupResponse, error) { + return armcompute.DedicatedHostsClientListByHostGroupResponse{}, errors.New("pager error") +} + +type testDedicatedHostsClient struct { + *mocks.MockDedicatedHostsClient + pager clients.DedicatedHostsPager +} + +func (t *testDedicatedHostsClient) NewListByHostGroupPager(resourceGroupName string, hostGroupName string, options *armcompute.DedicatedHostsClientListByHostGroupOptions) clients.DedicatedHostsPager { + if t.pager != nil { + return t.pager + } + return t.MockDedicatedHostsClient.NewListByHostGroupPager(resourceGroupName, hostGroupName, options) +} + +func TestComputeDedicatedHost(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + scope := subscriptionID + "." 
+ resourceGroup + hostGroupName := "test-host-group" + hostName := "test-host" + + t.Run("Get", func(t *testing.T) { + host := createAzureDedicatedHost(hostName, hostGroupName) + + mockClient := mocks.NewMockDedicatedHostsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, hostGroupName, hostName, nil).Return( + armcompute.DedicatedHostsClientGetResponse{ + DedicatedHost: *host, + }, nil) + + wrapper := NewComputeDedicatedHost(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(hostGroupName, hostName) + sdpItem, qErr := adapter.Get(ctx, scope, query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.ComputeDedicatedHost.String() { + t.Errorf("Expected type %s, got %s", azureshared.ComputeDedicatedHost.String(), sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", sdpItem.GetUniqueAttribute()) + } + + expectedUnique := shared.CompositeLookupKey(hostGroupName, hostName) + if sdpItem.UniqueAttributeValue() != expectedUnique { + t.Errorf("Expected unique attribute value %s, got %s", expectedUnique, sdpItem.UniqueAttributeValue()) + } + + if sdpItem.GetTags()["env"] != "test" { + t.Errorf("Expected tag env=test, got: %v", sdpItem.GetTags()["env"]) + } + + t.Run("StaticTests", func(t *testing.T) { + queryTests := shared.QueryTests{ + {ExpectedType: azureshared.ComputeDedicatedHostGroup.String(), ExpectedMethod: sdp.QueryMethod_GET, ExpectedQuery: hostGroupName, ExpectedScope: scope}, + } + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("Get_WithVMLinks", func(t *testing.T) { + host := createAzureDedicatedHostWithVMs(hostName, hostGroupName, subscriptionID, resourceGroup, "vm-1", "vm-2") + + mockClient := 
mocks.NewMockDedicatedHostsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, hostGroupName, hostName, nil).Return( + armcompute.DedicatedHostsClientGetResponse{ + DedicatedHost: *host, + }, nil) + + wrapper := NewComputeDedicatedHost(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(hostGroupName, hostName) + sdpItem, qErr := adapter.Get(ctx, scope, query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + queryTests := shared.QueryTests{ + {ExpectedType: azureshared.ComputeDedicatedHostGroup.String(), ExpectedMethod: sdp.QueryMethod_GET, ExpectedQuery: hostGroupName, ExpectedScope: scope}, + {ExpectedType: azureshared.ComputeVirtualMachine.String(), ExpectedMethod: sdp.QueryMethod_GET, ExpectedQuery: "vm-1", ExpectedScope: scope}, + {ExpectedType: azureshared.ComputeVirtualMachine.String(), ExpectedMethod: sdp.QueryMethod_GET, ExpectedQuery: "vm-2", ExpectedScope: scope}, + } + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + + t.Run("Get_InvalidQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockDedicatedHostsClient(ctrl) + wrapper := NewComputeDedicatedHost(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, scope, hostGroupName, true) + if qErr == nil { + t.Error("Expected error when Get with wrong number of query parts, but got nil") + } + }) + + t.Run("Get_EmptyHostGroupName", func(t *testing.T) { + mockClient := mocks.NewMockDedicatedHostsClient(ctrl) + wrapper := NewComputeDedicatedHost(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, 
sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey("", hostName) + _, qErr := adapter.Get(ctx, scope, query, true) + if qErr == nil { + t.Error("Expected error when host group name is empty, but got nil") + } + }) + + t.Run("Get_EmptyHostName", func(t *testing.T) { + mockClient := mocks.NewMockDedicatedHostsClient(ctrl) + wrapper := NewComputeDedicatedHost(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(hostGroupName, "") + _, qErr := adapter.Get(ctx, scope, query, true) + if qErr == nil { + t.Error("Expected error when host name is empty, but got nil") + } + }) + + t.Run("Get_ClientError", func(t *testing.T) { + expectedErr := errors.New("host not found") + mockClient := mocks.NewMockDedicatedHostsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, hostGroupName, "nonexistent", nil).Return( + armcompute.DedicatedHostsClientGetResponse{}, expectedErr) + + wrapper := NewComputeDedicatedHost(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(hostGroupName, "nonexistent") + _, qErr := adapter.Get(ctx, scope, query, true) + if qErr == nil { + t.Error("Expected error when client returns error, but got nil") + } + }) + + t.Run("Search", func(t *testing.T) { + host1 := createAzureDedicatedHost("host-1", hostGroupName) + host2 := createAzureDedicatedHost("host-2", hostGroupName) + + mockClient := mocks.NewMockDedicatedHostsClient(ctrl) + pager := &mockDedicatedHostsPager{ + items: []*armcompute.DedicatedHost{host1, host2}, + } + testClient := &testDedicatedHostsClient{ + MockDedicatedHostsClient: mockClient, + pager: pager, + } + + wrapper := NewComputeDedicatedHost(testClient, 
[]azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, scope, hostGroupName, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 2 { + t.Fatalf("Expected 2 items, got: %d", len(sdpItems)) + } + + for _, item := range sdpItems { + if err := item.Validate(); err != nil { + t.Errorf("Expected valid item, got: %v", err) + } + } + }) + + t.Run("Search_InvalidQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockDedicatedHostsClient(ctrl) + wrapper := NewComputeDedicatedHost(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + _, qErr := wrapper.Search(ctx, scope, hostGroupName, hostName) + if qErr == nil { + t.Error("Expected error when Search with wrong number of query parts, but got nil") + } + }) + + t.Run("Search_EmptyHostGroupName", func(t *testing.T) { + mockClient := mocks.NewMockDedicatedHostsClient(ctrl) + wrapper := NewComputeDedicatedHost(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + _, qErr := wrapper.Search(ctx, scope, "") + if qErr == nil { + t.Error("Expected error when host group name is empty, but got nil") + } + }) + + t.Run("Search_PagerError", func(t *testing.T) { + mockClient := mocks.NewMockDedicatedHostsClient(ctrl) + errorPager := &errorDedicatedHostsPager{} + testClient := &testDedicatedHostsClient{ + MockDedicatedHostsClient: mockClient, + pager: errorPager, + } + + wrapper := NewComputeDedicatedHost(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, 
sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + _, err := searchable.Search(ctx, scope, hostGroupName, true) + if err == nil { + t.Error("Expected error when pager returns error, but got nil") + } + }) + + t.Run("PotentialLinks", func(t *testing.T) { + mockClient := mocks.NewMockDedicatedHostsClient(ctrl) + wrapper := NewComputeDedicatedHost(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + links := wrapper.PotentialLinks() + expected := map[shared.ItemType]bool{ + azureshared.ComputeDedicatedHostGroup: true, + azureshared.ComputeVirtualMachine: true, + } + for itemType, want := range expected { + if got := links[itemType]; got != want { + t.Errorf("PotentialLinks()[%v] = %v, want %v", itemType, got, want) + } + } + }) + + t.Run("ImplementsSearchableAdapter", func(t *testing.T) { + mockClient := mocks.NewMockDedicatedHostsClient(ctrl) + wrapper := NewComputeDedicatedHost(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Error("Adapter should implement SearchableAdapter interface") + } + }) +} diff --git a/sources/azure/manual/compute-disk-access_test.go b/sources/azure/manual/compute-disk-access_test.go index 9b042210..128f0c90 100644 --- a/sources/azure/manual/compute-disk-access_test.go +++ b/sources/azure/manual/compute-disk-access_test.go @@ -6,7 +6,6 @@ import ( "sync" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "go.uber.org/mock/gomock" @@ -239,9 +238,9 @@ func TestComputeDiskAccess(t *testing.T) { diskAccess1 := createAzureDiskAccess("test-disk-access-1") diskAccessNilName := 
&armcompute.DiskAccess{ Name: nil, - Location: to.Ptr("eastus"), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, } @@ -317,14 +316,14 @@ func TestComputeDiskAccess(t *testing.T) { // createAzureDiskAccess creates a mock Azure Disk Access for testing. func createAzureDiskAccess(diskAccessName string) *armcompute.DiskAccess { return &armcompute.DiskAccess{ - Name: to.Ptr(diskAccessName), - Location: to.Ptr("eastus"), + Name: new(diskAccessName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), - "project": to.Ptr("testing"), + "env": new("test"), + "project": new("testing"), }, Properties: &armcompute.DiskAccessProperties{ - ProvisioningState: to.Ptr("Succeeded"), + ProvisioningState: new("Succeeded"), }, } } @@ -332,27 +331,27 @@ func createAzureDiskAccess(diskAccessName string) *armcompute.DiskAccess { // createAzureDiskAccessWithPrivateEndpointConnections creates a mock Azure Disk Access with private endpoint connections. 
func createAzureDiskAccessWithPrivateEndpointConnections(diskAccessName, subscriptionID, resourceGroup string) *armcompute.DiskAccess { return &armcompute.DiskAccess{ - Name: to.Ptr(diskAccessName), - Location: to.Ptr("eastus"), + Name: new(diskAccessName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armcompute.DiskAccessProperties{ - ProvisioningState: to.Ptr("Succeeded"), + ProvisioningState: new("Succeeded"), PrivateEndpointConnections: []*armcompute.PrivateEndpointConnection{ { - Name: to.Ptr("pe-connection-1"), + Name: new("pe-connection-1"), Properties: &armcompute.PrivateEndpointConnectionProperties{ PrivateEndpoint: &armcompute.PrivateEndpoint{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/privateEndpoints/test-private-endpoint"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/privateEndpoints/test-private-endpoint"), }, }, }, { - Name: to.Ptr("pe-connection-2"), + Name: new("pe-connection-2"), Properties: &armcompute.PrivateEndpointConnectionProperties{ PrivateEndpoint: &armcompute.PrivateEndpoint{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/other-rg/providers/Microsoft.Network/privateEndpoints/test-private-endpoint-other-rg"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/other-rg/providers/Microsoft.Network/privateEndpoints/test-private-endpoint-other-rg"), }, }, }, diff --git a/sources/azure/manual/compute-disk-encryption-set_test.go b/sources/azure/manual/compute-disk-encryption-set_test.go index 2052400f..f166ce83 100644 --- a/sources/azure/manual/compute-disk-encryption-set_test.go +++ b/sources/azure/manual/compute-disk-encryption-set_test.go @@ -7,7 +7,6 @@ import ( "sync" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" 
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "go.uber.org/mock/gomock" @@ -264,7 +263,7 @@ func TestComputeDiskEncryptionSet(t *testing.T) { desName := "test-des" des := &armcompute.DiskEncryptionSet{ Name: nil, - Location: to.Ptr("eastus"), + Location: new("eastus"), } mockClient := mocks.NewMockDiskEncryptionSetsClient(ctrl) @@ -311,7 +310,7 @@ func TestComputeDiskEncryptionSet(t *testing.T) { des1 := createAzureDiskEncryptionSet("test-des-1") desNil := &armcompute.DiskEncryptionSet{ Name: nil, // Should be skipped - Location: to.Ptr("eastus"), + Location: new("eastus"), } mockClient := mocks.NewMockDiskEncryptionSetsClient(ctrl) @@ -430,30 +429,30 @@ func TestComputeDiskEncryptionSet(t *testing.T) { func createAzureDiskEncryptionSet(name string) *armcompute.DiskEncryptionSet { return &armcompute.DiskEncryptionSet{ - Name: to.Ptr(name), - Location: to.Ptr("eastus"), + Name: new(name), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armcompute.EncryptionSetProperties{ - ProvisioningState: to.Ptr("Succeeded"), + ProvisioningState: new("Succeeded"), }, } } func createAzureDiskEncryptionSetWithAllLinks(name, subscriptionID, resourceGroup string) *armcompute.DiskEncryptionSet { return &armcompute.DiskEncryptionSet{ - Name: to.Ptr(name), - Location: to.Ptr("eastus"), + Name: new(name), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armcompute.EncryptionSetProperties{ - ProvisioningState: to.Ptr("Succeeded"), + ProvisioningState: new("Succeeded"), ActiveKey: &armcompute.KeyForDiskEncryptionSet{ - KeyURL: to.Ptr("https://test-vault.vault.azure.net/keys/test-key/00000000000000000000000000000000"), + KeyURL: new("https://test-vault.vault.azure.net/keys/test-key/00000000000000000000000000000000"), SourceVault: &armcompute.SourceVault{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + 
resourceGroup + "/providers/Microsoft.KeyVault/vaults/test-vault"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.KeyVault/vaults/test-vault"), }, }, }, @@ -469,9 +468,9 @@ func createAzureDiskEncryptionSetWithPreviousKeys(name, subscriptionID, resource des := createAzureDiskEncryptionSetWithAllLinks(name, subscriptionID, resourceGroup) des.Properties.PreviousKeys = []*armcompute.KeyForDiskEncryptionSet{ { - KeyURL: to.Ptr("https://test-old-vault.vault.azure.net/keys/test-old-key/00000000000000000000000000000000"), + KeyURL: new("https://test-old-vault.vault.azure.net/keys/test-old-key/00000000000000000000000000000000"), SourceVault: &armcompute.SourceVault{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.KeyVault/vaults/test-old-vault"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.KeyVault/vaults/test-old-vault"), }, }, } @@ -483,9 +482,9 @@ func createAzureDiskEncryptionSetWithPreviousKeysSameVault(name, subscriptionID, des.Properties.PreviousKeys = []*armcompute.KeyForDiskEncryptionSet{ { // Same vault + key as ActiveKey.KeyURL to ensure links are deduplicated. 
- KeyURL: to.Ptr("https://test-vault.vault.azure.net/keys/test-key/00000000000000000000000000000000"), + KeyURL: new("https://test-vault.vault.azure.net/keys/test-key/00000000000000000000000000000000"), SourceVault: &armcompute.SourceVault{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.KeyVault/vaults/test-vault"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.KeyVault/vaults/test-vault"), }, }, } diff --git a/sources/azure/manual/compute-disk_test.go b/sources/azure/manual/compute-disk_test.go index 8dd2c7f9..f503c364 100644 --- a/sources/azure/manual/compute-disk_test.go +++ b/sources/azure/manual/compute-disk_test.go @@ -6,7 +6,6 @@ import ( "sync" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "go.uber.org/mock/gomock" @@ -365,9 +364,9 @@ func TestComputeDisk(t *testing.T) { disk1 := createAzureDisk("test-disk-1", "Succeeded") diskNilName := &armcompute.Disk{ Name: nil, // nil name should be skipped - Location: to.Ptr("eastus"), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, } @@ -489,17 +488,17 @@ func TestComputeDisk(t *testing.T) { // createAzureDisk creates a mock Azure Disk for testing func createAzureDisk(diskName, provisioningState string) *armcompute.Disk { return &armcompute.Disk{ - Name: to.Ptr(diskName), - Location: to.Ptr("eastus"), + Name: new(diskName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), - "project": to.Ptr("testing"), + "env": new("test"), + "project": new("testing"), }, Properties: &armcompute.DiskProperties{ - ProvisioningState: to.Ptr(provisioningState), - DiskSizeGB: to.Ptr(int32(128)), + ProvisioningState: new(provisioningState), + DiskSizeGB: new(int32(128)), CreationData: &armcompute.CreationData{ - CreateOption: 
to.Ptr(armcompute.DiskCreateOptionEmpty), + CreateOption: new(armcompute.DiskCreateOptionEmpty), }, }, } @@ -508,59 +507,59 @@ func createAzureDisk(diskName, provisioningState string) *armcompute.Disk { // createAzureDiskWithAllLinks creates a mock Azure Disk with all possible linked resources func createAzureDiskWithAllLinks(diskName, subscriptionID, resourceGroup string) *armcompute.Disk { return &armcompute.Disk{ - Name: to.Ptr(diskName), - Location: to.Ptr("eastus"), + Name: new(diskName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, - ManagedBy: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/virtualMachines/test-vm"), + ManagedBy: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/virtualMachines/test-vm"), ManagedByExtended: []*string{ - to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/virtualMachines/test-vm-2"), + new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/virtualMachines/test-vm-2"), }, Properties: &armcompute.DiskProperties{ - ProvisioningState: to.Ptr("Succeeded"), - DiskSizeGB: to.Ptr(int32(128)), - DiskAccessID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/diskAccesses/test-disk-access"), + ProvisioningState: new("Succeeded"), + DiskSizeGB: new(int32(128)), + DiskAccessID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/diskAccesses/test-disk-access"), Encryption: &armcompute.Encryption{ - DiskEncryptionSetID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/diskEncryptionSets/test-disk-encryption-set"), + DiskEncryptionSetID: new("/subscriptions/" + subscriptionID + 
"/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/diskEncryptionSets/test-disk-encryption-set"), }, SecurityProfile: &armcompute.DiskSecurityProfile{ - SecureVMDiskEncryptionSetID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/diskEncryptionSets/test-secure-vm-disk-encryption-set"), + SecureVMDiskEncryptionSetID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/diskEncryptionSets/test-secure-vm-disk-encryption-set"), }, ShareInfo: []*armcompute.ShareInfoElement{ { - VMURI: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/virtualMachines/test-vm-3"), + VMURI: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/virtualMachines/test-vm-3"), }, }, CreationData: &armcompute.CreationData{ - CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), - SourceResourceID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/disks/source-disk"), - StorageAccountID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Storage/storageAccounts/test-storage-account"), + CreateOption: new(armcompute.DiskCreateOptionCopy), + SourceResourceID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/disks/source-disk"), + StorageAccountID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Storage/storageAccounts/test-storage-account"), ImageReference: &armcompute.ImageDiskReference{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/images/test-image"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + 
"/providers/Microsoft.Compute/images/test-image"), }, GalleryImageReference: &armcompute.ImageDiskReference{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/galleries/test-gallery/images/test-gallery-image/versions/1.0.0"), - SharedGalleryImageID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/galleries/test-gallery-2/images/test-gallery-image-2/versions/2.0.0"), - CommunityGalleryImageID: to.Ptr("/CommunityGalleries/test-community-gallery/Images/test-community-image/Versions/1.0.0"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/galleries/test-gallery/images/test-gallery-image/versions/1.0.0"), + SharedGalleryImageID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/galleries/test-gallery-2/images/test-gallery-image-2/versions/2.0.0"), + CommunityGalleryImageID: new("/CommunityGalleries/test-community-gallery/Images/test-community-image/Versions/1.0.0"), }, - ElasticSanResourceID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.ElasticSan/elasticSans/test-elastic-san/volumegroups/test-volume-group/snapshots/test-snapshot"), + ElasticSanResourceID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.ElasticSan/elasticSans/test-elastic-san/volumegroups/test-volume-group/snapshots/test-snapshot"), }, EncryptionSettingsCollection: &armcompute.EncryptionSettingsCollection{ - Enabled: to.Ptr(true), + Enabled: new(true), EncryptionSettings: []*armcompute.EncryptionSettingsElement{ { DiskEncryptionKey: &armcompute.KeyVaultAndSecretReference{ SourceVault: &armcompute.SourceVault{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + 
"/providers/Microsoft.KeyVault/vaults/test-keyvault"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.KeyVault/vaults/test-keyvault"), }, - SecretURL: to.Ptr("https://test-keyvault.vault.azure.net/secrets/test-secret/version"), + SecretURL: new("https://test-keyvault.vault.azure.net/secrets/test-secret/version"), }, KeyEncryptionKey: &armcompute.KeyVaultAndKeyReference{ SourceVault: &armcompute.SourceVault{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.KeyVault/vaults/test-keyvault-2"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.KeyVault/vaults/test-keyvault-2"), }, - KeyURL: to.Ptr("https://test-keyvault-2.vault.azure.net/keys/test-key/version"), + KeyURL: new("https://test-keyvault-2.vault.azure.net/keys/test-key/version"), }, }, }, @@ -572,17 +571,17 @@ func createAzureDiskWithAllLinks(diskName, subscriptionID, resourceGroup string) // createAzureDiskFromSnapshot creates a mock Azure Disk created from a snapshot func createAzureDiskFromSnapshot(diskName, subscriptionID, resourceGroup string) *armcompute.Disk { return &armcompute.Disk{ - Name: to.Ptr(diskName), - Location: to.Ptr("eastus"), + Name: new(diskName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armcompute.DiskProperties{ - ProvisioningState: to.Ptr("Succeeded"), - DiskSizeGB: to.Ptr(int32(128)), + ProvisioningState: new("Succeeded"), + DiskSizeGB: new(int32(128)), CreationData: &armcompute.CreationData{ - CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), - SourceResourceID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/snapshots/test-snapshot"), + CreateOption: new(armcompute.DiskCreateOptionCopy), + SourceResourceID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + 
resourceGroup + "/providers/Microsoft.Compute/snapshots/test-snapshot"), }, }, } @@ -591,17 +590,17 @@ func createAzureDiskFromSnapshot(diskName, subscriptionID, resourceGroup string) // createAzureDiskWithCrossResourceGroupLinks creates a mock Azure Disk with links to resources in different resource groups func createAzureDiskWithCrossResourceGroupLinks(diskName, subscriptionID, resourceGroup string) *armcompute.Disk { return &armcompute.Disk{ - Name: to.Ptr(diskName), - Location: to.Ptr("eastus"), + Name: new(diskName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, - ManagedBy: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/other-rg/providers/Microsoft.Compute/virtualMachines/test-vm-other-rg"), + ManagedBy: new("/subscriptions/" + subscriptionID + "/resourceGroups/other-rg/providers/Microsoft.Compute/virtualMachines/test-vm-other-rg"), Properties: &armcompute.DiskProperties{ - ProvisioningState: to.Ptr("Succeeded"), - DiskSizeGB: to.Ptr(int32(128)), + ProvisioningState: new("Succeeded"), + DiskSizeGB: new(int32(128)), CreationData: &armcompute.CreationData{ - CreateOption: to.Ptr(armcompute.DiskCreateOptionEmpty), + CreateOption: new(armcompute.DiskCreateOptionEmpty), }, }, } diff --git a/sources/azure/manual/compute-gallery-application-version.go b/sources/azure/manual/compute-gallery-application-version.go index e5773d47..dc863f58 100644 --- a/sources/azure/manual/compute-gallery-application-version.go +++ b/sources/azure/manual/compute-gallery-application-version.go @@ -18,7 +18,6 @@ import ( var ( ComputeGalleryApplicationVersionLookupByName = shared.NewItemTypeLookup("name", azureshared.ComputeGalleryApplicationVersion) - ComputeGalleryApplicationLookupByName = shared.NewItemTypeLookup("name", azureshared.ComputeGalleryApplication) //todo: move to its adapter file when created, this is just a placeholder ) type computeGalleryApplicationVersionWrapper struct { @@ -204,7 +203,7 @@ func (c 
computeGalleryApplicationVersionWrapper) azureGalleryApplicationVersionT if link == "" || (!strings.HasPrefix(link, "http://") && !strings.HasPrefix(link, "https://")) { return } - AppendURILinks(&linkedItemQueries, link, linkedDNSHostnames, seenIPs, true, true) + AppendURILinks(&linkedItemQueries, link, linkedDNSHostnames, seenIPs) if accountName := azureshared.ExtractStorageAccountNameFromBlobURI(link); accountName != "" { if _, seen := seenStorageAccounts[accountName]; !seen { seenStorageAccounts[accountName] = struct{}{} diff --git a/sources/azure/manual/compute-gallery-application-version_test.go b/sources/azure/manual/compute-gallery-application-version_test.go index 25716e24..3b580627 100644 --- a/sources/azure/manual/compute-gallery-application-version_test.go +++ b/sources/azure/manual/compute-gallery-application-version_test.go @@ -5,7 +5,6 @@ import ( "errors" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "go.uber.org/mock/gomock" @@ -15,6 +14,7 @@ import ( "github.com/overmindtech/cli/sources" "github.com/overmindtech/cli/sources/azure/clients" azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" "github.com/overmindtech/cli/sources/shared" "github.com/overmindtech/cli/sources/stdlib" ) @@ -51,7 +51,7 @@ func (e *errorGalleryApplicationVersionsPager) NextPage(ctx context.Context) (ar // testGalleryApplicationVersionsClient wraps the mock and returns a pager from NewListByGalleryApplicationPager. 
type testGalleryApplicationVersionsClient struct { - *MockGalleryApplicationVersionsClient + *mocks.MockGalleryApplicationVersionsClient pager clients.GalleryApplicationVersionsPager } @@ -65,15 +65,15 @@ func (t *testGalleryApplicationVersionsClient) NewListByGalleryApplicationPager( func createAzureGalleryApplicationVersion(versionName string) *armcompute.GalleryApplicationVersion { return &armcompute.GalleryApplicationVersion{ - Name: to.Ptr(versionName), - Location: to.Ptr("eastus"), + Name: new(versionName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armcompute.GalleryApplicationVersionProperties{ PublishingProfile: &armcompute.GalleryApplicationVersionPublishingProfile{ Source: &armcompute.UserArtifactSource{ - MediaLink: to.Ptr("https://mystorageaccount.blob.core.windows.net/packages/app.zip"), + MediaLink: new("https://mystorageaccount.blob.core.windows.net/packages/app.zip"), }, }, }, @@ -82,14 +82,14 @@ func createAzureGalleryApplicationVersion(versionName string) *armcompute.Galler func createAzureGalleryApplicationVersionWithLinks(versionName, subscriptionID, resourceGroup string) *armcompute.GalleryApplicationVersion { v := createAzureGalleryApplicationVersion(versionName) - v.Properties.PublishingProfile.Source.DefaultConfigurationLink = to.Ptr("https://mystorageaccount.blob.core.windows.net/config/default.json") + v.Properties.PublishingProfile.Source.DefaultConfigurationLink = new("https://mystorageaccount.blob.core.windows.net/config/default.json") desID := "/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/diskEncryptionSets/test-des" v.Properties.PublishingProfile.TargetRegions = []*armcompute.TargetRegion{ { - Name: to.Ptr("eastus"), + Name: new("eastus"), Encryption: &armcompute.EncryptionImages{ OSDiskImage: &armcompute.OSDiskImageEncryption{ - DiskEncryptionSetID: to.Ptr(desID), + DiskEncryptionSetID: new(desID), }, }, 
}, @@ -112,7 +112,7 @@ func TestComputeGalleryApplicationVersion(t *testing.T) { t.Run("Get", func(t *testing.T) { version := createAzureGalleryApplicationVersion(galleryApplicationVersionName) - mockClient := NewMockGalleryApplicationVersionsClient(ctrl) + mockClient := mocks.NewMockGalleryApplicationVersionsClient(ctrl) mockClient.EXPECT().Get(ctx, resourceGroup, galleryName, galleryApplicationName, galleryApplicationVersionName, nil).Return( armcompute.GalleryApplicationVersionsClientGetResponse{ GalleryApplicationVersion: *version, @@ -160,7 +160,7 @@ func TestComputeGalleryApplicationVersion(t *testing.T) { t.Run("GetWithLinkedResources", func(t *testing.T) { version := createAzureGalleryApplicationVersionWithLinks(galleryApplicationVersionName, subscriptionID, resourceGroup) - mockClient := NewMockGalleryApplicationVersionsClient(ctrl) + mockClient := mocks.NewMockGalleryApplicationVersionsClient(ctrl) mockClient.EXPECT().Get(ctx, resourceGroup, galleryName, galleryApplicationName, galleryApplicationVersionName, nil).Return( armcompute.GalleryApplicationVersionsClientGetResponse{ GalleryApplicationVersion: *version, @@ -192,7 +192,7 @@ func TestComputeGalleryApplicationVersion(t *testing.T) { }) t.Run("Get_InvalidQueryParts", func(t *testing.T) { - mockClient := NewMockGalleryApplicationVersionsClient(ctrl) + mockClient := mocks.NewMockGalleryApplicationVersionsClient(ctrl) wrapper := NewComputeGalleryApplicationVersion(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) @@ -204,7 +204,7 @@ func TestComputeGalleryApplicationVersion(t *testing.T) { }) t.Run("Get_EmptyGalleryName", func(t *testing.T) { - mockClient := NewMockGalleryApplicationVersionsClient(ctrl) + mockClient := mocks.NewMockGalleryApplicationVersionsClient(ctrl) wrapper := NewComputeGalleryApplicationVersion(mockClient, 
[]azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) @@ -217,7 +217,7 @@ func TestComputeGalleryApplicationVersion(t *testing.T) { t.Run("Get_ClientError", func(t *testing.T) { expectedErr := errors.New("version not found") - mockClient := NewMockGalleryApplicationVersionsClient(ctrl) + mockClient := mocks.NewMockGalleryApplicationVersionsClient(ctrl) mockClient.EXPECT().Get(ctx, resourceGroup, galleryName, galleryApplicationName, "nonexistent", nil).Return( armcompute.GalleryApplicationVersionsClientGetResponse{}, expectedErr) @@ -234,9 +234,9 @@ func TestComputeGalleryApplicationVersion(t *testing.T) { t.Run("Get_NonBlobURL_NoStorageLinks", func(t *testing.T) { // MediaLink that is not Azure Blob Storage must not create StorageAccount/StorageBlobContainer links. version := createAzureGalleryApplicationVersion(galleryApplicationVersionName) - version.Properties.PublishingProfile.Source.MediaLink = to.Ptr("https://example.com/artifacts/app.zip") + version.Properties.PublishingProfile.Source.MediaLink = new("https://example.com/artifacts/app.zip") - mockClient := NewMockGalleryApplicationVersionsClient(ctrl) + mockClient := mocks.NewMockGalleryApplicationVersionsClient(ctrl) mockClient.EXPECT().Get(ctx, resourceGroup, galleryName, galleryApplicationName, galleryApplicationVersionName, nil).Return( armcompute.GalleryApplicationVersionsClientGetResponse{ GalleryApplicationVersion: *version, @@ -286,9 +286,9 @@ func TestComputeGalleryApplicationVersion(t *testing.T) { t.Run("Get_IPHost_EmitsIPLink", func(t *testing.T) { // When MediaLink or DefaultConfigurationLink has a literal IP host, emit stdlib.NetworkIP link (GET, global), not DNS. 
version := createAzureGalleryApplicationVersion(galleryApplicationVersionName) - version.Properties.PublishingProfile.Source.MediaLink = to.Ptr("https://192.168.1.10:8443/artifacts/app.zip") + version.Properties.PublishingProfile.Source.MediaLink = new("https://192.168.1.10:8443/artifacts/app.zip") - mockClient := NewMockGalleryApplicationVersionsClient(ctrl) + mockClient := mocks.NewMockGalleryApplicationVersionsClient(ctrl) mockClient.EXPECT().Get(ctx, resourceGroup, galleryName, galleryApplicationName, galleryApplicationVersionName, nil).Return( armcompute.GalleryApplicationVersionsClientGetResponse{ GalleryApplicationVersion: *version, @@ -329,7 +329,7 @@ func TestComputeGalleryApplicationVersion(t *testing.T) { v1 := createAzureGalleryApplicationVersion("1.0.0") v2 := createAzureGalleryApplicationVersion("1.0.1") - mockClient := NewMockGalleryApplicationVersionsClient(ctrl) + mockClient := mocks.NewMockGalleryApplicationVersionsClient(ctrl) pages := []armcompute.GalleryApplicationVersionsClientListByGalleryApplicationResponse{ { GalleryApplicationVersionList: armcompute.GalleryApplicationVersionList{ @@ -369,7 +369,7 @@ func TestComputeGalleryApplicationVersion(t *testing.T) { }) t.Run("Search_InvalidQueryParts", func(t *testing.T) { - mockClient := NewMockGalleryApplicationVersionsClient(ctrl) + mockClient := mocks.NewMockGalleryApplicationVersionsClient(ctrl) wrapper := NewComputeGalleryApplicationVersion(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) @@ -385,7 +385,7 @@ func TestComputeGalleryApplicationVersion(t *testing.T) { }) t.Run("Search_EmptyGalleryName", func(t *testing.T) { - mockClient := NewMockGalleryApplicationVersionsClient(ctrl) + mockClient := mocks.NewMockGalleryApplicationVersionsClient(ctrl) wrapper := NewComputeGalleryApplicationVersion(mockClient, 
[]azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) _, qErr := wrapper.Search(ctx, scope, "", galleryApplicationName) @@ -395,7 +395,7 @@ func TestComputeGalleryApplicationVersion(t *testing.T) { }) t.Run("Search_PagerError", func(t *testing.T) { - mockClient := NewMockGalleryApplicationVersionsClient(ctrl) + mockClient := mocks.NewMockGalleryApplicationVersionsClient(ctrl) errorPager := &errorGalleryApplicationVersionsPager{} testClient := &testGalleryApplicationVersionsClient{ MockGalleryApplicationVersionsClient: mockClient, @@ -418,7 +418,7 @@ func TestComputeGalleryApplicationVersion(t *testing.T) { }) t.Run("PotentialLinks", func(t *testing.T) { - mockClient := NewMockGalleryApplicationVersionsClient(ctrl) + mockClient := mocks.NewMockGalleryApplicationVersionsClient(ctrl) wrapper := NewComputeGalleryApplicationVersion(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) links := wrapper.PotentialLinks() @@ -440,7 +440,7 @@ func TestComputeGalleryApplicationVersion(t *testing.T) { }) t.Run("ImplementsSearchableAdapter", func(t *testing.T) { - mockClient := NewMockGalleryApplicationVersionsClient(ctrl) + mockClient := mocks.NewMockGalleryApplicationVersionsClient(ctrl) wrapper := NewComputeGalleryApplicationVersion(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) diff --git a/sources/azure/manual/compute-gallery-application.go b/sources/azure/manual/compute-gallery-application.go new file mode 100644 index 00000000..5fa2e23d --- /dev/null +++ b/sources/azure/manual/compute-gallery-application.go @@ -0,0 +1,250 @@ +package manual + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" + "github.com/overmindtech/cli/go/discovery" + 
"github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" + "github.com/overmindtech/cli/sources/stdlib" +) + +var ( + ComputeGalleryApplicationLookupByName = shared.NewItemTypeLookup("name", azureshared.ComputeGalleryApplication) +) + +type computeGalleryApplicationWrapper struct { + client clients.GalleryApplicationsClient + *azureshared.MultiResourceGroupBase +} + +func NewComputeGalleryApplication(client clients.GalleryApplicationsClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.SearchableWrapper { + return &computeGalleryApplicationWrapper{ + client: client, + MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_COMPUTE_APPLICATION, + azureshared.ComputeGalleryApplication, + ), + } +} + +func (c computeGalleryApplicationWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) != 2 { + return nil, azureshared.QueryError(errors.New("queryParts must be exactly 2 and be the gallery name and gallery application name"), scope, c.Type()) + } + galleryName := queryParts[0] + if galleryName == "" { + return nil, azureshared.QueryError(errors.New("gallery name cannot be empty"), scope, c.Type()) + } + galleryApplicationName := queryParts[1] + if galleryApplicationName == "" { + return nil, azureshared.QueryError(errors.New("gallery application name cannot be empty"), scope, c.Type()) + } + + rgScope, err := c.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + resp, err := c.client.Get(ctx, rgScope.ResourceGroup, galleryName, galleryApplicationName, nil) + if err != nil { + return nil, azureshared.QueryError(err, scope, 
c.Type()) + } + return c.azureGalleryApplicationToSDPItem(&resp.GalleryApplication, galleryName, scope) +} + +func (c computeGalleryApplicationWrapper) Search(ctx context.Context, scope string, queryParts ...string) ([]*sdp.Item, *sdp.QueryError) { + if len(queryParts) != 1 { + return nil, azureshared.QueryError(errors.New("queryParts must be exactly 1 and be the gallery name"), scope, c.Type()) + } + galleryName := queryParts[0] + if galleryName == "" { + return nil, azureshared.QueryError(errors.New("gallery name cannot be empty"), scope, c.Type()) + } + + rgScope, err := c.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + pager := c.client.NewListByGalleryPager(rgScope.ResourceGroup, galleryName, nil) + + var items []*sdp.Item + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + for _, galleryApplication := range page.Value { + if galleryApplication == nil || galleryApplication.Name == nil { + continue + } + item, sdpErr := c.azureGalleryApplicationToSDPItem(galleryApplication, galleryName, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + return items, nil +} + +func (c computeGalleryApplicationWrapper) SearchStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string, queryParts ...string) { + if len(queryParts) != 1 { + stream.SendError(azureshared.QueryError(errors.New("queryParts must be exactly 1 and be the gallery name"), scope, c.Type())) + return + } + galleryName := queryParts[0] + if galleryName == "" { + stream.SendError(azureshared.QueryError(errors.New("gallery name cannot be empty"), scope, c.Type())) + return + } + + rgScope, err := c.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, c.Type())) + return + } + + pager := 
c.client.NewListByGalleryPager(rgScope.ResourceGroup, galleryName, nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, c.Type())) + return + } + for _, galleryApplication := range page.Value { + if galleryApplication == nil || galleryApplication.Name == nil { + continue + } + item, sdpErr := c.azureGalleryApplicationToSDPItem(galleryApplication, galleryName, scope) + if sdpErr != nil { + stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) + stream.SendItem(item) + } + } +} + +func (c computeGalleryApplicationWrapper) azureGalleryApplicationToSDPItem( + galleryApplication *armcompute.GalleryApplication, + galleryName, + scope string, +) (*sdp.Item, *sdp.QueryError) { + attributes, err := shared.ToAttributesWithExclude(galleryApplication, "tags") + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + + if galleryApplication.Name == nil { + return nil, azureshared.QueryError(errors.New("gallery application name is nil"), scope, c.Type()) + } + galleryApplicationName := *galleryApplication.Name + if galleryApplicationName == "" { + return nil, azureshared.QueryError(errors.New("gallery application name cannot be empty"), scope, c.Type()) + } + err = attributes.Set("uniqueAttr", shared.CompositeLookupKey(galleryName, galleryApplicationName)) + if err != nil { + return nil, azureshared.QueryError(err, scope, c.Type()) + } + + linkedItemQueries := make([]*sdp.LinkedItemQuery, 0) + + // Parent Gallery: application depends on gallery + linkedItemQueries = append(linkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.ComputeGallery.String(), + Method: sdp.QueryMethod_GET, + Query: galleryName, + Scope: scope, + }, + }) + + // Child: list gallery application versions under this application (Search by gallery name + application name) + linkedItemQueries = append(linkedItemQueries, 
&sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.ComputeGalleryApplicationVersion.String(), + Method: sdp.QueryMethod_SEARCH, + Query: shared.CompositeLookupKey(galleryName, galleryApplicationName), + Scope: scope, + }, + }) + + // URI-based links: Eula, PrivacyStatementURI, ReleaseNoteURI + linkedDNSHostnames := make(map[string]struct{}) + seenIPs := make(map[string]struct{}) + if galleryApplication.Properties != nil { + if galleryApplication.Properties.Eula != nil && *galleryApplication.Properties.Eula != "" { + AppendURILinks(&linkedItemQueries, *galleryApplication.Properties.Eula, linkedDNSHostnames, seenIPs) + } + if galleryApplication.Properties.PrivacyStatementURI != nil && *galleryApplication.Properties.PrivacyStatementURI != "" { + AppendURILinks(&linkedItemQueries, *galleryApplication.Properties.PrivacyStatementURI, linkedDNSHostnames, seenIPs) + } + if galleryApplication.Properties.ReleaseNoteURI != nil && *galleryApplication.Properties.ReleaseNoteURI != "" { + AppendURILinks(&linkedItemQueries, *galleryApplication.Properties.ReleaseNoteURI, linkedDNSHostnames, seenIPs) + } + } + + sdpItem := &sdp.Item{ + Type: azureshared.ComputeGalleryApplication.String(), + UniqueAttribute: "uniqueAttr", + Attributes: attributes, + Scope: scope, + Tags: azureshared.ConvertAzureTags(galleryApplication.Tags), + LinkedItemQueries: linkedItemQueries, + } + return sdpItem, nil +} + +func (c computeGalleryApplicationWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + ComputeGalleryLookupByName, + ComputeGalleryApplicationLookupByName, + } +} + +func (c computeGalleryApplicationWrapper) SearchLookups() []sources.ItemTypeLookups { + return []sources.ItemTypeLookups{ + { + ComputeGalleryLookupByName, + }, + } +} + +func (c computeGalleryApplicationWrapper) PotentialLinks() map[shared.ItemType]bool { + return shared.NewItemTypesSet( + azureshared.ComputeGallery, + azureshared.ComputeGalleryApplicationVersion, + stdlib.NetworkDNS, + 
stdlib.NetworkHTTP, + stdlib.NetworkIP, + ) +} + +// ref: https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/gallery_application +func (c computeGalleryApplicationWrapper) TerraformMappings() []*sdp.TerraformMapping { + return []*sdp.TerraformMapping{ + { + TerraformMethod: sdp.QueryMethod_SEARCH, + TerraformQueryMap: "azurerm_gallery_application.id", + }, + } +} + +// ref: https://learn.microsoft.com/en-us/azure/role-based-access-control/permissions/compute#microsoftcompute +func (c computeGalleryApplicationWrapper) IAMPermissions() []string { + return []string{ + "Microsoft.Compute/galleries/applications/read", + } +} + +func (c computeGalleryApplicationWrapper) PredefinedRole() string { + return "Reader" +} diff --git a/sources/azure/manual/compute-gallery-application_test.go b/sources/azure/manual/compute-gallery-application_test.go new file mode 100644 index 00000000..11ca6264 --- /dev/null +++ b/sources/azure/manual/compute-gallery-application_test.go @@ -0,0 +1,306 @@ +package manual + +import ( + "context" + "errors" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" + "go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + "github.com/overmindtech/cli/sources/shared" + "github.com/overmindtech/cli/sources/stdlib" +) + +// mockGalleryApplicationsPager is a mock pager for ListByGallery. 
+type mockGalleryApplicationsPager struct { + pages []armcompute.GalleryApplicationsClientListByGalleryResponse + index int +} + +func (m *mockGalleryApplicationsPager) More() bool { + return m.index < len(m.pages) +} + +func (m *mockGalleryApplicationsPager) NextPage(ctx context.Context) (armcompute.GalleryApplicationsClientListByGalleryResponse, error) { + if m.index >= len(m.pages) { + return armcompute.GalleryApplicationsClientListByGalleryResponse{}, errors.New("no more pages") + } + page := m.pages[m.index] + m.index++ + return page, nil +} + +// errorGalleryApplicationsPager is a mock pager that always returns an error. +type errorGalleryApplicationsPager struct{} + +func (e *errorGalleryApplicationsPager) More() bool { + return true +} + +func (e *errorGalleryApplicationsPager) NextPage(ctx context.Context) (armcompute.GalleryApplicationsClientListByGalleryResponse, error) { + return armcompute.GalleryApplicationsClientListByGalleryResponse{}, errors.New("pager error") +} + +// testGalleryApplicationsClient wraps the mock and returns a pager from NewListByGalleryPager. 
+type testGalleryApplicationsClient struct { + *mocks.MockGalleryApplicationsClient + pager clients.GalleryApplicationsPager +} + +func (t *testGalleryApplicationsClient) NewListByGalleryPager(resourceGroupName, galleryName string, options *armcompute.GalleryApplicationsClientListByGalleryOptions) clients.GalleryApplicationsPager { + if t.pager != nil { + return t.pager + } + return t.MockGalleryApplicationsClient.NewListByGalleryPager(resourceGroupName, galleryName, options) +} + +func createAzureGalleryApplication(applicationName string) *armcompute.GalleryApplication { + return &armcompute.GalleryApplication{ + Name: new(applicationName), + Location: new("eastus"), + Tags: map[string]*string{ + "env": new("test"), + }, + Properties: &armcompute.GalleryApplicationProperties{ + SupportedOSType: to.Ptr(armcompute.OperatingSystemTypesWindows), + Description: new("Test gallery application"), + }, + } +} + +func TestComputeGalleryApplication(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + scope := subscriptionID + "." 
+ resourceGroup + galleryName := "test-gallery" + galleryApplicationName := "test-application" + + t.Run("Get", func(t *testing.T) { + app := createAzureGalleryApplication(galleryApplicationName) + + mockClient := mocks.NewMockGalleryApplicationsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, galleryName, galleryApplicationName, nil).Return( + armcompute.GalleryApplicationsClientGetResponse{ + GalleryApplication: *app, + }, nil) + + wrapper := NewComputeGalleryApplication(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(galleryName, galleryApplicationName) + sdpItem, qErr := adapter.Get(ctx, scope, query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.ComputeGalleryApplication.String() { + t.Errorf("Expected type %s, got %s", azureshared.ComputeGalleryApplication.String(), sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", sdpItem.GetUniqueAttribute()) + } + + expectedUnique := shared.CompositeLookupKey(galleryName, galleryApplicationName) + if sdpItem.UniqueAttributeValue() != expectedUnique { + t.Errorf("Expected unique attribute value %s, got %s", expectedUnique, sdpItem.UniqueAttributeValue()) + } + + if sdpItem.GetTags()["env"] != "test" { + t.Errorf("Expected tag env=test, got: %v", sdpItem.GetTags()["env"]) + } + + t.Run("StaticTests", func(t *testing.T) { + queryTests := shared.QueryTests{ + {ExpectedType: azureshared.ComputeGallery.String(), ExpectedMethod: sdp.QueryMethod_GET, ExpectedQuery: galleryName, ExpectedScope: scope}, + {ExpectedType: azureshared.ComputeGalleryApplicationVersion.String(), ExpectedMethod: sdp.QueryMethod_SEARCH, ExpectedQuery: shared.CompositeLookupKey(galleryName, galleryApplicationName), 
ExpectedScope: scope}, + } + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("Get_InvalidQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockGalleryApplicationsClient(ctrl) + wrapper := NewComputeGalleryApplication(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, scope, galleryName, true) + if qErr == nil { + t.Error("Expected error when Get with wrong number of query parts, but got nil") + } + }) + + t.Run("Get_EmptyGalleryName", func(t *testing.T) { + mockClient := mocks.NewMockGalleryApplicationsClient(ctrl) + wrapper := NewComputeGalleryApplication(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey("", galleryApplicationName) + _, qErr := adapter.Get(ctx, scope, query, true) + if qErr == nil { + t.Error("Expected error when gallery name is empty, but got nil") + } + }) + + t.Run("Get_EmptyApplicationName", func(t *testing.T) { + mockClient := mocks.NewMockGalleryApplicationsClient(ctrl) + wrapper := NewComputeGalleryApplication(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(galleryName, "") + _, qErr := adapter.Get(ctx, scope, query, true) + if qErr == nil { + t.Error("Expected error when gallery application name is empty, but got nil") + } + }) + + t.Run("Get_ClientError", func(t *testing.T) { + expectedErr := errors.New("application not found") + mockClient := mocks.NewMockGalleryApplicationsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, galleryName, "nonexistent", nil).Return( + 
armcompute.GalleryApplicationsClientGetResponse{}, expectedErr) + + wrapper := NewComputeGalleryApplication(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(galleryName, "nonexistent") + _, qErr := adapter.Get(ctx, scope, query, true) + if qErr == nil { + t.Error("Expected error when client returns error, but got nil") + } + }) + + t.Run("Search", func(t *testing.T) { + app1 := createAzureGalleryApplication("app-1") + app2 := createAzureGalleryApplication("app-2") + + mockClient := mocks.NewMockGalleryApplicationsClient(ctrl) + pages := []armcompute.GalleryApplicationsClientListByGalleryResponse{ + { + GalleryApplicationList: armcompute.GalleryApplicationList{ + Value: []*armcompute.GalleryApplication{app1, app2}, + }, + }, + } + mockPager := &mockGalleryApplicationsPager{pages: pages} + testClient := &testGalleryApplicationsClient{ + MockGalleryApplicationsClient: mockClient, + pager: mockPager, + } + + wrapper := NewComputeGalleryApplication(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, scope, galleryName, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 2 { + t.Fatalf("Expected 2 items, got: %d", len(sdpItems)) + } + + for _, item := range sdpItems { + if err := item.Validate(); err != nil { + t.Errorf("Expected valid item, got: %v", err) + } + } + }) + + t.Run("Search_InvalidQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockGalleryApplicationsClient(ctrl) + wrapper := NewComputeGalleryApplication(mockClient, 
[]azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + searchQuery := shared.CompositeLookupKey(galleryName, galleryApplicationName) + _, err := searchable.Search(ctx, scope, searchQuery, true) + if err == nil { + t.Error("Expected error when Search with wrong number of query parts, but got nil") + } + }) + + t.Run("Search_EmptyGalleryName", func(t *testing.T) { + mockClient := mocks.NewMockGalleryApplicationsClient(ctrl) + wrapper := NewComputeGalleryApplication(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + _, qErr := wrapper.Search(ctx, scope, "") + if qErr == nil { + t.Error("Expected error when gallery name is empty, but got nil") + } + }) + + t.Run("Search_PagerError", func(t *testing.T) { + mockClient := mocks.NewMockGalleryApplicationsClient(ctrl) + errorPager := &errorGalleryApplicationsPager{} + testClient := &testGalleryApplicationsClient{ + MockGalleryApplicationsClient: mockClient, + pager: errorPager, + } + + wrapper := NewComputeGalleryApplication(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + _, err := searchable.Search(ctx, scope, galleryName, true) + if err == nil { + t.Error("Expected error when pager returns error, but got nil") + } + }) + + t.Run("PotentialLinks", func(t *testing.T) { + mockClient := mocks.NewMockGalleryApplicationsClient(ctrl) + wrapper := NewComputeGalleryApplication(mockClient, 
[]azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + links := wrapper.PotentialLinks() + expected := map[shared.ItemType]bool{ + azureshared.ComputeGallery: true, + azureshared.ComputeGalleryApplicationVersion: true, + stdlib.NetworkDNS: true, + stdlib.NetworkHTTP: true, + stdlib.NetworkIP: true, + } + for itemType, want := range expected { + if got := links[itemType]; got != want { + t.Errorf("PotentialLinks()[%v] = %v, want %v", itemType, got, want) + } + } + }) + + t.Run("ImplementsSearchableAdapter", func(t *testing.T) { + mockClient := mocks.NewMockGalleryApplicationsClient(ctrl) + wrapper := NewComputeGalleryApplication(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Error("Adapter should implement SearchableAdapter interface") + } + }) +} diff --git a/sources/azure/manual/compute-gallery-image.go b/sources/azure/manual/compute-gallery-image.go index 8fdfd581..2da9df24 100644 --- a/sources/azure/manual/compute-gallery-image.go +++ b/sources/azure/manual/compute-gallery-image.go @@ -173,13 +173,13 @@ func (c computeGalleryImageWrapper) azureGalleryImageToSDPItem( seenIPs := make(map[string]struct{}) if galleryImage.Properties != nil { if galleryImage.Properties.Eula != nil { - AppendURILinks(&linkedItemQueries, *galleryImage.Properties.Eula, linkedDNSHostnames, seenIPs, true, false) + AppendURILinks(&linkedItemQueries, *galleryImage.Properties.Eula, linkedDNSHostnames, seenIPs) } if galleryImage.Properties.PrivacyStatementURI != nil { - AppendURILinks(&linkedItemQueries, *galleryImage.Properties.PrivacyStatementURI, linkedDNSHostnames, seenIPs, true, false) + AppendURILinks(&linkedItemQueries, *galleryImage.Properties.PrivacyStatementURI, linkedDNSHostnames, seenIPs) } if galleryImage.Properties.ReleaseNoteURI 
!= nil { - AppendURILinks(&linkedItemQueries, *galleryImage.Properties.ReleaseNoteURI, linkedDNSHostnames, seenIPs, true, false) + AppendURILinks(&linkedItemQueries, *galleryImage.Properties.ReleaseNoteURI, linkedDNSHostnames, seenIPs) } } diff --git a/sources/azure/manual/compute-gallery-image_test.go b/sources/azure/manual/compute-gallery-image_test.go index 5a2cd299..d2632cae 100644 --- a/sources/azure/manual/compute-gallery-image_test.go +++ b/sources/azure/manual/compute-gallery-image_test.go @@ -5,7 +5,6 @@ import ( "errors" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "go.uber.org/mock/gomock" @@ -15,6 +14,7 @@ import ( "github.com/overmindtech/cli/sources" "github.com/overmindtech/cli/sources/azure/clients" azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" "github.com/overmindtech/cli/sources/shared" "github.com/overmindtech/cli/sources/stdlib" ) @@ -51,7 +51,7 @@ func (e *errorGalleryImagesPager) NextPage(ctx context.Context) (armcompute.Gall // testGalleryImagesClient wraps the mock and returns a pager from NewListByGalleryPager. 
type testGalleryImagesClient struct { - *MockGalleryImagesClient + *mocks.MockGalleryImagesClient pager clients.GalleryImagesPager } @@ -65,28 +65,28 @@ func (t *testGalleryImagesClient) NewListByGalleryPager(resourceGroupName, galle func createAzureGalleryImage(imageName string) *armcompute.GalleryImage { return &armcompute.GalleryImage{ - Name: to.Ptr(imageName), - Location: to.Ptr("eastus"), + Name: new(imageName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armcompute.GalleryImageProperties{ Identifier: &armcompute.GalleryImageIdentifier{ - Publisher: to.Ptr("test-publisher"), - Offer: to.Ptr("test-offer"), - SKU: to.Ptr("test-sku"), + Publisher: new("test-publisher"), + Offer: new("test-offer"), + SKU: new("test-sku"), }, - OSType: to.Ptr(armcompute.OperatingSystemTypesLinux), - OSState: to.Ptr(armcompute.OperatingSystemStateTypesGeneralized), + OSType: new(armcompute.OperatingSystemTypesLinux), + OSState: new(armcompute.OperatingSystemStateTypesGeneralized), }, } } func createAzureGalleryImageWithURIs(imageName string) *armcompute.GalleryImage { img := createAzureGalleryImage(imageName) - img.Properties.Eula = to.Ptr("https://eula.example.com/terms") - img.Properties.PrivacyStatementURI = to.Ptr("https://example.com/privacy") - img.Properties.ReleaseNoteURI = to.Ptr("https://releases.example.com/notes") + img.Properties.Eula = new("https://eula.example.com/terms") + img.Properties.PrivacyStatementURI = new("https://example.com/privacy") + img.Properties.ReleaseNoteURI = new("https://releases.example.com/notes") return img } @@ -104,7 +104,7 @@ func TestComputeGalleryImage(t *testing.T) { t.Run("Get", func(t *testing.T) { image := createAzureGalleryImage(galleryImageName) - mockClient := NewMockGalleryImagesClient(ctrl) + mockClient := mocks.NewMockGalleryImagesClient(ctrl) mockClient.EXPECT().Get(ctx, resourceGroup, galleryName, galleryImageName, nil).Return( 
armcompute.GalleryImagesClientGetResponse{ GalleryImage: *image, @@ -147,7 +147,7 @@ func TestComputeGalleryImage(t *testing.T) { t.Run("GetWithURIs", func(t *testing.T) { image := createAzureGalleryImageWithURIs(galleryImageName) - mockClient := NewMockGalleryImagesClient(ctrl) + mockClient := mocks.NewMockGalleryImagesClient(ctrl) mockClient.EXPECT().Get(ctx, resourceGroup, galleryName, galleryImageName, nil).Return( armcompute.GalleryImagesClientGetResponse{ GalleryImage: *image, @@ -178,9 +178,9 @@ func TestComputeGalleryImage(t *testing.T) { t.Run("Get_PlainTextEula_NoLinks", func(t *testing.T) { image := createAzureGalleryImage(galleryImageName) - image.Properties.Eula = to.Ptr("This software is provided as-is. No warranty.") + image.Properties.Eula = new("This software is provided as-is. No warranty.") - mockClient := NewMockGalleryImagesClient(ctrl) + mockClient := mocks.NewMockGalleryImagesClient(ctrl) mockClient.EXPECT().Get(ctx, resourceGroup, galleryName, galleryImageName, nil).Return( armcompute.GalleryImagesClientGetResponse{ GalleryImage: *image, @@ -210,10 +210,10 @@ func TestComputeGalleryImage(t *testing.T) { t.Run("Get_SameHostDeduplication", func(t *testing.T) { image := createAzureGalleryImage(galleryImageName) - image.Properties.PrivacyStatementURI = to.Ptr("https://example.com/privacy") - image.Properties.ReleaseNoteURI = to.Ptr("https://example.com/release-notes") + image.Properties.PrivacyStatementURI = new("https://example.com/privacy") + image.Properties.ReleaseNoteURI = new("https://example.com/release-notes") - mockClient := NewMockGalleryImagesClient(ctrl) + mockClient := mocks.NewMockGalleryImagesClient(ctrl) mockClient.EXPECT().Get(ctx, resourceGroup, galleryName, galleryImageName, nil).Return( armcompute.GalleryImagesClientGetResponse{ GalleryImage: *image, @@ -252,9 +252,9 @@ func TestComputeGalleryImage(t *testing.T) { t.Run("Get_IPHost_EmitsIPLink", func(t *testing.T) { image := createAzureGalleryImage(galleryImageName) - 
image.Properties.PrivacyStatementURI = to.Ptr("https://192.168.1.10:8443/privacy") + image.Properties.PrivacyStatementURI = new("https://192.168.1.10:8443/privacy") - mockClient := NewMockGalleryImagesClient(ctrl) + mockClient := mocks.NewMockGalleryImagesClient(ctrl) mockClient.EXPECT().Get(ctx, resourceGroup, galleryName, galleryImageName, nil).Return( armcompute.GalleryImagesClientGetResponse{ GalleryImage: *image, @@ -292,7 +292,7 @@ func TestComputeGalleryImage(t *testing.T) { }) t.Run("Get_InvalidQueryParts", func(t *testing.T) { - mockClient := NewMockGalleryImagesClient(ctrl) + mockClient := mocks.NewMockGalleryImagesClient(ctrl) wrapper := NewComputeGalleryImage(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) @@ -304,7 +304,7 @@ func TestComputeGalleryImage(t *testing.T) { }) t.Run("Get_EmptyGalleryName", func(t *testing.T) { - mockClient := NewMockGalleryImagesClient(ctrl) + mockClient := mocks.NewMockGalleryImagesClient(ctrl) wrapper := NewComputeGalleryImage(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) @@ -316,7 +316,7 @@ func TestComputeGalleryImage(t *testing.T) { }) t.Run("Get_EmptyImageName", func(t *testing.T) { - mockClient := NewMockGalleryImagesClient(ctrl) + mockClient := mocks.NewMockGalleryImagesClient(ctrl) wrapper := NewComputeGalleryImage(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) @@ -329,7 +329,7 @@ func TestComputeGalleryImage(t *testing.T) { t.Run("Get_ClientError", func(t *testing.T) { expectedErr := errors.New("image not found") - mockClient := NewMockGalleryImagesClient(ctrl) + mockClient := mocks.NewMockGalleryImagesClient(ctrl) 
mockClient.EXPECT().Get(ctx, resourceGroup, galleryName, "nonexistent", nil).Return( armcompute.GalleryImagesClientGetResponse{}, expectedErr) @@ -347,7 +347,7 @@ func TestComputeGalleryImage(t *testing.T) { img1 := createAzureGalleryImage("image-1") img2 := createAzureGalleryImage("image-2") - mockClient := NewMockGalleryImagesClient(ctrl) + mockClient := mocks.NewMockGalleryImagesClient(ctrl) pages := []armcompute.GalleryImagesClientListByGalleryResponse{ { GalleryImageList: armcompute.GalleryImageList{ @@ -386,7 +386,7 @@ func TestComputeGalleryImage(t *testing.T) { }) t.Run("Search_InvalidQueryParts", func(t *testing.T) { - mockClient := NewMockGalleryImagesClient(ctrl) + mockClient := mocks.NewMockGalleryImagesClient(ctrl) wrapper := NewComputeGalleryImage(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) @@ -404,7 +404,7 @@ func TestComputeGalleryImage(t *testing.T) { }) t.Run("Search_EmptyGalleryName", func(t *testing.T) { - mockClient := NewMockGalleryImagesClient(ctrl) + mockClient := mocks.NewMockGalleryImagesClient(ctrl) wrapper := NewComputeGalleryImage(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) _, qErr := wrapper.Search(ctx, scope, "") @@ -414,7 +414,7 @@ func TestComputeGalleryImage(t *testing.T) { }) t.Run("Search_PagerError", func(t *testing.T) { - mockClient := NewMockGalleryImagesClient(ctrl) + mockClient := mocks.NewMockGalleryImagesClient(ctrl) errorPager := &errorGalleryImagesPager{} testClient := &testGalleryImagesClient{ MockGalleryImagesClient: mockClient, @@ -436,7 +436,7 @@ func TestComputeGalleryImage(t *testing.T) { }) t.Run("PotentialLinks", func(t *testing.T) { - mockClient := NewMockGalleryImagesClient(ctrl) + mockClient := mocks.NewMockGalleryImagesClient(ctrl) wrapper := NewComputeGalleryImage(mockClient, 
[]azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) links := wrapper.PotentialLinks() @@ -454,7 +454,7 @@ func TestComputeGalleryImage(t *testing.T) { }) t.Run("ImplementsSearchableAdapter", func(t *testing.T) { - mockClient := NewMockGalleryImagesClient(ctrl) + mockClient := mocks.NewMockGalleryImagesClient(ctrl) wrapper := NewComputeGalleryImage(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) diff --git a/sources/azure/manual/compute-gallery.go b/sources/azure/manual/compute-gallery.go index 48f3fc40..b1ba4563 100644 --- a/sources/azure/manual/compute-gallery.go +++ b/sources/azure/manual/compute-gallery.go @@ -134,16 +134,26 @@ func (c computeGalleryWrapper) azureGalleryToSDPItem(gallery *armcompute.Gallery }, }) + // Child resources: list gallery applications under this gallery (Search by gallery name) + linkedItemQueries = append(linkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.ComputeGalleryApplication.String(), + Method: sdp.QueryMethod_SEARCH, + Query: galleryName, + Scope: scope, + }, + }) + // URI-based links from community gallery info: PublisherURI, Eula linkedDNSHostnames := make(map[string]struct{}) seenIPs := make(map[string]struct{}) if gallery.Properties != nil && gallery.Properties.SharingProfile != nil && gallery.Properties.SharingProfile.CommunityGalleryInfo != nil { info := gallery.Properties.SharingProfile.CommunityGalleryInfo if info.PublisherURI != nil { - AppendURILinks(&linkedItemQueries, *info.PublisherURI, linkedDNSHostnames, seenIPs, true, false) + AppendURILinks(&linkedItemQueries, *info.PublisherURI, linkedDNSHostnames, seenIPs) } if info.Eula != nil { - AppendURILinks(&linkedItemQueries, *info.Eula, linkedDNSHostnames, seenIPs, true, false) + AppendURILinks(&linkedItemQueries, *info.Eula, linkedDNSHostnames, seenIPs) } } 
diff --git a/sources/azure/manual/compute-gallery_test.go b/sources/azure/manual/compute-gallery_test.go index a6c2fde0..0a3ec302 100644 --- a/sources/azure/manual/compute-gallery_test.go +++ b/sources/azure/manual/compute-gallery_test.go @@ -6,7 +6,6 @@ import ( "sync" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "go.uber.org/mock/gomock" @@ -72,6 +71,12 @@ func TestComputeGallery(t *testing.T) { ExpectedQuery: galleryName, ExpectedScope: scope, }, + { + ExpectedType: azureshared.ComputeGalleryApplication.String(), + ExpectedMethod: sdp.QueryMethod_SEARCH, + ExpectedQuery: galleryName, + ExpectedScope: scope, + }, } shared.RunStaticTests(t, adapter, sdpItem, queryTests) }) @@ -159,9 +164,9 @@ func TestComputeGallery(t *testing.T) { gallery1 := createAzureGallery("test-gallery-1") galleryNilName := &armcompute.Gallery{ Name: nil, - Location: to.Ptr("eastus"), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, } @@ -229,18 +234,18 @@ func TestComputeGallery(t *testing.T) { func createAzureGallery(galleryName string) *armcompute.Gallery { return &armcompute.Gallery{ - Name: to.Ptr(galleryName), - Location: to.Ptr("eastus"), + Name: new(galleryName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), - "project": to.Ptr("testing"), + "env": new("test"), + "project": new("testing"), }, Properties: &armcompute.GalleryProperties{ - Description: to.Ptr("Test shared image gallery"), + Description: new("Test shared image gallery"), Identifier: &armcompute.GalleryIdentifier{ - UniqueName: to.Ptr("unique-" + galleryName), + UniqueName: new("unique-" + galleryName), }, - ProvisioningState: to.Ptr(armcompute.GalleryProvisioningStateSucceeded), + ProvisioningState: new(armcompute.GalleryProvisioningStateSucceeded), }, } } diff --git a/sources/azure/manual/compute-image_test.go 
b/sources/azure/manual/compute-image_test.go index dd3a0609..475248a6 100644 --- a/sources/azure/manual/compute-image_test.go +++ b/sources/azure/manual/compute-image_test.go @@ -6,7 +6,6 @@ import ( "sync" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "go.uber.org/mock/gomock" @@ -298,9 +297,9 @@ func TestComputeImage(t *testing.T) { image1 := createAzureImage("test-image-1") imageNilName := &armcompute.Image{ Name: nil, // nil name should be skipped - Location: to.Ptr("eastus"), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, } @@ -483,7 +482,7 @@ func TestComputeImage(t *testing.T) { wrapper := manual.NewComputeImage(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) // PredefinedRole is available on the wrapper, not the adapter - if roleInterface, ok := interface{}(wrapper).(interface{ PredefinedRole() string }); ok { + if roleInterface, ok := any(wrapper).(interface{ PredefinedRole() string }); ok { role := roleInterface.PredefinedRole() if role != "Reader" { t.Errorf("Expected predefined role 'Reader', got %s", role) @@ -497,14 +496,14 @@ func TestComputeImage(t *testing.T) { // createAzureImage creates a mock Azure Image for testing func createAzureImage(imageName string) *armcompute.Image { return &armcompute.Image{ - Name: to.Ptr(imageName), - Location: to.Ptr("eastus"), + Name: new(imageName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), - "project": to.Ptr("testing"), + "env": new("test"), + "project": new("testing"), }, Properties: &armcompute.ImageProperties{ - ProvisioningState: to.Ptr("Succeeded"), + ProvisioningState: new("Succeeded"), }, } } @@ -515,46 +514,46 @@ func createAzureImageWithAllLinks(imageName, subscriptionID, resourceGroup strin dataDiskBlobURI := 
"https://teststorageaccount2.blob.core.windows.net/vhds/datadisk1.vhd" return &armcompute.Image{ - Name: to.Ptr(imageName), - Location: to.Ptr("eastus"), + Name: new(imageName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armcompute.ImageProperties{ - ProvisioningState: to.Ptr("Succeeded"), + ProvisioningState: new("Succeeded"), StorageProfile: &armcompute.ImageStorageProfile{ OSDisk: &armcompute.ImageOSDisk{ - OSType: to.Ptr(armcompute.OperatingSystemTypesLinux), - OSState: to.Ptr(armcompute.OperatingSystemStateTypesGeneralized), + OSType: new(armcompute.OperatingSystemTypesLinux), + OSState: new(armcompute.OperatingSystemStateTypesGeneralized), ManagedDisk: &armcompute.SubResource{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/disks/test-os-disk"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/disks/test-os-disk"), }, Snapshot: &armcompute.SubResource{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/snapshots/test-os-snapshot"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/snapshots/test-os-snapshot"), }, - BlobURI: to.Ptr(osDiskBlobURI), + BlobURI: new(osDiskBlobURI), DiskEncryptionSet: &armcompute.DiskEncryptionSetParameters{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/diskEncryptionSets/test-os-disk-encryption-set"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/diskEncryptionSets/test-os-disk-encryption-set"), }, }, DataDisks: []*armcompute.ImageDataDisk{ { - Lun: to.Ptr(int32(0)), + Lun: new(int32(0)), ManagedDisk: &armcompute.SubResource{ - ID: to.Ptr("/subscriptions/" + 
subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/disks/test-data-disk-1"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/disks/test-data-disk-1"), }, Snapshot: &armcompute.SubResource{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/snapshots/test-data-snapshot-1"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/snapshots/test-data-snapshot-1"), }, - BlobURI: to.Ptr(dataDiskBlobURI), + BlobURI: new(dataDiskBlobURI), DiskEncryptionSet: &armcompute.DiskEncryptionSetParameters{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/diskEncryptionSets/test-data-disk-encryption-set"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/diskEncryptionSets/test-data-disk-encryption-set"), }, }, }, }, SourceVirtualMachine: &armcompute.SubResource{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/virtualMachines/test-source-vm"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/virtualMachines/test-source-vm"), }, }, } @@ -563,19 +562,19 @@ func createAzureImageWithAllLinks(imageName, subscriptionID, resourceGroup strin // createAzureImageWithCrossResourceGroupLinks creates a mock Azure Image with links to resources in different resource groups func createAzureImageWithCrossResourceGroupLinks(imageName, subscriptionID, resourceGroup string) *armcompute.Image { return &armcompute.Image{ - Name: to.Ptr(imageName), - Location: to.Ptr("eastus"), + Name: new(imageName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: 
&armcompute.ImageProperties{ - ProvisioningState: to.Ptr("Succeeded"), + ProvisioningState: new("Succeeded"), StorageProfile: &armcompute.ImageStorageProfile{ OSDisk: &armcompute.ImageOSDisk{ - OSType: to.Ptr(armcompute.OperatingSystemTypesLinux), - OSState: to.Ptr(armcompute.OperatingSystemStateTypesGeneralized), + OSType: new(armcompute.OperatingSystemTypesLinux), + OSState: new(armcompute.OperatingSystemStateTypesGeneralized), ManagedDisk: &armcompute.SubResource{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/other-rg/providers/Microsoft.Compute/disks/test-disk-other-rg"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/other-rg/providers/Microsoft.Compute/disks/test-disk-other-rg"), }, }, }, diff --git a/sources/azure/manual/compute-proximity-placement-group_test.go b/sources/azure/manual/compute-proximity-placement-group_test.go index 7e36dd32..464a8d82 100644 --- a/sources/azure/manual/compute-proximity-placement-group_test.go +++ b/sources/azure/manual/compute-proximity-placement-group_test.go @@ -5,7 +5,6 @@ import ( "errors" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "go.uber.org/mock/gomock" @@ -194,9 +193,9 @@ func TestComputeProximityPlacementGroup(t *testing.T) { ppg1 := createAzureProximityPlacementGroup("test-ppg-1", subscriptionID, resourceGroup) ppgNilName := &armcompute.ProximityPlacementGroup{ Name: nil, - Location: to.Ptr("eastus"), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, } @@ -258,45 +257,45 @@ func TestComputeProximityPlacementGroup(t *testing.T) { func createAzureProximityPlacementGroup(ppgName, subscriptionID, resourceGroup string) *armcompute.ProximityPlacementGroup { baseID := "/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute" return &armcompute.ProximityPlacementGroup{ - Name: 
to.Ptr(ppgName), - Location: to.Ptr("eastus"), + Name: new(ppgName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), - "project": to.Ptr("testing"), + "env": new("test"), + "project": new("testing"), }, Properties: &armcompute.ProximityPlacementGroupProperties{ - ProximityPlacementGroupType: to.Ptr(armcompute.ProximityPlacementGroupTypeStandard), + ProximityPlacementGroupType: new(armcompute.ProximityPlacementGroupTypeStandard), VirtualMachines: []*armcompute.SubResourceWithColocationStatus{ - {ID: to.Ptr(baseID + "/virtualMachines/test-vm")}, + {ID: new(baseID + "/virtualMachines/test-vm")}, }, AvailabilitySets: []*armcompute.SubResourceWithColocationStatus{ - {ID: to.Ptr(baseID + "/availabilitySets/test-avset")}, + {ID: new(baseID + "/availabilitySets/test-avset")}, }, VirtualMachineScaleSets: []*armcompute.SubResourceWithColocationStatus{ - {ID: to.Ptr(baseID + "/virtualMachineScaleSets/test-vmss")}, + {ID: new(baseID + "/virtualMachineScaleSets/test-vmss")}, }, }, - Zones: []*string{to.Ptr("1")}, + Zones: []*string{new("1")}, } } func createAzureProximityPlacementGroupWithCrossResourceGroupLinks(ppgName, subscriptionID string) *armcompute.ProximityPlacementGroup { return &armcompute.ProximityPlacementGroup{ - Name: to.Ptr(ppgName), - Location: to.Ptr("eastus"), + Name: new(ppgName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armcompute.ProximityPlacementGroupProperties{ - ProximityPlacementGroupType: to.Ptr(armcompute.ProximityPlacementGroupTypeStandard), + ProximityPlacementGroupType: new(armcompute.ProximityPlacementGroupTypeStandard), VirtualMachines: []*armcompute.SubResourceWithColocationStatus{ - {ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/vm-rg/providers/Microsoft.Compute/virtualMachines/test-vm")}, + {ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/vm-rg/providers/Microsoft.Compute/virtualMachines/test-vm")}, }, 
AvailabilitySets: []*armcompute.SubResourceWithColocationStatus{ - {ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/avset-rg/providers/Microsoft.Compute/availabilitySets/test-avset")}, + {ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/avset-rg/providers/Microsoft.Compute/availabilitySets/test-avset")}, }, VirtualMachineScaleSets: []*armcompute.SubResourceWithColocationStatus{ - {ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/vmss-rg/providers/Microsoft.Compute/virtualMachineScaleSets/test-vmss")}, + {ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/vmss-rg/providers/Microsoft.Compute/virtualMachineScaleSets/test-vmss")}, }, }, } @@ -304,13 +303,13 @@ func createAzureProximityPlacementGroupWithCrossResourceGroupLinks(ppgName, subs func createAzureProximityPlacementGroupWithoutLinks(ppgName string) *armcompute.ProximityPlacementGroup { return &armcompute.ProximityPlacementGroup{ - Name: to.Ptr(ppgName), - Location: to.Ptr("eastus"), + Name: new(ppgName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armcompute.ProximityPlacementGroupProperties{ - ProximityPlacementGroupType: to.Ptr(armcompute.ProximityPlacementGroupTypeStandard), + ProximityPlacementGroupType: new(armcompute.ProximityPlacementGroupTypeStandard), }, } } diff --git a/sources/azure/manual/compute-shared-gallery-image.go b/sources/azure/manual/compute-shared-gallery-image.go index 37ba6c70..2d736ff8 100644 --- a/sources/azure/manual/compute-shared-gallery-image.go +++ b/sources/azure/manual/compute-shared-gallery-image.go @@ -174,10 +174,10 @@ func (c computeSharedGalleryImageWrapper) azureSharedGalleryImageToSDPItem( seenIPs := make(map[string]struct{}) if image.Properties != nil { if image.Properties.Eula != nil { - AppendURILinks(&linkedItemQueries, *image.Properties.Eula, linkedDNSHostnames, seenIPs, true, false) + AppendURILinks(&linkedItemQueries, 
*image.Properties.Eula, linkedDNSHostnames, seenIPs) } if image.Properties.PrivacyStatementURI != nil { - AppendURILinks(&linkedItemQueries, *image.Properties.PrivacyStatementURI, linkedDNSHostnames, seenIPs, true, false) + AppendURILinks(&linkedItemQueries, *image.Properties.PrivacyStatementURI, linkedDNSHostnames, seenIPs) } } diff --git a/sources/azure/manual/compute-shared-gallery-image_test.go b/sources/azure/manual/compute-shared-gallery-image_test.go index 25efb0a7..bff2bdd2 100644 --- a/sources/azure/manual/compute-shared-gallery-image_test.go +++ b/sources/azure/manual/compute-shared-gallery-image_test.go @@ -5,7 +5,6 @@ import ( "errors" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "go.uber.org/mock/gomock" @@ -102,7 +101,7 @@ func TestComputeSharedGalleryImage(t *testing.T) { t.Run("Get_PlainTextEula_NoLinks", func(t *testing.T) { image := createSharedGalleryImage(imageName) - image.Properties.Eula = to.Ptr("This software is provided as-is. No warranty.") + image.Properties.Eula = new("This software is provided as-is. 
No warranty.") mockClient := mocks.NewMockSharedGalleryImagesClient(ctrl) mockClient.EXPECT().Get(ctx, location, galleryUniqueName, imageName, nil).Return( @@ -133,8 +132,8 @@ func TestComputeSharedGalleryImage(t *testing.T) { t.Run("Get_SameHostDeduplication", func(t *testing.T) { image := createSharedGalleryImage(imageName) - image.Properties.Eula = to.Ptr("https://example.com/eula") - image.Properties.PrivacyStatementURI = to.Ptr("https://example.com/privacy") + image.Properties.Eula = new("https://example.com/eula") + image.Properties.PrivacyStatementURI = new("https://example.com/privacy") mockClient := mocks.NewMockSharedGalleryImagesClient(ctrl) mockClient.EXPECT().Get(ctx, location, galleryUniqueName, imageName, nil).Return( @@ -174,7 +173,7 @@ func TestComputeSharedGalleryImage(t *testing.T) { t.Run("Get_IPHost_EmitsIPLink", func(t *testing.T) { image := createSharedGalleryImage(imageName) - image.Properties.PrivacyStatementURI = to.Ptr("https://192.168.1.10:8443/privacy") + image.Properties.PrivacyStatementURI = new("https://192.168.1.10:8443/privacy") mockClient := mocks.NewMockSharedGalleryImagesClient(ctrl) mockClient.EXPECT().Get(ctx, location, galleryUniqueName, imageName, nil).Return( @@ -398,27 +397,27 @@ func TestComputeSharedGalleryImage(t *testing.T) { func createSharedGalleryImage(name string) *armcompute.SharedGalleryImage { return &armcompute.SharedGalleryImage{ - Name: to.Ptr(name), - Location: to.Ptr("eastus"), + Name: new(name), + Location: new("eastus"), Identifier: &armcompute.SharedGalleryIdentifier{ - UniqueID: to.Ptr("/SharedGalleries/test-gallery-unique-name"), + UniqueID: new("/SharedGalleries/test-gallery-unique-name"), }, Properties: &armcompute.SharedGalleryImageProperties{ Identifier: &armcompute.GalleryImageIdentifier{ - Publisher: to.Ptr("test-publisher"), - Offer: to.Ptr("test-offer"), - SKU: to.Ptr("test-sku"), + Publisher: new("test-publisher"), + Offer: new("test-offer"), + SKU: new("test-sku"), }, - OSType: 
to.Ptr(armcompute.OperatingSystemTypesLinux), - OSState: to.Ptr(armcompute.OperatingSystemStateTypesGeneralized), + OSType: new(armcompute.OperatingSystemTypesLinux), + OSState: new(armcompute.OperatingSystemStateTypesGeneralized), }, } } func createSharedGalleryImageWithURIs(name string) *armcompute.SharedGalleryImage { img := createSharedGalleryImage(name) - img.Properties.Eula = to.Ptr("https://eula.example.com/terms") - img.Properties.PrivacyStatementURI = to.Ptr("https://example.com/privacy") + img.Properties.Eula = new("https://eula.example.com/terms") + img.Properties.PrivacyStatementURI = new("https://example.com/privacy") return img } diff --git a/sources/azure/manual/compute-snapshot_test.go b/sources/azure/manual/compute-snapshot_test.go index db9a434a..3208b2f1 100644 --- a/sources/azure/manual/compute-snapshot_test.go +++ b/sources/azure/manual/compute-snapshot_test.go @@ -6,7 +6,6 @@ import ( "sync" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "go.uber.org/mock/gomock" @@ -75,19 +74,19 @@ func TestComputeSnapshot(t *testing.T) { ExpectedType: azureshared.ComputeDiskAccess.String(), ExpectedMethod: sdp.QueryMethod_GET, ExpectedQuery: "test-disk-access", - ExpectedScope: subscriptionID + "." + resourceGroup, }, + ExpectedScope: subscriptionID + "." + resourceGroup}, { // Properties.Encryption.DiskEncryptionSetID ExpectedType: azureshared.ComputeDiskEncryptionSet.String(), ExpectedMethod: sdp.QueryMethod_GET, ExpectedQuery: "test-des", - ExpectedScope: subscriptionID + "." + resourceGroup, }, + ExpectedScope: subscriptionID + "." + resourceGroup}, { // Properties.CreationData.SourceResourceID (disk) ExpectedType: azureshared.ComputeDisk.String(), ExpectedMethod: sdp.QueryMethod_GET, ExpectedQuery: "source-disk", - ExpectedScope: subscriptionID + "." + resourceGroup, }, + ExpectedScope: subscriptionID + "." 
+ resourceGroup}, } shared.RunStaticTests(t, adapter, sdpItem, queryTests) @@ -119,7 +118,7 @@ func TestComputeSnapshot(t *testing.T) { ExpectedType: azureshared.ComputeSnapshot.String(), ExpectedMethod: sdp.QueryMethod_GET, ExpectedQuery: "source-snapshot", - ExpectedScope: subscriptionID + "." + resourceGroup, }, + ExpectedScope: subscriptionID + "." + resourceGroup}, } shared.RunStaticTests(t, adapter, sdpItem, queryTests) @@ -151,25 +150,25 @@ func TestComputeSnapshot(t *testing.T) { ExpectedType: azureshared.StorageAccount.String(), ExpectedMethod: sdp.QueryMethod_GET, ExpectedQuery: "teststorageaccount", - ExpectedScope: subscriptionID + "." + resourceGroup, }, + ExpectedScope: subscriptionID + "." + resourceGroup}, { // Properties.CreationData.SourceURI → Blob Container ExpectedType: azureshared.StorageBlobContainer.String(), ExpectedMethod: sdp.QueryMethod_GET, ExpectedQuery: shared.CompositeLookupKey("teststorageaccount", "vhds"), - ExpectedScope: subscriptionID + "." + resourceGroup, }, + ExpectedScope: subscriptionID + "." 
+ resourceGroup}, { // Properties.CreationData.SourceURI → HTTP ExpectedType: stdlib.NetworkHTTP.String(), ExpectedMethod: sdp.QueryMethod_SEARCH, ExpectedQuery: "https://teststorageaccount.blob.core.windows.net/vhds/my-disk.vhd", - ExpectedScope: "global", }, + ExpectedScope: "global"}, { // Properties.CreationData.SourceURI → DNS ExpectedType: stdlib.NetworkDNS.String(), ExpectedMethod: sdp.QueryMethod_SEARCH, ExpectedQuery: "teststorageaccount.blob.core.windows.net", - ExpectedScope: "global", }, + ExpectedScope: "global"}, } shared.RunStaticTests(t, adapter, sdpItem, queryTests) @@ -201,13 +200,13 @@ func TestComputeSnapshot(t *testing.T) { ExpectedType: stdlib.NetworkHTTP.String(), ExpectedMethod: sdp.QueryMethod_SEARCH, ExpectedQuery: "https://10.0.0.1/vhds/my-disk.vhd", - ExpectedScope: "global", }, + ExpectedScope: "global"}, { // Properties.CreationData.SourceURI → IP (host is IP address) ExpectedType: stdlib.NetworkIP.String(), ExpectedMethod: sdp.QueryMethod_GET, ExpectedQuery: "10.0.0.1", - ExpectedScope: "global", }, + ExpectedScope: "global"}, } shared.RunStaticTests(t, adapter, sdpItem, queryTests) @@ -429,9 +428,9 @@ func TestComputeSnapshot(t *testing.T) { snapshot1 := createAzureSnapshot("test-snapshot-1", subscriptionID, resourceGroup) snapshotNilName := &armcompute.Snapshot{ Name: nil, - Location: to.Ptr("eastus"), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, } @@ -500,21 +499,21 @@ func TestComputeSnapshot(t *testing.T) { // createAzureSnapshot creates a mock Azure Snapshot with linked resources for testing func createAzureSnapshot(name, subscriptionID, resourceGroup string) *armcompute.Snapshot { return &armcompute.Snapshot{ - Name: to.Ptr(name), - Location: to.Ptr("eastus"), + Name: new(name), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), - "project": to.Ptr("testing"), + "env": new("test"), + "project": new("testing"), }, Properties: 
&armcompute.SnapshotProperties{ - ProvisioningState: to.Ptr("Succeeded"), - DiskAccessID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/diskAccesses/test-disk-access"), + ProvisioningState: new("Succeeded"), + DiskAccessID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/diskAccesses/test-disk-access"), Encryption: &armcompute.Encryption{ - DiskEncryptionSetID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/diskEncryptionSets/test-des"), + DiskEncryptionSetID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/diskEncryptionSets/test-des"), }, CreationData: &armcompute.CreationData{ - CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), - SourceResourceID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/disks/source-disk"), + CreateOption: new(armcompute.DiskCreateOptionCopy), + SourceResourceID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/disks/source-disk"), }, }, } @@ -523,16 +522,16 @@ func createAzureSnapshot(name, subscriptionID, resourceGroup string) *armcompute // createAzureSnapshotFromSnapshot creates a mock Snapshot that was copied from another snapshot func createAzureSnapshotFromSnapshot(name, subscriptionID, resourceGroup string) *armcompute.Snapshot { return &armcompute.Snapshot{ - Name: to.Ptr(name), - Location: to.Ptr("eastus"), + Name: new(name), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armcompute.SnapshotProperties{ - ProvisioningState: to.Ptr("Succeeded"), + ProvisioningState: new("Succeeded"), CreationData: &armcompute.CreationData{ - CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), - 
SourceResourceID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/snapshots/source-snapshot"), + CreateOption: new(armcompute.DiskCreateOptionCopy), + SourceResourceID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Compute/snapshots/source-snapshot"), }, }, } @@ -541,16 +540,16 @@ func createAzureSnapshotFromSnapshot(name, subscriptionID, resourceGroup string) // createAzureSnapshotFromBlobURI creates a mock Snapshot imported from a blob URI func createAzureSnapshotFromBlobURI(name string) *armcompute.Snapshot { return &armcompute.Snapshot{ - Name: to.Ptr(name), - Location: to.Ptr("eastus"), + Name: new(name), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armcompute.SnapshotProperties{ - ProvisioningState: to.Ptr("Succeeded"), + ProvisioningState: new("Succeeded"), CreationData: &armcompute.CreationData{ - CreateOption: to.Ptr(armcompute.DiskCreateOptionImport), - SourceURI: to.Ptr("https://teststorageaccount.blob.core.windows.net/vhds/my-disk.vhd"), + CreateOption: new(armcompute.DiskCreateOptionImport), + SourceURI: new("https://teststorageaccount.blob.core.windows.net/vhds/my-disk.vhd"), }, }, } @@ -559,16 +558,16 @@ func createAzureSnapshotFromBlobURI(name string) *armcompute.Snapshot { // createAzureSnapshotFromIPBlobURI creates a mock Snapshot imported from a blob URI with an IP address host func createAzureSnapshotFromIPBlobURI(name string) *armcompute.Snapshot { return &armcompute.Snapshot{ - Name: to.Ptr(name), - Location: to.Ptr("eastus"), + Name: new(name), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armcompute.SnapshotProperties{ - ProvisioningState: to.Ptr("Succeeded"), + ProvisioningState: new("Succeeded"), CreationData: &armcompute.CreationData{ - CreateOption: 
to.Ptr(armcompute.DiskCreateOptionImport), - SourceURI: to.Ptr("https://10.0.0.1/vhds/my-disk.vhd"), + CreateOption: new(armcompute.DiskCreateOptionImport), + SourceURI: new("https://10.0.0.1/vhds/my-disk.vhd"), }, }, } @@ -577,31 +576,31 @@ func createAzureSnapshotFromIPBlobURI(name string) *armcompute.Snapshot { // createAzureSnapshotWithEncryptionIPHosts creates a mock Snapshot with encryption settings using IP-based SecretURL and KeyURL func createAzureSnapshotWithEncryptionIPHosts(name, subscriptionID, resourceGroup string) *armcompute.Snapshot { return &armcompute.Snapshot{ - Name: to.Ptr(name), - Location: to.Ptr("eastus"), + Name: new(name), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armcompute.SnapshotProperties{ - ProvisioningState: to.Ptr("Succeeded"), + ProvisioningState: new("Succeeded"), CreationData: &armcompute.CreationData{ - CreateOption: to.Ptr(armcompute.DiskCreateOptionEmpty), + CreateOption: new(armcompute.DiskCreateOptionEmpty), }, EncryptionSettingsCollection: &armcompute.EncryptionSettingsCollection{ - Enabled: to.Ptr(true), + Enabled: new(true), EncryptionSettings: []*armcompute.EncryptionSettingsElement{ { DiskEncryptionKey: &armcompute.KeyVaultAndSecretReference{ SourceVault: &armcompute.SourceVault{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.KeyVault/vaults/test-vault"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.KeyVault/vaults/test-vault"), }, - SecretURL: to.Ptr("https://10.0.0.2/secrets/my-secret/version1"), + SecretURL: new("https://10.0.0.2/secrets/my-secret/version1"), }, KeyEncryptionKey: &armcompute.KeyVaultAndKeyReference{ SourceVault: &armcompute.SourceVault{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.KeyVault/vaults/test-vault"), + ID: new("/subscriptions/" + 
subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.KeyVault/vaults/test-vault"), }, - KeyURL: to.Ptr("https://10.0.0.3/keys/my-key/version1"), + KeyURL: new("https://10.0.0.3/keys/my-key/version1"), }, }, }, @@ -613,17 +612,17 @@ func createAzureSnapshotWithEncryptionIPHosts(name, subscriptionID, resourceGrou // createAzureSnapshotWithCrossResourceGroupLinks creates a mock Snapshot with links to resources in different resource groups func createAzureSnapshotWithCrossResourceGroupLinks(name, subscriptionID string) *armcompute.Snapshot { return &armcompute.Snapshot{ - Name: to.Ptr(name), - Location: to.Ptr("eastus"), + Name: new(name), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armcompute.SnapshotProperties{ - ProvisioningState: to.Ptr("Succeeded"), - DiskAccessID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/other-rg/providers/Microsoft.Compute/diskAccesses/test-disk-access"), + ProvisioningState: new("Succeeded"), + DiskAccessID: new("/subscriptions/" + subscriptionID + "/resourceGroups/other-rg/providers/Microsoft.Compute/diskAccesses/test-disk-access"), CreationData: &armcompute.CreationData{ - CreateOption: to.Ptr(armcompute.DiskCreateOptionCopy), - SourceResourceID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/disk-rg/providers/Microsoft.Compute/disks/source-disk"), + CreateOption: new(armcompute.DiskCreateOptionCopy), + SourceResourceID: new("/subscriptions/" + subscriptionID + "/resourceGroups/disk-rg/providers/Microsoft.Compute/disks/source-disk"), }, }, } @@ -632,15 +631,15 @@ func createAzureSnapshotWithCrossResourceGroupLinks(name, subscriptionID string) // createAzureSnapshotWithoutLinks creates a mock Snapshot without any linked resources func createAzureSnapshotWithoutLinks(name string) *armcompute.Snapshot { return &armcompute.Snapshot{ - Name: to.Ptr(name), - Location: to.Ptr("eastus"), + Name: new(name), + Location: 
new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armcompute.SnapshotProperties{ - ProvisioningState: to.Ptr("Succeeded"), + ProvisioningState: new("Succeeded"), CreationData: &armcompute.CreationData{ - CreateOption: to.Ptr(armcompute.DiskCreateOptionEmpty), + CreateOption: new(armcompute.DiskCreateOptionEmpty), }, }, } diff --git a/sources/azure/manual/compute-virtual-machine-extension_test.go b/sources/azure/manual/compute-virtual-machine-extension_test.go index e64c4a17..8cbff84e 100644 --- a/sources/azure/manual/compute-virtual-machine-extension_test.go +++ b/sources/azure/manual/compute-virtual-machine-extension_test.go @@ -5,7 +5,6 @@ import ( "errors" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "go.uber.org/mock/gomock" @@ -134,9 +133,9 @@ func TestComputeVirtualMachineExtension(t *testing.T) { extension := createAzureVirtualMachineExtension(extensionName, vmName) extension.Properties.ProtectedSettingsFromKeyVault = &armcompute.KeyVaultSecretReference{ SourceVault: &armcompute.SubResource{ - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/different-rg/providers/Microsoft.KeyVault/vaults/test-keyvault"), + ID: new("/subscriptions/test-subscription/resourceGroups/different-rg/providers/Microsoft.KeyVault/vaults/test-keyvault"), }, - SecretURL: to.Ptr("https://test-keyvault.vault.azure.net/secrets/test-secret/version"), + SecretURL: new("https://test-keyvault.vault.azure.net/secrets/test-secret/version"), } mockClient := mocks.NewMockVirtualMachineExtensionsClient(ctrl) @@ -673,18 +672,18 @@ func TestComputeVirtualMachineExtension(t *testing.T) { func createAzureVirtualMachineExtension(extensionName, vmName string) *armcompute.VirtualMachineExtension { return &armcompute.VirtualMachineExtension{ - Name: to.Ptr(extensionName), - Location: to.Ptr("eastus"), - Type: 
to.Ptr("Microsoft.Compute/virtualMachines/extensions"), + Name: new(extensionName), + Location: new("eastus"), + Type: new("Microsoft.Compute/virtualMachines/extensions"), Tags: map[string]*string{ - "env": to.Ptr("test"), - "project": to.Ptr("testing"), + "env": new("test"), + "project": new("testing"), }, Properties: &armcompute.VirtualMachineExtensionProperties{ - Publisher: to.Ptr("Microsoft.Compute"), - Type: to.Ptr("CustomScriptExtension"), - TypeHandlerVersion: to.Ptr("1.10"), - ProvisioningState: to.Ptr("Succeeded"), + Publisher: new("Microsoft.Compute"), + Type: new("CustomScriptExtension"), + TypeHandlerVersion: new("1.10"), + ProvisioningState: new("Succeeded"), }, } } @@ -693,7 +692,7 @@ func createAzureVirtualMachineExtensionWithKeyVault(extensionName, vmName string extension := createAzureVirtualMachineExtension(extensionName, vmName) extension.Properties.ProtectedSettingsFromKeyVault = &armcompute.KeyVaultSecretReference{ SourceVault: &armcompute.SubResource{ - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.KeyVault/vaults/test-keyvault"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.KeyVault/vaults/test-keyvault"), }, } return extension @@ -701,8 +700,8 @@ func createAzureVirtualMachineExtensionWithKeyVault(extensionName, vmName string func createAzureVirtualMachineExtensionWithSettingsURL(extensionName, vmName string) *armcompute.VirtualMachineExtension { extension := createAzureVirtualMachineExtension(extensionName, vmName) - extension.Properties.Settings = map[string]interface{}{ - "fileUris": []interface{}{ + extension.Properties.Settings = map[string]any{ + "fileUris": []any{ "https://example.com/scripts/script.sh", }, "commandToExecute": "bash script.sh", @@ -712,7 +711,7 @@ func createAzureVirtualMachineExtensionWithSettingsURL(extensionName, vmName str func createAzureVirtualMachineExtensionWithSettingsIP(extensionName, vmName string) 
*armcompute.VirtualMachineExtension { extension := createAzureVirtualMachineExtension(extensionName, vmName) - extension.Properties.Settings = map[string]interface{}{ + extension.Properties.Settings = map[string]any{ "serverIP": "10.0.0.1", "port": 8080, } @@ -721,7 +720,7 @@ func createAzureVirtualMachineExtensionWithSettingsIP(extensionName, vmName stri func createAzureVirtualMachineExtensionWithProtectedSettings(extensionName, vmName string) *armcompute.VirtualMachineExtension { extension := createAzureVirtualMachineExtension(extensionName, vmName) - extension.Properties.ProtectedSettings = map[string]interface{}{ + extension.Properties.ProtectedSettings = map[string]any{ "storageAccountName": "mystorageaccount", "storageAccountKey": "secret-key", "endpoint": "https://api.example.com/v1", @@ -733,16 +732,16 @@ func createAzureVirtualMachineExtensionWithAllLinks(extensionName, vmName string extension := createAzureVirtualMachineExtension(extensionName, vmName) extension.Properties.ProtectedSettingsFromKeyVault = &armcompute.KeyVaultSecretReference{ SourceVault: &armcompute.SubResource{ - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.KeyVault/vaults/test-keyvault"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.KeyVault/vaults/test-keyvault"), }, } - extension.Properties.Settings = map[string]interface{}{ - "fileUris": []interface{}{ + extension.Properties.Settings = map[string]any{ + "fileUris": []any{ "https://example.com/scripts/script.sh", }, "serverIP": "10.0.0.1", } - extension.Properties.ProtectedSettings = map[string]interface{}{ + extension.Properties.ProtectedSettings = map[string]any{ "endpoint": "https://api.example.com/v1", } return extension diff --git a/sources/azure/manual/compute-virtual-machine-run-command_test.go b/sources/azure/manual/compute-virtual-machine-run-command_test.go index fa0ddc92..33b68cee 100644 --- 
a/sources/azure/manual/compute-virtual-machine-run-command_test.go +++ b/sources/azure/manual/compute-virtual-machine-run-command_test.go @@ -5,7 +5,6 @@ import ( "errors" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "go.uber.org/mock/gomock" @@ -63,39 +62,39 @@ func (t *testVirtualMachineRunCommandsClient) NewListByVirtualMachinePager(resou func createAzureVirtualMachineRunCommand(runCommandName, vmName string) *armcompute.VirtualMachineRunCommand { return &armcompute.VirtualMachineRunCommand{ - Name: to.Ptr(runCommandName), - Location: to.Ptr("eastus"), + Name: new(runCommandName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), - "project": to.Ptr("testing"), + "env": new("test"), + "project": new("testing"), }, Properties: &armcompute.VirtualMachineRunCommandProperties{ - ProvisioningState: to.Ptr("Succeeded"), + ProvisioningState: new("Succeeded"), }, } } func createAzureVirtualMachineRunCommandWithBlobURIs(runCommandName, vmName string) *armcompute.VirtualMachineRunCommand { runCommand := createAzureVirtualMachineRunCommand(runCommandName, vmName) - runCommand.Properties.OutputBlobURI = to.Ptr("https://mystorageaccount.blob.core.windows.net/outputcontainer/output.log") - runCommand.Properties.ErrorBlobURI = to.Ptr("https://mystorageaccount.blob.core.windows.net/errorcontainer/error.log") + runCommand.Properties.OutputBlobURI = new("https://mystorageaccount.blob.core.windows.net/outputcontainer/output.log") + runCommand.Properties.ErrorBlobURI = new("https://mystorageaccount.blob.core.windows.net/errorcontainer/error.log") return runCommand } func createAzureVirtualMachineRunCommandWithHTTPScriptURI(runCommandName, vmName string) *armcompute.VirtualMachineRunCommand { runCommand := createAzureVirtualMachineRunCommand(runCommandName, vmName) runCommand.Properties.Source = &armcompute.VirtualMachineRunCommandScriptSource{ - ScriptURI: 
to.Ptr("https://example.com/scripts/script.sh"), + ScriptURI: new("https://example.com/scripts/script.sh"), } return runCommand } func createAzureVirtualMachineRunCommandWithAllLinks(runCommandName, vmName string) *armcompute.VirtualMachineRunCommand { runCommand := createAzureVirtualMachineRunCommand(runCommandName, vmName) - runCommand.Properties.OutputBlobURI = to.Ptr("https://mystorageaccount.blob.core.windows.net/outputcontainer/output.log") - runCommand.Properties.ErrorBlobURI = to.Ptr("https://mystorageaccount.blob.core.windows.net/errorcontainer/error.log") + runCommand.Properties.OutputBlobURI = new("https://mystorageaccount.blob.core.windows.net/outputcontainer/output.log") + runCommand.Properties.ErrorBlobURI = new("https://mystorageaccount.blob.core.windows.net/errorcontainer/error.log") runCommand.Properties.Source = &armcompute.VirtualMachineRunCommandScriptSource{ - ScriptURI: to.Ptr("https://mystorageaccount.blob.core.windows.net/scripts/script.sh"), + ScriptURI: new("https://mystorageaccount.blob.core.windows.net/scripts/script.sh"), } return runCommand } @@ -502,9 +501,9 @@ func TestComputeVirtualMachineRunCommand(t *testing.T) { t.Run("SkipItemsWithoutName", func(t *testing.T) { runCommandWithName := createAzureVirtualMachineRunCommand("run-command-1", vmName) runCommandWithoutName := &armcompute.VirtualMachineRunCommand{ - Location: to.Ptr("eastus"), + Location: new("eastus"), Properties: &armcompute.VirtualMachineRunCommandProperties{ - ProvisioningState: to.Ptr("Succeeded"), + ProvisioningState: new("Succeeded"), }, } diff --git a/sources/azure/manual/compute-virtual-machine-scale-set_test.go b/sources/azure/manual/compute-virtual-machine-scale-set_test.go index 62729dfa..22487b1d 100644 --- a/sources/azure/manual/compute-virtual-machine-scale-set_test.go +++ b/sources/azure/manual/compute-virtual-machine-scale-set_test.go @@ -7,7 +7,6 @@ import ( "sync" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" 
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "go.uber.org/mock/gomock" @@ -504,7 +503,7 @@ func (m *MockVirtualMachineScaleSetsPager) More() bool { func (mr *MockVirtualMachineScaleSetsPagerMockRecorder) More() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "More", reflect.TypeOf((*MockVirtualMachineScaleSetsPager)(nil).More)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "More", reflect.TypeFor[func() bool]()) } func (m *MockVirtualMachineScaleSetsPager) NextPage(ctx context.Context) (armcompute.VirtualMachineScaleSetsClientListResponse, error) { @@ -515,34 +514,34 @@ func (m *MockVirtualMachineScaleSetsPager) NextPage(ctx context.Context) (armcom return ret0, ret1 } -func (mr *MockVirtualMachineScaleSetsPagerMockRecorder) NextPage(ctx interface{}) *gomock.Call { +func (mr *MockVirtualMachineScaleSetsPagerMockRecorder) NextPage(ctx any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NextPage", reflect.TypeOf((*MockVirtualMachineScaleSetsPager)(nil).NextPage), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NextPage", reflect.TypeFor[func(ctx context.Context) (armcompute.VirtualMachineScaleSetsClientListResponse, error)](), ctx) } // createAzureVirtualMachineScaleSet creates a mock Azure Virtual Machine Scale Set for testing func createAzureVirtualMachineScaleSet(scaleSetName, provisioningState string) *armcompute.VirtualMachineScaleSet { return &armcompute.VirtualMachineScaleSet{ - Name: to.Ptr(scaleSetName), - Location: to.Ptr("eastus"), + Name: new(scaleSetName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), - "project": to.Ptr("testing"), + "env": new("test"), + "project": new("testing"), }, Properties: &armcompute.VirtualMachineScaleSetProperties{ - ProvisioningState: to.Ptr(provisioningState), + ProvisioningState: new(provisioningState), VirtualMachineProfile: 
&armcompute.VirtualMachineScaleSetVMProfile{ ExtensionProfile: &armcompute.VirtualMachineScaleSetExtensionProfile{ Extensions: []*armcompute.VirtualMachineScaleSetExtension{ { - Name: to.Ptr("CustomScriptExtension"), + Name: new("CustomScriptExtension"), Properties: &armcompute.VirtualMachineScaleSetExtensionProperties{ - Type: to.Ptr("CustomScriptExtension"), - Publisher: to.Ptr("Microsoft.Compute"), - TypeHandlerVersion: to.Ptr("1.10"), + Type: new("CustomScriptExtension"), + Publisher: new("Microsoft.Compute"), + TypeHandlerVersion: new("1.10"), ProtectedSettingsFromKeyVault: &armcompute.KeyVaultSecretReference{ SourceVault: &armcompute.SubResource{ - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.KeyVault/vaults/test-keyvault-ext"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.KeyVault/vaults/test-keyvault-ext"), }, }, }, @@ -551,48 +550,48 @@ func createAzureVirtualMachineScaleSet(scaleSetName, provisioningState string) * }, NetworkProfile: &armcompute.VirtualMachineScaleSetNetworkProfile{ HealthProbe: &armcompute.APIEntityReference{ - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Network/loadBalancers/test-lb/probes/test-probe"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Network/loadBalancers/test-lb/probes/test-probe"), }, NetworkInterfaceConfigurations: []*armcompute.VirtualMachineScaleSetNetworkConfiguration{ { - Name: to.Ptr("nic-config"), + Name: new("nic-config"), Properties: &armcompute.VirtualMachineScaleSetNetworkConfigurationProperties{ NetworkSecurityGroup: &armcompute.SubResource{ - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Network/networkSecurityGroups/test-nsg"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Network/networkSecurityGroups/test-nsg"), }, IPConfigurations: 
[]*armcompute.VirtualMachineScaleSetIPConfiguration{ { - Name: to.Ptr("ip-config"), + Name: new("ip-config"), Properties: &armcompute.VirtualMachineScaleSetIPConfigurationProperties{ Subnet: &armcompute.APIEntityReference{ - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/default"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/default"), }, PublicIPAddressConfiguration: &armcompute.VirtualMachineScaleSetPublicIPAddressConfiguration{ - Name: to.Ptr("public-ip-config"), + Name: new("public-ip-config"), Properties: &armcompute.VirtualMachineScaleSetPublicIPAddressConfigurationProperties{ PublicIPPrefix: &armcompute.SubResource{ - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Network/publicIPPrefixes/test-pip-prefix"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Network/publicIPPrefixes/test-pip-prefix"), }, }, }, LoadBalancerBackendAddressPools: []*armcompute.SubResource{ { - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Network/loadBalancers/test-lb/backendAddressPools/test-backend-pool"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Network/loadBalancers/test-lb/backendAddressPools/test-backend-pool"), }, }, LoadBalancerInboundNatPools: []*armcompute.SubResource{ { - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Network/loadBalancers/test-lb/inboundNatPools/test-nat-pool"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Network/loadBalancers/test-lb/inboundNatPools/test-nat-pool"), }, }, ApplicationGatewayBackendAddressPools: []*armcompute.SubResource{ { - ID: 
to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Network/applicationGateways/test-ag/backendAddressPools/test-ag-pool"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Network/applicationGateways/test-ag/backendAddressPools/test-ag-pool"), }, }, ApplicationSecurityGroups: []*armcompute.SubResource{ { - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Network/applicationSecurityGroups/test-asg"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Network/applicationSecurityGroups/test-asg"), }, }, }, @@ -604,22 +603,22 @@ func createAzureVirtualMachineScaleSet(scaleSetName, provisioningState string) * }, StorageProfile: &armcompute.VirtualMachineScaleSetStorageProfile{ ImageReference: &armcompute.ImageReference{ - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Compute/images/test-image"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Compute/images/test-image"), }, OSDisk: &armcompute.VirtualMachineScaleSetOSDisk{ - Name: to.Ptr("os-disk"), + Name: new("os-disk"), ManagedDisk: &armcompute.VirtualMachineScaleSetManagedDiskParameters{ DiskEncryptionSet: &armcompute.DiskEncryptionSetParameters{ - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Compute/diskEncryptionSets/test-disk-encryption-set"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Compute/diskEncryptionSets/test-disk-encryption-set"), }, }, }, DataDisks: []*armcompute.VirtualMachineScaleSetDataDisk{ { - Name: to.Ptr("data-disk-1"), + Name: new("data-disk-1"), ManagedDisk: &armcompute.VirtualMachineScaleSetManagedDiskParameters{ DiskEncryptionSet: &armcompute.DiskEncryptionSetParameters{ - ID: 
to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Compute/diskEncryptionSets/test-disk-encryption-set-data"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Compute/diskEncryptionSets/test-disk-encryption-set-data"), }, }, }, @@ -629,26 +628,26 @@ func createAzureVirtualMachineScaleSet(scaleSetName, provisioningState string) * Secrets: []*armcompute.VaultSecretGroup{ { SourceVault: &armcompute.SubResource{ - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.KeyVault/vaults/test-keyvault"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.KeyVault/vaults/test-keyvault"), }, }, }, }, DiagnosticsProfile: &armcompute.DiagnosticsProfile{ BootDiagnostics: &armcompute.BootDiagnostics{ - StorageURI: to.Ptr("https://teststorageaccount.blob.core.windows.net/"), + StorageURI: new("https://teststorageaccount.blob.core.windows.net/"), }, }, }, ProximityPlacementGroup: &armcompute.SubResource{ - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Compute/proximityPlacementGroups/test-ppg"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Compute/proximityPlacementGroups/test-ppg"), }, HostGroup: &armcompute.SubResource{ - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Compute/hostGroups/test-host-group"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Compute/hostGroups/test-host-group"), }, }, Identity: &armcompute.VirtualMachineScaleSetIdentity{ - Type: to.Ptr(armcompute.ResourceIdentityTypeUserAssigned), + Type: new(armcompute.ResourceIdentityTypeUserAssigned), UserAssignedIdentities: map[string]*armcompute.UserAssignedIdentitiesValue{ "/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-identity": {}, }, 
diff --git a/sources/azure/manual/compute-virtual-machine_test.go b/sources/azure/manual/compute-virtual-machine_test.go index a746365f..f565919d 100644 --- a/sources/azure/manual/compute-virtual-machine_test.go +++ b/sources/azure/manual/compute-virtual-machine_test.go @@ -6,7 +6,6 @@ import ( "sync" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" "go.uber.org/mock/gomock" @@ -314,26 +313,26 @@ func TestComputeVirtualMachine(t *testing.T) { // createAzureVirtualMachine creates a mock Azure VM for testing func createAzureVirtualMachine(vmName, provisioningState string) *armcompute.VirtualMachine { return &armcompute.VirtualMachine{ - Name: to.Ptr(vmName), - Location: to.Ptr("eastus"), + Name: new(vmName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), - "project": to.Ptr("testing"), + "env": new("test"), + "project": new("testing"), }, Properties: &armcompute.VirtualMachineProperties{ - ProvisioningState: to.Ptr(provisioningState), + ProvisioningState: new(provisioningState), StorageProfile: &armcompute.StorageProfile{ OSDisk: &armcompute.OSDisk{ - Name: to.Ptr("os-disk"), + Name: new("os-disk"), ManagedDisk: &armcompute.ManagedDiskParameters{ - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Compute/disks/os-disk"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Compute/disks/os-disk"), }, }, DataDisks: []*armcompute.DataDisk{ { - Name: to.Ptr("data-disk-1"), + Name: new("data-disk-1"), ManagedDisk: &armcompute.ManagedDiskParameters{ - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Compute/disks/data-disk-1"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Compute/disks/data-disk-1"), }, }, }, @@ -341,19 +340,19 @@ func createAzureVirtualMachine(vmName, provisioningState string) 
*armcompute.Vir NetworkProfile: &armcompute.NetworkProfile{ NetworkInterfaces: []*armcompute.NetworkInterfaceReference{ { - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Network/networkInterfaces/test-nic"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Network/networkInterfaces/test-nic"), }, }, }, AvailabilitySet: &armcompute.SubResource{ - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Compute/availabilitySets/test-avset"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Compute/availabilitySets/test-avset"), }, }, // Add VM extensions to Resources Resources: []*armcompute.VirtualMachineExtension{ { - Name: to.Ptr("CustomScriptExtension"), - Type: to.Ptr("Microsoft.Compute/virtualMachines/extensions"), + Name: new("CustomScriptExtension"), + Type: new("Microsoft.Compute/virtualMachines/extensions"), }, }, } diff --git a/sources/azure/manual/dbforpostgresql-database_test.go b/sources/azure/manual/dbforpostgresql-database_test.go index da9a097f..eba76125 100644 --- a/sources/azure/manual/dbforpostgresql-database_test.go +++ b/sources/azure/manual/dbforpostgresql-database_test.go @@ -5,7 +5,6 @@ import ( "errors" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresqlflexibleservers/v5" "go.uber.org/mock/gomock" @@ -193,8 +192,8 @@ func TestDBforPostgreSQLDatabase(t *testing.T) { database2 := &armpostgresqlflexibleservers.Database{ Name: nil, // Database with nil name should be skipped Properties: &armpostgresqlflexibleservers.DatabaseProperties{ - Charset: to.Ptr("UTF8"), - Collation: to.Ptr("en_US.utf8"), + Charset: new("UTF8"), + Collation: new("en_US.utf8"), }, } @@ -299,11 +298,11 @@ func createAzurePostgreSQLDatabase(serverName, databaseName string) *armpostgres databaseID := 
"/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.DBforPostgreSQL/flexibleServers/" + serverName + "/databases/" + databaseName return &armpostgresqlflexibleservers.Database{ - Name: to.Ptr(databaseName), - ID: to.Ptr(databaseID), + Name: new(databaseName), + ID: new(databaseID), Properties: &armpostgresqlflexibleservers.DatabaseProperties{ - Charset: to.Ptr("UTF8"), - Collation: to.Ptr("en_US.utf8"), + Charset: new("UTF8"), + Collation: new("en_US.utf8"), }, } } diff --git a/sources/azure/manual/dbforpostgresql-flexible-server-firewall-rule.go b/sources/azure/manual/dbforpostgresql-flexible-server-firewall-rule.go new file mode 100644 index 00000000..eb583710 --- /dev/null +++ b/sources/azure/manual/dbforpostgresql-flexible-server-firewall-rule.go @@ -0,0 +1,250 @@ +package manual + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresqlflexibleservers/v5" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" + "github.com/overmindtech/cli/sources/stdlib" +) + +var DBforPostgreSQLFlexibleServerFirewallRuleLookupByName = shared.NewItemTypeLookup("name", azureshared.DBforPostgreSQLFlexibleServerFirewallRule) + +type dbforPostgreSQLFlexibleServerFirewallRuleWrapper struct { + client clients.PostgreSQLFlexibleServerFirewallRuleClient + + *azureshared.MultiResourceGroupBase +} + +func NewDBforPostgreSQLFlexibleServerFirewallRule(client clients.PostgreSQLFlexibleServerFirewallRuleClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.SearchableWrapper { + return &dbforPostgreSQLFlexibleServerFirewallRuleWrapper{ + client: client, + MultiResourceGroupBase: 
azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_DATABASE, + azureshared.DBforPostgreSQLFlexibleServerFirewallRule, + ), + } +} + +func (s dbforPostgreSQLFlexibleServerFirewallRuleWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 2 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Get requires 2 query parts: serverName and firewallRuleName", + Scope: scope, + ItemType: s.Type(), + } + } + serverName := queryParts[0] + firewallRuleName := queryParts[1] + if firewallRuleName == "" { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "firewallRuleName cannot be empty", + Scope: scope, + ItemType: s.Type(), + } + } + + rgScope, err := s.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + resp, err := s.client.Get(ctx, rgScope.ResourceGroup, serverName, firewallRuleName) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + return s.azureDBforPostgreSQLFlexibleServerFirewallRuleToSDPItem(&resp.FirewallRule, serverName, firewallRuleName, scope) +} + +func (s dbforPostgreSQLFlexibleServerFirewallRuleWrapper) azureDBforPostgreSQLFlexibleServerFirewallRuleToSDPItem(rule *armpostgresqlflexibleservers.FirewallRule, serverName, firewallRuleName, scope string) (*sdp.Item, *sdp.QueryError) { + attributes, err := shared.ToAttributesWithExclude(rule, "tags") + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + err = attributes.Set("uniqueAttr", shared.CompositeLookupKey(serverName, firewallRuleName)) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + sdpItem := &sdp.Item{ + Type: azureshared.DBforPostgreSQLFlexibleServerFirewallRule.String(), + UniqueAttribute: "uniqueAttr", + Attributes: attributes, + Scope: scope, + Tags: nil, + } + 
+ // Link to parent PostgreSQL Flexible Server + if rule.ID != nil { + params := azureshared.ExtractPathParamsFromResourceID(*rule.ID, []string{"flexibleServers"}) + if len(params) > 0 { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.DBforPostgreSQLFlexibleServer.String(), + Method: sdp.QueryMethod_GET, + Query: params[0], + Scope: scope, + }, + }) + } + } else { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.DBforPostgreSQLFlexibleServer.String(), + Method: sdp.QueryMethod_GET, + Query: serverName, + Scope: scope, + }, + }) + } + + // Link to stdlib IP items for StartIPAddress and EndIPAddress + if rule.Properties != nil { + if rule.Properties.StartIPAddress != nil && *rule.Properties.StartIPAddress != "" { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkIP.String(), + Method: sdp.QueryMethod_GET, + Query: *rule.Properties.StartIPAddress, + Scope: "global", + }, + }) + } + if rule.Properties.EndIPAddress != nil && *rule.Properties.EndIPAddress != "" && (rule.Properties.StartIPAddress == nil || *rule.Properties.EndIPAddress != *rule.Properties.StartIPAddress) { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkIP.String(), + Method: sdp.QueryMethod_GET, + Query: *rule.Properties.EndIPAddress, + Scope: "global", + }, + }) + } + } + + return sdpItem, nil +} + +func (s dbforPostgreSQLFlexibleServerFirewallRuleWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + DBforPostgreSQLFlexibleServerLookupByName, + DBforPostgreSQLFlexibleServerFirewallRuleLookupByName, + } +} + +func (s dbforPostgreSQLFlexibleServerFirewallRuleWrapper) Search(ctx context.Context, scope string, queryParts ...string) ([]*sdp.Item, *sdp.QueryError) { + 
if len(queryParts) < 1 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Search requires 1 query part: serverName", + Scope: scope, + ItemType: s.Type(), + } + } + serverName := queryParts[0] + + rgScope, err := s.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + pager := s.client.ListByServer(ctx, rgScope.ResourceGroup, serverName) + + var items []*sdp.Item + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + for _, rule := range page.Value { + if rule.Name == nil { + continue + } + item, sdpErr := s.azureDBforPostgreSQLFlexibleServerFirewallRuleToSDPItem(rule, serverName, *rule.Name, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + + return items, nil +} + +func (s dbforPostgreSQLFlexibleServerFirewallRuleWrapper) SearchStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string, queryParts ...string) { + if len(queryParts) < 1 { + stream.SendError(azureshared.QueryError(errors.New("Search requires 1 query part: serverName"), scope, s.Type())) + return + } + serverName := queryParts[0] + + rgScope, err := s.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, s.Type())) + return + } + pager := s.client.ListByServer(ctx, rgScope.ResourceGroup, serverName) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, s.Type())) + return + } + for _, rule := range page.Value { + if rule.Name == nil { + continue + } + item, sdpErr := s.azureDBforPostgreSQLFlexibleServerFirewallRuleToSDPItem(rule, serverName, *rule.Name, scope) + if sdpErr != nil { + stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) 
+ stream.SendItem(item) + } + } +} + +func (s dbforPostgreSQLFlexibleServerFirewallRuleWrapper) SearchLookups() []sources.ItemTypeLookups { + return []sources.ItemTypeLookups{ + { + DBforPostgreSQLFlexibleServerLookupByName, + }, + } +} + +func (s dbforPostgreSQLFlexibleServerFirewallRuleWrapper) PotentialLinks() map[shared.ItemType]bool { + return map[shared.ItemType]bool{ + azureshared.DBforPostgreSQLFlexibleServer: true, + stdlib.NetworkIP: true, + } +} + +func (s dbforPostgreSQLFlexibleServerFirewallRuleWrapper) TerraformMappings() []*sdp.TerraformMapping { + return []*sdp.TerraformMapping{ + { + TerraformMethod: sdp.QueryMethod_SEARCH, + TerraformQueryMap: "azurerm_postgresql_flexible_server_firewall_rule.id", + }, + } +} + +func (s dbforPostgreSQLFlexibleServerFirewallRuleWrapper) IAMPermissions() []string { + return []string{ + "Microsoft.DBforPostgreSQL/flexibleServers/firewallRules/read", + } +} + +func (s dbforPostgreSQLFlexibleServerFirewallRuleWrapper) PredefinedRole() string { + return "Reader" +} diff --git a/sources/azure/manual/dbforpostgresql-flexible-server-firewall-rule_test.go b/sources/azure/manual/dbforpostgresql-flexible-server-firewall-rule_test.go new file mode 100644 index 00000000..58891de8 --- /dev/null +++ b/sources/azure/manual/dbforpostgresql-flexible-server-firewall-rule_test.go @@ -0,0 +1,324 @@ +package manual_test + +import ( + "context" + "errors" + "slices" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresqlflexibleservers/v5" + "go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + 
"github.com/overmindtech/cli/sources/shared" + "github.com/overmindtech/cli/sources/stdlib" +) + +type mockPostgreSQLFlexibleServerFirewallRulePager struct { + pages []armpostgresqlflexibleservers.FirewallRulesClientListByServerResponse + index int +} + +func (m *mockPostgreSQLFlexibleServerFirewallRulePager) More() bool { + return m.index < len(m.pages) +} + +func (m *mockPostgreSQLFlexibleServerFirewallRulePager) NextPage(ctx context.Context) (armpostgresqlflexibleservers.FirewallRulesClientListByServerResponse, error) { + if m.index >= len(m.pages) { + return armpostgresqlflexibleservers.FirewallRulesClientListByServerResponse{}, errors.New("no more pages") + } + page := m.pages[m.index] + m.index++ + return page, nil +} + +type errorPostgreSQLFlexibleServerFirewallRulePager struct{} + +func (e *errorPostgreSQLFlexibleServerFirewallRulePager) More() bool { + return true +} + +func (e *errorPostgreSQLFlexibleServerFirewallRulePager) NextPage(ctx context.Context) (armpostgresqlflexibleservers.FirewallRulesClientListByServerResponse, error) { + return armpostgresqlflexibleservers.FirewallRulesClientListByServerResponse{}, errors.New("pager error") +} + +type testPostgreSQLFlexibleServerFirewallRuleClient struct { + *mocks.MockPostgreSQLFlexibleServerFirewallRuleClient + pager clients.PostgreSQLFlexibleServerFirewallRulePager +} + +func (t *testPostgreSQLFlexibleServerFirewallRuleClient) ListByServer(ctx context.Context, resourceGroupName, serverName string) clients.PostgreSQLFlexibleServerFirewallRulePager { + return t.pager +} + +func TestDBforPostgreSQLFlexibleServerFirewallRule(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + serverName := "test-server" + firewallRuleName := "test-rule" + + t.Run("Get", func(t *testing.T) { + rule := createAzurePostgreSQLFlexibleServerFirewallRule(serverName, firewallRuleName) + + mockClient := 
mocks.NewMockPostgreSQLFlexibleServerFirewallRuleClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, serverName, firewallRuleName).Return( + armpostgresqlflexibleservers.FirewallRulesClientGetResponse{ + FirewallRule: *rule, + }, nil) + + wrapper := manual.NewDBforPostgreSQLFlexibleServerFirewallRule(&testPostgreSQLFlexibleServerFirewallRuleClient{MockPostgreSQLFlexibleServerFirewallRuleClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(serverName, firewallRuleName) + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.DBforPostgreSQLFlexibleServerFirewallRule.String() { + t.Errorf("Expected type %s, got %s", azureshared.DBforPostgreSQLFlexibleServerFirewallRule, sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", sdpItem.GetUniqueAttribute()) + } + + expectedUniqueAttrValue := shared.CompositeLookupKey(serverName, firewallRuleName) + if sdpItem.UniqueAttributeValue() != expectedUniqueAttrValue { + t.Errorf("Expected unique attribute value %s, got %s", expectedUniqueAttrValue, sdpItem.UniqueAttributeValue()) + } + + if sdpItem.GetScope() != subscriptionID+"."+resourceGroup { + t.Errorf("Expected scope %s, got %s", subscriptionID+"."+resourceGroup, sdpItem.GetScope()) + } + + if err := sdpItem.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + + t.Run("StaticTests", func(t *testing.T) { + queryTests := shared.QueryTests{ + { + ExpectedType: azureshared.DBforPostgreSQLFlexibleServer.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: serverName, + ExpectedScope: subscriptionID + "." 
+ resourceGroup, + }, + { + ExpectedType: stdlib.NetworkIP.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "0.0.0.0", + ExpectedScope: "global", + }, + { + ExpectedType: stdlib.NetworkIP.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "255.255.255.255", + ExpectedScope: "global", + }, + } + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("GetWithInsufficientQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockPostgreSQLFlexibleServerFirewallRuleClient(ctrl) + wrapper := manual.NewDBforPostgreSQLFlexibleServerFirewallRule(&testPostgreSQLFlexibleServerFirewallRuleClient{MockPostgreSQLFlexibleServerFirewallRuleClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], serverName, true) + if qErr == nil { + t.Error("Expected error when providing only serverName (1 query part), but got nil") + } + }) + + t.Run("GetWithEmptyName", func(t *testing.T) { + mockClient := mocks.NewMockPostgreSQLFlexibleServerFirewallRuleClient(ctrl) + wrapper := manual.NewDBforPostgreSQLFlexibleServerFirewallRule(&testPostgreSQLFlexibleServerFirewallRuleClient{MockPostgreSQLFlexibleServerFirewallRuleClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(serverName, "") + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when firewall rule name is empty, but got nil") + } + }) + + t.Run("Search", func(t *testing.T) { + rule1 := createAzurePostgreSQLFlexibleServerFirewallRule(serverName, "rule1") + rule2 := createAzurePostgreSQLFlexibleServerFirewallRule(serverName, "rule2") + + mockClient := 
mocks.NewMockPostgreSQLFlexibleServerFirewallRuleClient(ctrl) + pager := &mockPostgreSQLFlexibleServerFirewallRulePager{ + pages: []armpostgresqlflexibleservers.FirewallRulesClientListByServerResponse{ + { + FirewallRuleList: armpostgresqlflexibleservers.FirewallRuleList{ + Value: []*armpostgresqlflexibleservers.FirewallRule{rule1, rule2}, + }, + }, + }, + } + + testClient := &testPostgreSQLFlexibleServerFirewallRuleClient{ + MockPostgreSQLFlexibleServerFirewallRuleClient: mockClient, + pager: pager, + } + wrapper := manual.NewDBforPostgreSQLFlexibleServerFirewallRule(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + items, qErr := searchable.Search(ctx, wrapper.Scopes()[0], serverName, true) + if qErr != nil { + t.Fatalf("Expected no error from Search, got: %v", qErr) + } + if len(items) != 2 { + t.Errorf("Expected 2 items from Search, got %d", len(items)) + } + }) + + t.Run("SearchStream", func(t *testing.T) { + rule1 := createAzurePostgreSQLFlexibleServerFirewallRule(serverName, "rule1") + + mockClient := mocks.NewMockPostgreSQLFlexibleServerFirewallRuleClient(ctrl) + pager := &mockPostgreSQLFlexibleServerFirewallRulePager{ + pages: []armpostgresqlflexibleservers.FirewallRulesClientListByServerResponse{ + { + FirewallRuleList: armpostgresqlflexibleservers.FirewallRuleList{ + Value: []*armpostgresqlflexibleservers.FirewallRule{rule1}, + }, + }, + }, + } + + testClient := &testPostgreSQLFlexibleServerFirewallRuleClient{ + MockPostgreSQLFlexibleServerFirewallRuleClient: mockClient, + pager: pager, + } + wrapper := manual.NewDBforPostgreSQLFlexibleServerFirewallRule(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := 
sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchStreamable, ok := adapter.(discovery.SearchStreamableAdapter) + if !ok { + t.Fatalf("Adapter does not support SearchStream operation") + } + + stream := discovery.NewRecordingQueryResultStream() + searchStreamable.SearchStream(ctx, wrapper.Scopes()[0], serverName, true, stream) + items := stream.GetItems() + errs := stream.GetErrors() + if len(errs) > 0 { + t.Fatalf("Expected no errors from SearchStream, got: %v", errs) + } + if len(items) != 1 { + t.Errorf("Expected 1 item from SearchStream, got %d", len(items)) + } + }) + + t.Run("SearchWithInsufficientQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockPostgreSQLFlexibleServerFirewallRuleClient(ctrl) + wrapper := manual.NewDBforPostgreSQLFlexibleServerFirewallRule(&testPostgreSQLFlexibleServerFirewallRuleClient{MockPostgreSQLFlexibleServerFirewallRuleClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + _, qErr := wrapper.Search(ctx, wrapper.Scopes()[0]) + if qErr == nil { + t.Error("Expected error when providing no query parts, but got nil") + } + }) + + t.Run("ErrorHandling_Get", func(t *testing.T) { + expectedErr := errors.New("firewall rule not found") + + mockClient := mocks.NewMockPostgreSQLFlexibleServerFirewallRuleClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, serverName, "nonexistent-rule").Return( + armpostgresqlflexibleservers.FirewallRulesClientGetResponse{}, expectedErr) + + wrapper := manual.NewDBforPostgreSQLFlexibleServerFirewallRule(&testPostgreSQLFlexibleServerFirewallRuleClient{MockPostgreSQLFlexibleServerFirewallRuleClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(serverName, "nonexistent-rule") + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, 
true) + if qErr == nil { + t.Error("Expected error when getting non-existent firewall rule, but got nil") + } + }) + + t.Run("ErrorHandling_Search", func(t *testing.T) { + mockClient := mocks.NewMockPostgreSQLFlexibleServerFirewallRuleClient(ctrl) + errorPager := &errorPostgreSQLFlexibleServerFirewallRulePager{} + testClient := &testPostgreSQLFlexibleServerFirewallRuleClient{ + MockPostgreSQLFlexibleServerFirewallRuleClient: mockClient, + pager: errorPager, + } + + wrapper := manual.NewDBforPostgreSQLFlexibleServerFirewallRule(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + _, qErr := wrapper.Search(ctx, wrapper.Scopes()[0], serverName) + if qErr == nil { + t.Error("Expected error from Search when pager returns error, but got nil") + } + }) + + t.Run("InterfaceCompliance", func(t *testing.T) { + mockClient := mocks.NewMockPostgreSQLFlexibleServerFirewallRuleClient(ctrl) + wrapper := manual.NewDBforPostgreSQLFlexibleServerFirewallRule(&testPostgreSQLFlexibleServerFirewallRuleClient{MockPostgreSQLFlexibleServerFirewallRuleClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + w := wrapper.(sources.Wrapper) + + permissions := w.IAMPermissions() + if len(permissions) == 0 { + t.Error("Expected IAMPermissions to return at least one permission") + } + expectedPermission := "Microsoft.DBforPostgreSQL/flexibleServers/firewallRules/read" + found := slices.Contains(permissions, expectedPermission) + if !found { + t.Errorf("Expected IAMPermissions to include %s", expectedPermission) + } + + potentialLinks := w.PotentialLinks() + if !potentialLinks[azureshared.DBforPostgreSQLFlexibleServer] { + t.Error("Expected PotentialLinks to include DBforPostgreSQLFlexibleServer") + } + if !potentialLinks[stdlib.NetworkIP] { + t.Error("Expected PotentialLinks to include stdlib.NetworkIP") + } + + mappings := w.TerraformMappings() + if len(mappings) == 0 { + 
t.Error("Expected TerraformMappings to return at least one mapping") + } + foundMapping := false + for _, mapping := range mappings { + if mapping.GetTerraformQueryMap() == "azurerm_postgresql_flexible_server_firewall_rule.id" { + foundMapping = true + break + } + } + if !foundMapping { + t.Error("Expected TerraformMappings to include 'azurerm_postgresql_flexible_server_firewall_rule.id' mapping") + } + }) +} + +func createAzurePostgreSQLFlexibleServerFirewallRule(serverName, firewallRuleName string) *armpostgresqlflexibleservers.FirewallRule { + ruleID := "/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.DBforPostgreSQL/flexibleServers/" + serverName + "/firewallRules/" + firewallRuleName + return &armpostgresqlflexibleservers.FirewallRule{ + Name: new(firewallRuleName), + ID: new(ruleID), + Properties: &armpostgresqlflexibleservers.FirewallRuleProperties{ + StartIPAddress: new("0.0.0.0"), + EndIPAddress: new("255.255.255.255"), + }, + } +} diff --git a/sources/azure/manual/dbforpostgresql-flexible-server-private-endpoint-connection.go b/sources/azure/manual/dbforpostgresql-flexible-server-private-endpoint-connection.go new file mode 100644 index 00000000..00bdb154 --- /dev/null +++ b/sources/azure/manual/dbforpostgresql-flexible-server-private-endpoint-connection.go @@ -0,0 +1,236 @@ +package manual + +import ( + "context" + "errors" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresqlflexibleservers/v5" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" +) + +var DBforPostgreSQLFlexibleServerPrivateEndpointConnectionLookupByName = shared.NewItemTypeLookup("name", 
azureshared.DBforPostgreSQLFlexibleServerPrivateEndpointConnection) + +type dbforpostgresqlFlexibleServerPrivateEndpointConnectionWrapper struct { + client clients.DBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient + + *azureshared.MultiResourceGroupBase +} + +// NewDBforPostgreSQLFlexibleServerPrivateEndpointConnection returns a SearchableWrapper for Azure DB for PostgreSQL flexible server private endpoint connections. +func NewDBforPostgreSQLFlexibleServerPrivateEndpointConnection(client clients.DBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.SearchableWrapper { + return &dbforpostgresqlFlexibleServerPrivateEndpointConnectionWrapper{ + client: client, + MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_DATABASE, + azureshared.DBforPostgreSQLFlexibleServerPrivateEndpointConnection, + ), + } +} + +func (s dbforpostgresqlFlexibleServerPrivateEndpointConnectionWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 2 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Get requires 2 query parts: serverName and privateEndpointConnectionName", + Scope: scope, + ItemType: s.Type(), + } + } + serverName := queryParts[0] + connectionName := queryParts[1] + + rgScope, err := s.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + resp, err := s.client.Get(ctx, rgScope.ResourceGroup, serverName, connectionName) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + item, sdpErr := s.azurePrivateEndpointConnectionToSDPItem(&resp.PrivateEndpointConnection, serverName, connectionName, scope) + if sdpErr != nil { + return nil, sdpErr + } + return item, nil +} + +func (s 
dbforpostgresqlFlexibleServerPrivateEndpointConnectionWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + DBforPostgreSQLFlexibleServerLookupByName, + DBforPostgreSQLFlexibleServerPrivateEndpointConnectionLookupByName, + } +} + +func (s dbforpostgresqlFlexibleServerPrivateEndpointConnectionWrapper) Search(ctx context.Context, scope string, queryParts ...string) ([]*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 1 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Search requires 1 query part: serverName", + Scope: scope, + ItemType: s.Type(), + } + } + serverName := queryParts[0] + + rgScope, err := s.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + pager := s.client.ListByServer(ctx, rgScope.ResourceGroup, serverName) + + var items []*sdp.Item + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + for _, conn := range page.Value { + if conn == nil || conn.Name == nil { + continue + } + + item, sdpErr := s.azurePrivateEndpointConnectionToSDPItem(conn, serverName, *conn.Name, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + + return items, nil +} + +func (s dbforpostgresqlFlexibleServerPrivateEndpointConnectionWrapper) SearchStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string, queryParts ...string) { + if len(queryParts) < 1 { + stream.SendError(azureshared.QueryError(errors.New("Search requires 1 query part: serverName"), scope, s.Type())) + return + } + serverName := queryParts[0] + + rgScope, err := s.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, s.Type())) + return + } + pager := s.client.ListByServer(ctx, rgScope.ResourceGroup, serverName) + for pager.More() { + 
page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, s.Type())) + return + } + for _, conn := range page.Value { + if conn == nil || conn.Name == nil { + continue + } + item, sdpErr := s.azurePrivateEndpointConnectionToSDPItem(conn, serverName, *conn.Name, scope) + if sdpErr != nil { + stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) + stream.SendItem(item) + } + } +} + +func (s dbforpostgresqlFlexibleServerPrivateEndpointConnectionWrapper) SearchLookups() []sources.ItemTypeLookups { + return []sources.ItemTypeLookups{ + { + DBforPostgreSQLFlexibleServerLookupByName, + }, + } +} + +func (s dbforpostgresqlFlexibleServerPrivateEndpointConnectionWrapper) PotentialLinks() map[shared.ItemType]bool { + return map[shared.ItemType]bool{ + azureshared.DBforPostgreSQLFlexibleServer: true, + azureshared.NetworkPrivateEndpoint: true, + } +} + +func (s dbforpostgresqlFlexibleServerPrivateEndpointConnectionWrapper) azurePrivateEndpointConnectionToSDPItem(conn *armpostgresqlflexibleservers.PrivateEndpointConnection, serverName, connectionName, scope string) (*sdp.Item, *sdp.QueryError) { + attributes, err := shared.ToAttributesWithExclude(conn) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + err = attributes.Set("uniqueAttr", shared.CompositeLookupKey(serverName, connectionName)) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + sdpItem := &sdp.Item{ + Type: azureshared.DBforPostgreSQLFlexibleServerPrivateEndpointConnection.String(), + UniqueAttribute: "uniqueAttr", + Attributes: attributes, + Scope: scope, + } + + // Health from provisioning state + if conn.Properties != nil && conn.Properties.ProvisioningState != nil { + state := strings.ToLower(string(*conn.Properties.ProvisioningState)) + switch state { + case "succeeded": + sdpItem.Health = sdp.Health_HEALTH_OK.Enum() + case "creating", 
"updating", "deleting": + sdpItem.Health = sdp.Health_HEALTH_PENDING.Enum() + case "failed": + sdpItem.Health = sdp.Health_HEALTH_ERROR.Enum() + default: + sdpItem.Health = sdp.Health_HEALTH_UNKNOWN.Enum() + } + } + + // Link to parent DB for PostgreSQL Flexible Server + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.DBforPostgreSQLFlexibleServer.String(), + Method: sdp.QueryMethod_GET, + Query: serverName, + Scope: scope, + }, + }) + + // Link to Network Private Endpoint when present (may be in different resource group) + if conn.Properties != nil && conn.Properties.PrivateEndpoint != nil && conn.Properties.PrivateEndpoint.ID != nil { + peID := *conn.Properties.PrivateEndpoint.ID + peName := azureshared.ExtractResourceName(peID) + if peName != "" { + linkedScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(peID); extractedScope != "" { + linkedScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkPrivateEndpoint.String(), + Method: sdp.QueryMethod_GET, + Query: peName, + Scope: linkedScope, + }, + }) + } + } + + return sdpItem, nil +} + +func (s dbforpostgresqlFlexibleServerPrivateEndpointConnectionWrapper) IAMPermissions() []string { + return []string{ + "Microsoft.DBforPostgreSQL/flexibleServers/privateEndpointConnections/read", + } +} + +func (s dbforpostgresqlFlexibleServerPrivateEndpointConnectionWrapper) PredefinedRole() string { + return "Reader" +} diff --git a/sources/azure/manual/dbforpostgresql-flexible-server-private-endpoint-connection_test.go b/sources/azure/manual/dbforpostgresql-flexible-server-private-endpoint-connection_test.go new file mode 100644 index 00000000..22334460 --- /dev/null +++ b/sources/azure/manual/dbforpostgresql-flexible-server-private-endpoint-connection_test.go @@ -0,0 +1,278 @@ +package manual_test + +import ( + 
"context" + "errors" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresqlflexibleservers/v5" + "go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + "github.com/overmindtech/cli/sources/shared" +) + +type mockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsPager struct { + pages []armpostgresqlflexibleservers.PrivateEndpointConnectionsClientListByServerResponse + index int +} + +func (m *mockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsPager) More() bool { + return m.index < len(m.pages) +} + +func (m *mockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsPager) NextPage(ctx context.Context) (armpostgresqlflexibleservers.PrivateEndpointConnectionsClientListByServerResponse, error) { + if m.index >= len(m.pages) { + return armpostgresqlflexibleservers.PrivateEndpointConnectionsClientListByServerResponse{}, errors.New("no more pages") + } + page := m.pages[m.index] + m.index++ + return page, nil +} + +type testDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient struct { + *mocks.MockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient + pager clients.DBforPostgreSQLFlexibleServerPrivateEndpointConnectionsPager +} + +func (t *testDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient) ListByServer(ctx context.Context, resourceGroupName, serverName string) clients.DBforPostgreSQLFlexibleServerPrivateEndpointConnectionsPager { + return t.pager +} + +func TestDBforPostgreSQLFlexibleServerPrivateEndpointConnection(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" 
+ resourceGroup := "test-rg" + serverName := "test-pg-server" + connectionName := "test-pec" + + t.Run("Get", func(t *testing.T) { + conn := createAzureDBforPostgreSQLFlexibleServerPrivateEndpointConnection(connectionName, "") + + mockClient := mocks.NewMockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, serverName, connectionName).Return( + armpostgresqlflexibleservers.PrivateEndpointConnectionsClientGetResponse{ + PrivateEndpointConnection: *conn, + }, nil) + + testClient := &testDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient{MockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient: mockClient} + wrapper := manual.NewDBforPostgreSQLFlexibleServerPrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(serverName, connectionName) + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.DBforPostgreSQLFlexibleServerPrivateEndpointConnection.String() { + t.Errorf("Expected type %s, got %s", azureshared.DBforPostgreSQLFlexibleServerPrivateEndpointConnection, sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", sdpItem.GetUniqueAttribute()) + } + + if sdpItem.UniqueAttributeValue() != shared.CompositeLookupKey(serverName, connectionName) { + t.Errorf("Expected unique attribute value %s, got %s", shared.CompositeLookupKey(serverName, connectionName), sdpItem.UniqueAttributeValue()) + } + + if sdpItem.GetScope() != subscriptionID+"."+resourceGroup { + t.Errorf("Expected scope %s, got %s", subscriptionID+"."+resourceGroup, sdpItem.GetScope()) + } + + if err := sdpItem.Validate(); err != nil 
{ + t.Fatalf("Expected no validation error, got: %v", err) + } + + t.Run("StaticTests", func(t *testing.T) { + linkedQueries := sdpItem.GetLinkedItemQueries() + if len(linkedQueries) < 1 { + t.Fatalf("Expected at least 1 linked query, got: %d", len(linkedQueries)) + } + + foundFlexibleServer := false + for _, lq := range linkedQueries { + if lq.GetQuery().GetType() == azureshared.DBforPostgreSQLFlexibleServer.String() { + foundFlexibleServer = true + if lq.GetQuery().GetMethod() != sdp.QueryMethod_GET { + t.Errorf("Expected DBforPostgreSQLFlexibleServer link method GET, got %v", lq.GetQuery().GetMethod()) + } + if lq.GetQuery().GetQuery() != serverName { + t.Errorf("Expected DBforPostgreSQLFlexibleServer query %s, got %s", serverName, lq.GetQuery().GetQuery()) + } + } + } + if !foundFlexibleServer { + t.Error("Expected linked query to DBforPostgreSQLFlexibleServer") + } + }) + }) + + t.Run("Get_WithPrivateEndpointLink", func(t *testing.T) { + peID := "/subscriptions/test-subscription/resourceGroups/other-rg/providers/Microsoft.Network/privateEndpoints/test-pe" + conn := createAzureDBforPostgreSQLFlexibleServerPrivateEndpointConnection(connectionName, peID) + + mockClient := mocks.NewMockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, serverName, connectionName).Return( + armpostgresqlflexibleservers.PrivateEndpointConnectionsClientGetResponse{ + PrivateEndpointConnection: *conn, + }, nil) + + testClient := &testDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient{MockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient: mockClient} + wrapper := manual.NewDBforPostgreSQLFlexibleServerPrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(serverName, connectionName) + sdpItem, qErr := 
adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + foundPrivateEndpoint := false + for _, lq := range sdpItem.GetLinkedItemQueries() { + if lq.GetQuery().GetType() == azureshared.NetworkPrivateEndpoint.String() { + foundPrivateEndpoint = true + if lq.GetQuery().GetQuery() != "test-pe" { + t.Errorf("Expected NetworkPrivateEndpoint query 'test-pe', got %s", lq.GetQuery().GetQuery()) + } + } + } + if !foundPrivateEndpoint { + t.Error("Expected linked query to NetworkPrivateEndpoint when PrivateEndpoint ID is set") + } + }) + + t.Run("Search", func(t *testing.T) { + conn1 := createAzureDBforPostgreSQLFlexibleServerPrivateEndpointConnection("pec-1", "") + conn2 := createAzureDBforPostgreSQLFlexibleServerPrivateEndpointConnection("pec-2", "") + + mockClient := mocks.NewMockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient(ctrl) + mockPager := &mockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsPager{ + pages: []armpostgresqlflexibleservers.PrivateEndpointConnectionsClientListByServerResponse{ + { + PrivateEndpointConnectionList: armpostgresqlflexibleservers.PrivateEndpointConnectionList{ + Value: []*armpostgresqlflexibleservers.PrivateEndpointConnection{conn1, conn2}, + }, + }, + }, + } + testClient := &testDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient{ + MockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient: mockClient, + pager: mockPager, + } + + wrapper := manual.NewDBforPostgreSQLFlexibleServerPrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + items, qErr := wrapper.Search(ctx, subscriptionID+"."+resourceGroup, serverName) + if qErr != nil { + t.Fatalf("Search failed: %v", qErr) + } + if len(items) != 2 { + t.Errorf("Expected 2 items, got %d", len(items)) + } + for _, item := range items { + if item.GetType() != 
azureshared.DBforPostgreSQLFlexibleServerPrivateEndpointConnection.String() { + t.Errorf("Expected type %s, got %s", azureshared.DBforPostgreSQLFlexibleServerPrivateEndpointConnection, item.GetType()) + } + } + }) + + t.Run("Search_NilNameSkipped", func(t *testing.T) { + validConn := createAzureDBforPostgreSQLFlexibleServerPrivateEndpointConnection("valid-pec", "") + + mockClient := mocks.NewMockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient(ctrl) + mockPager := &mockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsPager{ + pages: []armpostgresqlflexibleservers.PrivateEndpointConnectionsClientListByServerResponse{ + { + PrivateEndpointConnectionList: armpostgresqlflexibleservers.PrivateEndpointConnectionList{ + Value: []*armpostgresqlflexibleservers.PrivateEndpointConnection{ + nil, + {Name: nil}, + validConn, + }, + }, + }, + }, + } + testClient := &testDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient{ + MockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient: mockClient, + pager: mockPager, + } + + wrapper := manual.NewDBforPostgreSQLFlexibleServerPrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + items, qErr := wrapper.Search(ctx, subscriptionID+"."+resourceGroup, serverName) + if qErr != nil { + t.Fatalf("Search failed: %v", qErr) + } + if len(items) != 1 { + t.Errorf("Expected 1 item (nil names skipped), got %d", len(items)) + } + }) + + t.Run("GetWithInsufficientQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient(ctrl) + testClient := &testDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient{MockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient: mockClient} + wrapper := manual.NewDBforPostgreSQLFlexibleServerPrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, 
resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], serverName, true) + if qErr == nil { + t.Error("Expected error when query has only serverName") + } + }) + + t.Run("ErrorHandling", func(t *testing.T) { + expectedErr := errors.New("connection not found") + mockClient := mocks.NewMockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, serverName, connectionName).Return( + armpostgresqlflexibleservers.PrivateEndpointConnectionsClientGetResponse{}, expectedErr) + + testClient := &testDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient{MockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient: mockClient} + wrapper := manual.NewDBforPostgreSQLFlexibleServerPrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(serverName, connectionName) + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Fatal("Expected error when Get fails") + } + }) + + t.Run("PotentialLinks", func(t *testing.T) { + wrapper := manual.NewDBforPostgreSQLFlexibleServerPrivateEndpointConnection(nil, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + links := wrapper.PotentialLinks() + if !links[azureshared.DBforPostgreSQLFlexibleServer] { + t.Error("Expected PotentialLinks to include DBforPostgreSQLFlexibleServer") + } + if !links[azureshared.NetworkPrivateEndpoint] { + t.Error("Expected PotentialLinks to include NetworkPrivateEndpoint") + } + }) +} + +func createAzureDBforPostgreSQLFlexibleServerPrivateEndpointConnection(connectionName, privateEndpointID string) *armpostgresqlflexibleservers.PrivateEndpointConnection { + state := 
armpostgresqlflexibleservers.PrivateEndpointConnectionProvisioningStateSucceeded + conn := &armpostgresqlflexibleservers.PrivateEndpointConnection{ + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.DBforPostgreSQL/flexibleServers/test-pg-server/privateEndpointConnections/" + connectionName), + Name: new(connectionName), + Type: new("Microsoft.DBforPostgreSQL/flexibleServers/privateEndpointConnections"), + Properties: &armpostgresqlflexibleservers.PrivateEndpointConnectionProperties{ + ProvisioningState: &state, + }, + } + if privateEndpointID != "" { + conn.Properties.PrivateEndpoint = &armpostgresqlflexibleservers.PrivateEndpoint{ + ID: new(privateEndpointID), + } + } + return conn +} diff --git a/sources/azure/manual/dbforpostgresql-flexible-server_test.go b/sources/azure/manual/dbforpostgresql-flexible-server_test.go index b6278f77..beb4448c 100644 --- a/sources/azure/manual/dbforpostgresql-flexible-server_test.go +++ b/sources/azure/manual/dbforpostgresql-flexible-server_test.go @@ -5,7 +5,6 @@ import ( "errors" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresqlflexibleservers/v5" "go.uber.org/mock/gomock" @@ -327,7 +326,7 @@ func TestDBforPostgreSQLFlexibleServer(t *testing.T) { server2 := &armpostgresqlflexibleservers.Server{ Name: nil, // Server with nil name should be skipped Properties: &armpostgresqlflexibleservers.ServerProperties{ - Version: to.Ptr(armpostgresqlflexibleservers.PostgresMajorVersion("14")), + Version: new(armpostgresqlflexibleservers.PostgresMajorVersion("14")), }, } @@ -421,10 +420,10 @@ func TestDBforPostgreSQLFlexibleServer(t *testing.T) { geoBackupIdentityID := "/subscriptions/sub-id/resourceGroups/rg-id/providers/Microsoft.ManagedIdentity/userAssignedIdentities/geo-identity" server.Properties.DataEncryption = &armpostgresqlflexibleservers.DataEncryption{ - PrimaryKeyURI: to.Ptr(primaryKeyURI), - 
PrimaryUserAssignedIdentityID: to.Ptr(primaryIdentityID), - GeoBackupKeyURI: to.Ptr(geoBackupKeyURI), - GeoBackupUserAssignedIdentityID: to.Ptr(geoBackupIdentityID), + PrimaryKeyURI: new(primaryKeyURI), + PrimaryUserAssignedIdentityID: new(primaryIdentityID), + GeoBackupKeyURI: new(geoBackupKeyURI), + GeoBackupUserAssignedIdentityID: new(geoBackupIdentityID), } mockClient := mocks.NewMockPostgreSQLFlexibleServersClient(ctrl) @@ -526,8 +525,8 @@ func TestDBforPostgreSQLFlexibleServer(t *testing.T) { replicaServerName := "replica-server" sourceServerID := "/subscriptions/sub-id/resourceGroups/source-rg/providers/Microsoft.DBforPostgreSQL/flexibleServers/source-server" server := createAzurePostgreSQLFlexibleServer(replicaServerName, "", "") - server.Properties.SourceServerResourceID = to.Ptr(sourceServerID) - server.Properties.ReplicationRole = to.Ptr(armpostgresqlflexibleservers.ReplicationRoleAsyncReplica) + server.Properties.SourceServerResourceID = new(sourceServerID) + server.Properties.ReplicationRole = new(armpostgresqlflexibleservers.ReplicationRoleAsyncReplica) mockClient := mocks.NewMockPostgreSQLFlexibleServersClient(ctrl) mockClient.EXPECT().Get(ctx, resourceGroup, replicaServerName, nil).Return( @@ -608,32 +607,32 @@ func createAzurePostgreSQLFlexibleServer(serverName, subnetID, fqdn string) *arm serverID := "/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.DBforPostgreSQL/flexibleServers/" + serverName server := &armpostgresqlflexibleservers.Server{ - Name: to.Ptr(serverName), - ID: to.Ptr(serverID), - Location: to.Ptr("eastus"), + Name: new(serverName), + ID: new(serverID), + Location: new("eastus"), Properties: &armpostgresqlflexibleservers.ServerProperties{ - Version: to.Ptr(armpostgresqlflexibleservers.PostgresMajorVersion("14")), - State: to.Ptr(armpostgresqlflexibleservers.ServerStateReady), + Version: new(armpostgresqlflexibleservers.PostgresMajorVersion("14")), + State: 
new(armpostgresqlflexibleservers.ServerStateReady), }, SKU: &armpostgresqlflexibleservers.SKU{ - Name: to.Ptr("Standard_B1ms"), - Tier: to.Ptr(armpostgresqlflexibleservers.SKUTierBurstable), + Name: new("Standard_B1ms"), + Tier: new(armpostgresqlflexibleservers.SKUTierBurstable), }, Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, } // Add network configuration if subnet ID is provided if subnetID != "" { server.Properties.Network = &armpostgresqlflexibleservers.Network{ - DelegatedSubnetResourceID: to.Ptr(subnetID), + DelegatedSubnetResourceID: new(subnetID), } } // Add FQDN if provided if fqdn != "" { - server.Properties.FullyQualifiedDomainName = to.Ptr(fqdn) + server.Properties.FullyQualifiedDomainName = new(fqdn) } return server diff --git a/sources/azure/manual/documentdb-database-accounts.go b/sources/azure/manual/documentdb-database-accounts.go index 28b12c20..7901faa7 100644 --- a/sources/azure/manual/documentdb-database-accounts.go +++ b/sources/azure/manual/documentdb-database-accounts.go @@ -5,7 +5,7 @@ import ( "errors" "fmt" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos/v3" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" "github.com/overmindtech/cli/go/sdpcache" @@ -13,6 +13,7 @@ import ( "github.com/overmindtech/cli/sources/azure/clients" azureshared "github.com/overmindtech/cli/sources/azure/shared" "github.com/overmindtech/cli/sources/shared" + "github.com/overmindtech/cli/sources/stdlib" ) var DocumentDBDatabaseAccountsLookupByName = shared.NewItemTypeLookup("name", azureshared.DocumentDBDatabaseAccounts) @@ -289,6 +290,49 @@ func (s documentDBDatabaseAccountsWrapper) azureDocumentDBDatabaseAccountToSDPIt } } + // Link to stdlib for document endpoint and regional endpoints (DNS/HTTP) + linkedDNSHostnames := make(map[string]struct{}) + seenIPs := make(map[string]struct{}) + if 
account.Properties != nil && account.Properties.DocumentEndpoint != nil && *account.Properties.DocumentEndpoint != "" { + AppendURILinks(&sdpItem.LinkedItemQueries, *account.Properties.DocumentEndpoint, linkedDNSHostnames, seenIPs) + } + if account.Properties != nil { + for _, loc := range account.Properties.ReadLocations { + if loc != nil && loc.DocumentEndpoint != nil && *loc.DocumentEndpoint != "" { + AppendURILinks(&sdpItem.LinkedItemQueries, *loc.DocumentEndpoint, linkedDNSHostnames, seenIPs) + } + } + for _, loc := range account.Properties.WriteLocations { + if loc != nil && loc.DocumentEndpoint != nil && *loc.DocumentEndpoint != "" { + AppendURILinks(&sdpItem.LinkedItemQueries, *loc.DocumentEndpoint, linkedDNSHostnames, seenIPs) + } + } + for _, loc := range account.Properties.Locations { + if loc != nil && loc.DocumentEndpoint != nil && *loc.DocumentEndpoint != "" { + AppendURILinks(&sdpItem.LinkedItemQueries, *loc.DocumentEndpoint, linkedDNSHostnames, seenIPs) + } + } + // Link to stdlib.NetworkIP for IP rules (single IPv4 or CIDR) + if account.Properties.IPRules != nil { + for _, rule := range account.Properties.IPRules { + if rule != nil && rule.IPAddressOrRange != nil && *rule.IPAddressOrRange != "" { + val := *rule.IPAddressOrRange + if _, seen := seenIPs[val]; !seen { + seenIPs[val] = struct{}{} + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkIP.String(), + Method: sdp.QueryMethod_GET, + Query: val, + Scope: "global", + }, + }) + } + } + } + } + } + return sdpItem, nil } @@ -305,6 +349,9 @@ func (s documentDBDatabaseAccountsWrapper) PotentialLinks() map[shared.ItemType] azureshared.NetworkSubnet, azureshared.KeyVaultVault, azureshared.ManagedIdentityUserAssignedIdentity, + stdlib.NetworkIP, + stdlib.NetworkDNS, + stdlib.NetworkHTTP, ) } diff --git a/sources/azure/manual/documentdb-database-accounts_test.go b/sources/azure/manual/documentdb-database-accounts_test.go index 
4ebd8fb0..2c6a3e51 100644 --- a/sources/azure/manual/documentdb-database-accounts_test.go +++ b/sources/azure/manual/documentdb-database-accounts_test.go @@ -5,8 +5,7 @@ import ( "errors" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos/v3" "go.uber.org/mock/gomock" "github.com/overmindtech/cli/go/discovery" @@ -177,7 +176,7 @@ func TestDocumentDBDatabaseAccounts(t *testing.T) { account := &armcosmos.DatabaseAccountGetResults{ Name: nil, // No name field Properties: &armcosmos.DatabaseAccountGetProperties{ - ProvisioningState: to.Ptr("Succeeded"), + ProvisioningState: new("Succeeded"), }, } @@ -350,27 +349,27 @@ func TestDocumentDBDatabaseAccounts(t *testing.T) { // createAzureCosmosDBAccount creates a mock Azure Cosmos DB account with all linked resources func createAzureCosmosDBAccount(accountName, provisioningState, subscriptionID, resourceGroup string) *armcosmos.DatabaseAccountGetResults { return &armcosmos.DatabaseAccountGetResults{ - Name: to.Ptr(accountName), - Location: to.Ptr("eastus"), + Name: new(accountName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), - "project": to.Ptr("testing"), + "env": new("test"), + "project": new("testing"), }, Properties: &armcosmos.DatabaseAccountGetProperties{ - ProvisioningState: to.Ptr(provisioningState), + ProvisioningState: new(provisioningState), // Private Endpoint Connections PrivateEndpointConnections: []*armcosmos.PrivateEndpointConnection{ { Properties: &armcosmos.PrivateEndpointConnectionProperties{ PrivateEndpoint: &armcosmos.PrivateEndpointProperty{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/privateEndpoints/test-private-endpoint"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + 
"/providers/Microsoft.Network/privateEndpoints/test-private-endpoint"), }, }, }, { Properties: &armcosmos.PrivateEndpointConnectionProperties{ PrivateEndpoint: &armcosmos.PrivateEndpointProperty{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/different-rg/providers/Microsoft.Network/privateEndpoints/test-private-endpoint-diff-rg"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/different-rg/providers/Microsoft.Network/privateEndpoints/test-private-endpoint-diff-rg"), }, }, }, @@ -378,17 +377,17 @@ func createAzureCosmosDBAccount(accountName, provisioningState, subscriptionID, // Virtual Network Rules VirtualNetworkRules: []*armcosmos.VirtualNetworkRule{ { - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/test-subnet"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/test-subnet"), }, { - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/different-rg/providers/Microsoft.Network/virtualNetworks/test-vnet-diff-rg/subnets/test-subnet-diff-rg"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/different-rg/providers/Microsoft.Network/virtualNetworks/test-vnet-diff-rg/subnets/test-subnet-diff-rg"), }, }, // Key Vault Key URI - KeyVaultKeyURI: to.Ptr("https://test-keyvault.vault.azure.net/keys/test-key/version"), + KeyVaultKeyURI: new("https://test-keyvault.vault.azure.net/keys/test-key/version"), }, Identity: &armcosmos.ManagedServiceIdentity{ - Type: to.Ptr(armcosmos.ResourceIdentityTypeUserAssigned), + Type: new(armcosmos.ResourceIdentityTypeUserAssigned), UserAssignedIdentities: map[string]*armcosmos.Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties{ "/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + 
"/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-identity": {}, "/subscriptions/" + subscriptionID + "/resourceGroups/identity-rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-identity-diff-rg": {}, @@ -400,13 +399,13 @@ func createAzureCosmosDBAccount(accountName, provisioningState, subscriptionID, // createAzureCosmosDBAccountMinimal creates a minimal mock Azure Cosmos DB account without linked resources func createAzureCosmosDBAccountMinimal(accountName, provisioningState string) *armcosmos.DatabaseAccountGetResults { return &armcosmos.DatabaseAccountGetResults{ - Name: to.Ptr(accountName), - Location: to.Ptr("eastus"), + Name: new(accountName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armcosmos.DatabaseAccountGetProperties{ - ProvisioningState: to.Ptr(provisioningState), + ProvisioningState: new(provisioningState), }, } } @@ -414,19 +413,19 @@ func createAzureCosmosDBAccountMinimal(accountName, provisioningState string) *a // createAzureCosmosDBAccountCrossRG creates a mock Azure Cosmos DB account with linked resources in different resource groups func createAzureCosmosDBAccountCrossRG(accountName, provisioningState, subscriptionID, resourceGroup string) *armcosmos.DatabaseAccountGetResults { return &armcosmos.DatabaseAccountGetResults{ - Name: to.Ptr(accountName), - Location: to.Ptr("eastus"), + Name: new(accountName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armcosmos.DatabaseAccountGetProperties{ - ProvisioningState: to.Ptr(provisioningState), + ProvisioningState: new(provisioningState), // Private Endpoint in different resource group PrivateEndpointConnections: []*armcosmos.PrivateEndpointConnection{ { Properties: &armcosmos.PrivateEndpointConnectionProperties{ PrivateEndpoint: &armcosmos.PrivateEndpointProperty{ - ID: to.Ptr("/subscriptions/" + subscriptionID + 
"/resourceGroups/different-rg/providers/Microsoft.Network/privateEndpoints/test-pe-diff-rg"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/different-rg/providers/Microsoft.Network/privateEndpoints/test-pe-diff-rg"), }, }, }, @@ -434,12 +433,12 @@ func createAzureCosmosDBAccountCrossRG(accountName, provisioningState, subscript // Subnet in different resource group VirtualNetworkRules: []*armcosmos.VirtualNetworkRule{ { - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/different-rg/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/test-subnet"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/different-rg/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/test-subnet"), }, }, }, Identity: &armcosmos.ManagedServiceIdentity{ - Type: to.Ptr(armcosmos.ResourceIdentityTypeUserAssigned), + Type: new(armcosmos.ResourceIdentityTypeUserAssigned), UserAssignedIdentities: map[string]*armcosmos.Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties{ "/subscriptions/" + subscriptionID + "/resourceGroups/identity-rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-identity": {}, }, diff --git a/sources/azure/manual/documentdb-private-endpoint-connection.go b/sources/azure/manual/documentdb-private-endpoint-connection.go new file mode 100644 index 00000000..8a2d5efa --- /dev/null +++ b/sources/azure/manual/documentdb-private-endpoint-connection.go @@ -0,0 +1,236 @@ +package manual + +import ( + "context" + "errors" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos/v3" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" +) + +var 
DocumentDBPrivateEndpointConnectionLookupByName = shared.NewItemTypeLookup("name", azureshared.DocumentDBPrivateEndpointConnection) + +type documentDBPrivateEndpointConnectionWrapper struct { + client clients.DocumentDBPrivateEndpointConnectionsClient + + *azureshared.MultiResourceGroupBase +} + +// NewDocumentDBPrivateEndpointConnection returns a SearchableWrapper for Azure Cosmos DB (DocumentDB) database account private endpoint connections. +func NewDocumentDBPrivateEndpointConnection(client clients.DocumentDBPrivateEndpointConnectionsClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.SearchableWrapper { + return &documentDBPrivateEndpointConnectionWrapper{ + client: client, + MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_DATABASE, + azureshared.DocumentDBPrivateEndpointConnection, + ), + } +} + +func (s documentDBPrivateEndpointConnectionWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 2 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Get requires 2 query parts: accountName and privateEndpointConnectionName", + Scope: scope, + ItemType: s.Type(), + } + } + accountName := queryParts[0] + connectionName := queryParts[1] + + rgScope, err := s.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + resp, err := s.client.Get(ctx, rgScope.ResourceGroup, accountName, connectionName) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + item, sdpErr := s.azurePrivateEndpointConnectionToSDPItem(&resp.PrivateEndpointConnection, accountName, connectionName, scope) + if sdpErr != nil { + return nil, sdpErr + } + return item, nil +} + +func (s documentDBPrivateEndpointConnectionWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + 
DocumentDBDatabaseAccountsLookupByName, + DocumentDBPrivateEndpointConnectionLookupByName, + } +} + +func (s documentDBPrivateEndpointConnectionWrapper) Search(ctx context.Context, scope string, queryParts ...string) ([]*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 1 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Search requires 1 query part: accountName", + Scope: scope, + ItemType: s.Type(), + } + } + accountName := queryParts[0] + + rgScope, err := s.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + pager := s.client.ListByDatabaseAccount(ctx, rgScope.ResourceGroup, accountName) + + var items []*sdp.Item + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + for _, conn := range page.Value { + if conn == nil || conn.Name == nil { + continue + } + + item, sdpErr := s.azurePrivateEndpointConnectionToSDPItem(conn, accountName, *conn.Name, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + + return items, nil +} + +func (s documentDBPrivateEndpointConnectionWrapper) SearchStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string, queryParts ...string) { + if len(queryParts) < 1 { + stream.SendError(azureshared.QueryError(errors.New("Search requires 1 query part: accountName"), scope, s.Type())) + return + } + accountName := queryParts[0] + + rgScope, err := s.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, s.Type())) + return + } + pager := s.client.ListByDatabaseAccount(ctx, rgScope.ResourceGroup, accountName) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, s.Type())) + return + } + for _, conn := range page.Value { + if conn 
== nil || conn.Name == nil { + continue + } + item, sdpErr := s.azurePrivateEndpointConnectionToSDPItem(conn, accountName, *conn.Name, scope) + if sdpErr != nil { + stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) + stream.SendItem(item) + } + } +} + +func (s documentDBPrivateEndpointConnectionWrapper) SearchLookups() []sources.ItemTypeLookups { + return []sources.ItemTypeLookups{ + { + DocumentDBDatabaseAccountsLookupByName, + }, + } +} + +func (s documentDBPrivateEndpointConnectionWrapper) PotentialLinks() map[shared.ItemType]bool { + return map[shared.ItemType]bool{ + azureshared.DocumentDBDatabaseAccounts: true, + azureshared.NetworkPrivateEndpoint: true, + } +} + +func (s documentDBPrivateEndpointConnectionWrapper) azurePrivateEndpointConnectionToSDPItem(conn *armcosmos.PrivateEndpointConnection, accountName, connectionName, scope string) (*sdp.Item, *sdp.QueryError) { + attributes, err := shared.ToAttributesWithExclude(conn) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + err = attributes.Set("uniqueAttr", shared.CompositeLookupKey(accountName, connectionName)) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + sdpItem := &sdp.Item{ + Type: azureshared.DocumentDBPrivateEndpointConnection.String(), + UniqueAttribute: "uniqueAttr", + Attributes: attributes, + Scope: scope, + } + + // Health from provisioning state (Cosmos uses *string, not an enum) + if conn.Properties != nil && conn.Properties.ProvisioningState != nil { + state := strings.ToLower(*conn.Properties.ProvisioningState) + switch state { + case "succeeded": + sdpItem.Health = sdp.Health_HEALTH_OK.Enum() + case "creating", "deleting": + sdpItem.Health = sdp.Health_HEALTH_PENDING.Enum() + case "failed": + sdpItem.Health = sdp.Health_HEALTH_ERROR.Enum() + default: + sdpItem.Health = sdp.Health_HEALTH_UNKNOWN.Enum() + } + } + + // Link to parent DocumentDB Database Account + 
sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.DocumentDBDatabaseAccounts.String(), + Method: sdp.QueryMethod_GET, + Query: accountName, + Scope: scope, + }, + }) + + // Link to Network Private Endpoint when present (may be in different resource group) + if conn.Properties != nil && conn.Properties.PrivateEndpoint != nil && conn.Properties.PrivateEndpoint.ID != nil { + peID := *conn.Properties.PrivateEndpoint.ID + peName := azureshared.ExtractResourceName(peID) + if peName != "" { + linkedScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(peID); extractedScope != "" { + linkedScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkPrivateEndpoint.String(), + Method: sdp.QueryMethod_GET, + Query: peName, + Scope: linkedScope, + }, + }) + } + } + + return sdpItem, nil +} + +func (s documentDBPrivateEndpointConnectionWrapper) IAMPermissions() []string { + return []string{ + "Microsoft.DocumentDB/databaseAccounts/privateEndpointConnections/read", + } +} + +func (s documentDBPrivateEndpointConnectionWrapper) PredefinedRole() string { + return "Reader" +} diff --git a/sources/azure/manual/documentdb-private-endpoint-connection_test.go b/sources/azure/manual/documentdb-private-endpoint-connection_test.go new file mode 100644 index 00000000..ed53de9f --- /dev/null +++ b/sources/azure/manual/documentdb-private-endpoint-connection_test.go @@ -0,0 +1,320 @@ +package manual_test + +import ( + "context" + "errors" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos/v3" + "go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + 
"github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + "github.com/overmindtech/cli/sources/shared" +) + +type mockDocumentDBPrivateEndpointConnectionsPager struct { + pages []armcosmos.PrivateEndpointConnectionsClientListByDatabaseAccountResponse + index int +} + +func (m *mockDocumentDBPrivateEndpointConnectionsPager) More() bool { + return m.index < len(m.pages) +} + +func (m *mockDocumentDBPrivateEndpointConnectionsPager) NextPage(ctx context.Context) (armcosmos.PrivateEndpointConnectionsClientListByDatabaseAccountResponse, error) { + if m.index >= len(m.pages) { + return armcosmos.PrivateEndpointConnectionsClientListByDatabaseAccountResponse{}, errors.New("no more pages") + } + page := m.pages[m.index] + m.index++ + return page, nil +} + +type testDocumentDBPrivateEndpointConnectionsClient struct { + *mocks.MockDocumentDBPrivateEndpointConnectionsClient + pager clients.DocumentDBPrivateEndpointConnectionsPager +} + +func (t *testDocumentDBPrivateEndpointConnectionsClient) ListByDatabaseAccount(ctx context.Context, resourceGroupName, accountName string) clients.DocumentDBPrivateEndpointConnectionsPager { + return t.pager +} + +func TestDocumentDBPrivateEndpointConnection(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + accountName := "test-cosmos-account" + connectionName := "test-pec" + + t.Run("Get", func(t *testing.T) { + conn := createAzureDocumentDBPrivateEndpointConnection(connectionName, "") + + mockClient := mocks.NewMockDocumentDBPrivateEndpointConnectionsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, accountName, connectionName).Return( + armcosmos.PrivateEndpointConnectionsClientGetResponse{ + PrivateEndpointConnection: *conn, + }, nil) + + testClient := 
&testDocumentDBPrivateEndpointConnectionsClient{MockDocumentDBPrivateEndpointConnectionsClient: mockClient} + wrapper := manual.NewDocumentDBPrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(accountName, connectionName) + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.DocumentDBPrivateEndpointConnection.String() { + t.Errorf("Expected type %s, got %s", azureshared.DocumentDBPrivateEndpointConnection, sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", sdpItem.GetUniqueAttribute()) + } + + if sdpItem.UniqueAttributeValue() != shared.CompositeLookupKey(accountName, connectionName) { + t.Errorf("Expected unique attribute value %s, got %s", shared.CompositeLookupKey(accountName, connectionName), sdpItem.UniqueAttributeValue()) + } + + if sdpItem.GetScope() != subscriptionID+"."+resourceGroup { + t.Errorf("Expected scope %s, got %s", subscriptionID+"."+resourceGroup, sdpItem.GetScope()) + } + + if err := sdpItem.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + + t.Run("StaticTests", func(t *testing.T) { + linkedQueries := sdpItem.GetLinkedItemQueries() + if len(linkedQueries) < 1 { + t.Fatalf("Expected at least 1 linked query, got: %d", len(linkedQueries)) + } + + foundDocumentDBAccount := false + for _, lq := range linkedQueries { + if lq.GetQuery().GetType() == azureshared.DocumentDBDatabaseAccounts.String() { + foundDocumentDBAccount = true + if lq.GetQuery().GetMethod() != sdp.QueryMethod_GET { + t.Errorf("Expected DocumentDBDatabaseAccounts link method GET, got %v", lq.GetQuery().GetMethod()) + } + if 
lq.GetQuery().GetQuery() != accountName { + t.Errorf("Expected DocumentDBDatabaseAccounts query %s, got %s", accountName, lq.GetQuery().GetQuery()) + } + } + } + if !foundDocumentDBAccount { + t.Error("Expected linked query to DocumentDBDatabaseAccounts") + } + }) + }) + + t.Run("Get_WithPrivateEndpointLink", func(t *testing.T) { + peID := "/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/privateEndpoints/test-pe" + conn := createAzureDocumentDBPrivateEndpointConnection(connectionName, peID) + + mockClient := mocks.NewMockDocumentDBPrivateEndpointConnectionsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, accountName, connectionName).Return( + armcosmos.PrivateEndpointConnectionsClientGetResponse{ + PrivateEndpointConnection: *conn, + }, nil) + + testClient := &testDocumentDBPrivateEndpointConnectionsClient{MockDocumentDBPrivateEndpointConnectionsClient: mockClient} + wrapper := manual.NewDocumentDBPrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(accountName, connectionName) + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + foundPrivateEndpoint := false + for _, lq := range sdpItem.GetLinkedItemQueries() { + if lq.GetQuery().GetType() == azureshared.NetworkPrivateEndpoint.String() { + foundPrivateEndpoint = true + if lq.GetQuery().GetQuery() != "test-pe" { + t.Errorf("Expected NetworkPrivateEndpoint query 'test-pe', got %s", lq.GetQuery().GetQuery()) + } + break + } + } + if !foundPrivateEndpoint { + t.Error("Expected linked query to NetworkPrivateEndpoint when PrivateEndpoint ID is set") + } + }) + + t.Run("GetWithInsufficientQueryParts", func(t *testing.T) { + mockClient := 
mocks.NewMockDocumentDBPrivateEndpointConnectionsClient(ctrl) + testClient := &testDocumentDBPrivateEndpointConnectionsClient{MockDocumentDBPrivateEndpointConnectionsClient: mockClient} + + wrapper := manual.NewDocumentDBPrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], accountName, true) + if qErr == nil { + t.Error("Expected error when providing insufficient query parts, but got nil") + } + }) + + t.Run("Search", func(t *testing.T) { + conn1 := createAzureDocumentDBPrivateEndpointConnection("pec-1", "") + conn2 := createAzureDocumentDBPrivateEndpointConnection("pec-2", "") + + mockClient := mocks.NewMockDocumentDBPrivateEndpointConnectionsClient(ctrl) + mockPager := &mockDocumentDBPrivateEndpointConnectionsPager{ + pages: []armcosmos.PrivateEndpointConnectionsClientListByDatabaseAccountResponse{ + { + PrivateEndpointConnectionListResult: armcosmos.PrivateEndpointConnectionListResult{ + Value: []*armcosmos.PrivateEndpointConnection{conn1, conn2}, + }, + }, + }, + } + + testClient := &testDocumentDBPrivateEndpointConnectionsClient{ + MockDocumentDBPrivateEndpointConnectionsClient: mockClient, + pager: mockPager, + } + + wrapper := manual.NewDocumentDBPrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, wrapper.Scopes()[0], accountName, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 2 { + t.Fatalf("Expected 2 items, got: %d", len(sdpItems)) + } + + for _, item := range sdpItems { + 
if err := item.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + if item.GetType() != azureshared.DocumentDBPrivateEndpointConnection.String() { + t.Errorf("Expected type %s, got %s", azureshared.DocumentDBPrivateEndpointConnection, item.GetType()) + } + } + }) + + t.Run("Search_NilNameSkipped", func(t *testing.T) { + validConn := createAzureDocumentDBPrivateEndpointConnection("valid-pec", "") + + mockClient := mocks.NewMockDocumentDBPrivateEndpointConnectionsClient(ctrl) + mockPager := &mockDocumentDBPrivateEndpointConnectionsPager{ + pages: []armcosmos.PrivateEndpointConnectionsClientListByDatabaseAccountResponse{ + { + PrivateEndpointConnectionListResult: armcosmos.PrivateEndpointConnectionListResult{ + Value: []*armcosmos.PrivateEndpointConnection{ + {Name: nil}, + validConn, + }, + }, + }, + }, + } + + testClient := &testDocumentDBPrivateEndpointConnectionsClient{ + MockDocumentDBPrivateEndpointConnectionsClient: mockClient, + pager: mockPager, + } + + wrapper := manual.NewDocumentDBPrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, wrapper.Scopes()[0], accountName, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 1 { + t.Fatalf("Expected 1 item (nil name skipped), got: %d", len(sdpItems)) + } + if sdpItems[0].UniqueAttributeValue() != shared.CompositeLookupKey(accountName, "valid-pec") { + t.Errorf("Expected unique value %s, got %s", shared.CompositeLookupKey(accountName, "valid-pec"), sdpItems[0].UniqueAttributeValue()) + } + }) + + t.Run("Search_InvalidQueryParts", func(t *testing.T) { + mockClient := 
mocks.NewMockDocumentDBPrivateEndpointConnectionsClient(ctrl) + testClient := &testDocumentDBPrivateEndpointConnectionsClient{MockDocumentDBPrivateEndpointConnectionsClient: mockClient} + + wrapper := manual.NewDocumentDBPrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + _, qErr := wrapper.Search(ctx, wrapper.Scopes()[0]) + if qErr == nil { + t.Error("Expected error when providing no query parts, but got nil") + } + }) + + t.Run("ErrorHandling_Get", func(t *testing.T) { + expectedErr := errors.New("private endpoint connection not found") + + mockClient := mocks.NewMockDocumentDBPrivateEndpointConnectionsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, accountName, "nonexistent-pec").Return( + armcosmos.PrivateEndpointConnectionsClientGetResponse{}, expectedErr) + + testClient := &testDocumentDBPrivateEndpointConnectionsClient{MockDocumentDBPrivateEndpointConnectionsClient: mockClient} + wrapper := manual.NewDocumentDBPrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(accountName, "nonexistent-pec") + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when getting non-existent private endpoint connection, but got nil") + } + }) + + t.Run("PotentialLinks", func(t *testing.T) { + wrapper := manual.NewDocumentDBPrivateEndpointConnection(nil, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + links := wrapper.PotentialLinks() + if !links[azureshared.DocumentDBDatabaseAccounts] { + t.Error("Expected DocumentDBDatabaseAccounts in PotentialLinks") + } + if !links[azureshared.NetworkPrivateEndpoint] { + t.Error("Expected NetworkPrivateEndpoint in PotentialLinks") + } + }) +} 
+ +func createAzureDocumentDBPrivateEndpointConnection(connectionName, privateEndpointID string) *armcosmos.PrivateEndpointConnection { + conn := &armcosmos.PrivateEndpointConnection{ + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.DocumentDB/databaseAccounts/test-cosmos-account/privateEndpointConnections/" + connectionName), + Name: new(connectionName), + Type: new("Microsoft.DocumentDB/databaseAccounts/privateEndpointConnections"), + Properties: &armcosmos.PrivateEndpointConnectionProperties{ + ProvisioningState: new("Succeeded"), + PrivateLinkServiceConnectionState: &armcosmos.PrivateLinkServiceConnectionStateProperty{ + Status: new("Approved"), + }, + }, + } + if privateEndpointID != "" { + conn.Properties.PrivateEndpoint = &armcosmos.PrivateEndpointProperty{ + ID: new(privateEndpointID), + } + } + return conn +} diff --git a/sources/azure/manual/keyvault-key.go b/sources/azure/manual/keyvault-key.go new file mode 100644 index 00000000..768fb01a --- /dev/null +++ b/sources/azure/manual/keyvault-key.go @@ -0,0 +1,294 @@ +package manual + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault/v2" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" + "github.com/overmindtech/cli/sources/stdlib" +) + +var KeyVaultKeyLookupByName = shared.NewItemTypeLookup("name", azureshared.KeyVaultKey) + +type keyvaultKeyWrapper struct { + client clients.KeysClient + + *azureshared.MultiResourceGroupBase +} + +func NewKeyVaultKey(client clients.KeysClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.SearchableWrapper { + return &keyvaultKeyWrapper{ + client: client, + 
MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_SECURITY, + azureshared.KeyVaultKey, + ), + } +} + +func (k keyvaultKeyWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 2 { + return nil, azureshared.QueryError(errors.New("Get requires 2 query parts: vaultName and keyName"), scope, k.Type()) + } + + vaultName := queryParts[0] + if vaultName == "" { + return nil, azureshared.QueryError(errors.New("vaultName cannot be empty"), scope, k.Type()) + } + + keyName := queryParts[1] + if keyName == "" { + return nil, azureshared.QueryError(errors.New("keyName cannot be empty"), scope, k.Type()) + } + + rgScope, err := k.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, k.Type()) + } + resp, err := k.client.Get(ctx, rgScope.ResourceGroup, vaultName, keyName, nil) + if err != nil { + return nil, azureshared.QueryError(err, scope, k.Type()) + } + + return k.azureKeyToSDPItem(&resp.Key, vaultName, keyName, scope) +} + +func (k keyvaultKeyWrapper) Search(ctx context.Context, scope string, queryParts ...string) ([]*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 1 { + return nil, azureshared.QueryError(errors.New("Search requires 1 query part: vaultName"), scope, k.Type()) + } + + vaultName := queryParts[0] + if vaultName == "" { + return nil, azureshared.QueryError(errors.New("vaultName cannot be empty"), scope, k.Type()) + } + + rgScope, err := k.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, k.Type()) + } + pager := k.client.NewListPager(rgScope.ResourceGroup, vaultName, nil) + + var items []*sdp.Item + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, azureshared.QueryError(err, scope, k.Type()) + } + for _, key := range page.Value { + if key.Name == nil { + continue + } + var 
keyVaultName string + if key.ID != nil && *key.ID != "" { + vaultParams := azureshared.ExtractPathParamsFromResourceID(*key.ID, []string{"vaults"}) + if len(vaultParams) > 0 { + keyVaultName = vaultParams[0] + } + } + if keyVaultName == "" { + keyVaultName = vaultName + } + item, sdpErr := k.azureKeyToSDPItem(key, keyVaultName, *key.Name, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + + return items, nil +} + +func (k keyvaultKeyWrapper) SearchStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string, queryParts ...string) { + if len(queryParts) < 1 { + stream.SendError(azureshared.QueryError(errors.New("Search requires 1 query part: vaultName"), scope, k.Type())) + return + } + vaultName := queryParts[0] + if vaultName == "" { + stream.SendError(azureshared.QueryError(errors.New("vaultName cannot be empty"), scope, k.Type())) + return + } + + rgScope, err := k.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, k.Type())) + return + } + pager := k.client.NewListPager(rgScope.ResourceGroup, vaultName, nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, k.Type())) + return + } + for _, key := range page.Value { + if key.Name == nil { + continue + } + var keyVaultName string + if key.ID != nil && *key.ID != "" { + vaultParams := azureshared.ExtractPathParamsFromResourceID(*key.ID, []string{"vaults"}) + if len(vaultParams) > 0 { + keyVaultName = vaultParams[0] + } + } + if keyVaultName == "" { + keyVaultName = vaultName + } + item, sdpErr := k.azureKeyToSDPItem(key, keyVaultName, *key.Name, scope) + if sdpErr != nil { + stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) + stream.SendItem(item) + } + } +} + +func (k keyvaultKeyWrapper) azureKeyToSDPItem(key 
*armkeyvault.Key, vaultName, keyName, scope string) (*sdp.Item, *sdp.QueryError) { + attributes, err := shared.ToAttributesWithExclude(key, "tags") + if err != nil { + return nil, azureshared.QueryError(err, scope, k.Type()) + } + + if key.Name == nil { + return nil, azureshared.QueryError(errors.New("key name is nil"), scope, k.Type()) + } + + err = attributes.Set("uniqueAttr", shared.CompositeLookupKey(vaultName, keyName)) + if err != nil { + return nil, azureshared.QueryError(err, scope, k.Type()) + } + + sdpItem := &sdp.Item{ + Type: azureshared.KeyVaultKey.String(), + UniqueAttribute: "uniqueAttr", + Attributes: attributes, + Scope: scope, + Tags: azureshared.ConvertAzureTags(key.Tags), + } + + if key.ID != nil && *key.ID != "" { + vaultParams := azureshared.ExtractPathParamsFromResourceID(*key.ID, []string{"vaults"}) + if len(vaultParams) > 0 { + extractedVaultName := vaultParams[0] + if extractedVaultName != "" { + linkedScope := azureshared.ExtractScopeFromResourceID(*key.ID) + if linkedScope == "" { + linkedScope = scope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.KeyVaultVault.String(), + Method: sdp.QueryMethod_GET, + Query: extractedVaultName, + Scope: linkedScope, + }, + }) + } + } + } + + var linkedDNSName string + if key.Properties != nil && key.Properties.KeyURI != nil && *key.Properties.KeyURI != "" { + keyURI := *key.Properties.KeyURI + dnsName := azureshared.ExtractDNSFromURL(keyURI) + if dnsName != "" { + linkedDNSName = dnsName + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkDNS.String(), + Method: sdp.QueryMethod_SEARCH, + Query: dnsName, + Scope: "global", + }, + }) + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkHTTP.String(), + Method: sdp.QueryMethod_SEARCH, + Query: keyURI, + Scope: 
"global", + }, + }) + } + + if key.Properties != nil && key.Properties.KeyURIWithVersion != nil && *key.Properties.KeyURIWithVersion != "" { + keyURIWithVersion := *key.Properties.KeyURIWithVersion + dnsName := azureshared.ExtractDNSFromURL(keyURIWithVersion) + if dnsName != "" && dnsName != linkedDNSName { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkDNS.String(), + Method: sdp.QueryMethod_SEARCH, + Query: dnsName, + Scope: "global", + }, + }) + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkHTTP.String(), + Method: sdp.QueryMethod_SEARCH, + Query: keyURIWithVersion, + Scope: "global", + }, + }) + } + + return sdpItem, nil +} + +func (k keyvaultKeyWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + KeyVaultVaultLookupByName, // First key: vault name (queryParts[0]) + KeyVaultKeyLookupByName, // Second key: key name (queryParts[1]) + } +} + +func (k keyvaultKeyWrapper) SearchLookups() []sources.ItemTypeLookups { + return []sources.ItemTypeLookups{ + { + KeyVaultVaultLookupByName, + }, + } +} + +func (k keyvaultKeyWrapper) TerraformMappings() []*sdp.TerraformMapping { + return []*sdp.TerraformMapping{ + { + TerraformMethod: sdp.QueryMethod_SEARCH, + TerraformQueryMap: "azurerm_key_vault_key.id", + }, + } +} + +func (k keyvaultKeyWrapper) PotentialLinks() map[shared.ItemType]bool { + return shared.NewItemTypesSet( + azureshared.KeyVaultVault, + stdlib.NetworkDNS, + stdlib.NetworkHTTP, + ) +} + +func (k keyvaultKeyWrapper) IAMPermissions() []string { + return []string{ + "Microsoft.KeyVault/vaults/keys/read", + } +} + +func (k keyvaultKeyWrapper) PredefinedRole() string { + return "Reader" +} diff --git a/sources/azure/manual/keyvault-key_test.go b/sources/azure/manual/keyvault-key_test.go new file mode 100644 index 00000000..7c5462e2 --- /dev/null +++ 
b/sources/azure/manual/keyvault-key_test.go @@ -0,0 +1,501 @@ +package manual_test + +import ( + "context" + "errors" + "fmt" + "slices" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault/v2" + "go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + "github.com/overmindtech/cli/sources/shared" + "github.com/overmindtech/cli/sources/stdlib" +) + + +type mockKeysPager struct { + pages []armkeyvault.KeysClientListResponse + index int +} + +func (m *mockKeysPager) More() bool { + return m.index < len(m.pages) +} + +func (m *mockKeysPager) NextPage(ctx context.Context) (armkeyvault.KeysClientListResponse, error) { + if m.index >= len(m.pages) { + return armkeyvault.KeysClientListResponse{}, errors.New("no more pages") + } + page := m.pages[m.index] + m.index++ + return page, nil +} + +type errorKeysPager struct{} + +func (e *errorKeysPager) More() bool { return true } + +func (e *errorKeysPager) NextPage(ctx context.Context) (armkeyvault.KeysClientListResponse, error) { + return armkeyvault.KeysClientListResponse{}, errors.New("pager error") +} + +type testKeysClient struct { + *mocks.MockKeysClient + pager clients.KeysPager +} + +func (t *testKeysClient) NewListPager(resourceGroupName, vaultName string, options *armkeyvault.KeysClientListOptions) clients.KeysPager { + t.MockKeysClient.NewListPager(resourceGroupName, vaultName, options) + return t.pager +} + +func TestKeyVaultKey(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + vaultName 
:= "test-keyvault" + keyName := "test-key" + + t.Run("Get", func(t *testing.T) { + key := createAzureKey(keyName, subscriptionID, resourceGroup, vaultName) + + mockClient := mocks.NewMockKeysClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, vaultName, keyName, nil).Return( + armkeyvault.KeysClientGetResponse{ + Key: *key, + }, nil) + + wrapper := manual.NewKeyVaultKey(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := vaultName + shared.QuerySeparator + keyName + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.KeyVaultKey.String() { + t.Errorf("Expected type %s, got %s", azureshared.KeyVaultKey, sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", sdpItem.GetUniqueAttribute()) + } + + expectedUniqueAttrValue := shared.CompositeLookupKey(vaultName, keyName) + if sdpItem.UniqueAttributeValue() != expectedUniqueAttrValue { + t.Errorf("Expected unique attribute value %s, got %s", expectedUniqueAttrValue, sdpItem.UniqueAttributeValue()) + } + + if sdpItem.GetScope() != subscriptionID+"."+resourceGroup { + t.Errorf("Expected scope %s, got %s", subscriptionID+"."+resourceGroup, sdpItem.GetScope()) + } + + if err := sdpItem.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + + t.Run("StaticTests", func(t *testing.T) { + queryTests := shared.QueryTests{ + { + ExpectedType: azureshared.KeyVaultVault.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: vaultName, + ExpectedScope: subscriptionID + "." 
+ resourceGroup, + }, { + ExpectedType: stdlib.NetworkDNS.String(), + ExpectedMethod: sdp.QueryMethod_SEARCH, + ExpectedQuery: vaultName + ".vault.azure.net", + ExpectedScope: "global", + }, { + ExpectedType: stdlib.NetworkHTTP.String(), + ExpectedMethod: sdp.QueryMethod_SEARCH, + ExpectedQuery: fmt.Sprintf("https://%s.vault.azure.net/keys/%s", vaultName, keyName), + ExpectedScope: "global", + }} + + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("Get_InvalidQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockKeysClient(ctrl) + + wrapper := manual.NewKeyVaultKey(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], vaultName, true) + if qErr == nil { + t.Error("Expected error when providing insufficient query parts, but got nil") + } + }) + + t.Run("Get_EmptyVaultName", func(t *testing.T) { + mockClient := mocks.NewMockKeysClient(ctrl) + + wrapper := manual.NewKeyVaultKey(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.QuerySeparator + keyName + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when vault name is empty, but got nil") + } + }) + + t.Run("Get_EmptyKeyName", func(t *testing.T) { + mockClient := mocks.NewMockKeysClient(ctrl) + + wrapper := manual.NewKeyVaultKey(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := vaultName + shared.QuerySeparator + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when key name is empty, but got 
nil") + } + }) + + t.Run("Get_NoName", func(t *testing.T) { + key := &armkeyvault.Key{ + Name: nil, + } + + mockClient := mocks.NewMockKeysClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, vaultName, keyName, nil).Return( + armkeyvault.KeysClientGetResponse{ + Key: *key, + }, nil) + + wrapper := manual.NewKeyVaultKey(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := vaultName + shared.QuerySeparator + keyName + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when key has no name, but got nil") + } + }) + + t.Run("Get_NoLinkedResources", func(t *testing.T) { + key := createAzureKeyMinimal(keyName) + + mockClient := mocks.NewMockKeysClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, vaultName, keyName, nil).Return( + armkeyvault.KeysClientGetResponse{ + Key: *key, + }, nil) + + wrapper := manual.NewKeyVaultKey(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := vaultName + shared.QuerySeparator + keyName + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if len(sdpItem.GetLinkedItemQueries()) != 0 { + t.Errorf("Expected no linked item queries, got %d", len(sdpItem.GetLinkedItemQueries())) + } + }) + + t.Run("Search", func(t *testing.T) { + key1 := createAzureKey("key-1", subscriptionID, resourceGroup, vaultName) + key2 := createAzureKey("key-2", subscriptionID, resourceGroup, vaultName) + + mockPager := &mockKeysPager{ + pages: []armkeyvault.KeysClientListResponse{ + { + KeyListResult: armkeyvault.KeyListResult{ + Value: []*armkeyvault.Key{ + {ID: key1.ID, Name: key1.Name, Type: key1.Type, Properties: 
key1.Properties, Tags: key1.Tags}, + {ID: key2.ID, Name: key2.Name, Type: key2.Type, Properties: key2.Properties, Tags: key2.Tags}, + }, + }, + }, + }, + } + + mockClient := mocks.NewMockKeysClient(ctrl) + mockClient.EXPECT().NewListPager(resourceGroup, vaultName, nil).Return(mockPager) + + testClient := &testKeysClient{ + MockKeysClient: mockClient, + pager: mockPager, + } + + wrapper := manual.NewKeyVaultKey(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, wrapper.Scopes()[0], vaultName, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 2 { + t.Fatalf("Expected 2 items, got: %d", len(sdpItems)) + } + + for _, item := range sdpItems { + if err := item.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + if item.GetType() != azureshared.KeyVaultKey.String() { + t.Errorf("Expected type %s, got %s", azureshared.KeyVaultKey, item.GetType()) + } + } + }) + + t.Run("Search_InvalidQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockKeysClient(ctrl) + testClient := &testKeysClient{MockKeysClient: mockClient} + + wrapper := manual.NewKeyVaultKey(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + _, qErr := wrapper.Search(ctx, wrapper.Scopes()[0]) + if qErr == nil { + t.Error("Expected error when providing no query parts, but got nil") + } + }) + + t.Run("Search_EmptyVaultName", func(t *testing.T) { + mockClient := mocks.NewMockKeysClient(ctrl) + testClient := &testKeysClient{MockKeysClient: mockClient} + + wrapper := manual.NewKeyVaultKey(testClient, 
[]azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + _, qErr := wrapper.Search(ctx, wrapper.Scopes()[0], "") + if qErr == nil { + t.Error("Expected error when vault name is empty, but got nil") + } + }) + + t.Run("Search_KeyWithNilName", func(t *testing.T) { + validKey := createAzureKey("valid-key", subscriptionID, resourceGroup, vaultName) + mockPager := &mockKeysPager{ + pages: []armkeyvault.KeysClientListResponse{ + { + KeyListResult: armkeyvault.KeyListResult{ + Value: []*armkeyvault.Key{ + {Name: nil}, + {ID: validKey.ID, Name: validKey.Name, Type: validKey.Type, Properties: validKey.Properties, Tags: validKey.Tags}, + }, + }, + }, + }, + } + + mockClient := mocks.NewMockKeysClient(ctrl) + mockClient.EXPECT().NewListPager(resourceGroup, vaultName, nil).Return(mockPager) + + testClient := &testKeysClient{ + MockKeysClient: mockClient, + pager: mockPager, + } + + wrapper := manual.NewKeyVaultKey(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, wrapper.Scopes()[0], vaultName, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 1 { + t.Fatalf("Expected 1 item, got: %d", len(sdpItems)) + } + + expectedUniqueAttrValue := shared.CompositeLookupKey(vaultName, "valid-key") + if sdpItems[0].UniqueAttributeValue() != expectedUniqueAttrValue { + t.Errorf("Expected unique attribute value %s, got %s", expectedUniqueAttrValue, sdpItems[0].UniqueAttributeValue()) + } + }) + + t.Run("ErrorHandling_Get", func(t *testing.T) { + expectedErr := errors.New("key not found") + + mockClient := mocks.NewMockKeysClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, vaultName, 
"nonexistent-key", nil).Return( + armkeyvault.KeysClientGetResponse{}, expectedErr) + + wrapper := manual.NewKeyVaultKey(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := vaultName + shared.QuerySeparator + "nonexistent-key" + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when getting non-existent key, but got nil") + } + }) + + t.Run("ErrorHandling_Search", func(t *testing.T) { + mockClient := mocks.NewMockKeysClient(ctrl) + errorPager := &errorKeysPager{} + + mockClient.EXPECT().NewListPager(resourceGroup, vaultName, nil).Return(errorPager) + + testClient := &testKeysClient{ + MockKeysClient: mockClient, + pager: errorPager, + } + + wrapper := manual.NewKeyVaultKey(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + _, err := searchable.Search(ctx, wrapper.Scopes()[0], vaultName, true) + if err == nil { + t.Error("Expected error from pager when NextPage returns an error, but got nil") + } + }) + + t.Run("InterfaceCompliance", func(t *testing.T) { + mockClient := mocks.NewMockKeysClient(ctrl) + testClient := &testKeysClient{MockKeysClient: mockClient} + wrapper := manual.NewKeyVaultKey(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + if wrapper == nil { + t.Error("Wrapper should not be nil") + } + + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + _, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Error("Adapter should implement SearchableAdapter interface") + } + }) + + t.Run("PotentialLinks", 
func(t *testing.T) { + mockClient := mocks.NewMockKeysClient(ctrl) + testClient := &testKeysClient{MockKeysClient: mockClient} + wrapper := manual.NewKeyVaultKey(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + links := wrapper.PotentialLinks() + if len(links) == 0 { + t.Error("Expected potential links to be defined") + } + if !links[azureshared.KeyVaultVault] { + t.Error("Expected KeyVaultVault to be in potential links") + } + if !links[stdlib.NetworkDNS] { + t.Error("Expected stdlib.NetworkDNS to be in potential links") + } + if !links[stdlib.NetworkHTTP] { + t.Error("Expected stdlib.NetworkHTTP to be in potential links") + } + }) + + t.Run("TerraformMappings", func(t *testing.T) { + mockClient := mocks.NewMockKeysClient(ctrl) + testClient := &testKeysClient{MockKeysClient: mockClient} + wrapper := manual.NewKeyVaultKey(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + mappings := wrapper.TerraformMappings() + if len(mappings) == 0 { + t.Fatal("Expected TerraformMappings to be defined") + } + + foundIDMapping := false + for _, mapping := range mappings { + if mapping.GetTerraformQueryMap() == "azurerm_key_vault_key.id" { + foundIDMapping = true + if mapping.GetTerraformMethod() != sdp.QueryMethod_SEARCH { + t.Errorf("Expected TerraformMethod to be SEARCH for id mapping, got %s", mapping.GetTerraformMethod()) + } + } + } + if !foundIDMapping { + t.Error("Expected TerraformMappings to include 'azurerm_key_vault_key.id' mapping") + } + if len(mappings) != 1 { + t.Errorf("Expected 1 TerraformMapping, got %d", len(mappings)) + } + }) + + t.Run("IAMPermissions", func(t *testing.T) { + mockClient := mocks.NewMockKeysClient(ctrl) + testClient := &testKeysClient{MockKeysClient: mockClient} + wrapper := manual.NewKeyVaultKey(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + 
permissions := wrapper.IAMPermissions() + if len(permissions) == 0 { + t.Error("Expected IAMPermissions to be defined") + } + expectedPermission := "Microsoft.KeyVault/vaults/keys/read" + if !slices.Contains(permissions, expectedPermission) { + t.Errorf("Expected IAMPermissions to include %s", expectedPermission) + } + }) + + t.Run("PredefinedRole", func(t *testing.T) { + mockClient := mocks.NewMockKeysClient(ctrl) + testClient := &testKeysClient{MockKeysClient: mockClient} + wrapper := manual.NewKeyVaultKey(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + type predefinedRoleInterface interface { + PredefinedRole() string + } + if roleInterface, ok := wrapper.(predefinedRoleInterface); ok { + role := roleInterface.PredefinedRole() + if role != "Reader" { + t.Errorf("Expected PredefinedRole to be 'Reader', got %s", role) + } + } else { + t.Error("Wrapper should implement PredefinedRole method") + } + }) +} + +func createAzureKey(keyName, subscriptionID, resourceGroup, vaultName string) *armkeyvault.Key { + return &armkeyvault.Key{ + ID: new(fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.KeyVault/vaults/%s/keys/%s", subscriptionID, resourceGroup, vaultName, keyName)), + Name: new(keyName), + Type: new("Microsoft.KeyVault/vaults/keys"), + Tags: map[string]*string{ + "env": new("test"), + "project": new("testing"), + }, + Properties: &armkeyvault.KeyProperties{ + KeyURI: new(fmt.Sprintf("https://%s.vault.azure.net/keys/%s", vaultName, keyName)), + }, + } +} + +func createAzureKeyMinimal(keyName string) *armkeyvault.Key { + return &armkeyvault.Key{ + Name: new(keyName), + Type: new("Microsoft.KeyVault/vaults/keys"), + Tags: map[string]*string{ + "env": new("test"), + }, + Properties: &armkeyvault.KeyProperties{}, + } +} diff --git a/sources/azure/manual/keyvault-managed-hsm-private-endpoint-connection.go b/sources/azure/manual/keyvault-managed-hsm-private-endpoint-connection.go new 
file mode 100644 index 00000000..70d80c38 --- /dev/null +++ b/sources/azure/manual/keyvault-managed-hsm-private-endpoint-connection.go @@ -0,0 +1,260 @@ +package manual + +import ( + "context" + "errors" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault/v2" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" +) + +var KeyVaultManagedHSMPrivateEndpointConnectionLookupByName = shared.NewItemTypeLookup("name", azureshared.KeyVaultManagedHSMPrivateEndpointConnection) + +type keyvaultManagedHSMPrivateEndpointConnectionWrapper struct { + client clients.KeyVaultManagedHSMPrivateEndpointConnectionsClient + + *azureshared.MultiResourceGroupBase +} + +// NewKeyVaultManagedHSMPrivateEndpointConnection returns a SearchableWrapper for Azure Key Vault Managed HSM private endpoint connections. 
+func NewKeyVaultManagedHSMPrivateEndpointConnection(client clients.KeyVaultManagedHSMPrivateEndpointConnectionsClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.SearchableWrapper { + return &keyvaultManagedHSMPrivateEndpointConnectionWrapper{ + client: client, + MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_SECURITY, + azureshared.KeyVaultManagedHSMPrivateEndpointConnection, + ), + } +} + +func (s keyvaultManagedHSMPrivateEndpointConnectionWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 2 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Get requires 2 query parts: hsmName and privateEndpointConnectionName", + Scope: scope, + ItemType: s.Type(), + } + } + hsmName := queryParts[0] + connectionName := queryParts[1] + + rgScope, err := s.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + resp, err := s.client.Get(ctx, rgScope.ResourceGroup, hsmName, connectionName) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + item, sdpErr := s.azureMHSMPrivateEndpointConnectionToSDPItem(&resp.MHSMPrivateEndpointConnection, hsmName, connectionName, scope) + if sdpErr != nil { + return nil, sdpErr + } + return item, nil +} + +func (s keyvaultManagedHSMPrivateEndpointConnectionWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + KeyVaultManagedHSMsLookupByName, + KeyVaultManagedHSMPrivateEndpointConnectionLookupByName, + } +} + +func (s keyvaultManagedHSMPrivateEndpointConnectionWrapper) Search(ctx context.Context, scope string, queryParts ...string) ([]*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 1 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Search requires 1 query part: hsmName", + Scope: scope, + 
ItemType: s.Type(), + } + } + hsmName := queryParts[0] + + rgScope, err := s.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + pager := s.client.ListByResource(ctx, rgScope.ResourceGroup, hsmName) + + var items []*sdp.Item + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + for _, conn := range page.Value { + if conn == nil || conn.Name == nil { + continue + } + + item, sdpErr := s.azureMHSMPrivateEndpointConnectionToSDPItem(conn, hsmName, *conn.Name, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + + return items, nil +} + +func (s keyvaultManagedHSMPrivateEndpointConnectionWrapper) SearchStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string, queryParts ...string) { + if len(queryParts) < 1 { + stream.SendError(azureshared.QueryError(errors.New("Search requires 1 query part: hsmName"), scope, s.Type())) + return + } + hsmName := queryParts[0] + + rgScope, err := s.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, s.Type())) + return + } + pager := s.client.ListByResource(ctx, rgScope.ResourceGroup, hsmName) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, s.Type())) + return + } + for _, conn := range page.Value { + if conn == nil || conn.Name == nil { + continue + } + item, sdpErr := s.azureMHSMPrivateEndpointConnectionToSDPItem(conn, hsmName, *conn.Name, scope) + if sdpErr != nil { + stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) + stream.SendItem(item) + } + } +} + +func (s keyvaultManagedHSMPrivateEndpointConnectionWrapper) SearchLookups() []sources.ItemTypeLookups { + return 
[]sources.ItemTypeLookups{ + { + KeyVaultManagedHSMsLookupByName, + }, + } +} + +func (s keyvaultManagedHSMPrivateEndpointConnectionWrapper) PotentialLinks() map[shared.ItemType]bool { + return map[shared.ItemType]bool{ + azureshared.KeyVaultManagedHSM: true, + azureshared.NetworkPrivateEndpoint: true, + azureshared.ManagedIdentityUserAssignedIdentity: true, + } +} + +func (s keyvaultManagedHSMPrivateEndpointConnectionWrapper) azureMHSMPrivateEndpointConnectionToSDPItem(conn *armkeyvault.MHSMPrivateEndpointConnection, hsmName, connectionName, scope string) (*sdp.Item, *sdp.QueryError) { + attributes, err := shared.ToAttributesWithExclude(conn, "tags") + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + err = attributes.Set("uniqueAttr", shared.CompositeLookupKey(hsmName, connectionName)) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + sdpItem := &sdp.Item{ + Type: azureshared.KeyVaultManagedHSMPrivateEndpointConnection.String(), + UniqueAttribute: "uniqueAttr", + Attributes: attributes, + Scope: scope, + Tags: azureshared.ConvertAzureTags(conn.Tags), + } + + // Health from provisioning state + if conn.Properties != nil && conn.Properties.ProvisioningState != nil { + state := strings.ToLower(string(*conn.Properties.ProvisioningState)) + switch state { + case "succeeded": + sdpItem.Health = sdp.Health_HEALTH_OK.Enum() + case "creating", "updating", "deleting": + sdpItem.Health = sdp.Health_HEALTH_PENDING.Enum() + case "failed": + sdpItem.Health = sdp.Health_HEALTH_ERROR.Enum() + default: + sdpItem.Health = sdp.Health_HEALTH_UNKNOWN.Enum() + } + } + + // Link to parent Key Vault Managed HSM + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.KeyVaultManagedHSM.String(), + Method: sdp.QueryMethod_GET, + Query: hsmName, + Scope: scope, + }, + }) + + // Link to Network Private Endpoint when present (may be in different resource 
group) + if conn.Properties != nil && conn.Properties.PrivateEndpoint != nil && conn.Properties.PrivateEndpoint.ID != nil { + peID := *conn.Properties.PrivateEndpoint.ID + peName := azureshared.ExtractResourceName(peID) + if peName != "" { + linkedScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(peID); extractedScope != "" { + linkedScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkPrivateEndpoint.String(), + Method: sdp.QueryMethod_GET, + Query: peName, + Scope: linkedScope, + }, + }) + } + } + + // Link to User Assigned Managed Identities (same pattern as KeyVaultManagedHSM adapter) + // User Assigned Identities can be in a different resource group than the Managed HSM. + if conn.Identity != nil && conn.Identity.UserAssignedIdentities != nil { + for identityResourceID := range conn.Identity.UserAssignedIdentities { + identityName := azureshared.ExtractResourceName(identityResourceID) + if identityName != "" { + linkedScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(identityResourceID); extractedScope != "" { + linkedScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.ManagedIdentityUserAssignedIdentity.String(), + Method: sdp.QueryMethod_GET, + Query: identityName, + Scope: linkedScope, + }, + }) + } + } + } + + return sdpItem, nil +} + +func (s keyvaultManagedHSMPrivateEndpointConnectionWrapper) IAMPermissions() []string { + return []string{ + "Microsoft.KeyVault/managedHSMs/privateEndpointConnections/read", + } +} + +func (s keyvaultManagedHSMPrivateEndpointConnectionWrapper) PredefinedRole() string { + return "Reader" +} diff --git a/sources/azure/manual/keyvault-managed-hsm-private-endpoint-connection_test.go b/sources/azure/manual/keyvault-managed-hsm-private-endpoint-connection_test.go new 
file mode 100644 index 00000000..2f727524 --- /dev/null +++ b/sources/azure/manual/keyvault-managed-hsm-private-endpoint-connection_test.go @@ -0,0 +1,370 @@ +package manual_test + +import ( + "context" + "errors" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault/v2" + "go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + "github.com/overmindtech/cli/sources/shared" +) + +type mockKeyVaultManagedHSMPrivateEndpointConnectionsPager struct { + pages []armkeyvault.MHSMPrivateEndpointConnectionsClientListByResourceResponse + index int +} + +func (m *mockKeyVaultManagedHSMPrivateEndpointConnectionsPager) More() bool { + return m.index < len(m.pages) +} + +func (m *mockKeyVaultManagedHSMPrivateEndpointConnectionsPager) NextPage(ctx context.Context) (armkeyvault.MHSMPrivateEndpointConnectionsClientListByResourceResponse, error) { + if m.index >= len(m.pages) { + return armkeyvault.MHSMPrivateEndpointConnectionsClientListByResourceResponse{}, errors.New("no more pages") + } + page := m.pages[m.index] + m.index++ + return page, nil +} + +type testKeyVaultManagedHSMPrivateEndpointConnectionsClient struct { + *mocks.MockKeyVaultManagedHSMPrivateEndpointConnectionsClient + pager clients.KeyVaultManagedHSMPrivateEndpointConnectionsPager +} + +func (t *testKeyVaultManagedHSMPrivateEndpointConnectionsClient) ListByResource(ctx context.Context, resourceGroupName, hsmName string) clients.KeyVaultManagedHSMPrivateEndpointConnectionsPager { + return t.pager +} + +func TestKeyVaultManagedHSMPrivateEndpointConnection(t *testing.T) { + ctx := context.Background() + ctrl := 
gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + hsmName := "test-hsm" + connectionName := "test-pec" + + t.Run("Get", func(t *testing.T) { + conn := createAzureMHSMPrivateEndpointConnection(connectionName, "") + + mockClient := mocks.NewMockKeyVaultManagedHSMPrivateEndpointConnectionsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, hsmName, connectionName).Return( + armkeyvault.MHSMPrivateEndpointConnectionsClientGetResponse{ + MHSMPrivateEndpointConnection: *conn, + }, nil) + + testClient := &testKeyVaultManagedHSMPrivateEndpointConnectionsClient{MockKeyVaultManagedHSMPrivateEndpointConnectionsClient: mockClient} + wrapper := manual.NewKeyVaultManagedHSMPrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(hsmName, connectionName) + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.KeyVaultManagedHSMPrivateEndpointConnection.String() { + t.Errorf("Expected type %s, got %s", azureshared.KeyVaultManagedHSMPrivateEndpointConnection, sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", sdpItem.GetUniqueAttribute()) + } + + if sdpItem.UniqueAttributeValue() != shared.CompositeLookupKey(hsmName, connectionName) { + t.Errorf("Expected unique attribute value %s, got %s", shared.CompositeLookupKey(hsmName, connectionName), sdpItem.UniqueAttributeValue()) + } + + if sdpItem.GetScope() != subscriptionID+"."+resourceGroup { + t.Errorf("Expected scope %s, got %s", subscriptionID+"."+resourceGroup, sdpItem.GetScope()) + } + + if err := sdpItem.Validate(); err != nil { + t.Fatalf("Expected no 
validation error, got: %v", err) + } + + t.Run("StaticTests", func(t *testing.T) { + linkedQueries := sdpItem.GetLinkedItemQueries() + if len(linkedQueries) < 1 { + t.Fatalf("Expected at least 1 linked query, got: %d", len(linkedQueries)) + } + + foundKeyVaultManagedHSM := false + for _, lq := range linkedQueries { + if lq.GetQuery().GetType() == azureshared.KeyVaultManagedHSM.String() { + foundKeyVaultManagedHSM = true + if lq.GetQuery().GetMethod() != sdp.QueryMethod_GET { + t.Errorf("Expected KeyVaultManagedHSM link method GET, got %v", lq.GetQuery().GetMethod()) + } + if lq.GetQuery().GetQuery() != hsmName { + t.Errorf("Expected KeyVaultManagedHSM query %s, got %s", hsmName, lq.GetQuery().GetQuery()) + } + } + } + if !foundKeyVaultManagedHSM { + t.Error("Expected linked query to KeyVaultManagedHSM") + } + }) + }) + + t.Run("Get_WithPrivateEndpointLink", func(t *testing.T) { + peID := "/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/privateEndpoints/test-pe" + conn := createAzureMHSMPrivateEndpointConnection(connectionName, peID) + + mockClient := mocks.NewMockKeyVaultManagedHSMPrivateEndpointConnectionsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, hsmName, connectionName).Return( + armkeyvault.MHSMPrivateEndpointConnectionsClientGetResponse{ + MHSMPrivateEndpointConnection: *conn, + }, nil) + + testClient := &testKeyVaultManagedHSMPrivateEndpointConnectionsClient{MockKeyVaultManagedHSMPrivateEndpointConnectionsClient: mockClient} + wrapper := manual.NewKeyVaultManagedHSMPrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(hsmName, connectionName) + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + 
foundPrivateEndpoint := false + for _, lq := range sdpItem.GetLinkedItemQueries() { + if lq.GetQuery().GetType() == azureshared.NetworkPrivateEndpoint.String() { + foundPrivateEndpoint = true + if lq.GetQuery().GetQuery() != "test-pe" { + t.Errorf("Expected NetworkPrivateEndpoint query 'test-pe', got %s", lq.GetQuery().GetQuery()) + } + break + } + } + if !foundPrivateEndpoint { + t.Error("Expected linked query to NetworkPrivateEndpoint when PrivateEndpoint ID is set") + } + }) + + t.Run("GetWithInsufficientQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockKeyVaultManagedHSMPrivateEndpointConnectionsClient(ctrl) + testClient := &testKeyVaultManagedHSMPrivateEndpointConnectionsClient{MockKeyVaultManagedHSMPrivateEndpointConnectionsClient: mockClient} + + wrapper := manual.NewKeyVaultManagedHSMPrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], hsmName, true) + if qErr == nil { + t.Error("Expected error when providing insufficient query parts, but got nil") + } + }) + + t.Run("Search", func(t *testing.T) { + conn1 := createAzureMHSMPrivateEndpointConnection("pec-1", "") + conn2 := createAzureMHSMPrivateEndpointConnection("pec-2", "") + + mockClient := mocks.NewMockKeyVaultManagedHSMPrivateEndpointConnectionsClient(ctrl) + mockPager := &mockKeyVaultManagedHSMPrivateEndpointConnectionsPager{ + pages: []armkeyvault.MHSMPrivateEndpointConnectionsClientListByResourceResponse{ + { + MHSMPrivateEndpointConnectionsListResult: armkeyvault.MHSMPrivateEndpointConnectionsListResult{ + Value: []*armkeyvault.MHSMPrivateEndpointConnection{conn1, conn2}, + }, + }, + }, + } + + testClient := &testKeyVaultManagedHSMPrivateEndpointConnectionsClient{ + MockKeyVaultManagedHSMPrivateEndpointConnectionsClient: mockClient, + pager: mockPager, + } + + wrapper := 
manual.NewKeyVaultManagedHSMPrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, wrapper.Scopes()[0], hsmName, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 2 { + t.Fatalf("Expected 2 items, got: %d", len(sdpItems)) + } + + for _, item := range sdpItems { + if err := item.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + if item.GetType() != azureshared.KeyVaultManagedHSMPrivateEndpointConnection.String() { + t.Errorf("Expected type %s, got %s", azureshared.KeyVaultManagedHSMPrivateEndpointConnection, item.GetType()) + } + } + }) + + t.Run("Search_NilNameSkipped", func(t *testing.T) { + validConn := createAzureMHSMPrivateEndpointConnection("valid-pec", "") + + mockClient := mocks.NewMockKeyVaultManagedHSMPrivateEndpointConnectionsClient(ctrl) + mockPager := &mockKeyVaultManagedHSMPrivateEndpointConnectionsPager{ + pages: []armkeyvault.MHSMPrivateEndpointConnectionsClientListByResourceResponse{ + { + MHSMPrivateEndpointConnectionsListResult: armkeyvault.MHSMPrivateEndpointConnectionsListResult{ + Value: []*armkeyvault.MHSMPrivateEndpointConnection{ + {Name: nil}, + validConn, + }, + }, + }, + }, + } + + testClient := &testKeyVaultManagedHSMPrivateEndpointConnectionsClient{ + MockKeyVaultManagedHSMPrivateEndpointConnectionsClient: mockClient, + pager: mockPager, + } + + wrapper := manual.NewKeyVaultManagedHSMPrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := 
adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, wrapper.Scopes()[0], hsmName, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 1 { + t.Fatalf("Expected 1 item (nil name skipped), got: %d", len(sdpItems)) + } + if sdpItems[0].UniqueAttributeValue() != shared.CompositeLookupKey(hsmName, "valid-pec") { + t.Errorf("Expected unique value %s, got %s", shared.CompositeLookupKey(hsmName, "valid-pec"), sdpItems[0].UniqueAttributeValue()) + } + }) + + t.Run("Search_InvalidQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockKeyVaultManagedHSMPrivateEndpointConnectionsClient(ctrl) + testClient := &testKeyVaultManagedHSMPrivateEndpointConnectionsClient{MockKeyVaultManagedHSMPrivateEndpointConnectionsClient: mockClient} + + wrapper := manual.NewKeyVaultManagedHSMPrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + _, qErr := wrapper.Search(ctx, wrapper.Scopes()[0]) + if qErr == nil { + t.Error("Expected error when providing no query parts, but got nil") + } + }) + + t.Run("ErrorHandling_Get", func(t *testing.T) { + expectedErr := errors.New("private endpoint connection not found") + + mockClient := mocks.NewMockKeyVaultManagedHSMPrivateEndpointConnectionsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, hsmName, "nonexistent-pec").Return( + armkeyvault.MHSMPrivateEndpointConnectionsClientGetResponse{}, expectedErr) + + testClient := &testKeyVaultManagedHSMPrivateEndpointConnectionsClient{MockKeyVaultManagedHSMPrivateEndpointConnectionsClient: mockClient} + wrapper := manual.NewKeyVaultManagedHSMPrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := 
shared.CompositeLookupKey(hsmName, "nonexistent-pec") + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when getting non-existent private endpoint connection, but got nil") + } + }) + + t.Run("Get_WithUserAssignedIdentityLink", func(t *testing.T) { + identityID := "/subscriptions/" + subscriptionID + "/resourceGroups/identity-rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-identity" + conn := createAzureMHSMPrivateEndpointConnectionWithIdentity(connectionName, "", identityID) + + mockClient := mocks.NewMockKeyVaultManagedHSMPrivateEndpointConnectionsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, hsmName, connectionName).Return( + armkeyvault.MHSMPrivateEndpointConnectionsClientGetResponse{ + MHSMPrivateEndpointConnection: *conn, + }, nil) + + testClient := &testKeyVaultManagedHSMPrivateEndpointConnectionsClient{MockKeyVaultManagedHSMPrivateEndpointConnectionsClient: mockClient} + wrapper := manual.NewKeyVaultManagedHSMPrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(hsmName, connectionName) + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + foundIdentity := false + for _, lq := range sdpItem.GetLinkedItemQueries() { + if lq.GetQuery().GetType() == azureshared.ManagedIdentityUserAssignedIdentity.String() { + foundIdentity = true + if lq.GetQuery().GetQuery() != "test-identity" { + t.Errorf("Expected ManagedIdentityUserAssignedIdentity query 'test-identity', got %s", lq.GetQuery().GetQuery()) + } + if lq.GetQuery().GetScope() != subscriptionID+".identity-rg" { + t.Errorf("Expected scope %s.identity-rg for identity in different RG, got %s", subscriptionID, lq.GetQuery().GetScope()) + } + } + 
} + if !foundIdentity { + t.Error("Expected linked query to ManagedIdentityUserAssignedIdentity when Identity.UserAssignedIdentities is set") + } + }) + + t.Run("PotentialLinks", func(t *testing.T) { + wrapper := manual.NewKeyVaultManagedHSMPrivateEndpointConnection(nil, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + links := wrapper.PotentialLinks() + if !links[azureshared.KeyVaultManagedHSM] { + t.Error("Expected KeyVaultManagedHSM in PotentialLinks") + } + if !links[azureshared.NetworkPrivateEndpoint] { + t.Error("Expected NetworkPrivateEndpoint in PotentialLinks") + } + if !links[azureshared.ManagedIdentityUserAssignedIdentity] { + t.Error("Expected ManagedIdentityUserAssignedIdentity in PotentialLinks") + } + }) +} + +func createAzureMHSMPrivateEndpointConnection(connectionName, privateEndpointID string) *armkeyvault.MHSMPrivateEndpointConnection { + return createAzureMHSMPrivateEndpointConnectionWithIdentity(connectionName, privateEndpointID, "") +} + +func createAzureMHSMPrivateEndpointConnectionWithIdentity(connectionName, privateEndpointID, identityResourceID string) *armkeyvault.MHSMPrivateEndpointConnection { + state := armkeyvault.PrivateEndpointConnectionProvisioningStateSucceeded + conn := &armkeyvault.MHSMPrivateEndpointConnection{ + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.KeyVault/managedHSMs/test-hsm/privateEndpointConnections/" + connectionName), + Name: new(connectionName), + Type: new("Microsoft.KeyVault/managedHSMs/privateEndpointConnections"), + Properties: &armkeyvault.MHSMPrivateEndpointConnectionProperties{ + ProvisioningState: &state, + }, + } + if privateEndpointID != "" { + conn.Properties.PrivateEndpoint = &armkeyvault.MHSMPrivateEndpoint{ + ID: new(privateEndpointID), + } + } + if identityResourceID != "" { + conn.Identity = &armkeyvault.ManagedServiceIdentity{ + Type: new(armkeyvault.ManagedServiceIdentityTypeUserAssigned), + 
UserAssignedIdentities: map[string]*armkeyvault.UserAssignedIdentity{ + identityResourceID: {}, + }, + } + } + return conn +} diff --git a/sources/azure/manual/keyvault-managed-hsm_test.go b/sources/azure/manual/keyvault-managed-hsm_test.go index af237d38..8a7fb39a 100644 --- a/sources/azure/manual/keyvault-managed-hsm_test.go +++ b/sources/azure/manual/keyvault-managed-hsm_test.go @@ -6,7 +6,6 @@ import ( "sync" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault/v2" "go.uber.org/mock/gomock" @@ -203,7 +202,7 @@ func TestKeyVaultManagedHSM(t *testing.T) { hsm := &armkeyvault.ManagedHsm{ Name: nil, // No name field Properties: &armkeyvault.ManagedHsmProperties{ - TenantID: to.Ptr("test-tenant-id"), + TenantID: new("test-tenant-id"), }, } @@ -325,7 +324,7 @@ func TestKeyVaultManagedHSM(t *testing.T) { hsm2 := &armkeyvault.ManagedHsm{ Name: nil, // This should be skipped Properties: &armkeyvault.ManagedHsmProperties{ - TenantID: to.Ptr("test-tenant-id"), + TenantID: new("test-tenant-id"), }, } hsm3 := createAzureManagedHSM("test-managed-hsm-3", subscriptionID, resourceGroup) @@ -480,7 +479,7 @@ func TestKeyVaultManagedHSM(t *testing.T) { hsm2 := &armkeyvault.ManagedHsm{ Name: nil, // This should be skipped Properties: &armkeyvault.ManagedHsmProperties{ - TenantID: to.Ptr("test-tenant-id"), + TenantID: new("test-tenant-id"), }, } hsm3 := createAzureManagedHSM("test-managed-hsm-3", subscriptionID, resourceGroup) @@ -604,30 +603,30 @@ func TestKeyVaultManagedHSM(t *testing.T) { // createAzureManagedHSM creates a mock Azure Managed HSM with linked resources func createAzureManagedHSM(hsmName, subscriptionID, resourceGroup string) *armkeyvault.ManagedHsm { return &armkeyvault.ManagedHsm{ - Name: to.Ptr(hsmName), - Location: to.Ptr("eastus"), + Name: new(hsmName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), - "project": to.Ptr("testing"), + "env": 
new("test"), + "project": new("testing"), }, Properties: &armkeyvault.ManagedHsmProperties{ - TenantID: to.Ptr("test-tenant-id"), - HsmURI: to.Ptr("https://" + hsmName + ".managedhsm.azure.net"), + TenantID: new("test-tenant-id"), + HsmURI: new("https://" + hsmName + ".managedhsm.azure.net"), // Private Endpoint Connections (ID is the connection resource ID for child resource linking) PrivateEndpointConnections: []*armkeyvault.MHSMPrivateEndpointConnectionItem{ { - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.KeyVault/managedHSMs/" + hsmName + "/privateEndpointConnections/test-pec-1"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.KeyVault/managedHSMs/" + hsmName + "/privateEndpointConnections/test-pec-1"), Properties: &armkeyvault.MHSMPrivateEndpointConnectionProperties{ PrivateEndpoint: &armkeyvault.MHSMPrivateEndpoint{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/privateEndpoints/test-private-endpoint"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/privateEndpoints/test-private-endpoint"), }, }, }, { - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.KeyVault/managedHSMs/" + hsmName + "/privateEndpointConnections/test-pec-2"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.KeyVault/managedHSMs/" + hsmName + "/privateEndpointConnections/test-pec-2"), Properties: &armkeyvault.MHSMPrivateEndpointConnectionProperties{ PrivateEndpoint: &armkeyvault.MHSMPrivateEndpoint{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/different-rg/providers/Microsoft.Network/privateEndpoints/test-private-endpoint-diff-rg"), + ID: new("/subscriptions/" + subscriptionID + 
"/resourceGroups/different-rg/providers/Microsoft.Network/privateEndpoints/test-private-endpoint-diff-rg"), }, }, }, @@ -636,25 +635,25 @@ func createAzureManagedHSM(hsmName, subscriptionID, resourceGroup string) *armke NetworkACLs: &armkeyvault.MHSMNetworkRuleSet{ VirtualNetworkRules: []*armkeyvault.MHSMVirtualNetworkRule{ { - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/test-subnet"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/test-subnet"), }, { - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/different-rg/providers/Microsoft.Network/virtualNetworks/test-vnet-diff-rg/subnets/test-subnet-diff-rg"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/different-rg/providers/Microsoft.Network/virtualNetworks/test-vnet-diff-rg/subnets/test-subnet-diff-rg"), }, }, IPRules: []*armkeyvault.MHSMIPRule{ { - Value: to.Ptr("192.168.1.1"), + Value: new("192.168.1.1"), }, { - Value: to.Ptr("10.0.0.0/24"), + Value: new("10.0.0.0/24"), }, }, }, }, // User Assigned Identities Identity: &armkeyvault.ManagedServiceIdentity{ - Type: to.Ptr(armkeyvault.ManagedServiceIdentityTypeUserAssigned), + Type: new(armkeyvault.ManagedServiceIdentityTypeUserAssigned), UserAssignedIdentities: map[string]*armkeyvault.UserAssignedIdentity{ "/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-identity": {}, "/subscriptions/" + subscriptionID + "/resourceGroups/identity-rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-identity-diff-rg": {}, @@ -666,13 +665,13 @@ func createAzureManagedHSM(hsmName, subscriptionID, resourceGroup string) *armke // createAzureManagedHSMMinimal creates a minimal mock Azure Managed HSM without linked resources func 
createAzureManagedHSMMinimal(hsmName string) *armkeyvault.ManagedHsm { return &armkeyvault.ManagedHsm{ - Name: to.Ptr(hsmName), - Location: to.Ptr("eastus"), + Name: new(hsmName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armkeyvault.ManagedHsmProperties{ - TenantID: to.Ptr("test-tenant-id"), + TenantID: new("test-tenant-id"), }, } } @@ -680,19 +679,19 @@ func createAzureManagedHSMMinimal(hsmName string) *armkeyvault.ManagedHsm { // createAzureManagedHSMCrossRG creates a mock Azure Managed HSM with linked resources in different resource groups func createAzureManagedHSMCrossRG(hsmName, subscriptionID, resourceGroup string) *armkeyvault.ManagedHsm { return &armkeyvault.ManagedHsm{ - Name: to.Ptr(hsmName), - Location: to.Ptr("eastus"), + Name: new(hsmName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armkeyvault.ManagedHsmProperties{ - TenantID: to.Ptr("test-tenant-id"), + TenantID: new("test-tenant-id"), // Private Endpoint in different resource group PrivateEndpointConnections: []*armkeyvault.MHSMPrivateEndpointConnectionItem{ { Properties: &armkeyvault.MHSMPrivateEndpointConnectionProperties{ PrivateEndpoint: &armkeyvault.MHSMPrivateEndpoint{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/different-rg/providers/Microsoft.Network/privateEndpoints/test-pe-diff-rg"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/different-rg/providers/Microsoft.Network/privateEndpoints/test-pe-diff-rg"), }, }, }, @@ -701,14 +700,14 @@ func createAzureManagedHSMCrossRG(hsmName, subscriptionID, resourceGroup string) NetworkACLs: &armkeyvault.MHSMNetworkRuleSet{ VirtualNetworkRules: []*armkeyvault.MHSMVirtualNetworkRule{ { - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/different-rg/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/test-subnet"), + ID: new("/subscriptions/" + 
subscriptionID + "/resourceGroups/different-rg/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/test-subnet"), }, }, }, }, // User Assigned Identity in different resource group Identity: &armkeyvault.ManagedServiceIdentity{ - Type: to.Ptr(armkeyvault.ManagedServiceIdentityTypeUserAssigned), + Type: new(armkeyvault.ManagedServiceIdentityTypeUserAssigned), UserAssignedIdentities: map[string]*armkeyvault.UserAssignedIdentity{ "/subscriptions/" + subscriptionID + "/resourceGroups/identity-rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-identity-diff-rg": {}, }, diff --git a/sources/azure/manual/keyvault-secret_test.go b/sources/azure/manual/keyvault-secret_test.go index 0e8f23b1..c8814996 100644 --- a/sources/azure/manual/keyvault-secret_test.go +++ b/sources/azure/manual/keyvault-secret_test.go @@ -4,9 +4,9 @@ import ( "context" "errors" "fmt" + "slices" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault/v2" "go.uber.org/mock/gomock" @@ -502,13 +502,7 @@ func TestKeyVaultSecret(t *testing.T) { } expectedPermission := "Microsoft.KeyVault/vaults/secrets/read" - found := false - for _, perm := range permissions { - if perm == expectedPermission { - found = true - break - } - } + found := slices.Contains(permissions, expectedPermission) if !found { t.Errorf("Expected IAMPermissions to include %s", expectedPermission) } @@ -578,16 +572,16 @@ func TestKeyVaultSecret(t *testing.T) { // createAzureSecret creates a mock Azure Key Vault secret with linked vault func createAzureSecret(secretName, subscriptionID, resourceGroup, vaultName string) *armkeyvault.Secret { return &armkeyvault.Secret{ - ID: to.Ptr(fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.KeyVault/vaults/%s/secrets/%s", subscriptionID, resourceGroup, vaultName, secretName)), - Name: to.Ptr(secretName), - Type: to.Ptr("Microsoft.KeyVault/vaults/secrets"), + ID: 
new(fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.KeyVault/vaults/%s/secrets/%s", subscriptionID, resourceGroup, vaultName, secretName)), + Name: new(secretName), + Type: new("Microsoft.KeyVault/vaults/secrets"), Tags: map[string]*string{ - "env": to.Ptr("test"), - "project": to.Ptr("testing"), + "env": new("test"), + "project": new("testing"), }, Properties: &armkeyvault.SecretProperties{ - Value: to.Ptr("secret-value"), - SecretURI: to.Ptr(fmt.Sprintf("https://%s.vault.azure.net/secrets/%s", vaultName, secretName)), + Value: new("secret-value"), + SecretURI: new(fmt.Sprintf("https://%s.vault.azure.net/secrets/%s", vaultName, secretName)), }, } } @@ -595,13 +589,13 @@ func createAzureSecret(secretName, subscriptionID, resourceGroup, vaultName stri // createAzureSecretMinimal creates a minimal mock Azure Key Vault secret without ID (no linked resources) func createAzureSecretMinimal(secretName string) *armkeyvault.Secret { return &armkeyvault.Secret{ - Name: to.Ptr(secretName), - Type: to.Ptr("Microsoft.KeyVault/vaults/secrets"), + Name: new(secretName), + Type: new("Microsoft.KeyVault/vaults/secrets"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armkeyvault.SecretProperties{ - Value: to.Ptr("secret-value"), + Value: new("secret-value"), }, } } @@ -609,14 +603,14 @@ func createAzureSecretMinimal(secretName string) *armkeyvault.Secret { // createAzureSecretCrossRG creates a mock Azure Key Vault secret with vault in a different resource group func createAzureSecretCrossRG(secretName, subscriptionID, vaultResourceGroup, vaultName string) *armkeyvault.Secret { return &armkeyvault.Secret{ - ID: to.Ptr(fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.KeyVault/vaults/%s/secrets/%s", subscriptionID, vaultResourceGroup, vaultName, secretName)), - Name: to.Ptr(secretName), - Type: to.Ptr("Microsoft.KeyVault/vaults/secrets"), + ID: 
new(fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.KeyVault/vaults/%s/secrets/%s", subscriptionID, vaultResourceGroup, vaultName, secretName)), + Name: new(secretName), + Type: new("Microsoft.KeyVault/vaults/secrets"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armkeyvault.SecretProperties{ - Value: to.Ptr("secret-value"), + Value: new("secret-value"), }, } } diff --git a/sources/azure/manual/keyvault-vault.go b/sources/azure/manual/keyvault-vault.go index de66dca1..9aee56a7 100644 --- a/sources/azure/manual/keyvault-vault.go +++ b/sources/azure/manual/keyvault-vault.go @@ -135,7 +135,7 @@ func (k keyvaultVaultWrapper) azureKeyVaultToSDPItem(vault *armkeyvault.Vault, s Tags: azureshared.ConvertAzureTags(vault.Tags), } - // Child resources: list secrets in this vault (Search by vault name) + // Child resources: list secrets and keys in this vault (Search by vault name) vaultName := *vault.Name sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ Query: &sdp.Query{ @@ -145,6 +145,14 @@ func (k keyvaultVaultWrapper) azureKeyVaultToSDPItem(vault *armkeyvault.Vault, s Scope: scope, }, }) + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.KeyVaultKey.String(), + Method: sdp.QueryMethod_SEARCH, + Query: vaultName, + Scope: scope, + }, + }) // Link to Private Endpoints from Private Endpoint Connections // Reference: https://learn.microsoft.com/en-us/rest/api/virtualnetwork/private-endpoints/get @@ -302,6 +310,7 @@ func (k keyvaultVaultWrapper) TerraformMappings() []*sdp.TerraformMapping { func (k keyvaultVaultWrapper) PotentialLinks() map[shared.ItemType]bool { return shared.NewItemTypesSet( azureshared.KeyVaultSecret, + azureshared.KeyVaultKey, azureshared.NetworkPrivateEndpoint, azureshared.NetworkSubnet, azureshared.KeyVaultManagedHSM, diff --git a/sources/azure/manual/keyvault-vault_test.go 
b/sources/azure/manual/keyvault-vault_test.go index 267d6955..bb8ac8d0 100644 --- a/sources/azure/manual/keyvault-vault_test.go +++ b/sources/azure/manual/keyvault-vault_test.go @@ -5,7 +5,6 @@ import ( "errors" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault/v2" "go.uber.org/mock/gomock" @@ -113,6 +112,12 @@ func TestKeyVaultVault(t *testing.T) { ExpectedMethod: sdp.QueryMethod_SEARCH, ExpectedQuery: vaultName, ExpectedScope: subscriptionID + "." + resourceGroup, + }, { + // Child resources: keys in this vault (SEARCH by vault name) + ExpectedType: azureshared.KeyVaultKey.String(), + ExpectedMethod: sdp.QueryMethod_SEARCH, + ExpectedQuery: vaultName, + ExpectedScope: subscriptionID + "." + resourceGroup, }, { // Private Endpoint (GET) - same resource group ExpectedType: azureshared.NetworkPrivateEndpoint.String(), @@ -184,7 +189,7 @@ func TestKeyVaultVault(t *testing.T) { vault := &armkeyvault.Vault{ Name: nil, // No name field Properties: &armkeyvault.VaultProperties{ - TenantID: to.Ptr("test-tenant-id"), + TenantID: new("test-tenant-id"), }, } @@ -220,9 +225,9 @@ func TestKeyVaultVault(t *testing.T) { t.Fatalf("Expected no error, got: %v", qErr) } - // Should only have the child SEARCH link (secrets in vault); no private endpoints, subnets, etc. - if len(sdpItem.GetLinkedItemQueries()) != 1 { - t.Errorf("Expected 1 linked item query (KeyVaultSecret SEARCH), got %d", len(sdpItem.GetLinkedItemQueries())) + // Should only have the child SEARCH links (secrets and keys in vault); no private endpoints, subnets, etc. 
+ if len(sdpItem.GetLinkedItemQueries()) != 2 { + t.Errorf("Expected 2 linked item queries (KeyVaultSecret and KeyVaultKey SEARCH), got %d", len(sdpItem.GetLinkedItemQueries())) } }) @@ -362,6 +367,8 @@ func TestKeyVaultVault(t *testing.T) { } expectedLinks := map[shared.ItemType]bool{ + azureshared.KeyVaultSecret: true, + azureshared.KeyVaultKey: true, azureshared.NetworkPrivateEndpoint: true, azureshared.NetworkSubnet: true, azureshared.KeyVaultManagedHSM: true, @@ -379,27 +386,27 @@ func TestKeyVaultVault(t *testing.T) { // createAzureKeyVault creates a mock Azure Key Vault with linked resources func createAzureKeyVault(vaultName, subscriptionID, resourceGroup string) *armkeyvault.Vault { return &armkeyvault.Vault{ - Name: to.Ptr(vaultName), - Location: to.Ptr("eastus"), + Name: new(vaultName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), - "project": to.Ptr("testing"), + "env": new("test"), + "project": new("testing"), }, Properties: &armkeyvault.VaultProperties{ - TenantID: to.Ptr("test-tenant-id"), + TenantID: new("test-tenant-id"), // Private Endpoint Connections PrivateEndpointConnections: []*armkeyvault.PrivateEndpointConnectionItem{ { Properties: &armkeyvault.PrivateEndpointConnectionProperties{ PrivateEndpoint: &armkeyvault.PrivateEndpoint{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/privateEndpoints/test-private-endpoint"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/privateEndpoints/test-private-endpoint"), }, }, }, { Properties: &armkeyvault.PrivateEndpointConnectionProperties{ PrivateEndpoint: &armkeyvault.PrivateEndpoint{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/different-rg/providers/Microsoft.Network/privateEndpoints/test-private-endpoint-diff-rg"), + ID: new("/subscriptions/" + subscriptionID + 
"/resourceGroups/different-rg/providers/Microsoft.Network/privateEndpoints/test-private-endpoint-diff-rg"), }, }, }, @@ -408,21 +415,21 @@ func createAzureKeyVault(vaultName, subscriptionID, resourceGroup string) *armke NetworkACLs: &armkeyvault.NetworkRuleSet{ VirtualNetworkRules: []*armkeyvault.VirtualNetworkRule{ { - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/test-subnet"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/test-subnet"), }, { - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/different-rg/providers/Microsoft.Network/virtualNetworks/test-vnet-diff-rg/subnets/test-subnet-diff-rg"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/different-rg/providers/Microsoft.Network/virtualNetworks/test-vnet-diff-rg/subnets/test-subnet-diff-rg"), }, }, IPRules: []*armkeyvault.IPRule{ - {Value: to.Ptr("192.168.1.100")}, - {Value: to.Ptr("10.0.0.0/24")}, + {Value: new("192.168.1.100")}, + {Value: new("10.0.0.0/24")}, }, }, // Vault URI for keys and secrets operations - VaultURI: to.Ptr("https://" + vaultName + ".vault.azure.net/"), + VaultURI: new("https://" + vaultName + ".vault.azure.net/"), // Managed HSM Pool Resource ID - HsmPoolResourceID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/hsm-rg/providers/Microsoft.KeyVault/managedHSMs/test-managed-hsm"), + HsmPoolResourceID: new("/subscriptions/" + subscriptionID + "/resourceGroups/hsm-rg/providers/Microsoft.KeyVault/managedHSMs/test-managed-hsm"), }, } } @@ -430,13 +437,13 @@ func createAzureKeyVault(vaultName, subscriptionID, resourceGroup string) *armke // createAzureKeyVaultMinimal creates a minimal mock Azure Key Vault without linked resources func createAzureKeyVaultMinimal(vaultName string) *armkeyvault.Vault { return &armkeyvault.Vault{ - Name: 
to.Ptr(vaultName), - Location: to.Ptr("eastus"), + Name: new(vaultName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armkeyvault.VaultProperties{ - TenantID: to.Ptr("test-tenant-id"), + TenantID: new("test-tenant-id"), }, } } @@ -444,19 +451,19 @@ func createAzureKeyVaultMinimal(vaultName string) *armkeyvault.Vault { // createAzureKeyVaultCrossRG creates a mock Azure Key Vault with linked resources in different resource groups func createAzureKeyVaultCrossRG(vaultName, subscriptionID, resourceGroup string) *armkeyvault.Vault { return &armkeyvault.Vault{ - Name: to.Ptr(vaultName), - Location: to.Ptr("eastus"), + Name: new(vaultName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armkeyvault.VaultProperties{ - TenantID: to.Ptr("test-tenant-id"), + TenantID: new("test-tenant-id"), // Private Endpoint in different resource group PrivateEndpointConnections: []*armkeyvault.PrivateEndpointConnectionItem{ { Properties: &armkeyvault.PrivateEndpointConnectionProperties{ PrivateEndpoint: &armkeyvault.PrivateEndpoint{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/different-rg/providers/Microsoft.Network/privateEndpoints/test-pe-diff-rg"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/different-rg/providers/Microsoft.Network/privateEndpoints/test-pe-diff-rg"), }, }, }, @@ -465,12 +472,12 @@ func createAzureKeyVaultCrossRG(vaultName, subscriptionID, resourceGroup string) NetworkACLs: &armkeyvault.NetworkRuleSet{ VirtualNetworkRules: []*armkeyvault.VirtualNetworkRule{ { - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/different-rg/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/test-subnet"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/different-rg/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/test-subnet"), }, }, }, // Managed HSM in 
different resource group - HsmPoolResourceID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/hsm-rg/providers/Microsoft.KeyVault/managedHSMs/test-managed-hsm"), + HsmPoolResourceID: new("/subscriptions/" + subscriptionID + "/resourceGroups/hsm-rg/providers/Microsoft.KeyVault/managedHSMs/test-managed-hsm"), }, } } diff --git a/sources/azure/manual/links_helpers.go b/sources/azure/manual/links_helpers.go index 46302efc..fc11304e 100644 --- a/sources/azure/manual/links_helpers.go +++ b/sources/azure/manual/links_helpers.go @@ -2,6 +2,7 @@ package manual import ( "net" + "slices" "strings" "github.com/overmindtech/cli/go/sdp-go" @@ -21,10 +22,8 @@ func appendLinkIfValid( if value == "" { return } - for _, skip := range skipValues { - if value == skip { - return - } + if slices.Contains(skipValues, value) { + return } if q := createQuery(value); q != nil { *queries = append(*queries, q) @@ -33,13 +32,11 @@ func appendLinkIfValid( // AppendURILinks appends linked item queries for a URI: HTTP link plus DNS or IP link from the host (with deduplication). // It mutates linkedItemQueries and the dedupe maps. Skips empty or non-http(s) URIs. -// blastIn and blastOut set BlastPropagation for the added HTTP/DNS/IP links. 
func AppendURILinks( linkedItemQueries *[]*sdp.LinkedItemQuery, uri string, linkedDNSHostnames map[string]struct{}, seenIPs map[string]struct{}, - blastIn, blastOut bool, ) { if uri == "" || (!strings.HasPrefix(uri, "http://") && !strings.HasPrefix(uri, "https://")) { return diff --git a/sources/azure/manual/managedidentity-user-assigned-identity_test.go b/sources/azure/manual/managedidentity-user-assigned-identity_test.go index 219dd86f..c4e6ff92 100644 --- a/sources/azure/manual/managedidentity-user-assigned-identity_test.go +++ b/sources/azure/manual/managedidentity-user-assigned-identity_test.go @@ -6,7 +6,6 @@ import ( "sync" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi" "go.uber.org/mock/gomock" @@ -136,14 +135,14 @@ func TestManagedIdentityUserAssignedIdentity(t *testing.T) { identity1 := createAzureUserAssignedIdentity("test-identity-1") identity2 := &armmsi.Identity{ Name: nil, // Identity with nil name should be skipped - Location: to.Ptr("eastus"), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armmsi.UserAssignedIdentityProperties{ - ClientID: to.Ptr("test-client-id-2"), - PrincipalID: to.Ptr("test-principal-id-2"), - TenantID: to.Ptr("test-tenant-id"), + ClientID: new("test-client-id-2"), + PrincipalID: new("test-principal-id-2"), + TenantID: new("test-tenant-id"), }, } @@ -299,16 +298,16 @@ func TestManagedIdentityUserAssignedIdentity(t *testing.T) { // createAzureUserAssignedIdentity creates a mock Azure User Assigned Identity for testing func createAzureUserAssignedIdentity(identityName string) *armmsi.Identity { return &armmsi.Identity{ - Name: to.Ptr(identityName), - Location: to.Ptr("eastus"), + Name: new(identityName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), - "project": to.Ptr("testing"), + "env": new("test"), + "project": new("testing"), }, Properties: 
&armmsi.UserAssignedIdentityProperties{ - ClientID: to.Ptr("test-client-id"), - PrincipalID: to.Ptr("test-principal-id"), - TenantID: to.Ptr("test-tenant-id"), + ClientID: new("test-client-id"), + PrincipalID: new("test-principal-id"), + TenantID: new("test-tenant-id"), }, } } diff --git a/sources/azure/manual/mock_gallery_application_versions_client_test.go b/sources/azure/manual/mock_gallery_application_versions_client_test.go deleted file mode 100644 index e86f52f3..00000000 --- a/sources/azure/manual/mock_gallery_application_versions_client_test.go +++ /dev/null @@ -1,72 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: sources/azure/clients/gallery-application-versions-client.go -// -// Generated by this command: -// -// mockgen -destination=sources/azure/manual/mock_gallery_application_versions_client_test.go -package=manual -source=sources/azure/clients/gallery-application-versions-client.go -// - -// Package manual is a generated GoMock package. -package manual - -import ( - context "context" - reflect "reflect" - - armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" - clients "github.com/overmindtech/cli/sources/azure/clients" - gomock "go.uber.org/mock/gomock" -) - -// MockGalleryApplicationVersionsClient is a mock of GalleryApplicationVersionsClient interface. -type MockGalleryApplicationVersionsClient struct { - ctrl *gomock.Controller - recorder *MockGalleryApplicationVersionsClientMockRecorder - isgomock struct{} -} - -// MockGalleryApplicationVersionsClientMockRecorder is the mock recorder for MockGalleryApplicationVersionsClient. -type MockGalleryApplicationVersionsClientMockRecorder struct { - mock *MockGalleryApplicationVersionsClient -} - -// NewMockGalleryApplicationVersionsClient creates a new mock instance. 
-func NewMockGalleryApplicationVersionsClient(ctrl *gomock.Controller) *MockGalleryApplicationVersionsClient { - mock := &MockGalleryApplicationVersionsClient{ctrl: ctrl} - mock.recorder = &MockGalleryApplicationVersionsClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockGalleryApplicationVersionsClient) EXPECT() *MockGalleryApplicationVersionsClientMockRecorder { - return m.recorder -} - -// Get mocks base method. -func (m *MockGalleryApplicationVersionsClient) Get(ctx context.Context, resourceGroupName, galleryName, galleryApplicationName, galleryApplicationVersionName string, options *armcompute.GalleryApplicationVersionsClientGetOptions) (armcompute.GalleryApplicationVersionsClientGetResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, galleryName, galleryApplicationName, galleryApplicationVersionName, options) - ret0, _ := ret[0].(armcompute.GalleryApplicationVersionsClientGetResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Get indicates an expected call of Get. -func (mr *MockGalleryApplicationVersionsClientMockRecorder) Get(ctx, resourceGroupName, galleryName, galleryApplicationName, galleryApplicationVersionName, options any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockGalleryApplicationVersionsClient)(nil).Get), ctx, resourceGroupName, galleryName, galleryApplicationName, galleryApplicationVersionName, options) -} - -// NewListByGalleryApplicationPager mocks base method. 
-func (m *MockGalleryApplicationVersionsClient) NewListByGalleryApplicationPager(resourceGroupName, galleryName, galleryApplicationName string, options *armcompute.GalleryApplicationVersionsClientListByGalleryApplicationOptions) clients.GalleryApplicationVersionsPager { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewListByGalleryApplicationPager", resourceGroupName, galleryName, galleryApplicationName, options) - ret0, _ := ret[0].(clients.GalleryApplicationVersionsPager) - return ret0 -} - -// NewListByGalleryApplicationPager indicates an expected call of NewListByGalleryApplicationPager. -func (mr *MockGalleryApplicationVersionsClientMockRecorder) NewListByGalleryApplicationPager(resourceGroupName, galleryName, galleryApplicationName, options any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewListByGalleryApplicationPager", reflect.TypeOf((*MockGalleryApplicationVersionsClient)(nil).NewListByGalleryApplicationPager), resourceGroupName, galleryName, galleryApplicationName, options) -} diff --git a/sources/azure/manual/mock_gallery_images_client_test.go b/sources/azure/manual/mock_gallery_images_client_test.go deleted file mode 100644 index 58709319..00000000 --- a/sources/azure/manual/mock_gallery_images_client_test.go +++ /dev/null @@ -1,72 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: sources/azure/clients/gallery-images-client.go -// -// Generated by this command: -// -// mockgen -destination=sources/azure/manual/mock_gallery_images_client_test.go -package=manual -source=sources/azure/clients/gallery-images-client.go -// - -// Package manual is a generated GoMock package. 
-package manual - -import ( - context "context" - reflect "reflect" - - armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" - clients "github.com/overmindtech/cli/sources/azure/clients" - gomock "go.uber.org/mock/gomock" -) - -// MockGalleryImagesClient is a mock of GalleryImagesClient interface. -type MockGalleryImagesClient struct { - ctrl *gomock.Controller - recorder *MockGalleryImagesClientMockRecorder - isgomock struct{} -} - -// MockGalleryImagesClientMockRecorder is the mock recorder for MockGalleryImagesClient. -type MockGalleryImagesClientMockRecorder struct { - mock *MockGalleryImagesClient -} - -// NewMockGalleryImagesClient creates a new mock instance. -func NewMockGalleryImagesClient(ctrl *gomock.Controller) *MockGalleryImagesClient { - mock := &MockGalleryImagesClient{ctrl: ctrl} - mock.recorder = &MockGalleryImagesClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockGalleryImagesClient) EXPECT() *MockGalleryImagesClientMockRecorder { - return m.recorder -} - -// Get mocks base method. -func (m *MockGalleryImagesClient) Get(ctx context.Context, resourceGroupName, galleryName, galleryImageName string, options *armcompute.GalleryImagesClientGetOptions) (armcompute.GalleryImagesClientGetResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, galleryName, galleryImageName, options) - ret0, _ := ret[0].(armcompute.GalleryImagesClientGetResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Get indicates an expected call of Get. 
-func (mr *MockGalleryImagesClientMockRecorder) Get(ctx, resourceGroupName, galleryName, galleryImageName, options any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockGalleryImagesClient)(nil).Get), ctx, resourceGroupName, galleryName, galleryImageName, options) -} - -// NewListByGalleryPager mocks base method. -func (m *MockGalleryImagesClient) NewListByGalleryPager(resourceGroupName, galleryName string, options *armcompute.GalleryImagesClientListByGalleryOptions) clients.GalleryImagesPager { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewListByGalleryPager", resourceGroupName, galleryName, options) - ret0, _ := ret[0].(clients.GalleryImagesPager) - return ret0 -} - -// NewListByGalleryPager indicates an expected call of NewListByGalleryPager. -func (mr *MockGalleryImagesClientMockRecorder) NewListByGalleryPager(resourceGroupName, galleryName, options any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewListByGalleryPager", reflect.TypeOf((*MockGalleryImagesClient)(nil).NewListByGalleryPager), resourceGroupName, galleryName, options) -} diff --git a/sources/azure/manual/network-application-gateway.go b/sources/azure/manual/network-application-gateway.go index a4511f87..97fd89cf 100644 --- a/sources/azure/manual/network-application-gateway.go +++ b/sources/azure/manual/network-application-gateway.go @@ -4,7 +4,7 @@ import ( "context" "errors" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" "github.com/overmindtech/cli/go/sdpcache" diff --git a/sources/azure/manual/network-application-gateway_test.go b/sources/azure/manual/network-application-gateway_test.go index 17bd4d56..9bb62f88 100644 --- 
a/sources/azure/manual/network-application-gateway_test.go +++ b/sources/azure/manual/network-application-gateway_test.go @@ -7,8 +7,7 @@ import ( "reflect" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" "go.uber.org/mock/gomock" "github.com/overmindtech/cli/go/discovery" @@ -245,9 +244,9 @@ func TestNetworkApplicationGateway(t *testing.T) { t.Run("Get_WithNilName", func(t *testing.T) { applicationGateway := &armnetwork.ApplicationGateway{ Name: nil, // Application Gateway with nil name should cause an error - Location: to.Ptr("eastus"), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, } @@ -337,9 +336,9 @@ func TestNetworkApplicationGateway(t *testing.T) { ag1 := createAzureApplicationGateway("test-ag-1", subscriptionID, resourceGroup) ag2 := &armnetwork.ApplicationGateway{ Name: nil, // Application Gateway with nil name should be skipped - Location: to.Ptr("eastus"), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, } @@ -505,7 +504,7 @@ func TestNetworkApplicationGateway(t *testing.T) { } // Verify PredefinedRole - if roleInterface, ok := interface{}(wrapper).(interface{ PredefinedRole() string }); ok { + if roleInterface, ok := any(wrapper).(interface{ PredefinedRole() string }); ok { role := roleInterface.PredefinedRole() if role != "Reader" { t.Errorf("Expected PredefinedRole to be 'Reader', got %s", role) @@ -545,7 +544,7 @@ func (m *MockApplicationGatewaysPager) More() bool { func (mr *MockApplicationGatewaysPagerMockRecorder) More() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "More", reflect.TypeOf((*MockApplicationGatewaysPager)(nil).More)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "More", reflect.TypeFor[func() 
bool]()) } func (m *MockApplicationGatewaysPager) NextPage(ctx context.Context) (armnetwork.ApplicationGatewaysClientListResponse, error) { @@ -556,28 +555,28 @@ func (m *MockApplicationGatewaysPager) NextPage(ctx context.Context) (armnetwork return ret0, ret1 } -func (mr *MockApplicationGatewaysPagerMockRecorder) NextPage(ctx interface{}) *gomock.Call { +func (mr *MockApplicationGatewaysPagerMockRecorder) NextPage(ctx any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NextPage", reflect.TypeOf((*MockApplicationGatewaysPager)(nil).NextPage), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NextPage", reflect.TypeFor[func(ctx context.Context) (armnetwork.ApplicationGatewaysClientListResponse, error)](), ctx) } // createAzureApplicationGateway creates a mock Azure Application Gateway for testing func createAzureApplicationGateway(agName, subscriptionID, resourceGroup string) *armnetwork.ApplicationGateway { return &armnetwork.ApplicationGateway{ - Name: to.Ptr(agName), - Location: to.Ptr("eastus"), + Name: new(agName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), - "project": to.Ptr("testing"), + "env": new("test"), + "project": new("testing"), }, Properties: &armnetwork.ApplicationGatewayPropertiesFormat{ // GatewayIPConfigurations (Child Resource) GatewayIPConfigurations: []*armnetwork.ApplicationGatewayIPConfiguration{ { - Name: to.Ptr("gateway-ip-config"), + Name: new("gateway-ip-config"), Properties: &armnetwork.ApplicationGatewayIPConfigurationPropertiesFormat{ Subnet: &armnetwork.SubResource{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/test-subnet"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/test-subnet"), }, }, }, @@ -585,23 +584,23 @@ func 
createAzureApplicationGateway(agName, subscriptionID, resourceGroup string) // FrontendIPConfigurations (Child Resource) FrontendIPConfigurations: []*armnetwork.ApplicationGatewayFrontendIPConfiguration{ { - Name: to.Ptr("frontend-ip-config"), + Name: new("frontend-ip-config"), Properties: &armnetwork.ApplicationGatewayFrontendIPConfigurationPropertiesFormat{ PublicIPAddress: &armnetwork.SubResource{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/publicIPAddresses/test-public-ip"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/publicIPAddresses/test-public-ip"), }, - PrivateIPAddress: to.Ptr("10.2.0.5"), + PrivateIPAddress: new("10.2.0.5"), }, }, }, // BackendAddressPools (Child Resource) BackendAddressPools: []*armnetwork.ApplicationGatewayBackendAddressPool{ { - Name: to.Ptr("backend-pool"), + Name: new("backend-pool"), Properties: &armnetwork.ApplicationGatewayBackendAddressPoolPropertiesFormat{ BackendAddresses: []*armnetwork.ApplicationGatewayBackendAddress{ { - IPAddress: to.Ptr("10.0.1.4"), + IPAddress: new("10.0.1.4"), }, }, }, @@ -610,76 +609,76 @@ func createAzureApplicationGateway(agName, subscriptionID, resourceGroup string) // HTTPListeners (Child Resource) HTTPListeners: []*armnetwork.ApplicationGatewayHTTPListener{ { - Name: to.Ptr("http-listener"), + Name: new("http-listener"), }, }, // BackendHTTPSettingsCollection (Child Resource) BackendHTTPSettingsCollection: []*armnetwork.ApplicationGatewayBackendHTTPSettings{ { - Name: to.Ptr("backend-http-settings"), + Name: new("backend-http-settings"), }, }, // RequestRoutingRules (Child Resource) RequestRoutingRules: []*armnetwork.ApplicationGatewayRequestRoutingRule{ { - Name: to.Ptr("routing-rule"), + Name: new("routing-rule"), }, }, // Probes (Child Resource) Probes: []*armnetwork.ApplicationGatewayProbe{ { - Name: to.Ptr("health-probe"), + Name: new("health-probe"), 
}, }, // SSLCertificates (Child Resource) SSLCertificates: []*armnetwork.ApplicationGatewaySSLCertificate{ { - Name: to.Ptr("ssl-cert"), + Name: new("ssl-cert"), Properties: &armnetwork.ApplicationGatewaySSLCertificatePropertiesFormat{ - KeyVaultSecretID: to.Ptr("https://test-keyvault.vault.azure.net/secrets/test-secret/version"), + KeyVaultSecretID: new("https://test-keyvault.vault.azure.net/secrets/test-secret/version"), }, }, }, // URLPathMaps (Child Resource) URLPathMaps: []*armnetwork.ApplicationGatewayURLPathMap{ { - Name: to.Ptr("url-path-map"), + Name: new("url-path-map"), }, }, // AuthenticationCertificates (Child Resource) AuthenticationCertificates: []*armnetwork.ApplicationGatewayAuthenticationCertificate{ { - Name: to.Ptr("auth-cert"), + Name: new("auth-cert"), }, }, // TrustedRootCertificates (Child Resource) TrustedRootCertificates: []*armnetwork.ApplicationGatewayTrustedRootCertificate{ { - Name: to.Ptr("trusted-root-cert"), + Name: new("trusted-root-cert"), Properties: &armnetwork.ApplicationGatewayTrustedRootCertificatePropertiesFormat{ - KeyVaultSecretID: to.Ptr("https://test-trusted-keyvault.vault.azure.net/secrets/test-trusted-secret/version"), + KeyVaultSecretID: new("https://test-trusted-keyvault.vault.azure.net/secrets/test-trusted-secret/version"), }, }, }, // RewriteRuleSets (Child Resource) RewriteRuleSets: []*armnetwork.ApplicationGatewayRewriteRuleSet{ { - Name: to.Ptr("rewrite-rule-set"), + Name: new("rewrite-rule-set"), }, }, // RedirectConfigurations (Child Resource) RedirectConfigurations: []*armnetwork.ApplicationGatewayRedirectConfiguration{ { - Name: to.Ptr("redirect-config"), + Name: new("redirect-config"), }, }, // FirewallPolicy (External Resource) FirewallPolicy: &armnetwork.SubResource{ - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/test-waf-policy"), + ID: new("/subscriptions/" + subscriptionID + 
"/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/test-waf-policy"), }, }, Identity: &armnetwork.ManagedServiceIdentity{ - Type: to.Ptr(armnetwork.ResourceIdentityTypeUserAssigned), + Type: new(armnetwork.ResourceIdentityTypeUserAssigned), UserAssignedIdentities: map[string]*armnetwork.Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties{ "/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-identity": {}, }, @@ -693,10 +692,10 @@ func createAzureApplicationGatewayWithDifferentScopePublicIP(agName, subscriptio // Override FrontendIPConfiguration with PublicIPAddress in different scope ag.Properties.FrontendIPConfigurations = []*armnetwork.ApplicationGatewayFrontendIPConfiguration{ { - Name: to.Ptr("frontend-ip-config"), + Name: new("frontend-ip-config"), Properties: &armnetwork.ApplicationGatewayFrontendIPConfigurationPropertiesFormat{ PublicIPAddress: &armnetwork.SubResource{ - ID: to.Ptr("/subscriptions/" + otherSubscriptionID + "/resourceGroups/" + otherResourceGroup + "/providers/Microsoft.Network/publicIPAddresses/test-public-ip"), + ID: new("/subscriptions/" + otherSubscriptionID + "/resourceGroups/" + otherResourceGroup + "/providers/Microsoft.Network/publicIPAddresses/test-public-ip"), }, }, }, diff --git a/sources/azure/manual/network-application-security-group.go b/sources/azure/manual/network-application-security-group.go new file mode 100644 index 00000000..6aec9794 --- /dev/null +++ b/sources/azure/manual/network-application-security-group.go @@ -0,0 +1,178 @@ +package manual + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" 
+ "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" +) + +var NetworkApplicationSecurityGroupLookupByName = shared.NewItemTypeLookup("name", azureshared.NetworkApplicationSecurityGroup) + +type networkApplicationSecurityGroupWrapper struct { + client clients.ApplicationSecurityGroupsClient + + *azureshared.MultiResourceGroupBase +} + +func NewNetworkApplicationSecurityGroup(client clients.ApplicationSecurityGroupsClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.ListableWrapper { + return &networkApplicationSecurityGroupWrapper{ + client: client, + MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_NETWORK, + azureshared.NetworkApplicationSecurityGroup, + ), + } +} + +func (n networkApplicationSecurityGroupWrapper) List(ctx context.Context, scope string) ([]*sdp.Item, *sdp.QueryError) { + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + pager := n.client.NewListPager(rgScope.ResourceGroup, nil) + + var items []*sdp.Item + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + for _, asg := range page.Value { + if asg.Name == nil { + continue + } + item, sdpErr := n.azureApplicationSecurityGroupToSDPItem(asg, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + return items, nil +} + +func (n networkApplicationSecurityGroupWrapper) ListStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string) { + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, n.Type())) + return + } + pager := 
n.client.NewListPager(rgScope.ResourceGroup, nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, n.Type())) + return + } + for _, asg := range page.Value { + if asg.Name == nil { + continue + } + item, sdpErr := n.azureApplicationSecurityGroupToSDPItem(asg, scope) + if sdpErr != nil { + stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) + stream.SendItem(item) + } + } +} + +func (n networkApplicationSecurityGroupWrapper) azureApplicationSecurityGroupToSDPItem(asg *armnetwork.ApplicationSecurityGroup, scope string) (*sdp.Item, *sdp.QueryError) { + attributes, err := shared.ToAttributesWithExclude(asg, "tags") + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + if asg.Name == nil { + return nil, azureshared.QueryError(errors.New("application security group name is nil"), scope, n.Type()) + } + + sdpItem := &sdp.Item{ + Type: azureshared.NetworkApplicationSecurityGroup.String(), + UniqueAttribute: "name", + Attributes: attributes, + Scope: scope, + Tags: azureshared.ConvertAzureTags(asg.Tags), + LinkedItemQueries: []*sdp.LinkedItemQuery{}, + } + + //no links - https://learn.microsoft.com/en-us/rest/api/virtualnetwork/application-security-groups/get?view=rest-virtualnetwork-2025-05-01&tabs=HTTP + + // Health from provisioning state + if asg.Properties != nil && asg.Properties.ProvisioningState != nil { + switch *asg.Properties.ProvisioningState { + case armnetwork.ProvisioningStateSucceeded: + sdpItem.Health = sdp.Health_HEALTH_OK.Enum() + case armnetwork.ProvisioningStateCreating, armnetwork.ProvisioningStateUpdating, armnetwork.ProvisioningStateDeleting: + sdpItem.Health = sdp.Health_HEALTH_PENDING.Enum() + case armnetwork.ProvisioningStateFailed, armnetwork.ProvisioningStateCanceled: + sdpItem.Health = sdp.Health_HEALTH_ERROR.Enum() + default: + sdpItem.Health = sdp.Health_HEALTH_UNKNOWN.Enum() + } + 
} + + return sdpItem, nil +} + +// ref: https://learn.microsoft.com/en-us/rest/api/virtualnetwork/application-security-groups/get +func (n networkApplicationSecurityGroupWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 1 { + return nil, azureshared.QueryError(errors.New("query must be exactly one part (application security group name)"), scope, n.Type()) + } + asgName := queryParts[0] + if asgName == "" { + return nil, azureshared.QueryError(errors.New("application security group name cannot be empty"), scope, n.Type()) + } + + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + resp, err := n.client.Get(ctx, rgScope.ResourceGroup, asgName, nil) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + return n.azureApplicationSecurityGroupToSDPItem(&resp.ApplicationSecurityGroup, scope) +} + +func (n networkApplicationSecurityGroupWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + NetworkApplicationSecurityGroupLookupByName, + } +} + +func (n networkApplicationSecurityGroupWrapper) PotentialLinks() map[shared.ItemType]bool { + return map[shared.ItemType]bool{} +} + +// ref: https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/application_security_group +func (n networkApplicationSecurityGroupWrapper) TerraformMappings() []*sdp.TerraformMapping { + return []*sdp.TerraformMapping{ + { + TerraformMethod: sdp.QueryMethod_GET, + TerraformQueryMap: "azurerm_application_security_group.name", + }, + } +} + +// ref: https://learn.microsoft.com/en-us/azure/role-based-access-control/resource-provider-operations#microsoftnetwork +func (n networkApplicationSecurityGroupWrapper) IAMPermissions() []string { + return []string{ + "Microsoft.Network/applicationSecurityGroups/read", + } +} + +func (n networkApplicationSecurityGroupWrapper) 
PredefinedRole() string { + return "Reader" +} diff --git a/sources/azure/manual/network-application-security-group_test.go b/sources/azure/manual/network-application-security-group_test.go new file mode 100644 index 00000000..7fb3c306 --- /dev/null +++ b/sources/azure/manual/network-application-security-group_test.go @@ -0,0 +1,344 @@ +package manual_test + +import ( + "context" + "errors" + "slices" + "sync" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + "go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + "github.com/overmindtech/cli/sources/shared" +) + +func TestNetworkApplicationSecurityGroup(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + + t.Run("Get", func(t *testing.T) { + asgName := "test-asg" + asg := createAzureApplicationSecurityGroup(asgName) + + mockClient := mocks.NewMockApplicationSecurityGroupsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, asgName, nil).Return( + armnetwork.ApplicationSecurityGroupsClientGetResponse{ + ApplicationSecurityGroup: *asg, + }, nil) + + wrapper := manual.NewNetworkApplicationSecurityGroup(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], asgName, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != 
azureshared.NetworkApplicationSecurityGroup.String() { + t.Errorf("Expected type %s, got %s", azureshared.NetworkApplicationSecurityGroup, sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "name" { + t.Errorf("Expected unique attribute 'name', got %s", sdpItem.GetUniqueAttribute()) + } + + if sdpItem.UniqueAttributeValue() != asgName { + t.Errorf("Expected unique attribute value %s, got %s", asgName, sdpItem.UniqueAttributeValue()) + } + + if sdpItem.GetTags()["env"] != "test" { + t.Errorf("Expected tag 'env=test', got: %v", sdpItem.GetTags()["env"]) + } + + t.Run("StaticTests", func(t *testing.T) { + // Application Security Group has no linked item queries + queryTests := shared.QueryTests{} + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("GetWithEmptyName", func(t *testing.T) { + mockClient := mocks.NewMockApplicationSecurityGroupsClient(ctrl) + + wrapper := manual.NewNetworkApplicationSecurityGroup(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], "", true) + if qErr == nil { + t.Error("Expected error when application security group name is empty, but got nil") + } + }) + + t.Run("Get_ASGWithNilName", func(t *testing.T) { + provisioningState := armnetwork.ProvisioningStateSucceeded + asgWithNilName := &armnetwork.ApplicationSecurityGroup{ + Name: nil, + Location: new("eastus"), + Properties: &armnetwork.ApplicationSecurityGroupPropertiesFormat{ + ProvisioningState: &provisioningState, + }, + } + + mockClient := mocks.NewMockApplicationSecurityGroupsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, "test-asg", nil).Return( + armnetwork.ApplicationSecurityGroupsClientGetResponse{ + ApplicationSecurityGroup: *asgWithNilName, + }, nil) + + wrapper := manual.NewNetworkApplicationSecurityGroup(mockClient, 
[]azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], "test-asg", true) + if qErr == nil { + t.Error("Expected error when application security group has nil name, but got nil") + } + }) + + t.Run("List", func(t *testing.T) { + asg1 := createAzureApplicationSecurityGroup("asg-1") + asg2 := createAzureApplicationSecurityGroup("asg-2") + + mockClient := mocks.NewMockApplicationSecurityGroupsClient(ctrl) + mockPager := newMockApplicationSecurityGroupsPager(ctrl, []*armnetwork.ApplicationSecurityGroup{asg1, asg2}) + + mockClient.EXPECT().NewListPager(resourceGroup, nil).Return(mockPager) + + wrapper := manual.NewNetworkApplicationSecurityGroup(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + listable, ok := adapter.(discovery.ListableAdapter) + if !ok { + t.Fatalf("Adapter does not support List operation") + } + + sdpItems, err := listable.List(ctx, wrapper.Scopes()[0], true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 2 { + t.Fatalf("Expected 2 items, got: %d", len(sdpItems)) + } + + for _, item := range sdpItems { + if item.Validate() != nil { + t.Fatalf("Expected no validation error, got: %v", item.Validate()) + } + if item.GetType() != azureshared.NetworkApplicationSecurityGroup.String() { + t.Fatalf("Expected type %s, got: %s", azureshared.NetworkApplicationSecurityGroup, item.GetType()) + } + } + }) + + t.Run("List_WithNilName", func(t *testing.T) { + asg1 := createAzureApplicationSecurityGroup("asg-1") + provisioningState := armnetwork.ProvisioningStateSucceeded + asg2NilName := &armnetwork.ApplicationSecurityGroup{ + Name: nil, + Location: new("eastus"), + Tags: map[string]*string{"env": new("test")}, + 
Properties: &armnetwork.ApplicationSecurityGroupPropertiesFormat{ + ProvisioningState: &provisioningState, + }, + } + + mockClient := mocks.NewMockApplicationSecurityGroupsClient(ctrl) + mockPager := newMockApplicationSecurityGroupsPager(ctrl, []*armnetwork.ApplicationSecurityGroup{asg1, asg2NilName}) + + mockClient.EXPECT().NewListPager(resourceGroup, nil).Return(mockPager) + + wrapper := manual.NewNetworkApplicationSecurityGroup(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + listable, ok := adapter.(discovery.ListableAdapter) + if !ok { + t.Fatalf("Adapter does not support List operation") + } + + sdpItems, err := listable.List(ctx, wrapper.Scopes()[0], true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 1 { + t.Fatalf("Expected 1 item (nil name skipped), got: %d", len(sdpItems)) + } + if sdpItems[0].UniqueAttributeValue() != "asg-1" { + t.Errorf("Expected item name 'asg-1', got: %s", sdpItems[0].UniqueAttributeValue()) + } + }) + + t.Run("ListStream", func(t *testing.T) { + asg1 := createAzureApplicationSecurityGroup("stream-asg-1") + asg2 := createAzureApplicationSecurityGroup("stream-asg-2") + + mockClient := mocks.NewMockApplicationSecurityGroupsClient(ctrl) + mockPager := newMockApplicationSecurityGroupsPager(ctrl, []*armnetwork.ApplicationSecurityGroup{asg1, asg2}) + + mockClient.EXPECT().NewListPager(resourceGroup, nil).Return(mockPager) + + wrapper := manual.NewNetworkApplicationSecurityGroup(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + wg := &sync.WaitGroup{} + wg.Add(2) + + var items []*sdp.Item + mockItemHandler := func(item *sdp.Item) { + items = append(items, item) + wg.Done() + } + var errs []error + mockErrorHandler := 
func(err error) { + errs = append(errs, err) + } + stream := discovery.NewQueryResultStream(mockItemHandler, mockErrorHandler) + + listStreamable, ok := adapter.(discovery.ListStreamableAdapter) + if !ok { + t.Fatalf("Adapter does not support ListStream operation") + } + + listStreamable.ListStream(ctx, wrapper.Scopes()[0], true, stream) + wg.Wait() + + if len(errs) != 0 { + t.Fatalf("Expected no errors, got: %v", errs) + } + if len(items) != 2 { + t.Fatalf("Expected 2 items, got: %d", len(items)) + } + }) + + t.Run("ErrorHandling", func(t *testing.T) { + expectedErr := errors.New("application security group not found") + + mockClient := mocks.NewMockApplicationSecurityGroupsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, "nonexistent-asg", nil).Return( + armnetwork.ApplicationSecurityGroupsClientGetResponse{}, expectedErr) + + wrapper := manual.NewNetworkApplicationSecurityGroup(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], "nonexistent-asg", true) + if qErr == nil { + t.Error("Expected error when getting non-existent application security group, but got nil") + } + }) + + t.Run("InterfaceCompliance", func(t *testing.T) { + mockClient := mocks.NewMockApplicationSecurityGroupsClient(ctrl) + wrapper := manual.NewNetworkApplicationSecurityGroup(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + w := wrapper.(sources.Wrapper) + + permissions := w.IAMPermissions() + if len(permissions) == 0 { + t.Error("Expected IAMPermissions to return at least one permission") + } + expectedPermission := "Microsoft.Network/applicationSecurityGroups/read" + if !slices.Contains(permissions, expectedPermission) { + t.Errorf("Expected IAMPermissions to include %s", expectedPermission) + } + + mappings := w.TerraformMappings() 
+ foundMapping := false + for _, mapping := range mappings { + if mapping.GetTerraformQueryMap() == "azurerm_application_security_group.name" { + foundMapping = true + if mapping.GetTerraformMethod() != sdp.QueryMethod_GET { + t.Errorf("Expected TerraformMethod GET, got: %s", mapping.GetTerraformMethod()) + } + break + } + } + if !foundMapping { + t.Error("Expected TerraformMappings to include 'azurerm_application_security_group.name'") + } + + lookups := w.GetLookups() + foundLookup := false + for _, lookup := range lookups { + if lookup.ItemType == azureshared.NetworkApplicationSecurityGroup { + foundLookup = true + break + } + } + if !foundLookup { + t.Error("Expected GetLookups to include NetworkApplicationSecurityGroup") + } + }) +} + +type mockApplicationSecurityGroupsPager struct { + ctrl *gomock.Controller + items []*armnetwork.ApplicationSecurityGroup + index int + more bool +} + +func newMockApplicationSecurityGroupsPager(ctrl *gomock.Controller, items []*armnetwork.ApplicationSecurityGroup) clients.ApplicationSecurityGroupsPager { + return &mockApplicationSecurityGroupsPager{ + ctrl: ctrl, + items: items, + index: 0, + more: len(items) > 0, + } +} + +func (m *mockApplicationSecurityGroupsPager) More() bool { + return m.more +} + +func (m *mockApplicationSecurityGroupsPager) NextPage(ctx context.Context) (armnetwork.ApplicationSecurityGroupsClientListResponse, error) { + if m.index >= len(m.items) { + m.more = false + return armnetwork.ApplicationSecurityGroupsClientListResponse{ + ApplicationSecurityGroupListResult: armnetwork.ApplicationSecurityGroupListResult{ + Value: []*armnetwork.ApplicationSecurityGroup{}, + }, + }, nil + } + item := m.items[m.index] + m.index++ + m.more = m.index < len(m.items) + return armnetwork.ApplicationSecurityGroupsClientListResponse{ + ApplicationSecurityGroupListResult: armnetwork.ApplicationSecurityGroupListResult{ + Value: []*armnetwork.ApplicationSecurityGroup{item}, + }, + }, nil +} + +func 
createAzureApplicationSecurityGroup(name string) *armnetwork.ApplicationSecurityGroup { + provisioningState := armnetwork.ProvisioningStateSucceeded + return &armnetwork.ApplicationSecurityGroup{ + ID: new("/subscriptions/test-sub/resourceGroups/test-rg/providers/Microsoft.Network/applicationSecurityGroups/" + name), + Name: new(name), + Type: new("Microsoft.Network/applicationSecurityGroups"), + Location: new("eastus"), + Tags: map[string]*string{ + "env": new("test"), + "project": new("testing"), + }, + Properties: &armnetwork.ApplicationSecurityGroupPropertiesFormat{ + ProvisioningState: &provisioningState, + ResourceGUID: new("00000000-0000-0000-0000-000000000001"), + }, + } +} + +// Ensure mockApplicationSecurityGroupsPager satisfies the pager interface at compile time. +var _ clients.ApplicationSecurityGroupsPager = (*mockApplicationSecurityGroupsPager)(nil) diff --git a/sources/azure/manual/network-ddos-protection-plan.go b/sources/azure/manual/network-ddos-protection-plan.go new file mode 100644 index 00000000..0e65fd1a --- /dev/null +++ b/sources/azure/manual/network-ddos-protection-plan.go @@ -0,0 +1,225 @@ +package manual + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" +) + +var NetworkDdosProtectionPlanLookupByName = shared.NewItemTypeLookup("name", azureshared.NetworkDdosProtectionPlan) + +type networkDdosProtectionPlanWrapper struct { + client clients.DdosProtectionPlansClient + + *azureshared.MultiResourceGroupBase +} + +func NewNetworkDdosProtectionPlan(client clients.DdosProtectionPlansClient, resourceGroupScopes []azureshared.ResourceGroupScope) 
sources.ListableWrapper { + return &networkDdosProtectionPlanWrapper{ + client: client, + MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_NETWORK, + azureshared.NetworkDdosProtectionPlan, + ), + } +} + +// ref: https://learn.microsoft.com/en-us/rest/api/virtualnetwork/ddos-protection-plans/list-by-resource-group +func (n networkDdosProtectionPlanWrapper) List(ctx context.Context, scope string) ([]*sdp.Item, *sdp.QueryError) { + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + pager := n.client.NewListByResourceGroupPager(rgScope.ResourceGroup, nil) + + var items []*sdp.Item + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + for _, plan := range page.Value { + if plan.Name == nil { + continue + } + item, sdpErr := n.azureDdosProtectionPlanToSDPItem(plan, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + return items, nil +} + +func (n networkDdosProtectionPlanWrapper) ListStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string) { + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, n.Type())) + return + } + pager := n.client.NewListByResourceGroupPager(rgScope.ResourceGroup, nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, n.Type())) + return + } + for _, plan := range page.Value { + if plan.Name == nil { + continue + } + item, sdpErr := n.azureDdosProtectionPlanToSDPItem(plan, scope) + if sdpErr != nil { + stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) + stream.SendItem(item) + } + } +} + 
+// ref: https://learn.microsoft.com/en-us/rest/api/virtualnetwork/ddos-protection-plans/get +func (n networkDdosProtectionPlanWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) != 1 { + return nil, azureshared.QueryError(errors.New("query must be exactly one part (DDoS protection plan name)"), scope, n.Type()) + } + planName := queryParts[0] + if planName == "" { + return nil, azureshared.QueryError(errors.New("DDoS protection plan name cannot be empty"), scope, n.Type()) + } + + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + resp, err := n.client.Get(ctx, rgScope.ResourceGroup, planName, nil) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + return n.azureDdosProtectionPlanToSDPItem(&resp.DdosProtectionPlan, scope) +} + +func (n networkDdosProtectionPlanWrapper) azureDdosProtectionPlanToSDPItem(plan *armnetwork.DdosProtectionPlan, scope string) (*sdp.Item, *sdp.QueryError) { + if plan.Name == nil { + return nil, azureshared.QueryError(errors.New("DDoS protection plan name is nil"), scope, n.Type()) + } + + attributes, err := shared.ToAttributesWithExclude(plan, "tags") + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + + sdpItem := &sdp.Item{ + Type: azureshared.NetworkDdosProtectionPlan.String(), + UniqueAttribute: "name", + Attributes: attributes, + Scope: scope, + Tags: azureshared.ConvertAzureTags(plan.Tags), + LinkedItemQueries: []*sdp.LinkedItemQuery{}, + } + + if plan.Properties != nil { + // Link to each associated virtual network + for _, ref := range plan.Properties.VirtualNetworks { + if ref != nil && ref.ID != nil { + vnetID := *ref.ID + vnetName := azureshared.ExtractResourceName(vnetID) + if vnetName != "" { + linkedScope := azureshared.ExtractScopeFromResourceID(vnetID) + if linkedScope == "" { + linkedScope = scope + } + 
sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkVirtualNetwork.String(), + Method: sdp.QueryMethod_GET, + Query: vnetName, + Scope: linkedScope, + }, + }) + } + } + } + // Link to each associated public IP address + for _, ref := range plan.Properties.PublicIPAddresses { + if ref != nil && ref.ID != nil { + publicIPID := *ref.ID + publicIPName := azureshared.ExtractResourceName(publicIPID) + if publicIPName != "" { + linkedScope := azureshared.ExtractScopeFromResourceID(publicIPID) + if linkedScope == "" { + linkedScope = scope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkPublicIPAddress.String(), + Method: sdp.QueryMethod_GET, + Query: publicIPName, + Scope: linkedScope, + }, + }) + } + } + } + } + + // Health from provisioning state + if plan.Properties != nil && plan.Properties.ProvisioningState != nil { + switch *plan.Properties.ProvisioningState { + case armnetwork.ProvisioningStateSucceeded: + sdpItem.Health = sdp.Health_HEALTH_OK.Enum() + case armnetwork.ProvisioningStateCreating, armnetwork.ProvisioningStateUpdating, armnetwork.ProvisioningStateDeleting: + sdpItem.Health = sdp.Health_HEALTH_PENDING.Enum() + case armnetwork.ProvisioningStateFailed, armnetwork.ProvisioningStateCanceled: + sdpItem.Health = sdp.Health_HEALTH_ERROR.Enum() + default: + sdpItem.Health = sdp.Health_HEALTH_UNKNOWN.Enum() + } + } + + return sdpItem, nil +} + +func (n networkDdosProtectionPlanWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + NetworkDdosProtectionPlanLookupByName, + } +} + +func (n networkDdosProtectionPlanWrapper) PotentialLinks() map[shared.ItemType]bool { + return map[shared.ItemType]bool{ + azureshared.NetworkVirtualNetwork: true, + azureshared.NetworkPublicIPAddress: true, + } +} + +func (n networkDdosProtectionPlanWrapper) TerraformMappings() 
[]*sdp.TerraformMapping { + return []*sdp.TerraformMapping{ + { + TerraformMethod: sdp.QueryMethod_GET, + TerraformQueryMap: "azurerm_network_ddos_protection_plan.name", + }, + } +} + +// https://learn.microsoft.com/en-us/azure/role-based-access-control/resource-provider-operations#microsoftnetwork +func (n networkDdosProtectionPlanWrapper) IAMPermissions() []string { + return []string{ + "Microsoft.Network/ddosProtectionPlans/read", + } +} + +func (n networkDdosProtectionPlanWrapper) PredefinedRole() string { + return "Reader" +} diff --git a/sources/azure/manual/network-ddos-protection-plan_test.go b/sources/azure/manual/network-ddos-protection-plan_test.go new file mode 100644 index 00000000..f8274bd6 --- /dev/null +++ b/sources/azure/manual/network-ddos-protection-plan_test.go @@ -0,0 +1,397 @@ +package manual_test + +import ( + "context" + "errors" + "slices" + "sync" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + "go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + "github.com/overmindtech/cli/sources/shared" +) + +func TestNetworkDdosProtectionPlan(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + + t.Run("Get", func(t *testing.T) { + planName := "test-ddos-plan" + plan := createAzureDdosProtectionPlan(planName) + + mockClient := mocks.NewMockDdosProtectionPlansClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, planName, nil).Return( + armnetwork.DdosProtectionPlansClientGetResponse{ + DdosProtectionPlan: 
*plan, + }, nil) + + wrapper := manual.NewNetworkDdosProtectionPlan(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], planName, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.NetworkDdosProtectionPlan.String() { + t.Errorf("Expected type %s, got %s", azureshared.NetworkDdosProtectionPlan.String(), sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "name" { + t.Errorf("Expected unique attribute 'name', got %s", sdpItem.GetUniqueAttribute()) + } + + if sdpItem.UniqueAttributeValue() != planName { + t.Errorf("Expected unique attribute value %s, got %s", planName, sdpItem.UniqueAttributeValue()) + } + + if sdpItem.GetTags()["env"] != "test" { + t.Errorf("Expected tag 'env=test', got: %v", sdpItem.GetTags()["env"]) + } + + t.Run("StaticTests", func(t *testing.T) { + queryTests := shared.QueryTests{} + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("Get_WithLinkedResources", func(t *testing.T) { + planName := "test-ddos-plan-with-links" + plan := createAzureDdosProtectionPlanWithLinks(planName, subscriptionID, resourceGroup) + + mockClient := mocks.NewMockDdosProtectionPlansClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, planName, nil).Return( + armnetwork.DdosProtectionPlansClientGetResponse{ + DdosProtectionPlan: *plan, + }, nil) + + wrapper := manual.NewNetworkDdosProtectionPlan(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], planName, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + t.Run("StaticTests", func(t *testing.T) { + scope := 
subscriptionID + "." + resourceGroup + queryTests := shared.QueryTests{ + { + ExpectedType: azureshared.NetworkVirtualNetwork.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-vnet", + ExpectedScope: scope, + }, + { + ExpectedType: azureshared.NetworkPublicIPAddress.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-public-ip", + ExpectedScope: scope, + }, + } + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("GetWithEmptyName", func(t *testing.T) { + mockClient := mocks.NewMockDdosProtectionPlansClient(ctrl) + + wrapper := manual.NewNetworkDdosProtectionPlan(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], "", true) + if qErr == nil { + t.Error("Expected error when DDoS protection plan name is empty, but got nil") + } + }) + + t.Run("Get_PlanWithNilName", func(t *testing.T) { + provisioningState := armnetwork.ProvisioningStateSucceeded + planWithNilName := &armnetwork.DdosProtectionPlan{ + Name: nil, + Location: new("eastus"), + Properties: &armnetwork.DdosProtectionPlanPropertiesFormat{ + ProvisioningState: &provisioningState, + }, + } + + mockClient := mocks.NewMockDdosProtectionPlansClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, "test-plan", nil).Return( + armnetwork.DdosProtectionPlansClientGetResponse{ + DdosProtectionPlan: *planWithNilName, + }, nil) + + wrapper := manual.NewNetworkDdosProtectionPlan(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], "test-plan", true) + if qErr == nil { + t.Error("Expected error when DDoS protection plan has nil name, but got nil") + } + }) + + t.Run("List", func(t *testing.T) 
{ + plan1 := createAzureDdosProtectionPlan("plan-1") + plan2 := createAzureDdosProtectionPlan("plan-2") + + mockClient := mocks.NewMockDdosProtectionPlansClient(ctrl) + mockPager := newMockDdosProtectionPlansPager(ctrl, []*armnetwork.DdosProtectionPlan{plan1, plan2}) + + mockClient.EXPECT().NewListByResourceGroupPager(resourceGroup, nil).Return(mockPager) + + wrapper := manual.NewNetworkDdosProtectionPlan(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + listable, ok := adapter.(discovery.ListableAdapter) + if !ok { + t.Fatalf("Adapter does not support List operation") + } + + sdpItems, err := listable.List(ctx, wrapper.Scopes()[0], true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 2 { + t.Fatalf("Expected 2 items, got: %d", len(sdpItems)) + } + + for _, item := range sdpItems { + if item.Validate() != nil { + t.Fatalf("Expected no validation error, got: %v", item.Validate()) + } + if item.GetType() != azureshared.NetworkDdosProtectionPlan.String() { + t.Fatalf("Expected type %s, got: %s", azureshared.NetworkDdosProtectionPlan.String(), item.GetType()) + } + } + }) + + t.Run("List_WithNilName", func(t *testing.T) { + plan1 := createAzureDdosProtectionPlan("plan-1") + provisioningState := armnetwork.ProvisioningStateSucceeded + plan2NilName := &armnetwork.DdosProtectionPlan{ + Name: nil, + Location: new("eastus"), + Tags: map[string]*string{"env": new("test")}, + Properties: &armnetwork.DdosProtectionPlanPropertiesFormat{ + ProvisioningState: &provisioningState, + }, + } + + mockClient := mocks.NewMockDdosProtectionPlansClient(ctrl) + mockPager := newMockDdosProtectionPlansPager(ctrl, []*armnetwork.DdosProtectionPlan{plan1, plan2NilName}) + + mockClient.EXPECT().NewListByResourceGroupPager(resourceGroup, nil).Return(mockPager) + + wrapper := 
manual.NewNetworkDdosProtectionPlan(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + listable, ok := adapter.(discovery.ListableAdapter) + if !ok { + t.Fatalf("Adapter does not support List operation") + } + + sdpItems, err := listable.List(ctx, wrapper.Scopes()[0], true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 1 { + t.Fatalf("Expected 1 item (nil name skipped), got: %d", len(sdpItems)) + } + if sdpItems[0].UniqueAttributeValue() != "plan-1" { + t.Errorf("Expected item name 'plan-1', got: %s", sdpItems[0].UniqueAttributeValue()) + } + }) + + t.Run("ListStream", func(t *testing.T) { + plan1 := createAzureDdosProtectionPlan("stream-plan-1") + plan2 := createAzureDdosProtectionPlan("stream-plan-2") + + mockClient := mocks.NewMockDdosProtectionPlansClient(ctrl) + mockPager := newMockDdosProtectionPlansPager(ctrl, []*armnetwork.DdosProtectionPlan{plan1, plan2}) + + mockClient.EXPECT().NewListByResourceGroupPager(resourceGroup, nil).Return(mockPager) + + wrapper := manual.NewNetworkDdosProtectionPlan(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + wg := &sync.WaitGroup{} + wg.Add(2) + + var items []*sdp.Item + mockItemHandler := func(item *sdp.Item) { + items = append(items, item) + wg.Done() + } + var errs []error + mockErrorHandler := func(err error) { + errs = append(errs, err) + } + stream := discovery.NewQueryResultStream(mockItemHandler, mockErrorHandler) + + listStreamable, ok := adapter.(discovery.ListStreamableAdapter) + if !ok { + t.Fatalf("Adapter does not support ListStream operation") + } + + listStreamable.ListStream(ctx, wrapper.Scopes()[0], true, stream) + wg.Wait() + + if len(errs) != 0 { + t.Fatalf("Expected no errors, got: %v", 
errs) + } + if len(items) != 2 { + t.Fatalf("Expected 2 items, got: %d", len(items)) + } + }) + + t.Run("ErrorHandling", func(t *testing.T) { + expectedErr := errors.New("DDoS protection plan not found") + + mockClient := mocks.NewMockDdosProtectionPlansClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, "nonexistent-plan", nil).Return( + armnetwork.DdosProtectionPlansClientGetResponse{}, expectedErr) + + wrapper := manual.NewNetworkDdosProtectionPlan(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], "nonexistent-plan", true) + if qErr == nil { + t.Error("Expected error when getting non-existent DDoS protection plan, but got nil") + } + }) + + t.Run("InterfaceCompliance", func(t *testing.T) { + mockClient := mocks.NewMockDdosProtectionPlansClient(ctrl) + wrapper := manual.NewNetworkDdosProtectionPlan(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + w := wrapper.(sources.Wrapper) + + permissions := w.IAMPermissions() + if len(permissions) == 0 { + t.Error("Expected IAMPermissions to return at least one permission") + } + expectedPermission := "Microsoft.Network/ddosProtectionPlans/read" + if !slices.Contains(permissions, expectedPermission) { + t.Errorf("Expected IAMPermissions to include %s", expectedPermission) + } + + mappings := w.TerraformMappings() + foundMapping := false + for _, mapping := range mappings { + if mapping.GetTerraformQueryMap() == "azurerm_network_ddos_protection_plan.name" { + foundMapping = true + if mapping.GetTerraformMethod() != sdp.QueryMethod_GET { + t.Errorf("Expected TerraformMethod GET, got: %s", mapping.GetTerraformMethod()) + } + break + } + } + if !foundMapping { + t.Error("Expected TerraformMappings to include 'azurerm_network_ddos_protection_plan.name'") + } + + lookups := 
w.GetLookups() + foundLookup := false + for _, lookup := range lookups { + if lookup.ItemType == azureshared.NetworkDdosProtectionPlan { + foundLookup = true + break + } + } + if !foundLookup { + t.Error("Expected GetLookups to include NetworkDdosProtectionPlan") + } + + potentialLinks := w.PotentialLinks() + for _, linkType := range []shared.ItemType{azureshared.NetworkVirtualNetwork, azureshared.NetworkPublicIPAddress} { + if !potentialLinks[linkType] { + t.Errorf("Expected PotentialLinks to include %s", linkType) + } + } + }) +} + +type mockDdosProtectionPlansPager struct { + ctrl *gomock.Controller + items []*armnetwork.DdosProtectionPlan + index int + more bool +} + +func newMockDdosProtectionPlansPager(ctrl *gomock.Controller, items []*armnetwork.DdosProtectionPlan) clients.DdosProtectionPlansPager { + return &mockDdosProtectionPlansPager{ + ctrl: ctrl, + items: items, + index: 0, + more: len(items) > 0, + } +} + +func (m *mockDdosProtectionPlansPager) More() bool { + return m.more +} + +func (m *mockDdosProtectionPlansPager) NextPage(ctx context.Context) (armnetwork.DdosProtectionPlansClientListByResourceGroupResponse, error) { + if m.index >= len(m.items) { + m.more = false + return armnetwork.DdosProtectionPlansClientListByResourceGroupResponse{ + DdosProtectionPlanListResult: armnetwork.DdosProtectionPlanListResult{ + Value: []*armnetwork.DdosProtectionPlan{}, + }, + }, nil + } + item := m.items[m.index] + m.index++ + m.more = m.index < len(m.items) + return armnetwork.DdosProtectionPlansClientListByResourceGroupResponse{ + DdosProtectionPlanListResult: armnetwork.DdosProtectionPlanListResult{ + Value: []*armnetwork.DdosProtectionPlan{item}, + }, + }, nil +} + +func createAzureDdosProtectionPlan(name string) *armnetwork.DdosProtectionPlan { + provisioningState := armnetwork.ProvisioningStateSucceeded + return &armnetwork.DdosProtectionPlan{ + ID: new("/subscriptions/test-sub/resourceGroups/test-rg/providers/Microsoft.Network/ddosProtectionPlans/" + name), 
+ Name: new(name), + Type: new("Microsoft.Network/ddosProtectionPlans"), + Location: new("eastus"), + Tags: map[string]*string{ + "env": new("test"), + "project": new("testing"), + }, + Properties: &armnetwork.DdosProtectionPlanPropertiesFormat{ + ProvisioningState: &provisioningState, + }, + } +} + +func createAzureDdosProtectionPlanWithLinks(name, subscriptionID, resourceGroup string) *armnetwork.DdosProtectionPlan { + plan := createAzureDdosProtectionPlan(name) + plan.Properties.VirtualNetworks = []*armnetwork.SubResource{ + {ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/virtualNetworks/test-vnet")}, + } + plan.Properties.PublicIPAddresses = []*armnetwork.SubResource{ + {ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/publicIPAddresses/test-public-ip")}, + } + return plan +} + +var _ clients.DdosProtectionPlansPager = (*mockDdosProtectionPlansPager)(nil) diff --git a/sources/azure/manual/network-dns-record-set.go b/sources/azure/manual/network-dns-record-set.go new file mode 100644 index 00000000..00869b7e --- /dev/null +++ b/sources/azure/manual/network-dns-record-set.go @@ -0,0 +1,385 @@ +package manual + +import ( + "context" + "errors" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dns/armdns" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" + "github.com/overmindtech/cli/sources/stdlib" +) + +var NetworkDNSRecordSetLookupByRecordType = shared.NewItemTypeLookup("recordType", azureshared.NetworkDNSRecordSet) +var NetworkDNSRecordSetLookupByName = shared.NewItemTypeLookup("name", azureshared.NetworkDNSRecordSet) + +type 
networkDNSRecordSetWrapper struct { + client clients.RecordSetsClient + + *azureshared.MultiResourceGroupBase +} + +func NewNetworkDNSRecordSet(client clients.RecordSetsClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.SearchableWrapper { + return &networkDNSRecordSetWrapper{ + client: client, + MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_NETWORK, + azureshared.NetworkDNSRecordSet, + ), + } +} + +// recordTypeFromResourceType extracts the DNS record type (e.g. "A", "AAAA") from the ARM resource type (e.g. "Microsoft.Network/dnszones/A"). +func recordTypeFromResourceType(resourceType string) string { + if resourceType == "" { + return "" + } + parts := strings.Split(resourceType, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return "" +} + +//ref: https://learn.microsoft.com/en-us/rest/api/dns/record-sets/get?view=rest-dns-2018-05-01&tabs=HTTP +func (n networkDNSRecordSetWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 3 { + return nil, azureshared.QueryError(errors.New("Get requires 3 query parts: zoneName, recordType, and relativeRecordSetName"), scope, n.Type()) + } + zoneName := queryParts[0] + recordTypeStr := queryParts[1] + relativeRecordSetName := queryParts[2] + if zoneName == "" || recordTypeStr == "" || relativeRecordSetName == "" { + return nil, azureshared.QueryError(errors.New("zoneName, recordType and relativeRecordSetName cannot be empty"), scope, n.Type()) + } + recordType := armdns.RecordType(recordTypeStr) + + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + resp, err := n.client.Get(ctx, rgScope.ResourceGroup, zoneName, relativeRecordSetName, recordType, nil) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + return 
n.azureRecordSetToSDPItem(&resp.RecordSet, zoneName, scope) +} + +func (n networkDNSRecordSetWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + NetworkZoneLookupByName, + NetworkDNSRecordSetLookupByRecordType, + NetworkDNSRecordSetLookupByName, + } +} + +func (n networkDNSRecordSetWrapper) Search(ctx context.Context, scope string, queryParts ...string) ([]*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 1 { + return nil, azureshared.QueryError(errors.New("Search requires 1 query part: zoneName"), scope, n.Type()) + } + zoneName := queryParts[0] + if zoneName == "" { + return nil, azureshared.QueryError(errors.New("zoneName cannot be empty"), scope, n.Type()) + } + + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + pager := n.client.NewListAllByDNSZonePager(rgScope.ResourceGroup, zoneName, nil) + + var items []*sdp.Item + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + for _, rs := range page.Value { + if rs == nil || rs.Name == nil { + continue + } + item, sdpErr := n.azureRecordSetToSDPItem(rs, zoneName, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + return items, nil +} + +func (n networkDNSRecordSetWrapper) SearchStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string, queryParts ...string) { + if len(queryParts) < 1 { + stream.SendError(azureshared.QueryError(errors.New("Search requires 1 query part: zoneName"), scope, n.Type())) + return + } + zoneName := queryParts[0] + if zoneName == "" { + stream.SendError(azureshared.QueryError(errors.New("zoneName cannot be empty"), scope, n.Type())) + return + } + + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, 
n.Type())) + return + } + pager := n.client.NewListAllByDNSZonePager(rgScope.ResourceGroup, zoneName, nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, n.Type())) + return + } + for _, rs := range page.Value { + if rs == nil || rs.Name == nil { + continue + } + item, sdpErr := n.azureRecordSetToSDPItem(rs, zoneName, scope) + if sdpErr != nil { + stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) + stream.SendItem(item) + } + } +} + +func (n networkDNSRecordSetWrapper) SearchLookups() []sources.ItemTypeLookups { + return []sources.ItemTypeLookups{ + {NetworkZoneLookupByName}, + } +} + +func (n networkDNSRecordSetWrapper) azureRecordSetToSDPItem(rs *armdns.RecordSet, zoneName, scope string) (*sdp.Item, *sdp.QueryError) { + if rs.Name == nil { + return nil, azureshared.QueryError(errors.New("record set name is nil"), scope, n.Type()) + } + relativeName := *rs.Name + recordTypeStr := "" + if rs.Type != nil { + recordTypeStr = recordTypeFromResourceType(*rs.Type) + } + if recordTypeStr == "" { + return nil, azureshared.QueryError(errors.New("record set type is nil or invalid"), scope, n.Type()) + } + + attributes, err := shared.ToAttributesWithExclude(rs, "tags") + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + + uniqueAttr := shared.CompositeLookupKey(zoneName, recordTypeStr, relativeName) + if err := attributes.Set("uniqueAttr", uniqueAttr); err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + + sdpItem := &sdp.Item{ + Type: azureshared.NetworkDNSRecordSet.String(), + UniqueAttribute: "uniqueAttr", + Attributes: attributes, + Scope: scope, + } + + // Link to parent DNS zone + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkZone.String(), + Method: sdp.QueryMethod_GET, + Query: zoneName, + 
Scope: scope, + }, + }) + + // Link to DNS name (standard library) from FQDN if present + if rs.Properties != nil && rs.Properties.Fqdn != nil && *rs.Properties.Fqdn != "" { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkDNS.String(), + Method: sdp.QueryMethod_SEARCH, + Query: *rs.Properties.Fqdn, + Scope: "global", + }, + }) + } + + // LinkedItemQueries for IP addresses and DNS names in record data + if rs.Properties != nil { + seenIPs := make(map[string]struct{}) + seenDNS := make(map[string]struct{}) + + // A records (IPv4) -> stdlib.NetworkIP, GET, global + for _, a := range rs.Properties.ARecords { + if a != nil && a.IPv4Address != nil && *a.IPv4Address != "" { + ip := *a.IPv4Address + if _, seen := seenIPs[ip]; !seen { + seenIPs[ip] = struct{}{} + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkIP.String(), + Method: sdp.QueryMethod_GET, + Query: ip, + Scope: "global", + }, + }) + } + } + } + // AAAA records (IPv6) -> stdlib.NetworkIP, GET, global + for _, aaaa := range rs.Properties.AaaaRecords { + if aaaa != nil && aaaa.IPv6Address != nil && *aaaa.IPv6Address != "" { + ip := *aaaa.IPv6Address + if _, seen := seenIPs[ip]; !seen { + seenIPs[ip] = struct{}{} + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkIP.String(), + Method: sdp.QueryMethod_GET, + Query: ip, + Scope: "global", + }, + }) + } + } + } + + // DNS names in record data -> stdlib.NetworkDNS, SEARCH, global + appendDNSLink := func(name string) { + if name == "" { + return + } + if _, seen := seenDNS[name]; !seen { + seenDNS[name] = struct{}{} + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkDNS.String(), + Method: sdp.QueryMethod_SEARCH, + Query: name, + Scope: "global", + }, + }) + 
} + } + if rs.Properties.CnameRecord != nil && rs.Properties.CnameRecord.Cname != nil && *rs.Properties.CnameRecord.Cname != "" { + appendDNSLink(*rs.Properties.CnameRecord.Cname) + } + for _, mx := range rs.Properties.MxRecords { + if mx != nil && mx.Exchange != nil && *mx.Exchange != "" { + appendDNSLink(*mx.Exchange) + } + } + for _, ns := range rs.Properties.NsRecords { + if ns != nil && ns.Nsdname != nil && *ns.Nsdname != "" { + appendDNSLink(*ns.Nsdname) + } + } + for _, ptr := range rs.Properties.PtrRecords { + if ptr != nil && ptr.Ptrdname != nil && *ptr.Ptrdname != "" { + appendDNSLink(*ptr.Ptrdname) + } + } + // SOA Host is the authoritative name server (DNS name). SOA Email is an email in DNS + // notation (e.g. admin.example.com = admin@example.com), not a resolvable hostname. + if rs.Properties.SoaRecord != nil && rs.Properties.SoaRecord.Host != nil && *rs.Properties.SoaRecord.Host != "" { + appendDNSLink(*rs.Properties.SoaRecord.Host) + } + // Only "issue" and "issuewild" CAA values are DNS names (CA domain). "iodef" values + // are URLs (e.g. mailto: or https:) and must not be passed to appendDNSLink. + for _, caa := range rs.Properties.CaaRecords { + if caa == nil || caa.Tag == nil || caa.Value == nil || *caa.Value == "" { + continue + } + tag := *caa.Tag + if tag != "issue" && tag != "issuewild" { + continue + } + appendDNSLink(*caa.Value) + } + for _, srv := range rs.Properties.SrvRecords { + if srv != nil && srv.Target != nil && *srv.Target != "" { + appendDNSLink(*srv.Target) + } + } + + // TargetResource (Azure resource ID) -> link to referenced resource. + // Pass the composite lookup key (extracted query parts) so the target adapter's Get + // receives the expected parts when the transformer splits by QuerySeparator; it does + // not parse full resource IDs for linked GET queries. + // For types in pathKeysMap we use ExtractPathParamsFromResourceIDByType; for simple + // single-name resources (e.g. 
public IP, Traffic Manager) we fall back to ExtractResourceName. + if rs.Properties.TargetResource != nil && rs.Properties.TargetResource.ID != nil && *rs.Properties.TargetResource.ID != "" { + targetID := *rs.Properties.TargetResource.ID + linkScope := azureshared.ExtractScopeFromResourceID(targetID) + if linkScope == "" { + linkScope = scope + } + itemType := azureshared.ItemTypeFromLinkedResourceID(targetID) + if itemType != "" { + queryParts := azureshared.ExtractPathParamsFromResourceIDByType(itemType, targetID) + var query string + if queryParts != nil { + query = shared.CompositeLookupKey(queryParts...) + } else { + // Simple resource type (no pathKeysMap): use resource name as single query part + query = azureshared.ExtractResourceName(targetID) + } + if query != "" { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: itemType, + Method: sdp.QueryMethod_GET, + Query: query, + Scope: linkScope, + }, + }) + } + } + } + } + + // Health from provisioning state + if rs.Properties != nil && rs.Properties.ProvisioningState != nil { + switch *rs.Properties.ProvisioningState { + case "Succeeded": + sdpItem.Health = sdp.Health_HEALTH_OK.Enum() + case "Creating", "Updating", "Deleting": + sdpItem.Health = sdp.Health_HEALTH_PENDING.Enum() + case "Failed", "Canceled": + sdpItem.Health = sdp.Health_HEALTH_ERROR.Enum() + default: + sdpItem.Health = sdp.Health_HEALTH_UNKNOWN.Enum() + } + } + + return sdpItem, nil +} + +func (n networkDNSRecordSetWrapper) PotentialLinks() map[shared.ItemType]bool { + return shared.NewItemTypesSet( + azureshared.NetworkZone, + stdlib.NetworkDNS, + stdlib.NetworkIP, + ) +} + +func (n networkDNSRecordSetWrapper) TerraformMappings() []*sdp.TerraformMapping { + return nil +} + +func (n networkDNSRecordSetWrapper) IAMPermissions() []string { + return []string{ + "Microsoft.Network/dnszones/*/read", + } +} + +func (n networkDNSRecordSetWrapper) PredefinedRole() string { + return 
"Reader" +} diff --git a/sources/azure/manual/network-dns-record-set_test.go b/sources/azure/manual/network-dns-record-set_test.go new file mode 100644 index 00000000..4ab57c26 --- /dev/null +++ b/sources/azure/manual/network-dns-record-set_test.go @@ -0,0 +1,313 @@ +package manual_test + +import ( + "context" + "errors" + "slices" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dns/armdns" + "go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + "github.com/overmindtech/cli/sources/shared" + "github.com/overmindtech/cli/sources/stdlib" +) + +func createAzureRecordSet(relativeName, recordType, zoneName, subscriptionID, resourceGroup string) *armdns.RecordSet { + fqdn := relativeName + "." 
+ zoneName + armType := "Microsoft.Network/dnszones/" + recordType + provisioningState := "Succeeded" + return &armdns.RecordSet{ + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/dnszones/" + zoneName + "/" + recordType + "/" + relativeName), + Name: new(relativeName), + Type: new(armType), + Properties: &armdns.RecordSetProperties{ + Fqdn: new(fqdn), + ProvisioningState: &provisioningState, + TTL: new(int64(3600)), + ARecords: nil, + AaaaRecords: nil, + CnameRecord: nil, + MxRecords: nil, + NsRecords: nil, + PtrRecords: nil, + SoaRecord: nil, + SrvRecords: nil, + TxtRecords: nil, + CaaRecords: nil, + TargetResource: nil, + Metadata: nil, + }, + } +} + +type mockRecordSetsPager struct { + pages []armdns.RecordSetsClientListAllByDNSZoneResponse + index int +} + +func (m *mockRecordSetsPager) More() bool { + return m.index < len(m.pages) +} + +func (m *mockRecordSetsPager) NextPage(ctx context.Context) (armdns.RecordSetsClientListAllByDNSZoneResponse, error) { + if m.index >= len(m.pages) { + return armdns.RecordSetsClientListAllByDNSZoneResponse{}, errors.New("no more pages") + } + page := m.pages[m.index] + m.index++ + return page, nil +} + +func TestNetworkDNSRecordSet(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + zoneName := "example.com" + relativeName := "www" + recordType := "A" + query := shared.CompositeLookupKey(zoneName, recordType, relativeName) + + t.Run("Get", func(t *testing.T) { + rs := createAzureRecordSet(relativeName, recordType, zoneName, subscriptionID, resourceGroup) + + mockClient := mocks.NewMockRecordSetsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, zoneName, relativeName, armdns.RecordType(recordType), nil).Return( + armdns.RecordSetsClientGetResponse{ + RecordSet: *rs, + }, nil) + + wrapper := manual.NewNetworkDNSRecordSet(mockClient, 
[]azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.NetworkDNSRecordSet.String() { + t.Errorf("Expected type %s, got %s", azureshared.NetworkDNSRecordSet.String(), sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", sdpItem.GetUniqueAttribute()) + } + + expectedUnique := shared.CompositeLookupKey(zoneName, recordType, relativeName) + if sdpItem.UniqueAttributeValue() != expectedUnique { + t.Errorf("Expected unique attribute value %s, got %s", expectedUnique, sdpItem.UniqueAttributeValue()) + } + + t.Run("StaticTests", func(t *testing.T) { + queryTests := shared.QueryTests{ + { + ExpectedType: azureshared.NetworkZone.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: zoneName, + ExpectedScope: subscriptionID + "." 
+ resourceGroup, + }, + { + ExpectedType: stdlib.NetworkDNS.String(), + ExpectedMethod: sdp.QueryMethod_SEARCH, + ExpectedQuery: "www.example.com", + ExpectedScope: "global", + }, + } + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("GetWithInsufficientQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockRecordSetsClient(ctrl) + wrapper := manual.NewNetworkDNSRecordSet(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + // Single part (zone only) is insufficient + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], zoneName, true) + if qErr == nil { + t.Error("Expected error when providing only one query part, got nil") + } + }) + + t.Run("Search", func(t *testing.T) { + rs1 := createAzureRecordSet("www", "A", zoneName, subscriptionID, resourceGroup) + rs2 := createAzureRecordSet("mail", "MX", zoneName, subscriptionID, resourceGroup) + + mockClient := mocks.NewMockRecordSetsClient(ctrl) + mockPager := &mockRecordSetsPager{ + pages: []armdns.RecordSetsClientListAllByDNSZoneResponse{ + { + RecordSetListResult: armdns.RecordSetListResult{ + Value: []*armdns.RecordSet{rs1, rs2}, + }, + }, + }, + } + mockClient.EXPECT().NewListAllByDNSZonePager(resourceGroup, zoneName, nil).Return(mockPager) + + wrapper := manual.NewNetworkDNSRecordSet(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not implement SearchableAdapter") + } + + items, qErr := searchable.Search(ctx, wrapper.Scopes()[0], zoneName, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + if len(items) != 2 { + t.Fatalf("Expected 2 items, got %d", len(items)) + } + for _, item := range items { + 
if item.Validate() != nil { + t.Fatalf("Expected valid item, got: %v", item.Validate()) + } + } + }) + + t.Run("SearchStream", func(t *testing.T) { + rs := createAzureRecordSet("www", "A", zoneName, subscriptionID, resourceGroup) + + mockClient := mocks.NewMockRecordSetsClient(ctrl) + mockPager := &mockRecordSetsPager{ + pages: []armdns.RecordSetsClientListAllByDNSZoneResponse{ + { + RecordSetListResult: armdns.RecordSetListResult{ + Value: []*armdns.RecordSet{rs}, + }, + }, + }, + } + mockClient.EXPECT().NewListAllByDNSZonePager(resourceGroup, zoneName, nil).Return(mockPager) + + wrapper := manual.NewNetworkDNSRecordSet(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + streamable, ok := adapter.(discovery.SearchStreamableAdapter) + if !ok { + t.Fatalf("Adapter does not implement SearchStreamableAdapter") + } + + var received []*sdp.Item + stream := discovery.NewQueryResultStream( + func(item *sdp.Item) { received = append(received, item) }, + func(error) {}, + ) + streamable.SearchStream(ctx, wrapper.Scopes()[0], zoneName, true, stream) + + if len(received) != 1 { + t.Fatalf("Expected 1 item from SearchStream, got %d", len(received)) + } + if received[0].GetType() != azureshared.NetworkDNSRecordSet.String() { + t.Errorf("Expected type %s, got %s", azureshared.NetworkDNSRecordSet.String(), received[0].GetType()) + } + }) + + t.Run("SearchWithInsufficientQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockRecordSetsClient(ctrl) + wrapper := manual.NewNetworkDNSRecordSet(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable := adapter.(discovery.SearchableAdapter) + _, qErr := searchable.Search(ctx, wrapper.Scopes()[0], "", true) + if qErr == nil { + t.Error("Expected error when 
providing empty zone name, got nil") + } + }) + + t.Run("ErrorHandling", func(t *testing.T) { + expectedErr := errors.New("record set not found") + mockClient := mocks.NewMockRecordSetsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, zoneName, relativeName, armdns.RecordType(recordType), nil).Return( + armdns.RecordSetsClientGetResponse{}, expectedErr) + + wrapper := manual.NewNetworkDNSRecordSet(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when Get fails, got nil") + } + }) + + t.Run("PotentialLinks", func(t *testing.T) { + mockClient := mocks.NewMockRecordSetsClient(ctrl) + wrapper := manual.NewNetworkDNSRecordSet(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + potentialLinks := wrapper.PotentialLinks() + if !potentialLinks[azureshared.NetworkZone] { + t.Error("Expected PotentialLinks to include NetworkZone") + } + if !potentialLinks[stdlib.NetworkDNS] { + t.Error("Expected PotentialLinks to include stdlib.NetworkDNS") + } + if !potentialLinks[stdlib.NetworkIP] { + t.Error("Expected PotentialLinks to include stdlib.NetworkIP") + } + }) + + t.Run("IAMPermissions", func(t *testing.T) { + mockClient := mocks.NewMockRecordSetsClient(ctrl) + wrapper := manual.NewNetworkDNSRecordSet(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + perms := wrapper.IAMPermissions() + if len(perms) == 0 { + t.Error("Expected at least one IAM permission") + } + expectedPermission := "Microsoft.Network/dnszones/*/read" + found := slices.Contains(perms, expectedPermission) + if !found { + t.Errorf("Expected IAMPermissions to include %q", expectedPermission) + } + }) + + 
t.Run("GetWithARecordsAndCnameLinkedQueries", func(t *testing.T) { + rs := createAzureRecordSet(relativeName, recordType, zoneName, subscriptionID, resourceGroup) + rs.Properties.ARecords = []*armdns.ARecord{{IPv4Address: new("192.168.1.1")}} + rs.Properties.CnameRecord = &armdns.CnameRecord{Cname: new("backend.example.com")} + + mockClient := mocks.NewMockRecordSetsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, zoneName, relativeName, armdns.RecordType(recordType), nil).Return( + armdns.RecordSetsClientGetResponse{RecordSet: *rs}, nil) + + wrapper := manual.NewNetworkDNSRecordSet(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + var hasIPLink, hasCnameLink bool + for _, lq := range sdpItem.GetLinkedItemQueries() { + q := lq.GetQuery() + if q == nil { + continue + } + if q.GetType() == stdlib.NetworkIP.String() && q.GetQuery() == "192.168.1.1" && q.GetMethod() == sdp.QueryMethod_GET && q.GetScope() == "global" { + hasIPLink = true + } + if q.GetType() == stdlib.NetworkDNS.String() && q.GetQuery() == "backend.example.com" && q.GetMethod() == sdp.QueryMethod_SEARCH && q.GetScope() == "global" { + hasCnameLink = true + } + } + if !hasIPLink { + t.Error("Expected LinkedItemQueries to include stdlib.NetworkIP for A record 192.168.1.1 (GET, global)") + } + if !hasCnameLink { + t.Error("Expected LinkedItemQueries to include stdlib.NetworkDNS for CNAME backend.example.com (SEARCH, global)") + } + }) +} diff --git a/sources/azure/manual/network-load-balancer.go b/sources/azure/manual/network-load-balancer.go index 4369d479..3b3081dd 100644 --- a/sources/azure/manual/network-load-balancer.go +++ b/sources/azure/manual/network-load-balancer.go @@ -6,7 +6,7 @@ import ( "fmt" "strings" - 
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" "github.com/overmindtech/cli/go/sdpcache" diff --git a/sources/azure/manual/network-load-balancer_test.go b/sources/azure/manual/network-load-balancer_test.go index e20ead69..6f028cf6 100644 --- a/sources/azure/manual/network-load-balancer_test.go +++ b/sources/azure/manual/network-load-balancer_test.go @@ -5,10 +5,10 @@ import ( "errors" "fmt" "reflect" + "slices" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" "go.uber.org/mock/gomock" "github.com/overmindtech/cli/go/discovery" @@ -209,9 +209,9 @@ func TestNetworkLoadBalancer(t *testing.T) { lb1 := createAzureLoadBalancer("test-lb-1", subscriptionID, resourceGroup) lb2 := &armnetwork.LoadBalancer{ Name: nil, // Load balancer with nil name should be skipped - Location: to.Ptr("eastus"), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armnetwork.LoadBalancerPropertiesFormat{}, } @@ -317,13 +317,7 @@ func TestNetworkLoadBalancer(t *testing.T) { t.Error("Expected IAMPermissions to return at least one permission") } expectedPermission := "Microsoft.Network/loadBalancers/read" - found := false - for _, perm := range permissions { - if perm == expectedPermission { - found = true - break - } - } + found := slices.Contains(permissions, expectedPermission) if !found { t.Errorf("Expected IAMPermissions to include %s", expectedPermission) } @@ -493,7 +487,7 @@ func (m *MockLoadBalancersPager) More() bool { func (mr *MockLoadBalancersPagerMockRecorder) More() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, 
"More", reflect.TypeOf((*MockLoadBalancersPager)(nil).More)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "More", reflect.TypeFor[func() bool]()) } func (m *MockLoadBalancersPager) NextPage(ctx context.Context) (armnetwork.LoadBalancersClientListResponse, error) { @@ -504,9 +498,9 @@ func (m *MockLoadBalancersPager) NextPage(ctx context.Context) (armnetwork.LoadB return ret0, ret1 } -func (mr *MockLoadBalancersPagerMockRecorder) NextPage(ctx interface{}) *gomock.Call { +func (mr *MockLoadBalancersPagerMockRecorder) NextPage(ctx any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NextPage", reflect.TypeOf((*MockLoadBalancersPager)(nil).NextPage), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NextPage", reflect.TypeFor[func(ctx context.Context) (armnetwork.LoadBalancersClientListResponse, error)](), ctx) } // createAzureLoadBalancer creates a mock Azure load balancer for testing with all linked resources @@ -516,61 +510,61 @@ func createAzureLoadBalancer(lbName, subscriptionID, resourceGroup string) *armn nicID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/networkInterfaces/test-nic/ipConfigurations/ipconfig1", subscriptionID, resourceGroup) return &armnetwork.LoadBalancer{ - Name: to.Ptr(lbName), - Location: to.Ptr("eastus"), + Name: new(lbName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), - "project": to.Ptr("testing"), + "env": new("test"), + "project": new("testing"), }, Properties: &armnetwork.LoadBalancerPropertiesFormat{ FrontendIPConfigurations: []*armnetwork.FrontendIPConfiguration{ { - Name: to.Ptr("frontend-ip-config"), + Name: new("frontend-ip-config"), Properties: &armnetwork.FrontendIPConfigurationPropertiesFormat{ PublicIPAddress: &armnetwork.PublicIPAddress{ - ID: to.Ptr(publicIPID), + ID: new(publicIPID), }, Subnet: &armnetwork.Subnet{ - ID: to.Ptr(subnetID), + ID: new(subnetID), }, // PrivateIPAddress is 
present when using a subnet (internal load balancer) - PrivateIPAddress: to.Ptr("10.2.0.5"), + PrivateIPAddress: new("10.2.0.5"), }, }, }, BackendAddressPools: []*armnetwork.BackendAddressPool{ { - Name: to.Ptr("backend-pool"), + Name: new("backend-pool"), }, }, InboundNatRules: []*armnetwork.InboundNatRule{ { - Name: to.Ptr("inbound-nat-rule"), + Name: new("inbound-nat-rule"), Properties: &armnetwork.InboundNatRulePropertiesFormat{ BackendIPConfiguration: &armnetwork.InterfaceIPConfiguration{ - ID: to.Ptr(nicID), + ID: new(nicID), }, }, }, }, LoadBalancingRules: []*armnetwork.LoadBalancingRule{ { - Name: to.Ptr("lb-rule"), + Name: new("lb-rule"), }, }, Probes: []*armnetwork.Probe{ { - Name: to.Ptr("probe"), + Name: new("probe"), }, }, OutboundRules: []*armnetwork.OutboundRule{ { - Name: to.Ptr("outbound-rule"), + Name: new("outbound-rule"), }, }, InboundNatPools: []*armnetwork.InboundNatPool{ { - Name: to.Ptr("nat-pool"), + Name: new("nat-pool"), }, }, }, @@ -582,18 +576,18 @@ func createAzureLoadBalancerWithDifferentScopePublicIP(lbName, subscriptionID, r publicIPID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/publicIPAddresses/test-public-ip", otherSub, otherRG) return &armnetwork.LoadBalancer{ - Name: to.Ptr(lbName), - Location: to.Ptr("eastus"), + Name: new(lbName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armnetwork.LoadBalancerPropertiesFormat{ FrontendIPConfigurations: []*armnetwork.FrontendIPConfiguration{ { - Name: to.Ptr("frontend-ip-config"), + Name: new("frontend-ip-config"), Properties: &armnetwork.FrontendIPConfigurationPropertiesFormat{ PublicIPAddress: &armnetwork.PublicIPAddress{ - ID: to.Ptr(publicIPID), + ID: new(publicIPID), }, }, }, @@ -607,18 +601,18 @@ func createAzureLoadBalancerWithDifferentScopeSubnet(lbName, subscriptionID, res subnetID := 
fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/test-subnet", otherSub, otherRG) return &armnetwork.LoadBalancer{ - Name: to.Ptr(lbName), - Location: to.Ptr("eastus"), + Name: new(lbName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armnetwork.LoadBalancerPropertiesFormat{ FrontendIPConfigurations: []*armnetwork.FrontendIPConfiguration{ { - Name: to.Ptr("frontend-ip-config"), + Name: new("frontend-ip-config"), Properties: &armnetwork.FrontendIPConfigurationPropertiesFormat{ Subnet: &armnetwork.Subnet{ - ID: to.Ptr(subnetID), + ID: new(subnetID), }, }, }, diff --git a/sources/azure/manual/network-nat-gateway.go b/sources/azure/manual/network-nat-gateway.go new file mode 100644 index 00000000..e6f6f98f --- /dev/null +++ b/sources/azure/manual/network-nat-gateway.go @@ -0,0 +1,283 @@ +package manual + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" +) + +var NetworkNatGatewayLookupByName = shared.NewItemTypeLookup("name", azureshared.NetworkNatGateway) + +type networkNatGatewayWrapper struct { + client clients.NatGatewaysClient + + *azureshared.MultiResourceGroupBase +} + +// NewNetworkNatGateway creates a new networkNatGatewayWrapper instance. 
+func NewNetworkNatGateway(client clients.NatGatewaysClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.ListableWrapper { + return &networkNatGatewayWrapper{ + client: client, + MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_NETWORK, + azureshared.NetworkNatGateway, + ), + } +} + +func (n networkNatGatewayWrapper) List(ctx context.Context, scope string) ([]*sdp.Item, *sdp.QueryError) { + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + pager := n.client.NewListPager(rgScope.ResourceGroup, nil) + + var items []*sdp.Item + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + + for _, ng := range page.Value { + if ng.Name == nil { + continue + } + item, sdpErr := n.azureNatGatewayToSDPItem(ng, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + + return items, nil +} + +func (n networkNatGatewayWrapper) ListStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string) { + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, n.Type())) + return + } + pager := n.client.NewListPager(rgScope.ResourceGroup, nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, n.Type())) + return + } + + for _, ng := range page.Value { + if ng.Name == nil { + continue + } + item, sdpErr := n.azureNatGatewayToSDPItem(ng, scope) + if sdpErr != nil { + stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) + stream.SendItem(item) + } + } +} + +func (n networkNatGatewayWrapper) Get(ctx context.Context, scope string, 
queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 1 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Get requires 1 query part: natGatewayName", + Scope: scope, + ItemType: n.Type(), + } + } + + natGatewayName := queryParts[0] + + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + resp, err := n.client.Get(ctx, rgScope.ResourceGroup, natGatewayName, nil) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + + return n.azureNatGatewayToSDPItem(&resp.NatGateway, scope) +} + +func (n networkNatGatewayWrapper) azureNatGatewayToSDPItem(ng *armnetwork.NatGateway, scope string) (*sdp.Item, *sdp.QueryError) { + attributes, err := shared.ToAttributesWithExclude(ng, "tags") + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + + if ng.Name == nil { + return nil, azureshared.QueryError(errors.New("nat gateway name is nil"), scope, n.Type()) + } + + sdpItem := &sdp.Item{ + Type: azureshared.NetworkNatGateway.String(), + UniqueAttribute: "name", + Attributes: attributes, + Scope: scope, + Tags: azureshared.ConvertAzureTags(ng.Tags), + LinkedItemQueries: []*sdp.LinkedItemQuery{}, + } + + // Health from provisioning state + if ng.Properties != nil && ng.Properties.ProvisioningState != nil { + switch *ng.Properties.ProvisioningState { + case armnetwork.ProvisioningStateSucceeded: + sdpItem.Health = sdp.Health_HEALTH_OK.Enum() + case armnetwork.ProvisioningStateCreating, armnetwork.ProvisioningStateUpdating, armnetwork.ProvisioningStateDeleting: + sdpItem.Health = sdp.Health_HEALTH_PENDING.Enum() + case armnetwork.ProvisioningStateFailed, armnetwork.ProvisioningStateCanceled: + sdpItem.Health = sdp.Health_HEALTH_ERROR.Enum() + default: + sdpItem.Health = sdp.Health_HEALTH_UNKNOWN.Enum() + } + } + + // Linked resources from Properties + if ng.Properties == nil { + return sdpItem, 
nil + } + props := ng.Properties + + // Public IP addresses (V4 and V6) + for _, refs := range [][]*armnetwork.SubResource{props.PublicIPAddresses, props.PublicIPAddressesV6} { + for _, ref := range refs { + if ref != nil && ref.ID != nil { + refID := *ref.ID + refName := azureshared.ExtractResourceName(refID) + if refName != "" { + linkedScope := azureshared.ExtractScopeFromResourceID(refID) + if linkedScope == "" { + linkedScope = scope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkPublicIPAddress.String(), + Method: sdp.QueryMethod_GET, + Query: refName, + Scope: linkedScope, + }, + }) + } + } + } + } + + // Public IP prefixes (V4 and V6) + for _, refs := range [][]*armnetwork.SubResource{props.PublicIPPrefixes, props.PublicIPPrefixesV6} { + for _, ref := range refs { + if ref != nil && ref.ID != nil { + refID := *ref.ID + refName := azureshared.ExtractResourceName(refID) + if refName != "" { + linkedScope := azureshared.ExtractScopeFromResourceID(refID) + if linkedScope == "" { + linkedScope = scope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkPublicIPPrefix.String(), + Method: sdp.QueryMethod_GET, + Query: refName, + Scope: linkedScope, + }, + }) + } + } + } + } + + // Subnets (read-only references: subnets using this NAT gateway) + for _, ref := range props.Subnets { + if ref != nil && ref.ID != nil { + subnetID := *ref.ID + params := azureshared.ExtractPathParamsFromResourceID(subnetID, []string{"virtualNetworks", "subnets"}) + if len(params) >= 2 && params[0] != "" && params[1] != "" { + linkedScope := azureshared.ExtractScopeFromResourceID(subnetID) + if linkedScope == "" { + linkedScope = scope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkSubnet.String(), + Method: 
sdp.QueryMethod_GET, + Scope: linkedScope, + Query: shared.CompositeLookupKey(params[0], params[1]), + }, + }) + } + } + } + + // Source virtual network + if props.SourceVirtualNetwork != nil && props.SourceVirtualNetwork.ID != nil { + vnetID := *props.SourceVirtualNetwork.ID + vnetName := azureshared.ExtractResourceName(vnetID) + if vnetName != "" { + linkedScope := azureshared.ExtractScopeFromResourceID(vnetID) + if linkedScope == "" { + linkedScope = scope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkVirtualNetwork.String(), + Method: sdp.QueryMethod_GET, + Query: vnetName, + Scope: linkedScope, + }, + }) + } + } + + return sdpItem, nil +} + +func (n networkNatGatewayWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + NetworkNatGatewayLookupByName, + } +} + +func (n networkNatGatewayWrapper) PotentialLinks() map[shared.ItemType]bool { + return map[shared.ItemType]bool{ + azureshared.NetworkPublicIPAddress: true, + azureshared.NetworkPublicIPPrefix: true, + azureshared.NetworkSubnet: true, + azureshared.NetworkVirtualNetwork: true, + } +} + +func (n networkNatGatewayWrapper) TerraformMappings() []*sdp.TerraformMapping { + return []*sdp.TerraformMapping{ + { + TerraformMethod: sdp.QueryMethod_GET, + TerraformQueryMap: "azurerm_nat_gateway.name", + }, + } +} + +func (n networkNatGatewayWrapper) IAMPermissions() []string { + return []string{ + "Microsoft.Network/natGateways/read", + } +} + +func (n networkNatGatewayWrapper) PredefinedRole() string { + return "Reader" +} diff --git a/sources/azure/manual/network-nat-gateway_test.go b/sources/azure/manual/network-nat-gateway_test.go new file mode 100644 index 00000000..79fac9e8 --- /dev/null +++ b/sources/azure/manual/network-nat-gateway_test.go @@ -0,0 +1,354 @@ +package manual_test + +import ( + "context" + "errors" + "testing" + + 
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + "go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + "github.com/overmindtech/cli/sources/shared" +) + +func TestNetworkNatGateway(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + scope := subscriptionID + "." + resourceGroup + + t.Run("Get", func(t *testing.T) { + natGatewayName := "test-nat-gateway" + ng := createAzureNatGateway(natGatewayName) + + mockClient := mocks.NewMockNatGatewaysClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, natGatewayName, nil).Return( + armnetwork.NatGatewaysClientGetResponse{ + NatGateway: *ng, + }, nil) + + wrapper := manual.NewNetworkNatGateway(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + sdpItem, qErr := adapter.Get(ctx, scope, natGatewayName, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.NetworkNatGateway.String() { + t.Errorf("Expected type %s, got %s", azureshared.NetworkNatGateway.String(), sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "name" { + t.Errorf("Expected unique attribute 'name', got %s", sdpItem.GetUniqueAttribute()) + } + + if sdpItem.UniqueAttributeValue() != natGatewayName { + t.Errorf("Expected unique attribute value %s, got %s", natGatewayName, sdpItem.UniqueAttributeValue()) + } + + if sdpItem.GetTags()["env"] != "test" { + t.Errorf("Expected tag 
'env=test', got: %v", sdpItem.GetTags()["env"]) + } + + t.Run("StaticTests", func(t *testing.T) { + queryTests := shared.QueryTests{} + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("Get_WithLinkedResources", func(t *testing.T) { + natGatewayName := "test-nat-gateway-with-links" + ng := createAzureNatGatewayWithLinks(natGatewayName, subscriptionID, resourceGroup) + + mockClient := mocks.NewMockNatGatewaysClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, natGatewayName, nil).Return( + armnetwork.NatGatewaysClientGetResponse{ + NatGateway: *ng, + }, nil) + + wrapper := manual.NewNetworkNatGateway(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + sdpItem, qErr := adapter.Get(ctx, scope, natGatewayName, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + t.Run("StaticTests", func(t *testing.T) { + queryTests := shared.QueryTests{ + { + ExpectedType: azureshared.NetworkPublicIPAddress.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-public-ip", + ExpectedScope: scope, + }, + { + ExpectedType: azureshared.NetworkPublicIPPrefix.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-public-ip-prefix", + ExpectedScope: scope, + }, + { + ExpectedType: azureshared.NetworkSubnet.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: shared.CompositeLookupKey("test-vnet", "test-subnet"), + ExpectedScope: scope, + }, + { + ExpectedType: azureshared.NetworkVirtualNetwork.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "source-vnet", + ExpectedScope: scope, + }, + } + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("GetWithEmptyName", func(t *testing.T) { + mockClient := mocks.NewMockNatGatewaysClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, "", nil).Return( + 
armnetwork.NatGatewaysClientGetResponse{}, errors.New("nat gateway not found")) + + wrapper := manual.NewNetworkNatGateway(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, scope, "", true) + if qErr == nil { + t.Error("Expected error when getting nat gateway with empty name, but got nil") + } + }) + + t.Run("ErrorHandling", func(t *testing.T) { + natGatewayName := "nonexistent-nat-gateway" + expectedErr := errors.New("nat gateway not found") + + mockClient := mocks.NewMockNatGatewaysClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, natGatewayName, nil).Return( + armnetwork.NatGatewaysClientGetResponse{}, expectedErr) + + wrapper := manual.NewNetworkNatGateway(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, scope, natGatewayName, true) + if qErr == nil { + t.Fatal("Expected error when nat gateway not found, got nil") + } + }) + + t.Run("List", func(t *testing.T) { + ng1 := createAzureNatGateway("nat-gateway-1") + ng2 := createAzureNatGateway("nat-gateway-2") + + mockClient := mocks.NewMockNatGatewaysClient(ctrl) + mockPager := newMockNatGatewaysPager(ctrl, []*armnetwork.NatGateway{ng1, ng2}) + + mockClient.EXPECT().NewListPager(resourceGroup, nil).Return(mockPager) + + wrapper := manual.NewNetworkNatGateway(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + listable, ok := adapter.(discovery.ListableAdapter) + if !ok { + t.Fatalf("Adapter does not support List operation") + } + + items, err := listable.List(ctx, scope, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + 
+ if len(items) != 2 { + t.Fatalf("Expected 2 items, got %d", len(items)) + } + + for i, item := range items { + if item.GetType() != azureshared.NetworkNatGateway.String() { + t.Errorf("Item %d: expected type %s, got %s", i, azureshared.NetworkNatGateway.String(), item.GetType()) + } + if item.Validate() != nil { + t.Errorf("Item %d: validation error: %v", i, item.Validate()) + } + } + }) + + t.Run("ListStream", func(t *testing.T) { + ng1 := createAzureNatGateway("nat-gateway-1") + ng2 := createAzureNatGateway("nat-gateway-2") + + mockClient := mocks.NewMockNatGatewaysClient(ctrl) + mockPager := newMockNatGatewaysPager(ctrl, []*armnetwork.NatGateway{ng1, ng2}) + + mockClient.EXPECT().NewListPager(resourceGroup, nil).Return(mockPager) + + wrapper := manual.NewNetworkNatGateway(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + listStream, ok := adapter.(discovery.ListStreamableAdapter) + if !ok { + t.Fatalf("Adapter does not support ListStream operation") + } + + var received []*sdp.Item + stream := &collectingStream{items: &received} + listStream.ListStream(ctx, scope, true, stream) + + if len(received) != 2 { + t.Fatalf("Expected 2 items from stream, got %d", len(received)) + } + }) + + t.Run("List_NilNameSkipped", func(t *testing.T) { + ng1 := createAzureNatGateway("nat-gateway-1") + ng2NilName := createAzureNatGateway("nat-gateway-2") + ng2NilName.Name = nil + + mockClient := mocks.NewMockNatGatewaysClient(ctrl) + mockPager := newMockNatGatewaysPager(ctrl, []*armnetwork.NatGateway{ng1, ng2NilName}) + + mockClient.EXPECT().NewListPager(resourceGroup, nil).Return(mockPager) + + wrapper := manual.NewNetworkNatGateway(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + listable, ok := 
adapter.(discovery.ListableAdapter) + if !ok { + t.Fatalf("Adapter does not support List operation") + } + + items, err := listable.List(ctx, scope, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(items) != 1 { + t.Fatalf("Expected 1 item (nil name skipped), got %d", len(items)) + } + if items[0].UniqueAttributeValue() != "nat-gateway-1" { + t.Errorf("Expected only nat-gateway-1, got %s", items[0].UniqueAttributeValue()) + } + }) + + t.Run("GetLookups", func(t *testing.T) { + wrapper := manual.NewNetworkNatGateway(nil, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + lookups := wrapper.GetLookups() + if len(lookups) == 0 { + t.Error("Expected GetLookups to return at least one lookup") + } + found := false + for _, l := range lookups { + if l.ItemType.String() == azureshared.NetworkNatGateway.String() { + found = true + break + } + } + if !found { + t.Error("Expected GetLookups to include NetworkNatGateway") + } + }) + + t.Run("PotentialLinks", func(t *testing.T) { + wrapper := manual.NewNetworkNatGateway(nil, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + potentialLinks := wrapper.PotentialLinks() + for _, linkType := range []shared.ItemType{ + azureshared.NetworkPublicIPAddress, + azureshared.NetworkPublicIPPrefix, + azureshared.NetworkSubnet, + azureshared.NetworkVirtualNetwork, + } { + if !potentialLinks[linkType] { + t.Errorf("Expected PotentialLinks to include %s", linkType) + } + } + }) +} + +type mockNatGatewaysPager struct { + ctrl *gomock.Controller + items []*armnetwork.NatGateway + index int + more bool +} + +func newMockNatGatewaysPager(ctrl *gomock.Controller, items []*armnetwork.NatGateway) *mockNatGatewaysPager { + return &mockNatGatewaysPager{ + ctrl: ctrl, + items: items, + index: 0, + more: len(items) > 0, + } +} + +func (m *mockNatGatewaysPager) More() bool { + return m.more +} + +func (m 
*mockNatGatewaysPager) NextPage(ctx context.Context) (armnetwork.NatGatewaysClientListResponse, error) { + if m.index >= len(m.items) { + m.more = false + return armnetwork.NatGatewaysClientListResponse{ + NatGatewayListResult: armnetwork.NatGatewayListResult{ + Value: []*armnetwork.NatGateway{}, + }, + }, nil + } + item := m.items[m.index] + m.index++ + m.more = m.index < len(m.items) + return armnetwork.NatGatewaysClientListResponse{ + NatGatewayListResult: armnetwork.NatGatewayListResult{ + Value: []*armnetwork.NatGateway{item}, + }, + }, nil +} + +func createAzureNatGateway(name string) *armnetwork.NatGateway { + provisioningState := armnetwork.ProvisioningStateSucceeded + return &armnetwork.NatGateway{ + ID: new("/subscriptions/test-sub/resourceGroups/test-rg/providers/Microsoft.Network/natGateways/" + name), + Name: new(name), + Type: new("Microsoft.Network/natGateways"), + Location: new("eastus"), + Tags: map[string]*string{ + "env": new("test"), + "project": new("testing"), + }, + Properties: &armnetwork.NatGatewayPropertiesFormat{ + ProvisioningState: &provisioningState, + }, + } +} + +func createAzureNatGatewayWithLinks(name, subscriptionID, resourceGroup string) *armnetwork.NatGateway { + ng := createAzureNatGateway(name) + baseID := "/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network" + publicIPID := baseID + "/publicIPAddresses/test-public-ip" + publicIPPrefixID := baseID + "/publicIPPrefixes/test-public-ip-prefix" + subnetID := baseID + "/virtualNetworks/test-vnet/subnets/test-subnet" + sourceVnetID := baseID + "/virtualNetworks/source-vnet" + + ng.Properties.PublicIPAddresses = []*armnetwork.SubResource{ + {ID: new(publicIPID)}, + } + ng.Properties.PublicIPPrefixes = []*armnetwork.SubResource{ + {ID: new(publicIPPrefixID)}, + } + ng.Properties.Subnets = []*armnetwork.SubResource{ + {ID: new(subnetID)}, + } + ng.Properties.SourceVirtualNetwork = &armnetwork.SubResource{ + ID: new(sourceVnetID), + } + 
return ng +} diff --git a/sources/azure/manual/network-network-interface.go b/sources/azure/manual/network-network-interface.go index 7c7d75e9..c8f52a0b 100644 --- a/sources/azure/manual/network-network-interface.go +++ b/sources/azure/manual/network-network-interface.go @@ -4,7 +4,7 @@ import ( "context" "errors" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" "github.com/overmindtech/cli/go/sdpcache" diff --git a/sources/azure/manual/network-network-interface_test.go b/sources/azure/manual/network-network-interface_test.go index a7f632c5..413aa50b 100644 --- a/sources/azure/manual/network-network-interface_test.go +++ b/sources/azure/manual/network-network-interface_test.go @@ -4,10 +4,10 @@ import ( "context" "errors" "reflect" + "slices" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" "go.uber.org/mock/gomock" "github.com/overmindtech/cli/go/discovery" @@ -222,16 +222,16 @@ func TestNetworkNetworkInterface(t *testing.T) { nic1 := createAzureNetworkInterface("test-nic-1", "test-vm-1", "test-nsg-1") nic2 := &armnetwork.Interface{ Name: nil, // NIC with nil name should cause an error in azureNetworkInterfaceToSDPItem - Location: to.Ptr("eastus"), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armnetwork.InterfacePropertiesFormat{ IPConfigurations: []*armnetwork.InterfaceIPConfiguration{ { - Name: to.Ptr("ipconfig1"), + Name: new("ipconfig1"), Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{ - PrivateIPAllocationMethod: to.Ptr(armnetwork.IPAllocationMethodDynamic), + PrivateIPAllocationMethod: 
new(armnetwork.IPAllocationMethodDynamic), }, }, }, @@ -332,13 +332,7 @@ func TestNetworkNetworkInterface(t *testing.T) { t.Error("Expected IAMPermissions to return at least one permission") } expectedPermission := "Microsoft.Network/networkInterfaces/read" - found := false - for _, perm := range permissions { - if perm == expectedPermission { - found = true - break - } - } + found := slices.Contains(permissions, expectedPermission) if !found { t.Errorf("Expected IAMPermissions to include %s", expectedPermission) } @@ -400,7 +394,7 @@ func (m *MockNetworkInterfacesPager) More() bool { func (mr *MockNetworkInterfacesPagerMockRecorder) More() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "More", reflect.TypeOf((*MockNetworkInterfacesPager)(nil).More)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "More", reflect.TypeFor[func() bool]()) } func (m *MockNetworkInterfacesPager) NextPage(ctx context.Context) (armnetwork.InterfacesClientListResponse, error) { @@ -411,9 +405,9 @@ func (m *MockNetworkInterfacesPager) NextPage(ctx context.Context) (armnetwork.I return ret0, ret1 } -func (mr *MockNetworkInterfacesPagerMockRecorder) NextPage(ctx interface{}) *gomock.Call { +func (mr *MockNetworkInterfacesPagerMockRecorder) NextPage(ctx any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NextPage", reflect.TypeOf((*MockNetworkInterfacesPager)(nil).NextPage), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NextPage", reflect.TypeFor[func(ctx context.Context) (armnetwork.InterfacesClientListResponse, error)](), ctx) } // createAzureNetworkInterface creates a mock Azure network interface for testing @@ -422,24 +416,24 @@ func createAzureNetworkInterface(nicName, vmName, nsgName string) *armnetwork.In nsgID := "/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Network/networkSecurityGroups/" + nsgName return &armnetwork.Interface{ - 
Name: to.Ptr(nicName), - Location: to.Ptr("eastus"), + Name: new(nicName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), - "project": to.Ptr("testing"), + "env": new("test"), + "project": new("testing"), }, Properties: &armnetwork.InterfacePropertiesFormat{ VirtualMachine: &armnetwork.SubResource{ - ID: to.Ptr(vmID), + ID: new(vmID), }, NetworkSecurityGroup: &armnetwork.SecurityGroup{ - ID: to.Ptr(nsgID), + ID: new(nsgID), }, IPConfigurations: []*armnetwork.InterfaceIPConfiguration{ { - Name: to.Ptr("ipconfig1"), + Name: new("ipconfig1"), Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{ - PrivateIPAllocationMethod: to.Ptr(armnetwork.IPAllocationMethodDynamic), + PrivateIPAllocationMethod: new(armnetwork.IPAllocationMethodDynamic), }, }, }, @@ -452,7 +446,7 @@ func createAzureNetworkInterfaceWithDNSServers(nicName, vmName, nsgName string, nic := createAzureNetworkInterface(nicName, vmName, nsgName) ptrs := make([]*string, len(dnsServers)) for i := range dnsServers { - ptrs[i] = to.Ptr(dnsServers[i]) + ptrs[i] = new(dnsServers[i]) } nic.Properties.DNSSettings = &armnetwork.InterfaceDNSSettings{ DNSServers: ptrs, diff --git a/sources/azure/manual/network-network-security-group.go b/sources/azure/manual/network-network-security-group.go index 5e072ab1..1a6cfe95 100644 --- a/sources/azure/manual/network-network-security-group.go +++ b/sources/azure/manual/network-network-security-group.go @@ -6,7 +6,7 @@ import ( "net" "strings" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" "github.com/overmindtech/cli/go/sdpcache" diff --git a/sources/azure/manual/network-network-security-group_test.go b/sources/azure/manual/network-network-security-group_test.go index e79071fd..4080c289 100644 --- 
a/sources/azure/manual/network-network-security-group_test.go +++ b/sources/azure/manual/network-network-security-group_test.go @@ -4,10 +4,10 @@ import ( "context" "errors" "reflect" + "slices" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" "go.uber.org/mock/gomock" "github.com/overmindtech/cli/go/discovery" @@ -133,9 +133,9 @@ func TestNetworkNetworkSecurityGroup(t *testing.T) { t.Run("Get_WithNilName", func(t *testing.T) { nsg := &armnetwork.SecurityGroup{ Name: nil, // NSG with nil name should cause an error - Location: to.Ptr("eastus"), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, } @@ -212,9 +212,9 @@ func TestNetworkNetworkSecurityGroup(t *testing.T) { nsg1 := createAzureNetworkSecurityGroup("test-nsg-1") nsg2 := &armnetwork.SecurityGroup{ Name: nil, // NSG with nil name should be skipped - Location: to.Ptr("eastus"), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, } @@ -306,20 +306,20 @@ func TestNetworkNetworkSecurityGroup(t *testing.T) { otherSubscriptionID := "other-subscription" nsg := &armnetwork.SecurityGroup{ - Name: to.Ptr(nsgName), - Location: to.Ptr("eastus"), + Name: new(nsgName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armnetwork.SecurityGroupPropertiesFormat{ Subnets: []*armnetwork.Subnet{ { - ID: to.Ptr("/subscriptions/" + otherSubscriptionID + "/resourceGroups/" + otherResourceGroup + "/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/test-subnet"), + ID: new("/subscriptions/" + otherSubscriptionID + "/resourceGroups/" + otherResourceGroup + "/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/test-subnet"), }, }, NetworkInterfaces: []*armnetwork.Interface{ { - 
ID: to.Ptr("/subscriptions/" + otherSubscriptionID + "/resourceGroups/" + otherResourceGroup + "/providers/Microsoft.Network/networkInterfaces/test-nic"), + ID: new("/subscriptions/" + otherSubscriptionID + "/resourceGroups/" + otherResourceGroup + "/providers/Microsoft.Network/networkInterfaces/test-nic"), }, }, }, @@ -386,13 +386,7 @@ func TestNetworkNetworkSecurityGroup(t *testing.T) { t.Error("Expected IAMPermissions to return at least one permission") } expectedPermission := "Microsoft.Network/networkSecurityGroups/read" - found := false - for _, perm := range permissions { - if perm == expectedPermission { - found = true - break - } - } + found := slices.Contains(permissions, expectedPermission) if !found { t.Errorf("Expected IAMPermissions to include %s", expectedPermission) } @@ -472,7 +466,7 @@ func (m *MockNetworkSecurityGroupsPager) More() bool { func (mr *MockNetworkSecurityGroupsPagerMockRecorder) More() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "More", reflect.TypeOf((*MockNetworkSecurityGroupsPager)(nil).More)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "More", reflect.TypeFor[func() bool]()) } func (m *MockNetworkSecurityGroupsPager) NextPage(ctx context.Context) (armnetwork.SecurityGroupsClientListResponse, error) { @@ -483,9 +477,9 @@ func (m *MockNetworkSecurityGroupsPager) NextPage(ctx context.Context) (armnetwo return ret0, ret1 } -func (mr *MockNetworkSecurityGroupsPagerMockRecorder) NextPage(ctx interface{}) *gomock.Call { +func (mr *MockNetworkSecurityGroupsPagerMockRecorder) NextPage(ctx any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NextPage", reflect.TypeOf((*MockNetworkSecurityGroupsPager)(nil).NextPage), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NextPage", reflect.TypeFor[func(ctx context.Context) (armnetwork.SecurityGroupsClientListResponse, error)](), ctx) } // createAzureNetworkSecurityGroup 
creates a mock Azure network security group for testing @@ -494,29 +488,29 @@ func createAzureNetworkSecurityGroup(nsgName string) *armnetwork.SecurityGroup { resourceGroup := "test-rg" return &armnetwork.SecurityGroup{ - Name: to.Ptr(nsgName), - Location: to.Ptr("eastus"), + Name: new(nsgName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), - "project": to.Ptr("testing"), + "env": new("test"), + "project": new("testing"), }, Properties: &armnetwork.SecurityGroupPropertiesFormat{ // SecurityRules (child resources) SecurityRules: []*armnetwork.SecurityRule{ { - Name: to.Ptr("test-security-rule"), + Name: new("test-security-rule"), Properties: &armnetwork.SecurityRulePropertiesFormat{ - Priority: to.Ptr(int32(1000)), - Direction: to.Ptr(armnetwork.SecurityRuleDirectionInbound), - Access: to.Ptr(armnetwork.SecurityRuleAccessAllow), + Priority: new(int32(1000)), + Direction: new(armnetwork.SecurityRuleDirectionInbound), + Access: new(armnetwork.SecurityRuleAccessAllow), SourceApplicationSecurityGroups: []*armnetwork.ApplicationSecurityGroup{ { - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/applicationSecurityGroups/test-asg-source"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/applicationSecurityGroups/test-asg-source"), }, }, DestinationApplicationSecurityGroups: []*armnetwork.ApplicationSecurityGroup{ { - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/applicationSecurityGroups/test-asg-dest"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/applicationSecurityGroups/test-asg-dest"), }, }, }, @@ -525,14 +519,14 @@ func createAzureNetworkSecurityGroup(nsgName string) *armnetwork.SecurityGroup { // DefaultSecurityRules (child resources) DefaultSecurityRules: 
[]*armnetwork.SecurityRule{ { - Name: to.Ptr("AllowVnetInBound"), + Name: new("AllowVnetInBound"), Properties: &armnetwork.SecurityRulePropertiesFormat{ - Priority: to.Ptr(int32(65000)), - Direction: to.Ptr(armnetwork.SecurityRuleDirectionInbound), - Access: to.Ptr(armnetwork.SecurityRuleAccessAllow), + Priority: new(int32(65000)), + Direction: new(armnetwork.SecurityRuleDirectionInbound), + Access: new(armnetwork.SecurityRuleAccessAllow), SourceApplicationSecurityGroups: []*armnetwork.ApplicationSecurityGroup{ { - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/applicationSecurityGroups/test-asg-default-source"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/applicationSecurityGroups/test-asg-default-source"), }, }, }, @@ -541,13 +535,13 @@ func createAzureNetworkSecurityGroup(nsgName string) *armnetwork.SecurityGroup { // Subnets (external resources) Subnets: []*armnetwork.Subnet{ { - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/test-subnet"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/test-subnet"), }, }, // NetworkInterfaces (external resources) NetworkInterfaces: []*armnetwork.Interface{ { - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/networkInterfaces/test-nic"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/networkInterfaces/test-nic"), }, }, }, diff --git a/sources/azure/manual/network-private-dns-zone.go b/sources/azure/manual/network-private-dns-zone.go new file mode 100644 index 00000000..18f54ba5 --- /dev/null +++ b/sources/azure/manual/network-private-dns-zone.go @@ -0,0 +1,222 
@@ +package manual + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" + "github.com/overmindtech/cli/sources/stdlib" +) + +var NetworkPrivateDNSZoneLookupByName = shared.NewItemTypeLookup("name", azureshared.NetworkPrivateDNSZone) + +type networkPrivateDNSZoneWrapper struct { + client clients.PrivateDNSZonesClient + + *azureshared.MultiResourceGroupBase +} + +func NewNetworkPrivateDNSZone(client clients.PrivateDNSZonesClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.ListableWrapper { + return &networkPrivateDNSZoneWrapper{ + client: client, + MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_NETWORK, + azureshared.NetworkPrivateDNSZone, + ), + } +} + +func (n networkPrivateDNSZoneWrapper) List(ctx context.Context, scope string) ([]*sdp.Item, *sdp.QueryError) { + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + pager := n.client.NewListByResourceGroupPager(rgScope.ResourceGroup, nil) + + var items []*sdp.Item + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + for _, zone := range page.Value { + if zone.Name == nil { + continue + } + item, sdpErr := n.azurePrivateZoneToSDPItem(zone, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + return items, nil +} + +func (n networkPrivateDNSZoneWrapper) ListStream(ctx context.Context, stream discovery.QueryResultStream, 
cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string) { + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, n.Type())) + return + } + pager := n.client.NewListByResourceGroupPager(rgScope.ResourceGroup, nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, n.Type())) + return + } + for _, zone := range page.Value { + if zone.Name == nil { + continue + } + item, sdpErr := n.azurePrivateZoneToSDPItem(zone, scope) + if sdpErr != nil { + stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) + stream.SendItem(item) + } + } +} + +func (n networkPrivateDNSZoneWrapper) azurePrivateZoneToSDPItem(zone *armprivatedns.PrivateZone, scope string) (*sdp.Item, *sdp.QueryError) { + attributes, err := shared.ToAttributesWithExclude(zone, "tags") + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + if zone.Name == nil { + return nil, azureshared.QueryError(errors.New("zone name is nil"), scope, n.Type()) + } + + sdpItem := &sdp.Item{ + Type: azureshared.NetworkPrivateDNSZone.String(), + UniqueAttribute: "name", + Attributes: attributes, + Scope: scope, + Tags: azureshared.ConvertAzureTags(zone.Tags), + } + + // Health from provisioning state + if zone.Properties != nil && zone.Properties.ProvisioningState != nil { + switch *zone.Properties.ProvisioningState { + case armprivatedns.ProvisioningStateSucceeded: + sdpItem.Health = sdp.Health_HEALTH_OK.Enum() + case armprivatedns.ProvisioningStateCreating, armprivatedns.ProvisioningStateUpdating, armprivatedns.ProvisioningStateDeleting: + sdpItem.Health = sdp.Health_HEALTH_PENDING.Enum() + case armprivatedns.ProvisioningStateFailed, armprivatedns.ProvisioningStateCanceled: + sdpItem.Health = sdp.Health_HEALTH_ERROR.Enum() + default: + sdpItem.Health = sdp.Health_HEALTH_UNKNOWN.Enum() + } + } + + 
zoneName := *zone.Name + + // Link to DNS name (standard library) for the zone name + if zoneName != "" { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkDNS.String(), + Method: sdp.QueryMethod_SEARCH, + Query: zoneName, + Scope: "global", + }, + }) + } + + // Link to Virtual Network Links (child resource of Private DNS Zone) + // Reference: https://learn.microsoft.com/en-us/rest/api/dns/privatednszones/virtualnetworklinks/list + // Virtual network links can be listed by zone name, so we use SEARCH method + if zoneName != "" { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkDNSVirtualNetworkLink.String(), + Method: sdp.QueryMethod_SEARCH, + Query: zoneName, + Scope: scope, + }, + }) + } + + // Link to DNS Record Sets (child resource of Private DNS Zone) + // Reference: https://learn.microsoft.com/en-us/rest/api/dns/privatednszones/recordsets/list + // Record sets (A, AAAA, CNAME, MX, PTR, SOA, SRV, TXT) can be listed by zone name, so we use SEARCH method + if zoneName != "" { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkDNSRecordSet.String(), + Method: sdp.QueryMethod_SEARCH, + Query: zoneName, + Scope: scope, + }, + }) + } + + return sdpItem, nil +} + +// ref: https://learn.microsoft.com/en-us/rest/api/dns/privatednszones/get +func (n networkPrivateDNSZoneWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 1 { + return nil, azureshared.QueryError(errors.New("query must be exactly one part (private zone name)"), scope, n.Type()) + } + zoneName := queryParts[0] + if zoneName == "" { + return nil, azureshared.QueryError(errors.New("private zone name cannot be empty"), scope, n.Type()) + } + + rgScope, err := n.ResourceGroupScopeFromScope(scope) 
+ if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + resp, err := n.client.Get(ctx, rgScope.ResourceGroup, zoneName, nil) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + return n.azurePrivateZoneToSDPItem(&resp.PrivateZone, scope) +} + +func (n networkPrivateDNSZoneWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + NetworkPrivateDNSZoneLookupByName, + } +} + +func (n networkPrivateDNSZoneWrapper) PotentialLinks() map[shared.ItemType]bool { + return shared.NewItemTypesSet( + azureshared.NetworkDNSRecordSet, + azureshared.NetworkDNSVirtualNetworkLink, + stdlib.NetworkDNS, + ) +} + +// ref: https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/private_dns_zone +func (n networkPrivateDNSZoneWrapper) TerraformMappings() []*sdp.TerraformMapping { + return []*sdp.TerraformMapping{ + { + TerraformMethod: sdp.QueryMethod_GET, + TerraformQueryMap: "azurerm_private_dns_zone.name", + }, + } +} + +// ref: https://learn.microsoft.com/en-us/azure/role-based-access-control/resource-provider-operations#microsoftnetwork +func (n networkPrivateDNSZoneWrapper) IAMPermissions() []string { + return []string{ + "Microsoft.Network/privateDnsZones/read", + } +} + +func (n networkPrivateDNSZoneWrapper) PredefinedRole() string { + return "Reader" +} diff --git a/sources/azure/manual/network-private-dns-zone_test.go b/sources/azure/manual/network-private-dns-zone_test.go new file mode 100644 index 00000000..4901d7a3 --- /dev/null +++ b/sources/azure/manual/network-private-dns-zone_test.go @@ -0,0 +1,376 @@ +package manual_test + +import ( + "context" + "errors" + "slices" + "sync" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns" + "go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + 
"github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + "github.com/overmindtech/cli/sources/shared" + "github.com/overmindtech/cli/sources/stdlib" +) + +func TestNetworkPrivateDNSZone(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + + t.Run("Get", func(t *testing.T) { + zoneName := "private.example.com" + zone := createAzurePrivateZone(zoneName) + + mockClient := mocks.NewMockPrivateDNSZonesClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, zoneName, nil).Return( + armprivatedns.PrivateZonesClientGetResponse{ + PrivateZone: *zone, + }, nil) + + wrapper := manual.NewNetworkPrivateDNSZone(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], zoneName, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.NetworkPrivateDNSZone.String() { + t.Errorf("Expected type %s, got %s", azureshared.NetworkPrivateDNSZone, sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "name" { + t.Errorf("Expected unique attribute 'name', got %s", sdpItem.GetUniqueAttribute()) + } + + if sdpItem.UniqueAttributeValue() != zoneName { + t.Errorf("Expected unique attribute value %s, got %s", zoneName, sdpItem.UniqueAttributeValue()) + } + + if sdpItem.GetTags()["env"] != "test" { + t.Errorf("Expected tag 'env=test', got: %v", sdpItem.GetTags()["env"]) + } + + t.Run("StaticTests", func(t *testing.T) { + scope := subscriptionID + "." 
+ resourceGroup + queryTests := shared.QueryTests{ + { + ExpectedType: stdlib.NetworkDNS.String(), + ExpectedMethod: sdp.QueryMethod_SEARCH, + ExpectedQuery: zoneName, + ExpectedScope: "global", + }, + { + ExpectedType: azureshared.NetworkDNSVirtualNetworkLink.String(), + ExpectedMethod: sdp.QueryMethod_SEARCH, + ExpectedQuery: zoneName, + ExpectedScope: scope, + }, + { + ExpectedType: azureshared.NetworkDNSRecordSet.String(), + ExpectedMethod: sdp.QueryMethod_SEARCH, + ExpectedQuery: zoneName, + ExpectedScope: scope, + }, + } + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("GetWithEmptyName", func(t *testing.T) { + mockClient := mocks.NewMockPrivateDNSZonesClient(ctrl) + + wrapper := manual.NewNetworkPrivateDNSZone(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], "", true) + if qErr == nil { + t.Error("Expected error when zone name is empty, but got nil") + } + }) + + t.Run("Get_ZoneWithNilName", func(t *testing.T) { + provisioningState := armprivatedns.ProvisioningStateSucceeded + zoneWithNilName := &armprivatedns.PrivateZone{ + Name: nil, + Location: new("eastus"), + Properties: &armprivatedns.PrivateZoneProperties{ + ProvisioningState: &provisioningState, + }, + } + + mockClient := mocks.NewMockPrivateDNSZonesClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, "test-zone", nil).Return( + armprivatedns.PrivateZonesClientGetResponse{ + PrivateZone: *zoneWithNilName, + }, nil) + + wrapper := manual.NewNetworkPrivateDNSZone(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], "test-zone", true) + if qErr == nil { + t.Error("Expected error when zone has nil name, 
but got nil") + } + }) + + t.Run("List", func(t *testing.T) { + zone1 := createAzurePrivateZone("private1.example.com") + zone2 := createAzurePrivateZone("private2.example.com") + + mockClient := mocks.NewMockPrivateDNSZonesClient(ctrl) + mockPager := newMockPrivateDNSZonesPager(ctrl, []*armprivatedns.PrivateZone{zone1, zone2}) + + mockClient.EXPECT().NewListByResourceGroupPager(resourceGroup, nil).Return(mockPager) + + wrapper := manual.NewNetworkPrivateDNSZone(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + listable, ok := adapter.(discovery.ListableAdapter) + if !ok { + t.Fatalf("Adapter does not support List operation") + } + + sdpItems, err := listable.List(ctx, wrapper.Scopes()[0], true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 2 { + t.Fatalf("Expected 2 items, got: %d", len(sdpItems)) + } + + for _, item := range sdpItems { + if item.Validate() != nil { + t.Fatalf("Expected no validation error, got: %v", item.Validate()) + } + if item.GetType() != azureshared.NetworkPrivateDNSZone.String() { + t.Fatalf("Expected type %s, got: %s", azureshared.NetworkPrivateDNSZone, item.GetType()) + } + } + }) + + t.Run("List_WithNilName", func(t *testing.T) { + zone1 := createAzurePrivateZone("private1.example.com") + provisioningState := armprivatedns.ProvisioningStateSucceeded + zone2NilName := &armprivatedns.PrivateZone{ + Name: nil, + Location: new("eastus"), + Tags: map[string]*string{"env": new("test")}, + Properties: &armprivatedns.PrivateZoneProperties{ + ProvisioningState: &provisioningState, + }, + } + + mockClient := mocks.NewMockPrivateDNSZonesClient(ctrl) + mockPager := newMockPrivateDNSZonesPager(ctrl, []*armprivatedns.PrivateZone{zone1, zone2NilName}) + + mockClient.EXPECT().NewListByResourceGroupPager(resourceGroup, nil).Return(mockPager) + + wrapper := 
manual.NewNetworkPrivateDNSZone(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + listable, ok := adapter.(discovery.ListableAdapter) + if !ok { + t.Fatalf("Adapter does not support List operation") + } + + sdpItems, err := listable.List(ctx, wrapper.Scopes()[0], true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 1 { + t.Fatalf("Expected 1 item (nil name skipped), got: %d", len(sdpItems)) + } + if sdpItems[0].UniqueAttributeValue() != "private1.example.com" { + t.Errorf("Expected item name 'private1.example.com', got: %s", sdpItems[0].UniqueAttributeValue()) + } + }) + + t.Run("ListStream", func(t *testing.T) { + zone1 := createAzurePrivateZone("stream1.example.com") + zone2 := createAzurePrivateZone("stream2.example.com") + + mockClient := mocks.NewMockPrivateDNSZonesClient(ctrl) + mockPager := newMockPrivateDNSZonesPager(ctrl, []*armprivatedns.PrivateZone{zone1, zone2}) + + mockClient.EXPECT().NewListByResourceGroupPager(resourceGroup, nil).Return(mockPager) + + wrapper := manual.NewNetworkPrivateDNSZone(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + wg := &sync.WaitGroup{} + wg.Add(2) + + var items []*sdp.Item + mockItemHandler := func(item *sdp.Item) { + items = append(items, item) + wg.Done() + } + var errs []error + mockErrorHandler := func(err error) { + errs = append(errs, err) + } + stream := discovery.NewQueryResultStream(mockItemHandler, mockErrorHandler) + + listStreamable, ok := adapter.(discovery.ListStreamableAdapter) + if !ok { + t.Fatalf("Adapter does not support ListStream operation") + } + + listStreamable.ListStream(ctx, wrapper.Scopes()[0], true, stream) + wg.Wait() + + if len(errs) != 0 { + t.Fatalf("Expected no errors, 
got: %v", errs) + } + if len(items) != 2 { + t.Fatalf("Expected 2 items, got: %d", len(items)) + } + }) + + t.Run("ErrorHandling", func(t *testing.T) { + expectedErr := errors.New("private zone not found") + + mockClient := mocks.NewMockPrivateDNSZonesClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, "nonexistent-zone", nil).Return( + armprivatedns.PrivateZonesClientGetResponse{}, expectedErr) + + wrapper := manual.NewNetworkPrivateDNSZone(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], "nonexistent-zone", true) + if qErr == nil { + t.Error("Expected error when getting non-existent zone, but got nil") + } + }) + + t.Run("InterfaceCompliance", func(t *testing.T) { + mockClient := mocks.NewMockPrivateDNSZonesClient(ctrl) + wrapper := manual.NewNetworkPrivateDNSZone(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + w := wrapper.(sources.Wrapper) + + permissions := w.IAMPermissions() + if len(permissions) == 0 { + t.Error("Expected IAMPermissions to return at least one permission") + } + expectedPermission := "Microsoft.Network/privateDnsZones/read" + if !slices.Contains(permissions, expectedPermission) { + t.Errorf("Expected IAMPermissions to include %s", expectedPermission) + } + + potentialLinks := w.PotentialLinks() + if !potentialLinks[azureshared.NetworkDNSRecordSet] { + t.Error("Expected PotentialLinks to include NetworkDNSRecordSet") + } + if !potentialLinks[azureshared.NetworkDNSVirtualNetworkLink] { + t.Error("Expected PotentialLinks to include NetworkDNSVirtualNetworkLink") + } + if !potentialLinks[stdlib.NetworkDNS] { + t.Error("Expected PotentialLinks to include stdlib.NetworkDNS") + } + + mappings := w.TerraformMappings() + foundMapping := false + for _, mapping := range mappings { + if 
mapping.GetTerraformQueryMap() == "azurerm_private_dns_zone.name" { + foundMapping = true + if mapping.GetTerraformMethod() != sdp.QueryMethod_GET { + t.Errorf("Expected TerraformMethod GET, got: %s", mapping.GetTerraformMethod()) + } + break + } + } + if !foundMapping { + t.Error("Expected TerraformMappings to include 'azurerm_private_dns_zone.name'") + } + + lookups := w.GetLookups() + foundLookup := false + for _, lookup := range lookups { + if lookup.ItemType == azureshared.NetworkPrivateDNSZone { + foundLookup = true + break + } + } + if !foundLookup { + t.Error("Expected GetLookups to include NetworkPrivateDNSZone") + } + }) +} + +type mockPrivateDNSZonesPager struct { + ctrl *gomock.Controller + items []*armprivatedns.PrivateZone + index int + more bool +} + +func newMockPrivateDNSZonesPager(ctrl *gomock.Controller, items []*armprivatedns.PrivateZone) clients.PrivateDNSZonesPager { + return &mockPrivateDNSZonesPager{ + ctrl: ctrl, + items: items, + index: 0, + more: len(items) > 0, + } +} + +func (m *mockPrivateDNSZonesPager) More() bool { + return m.more +} + +func (m *mockPrivateDNSZonesPager) NextPage(ctx context.Context) (armprivatedns.PrivateZonesClientListByResourceGroupResponse, error) { + if m.index >= len(m.items) { + m.more = false + return armprivatedns.PrivateZonesClientListByResourceGroupResponse{ + PrivateZoneListResult: armprivatedns.PrivateZoneListResult{ + Value: []*armprivatedns.PrivateZone{}, + }, + }, nil + } + item := m.items[m.index] + m.index++ + m.more = m.index < len(m.items) + return armprivatedns.PrivateZonesClientListByResourceGroupResponse{ + PrivateZoneListResult: armprivatedns.PrivateZoneListResult{ + Value: []*armprivatedns.PrivateZone{item}, + }, + }, nil +} + +func createAzurePrivateZone(zoneName string) *armprivatedns.PrivateZone { + state := armprivatedns.ProvisioningStateSucceeded + return &armprivatedns.PrivateZone{ + ID: new("/subscriptions/test-sub/resourceGroups/test-rg/providers/Microsoft.Network/privateDnsZones/" + 
zoneName), + Name: new(zoneName), + Type: new("Microsoft.Network/privateDnsZones"), + Location: new("global"), + Tags: map[string]*string{ + "env": new("test"), + "project": new("testing"), + }, + Properties: &armprivatedns.PrivateZoneProperties{ + ProvisioningState: &state, + MaxNumberOfRecordSets: new(int64(5000)), + NumberOfRecordSets: new(int64(0)), + }, + } +} + +// Ensure mockPrivateDNSZonesPager satisfies the pager interface at compile time. +var _ clients.PrivateDNSZonesPager = (*mockPrivateDNSZonesPager)(nil) diff --git a/sources/azure/manual/network-private-endpoint.go b/sources/azure/manual/network-private-endpoint.go new file mode 100644 index 00000000..b4abcbe8 --- /dev/null +++ b/sources/azure/manual/network-private-endpoint.go @@ -0,0 +1,346 @@ +package manual + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" + "github.com/overmindtech/cli/sources/stdlib" +) + +var NetworkPrivateEndpointLookupByName = shared.NewItemTypeLookup("name", azureshared.NetworkPrivateEndpoint) + +type networkPrivateEndpointWrapper struct { + client clients.PrivateEndpointsClient + + *azureshared.MultiResourceGroupBase +} + +func NewNetworkPrivateEndpoint(client clients.PrivateEndpointsClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.ListableWrapper { + return &networkPrivateEndpointWrapper{ + client: client, + MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_NETWORK, + azureshared.NetworkPrivateEndpoint, + ), + } +} + +func (n networkPrivateEndpointWrapper) List(ctx context.Context, 
scope string) ([]*sdp.Item, *sdp.QueryError) { + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + pager := n.client.List(rgScope.ResourceGroup) + + var items []*sdp.Item + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + for _, pe := range page.Value { + if pe.Name == nil { + continue + } + item, sdpErr := n.azurePrivateEndpointToSDPItem(pe, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + return items, nil +} + +func (n networkPrivateEndpointWrapper) ListStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string) { + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, n.Type())) + return + } + pager := n.client.List(rgScope.ResourceGroup) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, n.Type())) + return + } + for _, pe := range page.Value { + if pe.Name == nil { + continue + } + item, sdpErr := n.azurePrivateEndpointToSDPItem(pe, scope) + if sdpErr != nil { + stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) + stream.SendItem(item) + } + } +} + +func (n networkPrivateEndpointWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) != 1 { + return nil, azureshared.QueryError(errors.New("query must be a private endpoint name"), scope, n.Type()) + } + name := queryParts[0] + if name == "" { + return nil, azureshared.QueryError(errors.New("private endpoint name cannot be empty"), scope, n.Type()) + } + + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, 
azureshared.QueryError(err, scope, n.Type()) + } + resp, err := n.client.Get(ctx, rgScope.ResourceGroup, name) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + return n.azurePrivateEndpointToSDPItem(&resp.PrivateEndpoint, scope) +} + +func (n networkPrivateEndpointWrapper) azurePrivateEndpointToSDPItem(pe *armnetwork.PrivateEndpoint, scope string) (*sdp.Item, *sdp.QueryError) { + if pe.Name == nil { + return nil, azureshared.QueryError(errors.New("private endpoint name is nil"), scope, n.Type()) + } + attributes, err := shared.ToAttributesWithExclude(pe, "tags") + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + + sdpItem := &sdp.Item{ + Type: azureshared.NetworkPrivateEndpoint.String(), + UniqueAttribute: "name", + Attributes: attributes, + Scope: scope, + Tags: azureshared.ConvertAzureTags(pe.Tags), + } + + // Health status from ProvisioningState + if pe.Properties != nil && pe.Properties.ProvisioningState != nil { + switch *pe.Properties.ProvisioningState { + case armnetwork.ProvisioningStateSucceeded: + sdpItem.Health = sdp.Health_HEALTH_OK.Enum() + case armnetwork.ProvisioningStateCreating, armnetwork.ProvisioningStateUpdating, armnetwork.ProvisioningStateDeleting: + sdpItem.Health = sdp.Health_HEALTH_PENDING.Enum() + case armnetwork.ProvisioningStateFailed, armnetwork.ProvisioningStateCanceled: + sdpItem.Health = sdp.Health_HEALTH_ERROR.Enum() + } + } + + // Link to Subnet and parent VirtualNetwork + if pe.Properties != nil && pe.Properties.Subnet != nil && pe.Properties.Subnet.ID != nil { + subnetParams := azureshared.ExtractPathParamsFromResourceID(*pe.Properties.Subnet.ID, []string{"virtualNetworks", "subnets"}) + if len(subnetParams) >= 2 { + vnetName, subnetName := subnetParams[0], subnetParams[1] + linkedScope := azureshared.ExtractScopeFromResourceID(*pe.Properties.Subnet.ID) + if linkedScope == "" { + linkedScope = scope + } + sdpItem.LinkedItemQueries = 
append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkSubnet.String(), + Method: sdp.QueryMethod_GET, + Query: shared.CompositeLookupKey(vnetName, subnetName), + Scope: linkedScope, + }, + }) + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkVirtualNetwork.String(), + Method: sdp.QueryMethod_GET, + Query: vnetName, + Scope: linkedScope, + }, + }) + } + } + + // Link to NetworkInterfaces (read-only array of NICs created for this private endpoint) + if pe.Properties != nil && pe.Properties.NetworkInterfaces != nil { + for _, iface := range pe.Properties.NetworkInterfaces { + if iface != nil && iface.ID != nil { + nicName := azureshared.ExtractResourceName(*iface.ID) + if nicName != "" { + linkedScope := azureshared.ExtractScopeFromResourceID(*iface.ID) + if linkedScope == "" { + linkedScope = scope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkNetworkInterface.String(), + Method: sdp.QueryMethod_GET, + Query: nicName, + Scope: linkedScope, + }, + }) + } + } + } + } + + // Link to ApplicationSecurityGroups + if pe.Properties != nil && pe.Properties.ApplicationSecurityGroups != nil { + for _, asg := range pe.Properties.ApplicationSecurityGroups { + if asg != nil && asg.ID != nil { + asgName := azureshared.ExtractResourceName(*asg.ID) + if asgName != "" { + linkedScope := azureshared.ExtractScopeFromResourceID(*asg.ID) + if linkedScope == "" { + linkedScope = scope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkApplicationSecurityGroup.String(), + Method: sdp.QueryMethod_GET, + Query: asgName, + Scope: linkedScope, + }, + }) + } + } + } + } + + // Link IPConfigurations[].Properties.PrivateIPAddress to stdlib ip (GET, global) + if pe.Properties != nil && 
pe.Properties.IPConfigurations != nil { + for _, ipConfig := range pe.Properties.IPConfigurations { + if ipConfig == nil || ipConfig.Properties == nil || ipConfig.Properties.PrivateIPAddress == nil { + continue + } + if *ipConfig.Properties.PrivateIPAddress != "" { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkIP.String(), + Method: sdp.QueryMethod_GET, + Query: *ipConfig.Properties.PrivateIPAddress, + Scope: "global", + }, + }) + } + } + } + + // Link to Private Link Services from PrivateLinkServiceConnections and ManualPrivateLinkServiceConnections + if pe.Properties != nil { + seenPLS := make(map[string]struct{}) + for _, conns := range [][]*armnetwork.PrivateLinkServiceConnection{ + pe.Properties.PrivateLinkServiceConnections, + pe.Properties.ManualPrivateLinkServiceConnections, + } { + for _, conn := range conns { + if conn == nil || conn.Properties == nil || conn.Properties.PrivateLinkServiceID == nil { + continue + } + plsID := *conn.Properties.PrivateLinkServiceID + if plsID == "" { + continue + } + if _, ok := seenPLS[plsID]; ok { + continue + } + seenPLS[plsID] = struct{}{} + plsName := azureshared.ExtractResourceName(plsID) + if plsName == "" { + continue + } + linkedScope := azureshared.ExtractScopeFromResourceID(plsID) + if linkedScope == "" { + linkedScope = scope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkPrivateLinkService.String(), + Method: sdp.QueryMethod_GET, + Query: plsName, + Scope: linkedScope, + }, + }) + } + } + } + + // Link CustomDnsConfigs: Fqdn -> stdlib dns (SEARCH, global), IPAddresses -> stdlib ip (GET, global) + if pe.Properties != nil && pe.Properties.CustomDNSConfigs != nil { + for _, dnsConfig := range pe.Properties.CustomDNSConfigs { + if dnsConfig == nil { + continue + } + if dnsConfig.Fqdn != nil && *dnsConfig.Fqdn != "" { + sdpItem.LinkedItemQueries 
= append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkDNS.String(), + Method: sdp.QueryMethod_SEARCH, + Query: *dnsConfig.Fqdn, + Scope: "global", + }, + }) + } + if dnsConfig.IPAddresses != nil { + for _, ip := range dnsConfig.IPAddresses { + if ip != nil && *ip != "" { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkIP.String(), + Method: sdp.QueryMethod_GET, + Query: *ip, + Scope: "global", + }, + }) + } + } + } + } + } + + return sdpItem, nil +} + +func (n networkPrivateEndpointWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + NetworkPrivateEndpointLookupByName, + } +} + +func (n networkPrivateEndpointWrapper) PotentialLinks() map[shared.ItemType]bool { + return shared.NewItemTypesSet( + azureshared.NetworkSubnet, + azureshared.NetworkVirtualNetwork, + azureshared.NetworkNetworkInterface, + azureshared.NetworkApplicationSecurityGroup, + azureshared.NetworkPrivateLinkService, + stdlib.NetworkIP, + stdlib.NetworkDNS, + ) +} + +// ref: https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/private_endpoint +func (n networkPrivateEndpointWrapper) TerraformMappings() []*sdp.TerraformMapping { + return []*sdp.TerraformMapping{ + { + TerraformMethod: sdp.QueryMethod_GET, + TerraformQueryMap: "azurerm_private_endpoint.name", + }, + } +} + +// ref: https://learn.microsoft.com/en-us/azure/role-based-access-control/permissions-reference#microsoftnetwork +func (n networkPrivateEndpointWrapper) IAMPermissions() []string { + return []string{ + "Microsoft.Network/privateEndpoints/read", + } +} + +func (n networkPrivateEndpointWrapper) PredefinedRole() string { + return "Network Contributor" +} diff --git a/sources/azure/manual/network-private-endpoint_test.go b/sources/azure/manual/network-private-endpoint_test.go new file mode 100644 index 00000000..73ce8c7d --- /dev/null +++ 
b/sources/azure/manual/network-private-endpoint_test.go @@ -0,0 +1,334 @@ +package manual_test + +import ( + "context" + "errors" + "fmt" + "reflect" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + "go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + "github.com/overmindtech/cli/sources/shared" + "github.com/overmindtech/cli/sources/stdlib" +) + +func TestNetworkPrivateEndpoint(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + + t.Run("Get", func(t *testing.T) { + peName := "test-pe" + pe := createAzurePrivateEndpoint(peName, subscriptionID, resourceGroup) + + mockClient := mocks.NewMockPrivateEndpointsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, peName).Return( + armnetwork.PrivateEndpointsClientGetResponse{ + PrivateEndpoint: *pe, + }, nil) + + wrapper := manual.NewNetworkPrivateEndpoint(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], peName, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.NetworkPrivateEndpoint.String() { + t.Errorf("Expected type %s, got %s", azureshared.NetworkPrivateEndpoint, sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "name" { + t.Errorf("Expected unique attribute 'name', got %s", sdpItem.GetUniqueAttribute()) + 
} + + if sdpItem.UniqueAttributeValue() != peName { + t.Errorf("Expected unique attribute value %s, got %s", peName, sdpItem.UniqueAttributeValue()) + } + + if sdpItem.GetTags()["env"] != "test" { + t.Errorf("Expected tag 'env=test', got: %v", sdpItem.GetTags()["env"]) + } + + t.Run("StaticTests", func(t *testing.T) { + queryTests := shared.QueryTests{ + { + ExpectedType: azureshared.NetworkSubnet.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: shared.CompositeLookupKey("test-vnet", "test-subnet"), + ExpectedScope: fmt.Sprintf("%s.%s", subscriptionID, resourceGroup), + }, { + ExpectedType: azureshared.NetworkVirtualNetwork.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-vnet", + ExpectedScope: fmt.Sprintf("%s.%s", subscriptionID, resourceGroup), + }, { + ExpectedType: azureshared.NetworkNetworkInterface.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-nic", + ExpectedScope: fmt.Sprintf("%s.%s", subscriptionID, resourceGroup), + }, { + ExpectedType: azureshared.NetworkApplicationSecurityGroup.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-asg", + ExpectedScope: fmt.Sprintf("%s.%s", subscriptionID, resourceGroup), + }, { + ExpectedType: stdlib.NetworkIP.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "10.0.0.10", + ExpectedScope: "global", + }, { + ExpectedType: stdlib.NetworkDNS.String(), + ExpectedMethod: sdp.QueryMethod_SEARCH, + ExpectedQuery: "myendpoint.example.com", + ExpectedScope: "global", + }, { + ExpectedType: stdlib.NetworkIP.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "10.0.0.5", + ExpectedScope: "global", + }, + } + + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("Get_EmptyName", func(t *testing.T) { + mockClient := mocks.NewMockPrivateEndpointsClient(ctrl) + + wrapper := manual.NewNetworkPrivateEndpoint(mockClient, 
[]azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], "", true) + if qErr == nil { + t.Error("Expected error when getting private endpoint with empty name, but got nil") + } + }) + + t.Run("List", func(t *testing.T) { + pe1 := createAzurePrivateEndpoint("test-pe-1", subscriptionID, resourceGroup) + pe2 := createAzurePrivateEndpoint("test-pe-2", subscriptionID, resourceGroup) + + mockClient := mocks.NewMockPrivateEndpointsClient(ctrl) + mockPager := NewMockPrivateEndpointsPager(ctrl) + + gomock.InOrder( + mockPager.EXPECT().More().Return(true), + mockPager.EXPECT().NextPage(ctx).Return( + armnetwork.PrivateEndpointsClientListResponse{ + PrivateEndpointListResult: armnetwork.PrivateEndpointListResult{ + Value: []*armnetwork.PrivateEndpoint{pe1, pe2}, + }, + }, nil), + mockPager.EXPECT().More().Return(false), + ) + + mockClient.EXPECT().List(resourceGroup).Return(mockPager) + + wrapper := manual.NewNetworkPrivateEndpoint(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + listable, ok := adapter.(discovery.ListableAdapter) + if !ok { + t.Fatalf("Adapter does not support List operation") + } + + sdpItems, err := listable.List(ctx, wrapper.Scopes()[0], true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 2 { + t.Fatalf("Expected 2 items, got: %d", len(sdpItems)) + } + + for _, item := range sdpItems { + if item.Validate() != nil { + t.Fatalf("Expected no validation error, got: %v", item.Validate()) + } + if item.GetType() != azureshared.NetworkPrivateEndpoint.String() { + t.Fatalf("Expected type %s, got: %s", azureshared.NetworkPrivateEndpoint, item.GetType()) + } + } + }) + + t.Run("List_WithNilName", func(t *testing.T) { + 
pe1 := createAzurePrivateEndpoint("test-pe-1", subscriptionID, resourceGroup) + pe2 := &armnetwork.PrivateEndpoint{ + Name: nil, + Location: new("eastus"), + Tags: map[string]*string{"env": new("test")}, + Properties: &armnetwork.PrivateEndpointProperties{ + ProvisioningState: to.Ptr(armnetwork.ProvisioningStateSucceeded), + }, + } + + mockClient := mocks.NewMockPrivateEndpointsClient(ctrl) + mockPager := NewMockPrivateEndpointsPager(ctrl) + + gomock.InOrder( + mockPager.EXPECT().More().Return(true), + mockPager.EXPECT().NextPage(ctx).Return( + armnetwork.PrivateEndpointsClientListResponse{ + PrivateEndpointListResult: armnetwork.PrivateEndpointListResult{ + Value: []*armnetwork.PrivateEndpoint{pe1, pe2}, + }, + }, nil), + mockPager.EXPECT().More().Return(false), + ) + + mockClient.EXPECT().List(resourceGroup).Return(mockPager) + + wrapper := manual.NewNetworkPrivateEndpoint(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + listable, ok := adapter.(discovery.ListableAdapter) + if !ok { + t.Fatalf("Adapter does not support List operation") + } + + sdpItems, err := listable.List(ctx, wrapper.Scopes()[0], true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 1 { + t.Fatalf("Expected 1 item (nil name skipped), got: %d", len(sdpItems)) + } + if sdpItems[0].UniqueAttributeValue() != "test-pe-1" { + t.Errorf("Expected item name 'test-pe-1', got: %s", sdpItems[0].UniqueAttributeValue()) + } + }) + + t.Run("ErrorHandling", func(t *testing.T) { + expectedErr := errors.New("private endpoint not found") + + mockClient := mocks.NewMockPrivateEndpointsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, "nonexistent-pe").Return( + armnetwork.PrivateEndpointsClientGetResponse{}, expectedErr) + + wrapper := manual.NewNetworkPrivateEndpoint(mockClient, 
[]azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], "nonexistent-pe", true) + if qErr == nil { + t.Fatal("Expected error when getting nonexistent private endpoint, got nil") + } + }) + + t.Run("PotentialLinks", func(t *testing.T) { + mockClient := mocks.NewMockPrivateEndpointsClient(ctrl) + wrapper := manual.NewNetworkPrivateEndpoint(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + w := wrapper.(sources.Wrapper) + potentialLinks := w.PotentialLinks() + if len(potentialLinks) == 0 { + t.Error("Expected PotentialLinks to return at least one link type") + } + if !potentialLinks[azureshared.NetworkSubnet] { + t.Error("Expected PotentialLinks to include NetworkSubnet") + } + if !potentialLinks[azureshared.NetworkVirtualNetwork] { + t.Error("Expected PotentialLinks to include NetworkVirtualNetwork") + } + }) +} + +// MockPrivateEndpointsPager is a mock for PrivateEndpointsPager +type MockPrivateEndpointsPager struct { + ctrl *gomock.Controller + recorder *MockPrivateEndpointsPagerMockRecorder +} + +type MockPrivateEndpointsPagerMockRecorder struct { + mock *MockPrivateEndpointsPager +} + +func NewMockPrivateEndpointsPager(ctrl *gomock.Controller) *MockPrivateEndpointsPager { + mock := &MockPrivateEndpointsPager{ctrl: ctrl} + mock.recorder = &MockPrivateEndpointsPagerMockRecorder{mock} + return mock +} + +func (m *MockPrivateEndpointsPager) EXPECT() *MockPrivateEndpointsPagerMockRecorder { + return m.recorder +} + +func (m *MockPrivateEndpointsPager) More() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "More") + ret0, _ := ret[0].(bool) + return ret0 +} + +func (mr *MockPrivateEndpointsPagerMockRecorder) More() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "More", 
reflect.TypeFor[func() bool]()) +} + +func (m *MockPrivateEndpointsPager) NextPage(ctx context.Context) (armnetwork.PrivateEndpointsClientListResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NextPage", ctx) + ret0, _ := ret[0].(armnetwork.PrivateEndpointsClientListResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (mr *MockPrivateEndpointsPagerMockRecorder) NextPage(ctx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NextPage", reflect.TypeFor[func(ctx context.Context) (armnetwork.PrivateEndpointsClientListResponse, error)](), ctx) +} + +func createAzurePrivateEndpoint(peName, subscriptionID, resourceGroup string) *armnetwork.PrivateEndpoint { + subnetID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/test-subnet", subscriptionID, resourceGroup) + nicID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/networkInterfaces/test-nic", subscriptionID, resourceGroup) + asgID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/applicationSecurityGroups/test-asg", subscriptionID, resourceGroup) + + return &armnetwork.PrivateEndpoint{ + Name: new(peName), + Location: new("eastus"), + Tags: map[string]*string{ + "env": new("test"), + "project": new("testing"), + }, + Properties: &armnetwork.PrivateEndpointProperties{ + ProvisioningState: to.Ptr(armnetwork.ProvisioningStateSucceeded), + Subnet: &armnetwork.Subnet{ + ID: new(subnetID), + }, + NetworkInterfaces: []*armnetwork.Interface{ + {ID: new(nicID)}, + }, + ApplicationSecurityGroups: []*armnetwork.ApplicationSecurityGroup{ + {ID: new(asgID)}, + }, + IPConfigurations: []*armnetwork.PrivateEndpointIPConfiguration{ + { + Properties: &armnetwork.PrivateEndpointIPConfigurationProperties{ + PrivateIPAddress: new("10.0.0.10"), + }, + }, + }, + CustomDNSConfigs: []*armnetwork.CustomDNSConfigPropertiesFormat{ + { + Fqdn: 
new("myendpoint.example.com"), + IPAddresses: []*string{new("10.0.0.5")}, + }, + }, + }, + } +} diff --git a/sources/azure/manual/network-public-ip-address.go b/sources/azure/manual/network-public-ip-address.go index eecf5e3e..1206a087 100644 --- a/sources/azure/manual/network-public-ip-address.go +++ b/sources/azure/manual/network-public-ip-address.go @@ -5,7 +5,7 @@ import ( "errors" "strings" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" "github.com/overmindtech/cli/go/sdpcache" diff --git a/sources/azure/manual/network-public-ip-address_test.go b/sources/azure/manual/network-public-ip-address_test.go index f8971218..fb7b0559 100644 --- a/sources/azure/manual/network-public-ip-address_test.go +++ b/sources/azure/manual/network-public-ip-address_test.go @@ -4,10 +4,10 @@ import ( "context" "errors" "reflect" + "slices" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" "go.uber.org/mock/gomock" "github.com/overmindtech/cli/go/discovery" @@ -276,9 +276,9 @@ func TestNetworkPublicIPAddress(t *testing.T) { publicIP1 := createAzurePublicIPAddress("test-public-ip-1", "", "", "", "", "") publicIP2 := &armnetwork.PublicIPAddress{ Name: nil, // Public IP with nil name will be skipped - Location: to.Ptr("eastus"), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armnetwork.PublicIPAddressPropertiesFormat{}, } @@ -384,13 +384,7 @@ func TestNetworkPublicIPAddress(t *testing.T) { t.Error("Expected IAMPermissions to return at least one permission") } expectedPermission := "Microsoft.Network/publicIPAddresses/read" - found := false - for _, perm := 
range permissions { - if perm == expectedPermission { - found = true - break - } - } + found := slices.Contains(permissions, expectedPermission) if !found { t.Errorf("Expected IAMPermissions to include %s", expectedPermission) } @@ -454,7 +448,7 @@ func (m *MockPublicIPAddressesPager) More() bool { func (mr *MockPublicIPAddressesPagerMockRecorder) More() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "More", reflect.TypeOf((*MockPublicIPAddressesPager)(nil).More)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "More", reflect.TypeFor[func() bool]()) } func (m *MockPublicIPAddressesPager) NextPage(ctx context.Context) (armnetwork.PublicIPAddressesClientListResponse, error) { @@ -465,24 +459,24 @@ func (m *MockPublicIPAddressesPager) NextPage(ctx context.Context) (armnetwork.P return ret0, ret1 } -func (mr *MockPublicIPAddressesPagerMockRecorder) NextPage(ctx interface{}) *gomock.Call { +func (mr *MockPublicIPAddressesPagerMockRecorder) NextPage(ctx any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NextPage", reflect.TypeOf((*MockPublicIPAddressesPager)(nil).NextPage), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NextPage", reflect.TypeFor[func(ctx context.Context) (armnetwork.PublicIPAddressesClientListResponse, error)](), ctx) } // createAzurePublicIPAddress creates a mock Azure public IP address for testing func createAzurePublicIPAddress(name, nicName, prefixName, natGatewayName, ddosPlanName, loadBalancerName string) *armnetwork.PublicIPAddress { publicIP := &armnetwork.PublicIPAddress{ - Name: to.Ptr(name), - Location: to.Ptr("eastus"), + Name: new(name), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), - "project": to.Ptr("testing"), + "env": new("test"), + "project": new("testing"), }, Properties: &armnetwork.PublicIPAddressPropertiesFormat{ - PublicIPAddressVersion: to.Ptr(armnetwork.IPVersionIPv4), - 
PublicIPAllocationMethod: to.Ptr(armnetwork.IPAllocationMethodStatic), - IPAddress: to.Ptr("203.0.113.1"), // Add IP address for testing + PublicIPAddressVersion: new(armnetwork.IPVersionIPv4), + PublicIPAllocationMethod: new(armnetwork.IPAllocationMethodStatic), + IPAddress: new("203.0.113.1"), // Add IP address for testing }, } @@ -490,7 +484,7 @@ func createAzurePublicIPAddress(name, nicName, prefixName, natGatewayName, ddosP if nicName != "" { ipConfigID := "/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Network/networkInterfaces/" + nicName + "/ipConfigurations/ipconfig1" publicIP.Properties.IPConfiguration = &armnetwork.IPConfiguration{ - ID: to.Ptr(ipConfigID), + ID: new(ipConfigID), } } @@ -498,7 +492,7 @@ func createAzurePublicIPAddress(name, nicName, prefixName, natGatewayName, ddosP if prefixName != "" { prefixID := "/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Network/publicIPPrefixes/" + prefixName publicIP.Properties.PublicIPPrefix = &armnetwork.SubResource{ - ID: to.Ptr(prefixID), + ID: new(prefixID), } } @@ -506,7 +500,7 @@ func createAzurePublicIPAddress(name, nicName, prefixName, natGatewayName, ddosP if natGatewayName != "" { natGatewayID := "/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Network/natGateways/" + natGatewayName publicIP.Properties.NatGateway = &armnetwork.NatGateway{ - ID: to.Ptr(natGatewayID), + ID: new(natGatewayID), } } @@ -515,7 +509,7 @@ func createAzurePublicIPAddress(name, nicName, prefixName, natGatewayName, ddosP ddosPlanID := "/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Network/ddosProtectionPlans/" + ddosPlanName publicIP.Properties.DdosSettings = &armnetwork.DdosSettings{ DdosProtectionPlan: &armnetwork.SubResource{ - ID: to.Ptr(ddosPlanID), + ID: new(ddosPlanID), }, } } @@ -524,7 +518,7 @@ func createAzurePublicIPAddress(name, nicName, prefixName, natGatewayName, ddosP if loadBalancerName != "" { 
lbIPConfigID := "/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Network/loadBalancers/" + loadBalancerName + "/frontendIPConfigurations/frontendIPConfig1" publicIP.Properties.IPConfiguration = &armnetwork.IPConfiguration{ - ID: to.Ptr(lbIPConfigID), + ID: new(lbIPConfigID), } } @@ -536,16 +530,16 @@ func createAzurePublicIPAddressWithLinkedIP(name, linkedIPName, subscriptionID, linkedIPID := "/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/publicIPAddresses/" + linkedIPName return &armnetwork.PublicIPAddress{ - Name: to.Ptr(name), - Location: to.Ptr("eastus"), + Name: new(name), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armnetwork.PublicIPAddressPropertiesFormat{ - PublicIPAddressVersion: to.Ptr(armnetwork.IPVersionIPv4), - PublicIPAllocationMethod: to.Ptr(armnetwork.IPAllocationMethodStatic), + PublicIPAddressVersion: new(armnetwork.IPVersionIPv4), + PublicIPAllocationMethod: new(armnetwork.IPAllocationMethodStatic), LinkedPublicIPAddress: &armnetwork.PublicIPAddress{ - ID: to.Ptr(linkedIPID), + ID: new(linkedIPID), }, }, } @@ -556,16 +550,16 @@ func createAzurePublicIPAddressWithServiceIP(name, serviceIPName, subscriptionID serviceIPID := "/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/publicIPAddresses/" + serviceIPName return &armnetwork.PublicIPAddress{ - Name: to.Ptr(name), - Location: to.Ptr("eastus"), + Name: new(name), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armnetwork.PublicIPAddressPropertiesFormat{ - PublicIPAddressVersion: to.Ptr(armnetwork.IPVersionIPv4), - PublicIPAllocationMethod: to.Ptr(armnetwork.IPAllocationMethodStatic), + PublicIPAddressVersion: new(armnetwork.IPVersionIPv4), + PublicIPAllocationMethod: new(armnetwork.IPAllocationMethodStatic), 
ServicePublicIPAddress: &armnetwork.PublicIPAddress{ - ID: to.Ptr(serviceIPID), + ID: new(serviceIPID), }, }, } diff --git a/sources/azure/manual/network-public-ip-prefix.go b/sources/azure/manual/network-public-ip-prefix.go new file mode 100644 index 00000000..050ea99c --- /dev/null +++ b/sources/azure/manual/network-public-ip-prefix.go @@ -0,0 +1,314 @@ +package manual + +import ( + "context" + "errors" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" + "github.com/overmindtech/cli/sources/stdlib" +) + +var NetworkPublicIPPrefixLookupByName = shared.NewItemTypeLookup("name", azureshared.NetworkPublicIPPrefix) + +type networkPublicIPPrefixWrapper struct { + client clients.PublicIPPrefixesClient + + *azureshared.MultiResourceGroupBase +} + +func NewNetworkPublicIPPrefix(client clients.PublicIPPrefixesClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.ListableWrapper { + return &networkPublicIPPrefixWrapper{ + client: client, + MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_NETWORK, + azureshared.NetworkPublicIPPrefix, + ), + } +} + +// ref: https://learn.microsoft.com/en-us/rest/api/virtualnetwork/public-ip-prefixes/list +func (n networkPublicIPPrefixWrapper) List(ctx context.Context, scope string) ([]*sdp.Item, *sdp.QueryError) { + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + pager := n.client.NewListPager(rgScope.ResourceGroup, nil) + + var items []*sdp.Item + for pager.More() { + page, err := 
pager.NextPage(ctx) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + for _, prefix := range page.Value { + if prefix.Name == nil { + continue + } + item, sdpErr := n.azurePublicIPPrefixToSDPItem(prefix, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + return items, nil +} + +func (n networkPublicIPPrefixWrapper) ListStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string) { + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, n.Type())) + return + } + pager := n.client.NewListPager(rgScope.ResourceGroup, nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, n.Type())) + return + } + for _, prefix := range page.Value { + if prefix.Name == nil { + continue + } + item, sdpErr := n.azurePublicIPPrefixToSDPItem(prefix, scope) + if sdpErr != nil { + stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) + stream.SendItem(item) + } + } +} + +// ref: https://learn.microsoft.com/en-us/rest/api/virtualnetwork/public-ip-prefixes/get +func (n networkPublicIPPrefixWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) != 1 { + return nil, azureshared.QueryError(errors.New("query must be exactly one part (public IP prefix name)"), scope, n.Type()) + } + publicIPPrefixName := queryParts[0] + if publicIPPrefixName == "" { + return nil, azureshared.QueryError(errors.New("public IP prefix name cannot be empty"), scope, n.Type()) + } + + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + resp, err := n.client.Get(ctx, rgScope.ResourceGroup, publicIPPrefixName, nil) + if err != nil { + 
return nil, azureshared.QueryError(err, scope, n.Type()) + } + return n.azurePublicIPPrefixToSDPItem(&resp.PublicIPPrefix, scope) +} + +func (n networkPublicIPPrefixWrapper) azurePublicIPPrefixToSDPItem(prefix *armnetwork.PublicIPPrefix, scope string) (*sdp.Item, *sdp.QueryError) { + if prefix.Name == nil { + return nil, azureshared.QueryError(errors.New("public IP prefix name is nil"), scope, n.Type()) + } + + attributes, err := shared.ToAttributesWithExclude(prefix, "tags") + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + + sdpItem := &sdp.Item{ + Type: azureshared.NetworkPublicIPPrefix.String(), + UniqueAttribute: "name", + Attributes: attributes, + Scope: scope, + Tags: azureshared.ConvertAzureTags(prefix.Tags), + LinkedItemQueries: []*sdp.LinkedItemQuery{}, + } + + // Link to Custom Location when ExtendedLocation.Name is a custom location resource ID (Microsoft.ExtendedLocation/customLocations) + if prefix.ExtendedLocation != nil && prefix.ExtendedLocation.Name != nil { + customLocationID := *prefix.ExtendedLocation.Name + if strings.Contains(customLocationID, "customLocations") { + customLocationName := azureshared.ExtractResourceName(customLocationID) + if customLocationName != "" { + linkedScope := azureshared.ExtractScopeFromResourceID(customLocationID) + if linkedScope == "" { + linkedScope = scope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.ExtendedLocationCustomLocation.String(), + Method: sdp.QueryMethod_GET, + Query: customLocationName, + Scope: linkedScope, + }, + }) + } + } + } + + // Link to IP (standard library) for allocated prefix (e.g. 
"20.10.0.0/28") + if prefix.Properties != nil && prefix.Properties.IPPrefix != nil && *prefix.Properties.IPPrefix != "" { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkIP.String(), + Method: sdp.QueryMethod_GET, + Query: *prefix.Properties.IPPrefix, + Scope: "global", + }, + }) + } + + if prefix.Properties != nil { + // Link to Custom IP Prefix (parent prefix this prefix is associated with) + if prefix.Properties.CustomIPPrefix != nil && prefix.Properties.CustomIPPrefix.ID != nil { + customPrefixID := *prefix.Properties.CustomIPPrefix.ID + customPrefixName := azureshared.ExtractResourceName(customPrefixID) + if customPrefixName != "" { + linkedScope := azureshared.ExtractScopeFromResourceID(customPrefixID) + if linkedScope == "" { + linkedScope = scope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkCustomIPPrefix.String(), + Method: sdp.QueryMethod_GET, + Query: customPrefixName, + Scope: linkedScope, + }, + }) + } + } + + // Link to NAT Gateway + if prefix.Properties.NatGateway != nil && prefix.Properties.NatGateway.ID != nil { + natGatewayID := *prefix.Properties.NatGateway.ID + natGatewayName := azureshared.ExtractResourceName(natGatewayID) + if natGatewayName != "" { + linkedScope := azureshared.ExtractScopeFromResourceID(natGatewayID) + if linkedScope == "" { + linkedScope = scope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkNatGateway.String(), + Method: sdp.QueryMethod_GET, + Query: natGatewayName, + Scope: linkedScope, + }, + }) + } + } + + // Link to Load Balancer and Frontend IP Configuration (from frontend IP configuration reference) + if prefix.Properties.LoadBalancerFrontendIPConfiguration != nil && prefix.Properties.LoadBalancerFrontendIPConfiguration.ID != nil { + feConfigID := 
*prefix.Properties.LoadBalancerFrontendIPConfiguration.ID + // Format: .../loadBalancers/{lbName}/frontendIPConfigurations/{feConfigName} + params := azureshared.ExtractPathParamsFromResourceID(feConfigID, []string{"loadBalancers", "frontendIPConfigurations"}) + if len(params) >= 2 && params[0] != "" && params[1] != "" { + linkedScope := azureshared.ExtractScopeFromResourceID(feConfigID) + if linkedScope == "" { + linkedScope = scope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkLoadBalancer.String(), + Method: sdp.QueryMethod_GET, + Query: params[0], + Scope: linkedScope, + }, + }) + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkLoadBalancerFrontendIPConfiguration.String(), + Method: sdp.QueryMethod_GET, + Query: shared.CompositeLookupKey(params[0], params[1]), + Scope: linkedScope, + }, + }) + } + } + + // Link to each referenced Public IP Address + for _, ref := range prefix.Properties.PublicIPAddresses { + if ref != nil && ref.ID != nil { + refID := *ref.ID + refName := azureshared.ExtractResourceName(refID) + if refName != "" { + linkedScope := azureshared.ExtractScopeFromResourceID(refID) + if linkedScope == "" { + linkedScope = scope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkPublicIPAddress.String(), + Method: sdp.QueryMethod_GET, + Query: refName, + Scope: linkedScope, + }, + }) + } + } + } + } + + // Health from provisioning state + if prefix.Properties != nil && prefix.Properties.ProvisioningState != nil { + switch *prefix.Properties.ProvisioningState { + case armnetwork.ProvisioningStateSucceeded: + sdpItem.Health = sdp.Health_HEALTH_OK.Enum() + case armnetwork.ProvisioningStateCreating, armnetwork.ProvisioningStateUpdating, armnetwork.ProvisioningStateDeleting: + sdpItem.Health = 
sdp.Health_HEALTH_PENDING.Enum() + case armnetwork.ProvisioningStateFailed, armnetwork.ProvisioningStateCanceled: + sdpItem.Health = sdp.Health_HEALTH_ERROR.Enum() + default: + sdpItem.Health = sdp.Health_HEALTH_UNKNOWN.Enum() + } + } + + return sdpItem, nil +} + +func (n networkPublicIPPrefixWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + NetworkPublicIPPrefixLookupByName, + } +} + +func (n networkPublicIPPrefixWrapper) PotentialLinks() map[shared.ItemType]bool { + return map[shared.ItemType]bool{ + azureshared.NetworkCustomIPPrefix: true, + azureshared.NetworkNatGateway: true, + azureshared.NetworkLoadBalancer: true, + azureshared.NetworkLoadBalancerFrontendIPConfiguration: true, + azureshared.NetworkPublicIPAddress: true, + azureshared.ExtendedLocationCustomLocation: true, + stdlib.NetworkIP: true, + } +} + +func (n networkPublicIPPrefixWrapper) TerraformMappings() []*sdp.TerraformMapping { + return []*sdp.TerraformMapping{ + { + TerraformMethod: sdp.QueryMethod_GET, + TerraformQueryMap: "azurerm_public_ip_prefix.name", + }, + } +} + +// https://learn.microsoft.com/en-us/azure/role-based-access-control/resource-provider-operations#microsoftnetwork +func (n networkPublicIPPrefixWrapper) IAMPermissions() []string { + return []string{ + "Microsoft.Network/publicIPPrefixes/read", + } +} + +func (n networkPublicIPPrefixWrapper) PredefinedRole() string { + return "Reader" +} diff --git a/sources/azure/manual/network-public-ip-prefix_test.go b/sources/azure/manual/network-public-ip-prefix_test.go new file mode 100644 index 00000000..67ad44cf --- /dev/null +++ b/sources/azure/manual/network-public-ip-prefix_test.go @@ -0,0 +1,447 @@ +package manual_test + +import ( + "context" + "errors" + "slices" + "sync" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + "go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + 
"github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + "github.com/overmindtech/cli/sources/shared" + "github.com/overmindtech/cli/sources/stdlib" +) + +func TestNetworkPublicIPPrefix(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + + t.Run("Get", func(t *testing.T) { + prefixName := "test-prefix" + prefix := createAzurePublicIPPrefix(prefixName) + + mockClient := mocks.NewMockPublicIPPrefixesClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, prefixName, nil).Return( + armnetwork.PublicIPPrefixesClientGetResponse{ + PublicIPPrefix: *prefix, + }, nil) + + wrapper := manual.NewNetworkPublicIPPrefix(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], prefixName, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.NetworkPublicIPPrefix.String() { + t.Errorf("Expected type %s, got %s", azureshared.NetworkPublicIPPrefix.String(), sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "name" { + t.Errorf("Expected unique attribute 'name', got %s", sdpItem.GetUniqueAttribute()) + } + + if sdpItem.UniqueAttributeValue() != prefixName { + t.Errorf("Expected unique attribute value %s, got %s", prefixName, sdpItem.UniqueAttributeValue()) + } + + if sdpItem.GetTags()["env"] != "test" { + t.Errorf("Expected tag 'env=test', got: %v", sdpItem.GetTags()["env"]) + } + + t.Run("StaticTests", func(t *testing.T) { + // Public IP prefix with 
no linked resources in base createAzurePublicIPPrefix + queryTests := shared.QueryTests{} + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("Get_WithLinkedResources", func(t *testing.T) { + prefixName := "test-prefix-with-links" + prefix := createAzurePublicIPPrefixWithLinks(prefixName, subscriptionID, resourceGroup) + + mockClient := mocks.NewMockPublicIPPrefixesClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, prefixName, nil).Return( + armnetwork.PublicIPPrefixesClientGetResponse{ + PublicIPPrefix: *prefix, + }, nil) + + wrapper := manual.NewNetworkPublicIPPrefix(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], prefixName, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + t.Run("StaticTests", func(t *testing.T) { + scope := subscriptionID + "." 
+ resourceGroup + queryTests := shared.QueryTests{ + { + ExpectedType: azureshared.ExtendedLocationCustomLocation.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-custom-location", + ExpectedScope: scope, + }, + { + ExpectedType: stdlib.NetworkIP.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "20.10.0.0/28", + ExpectedScope: "global", + }, + { + ExpectedType: azureshared.NetworkCustomIPPrefix.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-custom-prefix", + ExpectedScope: scope, + }, + { + ExpectedType: azureshared.NetworkNatGateway.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-nat-gateway", + ExpectedScope: scope, + }, + { + ExpectedType: azureshared.NetworkLoadBalancer.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-load-balancer", + ExpectedScope: scope, + }, + { + ExpectedType: azureshared.NetworkLoadBalancerFrontendIPConfiguration.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: shared.CompositeLookupKey("test-load-balancer", "frontend"), + ExpectedScope: scope, + }, + { + ExpectedType: azureshared.NetworkPublicIPAddress.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "referenced-public-ip", + ExpectedScope: scope, + }, + } + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("GetWithEmptyName", func(t *testing.T) { + mockClient := mocks.NewMockPublicIPPrefixesClient(ctrl) + + wrapper := manual.NewNetworkPublicIPPrefix(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], "", true) + if qErr == nil { + t.Error("Expected error when public IP prefix name is empty, but got nil") + } + }) + + t.Run("Get_PrefixWithNilName", func(t *testing.T) { + provisioningState := armnetwork.ProvisioningStateSucceeded + 
prefixWithNilName := &armnetwork.PublicIPPrefix{ + Name: nil, + Location: new("eastus"), + Properties: &armnetwork.PublicIPPrefixPropertiesFormat{ + ProvisioningState: &provisioningState, + }, + } + + mockClient := mocks.NewMockPublicIPPrefixesClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, "test-prefix", nil).Return( + armnetwork.PublicIPPrefixesClientGetResponse{ + PublicIPPrefix: *prefixWithNilName, + }, nil) + + wrapper := manual.NewNetworkPublicIPPrefix(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], "test-prefix", true) + if qErr == nil { + t.Error("Expected error when public IP prefix has nil name, but got nil") + } + }) + + t.Run("List", func(t *testing.T) { + prefix1 := createAzurePublicIPPrefix("prefix-1") + prefix2 := createAzurePublicIPPrefix("prefix-2") + + mockClient := mocks.NewMockPublicIPPrefixesClient(ctrl) + mockPager := newMockPublicIPPrefixesPager(ctrl, []*armnetwork.PublicIPPrefix{prefix1, prefix2}) + + mockClient.EXPECT().NewListPager(resourceGroup, nil).Return(mockPager) + + wrapper := manual.NewNetworkPublicIPPrefix(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + listable, ok := adapter.(discovery.ListableAdapter) + if !ok { + t.Fatalf("Adapter does not support List operation") + } + + sdpItems, err := listable.List(ctx, wrapper.Scopes()[0], true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 2 { + t.Fatalf("Expected 2 items, got: %d", len(sdpItems)) + } + + for _, item := range sdpItems { + if item.Validate() != nil { + t.Fatalf("Expected no validation error, got: %v", item.Validate()) + } + if item.GetType() != azureshared.NetworkPublicIPPrefix.String() { + 
t.Fatalf("Expected type %s, got: %s", azureshared.NetworkPublicIPPrefix.String(), item.GetType()) + } + } + }) + + t.Run("List_WithNilName", func(t *testing.T) { + prefix1 := createAzurePublicIPPrefix("prefix-1") + provisioningState := armnetwork.ProvisioningStateSucceeded + prefix2NilName := &armnetwork.PublicIPPrefix{ + Name: nil, + Location: new("eastus"), + Tags: map[string]*string{"env": new("test")}, + Properties: &armnetwork.PublicIPPrefixPropertiesFormat{ + ProvisioningState: &provisioningState, + }, + } + + mockClient := mocks.NewMockPublicIPPrefixesClient(ctrl) + mockPager := newMockPublicIPPrefixesPager(ctrl, []*armnetwork.PublicIPPrefix{prefix1, prefix2NilName}) + + mockClient.EXPECT().NewListPager(resourceGroup, nil).Return(mockPager) + + wrapper := manual.NewNetworkPublicIPPrefix(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + listable, ok := adapter.(discovery.ListableAdapter) + if !ok { + t.Fatalf("Adapter does not support List operation") + } + + sdpItems, err := listable.List(ctx, wrapper.Scopes()[0], true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 1 { + t.Fatalf("Expected 1 item (nil name skipped), got: %d", len(sdpItems)) + } + if sdpItems[0].UniqueAttributeValue() != "prefix-1" { + t.Errorf("Expected item name 'prefix-1', got: %s", sdpItems[0].UniqueAttributeValue()) + } + }) + + t.Run("ListStream", func(t *testing.T) { + prefix1 := createAzurePublicIPPrefix("stream-prefix-1") + prefix2 := createAzurePublicIPPrefix("stream-prefix-2") + + mockClient := mocks.NewMockPublicIPPrefixesClient(ctrl) + mockPager := newMockPublicIPPrefixesPager(ctrl, []*armnetwork.PublicIPPrefix{prefix1, prefix2}) + + mockClient.EXPECT().NewListPager(resourceGroup, nil).Return(mockPager) + + wrapper := manual.NewNetworkPublicIPPrefix(mockClient, 
[]azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + wg := &sync.WaitGroup{} + wg.Add(2) + + var items []*sdp.Item + mockItemHandler := func(item *sdp.Item) { + items = append(items, item) + wg.Done() + } + var errs []error + mockErrorHandler := func(err error) { + errs = append(errs, err) + } + stream := discovery.NewQueryResultStream(mockItemHandler, mockErrorHandler) + + listStreamable, ok := adapter.(discovery.ListStreamableAdapter) + if !ok { + t.Fatalf("Adapter does not support ListStream operation") + } + + listStreamable.ListStream(ctx, wrapper.Scopes()[0], true, stream) + wg.Wait() + + if len(errs) != 0 { + t.Fatalf("Expected no errors, got: %v", errs) + } + if len(items) != 2 { + t.Fatalf("Expected 2 items, got: %d", len(items)) + } + }) + + t.Run("ErrorHandling", func(t *testing.T) { + expectedErr := errors.New("public IP prefix not found") + + mockClient := mocks.NewMockPublicIPPrefixesClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, "nonexistent-prefix", nil).Return( + armnetwork.PublicIPPrefixesClientGetResponse{}, expectedErr) + + wrapper := manual.NewNetworkPublicIPPrefix(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], "nonexistent-prefix", true) + if qErr == nil { + t.Error("Expected error when getting non-existent public IP prefix, but got nil") + } + }) + + t.Run("InterfaceCompliance", func(t *testing.T) { + mockClient := mocks.NewMockPublicIPPrefixesClient(ctrl) + wrapper := manual.NewNetworkPublicIPPrefix(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + w := wrapper.(sources.Wrapper) + + permissions := w.IAMPermissions() + if len(permissions) == 0 { + 
t.Error("Expected IAMPermissions to return at least one permission") + } + expectedPermission := "Microsoft.Network/publicIPPrefixes/read" + if !slices.Contains(permissions, expectedPermission) { + t.Errorf("Expected IAMPermissions to include %s", expectedPermission) + } + + mappings := w.TerraformMappings() + foundMapping := false + for _, mapping := range mappings { + if mapping.GetTerraformQueryMap() == "azurerm_public_ip_prefix.name" { + foundMapping = true + if mapping.GetTerraformMethod() != sdp.QueryMethod_GET { + t.Errorf("Expected TerraformMethod GET, got: %s", mapping.GetTerraformMethod()) + } + break + } + } + if !foundMapping { + t.Error("Expected TerraformMappings to include 'azurerm_public_ip_prefix.name'") + } + + lookups := w.GetLookups() + foundLookup := false + for _, lookup := range lookups { + if lookup.ItemType == azureshared.NetworkPublicIPPrefix { + foundLookup = true + break + } + } + if !foundLookup { + t.Error("Expected GetLookups to include NetworkPublicIPPrefix") + } + + potentialLinks := w.PotentialLinks() + for _, linkType := range []shared.ItemType{azureshared.ExtendedLocationCustomLocation, azureshared.NetworkCustomIPPrefix, azureshared.NetworkNatGateway, azureshared.NetworkLoadBalancer, azureshared.NetworkLoadBalancerFrontendIPConfiguration, azureshared.NetworkPublicIPAddress, stdlib.NetworkIP} { + if !potentialLinks[linkType] { + t.Errorf("Expected PotentialLinks to include %s", linkType) + } + } + }) +} + +type mockPublicIPPrefixesPager struct { + ctrl *gomock.Controller + items []*armnetwork.PublicIPPrefix + index int + more bool +} + +func newMockPublicIPPrefixesPager(ctrl *gomock.Controller, items []*armnetwork.PublicIPPrefix) clients.PublicIPPrefixesPager { + return &mockPublicIPPrefixesPager{ + ctrl: ctrl, + items: items, + index: 0, + more: len(items) > 0, + } +} + +func (m *mockPublicIPPrefixesPager) More() bool { + return m.more +} + +func (m *mockPublicIPPrefixesPager) NextPage(ctx context.Context) 
(armnetwork.PublicIPPrefixesClientListResponse, error) { + if m.index >= len(m.items) { + m.more = false + return armnetwork.PublicIPPrefixesClientListResponse{ + PublicIPPrefixListResult: armnetwork.PublicIPPrefixListResult{ + Value: []*armnetwork.PublicIPPrefix{}, + }, + }, nil + } + item := m.items[m.index] + m.index++ + m.more = m.index < len(m.items) + return armnetwork.PublicIPPrefixesClientListResponse{ + PublicIPPrefixListResult: armnetwork.PublicIPPrefixListResult{ + Value: []*armnetwork.PublicIPPrefix{item}, + }, + }, nil +} + +func createAzurePublicIPPrefix(name string) *armnetwork.PublicIPPrefix { + provisioningState := armnetwork.ProvisioningStateSucceeded + prefixLength := int32(28) + return &armnetwork.PublicIPPrefix{ + ID: new("/subscriptions/test-sub/resourceGroups/test-rg/providers/Microsoft.Network/publicIPPrefixes/" + name), + Name: new(name), + Type: new("Microsoft.Network/publicIPPrefixes"), + Location: new("eastus"), + Tags: map[string]*string{ + "env": new("test"), + "project": new("testing"), + }, + Properties: &armnetwork.PublicIPPrefixPropertiesFormat{ + ProvisioningState: &provisioningState, + PrefixLength: &prefixLength, + }, + } +} + +func createAzurePublicIPPrefixWithLinks(name, subscriptionID, resourceGroup string) *armnetwork.PublicIPPrefix { + prefix := createAzurePublicIPPrefix(name) + prefix.Properties.IPPrefix = new("20.10.0.0/28") + customLocationID := "/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.ExtendedLocation/customLocations/test-custom-location" + customPrefixID := "/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/customIPPrefixes/test-custom-prefix" + natGatewayID := "/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/natGateways/test-nat-gateway" + lbFeConfigID := "/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + 
"/providers/Microsoft.Network/loadBalancers/test-load-balancer/frontendIPConfigurations/frontend" + publicIPID := "/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/publicIPAddresses/referenced-public-ip" + + prefix.ExtendedLocation = &armnetwork.ExtendedLocation{ + Name: new(customLocationID), + } + prefix.Properties.CustomIPPrefix = &armnetwork.SubResource{ + ID: new(customPrefixID), + } + prefix.Properties.NatGateway = &armnetwork.NatGateway{ + ID: new(natGatewayID), + } + prefix.Properties.LoadBalancerFrontendIPConfiguration = &armnetwork.SubResource{ + ID: new(lbFeConfigID), + } + prefix.Properties.PublicIPAddresses = []*armnetwork.ReferencedPublicIPAddress{ + {ID: new(publicIPID)}, + } + return prefix +} + +var _ clients.PublicIPPrefixesPager = (*mockPublicIPPrefixesPager)(nil) diff --git a/sources/azure/manual/network-route-table.go b/sources/azure/manual/network-route-table.go index e0c1e839..5c6cedc5 100644 --- a/sources/azure/manual/network-route-table.go +++ b/sources/azure/manual/network-route-table.go @@ -4,7 +4,7 @@ import ( "context" "errors" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" "github.com/overmindtech/cli/go/sdp-go" "github.com/overmindtech/cli/sources" "github.com/overmindtech/cli/sources/azure/clients" diff --git a/sources/azure/manual/network-route-table_test.go b/sources/azure/manual/network-route-table_test.go index 56dbdb73..c7422ea4 100644 --- a/sources/azure/manual/network-route-table_test.go +++ b/sources/azure/manual/network-route-table_test.go @@ -4,10 +4,10 @@ import ( "context" "errors" "reflect" + "slices" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" "go.uber.org/mock/gomock" 
"github.com/overmindtech/cli/go/discovery" @@ -109,9 +109,9 @@ func TestNetworkRouteTable(t *testing.T) { t.Run("Get_WithNilName", func(t *testing.T) { routeTable := &armnetwork.RouteTable{ Name: nil, // Route table with nil name should cause an error - Location: to.Ptr("eastus"), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, } @@ -188,9 +188,9 @@ func TestNetworkRouteTable(t *testing.T) { routeTable1 := createAzureRouteTable("test-route-table-1") routeTable2 := &armnetwork.RouteTable{ Name: nil, // Route table with nil name should be skipped - Location: to.Ptr("eastus"), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, } @@ -282,15 +282,15 @@ func TestNetworkRouteTable(t *testing.T) { otherSubscriptionID := "other-subscription" routeTable := &armnetwork.RouteTable{ - Name: to.Ptr(routeTableName), - Location: to.Ptr("eastus"), + Name: new(routeTableName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armnetwork.RouteTablePropertiesFormat{ Subnets: []*armnetwork.Subnet{ { - ID: to.Ptr("/subscriptions/" + otherSubscriptionID + "/resourceGroups/" + otherResourceGroup + "/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/test-subnet"), + ID: new("/subscriptions/" + otherSubscriptionID + "/resourceGroups/" + otherResourceGroup + "/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/test-subnet"), }, }, }, @@ -330,19 +330,19 @@ func TestNetworkRouteTable(t *testing.T) { // Test route table with route that has NextHopIPAddress routeTableName := "test-route-table" routeTable := &armnetwork.RouteTable{ - Name: to.Ptr(routeTableName), - Location: to.Ptr("eastus"), + Name: new(routeTableName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armnetwork.RouteTablePropertiesFormat{ Routes: []*armnetwork.Route{ { 
- Name: to.Ptr("test-route"), + Name: new("test-route"), Properties: &armnetwork.RoutePropertiesFormat{ - AddressPrefix: to.Ptr("10.0.0.0/16"), - NextHopType: to.Ptr(armnetwork.RouteNextHopTypeVirtualAppliance), - NextHopIPAddress: to.Ptr("10.0.0.1"), + AddressPrefix: new("10.0.0.0/16"), + NextHopType: new(armnetwork.RouteNextHopTypeVirtualAppliance), + NextHopIPAddress: new("10.0.0.1"), }, }, }, @@ -385,18 +385,18 @@ func TestNetworkRouteTable(t *testing.T) { // Test route table with route that doesn't have NextHopIPAddress routeTableName := "test-route-table" routeTable := &armnetwork.RouteTable{ - Name: to.Ptr(routeTableName), - Location: to.Ptr("eastus"), + Name: new(routeTableName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armnetwork.RouteTablePropertiesFormat{ Routes: []*armnetwork.Route{ { - Name: to.Ptr("test-route"), + Name: new("test-route"), Properties: &armnetwork.RoutePropertiesFormat{ - AddressPrefix: to.Ptr("10.0.0.0/16"), - NextHopType: to.Ptr(armnetwork.RouteNextHopTypeInternet), + AddressPrefix: new("10.0.0.0/16"), + NextHopType: new(armnetwork.RouteNextHopTypeInternet), // No NextHopIPAddress }, }, @@ -439,13 +439,7 @@ func TestNetworkRouteTable(t *testing.T) { t.Error("Expected IAMPermissions to return at least one permission") } expectedPermission := "Microsoft.Network/routeTables/read" - found := false - for _, perm := range permissions { - if perm == expectedPermission { - found = true - break - } - } + found := slices.Contains(permissions, expectedPermission) if !found { t.Errorf("Expected IAMPermissions to include %s", expectedPermission) } @@ -485,7 +479,7 @@ func TestNetworkRouteTable(t *testing.T) { // Verify PredefinedRole // PredefinedRole is available on the wrapper, not the adapter // Use type assertion with interface{} to access the method - if roleInterface, ok := interface{}(wrapper).(interface{ PredefinedRole() string }); ok { + if roleInterface, ok := 
any(wrapper).(interface{ PredefinedRole() string }); ok { role := roleInterface.PredefinedRole() if role != "Reader" { t.Errorf("Expected PredefinedRole to be 'Reader', got %s", role) @@ -525,7 +519,7 @@ func (m *MockRouteTablesPager) More() bool { func (mr *MockRouteTablesPagerMockRecorder) More() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "More", reflect.TypeOf((*MockRouteTablesPager)(nil).More)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "More", reflect.TypeFor[func() bool]()) } func (m *MockRouteTablesPager) NextPage(ctx context.Context) (armnetwork.RouteTablesClientListResponse, error) { @@ -536,9 +530,9 @@ func (m *MockRouteTablesPager) NextPage(ctx context.Context) (armnetwork.RouteTa return ret0, ret1 } -func (mr *MockRouteTablesPagerMockRecorder) NextPage(ctx interface{}) *gomock.Call { +func (mr *MockRouteTablesPagerMockRecorder) NextPage(ctx any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NextPage", reflect.TypeOf((*MockRouteTablesPager)(nil).NextPage), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NextPage", reflect.TypeFor[func(ctx context.Context) (armnetwork.RouteTablesClientListResponse, error)](), ctx) } // createAzureRouteTable creates a mock Azure route table for testing @@ -547,28 +541,28 @@ func createAzureRouteTable(routeTableName string) *armnetwork.RouteTable { resourceGroup := "test-rg" return &armnetwork.RouteTable{ - Name: to.Ptr(routeTableName), - Location: to.Ptr("eastus"), + Name: new(routeTableName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), - "project": to.Ptr("testing"), + "env": new("test"), + "project": new("testing"), }, Properties: &armnetwork.RouteTablePropertiesFormat{ // Routes (child resources) Routes: []*armnetwork.Route{ { - Name: to.Ptr("test-route"), + Name: new("test-route"), Properties: &armnetwork.RoutePropertiesFormat{ - AddressPrefix: 
to.Ptr("10.0.0.0/16"), - NextHopType: to.Ptr(armnetwork.RouteNextHopTypeVirtualAppliance), - NextHopIPAddress: to.Ptr("10.0.0.1"), + AddressPrefix: new("10.0.0.0/16"), + NextHopType: new(armnetwork.RouteNextHopTypeVirtualAppliance), + NextHopIPAddress: new("10.0.0.1"), }, }, }, // Subnets (external resources) Subnets: []*armnetwork.Subnet{ { - ID: to.Ptr("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/test-subnet"), + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/test-subnet"), }, }, }, diff --git a/sources/azure/manual/network-route.go b/sources/azure/manual/network-route.go new file mode 100644 index 00000000..a7f66a12 --- /dev/null +++ b/sources/azure/manual/network-route.go @@ -0,0 +1,224 @@ +package manual + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" + "github.com/overmindtech/cli/sources/stdlib" +) + +var NetworkRouteLookupByUniqueAttr = shared.NewItemTypeLookup("uniqueAttr", azureshared.NetworkRoute) + +type networkRouteWrapper struct { + client clients.RoutesClient + *azureshared.MultiResourceGroupBase +} + +// NewNetworkRoute creates a new networkRouteWrapper instance (SearchableWrapper: child of route table). 
+func NewNetworkRoute(client clients.RoutesClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.SearchableWrapper { + return &networkRouteWrapper{ + client: client, + MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_NETWORK, + azureshared.NetworkRoute, + ), + } +} + +func (n networkRouteWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 2 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Get requires 2 query parts: routeTableName and routeName", + Scope: scope, + ItemType: n.Type(), + } + } + routeTableName := queryParts[0] + routeName := queryParts[1] + if routeName == "" { + return nil, azureshared.QueryError(errors.New("route name cannot be empty"), scope, n.Type()) + } + + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + resp, err := n.client.Get(ctx, rgScope.ResourceGroup, routeTableName, routeName, nil) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + + return n.azureRouteToSDPItem(&resp.Route, routeTableName, routeName, scope) +} + +func (n networkRouteWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + NetworkRouteTableLookupByName, + NetworkRouteLookupByUniqueAttr, + } +} + +func (n networkRouteWrapper) Search(ctx context.Context, scope string, queryParts ...string) ([]*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 1 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Search requires 1 query part: routeTableName", + Scope: scope, + ItemType: n.Type(), + } + } + routeTableName := queryParts[0] + + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + pager := 
n.client.NewListPager(rgScope.ResourceGroup, routeTableName, nil) + + var items []*sdp.Item + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + for _, route := range page.Value { + if route == nil || route.Name == nil { + continue + } + item, sdpErr := n.azureRouteToSDPItem(route, routeTableName, *route.Name, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + return items, nil +} + +func (n networkRouteWrapper) SearchStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string, queryParts ...string) { + if len(queryParts) < 1 { + stream.SendError(azureshared.QueryError(errors.New("Search requires 1 query part: routeTableName"), scope, n.Type())) + return + } + routeTableName := queryParts[0] + + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, n.Type())) + return + } + pager := n.client.NewListPager(rgScope.ResourceGroup, routeTableName, nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, n.Type())) + return + } + for _, route := range page.Value { + if route == nil || route.Name == nil { + continue + } + item, sdpErr := n.azureRouteToSDPItem(route, routeTableName, *route.Name, scope) + if sdpErr != nil { + stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) + stream.SendItem(item) + } + } +} + +func (n networkRouteWrapper) SearchLookups() []sources.ItemTypeLookups { + return []sources.ItemTypeLookups{ + {NetworkRouteTableLookupByName}, + } +} + +func (n networkRouteWrapper) azureRouteToSDPItem(route *armnetwork.Route, routeTableName, routeName, scope string) (*sdp.Item, *sdp.QueryError) { + attributes, err := shared.ToAttributesWithExclude(route, "tags") + if err 
!= nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + + err = attributes.Set("uniqueAttr", shared.CompositeLookupKey(routeTableName, routeName)) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + + sdpItem := &sdp.Item{ + Type: azureshared.NetworkRoute.String(), + UniqueAttribute: "uniqueAttr", + Attributes: attributes, + Scope: scope, + } + + // Health status from ProvisioningState + if route.Properties != nil && route.Properties.ProvisioningState != nil { + switch *route.Properties.ProvisioningState { + case armnetwork.ProvisioningStateSucceeded: + sdpItem.Health = sdp.Health_HEALTH_OK.Enum() + case armnetwork.ProvisioningStateCreating, armnetwork.ProvisioningStateUpdating, armnetwork.ProvisioningStateDeleting: + sdpItem.Health = sdp.Health_HEALTH_PENDING.Enum() + case armnetwork.ProvisioningStateFailed, armnetwork.ProvisioningStateCanceled: + sdpItem.Health = sdp.Health_HEALTH_ERROR.Enum() + } + } + + // Link to parent Route Table + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkRouteTable.String(), + Method: sdp.QueryMethod_GET, + Query: routeTableName, + Scope: scope, + }, + }) + + // Link to NextHopIPAddress (IP address to stdlib) + if route.Properties != nil && route.Properties.NextHopIPAddress != nil && *route.Properties.NextHopIPAddress != "" { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkIP.String(), + Method: sdp.QueryMethod_GET, + Query: *route.Properties.NextHopIPAddress, + Scope: "global", + }, + }) + } + + return sdpItem, nil +} + +func (n networkRouteWrapper) PotentialLinks() map[shared.ItemType]bool { + return shared.NewItemTypesSet( + azureshared.NetworkRouteTable, + stdlib.NetworkIP, + ) +} + +// ref: https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/route +func (n networkRouteWrapper) 
TerraformMappings() []*sdp.TerraformMapping { + return []*sdp.TerraformMapping{ + {TerraformMethod: sdp.QueryMethod_SEARCH, TerraformQueryMap: "azurerm_route.id"}, + } +} + +// ref: https://learn.microsoft.com/en-us/azure/role-based-access-control/permissions-reference#microsoftnetwork +func (n networkRouteWrapper) IAMPermissions() []string { + return []string{"Microsoft.Network/routeTables/routes/read"} +} + +func (n networkRouteWrapper) PredefinedRole() string { + return "Reader" +} diff --git a/sources/azure/manual/network-route_test.go b/sources/azure/manual/network-route_test.go new file mode 100644 index 00000000..02775600 --- /dev/null +++ b/sources/azure/manual/network-route_test.go @@ -0,0 +1,306 @@ +package manual_test + +import ( + "context" + "errors" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + "go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/discovery" + sdp "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + "github.com/overmindtech/cli/sources/shared" + "github.com/overmindtech/cli/sources/stdlib" +) + +type mockRoutesPager struct { + pages []armnetwork.RoutesClientListResponse + index int +} + +func (m *mockRoutesPager) More() bool { + return m.index < len(m.pages) +} + +func (m *mockRoutesPager) NextPage(ctx context.Context) (armnetwork.RoutesClientListResponse, error) { + if m.index >= len(m.pages) { + return armnetwork.RoutesClientListResponse{}, errors.New("no more pages") + } + page := m.pages[m.index] + m.index++ + return page, nil +} + +type errorRoutesPager struct{} + +func (e *errorRoutesPager) More() bool { + return true +} + +func (e *errorRoutesPager) NextPage(ctx 
context.Context) (armnetwork.RoutesClientListResponse, error) { + return armnetwork.RoutesClientListResponse{}, errors.New("pager error") +} + +type testRoutesClient struct { + *mocks.MockRoutesClient + pager clients.RoutesPager +} + +func (t *testRoutesClient) NewListPager(resourceGroupName, routeTableName string, options *armnetwork.RoutesClientListOptions) clients.RoutesPager { + return t.pager +} + +func TestNetworkRoute(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + routeTableName := "test-route-table" + routeName := "test-route" + + t.Run("Get", func(t *testing.T) { + route := createAzureRoute(routeName, routeTableName) + + mockClient := mocks.NewMockRoutesClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, routeTableName, routeName, nil).Return( + armnetwork.RoutesClientGetResponse{ + Route: *route, + }, nil) + + testClient := &testRoutesClient{MockRoutesClient: mockClient} + wrapper := manual.NewNetworkRoute(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(routeTableName, routeName) + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.NetworkRoute.String() { + t.Errorf("Expected type %s, got %s", azureshared.NetworkRoute, sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", sdpItem.GetUniqueAttribute()) + } + + if sdpItem.UniqueAttributeValue() != shared.CompositeLookupKey(routeTableName, routeName) { + t.Errorf("Expected unique attribute value %s, got %s", shared.CompositeLookupKey(routeTableName, routeName), sdpItem.UniqueAttributeValue()) + } 
+ + if sdpItem.GetScope() != subscriptionID+"."+resourceGroup { + t.Errorf("Expected scope %s, got %s", subscriptionID+"."+resourceGroup, sdpItem.GetScope()) + } + + if err := sdpItem.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + + t.Run("StaticTests", func(t *testing.T) { + queryTests := shared.QueryTests{ + { + ExpectedType: azureshared.NetworkRouteTable.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: routeTableName, + ExpectedScope: subscriptionID + "." + resourceGroup, + }, + { + ExpectedType: stdlib.NetworkIP.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "10.0.0.1", + ExpectedScope: "global", + }, + } + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("Get_EmptyRouteName", func(t *testing.T) { + mockClient := mocks.NewMockRoutesClient(ctrl) + testClient := &testRoutesClient{MockRoutesClient: mockClient} + + wrapper := manual.NewNetworkRoute(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(routeTableName, "") + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when route name is empty, but got nil") + } + }) + + t.Run("Get_InvalidQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockRoutesClient(ctrl) + testClient := &testRoutesClient{MockRoutesClient: mockClient} + + wrapper := manual.NewNetworkRoute(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], routeTableName, true) + if qErr == nil { + t.Error("Expected error when providing insufficient query parts, but got nil") + } + }) + + t.Run("Search", func(t *testing.T) { + route1 := 
createAzureRoute("route-1", routeTableName) + route2 := createAzureRoute("route-2", routeTableName) + + mockClient := mocks.NewMockRoutesClient(ctrl) + mockPager := &mockRoutesPager{ + pages: []armnetwork.RoutesClientListResponse{ + { + RouteListResult: armnetwork.RouteListResult{ + Value: []*armnetwork.Route{route1, route2}, + }, + }, + }, + } + + testClient := &testRoutesClient{ + MockRoutesClient: mockClient, + pager: mockPager, + } + + wrapper := manual.NewNetworkRoute(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, wrapper.Scopes()[0], routeTableName, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 2 { + t.Fatalf("Expected 2 items, got: %d", len(sdpItems)) + } + + for _, item := range sdpItems { + if err := item.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + if item.GetType() != azureshared.NetworkRoute.String() { + t.Errorf("Expected type %s, got %s", azureshared.NetworkRoute, item.GetType()) + } + } + }) + + t.Run("Search_InvalidQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockRoutesClient(ctrl) + testClient := &testRoutesClient{MockRoutesClient: mockClient} + + wrapper := manual.NewNetworkRoute(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + _, qErr := wrapper.Search(ctx, wrapper.Scopes()[0]) + if qErr == nil { + t.Error("Expected error when providing no query parts, but got nil") + } + }) + + t.Run("Search_RouteWithNilName", func(t *testing.T) { + validRoute := createAzureRoute("valid-route", routeTableName) + + mockClient := mocks.NewMockRoutesClient(ctrl) + mockPager := 
&mockRoutesPager{ + pages: []armnetwork.RoutesClientListResponse{ + { + RouteListResult: armnetwork.RouteListResult{ + Value: []*armnetwork.Route{ + {Name: nil, ID: new("/some/id")}, + validRoute, + }, + }, + }, + }, + } + + testClient := &testRoutesClient{ + MockRoutesClient: mockClient, + pager: mockPager, + } + + wrapper := manual.NewNetworkRoute(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable := adapter.(discovery.SearchableAdapter) + sdpItems, err := searchable.Search(ctx, wrapper.Scopes()[0], routeTableName, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 1 { + t.Fatalf("Expected 1 item (nil name skipped), got: %d", len(sdpItems)) + } + if sdpItems[0].UniqueAttributeValue() != shared.CompositeLookupKey(routeTableName, "valid-route") { + t.Errorf("Expected unique value %s, got %s", shared.CompositeLookupKey(routeTableName, "valid-route"), sdpItems[0].UniqueAttributeValue()) + } + }) + + t.Run("ErrorHandling_Get", func(t *testing.T) { + expectedErr := errors.New("route not found") + + mockClient := mocks.NewMockRoutesClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, routeTableName, "nonexistent-route", nil).Return( + armnetwork.RoutesClientGetResponse{}, expectedErr) + + testClient := &testRoutesClient{MockRoutesClient: mockClient} + wrapper := manual.NewNetworkRoute(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(routeTableName, "nonexistent-route") + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when getting non-existent route, but got nil") + } + }) + + t.Run("ErrorHandling_Search", func(t *testing.T) { + mockClient := 
mocks.NewMockRoutesClient(ctrl) + testClient := &testRoutesClient{ + MockRoutesClient: mockClient, + pager: &errorRoutesPager{}, + } + + wrapper := manual.NewNetworkRoute(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable := adapter.(discovery.SearchableAdapter) + _, err := searchable.Search(ctx, wrapper.Scopes()[0], routeTableName, true) + if err == nil { + t.Error("Expected error from pager when NextPage returns an error, but got nil") + } + }) +} + +func createAzureRoute(routeName, routeTableName string) *armnetwork.Route { + idStr := "/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Network/routeTables/" + routeTableName + "/routes/" + routeName + typeStr := "Microsoft.Network/routeTables/routes" + provisioningState := armnetwork.ProvisioningStateSucceeded + nextHopIP := "10.0.0.1" + nextHopType := armnetwork.RouteNextHopTypeVnetLocal + return &armnetwork.Route{ + ID: &idStr, + Name: &routeName, + Type: &typeStr, + Properties: &armnetwork.RoutePropertiesFormat{ + ProvisioningState: &provisioningState, + NextHopIPAddress: &nextHopIP, + AddressPrefix: new("10.0.0.0/24"), + NextHopType: &nextHopType, + }, + } +} diff --git a/sources/azure/manual/network-security-rule.go b/sources/azure/manual/network-security-rule.go new file mode 100644 index 00000000..fcaf5cc1 --- /dev/null +++ b/sources/azure/manual/network-security-rule.go @@ -0,0 +1,272 @@ +package manual + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + 
"github.com/overmindtech/cli/sources/shared" + "github.com/overmindtech/cli/sources/stdlib" +) + +var NetworkSecurityRuleLookupByUniqueAttr = shared.NewItemTypeLookup("uniqueAttr", azureshared.NetworkSecurityRule) + +type networkSecurityRuleWrapper struct { + client clients.SecurityRulesClient + *azureshared.MultiResourceGroupBase +} + +// NewNetworkSecurityRule creates a new networkSecurityRuleWrapper instance (SearchableWrapper: child of network security group). +func NewNetworkSecurityRule(client clients.SecurityRulesClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.SearchableWrapper { + return &networkSecurityRuleWrapper{ + client: client, + MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_NETWORK, + azureshared.NetworkSecurityRule, + ), + } +} + +func (n networkSecurityRuleWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 2 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Get requires 2 query parts: networkSecurityGroupName and securityRuleName", + Scope: scope, + ItemType: n.Type(), + } + } + nsgName := queryParts[0] + ruleName := queryParts[1] + if ruleName == "" { + return nil, azureshared.QueryError(errors.New("security rule name cannot be empty"), scope, n.Type()) + } + + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + resp, err := n.client.Get(ctx, rgScope.ResourceGroup, nsgName, ruleName, nil) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + + return n.azureSecurityRuleToSDPItem(&resp.SecurityRule, nsgName, ruleName, scope) +} + +func (n networkSecurityRuleWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + NetworkNetworkSecurityGroupLookupByName, + NetworkSecurityRuleLookupByUniqueAttr, + } +} + 
+func (n networkSecurityRuleWrapper) Search(ctx context.Context, scope string, queryParts ...string) ([]*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 1 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Search requires 1 query part: networkSecurityGroupName", + Scope: scope, + ItemType: n.Type(), + } + } + nsgName := queryParts[0] + + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + pager := n.client.NewListPager(rgScope.ResourceGroup, nsgName, nil) + + var items []*sdp.Item + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + for _, rule := range page.Value { + if rule == nil || rule.Name == nil { + continue + } + item, sdpErr := n.azureSecurityRuleToSDPItem(rule, nsgName, *rule.Name, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + return items, nil +} + +func (n networkSecurityRuleWrapper) SearchStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string, queryParts ...string) { + if len(queryParts) < 1 { + stream.SendError(azureshared.QueryError(errors.New("Search requires 1 query part: networkSecurityGroupName"), scope, n.Type())) + return + } + nsgName := queryParts[0] + + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, n.Type())) + return + } + pager := n.client.NewListPager(rgScope.ResourceGroup, nsgName, nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, n.Type())) + return + } + for _, rule := range page.Value { + if rule == nil || rule.Name == nil { + continue + } + item, sdpErr := n.azureSecurityRuleToSDPItem(rule, nsgName, *rule.Name, scope) + if sdpErr != nil { + 
stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) + stream.SendItem(item) + } + } +} + +func (n networkSecurityRuleWrapper) SearchLookups() []sources.ItemTypeLookups { + return []sources.ItemTypeLookups{ + {NetworkNetworkSecurityGroupLookupByName}, + } +} + +func (n networkSecurityRuleWrapper) azureSecurityRuleToSDPItem(rule *armnetwork.SecurityRule, nsgName, ruleName, scope string) (*sdp.Item, *sdp.QueryError) { + attributes, err := shared.ToAttributesWithExclude(rule, "tags") + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + + err = attributes.Set("uniqueAttr", shared.CompositeLookupKey(nsgName, ruleName)) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + + sdpItem := &sdp.Item{ + Type: azureshared.NetworkSecurityRule.String(), + UniqueAttribute: "uniqueAttr", + Attributes: attributes, + Scope: scope, + } + + // Link to parent Network Security Group + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkNetworkSecurityGroup.String(), + Method: sdp.QueryMethod_GET, + Query: nsgName, + Scope: scope, + }, + }) + + if rule.Properties != nil { + // Link to SourceApplicationSecurityGroups + if rule.Properties.SourceApplicationSecurityGroups != nil { + for _, asgRef := range rule.Properties.SourceApplicationSecurityGroups { + if asgRef != nil && asgRef.ID != nil { + asgName := azureshared.ExtractResourceName(*asgRef.ID) + if asgName != "" { + linkScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(*asgRef.ID); extractedScope != "" { + linkScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkApplicationSecurityGroup.String(), + Method: sdp.QueryMethod_GET, + Query: asgName, + Scope: linkScope, + }, + }) + } + } + } + } + + // Link to 
DestinationApplicationSecurityGroups + if rule.Properties.DestinationApplicationSecurityGroups != nil { + for _, asgRef := range rule.Properties.DestinationApplicationSecurityGroups { + if asgRef != nil && asgRef.ID != nil { + asgName := azureshared.ExtractResourceName(*asgRef.ID) + if asgName != "" { + linkScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(*asgRef.ID); extractedScope != "" { + linkScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkApplicationSecurityGroup.String(), + Method: sdp.QueryMethod_GET, + Query: asgName, + Scope: linkScope, + }, + }) + } + } + } + } + + // Link to stdlib.NetworkIP for source/destination address prefixes when they are IPs or CIDRs + if rule.Properties.SourceAddressPrefix != nil { + appendIPOrCIDRLinkIfValid(&sdpItem.LinkedItemQueries, *rule.Properties.SourceAddressPrefix) + } + for _, p := range rule.Properties.SourceAddressPrefixes { + if p != nil { + appendIPOrCIDRLinkIfValid(&sdpItem.LinkedItemQueries, *p) + } + } + if rule.Properties.DestinationAddressPrefix != nil { + appendIPOrCIDRLinkIfValid(&sdpItem.LinkedItemQueries, *rule.Properties.DestinationAddressPrefix) + } + for _, p := range rule.Properties.DestinationAddressPrefixes { + if p != nil { + appendIPOrCIDRLinkIfValid(&sdpItem.LinkedItemQueries, *p) + } + } + } + + return sdpItem, nil +} + +func (n networkSecurityRuleWrapper) PotentialLinks() map[shared.ItemType]bool { + return shared.NewItemTypesSet( + azureshared.NetworkNetworkSecurityGroup, + azureshared.NetworkApplicationSecurityGroup, + stdlib.NetworkIP, + ) +} + +// ref: https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/network_security_rule +func (n networkSecurityRuleWrapper) TerraformMappings() []*sdp.TerraformMapping { + return []*sdp.TerraformMapping{ + { + TerraformMethod: sdp.QueryMethod_SEARCH, + TerraformQueryMap: 
"azurerm_network_security_rule.id", + }, + } +} + +// ref: https://learn.microsoft.com/en-us/azure/role-based-access-control/permissions-reference#microsoftnetwork +func (n networkSecurityRuleWrapper) IAMPermissions() []string { + return []string{ + "Microsoft.Network/networkSecurityGroups/securityRules/read", + } +} + +func (n networkSecurityRuleWrapper) PredefinedRole() string { + return "Reader" +} diff --git a/sources/azure/manual/network-security-rule_test.go b/sources/azure/manual/network-security-rule_test.go new file mode 100644 index 00000000..3f77f922 --- /dev/null +++ b/sources/azure/manual/network-security-rule_test.go @@ -0,0 +1,300 @@ +package manual_test + +import ( + "context" + "errors" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + "go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/discovery" + sdp "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + "github.com/overmindtech/cli/sources/shared" +) + +type mockSecurityRulesPager struct { + pages []armnetwork.SecurityRulesClientListResponse + index int +} + +func (m *mockSecurityRulesPager) More() bool { + return m.index < len(m.pages) +} + +func (m *mockSecurityRulesPager) NextPage(ctx context.Context) (armnetwork.SecurityRulesClientListResponse, error) { + if m.index >= len(m.pages) { + return armnetwork.SecurityRulesClientListResponse{}, errors.New("no more pages") + } + page := m.pages[m.index] + m.index++ + return page, nil +} + +type errorSecurityRulesPager struct{} + +func (e *errorSecurityRulesPager) More() bool { + return true +} + +func (e *errorSecurityRulesPager) NextPage(ctx context.Context) 
(armnetwork.SecurityRulesClientListResponse, error) { + return armnetwork.SecurityRulesClientListResponse{}, errors.New("pager error") +} + +type testSecurityRulesClient struct { + *mocks.MockSecurityRulesClient + pager clients.SecurityRulesPager +} + +func (t *testSecurityRulesClient) NewListPager(resourceGroupName, networkSecurityGroupName string, options *armnetwork.SecurityRulesClientListOptions) clients.SecurityRulesPager { + return t.pager +} + +func TestNetworkSecurityRule(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + nsgName := "test-nsg" + ruleName := "test-rule" + + t.Run("Get", func(t *testing.T) { + rule := createAzureSecurityRule(ruleName, nsgName) + + mockClient := mocks.NewMockSecurityRulesClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, nsgName, ruleName, nil).Return( + armnetwork.SecurityRulesClientGetResponse{ + SecurityRule: *rule, + }, nil) + + testClient := &testSecurityRulesClient{MockSecurityRulesClient: mockClient} + wrapper := manual.NewNetworkSecurityRule(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(nsgName, ruleName) + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.NetworkSecurityRule.String() { + t.Errorf("Expected type %s, got %s", azureshared.NetworkSecurityRule, sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", sdpItem.GetUniqueAttribute()) + } + + if sdpItem.UniqueAttributeValue() != shared.CompositeLookupKey(nsgName, ruleName) { + t.Errorf("Expected unique attribute value %s, got %s", 
shared.CompositeLookupKey(nsgName, ruleName), sdpItem.UniqueAttributeValue()) + } + + if sdpItem.GetScope() != subscriptionID+"."+resourceGroup { + t.Errorf("Expected scope %s, got %s", subscriptionID+"."+resourceGroup, sdpItem.GetScope()) + } + + if err := sdpItem.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + + t.Run("StaticTests", func(t *testing.T) { + queryTests := shared.QueryTests{ + { + ExpectedType: azureshared.NetworkNetworkSecurityGroup.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: nsgName, + ExpectedScope: subscriptionID + "." + resourceGroup, + }, + } + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("Get_EmptyRuleName", func(t *testing.T) { + mockClient := mocks.NewMockSecurityRulesClient(ctrl) + testClient := &testSecurityRulesClient{MockSecurityRulesClient: mockClient} + + wrapper := manual.NewNetworkSecurityRule(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(nsgName, "") + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when rule name is empty, but got nil") + } + }) + + t.Run("Get_InsufficientQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockSecurityRulesClient(ctrl) + testClient := &testSecurityRulesClient{MockSecurityRulesClient: mockClient} + + wrapper := manual.NewNetworkSecurityRule(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], nsgName, true) + if qErr == nil { + t.Error("Expected error when providing insufficient query parts, but got nil") + } + }) + + t.Run("Search", func(t *testing.T) { + rule1 := 
createAzureSecurityRule("rule-1", nsgName) + rule2 := createAzureSecurityRule("rule-2", nsgName) + + mockClient := mocks.NewMockSecurityRulesClient(ctrl) + mockPager := &mockSecurityRulesPager{ + pages: []armnetwork.SecurityRulesClientListResponse{ + { + SecurityRuleListResult: armnetwork.SecurityRuleListResult{ + Value: []*armnetwork.SecurityRule{rule1, rule2}, + }, + }, + }, + } + + testClient := &testSecurityRulesClient{ + MockSecurityRulesClient: mockClient, + pager: mockPager, + } + + wrapper := manual.NewNetworkSecurityRule(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, wrapper.Scopes()[0], nsgName, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 2 { + t.Fatalf("Expected 2 items, got: %d", len(sdpItems)) + } + + for _, item := range sdpItems { + if err := item.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + if item.GetType() != azureshared.NetworkSecurityRule.String() { + t.Errorf("Expected type %s, got %s", azureshared.NetworkSecurityRule, item.GetType()) + } + } + }) + + t.Run("Search_InvalidQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockSecurityRulesClient(ctrl) + testClient := &testSecurityRulesClient{MockSecurityRulesClient: mockClient} + + wrapper := manual.NewNetworkSecurityRule(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + _, qErr := wrapper.Search(ctx, wrapper.Scopes()[0]) + if qErr == nil { + t.Error("Expected error when providing no query parts, but got nil") + } + }) + + t.Run("Search_RuleWithNilName", func(t *testing.T) { + validRule := 
createAzureSecurityRule("valid-rule", nsgName) + + mockClient := mocks.NewMockSecurityRulesClient(ctrl) + mockPager := &mockSecurityRulesPager{ + pages: []armnetwork.SecurityRulesClientListResponse{ + { + SecurityRuleListResult: armnetwork.SecurityRuleListResult{ + Value: []*armnetwork.SecurityRule{ + {Name: nil, ID: new("/some/id")}, + validRule, + }, + }, + }, + }, + } + + testClient := &testSecurityRulesClient{ + MockSecurityRulesClient: mockClient, + pager: mockPager, + } + + wrapper := manual.NewNetworkSecurityRule(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable := adapter.(discovery.SearchableAdapter) + sdpItems, err := searchable.Search(ctx, wrapper.Scopes()[0], nsgName, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 1 { + t.Fatalf("Expected 1 item (nil name skipped), got: %d", len(sdpItems)) + } + if sdpItems[0].UniqueAttributeValue() != shared.CompositeLookupKey(nsgName, "valid-rule") { + t.Errorf("Expected unique value %s, got %s", shared.CompositeLookupKey(nsgName, "valid-rule"), sdpItems[0].UniqueAttributeValue()) + } + }) + + t.Run("ErrorHandling_Get", func(t *testing.T) { + expectedErr := errors.New("security rule not found") + + mockClient := mocks.NewMockSecurityRulesClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, nsgName, "nonexistent-rule", nil).Return( + armnetwork.SecurityRulesClientGetResponse{}, expectedErr) + + testClient := &testSecurityRulesClient{MockSecurityRulesClient: mockClient} + wrapper := manual.NewNetworkSecurityRule(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(nsgName, "nonexistent-rule") + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, 
true) + if qErr == nil { + t.Error("Expected error when getting non-existent rule, but got nil") + } + }) + + t.Run("ErrorHandling_Search", func(t *testing.T) { + mockClient := mocks.NewMockSecurityRulesClient(ctrl) + testClient := &testSecurityRulesClient{ + MockSecurityRulesClient: mockClient, + pager: &errorSecurityRulesPager{}, + } + + wrapper := manual.NewNetworkSecurityRule(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable := adapter.(discovery.SearchableAdapter) + _, err := searchable.Search(ctx, wrapper.Scopes()[0], nsgName, true) + if err == nil { + t.Error("Expected error from pager when NextPage returns an error, but got nil") + } + }) +} + +func createAzureSecurityRule(ruleName, nsgName string) *armnetwork.SecurityRule { + idStr := "/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Network/networkSecurityGroups/" + nsgName + "/securityRules/" + ruleName + typeStr := "Microsoft.Network/networkSecurityGroups/securityRules" + access := armnetwork.SecurityRuleAccessAllow + direction := armnetwork.SecurityRuleDirectionInbound + protocol := armnetwork.SecurityRuleProtocolAsterisk + priority := int32(100) + return &armnetwork.SecurityRule{ + ID: &idStr, + Name: &ruleName, + Type: &typeStr, + Properties: &armnetwork.SecurityRulePropertiesFormat{ + Access: &access, + Direction: &direction, + Protocol: &protocol, + Priority: &priority, + }, + } +} diff --git a/sources/azure/manual/network-subnet.go b/sources/azure/manual/network-subnet.go new file mode 100644 index 00000000..031a9134 --- /dev/null +++ b/sources/azure/manual/network-subnet.go @@ -0,0 +1,452 @@ +package manual + +import ( + "context" + "errors" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + 
"github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" +) + +var NetworkSubnetLookupByUniqueAttr = shared.NewItemTypeLookup("uniqueAttr", azureshared.NetworkSubnet) + +type networkSubnetWrapper struct { + client clients.SubnetsClient + + *azureshared.MultiResourceGroupBase +} + +// NewNetworkSubnet creates a new networkSubnetWrapper instance (SearchableWrapper: child of virtual network). +func NewNetworkSubnet(client clients.SubnetsClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.SearchableWrapper { + return &networkSubnetWrapper{ + client: client, + MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_NETWORK, + azureshared.NetworkSubnet, + ), + } +} + +func (n networkSubnetWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 2 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Get requires 2 query parts: virtualNetworkName and subnetName", + Scope: scope, + ItemType: n.Type(), + } + } + virtualNetworkName := queryParts[0] + subnetName := queryParts[1] + + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + resp, err := n.client.Get(ctx, rgScope.ResourceGroup, virtualNetworkName, subnetName, nil) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + + return n.azureSubnetToSDPItem(&resp.Subnet, virtualNetworkName, subnetName, scope) +} + +func (n networkSubnetWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + NetworkVirtualNetworkLookupByName, + NetworkSubnetLookupByUniqueAttr, + } +} + +func (n networkSubnetWrapper) Search(ctx 
context.Context, scope string, queryParts ...string) ([]*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 1 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Search requires 1 query part: virtualNetworkName", + Scope: scope, + ItemType: n.Type(), + } + } + virtualNetworkName := queryParts[0] + + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + pager := n.client.NewListPager(rgScope.ResourceGroup, virtualNetworkName, nil) + + var items []*sdp.Item + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + + for _, subnet := range page.Value { + if subnet == nil || subnet.Name == nil { + continue + } + item, sdpErr := n.azureSubnetToSDPItem(subnet, virtualNetworkName, *subnet.Name, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + + return items, nil +} + +func (n networkSubnetWrapper) SearchStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string, queryParts ...string) { + if len(queryParts) < 1 { + stream.SendError(azureshared.QueryError(errors.New("Search requires 1 query part: virtualNetworkName"), scope, n.Type())) + return + } + virtualNetworkName := queryParts[0] + + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, n.Type())) + return + } + pager := n.client.NewListPager(rgScope.ResourceGroup, virtualNetworkName, nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, n.Type())) + return + } + for _, subnet := range page.Value { + if subnet == nil || subnet.Name == nil { + continue + } + item, sdpErr := n.azureSubnetToSDPItem(subnet, virtualNetworkName, *subnet.Name, scope) + if sdpErr != nil { + 
stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) + stream.SendItem(item) + } + } +} + +func (n networkSubnetWrapper) SearchLookups() []sources.ItemTypeLookups { + return []sources.ItemTypeLookups{ + { + NetworkVirtualNetworkLookupByName, + }, + } +} + +func (n networkSubnetWrapper) PotentialLinks() map[shared.ItemType]bool { + return map[shared.ItemType]bool{ + azureshared.NetworkVirtualNetwork: true, + azureshared.NetworkNetworkSecurityGroup: true, + azureshared.NetworkRouteTable: true, + azureshared.NetworkNatGateway: true, + azureshared.NetworkPrivateEndpoint: true, + azureshared.NetworkServiceEndpointPolicy: true, + azureshared.NetworkIpAllocation: true, + azureshared.NetworkNetworkInterface: true, + azureshared.NetworkApplicationGateway: true, + } +} + +func (n networkSubnetWrapper) azureSubnetToSDPItem(subnet *armnetwork.Subnet, virtualNetworkName, subnetName, scope string) (*sdp.Item, *sdp.QueryError) { + attributes, err := shared.ToAttributesWithExclude(subnet, "tags") + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + + err = attributes.Set("uniqueAttr", shared.CompositeLookupKey(virtualNetworkName, subnetName)) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + + sdpItem := &sdp.Item{ + Type: azureshared.NetworkSubnet.String(), + UniqueAttribute: "uniqueAttr", + Attributes: attributes, + Scope: scope, + } + + // Link to parent Virtual Network + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkVirtualNetwork.String(), + Method: sdp.QueryMethod_GET, + Query: virtualNetworkName, + Scope: scope, + }, + }) + + // Link to Network Security Group from subnet + if subnet.Properties != nil && subnet.Properties.NetworkSecurityGroup != nil && subnet.Properties.NetworkSecurityGroup.ID != nil { + nsgID := *subnet.Properties.NetworkSecurityGroup.ID + nsgName := 
azureshared.ExtractResourceName(nsgID) + if nsgName != "" { + linkScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(nsgID); extractedScope != "" { + linkScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkNetworkSecurityGroup.String(), + Method: sdp.QueryMethod_GET, + Query: nsgName, + Scope: linkScope, + }, + }) + } + } + + // Link to Route Table from subnet + if subnet.Properties != nil && subnet.Properties.RouteTable != nil && subnet.Properties.RouteTable.ID != nil { + routeTableID := *subnet.Properties.RouteTable.ID + routeTableName := azureshared.ExtractResourceName(routeTableID) + if routeTableName != "" { + linkScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(routeTableID); extractedScope != "" { + linkScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkRouteTable.String(), + Method: sdp.QueryMethod_GET, + Query: routeTableName, + Scope: linkScope, + }, + }) + } + } + + // Link to NAT Gateway from subnet + if subnet.Properties != nil && subnet.Properties.NatGateway != nil && subnet.Properties.NatGateway.ID != nil { + natGatewayID := *subnet.Properties.NatGateway.ID + natGatewayName := azureshared.ExtractResourceName(natGatewayID) + if natGatewayName != "" { + linkScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(natGatewayID); extractedScope != "" { + linkScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkNatGateway.String(), + Method: sdp.QueryMethod_GET, + Query: natGatewayName, + Scope: linkScope, + }, + }) + } + } + + // Link to Private Endpoints from subnet (read-only references) + if subnet.Properties != nil && subnet.Properties.PrivateEndpoints != 
nil { + for _, privateEndpoint := range subnet.Properties.PrivateEndpoints { + if privateEndpoint != nil && privateEndpoint.ID != nil { + privateEndpointID := *privateEndpoint.ID + privateEndpointName := azureshared.ExtractResourceName(privateEndpointID) + if privateEndpointName != "" { + linkScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(privateEndpointID); extractedScope != "" { + linkScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkPrivateEndpoint.String(), + Method: sdp.QueryMethod_GET, + Query: privateEndpointName, + Scope: linkScope, + }, + }) + } + } + } + } + + // Link to Service Endpoint Policies from subnet + if subnet.Properties != nil && subnet.Properties.ServiceEndpointPolicies != nil { + for _, policy := range subnet.Properties.ServiceEndpointPolicies { + if policy != nil && policy.ID != nil { + policyID := *policy.ID + policyName := azureshared.ExtractResourceName(policyID) + if policyName != "" { + linkScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(policyID); extractedScope != "" { + linkScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkServiceEndpointPolicy.String(), + Method: sdp.QueryMethod_GET, + Query: policyName, + Scope: linkScope, + }, + }) + } + } + } + } + + // Link to IP Allocations from subnet (references that use this subnet) + if subnet.Properties != nil && subnet.Properties.IPAllocations != nil { + for _, ipAlloc := range subnet.Properties.IPAllocations { + if ipAlloc != nil && ipAlloc.ID != nil { + ipAllocID := *ipAlloc.ID + ipAllocName := azureshared.ExtractResourceName(ipAllocID) + if ipAllocName != "" { + linkScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(ipAllocID); extractedScope != "" { + linkScope = extractedScope + } + 
sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkIpAllocation.String(), + Method: sdp.QueryMethod_GET, + Query: ipAllocName, + Scope: linkScope, + }, + }) + } + } + } + } + + // Link to Network Interfaces that have IP configurations in this subnet (read-only references) + if subnet.Properties != nil && subnet.Properties.IPConfigurations != nil { + for _, ipConfig := range subnet.Properties.IPConfigurations { + if ipConfig != nil && ipConfig.ID != nil { + ipConfigID := *ipConfig.ID + // Format: .../networkInterfaces/{nicName}/ipConfigurations/{ipConfigName} + if strings.Contains(ipConfigID, "/networkInterfaces/") { + nicNames := azureshared.ExtractPathParamsFromResourceID(ipConfigID, []string{"networkInterfaces"}) + if len(nicNames) > 0 && nicNames[0] != "" { + linkScope := azureshared.ExtractScopeFromResourceID(ipConfigID) + if linkScope == "" { + linkScope = scope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkNetworkInterface.String(), + Method: sdp.QueryMethod_GET, + Query: nicNames[0], + Scope: linkScope, + }, + }) + } + } + } + } + } + + // Link to Application Gateways that have gateway IP configurations in this subnet (read-only references) + if subnet.Properties != nil && subnet.Properties.ApplicationGatewayIPConfigurations != nil { + for _, agIPConfig := range subnet.Properties.ApplicationGatewayIPConfigurations { + if agIPConfig != nil && agIPConfig.ID != nil { + agIPConfigID := *agIPConfig.ID + // Format: .../applicationGateways/{agName}/applicationGatewayIPConfigurations/... 
+ agNames := azureshared.ExtractPathParamsFromResourceID(agIPConfigID, []string{"applicationGateways"}) + if len(agNames) > 0 && agNames[0] != "" { + linkScope := azureshared.ExtractScopeFromResourceID(agIPConfigID) + if linkScope == "" { + linkScope = scope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkApplicationGateway.String(), + Method: sdp.QueryMethod_GET, + Query: agNames[0], + Scope: linkScope, + }, + }) + } + } + } + } + + // Link to external resources referenced by ResourceNavigationLinks (e.g. SQL Managed Instance) + if subnet.Properties != nil && subnet.Properties.ResourceNavigationLinks != nil { + for _, rnl := range subnet.Properties.ResourceNavigationLinks { + if rnl != nil && rnl.Properties != nil && rnl.Properties.Link != nil { + linkID := *rnl.Properties.Link + resourceName := azureshared.ExtractResourceName(linkID) + if resourceName != "" { + linkScope := azureshared.ExtractScopeFromResourceID(linkID) + if linkScope == "" { + linkScope = scope + } + itemType := azureshared.ItemTypeFromLinkedResourceID(linkID) + if itemType == "" { + itemType = "azure-resource" + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: itemType, + Method: sdp.QueryMethod_GET, + Query: resourceName, + Scope: linkScope, + }, + }) + } + } + } + } + + // Link to external resources referenced by ServiceAssociationLinks (e.g. 
App Service Environment) + if subnet.Properties != nil && subnet.Properties.ServiceAssociationLinks != nil { + for _, sal := range subnet.Properties.ServiceAssociationLinks { + if sal != nil && sal.Properties != nil && sal.Properties.Link != nil { + linkID := *sal.Properties.Link + resourceName := azureshared.ExtractResourceName(linkID) + if resourceName != "" { + linkScope := azureshared.ExtractScopeFromResourceID(linkID) + if linkScope == "" { + linkScope = scope + } + itemType := azureshared.ItemTypeFromLinkedResourceID(linkID) + if itemType == "" { + itemType = "azure-resource" + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: itemType, + Method: sdp.QueryMethod_GET, + Query: resourceName, + Scope: linkScope, + }, + }) + } + } + } + } + + return sdpItem, nil +} + +func (n networkSubnetWrapper) TerraformMappings() []*sdp.TerraformMapping { + return []*sdp.TerraformMapping{ + { + TerraformMethod: sdp.QueryMethod_SEARCH, + TerraformQueryMap: "azurerm_subnet.id", + }, + } +} + +func (n networkSubnetWrapper) IAMPermissions() []string { + return []string{ + "Microsoft.Network/virtualNetworks/subnets/read", + } +} + +func (n networkSubnetWrapper) PredefinedRole() string { + return "Reader" +} diff --git a/sources/azure/manual/network-subnet_test.go b/sources/azure/manual/network-subnet_test.go new file mode 100644 index 00000000..7e55e9d8 --- /dev/null +++ b/sources/azure/manual/network-subnet_test.go @@ -0,0 +1,277 @@ +package manual_test + +import ( + "context" + "errors" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + "go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared 
"github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + "github.com/overmindtech/cli/sources/shared" +) + +type mockSubnetsPager struct { + pages []armnetwork.SubnetsClientListResponse + index int +} + +func (m *mockSubnetsPager) More() bool { + return m.index < len(m.pages) +} + +func (m *mockSubnetsPager) NextPage(ctx context.Context) (armnetwork.SubnetsClientListResponse, error) { + if m.index >= len(m.pages) { + return armnetwork.SubnetsClientListResponse{}, errors.New("no more pages") + } + page := m.pages[m.index] + m.index++ + return page, nil +} + +type errorSubnetsPager struct{} + +func (e *errorSubnetsPager) More() bool { + return true +} + +func (e *errorSubnetsPager) NextPage(ctx context.Context) (armnetwork.SubnetsClientListResponse, error) { + return armnetwork.SubnetsClientListResponse{}, errors.New("pager error") +} + +type testSubnetsClient struct { + *mocks.MockSubnetsClient + pager clients.SubnetsPager +} + +func (t *testSubnetsClient) NewListPager(resourceGroupName, virtualNetworkName string, options *armnetwork.SubnetsClientListOptions) clients.SubnetsPager { + return t.pager +} + +func TestNetworkSubnet(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + virtualNetworkName := "test-vnet" + subnetName := "test-subnet" + + t.Run("Get", func(t *testing.T) { + subnet := createAzureSubnet(subnetName, virtualNetworkName) + + mockClient := mocks.NewMockSubnetsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, virtualNetworkName, subnetName, nil).Return( + armnetwork.SubnetsClientGetResponse{ + Subnet: *subnet, + }, nil) + + testClient := &testSubnetsClient{MockSubnetsClient: mockClient} + wrapper := manual.NewNetworkSubnet(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := 
sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(virtualNetworkName, subnetName) + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.NetworkSubnet.String() { + t.Errorf("Expected type %s, got %s", azureshared.NetworkSubnet, sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", sdpItem.GetUniqueAttribute()) + } + + if sdpItem.UniqueAttributeValue() != shared.CompositeLookupKey(virtualNetworkName, subnetName) { + t.Errorf("Expected unique attribute value %s, got %s", shared.CompositeLookupKey(virtualNetworkName, subnetName), sdpItem.UniqueAttributeValue()) + } + + if sdpItem.GetScope() != subscriptionID+"."+resourceGroup { + t.Errorf("Expected scope %s, got %s", subscriptionID+"."+resourceGroup, sdpItem.GetScope()) + } + + if err := sdpItem.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + + t.Run("StaticTests", func(t *testing.T) { + queryTests := shared.QueryTests{ + { + ExpectedType: azureshared.NetworkVirtualNetwork.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: virtualNetworkName, + ExpectedScope: subscriptionID + "." 
+ resourceGroup, + }, + } + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("Get_InvalidQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockSubnetsClient(ctrl) + testClient := &testSubnetsClient{MockSubnetsClient: mockClient} + + wrapper := manual.NewNetworkSubnet(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], virtualNetworkName, true) + if qErr == nil { + t.Error("Expected error when providing insufficient query parts, but got nil") + } + }) + + t.Run("Search", func(t *testing.T) { + subnet1 := createAzureSubnet("subnet-1", virtualNetworkName) + subnet2 := createAzureSubnet("subnet-2", virtualNetworkName) + + mockClient := mocks.NewMockSubnetsClient(ctrl) + mockPager := &mockSubnetsPager{ + pages: []armnetwork.SubnetsClientListResponse{ + { + SubnetListResult: armnetwork.SubnetListResult{ + Value: []*armnetwork.Subnet{subnet1, subnet2}, + }, + }, + }, + } + + testClient := &testSubnetsClient{ + MockSubnetsClient: mockClient, + pager: mockPager, + } + + wrapper := manual.NewNetworkSubnet(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, wrapper.Scopes()[0], virtualNetworkName, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 2 { + t.Fatalf("Expected 2 items, got: %d", len(sdpItems)) + } + + for _, item := range sdpItems { + if err := item.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + if item.GetType() != azureshared.NetworkSubnet.String() { + 
t.Errorf("Expected type %s, got %s", azureshared.NetworkSubnet, item.GetType()) + } + } + }) + + t.Run("Search_InvalidQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockSubnetsClient(ctrl) + testClient := &testSubnetsClient{MockSubnetsClient: mockClient} + + wrapper := manual.NewNetworkSubnet(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + _, qErr := wrapper.Search(ctx, wrapper.Scopes()[0]) + if qErr == nil { + t.Error("Expected error when providing no query parts, but got nil") + } + }) + + t.Run("Search_SubnetWithNilName", func(t *testing.T) { + validSubnet := createAzureSubnet("valid-subnet", virtualNetworkName) + + mockClient := mocks.NewMockSubnetsClient(ctrl) + mockPager := &mockSubnetsPager{ + pages: []armnetwork.SubnetsClientListResponse{ + { + SubnetListResult: armnetwork.SubnetListResult{ + Value: []*armnetwork.Subnet{ + {Name: nil, ID: new("/some/id")}, + validSubnet, + }, + }, + }, + }, + } + + testClient := &testSubnetsClient{ + MockSubnetsClient: mockClient, + pager: mockPager, + } + + wrapper := manual.NewNetworkSubnet(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable := adapter.(discovery.SearchableAdapter) + sdpItems, err := searchable.Search(ctx, wrapper.Scopes()[0], virtualNetworkName, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 1 { + t.Fatalf("Expected 1 item (nil name skipped), got: %d", len(sdpItems)) + } + if sdpItems[0].UniqueAttributeValue() != shared.CompositeLookupKey(virtualNetworkName, "valid-subnet") { + t.Errorf("Expected unique value %s, got %s", shared.CompositeLookupKey(virtualNetworkName, "valid-subnet"), sdpItems[0].UniqueAttributeValue()) + } + }) + + t.Run("ErrorHandling_Get", func(t *testing.T) { + expectedErr := errors.New("subnet not found") + + 
mockClient := mocks.NewMockSubnetsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, virtualNetworkName, "nonexistent-subnet", nil).Return( + armnetwork.SubnetsClientGetResponse{}, expectedErr) + + testClient := &testSubnetsClient{MockSubnetsClient: mockClient} + wrapper := manual.NewNetworkSubnet(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(virtualNetworkName, "nonexistent-subnet") + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when getting non-existent subnet, but got nil") + } + }) + + t.Run("ErrorHandling_Search", func(t *testing.T) { + mockClient := mocks.NewMockSubnetsClient(ctrl) + testClient := &testSubnetsClient{ + MockSubnetsClient: mockClient, + pager: &errorSubnetsPager{}, + } + + wrapper := manual.NewNetworkSubnet(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable := adapter.(discovery.SearchableAdapter) + _, err := searchable.Search(ctx, wrapper.Scopes()[0], virtualNetworkName, true) + if err == nil { + t.Error("Expected error from pager when NextPage returns an error, but got nil") + } + }) +} + +func createAzureSubnet(subnetName, vnetName string) *armnetwork.Subnet { + return &armnetwork.Subnet{ + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Network/virtualNetworks/" + vnetName + "/subnets/" + subnetName), + Name: new(subnetName), + Type: new("Microsoft.Network/virtualNetworks/subnets"), + Properties: &armnetwork.SubnetPropertiesFormat{ + AddressPrefix: new("10.0.0.0/24"), + }, + } +} diff --git a/sources/azure/manual/network-virtual-network-gateway.go b/sources/azure/manual/network-virtual-network-gateway.go new file 
mode 100644 index 00000000..a08c3785 --- /dev/null +++ b/sources/azure/manual/network-virtual-network-gateway.go @@ -0,0 +1,488 @@ +package manual + +import ( + "context" + "errors" + "net" + "net/url" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" + "github.com/overmindtech/cli/sources/stdlib" +) + +var NetworkVirtualNetworkGatewayLookupByName = shared.NewItemTypeLookup("name", azureshared.NetworkVirtualNetworkGateway) + +type networkVirtualNetworkGatewayWrapper struct { + client clients.VirtualNetworkGatewaysClient + + *azureshared.MultiResourceGroupBase +} + +// NewNetworkVirtualNetworkGateway creates a new networkVirtualNetworkGatewayWrapper instance. 
+func NewNetworkVirtualNetworkGateway(client clients.VirtualNetworkGatewaysClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.ListableWrapper { + return &networkVirtualNetworkGatewayWrapper{ + client: client, + MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_NETWORK, + azureshared.NetworkVirtualNetworkGateway, + ), + } +} + +func (n networkVirtualNetworkGatewayWrapper) List(ctx context.Context, scope string) ([]*sdp.Item, *sdp.QueryError) { + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + pager := n.client.NewListPager(rgScope.ResourceGroup, nil) + + var items []*sdp.Item + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + + for _, gw := range page.Value { + if gw.Name == nil { + continue + } + item, sdpErr := n.azureVirtualNetworkGatewayToSDPItem(gw, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + + return items, nil +} + +func (n networkVirtualNetworkGatewayWrapper) ListStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string) { + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, n.Type())) + return + } + pager := n.client.NewListPager(rgScope.ResourceGroup, nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, n.Type())) + return + } + + for _, gw := range page.Value { + if gw.Name == nil { + continue + } + item, sdpErr := n.azureVirtualNetworkGatewayToSDPItem(gw, scope) + if sdpErr != nil { + stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) + stream.SendItem(item) + } + } 
+} + +func (n networkVirtualNetworkGatewayWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 1 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Get requires 1 query part: virtualNetworkGatewayName", + Scope: scope, + ItemType: n.Type(), + } + } + + gatewayName := queryParts[0] + + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + resp, err := n.client.Get(ctx, rgScope.ResourceGroup, gatewayName, nil) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + + return n.azureVirtualNetworkGatewayToSDPItem(&resp.VirtualNetworkGateway, scope) +} + +func (n networkVirtualNetworkGatewayWrapper) azureVirtualNetworkGatewayToSDPItem(gw *armnetwork.VirtualNetworkGateway, scope string) (*sdp.Item, *sdp.QueryError) { + attributes, err := shared.ToAttributesWithExclude(gw, "tags") + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + + if gw.Name == nil { + return nil, azureshared.QueryError(errors.New("virtual network gateway name is nil"), scope, n.Type()) + } + + sdpItem := &sdp.Item{ + Type: azureshared.NetworkVirtualNetworkGateway.String(), + UniqueAttribute: "name", + Attributes: attributes, + Scope: scope, + Tags: azureshared.ConvertAzureTags(gw.Tags), + LinkedItemQueries: []*sdp.LinkedItemQuery{}, + } + + // Health from provisioning state + if gw.Properties != nil && gw.Properties.ProvisioningState != nil { + switch *gw.Properties.ProvisioningState { + case armnetwork.ProvisioningStateSucceeded: + sdpItem.Health = sdp.Health_HEALTH_OK.Enum() + case armnetwork.ProvisioningStateCreating, armnetwork.ProvisioningStateUpdating, armnetwork.ProvisioningStateDeleting: + sdpItem.Health = sdp.Health_HEALTH_PENDING.Enum() + case armnetwork.ProvisioningStateFailed, armnetwork.ProvisioningStateCanceled: + sdpItem.Health = 
sdp.Health_HEALTH_ERROR.Enum() + default: + sdpItem.Health = sdp.Health_HEALTH_UNKNOWN.Enum() + } + } + + // Link from IP configurations: subnet, public IP, private IP + if gw.Properties != nil && gw.Properties.IPConfigurations != nil { + for _, ipConfig := range gw.Properties.IPConfigurations { + if ipConfig == nil || ipConfig.Properties == nil { + continue + } + + // Subnet (SearchableWrapper: virtualNetworks/{vnet}/subnets/{subnet}) + if ipConfig.Properties.Subnet != nil && ipConfig.Properties.Subnet.ID != nil { + subnetID := *ipConfig.Properties.Subnet.ID + params := azureshared.ExtractPathParamsFromResourceID(subnetID, []string{"virtualNetworks", "subnets"}) + if len(params) >= 2 && params[0] != "" && params[1] != "" { + linkedScope := azureshared.ExtractScopeFromResourceID(subnetID) + if linkedScope == "" { + linkedScope = scope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkSubnet.String(), + Method: sdp.QueryMethod_GET, + Scope: linkedScope, + Query: shared.CompositeLookupKey(params[0], params[1]), + }, + }) + } + } + + // Public IP address + if ipConfig.Properties.PublicIPAddress != nil && ipConfig.Properties.PublicIPAddress.ID != nil { + pubIPID := *ipConfig.Properties.PublicIPAddress.ID + pubIPName := azureshared.ExtractResourceName(pubIPID) + if pubIPName != "" { + linkedScope := azureshared.ExtractScopeFromResourceID(pubIPID) + if linkedScope == "" { + linkedScope = scope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkPublicIPAddress.String(), + Method: sdp.QueryMethod_GET, + Query: pubIPName, + Scope: linkedScope, + }, + }) + } + } + + // Private IP address -> stdlib ip + if ipConfig.Properties.PrivateIPAddress != nil && *ipConfig.Properties.PrivateIPAddress != "" { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + 
Type: stdlib.NetworkIP.String(), + Method: sdp.QueryMethod_GET, + Query: *ipConfig.Properties.PrivateIPAddress, + Scope: "global", + }, + }) + } + } + } + + // Inbound DNS forwarding endpoint (read-only IP) + if gw.Properties != nil && gw.Properties.InboundDNSForwardingEndpoint != nil && *gw.Properties.InboundDNSForwardingEndpoint != "" { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkIP.String(), + Method: sdp.QueryMethod_GET, + Query: *gw.Properties.InboundDNSForwardingEndpoint, + Scope: "global", + }, + }) + } + + // Gateway default site (Local Network Gateway) + if gw.Properties != nil && gw.Properties.GatewayDefaultSite != nil && gw.Properties.GatewayDefaultSite.ID != nil { + localGWID := *gw.Properties.GatewayDefaultSite.ID + localGWName := azureshared.ExtractResourceName(localGWID) + if localGWName != "" { + linkedScope := azureshared.ExtractScopeFromResourceID(localGWID) + if linkedScope == "" { + linkedScope = scope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkLocalNetworkGateway.String(), + Method: sdp.QueryMethod_GET, + Query: localGWName, + Scope: linkedScope, + }, + }) + } + } + + // Extended location (custom location) when Name is a custom location resource ID + if gw.ExtendedLocation != nil && gw.ExtendedLocation.Name != nil { + customLocationID := *gw.ExtendedLocation.Name + if strings.Contains(customLocationID, "customLocations") { + customLocationName := azureshared.ExtractResourceName(customLocationID) + if customLocationName != "" { + linkedScope := azureshared.ExtractScopeFromResourceID(customLocationID) + if linkedScope == "" { + linkedScope = scope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.ExtendedLocationCustomLocation.String(), + Method: sdp.QueryMethod_GET, + Query: 
customLocationName, + Scope: linkedScope, + }, + }) + } + } + } + + // User-assigned managed identities (map keys are ARM resource IDs) + if gw.Identity != nil && gw.Identity.UserAssignedIdentities != nil { + for identityID := range gw.Identity.UserAssignedIdentities { + if identityID == "" { + continue + } + identityName := azureshared.ExtractResourceName(identityID) + if identityName != "" { + linkedScope := azureshared.ExtractScopeFromResourceID(identityID) + if linkedScope == "" { + linkedScope = scope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.ManagedIdentityUserAssignedIdentity.String(), + Method: sdp.QueryMethod_GET, + Query: identityName, + Scope: linkedScope, + }, + }) + } + } + } + + // VNet extended location resource (customer VNet when gateway type is local) + if gw.Properties != nil && gw.Properties.VNetExtendedLocationResourceID != nil && *gw.Properties.VNetExtendedLocationResourceID != "" { + vnetID := *gw.Properties.VNetExtendedLocationResourceID + vnetName := azureshared.ExtractResourceName(vnetID) + if vnetName != "" { + linkedScope := azureshared.ExtractScopeFromResourceID(vnetID) + if linkedScope == "" { + linkedScope = scope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkVirtualNetwork.String(), + Method: sdp.QueryMethod_GET, + Query: vnetName, + Scope: linkedScope, + }, + }) + } + } + + // VPN client configuration: RADIUS server address(es) (IP or DNS) + if gw.Properties != nil && gw.Properties.VPNClientConfiguration != nil { + vpnCfg := gw.Properties.VPNClientConfiguration + if vpnCfg.RadiusServerAddress != nil && *vpnCfg.RadiusServerAddress != "" { + appendDNSServerLinkIfValid(&sdpItem.LinkedItemQueries, *vpnCfg.RadiusServerAddress) + } + if vpnCfg.RadiusServers != nil { + for _, radiusServer := range vpnCfg.RadiusServers { + if radiusServer != nil && 
radiusServer.RadiusServerAddress != nil && *radiusServer.RadiusServerAddress != "" { + appendDNSServerLinkIfValid(&sdpItem.LinkedItemQueries, *radiusServer.RadiusServerAddress) + } + } + } + // AAD authentication URLs (e.g. https://login.microsoftonline.com/{tenant}/) — link DNS hostnames + for _, s := range []*string{vpnCfg.AADTenant, vpnCfg.AADAudience, vpnCfg.AADIssuer} { + if s == nil || *s == "" { + continue + } + host := extractHostFromURLOrHostname(*s) + if host == "" { + continue + } + // Skip if it's an IP address; stdlib ip links are added elsewhere for IPs + if net.ParseIP(host) != nil { + continue + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkDNS.String(), + Method: sdp.QueryMethod_SEARCH, + Query: host, + Scope: "global", + }, + }) + } + } + + // BGP settings: peering address and IP arrays + if gw.Properties != nil && gw.Properties.BgpSettings != nil { + bgp := gw.Properties.BgpSettings + if bgp.BgpPeeringAddress != nil && *bgp.BgpPeeringAddress != "" { + if net.ParseIP(*bgp.BgpPeeringAddress) != nil { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkIP.String(), + Method: sdp.QueryMethod_GET, + Query: *bgp.BgpPeeringAddress, + Scope: "global", + }, + }) + } else { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkDNS.String(), + Method: sdp.QueryMethod_SEARCH, + Query: *bgp.BgpPeeringAddress, + Scope: "global", + }, + }) + } + } + if bgp.BgpPeeringAddresses != nil { + for _, peeringAddr := range bgp.BgpPeeringAddresses { + if peeringAddr == nil { + continue + } + for _, ipStr := range peeringAddr.DefaultBgpIPAddresses { + if ipStr != nil && *ipStr != "" { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkIP.String(), + Method: 
sdp.QueryMethod_GET, + Query: *ipStr, + Scope: "global", + }, + }) + } + } + for _, ipStr := range peeringAddr.CustomBgpIPAddresses { + if ipStr != nil && *ipStr != "" { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkIP.String(), + Method: sdp.QueryMethod_GET, + Query: *ipStr, + Scope: "global", + }, + }) + } + } + for _, ipStr := range peeringAddr.TunnelIPAddresses { + if ipStr != nil && *ipStr != "" { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkIP.String(), + Method: sdp.QueryMethod_GET, + Query: *ipStr, + Scope: "global", + }, + }) + } + } + } + } + } + + // Virtual Network Gateway Connections (child resource; list by parent gateway name) + if gw.Name != nil && *gw.Name != "" { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkVirtualNetworkGatewayConnection.String(), + Method: sdp.QueryMethod_SEARCH, + Scope: scope, + Query: *gw.Name, + }, + }) + } + + return sdpItem, nil +} + +func (n networkVirtualNetworkGatewayWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + NetworkVirtualNetworkGatewayLookupByName, + } +} + +func (n networkVirtualNetworkGatewayWrapper) PotentialLinks() map[shared.ItemType]bool { + return map[shared.ItemType]bool{ + azureshared.NetworkSubnet: true, + azureshared.NetworkPublicIPAddress: true, + azureshared.NetworkLocalNetworkGateway: true, + azureshared.NetworkVirtualNetworkGatewayConnection: true, + azureshared.ExtendedLocationCustomLocation: true, + azureshared.ManagedIdentityUserAssignedIdentity: true, + azureshared.NetworkVirtualNetwork: true, + stdlib.NetworkIP: true, + stdlib.NetworkDNS: true, + } +} + +func (n networkVirtualNetworkGatewayWrapper) TerraformMappings() []*sdp.TerraformMapping { + return []*sdp.TerraformMapping{ + { + TerraformMethod: 
sdp.QueryMethod_GET, + TerraformQueryMap: "azurerm_virtual_network_gateway.name", + }, + } +} + +func (n networkVirtualNetworkGatewayWrapper) IAMPermissions() []string { + return []string{ + "Microsoft.Network/virtualNetworkGateways/read", + } +} + +func extractHostFromURLOrHostname(s string) string { + s = strings.TrimSpace(s) + if s == "" { + return "" + } + u, err := url.Parse(s) + if err != nil { + return s + } + if u.Host != "" { + return u.Hostname() + } + return s +} + +func (n networkVirtualNetworkGatewayWrapper) PredefinedRole() string { + return "Reader" +} diff --git a/sources/azure/manual/network-virtual-network-gateway_test.go b/sources/azure/manual/network-virtual-network-gateway_test.go new file mode 100644 index 00000000..c0536c76 --- /dev/null +++ b/sources/azure/manual/network-virtual-network-gateway_test.go @@ -0,0 +1,389 @@ +package manual_test + +import ( + "context" + "errors" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + "go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + "github.com/overmindtech/cli/sources/shared" + "github.com/overmindtech/cli/sources/stdlib" +) + +func TestNetworkVirtualNetworkGateway(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + scope := subscriptionID + "." 
+ resourceGroup + + t.Run("Get", func(t *testing.T) { + gatewayName := "test-gateway" + gw := createAzureVirtualNetworkGateway(gatewayName) + + mockClient := mocks.NewMockVirtualNetworkGatewaysClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, gatewayName, nil).Return( + armnetwork.VirtualNetworkGatewaysClientGetResponse{ + VirtualNetworkGateway: *gw, + }, nil) + + wrapper := manual.NewNetworkVirtualNetworkGateway(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + sdpItem, qErr := adapter.Get(ctx, scope, gatewayName, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.NetworkVirtualNetworkGateway.String() { + t.Errorf("Expected type %s, got %s", azureshared.NetworkVirtualNetworkGateway.String(), sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "name" { + t.Errorf("Expected unique attribute 'name', got %s", sdpItem.GetUniqueAttribute()) + } + + if sdpItem.UniqueAttributeValue() != gatewayName { + t.Errorf("Expected unique attribute value %s, got %s", gatewayName, sdpItem.UniqueAttributeValue()) + } + + if sdpItem.GetTags()["env"] != "test" { + t.Errorf("Expected tag 'env=test', got: %v", sdpItem.GetTags()["env"]) + } + + t.Run("StaticTests", func(t *testing.T) { + queryTests := shared.QueryTests{ + { + ExpectedType: azureshared.NetworkVirtualNetworkGatewayConnection.String(), + ExpectedMethod: sdp.QueryMethod_SEARCH, + ExpectedQuery: gatewayName, + ExpectedScope: scope, + }, + } + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("Get_WithLinkedResources", func(t *testing.T) { + gatewayName := "test-gateway-with-links" + gw := createAzureVirtualNetworkGatewayWithLinks(gatewayName, subscriptionID, resourceGroup) + + mockClient := mocks.NewMockVirtualNetworkGatewaysClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, 
gatewayName, nil).Return( + armnetwork.VirtualNetworkGatewaysClientGetResponse{ + VirtualNetworkGateway: *gw, + }, nil) + + wrapper := manual.NewNetworkVirtualNetworkGateway(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + sdpItem, qErr := adapter.Get(ctx, scope, gatewayName, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + t.Run("StaticTests", func(t *testing.T) { + queryTests := shared.QueryTests{ + { + ExpectedType: azureshared.NetworkSubnet.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: shared.CompositeLookupKey("test-vnet", "GatewaySubnet"), + ExpectedScope: scope, + }, + { + ExpectedType: azureshared.NetworkPublicIPAddress.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-gateway-pip", + ExpectedScope: scope, + }, + { + ExpectedType: stdlib.NetworkIP.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "10.0.1.4", + ExpectedScope: "global", + }, + { + ExpectedType: stdlib.NetworkIP.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "10.0.0.5", + ExpectedScope: "global", + }, + { + ExpectedType: azureshared.NetworkVirtualNetworkGatewayConnection.String(), + ExpectedMethod: sdp.QueryMethod_SEARCH, + ExpectedQuery: gatewayName, + ExpectedScope: scope, + }, + } + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("GetWithEmptyName", func(t *testing.T) { + mockClient := mocks.NewMockVirtualNetworkGatewaysClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, "", nil).Return( + armnetwork.VirtualNetworkGatewaysClientGetResponse{}, errors.New("virtual network gateway not found")) + + wrapper := manual.NewNetworkVirtualNetworkGateway(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, 
sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, scope, "", true) + if qErr == nil { + t.Error("Expected error when getting gateway with empty name, but got nil") + } + }) + + t.Run("ErrorHandling", func(t *testing.T) { + gatewayName := "nonexistent-gateway" + expectedErr := errors.New("virtual network gateway not found") + + mockClient := mocks.NewMockVirtualNetworkGatewaysClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, gatewayName, nil).Return( + armnetwork.VirtualNetworkGatewaysClientGetResponse{}, expectedErr) + + wrapper := manual.NewNetworkVirtualNetworkGateway(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, scope, gatewayName, true) + if qErr == nil { + t.Fatal("Expected error when gateway not found, got nil") + } + }) + + t.Run("List", func(t *testing.T) { + gw1 := createAzureVirtualNetworkGateway("gateway-1") + gw2 := createAzureVirtualNetworkGateway("gateway-2") + + mockClient := mocks.NewMockVirtualNetworkGatewaysClient(ctrl) + mockPager := newMockVirtualNetworkGatewaysPager(ctrl, []*armnetwork.VirtualNetworkGateway{gw1, gw2}) + + mockClient.EXPECT().NewListPager(resourceGroup, nil).Return(mockPager) + + wrapper := manual.NewNetworkVirtualNetworkGateway(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + listable, ok := adapter.(discovery.ListableAdapter) + if !ok { + t.Fatalf("Adapter does not support List operation") + } + + items, err := listable.List(ctx, scope, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(items) != 2 { + t.Fatalf("Expected 2 items, got %d", len(items)) + } + + for i, item := range items { + if item.GetType() != azureshared.NetworkVirtualNetworkGateway.String() { + 
t.Errorf("Item %d: expected type %s, got %s", i, azureshared.NetworkVirtualNetworkGateway.String(), item.GetType()) + } + if item.Validate() != nil { + t.Errorf("Item %d: validation error: %v", i, item.Validate()) + } + } + }) + + t.Run("ListStream", func(t *testing.T) { + gw1 := createAzureVirtualNetworkGateway("gateway-1") + gw2 := createAzureVirtualNetworkGateway("gateway-2") + + mockClient := mocks.NewMockVirtualNetworkGatewaysClient(ctrl) + mockPager := newMockVirtualNetworkGatewaysPager(ctrl, []*armnetwork.VirtualNetworkGateway{gw1, gw2}) + + mockClient.EXPECT().NewListPager(resourceGroup, nil).Return(mockPager) + + wrapper := manual.NewNetworkVirtualNetworkGateway(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + listStream, ok := adapter.(discovery.ListStreamableAdapter) + if !ok { + t.Fatalf("Adapter does not support ListStream operation") + } + + var received []*sdp.Item + stream := &collectingStream{items: &received} + listStream.ListStream(ctx, scope, true, stream) + + if len(received) != 2 { + t.Fatalf("Expected 2 items from stream, got %d", len(received)) + } + }) + + t.Run("List_NilNameSkipped", func(t *testing.T) { + gw1 := createAzureVirtualNetworkGateway("gateway-1") + gw2NilName := createAzureVirtualNetworkGateway("gateway-2") + gw2NilName.Name = nil + + mockClient := mocks.NewMockVirtualNetworkGatewaysClient(ctrl) + mockPager := newMockVirtualNetworkGatewaysPager(ctrl, []*armnetwork.VirtualNetworkGateway{gw1, gw2NilName}) + + mockClient.EXPECT().NewListPager(resourceGroup, nil).Return(mockPager) + + wrapper := manual.NewNetworkVirtualNetworkGateway(mockClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + listable, ok := adapter.(discovery.ListableAdapter) + if !ok { + 
t.Fatalf("Adapter does not support List operation") + } + + items, err := listable.List(ctx, scope, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(items) != 1 { + t.Fatalf("Expected 1 item (nil name skipped), got %d", len(items)) + } + if items[0].UniqueAttributeValue() != "gateway-1" { + t.Errorf("Expected only gateway-1, got %s", items[0].UniqueAttributeValue()) + } + }) + + t.Run("GetLookups", func(t *testing.T) { + wrapper := manual.NewNetworkVirtualNetworkGateway(nil, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + lookups := wrapper.GetLookups() + if len(lookups) == 0 { + t.Error("Expected GetLookups to return at least one lookup") + } + found := false + for _, l := range lookups { + if l.ItemType.String() == azureshared.NetworkVirtualNetworkGateway.String() { + found = true + break + } + } + if !found { + t.Error("Expected GetLookups to include NetworkVirtualNetworkGateway") + } + }) + + t.Run("PotentialLinks", func(t *testing.T) { + wrapper := manual.NewNetworkVirtualNetworkGateway(nil, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + potentialLinks := wrapper.PotentialLinks() + for _, linkType := range []shared.ItemType{ + azureshared.NetworkSubnet, + azureshared.NetworkPublicIPAddress, + azureshared.NetworkLocalNetworkGateway, + azureshared.NetworkVirtualNetworkGatewayConnection, + azureshared.ExtendedLocationCustomLocation, + azureshared.ManagedIdentityUserAssignedIdentity, + azureshared.NetworkVirtualNetwork, + stdlib.NetworkIP, + stdlib.NetworkDNS, + } { + if !potentialLinks[linkType] { + t.Errorf("Expected PotentialLinks to include %s", linkType) + } + } + }) +} + +type collectingStream struct { + items *[]*sdp.Item +} + +func (c *collectingStream) SendItem(item *sdp.Item) { + *c.items = append(*c.items, item) +} + +func (c *collectingStream) SendError(err error) {} + +type mockVirtualNetworkGatewaysPager 
struct { + ctrl *gomock.Controller + items []*armnetwork.VirtualNetworkGateway + index int + more bool +} + +func newMockVirtualNetworkGatewaysPager(ctrl *gomock.Controller, items []*armnetwork.VirtualNetworkGateway) *mockVirtualNetworkGatewaysPager { + return &mockVirtualNetworkGatewaysPager{ + ctrl: ctrl, + items: items, + index: 0, + more: len(items) > 0, + } +} + +func (m *mockVirtualNetworkGatewaysPager) More() bool { + return m.more +} + +func (m *mockVirtualNetworkGatewaysPager) NextPage(ctx context.Context) (armnetwork.VirtualNetworkGatewaysClientListResponse, error) { + if m.index >= len(m.items) { + m.more = false + return armnetwork.VirtualNetworkGatewaysClientListResponse{ + VirtualNetworkGatewayListResult: armnetwork.VirtualNetworkGatewayListResult{ + Value: []*armnetwork.VirtualNetworkGateway{}, + }, + }, nil + } + item := m.items[m.index] + m.index++ + m.more = m.index < len(m.items) + return armnetwork.VirtualNetworkGatewaysClientListResponse{ + VirtualNetworkGatewayListResult: armnetwork.VirtualNetworkGatewayListResult{ + Value: []*armnetwork.VirtualNetworkGateway{item}, + }, + }, nil +} + +func createAzureVirtualNetworkGateway(name string) *armnetwork.VirtualNetworkGateway { + provisioningState := armnetwork.ProvisioningStateSucceeded + gatewayType := armnetwork.VirtualNetworkGatewayTypeVPN + vpnType := armnetwork.VPNTypeRouteBased + return &armnetwork.VirtualNetworkGateway{ + ID: new("/subscriptions/test-sub/resourceGroups/test-rg/providers/Microsoft.Network/virtualNetworkGateways/" + name), + Name: new(name), + Type: new("Microsoft.Network/virtualNetworkGateways"), + Location: new("eastus"), + Tags: map[string]*string{ + "env": new("test"), + "project": new("testing"), + }, + Properties: &armnetwork.VirtualNetworkGatewayPropertiesFormat{ + ProvisioningState: &provisioningState, + GatewayType: &gatewayType, + VPNType: &vpnType, + }, + } +} + +func createAzureVirtualNetworkGatewayWithLinks(name, subscriptionID, resourceGroup string) 
*armnetwork.VirtualNetworkGateway { + gw := createAzureVirtualNetworkGateway(name) + subnetID := "/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/GatewaySubnet" + publicIPID := "/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/publicIPAddresses/test-gateway-pip" + privateIP := "10.0.1.4" + inboundDNS := "10.0.0.5" + gw.Properties.IPConfigurations = []*armnetwork.VirtualNetworkGatewayIPConfiguration{ + { + ID: new("/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/virtualNetworkGateways/" + name + "/ipConfigurations/default"), + Name: new("default"), + Properties: &armnetwork.VirtualNetworkGatewayIPConfigurationPropertiesFormat{ + Subnet: &armnetwork.SubResource{ + ID: new(subnetID), + }, + PublicIPAddress: &armnetwork.SubResource{ + ID: new(publicIPID), + }, + PrivateIPAddress: &privateIP, + }, + }, + } + gw.Properties.InboundDNSForwardingEndpoint = &inboundDNS + return gw +} diff --git a/sources/azure/manual/network-virtual-network-peering.go b/sources/azure/manual/network-virtual-network-peering.go new file mode 100644 index 00000000..99368906 --- /dev/null +++ b/sources/azure/manual/network-virtual-network-peering.go @@ -0,0 +1,270 @@ +package manual + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" +) + +var NetworkVirtualNetworkPeeringLookupByUniqueAttr = shared.NewItemTypeLookup("uniqueAttr", azureshared.NetworkVirtualNetworkPeering) + +type 
networkVirtualNetworkPeeringWrapper struct { + client clients.VirtualNetworkPeeringsClient + + *azureshared.MultiResourceGroupBase +} + +// NewNetworkVirtualNetworkPeering creates a new networkVirtualNetworkPeeringWrapper instance (SearchableWrapper: child of virtual network). +func NewNetworkVirtualNetworkPeering(client clients.VirtualNetworkPeeringsClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.SearchableWrapper { + return &networkVirtualNetworkPeeringWrapper{ + client: client, + MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_NETWORK, + azureshared.NetworkVirtualNetworkPeering, + ), + } +} + +func (n networkVirtualNetworkPeeringWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 2 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Get requires 2 query parts: virtualNetworkName and peeringName", + Scope: scope, + ItemType: n.Type(), + } + } + virtualNetworkName := queryParts[0] + peeringName := queryParts[1] + if peeringName == "" { + return nil, azureshared.QueryError(errors.New("peering name cannot be empty"), scope, n.Type()) + } + + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + resp, err := n.client.Get(ctx, rgScope.ResourceGroup, virtualNetworkName, peeringName, nil) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + + return n.azureVirtualNetworkPeeringToSDPItem(&resp.VirtualNetworkPeering, virtualNetworkName, peeringName, scope) +} + +func (n networkVirtualNetworkPeeringWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + NetworkVirtualNetworkLookupByName, + NetworkVirtualNetworkPeeringLookupByUniqueAttr, + } +} + +func (n networkVirtualNetworkPeeringWrapper) Search(ctx context.Context, scope string, 
queryParts ...string) ([]*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 1 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Search requires 1 query part: virtualNetworkName", + Scope: scope, + ItemType: n.Type(), + } + } + virtualNetworkName := queryParts[0] + + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + pager := n.client.NewListPager(rgScope.ResourceGroup, virtualNetworkName, nil) + + var items []*sdp.Item + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + for _, peering := range page.Value { + if peering == nil || peering.Name == nil { + continue + } + item, sdpErr := n.azureVirtualNetworkPeeringToSDPItem(peering, virtualNetworkName, *peering.Name, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + return items, nil +} + +func (n networkVirtualNetworkPeeringWrapper) SearchStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string, queryParts ...string) { + if len(queryParts) < 1 { + stream.SendError(azureshared.QueryError(errors.New("Search requires 1 query part: virtualNetworkName"), scope, n.Type())) + return + } + virtualNetworkName := queryParts[0] + + rgScope, err := n.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, n.Type())) + return + } + pager := n.client.NewListPager(rgScope.ResourceGroup, virtualNetworkName, nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, n.Type())) + return + } + for _, peering := range page.Value { + if peering == nil || peering.Name == nil { + continue + } + item, sdpErr := n.azureVirtualNetworkPeeringToSDPItem(peering, virtualNetworkName, *peering.Name, scope) + if 
sdpErr != nil { + stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) + stream.SendItem(item) + } + } +} + +func (n networkVirtualNetworkPeeringWrapper) SearchLookups() []sources.ItemTypeLookups { + return []sources.ItemTypeLookups{ + { + NetworkVirtualNetworkLookupByName, + }, + } +} + +func (n networkVirtualNetworkPeeringWrapper) azureVirtualNetworkPeeringToSDPItem(peering *armnetwork.VirtualNetworkPeering, virtualNetworkName, peeringName, scope string) (*sdp.Item, *sdp.QueryError) { + attributes, err := shared.ToAttributesWithExclude(peering, "tags") + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + + err = attributes.Set("uniqueAttr", shared.CompositeLookupKey(virtualNetworkName, peeringName)) + if err != nil { + return nil, azureshared.QueryError(err, scope, n.Type()) + } + + sdpItem := &sdp.Item{ + Type: azureshared.NetworkVirtualNetworkPeering.String(), + UniqueAttribute: "uniqueAttr", + Attributes: attributes, + Scope: scope, + } + + // Health status from ProvisioningState + if peering.Properties != nil && peering.Properties.ProvisioningState != nil { + switch *peering.Properties.ProvisioningState { + case armnetwork.ProvisioningStateSucceeded: + sdpItem.Health = sdp.Health_HEALTH_OK.Enum() + case armnetwork.ProvisioningStateCreating, armnetwork.ProvisioningStateUpdating, armnetwork.ProvisioningStateDeleting: + sdpItem.Health = sdp.Health_HEALTH_PENDING.Enum() + case armnetwork.ProvisioningStateFailed, armnetwork.ProvisioningStateCanceled: + sdpItem.Health = sdp.Health_HEALTH_ERROR.Enum() + } + } + + // Link to parent (local) Virtual Network + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkVirtualNetwork.String(), + Method: sdp.QueryMethod_GET, + Query: virtualNetworkName, + Scope: scope, + }, + }) + + // Link to remote Virtual Network and remote subnets (selective peering) + if 
peering.Properties != nil && peering.Properties.RemoteVirtualNetwork != nil && peering.Properties.RemoteVirtualNetwork.ID != nil { + remoteVNetID := *peering.Properties.RemoteVirtualNetwork.ID + remoteVNetName := azureshared.ExtractResourceName(remoteVNetID) + if remoteVNetName != "" { + linkedScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(remoteVNetID); extractedScope != "" { + linkedScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkVirtualNetwork.String(), + Method: sdp.QueryMethod_GET, + Query: remoteVNetName, + Scope: linkedScope, + }, + }) + // Link to remote subnets (selective subnet peering) + if peering.Properties.RemoteSubnetNames != nil { + for _, name := range peering.Properties.RemoteSubnetNames { + if name != nil && *name != "" { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkSubnet.String(), + Method: sdp.QueryMethod_GET, + Query: shared.CompositeLookupKey(remoteVNetName, *name), + Scope: linkedScope, + }, + }) + } + } + } + } + } + + // Link to local subnets (selective subnet peering) + if peering.Properties != nil && peering.Properties.LocalSubnetNames != nil { + for _, name := range peering.Properties.LocalSubnetNames { + if name != nil && *name != "" { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkSubnet.String(), + Method: sdp.QueryMethod_GET, + Query: shared.CompositeLookupKey(virtualNetworkName, *name), + Scope: scope, + }, + }) + } + } + } + + return sdpItem, nil +} + +func (n networkVirtualNetworkPeeringWrapper) PotentialLinks() map[shared.ItemType]bool { + return shared.NewItemTypesSet( + azureshared.NetworkVirtualNetwork, + azureshared.NetworkSubnet, + ) +} + +// ref: 
https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/virtual_network_peering +func (n networkVirtualNetworkPeeringWrapper) TerraformMappings() []*sdp.TerraformMapping { + return []*sdp.TerraformMapping{ + { + TerraformMethod: sdp.QueryMethod_SEARCH, + TerraformQueryMap: "azurerm_virtual_network_peering.id", + }, + } +} + +// ref: https://learn.microsoft.com/en-us/azure/role-based-access-control/permissions-reference#microsoftnetwork +func (n networkVirtualNetworkPeeringWrapper) IAMPermissions() []string { + return []string{ + "Microsoft.Network/virtualNetworks/virtualNetworkPeerings/read", + } +} + +func (n networkVirtualNetworkPeeringWrapper) PredefinedRole() string { + return "Reader" +} diff --git a/sources/azure/manual/network-virtual-network-peering_test.go b/sources/azure/manual/network-virtual-network-peering_test.go new file mode 100644 index 00000000..9ece09bf --- /dev/null +++ b/sources/azure/manual/network-virtual-network-peering_test.go @@ -0,0 +1,294 @@ +package manual_test + +import ( + "context" + "errors" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + "go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/discovery" + sdp "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + "github.com/overmindtech/cli/sources/shared" +) + +type mockVirtualNetworkPeeringsPager struct { + pages []armnetwork.VirtualNetworkPeeringsClientListResponse + index int +} + +func (m *mockVirtualNetworkPeeringsPager) More() bool { + return m.index < len(m.pages) +} + +func (m *mockVirtualNetworkPeeringsPager) NextPage(ctx context.Context) (armnetwork.VirtualNetworkPeeringsClientListResponse, error) 
{ + if m.index >= len(m.pages) { + return armnetwork.VirtualNetworkPeeringsClientListResponse{}, errors.New("no more pages") + } + page := m.pages[m.index] + m.index++ + return page, nil +} + +type errorVirtualNetworkPeeringsPager struct{} + +func (e *errorVirtualNetworkPeeringsPager) More() bool { + return true +} + +func (e *errorVirtualNetworkPeeringsPager) NextPage(ctx context.Context) (armnetwork.VirtualNetworkPeeringsClientListResponse, error) { + return armnetwork.VirtualNetworkPeeringsClientListResponse{}, errors.New("pager error") +} + +type testVirtualNetworkPeeringsClient struct { + *mocks.MockVirtualNetworkPeeringsClient + pager clients.VirtualNetworkPeeringsPager +} + +func (t *testVirtualNetworkPeeringsClient) NewListPager(resourceGroupName, virtualNetworkName string, options *armnetwork.VirtualNetworkPeeringsClientListOptions) clients.VirtualNetworkPeeringsPager { + return t.pager +} + +func TestNetworkVirtualNetworkPeering(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + virtualNetworkName := "test-vnet" + peeringName := "test-peering" + + t.Run("Get", func(t *testing.T) { + peering := createAzureVirtualNetworkPeering(peeringName, virtualNetworkName) + + mockClient := mocks.NewMockVirtualNetworkPeeringsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, virtualNetworkName, peeringName, nil).Return( + armnetwork.VirtualNetworkPeeringsClientGetResponse{ + VirtualNetworkPeering: *peering, + }, nil) + + testClient := &testVirtualNetworkPeeringsClient{MockVirtualNetworkPeeringsClient: mockClient} + wrapper := manual.NewNetworkVirtualNetworkPeering(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(virtualNetworkName, peeringName) + sdpItem, qErr := 
adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.NetworkVirtualNetworkPeering.String() { + t.Errorf("Expected type %s, got %s", azureshared.NetworkVirtualNetworkPeering, sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", sdpItem.GetUniqueAttribute()) + } + + if sdpItem.UniqueAttributeValue() != shared.CompositeLookupKey(virtualNetworkName, peeringName) { + t.Errorf("Expected unique attribute value %s, got %s", shared.CompositeLookupKey(virtualNetworkName, peeringName), sdpItem.UniqueAttributeValue()) + } + + if sdpItem.GetScope() != subscriptionID+"."+resourceGroup { + t.Errorf("Expected scope %s, got %s", subscriptionID+"."+resourceGroup, sdpItem.GetScope()) + } + + if err := sdpItem.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + + t.Run("StaticTests", func(t *testing.T) { + queryTests := shared.QueryTests{ + { + ExpectedType: azureshared.NetworkVirtualNetwork.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: virtualNetworkName, + ExpectedScope: subscriptionID + "." 
+ resourceGroup, + }, + } + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("Get_EmptyPeeringName", func(t *testing.T) { + mockClient := mocks.NewMockVirtualNetworkPeeringsClient(ctrl) + testClient := &testVirtualNetworkPeeringsClient{MockVirtualNetworkPeeringsClient: mockClient} + + wrapper := manual.NewNetworkVirtualNetworkPeering(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(virtualNetworkName, "") + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when peering name is empty, but got nil") + } + }) + + t.Run("Get_InvalidQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockVirtualNetworkPeeringsClient(ctrl) + testClient := &testVirtualNetworkPeeringsClient{MockVirtualNetworkPeeringsClient: mockClient} + + wrapper := manual.NewNetworkVirtualNetworkPeering(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], virtualNetworkName, true) + if qErr == nil { + t.Error("Expected error when providing insufficient query parts, but got nil") + } + }) + + t.Run("Search", func(t *testing.T) { + peering1 := createAzureVirtualNetworkPeering("peering-1", virtualNetworkName) + peering2 := createAzureVirtualNetworkPeering("peering-2", virtualNetworkName) + + mockClient := mocks.NewMockVirtualNetworkPeeringsClient(ctrl) + mockPager := &mockVirtualNetworkPeeringsPager{ + pages: []armnetwork.VirtualNetworkPeeringsClientListResponse{ + { + VirtualNetworkPeeringListResult: armnetwork.VirtualNetworkPeeringListResult{ + Value: []*armnetwork.VirtualNetworkPeering{peering1, peering2}, + }, + }, + }, + } + + testClient := 
&testVirtualNetworkPeeringsClient{ + MockVirtualNetworkPeeringsClient: mockClient, + pager: mockPager, + } + + wrapper := manual.NewNetworkVirtualNetworkPeering(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, wrapper.Scopes()[0], virtualNetworkName, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 2 { + t.Fatalf("Expected 2 items, got: %d", len(sdpItems)) + } + + for _, item := range sdpItems { + if err := item.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + if item.GetType() != azureshared.NetworkVirtualNetworkPeering.String() { + t.Errorf("Expected type %s, got %s", azureshared.NetworkVirtualNetworkPeering, item.GetType()) + } + } + }) + + t.Run("Search_InvalidQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockVirtualNetworkPeeringsClient(ctrl) + testClient := &testVirtualNetworkPeeringsClient{MockVirtualNetworkPeeringsClient: mockClient} + + wrapper := manual.NewNetworkVirtualNetworkPeering(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + _, qErr := wrapper.Search(ctx, wrapper.Scopes()[0]) + if qErr == nil { + t.Error("Expected error when providing no query parts, but got nil") + } + }) + + t.Run("Search_PeeringWithNilName", func(t *testing.T) { + validPeering := createAzureVirtualNetworkPeering("valid-peering", virtualNetworkName) + + mockClient := mocks.NewMockVirtualNetworkPeeringsClient(ctrl) + mockPager := &mockVirtualNetworkPeeringsPager{ + pages: []armnetwork.VirtualNetworkPeeringsClientListResponse{ + { + VirtualNetworkPeeringListResult: armnetwork.VirtualNetworkPeeringListResult{ + 
Value: []*armnetwork.VirtualNetworkPeering{ + {Name: nil, ID: new("/some/id")}, + validPeering, + }, + }, + }, + }, + } + + testClient := &testVirtualNetworkPeeringsClient{ + MockVirtualNetworkPeeringsClient: mockClient, + pager: mockPager, + } + + wrapper := manual.NewNetworkVirtualNetworkPeering(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable := adapter.(discovery.SearchableAdapter) + sdpItems, err := searchable.Search(ctx, wrapper.Scopes()[0], virtualNetworkName, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 1 { + t.Fatalf("Expected 1 item (nil name skipped), got: %d", len(sdpItems)) + } + if sdpItems[0].UniqueAttributeValue() != shared.CompositeLookupKey(virtualNetworkName, "valid-peering") { + t.Errorf("Expected unique value %s, got %s", shared.CompositeLookupKey(virtualNetworkName, "valid-peering"), sdpItems[0].UniqueAttributeValue()) + } + }) + + t.Run("ErrorHandling_Get", func(t *testing.T) { + expectedErr := errors.New("peering not found") + + mockClient := mocks.NewMockVirtualNetworkPeeringsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, virtualNetworkName, "nonexistent-peering", nil).Return( + armnetwork.VirtualNetworkPeeringsClientGetResponse{}, expectedErr) + + testClient := &testVirtualNetworkPeeringsClient{MockVirtualNetworkPeeringsClient: mockClient} + wrapper := manual.NewNetworkVirtualNetworkPeering(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(virtualNetworkName, "nonexistent-peering") + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when getting non-existent peering, but got nil") + } + }) + + 
t.Run("ErrorHandling_Search", func(t *testing.T) { + mockClient := mocks.NewMockVirtualNetworkPeeringsClient(ctrl) + testClient := &testVirtualNetworkPeeringsClient{ + MockVirtualNetworkPeeringsClient: mockClient, + pager: &errorVirtualNetworkPeeringsPager{}, + } + + wrapper := manual.NewNetworkVirtualNetworkPeering(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable := adapter.(discovery.SearchableAdapter) + _, err := searchable.Search(ctx, wrapper.Scopes()[0], virtualNetworkName, true) + if err == nil { + t.Error("Expected error from pager when NextPage returns an error, but got nil") + } + }) +} + +func createAzureVirtualNetworkPeering(peeringName, vnetName string) *armnetwork.VirtualNetworkPeering { + idStr := "/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Network/virtualNetworks/" + vnetName + "/virtualNetworkPeerings/" + peeringName + typeStr := "Microsoft.Network/virtualNetworks/virtualNetworkPeerings" + provisioningState := armnetwork.ProvisioningStateSucceeded + return &armnetwork.VirtualNetworkPeering{ + ID: &idStr, + Name: &peeringName, + Type: &typeStr, + Properties: &armnetwork.VirtualNetworkPeeringPropertiesFormat{ + ProvisioningState: &provisioningState, + }, + } +} diff --git a/sources/azure/manual/network-virtual-network.go b/sources/azure/manual/network-virtual-network.go index 1d24fe8b..354f93f9 100644 --- a/sources/azure/manual/network-virtual-network.go +++ b/sources/azure/manual/network-virtual-network.go @@ -4,7 +4,7 @@ import ( "context" "errors" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" "github.com/overmindtech/cli/go/sdpcache" diff --git 
a/sources/azure/manual/network-virtual-network_test.go b/sources/azure/manual/network-virtual-network_test.go index 880b3c33..c25b2bc7 100644 --- a/sources/azure/manual/network-virtual-network_test.go +++ b/sources/azure/manual/network-virtual-network_test.go @@ -4,10 +4,10 @@ import ( "context" "errors" "reflect" + "slices" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" "go.uber.org/mock/gomock" "github.com/overmindtech/cli/go/discovery" @@ -208,13 +208,13 @@ func TestNetworkVirtualNetwork(t *testing.T) { vnet1 := createAzureVirtualNetwork("test-vnet-1") vnet2 := &armnetwork.VirtualNetwork{ Name: nil, // VNet with nil name should cause an error in azureVirtualNetworkToSDPItem - Location: to.Ptr("eastus"), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armnetwork.VirtualNetworkPropertiesFormat{ AddressSpace: &armnetwork.AddressSpace{ - AddressPrefixes: []*string{to.Ptr("10.0.0.0/16")}, + AddressPrefixes: []*string{new("10.0.0.0/16")}, }, }, } @@ -313,13 +313,7 @@ func TestNetworkVirtualNetwork(t *testing.T) { t.Error("Expected IAMPermissions to return at least one permission") } expectedPermission := "Microsoft.Network/virtualNetworks/read" - found := false - for _, perm := range permissions { - if perm == expectedPermission { - found = true - break - } - } + found := slices.Contains(permissions, expectedPermission) if !found { t.Errorf("Expected IAMPermissions to include %s", expectedPermission) } @@ -391,7 +385,7 @@ func (m *MockVirtualNetworksPager) More() bool { func (mr *MockVirtualNetworksPagerMockRecorder) More() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "More", reflect.TypeOf((*MockVirtualNetworksPager)(nil).More)) + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "More", reflect.TypeFor[func() bool]()) } func (m *MockVirtualNetworksPager) NextPage(ctx context.Context) (armnetwork.VirtualNetworksClientListResponse, error) { @@ -402,29 +396,29 @@ func (m *MockVirtualNetworksPager) NextPage(ctx context.Context) (armnetwork.Vir return ret0, ret1 } -func (mr *MockVirtualNetworksPagerMockRecorder) NextPage(ctx interface{}) *gomock.Call { +func (mr *MockVirtualNetworksPagerMockRecorder) NextPage(ctx any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NextPage", reflect.TypeOf((*MockVirtualNetworksPager)(nil).NextPage), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NextPage", reflect.TypeFor[func(ctx context.Context) (armnetwork.VirtualNetworksClientListResponse, error)](), ctx) } // createAzureVirtualNetwork creates a mock Azure virtual network for testing func createAzureVirtualNetwork(vnetName string) *armnetwork.VirtualNetwork { return &armnetwork.VirtualNetwork{ - Name: to.Ptr(vnetName), - Location: to.Ptr("eastus"), + Name: new(vnetName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), - "project": to.Ptr("testing"), + "env": new("test"), + "project": new("testing"), }, Properties: &armnetwork.VirtualNetworkPropertiesFormat{ AddressSpace: &armnetwork.AddressSpace{ - AddressPrefixes: []*string{to.Ptr("10.0.0.0/16")}, + AddressPrefixes: []*string{new("10.0.0.0/16")}, }, Subnets: []*armnetwork.Subnet{ { - Name: to.Ptr("default"), + Name: new("default"), Properties: &armnetwork.SubnetPropertiesFormat{ - AddressPrefix: to.Ptr("10.0.0.0/24"), + AddressPrefix: new("10.0.0.0/24"), }, }, }, @@ -437,29 +431,29 @@ func createAzureVirtualNetwork(vnetName string) *armnetwork.VirtualNetwork { func createAzureVirtualNetworkWithDefaultNatGatewayAndDhcpOptions(vnetName, subscriptionID, resourceGroup string) *armnetwork.VirtualNetwork { natGatewayID := "/subscriptions/" + subscriptionID + 
"/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/natGateways/test-nat-gateway" return &armnetwork.VirtualNetwork{ - Name: to.Ptr(vnetName), - Location: to.Ptr("eastus"), + Name: new(vnetName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armnetwork.VirtualNetworkPropertiesFormat{ AddressSpace: &armnetwork.AddressSpace{ - AddressPrefixes: []*string{to.Ptr("10.0.0.0/16")}, + AddressPrefixes: []*string{new("10.0.0.0/16")}, }, DefaultPublicNatGateway: &armnetwork.SubResource{ - ID: to.Ptr(natGatewayID), + ID: new(natGatewayID), }, DhcpOptions: &armnetwork.DhcpOptions{ DNSServers: []*string{ - to.Ptr("10.0.0.1"), // IP address → stdlib.NetworkIP - to.Ptr("dns.internal"), // hostname → stdlib.NetworkDNS + new("10.0.0.1"), // IP address → stdlib.NetworkIP + new("dns.internal"), // hostname → stdlib.NetworkDNS }, }, Subnets: []*armnetwork.Subnet{ { - Name: to.Ptr("default"), + Name: new("default"), Properties: &armnetwork.SubnetPropertiesFormat{ - AddressPrefix: to.Ptr("10.0.0.0/24"), + AddressPrefix: new("10.0.0.0/24"), }, }, }, diff --git a/sources/azure/manual/network-zone_test.go b/sources/azure/manual/network-zone_test.go index c6a8d399..9c3a7a41 100644 --- a/sources/azure/manual/network-zone_test.go +++ b/sources/azure/manual/network-zone_test.go @@ -5,9 +5,9 @@ import ( "errors" "fmt" "reflect" + "slices" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dns/armdns" "go.uber.org/mock/gomock" @@ -121,7 +121,7 @@ func TestNetworkZone(t *testing.T) { // Let's test with a zone that has nil name which will cause an error zoneWithNilName := &armdns.Zone{ Name: nil, - Location: to.Ptr("eastus"), + Location: new("eastus"), Properties: &armdns.ZoneProperties{}, } @@ -231,9 +231,9 @@ func TestNetworkZone(t *testing.T) { zone1 := createAzureZone("example.com", subscriptionID, resourceGroup) zone2 := &armdns.Zone{ Name: nil, 
// Zone with nil name should be skipped - Location: to.Ptr("eastus"), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armdns.ZoneProperties{}, } @@ -339,13 +339,7 @@ func TestNetworkZone(t *testing.T) { t.Error("Expected IAMPermissions to return at least one permission") } expectedPermission := "Microsoft.Network/dnszones/read" - found := false - for _, perm := range permissions { - if perm == expectedPermission { - found = true - break - } - } + found := slices.Contains(permissions, expectedPermission) if !found { t.Errorf("Expected IAMPermissions to include %s", expectedPermission) } @@ -405,15 +399,15 @@ func TestNetworkZone(t *testing.T) { // Test zone without virtual networks zoneName := "example.com" zone := &armdns.Zone{ - Name: to.Ptr(zoneName), - Location: to.Ptr("eastus"), + Name: new(zoneName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armdns.ZoneProperties{ NameServers: []*string{ - to.Ptr("ns1.example.com"), - to.Ptr("ns2.example.com"), + new("ns1.example.com"), + new("ns2.example.com"), }, }, } @@ -481,7 +475,7 @@ func (m *MockZonesPager) More() bool { func (mr *MockZonesPagerMockRecorder) More() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "More", reflect.TypeOf((*MockZonesPager)(nil).More)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "More", reflect.TypeFor[func() bool]()) } func (m *MockZonesPager) NextPage(ctx context.Context) (armdns.ZonesClientListByResourceGroupResponse, error) { @@ -492,9 +486,9 @@ func (m *MockZonesPager) NextPage(ctx context.Context) (armdns.ZonesClientListBy return ret0, ret1 } -func (mr *MockZonesPagerMockRecorder) NextPage(ctx interface{}) *gomock.Call { +func (mr *MockZonesPagerMockRecorder) NextPage(ctx any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NextPage", 
reflect.TypeOf((*MockZonesPager)(nil).NextPage), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NextPage", reflect.TypeFor[func(ctx context.Context) (armdns.ZonesClientListByResourceGroupResponse, error)](), ctx) } // createAzureZone creates a mock Azure DNS zone for testing with all linked resources @@ -503,27 +497,27 @@ func createAzureZone(zoneName, subscriptionID, resourceGroup string) *armdns.Zon resolutionVNetID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/test-res-vnet", subscriptionID, resourceGroup) return &armdns.Zone{ - Name: to.Ptr(zoneName), - Location: to.Ptr("eastus"), + Name: new(zoneName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), - "project": to.Ptr("testing"), + "env": new("test"), + "project": new("testing"), }, Properties: &armdns.ZoneProperties{ - MaxNumberOfRecordSets: to.Ptr(int64(5000)), - NumberOfRecordSets: to.Ptr(int64(10)), + MaxNumberOfRecordSets: new(int64(5000)), + NumberOfRecordSets: new(int64(10)), NameServers: []*string{ - to.Ptr("ns1.example.com"), - to.Ptr("ns2.example.com"), + new("ns1.example.com"), + new("ns2.example.com"), }, RegistrationVirtualNetworks: []*armdns.SubResource{ { - ID: to.Ptr(registrationVNetID), + ID: new(registrationVNetID), }, }, ResolutionVirtualNetworks: []*armdns.SubResource{ { - ID: to.Ptr(resolutionVNetID), + ID: new(resolutionVNetID), }, }, }, @@ -535,20 +529,20 @@ func createAzureZoneWithDifferentScopeVNet(zoneName, subscriptionID, resourceGro registrationVNetID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/test-reg-vnet", otherSubscriptionID, otherResourceGroup) return &armdns.Zone{ - Name: to.Ptr(zoneName), - Location: to.Ptr("eastus"), + Name: new(zoneName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armdns.ZoneProperties{ - MaxNumberOfRecordSets: to.Ptr(int64(5000)), - 
NumberOfRecordSets: to.Ptr(int64(10)), + MaxNumberOfRecordSets: new(int64(5000)), + NumberOfRecordSets: new(int64(10)), NameServers: []*string{ - to.Ptr("ns1.example.com"), + new("ns1.example.com"), }, RegistrationVirtualNetworks: []*armdns.SubResource{ { - ID: to.Ptr(registrationVNetID), + ID: new(registrationVNetID), }, }, }, diff --git a/sources/azure/manual/sql-database.go b/sources/azure/manual/sql-database.go index 1de540a9..b637f0a9 100644 --- a/sources/azure/manual/sql-database.go +++ b/sources/azure/manual/sql-database.go @@ -93,13 +93,13 @@ func (s sqlDatabaseWrapper) azureSqlDatabaseToSDPItem(database *armsql.Database, } if database.Properties != nil && database.Properties.ElasticPoolID != nil { - elasticPoolName := azureshared.ExtractSQLElasticPoolNameFromID(*database.Properties.ElasticPoolID) - if elasticPoolName != "" { + elasticPoolServerName, elasticPoolName := azureshared.ExtractSQLElasticPoolInfoFromResourceID(*database.Properties.ElasticPoolID) + if elasticPoolServerName != "" && elasticPoolName != "" { sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ Query: &sdp.Query{ Type: azureshared.SQLElasticPool.String(), Method: sdp.QueryMethod_GET, - Query: elasticPoolName, + Query: shared.CompositeLookupKey(elasticPoolServerName, elasticPoolName), Scope: scope, }, }) @@ -187,15 +187,18 @@ func (s sqlDatabaseWrapper) azureSqlDatabaseToSDPItem(database *armsql.Database, }) case azureshared.SourceResourceTypeSQLElasticPool: + elasticPoolServerName := params["serverName"] elasticPoolName := params["elasticPoolName"] - sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ - Query: &sdp.Query{ - Type: azureshared.SQLElasticPool.String(), - Method: sdp.QueryMethod_GET, - Query: elasticPoolName, - Scope: scope, - }, - }) + if elasticPoolServerName != "" && elasticPoolName != "" { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: 
azureshared.SQLElasticPool.String(), + Method: sdp.QueryMethod_GET, + Query: shared.CompositeLookupKey(elasticPoolServerName, elasticPoolName), + Scope: scope, + }, + }) + } case azureshared.SourceResourceTypeUnknown: // Synapse SQL Pool and other resource types not yet supported diff --git a/sources/azure/manual/sql-database_test.go b/sources/azure/manual/sql-database_test.go index e043a58f..5e976be7 100644 --- a/sources/azure/manual/sql-database_test.go +++ b/sources/azure/manual/sql-database_test.go @@ -3,9 +3,9 @@ package manual_test import ( "context" "errors" + "slices" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/sql/armsql/v2" "go.uber.org/mock/gomock" @@ -161,10 +161,10 @@ func TestSqlDatabase(t *testing.T) { ExpectedQuery: serverName, ExpectedScope: subscriptionID + "." + resourceGroup, }, { - // SQLElasticPool link + // SQLElasticPool link (composite: serverName + elasticPoolName) ExpectedType: azureshared.SQLElasticPool.String(), ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "test-pool", + ExpectedQuery: shared.CompositeLookupKey("test-server", "test-pool"), ExpectedScope: subscriptionID + "." 
+ resourceGroup, }, { // SQLDatabaseSchema child resource link @@ -244,13 +244,13 @@ func TestSqlDatabase(t *testing.T) { database1 := createAzureSqlDatabase(serverName, "database-1", "") database2 := &armsql.Database{ Name: nil, // Database with nil name should be skipped - Location: to.Ptr("eastus"), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Sql/servers/test-server/databases/database-2"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Sql/servers/test-server/databases/database-2"), Properties: &armsql.DatabaseProperties{ - Status: to.Ptr(armsql.DatabaseStatusOnline), + Status: new(armsql.DatabaseStatusOnline), }, } @@ -363,13 +363,7 @@ func TestSqlDatabase(t *testing.T) { t.Error("Expected IAMPermissions to return at least one permission") } expectedPermission := "Microsoft.Sql/servers/databases/read" - found := false - for _, perm := range permissions { - if perm == expectedPermission { - found = true - break - } - } + found := slices.Contains(permissions, expectedPermission) if !found { t.Errorf("Expected IAMPermissions to include %s", expectedPermission) } @@ -410,20 +404,20 @@ func createAzureSqlDatabase(serverName, databaseName, elasticPoolID string) *arm databaseID := "/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Sql/servers/" + serverName + "/databases/" + databaseName db := &armsql.Database{ - Name: to.Ptr(databaseName), - Location: to.Ptr("eastus"), + Name: new(databaseName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), - "project": to.Ptr("testing"), + "env": new("test"), + "project": new("testing"), }, - ID: to.Ptr(databaseID), + ID: new(databaseID), Properties: &armsql.DatabaseProperties{ - Status: to.Ptr(armsql.DatabaseStatusOnline), + Status: new(armsql.DatabaseStatusOnline), }, } if elasticPoolID 
!= "" { - db.Properties.ElasticPoolID = to.Ptr(elasticPoolID) + db.Properties.ElasticPoolID = new(elasticPoolID) } return db diff --git a/sources/azure/manual/sql-elastic-pool.go b/sources/azure/manual/sql-elastic-pool.go new file mode 100644 index 00000000..85423f20 --- /dev/null +++ b/sources/azure/manual/sql-elastic-pool.go @@ -0,0 +1,260 @@ +package manual + +import ( + "context" + "errors" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/sql/armsql/v2" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" +) + +var SQLElasticPoolLookupByName = shared.NewItemTypeLookup("name", azureshared.SQLElasticPool) + +type sqlElasticPoolWrapper struct { + client clients.SqlElasticPoolClient + + *azureshared.MultiResourceGroupBase +} + +func NewSqlElasticPool(client clients.SqlElasticPoolClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.SearchableWrapper { + return &sqlElasticPoolWrapper{ + client: client, + MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_DATABASE, + azureshared.SQLElasticPool, + ), + } +} + +func (s sqlElasticPoolWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 2 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Get requires 2 query parts: serverName and elasticPoolName", + Scope: scope, + ItemType: s.Type(), + } + } + serverName := queryParts[0] + elasticPoolName := queryParts[1] + if elasticPoolName == "" { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "elasticPoolName cannot be empty", + Scope: scope, + ItemType: 
s.Type(), + } + } + + rgScope, err := s.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + resp, err := s.client.Get(ctx, rgScope.ResourceGroup, serverName, elasticPoolName) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + return s.azureSqlElasticPoolToSDPItem(&resp.ElasticPool, serverName, elasticPoolName, scope) +} + +func (s sqlElasticPoolWrapper) azureSqlElasticPoolToSDPItem(pool *armsql.ElasticPool, serverName, elasticPoolName, scope string) (*sdp.Item, *sdp.QueryError) { + attributes, err := shared.ToAttributesWithExclude(pool, "tags") + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + err = attributes.Set("uniqueAttr", shared.CompositeLookupKey(serverName, elasticPoolName)) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + sdpItem := &sdp.Item{ + Type: azureshared.SQLElasticPool.String(), + UniqueAttribute: "uniqueAttr", + Attributes: attributes, + Scope: scope, + Tags: azureshared.ConvertAzureTags(pool.Tags), + } + + // Link to parent SQL Server (from resource ID or known server name) + if pool.ID != nil { + extractedServerName := azureshared.ExtractPathParamsFromResourceID(*pool.ID, []string{"servers"}) + if len(extractedServerName) >= 1 && extractedServerName[0] != "" { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.SQLServer.String(), + Method: sdp.QueryMethod_GET, + Query: extractedServerName[0], + Scope: scope, + }, + }) + } + } + if len(sdpItem.GetLinkedItemQueries()) == 0 { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.SQLServer.String(), + Method: sdp.QueryMethod_GET, + Query: serverName, + Scope: scope, + }, + }) + } + + // Link to Maintenance Configuration when set + if pool.Properties != nil && 
pool.Properties.MaintenanceConfigurationID != nil && *pool.Properties.MaintenanceConfigurationID != "" { + configName := azureshared.ExtractResourceName(*pool.Properties.MaintenanceConfigurationID) + if configName != "" { + linkedScope := azureshared.ExtractScopeFromResourceID(*pool.Properties.MaintenanceConfigurationID) + if linkedScope == "" && strings.Contains(*pool.Properties.MaintenanceConfigurationID, "publicMaintenanceConfigurations") { + linkedScope = azureshared.ExtractSubscriptionIDFromResourceID(*pool.Properties.MaintenanceConfigurationID) + } + if linkedScope == "" { + linkedScope = scope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.MaintenanceMaintenanceConfiguration.String(), + Method: sdp.QueryMethod_GET, + Query: configName, + Scope: linkedScope, + }, + }) + } + } + + // Link to SQL Databases (child resource; list by server returns all databases; those in this pool reference this pool via ElasticPoolID) + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.SQLDatabase.String(), + Method: sdp.QueryMethod_SEARCH, + Query: serverName, + Scope: scope, + }, + }) + + return sdpItem, nil +} + +func (s sqlElasticPoolWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + SQLServerLookupByName, + SQLElasticPoolLookupByName, + } +} + +func (s sqlElasticPoolWrapper) Search(ctx context.Context, scope string, queryParts ...string) ([]*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 1 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Search requires 1 query part: serverName", + Scope: scope, + ItemType: s.Type(), + } + } + serverName := queryParts[0] + + rgScope, err := s.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + pager := s.client.ListByServer(ctx, 
rgScope.ResourceGroup, serverName) + + var items []*sdp.Item + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + for _, pool := range page.Value { + if pool.Name == nil { + continue + } + item, sdpErr := s.azureSqlElasticPoolToSDPItem(pool, serverName, *pool.Name, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + + return items, nil +} + +func (s sqlElasticPoolWrapper) SearchStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string, queryParts ...string) { + if len(queryParts) < 1 { + stream.SendError(azureshared.QueryError(errors.New("Search requires 1 query part: serverName"), scope, s.Type())) + return + } + serverName := queryParts[0] + + rgScope, err := s.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, s.Type())) + return + } + pager := s.client.ListByServer(ctx, rgScope.ResourceGroup, serverName) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, s.Type())) + return + } + for _, pool := range page.Value { + if pool.Name == nil { + continue + } + item, sdpErr := s.azureSqlElasticPoolToSDPItem(pool, serverName, *pool.Name, scope) + if sdpErr != nil { + stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) + stream.SendItem(item) + } + } +} + +func (s sqlElasticPoolWrapper) SearchLookups() []sources.ItemTypeLookups { + return []sources.ItemTypeLookups{ + { + SQLServerLookupByName, + }, + } +} + +func (s sqlElasticPoolWrapper) PotentialLinks() map[shared.ItemType]bool { + return map[shared.ItemType]bool{ + azureshared.SQLServer: true, + azureshared.SQLDatabase: true, + azureshared.MaintenanceMaintenanceConfiguration: true, + } +} + +func (s sqlElasticPoolWrapper) 
TerraformMappings() []*sdp.TerraformMapping { + return []*sdp.TerraformMapping{ + { + TerraformMethod: sdp.QueryMethod_SEARCH, + TerraformQueryMap: "azurerm_mssql_elasticpool.id", + }, + } +} + +func (s sqlElasticPoolWrapper) IAMPermissions() []string { + return []string{ + "Microsoft.Sql/servers/elasticPools/read", + } +} + +func (s sqlElasticPoolWrapper) PredefinedRole() string { + return "Reader" +} diff --git a/sources/azure/manual/sql-elastic-pool_test.go b/sources/azure/manual/sql-elastic-pool_test.go new file mode 100644 index 00000000..189eabca --- /dev/null +++ b/sources/azure/manual/sql-elastic-pool_test.go @@ -0,0 +1,320 @@ +package manual_test + +import ( + "context" + "errors" + "slices" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/sql/armsql/v2" + "go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + "github.com/overmindtech/cli/sources/shared" +) + +type mockSqlElasticPoolPager struct { + pages []armsql.ElasticPoolsClientListByServerResponse + index int +} + +func (m *mockSqlElasticPoolPager) More() bool { + return m.index < len(m.pages) +} + +func (m *mockSqlElasticPoolPager) NextPage(ctx context.Context) (armsql.ElasticPoolsClientListByServerResponse, error) { + if m.index >= len(m.pages) { + return armsql.ElasticPoolsClientListByServerResponse{}, errors.New("no more pages") + } + page := m.pages[m.index] + m.index++ + return page, nil +} + +type errorSqlElasticPoolPager struct{} + +func (e *errorSqlElasticPoolPager) More() bool { + return true +} + +func (e *errorSqlElasticPoolPager) NextPage(ctx context.Context) 
(armsql.ElasticPoolsClientListByServerResponse, error) { + return armsql.ElasticPoolsClientListByServerResponse{}, errors.New("pager error") +} + +type testSqlElasticPoolClient struct { + *mocks.MockSqlElasticPoolClient + pager clients.SqlElasticPoolPager +} + +func (t *testSqlElasticPoolClient) ListByServer(ctx context.Context, resourceGroupName, serverName string) clients.SqlElasticPoolPager { + return t.pager +} + +func TestSqlElasticPool(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + serverName := "test-server" + elasticPoolName := "test-pool" + + t.Run("Get", func(t *testing.T) { + pool := createAzureSqlElasticPool(serverName, elasticPoolName) + + mockClient := mocks.NewMockSqlElasticPoolClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, serverName, elasticPoolName).Return( + armsql.ElasticPoolsClientGetResponse{ + ElasticPool: *pool, + }, nil) + + wrapper := manual.NewSqlElasticPool(&testSqlElasticPoolClient{MockSqlElasticPoolClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(serverName, elasticPoolName) + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.SQLElasticPool.String() { + t.Errorf("Expected type %s, got %s", azureshared.SQLElasticPool.String(), sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", sdpItem.GetUniqueAttribute()) + } + + expectedUniqueAttrValue := shared.CompositeLookupKey(serverName, elasticPoolName) + if sdpItem.UniqueAttributeValue() != expectedUniqueAttrValue { + t.Errorf("Expected unique attribute value %s, got 
%s", expectedUniqueAttrValue, sdpItem.UniqueAttributeValue()) + } + + if sdpItem.GetScope() != subscriptionID+"."+resourceGroup { + t.Errorf("Expected scope %s, got %s", subscriptionID+"."+resourceGroup, sdpItem.GetScope()) + } + + if err := sdpItem.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + + t.Run("StaticTests", func(t *testing.T) { + queryTests := shared.QueryTests{ + { + ExpectedType: azureshared.SQLServer.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: serverName, + ExpectedScope: subscriptionID + "." + resourceGroup, + }, + { + ExpectedType: azureshared.SQLDatabase.String(), + ExpectedMethod: sdp.QueryMethod_SEARCH, + ExpectedQuery: serverName, + ExpectedScope: subscriptionID + "." + resourceGroup, + }, + } + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("GetWithInsufficientQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockSqlElasticPoolClient(ctrl) + wrapper := manual.NewSqlElasticPool(&testSqlElasticPoolClient{MockSqlElasticPoolClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], serverName, true) + if qErr == nil { + t.Error("Expected error when providing only serverName (1 query part), but got nil") + } + }) + + t.Run("GetWithEmptyName", func(t *testing.T) { + mockClient := mocks.NewMockSqlElasticPoolClient(ctrl) + wrapper := manual.NewSqlElasticPool(&testSqlElasticPoolClient{MockSqlElasticPoolClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(serverName, "") + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when elastic pool 
name is empty, but got nil") + } + }) + + t.Run("Search", func(t *testing.T) { + pool1 := createAzureSqlElasticPool(serverName, "pool-1") + pool2 := createAzureSqlElasticPool(serverName, "pool-2") + + mockClient := mocks.NewMockSqlElasticPoolClient(ctrl) + pager := &mockSqlElasticPoolPager{ + pages: []armsql.ElasticPoolsClientListByServerResponse{ + { + ElasticPoolListResult: armsql.ElasticPoolListResult{ + Value: []*armsql.ElasticPool{pool1, pool2}, + }, + }, + }, + } + + testClient := &testSqlElasticPoolClient{ + MockSqlElasticPoolClient: mockClient, + pager: pager, + } + + wrapper := manual.NewSqlElasticPool(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + items, qErr := searchable.Search(ctx, wrapper.Scopes()[0], serverName, true) + if qErr != nil { + t.Fatalf("Expected no error from Search, got: %v", qErr) + } + if len(items) != 2 { + t.Errorf("Expected 2 items from Search, got %d", len(items)) + } + }) + + t.Run("SearchStream", func(t *testing.T) { + pool := createAzureSqlElasticPool(serverName, elasticPoolName) + + mockClient := mocks.NewMockSqlElasticPoolClient(ctrl) + pager := &mockSqlElasticPoolPager{ + pages: []armsql.ElasticPoolsClientListByServerResponse{ + { + ElasticPoolListResult: armsql.ElasticPoolListResult{ + Value: []*armsql.ElasticPool{pool}, + }, + }, + }, + } + + testClient := &testSqlElasticPoolClient{ + MockSqlElasticPoolClient: mockClient, + pager: pager, + } + wrapper := manual.NewSqlElasticPool(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchStreamable, ok := adapter.(discovery.SearchStreamableAdapter) + if !ok { + 
t.Fatalf("Adapter does not support SearchStream operation") + } + + stream := discovery.NewRecordingQueryResultStream() + searchStreamable.SearchStream(ctx, wrapper.Scopes()[0], serverName, true, stream) + items := stream.GetItems() + errs := stream.GetErrors() + if len(errs) > 0 { + t.Fatalf("Expected no errors from SearchStream, got: %v", errs) + } + if len(items) != 1 { + t.Errorf("Expected 1 item from SearchStream, got %d", len(items)) + } + }) + + t.Run("SearchWithInsufficientQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockSqlElasticPoolClient(ctrl) + wrapper := manual.NewSqlElasticPool(&testSqlElasticPoolClient{MockSqlElasticPoolClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + _, qErr := wrapper.Search(ctx, wrapper.Scopes()[0]) + if qErr == nil { + t.Error("Expected error when providing no query parts, but got nil") + } + }) + + t.Run("ErrorHandling_Get", func(t *testing.T) { + expectedErr := errors.New("elastic pool not found") + + mockClient := mocks.NewMockSqlElasticPoolClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, serverName, "nonexistent-pool").Return( + armsql.ElasticPoolsClientGetResponse{}, expectedErr) + + wrapper := manual.NewSqlElasticPool(&testSqlElasticPoolClient{MockSqlElasticPoolClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(serverName, "nonexistent-pool") + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when getting non-existent elastic pool, but got nil") + } + }) + + t.Run("ErrorHandling_Search", func(t *testing.T) { + mockClient := mocks.NewMockSqlElasticPoolClient(ctrl) + errorPager := &errorSqlElasticPoolPager{} + testClient := &testSqlElasticPoolClient{ + MockSqlElasticPoolClient: mockClient, + 
pager: errorPager, + } + + wrapper := manual.NewSqlElasticPool(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + _, qErr := wrapper.Search(ctx, wrapper.Scopes()[0], serverName) + if qErr == nil { + t.Error("Expected error from Search when pager returns error, but got nil") + } + }) + + t.Run("InterfaceCompliance", func(t *testing.T) { + mockClient := mocks.NewMockSqlElasticPoolClient(ctrl) + wrapper := manual.NewSqlElasticPool(&testSqlElasticPoolClient{MockSqlElasticPoolClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + w := wrapper.(sources.Wrapper) + + permissions := w.IAMPermissions() + if len(permissions) == 0 { + t.Error("Expected IAMPermissions to return at least one permission") + } + expectedPermission := "Microsoft.Sql/servers/elasticPools/read" + if !slices.Contains(permissions, expectedPermission) { + t.Errorf("Expected IAMPermissions to include %s", expectedPermission) + } + + potentialLinks := w.PotentialLinks() + if !potentialLinks[azureshared.SQLServer] { + t.Error("Expected PotentialLinks to include SQLServer") + } + if !potentialLinks[azureshared.SQLDatabase] { + t.Error("Expected PotentialLinks to include SQLDatabase") + } + if !potentialLinks[azureshared.MaintenanceMaintenanceConfiguration] { + t.Error("Expected PotentialLinks to include MaintenanceMaintenanceConfiguration") + } + + mappings := w.TerraformMappings() + if len(mappings) == 0 { + t.Error("Expected TerraformMappings to return at least one mapping") + } + foundMapping := false + for _, mapping := range mappings { + if mapping.GetTerraformQueryMap() == "azurerm_mssql_elasticpool.id" { + foundMapping = true + break + } + } + if !foundMapping { + t.Error("Expected TerraformMappings to include 'azurerm_mssql_elasticpool.id' mapping") + } + }) +} + +func createAzureSqlElasticPool(serverName, elasticPoolName string) *armsql.ElasticPool { + poolID := 
"/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Sql/servers/" + serverName + "/elasticPools/" + elasticPoolName + state := armsql.ElasticPoolStateReady + return &armsql.ElasticPool{ + Name: &elasticPoolName, + ID: &poolID, + Properties: &armsql.ElasticPoolProperties{ + State: &state, + }, + } +} diff --git a/sources/azure/manual/sql-server-firewall-rule.go b/sources/azure/manual/sql-server-firewall-rule.go new file mode 100644 index 00000000..2640c54b --- /dev/null +++ b/sources/azure/manual/sql-server-firewall-rule.go @@ -0,0 +1,250 @@ +package manual + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/sql/armsql/v2" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" + "github.com/overmindtech/cli/sources/stdlib" +) + +var SQLServerFirewallRuleLookupByName = shared.NewItemTypeLookup("name", azureshared.SQLServerFirewallRule) + +type sqlServerFirewallRuleWrapper struct { + client clients.SqlServerFirewallRuleClient + + *azureshared.MultiResourceGroupBase +} + +func NewSqlServerFirewallRule(client clients.SqlServerFirewallRuleClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.SearchableWrapper { + return &sqlServerFirewallRuleWrapper{ + client: client, + MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_DATABASE, + azureshared.SQLServerFirewallRule, + ), + } +} + +func (s sqlServerFirewallRuleWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 2 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Get requires 2 query 
parts: serverName and firewallRuleName", + Scope: scope, + ItemType: s.Type(), + } + } + serverName := queryParts[0] + firewallRuleName := queryParts[1] + if firewallRuleName == "" { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "firewallRuleName cannot be empty", + Scope: scope, + ItemType: s.Type(), + } + } + + rgScope, err := s.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + resp, err := s.client.Get(ctx, rgScope.ResourceGroup, serverName, firewallRuleName) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + return s.azureSqlServerFirewallRuleToSDPItem(&resp.FirewallRule, serverName, firewallRuleName, scope) +} + +func (s sqlServerFirewallRuleWrapper) azureSqlServerFirewallRuleToSDPItem(rule *armsql.FirewallRule, serverName, firewallRuleName, scope string) (*sdp.Item, *sdp.QueryError) { + attributes, err := shared.ToAttributesWithExclude(rule, "tags") + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + err = attributes.Set("uniqueAttr", shared.CompositeLookupKey(serverName, firewallRuleName)) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + sdpItem := &sdp.Item{ + Type: azureshared.SQLServerFirewallRule.String(), + UniqueAttribute: "uniqueAttr", + Attributes: attributes, + Scope: scope, + Tags: nil, // FirewallRule has no Tags in the Azure SDK + } + + // Link to parent SQL Server (from resource ID or known server name) + if rule.ID != nil { + extractedServerName := azureshared.ExtractSQLServerNameFromDatabaseID(*rule.ID) + if extractedServerName != "" { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.SQLServer.String(), + Method: sdp.QueryMethod_GET, + Query: extractedServerName, + Scope: scope, + }, + }) + } + } else { + sdpItem.LinkedItemQueries = 
append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.SQLServer.String(), + Method: sdp.QueryMethod_GET, + Query: serverName, + Scope: scope, + }, + }) + } + + // Link to stdlib IP items for StartIPAddress and EndIPAddress (global scope, GET) + if rule.Properties != nil { + if rule.Properties.StartIPAddress != nil && *rule.Properties.StartIPAddress != "" { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkIP.String(), + Method: sdp.QueryMethod_GET, + Query: *rule.Properties.StartIPAddress, + Scope: "global", + }, + }) + } + if rule.Properties.EndIPAddress != nil && *rule.Properties.EndIPAddress != "" && (rule.Properties.StartIPAddress == nil || *rule.Properties.EndIPAddress != *rule.Properties.StartIPAddress) { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkIP.String(), + Method: sdp.QueryMethod_GET, + Query: *rule.Properties.EndIPAddress, + Scope: "global", + }, + }) + } + } + + return sdpItem, nil +} + +func (s sqlServerFirewallRuleWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + SQLServerLookupByName, + SQLServerFirewallRuleLookupByName, + } +} + +func (s sqlServerFirewallRuleWrapper) Search(ctx context.Context, scope string, queryParts ...string) ([]*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 1 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Search requires 1 query part: serverName", + Scope: scope, + ItemType: s.Type(), + } + } + serverName := queryParts[0] + + rgScope, err := s.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + pager := s.client.ListByServer(ctx, rgScope.ResourceGroup, serverName) + + var items []*sdp.Item + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, 
azureshared.QueryError(err, scope, s.Type()) + } + for _, rule := range page.Value { + if rule.Name == nil { + continue + } + item, sdpErr := s.azureSqlServerFirewallRuleToSDPItem(rule, serverName, *rule.Name, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + + return items, nil +} + +func (s sqlServerFirewallRuleWrapper) SearchStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string, queryParts ...string) { + if len(queryParts) < 1 { + stream.SendError(azureshared.QueryError(errors.New("Search requires 1 query part: serverName"), scope, s.Type())) + return + } + serverName := queryParts[0] + + rgScope, err := s.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, s.Type())) + return + } + pager := s.client.ListByServer(ctx, rgScope.ResourceGroup, serverName) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, s.Type())) + return + } + for _, rule := range page.Value { + if rule.Name == nil { + continue + } + item, sdpErr := s.azureSqlServerFirewallRuleToSDPItem(rule, serverName, *rule.Name, scope) + if sdpErr != nil { + stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) + stream.SendItem(item) + } + } +} + +func (s sqlServerFirewallRuleWrapper) SearchLookups() []sources.ItemTypeLookups { + return []sources.ItemTypeLookups{ + { + SQLServerLookupByName, + }, + } +} + +func (s sqlServerFirewallRuleWrapper) PotentialLinks() map[shared.ItemType]bool { + return map[shared.ItemType]bool{ + azureshared.SQLServer: true, + stdlib.NetworkIP: true, + } +} + +func (s sqlServerFirewallRuleWrapper) TerraformMappings() []*sdp.TerraformMapping { + return []*sdp.TerraformMapping{ + { + TerraformMethod: sdp.QueryMethod_SEARCH, + TerraformQueryMap: "azurerm_mssql_firewall_rule.id", + }, + 
} +} + +func (s sqlServerFirewallRuleWrapper) IAMPermissions() []string { + return []string{ + "Microsoft.Sql/servers/firewallRules/read", + } +} + +func (s sqlServerFirewallRuleWrapper) PredefinedRole() string { + return "Reader" +} diff --git a/sources/azure/manual/sql-server-firewall-rule_test.go b/sources/azure/manual/sql-server-firewall-rule_test.go new file mode 100644 index 00000000..852d889b --- /dev/null +++ b/sources/azure/manual/sql-server-firewall-rule_test.go @@ -0,0 +1,324 @@ +package manual_test + +import ( + "context" + "errors" + "slices" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/sql/armsql/v2" + "go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + "github.com/overmindtech/cli/sources/shared" + "github.com/overmindtech/cli/sources/stdlib" +) + +type mockSqlServerFirewallRulePager struct { + pages []armsql.FirewallRulesClientListByServerResponse + index int +} + +func (m *mockSqlServerFirewallRulePager) More() bool { + return m.index < len(m.pages) +} + +func (m *mockSqlServerFirewallRulePager) NextPage(ctx context.Context) (armsql.FirewallRulesClientListByServerResponse, error) { + if m.index >= len(m.pages) { + return armsql.FirewallRulesClientListByServerResponse{}, errors.New("no more pages") + } + page := m.pages[m.index] + m.index++ + return page, nil +} + +type errorSqlServerFirewallRulePager struct{} + +func (e *errorSqlServerFirewallRulePager) More() bool { + return true +} + +func (e *errorSqlServerFirewallRulePager) NextPage(ctx context.Context) (armsql.FirewallRulesClientListByServerResponse, error) { + return 
armsql.FirewallRulesClientListByServerResponse{}, errors.New("pager error") +} + +type testSqlServerFirewallRuleClient struct { + *mocks.MockSqlServerFirewallRuleClient + pager clients.SqlServerFirewallRulePager +} + +func (t *testSqlServerFirewallRuleClient) ListByServer(ctx context.Context, resourceGroupName, serverName string) clients.SqlServerFirewallRulePager { + return t.pager +} + +func TestSqlServerFirewallRule(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + serverName := "test-server" + firewallRuleName := "test-rule" + + t.Run("Get", func(t *testing.T) { + rule := createAzureSqlServerFirewallRule(serverName, firewallRuleName) + + mockClient := mocks.NewMockSqlServerFirewallRuleClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, serverName, firewallRuleName).Return( + armsql.FirewallRulesClientGetResponse{ + FirewallRule: *rule, + }, nil) + + wrapper := manual.NewSqlServerFirewallRule(&testSqlServerFirewallRuleClient{MockSqlServerFirewallRuleClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(serverName, firewallRuleName) + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.SQLServerFirewallRule.String() { + t.Errorf("Expected type %s, got %s", azureshared.SQLServerFirewallRule, sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", sdpItem.GetUniqueAttribute()) + } + + expectedUniqueAttrValue := shared.CompositeLookupKey(serverName, firewallRuleName) + if sdpItem.UniqueAttributeValue() != expectedUniqueAttrValue { + t.Errorf("Expected unique 
attribute value %s, got %s", expectedUniqueAttrValue, sdpItem.UniqueAttributeValue()) + } + + if sdpItem.GetScope() != subscriptionID+"."+resourceGroup { + t.Errorf("Expected scope %s, got %s", subscriptionID+"."+resourceGroup, sdpItem.GetScope()) + } + + if err := sdpItem.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + + t.Run("StaticTests", func(t *testing.T) { + queryTests := shared.QueryTests{ + { + ExpectedType: azureshared.SQLServer.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: serverName, + ExpectedScope: subscriptionID + "." + resourceGroup, + }, + { + ExpectedType: stdlib.NetworkIP.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "0.0.0.0", + ExpectedScope: "global", + }, + { + ExpectedType: stdlib.NetworkIP.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "255.255.255.255", + ExpectedScope: "global", + }, + } + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("GetWithInsufficientQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockSqlServerFirewallRuleClient(ctrl) + wrapper := manual.NewSqlServerFirewallRule(&testSqlServerFirewallRuleClient{MockSqlServerFirewallRuleClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], serverName, true) + if qErr == nil { + t.Error("Expected error when providing only serverName (1 query part), but got nil") + } + }) + + t.Run("GetWithEmptyName", func(t *testing.T) { + mockClient := mocks.NewMockSqlServerFirewallRuleClient(ctrl) + wrapper := manual.NewSqlServerFirewallRule(&testSqlServerFirewallRuleClient{MockSqlServerFirewallRuleClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, 
sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(serverName, "") + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when firewall rule name is empty, but got nil") + } + }) + + t.Run("Search", func(t *testing.T) { + rule1 := createAzureSqlServerFirewallRule(serverName, "rule1") + rule2 := createAzureSqlServerFirewallRule(serverName, "rule2") + + mockClient := mocks.NewMockSqlServerFirewallRuleClient(ctrl) + pager := &mockSqlServerFirewallRulePager{ + pages: []armsql.FirewallRulesClientListByServerResponse{ + { + FirewallRuleListResult: armsql.FirewallRuleListResult{ + Value: []*armsql.FirewallRule{rule1, rule2}, + }, + }, + }, + } + + testClient := &testSqlServerFirewallRuleClient{ + MockSqlServerFirewallRuleClient: mockClient, + pager: pager, + } + wrapper := manual.NewSqlServerFirewallRule(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + items, qErr := searchable.Search(ctx, wrapper.Scopes()[0], serverName, true) + if qErr != nil { + t.Fatalf("Expected no error from Search, got: %v", qErr) + } + if len(items) != 2 { + t.Errorf("Expected 2 items from Search, got %d", len(items)) + } + }) + + t.Run("SearchStream", func(t *testing.T) { + rule1 := createAzureSqlServerFirewallRule(serverName, "rule1") + + mockClient := mocks.NewMockSqlServerFirewallRuleClient(ctrl) + pager := &mockSqlServerFirewallRulePager{ + pages: []armsql.FirewallRulesClientListByServerResponse{ + { + FirewallRuleListResult: armsql.FirewallRuleListResult{ + Value: []*armsql.FirewallRule{rule1}, + }, + }, + }, + } + + testClient := &testSqlServerFirewallRuleClient{ + MockSqlServerFirewallRuleClient: mockClient, + pager: pager, + } + wrapper := 
manual.NewSqlServerFirewallRule(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchStreamable, ok := adapter.(discovery.SearchStreamableAdapter) + if !ok { + t.Fatalf("Adapter does not support SearchStream operation") + } + + stream := discovery.NewRecordingQueryResultStream() + searchStreamable.SearchStream(ctx, wrapper.Scopes()[0], serverName, true, stream) + items := stream.GetItems() + errs := stream.GetErrors() + if len(errs) > 0 { + t.Fatalf("Expected no errors from SearchStream, got: %v", errs) + } + if len(items) != 1 { + t.Errorf("Expected 1 item from SearchStream, got %d", len(items)) + } + }) + + t.Run("SearchWithInsufficientQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockSqlServerFirewallRuleClient(ctrl) + wrapper := manual.NewSqlServerFirewallRule(&testSqlServerFirewallRuleClient{MockSqlServerFirewallRuleClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + _, qErr := wrapper.Search(ctx, wrapper.Scopes()[0]) + if qErr == nil { + t.Error("Expected error when providing no query parts, but got nil") + } + }) + + t.Run("ErrorHandling_Get", func(t *testing.T) { + expectedErr := errors.New("firewall rule not found") + + mockClient := mocks.NewMockSqlServerFirewallRuleClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, serverName, "nonexistent-rule").Return( + armsql.FirewallRulesClientGetResponse{}, expectedErr) + + wrapper := manual.NewSqlServerFirewallRule(&testSqlServerFirewallRuleClient{MockSqlServerFirewallRuleClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(serverName, "nonexistent-rule") + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], 
query, true) + if qErr == nil { + t.Error("Expected error when getting non-existent firewall rule, but got nil") + } + }) + + t.Run("ErrorHandling_Search", func(t *testing.T) { + mockClient := mocks.NewMockSqlServerFirewallRuleClient(ctrl) + errorPager := &errorSqlServerFirewallRulePager{} + testClient := &testSqlServerFirewallRuleClient{ + MockSqlServerFirewallRuleClient: mockClient, + pager: errorPager, + } + + wrapper := manual.NewSqlServerFirewallRule(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + _, qErr := wrapper.Search(ctx, wrapper.Scopes()[0], serverName) + if qErr == nil { + t.Error("Expected error from Search when pager returns error, but got nil") + } + }) + + t.Run("InterfaceCompliance", func(t *testing.T) { + mockClient := mocks.NewMockSqlServerFirewallRuleClient(ctrl) + wrapper := manual.NewSqlServerFirewallRule(&testSqlServerFirewallRuleClient{MockSqlServerFirewallRuleClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + w := wrapper.(sources.Wrapper) + + permissions := w.IAMPermissions() + if len(permissions) == 0 { + t.Error("Expected IAMPermissions to return at least one permission") + } + expectedPermission := "Microsoft.Sql/servers/firewallRules/read" + found := slices.Contains(permissions, expectedPermission) + if !found { + t.Errorf("Expected IAMPermissions to include %s", expectedPermission) + } + + potentialLinks := w.PotentialLinks() + if !potentialLinks[azureshared.SQLServer] { + t.Error("Expected PotentialLinks to include SQLServer") + } + if !potentialLinks[stdlib.NetworkIP] { + t.Error("Expected PotentialLinks to include stdlib.NetworkIP") + } + + mappings := w.TerraformMappings() + if len(mappings) == 0 { + t.Error("Expected TerraformMappings to return at least one mapping") + } + foundMapping := false + for _, mapping := range mappings { + if mapping.GetTerraformQueryMap() == 
"azurerm_mssql_firewall_rule.id" { + foundMapping = true + break + } + } + if !foundMapping { + t.Error("Expected TerraformMappings to include 'azurerm_mssql_firewall_rule.id' mapping") + } + }) +} + +func createAzureSqlServerFirewallRule(serverName, firewallRuleName string) *armsql.FirewallRule { + ruleID := "/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Sql/servers/" + serverName + "/firewallRules/" + firewallRuleName + return &armsql.FirewallRule{ + Name: new(firewallRuleName), + ID: new(ruleID), + Properties: &armsql.ServerFirewallRuleProperties{ + StartIPAddress: new("0.0.0.0"), + EndIPAddress: new("255.255.255.255"), + }, + } +} diff --git a/sources/azure/manual/sql-server-private-endpoint-connection.go b/sources/azure/manual/sql-server-private-endpoint-connection.go new file mode 100644 index 00000000..9665cb3c --- /dev/null +++ b/sources/azure/manual/sql-server-private-endpoint-connection.go @@ -0,0 +1,236 @@ +package manual + +import ( + "context" + "errors" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/sql/armsql/v2" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" +) + +var SQLServerPrivateEndpointConnectionLookupByName = shared.NewItemTypeLookup("name", azureshared.SQLServerPrivateEndpointConnection) + +type sqlServerPrivateEndpointConnectionWrapper struct { + client clients.SQLServerPrivateEndpointConnectionsClient + + *azureshared.MultiResourceGroupBase +} + +// NewSQLServerPrivateEndpointConnection returns a SearchableWrapper for Azure SQL server private endpoint connections. 
+func NewSQLServerPrivateEndpointConnection(client clients.SQLServerPrivateEndpointConnectionsClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.SearchableWrapper { + return &sqlServerPrivateEndpointConnectionWrapper{ + client: client, + MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_DATABASE, + azureshared.SQLServerPrivateEndpointConnection, + ), + } +} + +func (s sqlServerPrivateEndpointConnectionWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 2 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Get requires 2 query parts: serverName and privateEndpointConnectionName", + Scope: scope, + ItemType: s.Type(), + } + } + serverName := queryParts[0] + connectionName := queryParts[1] + + rgScope, err := s.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + resp, err := s.client.Get(ctx, rgScope.ResourceGroup, serverName, connectionName) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + item, sdpErr := s.azurePrivateEndpointConnectionToSDPItem(&resp.PrivateEndpointConnection, serverName, connectionName, scope) + if sdpErr != nil { + return nil, sdpErr + } + return item, nil +} + +func (s sqlServerPrivateEndpointConnectionWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + SQLServerLookupByName, + SQLServerPrivateEndpointConnectionLookupByName, + } +} + +func (s sqlServerPrivateEndpointConnectionWrapper) Search(ctx context.Context, scope string, queryParts ...string) ([]*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 1 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Search requires 1 query part: serverName", + Scope: scope, + ItemType: s.Type(), + } + } + serverName := queryParts[0] + + rgScope, err 
:= s.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + pager := s.client.ListByServer(ctx, rgScope.ResourceGroup, serverName) + + var items []*sdp.Item + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + for _, conn := range page.Value { + if conn == nil || conn.Name == nil { + continue + } + + item, sdpErr := s.azurePrivateEndpointConnectionToSDPItem(conn, serverName, *conn.Name, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + + return items, nil +} + +func (s sqlServerPrivateEndpointConnectionWrapper) SearchStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string, queryParts ...string) { + if len(queryParts) < 1 { + stream.SendError(azureshared.QueryError(errors.New("Search requires 1 query part: serverName"), scope, s.Type())) + return + } + serverName := queryParts[0] + + rgScope, err := s.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, s.Type())) + return + } + pager := s.client.ListByServer(ctx, rgScope.ResourceGroup, serverName) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, s.Type())) + return + } + for _, conn := range page.Value { + if conn == nil || conn.Name == nil { + continue + } + item, sdpErr := s.azurePrivateEndpointConnectionToSDPItem(conn, serverName, *conn.Name, scope) + if sdpErr != nil { + stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) + stream.SendItem(item) + } + } +} + +func (s sqlServerPrivateEndpointConnectionWrapper) SearchLookups() []sources.ItemTypeLookups { + return []sources.ItemTypeLookups{ + { + SQLServerLookupByName, + }, + } +} + +func (s 
sqlServerPrivateEndpointConnectionWrapper) PotentialLinks() map[shared.ItemType]bool { + return map[shared.ItemType]bool{ + azureshared.SQLServer: true, + azureshared.NetworkPrivateEndpoint: true, + } +} + +func (s sqlServerPrivateEndpointConnectionWrapper) azurePrivateEndpointConnectionToSDPItem(conn *armsql.PrivateEndpointConnection, serverName, connectionName, scope string) (*sdp.Item, *sdp.QueryError) { + attributes, err := shared.ToAttributesWithExclude(conn) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + err = attributes.Set("uniqueAttr", shared.CompositeLookupKey(serverName, connectionName)) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + sdpItem := &sdp.Item{ + Type: azureshared.SQLServerPrivateEndpointConnection.String(), + UniqueAttribute: "uniqueAttr", + Attributes: attributes, + Scope: scope, + } + + // Health from provisioning state (armsql uses PrivateEndpointProvisioningState enum) + if conn.Properties != nil && conn.Properties.ProvisioningState != nil { + state := strings.ToLower(string(*conn.Properties.ProvisioningState)) + switch state { + case "ready": + sdpItem.Health = sdp.Health_HEALTH_OK.Enum() + case "approving", "dropping": + sdpItem.Health = sdp.Health_HEALTH_PENDING.Enum() + case "failed", "rejecting": + sdpItem.Health = sdp.Health_HEALTH_ERROR.Enum() + default: + sdpItem.Health = sdp.Health_HEALTH_UNKNOWN.Enum() + } + } + + // Link to parent SQL Server + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.SQLServer.String(), + Method: sdp.QueryMethod_GET, + Query: serverName, + Scope: scope, + }, + }) + + // Link to Network Private Endpoint when present (may be in different resource group) + if conn.Properties != nil && conn.Properties.PrivateEndpoint != nil && conn.Properties.PrivateEndpoint.ID != nil { + peID := *conn.Properties.PrivateEndpoint.ID + peName := 
azureshared.ExtractResourceName(peID) + if peName != "" { + linkedScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(peID); extractedScope != "" { + linkedScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkPrivateEndpoint.String(), + Method: sdp.QueryMethod_GET, + Query: peName, + Scope: linkedScope, + }, + }) + } + } + + return sdpItem, nil +} + +func (s sqlServerPrivateEndpointConnectionWrapper) IAMPermissions() []string { + return []string{ + "Microsoft.Sql/servers/privateEndpointConnections/read", + } +} + +func (s sqlServerPrivateEndpointConnectionWrapper) PredefinedRole() string { + return "Reader" +} diff --git a/sources/azure/manual/sql-server-private-endpoint-connection_test.go b/sources/azure/manual/sql-server-private-endpoint-connection_test.go new file mode 100644 index 00000000..8108db7c --- /dev/null +++ b/sources/azure/manual/sql-server-private-endpoint-connection_test.go @@ -0,0 +1,322 @@ +package manual_test + +import ( + "context" + "errors" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/sql/armsql/v2" + "go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + "github.com/overmindtech/cli/sources/shared" +) + +type mockSQLServerPrivateEndpointConnectionsPager struct { + pages []armsql.PrivateEndpointConnectionsClientListByServerResponse + index int +} + +func (m *mockSQLServerPrivateEndpointConnectionsPager) More() bool { + return m.index < len(m.pages) +} + +func (m *mockSQLServerPrivateEndpointConnectionsPager) 
NextPage(ctx context.Context) (armsql.PrivateEndpointConnectionsClientListByServerResponse, error) { + if m.index >= len(m.pages) { + return armsql.PrivateEndpointConnectionsClientListByServerResponse{}, errors.New("no more pages") + } + page := m.pages[m.index] + m.index++ + return page, nil +} + +type testSQLServerPrivateEndpointConnectionsClient struct { + *mocks.MockSQLServerPrivateEndpointConnectionsClient + pager clients.SQLServerPrivateEndpointConnectionsPager +} + +func (t *testSQLServerPrivateEndpointConnectionsClient) ListByServer(ctx context.Context, resourceGroupName, serverName string) clients.SQLServerPrivateEndpointConnectionsPager { + return t.pager +} + +func TestSQLServerPrivateEndpointConnection(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + serverName := "test-sql-server" + connectionName := "test-pec" + + t.Run("Get", func(t *testing.T) { + conn := createAzureSQLServerPrivateEndpointConnection(connectionName, "") + + mockClient := mocks.NewMockSQLServerPrivateEndpointConnectionsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, serverName, connectionName).Return( + armsql.PrivateEndpointConnectionsClientGetResponse{ + PrivateEndpointConnection: *conn, + }, nil) + + testClient := &testSQLServerPrivateEndpointConnectionsClient{MockSQLServerPrivateEndpointConnectionsClient: mockClient} + wrapper := manual.NewSQLServerPrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(serverName, connectionName) + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.SQLServerPrivateEndpointConnection.String() { + 
t.Errorf("Expected type %s, got %s", azureshared.SQLServerPrivateEndpointConnection, sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", sdpItem.GetUniqueAttribute()) + } + + if sdpItem.UniqueAttributeValue() != shared.CompositeLookupKey(serverName, connectionName) { + t.Errorf("Expected unique attribute value %s, got %s", shared.CompositeLookupKey(serverName, connectionName), sdpItem.UniqueAttributeValue()) + } + + if sdpItem.GetScope() != subscriptionID+"."+resourceGroup { + t.Errorf("Expected scope %s, got %s", subscriptionID+"."+resourceGroup, sdpItem.GetScope()) + } + + if err := sdpItem.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + + t.Run("StaticTests", func(t *testing.T) { + linkedQueries := sdpItem.GetLinkedItemQueries() + if len(linkedQueries) < 1 { + t.Fatalf("Expected at least 1 linked query, got: %d", len(linkedQueries)) + } + + foundSQLServer := false + for _, lq := range linkedQueries { + if lq.GetQuery().GetType() == azureshared.SQLServer.String() { + foundSQLServer = true + if lq.GetQuery().GetMethod() != sdp.QueryMethod_GET { + t.Errorf("Expected SQLServer link method GET, got %v", lq.GetQuery().GetMethod()) + } + if lq.GetQuery().GetQuery() != serverName { + t.Errorf("Expected SQLServer query %s, got %s", serverName, lq.GetQuery().GetQuery()) + } + } + } + if !foundSQLServer { + t.Error("Expected linked query to SQLServer") + } + }) + }) + + t.Run("Get_WithPrivateEndpointLink", func(t *testing.T) { + peID := "/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/privateEndpoints/test-pe" + conn := createAzureSQLServerPrivateEndpointConnection(connectionName, peID) + + mockClient := mocks.NewMockSQLServerPrivateEndpointConnectionsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, serverName, connectionName).Return( + 
armsql.PrivateEndpointConnectionsClientGetResponse{ + PrivateEndpointConnection: *conn, + }, nil) + + testClient := &testSQLServerPrivateEndpointConnectionsClient{MockSQLServerPrivateEndpointConnectionsClient: mockClient} + wrapper := manual.NewSQLServerPrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(serverName, connectionName) + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + foundPrivateEndpoint := false + for _, lq := range sdpItem.GetLinkedItemQueries() { + if lq.GetQuery().GetType() == azureshared.NetworkPrivateEndpoint.String() { + foundPrivateEndpoint = true + if lq.GetQuery().GetQuery() != "test-pe" { + t.Errorf("Expected NetworkPrivateEndpoint query 'test-pe', got %s", lq.GetQuery().GetQuery()) + } + break + } + } + if !foundPrivateEndpoint { + t.Error("Expected linked query to NetworkPrivateEndpoint when PrivateEndpoint ID is set") + } + }) + + t.Run("GetWithInsufficientQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockSQLServerPrivateEndpointConnectionsClient(ctrl) + testClient := &testSQLServerPrivateEndpointConnectionsClient{MockSQLServerPrivateEndpointConnectionsClient: mockClient} + + wrapper := manual.NewSQLServerPrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], serverName, true) + if qErr == nil { + t.Error("Expected error when providing insufficient query parts, but got nil") + } + }) + + t.Run("Search", func(t *testing.T) { + conn1 := createAzureSQLServerPrivateEndpointConnection("pec-1", "") + conn2 := 
createAzureSQLServerPrivateEndpointConnection("pec-2", "") + + mockClient := mocks.NewMockSQLServerPrivateEndpointConnectionsClient(ctrl) + mockPager := &mockSQLServerPrivateEndpointConnectionsPager{ + pages: []armsql.PrivateEndpointConnectionsClientListByServerResponse{ + { + PrivateEndpointConnectionListResult: armsql.PrivateEndpointConnectionListResult{ + Value: []*armsql.PrivateEndpointConnection{conn1, conn2}, + }, + }, + }, + } + + testClient := &testSQLServerPrivateEndpointConnectionsClient{ + MockSQLServerPrivateEndpointConnectionsClient: mockClient, + pager: mockPager, + } + + wrapper := manual.NewSQLServerPrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, wrapper.Scopes()[0], serverName, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 2 { + t.Fatalf("Expected 2 items, got: %d", len(sdpItems)) + } + + for _, item := range sdpItems { + if err := item.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + if item.GetType() != azureshared.SQLServerPrivateEndpointConnection.String() { + t.Errorf("Expected type %s, got %s", azureshared.SQLServerPrivateEndpointConnection, item.GetType()) + } + } + }) + + t.Run("Search_NilNameSkipped", func(t *testing.T) { + validConn := createAzureSQLServerPrivateEndpointConnection("valid-pec", "") + + mockClient := mocks.NewMockSQLServerPrivateEndpointConnectionsClient(ctrl) + mockPager := &mockSQLServerPrivateEndpointConnectionsPager{ + pages: []armsql.PrivateEndpointConnectionsClientListByServerResponse{ + { + PrivateEndpointConnectionListResult: armsql.PrivateEndpointConnectionListResult{ + Value: 
[]*armsql.PrivateEndpointConnection{ + {Name: nil}, + validConn, + }, + }, + }, + }, + } + + testClient := &testSQLServerPrivateEndpointConnectionsClient{ + MockSQLServerPrivateEndpointConnectionsClient: mockClient, + pager: mockPager, + } + + wrapper := manual.NewSQLServerPrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, wrapper.Scopes()[0], serverName, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 1 { + t.Fatalf("Expected 1 item (nil name skipped), got: %d", len(sdpItems)) + } + if sdpItems[0].UniqueAttributeValue() != shared.CompositeLookupKey(serverName, "valid-pec") { + t.Errorf("Expected unique value %s, got %s", shared.CompositeLookupKey(serverName, "valid-pec"), sdpItems[0].UniqueAttributeValue()) + } + }) + + t.Run("Search_InvalidQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockSQLServerPrivateEndpointConnectionsClient(ctrl) + testClient := &testSQLServerPrivateEndpointConnectionsClient{MockSQLServerPrivateEndpointConnectionsClient: mockClient} + + wrapper := manual.NewSQLServerPrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + _, qErr := wrapper.Search(ctx, wrapper.Scopes()[0]) + if qErr == nil { + t.Error("Expected error when providing no query parts, but got nil") + } + }) + + t.Run("ErrorHandling_Get", func(t *testing.T) { + expectedErr := errors.New("private endpoint connection not found") + + mockClient := mocks.NewMockSQLServerPrivateEndpointConnectionsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, serverName, "nonexistent-pec").Return( + 
armsql.PrivateEndpointConnectionsClientGetResponse{}, expectedErr) + + testClient := &testSQLServerPrivateEndpointConnectionsClient{MockSQLServerPrivateEndpointConnectionsClient: mockClient} + wrapper := manual.NewSQLServerPrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(serverName, "nonexistent-pec") + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when getting non-existent private endpoint connection, but got nil") + } + }) + + t.Run("PotentialLinks", func(t *testing.T) { + wrapper := manual.NewSQLServerPrivateEndpointConnection(nil, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + links := wrapper.PotentialLinks() + if !links[azureshared.SQLServer] { + t.Error("Expected SQLServer in PotentialLinks") + } + if !links[azureshared.NetworkPrivateEndpoint] { + t.Error("Expected NetworkPrivateEndpoint in PotentialLinks") + } + }) +} + +func createAzureSQLServerPrivateEndpointConnection(connectionName, privateEndpointID string) *armsql.PrivateEndpointConnection { + ready := armsql.PrivateEndpointProvisioningStateReady + approved := armsql.PrivateLinkServiceConnectionStateStatusApproved + conn := &armsql.PrivateEndpointConnection{ + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Sql/servers/test-sql-server/privateEndpointConnections/" + connectionName), + Name: new(connectionName), + Type: new("Microsoft.Sql/servers/privateEndpointConnections"), + Properties: &armsql.PrivateEndpointConnectionProperties{ + ProvisioningState: &ready, + PrivateLinkServiceConnectionState: &armsql.PrivateLinkServiceConnectionStateProperty{ + Status: &approved, + }, + }, + } + if privateEndpointID != "" { + conn.Properties.PrivateEndpoint = 
&armsql.PrivateEndpointProperty{ + ID: new(privateEndpointID), + } + } + return conn +} diff --git a/sources/azure/manual/sql-server-virtual-network-rule.go b/sources/azure/manual/sql-server-virtual-network-rule.go new file mode 100644 index 00000000..0a56dbee --- /dev/null +++ b/sources/azure/manual/sql-server-virtual-network-rule.go @@ -0,0 +1,260 @@ +package manual + +import ( + "context" + "errors" + "fmt" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/sql/armsql/v2" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" +) + +var SQLServerVirtualNetworkRuleLookupByName = shared.NewItemTypeLookup("name", azureshared.SQLServerVirtualNetworkRule) + +type sqlServerVirtualNetworkRuleWrapper struct { + client clients.SqlServerVirtualNetworkRuleClient + + *azureshared.MultiResourceGroupBase +} + +func NewSqlServerVirtualNetworkRule(client clients.SqlServerVirtualNetworkRuleClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.SearchableWrapper { + return &sqlServerVirtualNetworkRuleWrapper{ + client: client, + MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_DATABASE, + azureshared.SQLServerVirtualNetworkRule, + ), + } +} + +func (s sqlServerVirtualNetworkRuleWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 2 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Get requires 2 query parts: serverName and virtualNetworkRuleName", + Scope: scope, + ItemType: s.Type(), + } + } + serverName := queryParts[0] + ruleName := queryParts[1] + if ruleName == "" { + return nil, &sdp.QueryError{ + 
ErrorType: sdp.QueryError_OTHER, + ErrorString: "virtualNetworkRuleName cannot be empty", + Scope: scope, + ItemType: s.Type(), + } + } + + rgScope, err := s.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + resp, err := s.client.Get(ctx, rgScope.ResourceGroup, serverName, ruleName) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + return s.azureSqlServerVirtualNetworkRuleToSDPItem(&resp.VirtualNetworkRule, serverName, ruleName, scope) +} + +func (s sqlServerVirtualNetworkRuleWrapper) azureSqlServerVirtualNetworkRuleToSDPItem(rule *armsql.VirtualNetworkRule, serverName, ruleName, scope string) (*sdp.Item, *sdp.QueryError) { + attributes, err := shared.ToAttributesWithExclude(rule, "tags") + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + err = attributes.Set("uniqueAttr", shared.CompositeLookupKey(serverName, ruleName)) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + sdpItem := &sdp.Item{ + Type: azureshared.SQLServerVirtualNetworkRule.String(), + UniqueAttribute: "uniqueAttr", + Attributes: attributes, + Scope: scope, + Tags: nil, // VirtualNetworkRule has no Tags in the Azure SDK + } + + // Link to parent SQL Server (from resource ID or known server name) + if rule.ID != nil { + extractedServerName := azureshared.ExtractSQLServerNameFromDatabaseID(*rule.ID) + if extractedServerName != "" { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.SQLServer.String(), + Method: sdp.QueryMethod_GET, + Query: extractedServerName, + Scope: scope, + }, + }) + } + } else { + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.SQLServer.String(), + Method: sdp.QueryMethod_GET, + Query: serverName, + Scope: scope, + }, + }) + } + + // Link to Virtual Network 
and Subnet when VirtualNetworkSubnetID is set + // Subnet ID format: /subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName} + if rule.Properties != nil && rule.Properties.VirtualNetworkSubnetID != nil { + subnetID := *rule.Properties.VirtualNetworkSubnetID + scopeParams := azureshared.ExtractPathParamsFromResourceID(subnetID, []string{"subscriptions", "resourceGroups"}) + subnetParams := azureshared.ExtractPathParamsFromResourceID(subnetID, []string{"virtualNetworks", "subnets"}) + if len(scopeParams) >= 2 && len(subnetParams) >= 2 { + subscriptionID := scopeParams[0] + resourceGroupName := scopeParams[1] + vnetName := subnetParams[0] + subnetName := subnetParams[1] + subnetScope := fmt.Sprintf("%s.%s", subscriptionID, resourceGroupName) + // Link to Virtual Network + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkVirtualNetwork.String(), + Method: sdp.QueryMethod_GET, + Query: vnetName, + Scope: subnetScope, + }, + }) + // Link to Subnet + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkSubnet.String(), + Method: sdp.QueryMethod_GET, + Query: shared.CompositeLookupKey(vnetName, subnetName), + Scope: subnetScope, + }, + }) + } + } + + return sdpItem, nil +} + +func (s sqlServerVirtualNetworkRuleWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + SQLServerLookupByName, + SQLServerVirtualNetworkRuleLookupByName, + } +} + +func (s sqlServerVirtualNetworkRuleWrapper) Search(ctx context.Context, scope string, queryParts ...string) ([]*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 1 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Search requires 1 query part: serverName", + Scope: scope, + ItemType: s.Type(), + } + } + serverName := queryParts[0] + + rgScope, err := 
s.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + pager := s.client.ListByServer(ctx, rgScope.ResourceGroup, serverName) + + var items []*sdp.Item + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + for _, rule := range page.Value { + if rule.Name == nil { + continue + } + item, sdpErr := s.azureSqlServerVirtualNetworkRuleToSDPItem(rule, serverName, *rule.Name, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + + return items, nil +} + +func (s sqlServerVirtualNetworkRuleWrapper) SearchStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string, queryParts ...string) { + if len(queryParts) < 1 { + stream.SendError(azureshared.QueryError(errors.New("Search requires 1 query part: serverName"), scope, s.Type())) + return + } + serverName := queryParts[0] + + rgScope, err := s.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, s.Type())) + return + } + pager := s.client.ListByServer(ctx, rgScope.ResourceGroup, serverName) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, s.Type())) + return + } + for _, rule := range page.Value { + if rule.Name == nil { + continue + } + item, sdpErr := s.azureSqlServerVirtualNetworkRuleToSDPItem(rule, serverName, *rule.Name, scope) + if sdpErr != nil { + stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) + stream.SendItem(item) + } + } +} + +func (s sqlServerVirtualNetworkRuleWrapper) SearchLookups() []sources.ItemTypeLookups { + return []sources.ItemTypeLookups{ + { + SQLServerLookupByName, + }, + } +} + +func (s sqlServerVirtualNetworkRuleWrapper) PotentialLinks() 
map[shared.ItemType]bool { + return map[shared.ItemType]bool{ + azureshared.SQLServer: true, + azureshared.NetworkSubnet: true, + azureshared.NetworkVirtualNetwork: true, + } +} + +func (s sqlServerVirtualNetworkRuleWrapper) TerraformMappings() []*sdp.TerraformMapping { + return []*sdp.TerraformMapping{ + { + TerraformMethod: sdp.QueryMethod_SEARCH, + TerraformQueryMap: "azurerm_mssql_virtual_network_rule.id", + }, + } +} + +func (s sqlServerVirtualNetworkRuleWrapper) IAMPermissions() []string { + return []string{ + "Microsoft.Sql/servers/virtualNetworkRules/read", + } +} + +func (s sqlServerVirtualNetworkRuleWrapper) PredefinedRole() string { + return "Reader" +} diff --git a/sources/azure/manual/sql-server-virtual-network-rule_test.go b/sources/azure/manual/sql-server-virtual-network-rule_test.go new file mode 100644 index 00000000..9182f287 --- /dev/null +++ b/sources/azure/manual/sql-server-virtual-network-rule_test.go @@ -0,0 +1,359 @@ +package manual_test + +import ( + "context" + "errors" + "slices" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/sql/armsql/v2" + "go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + "github.com/overmindtech/cli/sources/shared" +) + +type mockSqlServerVirtualNetworkRulePager struct { + pages []armsql.VirtualNetworkRulesClientListByServerResponse + index int +} + +func (m *mockSqlServerVirtualNetworkRulePager) More() bool { + return m.index < len(m.pages) +} + +func (m *mockSqlServerVirtualNetworkRulePager) NextPage(ctx context.Context) (armsql.VirtualNetworkRulesClientListByServerResponse, error) { + if m.index >= len(m.pages) { + 
return armsql.VirtualNetworkRulesClientListByServerResponse{}, errors.New("no more pages") + } + page := m.pages[m.index] + m.index++ + return page, nil +} + +type errorSqlServerVirtualNetworkRulePager struct{} + +func (e *errorSqlServerVirtualNetworkRulePager) More() bool { + return true +} + +func (e *errorSqlServerVirtualNetworkRulePager) NextPage(ctx context.Context) (armsql.VirtualNetworkRulesClientListByServerResponse, error) { + return armsql.VirtualNetworkRulesClientListByServerResponse{}, errors.New("pager error") +} + +type testSqlServerVirtualNetworkRuleClient struct { + *mocks.MockSqlServerVirtualNetworkRuleClient + pager clients.SqlServerVirtualNetworkRulePager +} + +func (t *testSqlServerVirtualNetworkRuleClient) ListByServer(ctx context.Context, resourceGroupName, serverName string) clients.SqlServerVirtualNetworkRulePager { + return t.pager +} + +func TestSqlServerVirtualNetworkRule(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + serverName := "test-server" + ruleName := "test-vnet-rule" + + t.Run("Get", func(t *testing.T) { + rule := createAzureSqlServerVirtualNetworkRule(serverName, ruleName, "") + + mockClient := mocks.NewMockSqlServerVirtualNetworkRuleClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, serverName, ruleName).Return( + armsql.VirtualNetworkRulesClientGetResponse{ + VirtualNetworkRule: *rule, + }, nil) + + wrapper := manual.NewSqlServerVirtualNetworkRule(&testSqlServerVirtualNetworkRuleClient{MockSqlServerVirtualNetworkRuleClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(serverName, ruleName) + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", 
qErr) + } + + if sdpItem.GetType() != azureshared.SQLServerVirtualNetworkRule.String() { + t.Errorf("Expected type %s, got %s", azureshared.SQLServerVirtualNetworkRule, sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", sdpItem.GetUniqueAttribute()) + } + + expectedUniqueAttrValue := shared.CompositeLookupKey(serverName, ruleName) + if sdpItem.UniqueAttributeValue() != expectedUniqueAttrValue { + t.Errorf("Expected unique attribute value %s, got %s", expectedUniqueAttrValue, sdpItem.UniqueAttributeValue()) + } + + if sdpItem.GetScope() != subscriptionID+"."+resourceGroup { + t.Errorf("Expected scope %s, got %s", subscriptionID+"."+resourceGroup, sdpItem.GetScope()) + } + + if err := sdpItem.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + + t.Run("StaticTests", func(t *testing.T) { + queryTests := shared.QueryTests{ + { + ExpectedType: azureshared.SQLServer.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: serverName, + ExpectedScope: subscriptionID + "." 
+ resourceGroup, + }, + } + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("Get_WithSubnetLink", func(t *testing.T) { + subnetID := "/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/test-subnet" + rule := createAzureSqlServerVirtualNetworkRule(serverName, ruleName, subnetID) + + mockClient := mocks.NewMockSqlServerVirtualNetworkRuleClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, serverName, ruleName).Return( + armsql.VirtualNetworkRulesClientGetResponse{ + VirtualNetworkRule: *rule, + }, nil) + + wrapper := manual.NewSqlServerVirtualNetworkRule(&testSqlServerVirtualNetworkRuleClient{MockSqlServerVirtualNetworkRuleClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(serverName, ruleName) + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + t.Run("StaticTests", func(t *testing.T) { + queryTests := shared.QueryTests{ + { + ExpectedType: azureshared.SQLServer.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: serverName, + ExpectedScope: subscriptionID + "." + resourceGroup, + }, + { + ExpectedType: azureshared.NetworkVirtualNetwork.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-vnet", + ExpectedScope: subscriptionID + "." + resourceGroup, + }, + { + ExpectedType: azureshared.NetworkSubnet.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: shared.CompositeLookupKey("test-vnet", "test-subnet"), + ExpectedScope: subscriptionID + "." 
+ resourceGroup, + }, + } + shared.RunStaticTests(t, adapter, sdpItem, queryTests) + }) + }) + + t.Run("GetWithInsufficientQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockSqlServerVirtualNetworkRuleClient(ctrl) + wrapper := manual.NewSqlServerVirtualNetworkRule(&testSqlServerVirtualNetworkRuleClient{MockSqlServerVirtualNetworkRuleClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], serverName, true) + if qErr == nil { + t.Error("Expected error when providing only serverName (1 query part), but got nil") + } + }) + + t.Run("GetWithEmptyName", func(t *testing.T) { + mockClient := mocks.NewMockSqlServerVirtualNetworkRuleClient(ctrl) + wrapper := manual.NewSqlServerVirtualNetworkRule(&testSqlServerVirtualNetworkRuleClient{MockSqlServerVirtualNetworkRuleClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(serverName, "") + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when virtual network rule name is empty, but got nil") + } + }) + + t.Run("Search", func(t *testing.T) { + rule1 := createAzureSqlServerVirtualNetworkRule(serverName, "rule1", "") + rule2 := createAzureSqlServerVirtualNetworkRule(serverName, "rule2", "") + + mockClient := mocks.NewMockSqlServerVirtualNetworkRuleClient(ctrl) + pager := &mockSqlServerVirtualNetworkRulePager{ + pages: []armsql.VirtualNetworkRulesClientListByServerResponse{ + { + VirtualNetworkRuleListResult: armsql.VirtualNetworkRuleListResult{ + Value: []*armsql.VirtualNetworkRule{rule1, rule2}, + }, + }, + }, + } + + testClient := &testSqlServerVirtualNetworkRuleClient{ + 
MockSqlServerVirtualNetworkRuleClient: mockClient, + pager: pager, + } + wrapper := manual.NewSqlServerVirtualNetworkRule(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + items, qErr := searchable.Search(ctx, wrapper.Scopes()[0], serverName, true) + if qErr != nil { + t.Fatalf("Expected no error from Search, got: %v", qErr) + } + if len(items) != 2 { + t.Errorf("Expected 2 items from Search, got %d", len(items)) + } + }) + + t.Run("SearchStream", func(t *testing.T) { + rule1 := createAzureSqlServerVirtualNetworkRule(serverName, "rule1", "") + + mockClient := mocks.NewMockSqlServerVirtualNetworkRuleClient(ctrl) + pager := &mockSqlServerVirtualNetworkRulePager{ + pages: []armsql.VirtualNetworkRulesClientListByServerResponse{ + { + VirtualNetworkRuleListResult: armsql.VirtualNetworkRuleListResult{ + Value: []*armsql.VirtualNetworkRule{rule1}, + }, + }, + }, + } + + testClient := &testSqlServerVirtualNetworkRuleClient{ + MockSqlServerVirtualNetworkRuleClient: mockClient, + pager: pager, + } + wrapper := manual.NewSqlServerVirtualNetworkRule(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchStreamable, ok := adapter.(discovery.SearchStreamableAdapter) + if !ok { + t.Fatalf("Adapter does not support SearchStream operation") + } + + stream := discovery.NewRecordingQueryResultStream() + searchStreamable.SearchStream(ctx, wrapper.Scopes()[0], serverName, true, stream) + items := stream.GetItems() + errs := stream.GetErrors() + if len(errs) > 0 { + t.Fatalf("Expected no errors from SearchStream, got: %v", errs) + } + if len(items) != 1 { + t.Errorf("Expected 1 item 
from SearchStream, got %d", len(items)) + } + }) + + t.Run("SearchWithInsufficientQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockSqlServerVirtualNetworkRuleClient(ctrl) + wrapper := manual.NewSqlServerVirtualNetworkRule(&testSqlServerVirtualNetworkRuleClient{MockSqlServerVirtualNetworkRuleClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + _, qErr := wrapper.Search(ctx, wrapper.Scopes()[0]) + if qErr == nil { + t.Error("Expected error when providing no query parts, but got nil") + } + }) + + t.Run("ErrorHandling_Get", func(t *testing.T) { + expectedErr := errors.New("virtual network rule not found") + + mockClient := mocks.NewMockSqlServerVirtualNetworkRuleClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, serverName, "nonexistent-rule").Return( + armsql.VirtualNetworkRulesClientGetResponse{}, expectedErr) + + wrapper := manual.NewSqlServerVirtualNetworkRule(&testSqlServerVirtualNetworkRuleClient{MockSqlServerVirtualNetworkRuleClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(serverName, "nonexistent-rule") + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when getting non-existent virtual network rule, but got nil") + } + }) + + t.Run("ErrorHandling_Search", func(t *testing.T) { + mockClient := mocks.NewMockSqlServerVirtualNetworkRuleClient(ctrl) + errorPager := &errorSqlServerVirtualNetworkRulePager{} + testClient := &testSqlServerVirtualNetworkRuleClient{ + MockSqlServerVirtualNetworkRuleClient: mockClient, + pager: errorPager, + } + + wrapper := manual.NewSqlServerVirtualNetworkRule(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + _, qErr := wrapper.Search(ctx, 
wrapper.Scopes()[0], serverName) + if qErr == nil { + t.Error("Expected error from Search when pager returns error, but got nil") + } + }) + + t.Run("InterfaceCompliance", func(t *testing.T) { + mockClient := mocks.NewMockSqlServerVirtualNetworkRuleClient(ctrl) + wrapper := manual.NewSqlServerVirtualNetworkRule(&testSqlServerVirtualNetworkRuleClient{MockSqlServerVirtualNetworkRuleClient: mockClient}, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + w := wrapper.(sources.Wrapper) + + permissions := w.IAMPermissions() + if len(permissions) == 0 { + t.Error("Expected IAMPermissions to return at least one permission") + } + expectedPermission := "Microsoft.Sql/servers/virtualNetworkRules/read" + found := slices.Contains(permissions, expectedPermission) + if !found { + t.Errorf("Expected IAMPermissions to include %s", expectedPermission) + } + + potentialLinks := w.PotentialLinks() + if !potentialLinks[azureshared.SQLServer] { + t.Error("Expected PotentialLinks to include SQLServer") + } + if !potentialLinks[azureshared.NetworkSubnet] { + t.Error("Expected PotentialLinks to include NetworkSubnet") + } + if !potentialLinks[azureshared.NetworkVirtualNetwork] { + t.Error("Expected PotentialLinks to include NetworkVirtualNetwork") + } + + mappings := w.TerraformMappings() + if len(mappings) == 0 { + t.Error("Expected TerraformMappings to return at least one mapping") + } + foundMapping := false + for _, mapping := range mappings { + if mapping.GetTerraformQueryMap() == "azurerm_mssql_virtual_network_rule.id" { + foundMapping = true + break + } + } + if !foundMapping { + t.Error("Expected TerraformMappings to include 'azurerm_mssql_virtual_network_rule.id' mapping") + } + }) +} + +func createAzureSqlServerVirtualNetworkRule(serverName, ruleName, subnetID string) *armsql.VirtualNetworkRule { + ruleID := "/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Sql/servers/" + serverName + 
"/virtualNetworkRules/" + ruleName + rule := &armsql.VirtualNetworkRule{ + Name: &ruleName, + ID: &ruleID, + Properties: &armsql.VirtualNetworkRuleProperties{}, + } + if subnetID != "" { + rule.Properties.VirtualNetworkSubnetID = &subnetID + } + return rule +} diff --git a/sources/azure/manual/sql-server_test.go b/sources/azure/manual/sql-server_test.go index d2165407..9e1ad621 100644 --- a/sources/azure/manual/sql-server_test.go +++ b/sources/azure/manual/sql-server_test.go @@ -3,10 +3,10 @@ package manual_test import ( "context" "errors" + "slices" "sync" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/sql/armsql/v2" "go.uber.org/mock/gomock" @@ -376,14 +376,14 @@ func TestSqlServer(t *testing.T) { { Properties: &armsql.PrivateEndpointConnectionProperties{ PrivateEndpoint: &armsql.PrivateEndpointProperty{ - ID: to.Ptr(privateEndpointID1), + ID: new(privateEndpointID1), }, }, }, { Properties: &armsql.PrivateEndpointConnectionProperties{ PrivateEndpoint: &armsql.PrivateEndpointProperty{ - ID: to.Ptr(privateEndpointID2), + ID: new(privateEndpointID2), }, }, }, @@ -638,12 +638,12 @@ func TestSqlServer(t *testing.T) { server1 := createAzureSqlServer("server-1", "", "") server2 := &armsql.Server{ Name: nil, // Server with nil name should be skipped - Location: to.Ptr("eastus"), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armsql.ServerProperties{ - Version: to.Ptr("12.0"), + Version: new("12.0"), }, } @@ -838,20 +838,14 @@ func TestSqlServer(t *testing.T) { t.Error("Expected IAMPermissions to return at least one permission") } expectedPermission := "Microsoft.Sql/servers/read" - found := false - for _, perm := range permissions { - if perm == expectedPermission { - found = true - break - } - } + found := slices.Contains(permissions, expectedPermission) if !found { t.Errorf("Expected IAMPermissions to include %s", 
expectedPermission) } // Verify PredefinedRole // PredefinedRole is available on the wrapper, not the adapter - if roleInterface, ok := interface{}(wrapper).(interface{ PredefinedRole() string }); ok { + if roleInterface, ok := any(wrapper).(interface{ PredefinedRole() string }); ok { role := roleInterface.PredefinedRole() if role != "Reader" { t.Errorf("Expected PredefinedRole to be 'Reader', got %s", role) @@ -902,27 +896,27 @@ func createAzureSqlServer(serverName, primaryUserAssignedIdentityID, fullyQualif serverID := "/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Sql/servers/" + serverName server := &armsql.Server{ - Name: to.Ptr(serverName), - Location: to.Ptr("eastus"), + Name: new(serverName), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), - "project": to.Ptr("testing"), + "env": new("test"), + "project": new("testing"), }, - ID: to.Ptr(serverID), + ID: new(serverID), Properties: &armsql.ServerProperties{ - Version: to.Ptr("12.0"), - AdministratorLogin: to.Ptr("admin"), - FullyQualifiedDomainName: to.Ptr(fullyQualifiedDomainName), + Version: new("12.0"), + AdministratorLogin: new("admin"), + FullyQualifiedDomainName: new(fullyQualifiedDomainName), }, } if primaryUserAssignedIdentityID != "" { - server.Properties.PrimaryUserAssignedIdentityID = to.Ptr(primaryUserAssignedIdentityID) + server.Properties.PrimaryUserAssignedIdentityID = new(primaryUserAssignedIdentityID) } if fullyQualifiedDomainName == "" && serverName != "" { // Set a default FQDN if not provided but server name is set - server.Properties.FullyQualifiedDomainName = to.Ptr(serverName + ".database.windows.net") + server.Properties.FullyQualifiedDomainName = new(serverName + ".database.windows.net") } return server @@ -933,7 +927,7 @@ func createAzureSqlServerWithUserAssignedIdentities(serverName, primaryUserAssig server := createAzureSqlServer(serverName, primaryUserAssignedIdentityID, fullyQualifiedDomainName) if userAssignedIdentities 
!= nil { server.Identity = &armsql.ResourceIdentity{ - Type: to.Ptr(armsql.IdentityTypeUserAssigned), + Type: new(armsql.IdentityTypeUserAssigned), UserAssignedIdentities: userAssignedIdentities, } } @@ -953,7 +947,7 @@ func createAzureSqlServerWithPrivateEndpointConnections(serverName, primaryUserA func createAzureSqlServerWithKeyId(serverName, primaryUserAssignedIdentityID, fullyQualifiedDomainName, keyID string) *armsql.Server { server := createAzureSqlServer(serverName, primaryUserAssignedIdentityID, fullyQualifiedDomainName) if keyID != "" { - server.Properties.KeyID = to.Ptr(keyID) + server.Properties.KeyID = new(keyID) } return server } diff --git a/sources/azure/manual/storage-account.go b/sources/azure/manual/storage-account.go index 5ab37a6e..b5e89808 100644 --- a/sources/azure/manual/storage-account.go +++ b/sources/azure/manual/storage-account.go @@ -172,6 +172,15 @@ func (s storageAccountWrapper) azureStorageAccountToSDPItem(account *armstorage. }, }) + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.StorageEncryptionScope.String(), + Method: sdp.QueryMethod_SEARCH, + Query: accountName, + Scope: scope, + }, + }) + // Link to Private Endpoint Connections (child resource) // Reference: https://learn.microsoft.com/en-us/rest/api/storagerp/private-endpoint-connections/list?view=rest-storagerp-2025-06-01 // Private endpoint connections can be listed using the storage account name @@ -443,6 +452,7 @@ func (s storageAccountWrapper) PotentialLinks() map[shared.ItemType]bool { azureshared.StorageFileShare: true, azureshared.StorageTable: true, azureshared.StorageQueue: true, + azureshared.StorageEncryptionScope: true, azureshared.StoragePrivateEndpointConnection: true, // External resources azureshared.ManagedIdentityUserAssignedIdentity: true, diff --git a/sources/azure/manual/storage-account_test.go b/sources/azure/manual/storage-account_test.go index 99a77918..860b0b75 100644 --- 
a/sources/azure/manual/storage-account_test.go +++ b/sources/azure/manual/storage-account_test.go @@ -5,7 +5,6 @@ import ( "errors" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage/v3" "go.uber.org/mock/gomock" @@ -87,6 +86,12 @@ func TestStorageAccount(t *testing.T) { ExpectedMethod: sdp.QueryMethod_SEARCH, ExpectedQuery: accountName, ExpectedScope: subscriptionID + "." + resourceGroup, + }, { + // Storage encryption scope link (child resource) + ExpectedType: azureshared.StorageEncryptionScope.String(), + ExpectedMethod: sdp.QueryMethod_SEARCH, + ExpectedQuery: accountName, + ExpectedScope: subscriptionID + "." + resourceGroup, }, { // Storage private endpoint connection link (child resource) ExpectedType: azureshared.StoragePrivateEndpointConnection.String(), @@ -194,12 +199,12 @@ func TestStorageAccount(t *testing.T) { account1 := createAzureStorageAccount("teststorageaccount1", "Succeeded") account2 := &armstorage.Account{ Name: nil, // Account with nil name should be skipped - Location: to.Ptr("eastus"), + Location: new("eastus"), Tags: map[string]*string{ - "env": to.Ptr("test"), + "env": new("test"), }, Properties: &armstorage.AccountProperties{ - ProvisioningState: to.Ptr(armstorage.ProvisioningStateSucceeded), + ProvisioningState: new(armstorage.ProvisioningStateSucceeded), }, } @@ -297,20 +302,20 @@ func TestStorageAccount(t *testing.T) { func createAzureStorageAccount(accountName, provisioningState string) *armstorage.Account { state := armstorage.ProvisioningState(provisioningState) return &armstorage.Account{ - Name: to.Ptr(accountName), - Location: to.Ptr("eastus"), - Kind: to.Ptr(armstorage.KindStorageV2), + Name: new(accountName), + Location: new("eastus"), + Kind: new(armstorage.KindStorageV2), Tags: map[string]*string{ - "env": to.Ptr("test"), - "project": to.Ptr("testing"), + "env": new("test"), + "project": new("testing"), }, Properties: 
&armstorage.AccountProperties{ ProvisioningState: &state, PrimaryEndpoints: &armstorage.Endpoints{ - Blob: to.Ptr("https://" + accountName + ".blob.core.windows.net/"), - Queue: to.Ptr("https://" + accountName + ".queue.core.windows.net/"), - Table: to.Ptr("https://" + accountName + ".table.core.windows.net/"), - File: to.Ptr("https://" + accountName + ".file.core.windows.net/"), + Blob: new("https://" + accountName + ".blob.core.windows.net/"), + Queue: new("https://" + accountName + ".queue.core.windows.net/"), + Table: new("https://" + accountName + ".table.core.windows.net/"), + File: new("https://" + accountName + ".file.core.windows.net/"), }, }, } diff --git a/sources/azure/manual/storage-blob-container_test.go b/sources/azure/manual/storage-blob-container_test.go index 86a391be..6d392582 100644 --- a/sources/azure/manual/storage-blob-container_test.go +++ b/sources/azure/manual/storage-blob-container_test.go @@ -6,7 +6,6 @@ import ( "fmt" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage/v3" "go.uber.org/mock/gomock" @@ -325,9 +324,9 @@ func TestStorageBlobContainer(t *testing.T) { Name: nil, }, { - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Storage/storageAccounts/teststorageaccount/blobServices/default/containers/valid-container"), - Name: to.Ptr("valid-container"), - Type: to.Ptr("Microsoft.Storage/storageAccounts/blobServices/containers"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Storage/storageAccounts/teststorageaccount/blobServices/default/containers/valid-container"), + Name: new("valid-container"), + Type: new("Microsoft.Storage/storageAccounts/blobServices/containers"), }, }, }, @@ -411,27 +410,27 @@ func TestStorageBlobContainer(t *testing.T) { // createAzureBlobContainer creates a mock Azure blob container for testing func createAzureBlobContainer(containerName string) 
*armstorage.BlobContainer { return &armstorage.BlobContainer{ - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Storage/storageAccounts/teststorageaccount/blobServices/default/containers/" + containerName), - Name: to.Ptr(containerName), - Type: to.Ptr("Microsoft.Storage/storageAccounts/blobServices/containers"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Storage/storageAccounts/teststorageaccount/blobServices/default/containers/" + containerName), + Name: new(containerName), + Type: new("Microsoft.Storage/storageAccounts/blobServices/containers"), ContainerProperties: &armstorage.ContainerProperties{ - PublicAccess: to.Ptr(armstorage.PublicAccessNone), + PublicAccess: new(armstorage.PublicAccessNone), }, - Etag: to.Ptr("\"0x8D1234567890ABC\""), + Etag: new("\"0x8D1234567890ABC\""), } } // createAzureBlobContainerWithEncryptionScope creates a mock Azure blob container with a default encryption scope func createAzureBlobContainerWithEncryptionScope(containerName, encryptionScopeName string) *armstorage.BlobContainer { return &armstorage.BlobContainer{ - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Storage/storageAccounts/teststorageaccount/blobServices/default/containers/" + containerName), - Name: to.Ptr(containerName), - Type: to.Ptr("Microsoft.Storage/storageAccounts/blobServices/containers"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Storage/storageAccounts/teststorageaccount/blobServices/default/containers/" + containerName), + Name: new(containerName), + Type: new("Microsoft.Storage/storageAccounts/blobServices/containers"), ContainerProperties: &armstorage.ContainerProperties{ - PublicAccess: to.Ptr(armstorage.PublicAccessNone), - DefaultEncryptionScope: to.Ptr(encryptionScopeName), - DenyEncryptionScopeOverride: to.Ptr(false), + PublicAccess: new(armstorage.PublicAccessNone), + 
DefaultEncryptionScope: new(encryptionScopeName), + DenyEncryptionScopeOverride: new(false), }, - Etag: to.Ptr("\"0x8D1234567890ABC\""), + Etag: new("\"0x8D1234567890ABC\""), } } diff --git a/sources/azure/manual/storage-encryption-scope.go b/sources/azure/manual/storage-encryption-scope.go new file mode 100644 index 00000000..ea8dcb0e --- /dev/null +++ b/sources/azure/manual/storage-encryption-scope.go @@ -0,0 +1,259 @@ +package manual + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage/v3" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" + "github.com/overmindtech/cli/sources/stdlib" +) + +var StorageEncryptionScopeLookupByName = shared.NewItemTypeLookup("name", azureshared.StorageEncryptionScope) + +type storageEncryptionScopeWrapper struct { + client clients.EncryptionScopesClient + + *azureshared.MultiResourceGroupBase +} + +func NewStorageEncryptionScope(client clients.EncryptionScopesClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.SearchableWrapper { + return &storageEncryptionScopeWrapper{ + client: client, + MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_STORAGE, + azureshared.StorageEncryptionScope, + ), + } +} + +func (s storageEncryptionScopeWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 2 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Get requires 2 query parts: storageAccountName and encryptionScopeName", + Scope: scope, + ItemType: s.Type(), + } + } + storageAccountName := queryParts[0] + 
encryptionScopeName := queryParts[1] + + rgScope, err := s.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + resp, err := s.client.Get(ctx, rgScope.ResourceGroup, storageAccountName, encryptionScopeName) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + item, sdpErr := s.azureEncryptionScopeToSDPItem(&resp.EncryptionScope, storageAccountName, encryptionScopeName, scope) + if sdpErr != nil { + return nil, sdpErr + } + + return item, nil +} + +func (s storageEncryptionScopeWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + StorageAccountLookupByName, + StorageEncryptionScopeLookupByName, + } +} + +func (s storageEncryptionScopeWrapper) Search(ctx context.Context, scope string, queryParts ...string) ([]*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 1 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Search requires 1 query part: storageAccountName", + Scope: scope, + ItemType: s.Type(), + } + } + storageAccountName := queryParts[0] + + rgScope, err := s.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + pager := s.client.List(ctx, rgScope.ResourceGroup, storageAccountName) + + var items []*sdp.Item + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + for _, encScope := range page.Value { + if encScope.Name == nil { + continue + } + + item, sdpErr := s.azureEncryptionScopeToSDPItem(encScope, storageAccountName, *encScope.Name, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + + return items, nil +} + +func (s storageEncryptionScopeWrapper) SearchStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string, queryParts ...string) { + if 
len(queryParts) < 1 { + stream.SendError(azureshared.QueryError(errors.New("Search requires 1 query part: storageAccountName"), scope, s.Type())) + return + } + storageAccountName := queryParts[0] + + rgScope, err := s.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, s.Type())) + return + } + pager := s.client.List(ctx, rgScope.ResourceGroup, storageAccountName) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, s.Type())) + return + } + for _, encScope := range page.Value { + if encScope.Name == nil { + continue + } + item, sdpErr := s.azureEncryptionScopeToSDPItem(encScope, storageAccountName, *encScope.Name, scope) + if sdpErr != nil { + stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) + stream.SendItem(item) + } + } +} + +func (s storageEncryptionScopeWrapper) SearchLookups() []sources.ItemTypeLookups { + return []sources.ItemTypeLookups{ + { + StorageAccountLookupByName, + }, + } +} + +func (s storageEncryptionScopeWrapper) PotentialLinks() map[shared.ItemType]bool { + return map[shared.ItemType]bool{ + azureshared.StorageAccount: true, + azureshared.KeyVaultVault: true, + azureshared.KeyVaultKey: true, + stdlib.NetworkDNS: true, + } +} + +func (s storageEncryptionScopeWrapper) azureEncryptionScopeToSDPItem(encScope *armstorage.EncryptionScope, storageAccountName, encryptionScopeName, scope string) (*sdp.Item, *sdp.QueryError) { + attributes, err := shared.ToAttributesWithExclude(encScope, "tags") + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + err = attributes.Set("uniqueAttr", shared.CompositeLookupKey(storageAccountName, encryptionScopeName)) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + item := &sdp.Item{ + Type: azureshared.StorageEncryptionScope.String(), + UniqueAttribute: "uniqueAttr", + 
Attributes: attributes, + Scope: scope, + } + + item.LinkedItemQueries = append(item.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.StorageAccount.String(), + Method: sdp.QueryMethod_GET, + Query: storageAccountName, + Scope: scope, + }, + }) + + // Link to Key Vault when encryption scope uses customer-managed keys (source Microsoft.KeyVault) + if encScope.EncryptionScopeProperties != nil && encScope.EncryptionScopeProperties.KeyVaultProperties != nil && encScope.EncryptionScopeProperties.KeyVaultProperties.KeyURI != nil { + keyURI := *encScope.EncryptionScopeProperties.KeyVaultProperties.KeyURI + vaultName := azureshared.ExtractVaultNameFromURI(keyURI) + keyName := azureshared.ExtractKeyNameFromURI(keyURI) + if vaultName != "" { + item.LinkedItemQueries = append(item.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.KeyVaultVault.String(), + Method: sdp.QueryMethod_GET, + Query: vaultName, + Scope: scope, + }, + }) + } + if vaultName != "" && keyName != "" { + item.LinkedItemQueries = append(item.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.KeyVaultKey.String(), + Method: sdp.QueryMethod_GET, + Query: shared.CompositeLookupKey(vaultName, keyName), + Scope: scope, + }, + }) + } + if dnsName := azureshared.ExtractDNSFromURL(keyURI); dnsName != "" { + item.LinkedItemQueries = append(item.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: stdlib.NetworkDNS.String(), + Method: sdp.QueryMethod_SEARCH, + Query: dnsName, + Scope: "global", + }, + }) + } + } + + if encScope.EncryptionScopeProperties != nil && encScope.EncryptionScopeProperties.State != nil { + switch *encScope.EncryptionScopeProperties.State { + case armstorage.EncryptionScopeStateEnabled: + item.Health = sdp.Health_HEALTH_OK.Enum() + case armstorage.EncryptionScopeStateDisabled: + item.Health = sdp.Health_HEALTH_UNKNOWN.Enum() + default: + item.Health = 
sdp.Health_HEALTH_UNKNOWN.Enum() + } + } + + return item, nil +} + +func (s storageEncryptionScopeWrapper) TerraformMappings() []*sdp.TerraformMapping { + return []*sdp.TerraformMapping{ + { + TerraformMethod: sdp.QueryMethod_SEARCH, + TerraformQueryMap: "azurerm_storage_encryption_scope.id", + }, + } +} + +func (s storageEncryptionScopeWrapper) IAMPermissions() []string { + return []string{ + "Microsoft.Storage/storageAccounts/encryptionScopes/read", + } +} + +func (s storageEncryptionScopeWrapper) PredefinedRole() string { + return "Reader" +} diff --git a/sources/azure/manual/storage-encryption-scope_test.go b/sources/azure/manual/storage-encryption-scope_test.go new file mode 100644 index 00000000..619fdfd0 --- /dev/null +++ b/sources/azure/manual/storage-encryption-scope_test.go @@ -0,0 +1,295 @@ +package manual_test + +import ( + "context" + "errors" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage/v3" + "go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + "github.com/overmindtech/cli/sources/shared" +) + +type mockEncryptionScopesPager struct { + pages []armstorage.EncryptionScopesClientListResponse + index int +} + +func (m *mockEncryptionScopesPager) More() bool { + return m.index < len(m.pages) +} + +func (m *mockEncryptionScopesPager) NextPage(ctx context.Context) (armstorage.EncryptionScopesClientListResponse, error) { + if m.index >= len(m.pages) { + return armstorage.EncryptionScopesClientListResponse{}, errors.New("no more pages") + } + page := m.pages[m.index] + m.index++ + return page, 
nil +} + +type errorEncryptionScopesPager struct{} + +func (e *errorEncryptionScopesPager) More() bool { + return true +} + +func (e *errorEncryptionScopesPager) NextPage(ctx context.Context) (armstorage.EncryptionScopesClientListResponse, error) { + return armstorage.EncryptionScopesClientListResponse{}, errors.New("pager error") +} + +type testEncryptionScopesClient struct { + *mocks.MockEncryptionScopesClient + pager clients.EncryptionScopesPager +} + +func (t *testEncryptionScopesClient) List(ctx context.Context, resourceGroupName, accountName string) clients.EncryptionScopesPager { + return t.pager +} + +func TestStorageEncryptionScope(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + storageAccountName := "teststorageaccount" + encryptionScopeName := "test-encryption-scope" + + t.Run("Get", func(t *testing.T) { + encScope := createAzureEncryptionScope(encryptionScopeName) + + mockClient := mocks.NewMockEncryptionScopesClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, storageAccountName, encryptionScopeName).Return( + armstorage.EncryptionScopesClientGetResponse{ + EncryptionScope: *encScope, + }, nil) + + testClient := &testEncryptionScopesClient{MockEncryptionScopesClient: mockClient} + wrapper := manual.NewStorageEncryptionScope(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(storageAccountName, encryptionScopeName) + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != azureshared.StorageEncryptionScope.String() { + t.Errorf("Expected type %s, got %s", azureshared.StorageEncryptionScope.String(), sdpItem.GetType()) + } + + if 
sdpItem.GetUniqueAttribute() != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", sdpItem.GetUniqueAttribute()) + } + + if sdpItem.UniqueAttributeValue() != shared.CompositeLookupKey(storageAccountName, encryptionScopeName) { + t.Errorf("Expected unique attribute value %s, got %s", shared.CompositeLookupKey(storageAccountName, encryptionScopeName), sdpItem.UniqueAttributeValue()) + } + + if sdpItem.GetScope() != subscriptionID+"."+resourceGroup { + t.Errorf("Expected scope %s, got %s", subscriptionID+"."+resourceGroup, sdpItem.GetScope()) + } + + if err := sdpItem.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + + t.Run("StaticTests", func(t *testing.T) { + linkedQueries := sdpItem.GetLinkedItemQueries() + if len(linkedQueries) != 1 { + t.Fatalf("Expected 1 linked query, got: %d", len(linkedQueries)) + } + + linkedQuery := linkedQueries[0] + if linkedQuery.GetQuery().GetType() != azureshared.StorageAccount.String() { + t.Errorf("Expected linked query type %s, got %s", azureshared.StorageAccount.String(), linkedQuery.GetQuery().GetType()) + } + if linkedQuery.GetQuery().GetMethod() != sdp.QueryMethod_GET { + t.Errorf("Expected linked query method GET, got %s", linkedQuery.GetQuery().GetMethod()) + } + if linkedQuery.GetQuery().GetQuery() != storageAccountName { + t.Errorf("Expected linked query %s, got %s", storageAccountName, linkedQuery.GetQuery().GetQuery()) + } + }) + }) + + t.Run("Get_InvalidQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockEncryptionScopesClient(ctrl) + testClient := &testEncryptionScopesClient{MockEncryptionScopesClient: mockClient} + + wrapper := manual.NewStorageEncryptionScope(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], storageAccountName, true) + if qErr == nil { + 
t.Error("Expected error when providing insufficient query parts, but got nil") + } + }) + + t.Run("Search", func(t *testing.T) { + scope1 := createAzureEncryptionScope("scope-1") + scope2 := createAzureEncryptionScope("scope-2") + + mockClient := mocks.NewMockEncryptionScopesClient(ctrl) + mockPager := &mockEncryptionScopesPager{ + pages: []armstorage.EncryptionScopesClientListResponse{ + { + EncryptionScopeListResult: armstorage.EncryptionScopeListResult{ + Value: []*armstorage.EncryptionScope{scope1, scope2}, + }, + }, + }, + } + + testClient := &testEncryptionScopesClient{ + MockEncryptionScopesClient: mockClient, + pager: mockPager, + } + + wrapper := manual.NewStorageEncryptionScope(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, wrapper.Scopes()[0], storageAccountName, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 2 { + t.Fatalf("Expected 2 items, got: %d", len(sdpItems)) + } + + for _, item := range sdpItems { + if err := item.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + if item.GetType() != azureshared.StorageEncryptionScope.String() { + t.Errorf("Expected type %s, got %s", azureshared.StorageEncryptionScope.String(), item.GetType()) + } + } + }) + + t.Run("Search_InvalidQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockEncryptionScopesClient(ctrl) + testClient := &testEncryptionScopesClient{MockEncryptionScopesClient: mockClient} + + wrapper := manual.NewStorageEncryptionScope(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + _, qErr := wrapper.Search(ctx, wrapper.Scopes()[0]) + 
if qErr == nil { + t.Error("Expected error when providing no query parts, but got nil") + } + }) + + t.Run("Search_ScopeWithNilName", func(t *testing.T) { + mockClient := mocks.NewMockEncryptionScopesClient(ctrl) + validScope := createAzureEncryptionScope("valid-scope") + mockPager := &mockEncryptionScopesPager{ + pages: []armstorage.EncryptionScopesClientListResponse{ + { + EncryptionScopeListResult: armstorage.EncryptionScopeListResult{ + Value: []*armstorage.EncryptionScope{ + {Name: nil}, + validScope, + }, + }, + }, + }, + } + + testClient := &testEncryptionScopesClient{ + MockEncryptionScopesClient: mockClient, + pager: mockPager, + } + + wrapper := manual.NewStorageEncryptionScope(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, wrapper.Scopes()[0], storageAccountName, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 1 { + t.Fatalf("Expected 1 item, got: %d", len(sdpItems)) + } + + if sdpItems[0].UniqueAttributeValue() != shared.CompositeLookupKey(storageAccountName, "valid-scope") { + t.Errorf("Expected unique value %s, got %s", shared.CompositeLookupKey(storageAccountName, "valid-scope"), sdpItems[0].UniqueAttributeValue()) + } + }) + + t.Run("ErrorHandling_Get", func(t *testing.T) { + expectedErr := errors.New("encryption scope not found") + + mockClient := mocks.NewMockEncryptionScopesClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, storageAccountName, "nonexistent-scope").Return( + armstorage.EncryptionScopesClientGetResponse{}, expectedErr) + + testClient := &testEncryptionScopesClient{MockEncryptionScopesClient: mockClient} + wrapper := manual.NewStorageEncryptionScope(testClient, 
[]azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := storageAccountName + shared.QuerySeparator + "nonexistent-scope" + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when getting non-existent encryption scope, but got nil") + } + }) + + t.Run("ErrorHandling_Search", func(t *testing.T) { + mockClient := mocks.NewMockEncryptionScopesClient(ctrl) + errorPager := &errorEncryptionScopesPager{} + + testClient := &testEncryptionScopesClient{ + MockEncryptionScopesClient: mockClient, + pager: errorPager, + } + + wrapper := manual.NewStorageEncryptionScope(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + _, err := searchable.Search(ctx, wrapper.Scopes()[0], storageAccountName, true) + if err == nil { + t.Error("Expected error from pager when NextPage returns an error, but got nil") + } + }) +} + +func createAzureEncryptionScope(scopeName string) *armstorage.EncryptionScope { + return &armstorage.EncryptionScope{ + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Storage/storageAccounts/teststorageaccount/encryptionScopes/" + scopeName), + Name: new(scopeName), + Type: new("Microsoft.Storage/storageAccounts/encryptionScopes"), + EncryptionScopeProperties: &armstorage.EncryptionScopeProperties{ + Source: to.Ptr(armstorage.EncryptionScopeSourceMicrosoftStorage), + State: to.Ptr(armstorage.EncryptionScopeStateEnabled), + }, + } +} diff --git a/sources/azure/manual/storage-fileshare_test.go b/sources/azure/manual/storage-fileshare_test.go index a29503b0..f810dfdb 100644 --- 
a/sources/azure/manual/storage-fileshare_test.go +++ b/sources/azure/manual/storage-fileshare_test.go @@ -5,7 +5,6 @@ import ( "errors" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage/v3" "go.uber.org/mock/gomock" @@ -241,9 +240,9 @@ func TestStorageFileShare(t *testing.T) { Name: nil, }, { - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Storage/storageAccounts/teststorageaccount/fileServices/default/shares/valid-share"), - Name: to.Ptr("valid-share"), - Type: to.Ptr("Microsoft.Storage/storageAccounts/fileServices/shares"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Storage/storageAccounts/teststorageaccount/fileServices/default/shares/valid-share"), + Name: new("valid-share"), + Type: new("Microsoft.Storage/storageAccounts/fileServices/shares"), }, }, }, @@ -327,13 +326,13 @@ func TestStorageFileShare(t *testing.T) { // createAzureFileShare creates a mock Azure file share for testing func createAzureFileShare(shareName string) *armstorage.FileShare { return &armstorage.FileShare{ - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Storage/storageAccounts/teststorageaccount/fileServices/default/shares/" + shareName), - Name: to.Ptr(shareName), - Type: to.Ptr("Microsoft.Storage/storageAccounts/fileServices/shares"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Storage/storageAccounts/teststorageaccount/fileServices/default/shares/" + shareName), + Name: new(shareName), + Type: new("Microsoft.Storage/storageAccounts/fileServices/shares"), FileShareProperties: &armstorage.FileShareProperties{ - AccessTier: to.Ptr(armstorage.ShareAccessTierHot), - ShareQuota: to.Ptr(int32(5120)), // 5GB + AccessTier: new(armstorage.ShareAccessTierHot), + ShareQuota: new(int32(5120)), // 5GB }, - Etag: to.Ptr("\"0x8D1234567890ABC\""), + 
Etag: new("\"0x8D1234567890ABC\""), } } diff --git a/sources/azure/manual/storage-private-endpoint-connection.go b/sources/azure/manual/storage-private-endpoint-connection.go new file mode 100644 index 00000000..2a474430 --- /dev/null +++ b/sources/azure/manual/storage-private-endpoint-connection.go @@ -0,0 +1,235 @@ +package manual + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage/v3" + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/shared" +) + +var StoragePrivateEndpointConnectionLookupByName = shared.NewItemTypeLookup("name", azureshared.StoragePrivateEndpointConnection) + +type storagePrivateEndpointConnectionWrapper struct { + client clients.StoragePrivateEndpointConnectionsClient + + *azureshared.MultiResourceGroupBase +} + +// NewStoragePrivateEndpointConnection returns a SearchableWrapper for Azure storage account private endpoint connections. 
+func NewStoragePrivateEndpointConnection(client clients.StoragePrivateEndpointConnectionsClient, resourceGroupScopes []azureshared.ResourceGroupScope) sources.SearchableWrapper { + return &storagePrivateEndpointConnectionWrapper{ + client: client, + MultiResourceGroupBase: azureshared.NewMultiResourceGroupBase( + resourceGroupScopes, + sdp.AdapterCategory_ADAPTER_CATEGORY_STORAGE, + azureshared.StoragePrivateEndpointConnection, + ), + } +} + +func (s storagePrivateEndpointConnectionWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 2 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Get requires 2 query parts: storageAccountName and privateEndpointConnectionName", + Scope: scope, + ItemType: s.Type(), + } + } + accountName := queryParts[0] + connectionName := queryParts[1] + + rgScope, err := s.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + resp, err := s.client.Get(ctx, rgScope.ResourceGroup, accountName, connectionName) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + item, sdpErr := s.azurePrivateEndpointConnectionToSDPItem(&resp.PrivateEndpointConnection, accountName, connectionName, scope) + if sdpErr != nil { + return nil, sdpErr + } + return item, nil +} + +func (s storagePrivateEndpointConnectionWrapper) GetLookups() sources.ItemTypeLookups { + return sources.ItemTypeLookups{ + StorageAccountLookupByName, + StoragePrivateEndpointConnectionLookupByName, + } +} + +func (s storagePrivateEndpointConnectionWrapper) Search(ctx context.Context, scope string, queryParts ...string) ([]*sdp.Item, *sdp.QueryError) { + if len(queryParts) < 1 { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: "Search requires 1 query part: storageAccountName", + Scope: scope, + ItemType: s.Type(), + } + } + accountName := queryParts[0] + + 
rgScope, err := s.ResourceGroupScopeFromScope(scope) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + pager := s.client.List(ctx, rgScope.ResourceGroup, accountName) + + var items []*sdp.Item + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + for _, conn := range page.Value { + if conn.Name == nil { + continue + } + + item, sdpErr := s.azurePrivateEndpointConnectionToSDPItem(conn, accountName, *conn.Name, scope) + if sdpErr != nil { + return nil, sdpErr + } + items = append(items, item) + } + } + + return items, nil +} + +func (s storagePrivateEndpointConnectionWrapper) SearchStream(ctx context.Context, stream discovery.QueryResultStream, cache sdpcache.Cache, cacheKey sdpcache.CacheKey, scope string, queryParts ...string) { + if len(queryParts) < 1 { + stream.SendError(azureshared.QueryError(errors.New("Search requires 1 query part: storageAccountName"), scope, s.Type())) + return + } + accountName := queryParts[0] + + rgScope, err := s.ResourceGroupScopeFromScope(scope) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, s.Type())) + return + } + pager := s.client.List(ctx, rgScope.ResourceGroup, accountName) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + stream.SendError(azureshared.QueryError(err, scope, s.Type())) + return + } + for _, conn := range page.Value { + if conn.Name == nil { + continue + } + item, sdpErr := s.azurePrivateEndpointConnectionToSDPItem(conn, accountName, *conn.Name, scope) + if sdpErr != nil { + stream.SendError(sdpErr) + continue + } + cache.StoreItem(ctx, item, shared.DefaultCacheDuration, cacheKey) + stream.SendItem(item) + } + } +} + +func (s storagePrivateEndpointConnectionWrapper) SearchLookups() []sources.ItemTypeLookups { + return []sources.ItemTypeLookups{ + { + StorageAccountLookupByName, + }, + } +} + +func (s storagePrivateEndpointConnectionWrapper) 
PotentialLinks() map[shared.ItemType]bool { + return map[shared.ItemType]bool{ + azureshared.StorageAccount: true, + azureshared.NetworkPrivateEndpoint: true, + } +} + +func (s storagePrivateEndpointConnectionWrapper) azurePrivateEndpointConnectionToSDPItem(conn *armstorage.PrivateEndpointConnection, accountName, connectionName, scope string) (*sdp.Item, *sdp.QueryError) { + attributes, err := shared.ToAttributesWithExclude(conn) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + err = attributes.Set("uniqueAttr", shared.CompositeLookupKey(accountName, connectionName)) + if err != nil { + return nil, azureshared.QueryError(err, scope, s.Type()) + } + + sdpItem := &sdp.Item{ + Type: azureshared.StoragePrivateEndpointConnection.String(), + UniqueAttribute: "uniqueAttr", + Attributes: attributes, + Scope: scope, + } + + // Health from provisioning state + if conn.Properties != nil && conn.Properties.ProvisioningState != nil { + switch *conn.Properties.ProvisioningState { + case armstorage.PrivateEndpointConnectionProvisioningStateSucceeded: + sdpItem.Health = sdp.Health_HEALTH_OK.Enum() + case armstorage.PrivateEndpointConnectionProvisioningStateCreating, + armstorage.PrivateEndpointConnectionProvisioningStateDeleting: + sdpItem.Health = sdp.Health_HEALTH_PENDING.Enum() + case armstorage.PrivateEndpointConnectionProvisioningStateFailed: + sdpItem.Health = sdp.Health_HEALTH_ERROR.Enum() + default: + sdpItem.Health = sdp.Health_HEALTH_UNKNOWN.Enum() + } + } + + // Link to parent Storage Account + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.StorageAccount.String(), + Method: sdp.QueryMethod_GET, + Query: accountName, + Scope: scope, + }, + }) + + // Link to Network Private Endpoint when present (may be in different resource group) + if conn.Properties != nil && conn.Properties.PrivateEndpoint != nil && conn.Properties.PrivateEndpoint.ID != nil { + peID := 
*conn.Properties.PrivateEndpoint.ID + peName := azureshared.ExtractResourceName(peID) + if peName != "" { + linkedScope := scope + if extractedScope := azureshared.ExtractScopeFromResourceID(peID); extractedScope != "" { + linkedScope = extractedScope + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: azureshared.NetworkPrivateEndpoint.String(), + Method: sdp.QueryMethod_GET, + Query: peName, + Scope: linkedScope, + }, + }) + } + } + + return sdpItem, nil +} + +func (s storagePrivateEndpointConnectionWrapper) IAMPermissions() []string { + return []string{ + "Microsoft.Storage/storageAccounts/privateEndpointConnections/read", + } +} + +func (s storagePrivateEndpointConnectionWrapper) PredefinedRole() string { + return "Reader" +} diff --git a/sources/azure/manual/storage-private-endpoint-connection_test.go b/sources/azure/manual/storage-private-endpoint-connection_test.go new file mode 100644 index 00000000..8bb2d545 --- /dev/null +++ b/sources/azure/manual/storage-private-endpoint-connection_test.go @@ -0,0 +1,321 @@ +package manual_test + +import ( + "context" + "errors" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage/v3" + "go.uber.org/mock/gomock" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/azure/clients" + "github.com/overmindtech/cli/sources/azure/manual" + azureshared "github.com/overmindtech/cli/sources/azure/shared" + "github.com/overmindtech/cli/sources/azure/shared/mocks" + "github.com/overmindtech/cli/sources/shared" +) + +type mockPrivateEndpointConnectionsPager struct { + pages []armstorage.PrivateEndpointConnectionsClientListResponse + index int +} + +func (m *mockPrivateEndpointConnectionsPager) More() bool { + return m.index 
< len(m.pages) +} + +func (m *mockPrivateEndpointConnectionsPager) NextPage(ctx context.Context) (armstorage.PrivateEndpointConnectionsClientListResponse, error) { + if m.index >= len(m.pages) { + return armstorage.PrivateEndpointConnectionsClientListResponse{}, errors.New("no more pages") + } + page := m.pages[m.index] + m.index++ + return page, nil +} + +type testStoragePrivateEndpointConnectionsClient struct { + *mocks.MockStoragePrivateEndpointConnectionsClient + pager clients.PrivateEndpointConnectionsPager +} + +func (t *testStoragePrivateEndpointConnectionsClient) List(ctx context.Context, resourceGroupName, accountName string) clients.PrivateEndpointConnectionsPager { + return t.pager +} + +func TestStoragePrivateEndpointConnection(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + subscriptionID := "test-subscription" + resourceGroup := "test-rg" + accountName := "teststorageaccount" + connectionName := "test-pec" + + t.Run("Get", func(t *testing.T) { + conn := createAzureStoragePrivateEndpointConnection(connectionName, "") + + mockClient := mocks.NewMockStoragePrivateEndpointConnectionsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, accountName, connectionName).Return( + armstorage.PrivateEndpointConnectionsClientGetResponse{ + PrivateEndpointConnection: *conn, + }, nil) + + testClient := &testStoragePrivateEndpointConnectionsClient{MockStoragePrivateEndpointConnectionsClient: mockClient} + wrapper := manual.NewStoragePrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(accountName, connectionName) + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + if sdpItem.GetType() != 
azureshared.StoragePrivateEndpointConnection.String() { + t.Errorf("Expected type %s, got %s", azureshared.StoragePrivateEndpointConnection, sdpItem.GetType()) + } + + if sdpItem.GetUniqueAttribute() != "uniqueAttr" { + t.Errorf("Expected unique attribute 'uniqueAttr', got %s", sdpItem.GetUniqueAttribute()) + } + + if sdpItem.UniqueAttributeValue() != shared.CompositeLookupKey(accountName, connectionName) { + t.Errorf("Expected unique attribute value %s, got %s", shared.CompositeLookupKey(accountName, connectionName), sdpItem.UniqueAttributeValue()) + } + + if sdpItem.GetScope() != subscriptionID+"."+resourceGroup { + t.Errorf("Expected scope %s, got %s", subscriptionID+"."+resourceGroup, sdpItem.GetScope()) + } + + if err := sdpItem.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + + t.Run("StaticTests", func(t *testing.T) { + linkedQueries := sdpItem.GetLinkedItemQueries() + if len(linkedQueries) < 1 { + t.Fatalf("Expected at least 1 linked query, got: %d", len(linkedQueries)) + } + + foundStorageAccount := false + for _, lq := range linkedQueries { + if lq.GetQuery().GetType() == azureshared.StorageAccount.String() { + foundStorageAccount = true + if lq.GetQuery().GetMethod() != sdp.QueryMethod_GET { + t.Errorf("Expected StorageAccount link method GET, got %v", lq.GetQuery().GetMethod()) + } + if lq.GetQuery().GetQuery() != accountName { + t.Errorf("Expected StorageAccount query %s, got %s", accountName, lq.GetQuery().GetQuery()) + } + } + } + if !foundStorageAccount { + t.Error("Expected linked query to StorageAccount") + } + }) + }) + + t.Run("Get_WithPrivateEndpointLink", func(t *testing.T) { + peID := "/subscriptions/" + subscriptionID + "/resourceGroups/" + resourceGroup + "/providers/Microsoft.Network/privateEndpoints/test-pe" + conn := createAzureStoragePrivateEndpointConnection(connectionName, peID) + + mockClient := mocks.NewMockStoragePrivateEndpointConnectionsClient(ctrl) + mockClient.EXPECT().Get(ctx, 
resourceGroup, accountName, connectionName).Return( + armstorage.PrivateEndpointConnectionsClientGetResponse{ + PrivateEndpointConnection: *conn, + }, nil) + + testClient := &testStoragePrivateEndpointConnectionsClient{MockStoragePrivateEndpointConnectionsClient: mockClient} + wrapper := manual.NewStoragePrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(accountName, connectionName) + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + foundPrivateEndpoint := false + for _, lq := range sdpItem.GetLinkedItemQueries() { + if lq.GetQuery().GetType() == azureshared.NetworkPrivateEndpoint.String() { + foundPrivateEndpoint = true + if lq.GetQuery().GetQuery() != "test-pe" { + t.Errorf("Expected NetworkPrivateEndpoint query 'test-pe', got %s", lq.GetQuery().GetQuery()) + } + break + } + } + if !foundPrivateEndpoint { + t.Error("Expected linked query to NetworkPrivateEndpoint when PrivateEndpoint ID is set") + } + }) + + t.Run("GetWithInsufficientQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockStoragePrivateEndpointConnectionsClient(ctrl) + testClient := &testStoragePrivateEndpointConnectionsClient{MockStoragePrivateEndpointConnectionsClient: mockClient} + + wrapper := manual.NewStoragePrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], accountName, true) + if qErr == nil { + t.Error("Expected error when providing insufficient query parts, but got nil") + } + }) + + t.Run("Search", func(t *testing.T) { + conn1 := createAzureStoragePrivateEndpointConnection("pec-1", "") 
+ conn2 := createAzureStoragePrivateEndpointConnection("pec-2", "") + + mockClient := mocks.NewMockStoragePrivateEndpointConnectionsClient(ctrl) + mockPager := &mockPrivateEndpointConnectionsPager{ + pages: []armstorage.PrivateEndpointConnectionsClientListResponse{ + { + PrivateEndpointConnectionListResult: armstorage.PrivateEndpointConnectionListResult{ + Value: []*armstorage.PrivateEndpointConnection{conn1, conn2}, + }, + }, + }, + } + + testClient := &testStoragePrivateEndpointConnectionsClient{ + MockStoragePrivateEndpointConnectionsClient: mockClient, + pager: mockPager, + } + + wrapper := manual.NewStoragePrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, wrapper.Scopes()[0], accountName, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 2 { + t.Fatalf("Expected 2 items, got: %d", len(sdpItems)) + } + + for _, item := range sdpItems { + if err := item.Validate(); err != nil { + t.Fatalf("Expected no validation error, got: %v", err) + } + if item.GetType() != azureshared.StoragePrivateEndpointConnection.String() { + t.Errorf("Expected type %s, got %s", azureshared.StoragePrivateEndpointConnection, item.GetType()) + } + } + }) + + t.Run("Search_NilNameSkipped", func(t *testing.T) { + validConn := createAzureStoragePrivateEndpointConnection("valid-pec", "") + + mockClient := mocks.NewMockStoragePrivateEndpointConnectionsClient(ctrl) + mockPager := &mockPrivateEndpointConnectionsPager{ + pages: []armstorage.PrivateEndpointConnectionsClientListResponse{ + { + PrivateEndpointConnectionListResult: armstorage.PrivateEndpointConnectionListResult{ + Value: 
[]*armstorage.PrivateEndpointConnection{ + {Name: nil}, + validConn, + }, + }, + }, + }, + } + + testClient := &testStoragePrivateEndpointConnectionsClient{ + MockStoragePrivateEndpointConnectionsClient: mockClient, + pager: mockPager, + } + + wrapper := manual.NewStoragePrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter does not support Search operation") + } + + sdpItems, err := searchable.Search(ctx, wrapper.Scopes()[0], accountName, true) + if err != nil { + t.Fatalf("Expected no error, got: %v", err) + } + + if len(sdpItems) != 1 { + t.Fatalf("Expected 1 item (nil name skipped), got: %d", len(sdpItems)) + } + if sdpItems[0].UniqueAttributeValue() != shared.CompositeLookupKey(accountName, "valid-pec") { + t.Errorf("Expected unique value %s, got %s", shared.CompositeLookupKey(accountName, "valid-pec"), sdpItems[0].UniqueAttributeValue()) + } + }) + + t.Run("Search_InvalidQueryParts", func(t *testing.T) { + mockClient := mocks.NewMockStoragePrivateEndpointConnectionsClient(ctrl) + testClient := &testStoragePrivateEndpointConnectionsClient{MockStoragePrivateEndpointConnectionsClient: mockClient} + + wrapper := manual.NewStoragePrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + + _, qErr := wrapper.Search(ctx, wrapper.Scopes()[0]) + if qErr == nil { + t.Error("Expected error when providing no query parts, but got nil") + } + }) + + t.Run("ErrorHandling_Get", func(t *testing.T) { + expectedErr := errors.New("private endpoint connection not found") + + mockClient := mocks.NewMockStoragePrivateEndpointConnectionsClient(ctrl) + mockClient.EXPECT().Get(ctx, resourceGroup, accountName, "nonexistent-pec").Return( + 
armstorage.PrivateEndpointConnectionsClientGetResponse{}, expectedErr) + + testClient := &testStoragePrivateEndpointConnectionsClient{MockStoragePrivateEndpointConnectionsClient: mockClient} + wrapper := manual.NewStoragePrivateEndpointConnection(testClient, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + query := shared.CompositeLookupKey(accountName, "nonexistent-pec") + _, qErr := adapter.Get(ctx, wrapper.Scopes()[0], query, true) + if qErr == nil { + t.Error("Expected error when getting non-existent private endpoint connection, but got nil") + } + }) + + t.Run("PotentialLinks", func(t *testing.T) { + wrapper := manual.NewStoragePrivateEndpointConnection(nil, []azureshared.ResourceGroupScope{azureshared.NewResourceGroupScope(subscriptionID, resourceGroup)}) + links := wrapper.PotentialLinks() + if !links[azureshared.StorageAccount] { + t.Error("Expected StorageAccount in PotentialLinks") + } + if !links[azureshared.NetworkPrivateEndpoint] { + t.Error("Expected NetworkPrivateEndpoint in PotentialLinks") + } + }) +} + +func createAzureStoragePrivateEndpointConnection(connectionName, privateEndpointID string) *armstorage.PrivateEndpointConnection { + conn := &armstorage.PrivateEndpointConnection{ + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Storage/storageAccounts/teststorageaccount/privateEndpointConnections/" + connectionName), + Name: new(connectionName), + Type: new("Microsoft.Storage/storageAccounts/privateEndpointConnections"), + Properties: &armstorage.PrivateEndpointConnectionProperties{ + ProvisioningState: to.Ptr(armstorage.PrivateEndpointConnectionProvisioningStateSucceeded), + PrivateLinkServiceConnectionState: &armstorage.PrivateLinkServiceConnectionState{ + Status: to.Ptr(armstorage.PrivateEndpointServiceConnectionStatusApproved), + }, + }, + } + if privateEndpointID != "" { + 
conn.Properties.PrivateEndpoint = &armstorage.PrivateEndpoint{ + ID: new(privateEndpointID), + } + } + return conn +} diff --git a/sources/azure/manual/storage-queues_test.go b/sources/azure/manual/storage-queues_test.go index ad631a15..55c380c4 100644 --- a/sources/azure/manual/storage-queues_test.go +++ b/sources/azure/manual/storage-queues_test.go @@ -5,7 +5,6 @@ import ( "errors" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage/v3" "go.uber.org/mock/gomock" @@ -393,13 +392,13 @@ func TestStorageQueues(t *testing.T) { // createAzureQueue creates a mock Azure queue for testing func createAzureQueue(queueName string) *armstorage.Queue { return &armstorage.Queue{ - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Storage/storageAccounts/teststorageaccount/queueServices/default/queues/" + queueName), - Name: to.Ptr(queueName), - Type: to.Ptr("Microsoft.Storage/storageAccounts/queueServices/queues"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Storage/storageAccounts/teststorageaccount/queueServices/default/queues/" + queueName), + Name: new(queueName), + Type: new("Microsoft.Storage/storageAccounts/queueServices/queues"), QueueProperties: &armstorage.QueueProperties{ Metadata: map[string]*string{ - "env": to.Ptr("test"), - "project": to.Ptr("testing"), + "env": new("test"), + "project": new("testing"), }, }, } diff --git a/sources/azure/manual/storage-table_test.go b/sources/azure/manual/storage-table_test.go index 86d1b2ad..181f872c 100644 --- a/sources/azure/manual/storage-table_test.go +++ b/sources/azure/manual/storage-table_test.go @@ -3,9 +3,9 @@ package manual_test import ( "context" "errors" + "slices" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage/v3" "go.uber.org/mock/gomock" @@ -394,13 +394,7 @@ func 
TestStorageTables(t *testing.T) { } expectedPermission := "Microsoft.Storage/storageAccounts/tableServices/tables/read" - found := false - for _, perm := range permissions { - if perm == expectedPermission { - found = true - break - } - } + found := slices.Contains(permissions, expectedPermission) if !found { t.Errorf("Expected IAMPermissions to include %s", expectedPermission) } @@ -410,9 +404,9 @@ func TestStorageTables(t *testing.T) { // createAzureTable creates a mock Azure table for testing func createAzureTable(tableName string) *armstorage.Table { return &armstorage.Table{ - ID: to.Ptr("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Storage/storageAccounts/teststorageaccount/tableServices/default/tables/" + tableName), - Name: to.Ptr(tableName), - Type: to.Ptr("Microsoft.Storage/storageAccounts/tableServices/tables"), + ID: new("/subscriptions/test-subscription/resourceGroups/test-rg/providers/Microsoft.Storage/storageAccounts/teststorageaccount/tableServices/default/tables/" + tableName), + Name: new(tableName), + Type: new("Microsoft.Storage/storageAccounts/tableServices/tables"), TableProperties: &armstorage.TableProperties{}, } } diff --git a/sources/azure/proc/proc.go b/sources/azure/proc/proc.go index 2fddfca4..db06900c 100644 --- a/sources/azure/proc/proc.go +++ b/sources/azure/proc/proc.go @@ -132,7 +132,7 @@ func InitializeAdapters(ctx context.Context, engine *discovery.Engine, cfg *Azur } // TODO: Implement linker when Azure dynamic adapters are available - var linker interface{} = nil + var linker any = nil discoveryAdapters, err := adapters(ctx, cfg.SubscriptionID, cfg.TenantID, cfg.ClientID, cfg.Regions, cred, linker, true, sharedCache) if err != nil { @@ -206,7 +206,7 @@ func adapters( clientID string, regions []string, cred *azidentity.DefaultAzureCredential, - linker interface{}, // TODO: Use *azureshared.Linker when azureshared package is fully implemented + linker any, // TODO: Use *azureshared.Linker when 
azureshared package is fully implemented initAzureClients bool, cache sdpcache.Cache, ) ([]discovery.Adapter, error) { diff --git a/sources/azure/shared/item-types.go b/sources/azure/shared/item-types.go index 41b2f74d..aae8f3f7 100644 --- a/sources/azure/shared/item-types.go +++ b/sources/azure/shared/item-types.go @@ -49,6 +49,7 @@ var ( NetworkLoadBalancerOutboundRule = shared.NewItemType(Azure, Network, LoadBalancerOutboundRule) NetworkLoadBalancerInboundNatPool = shared.NewItemType(Azure, Network, LoadBalancerInboundNatPool) NetworkPublicIPPrefix = shared.NewItemType(Azure, Network, PublicIPPrefix) + NetworkCustomIPPrefix = shared.NewItemType(Azure, Network, CustomIPPrefix) NetworkNatGateway = shared.NewItemType(Azure, Network, NatGateway) NetworkDdosProtectionPlan = shared.NewItemType(Azure, Network, DdosProtectionPlan) NetworkApplicationGateway = shared.NewItemType(Azure, Network, ApplicationGateway) @@ -73,6 +74,8 @@ var ( NetworkRouteTable = shared.NewItemType(Azure, Network, RouteTable) NetworkRoute = shared.NewItemType(Azure, Network, Route) NetworkVirtualNetworkGateway = shared.NewItemType(Azure, Network, VirtualNetworkGateway) + NetworkVirtualNetworkGatewayConnection = shared.NewItemType(Azure, Network, VirtualNetworkGatewayConnection) + NetworkLocalNetworkGateway = shared.NewItemType(Azure, Network, LocalNetworkGateway) NetworkPrivateDNSZone = shared.NewItemType(Azure, Network, PrivateDNSZone) NetworkZone = shared.NewItemType(Azure, Network, Zone) NetworkDNSRecordSet = shared.NewItemType(Azure, Network, DNSRecordSet) @@ -82,6 +85,11 @@ var ( NetworkDscpConfiguration = shared.NewItemType(Azure, Network, DscpConfiguration) NetworkVirtualNetworkTap = shared.NewItemType(Azure, Network, VirtualNetworkTap) NetworkNetworkInterfaceTapConfiguration = shared.NewItemType(Azure, Network, NetworkInterfaceTapConfiguration) + NetworkServiceEndpointPolicy = shared.NewItemType(Azure, Network, ServiceEndpointPolicy) + NetworkIpAllocation = shared.NewItemType(Azure, 
Network, IpAllocation) + + // ExtendedLocation item types + ExtendedLocationCustomLocation = shared.NewItemType(Azure, ExtendedLocation, CustomLocation) //Storage item types StorageAccount = shared.NewItemType(Azure, Storage, Account) diff --git a/sources/azure/shared/mocks/mock_application_gateways_client.go b/sources/azure/shared/mocks/mock_application_gateways_client.go index 88a547c5..2962bef8 100644 --- a/sources/azure/shared/mocks/mock_application_gateways_client.go +++ b/sources/azure/shared/mocks/mock_application_gateways_client.go @@ -13,7 +13,7 @@ import ( context "context" reflect "reflect" - armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" clients "github.com/overmindtech/cli/sources/azure/clients" gomock "go.uber.org/mock/gomock" ) diff --git a/sources/azure/shared/mocks/mock_application_security_groups_client.go b/sources/azure/shared/mocks/mock_application_security_groups_client.go new file mode 100644 index 00000000..0df8c08d --- /dev/null +++ b/sources/azure/shared/mocks/mock_application_security_groups_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: application-security-groups-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_application_security_groups_client.go -package=mocks -source=application-security-groups-client.go +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockApplicationSecurityGroupsClient is a mock of ApplicationSecurityGroupsClient interface. 
+type MockApplicationSecurityGroupsClient struct { + ctrl *gomock.Controller + recorder *MockApplicationSecurityGroupsClientMockRecorder + isgomock struct{} +} + +// MockApplicationSecurityGroupsClientMockRecorder is the mock recorder for MockApplicationSecurityGroupsClient. +type MockApplicationSecurityGroupsClientMockRecorder struct { + mock *MockApplicationSecurityGroupsClient +} + +// NewMockApplicationSecurityGroupsClient creates a new mock instance. +func NewMockApplicationSecurityGroupsClient(ctrl *gomock.Controller) *MockApplicationSecurityGroupsClient { + mock := &MockApplicationSecurityGroupsClient{ctrl: ctrl} + mock.recorder = &MockApplicationSecurityGroupsClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockApplicationSecurityGroupsClient) EXPECT() *MockApplicationSecurityGroupsClientMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockApplicationSecurityGroupsClient) Get(ctx context.Context, resourceGroupName, applicationSecurityGroupName string, options *armnetwork.ApplicationSecurityGroupsClientGetOptions) (armnetwork.ApplicationSecurityGroupsClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, applicationSecurityGroupName, options) + ret0, _ := ret[0].(armnetwork.ApplicationSecurityGroupsClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockApplicationSecurityGroupsClientMockRecorder) Get(ctx, resourceGroupName, applicationSecurityGroupName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockApplicationSecurityGroupsClient)(nil).Get), ctx, resourceGroupName, applicationSecurityGroupName, options) +} + +// NewListPager mocks base method. 
+func (m *MockApplicationSecurityGroupsClient) NewListPager(resourceGroupName string, options *armnetwork.ApplicationSecurityGroupsClientListOptions) clients.ApplicationSecurityGroupsPager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewListPager", resourceGroupName, options) + ret0, _ := ret[0].(clients.ApplicationSecurityGroupsPager) + return ret0 +} + +// NewListPager indicates an expected call of NewListPager. +func (mr *MockApplicationSecurityGroupsClientMockRecorder) NewListPager(resourceGroupName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewListPager", reflect.TypeOf((*MockApplicationSecurityGroupsClient)(nil).NewListPager), resourceGroupName, options) +} diff --git a/sources/azure/shared/mocks/mock_batch_accounts_client.go b/sources/azure/shared/mocks/mock_batch_accounts_client.go index 5f3f946c..f02310f1 100644 --- a/sources/azure/shared/mocks/mock_batch_accounts_client.go +++ b/sources/azure/shared/mocks/mock_batch_accounts_client.go @@ -13,7 +13,7 @@ import ( context "context" reflect "reflect" - armbatch "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/batch/armbatch/v3" + armbatch "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/batch/armbatch/v4" clients "github.com/overmindtech/cli/sources/azure/clients" gomock "go.uber.org/mock/gomock" ) diff --git a/sources/azure/shared/mocks/mock_batch_application_client.go b/sources/azure/shared/mocks/mock_batch_application_client.go new file mode 100644 index 00000000..70bea7df --- /dev/null +++ b/sources/azure/shared/mocks/mock_batch_application_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: batch-application-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_batch_application_client.go -package=mocks -source=batch-application-client.go +// + +// Package mocks is a generated GoMock package. 
+package mocks + +import ( + context "context" + reflect "reflect" + + armbatch "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/batch/armbatch/v4" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockBatchApplicationsClient is a mock of BatchApplicationsClient interface. +type MockBatchApplicationsClient struct { + ctrl *gomock.Controller + recorder *MockBatchApplicationsClientMockRecorder + isgomock struct{} +} + +// MockBatchApplicationsClientMockRecorder is the mock recorder for MockBatchApplicationsClient. +type MockBatchApplicationsClientMockRecorder struct { + mock *MockBatchApplicationsClient +} + +// NewMockBatchApplicationsClient creates a new mock instance. +func NewMockBatchApplicationsClient(ctrl *gomock.Controller) *MockBatchApplicationsClient { + mock := &MockBatchApplicationsClient{ctrl: ctrl} + mock.recorder = &MockBatchApplicationsClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockBatchApplicationsClient) EXPECT() *MockBatchApplicationsClientMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockBatchApplicationsClient) Get(ctx context.Context, resourceGroupName, accountName, applicationName string) (armbatch.ApplicationClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, accountName, applicationName) + ret0, _ := ret[0].(armbatch.ApplicationClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockBatchApplicationsClientMockRecorder) Get(ctx, resourceGroupName, accountName, applicationName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockBatchApplicationsClient)(nil).Get), ctx, resourceGroupName, accountName, applicationName) +} + +// List mocks base method. 
+func (m *MockBatchApplicationsClient) List(ctx context.Context, resourceGroupName, accountName string) clients.BatchApplicationsPager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "List", ctx, resourceGroupName, accountName) + ret0, _ := ret[0].(clients.BatchApplicationsPager) + return ret0 +} + +// List indicates an expected call of List. +func (mr *MockBatchApplicationsClientMockRecorder) List(ctx, resourceGroupName, accountName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockBatchApplicationsClient)(nil).List), ctx, resourceGroupName, accountName) +} diff --git a/sources/azure/shared/mocks/mock_batch_pool_client.go b/sources/azure/shared/mocks/mock_batch_pool_client.go new file mode 100644 index 00000000..282603e0 --- /dev/null +++ b/sources/azure/shared/mocks/mock_batch_pool_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: batch-pool-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_batch_pool_client.go -package=mocks -source=batch-pool-client.go +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + armbatch "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/batch/armbatch/v4" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockBatchPoolsClient is a mock of BatchPoolsClient interface. +type MockBatchPoolsClient struct { + ctrl *gomock.Controller + recorder *MockBatchPoolsClientMockRecorder + isgomock struct{} +} + +// MockBatchPoolsClientMockRecorder is the mock recorder for MockBatchPoolsClient. +type MockBatchPoolsClientMockRecorder struct { + mock *MockBatchPoolsClient +} + +// NewMockBatchPoolsClient creates a new mock instance. 
+func NewMockBatchPoolsClient(ctrl *gomock.Controller) *MockBatchPoolsClient { + mock := &MockBatchPoolsClient{ctrl: ctrl} + mock.recorder = &MockBatchPoolsClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockBatchPoolsClient) EXPECT() *MockBatchPoolsClientMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockBatchPoolsClient) Get(ctx context.Context, resourceGroupName, accountName, poolName string) (armbatch.PoolClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, accountName, poolName) + ret0, _ := ret[0].(armbatch.PoolClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockBatchPoolsClientMockRecorder) Get(ctx, resourceGroupName, accountName, poolName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockBatchPoolsClient)(nil).Get), ctx, resourceGroupName, accountName, poolName) +} + +// ListByBatchAccount mocks base method. +func (m *MockBatchPoolsClient) ListByBatchAccount(ctx context.Context, resourceGroupName, accountName string) clients.BatchPoolsPager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListByBatchAccount", ctx, resourceGroupName, accountName) + ret0, _ := ret[0].(clients.BatchPoolsPager) + return ret0 +} + +// ListByBatchAccount indicates an expected call of ListByBatchAccount. 
+func (mr *MockBatchPoolsClientMockRecorder) ListByBatchAccount(ctx, resourceGroupName, accountName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByBatchAccount", reflect.TypeOf((*MockBatchPoolsClient)(nil).ListByBatchAccount), ctx, resourceGroupName, accountName) +} diff --git a/sources/azure/shared/mocks/mock_capacity_reservations_client.go b/sources/azure/shared/mocks/mock_capacity_reservations_client.go new file mode 100644 index 00000000..cf65eb9c --- /dev/null +++ b/sources/azure/shared/mocks/mock_capacity_reservations_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: capacity-reservations-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_capacity_reservations_client.go -package=mocks -source=capacity-reservations-client.go +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockCapacityReservationsClient is a mock of CapacityReservationsClient interface. +type MockCapacityReservationsClient struct { + ctrl *gomock.Controller + recorder *MockCapacityReservationsClientMockRecorder + isgomock struct{} +} + +// MockCapacityReservationsClientMockRecorder is the mock recorder for MockCapacityReservationsClient. +type MockCapacityReservationsClientMockRecorder struct { + mock *MockCapacityReservationsClient +} + +// NewMockCapacityReservationsClient creates a new mock instance. 
+func NewMockCapacityReservationsClient(ctrl *gomock.Controller) *MockCapacityReservationsClient { + mock := &MockCapacityReservationsClient{ctrl: ctrl} + mock.recorder = &MockCapacityReservationsClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockCapacityReservationsClient) EXPECT() *MockCapacityReservationsClientMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockCapacityReservationsClient) Get(ctx context.Context, resourceGroupName, capacityReservationGroupName, capacityReservationName string, options *armcompute.CapacityReservationsClientGetOptions) (armcompute.CapacityReservationsClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, capacityReservationGroupName, capacityReservationName, options) + ret0, _ := ret[0].(armcompute.CapacityReservationsClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockCapacityReservationsClientMockRecorder) Get(ctx, resourceGroupName, capacityReservationGroupName, capacityReservationName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockCapacityReservationsClient)(nil).Get), ctx, resourceGroupName, capacityReservationGroupName, capacityReservationName, options) +} + +// NewListByCapacityReservationGroupPager mocks base method. 
+func (m *MockCapacityReservationsClient) NewListByCapacityReservationGroupPager(resourceGroupName, capacityReservationGroupName string, options *armcompute.CapacityReservationsClientListByCapacityReservationGroupOptions) clients.CapacityReservationsPager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewListByCapacityReservationGroupPager", resourceGroupName, capacityReservationGroupName, options) + ret0, _ := ret[0].(clients.CapacityReservationsPager) + return ret0 +} + +// NewListByCapacityReservationGroupPager indicates an expected call of NewListByCapacityReservationGroupPager. +func (mr *MockCapacityReservationsClientMockRecorder) NewListByCapacityReservationGroupPager(resourceGroupName, capacityReservationGroupName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewListByCapacityReservationGroupPager", reflect.TypeOf((*MockCapacityReservationsClient)(nil).NewListByCapacityReservationGroupPager), resourceGroupName, capacityReservationGroupName, options) +} diff --git a/sources/azure/shared/mocks/mock_dbforpostgresql_flexible_server_private_endpoint_connection_client.go b/sources/azure/shared/mocks/mock_dbforpostgresql_flexible_server_private_endpoint_connection_client.go new file mode 100644 index 00000000..9c60a8a5 --- /dev/null +++ b/sources/azure/shared/mocks/mock_dbforpostgresql_flexible_server_private_endpoint_connection_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: dbforpostgresql-flexible-server-private-endpoint-connection-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_dbforpostgresql_flexible_server_private_endpoint_connection_client.go -package=mocks -source=dbforpostgresql-flexible-server-private-endpoint-connection-client.go +// + +// Package mocks is a generated GoMock package. 
+package mocks + +import ( + context "context" + reflect "reflect" + + armpostgresqlflexibleservers "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresqlflexibleservers/v5" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient is a mock of DBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient interface. +type MockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient struct { + ctrl *gomock.Controller + recorder *MockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClientMockRecorder + isgomock struct{} +} + +// MockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClientMockRecorder is the mock recorder for MockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient. +type MockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClientMockRecorder struct { + mock *MockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient +} + +// NewMockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient creates a new mock instance. +func NewMockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient(ctrl *gomock.Controller) *MockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient { + mock := &MockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient{ctrl: ctrl} + mock.recorder = &MockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient) EXPECT() *MockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClientMockRecorder { + return m.recorder +} + +// Get mocks base method. 
+func (m *MockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient) Get(ctx context.Context, resourceGroupName, serverName, privateEndpointConnectionName string) (armpostgresqlflexibleservers.PrivateEndpointConnectionsClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, serverName, privateEndpointConnectionName) + ret0, _ := ret[0].(armpostgresqlflexibleservers.PrivateEndpointConnectionsClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClientMockRecorder) Get(ctx, resourceGroupName, serverName, privateEndpointConnectionName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient)(nil).Get), ctx, resourceGroupName, serverName, privateEndpointConnectionName) +} + +// ListByServer mocks base method. +func (m *MockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient) ListByServer(ctx context.Context, resourceGroupName, serverName string) clients.DBforPostgreSQLFlexibleServerPrivateEndpointConnectionsPager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListByServer", ctx, resourceGroupName, serverName) + ret0, _ := ret[0].(clients.DBforPostgreSQLFlexibleServerPrivateEndpointConnectionsPager) + return ret0 +} + +// ListByServer indicates an expected call of ListByServer. 
+func (mr *MockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClientMockRecorder) ListByServer(ctx, resourceGroupName, serverName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByServer", reflect.TypeOf((*MockDBforPostgreSQLFlexibleServerPrivateEndpointConnectionsClient)(nil).ListByServer), ctx, resourceGroupName, serverName) +} diff --git a/sources/azure/shared/mocks/mock_ddos_protection_plans_client.go b/sources/azure/shared/mocks/mock_ddos_protection_plans_client.go new file mode 100644 index 00000000..d46a8425 --- /dev/null +++ b/sources/azure/shared/mocks/mock_ddos_protection_plans_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: ddos-protection-plans-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_ddos_protection_plans_client.go -package=mocks -source=ddos-protection-plans-client.go +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockDdosProtectionPlansClient is a mock of DdosProtectionPlansClient interface. +type MockDdosProtectionPlansClient struct { + ctrl *gomock.Controller + recorder *MockDdosProtectionPlansClientMockRecorder + isgomock struct{} +} + +// MockDdosProtectionPlansClientMockRecorder is the mock recorder for MockDdosProtectionPlansClient. +type MockDdosProtectionPlansClientMockRecorder struct { + mock *MockDdosProtectionPlansClient +} + +// NewMockDdosProtectionPlansClient creates a new mock instance. 
+func NewMockDdosProtectionPlansClient(ctrl *gomock.Controller) *MockDdosProtectionPlansClient { + mock := &MockDdosProtectionPlansClient{ctrl: ctrl} + mock.recorder = &MockDdosProtectionPlansClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDdosProtectionPlansClient) EXPECT() *MockDdosProtectionPlansClientMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockDdosProtectionPlansClient) Get(ctx context.Context, resourceGroupName, ddosProtectionPlanName string, options *armnetwork.DdosProtectionPlansClientGetOptions) (armnetwork.DdosProtectionPlansClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, ddosProtectionPlanName, options) + ret0, _ := ret[0].(armnetwork.DdosProtectionPlansClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockDdosProtectionPlansClientMockRecorder) Get(ctx, resourceGroupName, ddosProtectionPlanName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockDdosProtectionPlansClient)(nil).Get), ctx, resourceGroupName, ddosProtectionPlanName, options) +} + +// NewListByResourceGroupPager mocks base method. +func (m *MockDdosProtectionPlansClient) NewListByResourceGroupPager(resourceGroupName string, options *armnetwork.DdosProtectionPlansClientListByResourceGroupOptions) clients.DdosProtectionPlansPager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewListByResourceGroupPager", resourceGroupName, options) + ret0, _ := ret[0].(clients.DdosProtectionPlansPager) + return ret0 +} + +// NewListByResourceGroupPager indicates an expected call of NewListByResourceGroupPager. 
+func (mr *MockDdosProtectionPlansClientMockRecorder) NewListByResourceGroupPager(resourceGroupName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewListByResourceGroupPager", reflect.TypeOf((*MockDdosProtectionPlansClient)(nil).NewListByResourceGroupPager), resourceGroupName, options) +} diff --git a/sources/azure/shared/mocks/mock_dedicated_hosts_client.go b/sources/azure/shared/mocks/mock_dedicated_hosts_client.go new file mode 100644 index 00000000..a05b8cb5 --- /dev/null +++ b/sources/azure/shared/mocks/mock_dedicated_hosts_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: dedicated-hosts-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_dedicated_hosts_client.go -package=mocks -source=dedicated-hosts-client.go +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockDedicatedHostsClient is a mock of DedicatedHostsClient interface. +type MockDedicatedHostsClient struct { + ctrl *gomock.Controller + recorder *MockDedicatedHostsClientMockRecorder + isgomock struct{} +} + +// MockDedicatedHostsClientMockRecorder is the mock recorder for MockDedicatedHostsClient. +type MockDedicatedHostsClientMockRecorder struct { + mock *MockDedicatedHostsClient +} + +// NewMockDedicatedHostsClient creates a new mock instance. +func NewMockDedicatedHostsClient(ctrl *gomock.Controller) *MockDedicatedHostsClient { + mock := &MockDedicatedHostsClient{ctrl: ctrl} + mock.recorder = &MockDedicatedHostsClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
+func (m *MockDedicatedHostsClient) EXPECT() *MockDedicatedHostsClientMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockDedicatedHostsClient) Get(ctx context.Context, resourceGroupName, hostGroupName, hostName string, options *armcompute.DedicatedHostsClientGetOptions) (armcompute.DedicatedHostsClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, hostGroupName, hostName, options) + ret0, _ := ret[0].(armcompute.DedicatedHostsClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockDedicatedHostsClientMockRecorder) Get(ctx, resourceGroupName, hostGroupName, hostName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockDedicatedHostsClient)(nil).Get), ctx, resourceGroupName, hostGroupName, hostName, options) +} + +// NewListByHostGroupPager mocks base method. +func (m *MockDedicatedHostsClient) NewListByHostGroupPager(resourceGroupName, hostGroupName string, options *armcompute.DedicatedHostsClientListByHostGroupOptions) clients.DedicatedHostsPager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewListByHostGroupPager", resourceGroupName, hostGroupName, options) + ret0, _ := ret[0].(clients.DedicatedHostsPager) + return ret0 +} + +// NewListByHostGroupPager indicates an expected call of NewListByHostGroupPager. 
+func (mr *MockDedicatedHostsClientMockRecorder) NewListByHostGroupPager(resourceGroupName, hostGroupName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewListByHostGroupPager", reflect.TypeOf((*MockDedicatedHostsClient)(nil).NewListByHostGroupPager), resourceGroupName, hostGroupName, options) +} diff --git a/sources/azure/shared/mocks/mock_documentdb_database_accounts_client.go b/sources/azure/shared/mocks/mock_documentdb_database_accounts_client.go index 59236e73..3da33d1e 100644 --- a/sources/azure/shared/mocks/mock_documentdb_database_accounts_client.go +++ b/sources/azure/shared/mocks/mock_documentdb_database_accounts_client.go @@ -13,7 +13,7 @@ import ( context "context" reflect "reflect" - armcosmos "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos" + armcosmos "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos/v3" clients "github.com/overmindtech/cli/sources/azure/clients" gomock "go.uber.org/mock/gomock" ) diff --git a/sources/azure/shared/mocks/mock_documentdb_private_endpoint_connection_client.go b/sources/azure/shared/mocks/mock_documentdb_private_endpoint_connection_client.go new file mode 100644 index 00000000..dd7d9c69 --- /dev/null +++ b/sources/azure/shared/mocks/mock_documentdb_private_endpoint_connection_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: documentdb-private-endpoint-connection-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_documentdb_private_endpoint_connection_client.go -package=mocks -source=documentdb-private-endpoint-connection-client.go +// + +// Package mocks is a generated GoMock package. 
+package mocks + +import ( + context "context" + reflect "reflect" + + armcosmos "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos/v3" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockDocumentDBPrivateEndpointConnectionsClient is a mock of DocumentDBPrivateEndpointConnectionsClient interface. +type MockDocumentDBPrivateEndpointConnectionsClient struct { + ctrl *gomock.Controller + recorder *MockDocumentDBPrivateEndpointConnectionsClientMockRecorder + isgomock struct{} +} + +// MockDocumentDBPrivateEndpointConnectionsClientMockRecorder is the mock recorder for MockDocumentDBPrivateEndpointConnectionsClient. +type MockDocumentDBPrivateEndpointConnectionsClientMockRecorder struct { + mock *MockDocumentDBPrivateEndpointConnectionsClient +} + +// NewMockDocumentDBPrivateEndpointConnectionsClient creates a new mock instance. +func NewMockDocumentDBPrivateEndpointConnectionsClient(ctrl *gomock.Controller) *MockDocumentDBPrivateEndpointConnectionsClient { + mock := &MockDocumentDBPrivateEndpointConnectionsClient{ctrl: ctrl} + mock.recorder = &MockDocumentDBPrivateEndpointConnectionsClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDocumentDBPrivateEndpointConnectionsClient) EXPECT() *MockDocumentDBPrivateEndpointConnectionsClientMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockDocumentDBPrivateEndpointConnectionsClient) Get(ctx context.Context, resourceGroupName, accountName, privateEndpointConnectionName string) (armcosmos.PrivateEndpointConnectionsClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, accountName, privateEndpointConnectionName) + ret0, _ := ret[0].(armcosmos.PrivateEndpointConnectionsClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. 
+func (mr *MockDocumentDBPrivateEndpointConnectionsClientMockRecorder) Get(ctx, resourceGroupName, accountName, privateEndpointConnectionName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockDocumentDBPrivateEndpointConnectionsClient)(nil).Get), ctx, resourceGroupName, accountName, privateEndpointConnectionName) +} + +// ListByDatabaseAccount mocks base method. +func (m *MockDocumentDBPrivateEndpointConnectionsClient) ListByDatabaseAccount(ctx context.Context, resourceGroupName, accountName string) clients.DocumentDBPrivateEndpointConnectionsPager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListByDatabaseAccount", ctx, resourceGroupName, accountName) + ret0, _ := ret[0].(clients.DocumentDBPrivateEndpointConnectionsPager) + return ret0 +} + +// ListByDatabaseAccount indicates an expected call of ListByDatabaseAccount. +func (mr *MockDocumentDBPrivateEndpointConnectionsClientMockRecorder) ListByDatabaseAccount(ctx, resourceGroupName, accountName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByDatabaseAccount", reflect.TypeOf((*MockDocumentDBPrivateEndpointConnectionsClient)(nil).ListByDatabaseAccount), ctx, resourceGroupName, accountName) +} diff --git a/sources/azure/shared/mocks/mock_encryption_scopes_client.go b/sources/azure/shared/mocks/mock_encryption_scopes_client.go new file mode 100644 index 00000000..8f8b17b0 --- /dev/null +++ b/sources/azure/shared/mocks/mock_encryption_scopes_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: encryption-scopes-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_encryption_scopes_client.go -package=mocks -source=encryption-scopes-client.go +// + +// Package mocks is a generated GoMock package. 
+package mocks + +import ( + context "context" + reflect "reflect" + + armstorage "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage/v3" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockEncryptionScopesClient is a mock of EncryptionScopesClient interface. +type MockEncryptionScopesClient struct { + ctrl *gomock.Controller + recorder *MockEncryptionScopesClientMockRecorder + isgomock struct{} +} + +// MockEncryptionScopesClientMockRecorder is the mock recorder for MockEncryptionScopesClient. +type MockEncryptionScopesClientMockRecorder struct { + mock *MockEncryptionScopesClient +} + +// NewMockEncryptionScopesClient creates a new mock instance. +func NewMockEncryptionScopesClient(ctrl *gomock.Controller) *MockEncryptionScopesClient { + mock := &MockEncryptionScopesClient{ctrl: ctrl} + mock.recorder = &MockEncryptionScopesClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockEncryptionScopesClient) EXPECT() *MockEncryptionScopesClientMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockEncryptionScopesClient) Get(ctx context.Context, resourceGroupName, accountName, encryptionScopeName string) (armstorage.EncryptionScopesClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, accountName, encryptionScopeName) + ret0, _ := ret[0].(armstorage.EncryptionScopesClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockEncryptionScopesClientMockRecorder) Get(ctx, resourceGroupName, accountName, encryptionScopeName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockEncryptionScopesClient)(nil).Get), ctx, resourceGroupName, accountName, encryptionScopeName) +} + +// List mocks base method. 
+func (m *MockEncryptionScopesClient) List(ctx context.Context, resourceGroupName, accountName string) clients.EncryptionScopesPager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "List", ctx, resourceGroupName, accountName) + ret0, _ := ret[0].(clients.EncryptionScopesPager) + return ret0 +} + +// List indicates an expected call of List. +func (mr *MockEncryptionScopesClientMockRecorder) List(ctx, resourceGroupName, accountName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockEncryptionScopesClient)(nil).List), ctx, resourceGroupName, accountName) +} diff --git a/sources/azure/shared/mocks/mock_gallery_applications_client.go b/sources/azure/shared/mocks/mock_gallery_applications_client.go new file mode 100644 index 00000000..9bd7fd45 --- /dev/null +++ b/sources/azure/shared/mocks/mock_gallery_applications_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: gallery-applications-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_gallery_applications_client.go -package=mocks -source=gallery-applications-client.go +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v7" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockGalleryApplicationsClient is a mock of GalleryApplicationsClient interface. +type MockGalleryApplicationsClient struct { + ctrl *gomock.Controller + recorder *MockGalleryApplicationsClientMockRecorder + isgomock struct{} +} + +// MockGalleryApplicationsClientMockRecorder is the mock recorder for MockGalleryApplicationsClient. 
+type MockGalleryApplicationsClientMockRecorder struct { + mock *MockGalleryApplicationsClient +} + +// NewMockGalleryApplicationsClient creates a new mock instance. +func NewMockGalleryApplicationsClient(ctrl *gomock.Controller) *MockGalleryApplicationsClient { + mock := &MockGalleryApplicationsClient{ctrl: ctrl} + mock.recorder = &MockGalleryApplicationsClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockGalleryApplicationsClient) EXPECT() *MockGalleryApplicationsClientMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockGalleryApplicationsClient) Get(ctx context.Context, resourceGroupName, galleryName, galleryApplicationName string, options *armcompute.GalleryApplicationsClientGetOptions) (armcompute.GalleryApplicationsClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, galleryName, galleryApplicationName, options) + ret0, _ := ret[0].(armcompute.GalleryApplicationsClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockGalleryApplicationsClientMockRecorder) Get(ctx, resourceGroupName, galleryName, galleryApplicationName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockGalleryApplicationsClient)(nil).Get), ctx, resourceGroupName, galleryName, galleryApplicationName, options) +} + +// NewListByGalleryPager mocks base method. 
+func (m *MockGalleryApplicationsClient) NewListByGalleryPager(resourceGroupName, galleryName string, options *armcompute.GalleryApplicationsClientListByGalleryOptions) clients.GalleryApplicationsPager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewListByGalleryPager", resourceGroupName, galleryName, options) + ret0, _ := ret[0].(clients.GalleryApplicationsPager) + return ret0 +} + +// NewListByGalleryPager indicates an expected call of NewListByGalleryPager. +func (mr *MockGalleryApplicationsClientMockRecorder) NewListByGalleryPager(resourceGroupName, galleryName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewListByGalleryPager", reflect.TypeOf((*MockGalleryApplicationsClient)(nil).NewListByGalleryPager), resourceGroupName, galleryName, options) +} diff --git a/sources/azure/shared/mocks/mock_keyvault_key_client.go b/sources/azure/shared/mocks/mock_keyvault_key_client.go new file mode 100644 index 00000000..1a771be2 --- /dev/null +++ b/sources/azure/shared/mocks/mock_keyvault_key_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: keyvault-key-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_keyvault_key_client.go -package=mocks -source=keyvault-key-client.go +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + armkeyvault "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault/v2" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockKeysClient is a mock of KeysClient interface. +type MockKeysClient struct { + ctrl *gomock.Controller + recorder *MockKeysClientMockRecorder + isgomock struct{} +} + +// MockKeysClientMockRecorder is the mock recorder for MockKeysClient. 
+type MockKeysClientMockRecorder struct { + mock *MockKeysClient +} + +// NewMockKeysClient creates a new mock instance. +func NewMockKeysClient(ctrl *gomock.Controller) *MockKeysClient { + mock := &MockKeysClient{ctrl: ctrl} + mock.recorder = &MockKeysClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockKeysClient) EXPECT() *MockKeysClientMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockKeysClient) Get(ctx context.Context, resourceGroupName, vaultName, keyName string, options *armkeyvault.KeysClientGetOptions) (armkeyvault.KeysClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, vaultName, keyName, options) + ret0, _ := ret[0].(armkeyvault.KeysClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockKeysClientMockRecorder) Get(ctx, resourceGroupName, vaultName, keyName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockKeysClient)(nil).Get), ctx, resourceGroupName, vaultName, keyName, options) +} + +// NewListPager mocks base method. +func (m *MockKeysClient) NewListPager(resourceGroupName, vaultName string, options *armkeyvault.KeysClientListOptions) clients.KeysPager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewListPager", resourceGroupName, vaultName, options) + ret0, _ := ret[0].(clients.KeysPager) + return ret0 +} + +// NewListPager indicates an expected call of NewListPager. 
+func (mr *MockKeysClientMockRecorder) NewListPager(resourceGroupName, vaultName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewListPager", reflect.TypeOf((*MockKeysClient)(nil).NewListPager), resourceGroupName, vaultName, options) +} diff --git a/sources/azure/shared/mocks/mock_keyvault_managed_hsm_private_endpoint_connection_client.go b/sources/azure/shared/mocks/mock_keyvault_managed_hsm_private_endpoint_connection_client.go new file mode 100644 index 00000000..6b24ae39 --- /dev/null +++ b/sources/azure/shared/mocks/mock_keyvault_managed_hsm_private_endpoint_connection_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: keyvault-managed-hsm-private-endpoint-connection-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_keyvault_managed_hsm_private_endpoint_connection_client.go -package=mocks -source=keyvault-managed-hsm-private-endpoint-connection-client.go +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + armkeyvault "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault/v2" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockKeyVaultManagedHSMPrivateEndpointConnectionsClient is a mock of KeyVaultManagedHSMPrivateEndpointConnectionsClient interface. +type MockKeyVaultManagedHSMPrivateEndpointConnectionsClient struct { + ctrl *gomock.Controller + recorder *MockKeyVaultManagedHSMPrivateEndpointConnectionsClientMockRecorder + isgomock struct{} +} + +// MockKeyVaultManagedHSMPrivateEndpointConnectionsClientMockRecorder is the mock recorder for MockKeyVaultManagedHSMPrivateEndpointConnectionsClient. 
+type MockKeyVaultManagedHSMPrivateEndpointConnectionsClientMockRecorder struct { + mock *MockKeyVaultManagedHSMPrivateEndpointConnectionsClient +} + +// NewMockKeyVaultManagedHSMPrivateEndpointConnectionsClient creates a new mock instance. +func NewMockKeyVaultManagedHSMPrivateEndpointConnectionsClient(ctrl *gomock.Controller) *MockKeyVaultManagedHSMPrivateEndpointConnectionsClient { + mock := &MockKeyVaultManagedHSMPrivateEndpointConnectionsClient{ctrl: ctrl} + mock.recorder = &MockKeyVaultManagedHSMPrivateEndpointConnectionsClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockKeyVaultManagedHSMPrivateEndpointConnectionsClient) EXPECT() *MockKeyVaultManagedHSMPrivateEndpointConnectionsClientMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockKeyVaultManagedHSMPrivateEndpointConnectionsClient) Get(ctx context.Context, resourceGroupName, hsmName, privateEndpointConnectionName string) (armkeyvault.MHSMPrivateEndpointConnectionsClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, hsmName, privateEndpointConnectionName) + ret0, _ := ret[0].(armkeyvault.MHSMPrivateEndpointConnectionsClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockKeyVaultManagedHSMPrivateEndpointConnectionsClientMockRecorder) Get(ctx, resourceGroupName, hsmName, privateEndpointConnectionName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockKeyVaultManagedHSMPrivateEndpointConnectionsClient)(nil).Get), ctx, resourceGroupName, hsmName, privateEndpointConnectionName) +} + +// ListByResource mocks base method. 
+func (m *MockKeyVaultManagedHSMPrivateEndpointConnectionsClient) ListByResource(ctx context.Context, resourceGroupName, hsmName string) clients.KeyVaultManagedHSMPrivateEndpointConnectionsPager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListByResource", ctx, resourceGroupName, hsmName) + ret0, _ := ret[0].(clients.KeyVaultManagedHSMPrivateEndpointConnectionsPager) + return ret0 +} + +// ListByResource indicates an expected call of ListByResource. +func (mr *MockKeyVaultManagedHSMPrivateEndpointConnectionsClientMockRecorder) ListByResource(ctx, resourceGroupName, hsmName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByResource", reflect.TypeOf((*MockKeyVaultManagedHSMPrivateEndpointConnectionsClient)(nil).ListByResource), ctx, resourceGroupName, hsmName) +} diff --git a/sources/azure/shared/mocks/mock_load_balancers_client.go b/sources/azure/shared/mocks/mock_load_balancers_client.go index a140c027..f8ef7a0b 100644 --- a/sources/azure/shared/mocks/mock_load_balancers_client.go +++ b/sources/azure/shared/mocks/mock_load_balancers_client.go @@ -13,7 +13,7 @@ import ( context "context" reflect "reflect" - armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" clients "github.com/overmindtech/cli/sources/azure/clients" gomock "go.uber.org/mock/gomock" ) diff --git a/sources/azure/shared/mocks/mock_nat_gateways_client.go b/sources/azure/shared/mocks/mock_nat_gateways_client.go new file mode 100644 index 00000000..2c53c0e2 --- /dev/null +++ b/sources/azure/shared/mocks/mock_nat_gateways_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. 
+// Source: nat-gateways-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_nat_gateways_client.go -package=mocks -source=nat-gateways-client.go +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockNatGatewaysClient is a mock of NatGatewaysClient interface. +type MockNatGatewaysClient struct { + ctrl *gomock.Controller + recorder *MockNatGatewaysClientMockRecorder + isgomock struct{} +} + +// MockNatGatewaysClientMockRecorder is the mock recorder for MockNatGatewaysClient. +type MockNatGatewaysClientMockRecorder struct { + mock *MockNatGatewaysClient +} + +// NewMockNatGatewaysClient creates a new mock instance. +func NewMockNatGatewaysClient(ctrl *gomock.Controller) *MockNatGatewaysClient { + mock := &MockNatGatewaysClient{ctrl: ctrl} + mock.recorder = &MockNatGatewaysClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockNatGatewaysClient) EXPECT() *MockNatGatewaysClientMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockNatGatewaysClient) Get(ctx context.Context, resourceGroupName, natGatewayName string, options *armnetwork.NatGatewaysClientGetOptions) (armnetwork.NatGatewaysClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, natGatewayName, options) + ret0, _ := ret[0].(armnetwork.NatGatewaysClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. 
+func (mr *MockNatGatewaysClientMockRecorder) Get(ctx, resourceGroupName, natGatewayName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockNatGatewaysClient)(nil).Get), ctx, resourceGroupName, natGatewayName, options) +} + +// NewListPager mocks base method. +func (m *MockNatGatewaysClient) NewListPager(resourceGroupName string, options *armnetwork.NatGatewaysClientListOptions) clients.NatGatewaysPager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewListPager", resourceGroupName, options) + ret0, _ := ret[0].(clients.NatGatewaysPager) + return ret0 +} + +// NewListPager indicates an expected call of NewListPager. +func (mr *MockNatGatewaysClientMockRecorder) NewListPager(resourceGroupName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewListPager", reflect.TypeOf((*MockNatGatewaysClient)(nil).NewListPager), resourceGroupName, options) +} diff --git a/sources/azure/shared/mocks/mock_network_interfaces_client.go b/sources/azure/shared/mocks/mock_network_interfaces_client.go index 6547f1c9..29b5bd06 100644 --- a/sources/azure/shared/mocks/mock_network_interfaces_client.go +++ b/sources/azure/shared/mocks/mock_network_interfaces_client.go @@ -13,7 +13,7 @@ import ( context "context" reflect "reflect" - armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" clients "github.com/overmindtech/cli/sources/azure/clients" gomock "go.uber.org/mock/gomock" ) diff --git a/sources/azure/shared/mocks/mock_network_private_endpoint_client.go b/sources/azure/shared/mocks/mock_network_private_endpoint_client.go new file mode 100644 index 00000000..d933e981 --- /dev/null +++ b/sources/azure/shared/mocks/mock_network_private_endpoint_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. 
+// Source: network-private-endpoint-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_network_private_endpoint_client.go -package=mocks -source=network-private-endpoint-client.go +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockPrivateEndpointsClient is a mock of PrivateEndpointsClient interface. +type MockPrivateEndpointsClient struct { + ctrl *gomock.Controller + recorder *MockPrivateEndpointsClientMockRecorder + isgomock struct{} +} + +// MockPrivateEndpointsClientMockRecorder is the mock recorder for MockPrivateEndpointsClient. +type MockPrivateEndpointsClientMockRecorder struct { + mock *MockPrivateEndpointsClient +} + +// NewMockPrivateEndpointsClient creates a new mock instance. +func NewMockPrivateEndpointsClient(ctrl *gomock.Controller) *MockPrivateEndpointsClient { + mock := &MockPrivateEndpointsClient{ctrl: ctrl} + mock.recorder = &MockPrivateEndpointsClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockPrivateEndpointsClient) EXPECT() *MockPrivateEndpointsClientMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockPrivateEndpointsClient) Get(ctx context.Context, resourceGroupName, privateEndpointName string) (armnetwork.PrivateEndpointsClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, privateEndpointName) + ret0, _ := ret[0].(armnetwork.PrivateEndpointsClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. 
+func (mr *MockPrivateEndpointsClientMockRecorder) Get(ctx, resourceGroupName, privateEndpointName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockPrivateEndpointsClient)(nil).Get), ctx, resourceGroupName, privateEndpointName) +} + +// List mocks base method. +func (m *MockPrivateEndpointsClient) List(resourceGroupName string) clients.PrivateEndpointsPager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "List", resourceGroupName) + ret0, _ := ret[0].(clients.PrivateEndpointsPager) + return ret0 +} + +// List indicates an expected call of List. +func (mr *MockPrivateEndpointsClientMockRecorder) List(resourceGroupName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockPrivateEndpointsClient)(nil).List), resourceGroupName) +} diff --git a/sources/azure/shared/mocks/mock_network_security_groups_client.go b/sources/azure/shared/mocks/mock_network_security_groups_client.go index 734d3a13..7a61fc33 100644 --- a/sources/azure/shared/mocks/mock_network_security_groups_client.go +++ b/sources/azure/shared/mocks/mock_network_security_groups_client.go @@ -13,7 +13,7 @@ import ( context "context" reflect "reflect" - armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" clients "github.com/overmindtech/cli/sources/azure/clients" gomock "go.uber.org/mock/gomock" ) diff --git a/sources/azure/shared/mocks/mock_postgresql_flexible_server_firewall_rule_client.go b/sources/azure/shared/mocks/mock_postgresql_flexible_server_firewall_rule_client.go new file mode 100644 index 00000000..a5030f7a --- /dev/null +++ b/sources/azure/shared/mocks/mock_postgresql_flexible_server_firewall_rule_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. 
+// Source: postgresql-flexible-server-firewall-rule-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_postgresql_flexible_server_firewall_rule_client.go -package=mocks -source=postgresql-flexible-server-firewall-rule-client.go +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + armpostgresqlflexibleservers "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresqlflexibleservers/v5" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockPostgreSQLFlexibleServerFirewallRuleClient is a mock of PostgreSQLFlexibleServerFirewallRuleClient interface. +type MockPostgreSQLFlexibleServerFirewallRuleClient struct { + ctrl *gomock.Controller + recorder *MockPostgreSQLFlexibleServerFirewallRuleClientMockRecorder + isgomock struct{} +} + +// MockPostgreSQLFlexibleServerFirewallRuleClientMockRecorder is the mock recorder for MockPostgreSQLFlexibleServerFirewallRuleClient. +type MockPostgreSQLFlexibleServerFirewallRuleClientMockRecorder struct { + mock *MockPostgreSQLFlexibleServerFirewallRuleClient +} + +// NewMockPostgreSQLFlexibleServerFirewallRuleClient creates a new mock instance. +func NewMockPostgreSQLFlexibleServerFirewallRuleClient(ctrl *gomock.Controller) *MockPostgreSQLFlexibleServerFirewallRuleClient { + mock := &MockPostgreSQLFlexibleServerFirewallRuleClient{ctrl: ctrl} + mock.recorder = &MockPostgreSQLFlexibleServerFirewallRuleClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockPostgreSQLFlexibleServerFirewallRuleClient) EXPECT() *MockPostgreSQLFlexibleServerFirewallRuleClientMockRecorder { + return m.recorder +} + +// Get mocks base method. 
+func (m *MockPostgreSQLFlexibleServerFirewallRuleClient) Get(ctx context.Context, resourceGroupName, serverName, firewallRuleName string) (armpostgresqlflexibleservers.FirewallRulesClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, serverName, firewallRuleName) + ret0, _ := ret[0].(armpostgresqlflexibleservers.FirewallRulesClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockPostgreSQLFlexibleServerFirewallRuleClientMockRecorder) Get(ctx, resourceGroupName, serverName, firewallRuleName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockPostgreSQLFlexibleServerFirewallRuleClient)(nil).Get), ctx, resourceGroupName, serverName, firewallRuleName) +} + +// ListByServer mocks base method. +func (m *MockPostgreSQLFlexibleServerFirewallRuleClient) ListByServer(ctx context.Context, resourceGroupName, serverName string) clients.PostgreSQLFlexibleServerFirewallRulePager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListByServer", ctx, resourceGroupName, serverName) + ret0, _ := ret[0].(clients.PostgreSQLFlexibleServerFirewallRulePager) + return ret0 +} + +// ListByServer indicates an expected call of ListByServer. 
+func (mr *MockPostgreSQLFlexibleServerFirewallRuleClientMockRecorder) ListByServer(ctx, resourceGroupName, serverName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByServer", reflect.TypeOf((*MockPostgreSQLFlexibleServerFirewallRuleClient)(nil).ListByServer), ctx, resourceGroupName, serverName) +} diff --git a/sources/azure/shared/mocks/mock_private_dns_zones_client.go b/sources/azure/shared/mocks/mock_private_dns_zones_client.go new file mode 100644 index 00000000..4e0a6f86 --- /dev/null +++ b/sources/azure/shared/mocks/mock_private_dns_zones_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: private-dns-zones-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_private_dns_zones_client.go -package=mocks -source=private-dns-zones-client.go +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + armprivatedns "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockPrivateDNSZonesClient is a mock of PrivateDNSZonesClient interface. +type MockPrivateDNSZonesClient struct { + ctrl *gomock.Controller + recorder *MockPrivateDNSZonesClientMockRecorder + isgomock struct{} +} + +// MockPrivateDNSZonesClientMockRecorder is the mock recorder for MockPrivateDNSZonesClient. +type MockPrivateDNSZonesClientMockRecorder struct { + mock *MockPrivateDNSZonesClient +} + +// NewMockPrivateDNSZonesClient creates a new mock instance. +func NewMockPrivateDNSZonesClient(ctrl *gomock.Controller) *MockPrivateDNSZonesClient { + mock := &MockPrivateDNSZonesClient{ctrl: ctrl} + mock.recorder = &MockPrivateDNSZonesClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
+func (m *MockPrivateDNSZonesClient) EXPECT() *MockPrivateDNSZonesClientMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockPrivateDNSZonesClient) Get(ctx context.Context, resourceGroupName, privateZoneName string, options *armprivatedns.PrivateZonesClientGetOptions) (armprivatedns.PrivateZonesClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, privateZoneName, options) + ret0, _ := ret[0].(armprivatedns.PrivateZonesClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockPrivateDNSZonesClientMockRecorder) Get(ctx, resourceGroupName, privateZoneName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockPrivateDNSZonesClient)(nil).Get), ctx, resourceGroupName, privateZoneName, options) +} + +// NewListByResourceGroupPager mocks base method. +func (m *MockPrivateDNSZonesClient) NewListByResourceGroupPager(resourceGroupName string, options *armprivatedns.PrivateZonesClientListByResourceGroupOptions) clients.PrivateDNSZonesPager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewListByResourceGroupPager", resourceGroupName, options) + ret0, _ := ret[0].(clients.PrivateDNSZonesPager) + return ret0 +} + +// NewListByResourceGroupPager indicates an expected call of NewListByResourceGroupPager. 
+func (mr *MockPrivateDNSZonesClientMockRecorder) NewListByResourceGroupPager(resourceGroupName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewListByResourceGroupPager", reflect.TypeOf((*MockPrivateDNSZonesClient)(nil).NewListByResourceGroupPager), resourceGroupName, options) +} diff --git a/sources/azure/shared/mocks/mock_public_ip_addresses_client.go b/sources/azure/shared/mocks/mock_public_ip_addresses_client.go index 29297475..7e382e67 100644 --- a/sources/azure/shared/mocks/mock_public_ip_addresses_client.go +++ b/sources/azure/shared/mocks/mock_public_ip_addresses_client.go @@ -13,7 +13,7 @@ import ( context "context" reflect "reflect" - armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" clients "github.com/overmindtech/cli/sources/azure/clients" gomock "go.uber.org/mock/gomock" ) diff --git a/sources/azure/shared/mocks/mock_public_ip_prefixes_client.go b/sources/azure/shared/mocks/mock_public_ip_prefixes_client.go new file mode 100644 index 00000000..4ee32b09 --- /dev/null +++ b/sources/azure/shared/mocks/mock_public_ip_prefixes_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: public-ip-prefixes-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_public_ip_prefixes_client.go -package=mocks -source=public-ip-prefixes-client.go +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockPublicIPPrefixesClient is a mock of PublicIPPrefixesClient interface. 
+type MockPublicIPPrefixesClient struct { + ctrl *gomock.Controller + recorder *MockPublicIPPrefixesClientMockRecorder + isgomock struct{} +} + +// MockPublicIPPrefixesClientMockRecorder is the mock recorder for MockPublicIPPrefixesClient. +type MockPublicIPPrefixesClientMockRecorder struct { + mock *MockPublicIPPrefixesClient +} + +// NewMockPublicIPPrefixesClient creates a new mock instance. +func NewMockPublicIPPrefixesClient(ctrl *gomock.Controller) *MockPublicIPPrefixesClient { + mock := &MockPublicIPPrefixesClient{ctrl: ctrl} + mock.recorder = &MockPublicIPPrefixesClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockPublicIPPrefixesClient) EXPECT() *MockPublicIPPrefixesClientMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockPublicIPPrefixesClient) Get(ctx context.Context, resourceGroupName, publicIPPrefixName string, options *armnetwork.PublicIPPrefixesClientGetOptions) (armnetwork.PublicIPPrefixesClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, publicIPPrefixName, options) + ret0, _ := ret[0].(armnetwork.PublicIPPrefixesClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockPublicIPPrefixesClientMockRecorder) Get(ctx, resourceGroupName, publicIPPrefixName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockPublicIPPrefixesClient)(nil).Get), ctx, resourceGroupName, publicIPPrefixName, options) +} + +// NewListPager mocks base method. 
+func (m *MockPublicIPPrefixesClient) NewListPager(resourceGroupName string, options *armnetwork.PublicIPPrefixesClientListOptions) clients.PublicIPPrefixesPager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewListPager", resourceGroupName, options) + ret0, _ := ret[0].(clients.PublicIPPrefixesPager) + return ret0 +} + +// NewListPager indicates an expected call of NewListPager. +func (mr *MockPublicIPPrefixesClientMockRecorder) NewListPager(resourceGroupName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewListPager", reflect.TypeOf((*MockPublicIPPrefixesClient)(nil).NewListPager), resourceGroupName, options) +} diff --git a/sources/azure/shared/mocks/mock_record_sets_client.go b/sources/azure/shared/mocks/mock_record_sets_client.go new file mode 100644 index 00000000..241c507b --- /dev/null +++ b/sources/azure/shared/mocks/mock_record_sets_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: record-sets-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_record_sets_client.go -package=mocks -source=record-sets-client.go +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + armdns "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dns/armdns" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockRecordSetsClient is a mock of RecordSetsClient interface. +type MockRecordSetsClient struct { + ctrl *gomock.Controller + recorder *MockRecordSetsClientMockRecorder + isgomock struct{} +} + +// MockRecordSetsClientMockRecorder is the mock recorder for MockRecordSetsClient. +type MockRecordSetsClientMockRecorder struct { + mock *MockRecordSetsClient +} + +// NewMockRecordSetsClient creates a new mock instance. 
+func NewMockRecordSetsClient(ctrl *gomock.Controller) *MockRecordSetsClient { + mock := &MockRecordSetsClient{ctrl: ctrl} + mock.recorder = &MockRecordSetsClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockRecordSetsClient) EXPECT() *MockRecordSetsClientMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockRecordSetsClient) Get(ctx context.Context, resourceGroupName, zoneName, relativeRecordSetName string, recordType armdns.RecordType, options *armdns.RecordSetsClientGetOptions) (armdns.RecordSetsClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, zoneName, relativeRecordSetName, recordType, options) + ret0, _ := ret[0].(armdns.RecordSetsClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockRecordSetsClientMockRecorder) Get(ctx, resourceGroupName, zoneName, relativeRecordSetName, recordType, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockRecordSetsClient)(nil).Get), ctx, resourceGroupName, zoneName, relativeRecordSetName, recordType, options) +} + +// NewListAllByDNSZonePager mocks base method. +func (m *MockRecordSetsClient) NewListAllByDNSZonePager(resourceGroupName, zoneName string, options *armdns.RecordSetsClientListAllByDNSZoneOptions) clients.RecordSetsPager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewListAllByDNSZonePager", resourceGroupName, zoneName, options) + ret0, _ := ret[0].(clients.RecordSetsPager) + return ret0 +} + +// NewListAllByDNSZonePager indicates an expected call of NewListAllByDNSZonePager. 
+func (mr *MockRecordSetsClientMockRecorder) NewListAllByDNSZonePager(resourceGroupName, zoneName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewListAllByDNSZonePager", reflect.TypeOf((*MockRecordSetsClient)(nil).NewListAllByDNSZonePager), resourceGroupName, zoneName, options) +} diff --git a/sources/azure/shared/mocks/mock_route_tables_client.go b/sources/azure/shared/mocks/mock_route_tables_client.go index a37bf0c0..954fd65b 100644 --- a/sources/azure/shared/mocks/mock_route_tables_client.go +++ b/sources/azure/shared/mocks/mock_route_tables_client.go @@ -13,7 +13,7 @@ import ( context "context" reflect "reflect" - armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" clients "github.com/overmindtech/cli/sources/azure/clients" gomock "go.uber.org/mock/gomock" ) diff --git a/sources/azure/shared/mocks/mock_routes_client.go b/sources/azure/shared/mocks/mock_routes_client.go new file mode 100644 index 00000000..fd6f67bb --- /dev/null +++ b/sources/azure/shared/mocks/mock_routes_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: routes-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_routes_client.go -package=mocks -source=routes-client.go +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockRoutesClient is a mock of RoutesClient interface. 
+type MockRoutesClient struct { + ctrl *gomock.Controller + recorder *MockRoutesClientMockRecorder + isgomock struct{} +} + +// MockRoutesClientMockRecorder is the mock recorder for MockRoutesClient. +type MockRoutesClientMockRecorder struct { + mock *MockRoutesClient +} + +// NewMockRoutesClient creates a new mock instance. +func NewMockRoutesClient(ctrl *gomock.Controller) *MockRoutesClient { + mock := &MockRoutesClient{ctrl: ctrl} + mock.recorder = &MockRoutesClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockRoutesClient) EXPECT() *MockRoutesClientMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockRoutesClient) Get(ctx context.Context, resourceGroupName, routeTableName, routeName string, options *armnetwork.RoutesClientGetOptions) (armnetwork.RoutesClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, routeTableName, routeName, options) + ret0, _ := ret[0].(armnetwork.RoutesClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockRoutesClientMockRecorder) Get(ctx, resourceGroupName, routeTableName, routeName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockRoutesClient)(nil).Get), ctx, resourceGroupName, routeTableName, routeName, options) +} + +// NewListPager mocks base method. +func (m *MockRoutesClient) NewListPager(resourceGroupName, routeTableName string, options *armnetwork.RoutesClientListOptions) clients.RoutesPager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewListPager", resourceGroupName, routeTableName, options) + ret0, _ := ret[0].(clients.RoutesPager) + return ret0 +} + +// NewListPager indicates an expected call of NewListPager. 
+func (mr *MockRoutesClientMockRecorder) NewListPager(resourceGroupName, routeTableName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewListPager", reflect.TypeOf((*MockRoutesClient)(nil).NewListPager), resourceGroupName, routeTableName, options) +} diff --git a/sources/azure/shared/mocks/mock_security_rules_client.go b/sources/azure/shared/mocks/mock_security_rules_client.go new file mode 100644 index 00000000..1078baa3 --- /dev/null +++ b/sources/azure/shared/mocks/mock_security_rules_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: security-rules-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_security_rules_client.go -package=mocks -source=security-rules-client.go +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockSecurityRulesClient is a mock of SecurityRulesClient interface. +type MockSecurityRulesClient struct { + ctrl *gomock.Controller + recorder *MockSecurityRulesClientMockRecorder + isgomock struct{} +} + +// MockSecurityRulesClientMockRecorder is the mock recorder for MockSecurityRulesClient. +type MockSecurityRulesClientMockRecorder struct { + mock *MockSecurityRulesClient +} + +// NewMockSecurityRulesClient creates a new mock instance. +func NewMockSecurityRulesClient(ctrl *gomock.Controller) *MockSecurityRulesClient { + mock := &MockSecurityRulesClient{ctrl: ctrl} + mock.recorder = &MockSecurityRulesClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
+func (m *MockSecurityRulesClient) EXPECT() *MockSecurityRulesClientMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockSecurityRulesClient) Get(ctx context.Context, resourceGroupName, networkSecurityGroupName, securityRuleName string, options *armnetwork.SecurityRulesClientGetOptions) (armnetwork.SecurityRulesClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, networkSecurityGroupName, securityRuleName, options) + ret0, _ := ret[0].(armnetwork.SecurityRulesClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockSecurityRulesClientMockRecorder) Get(ctx, resourceGroupName, networkSecurityGroupName, securityRuleName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockSecurityRulesClient)(nil).Get), ctx, resourceGroupName, networkSecurityGroupName, securityRuleName, options) +} + +// NewListPager mocks base method. +func (m *MockSecurityRulesClient) NewListPager(resourceGroupName, networkSecurityGroupName string, options *armnetwork.SecurityRulesClientListOptions) clients.SecurityRulesPager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewListPager", resourceGroupName, networkSecurityGroupName, options) + ret0, _ := ret[0].(clients.SecurityRulesPager) + return ret0 +} + +// NewListPager indicates an expected call of NewListPager. 
+func (mr *MockSecurityRulesClientMockRecorder) NewListPager(resourceGroupName, networkSecurityGroupName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewListPager", reflect.TypeOf((*MockSecurityRulesClient)(nil).NewListPager), resourceGroupName, networkSecurityGroupName, options) +} diff --git a/sources/azure/shared/mocks/mock_sql_elastic_pool_client.go b/sources/azure/shared/mocks/mock_sql_elastic_pool_client.go new file mode 100644 index 00000000..cf21d653 --- /dev/null +++ b/sources/azure/shared/mocks/mock_sql_elastic_pool_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: sql-elastic-pool-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_sql_elastic_pool_client.go -package=mocks -source=sql-elastic-pool-client.go +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + armsql "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/sql/armsql/v2" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockSqlElasticPoolClient is a mock of SqlElasticPoolClient interface. +type MockSqlElasticPoolClient struct { + ctrl *gomock.Controller + recorder *MockSqlElasticPoolClientMockRecorder + isgomock struct{} +} + +// MockSqlElasticPoolClientMockRecorder is the mock recorder for MockSqlElasticPoolClient. +type MockSqlElasticPoolClientMockRecorder struct { + mock *MockSqlElasticPoolClient +} + +// NewMockSqlElasticPoolClient creates a new mock instance. +func NewMockSqlElasticPoolClient(ctrl *gomock.Controller) *MockSqlElasticPoolClient { + mock := &MockSqlElasticPoolClient{ctrl: ctrl} + mock.recorder = &MockSqlElasticPoolClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
+func (m *MockSqlElasticPoolClient) EXPECT() *MockSqlElasticPoolClientMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockSqlElasticPoolClient) Get(ctx context.Context, resourceGroupName, serverName, elasticPoolName string) (armsql.ElasticPoolsClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, serverName, elasticPoolName) + ret0, _ := ret[0].(armsql.ElasticPoolsClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockSqlElasticPoolClientMockRecorder) Get(ctx, resourceGroupName, serverName, elasticPoolName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockSqlElasticPoolClient)(nil).Get), ctx, resourceGroupName, serverName, elasticPoolName) +} + +// ListByServer mocks base method. +func (m *MockSqlElasticPoolClient) ListByServer(ctx context.Context, resourceGroupName, serverName string) clients.SqlElasticPoolPager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListByServer", ctx, resourceGroupName, serverName) + ret0, _ := ret[0].(clients.SqlElasticPoolPager) + return ret0 +} + +// ListByServer indicates an expected call of ListByServer. +func (mr *MockSqlElasticPoolClientMockRecorder) ListByServer(ctx, resourceGroupName, serverName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByServer", reflect.TypeOf((*MockSqlElasticPoolClient)(nil).ListByServer), ctx, resourceGroupName, serverName) +} diff --git a/sources/azure/shared/mocks/mock_sql_server_firewall_rule_client.go b/sources/azure/shared/mocks/mock_sql_server_firewall_rule_client.go new file mode 100644 index 00000000..a25ffe4a --- /dev/null +++ b/sources/azure/shared/mocks/mock_sql_server_firewall_rule_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. 
+// Source: sql-server-firewall-rule-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_sql_server_firewall_rule_client.go -package=mocks -source=sql-server-firewall-rule-client.go +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + armsql "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/sql/armsql/v2" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockSqlServerFirewallRuleClient is a mock of SqlServerFirewallRuleClient interface. +type MockSqlServerFirewallRuleClient struct { + ctrl *gomock.Controller + recorder *MockSqlServerFirewallRuleClientMockRecorder + isgomock struct{} +} + +// MockSqlServerFirewallRuleClientMockRecorder is the mock recorder for MockSqlServerFirewallRuleClient. +type MockSqlServerFirewallRuleClientMockRecorder struct { + mock *MockSqlServerFirewallRuleClient +} + +// NewMockSqlServerFirewallRuleClient creates a new mock instance. +func NewMockSqlServerFirewallRuleClient(ctrl *gomock.Controller) *MockSqlServerFirewallRuleClient { + mock := &MockSqlServerFirewallRuleClient{ctrl: ctrl} + mock.recorder = &MockSqlServerFirewallRuleClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSqlServerFirewallRuleClient) EXPECT() *MockSqlServerFirewallRuleClientMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockSqlServerFirewallRuleClient) Get(ctx context.Context, resourceGroupName, serverName, firewallRuleName string) (armsql.FirewallRulesClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, serverName, firewallRuleName) + ret0, _ := ret[0].(armsql.FirewallRulesClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. 
+func (mr *MockSqlServerFirewallRuleClientMockRecorder) Get(ctx, resourceGroupName, serverName, firewallRuleName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockSqlServerFirewallRuleClient)(nil).Get), ctx, resourceGroupName, serverName, firewallRuleName) +} + +// ListByServer mocks base method. +func (m *MockSqlServerFirewallRuleClient) ListByServer(ctx context.Context, resourceGroupName, serverName string) clients.SqlServerFirewallRulePager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListByServer", ctx, resourceGroupName, serverName) + ret0, _ := ret[0].(clients.SqlServerFirewallRulePager) + return ret0 +} + +// ListByServer indicates an expected call of ListByServer. +func (mr *MockSqlServerFirewallRuleClientMockRecorder) ListByServer(ctx, resourceGroupName, serverName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByServer", reflect.TypeOf((*MockSqlServerFirewallRuleClient)(nil).ListByServer), ctx, resourceGroupName, serverName) +} diff --git a/sources/azure/shared/mocks/mock_sql_server_private_endpoint_connection_client.go b/sources/azure/shared/mocks/mock_sql_server_private_endpoint_connection_client.go new file mode 100644 index 00000000..70c814fb --- /dev/null +++ b/sources/azure/shared/mocks/mock_sql_server_private_endpoint_connection_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: sql-server-private-endpoint-connection-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_sql_server_private_endpoint_connection_client.go -package=mocks -source=sql-server-private-endpoint-connection-client.go +// + +// Package mocks is a generated GoMock package. 
+package mocks + +import ( + context "context" + reflect "reflect" + + armsql "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/sql/armsql/v2" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockSQLServerPrivateEndpointConnectionsClient is a mock of SQLServerPrivateEndpointConnectionsClient interface. +type MockSQLServerPrivateEndpointConnectionsClient struct { + ctrl *gomock.Controller + recorder *MockSQLServerPrivateEndpointConnectionsClientMockRecorder + isgomock struct{} +} + +// MockSQLServerPrivateEndpointConnectionsClientMockRecorder is the mock recorder for MockSQLServerPrivateEndpointConnectionsClient. +type MockSQLServerPrivateEndpointConnectionsClientMockRecorder struct { + mock *MockSQLServerPrivateEndpointConnectionsClient +} + +// NewMockSQLServerPrivateEndpointConnectionsClient creates a new mock instance. +func NewMockSQLServerPrivateEndpointConnectionsClient(ctrl *gomock.Controller) *MockSQLServerPrivateEndpointConnectionsClient { + mock := &MockSQLServerPrivateEndpointConnectionsClient{ctrl: ctrl} + mock.recorder = &MockSQLServerPrivateEndpointConnectionsClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSQLServerPrivateEndpointConnectionsClient) EXPECT() *MockSQLServerPrivateEndpointConnectionsClientMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockSQLServerPrivateEndpointConnectionsClient) Get(ctx context.Context, resourceGroupName, serverName, privateEndpointConnectionName string) (armsql.PrivateEndpointConnectionsClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, serverName, privateEndpointConnectionName) + ret0, _ := ret[0].(armsql.PrivateEndpointConnectionsClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. 
+func (mr *MockSQLServerPrivateEndpointConnectionsClientMockRecorder) Get(ctx, resourceGroupName, serverName, privateEndpointConnectionName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockSQLServerPrivateEndpointConnectionsClient)(nil).Get), ctx, resourceGroupName, serverName, privateEndpointConnectionName) +} + +// ListByServer mocks base method. +func (m *MockSQLServerPrivateEndpointConnectionsClient) ListByServer(ctx context.Context, resourceGroupName, serverName string) clients.SQLServerPrivateEndpointConnectionsPager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListByServer", ctx, resourceGroupName, serverName) + ret0, _ := ret[0].(clients.SQLServerPrivateEndpointConnectionsPager) + return ret0 +} + +// ListByServer indicates an expected call of ListByServer. +func (mr *MockSQLServerPrivateEndpointConnectionsClientMockRecorder) ListByServer(ctx, resourceGroupName, serverName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByServer", reflect.TypeOf((*MockSQLServerPrivateEndpointConnectionsClient)(nil).ListByServer), ctx, resourceGroupName, serverName) +} diff --git a/sources/azure/shared/mocks/mock_sql_server_virtual_network_rule_client.go b/sources/azure/shared/mocks/mock_sql_server_virtual_network_rule_client.go new file mode 100644 index 00000000..3e512121 --- /dev/null +++ b/sources/azure/shared/mocks/mock_sql_server_virtual_network_rule_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: sql-server-virtual-network-rule-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_sql_server_virtual_network_rule_client.go -package=mocks -source=sql-server-virtual-network-rule-client.go +// + +// Package mocks is a generated GoMock package. 
+package mocks + +import ( + context "context" + reflect "reflect" + + armsql "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/sql/armsql/v2" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockSqlServerVirtualNetworkRuleClient is a mock of SqlServerVirtualNetworkRuleClient interface. +type MockSqlServerVirtualNetworkRuleClient struct { + ctrl *gomock.Controller + recorder *MockSqlServerVirtualNetworkRuleClientMockRecorder + isgomock struct{} +} + +// MockSqlServerVirtualNetworkRuleClientMockRecorder is the mock recorder for MockSqlServerVirtualNetworkRuleClient. +type MockSqlServerVirtualNetworkRuleClientMockRecorder struct { + mock *MockSqlServerVirtualNetworkRuleClient +} + +// NewMockSqlServerVirtualNetworkRuleClient creates a new mock instance. +func NewMockSqlServerVirtualNetworkRuleClient(ctrl *gomock.Controller) *MockSqlServerVirtualNetworkRuleClient { + mock := &MockSqlServerVirtualNetworkRuleClient{ctrl: ctrl} + mock.recorder = &MockSqlServerVirtualNetworkRuleClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSqlServerVirtualNetworkRuleClient) EXPECT() *MockSqlServerVirtualNetworkRuleClientMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockSqlServerVirtualNetworkRuleClient) Get(ctx context.Context, resourceGroupName, serverName, virtualNetworkRuleName string) (armsql.VirtualNetworkRulesClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, serverName, virtualNetworkRuleName) + ret0, _ := ret[0].(armsql.VirtualNetworkRulesClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. 
+func (mr *MockSqlServerVirtualNetworkRuleClientMockRecorder) Get(ctx, resourceGroupName, serverName, virtualNetworkRuleName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockSqlServerVirtualNetworkRuleClient)(nil).Get), ctx, resourceGroupName, serverName, virtualNetworkRuleName) +} + +// ListByServer mocks base method. +func (m *MockSqlServerVirtualNetworkRuleClient) ListByServer(ctx context.Context, resourceGroupName, serverName string) clients.SqlServerVirtualNetworkRulePager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListByServer", ctx, resourceGroupName, serverName) + ret0, _ := ret[0].(clients.SqlServerVirtualNetworkRulePager) + return ret0 +} + +// ListByServer indicates an expected call of ListByServer. +func (mr *MockSqlServerVirtualNetworkRuleClientMockRecorder) ListByServer(ctx, resourceGroupName, serverName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByServer", reflect.TypeOf((*MockSqlServerVirtualNetworkRuleClient)(nil).ListByServer), ctx, resourceGroupName, serverName) +} diff --git a/sources/azure/shared/mocks/mock_storage_private_endpoint_connection_client.go b/sources/azure/shared/mocks/mock_storage_private_endpoint_connection_client.go new file mode 100644 index 00000000..7fce19df --- /dev/null +++ b/sources/azure/shared/mocks/mock_storage_private_endpoint_connection_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: storage-private-endpoint-connection-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_storage_private_endpoint_connection_client.go -package=mocks -source=storage-private-endpoint-connection-client.go +// + +// Package mocks is a generated GoMock package. 
+package mocks + +import ( + context "context" + reflect "reflect" + + armstorage "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage/v3" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockStoragePrivateEndpointConnectionsClient is a mock of StoragePrivateEndpointConnectionsClient interface. +type MockStoragePrivateEndpointConnectionsClient struct { + ctrl *gomock.Controller + recorder *MockStoragePrivateEndpointConnectionsClientMockRecorder + isgomock struct{} +} + +// MockStoragePrivateEndpointConnectionsClientMockRecorder is the mock recorder for MockStoragePrivateEndpointConnectionsClient. +type MockStoragePrivateEndpointConnectionsClientMockRecorder struct { + mock *MockStoragePrivateEndpointConnectionsClient +} + +// NewMockStoragePrivateEndpointConnectionsClient creates a new mock instance. +func NewMockStoragePrivateEndpointConnectionsClient(ctrl *gomock.Controller) *MockStoragePrivateEndpointConnectionsClient { + mock := &MockStoragePrivateEndpointConnectionsClient{ctrl: ctrl} + mock.recorder = &MockStoragePrivateEndpointConnectionsClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockStoragePrivateEndpointConnectionsClient) EXPECT() *MockStoragePrivateEndpointConnectionsClientMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockStoragePrivateEndpointConnectionsClient) Get(ctx context.Context, resourceGroupName, accountName, privateEndpointConnectionName string) (armstorage.PrivateEndpointConnectionsClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, accountName, privateEndpointConnectionName) + ret0, _ := ret[0].(armstorage.PrivateEndpointConnectionsClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. 
+func (mr *MockStoragePrivateEndpointConnectionsClientMockRecorder) Get(ctx, resourceGroupName, accountName, privateEndpointConnectionName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockStoragePrivateEndpointConnectionsClient)(nil).Get), ctx, resourceGroupName, accountName, privateEndpointConnectionName) +} + +// List mocks base method. +func (m *MockStoragePrivateEndpointConnectionsClient) List(ctx context.Context, resourceGroupName, accountName string) clients.PrivateEndpointConnectionsPager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "List", ctx, resourceGroupName, accountName) + ret0, _ := ret[0].(clients.PrivateEndpointConnectionsPager) + return ret0 +} + +// List indicates an expected call of List. +func (mr *MockStoragePrivateEndpointConnectionsClientMockRecorder) List(ctx, resourceGroupName, accountName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockStoragePrivateEndpointConnectionsClient)(nil).List), ctx, resourceGroupName, accountName) +} diff --git a/sources/azure/shared/mocks/mock_subnets_client.go b/sources/azure/shared/mocks/mock_subnets_client.go new file mode 100644 index 00000000..91455aad --- /dev/null +++ b/sources/azure/shared/mocks/mock_subnets_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: subnets-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_subnets_client.go -package=mocks -source=subnets-client.go +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockSubnetsClient is a mock of SubnetsClient interface. 
+type MockSubnetsClient struct { + ctrl *gomock.Controller + recorder *MockSubnetsClientMockRecorder + isgomock struct{} +} + +// MockSubnetsClientMockRecorder is the mock recorder for MockSubnetsClient. +type MockSubnetsClientMockRecorder struct { + mock *MockSubnetsClient +} + +// NewMockSubnetsClient creates a new mock instance. +func NewMockSubnetsClient(ctrl *gomock.Controller) *MockSubnetsClient { + mock := &MockSubnetsClient{ctrl: ctrl} + mock.recorder = &MockSubnetsClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSubnetsClient) EXPECT() *MockSubnetsClientMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockSubnetsClient) Get(ctx context.Context, resourceGroupName, virtualNetworkName, subnetName string, options *armnetwork.SubnetsClientGetOptions) (armnetwork.SubnetsClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, virtualNetworkName, subnetName, options) + ret0, _ := ret[0].(armnetwork.SubnetsClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockSubnetsClientMockRecorder) Get(ctx, resourceGroupName, virtualNetworkName, subnetName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockSubnetsClient)(nil).Get), ctx, resourceGroupName, virtualNetworkName, subnetName, options) +} + +// NewListPager mocks base method. +func (m *MockSubnetsClient) NewListPager(resourceGroupName, virtualNetworkName string, options *armnetwork.SubnetsClientListOptions) clients.SubnetsPager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewListPager", resourceGroupName, virtualNetworkName, options) + ret0, _ := ret[0].(clients.SubnetsPager) + return ret0 +} + +// NewListPager indicates an expected call of NewListPager. 
+func (mr *MockSubnetsClientMockRecorder) NewListPager(resourceGroupName, virtualNetworkName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewListPager", reflect.TypeOf((*MockSubnetsClient)(nil).NewListPager), resourceGroupName, virtualNetworkName, options) +} diff --git a/sources/azure/shared/mocks/mock_virtual_network_gateways_client.go b/sources/azure/shared/mocks/mock_virtual_network_gateways_client.go new file mode 100644 index 00000000..4d9a99ef --- /dev/null +++ b/sources/azure/shared/mocks/mock_virtual_network_gateways_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: virtual-network-gateways-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_virtual_network_gateways_client.go -package=mocks -source=virtual-network-gateways-client.go +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockVirtualNetworkGatewaysClient is a mock of VirtualNetworkGatewaysClient interface. +type MockVirtualNetworkGatewaysClient struct { + ctrl *gomock.Controller + recorder *MockVirtualNetworkGatewaysClientMockRecorder + isgomock struct{} +} + +// MockVirtualNetworkGatewaysClientMockRecorder is the mock recorder for MockVirtualNetworkGatewaysClient. +type MockVirtualNetworkGatewaysClientMockRecorder struct { + mock *MockVirtualNetworkGatewaysClient +} + +// NewMockVirtualNetworkGatewaysClient creates a new mock instance. 
+func NewMockVirtualNetworkGatewaysClient(ctrl *gomock.Controller) *MockVirtualNetworkGatewaysClient { + mock := &MockVirtualNetworkGatewaysClient{ctrl: ctrl} + mock.recorder = &MockVirtualNetworkGatewaysClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockVirtualNetworkGatewaysClient) EXPECT() *MockVirtualNetworkGatewaysClientMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockVirtualNetworkGatewaysClient) Get(ctx context.Context, resourceGroupName, virtualNetworkGatewayName string, options *armnetwork.VirtualNetworkGatewaysClientGetOptions) (armnetwork.VirtualNetworkGatewaysClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, virtualNetworkGatewayName, options) + ret0, _ := ret[0].(armnetwork.VirtualNetworkGatewaysClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockVirtualNetworkGatewaysClientMockRecorder) Get(ctx, resourceGroupName, virtualNetworkGatewayName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockVirtualNetworkGatewaysClient)(nil).Get), ctx, resourceGroupName, virtualNetworkGatewayName, options) +} + +// NewListPager mocks base method. +func (m *MockVirtualNetworkGatewaysClient) NewListPager(resourceGroupName string, options *armnetwork.VirtualNetworkGatewaysClientListOptions) clients.VirtualNetworkGatewaysPager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewListPager", resourceGroupName, options) + ret0, _ := ret[0].(clients.VirtualNetworkGatewaysPager) + return ret0 +} + +// NewListPager indicates an expected call of NewListPager. 
+func (mr *MockVirtualNetworkGatewaysClientMockRecorder) NewListPager(resourceGroupName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewListPager", reflect.TypeOf((*MockVirtualNetworkGatewaysClient)(nil).NewListPager), resourceGroupName, options) +} diff --git a/sources/azure/shared/mocks/mock_virtual_network_peerings_client.go b/sources/azure/shared/mocks/mock_virtual_network_peerings_client.go new file mode 100644 index 00000000..8498857b --- /dev/null +++ b/sources/azure/shared/mocks/mock_virtual_network_peerings_client.go @@ -0,0 +1,72 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: virtual-network-peerings-client.go +// +// Generated by this command: +// +// mockgen -destination=../shared/mocks/mock_virtual_network_peerings_client.go -package=mocks -source=virtual-network-peerings-client.go +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" + clients "github.com/overmindtech/cli/sources/azure/clients" + gomock "go.uber.org/mock/gomock" +) + +// MockVirtualNetworkPeeringsClient is a mock of VirtualNetworkPeeringsClient interface. +type MockVirtualNetworkPeeringsClient struct { + ctrl *gomock.Controller + recorder *MockVirtualNetworkPeeringsClientMockRecorder + isgomock struct{} +} + +// MockVirtualNetworkPeeringsClientMockRecorder is the mock recorder for MockVirtualNetworkPeeringsClient. +type MockVirtualNetworkPeeringsClientMockRecorder struct { + mock *MockVirtualNetworkPeeringsClient +} + +// NewMockVirtualNetworkPeeringsClient creates a new mock instance. 
+func NewMockVirtualNetworkPeeringsClient(ctrl *gomock.Controller) *MockVirtualNetworkPeeringsClient { + mock := &MockVirtualNetworkPeeringsClient{ctrl: ctrl} + mock.recorder = &MockVirtualNetworkPeeringsClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockVirtualNetworkPeeringsClient) EXPECT() *MockVirtualNetworkPeeringsClientMockRecorder { + return m.recorder +} + +// Get mocks base method. +func (m *MockVirtualNetworkPeeringsClient) Get(ctx context.Context, resourceGroupName, virtualNetworkName, peeringName string, options *armnetwork.VirtualNetworkPeeringsClientGetOptions) (armnetwork.VirtualNetworkPeeringsClientGetResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, virtualNetworkName, peeringName, options) + ret0, _ := ret[0].(armnetwork.VirtualNetworkPeeringsClientGetResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockVirtualNetworkPeeringsClientMockRecorder) Get(ctx, resourceGroupName, virtualNetworkName, peeringName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockVirtualNetworkPeeringsClient)(nil).Get), ctx, resourceGroupName, virtualNetworkName, peeringName, options) +} + +// NewListPager mocks base method. +func (m *MockVirtualNetworkPeeringsClient) NewListPager(resourceGroupName, virtualNetworkName string, options *armnetwork.VirtualNetworkPeeringsClientListOptions) clients.VirtualNetworkPeeringsPager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewListPager", resourceGroupName, virtualNetworkName, options) + ret0, _ := ret[0].(clients.VirtualNetworkPeeringsPager) + return ret0 +} + +// NewListPager indicates an expected call of NewListPager. 
+func (mr *MockVirtualNetworkPeeringsClientMockRecorder) NewListPager(resourceGroupName, virtualNetworkName, options any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewListPager", reflect.TypeOf((*MockVirtualNetworkPeeringsClient)(nil).NewListPager), resourceGroupName, virtualNetworkName, options) +} diff --git a/sources/azure/shared/mocks/mock_virtual_networks_client.go b/sources/azure/shared/mocks/mock_virtual_networks_client.go index d979f6f1..85a33e04 100644 --- a/sources/azure/shared/mocks/mock_virtual_networks_client.go +++ b/sources/azure/shared/mocks/mock_virtual_networks_client.go @@ -13,7 +13,7 @@ import ( context "context" reflect "reflect" - armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v8" + armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v9" clients "github.com/overmindtech/cli/sources/azure/clients" gomock "go.uber.org/mock/gomock" ) diff --git a/sources/azure/shared/models.go b/sources/azure/shared/models.go index 0025c3c7..a2a12cb6 100644 --- a/sources/azure/shared/models.go +++ b/sources/azure/shared/models.go @@ -49,6 +49,9 @@ const ( // Resources (subscriptions, resource groups) Resources shared.API = "resources" // Microsoft.Resources + + // ExtendedLocation (custom locations, edge zones) + ExtendedLocation shared.API = "extendedlocation" // Microsoft.ExtendedLocation ) // Resources @@ -97,6 +100,7 @@ const ( LoadBalancerOutboundRule shared.Resource = "load-balancer-outbound-rule" LoadBalancerInboundNatPool shared.Resource = "load-balancer-inbound-nat-pool" PublicIPPrefix shared.Resource = "public-ip-prefix" + CustomIPPrefix shared.Resource = "custom-ip-prefix" NatGateway shared.Resource = "nat-gateway" DdosProtectionPlan shared.Resource = "ddos-protection-plan" ApplicationGateway shared.Resource = "application-gateway" @@ -121,6 +125,8 @@ const ( RouteTable shared.Resource = "route-table" Route shared.Resource = 
"route" VirtualNetworkGateway shared.Resource = "virtual-network-gateway" + VirtualNetworkGatewayConnection shared.Resource = "virtual-network-gateway-connection" + LocalNetworkGateway shared.Resource = "local-network-gateway" PrivateDNSZone shared.Resource = "private-dns-zone" Zone shared.Resource = "zone" DNSRecordSet shared.Resource = "dns-record-set" @@ -130,6 +136,8 @@ const ( DscpConfiguration shared.Resource = "dscp-configuration" VirtualNetworkTap shared.Resource = "virtual-network-tap" NetworkInterfaceTapConfiguration shared.Resource = "network-interface-tap-configuration" + ServiceEndpointPolicy shared.Resource = "service-endpoint-policy" + IpAllocation shared.Resource = "ip-allocation" // Storage resources Account shared.Resource = "account" @@ -220,4 +228,7 @@ const ( // Authorization resources RoleAssignment shared.Resource = "role-assignment" RoleDefinition shared.Resource = "role-definition" + + // ExtendedLocation resources + CustomLocation shared.Resource = "custom-location" ) diff --git a/sources/azure/shared/resource_id_item_type.go b/sources/azure/shared/resource_id_item_type.go new file mode 100644 index 00000000..a1db76c9 --- /dev/null +++ b/sources/azure/shared/resource_id_item_type.go @@ -0,0 +1,98 @@ +package shared + +import ( + "strings" + "unicode" +) + +// azureProviderToAPI maps Azure resource provider namespaces to the short API names used in +// item types (see models.go). Enables generated linked queries to match existing adapter +// naming: azure-{api}-{resource} with kebab-case resource. 
+var azureProviderToAPI = map[string]string{ + "microsoft.compute": "compute", + "microsoft.network": "network", + "microsoft.storage": "storage", + "microsoft.sql": "sql", + "microsoft.documentdb": "documentdb", + "microsoft.keyvault": "keyvault", + "microsoft.managedidentity": "managedidentity", + "microsoft.batch": "batch", + "microsoft.dbforpostgresql": "dbforpostgresql", + "microsoft.elasticsan": "elasticsan", + "microsoft.authorization": "authorization", + "microsoft.maintenance": "maintenance", + "microsoft.resources": "resources", +} + +// CamelCaseToKebab converts Azure camelCase resource type (e.g. virtualNetworks, publicIPAddresses) +// to kebab-case (e.g. virtual-networks, public-ip-addresses) to match project convention in models.go. +// Consecutive uppercase letters are treated as a single acronym (e.g. IP stays together). +func CamelCaseToKebab(s string) string { + if s == "" { + return "" + } + var b strings.Builder + runes := []rune(s) + for i, r := range runes { + if unicode.IsUpper(r) { + prevLower := i > 0 && unicode.IsLower(runes[i-1]) + nextLower := i+1 < len(runes) && unicode.IsLower(runes[i+1]) + // Insert hyphen before uppercase when: after a lowercase letter, or when this uppercase starts a word (next is lower) + if i > 0 && (prevLower || (unicode.IsUpper(runes[i-1]) && nextLower)) { + b.WriteByte('-') + } + b.WriteRune(unicode.ToLower(r)) + } else { + b.WriteRune(unicode.ToLower(r)) + } + } + return b.String() +} + +// SingularizeResourceType converts Azure plural resource type to singular form to match +// models.go (e.g. virtual-networks -> virtual-network, galleries -> gallery, identities -> identity). +func SingularizeResourceType(kebab string) string { + if kebab == "" { + return kebab + } + // -ies -> -y (e.g. galleries -> gallery, user-assigned-identities -> user-assigned-identity) + if before, ok := strings.CutSuffix(kebab, "ies"); ok { + return before + "y" + } + // -addresses -> -address (e.g. 
public-ip-addresses -> public-ip-address) + if before, ok := strings.CutSuffix(kebab, "addresses"); ok { + return before + "address" + } + if before, ok := strings.CutSuffix(kebab, "s"); ok { + return before + } + return kebab +} + +// ItemTypeFromLinkedResourceID derives an item type string from an Azure resource ID for use in +// LinkedItemQueries (e.g. ResourceNavigationLink, ServiceAssociationLink). Uses short API names +// and kebab-case singular resource types so generated types match existing adapter naming +// (e.g. azure-network-virtual-network). For unknown providers, returns empty so callers can +// fall back to a generic type such as "azure-resource". +func ItemTypeFromLinkedResourceID(resourceID string) string { + if resourceID == "" { + return "" + } + parts := strings.Split(strings.Trim(resourceID, "/"), "/") + for i, part := range parts { + if strings.EqualFold(part, "providers") && i+2 < len(parts) { + provider := strings.ToLower(parts[i+1]) + resourceTypeRaw := parts[i+2] + api, ok := azureProviderToAPI[provider] + if !ok { + return "" + } + resourceType := SingularizeResourceType(CamelCaseToKebab(resourceTypeRaw)) + if resourceType == "" { + return "" + } + return "azure-" + api + "-" + resourceType + } + } + return "" +} diff --git a/sources/azure/shared/resource_id_item_type_test.go b/sources/azure/shared/resource_id_item_type_test.go new file mode 100644 index 00000000..ae86fba5 --- /dev/null +++ b/sources/azure/shared/resource_id_item_type_test.go @@ -0,0 +1,116 @@ +package shared_test + +import ( + "testing" + + azureshared "github.com/overmindtech/cli/sources/azure/shared" +) + +func TestCamelCaseToKebab(t *testing.T) { + tests := []struct { + name string + input string + expected string + }{ + {"virtualNetworks", "virtualNetworks", "virtual-networks"}, + {"managedInstances", "managedInstances", "managed-instances"}, + {"applicationGateways", "applicationGateways", "application-gateways"}, + {"publicIPAddresses (acronym)", 
"publicIPAddresses", "public-ip-addresses"}, + {"empty", "", ""}, + {"single word lowercase", "subnet", "subnet"}, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := azureshared.CamelCaseToKebab(tc.input) + if got != tc.expected { + t.Errorf("CamelCaseToKebab(%q) = %q; want %q", tc.input, got, tc.expected) + } + }) + } +} + +func TestSingularizeResourceType(t *testing.T) { + tests := []struct { + name string + input string + expected string + }{ + {"virtual-networks", "virtual-networks", "virtual-network"}, + {"managed-instances", "managed-instances", "managed-instance"}, + {"galleries -> gallery", "galleries", "gallery"}, + {"user-assigned-identities -> user-assigned-identity", "user-assigned-identities", "user-assigned-identity"}, + {"public-ip-addresses -> public-ip-address", "public-ip-addresses", "public-ip-address"}, + {"no trailing s", "virtual-network", "virtual-network"}, + {"empty", "", ""}, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := azureshared.SingularizeResourceType(tc.input) + if got != tc.expected { + t.Errorf("SingularizeResourceType(%q) = %q; want %q", tc.input, got, tc.expected) + } + }) + } +} + +func TestItemTypeFromLinkedResourceID(t *testing.T) { + tests := []struct { + name string + resourceID string + expected string + }{ + { + name: "Microsoft.Network virtualNetworks", + resourceID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRg/providers/Microsoft.Network/virtualNetworks/myVnet", + expected: "azure-network-virtual-network", + }, + { + name: "Microsoft.Sql managedInstances", + resourceID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRg/providers/Microsoft.Sql/managedInstances/myMI", + expected: "azure-sql-managed-instance", + }, + { + name: "Microsoft.Compute virtualMachines", + resourceID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm1", + expected: "azure-compute-virtual-machine", + }, 
+ { + name: "unknown provider returns empty", + resourceID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Unknown/fooBars/name", + expected: "", + }, + { + name: "empty ID returns empty", + resourceID: "", + expected: "", + }, + { + name: "no providers segment returns empty", + resourceID: "/not/a/valid/resource/id", + expected: "", + }, + { + name: "Microsoft.Compute galleries", + resourceID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/galleries/myGallery", + expected: "azure-compute-gallery", + }, + { + name: "Microsoft.ManagedIdentity userAssignedIdentities", + resourceID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myIdentity", + expected: "azure-managedidentity-user-assigned-identity", + }, + { + name: "Microsoft.Network publicIPAddresses (acronym)", + resourceID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/publicIPAddresses/myPublicIP", + expected: "azure-network-public-ip-address", + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := azureshared.ItemTypeFromLinkedResourceID(tc.resourceID) + if got != tc.expected { + t.Errorf("ItemTypeFromLinkedResourceID(%q) = %q; want %q", tc.resourceID, got, tc.expected) + } + }) + } +} diff --git a/sources/azure/shared/utils.go b/sources/azure/shared/utils.go index 941ea07f..a06ed1a2 100644 --- a/sources/azure/shared/utils.go +++ b/sources/azure/shared/utils.go @@ -20,16 +20,37 @@ func GetResourceIDPathKeys(resourceType string) []string { pathKeysMap := map[string][]string{ "azure-storage-queue": {"storageAccounts", "queues"}, "azure-storage-blob-container": {"storageAccounts", "containers"}, - "azure-storage-file-share": {"storageAccounts", "shares"}, - "azure-storage-table": {"storageAccounts", "tables"}, + "azure-storage-encryption-scope": {"storageAccounts", "encryptionScopes"}, + "azure-storage-file-share": {"storageAccounts", "shares"}, + 
"azure-storage-storage-account-private-endpoint-connection": {"storageAccounts", "privateEndpointConnections"}, + "azure-documentdb-private-endpoint-connection": {"databaseAccounts", "privateEndpointConnections"}, + "azure-storage-table": {"storageAccounts", "tables"}, "azure-sql-database": {"servers", "databases"}, // "/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/Default-SQL-SouthEastAsia/providers/Microsoft.Sql/servers/testsvr/databases/testdb", - "azure-dbforpostgresql-database": {"flexibleServers", "databases"}, // "/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/Default-PostgreSQL-SouthEastAsia/providers/Microsoft.DBforPostgreSQL/flexibleServers/testsvr/databases/testdb", + "azure-sql-elastic-pool": {"servers", "elasticPools"}, // "/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Sql/servers/{serverName}/elasticPools/{elasticPoolName}", + "azure-sql-server-firewall-rule": {"servers", "firewallRules"}, // "/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Sql/servers/{serverName}/firewallRules/{ruleName}", + "azure-sql-server-virtual-network-rule": {"servers", "virtualNetworkRules"}, // "/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Sql/servers/{serverName}/virtualNetworkRules/{ruleName}", + "azure-sql-server-private-endpoint-connection": {"servers", "privateEndpointConnections"}, // "/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Sql/servers/{serverName}/privateEndpointConnections/{connectionName}", + "azure-dbforpostgresql-database": {"flexibleServers", "databases"}, // "/subscriptions/.../Microsoft.DBforPostgreSQL/flexibleServers/{server}/databases/{db}", + "azure-dbforpostgresql-flexible-server-firewall-rule": {"flexibleServers", "firewallRules"}, // "/subscriptions/.../Microsoft.DBforPostgreSQL/flexibleServers/{server}/firewallRules/{rule}", + "azure-dbforpostgresql-flexible-server-private-endpoint-connection": {"flexibleServers", "privateEndpointConnections"}, // 
"/subscriptions/.../Microsoft.DBforPostgreSQL/flexibleServers/{server}/privateEndpointConnections/{connectionName}", "azure-keyvault-secret": {"vaults", "secrets"}, // "/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.KeyVault/vaults/{vaultName}/secrets/{secretName}", + "azure-keyvault-key": {"vaults", "keys"}, // "/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.KeyVault/vaults/{vaultName}/keys/{keyName}", + "azure-keyvault-managed-hsm-private-endpoint-connection": {"managedHSMs", "privateEndpointConnections"}, // "/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.KeyVault/managedHSMs/{name}/privateEndpointConnections/{connectionName}", "azure-authorization-role-assignment": {"roleAssignments"}, // "/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Authorization/roleAssignments/{roleAssignmentName}", "azure-compute-virtual-machine-run-command": {"virtualMachines", "runCommands"}, // "/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Compute/virtualMachines/{virtualMachineName}/runCommands/{runCommandName}", "azure-compute-virtual-machine-extension": {"virtualMachines", "extensions"}, // "/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Compute/virtualMachines/{virtualMachineName}/extensions/{extensionName}", "azure-compute-gallery-application-version": {"galleries", "applications", "versions"}, // "/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{applicationName}/versions/{versionName}", + "azure-compute-gallery-application": {"galleries", "applications"}, // "/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{applicationName}", "azure-compute-gallery-image": {"galleries", "images"}, // "/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageName}", + "azure-compute-dedicated-host": {"hostGroups", "hosts"}, // 
"/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}", + "azure-compute-capacity-reservation": {"capacityReservationGroups", "capacityReservations"}, // "/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Compute/capacityReservationGroups/{groupName}/capacityReservations/{reservationName}", + "azure-network-subnet": {"virtualNetworks", "subnets"}, // "/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Network/virtualNetworks/{vnetName}/subnets/{subnetName}", + "azure-network-virtual-network-peering": {"virtualNetworks", "virtualNetworkPeerings"}, // "/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Network/virtualNetworks/{vnetName}/virtualNetworkPeerings/{peeringName}", + "azure-network-route": {"routeTables", "routes"}, // "/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}", + "azure-network-security-rule": {"networkSecurityGroups", "securityRules"}, // "/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Network/networkSecurityGroups/{nsgName}/securityRules/{ruleName}", + "azure-batch-batch-application": {"batchAccounts", "applications"}, // "/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}", + "azure-batch-batch-pool": {"batchAccounts", "pools"}, // "/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}", + "azure-network-dns-record-set": {"dnszones"}, // "/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Network/dnszones/{zoneName}/{recordType}/{relativeRecordSetName}" } if keys, ok := pathKeysMap[resourceType]; ok { @@ -101,6 +122,42 @@ func ExtractPathParamsFromResourceID(resourceID string, keys []string) []string return results } +// ExtractDNSRecordSetParamsFromResourceID extracts zone name, record type, and relative record set name +// from an 
Azure DNS record set resource ID. The path format is non-standard: after "dnszones" the next +// three segments are zoneName, recordType (e.g. "A", "AAAA"), and relativeRecordSetName—recordType is +// a value, not a path key, so ExtractPathParamsFromResourceID cannot be used. +// +// Example: .../dnszones/example.com/A/www returns ["example.com", "A", "www"]. +// Returns nil if the path does not match the expected structure. +func ExtractDNSRecordSetParamsFromResourceID(resourceID string) []string { + if resourceID == "" { + return nil + } + parts := strings.Split(strings.Trim(resourceID, "/"), "/") + for i, part := range parts { + if i%2 == 0 && strings.EqualFold(part, "dnszones") && i+3 < len(parts) { + return []string{parts[i+1], parts[i+2], parts[i+3]} + } + } + return nil +} + +// ExtractPathParamsFromResourceIDByType extracts query parts from an Azure resource ID for the given +// resource type. For azure-network-dns-record-set it uses ExtractDNSRecordSetParamsFromResourceID +// because the DNS path format (dnszones/zone/recordType/name) does not follow the usual key/value +// pattern. For all other types it uses GetResourceIDPathKeys and ExtractPathParamsFromResourceID. +// Returns nil if the type is unknown or extraction fails. +func ExtractPathParamsFromResourceIDByType(resourceType string, resourceID string) []string { + if resourceType == "azure-network-dns-record-set" { + return ExtractDNSRecordSetParamsFromResourceID(resourceID) + } + pathKeys := GetResourceIDPathKeys(resourceType) + if pathKeys == nil { + return nil + } + return ExtractPathParamsFromResourceID(resourceID, pathKeys) +} + // ExtractSQLServerNameFromDatabaseID extracts the SQL server name from a SQL database resource ID. 
// Azure SQL database IDs follow the format: // /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName} diff --git a/sources/azure/shared/utils_test.go b/sources/azure/shared/utils_test.go index 21126095..67cd2a8d 100644 --- a/sources/azure/shared/utils_test.go +++ b/sources/azure/shared/utils_test.go @@ -247,6 +247,73 @@ func TestExtractPathParamsFromResourceID(t *testing.T) { } } +func TestExtractDNSRecordSetParamsFromResourceID(t *testing.T) { + tests := []struct { + name string + resourceID string + expected []string + }{ + { + name: "valid DNS record set ID", + resourceID: "/subscriptions/sub-id/resourceGroups/rg/providers/Microsoft.Network/dnszones/example.com/A/www", + expected: []string{"example.com", "A", "www"}, + }, + { + name: "valid DNS record set ID - AAAA", + resourceID: "/subscriptions/sub-id/resourceGroups/rg/providers/Microsoft.Network/dnszones/zone.net/AAAA/mail", + expected: []string{"zone.net", "AAAA", "mail"}, + }, + { + name: "empty resource ID", + resourceID: "", + expected: nil, + }, + { + name: "no dnszones segment", + resourceID: "/subscriptions/sub-id/resourceGroups/rg/providers/Microsoft.Network/virtualNetworks/vnet", + expected: nil, + }, + { + name: "dnszones but not enough segments after", + resourceID: "/subscriptions/sub-id/resourceGroups/rg/providers/Microsoft.Network/dnszones/example.com", + expected: nil, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + actual := azureshared.ExtractDNSRecordSetParamsFromResourceID(tc.resourceID) + if !reflect.DeepEqual(actual, tc.expected) { + t.Errorf("ExtractDNSRecordSetParamsFromResourceID(%q) = %v; want %v", tc.resourceID, actual, tc.expected) + } + }) + } +} + +func TestExtractPathParamsFromResourceIDByType(t *testing.T) { + t.Run("azure-network-dns-record-set uses DNS extractor", func(t *testing.T) { + resourceID := 
"/subscriptions/sub-id/resourceGroups/rg/providers/Microsoft.Network/dnszones/example.com/A/www" + actual := azureshared.ExtractPathParamsFromResourceIDByType("azure-network-dns-record-set", resourceID) + expected := []string{"example.com", "A", "www"} + if !reflect.DeepEqual(actual, expected) { + t.Errorf("ExtractPathParamsFromResourceIDByType(azure-network-dns-record-set, ...) = %v; want %v", actual, expected) + } + }) + t.Run("other type uses path keys", func(t *testing.T) { + resourceID := "/subscriptions/sub-id/resourceGroups/rg/providers/Microsoft.Storage/storageAccounts/myaccount/queueServices/default/queues/myqueue" + actual := azureshared.ExtractPathParamsFromResourceIDByType("azure-storage-queue", resourceID) + expected := []string{"myaccount", "myqueue"} + if !reflect.DeepEqual(actual, expected) { + t.Errorf("ExtractPathParamsFromResourceIDByType(azure-storage-queue, ...) = %v; want %v", actual, expected) + } + }) + t.Run("unknown type returns nil", func(t *testing.T) { + actual := azureshared.ExtractPathParamsFromResourceIDByType("azure-unknown-type", "/some/id") + if actual != nil { + t.Errorf("ExtractPathParamsFromResourceIDByType(unknown) = %v; want nil", actual) + } + }) +} + func TestConvertAzureTags(t *testing.T) { tests := []struct { name string @@ -256,9 +323,9 @@ func TestConvertAzureTags(t *testing.T) { { name: "valid tags with values", azureTags: map[string]*string{ - "env": stringPtr("production"), - "project": stringPtr("overmind"), - "team": stringPtr("platform"), + "env": new("production"), + "project": new("overmind"), + "team": new("platform"), }, expected: map[string]string{ "env": "production", @@ -279,9 +346,9 @@ func TestConvertAzureTags(t *testing.T) { { name: "tags with nil values - should be skipped", azureTags: map[string]*string{ - "env": stringPtr("production"), + "env": new("production"), "project": nil, - "team": stringPtr("platform"), + "team": new("platform"), }, expected: map[string]string{ "env": "production", @@ -300,7 
+367,7 @@ func TestConvertAzureTags(t *testing.T) { { name: "single tag", azureTags: map[string]*string{ - "env": stringPtr("test"), + "env": new("test"), }, expected: map[string]string{ "env": "test", @@ -309,8 +376,8 @@ func TestConvertAzureTags(t *testing.T) { { name: "tags with empty string values", azureTags: map[string]*string{ - "env": stringPtr(""), - "project": stringPtr("overmind"), + "env": new(""), + "project": new("overmind"), }, expected: map[string]string{ "env": "", @@ -320,8 +387,8 @@ func TestConvertAzureTags(t *testing.T) { { name: "tags with special characters", azureTags: map[string]*string{ - "tag-with-dashes": stringPtr("value_with_underscores"), - "tag.with.dots": stringPtr("value with spaces"), + "tag-with-dashes": new("value_with_underscores"), + "tag.with.dots": new("value with spaces"), }, expected: map[string]string{ "tag-with-dashes": "value_with_underscores", @@ -340,11 +407,6 @@ func TestConvertAzureTags(t *testing.T) { } } -// stringPtr is a helper function to create a pointer to a string -func stringPtr(s string) *string { - return &s -} - func TestExtractSQLServerNameFromDatabaseID(t *testing.T) { tests := []struct { name string diff --git a/sources/gcp/build/package/Dockerfile b/sources/gcp/build/package/Dockerfile index 853a571a..bfd42f67 100644 --- a/sources/gcp/build/package/Dockerfile +++ b/sources/gcp/build/package/Dockerfile @@ -1,5 +1,5 @@ # Build the source binary -FROM golang:1.25-alpine AS builder +FROM golang:1.26-alpine AS builder ARG TARGETOS ARG TARGETARCH ARG BUILD_VERSION diff --git a/sources/gcp/dynamic/README.md b/sources/gcp/dynamic/README.md index 4438728f..6da085b6 100644 --- a/sources/gcp/dynamic/README.md +++ b/sources/gcp/dynamic/README.md @@ -54,8 +54,8 @@ The complete flow from making a GET request to creating an SDP adapter follows t 1. **Adapter Definition**: Define the adapter metadata in the adapter file (see [dynamic-adapter-creation.mdc](adapters/.cursor/rules/dynamic-adapter-creation.mdc)) 2. 
**Adapter Creation**: Framework creates the appropriate adapter type based on metadata configuration 3. **GET Request Processing**: Validate scope, check cache, construct URL, make HTTP request, convert to SDP item -4. **External Response to SDP Conversion**: Extract attributes, apply blast propagation rules, generate linked item queries -5. **Unit Test Coverage**: Test GET functionality and static tests for blast propagation +4. **External Response to SDP Conversion**: Extract attributes, apply link rules, generate linked item queries +5. **Unit Test Coverage**: Test GET functionality and static tests for link rules For detailed implementation patterns and code examples, refer to the [dynamic adapter creation rules](adapters/.cursor/rules/dynamic-adapter-creation.mdc). @@ -89,13 +89,13 @@ It is highly recommended to use Cursor for creating adapters. There are comprehe ### Adapter Validation 1. **Terraform Mappings GET/Search**: Check from Terraform registry that the mappings are correct -2. **Blast Propagations**: Verify they are comprehensive and attribute values follow standards +2. **Link Rules**: Verify they are comprehensive and attribute values follow standards 3. **Item Selector**: If the item identifier in the API response is something other than `name`, define it properly 4. **Unique Attribute Keys**: Investigate the GET endpoint format and ensure it's correct ### Test Completeness -1. **Blast Propagation/Linked Item Queries**: Verify they work as expected +1. **Linked Item Queries**: Verify they work as expected 2. **Unique Attribute**: Ensure it matches the GET call response 3. 
**Terraform Mapping for Search**: Confirm it exists if search is supported diff --git a/sources/gcp/dynamic/adapter-searchable-listable.go b/sources/gcp/dynamic/adapter-searchable-listable.go index d439690e..509c2e0a 100644 --- a/sources/gcp/dynamic/adapter-searchable-listable.go +++ b/sources/gcp/dynamic/adapter-searchable-listable.go @@ -24,6 +24,7 @@ type SearchableListableDiscoveryAdapter interface { type SearchableListableAdapter struct { customSearchMethodDescription string searchEndpointFunc gcpshared.EndpointFunc + searchFilterFunc gcpshared.SearchFilterFunc ListableAdapter } @@ -32,6 +33,7 @@ func NewSearchableListableAdapter(searchURLFunc gcpshared.EndpointFunc, listEndp return SearchableListableAdapter{ customSearchMethodDescription: customSearchMethodDesc, searchEndpointFunc: searchURLFunc, + searchFilterFunc: config.SearchFilterFunc, ListableAdapter: ListableAdapter{ listEndpointFunc: listEndpointFunc, Adapter: Adapter{ @@ -131,6 +133,16 @@ func (g SearchableListableAdapter) Search(ctx context.Context, scope, query stri return nil, err } + if g.searchFilterFunc != nil { + filtered := make([]*sdp.Item, 0, len(items)) + for _, item := range items { + if g.searchFilterFunc(query, item) { + filtered = append(filtered, item) + } + } + items = filtered + } + if len(items) == 0 { // Cache not-found when no items were found notFoundErr := &sdp.QueryError{ @@ -153,6 +165,20 @@ func (g SearchableListableAdapter) Search(ctx context.Context, scope, query stri } func (g SearchableListableAdapter) SearchStream(ctx context.Context, scope, query string, ignoreCache bool, stream discovery.QueryResultStream) { + // When a post-filter is configured, fall back to the non-streaming Search + // so we can filter before sending items to the stream. 
+ if g.searchFilterFunc != nil { + items, err := g.Search(ctx, scope, query, ignoreCache) + if err != nil { + stream.SendError(err) + return + } + for _, item := range items { + stream.SendItem(item) + } + return + } + location, err := g.validateScope(scope) if err != nil { stream.SendError(err) diff --git a/sources/gcp/dynamic/adapter.go b/sources/gcp/dynamic/adapter.go index f301d12a..5da9c03b 100644 --- a/sources/gcp/dynamic/adapter.go +++ b/sources/gcp/dynamic/adapter.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net/http" + "slices" "buf.build/go/protovalidate" log "github.com/sirupsen/logrus" @@ -11,8 +12,8 @@ import ( "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" "github.com/overmindtech/cli/go/sdpcache" - gcpshared "github.com/overmindtech/cli/sources/gcp/shared" "github.com/overmindtech/cli/sources" + gcpshared "github.com/overmindtech/cli/sources/gcp/shared" "github.com/overmindtech/cli/sources/shared" ) @@ -29,6 +30,7 @@ type AdapterConfig struct { IAMPermissions []string // List of IAM permissions required by the adapter NameSelector string // By default, it is `name`, but can be overridden for outlier cases ListResponseSelector string + SearchFilterFunc gcpshared.SearchFilterFunc } // Adapter implements discovery.ListableAdapter for GCP dynamic adapters. 
@@ -103,10 +105,8 @@ func (g Adapter) validateScope(scope string) (gcpshared.LocationInfo, error) { } } - for _, validLoc := range g.locations { - if requestedLoc.Equals(validLoc) { - return requestedLoc, nil - } + if slices.ContainsFunc(g.locations, requestedLoc.Equals) { + return requestedLoc, nil } return gcpshared.LocationInfo{}, &sdp.QueryError{ ErrorType: sdp.QueryError_NOSCOPE, diff --git a/sources/gcp/dynamic/adapters.go b/sources/gcp/dynamic/adapters.go index 1011183a..03074ac9 100644 --- a/sources/gcp/dynamic/adapters.go +++ b/sources/gcp/dynamic/adapters.go @@ -135,6 +135,7 @@ func MakeAdapter(sdpItemType shared.ItemType, linker *gcpshared.Linker, httpCli IAMPermissions: meta.IAMPermissions, NameSelector: meta.NameSelector, ListResponseSelector: meta.ListResponseSelector, + SearchFilterFunc: meta.SearchFilterFunc, } switch adapterType(meta) { diff --git a/sources/gcp/dynamic/adapters/ai-platform-batch-prediction-job_test.go b/sources/gcp/dynamic/adapters/ai-platform-batch-prediction-job_test.go index 52438dcc..c46f1e0c 100644 --- a/sources/gcp/dynamic/adapters/ai-platform-batch-prediction-job_test.go +++ b/sources/gcp/dynamic/adapters/ai-platform-batch-prediction-job_test.go @@ -8,7 +8,7 @@ package adapters_test // implementation: // 1. Protobuf serializes field names to snake_case (e.g., "batch_prediction_jobs") while the // adapter configuration expects camelCase (e.g., "batchPredictionJobs"), affecting list operations -// 2. Blast propagation paths in the adapter expect JSON field names but get protobuf field names, +// 2. Link rule paths in the adapter expect JSON field names but get protobuf field names, // limiting automatic link generation for nested fields like GCS sources and KMS keys // // These limitations don't affect the core functionality testing but are noted for future improvements. 
@@ -257,7 +257,7 @@ func TestAIPlatformBatchPredictionJob(t *testing.T) { if err != nil { t.Fatalf("Failed to get 'inputConfig' attribute: %v", err) } - inputConfigMap, ok := inputConfig.(map[string]interface{}) + inputConfigMap, ok := inputConfig.(map[string]any) if !ok { t.Fatalf("Expected inputConfig to be a map[string]interface{}, got %T", inputConfig) } @@ -270,7 +270,7 @@ func TestAIPlatformBatchPredictionJob(t *testing.T) { if err != nil { t.Fatalf("Failed to get 'outputConfig' attribute: %v", err) } - outputConfigMap, ok := outputConfig.(map[string]interface{}) + outputConfigMap, ok := outputConfig.(map[string]any) if !ok { t.Fatalf("Expected outputConfig to be a map[string]interface{}, got %T", outputConfig) } @@ -283,7 +283,7 @@ func TestAIPlatformBatchPredictionJob(t *testing.T) { if err != nil { t.Fatalf("Failed to get 'encryptionSpec' attribute: %v", err) } - encryptionSpecMap, ok := encryptionSpec.(map[string]interface{}) + encryptionSpecMap, ok := encryptionSpec.(map[string]any) if !ok { t.Fatalf("Expected encryptionSpec to be a map[string]interface{}, got %T", encryptionSpec) } diff --git a/sources/gcp/dynamic/adapters/ai-platform-custom-job_test.go b/sources/gcp/dynamic/adapters/ai-platform-custom-job_test.go index ae50dcb7..1da39ce2 100644 --- a/sources/gcp/dynamic/adapters/ai-platform-custom-job_test.go +++ b/sources/gcp/dynamic/adapters/ai-platform-custom-job_test.go @@ -121,7 +121,7 @@ func TestAIPlatformCustomJob(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://aiplatform.googleapis.com/v1/projects/%s/locations/global/customJobs/%s", projectID, jobID): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Custom job not found"}, + Body: map[string]any{"error": "Custom job not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/ai-platform-endpoint_test.go b/sources/gcp/dynamic/adapters/ai-platform-endpoint_test.go index df9170ce..ae983c8e 100644 --- 
a/sources/gcp/dynamic/adapters/ai-platform-endpoint_test.go +++ b/sources/gcp/dynamic/adapters/ai-platform-endpoint_test.go @@ -198,7 +198,7 @@ func TestAIPlatformEndpoint(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://aiplatform.googleapis.com/v1/projects/%s/locations/global/endpoints/%s", projectID, endpointName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Endpoint not found"}, + Body: map[string]any{"error": "Endpoint not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/ai-platform-model-deployment-monitoring-job_test.go b/sources/gcp/dynamic/adapters/ai-platform-model-deployment-monitoring-job_test.go index d761c5af..acc2f1ca 100644 --- a/sources/gcp/dynamic/adapters/ai-platform-model-deployment-monitoring-job_test.go +++ b/sources/gcp/dynamic/adapters/ai-platform-model-deployment-monitoring-job_test.go @@ -252,7 +252,7 @@ func TestAIPlatformModelDeploymentMonitoringJob(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://aiplatform.googleapis.com/v1/projects/%s/locations/%s/modelDeploymentMonitoringJobs/%s", projectID, location, jobName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Monitoring job not found"}, + Body: map[string]any{"error": "Monitoring job not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/ai-platform-model_test.go b/sources/gcp/dynamic/adapters/ai-platform-model_test.go index 5034f43f..a6c71567 100644 --- a/sources/gcp/dynamic/adapters/ai-platform-model_test.go +++ b/sources/gcp/dynamic/adapters/ai-platform-model_test.go @@ -180,7 +180,7 @@ func TestAIPlatformModel(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://aiplatform.googleapis.com/v1/projects/%s/locations/global/models/%s", projectID, modelName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Model not found"}, + Body: map[string]any{"error": "Model not found"}, }, } 
diff --git a/sources/gcp/dynamic/adapters/ai-platform-pipeline-job_test.go b/sources/gcp/dynamic/adapters/ai-platform-pipeline-job_test.go index 3e2cd358..05fff8b4 100644 --- a/sources/gcp/dynamic/adapters/ai-platform-pipeline-job_test.go +++ b/sources/gcp/dynamic/adapters/ai-platform-pipeline-job_test.go @@ -119,7 +119,7 @@ func TestAIPlatformPipelineJob(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://aiplatform.googleapis.com/v1/projects/%s/locations/global/pipelineJobs/%s", projectID, jobID): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Pipeline job not found"}, + Body: map[string]any{"error": "Pipeline job not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/big-query-data-transfer-transfer-config_test.go b/sources/gcp/dynamic/adapters/big-query-data-transfer-transfer-config_test.go index b35e51fe..a5b7caa2 100644 --- a/sources/gcp/dynamic/adapters/big-query-data-transfer-transfer-config_test.go +++ b/sources/gcp/dynamic/adapters/big-query-data-transfer-transfer-config_test.go @@ -266,7 +266,7 @@ func TestBigQueryDataTransferTransferConfig(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://bigquerydatatransfer.googleapis.com/v1/projects/%s/locations/%s/transferConfigs/%s", projectID, location, transferConfigName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Resource not found"}, + Body: map[string]any{"error": "Resource not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/big-table-admin-app-profile_test.go b/sources/gcp/dynamic/adapters/big-table-admin-app-profile_test.go index 2cd25def..67ca3253 100644 --- a/sources/gcp/dynamic/adapters/big-table-admin-app-profile_test.go +++ b/sources/gcp/dynamic/adapters/big-table-admin-app-profile_test.go @@ -187,7 +187,7 @@ func TestBigTableAdminAppProfile(t *testing.T) { errorResponses := map[string]shared.MockResponse{ 
fmt.Sprintf("https://bigtableadmin.googleapis.com/v2/projects/%s/instances/%s/appProfiles/%s", projectID, instanceName, appProfileID): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "App profile not found"}, + Body: map[string]any{"error": "App profile not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/big-table-admin-backup_test.go b/sources/gcp/dynamic/adapters/big-table-admin-backup_test.go index c1c48ec8..d9aa9277 100644 --- a/sources/gcp/dynamic/adapters/big-table-admin-backup_test.go +++ b/sources/gcp/dynamic/adapters/big-table-admin-backup_test.go @@ -130,7 +130,7 @@ func TestBigTableAdminBackup(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://bigtableadmin.googleapis.com/v2/projects/%s/instances/%s/clusters/%s/backups/%s", projectID, instanceName, clusterName, backupID): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Backup not found"}, + Body: map[string]any{"error": "Backup not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/big-table-admin-cluster_test.go b/sources/gcp/dynamic/adapters/big-table-admin-cluster_test.go index 51eadc8f..234d5a42 100644 --- a/sources/gcp/dynamic/adapters/big-table-admin-cluster_test.go +++ b/sources/gcp/dynamic/adapters/big-table-admin-cluster_test.go @@ -156,7 +156,7 @@ func TestBigTableAdminCluster(t *testing.T) { if err != nil { t.Fatalf("Failed to get 'encryptionConfig' attribute: %v", err) } - encryptionConfig, ok := val.(map[string]interface{}) + encryptionConfig, ok := val.(map[string]any) if !ok { t.Fatalf("Expected encryptionConfig to be a map[string]interface{}, got %T", val) } @@ -259,7 +259,7 @@ func TestBigTableAdminCluster(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://bigtableadmin.googleapis.com/v2/projects/%s/instances/%s/clusters/%s", projectID, instanceName, clusterName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Cluster not 
found"}, + Body: map[string]any{"error": "Cluster not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/big-table-admin-instance_test.go b/sources/gcp/dynamic/adapters/big-table-admin-instance_test.go index 2e0fa848..8e25d919 100644 --- a/sources/gcp/dynamic/adapters/big-table-admin-instance_test.go +++ b/sources/gcp/dynamic/adapters/big-table-admin-instance_test.go @@ -110,7 +110,7 @@ func TestBigTableAdminInstance(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://bigtableadmin.googleapis.com/v2/projects/%s/instances/%s", projectID, instanceName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Instance not found"}, + Body: map[string]any{"error": "Instance not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/big-table-admin-table_test.go b/sources/gcp/dynamic/adapters/big-table-admin-table_test.go index a8615272..49f24d14 100644 --- a/sources/gcp/dynamic/adapters/big-table-admin-table_test.go +++ b/sources/gcp/dynamic/adapters/big-table-admin-table_test.go @@ -156,7 +156,7 @@ func TestBigTableAdminTable(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://bigtableadmin.googleapis.com/v2/projects/%s/instances/%s/tables/%s", projectID, instanceName, tableName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Table not found"}, + Body: map[string]any{"error": "Table not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/cloud-billing-billing-info_test.go b/sources/gcp/dynamic/adapters/cloud-billing-billing-info_test.go index a94ee7d9..0a64ed57 100644 --- a/sources/gcp/dynamic/adapters/cloud-billing-billing-info_test.go +++ b/sources/gcp/dynamic/adapters/cloud-billing-billing-info_test.go @@ -59,7 +59,7 @@ func TestCloudBillingBillingInfo(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://cloudbilling.googleapis.com/v1/projects/%s/billingInfo", projectID): { StatusCode: 
http.StatusNotFound, - Body: map[string]interface{}{"error": "Billing info not found"}, + Body: map[string]any{"error": "Billing info not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/cloud-build-build_test.go b/sources/gcp/dynamic/adapters/cloud-build-build_test.go index ca64382a..0fd42a0b 100644 --- a/sources/gcp/dynamic/adapters/cloud-build-build_test.go +++ b/sources/gcp/dynamic/adapters/cloud-build-build_test.go @@ -114,7 +114,7 @@ func TestCloudBuildBuild(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://cloudbuild.googleapis.com/v1/projects/%s/builds/%s", projectID, buildID): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Build not found"}, + Body: map[string]any{"error": "Build not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/cloud-resource-manager-project_test.go b/sources/gcp/dynamic/adapters/cloud-resource-manager-project_test.go index c542ead7..3c7a2f46 100644 --- a/sources/gcp/dynamic/adapters/cloud-resource-manager-project_test.go +++ b/sources/gcp/dynamic/adapters/cloud-resource-manager-project_test.go @@ -56,7 +56,7 @@ func TestCloudResourceManagerProject(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://cloudresourcemanager.googleapis.com/v3/projects/%s", projectID): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Project not found"}, + Body: map[string]any{"error": "Project not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/cloud-resource-manager-tag-key_test.go b/sources/gcp/dynamic/adapters/cloud-resource-manager-tag-key_test.go index 51fd89cb..ff3ad560 100644 --- a/sources/gcp/dynamic/adapters/cloud-resource-manager-tag-key_test.go +++ b/sources/gcp/dynamic/adapters/cloud-resource-manager-tag-key_test.go @@ -175,7 +175,7 @@ func TestCloudResourceManagerTagKey(t *testing.T) { if err != nil { t.Fatalf("Failed to get 'purposeData' attribute: %v", err) } - purposeData, ok := 
val.(map[string]interface{}) + purposeData, ok := val.(map[string]any) if !ok { t.Fatalf("Expected purposeData to be a map, got %T", val) } @@ -235,7 +235,7 @@ func TestCloudResourceManagerTagKey(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://cloudresourcemanager.googleapis.com/v3/tagKeys/%s", tagKeyID): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": map[string]interface{}{"code": 404, "message": "TagKey not found"}}, + Body: map[string]any{"error": map[string]any{"code": 404, "message": "TagKey not found"}}, }, } diff --git a/sources/gcp/dynamic/adapters/cloud-resource-manager-tag-value_test.go b/sources/gcp/dynamic/adapters/cloud-resource-manager-tag-value_test.go index 7ad034db..ea844d9c 100644 --- a/sources/gcp/dynamic/adapters/cloud-resource-manager-tag-value_test.go +++ b/sources/gcp/dynamic/adapters/cloud-resource-manager-tag-value_test.go @@ -113,7 +113,7 @@ func TestCloudResourceManagerTagValue(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://cloudresourcemanager.googleapis.com/v3/tagValues/%s", tagValueID): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Tag value not found"}, + Body: map[string]any{"error": "Tag value not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/cloudfunctions-function_test.go b/sources/gcp/dynamic/adapters/cloudfunctions-function_test.go index bd203dfd..7f05bcf1 100644 --- a/sources/gcp/dynamic/adapters/cloudfunctions-function_test.go +++ b/sources/gcp/dynamic/adapters/cloudfunctions-function_test.go @@ -176,7 +176,7 @@ func TestCloudFunctionsFunction(t *testing.T) { if err != nil { t.Fatalf("Failed to get 'buildConfig' attribute: %v", err) } - buildConfigMap, ok := buildConfig.(map[string]interface{}) + buildConfigMap, ok := buildConfig.(map[string]any) if !ok { t.Fatalf("Expected buildConfig to be a map, got %T", buildConfig) } @@ -316,7 +316,7 @@ func TestCloudFunctionsFunction(t 
*testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://cloudfunctions.googleapis.com/v2/projects/%s/locations/%s/functions/%s", projectID, location, functionName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Function not found"}, + Body: map[string]any{"error": "Function not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/compute-external-vpn-gateway_test.go b/sources/gcp/dynamic/adapters/compute-external-vpn-gateway_test.go index be2726ec..7ab06d43 100644 --- a/sources/gcp/dynamic/adapters/compute-external-vpn-gateway_test.go +++ b/sources/gcp/dynamic/adapters/compute-external-vpn-gateway_test.go @@ -117,7 +117,7 @@ func TestComputeExternalVpnGateway(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://compute.googleapis.com/compute/v1/projects/%s/global/externalVpnGateways/%s", projectID, gatewayName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Gateway not found"}, + Body: map[string]any{"error": "Gateway not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/compute-firewall.go b/sources/gcp/dynamic/adapters/compute-firewall.go index 22aeaffc..e3ebac5f 100644 --- a/sources/gcp/dynamic/adapters/compute-firewall.go +++ b/sources/gcp/dynamic/adapters/compute-firewall.go @@ -1,6 +1,9 @@ package adapters import ( + "fmt" + "strings" + "github.com/overmindtech/cli/go/sdp-go" gcpshared "github.com/overmindtech/cli/sources/gcp/shared" ) @@ -19,6 +22,15 @@ var _ = registerableAdapter{ UniqueAttributeKeys: []string{"firewalls"}, IAMPermissions: []string{"compute.firewalls.get", "compute.firewalls.list"}, PredefinedRole: "roles/compute.viewer", + // Tag-based SEARCH: list all firewalls then filter by tag. 
+ SearchEndpointFunc: func(query string, location gcpshared.LocationInfo) string { + if query == "" || strings.Contains(query, "/") { + return "" + } + return fmt.Sprintf("https://compute.googleapis.com/compute/v1/projects/%s/global/firewalls", location.ProjectID) + }, + SearchDescription: "Search for firewalls by network tag. The query is a plain network tag name.", + SearchFilterFunc: firewallTagFilter, }, linkRules: map[string]*gcpshared.Impact{ "network": { @@ -27,6 +39,14 @@ var _ = registerableAdapter{ }, "sourceServiceAccounts": gcpshared.IAMServiceAccountImpactInOnly, "targetServiceAccounts": gcpshared.IAMServiceAccountImpactInOnly, + "targetTags": { + Description: "Firewall rule specifies target_tags to control traffic to VM instances and instance templates with those tags. Overmind automatically discovers these relationships by searching for instances and templates with matching network tags, enabling accurate blast radius analysis when tags change on either firewalls or instances.", + ToSDPItemType: gcpshared.ComputeInstance, + }, + "sourceTags": { + Description: "Firewall rule specifies source_tags to control traffic from VM instances with those tags. Overmind automatically discovers these relationships by searching for instances with matching network tags, enabling accurate blast radius analysis when tags change on either firewalls or instances.", + ToSDPItemType: gcpshared.ComputeInstance, + }, }, terraformMapping: gcpshared.TerraformMapping{ Reference: "https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/compute_firewall", @@ -38,3 +58,28 @@ var _ = registerableAdapter{ }, }, }.Register() + +// firewallTagFilter keeps firewalls whose targetTags or sourceTags contain the query tag. 
+func firewallTagFilter(query string, item *sdp.Item) bool { + return itemAttributeContainsTag(item, "targetTags", query) || + itemAttributeContainsTag(item, "sourceTags", query) +} + +// itemAttributeContainsTag checks whether an item attribute (expected to be a +// list of strings) contains the given tag value. +func itemAttributeContainsTag(item *sdp.Item, attrKey, tag string) bool { + val, err := item.GetAttributes().Get(attrKey) + if err != nil { + return false + } + list, ok := val.([]any) + if !ok { + return false + } + for _, elem := range list { + if s, ok := elem.(string); ok && s == tag { + return true + } + } + return false +} diff --git a/sources/gcp/dynamic/adapters/compute-firewall_test.go b/sources/gcp/dynamic/adapters/compute-firewall_test.go index aaddcf5d..1a3ba07e 100644 --- a/sources/gcp/dynamic/adapters/compute-firewall_test.go +++ b/sources/gcp/dynamic/adapters/compute-firewall_test.go @@ -130,7 +130,7 @@ func TestComputeFirewall(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://compute.googleapis.com/compute/v1/projects/%s/global/firewalls/%s", projectID, firewallName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Firewall not found"}, + Body: map[string]any{"error": "Firewall not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/compute-global-address_test.go b/sources/gcp/dynamic/adapters/compute-global-address_test.go index 7fccd453..b61954f9 100644 --- a/sources/gcp/dynamic/adapters/compute-global-address_test.go +++ b/sources/gcp/dynamic/adapters/compute-global-address_test.go @@ -24,42 +24,42 @@ func TestComputeGlobalAddress(t *testing.T) { addressName := "test-global-address" globalAddress := &computepb.Address{ Name: &addressName, - Description: stringPtr("Test global address for load balancer"), - Address: stringPtr("203.0.113.12"), - AddressType: stringPtr("EXTERNAL"), - Status: stringPtr("RESERVED"), - Network: stringPtr("global/networks/test-network"), + 
Description: new("Test global address for load balancer"), + Address: new("203.0.113.12"), + AddressType: new("EXTERNAL"), + Status: new("RESERVED"), + Network: new("global/networks/test-network"), Labels: map[string]string{ "env": "test", "team": "networking", }, - Region: stringPtr("global"), - NetworkTier: stringPtr("PREMIUM"), - CreationTimestamp: stringPtr("2023-01-15T10:30:00.000-08:00"), - Id: uint64Ptr(1234567890123456789), - Kind: stringPtr("compute#globalAddress"), - SelfLink: stringPtr(fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/global/addresses/%s", projectID, addressName)), + Region: new("global"), + NetworkTier: new("PREMIUM"), + CreationTimestamp: new("2023-01-15T10:30:00.000-08:00"), + Id: new(uint64(1234567890123456789)), + Kind: new("compute#globalAddress"), + SelfLink: new(fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/global/addresses/%s", projectID, addressName)), } // Create a second global address for list testing addressName2 := "test-global-address-2" globalAddress2 := &computepb.Address{ Name: &addressName2, - Description: stringPtr("Second test global address"), - Address: stringPtr("203.0.113.13"), - AddressType: stringPtr("EXTERNAL"), - Status: stringPtr("RESERVED"), - Network: stringPtr("global/networks/test-network-2"), + Description: new("Second test global address"), + Address: new("203.0.113.13"), + AddressType: new("EXTERNAL"), + Status: new("RESERVED"), + Network: new("global/networks/test-network-2"), Labels: map[string]string{ "env": "prod", "team": "networking", }, - Region: stringPtr("global"), - NetworkTier: stringPtr("PREMIUM"), - CreationTimestamp: stringPtr("2023-01-16T11:45:00.000-08:00"), - Id: uint64Ptr(1234567890123456790), - Kind: stringPtr("compute#globalAddress"), - SelfLink: stringPtr(fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/global/addresses/%s", projectID, addressName2)), + Region: new("global"), + NetworkTier: new("PREMIUM"), + CreationTimestamp: 
new("2023-01-16T11:45:00.000-08:00"), + Id: new(uint64(1234567890123456790)), + Kind: new("compute#globalAddress"), + SelfLink: new(fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/global/addresses/%s", projectID, addressName2)), } globalAddresses := &computepb.AddressList{ @@ -193,16 +193,3 @@ func TestComputeGlobalAddress(t *testing.T) { } }) } - -// Helper functions for pointer creation -func stringPtr(s string) *string { - return &s -} - -func uint64Ptr(u uint64) *uint64 { - return &u -} - -func boolPtr(b bool) *bool { - return &b -} diff --git a/sources/gcp/dynamic/adapters/compute-global-forwarding-rule_test.go b/sources/gcp/dynamic/adapters/compute-global-forwarding-rule_test.go index 95cb69dd..d43f66bd 100644 --- a/sources/gcp/dynamic/adapters/compute-global-forwarding-rule_test.go +++ b/sources/gcp/dynamic/adapters/compute-global-forwarding-rule_test.go @@ -24,74 +24,74 @@ func TestComputeGlobalForwardingRule(t *testing.T) { // Mock response for a global forwarding rule using protobuf types globalForwardingRule := &computepb.ForwardingRule{ - Id: uint64Ptr(1234567890123456789), - CreationTimestamp: stringPtr("2023-01-01T00:00:00.000-08:00"), - Name: stringPtr(forwardingRuleName), - Description: stringPtr("Test global forwarding rule"), - Region: stringPtr(""), - IPAddress: stringPtr("203.0.113.1"), - IPProtocol: stringPtr("TCP"), - PortRange: stringPtr("80"), - Target: stringPtr(fmt.Sprintf("projects/%s/global/targetHttpProxies/test-target-proxy", projectID)), - SelfLink: stringPtr(fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/global/forwardingRules/%s", projectID, forwardingRuleName)), - LoadBalancingScheme: stringPtr("EXTERNAL"), - Subnetwork: stringPtr(fmt.Sprintf("projects/%s/regions/us-central1/subnetworks/test-subnet", projectID)), - Network: stringPtr(fmt.Sprintf("projects/%s/global/networks/default", projectID)), - BackendService: stringPtr(fmt.Sprintf("projects/%s/global/backendServices/test-backend-service", 
projectID)), - ServiceLabel: stringPtr("test-service"), - ServiceName: stringPtr(fmt.Sprintf("%s-test-service.c.%s.internal", forwardingRuleName, projectID)), - Kind: stringPtr("compute#forwardingRule"), - LabelFingerprint: stringPtr("42WmSpB8rSM="), + Id: new(uint64(1234567890123456789)), + CreationTimestamp: new("2023-01-01T00:00:00.000-08:00"), + Name: new(forwardingRuleName), + Description: new("Test global forwarding rule"), + Region: new(""), + IPAddress: new("203.0.113.1"), + IPProtocol: new("TCP"), + PortRange: new("80"), + Target: new(fmt.Sprintf("projects/%s/global/targetHttpProxies/test-target-proxy", projectID)), + SelfLink: new(fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/global/forwardingRules/%s", projectID, forwardingRuleName)), + LoadBalancingScheme: new("EXTERNAL"), + Subnetwork: new(fmt.Sprintf("projects/%s/regions/us-central1/subnetworks/test-subnet", projectID)), + Network: new(fmt.Sprintf("projects/%s/global/networks/default", projectID)), + BackendService: new(fmt.Sprintf("projects/%s/global/backendServices/test-backend-service", projectID)), + ServiceLabel: new("test-service"), + ServiceName: new(fmt.Sprintf("%s-test-service.c.%s.internal", forwardingRuleName, projectID)), + Kind: new("compute#forwardingRule"), + LabelFingerprint: new("42WmSpB8rSM="), Labels: map[string]string{ "env": "test", "team": "devops", }, - NetworkTier: stringPtr("PREMIUM"), - AllowGlobalAccess: boolPtr(false), - AllowPscGlobalAccess: boolPtr(false), + NetworkTier: new("PREMIUM"), + AllowGlobalAccess: new(false), + AllowPscGlobalAccess: new(false), PscConnectionId: nil, - PscConnectionStatus: stringPtr("ACCEPTED"), - Fingerprint: stringPtr("abcd1234efgh5678"), + PscConnectionStatus: new("ACCEPTED"), + Fingerprint: new("abcd1234efgh5678"), } // Mock response for a second global forwarding rule using protobuf types globalForwardingRule2 := &computepb.ForwardingRule{ - Id: uint64Ptr(9876543210987654321), - CreationTimestamp: 
stringPtr("2023-01-02T00:00:00.000-08:00"), - Name: stringPtr("test-global-forwarding-rule-2"), - Description: stringPtr("Second test global forwarding rule"), - Region: stringPtr(""), - IPAddress: stringPtr("203.0.113.2"), - IPProtocol: stringPtr("TCP"), - PortRange: stringPtr("443"), - Target: stringPtr(fmt.Sprintf("projects/%s/global/targetHttpsProxies/test-target-proxy-2", projectID)), - SelfLink: stringPtr(fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/global/forwardingRules/test-global-forwarding-rule-2", projectID)), - LoadBalancingScheme: stringPtr("EXTERNAL"), - Subnetwork: stringPtr(fmt.Sprintf("projects/%s/regions/us-west1/subnetworks/test-subnet-2", projectID)), - Network: stringPtr(fmt.Sprintf("projects/%s/global/networks/custom-network", projectID)), - BackendService: stringPtr(fmt.Sprintf("projects/%s/global/backendServices/test-backend-service-2", projectID)), - ServiceLabel: stringPtr("test-service-2"), - ServiceName: stringPtr("test-global-forwarding-rule-2-test-service-2.c." 
+ projectID + ".internal"), - Kind: stringPtr("compute#forwardingRule"), - LabelFingerprint: stringPtr("xyz789abc123def="), + Id: new(uint64(9876543210987654321)), + CreationTimestamp: new("2023-01-02T00:00:00.000-08:00"), + Name: new("test-global-forwarding-rule-2"), + Description: new("Second test global forwarding rule"), + Region: new(""), + IPAddress: new("203.0.113.2"), + IPProtocol: new("TCP"), + PortRange: new("443"), + Target: new(fmt.Sprintf("projects/%s/global/targetHttpsProxies/test-target-proxy-2", projectID)), + SelfLink: new(fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/global/forwardingRules/test-global-forwarding-rule-2", projectID)), + LoadBalancingScheme: new("EXTERNAL"), + Subnetwork: new(fmt.Sprintf("projects/%s/regions/us-west1/subnetworks/test-subnet-2", projectID)), + Network: new(fmt.Sprintf("projects/%s/global/networks/custom-network", projectID)), + BackendService: new(fmt.Sprintf("projects/%s/global/backendServices/test-backend-service-2", projectID)), + ServiceLabel: new("test-service-2"), + ServiceName: new("test-global-forwarding-rule-2-test-service-2.c." 
+ projectID + ".internal"), + Kind: new("compute#forwardingRule"), + LabelFingerprint: new("xyz789abc123def="), Labels: map[string]string{ "env": "prod", "service": "web", }, - NetworkTier: stringPtr("PREMIUM"), - AllowGlobalAccess: boolPtr(true), - AllowPscGlobalAccess: boolPtr(true), - PscConnectionId: uint64Ptr(123), - PscConnectionStatus: stringPtr("ACCEPTED"), - Fingerprint: stringPtr("xyz789abc123def456"), + NetworkTier: new("PREMIUM"), + AllowGlobalAccess: new(true), + AllowPscGlobalAccess: new(true), + PscConnectionId: new(uint64(123)), + PscConnectionStatus: new("ACCEPTED"), + Fingerprint: new("xyz789abc123def456"), } // Mock response for list operation using protobuf types globalForwardingRulesList := &computepb.ForwardingRuleList{ - Kind: stringPtr("compute#forwardingRuleList"), - Id: stringPtr("projects/" + projectID + "/global/forwardingRules"), + Kind: new("compute#forwardingRuleList"), + Id: new("projects/" + projectID + "/global/forwardingRules"), Items: []*computepb.ForwardingRule{globalForwardingRule, globalForwardingRule2}, - SelfLink: stringPtr(fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/global/forwardingRules", projectID)), + SelfLink: new(fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/global/forwardingRules", projectID)), } sdpItemType := gcpshared.ComputeGlobalForwardingRule @@ -201,7 +201,7 @@ func TestComputeGlobalForwardingRule(t *testing.T) { // Test labels - check if labels exist before testing labels, err := sdpItem.GetAttributes().Get("labels") if err == nil { - labelsMap, ok := labels.(map[string]interface{}) + labelsMap, ok := labels.(map[string]any) if !ok { t.Fatalf("Expected labels to be a map[string]interface{}, got %T", labels) } @@ -295,7 +295,7 @@ func TestComputeGlobalForwardingRule(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://compute.googleapis.com/compute/v1/projects/%s/global/forwardingRules/%s", projectID, forwardingRuleName): { StatusCode: 
http.StatusNotFound, - Body: map[string]interface{}{"error": "Not found"}, + Body: map[string]any{"error": "Not found"}, }, } @@ -314,8 +314,8 @@ func TestComputeGlobalForwardingRule(t *testing.T) { t.Run("EmptyList", func(t *testing.T) { // Test with empty list response using protobuf types emptyListResponse := &computepb.ForwardingRuleList{ - Kind: stringPtr("compute#forwardingRuleList"), - Id: stringPtr("projects/" + projectID + "/global/forwardingRules"), + Kind: new("compute#forwardingRuleList"), + Id: new("projects/" + projectID + "/global/forwardingRules"), Items: []*computepb.ForwardingRule{}, } diff --git a/sources/gcp/dynamic/adapters/compute-http-health-check_test.go b/sources/gcp/dynamic/adapters/compute-http-health-check_test.go index a3134320..fca9cb2c 100644 --- a/sources/gcp/dynamic/adapters/compute-http-health-check_test.go +++ b/sources/gcp/dynamic/adapters/compute-http-health-check_test.go @@ -22,18 +22,18 @@ func TestComputeHttpHealthCheck(t *testing.T) { healthCheckName := "test-health-check" // Use map since HTTPHealthCheck protobuf doesn't have Name field - healthCheck := map[string]interface{}{ + healthCheck := map[string]any{ "name": healthCheckName, "host": "example.com", } healthCheckName2 := "test-health-check-2" - healthCheck2 := map[string]interface{}{ + healthCheck2 := map[string]any{ "name": healthCheckName2, } - healthCheckList := map[string]interface{}{ - "items": []interface{}{healthCheck, healthCheck2}, + healthCheckList := map[string]any{ + "items": []any{healthCheck, healthCheck2}, } sdpItemType := gcpshared.ComputeHttpHealthCheck @@ -91,7 +91,7 @@ func TestComputeHttpHealthCheck(t *testing.T) { // Even though the link rule uses stdlib.NetworkIP, it should detect // that "192.168.1.1" is an IP address and create an IP link t.Run("StaticTestsWithIP", func(t *testing.T) { - healthCheckWithIP := map[string]interface{}{ + healthCheckWithIP := map[string]any{ "name": "test-health-check-ip", "host": "192.168.1.1", } @@ -192,7 +192,7 
@@ func TestComputeHttpHealthCheck(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://compute.googleapis.com/compute/v1/projects/%s/global/httpHealthChecks/%s", projectID, healthCheckName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Health check not found"}, + Body: map[string]any{"error": "Health check not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/compute-instance-template.go b/sources/gcp/dynamic/adapters/compute-instance-template.go index 592cfecf..a5b9b7b4 100644 --- a/sources/gcp/dynamic/adapters/compute-instance-template.go +++ b/sources/gcp/dynamic/adapters/compute-instance-template.go @@ -1,6 +1,9 @@ package adapters import ( + "fmt" + "strings" + "github.com/overmindtech/cli/go/sdp-go" gcpshared "github.com/overmindtech/cli/sources/gcp/shared" "github.com/overmindtech/cli/sources/stdlib" @@ -19,6 +22,15 @@ var _ = registerableAdapter{ UniqueAttributeKeys: []string{"instanceTemplates"}, IAMPermissions: []string{"compute.instanceTemplates.get", "compute.instanceTemplates.list"}, PredefinedRole: "roles/compute.viewer", + // Tag-based SEARCH: list all instance templates then filter by tag. + SearchEndpointFunc: func(query string, location gcpshared.LocationInfo) string { + if query == "" || strings.Contains(query, "/") { + return "" + } + return fmt.Sprintf("https://compute.googleapis.com/compute/v1/projects/%s/global/instanceTemplates", location.ProjectID) + }, + SearchDescription: "Search for instance templates by network tag. The query is a plain network tag name.", + SearchFilterFunc: instanceTemplateTagFilter, }, linkRules: map[string]*gcpshared.Impact{ // https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates/get @@ -108,6 +120,10 @@ var _ = registerableAdapter{ Description: "If the IAM Service Account is deleted or updated: Instances created from this template may fail to authenticate or access required resources. 
If the template is updated: The service account remains unaffected.", ToSDPItemType: gcpshared.IAMServiceAccount, }, + "properties.tags.items": { + Description: "Instance templates define network tags that will be applied to instances created from the template. Overmind discovers firewall rules and routes with matching tags, showing how firewall and route changes will affect instances created from this template.", + ToSDPItemType: gcpshared.ComputeFirewall, + }, }, terraformMapping: gcpshared.TerraformMapping{ Reference: "https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/compute_instance_template", @@ -119,3 +135,8 @@ var _ = registerableAdapter{ }, }, }.Register() + +// instanceTemplateTagFilter keeps instance templates whose properties.tags.items contain the query tag. +func instanceTemplateTagFilter(query string, item *sdp.Item) bool { + return itemAttributeContainsTag(item, "properties.tags.items", query) +} diff --git a/sources/gcp/dynamic/adapters/compute-network-endpoint-group_test.go b/sources/gcp/dynamic/adapters/compute-network-endpoint-group_test.go index a4e723ff..f6ee74ed 100644 --- a/sources/gcp/dynamic/adapters/compute-network-endpoint-group_test.go +++ b/sources/gcp/dynamic/adapters/compute-network-endpoint-group_test.go @@ -152,7 +152,7 @@ func TestComputeNetworkEndpointGroup(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://compute.googleapis.com/compute/v1/projects/%s/zones/%s/networkEndpointGroups/%s", projectID, zone, negName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "NEG not found"}, + Body: map[string]any{"error": "NEG not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/compute-network_test.go b/sources/gcp/dynamic/adapters/compute-network_test.go index 4968ad42..94f6bbf2 100644 --- a/sources/gcp/dynamic/adapters/compute-network_test.go +++ b/sources/gcp/dynamic/adapters/compute-network_test.go @@ -129,7 +129,7 @@ func 
TestComputeNetwork(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://compute.googleapis.com/compute/v1/projects/%s/global/networks/%s", projectID, networkName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Network not found"}, + Body: map[string]any{"error": "Network not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/compute-project_test.go b/sources/gcp/dynamic/adapters/compute-project_test.go index d475df6d..68ef96d2 100644 --- a/sources/gcp/dynamic/adapters/compute-project_test.go +++ b/sources/gcp/dynamic/adapters/compute-project_test.go @@ -79,7 +79,7 @@ func TestComputeProject(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://compute.googleapis.com/compute/v1/projects/%s?fields=name", projectID): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Project not found"}, + Body: map[string]any{"error": "Project not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/compute-public-delegated-prefix_test.go b/sources/gcp/dynamic/adapters/compute-public-delegated-prefix_test.go index 623ec276..a924dbd5 100644 --- a/sources/gcp/dynamic/adapters/compute-public-delegated-prefix_test.go +++ b/sources/gcp/dynamic/adapters/compute-public-delegated-prefix_test.go @@ -187,7 +187,7 @@ func TestComputePublicDelegatedPrefix(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://compute.googleapis.com/compute/v1/projects/%s/regions/%s/publicDelegatedPrefixes/%s", projectID, region, prefixName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Prefix not found"}, + Body: map[string]any{"error": "Prefix not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/compute-region-commitment_test.go b/sources/gcp/dynamic/adapters/compute-region-commitment_test.go index a56ef8e8..59262975 100644 --- a/sources/gcp/dynamic/adapters/compute-region-commitment_test.go +++ 
b/sources/gcp/dynamic/adapters/compute-region-commitment_test.go @@ -128,7 +128,7 @@ func TestComputeRegionCommitment(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://compute.googleapis.com/compute/v1/projects/%s/regions/%s/commitments/%s", projectID, region, commitmentName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Commitment not found"}, + Body: map[string]any{"error": "Commitment not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/compute-route.go b/sources/gcp/dynamic/adapters/compute-route.go index 3a90e854..e39dfe5f 100644 --- a/sources/gcp/dynamic/adapters/compute-route.go +++ b/sources/gcp/dynamic/adapters/compute-route.go @@ -1,6 +1,9 @@ package adapters import ( + "fmt" + "strings" + "github.com/overmindtech/cli/go/sdp-go" gcpshared "github.com/overmindtech/cli/sources/gcp/shared" "github.com/overmindtech/cli/sources/stdlib" @@ -19,6 +22,15 @@ var _ = registerableAdapter{ UniqueAttributeKeys: []string{"routes"}, IAMPermissions: []string{"compute.routes.get", "compute.routes.list"}, PredefinedRole: "roles/compute.viewer", + // Tag-based SEARCH: list all routes then filter by tag. + SearchEndpointFunc: func(query string, location gcpshared.LocationInfo) string { + if query == "" || strings.Contains(query, "/") { + return "" + } + return fmt.Sprintf("https://compute.googleapis.com/compute/v1/projects/%s/global/routes", location.ProjectID) + }, + SearchDescription: "Search for routes by network tag. The query is a plain network tag name.", + SearchFilterFunc: routeTagFilter, }, linkRules: map[string]*gcpshared.Impact{ // https://cloud.google.com/compute/docs/reference/rest/v1/routes/get @@ -65,6 +77,10 @@ var _ = registerableAdapter{ Description: "The URL to an InterconnectAttachment which is the next hop for the route. If the Interconnect Attachment is updated or deleted: The route may no longer forward traffic properly. 
If the route is updated: The interconnect attachment remains unaffected but traffic routed through it may be affected.", ToSDPItemType: gcpshared.ComputeInterconnectAttachment, }, + "tags": { + Description: "Route specifies network tags to apply routing rules only to instances and instance templates with matching tags. Overmind automatically discovers instances and templates with these tags, enabling blast radius analysis to show which resources will be affected when you modify a route's tags.", + ToSDPItemType: gcpshared.ComputeInstance, + }, }, terraformMapping: gcpshared.TerraformMapping{ Reference: "https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/compute_route", @@ -76,3 +92,8 @@ var _ = registerableAdapter{ }, }, }.Register() + +// routeTagFilter keeps routes whose tags array contains the query tag. +func routeTagFilter(query string, item *sdp.Item) bool { + return itemAttributeContainsTag(item, "tags", query) +} diff --git a/sources/gcp/dynamic/adapters/compute-route_test.go b/sources/gcp/dynamic/adapters/compute-route_test.go index 79a90518..cd9d038d 100644 --- a/sources/gcp/dynamic/adapters/compute-route_test.go +++ b/sources/gcp/dynamic/adapters/compute-route_test.go @@ -141,7 +141,7 @@ func TestComputeRoute(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://compute.googleapis.com/compute/v1/projects/%s/global/routes/%s", projectID, routeName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Route not found"}, + Body: map[string]any{"error": "Route not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/compute-router_test.go b/sources/gcp/dynamic/adapters/compute-router_test.go index 1171e13d..5bb80aba 100644 --- a/sources/gcp/dynamic/adapters/compute-router_test.go +++ b/sources/gcp/dynamic/adapters/compute-router_test.go @@ -27,41 +27,41 @@ func TestComputeRouter(t *testing.T) { // Create mock protobuf object router := &computepb.Router{ - Name: 
stringPtr(routerName), - Description: stringPtr("Test Router"), - Network: stringPtr(fmt.Sprintf("projects/%s/global/networks/default", projectID)), - Region: stringPtr(fmt.Sprintf("projects/%s/regions/%s", projectID, region)), + Name: new(routerName), + Description: new("Test Router"), + Network: new(fmt.Sprintf("projects/%s/global/networks/default", projectID)), + Region: new(fmt.Sprintf("projects/%s/regions/%s", projectID, region)), Interfaces: []*computepb.RouterInterface{ { - Name: stringPtr("interface-1"), - LinkedInterconnectAttachment: stringPtr(fmt.Sprintf("projects/%s/regions/%s/interconnectAttachments/test-attachment", projectID, region)), - PrivateIpAddress: stringPtr("10.0.0.1"), - Subnetwork: stringPtr(fmt.Sprintf("projects/%s/regions/%s/subnetworks/test-subnet", projectID, region)), - LinkedVpnTunnel: stringPtr(fmt.Sprintf("projects/%s/regions/%s/vpnTunnels/test-tunnel", projectID, region)), + Name: new("interface-1"), + LinkedInterconnectAttachment: new(fmt.Sprintf("projects/%s/regions/%s/interconnectAttachments/test-attachment", projectID, region)), + PrivateIpAddress: new("10.0.0.1"), + Subnetwork: new(fmt.Sprintf("projects/%s/regions/%s/subnetworks/test-subnet", projectID, region)), + LinkedVpnTunnel: new(fmt.Sprintf("projects/%s/regions/%s/vpnTunnels/test-tunnel", projectID, region)), }, }, BgpPeers: []*computepb.RouterBgpPeer{ { - Name: stringPtr("bgp-peer-1"), - PeerIpAddress: stringPtr("192.168.1.1"), - IpAddress: stringPtr("192.168.1.2"), - Ipv4NexthopAddress: stringPtr("192.168.1.3"), - PeerIpv4NexthopAddress: stringPtr("192.168.1.4"), + Name: new("bgp-peer-1"), + PeerIpAddress: new("192.168.1.1"), + IpAddress: new("192.168.1.2"), + Ipv4NexthopAddress: new("192.168.1.3"), + PeerIpv4NexthopAddress: new("192.168.1.4"), }, }, Nats: []*computepb.RouterNat{ { - Name: stringPtr("nat-1"), + Name: new("nat-1"), NatIps: []string{"203.0.113.1", "203.0.113.2"}, DrainNatIps: []string{"203.0.113.3"}, Subnetworks: []*computepb.RouterNatSubnetworkToNat{ { 
- Name: stringPtr(fmt.Sprintf("projects/%s/regions/%s/subnetworks/nat-subnet", projectID, region)), + Name: new(fmt.Sprintf("projects/%s/regions/%s/subnetworks/nat-subnet", projectID, region)), }, }, Nat64Subnetworks: []*computepb.RouterNatSubnetworkToNat64{ { - Name: stringPtr(fmt.Sprintf("projects/%s/regions/%s/subnetworks/nat64-subnet", projectID, region)), + Name: new(fmt.Sprintf("projects/%s/regions/%s/subnetworks/nat64-subnet", projectID, region)), }, }, }, @@ -71,10 +71,10 @@ func TestComputeRouter(t *testing.T) { // Create second router for list testing routerName2 := "test-router-2" router2 := &computepb.Router{ - Name: stringPtr(routerName2), - Description: stringPtr("Test Router 2"), - Network: stringPtr(fmt.Sprintf("projects/%s/global/networks/default", projectID)), - Region: stringPtr(fmt.Sprintf("projects/%s/regions/%s", projectID, region)), + Name: new(routerName2), + Description: new("Test Router 2"), + Network: new(fmt.Sprintf("projects/%s/global/networks/default", projectID)), + Region: new(fmt.Sprintf("projects/%s/regions/%s", projectID, region)), } // Create list response with multiple items @@ -305,7 +305,7 @@ func TestComputeRouter(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://compute.googleapis.com/compute/v1/projects/%s/regions/%s/routers/%s", projectID, region, routerName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Router not found"}, + Body: map[string]any{"error": "Router not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/compute-ssl-certificate_test.go b/sources/gcp/dynamic/adapters/compute-ssl-certificate_test.go index b73f49ca..826a5109 100644 --- a/sources/gcp/dynamic/adapters/compute-ssl-certificate_test.go +++ b/sources/gcp/dynamic/adapters/compute-ssl-certificate_test.go @@ -23,20 +23,20 @@ func TestComputeSSLCertificate(t *testing.T) { // Create mock protobuf object certificate := &computepb.SslCertificate{ - Name: stringPtr(certificateName), - 
Description: stringPtr("Test SSL Certificate"), - Certificate: stringPtr("-----BEGIN CERTIFICATE-----\nMIIC...test certificate data...\n-----END CERTIFICATE-----"), - PrivateKey: stringPtr("-----BEGIN PRIVATE KEY-----\nMIIE...test private key data...\n-----END PRIVATE KEY-----"), - SelfLink: stringPtr(fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/global/sslCertificates/%s", projectID, certificateName)), + Name: new(certificateName), + Description: new("Test SSL Certificate"), + Certificate: new("-----BEGIN CERTIFICATE-----\nMIIC...test certificate data...\n-----END CERTIFICATE-----"), + PrivateKey: new("-----BEGIN PRIVATE KEY-----\nMIIE...test private key data...\n-----END PRIVATE KEY-----"), + SelfLink: new(fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/global/sslCertificates/%s", projectID, certificateName)), } // Create second certificate for list testing certificateName2 := "test-ssl-certificate-2" certificate2 := &computepb.SslCertificate{ - Name: stringPtr(certificateName2), - Description: stringPtr("Test SSL Certificate 2"), - Certificate: stringPtr("-----BEGIN CERTIFICATE-----\nMIIC...test certificate data 2...\n-----END CERTIFICATE-----"), - SelfLink: stringPtr(fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/global/sslCertificates/%s", projectID, certificateName2)), + Name: new(certificateName2), + Description: new("Test SSL Certificate 2"), + Certificate: new("-----BEGIN CERTIFICATE-----\nMIIC...test certificate data 2...\n-----END CERTIFICATE-----"), + SelfLink: new(fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/global/sslCertificates/%s", projectID, certificateName2)), } // Create list response with multiple items @@ -133,7 +133,7 @@ func TestComputeSSLCertificate(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://compute.googleapis.com/compute/v1/projects/%s/global/sslCertificates/%s", projectID, certificateName): { StatusCode: http.StatusNotFound, - 
Body: map[string]interface{}{"error": "SSL Certificate not found"}, + Body: map[string]any{"error": "SSL Certificate not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/compute-ssl-policy_test.go b/sources/gcp/dynamic/adapters/compute-ssl-policy_test.go index e0ee4f86..f1bb0295 100644 --- a/sources/gcp/dynamic/adapters/compute-ssl-policy_test.go +++ b/sources/gcp/dynamic/adapters/compute-ssl-policy_test.go @@ -99,7 +99,7 @@ func TestComputeSSLPolicy(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://compute.googleapis.com/compute/v1/projects/%s/global/sslPolicies/%s", projectID, policyName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "SSL policy not found"}, + Body: map[string]any{"error": "SSL policy not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/compute-subnetwork_test.go b/sources/gcp/dynamic/adapters/compute-subnetwork_test.go index 84643afc..b15ebcfd 100644 --- a/sources/gcp/dynamic/adapters/compute-subnetwork_test.go +++ b/sources/gcp/dynamic/adapters/compute-subnetwork_test.go @@ -117,7 +117,7 @@ func TestComputeSubnetwork(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://compute.googleapis.com/compute/v1/projects/%s/regions/%s/subnetworks/%s", projectID, region, subnetworkName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Subnetwork not found"}, + Body: map[string]any{"error": "Subnetwork not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/compute-target-http-proxy_test.go b/sources/gcp/dynamic/adapters/compute-target-http-proxy_test.go index 2f2be523..fe682162 100644 --- a/sources/gcp/dynamic/adapters/compute-target-http-proxy_test.go +++ b/sources/gcp/dynamic/adapters/compute-target-http-proxy_test.go @@ -112,7 +112,7 @@ func TestComputeTargetHttpProxy(t *testing.T) { errorResponses := map[string]shared.MockResponse{ 
fmt.Sprintf("https://compute.googleapis.com/compute/v1/projects/%s/global/targetHttpProxies/%s", projectID, proxyName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Proxy not found"}, + Body: map[string]any{"error": "Proxy not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/compute-target-https-proxy_test.go b/sources/gcp/dynamic/adapters/compute-target-https-proxy_test.go index 6354aa99..345196d1 100644 --- a/sources/gcp/dynamic/adapters/compute-target-https-proxy_test.go +++ b/sources/gcp/dynamic/adapters/compute-target-https-proxy_test.go @@ -128,7 +128,7 @@ func TestComputeTargetHttpsProxy(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://compute.googleapis.com/compute/v1/projects/%s/global/targetHttpsProxies/%s", projectID, proxyName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Proxy not found"}, + Body: map[string]any{"error": "Proxy not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/compute-target-pool_test.go b/sources/gcp/dynamic/adapters/compute-target-pool_test.go index aa5fbb2e..922f081f 100644 --- a/sources/gcp/dynamic/adapters/compute-target-pool_test.go +++ b/sources/gcp/dynamic/adapters/compute-target-pool_test.go @@ -192,7 +192,7 @@ func TestComputeTargetPool(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://compute.googleapis.com/compute/v1/projects/%s/regions/%s/targetPools/%s", projectID, region, poolName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Target pool not found"}, + Body: map[string]any{"error": "Target pool not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/compute-url-map_test.go b/sources/gcp/dynamic/adapters/compute-url-map_test.go index c997f8bb..634bac8d 100644 --- a/sources/gcp/dynamic/adapters/compute-url-map_test.go +++ b/sources/gcp/dynamic/adapters/compute-url-map_test.go @@ -268,7 +268,7 @@ func TestComputeUrlMap(t *testing.T) { 
errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://compute.googleapis.com/compute/v1/projects/%s/global/urlMaps/%s", projectID, urlMapName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "URL map not found"}, + Body: map[string]any{"error": "URL map not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/compute-vpn-gateway_test.go b/sources/gcp/dynamic/adapters/compute-vpn-gateway_test.go index 984becbc..5f1f3ecd 100644 --- a/sources/gcp/dynamic/adapters/compute-vpn-gateway_test.go +++ b/sources/gcp/dynamic/adapters/compute-vpn-gateway_test.go @@ -135,7 +135,7 @@ func TestComputeVpnGateway(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://compute.googleapis.com/compute/v1/projects/%s/regions/%s/vpnGateways/%s", projectID, region, gatewayName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "VPN gateway not found"}, + Body: map[string]any{"error": "VPN gateway not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/compute-vpn-tunnel_test.go b/sources/gcp/dynamic/adapters/compute-vpn-tunnel_test.go index 4ea038f1..a17a0f29 100644 --- a/sources/gcp/dynamic/adapters/compute-vpn-tunnel_test.go +++ b/sources/gcp/dynamic/adapters/compute-vpn-tunnel_test.go @@ -161,7 +161,7 @@ func TestComputeVpnTunnel(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://compute.googleapis.com/compute/v1/projects/%s/regions/%s/vpnTunnels/%s", projectID, region, tunnelName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "VPN tunnel not found"}, + Body: map[string]any{"error": "VPN tunnel not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/container-cluster_test.go b/sources/gcp/dynamic/adapters/container-cluster_test.go index 649f5214..5c4f967b 100644 --- a/sources/gcp/dynamic/adapters/container-cluster_test.go +++ b/sources/gcp/dynamic/adapters/container-cluster_test.go @@ -315,7 +315,7 @@ func 
TestContainerCluster(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://container.googleapis.com/v1/projects/%s/locations/%s/clusters/%s", projectID, location, clusterName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Cluster not found"}, + Body: map[string]any{"error": "Cluster not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/container-node-pool_test.go b/sources/gcp/dynamic/adapters/container-node-pool_test.go index 0b01b9c3..51b6c0a6 100644 --- a/sources/gcp/dynamic/adapters/container-node-pool_test.go +++ b/sources/gcp/dynamic/adapters/container-node-pool_test.go @@ -210,7 +210,7 @@ func TestContainerNodePool(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://container.googleapis.com/v1/projects/%s/locations/%s/clusters/%s/nodePools/%s", projectID, location, clusterName, nodePoolName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Node pool not found"}, + Body: map[string]any{"error": "Node pool not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/dataform-repository_test.go b/sources/gcp/dynamic/adapters/dataform-repository_test.go index 70518f10..9b553c2c 100644 --- a/sources/gcp/dynamic/adapters/dataform-repository_test.go +++ b/sources/gcp/dynamic/adapters/dataform-repository_test.go @@ -146,7 +146,7 @@ func TestDataformRepository(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://dataform.googleapis.com/v1/projects/%s/locations/%s/repositories/%s", projectID, location, repositoryName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Repository not found"}, + Body: map[string]any{"error": "Repository not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/dataplex-aspect-type_test.go b/sources/gcp/dynamic/adapters/dataplex-aspect-type_test.go index fcafe96b..1b76d988 100644 --- a/sources/gcp/dynamic/adapters/dataplex-aspect-type_test.go +++ 
b/sources/gcp/dynamic/adapters/dataplex-aspect-type_test.go @@ -58,8 +58,8 @@ func TestDataplexAspectType(t *testing.T) { // Create the list response using a map structure instead of the protobuf ListAspectTypesResponse // This is necessary because the dynamic adapter expects JSON-serializable structures // Individual items use proper SDK types, but the list wrapper uses a simple map - aspectTypesList := map[string]interface{}{ - "aspectTypes": []interface{}{aspectType, aspectType2}, + aspectTypesList := map[string]any{ + "aspectTypes": []any{aspectType, aspectType2}, } sdpItemType := gcpshared.DataplexAspectType @@ -236,7 +236,7 @@ func TestDataplexAspectType(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://dataplex.googleapis.com/v1/projects/%s/locations/%s/aspectTypes/nonexistent", projectID, location): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": map[string]interface{}{"code": 404, "message": "AspectType not found"}}, + Body: map[string]any{"error": map[string]any{"code": 404, "message": "AspectType not found"}}, }, } diff --git a/sources/gcp/dynamic/adapters/dataplex-data-scan_test.go b/sources/gcp/dynamic/adapters/dataplex-data-scan_test.go index 8f67d5c2..e0e9f057 100644 --- a/sources/gcp/dynamic/adapters/dataplex-data-scan_test.go +++ b/sources/gcp/dynamic/adapters/dataplex-data-scan_test.go @@ -152,7 +152,7 @@ func TestDataplexDataScan(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://dataplex.googleapis.com/v1/projects/%s/locations/%s/dataScans/%s", projectID, location, dataScanName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Data scan not found"}, + Body: map[string]any{"error": "Data scan not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/dataplex-entry-group_test.go b/sources/gcp/dynamic/adapters/dataplex-entry-group_test.go index a2b71164..081f35dc 100644 --- 
a/sources/gcp/dynamic/adapters/dataplex-entry-group_test.go +++ b/sources/gcp/dynamic/adapters/dataplex-entry-group_test.go @@ -122,7 +122,7 @@ func TestDataplexEntryGroup(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://dataplex.googleapis.com/v1/projects/%s/locations/%s/entryGroups/%s", projectID, location, entryGroupID): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Entry group not found"}, + Body: map[string]any{"error": "Entry group not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/dataproc-auto-scaling-policy_test.go b/sources/gcp/dynamic/adapters/dataproc-auto-scaling-policy_test.go index 1b8c6348..9761b985 100644 --- a/sources/gcp/dynamic/adapters/dataproc-auto-scaling-policy_test.go +++ b/sources/gcp/dynamic/adapters/dataproc-auto-scaling-policy_test.go @@ -149,7 +149,7 @@ func TestDataprocAutoscalingPolicy(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://dataproc.googleapis.com/v1/projects/%s/regions/%s/autoscalingPolicies/%s", projectID, region, policyName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Policy not found"}, + Body: map[string]any{"error": "Policy not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/dataproc-cluster_test.go b/sources/gcp/dynamic/adapters/dataproc-cluster_test.go index a9daf0c6..978d14c2 100644 --- a/sources/gcp/dynamic/adapters/dataproc-cluster_test.go +++ b/sources/gcp/dynamic/adapters/dataproc-cluster_test.go @@ -202,7 +202,7 @@ func TestDataprocCluster(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://dataproc.googleapis.com/v1/projects/%s/regions/%s/clusters/%s", projectID, region, clusterName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Cluster not found"}, + Body: map[string]any{"error": "Cluster not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/dns-managed-zone_test.go 
b/sources/gcp/dynamic/adapters/dns-managed-zone_test.go index 59bf5b46..90afa549 100644 --- a/sources/gcp/dynamic/adapters/dns-managed-zone_test.go +++ b/sources/gcp/dynamic/adapters/dns-managed-zone_test.go @@ -156,7 +156,7 @@ func TestDNSManagedZone(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://dns.googleapis.com/dns/v1/projects/%s/managedZones/%s", projectID, zoneName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Managed zone not found"}, + Body: map[string]any{"error": "Managed zone not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/essential-contacts-contact_test.go b/sources/gcp/dynamic/adapters/essential-contacts-contact_test.go index 1dd41c5c..adfc26c4 100644 --- a/sources/gcp/dynamic/adapters/essential-contacts-contact_test.go +++ b/sources/gcp/dynamic/adapters/essential-contacts-contact_test.go @@ -121,7 +121,7 @@ func TestEssentialContactsContact(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://essentialcontacts.googleapis.com/v1/projects/%s/contacts/%s", projectID, contactID): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Contact not found"}, + Body: map[string]any{"error": "Contact not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/file-instance_test.go b/sources/gcp/dynamic/adapters/file-instance_test.go index d09cb5cc..190c03a2 100644 --- a/sources/gcp/dynamic/adapters/file-instance_test.go +++ b/sources/gcp/dynamic/adapters/file-instance_test.go @@ -206,7 +206,7 @@ func TestFileInstance(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://file.googleapis.com/v1/projects/%s/locations/%s/instances/%s", projectID, location, instanceName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Instance not found"}, + Body: map[string]any{"error": "Instance not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/iam-role_test.go 
b/sources/gcp/dynamic/adapters/iam-role_test.go index 058cba6c..c3b09a6b 100644 --- a/sources/gcp/dynamic/adapters/iam-role_test.go +++ b/sources/gcp/dynamic/adapters/iam-role_test.go @@ -86,7 +86,7 @@ func TestIAMRole(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://iam.googleapis.com/v1/projects/%s/roles/%s", projectID, roleName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Role not found"}, + Body: map[string]any{"error": "Role not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/logging-bucket_test.go b/sources/gcp/dynamic/adapters/logging-bucket_test.go index 563cfa66..8621cf59 100644 --- a/sources/gcp/dynamic/adapters/logging-bucket_test.go +++ b/sources/gcp/dynamic/adapters/logging-bucket_test.go @@ -121,7 +121,7 @@ func TestLoggingBucket(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://logging.googleapis.com/v2/projects/%s/locations/%s/buckets/%s", projectID, location, bucketName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Bucket not found"}, + Body: map[string]any{"error": "Bucket not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/logging-link_test.go b/sources/gcp/dynamic/adapters/logging-link_test.go index 3b333b2f..b8dd40fc 100644 --- a/sources/gcp/dynamic/adapters/logging-link_test.go +++ b/sources/gcp/dynamic/adapters/logging-link_test.go @@ -114,7 +114,7 @@ func TestLoggingLink(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://logging.googleapis.com/v2/projects/%s/locations/%s/buckets/%s/links/%s", projectID, location, bucketName, linkName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Link not found"}, + Body: map[string]any{"error": "Link not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/logging-saved-query_test.go b/sources/gcp/dynamic/adapters/logging-saved-query_test.go index 9fd02880..22c873e9 100644 --- 
a/sources/gcp/dynamic/adapters/logging-saved-query_test.go +++ b/sources/gcp/dynamic/adapters/logging-saved-query_test.go @@ -88,7 +88,7 @@ func TestLoggingSavedQuery(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://logging.googleapis.com/v2/projects/%s/locations/%s/savedQueries/%s", projectID, location, queryName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Saved query not found"}, + Body: map[string]any{"error": "Saved query not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/monitoring-alert-policy_test.go b/sources/gcp/dynamic/adapters/monitoring-alert-policy_test.go index 7c10d7a0..da8b4696 100644 --- a/sources/gcp/dynamic/adapters/monitoring-alert-policy_test.go +++ b/sources/gcp/dynamic/adapters/monitoring-alert-policy_test.go @@ -198,7 +198,7 @@ func TestMonitoringAlertPolicy(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://monitoring.googleapis.com/v3/projects/%s/alertPolicies/%s", projectID, policyID): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Alert policy not found"}, + Body: map[string]any{"error": "Alert policy not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/monitoring-custom-dashboard_test.go b/sources/gcp/dynamic/adapters/monitoring-custom-dashboard_test.go index 2ed436e8..f6126390 100644 --- a/sources/gcp/dynamic/adapters/monitoring-custom-dashboard_test.go +++ b/sources/gcp/dynamic/adapters/monitoring-custom-dashboard_test.go @@ -121,7 +121,7 @@ func TestMonitoringCustomDashboard(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://monitoring.googleapis.com/v1/projects/%s/dashboards/%s", projectID, dashboardID): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Dashboard not found"}, + Body: map[string]any{"error": "Dashboard not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/monitoring-notification-channel_test.go 
b/sources/gcp/dynamic/adapters/monitoring-notification-channel_test.go index 3057412b..26cce5b3 100644 --- a/sources/gcp/dynamic/adapters/monitoring-notification-channel_test.go +++ b/sources/gcp/dynamic/adapters/monitoring-notification-channel_test.go @@ -137,7 +137,7 @@ func TestMonitoringNotificationChannel(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://monitoring.googleapis.com/v3/projects/%s/notificationChannels/%s", projectID, channelID): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Notification channel not found"}, + Body: map[string]any{"error": "Notification channel not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/orgpolicy-policy_test.go b/sources/gcp/dynamic/adapters/orgpolicy-policy_test.go index a476e090..ced3b00f 100644 --- a/sources/gcp/dynamic/adapters/orgpolicy-policy_test.go +++ b/sources/gcp/dynamic/adapters/orgpolicy-policy_test.go @@ -165,7 +165,7 @@ func TestOrgPolicyPolicy(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://orgpolicy.googleapis.com/v2/projects/%s/policies/%s", projectID, policyName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Policy not found"}, + Body: map[string]any{"error": "Policy not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/pubsub-subscription_test.go b/sources/gcp/dynamic/adapters/pubsub-subscription_test.go index d1378ca2..a5a5a3bc 100644 --- a/sources/gcp/dynamic/adapters/pubsub-subscription_test.go +++ b/sources/gcp/dynamic/adapters/pubsub-subscription_test.go @@ -171,7 +171,7 @@ func TestPubSubSubscription(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://pubsub.googleapis.com/v1/projects/%s/subscriptions/%s", projectID, subscriptionName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Subscription not found"}, + Body: map[string]any{"error": "Subscription not found"}, }, } diff --git 
a/sources/gcp/dynamic/adapters/pubsub-topic_test.go b/sources/gcp/dynamic/adapters/pubsub-topic_test.go index a88b0302..0265b084 100644 --- a/sources/gcp/dynamic/adapters/pubsub-topic_test.go +++ b/sources/gcp/dynamic/adapters/pubsub-topic_test.go @@ -115,7 +115,7 @@ func TestPubSubTopic(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://pubsub.googleapis.com/v1/projects/%s/topics/%s", projectID, topicName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Topic not found"}, + Body: map[string]any{"error": "Topic not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/redis-instance_test.go b/sources/gcp/dynamic/adapters/redis-instance_test.go index 19066bea..dc2a9309 100644 --- a/sources/gcp/dynamic/adapters/redis-instance_test.go +++ b/sources/gcp/dynamic/adapters/redis-instance_test.go @@ -223,7 +223,7 @@ func TestRedisInstance(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://redis.googleapis.com/v1/projects/%s/locations/%s/instances/%s", projectID, location, instanceName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Redis instance not found"}, + Body: map[string]any{"error": "Redis instance not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/run-revision_test.go b/sources/gcp/dynamic/adapters/run-revision_test.go index b5a813b5..c1525c37 100644 --- a/sources/gcp/dynamic/adapters/run-revision_test.go +++ b/sources/gcp/dynamic/adapters/run-revision_test.go @@ -113,7 +113,7 @@ func TestRunRevision(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://run.googleapis.com/v2/projects/%s/locations/%s/services/%s/revisions/%s", projectID, location, serviceName, revisionName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Revision not found"}, + Body: map[string]any{"error": "Revision not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/run-service_test.go 
b/sources/gcp/dynamic/adapters/run-service_test.go index 5d57ff4b..382801c0 100644 --- a/sources/gcp/dynamic/adapters/run-service_test.go +++ b/sources/gcp/dynamic/adapters/run-service_test.go @@ -313,7 +313,7 @@ func TestRunService(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://run.googleapis.com/v2/projects/%s/locations/%s/services/%s", projectID, location, serviceName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Service not found"}, + Body: map[string]any{"error": "Service not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/secret-manager-secret_test.go b/sources/gcp/dynamic/adapters/secret-manager-secret_test.go index 4a6f9924..84030fc0 100644 --- a/sources/gcp/dynamic/adapters/secret-manager-secret_test.go +++ b/sources/gcp/dynamic/adapters/secret-manager-secret_test.go @@ -220,7 +220,7 @@ func TestSecretManagerSecret(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://secretmanager.googleapis.com/v1/projects/%s/secrets/%s", projectID, secretID): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Secret not found"}, + Body: map[string]any{"error": "Secret not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/security-center-management-security-center-service_test.go b/sources/gcp/dynamic/adapters/security-center-management-security-center-service_test.go index 442c70d9..872d08e9 100644 --- a/sources/gcp/dynamic/adapters/security-center-management-security-center-service_test.go +++ b/sources/gcp/dynamic/adapters/security-center-management-security-center-service_test.go @@ -116,7 +116,7 @@ func TestSecurityCenterManagementSecurityCenterService(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://securitycentermanagement.googleapis.com/v1/projects/%s/locations/%s/securityCenterServices/%s", projectID, location, serviceName): { StatusCode: http.StatusNotFound, - Body: 
map[string]interface{}{"error": "Service not found"}, + Body: map[string]any{"error": "Service not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/service-directory-endpoint_test.go b/sources/gcp/dynamic/adapters/service-directory-endpoint_test.go index 96ba6461..ca261d0e 100644 --- a/sources/gcp/dynamic/adapters/service-directory-endpoint_test.go +++ b/sources/gcp/dynamic/adapters/service-directory-endpoint_test.go @@ -157,7 +157,7 @@ func TestServiceDirectoryEndpoint(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://servicedirectory.googleapis.com/v1/projects/%s/locations/%s/namespaces/%s/services/%s/endpoints/%s", projectID, location, namespace, serviceName, endpointName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Endpoint not found"}, + Body: map[string]any{"error": "Endpoint not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/service-usage-service_test.go b/sources/gcp/dynamic/adapters/service-usage-service_test.go index 51ced862..2fc8fab5 100644 --- a/sources/gcp/dynamic/adapters/service-usage-service_test.go +++ b/sources/gcp/dynamic/adapters/service-usage-service_test.go @@ -105,7 +105,7 @@ func TestServiceUsageService(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://serviceusage.googleapis.com/v1/projects/%s/services/%s", projectID, serviceName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Service not found"}, + Body: map[string]any{"error": "Service not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/sql-admin-backup_test.go b/sources/gcp/dynamic/adapters/sql-admin-backup_test.go index 1ff3843b..30341e55 100644 --- a/sources/gcp/dynamic/adapters/sql-admin-backup_test.go +++ b/sources/gcp/dynamic/adapters/sql-admin-backup_test.go @@ -160,7 +160,7 @@ func TestSQLAdminBackup(t *testing.T) { errorResponses := map[string]shared.MockResponse{ 
fmt.Sprintf("https://sqladmin.googleapis.com/v1/projects/%s/backups/%s", projectID, backupName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Backup not found"}, + Body: map[string]any{"error": "Backup not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/sql-admin-instance_test.go b/sources/gcp/dynamic/adapters/sql-admin-instance_test.go index 3491a795..554159be 100644 --- a/sources/gcp/dynamic/adapters/sql-admin-instance_test.go +++ b/sources/gcp/dynamic/adapters/sql-admin-instance_test.go @@ -241,7 +241,7 @@ func TestSQLAdminInstance(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://sqladmin.googleapis.com/sql/v1/projects/%s/instances/%s", projectID, instanceName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Instance not found"}, + Body: map[string]any{"error": "Instance not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/storage-bucket_test.go b/sources/gcp/dynamic/adapters/storage-bucket_test.go index 937922be..5a6bbb46 100644 --- a/sources/gcp/dynamic/adapters/storage-bucket_test.go +++ b/sources/gcp/dynamic/adapters/storage-bucket_test.go @@ -110,7 +110,7 @@ func TestStorageBucket(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://storage.googleapis.com/storage/v1/b/%s", bucketName): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Bucket not found"}, + Body: map[string]any{"error": "Bucket not found"}, }, } diff --git a/sources/gcp/dynamic/adapters/storage-transfer-transfer-job.go b/sources/gcp/dynamic/adapters/storage-transfer-transfer-job.go index d13eeae9..bc60b3bb 100644 --- a/sources/gcp/dynamic/adapters/storage-transfer-transfer-job.go +++ b/sources/gcp/dynamic/adapters/storage-transfer-transfer-job.go @@ -108,7 +108,7 @@ var _ = registerableAdapter{ }, // TODO: Investigate whether we can/should support multiple items for a given key. 
// In this case, the eventStream can be an AWS SQS ARN in the form 'arn:aws:sqs:region:account_id:queue_name' - // https://linear.app/overmind/issue/ENG-1348/investigate-supporting-multiple-items-in-blast-propagations + // https://linear.app/overmind/issue/ENG-1348 // Required. Specifies a unique name of the resource such as AWS SQS ARN in the form 'arn:aws:sqs:region:account_id:queue_name', // or Pub/Sub subscription resource name in the form 'projects/{project}/subscriptions/{sub}'. "eventStream.name": { diff --git a/sources/gcp/dynamic/adapters/storage-transfer-transfer-job_test.go b/sources/gcp/dynamic/adapters/storage-transfer-transfer-job_test.go index b8f85d5c..05585a34 100644 --- a/sources/gcp/dynamic/adapters/storage-transfer-transfer-job_test.go +++ b/sources/gcp/dynamic/adapters/storage-transfer-transfer-job_test.go @@ -234,7 +234,7 @@ func TestStorageTransferTransferJob(t *testing.T) { errorResponses := map[string]shared.MockResponse{ fmt.Sprintf("https://storagetransfer.googleapis.com/v1/transferJobs/%s?projectId=%s", jobID, projectID): { StatusCode: http.StatusNotFound, - Body: map[string]interface{}{"error": "Transfer job not found"}, + Body: map[string]any{"error": "Transfer job not found"}, }, } diff --git a/sources/gcp/dynamic/adapters_test.go b/sources/gcp/dynamic/adapters_test.go index 59ba8c50..cc56d16f 100644 --- a/sources/gcp/dynamic/adapters_test.go +++ b/sources/gcp/dynamic/adapters_test.go @@ -72,10 +72,16 @@ func Test_addAdapter(t *testing.T) { testCases := []testCase{ { name: "Listable adapter", - sdpType: gcpshared.ComputeFirewall, + sdpType: gcpshared.ComputeNetwork, locations: projectLocation, listable: true, }, + { + name: "SearchableListable adapter (firewall with tag search)", + sdpType: gcpshared.ComputeFirewall, + locations: projectLocation, + searchableListable: true, + }, { name: "Searchable adapter", sdpType: gcpshared.SQLAdminBackupRun, diff --git a/sources/gcp/dynamic/ai-tools/README.md 
b/sources/gcp/dynamic/ai-tools/README.md index cd146d97..dc07f1d1 100644 --- a/sources/gcp/dynamic/ai-tools/README.md +++ b/sources/gcp/dynamic/ai-tools/README.md @@ -98,7 +98,7 @@ go run generate-test-ticket-cmd/main.go -v compute-global-address ### What it does 1. **Extract adapter information** from the adapter file in `../adapters/` 2. **Determine protobuf types** based on adapter name patterns -3. **Extract blast propagation** configuration from the adapter +3. **Extract link rules** configuration from the adapter 4. **Generate a Linear URL** with basic fields pre-filled: - Title: "Write unit test for {adapter-name} dynamic adapter" - Assignee: Cursor Agent @@ -115,7 +115,7 @@ The tool generates a Linear URL with basic fields and copies the description to ### Requirements - Must be run from the `prompter` directory - Adapter file must exist in `../adapters/` -- Adapter file must contain valid SDP item type and blast propagation configuration +- Adapter file must contain valid SDP item type and link rules configuration - Go 1.19+ required ## Integration with Cursor Agents @@ -163,7 +163,7 @@ When a Cursor agent picks up the ticket: 2. Follow the comprehensive testing patterns 3. Create the test file with proper structure 4. Include all required test cases (Get, List, Search if supported) -5. Add proper blast propagation tests +5. 
Add proper link rules tests ## Example Ticket Content @@ -177,7 +177,7 @@ For `compute-global-forwarding-rule`: - **API Endpoints**: - GET: `https://compute.googleapis.com/compute/v1/projects/{project}/global/forwardingRules/{forwardingRule}` - LIST: `https://compute.googleapis.com/compute/v1/projects/{project}/global/forwardingRules` -- **Blast Propagation**: network (InOnly), subnetwork (InOnly), IPAddress (BothWays), backendService (BothWays) +- **Link Rules**: network (InOnly), subnetwork (InOnly), IPAddress (BothWays), backendService (BothWays) ## Benefits @@ -223,7 +223,7 @@ When you just need tests for an existing adapter: The `../adapters/.cursor/rules/dynamic-adapter-creation.md` file ensures that: - Proper adapter structure and patterns are followed - Correct SDP item types and metadata are defined -- Appropriate blast propagation is configured +- Appropriate link rules are configured - Terraform mappings are included when applicable - IAM permissions are properly defined @@ -233,7 +233,7 @@ The `../adapters/.cursor/rules/dynamic-adapter-testing.md` file ensures that: - Proper imports are included - Correct protobuf types are used - Comprehensive test coverage is provided -- Static tests with blast propagation are included +- Static tests with link rules are included - Common mistakes are avoided This ensures consistent, high-quality implementations and unit tests for all dynamic adapters. 
diff --git a/sources/gcp/dynamic/shared.go b/sources/gcp/dynamic/shared.go index b6ea6204..d058b5e9 100644 --- a/sources/gcp/dynamic/shared.go +++ b/sources/gcp/dynamic/shared.go @@ -17,8 +17,8 @@ import ( "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" "github.com/overmindtech/cli/go/sdpcache" - gcpshared "github.com/overmindtech/cli/sources/gcp/shared" "github.com/overmindtech/cli/sources" + gcpshared "github.com/overmindtech/cli/sources/gcp/shared" "github.com/overmindtech/cli/sources/shared" ) @@ -96,7 +96,7 @@ func externalToSDP( ctx context.Context, location gcpshared.LocationInfo, uniqueAttrKeys []string, - resp map[string]interface{}, + resp map[string]any, sdpAssetType shared.ItemType, linker *gcpshared.Linker, nameSelector string, @@ -148,13 +148,13 @@ func externalToSDP( return sdpItem, nil } -func externalCallSingle(ctx context.Context, httpCli *http.Client, url string) (map[string]interface{}, error) { +func externalCallSingle(ctx context.Context, httpCli *http.Client, url string) (map[string]any, error) { req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) if err != nil { return nil, err } - resp, err := httpCli.Do(req) + resp, err := httpCli.Do(req) //nolint:gosec // G107 (SSRF): URL built from GCP API discovery document endpoints and project config, not user input if err != nil { return nil, err } @@ -194,7 +194,7 @@ func externalCallSingle(ctx context.Context, httpCli *http.Client, url string) ( return nil, err } - var result map[string]interface{} + var result map[string]any if err = json.Unmarshal(data, &result); err != nil { return nil, err } @@ -215,7 +215,7 @@ func externalCallMulti(ctx context.Context, itemsSelector string, httpCli *http. 
return err } - resp, err := httpCli.Do(req) + resp, err := httpCli.Do(req) //nolint:gosec // G107 (SSRF): URL built from GCP API discovery document endpoints with pagination token from GCP responses if err != nil { return err } @@ -250,7 +250,7 @@ func externalCallMulti(ctx context.Context, itemsSelector string, httpCli *http. return err } - var result map[string]interface{} + var result map[string]any if err = json.Unmarshal(data, &result); err != nil { return err } @@ -282,7 +282,7 @@ func externalCallMulti(ctx context.Context, itemsSelector string, httpCli *http. // Add items from this page to our collection for _, item := range items { - if itemMap, ok := item.(map[string]interface{}); ok { + if itemMap, ok := item.(map[string]any); ok { // If out channel is provided, send the item to it select { case out <- itemMap: @@ -319,7 +319,7 @@ func externalCallMulti(ctx context.Context, itemsSelector string, httpCli *http. func potentialLinksFromLinkRules(itemType shared.ItemType, linkRules map[shared.ItemType]map[string]*gcpshared.Impact) []string { potentialLinksMap := make(map[string]bool) - for _, impact := range linkRules[itemType] { + for key, impact := range linkRules[itemType] { potentialLinksMap[impact.ToSDPItemType.String()] = true // Special case: stdlib.NetworkIP and stdlib.NetworkDNS are interchangeable // because the linker automatically detects whether a value is an IP address or DNS name @@ -328,6 +328,17 @@ func potentialLinksFromLinkRules(itemType shared.ItemType, linkRules map[shared. potentialLinksMap["ip"] = true potentialLinksMap["dns"] = true } + // Network tag keys produce additional links via AutoLink that aren't + // captured by ToSDPItemType alone. 
+ if gcpshared.IsNetworkTagKey(key) { + switch itemType { + case gcpshared.ComputeFirewall, gcpshared.ComputeRoute: + potentialLinksMap[gcpshared.ComputeInstance.String()] = true + case gcpshared.ComputeInstance, gcpshared.ComputeInstanceTemplate: + potentialLinksMap[gcpshared.ComputeFirewall.String()] = true + potentialLinksMap[gcpshared.ComputeRoute.String()] = true + } + } } potentialLinks := make([]string, 0, len(potentialLinksMap)) @@ -350,7 +361,7 @@ func aggregateSDPItems(ctx context.Context, a Adapter, url string, location gcps itemsSelector = a.listResponseSelector } - out := make(chan map[string]interface{}) + out := make(chan map[string]any) p := pool.New().WithErrors().WithContext(ctx) p.Go(func(ctx context.Context) error { defer close(out) @@ -401,7 +412,7 @@ func streamSDPItems(ctx context.Context, a Adapter, url string, location gcpshar itemsSelector = a.listResponseSelector } - out := make(chan map[string]interface{}) + out := make(chan map[string]any) p := pool.New().WithErrors().WithContext(ctx) p.Go(func(ctx context.Context) error { defer close(out) diff --git a/sources/gcp/dynamic/shared_test.go b/sources/gcp/dynamic/shared_test.go index 1c7e099d..c11528a7 100644 --- a/sources/gcp/dynamic/shared_test.go +++ b/sources/gcp/dynamic/shared_test.go @@ -22,7 +22,7 @@ func Test_externalToSDP(t *testing.T) { type args struct { location gcpshared.LocationInfo uniqueAttrKeys []string - resp map[string]interface{} + resp map[string]any sdpAssetType shared.ItemType nameSelector string } @@ -38,9 +38,9 @@ func Test_externalToSDP(t *testing.T) { args: args{ location: testLocation, uniqueAttrKeys: []string{"projects", "locations", "instances"}, - resp: map[string]interface{}{ + resp: map[string]any{ "name": "projects/test-project/locations/us-central1/instances/instance-1", - "labels": map[string]interface{}{"env": "prod"}, + "labels": map[string]any{"env": "prod"}, "foo": "bar", }, sdpAssetType: gcpshared.ComputeInstance, @@ -67,10 +67,10 @@ func 
Test_externalToSDP(t *testing.T) { args: args{ location: testLocation, uniqueAttrKeys: []string{"projects", "locations", "instances"}, - resp: map[string]interface{}{ + resp: map[string]any{ // There is name, but it does not include uniqueAttrKeys, expected to use the name as is. "name": "instance-1", - "labels": map[string]interface{}{"env": "prod"}, + "labels": map[string]any{"env": "prod"}, "foo": "bar", }, sdpAssetType: gcpshared.ComputeInstance, @@ -97,8 +97,8 @@ func Test_externalToSDP(t *testing.T) { args: args{ location: testLocation, uniqueAttrKeys: []string{"projects", "locations", "instances"}, - resp: map[string]interface{}{ - "labels": map[string]interface{}{"env": "prod"}, + resp: map[string]any{ + "labels": map[string]any{"env": "prod"}, "foo": "bar", }, sdpAssetType: gcpshared.ComputeInstance, @@ -111,9 +111,9 @@ func Test_externalToSDP(t *testing.T) { args: args{ location: testLocation, uniqueAttrKeys: []string{"projects", "locations", "instances"}, - resp: map[string]interface{}{ + resp: map[string]any{ "instanceName": "instance-1", - "labels": map[string]interface{}{"env": "prod"}, + "labels": map[string]any{"env": "prod"}, "foo": "bar", }, sdpAssetType: gcpshared.ComputeInstance, @@ -141,7 +141,7 @@ func Test_externalToSDP(t *testing.T) { args: args{ location: testLocation, uniqueAttrKeys: []string{"projects", "locations", "instances"}, - resp: map[string]interface{}{ + resp: map[string]any{ "name": "projects/test-project/locations/us-central1/instances/instance-2", "foo": "baz", }, diff --git a/sources/gcp/integration-tests/compute-address_test.go b/sources/gcp/integration-tests/compute-address_test.go index 008b1c83..332ad10d 100644 --- a/sources/gcp/integration-tests/compute-address_test.go +++ b/sources/gcp/integration-tests/compute-address_test.go @@ -12,7 +12,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "github.com/googleapis/gax-go/v2/apierror" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" 
"github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdpcache" @@ -118,12 +117,12 @@ func TestComputeAddressIntegration(t *testing.T) { func createComputeAddress(ctx context.Context, client *compute.AddressesClient, projectID, region, addressName string) error { // Define the address configuration address := &computepb.Address{ - Name: ptr.To(addressName), + Name: new(addressName), Labels: map[string]string{ "test": "integration", }, - NetworkTier: ptr.To("PREMIUM"), - Region: ptr.To(region), + NetworkTier: new("PREMIUM"), + Region: new(region), } // Create the address diff --git a/sources/gcp/integration-tests/compute-autoscaler_test.go b/sources/gcp/integration-tests/compute-autoscaler_test.go index 8853286e..52ec7f75 100644 --- a/sources/gcp/integration-tests/compute-autoscaler_test.go +++ b/sources/gcp/integration-tests/compute-autoscaler_test.go @@ -12,7 +12,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "github.com/googleapis/gax-go/v2/apierror" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdpcache" @@ -187,20 +186,20 @@ func TestComputeAutoscalerIntegration(t *testing.T) { func createComputeInstanceTemplate(ctx context.Context, client *compute.InstanceTemplatesClient, projectID, name string) error { // Create a new instance template instanceTemplate := &computepb.InstanceTemplate{ - Name: ptr.To(name), + Name: new(name), Properties: &computepb.InstanceProperties{ Disks: []*computepb.AttachedDisk{ { - AutoDelete: ptr.To(true), - Boot: ptr.To(true), - DeviceName: ptr.To(name), + AutoDelete: new(true), + Boot: new(true), + DeviceName: new(name), InitializeParams: &computepb.AttachedDiskInitializeParams{ - DiskSizeGb: ptr.To(int64(10)), - DiskType: ptr.To("pd-balanced"), - SourceImage: ptr.To("projects/debian-cloud/global/images/debian-12-bookworm-v20250415"), + DiskSizeGb: new(int64(10)), + DiskType: new("pd-balanced"), + SourceImage: 
new("projects/debian-cloud/global/images/debian-12-bookworm-v20250415"), }, - Mode: ptr.To("READ_WRITE"), - Type: ptr.To("PERSISTENT"), + Mode: new("READ_WRITE"), + Type: new("PERSISTENT"), // Labels? Tags? }, @@ -209,17 +208,17 @@ func createComputeInstanceTemplate(ctx context.Context, client *compute.Instance { AccessConfigs: []*computepb.AccessConfig{ { - Kind: ptr.To("compute#accessConfig"), - Name: ptr.To("External NAT"), - NetworkTier: ptr.To("PREMIUM"), - Type: ptr.To("ONE_TO_ONE_NAT"), + Kind: new("compute#accessConfig"), + Name: new("External NAT"), + NetworkTier: new("PREMIUM"), + Type: new("ONE_TO_ONE_NAT"), }, }, - Network: ptr.To("projects/" + projectID + "/global/networks/default"), - StackType: ptr.To("IPV4_ONLY"), + Network: new("projects/" + projectID + "/global/networks/default"), + StackType: new("IPV4_ONLY"), }, }, - MachineType: ptr.To("e2-micro"), + MachineType: new("e2-micro"), Tags: &computepb.Tags{ Items: []string{"overmind-test"}, }, @@ -282,13 +281,13 @@ func deleteComputeInstanceTemplate(ctx context.Context, client *compute.Instance func createComputeAutoscaler(ctx context.Context, client *compute.AutoscalersClient, targetedInstanceGroupManager, projectID, zone, name string) error { // Create a new autoscaler autoscaler := &computepb.Autoscaler{ - Name: ptr.To(name), + Name: new(name), Target: &targetedInstanceGroupManager, AutoscalingPolicy: &computepb.AutoscalingPolicy{ - MinNumReplicas: ptr.To(int32(0)), - MaxNumReplicas: ptr.To(int32(1)), + MinNumReplicas: new(int32(0)), + MaxNumReplicas: new(int32(1)), CpuUtilization: &computepb.AutoscalingPolicyCpuUtilization{ - UtilizationTarget: ptr.To(float64(0.6)), + UtilizationTarget: new(float64(0.6)), }, }, } diff --git a/sources/gcp/integration-tests/compute-disk_test.go b/sources/gcp/integration-tests/compute-disk_test.go index 58997c1c..09ff0934 100644 --- a/sources/gcp/integration-tests/compute-disk_test.go +++ b/sources/gcp/integration-tests/compute-disk_test.go @@ -12,7 +12,6 @@ import 
( "cloud.google.com/go/compute/apiv1/computepb" "github.com/googleapis/gax-go/v2/apierror" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdpcache" @@ -129,9 +128,9 @@ func TestComputeDiskIntegration(t *testing.T) { func createDisk(ctx context.Context, client *compute.DisksClient, projectID, zone, diskName string) error { disk := &computepb.Disk{ - Name: ptr.To(diskName), - SizeGb: ptr.To(int64(10)), - Type: ptr.To(fmt.Sprintf( + Name: new(diskName), + SizeGb: new(int64(10)), + Type: new(fmt.Sprintf( "projects/%s/zones/%s/diskTypes/pd-standard", projectID, zone, )), diff --git a/sources/gcp/integration-tests/compute-forwarding-rule_test.go b/sources/gcp/integration-tests/compute-forwarding-rule_test.go index 0b747dbe..b741abad 100644 --- a/sources/gcp/integration-tests/compute-forwarding-rule_test.go +++ b/sources/gcp/integration-tests/compute-forwarding-rule_test.go @@ -12,7 +12,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "github.com/googleapis/gax-go/v2/apierror" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdpcache" @@ -122,7 +121,7 @@ func createComputeForwardingRule(ctx context.Context, client *compute.Forwarding Project: projectID, Region: region, ForwardingRuleResource: &computepb.ForwardingRule{ - Name: ptr.To(ruleName), + Name: new(ruleName), // IP address for which this forwarding rule accepts traffic. // When a client sends traffic to this IP address, the forwarding rule directs the traffic to the referenced target or backendService. // While creating a forwarding rule, specifying an IPAddress is required under the following circumstances: @@ -143,9 +142,9 @@ func createComputeForwardingRule(ctx context.Context, client *compute.Forwarding // determine the type of IP address that you can use. 
// For detailed information, see [IP address specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). // When reading an IPAddress, the API always returns the IP address number. - IPAddress: ptr.To("192.168.1.1"), - IPProtocol: ptr.To("TCP"), - PortRange: ptr.To("80-80"), + IPAddress: new("192.168.1.1"), + IPProtocol: new("TCP"), + PortRange: new("80-80"), // The URL of the target resource to receive the matched traffic. // For regional forwarding rules, this target must be in the same region as the forwarding rule. // For global forwarding rules, this target must be a global load balancing resource. @@ -156,7 +155,7 @@ func createComputeForwardingRule(ctx context.Context, client *compute.Forwarding //- all-apis - All supported Google APIs. //- For Private Service Connect forwarding rules that forward traffic to managed services, the target must be a service attachment. //The target is not mutable once set as a service attachment. - Target: ptr.To("https://www.googleapis.com/compute/v1/projects/test-project-id/regions/us-central1/targetPools/test-target-pool"), + Target: new("https://www.googleapis.com/compute/v1/projects/test-project-id/regions/us-central1/targetPools/test-target-pool"), }, } diff --git a/sources/gcp/integration-tests/compute-healthcheck_test.go b/sources/gcp/integration-tests/compute-healthcheck_test.go index 4e8638ab..f71c81ac 100644 --- a/sources/gcp/integration-tests/compute-healthcheck_test.go +++ b/sources/gcp/integration-tests/compute-healthcheck_test.go @@ -12,7 +12,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "github.com/googleapis/gax-go/v2/apierror" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdpcache" @@ -135,12 +134,12 @@ func TestComputeHealthCheckIntegration(t *testing.T) { // createComputeHealthCheck creates a GCP Compute HealthCheck with the given parameters. 
func createComputeHealthCheck(ctx context.Context, client *compute.HealthChecksClient, projectID, healthCheckName string) error { healthCheck := &computepb.HealthCheck{ - Name: ptr.To(healthCheckName), - CheckIntervalSec: ptr.To(int32(5)), - TimeoutSec: ptr.To(int32(5)), - Type: ptr.To("TCP"), + Name: new(healthCheckName), + CheckIntervalSec: new(int32(5)), + TimeoutSec: new(int32(5)), + Type: new("TCP"), TcpHealthCheck: &computepb.TCPHealthCheck{ - Port: ptr.To(int32(80)), + Port: new(int32(80)), }, } diff --git a/sources/gcp/integration-tests/compute-image_test.go b/sources/gcp/integration-tests/compute-image_test.go index a58ef552..f0d174b1 100644 --- a/sources/gcp/integration-tests/compute-image_test.go +++ b/sources/gcp/integration-tests/compute-image_test.go @@ -12,7 +12,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "github.com/googleapis/gax-go/v2/apierror" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdpcache" @@ -143,8 +142,8 @@ func TestComputeImageIntegration(t *testing.T) { // createComputeImage creates a GCP Compute Image with the given parameters. 
func createComputeImage(ctx context.Context, client *compute.ImagesClient, projectID, zone, imageName, diskName string) error { image := &computepb.Image{ - Name: ptr.To(imageName), - SourceDisk: ptr.To(fmt.Sprintf( + Name: new(imageName), + SourceDisk: new(fmt.Sprintf( "projects/%s/zones/%s/disks/%s", projectID, zone, diskName, )), diff --git a/sources/gcp/integration-tests/compute-instance-group-manager_test.go b/sources/gcp/integration-tests/compute-instance-group-manager_test.go index e286d301..e1ba2f33 100644 --- a/sources/gcp/integration-tests/compute-instance-group-manager_test.go +++ b/sources/gcp/integration-tests/compute-instance-group-manager_test.go @@ -12,7 +12,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "github.com/googleapis/gax-go/v2/apierror" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdpcache" @@ -153,22 +152,22 @@ func TestComputeInstanceGroupManagerIntegration(t *testing.T) { // createInstanceTemplate creates a GCP Compute Engine instance template. 
func createInstanceTemplate(ctx context.Context, client *compute.InstanceTemplatesClient, projectID, templateName string) error { template := &computepb.InstanceTemplate{ - Name: ptr.To(templateName), + Name: new(templateName), Properties: &computepb.InstanceProperties{ - MachineType: ptr.To("e2-micro"), + MachineType: new("e2-micro"), Disks: []*computepb.AttachedDisk{ { - Boot: ptr.To(true), - AutoDelete: ptr.To(true), - Type: ptr.To("PERSISTENT"), + Boot: new(true), + AutoDelete: new(true), + Type: new("PERSISTENT"), InitializeParams: &computepb.AttachedDiskInitializeParams{ - SourceImage: ptr.To("projects/debian-cloud/global/images/family/debian-11"), + SourceImage: new("projects/debian-cloud/global/images/family/debian-11"), }, }, }, NetworkInterfaces: []*computepb.NetworkInterface{ { - Network: ptr.To("global/networks/default"), + Network: new("global/networks/default"), }, }, }, @@ -224,9 +223,9 @@ func deleteInstanceTemplate(ctx context.Context, client *compute.InstanceTemplat // createInstanceGroupManager creates a GCP Compute Engine instance group manager. 
func createInstanceGroupManager(ctx context.Context, client *compute.InstanceGroupManagersClient, projectID, zone, instanceGroupManagerName, templateName string) error { instanceGroupManager := &computepb.InstanceGroupManager{ - Name: ptr.To(instanceGroupManagerName), - InstanceTemplate: ptr.To(fmt.Sprintf("projects/%s/global/instanceTemplates/%s", projectID, templateName)), - TargetSize: ptr.To(int32(1)), + Name: new(instanceGroupManagerName), + InstanceTemplate: new(fmt.Sprintf("projects/%s/global/instanceTemplates/%s", projectID, templateName)), + TargetSize: new(int32(1)), } req := &computepb.InsertInstanceGroupManagerRequest{ diff --git a/sources/gcp/integration-tests/compute-instance-group_test.go b/sources/gcp/integration-tests/compute-instance-group_test.go index 8017775e..9164db0e 100644 --- a/sources/gcp/integration-tests/compute-instance-group_test.go +++ b/sources/gcp/integration-tests/compute-instance-group_test.go @@ -12,7 +12,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "github.com/googleapis/gax-go/v2/apierror" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdpcache" @@ -131,7 +130,7 @@ func TestComputeInstanceGroupIntegration(t *testing.T) { func createInstanceGroup(ctx context.Context, client *compute.InstanceGroupsClient, projectID, zone, instanceGroupName string) error { instanceGroup := &computepb.InstanceGroup{ - Name: ptr.To(instanceGroupName), + Name: new(instanceGroupName), } req := &computepb.InsertInstanceGroupRequest{ diff --git a/sources/gcp/integration-tests/compute-instance_test.go b/sources/gcp/integration-tests/compute-instance_test.go index 23d4fb87..f0fd7bbb 100644 --- a/sources/gcp/integration-tests/compute-instance_test.go +++ b/sources/gcp/integration-tests/compute-instance_test.go @@ -12,7 +12,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "github.com/googleapis/gax-go/v2/apierror" log "github.com/sirupsen/logrus" - 
"k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdpcache" @@ -121,27 +120,27 @@ func TestComputeInstanceIntegration(t *testing.T) { func createComputeInstance(ctx context.Context, client *compute.InstancesClient, projectID, zone, instanceName, network, subnetwork, region string) error { // Construct the network interface networkInterface := &computepb.NetworkInterface{ - StackType: ptr.To("IPV4_ONLY"), + StackType: new("IPV4_ONLY"), } if network != "" { - networkInterface.Network = ptr.To(fmt.Sprintf("projects/%s/global/networks/%s", projectID, network)) + networkInterface.Network = new(fmt.Sprintf("projects/%s/global/networks/%s", projectID, network)) } if subnetwork != "" { - networkInterface.Subnetwork = ptr.To(fmt.Sprintf("projects/%s/regions/%s/subnetworks/%s", projectID, region, subnetwork)) + networkInterface.Subnetwork = new(fmt.Sprintf("projects/%s/regions/%s/subnetworks/%s", projectID, region, subnetwork)) } // Define the instance configuration instance := &computepb.Instance{ - Name: ptr.To(instanceName), - MachineType: ptr.To(fmt.Sprintf("zones/%s/machineTypes/e2-micro", zone)), + Name: new(instanceName), + MachineType: new(fmt.Sprintf("zones/%s/machineTypes/e2-micro", zone)), Disks: []*computepb.AttachedDisk{ { - Boot: ptr.To(true), - AutoDelete: ptr.To(true), + Boot: new(true), + AutoDelete: new(true), InitializeParams: &computepb.AttachedDiskInitializeParams{ - SourceImage: ptr.To("projects/debian-cloud/global/images/debian-12-bookworm-v20250415"), - DiskSizeGb: ptr.To(int64(10)), + SourceImage: new("projects/debian-cloud/global/images/debian-12-bookworm-v20250415"), + DiskSizeGb: new(int64(10)), }, }, }, diff --git a/sources/gcp/integration-tests/compute-instant-snapshot_test.go b/sources/gcp/integration-tests/compute-instant-snapshot_test.go index 09de7f70..9222cc50 100644 --- a/sources/gcp/integration-tests/compute-instant-snapshot_test.go +++ 
b/sources/gcp/integration-tests/compute-instant-snapshot_test.go @@ -12,7 +12,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "github.com/googleapis/gax-go/v2/apierror" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdpcache" @@ -168,8 +167,8 @@ func TestComputeInstantSnapshotIntegration(t *testing.T) { // createInstantSnapshot creates a GCP Compute Instant Snapshot with the given parameters. func createInstantSnapshot(ctx context.Context, client *compute.InstantSnapshotsClient, projectID, zone, snapshotName, diskName string) error { snapshot := &computepb.InstantSnapshot{ - Name: ptr.To(snapshotName), - SourceDisk: ptr.To(diskName), + Name: new(snapshotName), + SourceDisk: new(diskName), Labels: map[string]string{ "test": "integration", }, diff --git a/sources/gcp/integration-tests/compute-machine-image_test.go b/sources/gcp/integration-tests/compute-machine-image_test.go index 59f1ca2b..9ec569ff 100644 --- a/sources/gcp/integration-tests/compute-machine-image_test.go +++ b/sources/gcp/integration-tests/compute-machine-image_test.go @@ -12,7 +12,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "github.com/googleapis/gax-go/v2/apierror" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdpcache" @@ -153,8 +152,8 @@ func TestComputeMachineImageIntegration(t *testing.T) { // createComputeMachineImage creates a GCP Compute Machine Image with the given parameters. 
func createComputeMachineImage(t *testing.T, ctx context.Context, client *compute.MachineImagesClient, projectID, zone, machineImageName, sourceInstanceName string) error { machineImage := &computepb.MachineImage{ - Name: ptr.To(machineImageName), - SourceInstance: ptr.To(fmt.Sprintf( + Name: new(machineImageName), + SourceInstance: new(fmt.Sprintf( "projects/%s/zones/%s/instances/%s", projectID, zone, sourceInstanceName, )), diff --git a/sources/gcp/integration-tests/compute-node-group_test.go b/sources/gcp/integration-tests/compute-node-group_test.go index 63edb298..67e4dd30 100644 --- a/sources/gcp/integration-tests/compute-node-group_test.go +++ b/sources/gcp/integration-tests/compute-node-group_test.go @@ -13,7 +13,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "github.com/googleapis/gax-go/v2/apierror" log "github.com/sirupsen/logrus" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -204,7 +203,7 @@ func TestComputeNodeGroupIntegration(t *testing.T) { ExpectedType: gcpshared.ComputeNodeGroup.String(), ExpectedMethod: sdp.QueryMethod_SEARCH, ExpectedQuery: nodeTemplateName, - ExpectedScope: "*", + ExpectedScope: "*", }, } @@ -274,8 +273,8 @@ func TestComputeNodeGroupIntegration(t *testing.T) { func createComputeNodeTemplate(ctx context.Context, client *compute.NodeTemplatesClient, projectID, region, name string) error { // Create a new node template nodeTemplate := &computepb.NodeTemplate{ - Name: ptr.To(name), - NodeType: ptr.To("c2-node-60-240"), + Name: new(name), + NodeType: new("c2-node-60-240"), } // Create the node template @@ -336,12 +335,12 @@ func deleteComputeNodeTemplate(ctx context.Context, client *compute.NodeTemplate func createComputeNodeGroup(ctx context.Context, client *compute.NodeGroupsClient, nodeTemplate, projectID, zone, name string) error { // Create a new node group nodeGroup := &computepb.NodeGroup{ - Name: ptr.To(name), - NodeTemplate: ptr.To(nodeTemplate), + 
Name: new(name), + NodeTemplate: new(nodeTemplate), AutoscalingPolicy: &computepb.NodeGroupAutoscalingPolicy{ - Mode: ptr.To(computepb.NodeGroupAutoscalingPolicy_OFF.String()), - MinNodes: ptr.To(int32(0)), - MaxNodes: ptr.To(int32(1)), + Mode: new(computepb.NodeGroupAutoscalingPolicy_OFF.String()), + MinNodes: new(int32(0)), + MaxNodes: new(int32(1)), }, }
diff --git a/sources/gcp/integration-tests/kms_vs_asset_inventory_test.go b/sources/gcp/integration-tests/kms_vs_asset_inventory_test.go index 0d23eb9e..b35d421d 100644 --- a/sources/gcp/integration-tests/kms_vs_asset_inventory_test.go +++ b/sources/gcp/integration-tests/kms_vs_asset_inventory_test.go @@ -164,7 +164,7 @@ func TestKMSvsAssetInventoryComparison(t *testing.T) { t.Log("") // Asset Inventory may have indexing delay - retry with backoff - var assetResponse map[string]interface{} + var assetResponse map[string]any var assetLatency time.Duration var foundAsset bool @@ -188,10 +188,7 @@ func TestKMSvsAssetInventoryComparison(t *testing.T) { } // Exponential backoff: 5s, 10s, 20s, 40s... up to 60s - waitTime := time.Duration(5*(1<<(attempt-1))) * time.Second - if waitTime > 60*time.Second { - waitTime = 60 * time.Second - } + waitTime := min(time.Duration(5*(1<<(attempt-1)))*time.Second, 60*time.Second) time.Sleep(waitTime) } @@ -332,7 +329,7 @@ func destroyCryptoKeyVersion(ctx context.Context, client *kms.KeyManagementClien } // callKMSDirectAPI calls the Cloud KMS REST API directly to get a CryptoKey. 
-func callKMSDirectAPI(ctx context.Context, httpClient *http.Client, cryptoKeyName string) (map[string]interface{}, error) { +func callKMSDirectAPI(ctx context.Context, httpClient *http.Client, cryptoKeyName string) (map[string]any, error) { apiURL := fmt.Sprintf("https://cloudkms.googleapis.com/v1/%s", cryptoKeyName) req, err := http.NewRequestWithContext(ctx, http.MethodGet, apiURL, nil) @@ -356,7 +353,7 @@ func callKMSDirectAPI(ctx context.Context, httpClient *http.Client, cryptoKeyNam return nil, fmt.Errorf("failed to read response body: %w", err) } - var result map[string]interface{} + var result map[string]any if err := json.Unmarshal(body, &result); err != nil { return nil, fmt.Errorf("failed to unmarshal response: %w", err) } @@ -366,7 +363,7 @@ func callKMSDirectAPI(ctx context.Context, httpClient *http.Client, cryptoKeyNam // callAssetInventoryAPI calls the Cloud Asset Inventory API to find a specific CryptoKey. // Returns the asset if found, nil if not found (may indicate indexing delay). 
-func callAssetInventoryAPI(ctx context.Context, httpClient *http.Client, projectID, cryptoKeyName string) (map[string]interface{}, error) { +func callAssetInventoryAPI(ctx context.Context, httpClient *http.Client, projectID, cryptoKeyName string) (map[string]any, error) { // Build the Asset Inventory ListAssets URL baseURL := fmt.Sprintf("https://cloudasset.googleapis.com/v1/projects/%s/assets", projectID) @@ -401,13 +398,13 @@ func callAssetInventoryAPI(ctx context.Context, httpClient *http.Client, project return nil, fmt.Errorf("failed to read response body: %w", err) } - var result map[string]interface{} + var result map[string]any if err := json.Unmarshal(body, &result); err != nil { return nil, fmt.Errorf("failed to unmarshal response: %w", err) } // Find the specific CryptoKey in the assets list - assets, ok := result["assets"].([]interface{}) + assets, ok := result["assets"].([]any) if !ok || len(assets) == 0 { return nil, nil // No assets found - may indicate indexing delay } @@ -417,7 +414,7 @@ expectedAssetName := fmt.Sprintf("//cloudkms.googleapis.com/%s", cryptoKeyName) for _, asset := range assets { - assetMap, ok := asset.(map[string]interface{}) + assetMap, ok := asset.(map[string]any) if !ok { continue } diff --git a/sources/gcp/integration-tests/network-tags_test.go b/sources/gcp/integration-tests/network-tags_test.go new file mode 100644 index 00000000..634cb3f4 --- /dev/null +++ b/sources/gcp/integration-tests/network-tags_test.go @@ -0,0 +1,467 @@ +// Run commands (assumes RUN_GCP_INTEGRATION_TESTS, GCP_PROJECT_ID, GCP_ZONE are exported): +// +// All: go test ./sources/gcp/integration-tests/ -run "TestNetworkTagRelationships" -count 1 -v +// Setup: go test ./sources/gcp/integration-tests/ -run "TestNetworkTagRelationships/Setup" -count 1 -v +// Run: go test ./sources/gcp/integration-tests/ -run "TestNetworkTagRelationships/(Instance|Firewall|Route)" -count 1 -v +// 
Teardown: go test ./sources/gcp/integration-tests/ -run "TestNetworkTagRelationships/Teardown" -count 1 -v +// +// Verify created resources with gcloud: +// +// gcloud compute instances describe integration-test-nettag-instance --zone=$GCP_ZONE --project=$GCP_PROJECT_ID --format="value(tags.items)" +// gcloud compute firewall-rules describe integration-test-nettag-fw --project=$GCP_PROJECT_ID --format="value(targetTags)" +// gcloud compute routes describe integration-test-nettag-route --project=$GCP_PROJECT_ID --format="value(tags)" +// + +package integrationtests + +import ( + "context" + "errors" + "fmt" + "net/http" + "os" + "testing" + + compute "cloud.google.com/go/compute/apiv1" + "cloud.google.com/go/compute/apiv1/computepb" + "github.com/googleapis/gax-go/v2/apierror" + log "github.com/sirupsen/logrus" + "k8s.io/utils/ptr" + + "github.com/overmindtech/cli/go/discovery" + "github.com/overmindtech/cli/go/sdp-go" + "github.com/overmindtech/cli/go/sdpcache" + "github.com/overmindtech/cli/sources" + "github.com/overmindtech/cli/sources/gcp/dynamic" + "github.com/overmindtech/cli/sources/gcp/manual" + gcpshared "github.com/overmindtech/cli/sources/gcp/shared" +) + +const ( + networkTagTestInstance = "integration-test-nettag-instance" + networkTagTestFirewall = "integration-test-nettag-fw" + networkTagTestRoute = "integration-test-nettag-route" + networkTagTestInstanceTemplate = "integration-test-nettag-template" + networkTag = "nettag-test" +) + +func TestNetworkTagRelationships(t *testing.T) { + projectID := os.Getenv("GCP_PROJECT_ID") + if projectID == "" { + t.Skip("GCP_PROJECT_ID environment variable not set") + } + + zone := os.Getenv("GCP_ZONE") + if zone == "" { + t.Skip("GCP_ZONE environment variable not set") + } + + t.Parallel() + + ctx := context.Background() + + instanceClient, err := compute.NewInstancesRESTClient(ctx) + if err != nil { + t.Fatalf("NewInstancesRESTClient: %v", err) + } + defer instanceClient.Close() + + firewallClient, err := 
compute.NewFirewallsRESTClient(ctx) + if err != nil { + t.Fatalf("NewFirewallsRESTClient: %v", err) + } + defer firewallClient.Close() + + routeClient, err := compute.NewRoutesRESTClient(ctx) + if err != nil { + t.Fatalf("NewRoutesRESTClient: %v", err) + } + defer routeClient.Close() + + instanceTemplateClient, err := compute.NewInstanceTemplatesRESTClient(ctx) + if err != nil { + t.Fatalf("NewInstanceTemplatesRESTClient: %v", err) + } + defer instanceTemplateClient.Close() + + // --- Setup --- + t.Run("Setup", func(t *testing.T) { + if err := createInstanceWithTags(ctx, instanceClient, projectID, zone); err != nil { + t.Fatalf("Failed to create tagged instance: %v", err) + } + if err := createFirewallWithTags(ctx, firewallClient, projectID); err != nil { + t.Fatalf("Failed to create tagged firewall: %v", err) + } + if err := createRouteWithTags(ctx, routeClient, projectID); err != nil { + t.Fatalf("Failed to create tagged route: %v", err) + } + if err := createInstanceTemplateWithTags(ctx, instanceTemplateClient, projectID); err != nil { + t.Fatalf("Failed to create tagged instance template: %v", err) + } + }) + + // --- Run --- + t.Run("InstanceEmitsSearchLinksToFirewallAndRoute", func(t *testing.T) { + wrapper := manual.NewComputeInstance( + gcpshared.NewComputeInstanceClient(instanceClient), + []gcpshared.LocationInfo{gcpshared.NewZonalLocation(projectID, zone)}, + ) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], networkTagTestInstance, true) + if qErr != nil { + t.Fatalf("Get instance: %v", qErr) + } + + assertHasLinkedItemQuery(t, sdpItem, gcpshared.ComputeFirewall.String(), sdp.QueryMethod_SEARCH, networkTag, projectID) + assertHasLinkedItemQuery(t, sdpItem, gcpshared.ComputeRoute.String(), sdp.QueryMethod_SEARCH, networkTag, projectID) + }) + + t.Run("FirewallSearchByTagReturnsFirewall", func(t *testing.T) { + gcpHTTPCli, err := gcpshared.GCPHTTPClientWithOtel(ctx, "") + if 
err != nil { + t.Fatalf("GCPHTTPClientWithOtel: %v", err) + } + + adapter, err := dynamic.MakeAdapter(gcpshared.ComputeFirewall, gcpshared.NewLinker(), gcpHTTPCli, sdpcache.NewNoOpCache(), []gcpshared.LocationInfo{gcpshared.NewProjectLocation(projectID)}) + if err != nil { + t.Fatalf("MakeAdapter: %v", err) + } + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Firewall adapter does not implement SearchableAdapter") + } + + items, qErr := searchable.Search(ctx, projectID, networkTag, true) + if qErr != nil { + t.Fatalf("Search: %v", qErr) + } + + found := false + for _, item := range items { + if v, err := item.GetAttributes().Get("name"); err == nil && v == networkTagTestFirewall { + found = true + break + } + } + if !found { + t.Errorf("Expected to find firewall %s in search results for tag %q, got %d items", networkTagTestFirewall, networkTag, len(items)) + } + }) + + t.Run("RouteSearchByTagReturnsRoute", func(t *testing.T) { + gcpHTTPCli, err := gcpshared.GCPHTTPClientWithOtel(ctx, "") + if err != nil { + t.Fatalf("GCPHTTPClientWithOtel: %v", err) + } + + adapter, err := dynamic.MakeAdapter(gcpshared.ComputeRoute, gcpshared.NewLinker(), gcpHTTPCli, sdpcache.NewNoOpCache(), []gcpshared.LocationInfo{gcpshared.NewProjectLocation(projectID)}) + if err != nil { + t.Fatalf("MakeAdapter: %v", err) + } + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Route adapter does not implement SearchableAdapter") + } + + items, qErr := searchable.Search(ctx, projectID, networkTag, true) + if qErr != nil { + t.Fatalf("Search: %v", qErr) + } + + found := false + for _, item := range items { + if v, err := item.GetAttributes().Get("name"); err == nil && v == networkTagTestRoute { + found = true + break + } + } + if !found { + t.Errorf("Expected to find route %s in search results for tag %q, got %d items", networkTagTestRoute, networkTag, len(items)) + } + }) + + t.Run("InstanceSearchByTagReturnsInstance", func(t 
*testing.T) { + wrapper := manual.NewComputeInstance( + gcpshared.NewComputeInstanceClient(instanceClient), + []gcpshared.LocationInfo{gcpshared.NewZonalLocation(projectID, zone)}, + ) + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + searchable, ok := adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Instance adapter does not implement SearchableAdapter") + } + + scopeWithZone := fmt.Sprintf("%s.%s", projectID, zone) + items, qErr := searchable.Search(ctx, scopeWithZone, networkTag, true) + if qErr != nil { + t.Fatalf("Search: %v", qErr) + } + + found := false + for _, item := range items { + if v, err := item.GetAttributes().Get("name"); err == nil && v == networkTagTestInstance { + found = true + break + } + } + if !found { + t.Errorf("Expected to find instance %s in search results for tag %q, got %d items", networkTagTestInstance, networkTag, len(items)) + } + }) + + t.Run("FirewallEmitsSearchLinksToInstance", func(t *testing.T) { + gcpHTTPCli, err := gcpshared.GCPHTTPClientWithOtel(ctx, "") + if err != nil { + t.Fatalf("GCPHTTPClientWithOtel: %v", err) + } + + adapter, err := dynamic.MakeAdapter(gcpshared.ComputeFirewall, gcpshared.NewLinker(), gcpHTTPCli, sdpcache.NewNoOpCache(), []gcpshared.LocationInfo{gcpshared.NewProjectLocation(projectID)}) + if err != nil { + t.Fatalf("MakeAdapter: %v", err) + } + + sdpItem, qErr := adapter.Get(ctx, projectID, networkTagTestFirewall, true) + if qErr != nil { + t.Fatalf("Get firewall: %v", qErr) + } + + assertHasLinkedItemQuery(t, sdpItem, gcpshared.ComputeInstance.String(), sdp.QueryMethod_SEARCH, networkTag, projectID) + }) + + t.Run("RouteEmitsSearchLinksToInstance", func(t *testing.T) { + gcpHTTPCli, err := gcpshared.GCPHTTPClientWithOtel(ctx, "") + if err != nil { + t.Fatalf("GCPHTTPClientWithOtel: %v", err) + } + + adapter, err := dynamic.MakeAdapter(gcpshared.ComputeRoute, gcpshared.NewLinker(), gcpHTTPCli, sdpcache.NewNoOpCache(), 
[]gcpshared.LocationInfo{gcpshared.NewProjectLocation(projectID)}) + if err != nil { + t.Fatalf("MakeAdapter: %v", err) + } + + sdpItem, qErr := adapter.Get(ctx, projectID, networkTagTestRoute, true) + if qErr != nil { + t.Fatalf("Get route: %v", qErr) + } + + assertHasLinkedItemQuery(t, sdpItem, gcpshared.ComputeInstance.String(), sdp.QueryMethod_SEARCH, networkTag, projectID) + }) + + t.Run("InstanceTemplateEmitsSearchLinksToFirewallAndRoute", func(t *testing.T) { + gcpHTTPCli, err := gcpshared.GCPHTTPClientWithOtel(ctx, "") + if err != nil { + t.Fatalf("GCPHTTPClientWithOtel: %v", err) + } + + adapter, err := dynamic.MakeAdapter(gcpshared.ComputeInstanceTemplate, gcpshared.NewLinker(), gcpHTTPCli, sdpcache.NewNoOpCache(), []gcpshared.LocationInfo{gcpshared.NewProjectLocation(projectID)}) + if err != nil { + t.Fatalf("MakeAdapter: %v", err) + } + + sdpItem, qErr := adapter.Get(ctx, projectID, networkTagTestInstanceTemplate, true) + if qErr != nil { + t.Fatalf("Get instance template: %v", qErr) + } + + assertHasLinkedItemQuery(t, sdpItem, gcpshared.ComputeFirewall.String(), sdp.QueryMethod_SEARCH, networkTag, projectID) + assertHasLinkedItemQuery(t, sdpItem, gcpshared.ComputeRoute.String(), sdp.QueryMethod_SEARCH, networkTag, projectID) + }) + + // --- Teardown --- + t.Run("Teardown", func(t *testing.T) { + if err := deleteComputeInstance(ctx, instanceClient, projectID, zone, networkTagTestInstance); err != nil { + t.Errorf("Failed to delete instance: %v", err) + } + if err := deleteFirewall(ctx, firewallClient, projectID, networkTagTestFirewall); err != nil { + t.Errorf("Failed to delete firewall: %v", err) + } + if err := deleteRoute(ctx, routeClient, projectID, networkTagTestRoute); err != nil { + t.Errorf("Failed to delete route: %v", err) + } + if err := deleteInstanceTemplate(ctx, instanceTemplateClient, projectID, networkTagTestInstanceTemplate); err != nil { + t.Errorf("Failed to delete instance template: %v", err) + } + }) +} + +func 
assertHasLinkedItemQuery(t *testing.T, item *sdp.Item, expectedType string, expectedMethod sdp.QueryMethod, expectedQuery, expectedScope string) { + t.Helper() + for _, liq := range item.GetLinkedItemQueries() { + q := liq.GetQuery() + if q.GetType() == expectedType && q.GetMethod() == expectedMethod && q.GetQuery() == expectedQuery && q.GetScope() == expectedScope { + return + } + } + t.Errorf("Missing LinkedItemQuery{type=%s, method=%s, query=%s, scope=%s} on item %s", + expectedType, expectedMethod, expectedQuery, expectedScope, item.UniqueAttributeValue()) +} + +// --- Resource creation/deletion helpers --- + +func createInstanceWithTags(ctx context.Context, client *compute.InstancesClient, projectID, zone string) error { + instance := &computepb.Instance{ + Name: ptr.To(networkTagTestInstance), + MachineType: ptr.To(fmt.Sprintf("zones/%s/machineTypes/e2-micro", zone)), + Tags: &computepb.Tags{ + Items: []string{networkTag}, + }, + Disks: []*computepb.AttachedDisk{ + { + Boot: ptr.To(true), + AutoDelete: ptr.To(true), + InitializeParams: &computepb.AttachedDiskInitializeParams{ + SourceImage: ptr.To("projects/debian-cloud/global/images/debian-12-bookworm-v20250415"), + DiskSizeGb: ptr.To(int64(10)), + }, + }, + }, + NetworkInterfaces: []*computepb.NetworkInterface{ + {StackType: ptr.To("IPV4_ONLY")}, + }, + } + + op, err := client.Insert(ctx, &computepb.InsertInstanceRequest{ + Project: projectID, + Zone: zone, + InstanceResource: instance, + }) + if err != nil { + var apiErr *apierror.APIError + if errors.As(err, &apiErr) && apiErr.HTTPCode() == http.StatusConflict { + log.Printf("Instance %s already exists, skipping", networkTagTestInstance) + return nil + } + return fmt.Errorf("insert instance: %w", err) + } + return op.Wait(ctx) +} + +func createFirewallWithTags(ctx context.Context, client *compute.FirewallsClient, projectID string) error { + fw := &computepb.Firewall{ + Name: ptr.To(networkTagTestFirewall), + Network: ptr.To(fmt.Sprintf("projects/%s/global/networks/default", 
projectID)), + TargetTags: []string{networkTag}, + Allowed: []*computepb.Allowed{ + { + IPProtocol: ptr.To("tcp"), + Ports: []string{"8080"}, + }, + }, + SourceRanges: []string{"0.0.0.0/0"}, + } + + op, err := client.Insert(ctx, &computepb.InsertFirewallRequest{ + Project: projectID, + FirewallResource: fw, + }) + if err != nil { + var apiErr *apierror.APIError + if errors.As(err, &apiErr) && apiErr.HTTPCode() == http.StatusConflict { + log.Printf("Firewall %s already exists, skipping", networkTagTestFirewall) + return nil + } + return fmt.Errorf("insert firewall: %w", err) + } + return op.Wait(ctx) +} + +func createRouteWithTags(ctx context.Context, client *compute.RoutesClient, projectID string) error { + route := &computepb.Route{ + Name: ptr.To(networkTagTestRoute), + Network: ptr.To(fmt.Sprintf("projects/%s/global/networks/default", projectID)), + DestRange: ptr.To("10.99.0.0/24"), + NextHopGateway: ptr.To(fmt.Sprintf("projects/%s/global/gateways/default-internet-gateway", projectID)), + Tags: []string{networkTag}, + Priority: ptr.To(uint32(900)), + } + + op, err := client.Insert(ctx, &computepb.InsertRouteRequest{ + Project: projectID, + RouteResource: route, + }) + if err != nil { + var apiErr *apierror.APIError + if errors.As(err, &apiErr) && apiErr.HTTPCode() == http.StatusConflict { + log.Printf("Route %s already exists, skipping", networkTagTestRoute) + return nil + } + return fmt.Errorf("insert route: %w", err) + } + return op.Wait(ctx) +} + +func deleteFirewall(ctx context.Context, client *compute.FirewallsClient, projectID, name string) error { + op, err := client.Delete(ctx, &computepb.DeleteFirewallRequest{ + Project: projectID, + Firewall: name, + }) + if err != nil { + var apiErr *apierror.APIError + if errors.As(err, &apiErr) && apiErr.HTTPCode() == http.StatusNotFound { + return nil + } + return fmt.Errorf("delete firewall: %w", err) + } + return op.Wait(ctx) +} + +func deleteRoute(ctx context.Context, client *compute.RoutesClient, projectID, name string) error { 
+ op, err := client.Delete(ctx, &computepb.DeleteRouteRequest{ + Project: projectID, + Route: name, + }) + if err != nil { + var apiErr *apierror.APIError + if errors.As(err, &apiErr) && apiErr.HTTPCode() == http.StatusNotFound { + return nil + } + return fmt.Errorf("delete route: %w", err) + } + return op.Wait(ctx) +} + +func createInstanceTemplateWithTags(ctx context.Context, client *compute.InstanceTemplatesClient, projectID string) error { + template := &computepb.InstanceTemplate{ + Name: ptr.To(networkTagTestInstanceTemplate), + Properties: &computepb.InstanceProperties{ + MachineType: ptr.To("e2-micro"), + Tags: &computepb.Tags{ + Items: []string{networkTag}, + }, + Disks: []*computepb.AttachedDisk{ + { + Boot: ptr.To(true), + AutoDelete: ptr.To(true), + InitializeParams: &computepb.AttachedDiskInitializeParams{ + SourceImage: ptr.To("projects/debian-cloud/global/images/debian-12-bookworm-v20250415"), + DiskSizeGb: ptr.To(int64(10)), + }, + }, + }, + NetworkInterfaces: []*computepb.NetworkInterface{ + { + Network: ptr.To("global/networks/default"), + StackType: ptr.To("IPV4_ONLY"), + }, + }, + }, + } + + op, err := client.Insert(ctx, &computepb.InsertInstanceTemplateRequest{ + Project: projectID, + InstanceTemplateResource: template, + }) + if err != nil { + var apiErr *apierror.APIError + if errors.As(err, &apiErr) && apiErr.HTTPCode() == http.StatusConflict { + log.Printf("Instance template %s already exists, skipping", networkTagTestInstanceTemplate) + return nil + } + return fmt.Errorf("insert instance template: %w", err) + } + return op.Wait(ctx) +} diff --git a/sources/gcp/integration-tests/service-account-impersonation_test.go b/sources/gcp/integration-tests/service-account-impersonation_test.go index 53b380d8..dec678cf 100644 --- a/sources/gcp/integration-tests/service-account-impersonation_test.go +++ b/sources/gcp/integration-tests/service-account-impersonation_test.go @@ -7,14 +7,15 @@ import ( "fmt" "net/http" "os" + "slices" "strings" "testing" "time" - compute 
"cloud.google.com/go/compute/apiv1" - "cloud.google.com/go/compute/apiv1/computepb" authcredentials "cloud.google.com/go/auth/credentials" "cloud.google.com/go/auth/oauth2adapt" + compute "cloud.google.com/go/compute/apiv1" + "cloud.google.com/go/compute/apiv1/computepb" credentials "cloud.google.com/go/iam/credentials/apiv1" credentialspb "cloud.google.com/go/iam/credentials/apiv1/credentialspb" "github.com/google/uuid" @@ -562,13 +563,7 @@ func grantServiceAccountTokenCreator(ctx context.Context, iamService *iam.Servic for i, binding := range policy.Bindings { if binding.Role == role { // Check if member already exists - memberFound := false - for _, m := range binding.Members { - if m == member { - memberFound = true - break - } - } + memberFound := slices.Contains(binding.Members, member) if !memberFound { policy.Bindings[i].Members = append(policy.Bindings[i].Members, member) } @@ -632,10 +627,8 @@ func verifyServiceAccountTokenCreatorBinding(ctx context.Context, iamService *ia for _, binding := range policy.Bindings { if binding.Role == role { // Check if the impersonator service account is in the members list - for _, m := range binding.Members { - if m == member { - return true, nil - } + if slices.Contains(binding.Members, member) { + return true, nil } } } @@ -686,13 +679,7 @@ func grantProjectIAMRole(ctx context.Context, crmService *cloudresourcemanager.S for i, binding := range policy.Bindings { if binding.Role == role { // Check if member already exists - memberFound := false - for _, m := range binding.Members { - if m == member { - memberFound = true - break - } - } + memberFound := slices.Contains(binding.Members, member) if !memberFound { policy.Bindings[i].Members = append(policy.Bindings[i].Members, member) } diff --git a/sources/gcp/manual/README.md b/sources/gcp/manual/README.md index dc2e8b09..410e0c9d 100644 --- a/sources/gcp/manual/README.md +++ b/sources/gcp/manual/README.md @@ -58,7 +58,7 @@ Refer to the [cursor 
rules](.cursor/rules/gcp-manual-adapter-creation.mdc) for c 3. **Handle Complex Resource Linking**: - Parse non-standard API response formats - Extract resource identifiers from various formats - - Create appropriate linked item queries with correct blast propagation + - Create appropriate linked item queries 4. **Include Comprehensive Tests**: - Unit tests for all methods @@ -89,12 +89,12 @@ When reviewing PRs for manual adapters, ensure: ### ✅ Linked Item Queries - [ ] Example values in tests match actual GCP resource formats - [ ] Scopes for linked item queries are correct (verify with linked resource documentation) -- [ ] Blast propagation rules are appropriate for resource relationships +- [ ] Linked item queries are appropriately defined - [ ] All possible resource references are handled (no missing cases) ### ✅ Documentation and References - [ ] GCP API documentation URLs are included in comments -- [ ] Resource relationship explanations are documented +- [ ] Resource linking explanations are documented - [ ] Complex parsing logic is well-commented - [ ] Official GCP reference links are provided for linked resources diff --git a/sources/gcp/manual/cloud-kms-crypto-key-version_test.go b/sources/gcp/manual/cloud-kms-crypto-key-version_test.go index e8c8c976..606c572b 100644 --- a/sources/gcp/manual/cloud-kms-crypto-key-version_test.go +++ b/sources/gcp/manual/cloud-kms-crypto-key-version_test.go @@ -23,7 +23,7 @@ func TestCloudKMSCryptoKeyVersion(t *testing.T) { defer cache.Clear() // Pre-populate cache with a CryptoKeyVersion item - attrs, _ := sdp.ToAttributesViaJson(map[string]interface{}{ + attrs, _ := sdp.ToAttributesViaJson(map[string]any{ "name": "projects/test-project-id/locations/us/keyRings/test-keyring/cryptoKeys/test-key/cryptoKeyVersions/1", "uniqueAttr": "us|test-keyring|test-key|1", "state": "ENABLED", @@ -105,13 +105,13 @@ func TestCloudKMSCryptoKeyVersion(t *testing.T) { defer cache.Clear() // Pre-populate cache with CryptoKeyVersion items 
under SEARCH cache key (by cryptoKey) - attrs1, _ := sdp.ToAttributesViaJson(map[string]interface{}{ + attrs1, _ := sdp.ToAttributesViaJson(map[string]any{ "name": "projects/test-project-id/locations/us/keyRings/test-keyring/cryptoKeys/test-key/cryptoKeyVersions/1", "uniqueAttr": "us|test-keyring|test-key|1", }) _ = attrs1.Set("uniqueAttr", "us|test-keyring|test-key|1") - attrs2, _ := sdp.ToAttributesViaJson(map[string]interface{}{ + attrs2, _ := sdp.ToAttributesViaJson(map[string]any{ "name": "projects/test-project-id/locations/us/keyRings/test-keyring/cryptoKeys/test-key/cryptoKeyVersions/2", "uniqueAttr": "us|test-keyring|test-key|2", }) @@ -256,13 +256,13 @@ func TestCloudKMSCryptoKeyVersion(t *testing.T) { defer cache.Clear() // Pre-populate cache with CryptoKeyVersion items under SEARCH cache key (by cryptoKey) - attrs1, _ := sdp.ToAttributesViaJson(map[string]interface{}{ + attrs1, _ := sdp.ToAttributesViaJson(map[string]any{ "name": "projects/test-project-id/locations/us-central1/keyRings/my-keyring/cryptoKeys/my-key/cryptoKeyVersions/1", "uniqueAttr": "us-central1|my-keyring|my-key|1", }) _ = attrs1.Set("uniqueAttr", "us-central1|my-keyring|my-key|1") - attrs2, _ := sdp.ToAttributesViaJson(map[string]interface{}{ + attrs2, _ := sdp.ToAttributesViaJson(map[string]any{ "name": "projects/test-project-id/locations/us-central1/keyRings/my-keyring/cryptoKeys/my-key/cryptoKeyVersions/2", "uniqueAttr": "us-central1|my-keyring|my-key|2", }) @@ -332,7 +332,7 @@ func TestCloudKMSCryptoKeyVersion(t *testing.T) { defer cache.Clear() // Pre-populate cache with CryptoKeyVersion items - attrs1, _ := sdp.ToAttributesViaJson(map[string]interface{}{ + attrs1, _ := sdp.ToAttributesViaJson(map[string]any{ "name": "projects/test-project-id/locations/europe-west1/keyRings/prod-keyring/cryptoKeys/prod-key/cryptoKeyVersions/1", "uniqueAttr": "europe-west1|prod-keyring|prod-key|1", }) @@ -376,7 +376,7 @@ func TestCloudKMSCryptoKeyVersion(t *testing.T) { defer cache.Clear() // 
Pre-populate cache with a CryptoKeyVersion item with linked queries - attrs, _ := sdp.ToAttributesViaJson(map[string]interface{}{ + attrs, _ := sdp.ToAttributesViaJson(map[string]any{ "name": "projects/test-project-id/locations/us/keyRings/test-keyring/cryptoKeys/test-key/cryptoKeyVersions/1", "uniqueAttr": "us|test-keyring|test-key|1", }) diff --git a/sources/gcp/manual/cloud-kms-crypto-key_test.go b/sources/gcp/manual/cloud-kms-crypto-key_test.go index 4bd7195f..6790ba57 100644 --- a/sources/gcp/manual/cloud-kms-crypto-key_test.go +++ b/sources/gcp/manual/cloud-kms-crypto-key_test.go @@ -23,7 +23,7 @@ func TestCloudKMSCryptoKey(t *testing.T) { defer cache.Clear() // Pre-populate cache with a CryptoKey item - attrs, _ := sdp.ToAttributesViaJson(map[string]interface{}{ + attrs, _ := sdp.ToAttributesViaJson(map[string]any{ "name": "projects/test-project-id/locations/global/keyRings/test-keyring/cryptoKeys/test-key", "uniqueAttr": "global|test-keyring|test-key", }) @@ -104,13 +104,13 @@ func TestCloudKMSCryptoKey(t *testing.T) { defer cache.Clear() // Pre-populate cache with CryptoKey items under SEARCH cache key (by keyRing) - attrs1, _ := sdp.ToAttributesViaJson(map[string]interface{}{ + attrs1, _ := sdp.ToAttributesViaJson(map[string]any{ "name": "projects/test-project-id/locations/global/keyRings/test-keyring/cryptoKeys/test-key-1", "uniqueAttr": "global|test-keyring|test-key-1", }) _ = attrs1.Set("uniqueAttr", "global|test-keyring|test-key-1") - attrs2, _ := sdp.ToAttributesViaJson(map[string]interface{}{ + attrs2, _ := sdp.ToAttributesViaJson(map[string]any{ "name": "projects/test-project-id/locations/global/keyRings/test-keyring/cryptoKeys/test-key-2", "uniqueAttr": "global|test-keyring|test-key-2", }) @@ -238,7 +238,7 @@ func TestCloudKMSCryptoKey(t *testing.T) { // Pre-populate cache with a specific CryptoKey item // Note: Terraform queries with full path are converted to GET operations by the adapter framework - attrs, _ := 
sdp.ToAttributesViaJson(map[string]interface{}{ + attrs, _ := sdp.ToAttributesViaJson(map[string]any{ "name": "projects/test-project-id/locations/us-central1/keyRings/my-keyring/cryptoKeys/my-key-1", "uniqueAttr": "us-central1|my-keyring|my-key-1", }) @@ -293,13 +293,13 @@ func TestCloudKMSCryptoKey(t *testing.T) { defer cache.Clear() // Pre-populate cache with CryptoKey items - attrs1, _ := sdp.ToAttributesViaJson(map[string]interface{}{ + attrs1, _ := sdp.ToAttributesViaJson(map[string]any{ "name": "projects/test-project-id/locations/us-central1/keyRings/my-keyring/cryptoKeys/my-key-1", "uniqueAttr": "us-central1|my-keyring|my-key-1", }) _ = attrs1.Set("uniqueAttr", "us-central1|my-keyring|my-key-1") - attrs2, _ := sdp.ToAttributesViaJson(map[string]interface{}{ + attrs2, _ := sdp.ToAttributesViaJson(map[string]any{ "name": "projects/test-project-id/locations/us-central1/keyRings/my-keyring/cryptoKeys/my-key-2", "uniqueAttr": "us-central1|my-keyring|my-key-2", }) @@ -344,13 +344,19 @@ func TestCloudKMSCryptoKey(t *testing.T) { t.Fatalf("Expected 2 items with legacy format, got: %d", len(items)) } - // Verify the returned items have the correct unique attributes - uniqueAttr1, err := items[0].GetAttributes().Get("uniqueAttr") - if err != nil { - t.Fatalf("Failed to get uniqueAttr from item 1: %v", err) + // Verify both expected items are present (order is not guaranteed) + found := make(map[string]bool) + for _, item := range items { + ua, err := item.GetAttributes().Get("uniqueAttr") + if err != nil { + t.Fatalf("Failed to get uniqueAttr: %v", err) + } + found[ua.(string)] = true } - if uniqueAttr1 != "us-central1|my-keyring|my-key-1" { - t.Fatalf("Expected uniqueAttr 'us-central1|my-keyring|my-key-1', got: %v", uniqueAttr1) + for _, expected := range []string{"us-central1|my-keyring|my-key-1", "us-central1|my-keyring|my-key-2"} { + if !found[expected] { + t.Fatalf("Expected item with uniqueAttr %q not found in results", expected) + } } }) @@ -375,7 +381,7 @@ 
func TestCloudKMSCryptoKey(t *testing.T) { defer cache.Clear() // Pre-populate cache with a CryptoKey item with linked queries - attrs, _ := sdp.ToAttributesViaJson(map[string]interface{}{ + attrs, _ := sdp.ToAttributesViaJson(map[string]any{ "name": "projects/test-project-id/locations/global/keyRings/test-keyring/cryptoKeys/test-key", "uniqueAttr": "global|test-keyring|test-key", }) diff --git a/sources/gcp/manual/cloud-kms-key-ring_test.go b/sources/gcp/manual/cloud-kms-key-ring_test.go index 713d169b..f2cd5398 100644 --- a/sources/gcp/manual/cloud-kms-key-ring_test.go +++ b/sources/gcp/manual/cloud-kms-key-ring_test.go @@ -23,7 +23,7 @@ func TestCloudKMSKeyRing(t *testing.T) { defer cache.Clear() // Pre-populate cache with a KeyRing item (simulating what the loader would do) - attrs, _ := sdp.ToAttributesViaJson(map[string]interface{}{ + attrs, _ := sdp.ToAttributesViaJson(map[string]any{ "name": "projects/test-project-id/locations/us/keyRings/test-keyring", "uniqueAttr": "us|test-keyring", }) @@ -99,13 +99,13 @@ func TestCloudKMSKeyRing(t *testing.T) { defer cache.Clear() // Pre-populate cache with KeyRing items under LIST cache key - attrs1, _ := sdp.ToAttributesViaJson(map[string]interface{}{ + attrs1, _ := sdp.ToAttributesViaJson(map[string]any{ "name": "projects/test-project-id/locations/us/keyRings/test-keyring-1", "uniqueAttr": "us|test-keyring-1", }) _ = attrs1.Set("uniqueAttr", "us|test-keyring-1") - attrs2, _ := sdp.ToAttributesViaJson(map[string]interface{}{ + attrs2, _ := sdp.ToAttributesViaJson(map[string]any{ "name": "projects/test-project-id/locations/us/keyRings/test-keyring-2", "uniqueAttr": "us|test-keyring-2", }) @@ -230,7 +230,7 @@ func TestCloudKMSKeyRing(t *testing.T) { defer cache.Clear() // Pre-populate cache with KeyRing items under SEARCH cache key (by location) - attrs, _ := sdp.ToAttributesViaJson(map[string]interface{}{ + attrs, _ := sdp.ToAttributesViaJson(map[string]any{ "name": 
"projects/test-project-id/locations/us/keyRings/test-keyring", "uniqueAttr": "us|test-keyring", }) @@ -316,7 +316,7 @@ func TestCloudKMSKeyRing(t *testing.T) { defer cache.Clear() // Pre-populate cache with KeyRing item - attrs, _ := sdp.ToAttributesViaJson(map[string]interface{}{ + attrs, _ := sdp.ToAttributesViaJson(map[string]any{ "name": "projects/test-project-id/locations/us-central1/keyRings/my-keyring", "uniqueAttr": "us-central1|my-keyring", }) @@ -370,7 +370,7 @@ func TestCloudKMSKeyRing(t *testing.T) { defer cache.Clear() // Pre-populate cache with KeyRing item - attrs, _ := sdp.ToAttributesViaJson(map[string]interface{}{ + attrs, _ := sdp.ToAttributesViaJson(map[string]any{ "name": "projects/test-project-id/locations/us-central1/keyRings/my-keyring", "uniqueAttr": "us-central1|my-keyring", }) @@ -423,7 +423,7 @@ func TestCloudKMSKeyRing(t *testing.T) { defer cache.Clear() // Pre-populate cache with a KeyRing item - attrs, _ := sdp.ToAttributesViaJson(map[string]interface{}{ + attrs, _ := sdp.ToAttributesViaJson(map[string]any{ "name": "projects/test-project-id/locations/us/keyRings/test-keyring", "uniqueAttr": "us|test-keyring", }) diff --git a/sources/gcp/manual/compute-address.go b/sources/gcp/manual/compute-address.go index 3e5f8735..fcbf5f4a 100644 --- a/sources/gcp/manual/compute-address.go +++ b/sources/gcp/manual/compute-address.go @@ -10,7 +10,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "github.com/sourcegraph/conc/pool" "google.golang.org/api/iterator" - "google.golang.org/protobuf/proto" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -188,7 +187,7 @@ func (c computeAddressWrapper) listAggregatedStream(ctx context.Context, stream p.Go(func(ctx context.Context) error { it := c.client.AggregatedList(ctx, &computepb.AggregatedListAddressesRequest{ Project: projectID, - ReturnPartialSuccess: proto.Bool(true), // Handle partial failures gracefully + ReturnPartialSuccess: new(true), // Handle 
partial failures gracefully }) for { diff --git a/sources/gcp/manual/compute-address_test.go b/sources/gcp/manual/compute-address_test.go index 8171cd94..19e60358 100644 --- a/sources/gcp/manual/compute-address_test.go +++ b/sources/gcp/manual/compute-address_test.go @@ -10,7 +10,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "go.uber.org/mock/gomock" "google.golang.org/api/iterator" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -382,11 +381,11 @@ func TestComputeAddress(t *testing.T) { func createComputeAddress(addressName string) *computepb.Address { return &computepb.Address{ - Name: ptr.To(addressName), + Name: new(addressName), Labels: map[string]string{"env": "test"}, - Network: ptr.To("https://www.googleapis.com/compute/v1/projects/test-project-id/global/networks/network"), - Subnetwork: ptr.To("https://www.googleapis.com/compute/v1/projects/test-project-id/regions/us-central1/subnetworks/default"), - Address: ptr.To("192.168.1.3"), + Network: new("https://www.googleapis.com/compute/v1/projects/test-project-id/global/networks/network"), + Subnetwork: new("https://www.googleapis.com/compute/v1/projects/test-project-id/regions/us-central1/subnetworks/default"), + Address: new("192.168.1.3"), } } @@ -398,6 +397,6 @@ func createComputeAddressWithUsers(addressName string, users []string) *computep func createComputeAddressWithIPCollection(addressName string, ipCollection string) *computepb.Address { addr := createComputeAddress(addressName) - addr.IpCollection = ptr.To(ipCollection) + addr.IpCollection = new(ipCollection) return addr } diff --git a/sources/gcp/manual/compute-autoscaler.go b/sources/gcp/manual/compute-autoscaler.go index 18923b2a..f2b21eac 100644 --- a/sources/gcp/manual/compute-autoscaler.go +++ b/sources/gcp/manual/compute-autoscaler.go @@ -9,7 +9,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "github.com/sourcegraph/conc/pool" "google.golang.org/api/iterator" 
- "google.golang.org/protobuf/proto" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -181,7 +180,7 @@ func (c computeAutoscalerWrapper) listAggregatedStream(ctx context.Context, stre p.Go(func(ctx context.Context) error { it := c.client.AggregatedList(ctx, &computepb.AggregatedListAutoscalersRequest{ Project: projectID, - ReturnPartialSuccess: proto.Bool(true), // Handle partial failures gracefully + ReturnPartialSuccess: new(true), // Handle partial failures gracefully }) for { diff --git a/sources/gcp/manual/compute-autoscaler_test.go b/sources/gcp/manual/compute-autoscaler_test.go index 67ab1aa3..65d3e43c 100644 --- a/sources/gcp/manual/compute-autoscaler_test.go +++ b/sources/gcp/manual/compute-autoscaler_test.go @@ -9,7 +9,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "go.uber.org/mock/gomock" "google.golang.org/api/iterator" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -256,15 +255,15 @@ func TestComputeAutoscalerWrapper(t *testing.T) { // Create an autoscaler fixture (as returned from GCP API). 
func createAutoscalerApiFixture(autoscalerName string) *computepb.Autoscaler { return &computepb.Autoscaler{ - Name: ptr.To(autoscalerName), - Target: ptr.To("https://www.googleapis.com/compute/v1/projects/test-project-id/zones/us-central1-a/instanceGroupManagers/test-instance-group"), + Name: new(autoscalerName), + Target: new("https://www.googleapis.com/compute/v1/projects/test-project-id/zones/us-central1-a/instanceGroupManagers/test-instance-group"), AutoscalingPolicy: &computepb.AutoscalingPolicy{ - MinNumReplicas: ptr.To(int32(1)), - MaxNumReplicas: ptr.To(int32(5)), + MinNumReplicas: new(int32(1)), + MaxNumReplicas: new(int32(5)), CpuUtilization: &computepb.AutoscalingPolicyCpuUtilization{ - UtilizationTarget: ptr.To(float64(0.6)), + UtilizationTarget: new(float64(0.6)), }, }, - Zone: ptr.To("us-central1-a"), + Zone: new("us-central1-a"), } } diff --git a/sources/gcp/manual/compute-backend-service.go b/sources/gcp/manual/compute-backend-service.go index d61a47dd..3905387d 100644 --- a/sources/gcp/manual/compute-backend-service.go +++ b/sources/gcp/manual/compute-backend-service.go @@ -4,13 +4,13 @@ import ( "context" "errors" "fmt" + "slices" "strings" "sync/atomic" "cloud.google.com/go/compute/apiv1/computepb" "github.com/sourcegraph/conc/pool" "google.golang.org/api/iterator" - "google.golang.org/protobuf/proto" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -66,10 +66,8 @@ func (c computeBackendServiceWrapper) validateAndParseScope(scope string) (gcpsh allLocations := append([]gcpshared.LocationInfo{}, c.projectLocations...) allLocations = append(allLocations, c.regionLocations...) 
- for _, configuredLoc := range allLocations { - if location.Equals(configuredLoc) { - return location, nil - } + if slices.ContainsFunc(allLocations, location.Equals) { + return location, nil } return gcpshared.LocationInfo{}, &sdp.QueryError{ @@ -275,7 +273,7 @@ func (c computeBackendServiceWrapper) listAggregatedStream(ctx context.Context, p.Go(func(ctx context.Context) error { it := c.globalClient.AggregatedList(ctx, &computepb.AggregatedListBackendServicesRequest{ Project: projectID, - ReturnPartialSuccess: proto.Bool(true), // Handle partial failures gracefully + ReturnPartialSuccess: new(true), // Handle partial failures gracefully }) for { diff --git a/sources/gcp/manual/compute-backend-service_test.go b/sources/gcp/manual/compute-backend-service_test.go index 0a781f0f..f01d4fad 100644 --- a/sources/gcp/manual/compute-backend-service_test.go +++ b/sources/gcp/manual/compute-backend-service_test.go @@ -11,7 +11,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "go.uber.org/mock/gomock" "google.golang.org/api/iterator" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -439,7 +438,7 @@ func TestComputeBackendService(t *testing.T) { backendService := createComputeBackendService("test-backend-service") backendService.Backends = []*computepb.Backend{ { - Group: ptr.To(instanceGroupURL), + Group: new(instanceGroupURL), }, } @@ -512,9 +511,9 @@ func TestComputeBackendService(t *testing.T) { backendService := createComputeBackendService("test-backend-service") backendService.HaPolicy = &computepb.BackendServiceHAPolicy{ Leader: &computepb.BackendServiceHAPolicyLeader{ - BackendGroup: ptr.To(backendGroupURL), + BackendGroup: new(backendGroupURL), NetworkEndpoint: &computepb.BackendServiceHAPolicyLeaderNetworkEndpoint{ - Instance: ptr.To(instanceName), + Instance: new(instanceName), }, }, } @@ -591,7 +590,7 @@ func TestComputeBackendService(t *testing.T) { region := "us-central1" regionURL := 
fmt.Sprintf("https://compute.googleapis.com/compute/v1/projects/%s/regions/%s", projectID, region) backendService := createComputeBackendService("test-backend-service") - backendService.Region = ptr.To(regionURL) + backendService.Region = new(regionURL) mockGlobalClient.EXPECT().Get(ctx, gomock.Any()).Return(backendService, nil) @@ -726,14 +725,14 @@ func TestComputeBackendService(t *testing.T) { func createComputeBackendService(name string) *computepb.BackendService { return &computepb.BackendService{ - Name: ptr.To(name), - Network: ptr.To("global/networks/network"), - SecurityPolicy: ptr.To("https://compute.googleapis.com/compute/v1/projects/test-project/global/securityPolicies/test-security-policy"), - EdgeSecurityPolicy: ptr.To("https://compute.googleapis.com/compute/v1/projects/test-project/global/securityPolicies/test-edge-security-policy"), + Name: new(name), + Network: new("global/networks/network"), + SecurityPolicy: new("https://compute.googleapis.com/compute/v1/projects/test-project/global/securityPolicies/test-security-policy"), + EdgeSecurityPolicy: new("https://compute.googleapis.com/compute/v1/projects/test-project/global/securityPolicies/test-edge-security-policy"), SecuritySettings: &computepb.SecuritySettings{ - ClientTlsPolicy: ptr.To("https://networksecurity.googleapis.com/v1/projects/test-project/locations/test-location/clientTlsPolicies/test-client-tls-policy"), + ClientTlsPolicy: new("https://networksecurity.googleapis.com/v1/projects/test-project/locations/test-location/clientTlsPolicies/test-client-tls-policy"), }, - ServiceLbPolicy: ptr.To(" https://networkservices.googleapis.com/v1alpha1/name=projects/test-project/locations/test-location/serviceLbPolicies/test-service-lb-policy"), + ServiceLbPolicy: new(" https://networkservices.googleapis.com/v1alpha1/name=projects/test-project/locations/test-location/serviceLbPolicies/test-service-lb-policy"), ServiceBindings: []string{ 
"https://networkservices.googleapis.com/v1alpha1/projects/test-project/locations/test-location/serviceBindings/test-service-binding", }, diff --git a/sources/gcp/manual/compute-disk.go b/sources/gcp/manual/compute-disk.go index 697e074c..116e2b14 100644 --- a/sources/gcp/manual/compute-disk.go +++ b/sources/gcp/manual/compute-disk.go @@ -9,7 +9,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "github.com/sourcegraph/conc/pool" "google.golang.org/api/iterator" - "google.golang.org/protobuf/proto" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -186,7 +185,7 @@ func (c computeDiskWrapper) listAggregatedStream(ctx context.Context, stream dis p.Go(func(ctx context.Context) error { it := c.client.AggregatedList(ctx, &computepb.AggregatedListDisksRequest{ Project: projectID, - ReturnPartialSuccess: proto.Bool(true), // Handle partial failures gracefully + ReturnPartialSuccess: new(true), // Handle partial failures gracefully }) for { diff --git a/sources/gcp/manual/compute-disk_test.go b/sources/gcp/manual/compute-disk_test.go index 4fcd801d..11b44819 100644 --- a/sources/gcp/manual/compute-disk_test.go +++ b/sources/gcp/manual/compute-disk_test.go @@ -10,7 +10,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "go.uber.org/mock/gomock" "google.golang.org/api/iterator" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -63,10 +62,10 @@ func TestComputeDisk(t *testing.T) { sourceValue: "projects/test-project-id/global/images/test-image", expectedLinked: shared.QueryTests{ { - ExpectedType: gcpshared.ComputeImage.String(), - ExpectedMethod: sdp.QueryMethod_SEARCH, - ExpectedQuery: "projects/test-project-id/global/images/test-image", - ExpectedScope: "test-project-id", + ExpectedType: gcpshared.ComputeImage.String(), + ExpectedMethod: sdp.QueryMethod_SEARCH, + ExpectedQuery: "projects/test-project-id/global/images/test-image", + ExpectedScope: "test-project-id", 
}, }, }, @@ -76,10 +75,10 @@ func TestComputeDisk(t *testing.T) { sourceValue: "projects/test-project-id/global/snapshots/test-snapshot", expectedLinked: shared.QueryTests{ { - ExpectedType: gcpshared.ComputeSnapshot.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "test-snapshot", - ExpectedScope: "test-project-id", + ExpectedType: gcpshared.ComputeSnapshot.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-snapshot", + ExpectedScope: "test-project-id", }, }, }, @@ -89,10 +88,10 @@ func TestComputeDisk(t *testing.T) { sourceValue: "projects/test-project-id/zones/us-central1-a/instantSnapshots/test-instant-snapshot", expectedLinked: shared.QueryTests{ { - ExpectedType: gcpshared.ComputeInstantSnapshot.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "test-instant-snapshot", - ExpectedScope: "test-project-id.us-central1-a", + ExpectedType: gcpshared.ComputeInstantSnapshot.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-instant-snapshot", + ExpectedScope: "test-project-id.us-central1-a", }, }, }, @@ -102,10 +101,10 @@ func TestComputeDisk(t *testing.T) { sourceValue: "projects/test-project-id/zones/us-central1-a/disks/source-disk", expectedLinked: shared.QueryTests{ { - ExpectedType: gcpshared.ComputeDisk.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "source-disk", - ExpectedScope: "test-project-id.us-central1-a", + ExpectedType: gcpshared.ComputeDisk.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "source-disk", + ExpectedScope: "test-project-id.us-central1-a", }, }, }, @@ -113,46 +112,46 @@ func TestComputeDisk(t *testing.T) { // These are always present resourcePolicyTest := shared.QueryTest{ - ExpectedType: gcpshared.ComputeResourcePolicy.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "test-policy", - ExpectedScope: "test-project-id.us-central1", + ExpectedType: gcpshared.ComputeResourcePolicy.String(), + ExpectedMethod: 
sdp.QueryMethod_GET, + ExpectedQuery: "test-policy", + ExpectedScope: "test-project-id.us-central1", } userTest := shared.QueryTest{ - ExpectedType: gcpshared.ComputeInstance.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "test-instance", - ExpectedScope: "test-project-id.us-central1-a", + ExpectedType: gcpshared.ComputeInstance.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-instance", + ExpectedScope: "test-project-id.us-central1-a", } diskTypeTest := shared.QueryTest{ - ExpectedType: gcpshared.ComputeDiskType.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "pd-standard", - ExpectedScope: "test-project-id.us-central1-a", + ExpectedType: gcpshared.ComputeDiskType.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "pd-standard", + ExpectedScope: "test-project-id.us-central1-a", } diskEncryptionKeyTest := shared.QueryTest{ - ExpectedType: gcpshared.CloudKMSCryptoKeyVersion.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "global|test-keyring|test-key|test-version-source-disk", - ExpectedScope: "test-project-id", + ExpectedType: gcpshared.CloudKMSCryptoKeyVersion.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "global|test-keyring|test-key|test-version-source-disk", + ExpectedScope: "test-project-id", } sourceImageEncryptionKeyTest := shared.QueryTest{ - ExpectedType: gcpshared.CloudKMSCryptoKeyVersion.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "global|test-keyring|test-key|test-version-source-image", - ExpectedScope: "test-project-id", + ExpectedType: gcpshared.CloudKMSCryptoKeyVersion.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "global|test-keyring|test-key|test-version-source-image", + ExpectedScope: "test-project-id", } sourceSnapshotEncryptionKeyTest := shared.QueryTest{ - ExpectedType: gcpshared.CloudKMSCryptoKeyVersion.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: 
"global|test-keyring|test-key|test-version-source-snapshot", - ExpectedScope: "test-project-id", + ExpectedType: gcpshared.CloudKMSCryptoKeyVersion.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "global|test-keyring|test-key|test-version-source-snapshot", + ExpectedScope: "test-project-id", } sourceConsistencyGroupPolicy := shared.QueryTest{ - ExpectedType: gcpshared.ComputeResourcePolicy.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "test-consistency-group-policy", - ExpectedScope: "test-project-id.us-central1", + ExpectedType: gcpshared.ComputeResourcePolicy.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-consistency-group-policy", + ExpectedScope: "test-project-id.us-central1", } for _, tc := range cases { @@ -405,7 +404,7 @@ func TestComputeDisk(t *testing.T) { // Test with gs:// URI format sourceStorageObject := "gs://test-bucket/path/to/image.tar.gz" disk := createComputeDisk("test-disk", computepb.Disk_READY) - disk.SourceStorageObject = ptr.To(sourceStorageObject) + disk.SourceStorageObject = new(sourceStorageObject) mockClient.EXPECT().Get(ctx, gomock.Any()).Return(disk, nil) @@ -420,61 +419,61 @@ func TestComputeDisk(t *testing.T) { // Base queries that are always present baseQueries := shared.QueryTests{ { - ExpectedType: gcpshared.ComputeResourcePolicy.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "test-policy", - ExpectedScope: fmt.Sprintf("%s.us-central1", projectID), + ExpectedType: gcpshared.ComputeResourcePolicy.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-policy", + ExpectedScope: fmt.Sprintf("%s.us-central1", projectID), }, { - ExpectedType: gcpshared.ComputeInstance.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "test-instance", - ExpectedScope: fmt.Sprintf("%s.%s", projectID, zone), + ExpectedType: gcpshared.ComputeInstance.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-instance", + 
ExpectedScope: fmt.Sprintf("%s.%s", projectID, zone), }, { - ExpectedType: gcpshared.ComputeDiskType.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "pd-standard", - ExpectedScope: fmt.Sprintf("%s.%s", projectID, zone), + ExpectedType: gcpshared.ComputeDiskType.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "pd-standard", + ExpectedScope: fmt.Sprintf("%s.%s", projectID, zone), }, { - ExpectedType: gcpshared.CloudKMSCryptoKeyVersion.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "global|test-keyring|test-key|test-version-source-disk", - ExpectedScope: projectID, + ExpectedType: gcpshared.CloudKMSCryptoKeyVersion.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "global|test-keyring|test-key|test-version-source-disk", + ExpectedScope: projectID, }, { - ExpectedType: gcpshared.CloudKMSCryptoKeyVersion.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "global|test-keyring|test-key|test-version-source-image", - ExpectedScope: projectID, + ExpectedType: gcpshared.CloudKMSCryptoKeyVersion.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "global|test-keyring|test-key|test-version-source-image", + ExpectedScope: projectID, }, { - ExpectedType: gcpshared.CloudKMSCryptoKeyVersion.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "global|test-keyring|test-key|test-version-source-snapshot", - ExpectedScope: projectID, + ExpectedType: gcpshared.CloudKMSCryptoKeyVersion.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "global|test-keyring|test-key|test-version-source-snapshot", + ExpectedScope: projectID, }, { - ExpectedType: gcpshared.ComputeResourcePolicy.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "test-consistency-group-policy", - ExpectedScope: fmt.Sprintf("%s.us-central1", projectID), + ExpectedType: gcpshared.ComputeResourcePolicy.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: 
"test-consistency-group-policy", + ExpectedScope: fmt.Sprintf("%s.us-central1", projectID), }, { - ExpectedType: gcpshared.ComputeImage.String(), - ExpectedMethod: sdp.QueryMethod_SEARCH, - ExpectedQuery: "projects/test-project-id/global/images/test-image", - ExpectedScope: projectID, + ExpectedType: gcpshared.ComputeImage.String(), + ExpectedMethod: sdp.QueryMethod_SEARCH, + ExpectedQuery: "projects/test-project-id/global/images/test-image", + ExpectedScope: projectID, }, } // Add the new query we're testing queryTests := append(baseQueries, shared.QueryTest{ - ExpectedType: gcpshared.StorageBucket.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "test-bucket", - ExpectedScope: projectID, + ExpectedType: gcpshared.StorageBucket.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-bucket", + ExpectedScope: projectID, }) shared.RunStaticTests(t, adapter, sdpItem, queryTests) @@ -486,7 +485,7 @@ func TestComputeDisk(t *testing.T) { storagePoolURL := fmt.Sprintf("https://compute.googleapis.com/compute/v1/projects/%s/zones/%s/storagePools/test-storage-pool", projectID, zone) disk := createComputeDisk("test-disk", computepb.Disk_READY) - disk.StoragePool = ptr.To(storagePoolURL) + disk.StoragePool = new(storagePoolURL) mockClient.EXPECT().Get(ctx, gomock.Any()).Return(disk, nil) @@ -501,61 +500,61 @@ func TestComputeDisk(t *testing.T) { // Base queries that are always present (same as above) baseQueries := shared.QueryTests{ { - ExpectedType: gcpshared.ComputeResourcePolicy.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "test-policy", - ExpectedScope: fmt.Sprintf("%s.us-central1", projectID), + ExpectedType: gcpshared.ComputeResourcePolicy.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-policy", + ExpectedScope: fmt.Sprintf("%s.us-central1", projectID), }, { - ExpectedType: gcpshared.ComputeInstance.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "test-instance", - 
ExpectedScope: fmt.Sprintf("%s.%s", projectID, zone), + ExpectedType: gcpshared.ComputeInstance.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-instance", + ExpectedScope: fmt.Sprintf("%s.%s", projectID, zone), }, { - ExpectedType: gcpshared.ComputeDiskType.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "pd-standard", - ExpectedScope: fmt.Sprintf("%s.%s", projectID, zone), + ExpectedType: gcpshared.ComputeDiskType.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "pd-standard", + ExpectedScope: fmt.Sprintf("%s.%s", projectID, zone), }, { - ExpectedType: gcpshared.CloudKMSCryptoKeyVersion.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "global|test-keyring|test-key|test-version-source-disk", - ExpectedScope: projectID, + ExpectedType: gcpshared.CloudKMSCryptoKeyVersion.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "global|test-keyring|test-key|test-version-source-disk", + ExpectedScope: projectID, }, { - ExpectedType: gcpshared.CloudKMSCryptoKeyVersion.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "global|test-keyring|test-key|test-version-source-image", - ExpectedScope: projectID, + ExpectedType: gcpshared.CloudKMSCryptoKeyVersion.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "global|test-keyring|test-key|test-version-source-image", + ExpectedScope: projectID, }, { - ExpectedType: gcpshared.CloudKMSCryptoKeyVersion.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "global|test-keyring|test-key|test-version-source-snapshot", - ExpectedScope: projectID, + ExpectedType: gcpshared.CloudKMSCryptoKeyVersion.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "global|test-keyring|test-key|test-version-source-snapshot", + ExpectedScope: projectID, }, { - ExpectedType: gcpshared.ComputeResourcePolicy.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "test-consistency-group-policy", - 
ExpectedScope: fmt.Sprintf("%s.us-central1", projectID), + ExpectedType: gcpshared.ComputeResourcePolicy.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-consistency-group-policy", + ExpectedScope: fmt.Sprintf("%s.us-central1", projectID), }, { - ExpectedType: gcpshared.ComputeImage.String(), - ExpectedMethod: sdp.QueryMethod_SEARCH, - ExpectedQuery: "projects/test-project-id/global/images/test-image", - ExpectedScope: projectID, + ExpectedType: gcpshared.ComputeImage.String(), + ExpectedMethod: sdp.QueryMethod_SEARCH, + ExpectedQuery: "projects/test-project-id/global/images/test-image", + ExpectedScope: projectID, }, } // Add the new query we're testing queryTests := append(baseQueries, shared.QueryTest{ - ExpectedType: gcpshared.ComputeStoragePool.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "test-storage-pool", - ExpectedScope: fmt.Sprintf("%s.%s", projectID, zone), + ExpectedType: gcpshared.ComputeStoragePool.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-storage-pool", + ExpectedScope: fmt.Sprintf("%s.%s", projectID, zone), }) shared.RunStaticTests(t, adapter, sdpItem, queryTests) @@ -569,8 +568,8 @@ func TestComputeDisk(t *testing.T) { consistencyGroupPolicyURL := fmt.Sprintf("https://compute.googleapis.com/compute/v1/projects/%s/regions/us-central1/resourcePolicies/test-consistency-policy", projectID) disk := createComputeDisk("test-disk", computepb.Disk_READY) disk.AsyncPrimaryDisk = &computepb.DiskAsyncReplication{ - Disk: ptr.To(primaryDiskURL), - ConsistencyGroupPolicy: ptr.To(consistencyGroupPolicyURL), + Disk: new(primaryDiskURL), + ConsistencyGroupPolicy: new(consistencyGroupPolicyURL), } mockClient.EXPECT().Get(ctx, gomock.Any()).Return(disk, nil) @@ -586,68 +585,68 @@ func TestComputeDisk(t *testing.T) { // Base queries that are always present baseQueries := shared.QueryTests{ { - ExpectedType: gcpshared.ComputeResourcePolicy.String(), - ExpectedMethod: sdp.QueryMethod_GET, - 
ExpectedQuery: "test-policy", - ExpectedScope: fmt.Sprintf("%s.us-central1", projectID), + ExpectedType: gcpshared.ComputeResourcePolicy.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-policy", + ExpectedScope: fmt.Sprintf("%s.us-central1", projectID), }, { - ExpectedType: gcpshared.ComputeInstance.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "test-instance", - ExpectedScope: fmt.Sprintf("%s.%s", projectID, zone), + ExpectedType: gcpshared.ComputeInstance.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-instance", + ExpectedScope: fmt.Sprintf("%s.%s", projectID, zone), }, { - ExpectedType: gcpshared.ComputeDiskType.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "pd-standard", - ExpectedScope: fmt.Sprintf("%s.%s", projectID, zone), + ExpectedType: gcpshared.ComputeDiskType.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "pd-standard", + ExpectedScope: fmt.Sprintf("%s.%s", projectID, zone), }, { - ExpectedType: gcpshared.CloudKMSCryptoKeyVersion.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "global|test-keyring|test-key|test-version-source-disk", - ExpectedScope: projectID, + ExpectedType: gcpshared.CloudKMSCryptoKeyVersion.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "global|test-keyring|test-key|test-version-source-disk", + ExpectedScope: projectID, }, { - ExpectedType: gcpshared.CloudKMSCryptoKeyVersion.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "global|test-keyring|test-key|test-version-source-image", - ExpectedScope: projectID, + ExpectedType: gcpshared.CloudKMSCryptoKeyVersion.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "global|test-keyring|test-key|test-version-source-image", + ExpectedScope: projectID, }, { - ExpectedType: gcpshared.CloudKMSCryptoKeyVersion.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: 
"global|test-keyring|test-key|test-version-source-snapshot", - ExpectedScope: projectID, + ExpectedType: gcpshared.CloudKMSCryptoKeyVersion.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "global|test-keyring|test-key|test-version-source-snapshot", + ExpectedScope: projectID, }, { - ExpectedType: gcpshared.ComputeResourcePolicy.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "test-consistency-group-policy", - ExpectedScope: fmt.Sprintf("%s.us-central1", projectID), + ExpectedType: gcpshared.ComputeResourcePolicy.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-consistency-group-policy", + ExpectedScope: fmt.Sprintf("%s.us-central1", projectID), }, { - ExpectedType: gcpshared.ComputeImage.String(), - ExpectedMethod: sdp.QueryMethod_SEARCH, - ExpectedQuery: "projects/test-project-id/global/images/test-image", - ExpectedScope: projectID, + ExpectedType: gcpshared.ComputeImage.String(), + ExpectedMethod: sdp.QueryMethod_SEARCH, + ExpectedQuery: "projects/test-project-id/global/images/test-image", + ExpectedScope: projectID, }, } // Add the new queries we're testing queryTests := append(baseQueries, shared.QueryTest{ - ExpectedType: gcpshared.ComputeDisk.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "primary-disk", - ExpectedScope: fmt.Sprintf("%s.%s", projectID, zone), + ExpectedType: gcpshared.ComputeDisk.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "primary-disk", + ExpectedScope: fmt.Sprintf("%s.%s", projectID, zone), }, shared.QueryTest{ - ExpectedType: gcpshared.ComputeResourcePolicy.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "test-consistency-policy", - ExpectedScope: fmt.Sprintf("%s.us-central1", projectID), + ExpectedType: gcpshared.ComputeResourcePolicy.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-consistency-policy", + ExpectedScope: fmt.Sprintf("%s.us-central1", projectID), }, ) @@ -665,13 +664,13 @@ func 
TestComputeDisk(t *testing.T) { disk.AsyncSecondaryDisks = map[string]*computepb.DiskAsyncReplicationList{ "secondary-disk-1": { AsyncReplicationDisk: &computepb.DiskAsyncReplication{ - Disk: ptr.To(secondaryDisk1URL), - ConsistencyGroupPolicy: ptr.To(consistencyGroupPolicyURL), + Disk: new(secondaryDisk1URL), + ConsistencyGroupPolicy: new(consistencyGroupPolicyURL), }, }, "secondary-disk-2": { AsyncReplicationDisk: &computepb.DiskAsyncReplication{ - Disk: ptr.To(secondaryDisk2URL), + Disk: new(secondaryDisk2URL), }, }, } @@ -689,74 +688,74 @@ func TestComputeDisk(t *testing.T) { // Base queries that are always present baseQueries := shared.QueryTests{ { - ExpectedType: gcpshared.ComputeResourcePolicy.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "test-policy", - ExpectedScope: fmt.Sprintf("%s.us-central1", projectID), + ExpectedType: gcpshared.ComputeResourcePolicy.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-policy", + ExpectedScope: fmt.Sprintf("%s.us-central1", projectID), }, { - ExpectedType: gcpshared.ComputeInstance.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "test-instance", - ExpectedScope: fmt.Sprintf("%s.%s", projectID, zone), + ExpectedType: gcpshared.ComputeInstance.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-instance", + ExpectedScope: fmt.Sprintf("%s.%s", projectID, zone), }, { - ExpectedType: gcpshared.ComputeDiskType.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "pd-standard", - ExpectedScope: fmt.Sprintf("%s.%s", projectID, zone), + ExpectedType: gcpshared.ComputeDiskType.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "pd-standard", + ExpectedScope: fmt.Sprintf("%s.%s", projectID, zone), }, { - ExpectedType: gcpshared.CloudKMSCryptoKeyVersion.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "global|test-keyring|test-key|test-version-source-disk", - ExpectedScope: projectID, + ExpectedType: 
gcpshared.CloudKMSCryptoKeyVersion.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "global|test-keyring|test-key|test-version-source-disk", + ExpectedScope: projectID, }, { - ExpectedType: gcpshared.CloudKMSCryptoKeyVersion.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "global|test-keyring|test-key|test-version-source-image", - ExpectedScope: projectID, + ExpectedType: gcpshared.CloudKMSCryptoKeyVersion.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "global|test-keyring|test-key|test-version-source-image", + ExpectedScope: projectID, }, { - ExpectedType: gcpshared.CloudKMSCryptoKeyVersion.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "global|test-keyring|test-key|test-version-source-snapshot", - ExpectedScope: projectID, + ExpectedType: gcpshared.CloudKMSCryptoKeyVersion.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "global|test-keyring|test-key|test-version-source-snapshot", + ExpectedScope: projectID, }, { - ExpectedType: gcpshared.ComputeResourcePolicy.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "test-consistency-group-policy", - ExpectedScope: fmt.Sprintf("%s.us-central1", projectID), + ExpectedType: gcpshared.ComputeResourcePolicy.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-consistency-group-policy", + ExpectedScope: fmt.Sprintf("%s.us-central1", projectID), }, { - ExpectedType: gcpshared.ComputeImage.String(), - ExpectedMethod: sdp.QueryMethod_SEARCH, - ExpectedQuery: "projects/test-project-id/global/images/test-image", - ExpectedScope: projectID, + ExpectedType: gcpshared.ComputeImage.String(), + ExpectedMethod: sdp.QueryMethod_SEARCH, + ExpectedQuery: "projects/test-project-id/global/images/test-image", + ExpectedScope: projectID, }, } // Add the new queries we're testing queryTests := append(baseQueries, shared.QueryTest{ - ExpectedType: gcpshared.ComputeDisk.String(), - ExpectedMethod: sdp.QueryMethod_GET, - 
ExpectedQuery: "secondary-disk-1", - ExpectedScope: fmt.Sprintf("%s.%s", projectID, zone), + ExpectedType: gcpshared.ComputeDisk.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "secondary-disk-1", + ExpectedScope: fmt.Sprintf("%s.%s", projectID, zone), }, shared.QueryTest{ - ExpectedType: gcpshared.ComputeResourcePolicy.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "test-consistency-policy", - ExpectedScope: fmt.Sprintf("%s.us-central1", projectID), + ExpectedType: gcpshared.ComputeResourcePolicy.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-consistency-policy", + ExpectedScope: fmt.Sprintf("%s.us-central1", projectID), }, shared.QueryTest{ - ExpectedType: gcpshared.ComputeDisk.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "secondary-disk-2", - ExpectedScope: fmt.Sprintf("%s.%s", projectID, zone), + ExpectedType: gcpshared.ComputeDisk.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "secondary-disk-2", + ExpectedScope: fmt.Sprintf("%s.%s", projectID, zone), }, ) @@ -912,39 +911,39 @@ func createComputeDisk(diskName string, status computepb.Disk_Status) *computepb // sourceValue is the value to set for the source field. 
func createComputeDiskWithSource(diskName string, status computepb.Disk_Status, sourceType, sourceValue string) *computepb.Disk { disk := &computepb.Disk{ - Name: ptr.To(diskName), + Name: new(diskName), Labels: map[string]string{"env": "test"}, - Type: ptr.To("projects/test-project-id/zones/us-central1-a/diskTypes/pd-standard"), - Status: ptr.To(status.String()), + Type: new("projects/test-project-id/zones/us-central1-a/diskTypes/pd-standard"), + Status: new(status.String()), ResourcePolicies: []string{"projects/test-project-id/regions/us-central1/resourcePolicies/test-policy"}, Users: []string{"projects/test-project-id/zones/us-central1-a/instances/test-instance"}, DiskEncryptionKey: &computepb.CustomerEncryptionKey{ - KmsKeyName: ptr.To("projects/test-project-id/locations/global/keyRings/test-keyring/cryptoKeys/test-key/cryptoKeyVersions/test-version-source-disk"), - RawKey: ptr.To("test-key"), + KmsKeyName: new("projects/test-project-id/locations/global/keyRings/test-keyring/cryptoKeys/test-key/cryptoKeyVersions/test-version-source-disk"), + RawKey: new("test-key"), }, SourceImageEncryptionKey: &computepb.CustomerEncryptionKey{ - KmsKeyName: ptr.To("projects/test-project-id/locations/global/keyRings/test-keyring/cryptoKeys/test-key/cryptoKeyVersions/test-version-source-image"), - RawKey: ptr.To("test-key"), + KmsKeyName: new("projects/test-project-id/locations/global/keyRings/test-keyring/cryptoKeys/test-key/cryptoKeyVersions/test-version-source-image"), + RawKey: new("test-key"), }, SourceSnapshotEncryptionKey: &computepb.CustomerEncryptionKey{ - KmsKeyName: ptr.To("projects/test-project-id/locations/global/keyRings/test-keyring/cryptoKeys/test-key/cryptoKeyVersions/test-version-source-snapshot"), - RawKey: ptr.To("test-key"), + KmsKeyName: new("projects/test-project-id/locations/global/keyRings/test-keyring/cryptoKeys/test-key/cryptoKeyVersions/test-version-source-snapshot"), + RawKey: new("test-key"), }, - SourceConsistencyGroupPolicy: 
ptr.To("projects/test-project-id/regions/us-central1/resourcePolicies/test-consistency-group-policy"), + SourceConsistencyGroupPolicy: new("projects/test-project-id/regions/us-central1/resourcePolicies/test-consistency-group-policy"), } switch sourceType { case "image": - disk.SourceImage = ptr.To(sourceValue) + disk.SourceImage = new(sourceValue) case "snapshot": - disk.SourceSnapshot = ptr.To(sourceValue) + disk.SourceSnapshot = new(sourceValue) case "instantSnapshot": - disk.SourceInstantSnapshot = ptr.To(sourceValue) + disk.SourceInstantSnapshot = new(sourceValue) case "disk": - disk.SourceDisk = ptr.To(sourceValue) + disk.SourceDisk = new(sourceValue) default: // Default to image if unknown type - disk.SourceImage = ptr.To("projects/test-project-id/global/images/test-image") + disk.SourceImage = new("projects/test-project-id/global/images/test-image") } return disk diff --git a/sources/gcp/manual/compute-forwarding-rule.go b/sources/gcp/manual/compute-forwarding-rule.go index 2eecc0ac..81b51302 100644 --- a/sources/gcp/manual/compute-forwarding-rule.go +++ b/sources/gcp/manual/compute-forwarding-rule.go @@ -9,7 +9,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "github.com/sourcegraph/conc/pool" "google.golang.org/api/iterator" - "google.golang.org/protobuf/proto" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -193,7 +192,7 @@ func (c computeForwardingRuleWrapper) listAggregatedStream(ctx context.Context, p.Go(func(ctx context.Context) error { it := c.client.AggregatedList(ctx, &computepb.AggregatedListForwardingRulesRequest{ Project: projectID, - ReturnPartialSuccess: proto.Bool(true), // Handle partial failures gracefully + ReturnPartialSuccess: new(true), // Handle partial failures gracefully }) for { diff --git a/sources/gcp/manual/compute-forwarding-rule_test.go b/sources/gcp/manual/compute-forwarding-rule_test.go index 976cbea9..8350fb77 100644 --- a/sources/gcp/manual/compute-forwarding-rule_test.go +++ 
b/sources/gcp/manual/compute-forwarding-rule_test.go @@ -10,7 +10,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "go.uber.org/mock/gomock" "google.golang.org/api/iterator" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -249,7 +248,7 @@ func TestComputeForwardingRule(t *testing.T) { // Test with TargetHttpProxy targetURL := fmt.Sprintf("https://compute.googleapis.com/compute/v1/projects/%s/global/targetHttpProxies/test-target-proxy", projectID) forwardingRule := createForwardingRule("test-rule", projectID, region, "192.168.1.1") - forwardingRule.Target = ptr.To(targetURL) + forwardingRule.Target = new(targetURL) mockClient.EXPECT().Get(ctx, gomock.Any()).Return(forwardingRule, nil) @@ -306,7 +305,7 @@ func TestComputeForwardingRule(t *testing.T) { baseForwardingRuleURL := fmt.Sprintf("https://compute.googleapis.com/compute/v1/projects/%s/regions/%s/forwardingRules/base-forwarding-rule", projectID, region) forwardingRule := createForwardingRule("test-rule", projectID, region, "192.168.1.1") - forwardingRule.BaseForwardingRule = ptr.To(baseForwardingRuleURL) + forwardingRule.BaseForwardingRule = new(baseForwardingRuleURL) mockClient.EXPECT().Get(ctx, gomock.Any()).Return(forwardingRule, nil) @@ -363,7 +362,7 @@ func TestComputeForwardingRule(t *testing.T) { ipCollectionURL := fmt.Sprintf("projects/%s/regions/%s/publicDelegatedPrefixes/test-prefix", projectID, region) forwardingRule := createForwardingRule("test-rule", projectID, region, "192.168.1.1") - forwardingRule.IpCollection = ptr.To(ipCollectionURL) + forwardingRule.IpCollection = new(ipCollectionURL) mockClient.EXPECT().Get(ctx, gomock.Any()).Return(forwardingRule, nil) @@ -423,8 +422,8 @@ func TestComputeForwardingRule(t *testing.T) { forwardingRule := createForwardingRule("test-rule", projectID, region, "192.168.1.1") forwardingRule.ServiceDirectoryRegistrations = []*computepb.ForwardingRuleServiceDirectoryRegistration{ { - Namespace: 
ptr.To(namespaceURL), - Service: ptr.To(serviceName), + Namespace: new(namespaceURL), + Service: new(serviceName), }, } @@ -489,11 +488,11 @@ func TestComputeForwardingRule(t *testing.T) { func createForwardingRule(name, projectID, region, ipAddress string) *computepb.ForwardingRule { return &computepb.ForwardingRule{ - Name: ptr.To(name), - IPAddress: ptr.To(ipAddress), + Name: new(name), + IPAddress: new(ipAddress), Labels: map[string]string{"env": "test"}, - Network: ptr.To(fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/global/networks/test-network", projectID)), - Subnetwork: ptr.To(fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/regions/%s/subnetworks/test-subnetwork", projectID, region)), - BackendService: ptr.To(fmt.Sprintf("https://compute.googleapis.com/compute/v1/projects/%s/regions/%s/backendServices/backend-service", projectID, region)), + Network: new(fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/global/networks/test-network", projectID)), + Subnetwork: new(fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/regions/%s/subnetworks/test-subnetwork", projectID, region)), + BackendService: new(fmt.Sprintf("https://compute.googleapis.com/compute/v1/projects/%s/regions/%s/backendServices/backend-service", projectID, region)), } } diff --git a/sources/gcp/manual/compute-healthcheck.go b/sources/gcp/manual/compute-healthcheck.go index b24e0ae6..fa3ed72c 100644 --- a/sources/gcp/manual/compute-healthcheck.go +++ b/sources/gcp/manual/compute-healthcheck.go @@ -5,12 +5,12 @@ import ( "errors" "fmt" "net" + "slices" "sync/atomic" "cloud.google.com/go/compute/apiv1/computepb" "github.com/sourcegraph/conc/pool" "google.golang.org/api/iterator" - "google.golang.org/protobuf/proto" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -67,10 +67,8 @@ func (c computeHealthCheckWrapper) validateAndParseScope(scope string) (gcpshare allLocations := 
append([]gcpshared.LocationInfo{}, c.projectLocations...) allLocations = append(allLocations, c.regionLocations...) - for _, configuredLoc := range allLocations { - if location.Equals(configuredLoc) { - return location, nil - } + if slices.ContainsFunc(allLocations, location.Equals) { + return location, nil } return gcpshared.LocationInfo{}, &sdp.QueryError{ @@ -269,7 +267,7 @@ func (c computeHealthCheckWrapper) listAggregatedStream(ctx context.Context, str p.Go(func(ctx context.Context) error { it := c.globalClient.AggregatedList(ctx, &computepb.AggregatedListHealthChecksRequest{ Project: projectID, - ReturnPartialSuccess: proto.Bool(true), // Handle partial failures gracefully + ReturnPartialSuccess: new(true), // Handle partial failures gracefully }) for { diff --git a/sources/gcp/manual/compute-healthcheck_test.go b/sources/gcp/manual/compute-healthcheck_test.go index 5df60f4f..ec0839d3 100644 --- a/sources/gcp/manual/compute-healthcheck_test.go +++ b/sources/gcp/manual/compute-healthcheck_test.go @@ -11,7 +11,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "go.uber.org/mock/gomock" "google.golang.org/api/iterator" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -455,52 +454,52 @@ func TestComputeHealthCheck(t *testing.T) { func createHealthCheck(healthCheckName string) *computepb.HealthCheck { return &computepb.HealthCheck{ - Name: ptr.To(healthCheckName), - CheckIntervalSec: ptr.To(int32(5)), - TimeoutSec: ptr.To(int32(5)), - Type: ptr.To("TCP"), + Name: new(healthCheckName), + CheckIntervalSec: new(int32(5)), + TimeoutSec: new(int32(5)), + Type: new("TCP"), TcpHealthCheck: &computepb.TCPHealthCheck{ - Port: ptr.To(int32(80)), + Port: new(int32(80)), }, } } func createHTTPHealthCheck(healthCheckName, host string) *computepb.HealthCheck { return &computepb.HealthCheck{ - Name: ptr.To(healthCheckName), - CheckIntervalSec: ptr.To(int32(5)), - TimeoutSec: ptr.To(int32(5)), - Type: 
ptr.To("HTTP"), + Name: new(healthCheckName), + CheckIntervalSec: new(int32(5)), + TimeoutSec: new(int32(5)), + Type: new("HTTP"), HttpHealthCheck: &computepb.HTTPHealthCheck{ - Port: ptr.To(int32(80)), - Host: ptr.To(host), - RequestPath: ptr.To("/"), + Port: new(int32(80)), + Host: new(host), + RequestPath: new("/"), }, } } func createHTTPSHealthCheck(healthCheckName, host string) *computepb.HealthCheck { return &computepb.HealthCheck{ - Name: ptr.To(healthCheckName), - CheckIntervalSec: ptr.To(int32(5)), - TimeoutSec: ptr.To(int32(5)), - Type: ptr.To("HTTPS"), + Name: new(healthCheckName), + CheckIntervalSec: new(int32(5)), + TimeoutSec: new(int32(5)), + Type: new("HTTPS"), HttpsHealthCheck: &computepb.HTTPSHealthCheck{ - Port: ptr.To(int32(443)), - Host: ptr.To(host), - RequestPath: ptr.To("/"), + Port: new(int32(443)), + Host: new(host), + RequestPath: new("/"), }, } } func createHealthCheckWithSourceRegions(healthCheckName string, regions []string) *computepb.HealthCheck { return &computepb.HealthCheck{ - Name: ptr.To(healthCheckName), - CheckIntervalSec: ptr.To(int32(30)), - TimeoutSec: ptr.To(int32(5)), - Type: ptr.To("TCP"), + Name: new(healthCheckName), + CheckIntervalSec: new(int32(30)), + TimeoutSec: new(int32(5)), + Type: new("TCP"), TcpHealthCheck: &computepb.TCPHealthCheck{ - Port: ptr.To(int32(80)), + Port: new(int32(80)), }, SourceRegions: regions, } @@ -508,13 +507,13 @@ func createHealthCheckWithSourceRegions(healthCheckName string, regions []string func createRegionalHealthCheck(healthCheckName, region string) *computepb.HealthCheck { return &computepb.HealthCheck{ - Name: ptr.To(healthCheckName), - CheckIntervalSec: ptr.To(int32(5)), - TimeoutSec: ptr.To(int32(5)), - Type: ptr.To("TCP"), + Name: new(healthCheckName), + CheckIntervalSec: new(int32(5)), + TimeoutSec: new(int32(5)), + Type: new("TCP"), TcpHealthCheck: &computepb.TCPHealthCheck{ - Port: ptr.To(int32(80)), + Port: new(int32(80)), }, - Region: ptr.To(region), + Region: new(region), } 
} diff --git a/sources/gcp/manual/compute-image_test.go b/sources/gcp/manual/compute-image_test.go index 16567d49..a205ad58 100644 --- a/sources/gcp/manual/compute-image_test.go +++ b/sources/gcp/manual/compute-image_test.go @@ -12,7 +12,6 @@ import ( "google.golang.org/api/iterator" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -389,7 +388,7 @@ func TestComputeImage(t *testing.T) { expectedImageName := "test-image-family-20240101" // When searching by name (not URI), Search tries Get first, then falls back to GetFromFamily - mockClient.EXPECT().Get(ctx, gomock.Any()).DoAndReturn(func(ctx context.Context, req *computepb.GetImageRequest, opts ...interface{}) (*computepb.Image, error) { + mockClient.EXPECT().Get(ctx, gomock.Any()).DoAndReturn(func(ctx context.Context, req *computepb.GetImageRequest, opts ...any) (*computepb.Image, error) { if req.GetProject() != projectID { t.Errorf("Expected project %s, got %s", projectID, req.GetProject()) } @@ -399,7 +398,7 @@ func TestComputeImage(t *testing.T) { return nil, status.Error(codes.NotFound, "image not found") }) - mockClient.EXPECT().GetFromFamily(ctx, gomock.Any()).DoAndReturn(func(ctx context.Context, req *computepb.GetFromFamilyImageRequest, opts ...interface{}) (*computepb.Image, error) { + mockClient.EXPECT().GetFromFamily(ctx, gomock.Any()).DoAndReturn(func(ctx context.Context, req *computepb.GetFromFamilyImageRequest, opts ...any) (*computepb.Image, error) { if req.GetProject() != projectID { t.Errorf("Expected project %s, got %s", projectID, req.GetProject()) } @@ -457,7 +456,7 @@ func TestComputeImage(t *testing.T) { familyURI := "projects/" + projectID + "/global/images/family/test-image-family" expectedImageName := "test-image-family-20240101" - mockClient.EXPECT().GetFromFamily(ctx, gomock.Any()).DoAndReturn(func(ctx context.Context, req *computepb.GetFromFamilyImageRequest, opts 
...interface{}) (*computepb.Image, error) { + mockClient.EXPECT().GetFromFamily(ctx, gomock.Any()).DoAndReturn(func(ctx context.Context, req *computepb.GetFromFamilyImageRequest, opts ...any) (*computepb.Image, error) { if req.GetProject() != projectID { t.Errorf("Expected project %s, got %s", projectID, req.GetProject()) } @@ -498,7 +497,7 @@ func TestComputeImage(t *testing.T) { imageURI := "projects/" + projectID + "/global/images/test-image-exact" expectedImageName := "test-image-exact" - mockClient.EXPECT().Get(ctx, gomock.Any()).DoAndReturn(func(ctx context.Context, req *computepb.GetImageRequest, opts ...interface{}) (*computepb.Image, error) { + mockClient.EXPECT().Get(ctx, gomock.Any()).DoAndReturn(func(ctx context.Context, req *computepb.GetImageRequest, opts ...any) (*computepb.Image, error) { if req.GetProject() != projectID { t.Errorf("Expected project %s, got %s", projectID, req.GetProject()) } @@ -542,7 +541,7 @@ func TestComputeImage(t *testing.T) { expectedImageName := "test-image-name" // First Get call fails with NotFound - mockClient.EXPECT().Get(ctx, gomock.Any()).DoAndReturn(func(ctx context.Context, req *computepb.GetImageRequest, opts ...interface{}) (*computepb.Image, error) { + mockClient.EXPECT().Get(ctx, gomock.Any()).DoAndReturn(func(ctx context.Context, req *computepb.GetImageRequest, opts ...any) (*computepb.Image, error) { if req.GetProject() != projectID { t.Errorf("Expected project %s, got %s", projectID, req.GetProject()) } @@ -553,7 +552,7 @@ func TestComputeImage(t *testing.T) { }) // Then GetFromFamily succeeds (treating name as family) - mockClient.EXPECT().GetFromFamily(ctx, gomock.Any()).DoAndReturn(func(ctx context.Context, req *computepb.GetFromFamilyImageRequest, opts ...interface{}) (*computepb.Image, error) { + mockClient.EXPECT().GetFromFamily(ctx, gomock.Any()).DoAndReturn(func(ctx context.Context, req *computepb.GetFromFamilyImageRequest, opts ...any) (*computepb.Image, error) { if req.GetProject() != projectID { 
t.Errorf("Expected project %s, got %s", projectID, req.GetProject()) } @@ -593,7 +592,7 @@ func TestComputeImage(t *testing.T) { exactImageName := "test-image-exact" - mockClient.EXPECT().Get(ctx, gomock.Any()).DoAndReturn(func(ctx context.Context, req *computepb.GetImageRequest, opts ...interface{}) (*computepb.Image, error) { + mockClient.EXPECT().Get(ctx, gomock.Any()).DoAndReturn(func(ctx context.Context, req *computepb.GetImageRequest, opts ...any) (*computepb.Image, error) { if req.GetProject() != projectID { t.Errorf("Expected project %s, got %s", projectID, req.GetProject()) } @@ -625,9 +624,9 @@ func TestComputeImage(t *testing.T) { func createComputeImage(imageName string, status computepb.Image_Status) *computepb.Image { return &computepb.Image{ - Name: ptr.To(imageName), + Name: new(imageName), Labels: map[string]string{"env": "test"}, - Status: ptr.To(status.String()), + Status: new(status.String()), } } @@ -639,9 +638,9 @@ func createComputeImageWithLinks(projectID, imageName string, status computepb.I replacementImageURL := fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/global/images/test-replacement-image", projectID) return &computepb.Image{ - Name: ptr.To(imageName), + Name: new(imageName), Labels: map[string]string{"env": "test"}, - Status: ptr.To(status.String()), + Status: new(status.String()), SourceDisk: &sourceDiskURL, SourceSnapshot: &sourceSnapshotURL, SourceImage: &sourceImageURL, @@ -650,19 +649,19 @@ func createComputeImageWithLinks(projectID, imageName string, status computepb.I fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/global/licenses/test-license-2", projectID), }, RawDisk: &computepb.RawDisk{ - Source: ptr.To(fmt.Sprintf("gs://%s-raw-disk-bucket/raw-disk.tar.gz", projectID)), + Source: new(fmt.Sprintf("gs://%s-raw-disk-bucket/raw-disk.tar.gz", projectID)), }, ImageEncryptionKey: &computepb.CustomerEncryptionKey{ - KmsKeyName: 
ptr.To(fmt.Sprintf("projects/%s/locations/global/keyRings/test-keyring/cryptoKeys/test-image-key/cryptoKeyVersions/test-version-image", projectID)), - KmsKeyServiceAccount: ptr.To(fmt.Sprintf("projects/%s/serviceAccounts/test-image-kms-sa@%s.iam.gserviceaccount.com", projectID, projectID)), + KmsKeyName: new(fmt.Sprintf("projects/%s/locations/global/keyRings/test-keyring/cryptoKeys/test-image-key/cryptoKeyVersions/test-version-image", projectID)), + KmsKeyServiceAccount: new(fmt.Sprintf("projects/%s/serviceAccounts/test-image-kms-sa@%s.iam.gserviceaccount.com", projectID, projectID)), }, SourceImageEncryptionKey: &computepb.CustomerEncryptionKey{ - KmsKeyName: ptr.To(fmt.Sprintf("projects/%s/locations/global/keyRings/test-keyring/cryptoKeys/test-source-image-key/cryptoKeyVersions/test-version-source-image", projectID)), - KmsKeyServiceAccount: ptr.To(fmt.Sprintf("projects/%s/serviceAccounts/test-source-image-kms-sa@%s.iam.gserviceaccount.com", projectID, projectID)), + KmsKeyName: new(fmt.Sprintf("projects/%s/locations/global/keyRings/test-keyring/cryptoKeys/test-source-image-key/cryptoKeyVersions/test-version-source-image", projectID)), + KmsKeyServiceAccount: new(fmt.Sprintf("projects/%s/serviceAccounts/test-source-image-kms-sa@%s.iam.gserviceaccount.com", projectID, projectID)), }, SourceSnapshotEncryptionKey: &computepb.CustomerEncryptionKey{ - KmsKeyName: ptr.To(fmt.Sprintf("projects/%s/locations/global/keyRings/test-keyring/cryptoKeys/test-source-snapshot-key/cryptoKeyVersions/test-version-source-snapshot", projectID)), - KmsKeyServiceAccount: ptr.To(fmt.Sprintf("projects/%s/serviceAccounts/test-source-snapshot-kms-sa@%s.iam.gserviceaccount.com", projectID, projectID)), + KmsKeyName: new(fmt.Sprintf("projects/%s/locations/global/keyRings/test-keyring/cryptoKeys/test-source-snapshot-key/cryptoKeyVersions/test-version-source-snapshot", projectID)), + KmsKeyServiceAccount: 
new(fmt.Sprintf("projects/%s/serviceAccounts/test-source-snapshot-kms-sa@%s.iam.gserviceaccount.com", projectID, projectID)), }, Deprecated: &computepb.DeprecationStatus{ Replacement: &replacementImageURL, diff --git a/sources/gcp/manual/compute-instance-group-manager.go b/sources/gcp/manual/compute-instance-group-manager.go index bb6cf3e5..ef7f5269 100644 --- a/sources/gcp/manual/compute-instance-group-manager.go +++ b/sources/gcp/manual/compute-instance-group-manager.go @@ -8,7 +8,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "github.com/sourcegraph/conc/pool" "google.golang.org/api/iterator" - "google.golang.org/protobuf/proto" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -189,7 +188,7 @@ func (c computeInstanceGroupManagerWrapper) listAggregatedStream(ctx context.Con p.Go(func(ctx context.Context) error { it := c.client.AggregatedList(ctx, &computepb.AggregatedListInstanceGroupManagersRequest{ Project: projectID, - ReturnPartialSuccess: proto.Bool(true), // Handle partial failures gracefully + ReturnPartialSuccess: new(true), // Handle partial failures gracefully }) for { diff --git a/sources/gcp/manual/compute-instance-group-manager_test.go b/sources/gcp/manual/compute-instance-group-manager_test.go index ea85e239..63fcf17b 100644 --- a/sources/gcp/manual/compute-instance-group-manager_test.go +++ b/sources/gcp/manual/compute-instance-group-manager_test.go @@ -10,7 +10,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "go.uber.org/mock/gomock" "google.golang.org/api/iterator" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -150,26 +149,26 @@ func TestComputeInstanceGroupManager(t *testing.T) { t.Run("VersionsWithInstanceTemplates", func(t *testing.T) { // Create IGM with versions array containing multiple templates igm := &computepb.InstanceGroupManager{ - Name: ptr.To("test-instance-group-manager"), + Name: 
new("test-instance-group-manager"), Status: &computepb.InstanceGroupManagerStatus{ - IsStable: ptr.To(true), + IsStable: new(true), }, Versions: []*computepb.InstanceGroupManagerVersion{ { - Name: ptr.To("canary"), - InstanceTemplate: ptr.To("https://www.googleapis.com/compute/v1/projects/test-project-id/global/instanceTemplates/canary-template"), + Name: new("canary"), + InstanceTemplate: new("https://www.googleapis.com/compute/v1/projects/test-project-id/global/instanceTemplates/canary-template"), }, { - Name: ptr.To("stable"), - InstanceTemplate: ptr.To("https://www.googleapis.com/compute/v1/projects/test-project-id/regions/us-central1/instanceTemplates/stable-template"), + Name: new("stable"), + InstanceTemplate: new("https://www.googleapis.com/compute/v1/projects/test-project-id/regions/us-central1/instanceTemplates/stable-template"), }, }, - InstanceGroup: ptr.To("projects/test-project-id/zones/us-central1-a/instanceGroups/test-group"), + InstanceGroup: new("projects/test-project-id/zones/us-central1-a/instanceGroups/test-group"), TargetPools: []string{ "https://www.googleapis.com/compute/v1/projects/test-project-id/regions/us-central1/targetPools/test-pool", }, ResourcePolicies: &computepb.InstanceGroupManagerResourcePolicies{ - WorkloadPolicy: ptr.To("https://www.googleapis.com/compute/v1/projects/test-project-id/regions/us-central1/resourcePolicies/test-policy"), + WorkloadPolicy: new("https://www.googleapis.com/compute/v1/projects/test-project-id/regions/us-central1/resourcePolicies/test-policy"), }, } @@ -223,24 +222,24 @@ func TestComputeInstanceGroupManager(t *testing.T) { t.Run("AutoHealingPoliciesWithHealthCheck", func(t *testing.T) { // Create IGM with auto-healing policy containing health check igm := &computepb.InstanceGroupManager{ - Name: ptr.To("test-instance-group-manager"), + Name: new("test-instance-group-manager"), Status: &computepb.InstanceGroupManagerStatus{ - IsStable: ptr.To(true), + IsStable: new(true), }, - Zone: 
ptr.To("https://www.googleapis.com/compute/v1/projects/test-project-id/zones/us-central1-a"), - InstanceTemplate: ptr.To(instanceTemplateName), - InstanceGroup: ptr.To("projects/test-project-id/zones/us-central1-a/instanceGroups/test-group"), + Zone: new("https://www.googleapis.com/compute/v1/projects/test-project-id/zones/us-central1-a"), + InstanceTemplate: new(instanceTemplateName), + InstanceGroup: new("projects/test-project-id/zones/us-central1-a/instanceGroups/test-group"), AutoHealingPolicies: []*computepb.InstanceGroupManagerAutoHealingPolicy{ { - HealthCheck: ptr.To("https://www.googleapis.com/compute/v1/projects/test-project-id/global/healthChecks/test-health-check"), - InitialDelaySec: ptr.To[int32](300), + HealthCheck: new("https://www.googleapis.com/compute/v1/projects/test-project-id/global/healthChecks/test-health-check"), + InitialDelaySec: new(int32(300)), }, }, TargetPools: []string{ "https://www.googleapis.com/compute/v1/projects/test-project-id/regions/us-central1/targetPools/test-pool", }, ResourcePolicies: &computepb.InstanceGroupManagerResourcePolicies{ - WorkloadPolicy: ptr.To("https://www.googleapis.com/compute/v1/projects/test-project-id/regions/us-central1/resourcePolicies/test-policy"), + WorkloadPolicy: new("https://www.googleapis.com/compute/v1/projects/test-project-id/regions/us-central1/resourcePolicies/test-policy"), }, } @@ -500,18 +499,18 @@ func TestComputeInstanceGroupManager(t *testing.T) { func createInstanceGroupManager(name string, isStable bool, instanceTemplate string) *computepb.InstanceGroupManager { return &computepb.InstanceGroupManager{ - Name: ptr.To(name), + Name: new(name), Status: &computepb.InstanceGroupManagerStatus{ - IsStable: ptr.To(isStable), + IsStable: new(isStable), }, - Zone: ptr.To("https://www.googleapis.com/compute/v1/projects/test-project-id/zones/us-central1-a"), - InstanceTemplate: ptr.To(instanceTemplate), - InstanceGroup: 
ptr.To("projects/test-project-id/zones/us-central1-a/instanceGroups/test-group"), + Zone: new("https://www.googleapis.com/compute/v1/projects/test-project-id/zones/us-central1-a"), + InstanceTemplate: new(instanceTemplate), + InstanceGroup: new("projects/test-project-id/zones/us-central1-a/instanceGroups/test-group"), TargetPools: []string{ "https://www.googleapis.com/compute/v1/projects/test-project-id/regions/us-central1/targetPools/test-pool", }, ResourcePolicies: &computepb.InstanceGroupManagerResourcePolicies{ - WorkloadPolicy: ptr.To("https://www.googleapis.com/compute/v1/projects/test-project-id/regions/us-central1/resourcePolicies/test-policy"), + WorkloadPolicy: new("https://www.googleapis.com/compute/v1/projects/test-project-id/regions/us-central1/resourcePolicies/test-policy"), }, } } diff --git a/sources/gcp/manual/compute-instance-group.go b/sources/gcp/manual/compute-instance-group.go index 82184f13..06d1d10c 100644 --- a/sources/gcp/manual/compute-instance-group.go +++ b/sources/gcp/manual/compute-instance-group.go @@ -8,7 +8,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "github.com/sourcegraph/conc/pool" "google.golang.org/api/iterator" - "google.golang.org/protobuf/proto" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -179,7 +178,7 @@ func (c computeInstanceGroupWrapper) listAggregatedStream(ctx context.Context, s p.Go(func(ctx context.Context) error { it := c.client.AggregatedList(ctx, &computepb.AggregatedListInstanceGroupsRequest{ Project: projectID, - ReturnPartialSuccess: proto.Bool(true), // Handle partial failures gracefully + ReturnPartialSuccess: new(true), // Handle partial failures gracefully }) for { diff --git a/sources/gcp/manual/compute-instance-group_test.go b/sources/gcp/manual/compute-instance-group_test.go index 9fcfc6ef..13891ac7 100644 --- a/sources/gcp/manual/compute-instance-group_test.go +++ b/sources/gcp/manual/compute-instance-group_test.go @@ -10,7 +10,6 @@ import ( 
"cloud.google.com/go/compute/apiv1/computepb" "go.uber.org/mock/gomock" "google.golang.org/api/iterator" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -233,9 +232,9 @@ func TestComputeInstanceGroup(t *testing.T) { func createComputeInstanceGroup(name, network, subnetwork, projectID, zone string) *computepb.InstanceGroup { return &computepb.InstanceGroup{ - Name: ptr.To(name), - Network: ptr.To(fmt.Sprintf("projects/%s/global/networks/%s", projectID, network)), - Subnetwork: ptr.To(fmt.Sprintf("projects/%s/regions/us-central1/subnetworks/%s", projectID, subnetwork)), - Zone: ptr.To(fmt.Sprintf("projects/%s/zones/%s", projectID, zone)), + Name: new(name), + Network: new(fmt.Sprintf("projects/%s/global/networks/%s", projectID, network)), + Subnetwork: new(fmt.Sprintf("projects/%s/regions/us-central1/subnetworks/%s", projectID, subnetwork)), + Zone: new(fmt.Sprintf("projects/%s/zones/%s", projectID, zone)), } } diff --git a/sources/gcp/manual/compute-instance.go b/sources/gcp/manual/compute-instance.go index 240d3728..32c08658 100644 --- a/sources/gcp/manual/compute-instance.go +++ b/sources/gcp/manual/compute-instance.go @@ -9,7 +9,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "github.com/sourcegraph/conc/pool" "google.golang.org/api/iterator" - "google.golang.org/protobuf/proto" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -21,6 +20,7 @@ import ( ) var ComputeInstanceLookupByName = shared.NewItemTypeLookup("name", gcpshared.ComputeInstance) +var ComputeInstanceLookupByNetworkTag = shared.NewItemTypeLookup("networkTag", gcpshared.ComputeInstance) type computeInstanceWrapper struct { client gcpshared.ComputeInstanceClient @@ -67,6 +67,8 @@ func (c computeInstanceWrapper) PotentialLinks() map[shared.ItemType]bool { gcpshared.ComputeInstanceTemplate, gcpshared.ComputeRegionInstanceTemplate, gcpshared.ComputeInstanceGroupManager, + gcpshared.ComputeFirewall, 
+ gcpshared.ComputeRoute, ) } @@ -92,6 +94,57 @@ func (c computeInstanceWrapper) SupportsWildcardScope() bool { return true } +func (c computeInstanceWrapper) SearchLookups() []sources.ItemTypeLookups { + return []sources.ItemTypeLookups{ + {ComputeInstanceLookupByNetworkTag}, + } +} + +// Search finds compute instances by network tag. The engine routes +// project-scoped SEARCH queries to zonal scopes via substring matching, so +// scope is a zonal scope like "project.zone". We list all instances via +// AggregatedList and filter to the matching zone + tag. +func (c computeInstanceWrapper) Search(ctx context.Context, scope string, queryParts ...string) ([]*sdp.Item, *sdp.QueryError) { + tag := queryParts[0] + + allItems, qErr := c.List(ctx, "*") + if qErr != nil { + return nil, qErr + } + + var matched []*sdp.Item + for _, item := range allItems { + if item.GetScope() != scope { + continue + } + + tagsVal, err := item.GetAttributes().Get("tags") + if err != nil { + continue + } + tagsMap, ok := tagsVal.(map[string]any) + if !ok { + continue + } + itemsVal, ok := tagsMap["items"] + if !ok { + continue + } + itemsList, ok := itemsVal.([]any) + if !ok { + continue + } + for _, t := range itemsList { + if s, ok := t.(string); ok && s == tag { + matched = append(matched, item) + break + } + } + } + + return matched, nil +} + func (c computeInstanceWrapper) Get(ctx context.Context, scope string, queryParts ...string) (*sdp.Item, *sdp.QueryError) { location, err := c.LocationFromScope(scope) if err != nil { @@ -194,7 +247,7 @@ func (c computeInstanceWrapper) listAggregatedStream(ctx context.Context, stream p.Go(func(ctx context.Context) error { it := c.client.AggregatedList(ctx, &computepb.AggregatedListInstancesRequest{ Project: projectID, - ReturnPartialSuccess: proto.Bool(true), // Handle partial failures gracefully + ReturnPartialSuccess: new(true), // Handle partial failures gracefully }) for { @@ -598,6 +651,36 @@ func (c computeInstanceWrapper) 
gcpComputeInstanceToSDPItem(ctx context.Context, } } + // Link to firewalls and routes by network tag. + // Tag-based SEARCH lists all firewalls/routes in scope then filters; + // may be slow in very large projects. + if tags := instance.GetTags(); tags != nil { + for _, tag := range tags.GetItems() { + tag = strings.TrimSpace(tag) + if tag == "" { + continue + } + sdpItem.LinkedItemQueries = append(sdpItem.LinkedItemQueries, + &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: gcpshared.ComputeFirewall.String(), + Method: sdp.QueryMethod_SEARCH, + Query: tag, + Scope: location.ProjectID, + }, + }, + &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: gcpshared.ComputeRoute.String(), + Method: sdp.QueryMethod_SEARCH, + Query: tag, + Scope: location.ProjectID, + }, + }, + ) + } + } + // Set health based on status switch instance.GetStatus() { case computepb.Instance_RUNNING.String(): diff --git a/sources/gcp/manual/compute-instance_test.go b/sources/gcp/manual/compute-instance_test.go index f441a8e2..5818f8f2 100644 --- a/sources/gcp/manual/compute-instance_test.go +++ b/sources/gcp/manual/compute-instance_test.go @@ -10,7 +10,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "go.uber.org/mock/gomock" "google.golang.org/api/iterator" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -255,9 +254,9 @@ func TestComputeInstance(t *testing.T) { t.Fatalf("Expected 2 items, got: %d", len(items)) } - _, ok = adapter.(discovery.SearchStreamableAdapter) - if ok { - t.Fatalf("Adapter should not support SearchStream operation") + _, ok = adapter.(discovery.SearchableAdapter) + if !ok { + t.Fatalf("Adapter should support Search operation (for network tag search)") } }) @@ -372,16 +371,16 @@ func TestComputeInstance(t *testing.T) { instance := createComputeInstance("test-instance", computepb.Instance_RUNNING) instance.Disks = []*computepb.AttachedDisk{ { - DeviceName: ptr.To("test-disk"), - Source: 
ptr.To(fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/zones/%s/disks/test-instance", projectID, zone)), + DeviceName: new("test-disk"), + Source: new(fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/zones/%s/disks/test-instance", projectID, zone)), InitializeParams: &computepb.AttachedDiskInitializeParams{ - SourceImage: ptr.To(sourceImageURL), - SourceSnapshot: ptr.To(sourceSnapshotURL), + SourceImage: new(sourceImageURL), + SourceSnapshot: new(sourceSnapshotURL), SourceImageEncryptionKey: &computepb.CustomerEncryptionKey{ - KmsKeyName: ptr.To(sourceImageKeyName), + KmsKeyName: new(sourceImageKeyName), }, SourceSnapshotEncryptionKey: &computepb.CustomerEncryptionKey{ - KmsKeyName: ptr.To(sourceSnapshotKeyName), + KmsKeyName: new(sourceSnapshotKeyName), }, }, }, @@ -478,10 +477,10 @@ func TestComputeInstance(t *testing.T) { instance := createComputeInstance("test-instance", computepb.Instance_RUNNING) instance.Disks = []*computepb.AttachedDisk{ { - DeviceName: ptr.To("test-disk"), - Source: ptr.To(fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/zones/%s/disks/test-instance", projectID, zone)), + DeviceName: new("test-disk"), + Source: new(fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/zones/%s/disks/test-instance", projectID, zone)), DiskEncryptionKey: &computepb.CustomerEncryptionKey{ - KmsKeyName: ptr.To(diskKeyName), + KmsKeyName: new(diskKeyName), }, }, } @@ -557,10 +556,10 @@ func TestComputeInstance(t *testing.T) { instance := createComputeInstance("test-instance", computepb.Instance_RUNNING) instance.Disks = []*computepb.AttachedDisk{ { - DeviceName: ptr.To("test-disk"), - Source: ptr.To(fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/zones/%s/disks/test-instance", projectID, zone)), + DeviceName: new("test-disk"), + Source: new(fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/zones/%s/disks/test-instance", projectID, zone)), DiskEncryptionKey: 
&computepb.CustomerEncryptionKey{ - KmsKeyName: ptr.To(diskKeyName), + KmsKeyName: new(diskKeyName), }, }, } @@ -636,7 +635,7 @@ func TestComputeInstance(t *testing.T) { instance := createComputeInstance("test-instance", computepb.Instance_RUNNING) instance.ServiceAccounts = []*computepb.ServiceAccount{ { - Email: ptr.To(serviceAccountEmail), + Email: new(serviceAccountEmail), }, } @@ -715,12 +714,12 @@ func TestComputeInstance(t *testing.T) { instance.Metadata = &computepb.Metadata{ Items: []*computepb.Items{ { - Key: ptr.To("instance-template"), - Value: ptr.To(instanceTemplateURI), + Key: new("instance-template"), + Value: new(instanceTemplateURI), }, { - Key: ptr.To("created-by"), - Value: ptr.To(igmURI), + Key: new("created-by"), + Value: new(igmURI), }, }, } @@ -806,8 +805,8 @@ func TestComputeInstance(t *testing.T) { instance.Metadata = &computepb.Metadata{ Items: []*computepb.Items{ { - Key: ptr.To("instance-template"), - Value: ptr.To(instanceTemplateURI), + Key: new("instance-template"), + Value: new(instanceTemplateURI), }, }, } @@ -876,6 +875,70 @@ func TestComputeInstance(t *testing.T) { }) }) + t.Run("GetWithNetworkTags", func(t *testing.T) { + wrapper := manual.NewComputeInstance(mockClient, []gcpshared.LocationInfo{gcpshared.NewZonalLocation(projectID, zone)}) + + instance := createComputeInstance("test-instance", computepb.Instance_RUNNING) + instance.Tags = &computepb.Tags{ + Items: []string{"web-server", "http-server"}, + } + + mockClient.EXPECT().Get(ctx, gomock.Any()).Return(instance, nil) + + adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) + + sdpItem, qErr := adapter.Get(ctx, wrapper.Scopes()[0], "test-instance", true) + if qErr != nil { + t.Fatalf("Expected no error, got: %v", qErr) + } + + // Verify SEARCH links to ComputeFirewall and ComputeRoute for each tag + tagLinkTests := shared.QueryTests{ + { + ExpectedType: gcpshared.ComputeFirewall.String(), + ExpectedMethod: sdp.QueryMethod_SEARCH, + ExpectedQuery: 
"web-server", + ExpectedScope: projectID, + }, + { + ExpectedType: gcpshared.ComputeRoute.String(), + ExpectedMethod: sdp.QueryMethod_SEARCH, + ExpectedQuery: "web-server", + ExpectedScope: projectID, + }, + { + ExpectedType: gcpshared.ComputeFirewall.String(), + ExpectedMethod: sdp.QueryMethod_SEARCH, + ExpectedQuery: "http-server", + ExpectedScope: projectID, + }, + { + ExpectedType: gcpshared.ComputeRoute.String(), + ExpectedMethod: sdp.QueryMethod_SEARCH, + ExpectedQuery: "http-server", + ExpectedScope: projectID, + }, + } + + for _, qt := range tagLinkTests { + found := false + for _, liq := range sdpItem.GetLinkedItemQueries() { + q := liq.GetQuery() + if q.GetType() == qt.ExpectedType && + q.GetMethod() == qt.ExpectedMethod && + q.GetQuery() == qt.ExpectedQuery && + q.GetScope() == qt.ExpectedScope { + found = true + break + } + } + if !found { + t.Errorf("Missing LinkedItemQuery{type=%s, method=%s, query=%s, scope=%s}", + qt.ExpectedType, qt.ExpectedMethod, qt.ExpectedQuery, qt.ExpectedScope) + } + } + }) + t.Run("SupportsWildcardScope", func(t *testing.T) { wrapper := manual.NewComputeInstance(mockClient, []gcpshared.LocationInfo{gcpshared.NewZonalLocation(projectID, zone)}) adapter := sources.WrapperToAdapter(wrapper, sdpcache.NewNoOpCache()) @@ -1017,23 +1080,23 @@ func TestComputeInstance(t *testing.T) { func createComputeInstance(instanceName string, status computepb.Instance_Status) *computepb.Instance { return &computepb.Instance{ - Name: ptr.To(instanceName), + Name: new(instanceName), Labels: map[string]string{"env": "test"}, Disks: []*computepb.AttachedDisk{ { - DeviceName: ptr.To("test-disk"), - Source: ptr.To("https://www.googleapis.com/compute/v1/projects/test-project-id/zones/us-central1-a/disks/test-instance"), + DeviceName: new("test-disk"), + Source: new("https://www.googleapis.com/compute/v1/projects/test-project-id/zones/us-central1-a/disks/test-instance"), }, }, NetworkInterfaces: []*computepb.NetworkInterface{ { - NetworkIP: 
ptr.To("192.168.1.3"), - Subnetwork: ptr.To("projects/test-project-id/regions/us-central1/subnetworks/default"), - Network: ptr.To("https://www.googleapis.com/compute/v1/projects/test-project-id/global/networks/network"), - Ipv6Address: ptr.To("2001:0db8:85a3:0000:0000:8a2e:0370:7334"), + NetworkIP: new("192.168.1.3"), + Subnetwork: new("projects/test-project-id/regions/us-central1/subnetworks/default"), + Network: new("https://www.googleapis.com/compute/v1/projects/test-project-id/global/networks/network"), + Ipv6Address: new("2001:0db8:85a3:0000:0000:8a2e:0370:7334"), }, }, - Status: ptr.To(status.String()), + Status: new(status.String()), ResourcePolicies: []string{ "projects/test-project-id/regions/us-central1/resourcePolicies/test-policy", }, diff --git a/sources/gcp/manual/compute-instant-snapshot.go b/sources/gcp/manual/compute-instant-snapshot.go index fb9d71e4..7fdb21d7 100644 --- a/sources/gcp/manual/compute-instant-snapshot.go +++ b/sources/gcp/manual/compute-instant-snapshot.go @@ -8,7 +8,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "github.com/sourcegraph/conc/pool" "google.golang.org/api/iterator" - "google.golang.org/protobuf/proto" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -176,7 +175,7 @@ func (c computeInstantSnapshotWrapper) listAggregatedStream(ctx context.Context, p.Go(func(ctx context.Context) error { it := c.client.AggregatedList(ctx, &computepb.AggregatedListInstantSnapshotsRequest{ Project: projectID, - ReturnPartialSuccess: proto.Bool(true), // Handle partial failures gracefully + ReturnPartialSuccess: new(true), // Handle partial failures gracefully }) for { diff --git a/sources/gcp/manual/compute-instant-snapshot_test.go b/sources/gcp/manual/compute-instant-snapshot_test.go index 62813897..fc0ec912 100644 --- a/sources/gcp/manual/compute-instant-snapshot_test.go +++ b/sources/gcp/manual/compute-instant-snapshot_test.go @@ -9,7 +9,6 @@ import ( 
"cloud.google.com/go/compute/apiv1/computepb" "go.uber.org/mock/gomock" "google.golang.org/api/iterator" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -314,13 +313,13 @@ func TestComputeInstantSnapshot(t *testing.T) { func createComputeInstantSnapshot(snapshotName, zone string, status computepb.InstantSnapshot_Status) *computepb.InstantSnapshot { return &computepb.InstantSnapshot{ - Name: ptr.To(snapshotName), + Name: new(snapshotName), Labels: map[string]string{"env": "test"}, - Status: ptr.To(status.String()), - Zone: ptr.To(zone), - SourceDisk: ptr.To( + Status: new(status.String()), + Zone: new(zone), + SourceDisk: new( "projects/test-project-id/zones/" + zone + "/disks/test-disk", ), - Architecture: ptr.To(computepb.InstantSnapshot_X86_64.String()), + Architecture: new(computepb.InstantSnapshot_X86_64.String()), } } diff --git a/sources/gcp/manual/compute-machine-image_test.go b/sources/gcp/manual/compute-machine-image_test.go index 7aac8391..7f8ab02c 100644 --- a/sources/gcp/manual/compute-machine-image_test.go +++ b/sources/gcp/manual/compute-machine-image_test.go @@ -8,7 +8,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "go.uber.org/mock/gomock" "google.golang.org/api/iterator" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -373,66 +372,66 @@ func TestComputeMachineImage(t *testing.T) { func createComputeMachineImage(imageName string, status computepb.MachineImage_Status) *computepb.MachineImage { return &computepb.MachineImage{ - Name: ptr.To(imageName), + Name: new(imageName), Labels: map[string]string{"env": "test"}, - Status: ptr.To(status.String()), + Status: new(status.String()), InstanceProperties: &computepb.InstanceProperties{ NetworkInterfaces: []*computepb.NetworkInterface{ { - Network: ptr.To("https://www.googleapis.com/compute/v1/projects/test-project-id/global/networks/test-network"), - Subnetwork: 
ptr.To("https://www.googleapis.com/compute/v1/projects/test-project-id/regions/us-central1/subnetworks/test-subnetwork"), - NetworkAttachment: ptr.To("https://www.googleapis.com/compute/v1/projects/test-project-id/regions/us-central1/networkAttachments/test-network-attachment"), - NetworkIP: ptr.To("10.0.0.1"), - Ipv6Address: ptr.To("2001:db8::1"), + Network: new("https://www.googleapis.com/compute/v1/projects/test-project-id/global/networks/test-network"), + Subnetwork: new("https://www.googleapis.com/compute/v1/projects/test-project-id/regions/us-central1/subnetworks/test-subnetwork"), + NetworkAttachment: new("https://www.googleapis.com/compute/v1/projects/test-project-id/regions/us-central1/networkAttachments/test-network-attachment"), + NetworkIP: new("10.0.0.1"), + Ipv6Address: new("2001:db8::1"), AccessConfigs: []*computepb.AccessConfig{ { - NatIP: ptr.To("203.0.113.1"), + NatIP: new("203.0.113.1"), }, }, Ipv6AccessConfigs: []*computepb.AccessConfig{ { - ExternalIpv6: ptr.To("2001:db8::2"), + ExternalIpv6: new("2001:db8::2"), }, }, }, }, Disks: []*computepb.AttachedDisk{ { - Source: ptr.To("https://www.googleapis.com/compute/v1/projects/test-project-id/zones/us-central1-a/disks/test-disk"), + Source: new("https://www.googleapis.com/compute/v1/projects/test-project-id/zones/us-central1-a/disks/test-disk"), DiskEncryptionKey: &computepb.CustomerEncryptionKey{ - KmsKeyName: ptr.To("projects/test-project-id/locations/global/keyRings/test-keyring/cryptoKeys/test-key/cryptoKeyVersions/test-version-source-disk"), + KmsKeyName: new("projects/test-project-id/locations/global/keyRings/test-keyring/cryptoKeys/test-key/cryptoKeyVersions/test-version-source-disk"), }, InitializeParams: &computepb.AttachedDiskInitializeParams{ - SourceImage: ptr.To("https://www.googleapis.com/compute/v1/projects/test-project-id/global/images/test-source-image"), - SourceSnapshot: 
ptr.To("https://www.googleapis.com/compute/v1/projects/test-project-id/global/snapshots/test-source-snapshot"), + SourceImage: new("https://www.googleapis.com/compute/v1/projects/test-project-id/global/images/test-source-image"), + SourceSnapshot: new("https://www.googleapis.com/compute/v1/projects/test-project-id/global/snapshots/test-source-snapshot"), SourceImageEncryptionKey: &computepb.CustomerEncryptionKey{ - KmsKeyName: ptr.To("projects/test-project-id/locations/global/keyRings/test-keyring/cryptoKeys/test-key/cryptoKeyVersions/test-version-source-image"), + KmsKeyName: new("projects/test-project-id/locations/global/keyRings/test-keyring/cryptoKeys/test-key/cryptoKeyVersions/test-version-source-image"), }, SourceSnapshotEncryptionKey: &computepb.CustomerEncryptionKey{ - KmsKeyName: ptr.To("projects/test-project-id/locations/global/keyRings/test-keyring/cryptoKeys/test-key/cryptoKeyVersions/test-version-source-snapshot"), + KmsKeyName: new("projects/test-project-id/locations/global/keyRings/test-keyring/cryptoKeys/test-key/cryptoKeyVersions/test-version-source-snapshot"), }, }, }, }, ServiceAccounts: []*computepb.ServiceAccount{ { - Email: ptr.To("test-sa@test-project-id.iam.gserviceaccount.com"), + Email: new("test-sa@test-project-id.iam.gserviceaccount.com"), }, }, GuestAccelerators: []*computepb.AcceleratorConfig{ { - AcceleratorType: ptr.To("https://www.googleapis.com/compute/v1/projects/test-project-id/zones/us-central1-a/acceleratorTypes/nvidia-tesla-k80"), - AcceleratorCount: ptr.To[int32](1), + AcceleratorType: new("https://www.googleapis.com/compute/v1/projects/test-project-id/zones/us-central1-a/acceleratorTypes/nvidia-tesla-k80"), + AcceleratorCount: new(int32(1)), }, }, }, MachineImageEncryptionKey: &computepb.CustomerEncryptionKey{ - KmsKeyName: ptr.To("projects/test-project-id/locations/global/keyRings/test-keyring/cryptoKeys/test-key/cryptoKeyVersions/test-version-machine-encryption-key"), + KmsKeyName: 
new("projects/test-project-id/locations/global/keyRings/test-keyring/cryptoKeys/test-key/cryptoKeyVersions/test-version-machine-encryption-key"), }, - SourceInstance: ptr.To("projects/test-project-id/zones/us-central1-a/instances/test-instance"), + SourceInstance: new("projects/test-project-id/zones/us-central1-a/instances/test-instance"), SavedDisks: []*computepb.SavedDisk{ { - SourceDisk: ptr.To("https://www.googleapis.com/compute/v1/projects/test-project-id/zones/us-central1-a/disks/test-saved-disk"), + SourceDisk: new("https://www.googleapis.com/compute/v1/projects/test-project-id/zones/us-central1-a/disks/test-saved-disk"), }, }, } diff --git a/sources/gcp/manual/compute-node-group.go b/sources/gcp/manual/compute-node-group.go index cc6b6960..6e0b1253 100644 --- a/sources/gcp/manual/compute-node-group.go +++ b/sources/gcp/manual/compute-node-group.go @@ -8,8 +8,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "github.com/sourcegraph/conc/pool" "google.golang.org/api/iterator" - "google.golang.org/protobuf/proto" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -192,7 +190,7 @@ func (c computeNodeGroupWrapper) listAggregatedStream(ctx context.Context, strea p.Go(func(ctx context.Context) error { it := c.client.AggregatedList(ctx, &computepb.AggregatedListNodeGroupsRequest{ Project: projectID, - ReturnPartialSuccess: proto.Bool(true), // Handle partial failures gracefully + ReturnPartialSuccess: new(true), // Handle partial failures gracefully }) for { @@ -274,7 +272,7 @@ func (c computeNodeGroupWrapper) SearchStream(ctx context.Context, stream discov req := &computepb.ListNodeGroupsRequest{ Project: location.ProjectID, Zone: location.Zone, - Filter: ptr.To("nodeTemplate = " + nodeTemplate), + Filter: new("nodeTemplate = " + nodeTemplate), } it := c.client.List(ctx, req) diff --git a/sources/gcp/manual/compute-node-group_test.go b/sources/gcp/manual/compute-node-group_test.go index 
e099d5f3..0860f085 100644 --- a/sources/gcp/manual/compute-node-group_test.go +++ b/sources/gcp/manual/compute-node-group_test.go @@ -10,7 +10,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "go.uber.org/mock/gomock" "google.golang.org/api/iterator" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -453,8 +452,8 @@ func TestComputeNodeGroup(t *testing.T) { func createComputeNodeGroup(name, templateUrl string, status computepb.NodeGroup_Status) *computepb.NodeGroup { return &computepb.NodeGroup{ - Name: ptr.To(name), - NodeTemplate: ptr.To(templateUrl), - Status: ptr.To(status.String()), + Name: new(name), + NodeTemplate: new(templateUrl), + Status: new(status.String()), } } diff --git a/sources/gcp/manual/compute-node-template.go b/sources/gcp/manual/compute-node-template.go index 34b40b17..4beea88f 100644 --- a/sources/gcp/manual/compute-node-template.go +++ b/sources/gcp/manual/compute-node-template.go @@ -8,7 +8,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "github.com/sourcegraph/conc/pool" "google.golang.org/api/iterator" - "google.golang.org/protobuf/proto" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -176,7 +175,7 @@ func (c computeNodeTemplateWrapper) listAggregatedStream(ctx context.Context, st p.Go(func(ctx context.Context) error { it := c.client.AggregatedList(ctx, &computepb.AggregatedListNodeTemplatesRequest{ Project: projectID, - ReturnPartialSuccess: proto.Bool(true), // Handle partial failures gracefully + ReturnPartialSuccess: new(true), // Handle partial failures gracefully }) for { diff --git a/sources/gcp/manual/compute-node-template_test.go b/sources/gcp/manual/compute-node-template_test.go index e4299916..00a8fbcb 100644 --- a/sources/gcp/manual/compute-node-template_test.go +++ b/sources/gcp/manual/compute-node-template_test.go @@ -9,7 +9,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" 
"go.uber.org/mock/gomock" "google.golang.org/api/iterator" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -266,12 +265,12 @@ func TestComputeNodeTemplate(t *testing.T) { // Create an node template fixture (as returned from GCP API). func createNodeTemplateApiFixture(nodeTemplateName string) *computepb.NodeTemplate { return &computepb.NodeTemplate{ - Name: ptr.To(nodeTemplateName), - NodeType: ptr.To("c2-node-60-240"), + Name: new(nodeTemplateName), + NodeType: new("c2-node-60-240"), ServerBinding: &computepb.ServerBinding{ - Type: ptr.To("RESTART_NODE_ON_ANY_SERVER"), + Type: new("RESTART_NODE_ON_ANY_SERVER"), }, - SelfLink: ptr.To("test-self-link"), - Region: ptr.To("us-central1"), + SelfLink: new("test-self-link"), + Region: new("us-central1"), } } diff --git a/sources/gcp/manual/compute-region-instance-group-manager_test.go b/sources/gcp/manual/compute-region-instance-group-manager_test.go index 2e90abaa..4004adfc 100644 --- a/sources/gcp/manual/compute-region-instance-group-manager_test.go +++ b/sources/gcp/manual/compute-region-instance-group-manager_test.go @@ -8,7 +8,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "go.uber.org/mock/gomock" "google.golang.org/api/iterator" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -314,17 +313,17 @@ func TestComputeRegionInstanceGroupManager(t *testing.T) { func createRegionInstanceGroupManager(name string, isStable bool, instanceTemplate string) *computepb.InstanceGroupManager { return &computepb.InstanceGroupManager{ - Name: ptr.To(name), + Name: new(name), Status: &computepb.InstanceGroupManagerStatus{ - IsStable: ptr.To(isStable), - Autoscaler: ptr.To("https://www.googleapis.com/compute/v1/projects/test-project-id/regions/us-central1/autoscalers/test-autoscaler"), + IsStable: new(isStable), + Autoscaler: 
new("https://www.googleapis.com/compute/v1/projects/test-project-id/regions/us-central1/autoscalers/test-autoscaler"), }, - Region: ptr.To("https://www.googleapis.com/compute/v1/projects/test-project-id/regions/us-central1"), - InstanceTemplate: ptr.To(instanceTemplate), - InstanceGroup: ptr.To("https://www.googleapis.com/compute/v1/projects/test-project-id/regions/us-central1/instanceGroups/test-group"), + Region: new("https://www.googleapis.com/compute/v1/projects/test-project-id/regions/us-central1"), + InstanceTemplate: new(instanceTemplate), + InstanceGroup: new("https://www.googleapis.com/compute/v1/projects/test-project-id/regions/us-central1/instanceGroups/test-group"), TargetPools: []string{"https://www.googleapis.com/compute/v1/projects/test-project-id/regions/us-central1/targetPools/test-pool"}, ResourcePolicies: &computepb.InstanceGroupManagerResourcePolicies{ - WorkloadPolicy: ptr.To("https://www.googleapis.com/compute/v1/projects/test-project-id/regions/us-central1/resourcePolicies/test-policy"), + WorkloadPolicy: new("https://www.googleapis.com/compute/v1/projects/test-project-id/regions/us-central1/resourcePolicies/test-policy"), }, } } diff --git a/sources/gcp/manual/compute-reservation.go b/sources/gcp/manual/compute-reservation.go index b29260a8..cc541a54 100644 --- a/sources/gcp/manual/compute-reservation.go +++ b/sources/gcp/manual/compute-reservation.go @@ -8,7 +8,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "github.com/sourcegraph/conc/pool" "google.golang.org/api/iterator" - "google.golang.org/protobuf/proto" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -178,7 +177,7 @@ func (c computeReservationWrapper) listAggregatedStream(ctx context.Context, str p.Go(func(ctx context.Context) error { it := c.client.AggregatedList(ctx, &computepb.AggregatedListReservationsRequest{ Project: projectID, - ReturnPartialSuccess: proto.Bool(true), // Handle partial failures gracefully + 
ReturnPartialSuccess: new(true), // Handle partial failures gracefully }) for { diff --git a/sources/gcp/manual/compute-reservation_test.go b/sources/gcp/manual/compute-reservation_test.go index 9e317dbc..8cefce1b 100644 --- a/sources/gcp/manual/compute-reservation_test.go +++ b/sources/gcp/manual/compute-reservation_test.go @@ -9,7 +9,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "go.uber.org/mock/gomock" "google.golang.org/api/iterator" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -283,18 +282,18 @@ func TestComputeReservation(t *testing.T) { func createComputeReservation(reservationName string, status computepb.Reservation_Status) *computepb.Reservation { return &computepb.Reservation{ - Name: ptr.To(reservationName), - Commitment: ptr.To( + Name: new(reservationName), + Commitment: new( "https://www.googleapis.com/compute/v1/projects/test-project-id/regions/us-central1/commitments/test-commitment", ), SpecificReservation: &computepb.AllocationSpecificSKUReservation{ InstanceProperties: &computepb.AllocationSpecificSKUAllocationReservedInstanceProperties{ - MachineType: ptr.To( + MachineType: new( "https://www.googleapis.com/compute/v1/projects/test-project-id/zones/us-central1-a/machineTypes/n1-standard-1", ), GuestAccelerators: []*computepb.AcceleratorConfig{ { - AcceleratorType: ptr.To( + AcceleratorType: new( "https://www.googleapis.com/compute/v1/projects/test-project-id/zones/us-central1-a/acceleratorTypes/nvidia-tesla-k80", ), }, @@ -304,6 +303,6 @@ func createComputeReservation(reservationName string, status computepb.Reservati ResourcePolicies: map[string]string{ "policy1": "https://www.googleapis.com/compute/v1/projects/test-project-id/regions/us-central1/resourcePolicies/test-policy", }, - Status: ptr.To(status.String()), + Status: new(status.String()), } } diff --git a/sources/gcp/manual/compute-security-policy_test.go b/sources/gcp/manual/compute-security-policy_test.go 
index b93d4db3..685aa79c 100644 --- a/sources/gcp/manual/compute-security-policy_test.go +++ b/sources/gcp/manual/compute-security-policy_test.go @@ -8,7 +8,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "go.uber.org/mock/gomock" "google.golang.org/api/iterator" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -199,13 +198,13 @@ func TestComputeSecurityPolicy(t *testing.T) { func createComputeSecurityPolicy(policyName string) *computepb.SecurityPolicy { return &computepb.SecurityPolicy{ - Name: ptr.To(policyName), + Name: new(policyName), Labels: map[string]string{"env": "test"}, Rules: []*computepb.SecurityPolicyRule{ { - Priority: ptr.To(int32(1000)), + Priority: new(int32(1000)), }, }, - Region: ptr.To("us-central1"), + Region: new("us-central1"), } } diff --git a/sources/gcp/manual/compute-snapshot_test.go b/sources/gcp/manual/compute-snapshot_test.go index 34b013f8..9856915c 100644 --- a/sources/gcp/manual/compute-snapshot_test.go +++ b/sources/gcp/manual/compute-snapshot_test.go @@ -8,7 +8,6 @@ import ( "cloud.google.com/go/compute/apiv1/computepb" "go.uber.org/mock/gomock" "google.golang.org/api/iterator" - "k8s.io/utils/ptr" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -77,10 +76,10 @@ func TestComputeSnapshot(t *testing.T) { ExpectedScope: "test-project-id", }, { - ExpectedType: gcpshared.ComputeResourcePolicy.String(), - ExpectedMethod: sdp.QueryMethod_GET, - ExpectedQuery: "test-source-snapshot-schedule-policy", - ExpectedScope: "test-project-id.us-central1", + ExpectedType: gcpshared.ComputeResourcePolicy.String(), + ExpectedMethod: sdp.QueryMethod_GET, + ExpectedQuery: "test-source-snapshot-schedule-policy", + ExpectedScope: "test-project-id.us-central1", }, { ExpectedType: gcpshared.CloudKMSCryptoKeyVersion.String(), @@ -295,23 +294,23 @@ func TestComputeSnapshot(t *testing.T) { func createComputeSnapshot(snapshotName string, status 
computepb.Snapshot_Status) *computepb.Snapshot { return &computepb.Snapshot{ - Name: ptr.To(snapshotName), + Name: new(snapshotName), Labels: map[string]string{"env": "test"}, - Status: ptr.To(status.String()), - SourceInstantSnapshot: ptr.To("projects/test-project-id/zones/us-central1-a/instantSnapshots/test-instant-snapshot"), + Status: new(status.String()), + SourceInstantSnapshot: new("projects/test-project-id/zones/us-central1-a/instantSnapshots/test-instant-snapshot"), StorageLocations: []string{"us-central1"}, Licenses: []string{"projects/test-project-id/global/licenses/test-license"}, SourceDiskEncryptionKey: &computepb.CustomerEncryptionKey{ - KmsKeyName: ptr.To("projects/test-project-id/locations/global/keyRings/test-keyring/cryptoKeys/test-key/cryptoKeyVersions/test-version-source-disk"), + KmsKeyName: new("projects/test-project-id/locations/global/keyRings/test-keyring/cryptoKeys/test-key/cryptoKeyVersions/test-version-source-disk"), }, - SourceDisk: ptr.To("projects/test-project-id/zones/us-central1-a/disks/test-disk"), + SourceDisk: new("projects/test-project-id/zones/us-central1-a/disks/test-disk"), SourceInstantSnapshotEncryptionKey: &computepb.CustomerEncryptionKey{ - KmsKeyName: ptr.To("projects/test-project-id/locations/global/keyRings/test-keyring/cryptoKeys/test-key/cryptoKeyVersions/test-version-source-snapshot"), - RawKey: ptr.To("test-key"), + KmsKeyName: new("projects/test-project-id/locations/global/keyRings/test-keyring/cryptoKeys/test-key/cryptoKeyVersions/test-version-source-snapshot"), + RawKey: new("test-key"), }, - SourceSnapshotSchedulePolicy: ptr.To("projects/test-project-id/regions/us-central1/resourcePolicies/test-source-snapshot-schedule-policy"), + SourceSnapshotSchedulePolicy: new("projects/test-project-id/regions/us-central1/resourcePolicies/test-source-snapshot-schedule-policy"), SnapshotEncryptionKey: &computepb.CustomerEncryptionKey{ - KmsKeyName: 
ptr.To("projects/test-project-id/locations/global/keyRings/test-keyring/cryptoKeys/test-key/cryptoKeyVersions/test-version-snapshot"), + KmsKeyName: new("projects/test-project-id/locations/global/keyRings/test-keyring/cryptoKeys/test-key/cryptoKeyVersions/test-version-snapshot"), }, } } diff --git a/sources/gcp/manual/storage-bucket-iam-policy.go b/sources/gcp/manual/storage-bucket-iam-policy.go index 3ce013d0..b98fcc2b 100644 --- a/sources/gcp/manual/storage-bucket-iam-policy.go +++ b/sources/gcp/manual/storage-bucket-iam-policy.go @@ -158,7 +158,7 @@ func (w *storageBucketIAMPolicyWrapper) policyToItem(location gcpshared.Location } type policyAttrs struct { - Bucket string `json:"bucket"` + Bucket string `json:"bucket"` Bindings []policyBinding `json:"bindings"` } attrs, err := shared.ToAttributesWithExclude(policyAttrs{Bucket: bucketName, Bindings: policyBindings}) @@ -274,12 +274,12 @@ func extractCustomRoleProjectAndID(role string) (projectID, roleID string) { return "", "" } rest := strings.TrimPrefix(role, prefix) - idx := strings.Index(rest, suffix) - if idx == -1 { + before, after, ok := strings.Cut(rest, suffix) + if !ok { return "", "" } - projectID = rest[:idx] - roleID = rest[idx+len(suffix):] + projectID = before + roleID = after if projectID == "" || roleID == "" { return "", "" } @@ -291,10 +291,10 @@ func extractCustomRoleProjectAndID(role string) (projectID, roleID string) { // For deleted members, any "?uid=..." suffix is stripped so the result is a valid DNS link. 
func extractDomainFromDomainMember(member string) string { var domain string - if strings.HasPrefix(member, "deleted:domain:") { - domain = strings.TrimPrefix(member, "deleted:domain:") - } else if strings.HasPrefix(member, "domain:") { - domain = strings.TrimPrefix(member, "domain:") + if after, ok := strings.CutPrefix(member, "deleted:domain:"); ok { + domain = after + } else if after, ok := strings.CutPrefix(member, "domain:"); ok { + domain = after } else { return "" } @@ -309,8 +309,8 @@ func extractDomainFromDomainMember(member string) string { // (projectOwner:projectId, projectEditor:projectId, projectViewer:projectId), or "" otherwise. func extractProjectIDFromProjectPrincipalMember(member string) string { for _, prefix := range []string{"projectOwner:", "projectEditor:", "projectViewer:"} { - if strings.HasPrefix(member, prefix) { - return strings.TrimPrefix(member, prefix) + if after, ok := strings.CutPrefix(member, prefix); ok { + return after } } return "" @@ -320,10 +320,10 @@ func extractProjectIDFromProjectPrincipalMember(member string) string { // For deleted members, any "?uid=..." suffix is stripped so the result is a valid IAMServiceAccount lookup query (email only). func extractServiceAccountEmailFromMember(member string) string { var email string - if strings.HasPrefix(member, "deleted:serviceAccount:") { - email = strings.TrimPrefix(member, "deleted:serviceAccount:") - } else if strings.HasPrefix(member, "serviceAccount:") { - email = strings.TrimPrefix(member, "serviceAccount:") + if after, ok := strings.CutPrefix(member, "deleted:serviceAccount:"); ok { + email = after + } else if after, ok := strings.CutPrefix(member, "serviceAccount:"); ok { + email = after } else { return "" } @@ -339,21 +339,21 @@ func extractServiceAccountEmailFromMember(member string) string { // use a shared domain where the first label is not a project ID, so we return "" to avoid invalid links. // For Google-managed SAs (e.g. 
name@gcp-sa-logging.iam.gserviceaccount.com) use isGoogleManagedServiceAccountDomain to skip. func extractProjectFromServiceAccountEmail(email string) string { - at := strings.Index(email, "@") - if at == -1 { + _, after, ok := strings.Cut(email, "@") + if !ok { return "" } - domain := email[at+1:] + domain := after // Only use first label as project when domain is project.iam.gserviceaccount.com. // developer.gserviceaccount.com and appspot.gserviceaccount.com must not be treated as project IDs. if !strings.HasSuffix(domain, ".iam.gserviceaccount.com") { return "" } - dot := strings.Index(domain, ".") - if dot == -1 { + before, _, ok := strings.Cut(domain, ".") + if !ok { return "" } - return domain[:dot] + return before } // isGoogleManagedServiceAccountDomain reports whether the domain's first label is a known diff --git a/sources/gcp/proc/proc.go b/sources/gcp/proc/proc.go index 588f8e3d..d59b1ff6 100644 --- a/sources/gcp/proc/proc.go +++ b/sources/gcp/proc/proc.go @@ -59,13 +59,13 @@ func (r *ProjectPermissionCheckResult) FormatError() error { // Build error message var errMsg strings.Builder - errMsg.WriteString(fmt.Sprintf("%d out of %d projects (%.1f%%) failed permission checks\n\n", - r.FailureCount, totalProjects, failurePercentage)) + fmt.Fprintf(&errMsg, "%d out of %d projects (%.1f%%) failed permission checks\n\n", + r.FailureCount, totalProjects, failurePercentage) // List failed projects with their errors errMsg.WriteString("Failed projects:\n") for projectID, err := range r.ProjectErrors { - errMsg.WriteString(fmt.Sprintf(" - %s: %v\n", projectID, err)) + fmt.Fprintf(&errMsg, " - %s: %v\n", projectID, err) } return errors.New(errMsg.String()) diff --git a/sources/gcp/proc/proc_test.go b/sources/gcp/proc/proc_test.go index 5089058b..c5476e72 100644 --- a/sources/gcp/proc/proc_test.go +++ b/sources/gcp/proc/proc_test.go @@ -3,6 +3,7 @@ package proc import ( "context" "fmt" + "slices" "sort" "strings" "sync" @@ -113,11 +114,8 @@ func 
Test_ensureMandatoryFieldsInDynamicAdapters(t *testing.T) { foundPerm := false for _, perm := range role.IAMPermissions { - for _, iamPerm := range meta.IAMPermissions { - if perm == iamPerm { - foundPerm = true - break - } + if slices.Contains(meta.IAMPermissions, perm) { + foundPerm = true } } @@ -580,14 +578,12 @@ func TestProjectHealthChecker_Check_ConcurrentAccess(t *testing.T) { errors := make(chan error, concurrency) for range concurrency { - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { _, err := checker.Check(ctx) if err != nil { errors <- err } - }() + }) } wg.Wait() @@ -717,11 +713,11 @@ func TestCriticalTerraformMappingsRegistered(t *testing.T) { // Overmind type it should resolve to, and which attribute is extracted from // the Terraform plan to perform the lookup. criticalMappings := []struct { - terraformType string - expectedType string - expectedField string - expectedMethod sdp.QueryMethod - reason string // documents why this mapping is critical + terraformType string + expectedType string + expectedField string + expectedMethod sdp.QueryMethod + reason string // documents why this mapping is critical }{ // Core resource mappings { diff --git a/sources/gcp/shared/adapter-meta.go b/sources/gcp/shared/adapter-meta.go index 455ad73c..b2c24b37 100644 --- a/sources/gcp/shared/adapter-meta.go +++ b/sources/gcp/shared/adapter-meta.go @@ -8,6 +8,11 @@ import ( "github.com/overmindtech/cli/sources/shared" ) +// SearchFilterFunc filters items returned by SEARCH. Takes the search query +// and an SDP item; returns true to keep the item. Used for tag-based SEARCH +// where the GCP API does not support server-side filtering. +type SearchFilterFunc func(query string, item *sdp.Item) bool + // LocationLevel defines at which level of the GCP hierarchy a resource is located. 
type LocationLevel string @@ -48,6 +53,10 @@ type AdapterMeta struct { // However, there is an exception: https://cloud.google.com/dataproc/docs/reference/rest/v1/ListAutoscalingPoliciesResponse // Expected: `autoscalingPolicies` by convention, but the API returns `policies` ListResponseSelector string + // SearchFilterFunc, if set, is applied after listing items during SEARCH + // to keep only items matching the query. Used for tag-based SEARCH where + // the API has no server-side filter. + SearchFilterFunc SearchFilterFunc } // ============================================= diff --git a/sources/gcp/shared/base.go b/sources/gcp/shared/base.go index 57e46afa..e15f2037 100644 --- a/sources/gcp/shared/base.go +++ b/sources/gcp/shared/base.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "slices" "github.com/overmindtech/cli/go/discovery" "github.com/overmindtech/cli/go/sdp-go" @@ -80,10 +81,8 @@ func (z *ZoneBase) LocationFromScope(scope string) (LocationInfo, error) { if !location.Zonal() { return LocationInfo{}, fmt.Errorf("scope %s is not zonal", scope) } - for _, loc := range z.locations { - if location.Equals(loc) { - return location, nil - } + if slices.ContainsFunc(z.locations, location.Equals) { + return location, nil } return LocationInfo{}, fmt.Errorf("scope %s not found in adapter locations", scope) } @@ -142,10 +141,8 @@ func (r *RegionBase) LocationFromScope(scope string) (LocationInfo, error) { if !location.Regional() { return LocationInfo{}, fmt.Errorf("scope %s is not regional", scope) } - for _, loc := range r.locations { - if location.Equals(loc) { - return location, nil - } + if slices.ContainsFunc(r.locations, location.Equals) { + return location, nil } return LocationInfo{}, fmt.Errorf("scope %s not found in adapter locations", scope) } @@ -203,10 +200,8 @@ func (p *ProjectBase) LocationFromScope(scope string) (LocationInfo, error) { if !location.ProjectLevel() { return LocationInfo{}, fmt.Errorf("scope %s is not project-level", scope) } - 
for _, loc := range p.locations { - if location.Equals(loc) { - return location, nil - } + if slices.ContainsFunc(p.locations, location.Equals) { + return location, nil } return LocationInfo{}, fmt.Errorf("scope %s not found in adapter locations", scope) } diff --git a/sources/gcp/shared/kms-asset-loader.go b/sources/gcp/shared/kms-asset-loader.go index 93c481cb..2bae3e7f 100644 --- a/sources/gcp/shared/kms-asset-loader.go +++ b/sources/gcp/shared/kms-asset-loader.go @@ -66,7 +66,7 @@ func (l *CloudKMSAssetLoader) EnsureLoaded(ctx context.Context) error { // Use singleflight to ensure only one load runs at a time // Concurrent callers wait for the same result - _, err, _ := l.group.Do("load", func() (interface{}, error) { + _, err, _ := l.group.Do("load", func() (any, error) { // Double-check TTL after acquiring the flight l.mu.Lock() if time.Since(l.lastLoadTime) < shared.DefaultCacheDuration { @@ -231,7 +231,7 @@ func (l *CloudKMSAssetLoader) fetchAssetsPage(ctx context.Context, pageToken str // Cloud Asset API requires quota project header req.Header.Set("X-Goog-User-Project", l.projectID) - resp, err := l.httpClient.Do(req) + resp, err := l.httpClient.Do(req) //nolint:gosec // G107 (SSRF): URL built from hardcoded https://cloudasset.googleapis.com/v1 base with project ID if err != nil { return nil, "", fmt.Errorf("failed to execute request: %w", err) } diff --git a/sources/gcp/shared/linker.go b/sources/gcp/shared/linker.go index 719640b8..47872c12 100644 --- a/sources/gcp/shared/linker.go +++ b/sources/gcp/shared/linker.go @@ -43,6 +43,26 @@ func NewLinker() *Linker { } } +// networkTagKeys lists the attribute keys that carry GCP network tags. +var networkTagKeys = map[string]bool{ + "targetTags": true, + "sourceTags": true, + "tags": true, + "tags.items": true, + "properties.tags.items": true, +} + +// IsNetworkTagKey returns true when the key is a known network-tag attribute. 
+func IsNetworkTagKey(key string) bool { + return networkTagKeys[key] +} + +// isNetworkTag returns true when the key is a known network-tag attribute and +// the value looks like a plain tag (no "/" — not a resource URI). +func isNetworkTag(key, value string) bool { + return networkTagKeys[key] && !strings.Contains(value, "/") +} + // AutoLink tries to find the item type of the TO item based on its GCP resource name. // If the item type is identified, it links the FROM item to the TO item. func (l *Linker) AutoLink(ctx context.Context, projectID string, fromSDPItem *sdp.Item, fromSDPItemType shared.ItemType, toItemGCPResourceName string, keys []string) { @@ -61,6 +81,64 @@ func (l *Linker) AutoLink(ctx context.Context, projectID string, fromSDPItem *sd "ovm.gcp.key": key, } + // Network tag handling: detect plain tag values on known tag keys and + // emit SEARCH-based links instead of the normal resource-path flow. + if isNetworkTag(key, toItemGCPResourceName) { + tag := strings.TrimSpace(toItemGCPResourceName) + if tag == "" { + return // skip empty/whitespace-only tags (R2) + } + + switch fromSDPItemType { + case ComputeFirewall, ComputeRoute: + // Tag-based SEARCH lists all instances and instance templates in scope then filters; + // may be slow in very large projects. + fromSDPItem.LinkedItemQueries = append(fromSDPItem.LinkedItemQueries, + &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: ComputeInstance.String(), + Method: sdp.QueryMethod_SEARCH, + Query: tag, + Scope: projectID, + }, + }, + &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: ComputeInstanceTemplate.String(), + Method: sdp.QueryMethod_SEARCH, + Query: tag, + Scope: projectID, + }, + }, + ) + case ComputeInstance, ComputeInstanceTemplate: + // Tag-based SEARCH lists all firewalls/routes in scope then filters; + // may be slow in very large projects. 
+ fromSDPItem.LinkedItemQueries = append(fromSDPItem.LinkedItemQueries, + &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: ComputeFirewall.String(), + Method: sdp.QueryMethod_SEARCH, + Query: tag, + Scope: projectID, + }, + }, + &sdp.LinkedItemQuery{ + Query: &sdp.Query{ + Type: ComputeRoute.String(), + Method: sdp.QueryMethod_SEARCH, + Query: tag, + Scope: projectID, + }, + }, + ) + default: + log.WithContext(ctx).WithFields(lf).Debug("network tag on unexpected item type, skipping") + } + + return + } + impacts, ok := LinkRules[fromSDPItemType] if !ok { log.WithContext(ctx).WithFields(lf).Warnf("there are no link rules for the FROM item type") diff --git a/sources/gcp/shared/linker_test.go b/sources/gcp/shared/linker_test.go index 71806580..6fe15d47 100644 --- a/sources/gcp/shared/linker_test.go +++ b/sources/gcp/shared/linker_test.go @@ -226,6 +226,84 @@ func TestLinker_AutoLink(t *testing.T) { } } +func TestLinker_AutoLink_NetworkTags(t *testing.T) { + projectID := "my-project" + l := NewLinker() + + t.Run("Firewall targetTags → SEARCH ComputeInstance", func(t *testing.T) { + item := &sdp.Item{} + l.AutoLink(context.TODO(), projectID, item, ComputeFirewall, "web-server", []string{"targetTags"}) + + assertLinkedItemQuery(t, item, ComputeInstance.String(), sdp.QueryMethod_SEARCH, "web-server", projectID) + }) + + t.Run("Firewall sourceTags → SEARCH ComputeInstance", func(t *testing.T) { + item := &sdp.Item{} + l.AutoLink(context.TODO(), projectID, item, ComputeFirewall, "nat-gateway", []string{"sourceTags"}) + + assertLinkedItemQuery(t, item, ComputeInstance.String(), sdp.QueryMethod_SEARCH, "nat-gateway", projectID) + }) + + t.Run("Route tags → SEARCH ComputeInstance", func(t *testing.T) { + item := &sdp.Item{} + l.AutoLink(context.TODO(), projectID, item, ComputeRoute, "backend", []string{"tags"}) + + assertLinkedItemQuery(t, item, ComputeInstance.String(), sdp.QueryMethod_SEARCH, "backend", projectID) + }) + + t.Run("Instance template tags.items → SEARCH 
ComputeFirewall and ComputeRoute", func(t *testing.T) { + item := &sdp.Item{} + l.AutoLink(context.TODO(), projectID, item, ComputeInstanceTemplate, "http-server", []string{"properties", "tags", "items"}) + + if len(item.GetLinkedItemQueries()) != 2 { + t.Fatalf("expected 2 linked item queries, got %d", len(item.GetLinkedItemQueries())) + } + + assertLinkedItemQuery(t, item, ComputeFirewall.String(), sdp.QueryMethod_SEARCH, "http-server", projectID) + + q2 := item.GetLinkedItemQueries()[1].GetQuery() + if q2.GetType() != ComputeRoute.String() { + t.Errorf("second query type = %s, want %s", q2.GetType(), ComputeRoute.String()) + } + if q2.GetMethod() != sdp.QueryMethod_SEARCH { + t.Errorf("second query method = %s, want SEARCH", q2.GetMethod()) + } + }) + + t.Run("Empty tag is skipped", func(t *testing.T) { + item := &sdp.Item{} + l.AutoLink(context.TODO(), projectID, item, ComputeFirewall, " ", []string{"targetTags"}) + + if len(item.GetLinkedItemQueries()) != 0 { + t.Fatalf("expected 0 linked item queries for empty tag, got %d", len(item.GetLinkedItemQueries())) + } + }) + + t.Run("URI value on tag key falls through to normal linking", func(t *testing.T) { + item := &sdp.Item{} + l.AutoLink(context.TODO(), projectID, item, ComputeRoute, "projects/my-project/zones/us-central1-a/instances/my-vm", []string{"tags"}) + + // Should NOT be treated as network tag (contains /), falls through to normal link rules + for _, liq := range item.GetLinkedItemQueries() { + if liq.GetQuery().GetMethod() == sdp.QueryMethod_SEARCH && liq.GetQuery().GetType() == ComputeInstance.String() && liq.GetQuery().GetQuery() == "projects/my-project/zones/us-central1-a/instances/my-vm" { + t.Error("URI value on tag key should not produce a network-tag SEARCH link") + } + } + }) +} + +func assertLinkedItemQuery(t *testing.T, item *sdp.Item, expectedType string, expectedMethod sdp.QueryMethod, expectedQuery, expectedScope string) { + t.Helper() + for _, liq := range item.GetLinkedItemQueries() { + 
q := liq.GetQuery() + if q.GetType() == expectedType && q.GetMethod() == expectedMethod && q.GetQuery() == expectedQuery && q.GetScope() == expectedScope { + return + } + } + t.Errorf("did not find LinkedItemQuery{type=%s, method=%s, query=%s, scope=%s} in %d queries", + expectedType, expectedMethod, expectedQuery, expectedScope, len(item.GetLinkedItemQueries())) +} + func Test_determineScope(t *testing.T) { type args struct { ctx context.Context diff --git a/sources/gcp/shared/location_info.go b/sources/gcp/shared/location_info.go index 4a5c65e1..e362f65b 100644 --- a/sources/gcp/shared/location_info.go +++ b/sources/gcp/shared/location_info.go @@ -2,6 +2,7 @@ package shared import ( "fmt" + "slices" "strings" ) @@ -226,10 +227,8 @@ func GetProjectIDsFromLocations(locationSlices ...[]LocationInfo) []string { // (e.g., filtering aggregatedList results to only configured locations). func HasLocationInSlices(loc LocationInfo, locationSlices ...[]LocationInfo) bool { for _, locations := range locationSlices { - for _, configuredLoc := range locations { - if loc.Equals(configuredLoc) { - return true - } + if slices.ContainsFunc(locations, loc.Equals) { + return true } } return false diff --git a/sources/gcp/shared/manual-adapter-links.go b/sources/gcp/shared/manual-adapter-links.go index d3004001..612d5acf 100644 --- a/sources/gcp/shared/manual-adapter-links.go +++ b/sources/gcp/shared/manual-adapter-links.go @@ -571,7 +571,7 @@ var ManualAdapterLinksByAssetType = map[shared.ItemType]func(projectID, fromItem Query: projectIDFromName, Scope: projectIDFromName, // Project scope uses project ID as scope }, - } + } } } else if strings.HasPrefix(name, "folders/") { folderID := ExtractPathParam("folders", name) @@ -583,7 +583,7 @@ var ManualAdapterLinksByAssetType = map[shared.ItemType]func(projectID, fromItem Query: folderID, Scope: projectID, // Folder scope uses project ID (may need adjustment when folder adapter is created) }, - } + } } } else if strings.HasPrefix(name, 
"organizations/") { orgID := ExtractPathParam("organizations", name) @@ -595,7 +595,7 @@ var ManualAdapterLinksByAssetType = map[shared.ItemType]func(projectID, fromItem Query: orgID, Scope: projectID, // Organization scope uses project ID (may need adjustment when org adapter is created) }, - } + } } } return nil @@ -615,7 +615,7 @@ var ManualAdapterLinksByAssetType = map[shared.ItemType]func(projectID, fromItem Query: folderID, Scope: projectID, // Folder scope uses project ID (may need adjustment when folder adapter is created) }, - } + } } } return nil @@ -635,7 +635,7 @@ var ManualAdapterLinksByAssetType = map[shared.ItemType]func(projectID, fromItem Query: orgID, Scope: projectID, // Organization scope uses project ID (may need adjustment when org adapter is created) }, - } + } } } return nil @@ -685,7 +685,7 @@ var ManualAdapterLinksByAssetType = map[shared.ItemType]func(projectID, fromItem Query: httpURL, Scope: "global", }, - } + } } } return nil @@ -737,7 +737,7 @@ var ManualAdapterLinksByAssetType = map[shared.ItemType]func(projectID, fromItem Query: shared.CompositeLookupKey(values[1], values[2]), Scope: values[0], }, - } + } } } @@ -752,7 +752,7 @@ var ManualAdapterLinksByAssetType = map[shared.ItemType]func(projectID, fromItem Query: shared.CompositeLookupKey(values[1], values[2]), Scope: values[0], }, - } + } } } @@ -807,7 +807,7 @@ var ManualAdapterLinksByAssetType = map[shared.ItemType]func(projectID, fromItem Query: values[1], Scope: values[0], }, - } + } } } @@ -883,7 +883,7 @@ var ManualAdapterLinksByAssetType = map[shared.ItemType]func(projectID, fromItem Query: dataset, Scope: scope, }, - } + } } } @@ -898,7 +898,7 @@ var ManualAdapterLinksByAssetType = map[shared.ItemType]func(projectID, fromItem Query: parts[1], // dataset ID Scope: parts[0], // project ID }, - } + } } } @@ -942,7 +942,7 @@ var ManualAdapterLinksByAssetType = map[shared.ItemType]func(projectID, fromItem Query: shared.CompositeLookupKey(dataset, model), Scope: scope, }, - } + 
} } } @@ -959,7 +959,7 @@ var ManualAdapterLinksByAssetType = map[shared.ItemType]func(projectID, fromItem Query: shared.CompositeLookupKey(dataset, model), Scope: scope, }, - } + } } } @@ -987,7 +987,7 @@ var ManualAdapterLinksByAssetType = map[shared.ItemType]func(projectID, fromItem Query: values[1], Scope: values[0], }, - } + } } } @@ -1002,7 +1002,7 @@ var ManualAdapterLinksByAssetType = map[shared.ItemType]func(projectID, fromItem Query: values[1], Scope: values[0], }, - } + } } } @@ -1011,8 +1011,8 @@ var ManualAdapterLinksByAssetType = map[shared.ItemType]func(projectID, fromItem // Extract bucket name (everything before the first slash) bucketName := query - if idx := strings.Index(query, "/"); idx != -1 { - bucketName = query[:idx] + if before, _, ok := strings.Cut(query, "/"); ok { + bucketName = before } // Validate bucket name is not empty @@ -1037,8 +1037,8 @@ var ManualAdapterLinksByAssetType = map[shared.ItemType]func(projectID, fromItem // StorageBucketIAMPolicy: link by bucket name using GET (one policy item per bucket). 
StorageBucketIAMPolicy: func(projectID, _, query string) *sdp.LinkedItemQuery { bucketName := query - if idx := strings.Index(query, "/"); idx != -1 { - bucketName = query[:idx] + if before, _, ok := strings.Cut(query, "/"); ok { + bucketName = before } if projectID == "" || bucketName == "" { return nil diff --git a/sources/shared/util.go b/sources/shared/util.go index 4f04fa51..aa4b98df 100644 --- a/sources/shared/util.go +++ b/sources/shared/util.go @@ -9,7 +9,7 @@ import ( // ToAttributesWithExclude converts an interface to SDP attributes using the `sdp.ToAttributesSorted` // function, and also allows the user to exclude certain top-level fields from // the resulting attributes -func ToAttributesWithExclude(i interface{}, exclusions ...string) (*sdp.ItemAttributes, error) { +func ToAttributesWithExclude(i any, exclusions ...string) (*sdp.ItemAttributes, error) { attrs, err := sdp.ToAttributesViaJson(i) if err != nil { return nil, err diff --git a/sources/snapshot/README.md b/sources/snapshot/README.md index 4981e1d7..cf949ef7 100644 --- a/sources/snapshot/README.md +++ b/sources/snapshot/README.md @@ -4,7 +4,7 @@ A discovery source that serves items from a snapshot file or URL, enabling local ## Overview -The snapshot source loads a protobuf snapshot (`.pb` file) at startup and responds to NATS discovery queries (GET, LIST, SEARCH) with items from that snapshot. This enables: +The snapshot source loads a snapshot file (JSON or protobuf format) at startup and responds to NATS discovery queries (GET, LIST, SEARCH) with items from that snapshot. 
This enables: - **Local testing**: Run backend services (gateway, api-server, NATS) locally with consistent snapshot data - **Deterministic v6 re-runs**: Re-run change analysis and blast radius calculations with the same snapshot data @@ -12,7 +12,8 @@ The snapshot source loads a protobuf snapshot (`.pb` file) at startup and respon ## Features -- **Snapshot loading**: Loads snapshots from local files or HTTP(S) URLs +- **Snapshot loading**: Loads snapshots from local files or HTTP(S) URLs (JSON or protobuf format) +- **Format detection**: Automatically detects JSON (`.json`) or protobuf (`.pb`) format - **Wildcard scope support**: Single adapter handles all types and scopes in the snapshot - **Full query support**: Implements GET, LIST, and SEARCH query methods - **In-memory indexing**: Fast lookups by type, scope, GUN, or query string @@ -45,7 +46,7 @@ The snapshot source requires a snapshot file or URL to be specified: ```bash ALLOW_UNAUTHENTICATED=true \ -SNAPSHOT_SOURCE=/workspace/services/api-server/service/changeanalysis/testdata/snapshot.pb \ +SNAPSHOT_SOURCE=/workspace/services/api-server/service/changeanalysis/testdata/snapshot.json \ NATS_SERVICE_HOST=nats \ NATS_SERVICE_PORT=4222 \ go run ./sources/snapshot/main.go --log=debug --json-log=false @@ -64,7 +65,7 @@ Update the `SNAPSHOT_SOURCE` environment variable in the launch config to point ```bash ALLOW_UNAUTHENTICATED=true \ -SNAPSHOT_SOURCE=https://gateway-host/area51/snapshots/{uuid}/protobuf \ +SNAPSHOT_SOURCE=https://gateway-host/area51/snapshots/{uuid}/json \ NATS_SERVICE_HOST=nats \ NATS_SERVICE_PORT=4222 \ go run ./sources/snapshot/main.go @@ -130,7 +131,7 @@ go test -v Test snapshot loading: ```bash cd sources/snapshot -go run main.go --snapshot-source=/path/to/snapshot.pb --help +go run main.go --snapshot-source=/path/to/snapshot.json --help ``` Verify with real snapshot: @@ -166,4 +167,6 @@ go test -run TestLoadSnapshotFromFile -v ./adapters - **Linear issue**: 
[ENG-2577](https://linear.app/overmind/issue/ENG-2577) - **Snapshot protobuf**: `sdp/snapshots.proto` - **Discovery engine**: `go/discovery/` -- **Test snapshot**: `services/api-server/service/changeanalysis/testdata/snapshot.pb` +- **Test snapshots**: + - JSON format (recommended): `services/api-server/service/changeanalysis/testdata/snapshot.json` + - Protobuf format (legacy): `services/api-server/service/changeanalysis/testdata/snapshot.pb` diff --git a/sources/snapshot/adapters/index.go b/sources/snapshot/adapters/index.go index fcbd92d0..4808d833 100644 --- a/sources/snapshot/adapters/index.go +++ b/sources/snapshot/adapters/index.go @@ -69,8 +69,7 @@ func NewSnapshotIndex(snapshot *sdp.Snapshot) (*SnapshotIndex, error) { // hydrateLinkedItems populates each item's LinkedItems field from the snapshot // edges. For each edge, the item matching edge.From gets a LinkedItem pointing -// to edge.To (with blast propagation). Edges whose From item is not in the -// snapshot are skipped. +// to edge.To. Edges whose From item is not in the snapshot are skipped. func (idx *SnapshotIndex) hydrateLinkedItems() { // Build a map from item reference key → existing LinkedItem targets so // we don't add duplicates when the item already carries some LinkedItems. 
diff --git a/sources/snapshot/adapters/index_test.go b/sources/snapshot/adapters/index_test.go index 0cb4a6ce..a5774e65 100644 --- a/sources/snapshot/adapters/index_test.go +++ b/sources/snapshot/adapters/index_test.go @@ -7,15 +7,15 @@ import ( ) func createTestSnapshot() *sdp.Snapshot { - attrs1, _ := sdp.ToAttributesViaJson(map[string]interface{}{ + attrs1, _ := sdp.ToAttributesViaJson(map[string]any{ "instanceId": "i-12345", "name": "test-instance", }) - attrs2, _ := sdp.ToAttributesViaJson(map[string]interface{}{ + attrs2, _ := sdp.ToAttributesViaJson(map[string]any{ "instanceId": "i-67890", "name": "test-instance-2", }) - attrs3, _ := sdp.ToAttributesViaJson(map[string]interface{}{ + attrs3, _ := sdp.ToAttributesViaJson(map[string]any{ "bucketName": "my-test-bucket", }) diff --git a/sources/snapshot/adapters/loader.go b/sources/snapshot/adapters/loader.go index d0dcb96a..eb1c9133 100644 --- a/sources/snapshot/adapters/loader.go +++ b/sources/snapshot/adapters/loader.go @@ -1,6 +1,7 @@ package adapters import ( + "bytes" "context" "fmt" "io" @@ -10,6 +11,7 @@ import ( "github.com/overmindtech/cli/go/sdp-go" log "github.com/sirupsen/logrus" + "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/proto" ) @@ -33,20 +35,32 @@ func LoadSnapshot(ctx context.Context, source string) (*sdp.Snapshot, error) { } } - // Unmarshal the protobuf data + // Unmarshal the data (detect JSON vs protobuf format) snapshot := &sdp.Snapshot{} - if err := proto.Unmarshal(data, snapshot); err != nil { - return nil, fmt.Errorf("failed to unmarshal snapshot protobuf: %w", err) + trimmed := bytes.TrimSpace(data) + if len(trimmed) > 0 && trimmed[0] == '{' { + // JSON format + if err := protojson.Unmarshal(data, snapshot); err != nil { + return nil, fmt.Errorf("failed to unmarshal snapshot JSON: %w", err) + } + log.Info("Loaded snapshot from JSON format") + } else { + // Protobuf format + if err := proto.Unmarshal(data, snapshot); err != nil { + return nil, 
fmt.Errorf("failed to unmarshal snapshot protobuf: %w", err) + } + log.Info("Loaded snapshot from protobuf format") } - // Validate snapshot has items - if snapshot.GetProperties() == nil || len(snapshot.GetProperties().GetItems()) == 0 { - return nil, fmt.Errorf("snapshot has no items") + if snapshot.GetProperties() == nil { + return nil, fmt.Errorf("snapshot has no properties") } + items := len(snapshot.GetProperties().GetItems()) + edges := len(snapshot.GetProperties().GetEdges()) log.WithFields(log.Fields{ - "items": len(snapshot.GetProperties().GetItems()), - "edges": len(snapshot.GetProperties().GetEdges()), + "items": items, + "edges": edges, }).Info("Snapshot loaded successfully") return snapshot, nil @@ -60,7 +74,7 @@ func loadSnapshotFromURL(ctx context.Context, url string) ([]byte, error) { } client := &http.Client{} - resp, err := client.Do(req) + resp, err := client.Do(req) //nolint:gosec // G107 (SSRF): URL comes from operator-supplied snapshot source config, not from untrusted network input if err != nil { return nil, fmt.Errorf("HTTP request failed: %w", err) } diff --git a/sources/snapshot/adapters/loader_test.go b/sources/snapshot/adapters/loader_test.go index 7ae5d63b..a2b65c40 100644 --- a/sources/snapshot/adapters/loader_test.go +++ b/sources/snapshot/adapters/loader_test.go @@ -14,7 +14,7 @@ import ( func TestLoadSnapshotFromFile(t *testing.T) { // Create a test snapshot - attrs, _ := sdp.ToAttributesViaJson(map[string]interface{}{ + attrs, _ := sdp.ToAttributesViaJson(map[string]any{ "name": "test-item", }) @@ -63,7 +63,7 @@ func TestLoadSnapshotFromFile(t *testing.T) { func TestLoadSnapshotFromURL(t *testing.T) { // Create a test snapshot - attrs, _ := sdp.ToAttributesViaJson(map[string]interface{}{ + attrs, _ := sdp.ToAttributesViaJson(map[string]any{ "name": "test-item", }) @@ -107,7 +107,7 @@ func TestLoadSnapshotFromURL(t *testing.T) { } func TestLoadSnapshotEmptyItems(t *testing.T) { - // Create a snapshot with no items + // Create a 
snapshot with no items (e.g. revlink warmup for account with no sources) snapshot := &sdp.Snapshot{ Properties: &sdp.SnapshotProperties{ Name: "empty-snapshot", @@ -128,11 +128,17 @@ func TestLoadSnapshotEmptyItems(t *testing.T) { t.Fatalf("Failed to write test snapshot file: %v", err) } - // Test loading - should fail validation + // Empty snapshots are allowed (e.g. for benchmarking or accounts with no discovered infra) ctx := context.Background() - _, err = LoadSnapshot(ctx, tmpFile) - if err == nil { - t.Error("Expected error for snapshot with no items, got nil") + loaded, err := LoadSnapshot(ctx, tmpFile) + if err != nil { + t.Fatalf("LoadSnapshot with empty items should succeed: %v", err) + } + if len(loaded.GetProperties().GetItems()) != 0 { + t.Errorf("Expected 0 items, got %d", len(loaded.GetProperties().GetItems())) + } + if loaded.GetProperties().GetName() != "empty-snapshot" { + t.Errorf("Expected name 'empty-snapshot', got %q", loaded.GetProperties().GetName()) } } diff --git a/sources/snapshot/cmd/root.go b/sources/snapshot/cmd/root.go index 0882ba22..e99791ea 100644 --- a/sources/snapshot/cmd/root.go +++ b/sources/snapshot/cmd/root.go @@ -78,7 +78,13 @@ with fixed data and deterministic re-runs of v6 investigations.`, e.SetInitError(initErr) sentry.CaptureException(initErr) } else { - e.StartSendingHeartbeats(ctx) + e.MarkAdaptersInitialized() + // Start() already launched the heartbeat loop, so StartSendingHeartbeats + // is a no-op here. Send an immediate heartbeat so the API server learns + // the source is healthy without waiting for the next tick. 
+ if err := e.SendHeartbeat(ctx, nil); err != nil { + log.WithError(err).Warn("Failed to send post-init heartbeat") + } } <-ctx.Done() diff --git a/sources/transformer.go b/sources/transformer.go index 92c9f0d0..6cca672a 100644 --- a/sources/transformer.go +++ b/sources/transformer.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "slices" "strings" "buf.build/go/protovalidate" @@ -241,10 +242,8 @@ func (s *standardAdapterCore) validateScopes(scope string) error { } } - for _, expectedScope := range s.Scopes() { - if scope == expectedScope { - return nil - } + if slices.Contains(s.Scopes(), scope) { + return nil } return &sdp.QueryError{ @@ -368,6 +367,7 @@ func (s *standardAdapterImpl) Metadata() *sdp.AdapterMetadata { } if s.wrapper.PotentialLinks() != nil { + a.PotentialLinks = []string{} for link := range s.wrapper.PotentialLinks() { a.PotentialLinks = append(a.PotentialLinks, link.String()) } @@ -540,6 +540,7 @@ func (s *standardListableAdapterImpl) Metadata() *sdp.AdapterMetadata { } if s.wrapper.PotentialLinks() != nil { + a.PotentialLinks = []string{} for link := range s.wrapper.PotentialLinks() { a.PotentialLinks = append(a.PotentialLinks, link.String()) } @@ -613,9 +614,9 @@ func (s *standardSearchableAdapterImpl) Search(ctx context.Context, scope string // This must be a terraform query in Azure resource ID format: // /subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Storage/storageAccounts/{account}/queueServices/default/queues/{queue} // - // Extract the relevant parts from the resource ID based on the resource type - pathKeys := azureshared.GetResourceIDPathKeys(s.wrapper.Type()) - if pathKeys == nil { + // Extract the relevant parts from the resource ID based on the resource type. + // Distinguish "unknown type" (no path keys) from "extraction failed" (malformed or unsupported ID format). 
+ if azureshared.GetResourceIDPathKeys(s.wrapper.Type()) == nil { return nil, &sdp.QueryError{ ErrorType: sdp.QueryError_OTHER, ErrorString: fmt.Sprintf( @@ -625,8 +626,17 @@ func (s *standardSearchableAdapterImpl) Search(ctx context.Context, scope string ), } } - - queryParts = azureshared.ExtractPathParamsFromResourceID(query, pathKeys) + queryParts = azureshared.ExtractPathParamsFromResourceIDByType(s.wrapper.Type(), query) + if queryParts == nil { + return nil, &sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: fmt.Sprintf( + "failed to extract query parts from resource ID for resource type %s (invalid or unsupported format): %s", + s.wrapper.Type(), + query, + ), + } + } if len(queryParts) != len(s.wrapper.GetLookups()) { return nil, &sdp.QueryError{ ErrorType: sdp.QueryError_OTHER, @@ -817,9 +827,9 @@ func (s *standardSearchableAdapterImpl) SearchStream(ctx context.Context, scope // This must be a terraform query in Azure resource ID format: // /subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.Storage/storageAccounts/{account}/queueServices/default/queues/{queue} // - // Extract the relevant parts from the resource ID based on the resource type - pathKeys := azureshared.GetResourceIDPathKeys(s.wrapper.Type()) - if pathKeys == nil { + // Extract the relevant parts from the resource ID based on the resource type. + // Distinguish "unknown type" (no path keys) from "extraction failed" (malformed or unsupported ID format). 
+ if azureshared.GetResourceIDPathKeys(s.wrapper.Type()) == nil { stream.SendError(&sdp.QueryError{ ErrorType: sdp.QueryError_OTHER, ErrorString: fmt.Sprintf( @@ -830,8 +840,18 @@ func (s *standardSearchableAdapterImpl) SearchStream(ctx context.Context, scope }) return } - - queryParts = azureshared.ExtractPathParamsFromResourceID(query, pathKeys) + queryParts = azureshared.ExtractPathParamsFromResourceIDByType(s.wrapper.Type(), query) + if queryParts == nil { + stream.SendError(&sdp.QueryError{ + ErrorType: sdp.QueryError_OTHER, + ErrorString: fmt.Sprintf( + "failed to extract query parts from resource ID for resource type %s (invalid or unsupported format): %s", + s.wrapper.Type(), + query, + ), + }) + return + } if len(queryParts) != len(s.wrapper.GetLookups()) { stream.SendError(&sdp.QueryError{ ErrorType: sdp.QueryError_OTHER, @@ -858,11 +878,6 @@ func (s *standardSearchableAdapterImpl) SearchStream(ctx context.Context, scope return } - if s.searchStreamable == nil { - log.WithField("adapter", s.Name()).Debug("search stream operation not supported") - return - } - // This must be a regular query in the format of: // {{datasetName}}|{{tableName}} queryParts = strings.Split(query, shared.QuerySeparator) @@ -889,6 +904,38 @@ func (s *standardSearchableAdapterImpl) SearchStream(ctx context.Context, scope return } + if s.searchStreamable == nil { + // No streaming implementation; fall back to the batch Search method + // and send items individually. Without this, wrappers that implement + // SearchableWrapper but not SearchStreamableWrapper would silently + // return zero items because the engine always prefers SearchStream. + items, qErr := s.searchable.Search(ctx, scope, queryParts...) 
+ if qErr != nil { + if IsNotFound(qErr) { + s.cache.StoreError(ctx, qErr, shared.DefaultCacheDuration, ck) + } + stream.SendError(qErr) + return + } + if len(items) == 0 { + notFoundErr := &sdp.QueryError{ + ErrorType: sdp.QueryError_NOTFOUND, + ErrorString: fmt.Sprintf("no %s found for search query '%s'", s.Type(), query), + Scope: scope, + SourceName: s.Name(), + ItemType: s.Type(), + ResponderName: s.Name(), + } + s.cache.StoreError(ctx, notFoundErr, shared.DefaultCacheDuration, ck) + return + } + for _, item := range items { + s.cache.StoreItem(ctx, item, shared.DefaultCacheDuration, ck) + stream.SendItem(item) + } + return + } + s.searchStreamable.SearchStream(ctx, stream, s.cache, ck, scope, queryParts...) } @@ -920,6 +967,7 @@ func (s *standardSearchableAdapterImpl) Metadata() *sdp.AdapterMetadata { } if s.wrapper.PotentialLinks() != nil { + a.PotentialLinks = []string{} for link := range s.wrapper.PotentialLinks() { a.PotentialLinks = append(a.PotentialLinks, link.String()) } @@ -979,6 +1027,7 @@ func (s *standardSearchableListableAdapterImpl) Metadata() *sdp.AdapterMetadata } if s.wrapper.PotentialLinks() != nil { + a.PotentialLinks = []string{} for link := range s.wrapper.PotentialLinks() { a.PotentialLinks = append(a.PotentialLinks, link.String()) } @@ -1061,13 +1110,7 @@ func validatePredefinedRole(wrapper Wrapper) error { // Check if all IAM permissions from the wrapper exist in the predefined role's IAMPermissions for _, perm := range iamPermissions { - found := false - for _, rolePerm := range role.IAMPermissions { - if perm == rolePerm { - found = true - break - } - } + found := slices.Contains(role.IAMPermissions, perm) if !found { return fmt.Errorf("IAM permission %s from wrapper is not included in predefined role %s IAMPermissions", perm, pRole) } diff --git a/sources/transformer_test.go b/sources/transformer_test.go index ac18c762..c78a2730 100644 --- a/sources/transformer_test.go +++ b/sources/transformer_test.go @@ -150,24 +150,20 @@ func 
TestListErrorCausesCacheHang(t *testing.T) { var secondDuration time.Duration // First goroutine: calls List(), gets cache miss, underlying returns error - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { start := time.Now() _, firstErr = adapter.(interface { List(context.Context, string, bool) ([]*sdp.Item, error) }).List(ctx, scope, false) firstDuration = time.Since(start) - }() + }) // Give first goroutine time to start and hit the error time.Sleep(50 * time.Millisecond) // Second goroutine: calls List() after first has hit error // Should be woken immediately by done() and retry quickly - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { // Use a timeout to prevent infinite hang if bug exists ctx2, cancel := context.WithTimeout(ctx, 500*time.Millisecond) defer cancel() @@ -177,7 +173,7 @@ func TestListErrorCausesCacheHang(t *testing.T) { List(context.Context, string, bool) ([]*sdp.Item, error) }).List(ctx2, scope, false) secondDuration = time.Since(start) - }() + }) wg.Wait() diff --git a/stdlib-source/adapters/certificate.go b/stdlib-source/adapters/certificate.go index 018c2c5e..6fd6e67a 100644 --- a/stdlib-source/adapters/certificate.go +++ b/stdlib-source/adapters/certificate.go @@ -135,7 +135,7 @@ func (s *CertificateAdapter) Search(ctx context.Context, scope string, query str continue } - attributes, err = sdp.ToAttributes(map[string]interface{}{ + attributes, err = sdp.ToAttributes(map[string]any{ "issuer": cert.Issuer.String(), "subject": cert.Subject.String(), "notBefore": cert.NotBefore.String(), @@ -149,7 +149,7 @@ func (s *CertificateAdapter) Search(ctx context.Context, scope string, query str "keyUsage": getKeyUsage(cert.KeyUsage), "extendedKeyUsage": getExtendedKeyUsage(cert.ExtKeyUsage), "version": cert.Version, - "basicConstraints": map[string]interface{}{ + "basicConstraints": map[string]any{ "CA": cert.IsCA, "pathLen": cert.MaxPathLen, }, diff --git a/stdlib-source/adapters/certificate_test.go 
b/stdlib-source/adapters/certificate_test.go index 871fda7c..05580e0e 100644 --- a/stdlib-source/adapters/certificate_test.go +++ b/stdlib-source/adapters/certificate_test.go @@ -98,7 +98,7 @@ func TestCertificateList(t *testing.T) { type CertTest struct { Attribute string - Expected interface{} + Expected any } func (c *CertTest) Run(t *testing.T, cert *sdp.Item) { @@ -185,7 +185,7 @@ func TestCertificateSearch(t *testing.T) { }, { Attribute: "basicConstraints", - Expected: map[string]interface{}{ + Expected: map[string]any{ "pathLen": float64(0), "CA": true, }, diff --git a/stdlib-source/adapters/dns.go b/stdlib-source/adapters/dns.go index 3d7879fe..dfbe264a 100644 --- a/stdlib-source/adapters/dns.go +++ b/stdlib-source/adapters/dns.go @@ -472,7 +472,7 @@ func (d *DNSAdapter) makeQueryImpl(ctx context.Context, query string, server str name := trimDnsSuffix(cname.Hdr.Name) target := trimDnsSuffix(cname.Target) - attrs, err = sdp.ToAttributes(map[string]interface{}{ + attrs, err = sdp.ToAttributes(map[string]any{ "name": name, "type": "CNAME", "ttl": cname.Hdr.Ttl, @@ -569,7 +569,7 @@ func GroupAnswers(answers []dns.RR) *AnswerGroup { // AToItem Converts a set of A or AAAA records to an item func AToItem(name string, records []dns.RR) (*sdp.Item, error) { - recordAttrs := make([]map[string]interface{}, 0) + recordAttrs := make([]map[string]any, 0) liq := make([]*sdp.LinkedItemQuery, 0) for _, r := range records { @@ -585,7 +585,7 @@ func AToItem(name string, records []dns.RR) (*sdp.Item, error) { ip = aaaa.AAAA } - recordAttrs = append(recordAttrs, map[string]interface{}{ + recordAttrs = append(recordAttrs, map[string]any{ "ttl": hdr.Ttl, "type": typ, "ip": ip.String(), @@ -607,7 +607,7 @@ func AToItem(name string, records []dns.RR) (*sdp.Item, error) { return fmt.Sprint(i) < fmt.Sprint(j) }) - attrs, err := sdp.ToAttributes(map[string]interface{}{ + attrs, err := sdp.ToAttributes(map[string]any{ "name": name, "type": "address", "records": recordAttrs, diff --git 
a/stdlib-source/adapters/http.go b/stdlib-source/adapters/http.go index 4db87391..5b593dd7 100644 --- a/stdlib-source/adapters/http.go +++ b/stdlib-source/adapters/http.go @@ -187,7 +187,7 @@ func (s *HTTPAdapter) Get(ctx context.Context, scope string, query string, ignor // we are only running a HEAD request this is unlikely to be a problem tr := &http.Transport{ TLSClientConfig: &tls.Config{ - InsecureSkipVerify: true, //nolint:gosec // This is fine for a HEAD request + InsecureSkipVerify: true, //nolint:gosec // G402 (TLS skip verify): intentional—adapter inspects TLS certificate details via HEAD request, not trusting the content }, } client := &http.Client{ @@ -214,7 +214,7 @@ func (s *HTTPAdapter) Get(ctx context.Context, scope string, query string, ignor var res *http.Response - res, err = client.Do(req) + res, err = client.Do(req) //nolint:gosec // G107 (SSRF): URL is the SDP query target; hostname validated by validateHostname() which blocks link-local/metadata IPs if err != nil { err = &sdp.QueryError{ @@ -254,7 +254,7 @@ func (s *HTTPAdapter) Get(ctx context.Context, scope string, query string, ignor // Convert the attributes from a golang map, to the structure required for // the SDP protocol - attributes, err := sdp.ToAttributes(map[string]interface{}{ + attributes, err := sdp.ToAttributes(map[string]any{ "name": query, "status": res.StatusCode, "statusString": res.Status, @@ -319,7 +319,7 @@ func (s *HTTPAdapter) Get(ctx context.Context, scope string, query string, ignor version = "unknown" } - attributes.Set("tls", map[string]interface{}{ + attributes.Set("tls", map[string]any{ "version": version, "certificate": CertToName(tlsState.PeerCertificates[0]), "serverName": tlsState.ServerName, diff --git a/stdlib-source/adapters/http_test.go b/stdlib-source/adapters/http_test.go index 5370cd79..6ecc61d2 100644 --- a/stdlib-source/adapters/http_test.go +++ b/stdlib-source/adapters/http_test.go @@ -306,7 +306,7 @@ func TestHTTPGet(t *testing.T) { t.Fatal(err) 
} - var status interface{} + var status any status, err = item.GetAttributes().Get("status") if err != nil { @@ -326,7 +326,7 @@ func TestHTTPGet(t *testing.T) { t.Fatal(err) } - var status interface{} + var status any status, err = item.GetAttributes().Get("status") if err != nil { @@ -360,7 +360,7 @@ func TestHTTPGet(t *testing.T) { t.Fatal(err) } - var status interface{} + var status any status, err = item.GetAttributes().Get("status") if err != nil { t.Fatal(err) @@ -371,7 +371,7 @@ func TestHTTPGet(t *testing.T) { } // Check that the location header contains the relative URL - var location interface{} + var location any location, err = item.GetAttributes().Get("location") if err != nil { t.Fatal(err) @@ -502,7 +502,7 @@ func TestHTTPGet(t *testing.T) { } // The request should succeed, but the redirect should be marked as blocked - var locationError interface{} + var locationError any locationError, err = item.GetAttributes().Get("location-error") if err != nil { t.Fatal("Expected location-error attribute for blocked redirect") @@ -560,7 +560,7 @@ func TestHTTPSearch(t *testing.T) { } // Verify the item has the expected status (200 for OK page) - var status interface{} + var status any status, err = item.GetAttributes().Get("status") if err != nil { t.Fatal(err) diff --git a/stdlib-source/adapters/ip.go b/stdlib-source/adapters/ip.go index dfc1d419..e24816cc 100644 --- a/stdlib-source/adapters/ip.go +++ b/stdlib-source/adapters/ip.go @@ -127,7 +127,7 @@ func (bc *IPAdapter) Get(ctx context.Context, scope string, query string, ignore } } - attributes, err = sdp.ToAttributes(map[string]interface{}{ + attributes, err = sdp.ToAttributes(map[string]any{ "ip": ip.String(), "unspecified": ip.IsUnspecified(), "loopback": ip.IsLoopback(), diff --git a/stdlib-source/adapters/main.go b/stdlib-source/adapters/main.go index 1c8ddfa0..c69dbb8c 100644 --- a/stdlib-source/adapters/main.go +++ b/stdlib-source/adapters/main.go @@ -171,7 +171,7 @@ func parseRdapUrl(rdapUrl 
string) (*RDAPUrl, error) { } var RDAPTransforms = sdp.AddDefaultTransforms(sdp.TransformMap{ - reflect.TypeOf(rdap.Link{}): func(i interface{}) interface{} { + reflect.TypeFor[rdap.Link](): func(i any) any { // We only want to return the href for links link, ok := i.(rdap.Link) @@ -181,7 +181,7 @@ var RDAPTransforms = sdp.AddDefaultTransforms(sdp.TransformMap{ return "" }, - reflect.TypeOf(rdap.VCard{}): func(i interface{}) interface{} { + reflect.TypeFor[rdap.VCard](): func(i any) any { vcard, ok := i.(rdap.VCard) if ok { @@ -230,7 +230,7 @@ var RDAPTransforms = sdp.AddDefaultTransforms(sdp.TransformMap{ return nil }, - reflect.TypeOf(&rdap.DecodeData{}): func(i interface{}) interface{} { + reflect.TypeFor[*rdap.DecodeData](): func(i any) any { // Exclude these return nil }, diff --git a/stdlib-source/adapters/rdap-asn.go b/stdlib-source/adapters/rdap-asn.go index dd41e7b9..41e7ae4f 100644 --- a/stdlib-source/adapters/rdap-asn.go +++ b/stdlib-source/adapters/rdap-asn.go @@ -99,7 +99,7 @@ func (s *RdapASNAdapter) Get(ctx context.Context, scope string, query string, ig return nil, fmt.Errorf("Unexpected response type: %T", response.Object) } - attributes, err := sdp.ToAttributesCustom(map[string]interface{}{ + attributes, err := sdp.ToAttributesCustom(map[string]any{ "conformance": asn.Conformance, "objectClassName": asn.ObjectClassName, "notices": asn.Notices, diff --git a/stdlib-source/adapters/rdap-domain.go b/stdlib-source/adapters/rdap-domain.go index 1502e069..afe5e15a 100644 --- a/stdlib-source/adapters/rdap-domain.go +++ b/stdlib-source/adapters/rdap-domain.go @@ -145,7 +145,7 @@ func (s *RdapDomainAdapter) Search(ctx context.Context, scope string, query stri } } - attributes, err := sdp.ToAttributesCustom(map[string]interface{}{ + attributes, err := sdp.ToAttributesCustom(map[string]any{ "conformance": domain.Conformance, "events": domain.Events, "handle": domain.Handle, diff --git a/stdlib-source/adapters/rdap-entity.go 
b/stdlib-source/adapters/rdap-entity.go index 9cb30028..1a462717 100644 --- a/stdlib-source/adapters/rdap-entity.go +++ b/stdlib-source/adapters/rdap-entity.go @@ -146,7 +146,7 @@ func (s *RdapEntityAdapter) runEntityRequest(ctx context.Context, query string, return nil, fmt.Errorf("Expected Entity, got %T", response.Object) } - attributes, err := sdp.ToAttributesCustom(map[string]interface{}{ + attributes, err := sdp.ToAttributesCustom(map[string]any{ "asEventActor": entity.AsEventActor, "conformance": entity.Conformance, "events": entity.Events, diff --git a/stdlib-source/adapters/rdap-ip-network.go b/stdlib-source/adapters/rdap-ip-network.go index 212b55a8..5b58dc7c 100644 --- a/stdlib-source/adapters/rdap-ip-network.go +++ b/stdlib-source/adapters/rdap-ip-network.go @@ -155,7 +155,7 @@ func (s *RdapIPNetworkAdapter) Search(ctx context.Context, scope string, query s s.IPCache.Store(network, ipNetwork, RdapCacheDuration) } - attributes, err := sdp.ToAttributesCustom(map[string]interface{}{ + attributes, err := sdp.ToAttributesCustom(map[string]any{ "conformance": ipNetwork.Conformance, "country": ipNetwork.Country, "endAddress": ipNetwork.EndAddress, diff --git a/stdlib-source/adapters/rdap-nameserver.go b/stdlib-source/adapters/rdap-nameserver.go index 0d0f052f..31e14e5a 100644 --- a/stdlib-source/adapters/rdap-nameserver.go +++ b/stdlib-source/adapters/rdap-nameserver.go @@ -141,7 +141,7 @@ func (s *RdapNameserverAdapter) Search(ctx context.Context, scope string, query return nil, fmt.Errorf("Expected Nameserver, got %T", response.Object) } - attributes, err := sdp.ToAttributesCustom(map[string]interface{}{ + attributes, err := sdp.ToAttributesCustom(map[string]any{ "conformance": nameserver.Conformance, "objectClassName": nameserver.ObjectClassName, "notices": nameserver.Notices, diff --git a/stdlib-source/build/package/Dockerfile b/stdlib-source/build/package/Dockerfile index deb0f149..4c8d1112 100644 --- a/stdlib-source/build/package/Dockerfile +++ 
b/stdlib-source/build/package/Dockerfile @@ -1,5 +1,5 @@ # Build the source binary -FROM golang:1.25-alpine AS builder +FROM golang:1.26-alpine AS builder ARG TARGETOS ARG TARGETARCH ARG BUILD_VERSION diff --git a/stdlib-source/cmd/root.go b/stdlib-source/cmd/root.go index d2e37d6d..7b4122a1 100644 --- a/stdlib-source/cmd/root.go +++ b/stdlib-source/cmd/root.go @@ -94,7 +94,13 @@ var rootCmd = &cobra.Command{ e.SetInitError(initErr) sentry.CaptureException(initErr) } else { - e.StartSendingHeartbeats(ctx) + e.MarkAdaptersInitialized() + // Start() already launched the heartbeat loop, so StartSendingHeartbeats + // is a no-op here. Send an immediate heartbeat so the API server learns + // the source is healthy without waiting for the next tick. + if err := e.SendHeartbeat(ctx, nil); err != nil { + log.WithError(err).Warn("Failed to send post-init heartbeat") + } } <-ctx.Done() diff --git a/tfutils/aws_config.go b/tfutils/aws_config.go index bb957134..3baad19b 100644 --- a/tfutils/aws_config.go +++ b/tfutils/aws_config.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "maps" "net/http" "net/url" "os" @@ -56,7 +57,7 @@ type ProviderFile struct { type AWSProvider struct { Name string `hcl:"name,label" yaml:"name,omitempty"` Alias string `hcl:"alias,optional" yaml:"alias,omitempty"` - AccessKey string `hcl:"access_key,optional" yaml:"access_key,omitempty"` + AccessKey string `hcl:"access_key,optional" yaml:"access_key,omitempty"` //nolint:gosec // G101: field name, not a hardcoded credential; deserialized from local Terraform HCL config, never marshaled into logs or HTTP responses SecretKey string `hcl:"secret_key,optional" yaml:"secret_key,omitempty"` Token string `hcl:"token,optional" yaml:"token,omitempty"` Region string `hcl:"region,optional" yaml:"region,omitempty"` @@ -117,7 +118,7 @@ type AssumeRoleWithWebIdentity struct { // restore the default value to a cty value after tfconfig has // passed it through JSON to "void the caller needing to deal with // cty" 
-func ctyFromTfconfig(v interface{}) cty.Value { +func ctyFromTfconfig(v any) cty.Value { switch def := v.(type) { case bool: return cty.BoolVal(def) @@ -127,13 +128,13 @@ func ctyFromTfconfig(v interface{}) cty.Value { return cty.NumberIntVal(int64(def)) case string: return cty.StringVal(def) - case []interface{}: + case []any: d := make([]cty.Value, 0, len(def)) for _, v := range def { d = append(d, ctyFromTfconfig(v)) } return cty.ListVal(d) - case map[string]interface{}: + case map[string]any: d := map[string]cty.Value{} for k, v := range def { d[k] = ctyFromTfconfig(v) @@ -313,9 +314,7 @@ func setVariables(dest *hcl.EvalContext, variables map[string]cty.Value) { if variablesDest == nil { variablesDest = map[string]cty.Value{} } - for k, v := range variables { - variablesDest[k] = v - } + maps.Copy(variablesDest, variables) dest.Variables["var"] = cty.ObjectVal(variablesDest) } diff --git a/tfutils/azure_config.go b/tfutils/azure_config.go index c10af272..81f41ed7 100644 --- a/tfutils/azure_config.go +++ b/tfutils/azure_config.go @@ -17,7 +17,7 @@ type AzureProvider struct { SubscriptionID string `hcl:"subscription_id,optional" yaml:"subscription_id,omitempty"` TenantID string `hcl:"tenant_id,optional" yaml:"tenant_id,omitempty"` ClientID string `hcl:"client_id,optional" yaml:"client_id,omitempty"` - ClientSecret string `hcl:"client_secret,optional" yaml:"client_secret,omitempty"` + ClientSecret string `hcl:"client_secret,optional" yaml:"client_secret,omitempty"` //nolint:gosec // G101: field name, not a hardcoded credential; deserialized from local Terraform HCL config, never marshaled into logs or HTTP responses Environment string `hcl:"environment,optional" yaml:"environment,omitempty"` // Throw any additional stuff into here so it doesn't fail diff --git a/tfutils/gcp_config.go b/tfutils/gcp_config.go index 68853ebf..a82eda01 100644 --- a/tfutils/gcp_config.go +++ b/tfutils/gcp_config.go @@ -15,7 +15,7 @@ type GCPProvider struct { Name string 
`hcl:"name,label" yaml:"name,omitempty"` Alias string `hcl:"alias,optional" yaml:"alias,omitempty"` Credentials string `hcl:"credentials,optional" yaml:"credentials,omitempty"` - AccessToken string `hcl:"access_token,optional" yaml:"access_token,omitempty"` + AccessToken string `hcl:"access_token,optional" yaml:"access_token,omitempty"` //nolint:gosec // G101: field name, not a hardcoded credential; deserialized from local Terraform HCL config, never marshaled into logs or HTTP responses ImpersonateServiceAccount string `hcl:"impersonate_service_account,optional" yaml:"impersonate_service_account,omitempty"` Project string `hcl:"project,optional" yaml:"project,omitempty"` Region string `hcl:"region,optional" yaml:"region,omitempty"` diff --git a/tfutils/plan.go b/tfutils/plan.go index 19624902..574d4234 100644 --- a/tfutils/plan.go +++ b/tfutils/plan.go @@ -18,14 +18,14 @@ type Plan struct { FormatVersion string `json:"format_version,omitempty"` TerraformVersion string `json:"terraform_version,omitempty"` Variables Variables `json:"variables,omitempty"` - PlannedValues StateValues `json:"planned_values,omitempty"` + PlannedValues StateValues `json:"planned_values"` // ResourceDrift and ResourceChanges are sorted in a user-friendly order // that is undefined at this time, but consistent. 
ResourceDrift []ResourceChange `json:"resource_drift,omitempty"` ResourceChanges []ResourceChange `json:"resource_changes,omitempty"` OutputChanges map[string]Change `json:"output_changes,omitempty"` - PriorState State `json:"prior_state,omitempty"` - Config planConfig `json:"configuration,omitempty"` + PriorState State `json:"prior_state"` + Config planConfig `json:"configuration"` RelevantAttributes []ResourceAttr `json:"relevant_attributes,omitempty"` Checks json.RawMessage `json:"checks,omitempty"` Timestamp string `json:"timestamp,omitempty"` @@ -35,7 +35,7 @@ type Plan struct { // Config represents the complete configuration source type planConfig struct { ProviderConfigs map[string]ProviderConfig `json:"provider_config,omitempty"` - RootModule ConfigModule `json:"root_module,omitempty"` + RootModule ConfigModule `json:"root_module"` } // ProviderConfig describes all of the provider configurations throughout the @@ -43,12 +43,12 @@ type planConfig struct { // provider configurations are the one concept in Terraform that can span across // module boundaries. 
type ProviderConfig struct { - Name string `json:"name,omitempty"` - FullName string `json:"full_name,omitempty"` - Alias string `json:"alias,omitempty"` - VersionConstraint string `json:"version_constraint,omitempty"` - ModuleAddress string `json:"module_address,omitempty"` - Expressions map[string]interface{} `json:"expressions,omitempty"` + Name string `json:"name,omitempty"` + FullName string `json:"full_name,omitempty"` + Alias string `json:"alias,omitempty"` + VersionConstraint string `json:"version_constraint,omitempty"` + ModuleAddress string `json:"module_address,omitempty"` + Expressions map[string]any `json:"expressions,omitempty"` } type ConfigModule struct { @@ -120,13 +120,13 @@ func (m ConfigModule) DigResource(address string) *ConfigResource { } type moduleCall struct { - Source string `json:"source,omitempty"` - Expressions map[string]interface{} `json:"expressions,omitempty"` - CountExpression *expression `json:"count_expression,omitempty"` - ForEachExpression *expression `json:"for_each_expression,omitempty"` - Module ConfigModule `json:"module,omitempty"` - VersionConstraint string `json:"version_constraint,omitempty"` - DependsOn []string `json:"depends_on,omitempty"` + Source string `json:"source,omitempty"` + Expressions map[string]any `json:"expressions,omitempty"` + CountExpression *expression `json:"count_expression,omitempty"` + ForEachExpression *expression `json:"for_each_expression,omitempty"` + Module ConfigModule `json:"module"` + VersionConstraint string `json:"version_constraint,omitempty"` + DependsOn []string `json:"depends_on,omitempty"` } // variables is the JSON representation of the variables provided to the current @@ -164,7 +164,7 @@ type ConfigResource struct { // Expressions" describes the resource-type-specific content of the // configuration block. 
- Expressions map[string]interface{} `json:"expressions,omitempty"` + Expressions map[string]any `json:"expressions,omitempty"` // SchemaVersion indicates which version of the resource type schema the // "values" property conforms to. @@ -181,14 +181,14 @@ type ConfigResource struct { type output struct { Sensitive bool `json:"sensitive,omitempty"` - Expression expression `json:"expression,omitempty"` + Expression expression `json:"expression"` DependsOn []string `json:"depends_on,omitempty"` Description string `json:"description,omitempty"` } type provisioner struct { - Type string `json:"type,omitempty"` - Expressions map[string]interface{} `json:"expressions,omitempty"` + Type string `json:"type,omitempty"` + Expressions map[string]any `json:"expressions,omitempty"` } // expression represents any unparsed expression @@ -220,7 +220,7 @@ type Variable struct { // prior state (which is always complete) and the planned new state. type StateValues struct { Outputs map[string]Output `json:"outputs,omitempty"` - RootModule Module `json:"root_module,omitempty"` + RootModule Module `json:"root_module"` } // Get a specific resource from this module or its children @@ -293,13 +293,13 @@ type Resource struct { // AttributeValues is the JSON representation of the attribute values of the // resource, whose structure depends on the resource type schema. -type AttributeValues map[string]interface{} +type AttributeValues map[string]any var indexBrackets = regexp.MustCompile(`\[(\d+)\]`) // Digs through the attribute values to find the value at the given key. This // supports nested keys i.e. "foo.bar" and arrays i.e. 
"foo[0]" -func (av AttributeValues) Dig(key string) (interface{}, bool) { +func (av AttributeValues) Dig(key string) (any, bool) { sections := strings.Split(key, ".") if len(sections) == 0 { @@ -312,7 +312,7 @@ func (av AttributeValues) Dig(key string) (interface{}, bool) { // Check for an index indexMatches := indexBrackets.FindStringSubmatch(section) - var value interface{} + var value any var ok bool if len(indexMatches) == 0 { @@ -339,7 +339,7 @@ func (av AttributeValues) Dig(key string) (interface{}, bool) { } // Check if the value is an array - array, ok := arr.([]interface{}) + array, ok := arr.([]any) if !ok { return nil, false @@ -359,7 +359,7 @@ func (av AttributeValues) Dig(key string) (interface{}, bool) { } // If there are more sections, then we need to dig deeper - childMap, ok := value.(map[string]interface{}) + childMap, ok := value.(map[string]any) if !ok { return nil, false @@ -413,7 +413,7 @@ type ResourceChange struct { Deposed string `json:"deposed,omitempty"` // Change describes the change that will be made to this object - Change Change `json:"change,omitempty"` + Change Change `json:"change"` // ActionReason is a keyword representing some optional extra context // for why the actions in Change.Actions were chosen. 
diff --git a/tfutils/plan_mapper.go b/tfutils/plan_mapper.go index d79409eb..4b904286 100644 --- a/tfutils/plan_mapper.go +++ b/tfutils/plan_mapper.go @@ -12,10 +12,10 @@ import ( "github.com/getsentry/sentry-go" "github.com/google/uuid" awsAdapters "github.com/overmindtech/cli/aws-source/adapters" - k8sAdapters "github.com/overmindtech/cli/k8s-source/adapters" "github.com/overmindtech/cli/go/sdp-go" - gcpAdapters "github.com/overmindtech/cli/sources/gcp/proc" + k8sAdapters "github.com/overmindtech/cli/k8s-source/adapters" azureAdapters "github.com/overmindtech/cli/sources/azure/proc" + gcpAdapters "github.com/overmindtech/cli/sources/gcp/proc" log "github.com/sirupsen/logrus" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -385,7 +385,7 @@ func mapResourceToQuery(itemDiff *sdp.ItemDiff, terraformResource *Resource, map // isJSONPlanFile checks if the supplied bytes are valid JSON that could be a plan file. // This is used to determine if we need to convert a binary plan or if it's already JSON. 
func isJSONPlanFile(bytes []byte) bool { - var jsonValue interface{} + var jsonValue any err := json.Unmarshal(bytes, &jsonValue) if err != nil { @@ -400,7 +400,7 @@ func isJSONPlanFile(bytes []byte) bool { // pass a state file to Overmind rather than a plan file since the commands to // create them are similar func isStateFile(bytes []byte) bool { - fields := make(map[string]interface{}) + fields := make(map[string]any) err := json.Unmarshal(bytes, &fields) if err != nil { @@ -651,7 +651,7 @@ func maskSensitiveData(attributes, sensitive any) any { // Finds fields from the `before` and `after` attributes that are known after // apply and replaces the "after" value with the string "(known after apply)" func handleKnownAfterApply(before, after *sdp.ItemAttributes, afterUnknown json.RawMessage) error { - var afterUnknownInterface interface{} + var afterUnknownInterface any err := json.Unmarshal(afterUnknown, &afterUnknownInterface) if err != nil { return fmt.Errorf("could not unmarshal `after_unknown` from plan: %w", err) @@ -684,9 +684,9 @@ func handleKnownAfterApply(before, after *sdp.ItemAttributes, afterUnknown json. // "after" values for fields that are known after apply. 
By default these are // `null` which produces a bad diff, so we replace them with (known after apply) // to more accurately mirror what Terraform does in the CLI -func insertKnownAfterApply(before, after *structpb.Value, afterUnknown interface{}) error { +func insertKnownAfterApply(before, after *structpb.Value, afterUnknown any) error { switch afterUnknown := afterUnknown.(type) { - case map[string]interface{}: + case map[string]any: for k, v := range afterUnknown { if v == true { if afterFields := after.GetStructValue().GetFields(); afterFields != nil { @@ -711,7 +711,7 @@ func insertKnownAfterApply(before, after *structpb.Value, afterUnknown interface } } } - case []interface{}: + case []any: for i, v := range afterUnknown { if v == true { // If this value in a slice is true, set the corresponding value diff --git a/tfutils/plan_mapper_test.go b/tfutils/plan_mapper_test.go index e6788c2f..8149d8f0 100644 --- a/tfutils/plan_mapper_test.go +++ b/tfutils/plan_mapper_test.go @@ -27,52 +27,52 @@ func TestMapResourceToQuery_PendingCreation(t *testing.T) { t.Parallel() tests := []struct { - name string - itemDiffStatus sdp.ItemDiffStatus - hasMappings bool - expectedMapStatus MapStatus + name string + itemDiffStatus sdp.ItemDiffStatus + hasMappings bool + expectedMapStatus MapStatus expectedMappingStatus sdp.MappedItemMappingStatus - expectMappingError bool + expectMappingError bool }{ { - name: "CREATED with missing attributes - pending creation", - itemDiffStatus: sdp.ItemDiffStatus_ITEM_DIFF_STATUS_CREATED, - hasMappings: true, - expectedMapStatus: MapStatusPendingCreation, + name: "CREATED with missing attributes - pending creation", + itemDiffStatus: sdp.ItemDiffStatus_ITEM_DIFF_STATUS_CREATED, + hasMappings: true, + expectedMapStatus: MapStatusPendingCreation, expectedMappingStatus: sdp.MappedItemMappingStatus_MAPPED_ITEM_MAPPING_STATUS_PENDING_CREATION, - expectMappingError: false, + expectMappingError: false, }, { - name: "UPDATED with missing attributes - 
error", - itemDiffStatus: sdp.ItemDiffStatus_ITEM_DIFF_STATUS_UPDATED, - hasMappings: true, - expectedMapStatus: MapStatusNotEnoughInfo, + name: "UPDATED with missing attributes - error", + itemDiffStatus: sdp.ItemDiffStatus_ITEM_DIFF_STATUS_UPDATED, + hasMappings: true, + expectedMapStatus: MapStatusNotEnoughInfo, expectedMappingStatus: sdp.MappedItemMappingStatus_MAPPED_ITEM_MAPPING_STATUS_ERROR, - expectMappingError: true, + expectMappingError: true, }, { - name: "DELETED with missing attributes - error", - itemDiffStatus: sdp.ItemDiffStatus_ITEM_DIFF_STATUS_DELETED, - hasMappings: true, - expectedMapStatus: MapStatusNotEnoughInfo, + name: "DELETED with missing attributes - error", + itemDiffStatus: sdp.ItemDiffStatus_ITEM_DIFF_STATUS_DELETED, + hasMappings: true, + expectedMapStatus: MapStatusNotEnoughInfo, expectedMappingStatus: sdp.MappedItemMappingStatus_MAPPED_ITEM_MAPPING_STATUS_ERROR, - expectMappingError: true, + expectMappingError: true, }, { - name: "REPLACED with missing attributes - error", - itemDiffStatus: sdp.ItemDiffStatus_ITEM_DIFF_STATUS_REPLACED, - hasMappings: true, - expectedMapStatus: MapStatusNotEnoughInfo, + name: "REPLACED with missing attributes - error", + itemDiffStatus: sdp.ItemDiffStatus_ITEM_DIFF_STATUS_REPLACED, + hasMappings: true, + expectedMapStatus: MapStatusNotEnoughInfo, expectedMappingStatus: sdp.MappedItemMappingStatus_MAPPED_ITEM_MAPPING_STATUS_ERROR, - expectMappingError: true, + expectMappingError: true, }, { - name: "No mappings - unsupported", - itemDiffStatus: sdp.ItemDiffStatus_ITEM_DIFF_STATUS_CREATED, - hasMappings: false, - expectedMapStatus: MapStatusUnsupported, + name: "No mappings - unsupported", + itemDiffStatus: sdp.ItemDiffStatus_ITEM_DIFF_STATUS_CREATED, + hasMappings: false, + expectedMapStatus: MapStatusUnsupported, expectedMappingStatus: sdp.MappedItemMappingStatus_MAPPED_ITEM_MAPPING_STATUS_UNSUPPORTED, - expectMappingError: false, + expectMappingError: false, }, } @@ -629,28 +629,28 @@ func 
TestMaskSensitiveData(t *testing.T) { } func TestHandleKnownAfterApply(t *testing.T) { - before, err := sdp.ToAttributes(map[string]interface{}{ + before, err := sdp.ToAttributes(map[string]any{ "string_value": "foo", "int_value": 42, "bool_value": true, "float_value": 3.14, "data": "secret", // Known after apply but doesn't exist in the "after" map, this happens sometimes - "list_value": []interface{}{ + "list_value": []any{ "foo", "bar", }, - "map_value": map[string]interface{}{ + "map_value": map[string]any{ "foo": "bar", "bar": "baz", }, - "map_value2": map[string]interface{}{ - "ding": map[string]interface{}{ + "map_value2": map[string]any{ + "ding": map[string]any{ "foo": "bar", }, }, - "nested_list": []interface{}{ - []interface{}{}, - []interface{}{ + "nested_list": []any{ + []any{}, + []any{ "foo", "bar", }, @@ -660,26 +660,26 @@ func TestHandleKnownAfterApply(t *testing.T) { t.Fatal(err) } - after, err := sdp.ToAttributes(map[string]interface{}{ + after, err := sdp.ToAttributes(map[string]any{ "string_value": "bar", // I want to see a diff here "int_value": nil, // These are going to be known after apply "bool_value": nil, // These are going to be known after apply "float_value": 3.14, - "list_value": []interface{}{ + "list_value": []any{ "foo", "bar", "baz", // So is this one }, - "map_value": map[string]interface{}{ // This whole thing will be known after apply + "map_value": map[string]any{ // This whole thing will be known after apply "foo": "bar", }, - "map_value2": map[string]interface{}{ - "ding": map[string]interface{}{ + "map_value2": map[string]any{ + "ding": map[string]any{ "foo": nil, // This will be known after apply }, }, - "nested_list": []interface{}{ - []interface{}{ + "nested_list": []any{ + []any{ "foo", }, }, @@ -748,7 +748,7 @@ func TestHandleKnownAfterApply(t *testing.T) { t.Error(err) } - if list, ok := i.([]interface{}); ok { + if list, ok := i.([]any); ok { if list[2] != KnownAfterApply { t.Errorf("expected third string_value to 
be %v, got %v", KnownAfterApply, list[2]) } @@ -810,7 +810,7 @@ func interpolateScope(scope string, data map[string]any) (string, error) { } // Digs through a map using the same logic that terraform does i.e. foo.bar[0] -func terraformDig(srcMapPtr interface{}, path string) interface{} { +func terraformDig(srcMapPtr any, path string) any { // Split the path on each period parts := strings.Split(path, ".") @@ -821,7 +821,7 @@ func terraformDig(srcMapPtr interface{}, path string) interface{} { // Check for an index in this section indexMatches := indexBrackets.FindStringSubmatch(parts[0]) - var value interface{} + var value any if len(indexMatches) == 0 { // No index, just get the value @@ -838,7 +838,7 @@ func terraformDig(srcMapPtr interface{}, path string) interface{} { } // Get the value - arr, ok := dig.Interface(srcMapPtr, keyName).([]interface{}) + arr, ok := dig.Interface(srcMapPtr, keyName).([]any) if !ok { return nil @@ -856,13 +856,13 @@ func terraformDig(srcMapPtr interface{}, path string) interface{} { return value } else { // Force it to another map[string]interface{} - valueMap := make(map[string]interface{}) + valueMap := make(map[string]any) if mapString, ok := value.(map[string]string); ok { for k, v := range mapString { valueMap[k] = v } - } else if mapInterface, ok := value.(map[string]interface{}); ok { + } else if mapInterface, ok := value.(map[string]any); ok { valueMap = mapInterface } else if mapAttributeValues, ok := value.(AttributeValues); ok { valueMap = mapAttributeValues