diff --git a/admin/server/cmd/server/options.go b/admin/server/cmd/server/options.go index 3ccdcbdfe2..791751c082 100644 --- a/admin/server/cmd/server/options.go +++ b/admin/server/cmd/server/options.go @@ -203,6 +203,27 @@ func (o *ValidatedOptions) Complete(ctx context.Context) (*Options, error) { }, nil } +// NewAdminHandler creates an http.Handler for the admin API with all middleware configured. +func NewAdminHandler( + logger *slog.Logger, + dbClient database.DBClient, + csClient ocm.ClusterServiceClientSpec, + fpaCredRetriever fpa.FirstPartyApplicationTokenCredentialRetriever, +) http.Handler { + // Submux for V1 HCP endpoints + v1HCPMux := middleware.NewHCPResourceServerMux() + v1HCPMux.Handle("GET", "/helloworld", hcp.HCPHelloWorld(dbClient, csClient)) + v1HCPMux.Handle("GET", "/helloworld/lbs", hcp.HCPDemoListLoadbalancers(dbClient, csClient, fpaCredRetriever)) + + v1HCPMux.Handle("GET", "/cosmosdump", cosmosdump.NewCosmosDumpHandler(dbClient)) + + rootMux := http.NewServeMux() + rootMux.Handle("/admin/helloworld", handlers.HelloWorldHandler()) + rootMux.Handle("/admin/v1/hcp/", http.StripPrefix("/admin/v1/hcp", v1HCPMux.Handler())) + + return middleware.WithClientPrincipal(middleware.WithLowercaseURLPathValue(middleware.WithLogger(logger, rootMux))) +} + func (opts *Options) Run(ctx context.Context) error { logger := opts.Logger logger.Info("Reporting health.", "port", opts.HealthPort) @@ -214,18 +235,11 @@ func (opts *Options) Run(ctx context.Context) error { logger.Info("Running server", "port", opts.Port) - // Submux for V1 HCP endpoints - v1HCPMux := middleware.NewHCPResourceServerMux() - v1HCPMux.Handle("GET", "/helloworld", hcp.HCPHelloWorld(opts.DbClient, opts.ClustersServiceClient, opts.FpaCredentialRetriever)) - v1HCPMux.Handle("GET", "/cosmosdump", cosmosdump.NewCosmosDumpHandler(opts.DbClient)) - - rootMux := http.NewServeMux() - rootMux.Handle("/admin/helloworld", handlers.HelloWorldHandler()) - rootMux.Handle("/admin/v1/hcp/", 
http.StripPrefix("/admin/v1/hcp", v1HCPMux.Handler())) + handler := NewAdminHandler(opts.Logger, opts.DbClient, opts.ClustersServiceClient, opts.FpaCredentialRetriever) s := http.Server{ Addr: net.JoinHostPort("", strconv.Itoa(opts.Port)), - Handler: middleware.WithClientPrincipal(middleware.WithLowercaseURLPathValue(middleware.WithLogger(logger, rootMux))), + Handler: handler, } interrupts.ListenAndServe(&s, 5*time.Second) interrupts.WaitForGracefulShutdown() diff --git a/admin/server/handlers/hcp/helloworld.go b/admin/server/handlers/hcp/helloworld.go index cdf95baf1f..cea250457f 100644 --- a/admin/server/handlers/hcp/helloworld.go +++ b/admin/server/handlers/hcp/helloworld.go @@ -36,7 +36,7 @@ import ( // in tandem. // -func HCPHelloWorld(dbClient database.DBClient, csClient ocm.ClusterServiceClientSpec, fpaCredentialRetriever fpa.FirstPartyApplicationTokenCredentialRetriever) http.Handler { +func HCPHelloWorld(dbClient database.DBClient, csClient ocm.ClusterServiceClientSpec) http.Handler { return http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { // get the azure resource ID for this HCP resourceID, err := utils.ResourceIDFromContext(request.Context()) @@ -66,20 +66,53 @@ func HCPHelloWorld(dbClient database.DBClient, csClient ocm.ClusterServiceClient return } + // some output + output := map[string]any{ + "resourceID": hcp.ID.String(), + "internalClusterID": hcp.ServiceProviderProperties.ClusterServiceID, + "clientPrincipalName": clientPrincipalName, + "tenantID": csCluster.Azure().TenantID(), + "managedResourceGroup": csCluster.Azure().ManagedResourceGroupName(), + "hcpName": hcp.Name, + } + err = json.NewEncoder(writer).Encode(output) + if err != nil { + http.Error(writer, fmt.Sprintf("failed to encode output: %v", err), http.StatusInternalServerError) + return + } + }) +} + +func HCPDemoListLoadbalancers(dbClient database.DBClient, csClient ocm.ClusterServiceClientSpec, fpaCredentialRetriever 
fpa.FirstPartyApplicationTokenCredentialRetriever) http.Handler { + return http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { + // get the azure resource ID for this HCP + resourceID, err := utils.ResourceIDFromContext(request.Context()) + if err != nil { + http.Error(writer, fmt.Sprintf("failed to get resource ID: %v", err), http.StatusInternalServerError) + return + } + + // load the HCP from the cosmos DB + hcp, err := dbClient.HCPClusters(resourceID.SubscriptionID, resourceID.ResourceGroupName).Get(request.Context(), resourceID.Name) + if err != nil { + http.Error(writer, fmt.Sprintf("failed to get HCP: %v", err), http.StatusInternalServerError) + return + } + // get first party application token credentials for the HCP - tokenCredential, err := fpaCredentialRetriever.RetrieveCredential(csCluster.Azure().TenantID()) + tokenCredential, err := fpaCredentialRetriever.RetrieveCredential(hcp.Identity.TenantID) if err != nil { http.Error(writer, fmt.Sprintf("failed to get FPA token credentials: %v", err), http.StatusInternalServerError) return } // fetch all loadbalancers from the managedresource group using azuresdk - lbClient, err := armnetwork.NewLoadBalancersClient(csCluster.Azure().SubscriptionID(), tokenCredential, nil) + lbClient, err := armnetwork.NewLoadBalancersClient(hcp.ID.SubscriptionID, tokenCredential, nil) if err != nil { http.Error(writer, fmt.Sprintf("failed to create load balancer client: %v", err), http.StatusInternalServerError) return } - pager := lbClient.NewListPager(csCluster.Azure().ManagedResourceGroupName(), nil) + pager := lbClient.NewListPager(hcp.CustomerProperties.Platform.ManagedResourceGroup, nil) var loadBalancers []string for pager.More() { page, err := pager.NextPage(request.Context()) @@ -95,15 +128,7 @@ func HCPHelloWorld(dbClient database.DBClient, csClient ocm.ClusterServiceClient } // some output - output := map[string]any{ - "resourceID": resourceID.String(), - "internalClusterID": 
hcp.ServiceProviderProperties.ClusterServiceID, - "clientPrincipalName": clientPrincipalName, - "hcp": hcp, - "tenantID": csCluster.Azure().TenantID(), - "managedResourceGroup": csCluster.Azure().ManagedResourceGroupName(), - "loadBalancers": loadBalancers, - } + output := map[string]any{"loadBalancers": loadBalancers} err = json.NewEncoder(writer).Encode(output) if err != nil { http.Error(writer, fmt.Sprintf("failed to encode output: %v", err), http.StatusInternalServerError) diff --git a/test-integration/Makefile b/test-integration/Makefile index 15dd27aed1..c445c1d6cc 100644 --- a/test-integration/Makefile +++ b/test-integration/Makefile @@ -13,12 +13,12 @@ LDFLAGS = -ldflags "\ # Default target .DEFAULT_GOAL := test -# Build the binary +# Run the integration tests with the Cosmos DB emulator running in a container. test: - go test github.com/Azure/ARO-HCP/test-int -.PHONY: build + ./hack/start-cosmos-emulator.sh + ./hack/test-integration.sh +.PHONY: test clean: .PHONY: clean - diff --git a/test-integration/admin/admin_crud_test.go b/test-integration/admin/admin_crud_test.go new file mode 100644 index 0000000000..49cb7bc697 --- /dev/null +++ b/test-integration/admin/admin_crud_test.go @@ -0,0 +1,70 @@ +// Copyright 2025 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package admin + +import ( + "context" + "embed" + "io/fs" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/Azure/ARO-HCP/internal/api" + "github.com/Azure/ARO-HCP/test-integration/utils/databasemutationhelpers" + "github.com/Azure/ARO-HCP/test-integration/utils/integrationutils" +) + +//go:embed artifacts +var artifacts embed.FS + +func TestAdminCRUD(t *testing.T) { + integrationutils.SkipIfNotSimulationTesting(t) + + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + allCRUDDirFS, err := fs.Sub(artifacts, "artifacts/AdminCRUD") + require.NoError(t, err) + + crudSuiteDirs := api.Must(fs.ReadDir(allCRUDDirFS, ".")) + for _, crudSuiteDirEntry := range crudSuiteDirs { + crudSuiteDir := api.Must(fs.Sub(allCRUDDirFS, crudSuiteDirEntry.Name())) + t.Run(crudSuiteDirEntry.Name(), func(t *testing.T) { + testAdminCRUDSuite( + ctx, + t, + crudSuiteDir) + }) + } +} + +func testAdminCRUDSuite(ctx context.Context, t *testing.T, crudSuiteDir fs.FS) { + testDirs := api.Must(fs.ReadDir(crudSuiteDir, ".")) + for _, testDirEntry := range testDirs { + testDir := api.Must(fs.Sub(crudSuiteDir, testDirEntry.Name())) + + currTest, err := databasemutationhelpers.NewResourceMutationTest( + ctx, + databasemutationhelpers.NothingCRUDSpecializer{}, + testDirEntry.Name(), + testDir, + ) + require.NoError(t, err) + + t.Run(testDirEntry.Name(), currTest.RunTest) + } +} diff --git a/test-integration/admin/artifacts/AdminCRUD/HCP/hello-world/00-load-initial-cosmos-state/01-cluster.json b/test-integration/admin/artifacts/AdminCRUD/HCP/hello-world/00-load-initial-cosmos-state/01-cluster.json new file mode 100644 index 0000000000..e1c838e717 --- /dev/null +++ b/test-integration/admin/artifacts/AdminCRUD/HCP/hello-world/00-load-initial-cosmos-state/01-cluster.json @@ -0,0 +1,25 @@ +{ + "id": 
"|subscriptions|0465bc32-c654-41b8-8d87-9815d7abe8f6|resourcegroups|some-resource-group|providers|microsoft.redhatopenshift|hcpopenshiftclusters|some-hcp-cluster", + "partitionKey": "0465bc32-c654-41b8-8d87-9815d7abe8f6", + "resourceType": "Microsoft.RedHatOpenShift/hcpOpenShiftClusters", + "properties": { + "internalId": "/api/aro_hcp/v1alpha1/clusters/fixed-value", + "identity": { + "type": "UserAssigned" + }, + "internalState": { + "internalAPI": { + "location": "eastus", + "serviceProviderProperties": { + "clusterServiceID": "" + } + } + }, + "provisioningState": "Succeeded", + "resourceId": "/subscriptions/0465bc32-c654-41b8-8d87-9815d7abe8f6/resourceGroups/some-resource-group/providers/Microsoft.RedHatOpenShift/hcpOpenShiftClusters/some-hcp-cluster", + "systemData": {}, + "tags": { + "foo": "bar" + } + } +} diff --git a/test-integration/admin/artifacts/AdminCRUD/HCP/hello-world/01-loadClusterService-initial-state/01-cluster.json b/test-integration/admin/artifacts/AdminCRUD/HCP/hello-world/01-loadClusterService-initial-state/01-cluster.json new file mode 100644 index 0000000000..a5b209fd06 --- /dev/null +++ b/test-integration/admin/artifacts/AdminCRUD/HCP/hello-world/01-loadClusterService-initial-state/01-cluster.json @@ -0,0 +1,90 @@ +{ + "api": { + "cidr_block_access": { + "allow": { + "mode": "allow_all" + } + }, + "listening": "external" + }, + "azure": { + "etcd_encryption": { + "data_encryption": { + "customer_managed": { + "encryption_type": "kms", + "kms": { + "active_key": { + "key_name": "encryptionKeyName", + "key_vault_name": "keyVaultName", + "key_version": "2024-12-01-preview" + } + } + }, + "key_management_mode": "customer_managed" + } + }, + "managed_resource_group_name": "fake-mrg-name", + "network_security_group_resource_id": "/subscriptions/6b690bec-0c16-4ecb-8f67-781caf40bba7/resourceGroups/bar/providers/Microsoft.Network/networkSecurityGroups/nsg", + "nodes_outbound_connectivity": { + "outbound_type": "load_balancer" + }, + 
"operators_authentication": { + "managed_identities": { + "control_plane_operators_managed_identities": {}, + "data_plane_operators_managed_identities": {}, + "managed_identities_data_plane_identity_url": "" + } + }, + "resource_group_name": "resourcegroupname", + "resource_name": "create-with-tags", + "subnet_resource_id": "/subscriptions/6b690bec-0c16-4ecb-8f67-781caf40bba7/resourceGroups/bar/providers/Microsoft.Network/virtualNetworks/vnet/subnets/subnet", + "subscription_id": "6b690bec-0c16-4ecb-8f67-781caf40bba7", + "tenant_id": "fake-tenant-id" + }, + "ccs": { + "enabled": true, + "kind": "CCS" + }, + "cloud_provider": { + "id": "azure", + "kind": "CloudProvider" + }, + "flavour": { + "id": "osd-4", + "kind": "Flavour" + }, + "href": "/api/aro_hcp/v1alpha1/clusters/fixed-value", + "hypershift": { + "enabled": true + }, + "id": "fixed-value", + "image_registry": { + "state": "disabled" + }, + "kind": "Cluster", + "name": "create-with-tags", + "network": { + "host_prefix": 23, + "machine_cidr": "10.0.0.0/16", + "pod_cidr": "10.128.0.0/14", + "service_cidr": "172.30.0.0/16", + "type": "OVNKubernetes" + }, + "node_drain_grace_period": { + "unit": "minutes", + "value": 0 + }, + "product": { + "id": "aro", + "kind": "Product" + }, + "region": { + "id": "fake-location", + "kind": "CloudRegion" + }, + "version": { + "channel_group": "stable", + "id": "", + "kind": "Version" + } +} \ No newline at end of file diff --git a/test-integration/admin/artifacts/AdminCRUD/HCP/hello-world/02-httpGet-helloworld/00-key.json b/test-integration/admin/artifacts/AdminCRUD/HCP/hello-world/02-httpGet-helloworld/00-key.json new file mode 100644 index 0000000000..bb3efd4eb1 --- /dev/null +++ b/test-integration/admin/artifacts/AdminCRUD/HCP/hello-world/02-httpGet-helloworld/00-key.json @@ -0,0 +1,3 @@ +{ + "resourceID": 
"/admin/v1/hcp/subscriptions/0465bc32-c654-41b8-8d87-9815d7abe8f6/resourceGroups/some-resource-group/providers/Microsoft.RedHatOpenShift/hcpOpenShiftClusters/some-hcp-cluster/helloworld" +} diff --git a/test-integration/admin/artifacts/AdminCRUD/HCP/hello-world/02-httpGet-helloworld/helloworld.json b/test-integration/admin/artifacts/AdminCRUD/HCP/hello-world/02-httpGet-helloworld/helloworld.json new file mode 100644 index 0000000000..77f9c0e7d8 --- /dev/null +++ b/test-integration/admin/artifacts/AdminCRUD/HCP/hello-world/02-httpGet-helloworld/helloworld.json @@ -0,0 +1,8 @@ +{ + "resourceID": "/subscriptions/0465bc32-c654-41b8-8d87-9815d7abe8f6/resourceGroups/some-resource-group/providers/Microsoft.RedHatOpenShift/hcpOpenShiftClusters/some-hcp-cluster", + "internalClusterID": "/api/aro_hcp/v1alpha1/clusters/fixed-value", + "clientPrincipalName": "test-user@example.com", + "tenantID": "fake-tenant-id", + "managedResourceGroup": "fake-mrg-name", + "hcpName": "some-hcp-cluster" +} diff --git a/test-integration/frontend/cluster_mutation_test.go b/test-integration/frontend/cluster_mutation_test.go index b037f6ee66..98925885c0 100644 --- a/test-integration/frontend/cluster_mutation_test.go +++ b/test-integration/frontend/cluster_mutation_test.go @@ -36,11 +36,12 @@ func TestFrontendClusterMutation(t *testing.T) { ctx, cancel := context.WithCancel(ctx) defer cancel() - frontend, testInfo, err := integrationutils.NewFrontendFromTestingEnv(ctx, t) + testInfo, err := integrationutils.NewIntegrationTestInfoFromEnv(ctx, t) require.NoError(t, err) defer testInfo.Cleanup(context.Background()) - go frontend.Run(ctx, ctx.Done()) + err = testInfo.Start(ctx) + require.NoError(t, err) subscriptionID := "0465bc32-c654-41b8-8d87-9815d7abe8f6" // TODO could read from JSON resourceGroupName := "some-resource-group" @@ -63,14 +64,14 @@ func TestFrontendClusterMutation(t *testing.T) { type clusterMutationTest struct { ctx context.Context testDir fs.FS - testInfo 
*integrationutils.FrontendIntegrationTestInfo + testInfo *integrationutils.IntegrationTestInfo subscriptionID string resourceGroupName string genericMutationTestInfo *integrationutils.GenericMutationTest } -func newClusterMutationTest(ctx context.Context, testDir fs.FS, testInfo *integrationutils.FrontendIntegrationTestInfo, subscriptionID, resourceGroupName string) (*clusterMutationTest, error) { +func newClusterMutationTest(ctx context.Context, testDir fs.FS, testInfo *integrationutils.IntegrationTestInfo, subscriptionID, resourceGroupName string) (*clusterMutationTest, error) { genericMutationTestInfo, err := integrationutils.ReadGenericMutationTest(testDir) if err != nil { return nil, err @@ -93,7 +94,7 @@ func (tt *clusterMutationTest) runTest(t *testing.T) { toCreate := &hcpsdk20240610preview.HcpOpenShiftCluster{} require.NoError(t, json.Unmarshal(tt.genericMutationTestInfo.CreateJSON, toCreate)) - clusterClient := tt.testInfo.Get20240610ClientFactory(tt.subscriptionID).NewHcpOpenShiftClustersClient() + clusterClient := integrationutils.Get20240610ClientFactory(tt.testInfo.FrontendURL, tt.subscriptionID).NewHcpOpenShiftClustersClient() _, mutationErr := clusterClient.BeginCreateOrUpdate(ctx, tt.resourceGroupName, *toCreate.Name, *toCreate, nil) if tt.genericMutationTestInfo.IsUpdateTest() || tt.genericMutationTestInfo.IsPatchTest() { diff --git a/test-integration/frontend/cluster_read_test.go b/test-integration/frontend/cluster_read_test.go index 45099ce1b9..06be08c83c 100644 --- a/test-integration/frontend/cluster_read_test.go +++ b/test-integration/frontend/cluster_read_test.go @@ -37,11 +37,12 @@ func TestFrontendClusterRead(t *testing.T) { ctx, cancel := context.WithCancel(ctx) defer cancel() - frontend, testInfo, err := integrationutils.NewFrontendFromTestingEnv(ctx, t) + testInfo, err := integrationutils.NewIntegrationTestInfoFromEnv(ctx, t) require.NoError(t, err) defer testInfo.Cleanup(context.Background()) - go frontend.Run(ctx, ctx.Done()) + err = 
testInfo.Start(ctx) + require.NoError(t, err) subscriptionID := "0465bc32-c654-41b8-8d87-9815d7abe8f6" // TODO could read from JSON err = testInfo.CreateInitialCosmosContent(ctx, api.Must(fs.Sub(artifacts, "artifacts/ClusterReadOldData/initial-cosmos-state"))) @@ -54,7 +55,7 @@ func TestFrontendClusterRead(t *testing.T) { resourceGroup := "some-resource-group" hcpClusterName := "some-hcp-cluster" - hcpCluster, err := testInfo.Get20240610ClientFactory(subscriptionID).NewHcpOpenShiftClustersClient().Get(ctx, resourceGroup, hcpClusterName, nil) + hcpCluster, err := integrationutils.Get20240610ClientFactory(testInfo.FrontendURL, subscriptionID).NewHcpOpenShiftClustersClient().Get(ctx, resourceGroup, hcpClusterName, nil) require.NoError(t, err) actualJSON, err := json.MarshalIndent(hcpCluster, "", " ") @@ -66,7 +67,7 @@ func TestFrontendClusterRead(t *testing.T) { require.NoError(t, json.Unmarshal(api.Must(artifacts.ReadFile("artifacts/ClusterReadOldData/some-hcp-cluster--expected.json")), &expectedMap)) require.Equal(t, expectedMap, actualMap) - _, err = testInfo.Get20240610ClientFactory(subscriptionID).NewHcpOpenShiftClustersClient().BeginDelete(ctx, resourceGroup, hcpClusterName, nil) + _, err = integrationutils.Get20240610ClientFactory(testInfo.FrontendURL, subscriptionID).NewHcpOpenShiftClustersClient().BeginDelete(ctx, resourceGroup, hcpClusterName, nil) require.NoError(t, err) // the poller will never be done because we aren't running the backend. Just let it be. 
} diff --git a/test-integration/frontend/externalauth_mutation_test.go b/test-integration/frontend/externalauth_mutation_test.go index 3e112934c4..bd17c888f4 100644 --- a/test-integration/frontend/externalauth_mutation_test.go +++ b/test-integration/frontend/externalauth_mutation_test.go @@ -37,11 +37,12 @@ func TestFrontendExternalAuthMutation(t *testing.T) { ctx, cancel := context.WithCancel(ctx) defer cancel() - frontend, testInfo, err := integrationutils.NewFrontendFromTestingEnv(ctx, t) + testInfo, err := integrationutils.NewIntegrationTestInfoFromEnv(ctx, t) require.NoError(t, err) defer testInfo.Cleanup(context.Background()) - go frontend.Run(ctx, ctx.Done()) + err = testInfo.Start(ctx) + require.NoError(t, err) subscriptionID := "0465bc32-c654-41b8-8d87-9815d7abe8f6" // TODO could read from JSON resourceGroupName := "some-resource-group" @@ -69,14 +70,14 @@ func TestFrontendExternalAuthMutation(t *testing.T) { type externalAuthMutationTest struct { ctx context.Context testDir fs.FS - testInfo *integrationutils.FrontendIntegrationTestInfo + testInfo *integrationutils.IntegrationTestInfo subscriptionID string resourceGroupName string genericMutationTestInfo *integrationutils.GenericMutationTest } -func newExternalAuthMutationTest(ctx context.Context, testDir fs.FS, testInfo *integrationutils.FrontendIntegrationTestInfo, subscriptionID, resourceGroupName string) (*externalAuthMutationTest, error) { +func newExternalAuthMutationTest(ctx context.Context, testDir fs.FS, testInfo *integrationutils.IntegrationTestInfo, subscriptionID, resourceGroupName string) (*externalAuthMutationTest, error) { genericMutationTestInfo, err := integrationutils.ReadGenericMutationTest(testDir) if err != nil { return nil, err @@ -101,7 +102,7 @@ func (tt *externalAuthMutationTest) runTest(t *testing.T) { hcpClusterName := strings.Split(t.Name(), "/")[1] toCreate := &hcpsdk20240610preview.ExternalAuth{} require.NoError(t, json.Unmarshal(tt.genericMutationTestInfo.CreateJSON, 
toCreate)) - externalAuthClient := tt.testInfo.Get20240610ClientFactory(tt.subscriptionID).NewExternalAuthsClient() + externalAuthClient := integrationutils.Get20240610ClientFactory(tt.testInfo.FrontendURL, tt.subscriptionID).NewExternalAuthsClient() _, mutationErr := externalAuthClient.BeginCreateOrUpdate(ctx, tt.resourceGroupName, hcpClusterName, *toCreate.Name, *toCreate, nil) if tt.genericMutationTestInfo.IsUpdateTest() || tt.genericMutationTestInfo.IsPatchTest() { diff --git a/test-integration/frontend/launch_test.go b/test-integration/frontend/launch_test.go index 90393ebce8..32e44d0893 100644 --- a/test-integration/frontend/launch_test.go +++ b/test-integration/frontend/launch_test.go @@ -31,11 +31,12 @@ func TestLaunch(t *testing.T) { ctx, cancel := context.WithCancel(ctx) defer cancel() - frontend, testInfo, err := integrationutils.NewFrontendFromTestingEnv(ctx, t) + testInfo, err := integrationutils.NewIntegrationTestInfoFromEnv(ctx, t) require.NoError(t, err) defer testInfo.Cleanup(context.Background()) - go frontend.Run(ctx, ctx.Done()) + err = testInfo.Start(ctx) + require.NoError(t, err) // run for a little bit and don't crash time.Sleep(5 * time.Second) diff --git a/test-integration/frontend/nodepool_mutation_test.go b/test-integration/frontend/nodepool_mutation_test.go index 98f864819c..19addb276c 100644 --- a/test-integration/frontend/nodepool_mutation_test.go +++ b/test-integration/frontend/nodepool_mutation_test.go @@ -37,11 +37,12 @@ func TestFrontendNodePoolMutation(t *testing.T) { ctx, cancel := context.WithCancel(ctx) defer cancel() - frontend, testInfo, err := integrationutils.NewFrontendFromTestingEnv(ctx, t) + testInfo, err := integrationutils.NewIntegrationTestInfoFromEnv(ctx, t) require.NoError(t, err) defer testInfo.Cleanup(context.Background()) - go frontend.Run(ctx, ctx.Done()) + err = testInfo.Start(ctx) + require.NoError(t, err) subscriptionID := "0465bc32-c654-41b8-8d87-9815d7abe8f6" // TODO could read from JSON resourceGroupName 
:= "some-resource-group" @@ -69,14 +70,14 @@ func TestFrontendNodePoolMutation(t *testing.T) { type nodePoolMutationTest struct { ctx context.Context testDir fs.FS - testInfo *integrationutils.FrontendIntegrationTestInfo + testInfo *integrationutils.IntegrationTestInfo subscriptionID string resourceGroupName string genericMutationTestInfo *integrationutils.GenericMutationTest } -func newNodePoolMutationTest(ctx context.Context, testDir fs.FS, testInfo *integrationutils.FrontendIntegrationTestInfo, subscriptionID, resourceGroupName string) (*nodePoolMutationTest, error) { +func newNodePoolMutationTest(ctx context.Context, testDir fs.FS, testInfo *integrationutils.IntegrationTestInfo, subscriptionID, resourceGroupName string) (*nodePoolMutationTest, error) { genericMutationTestInfo, err := integrationutils.ReadGenericMutationTest(testDir) if err != nil { return nil, err @@ -101,7 +102,7 @@ func (tt *nodePoolMutationTest) runTest(t *testing.T) { hcpClusterName := strings.Split(t.Name(), "/")[1] toCreate := &hcpsdk20240610preview.NodePool{} require.NoError(t, json.Unmarshal(tt.genericMutationTestInfo.CreateJSON, toCreate)) - nodePoolClient := tt.testInfo.Get20240610ClientFactory(tt.subscriptionID).NewNodePoolsClient() + nodePoolClient := integrationutils.Get20240610ClientFactory(tt.testInfo.FrontendURL, tt.subscriptionID).NewNodePoolsClient() _, mutationErr := nodePoolClient.BeginCreateOrUpdate(ctx, tt.resourceGroupName, hcpClusterName, *toCreate.Name, *toCreate, nil) if tt.genericMutationTestInfo.IsUpdateTest() || tt.genericMutationTestInfo.IsPatchTest() { diff --git a/test-integration/go.mod b/test-integration/go.mod index 59453888be..b152a23191 100644 --- a/test-integration/go.mod +++ b/test-integration/go.mod @@ -4,6 +4,7 @@ go 1.24.11 require ( dario.cat/mergo v1.0.1 + github.com/Azure/ARO-HCP/admin/server v0.0.0-00010101000000-000000000000 github.com/Azure/ARO-HCP/backend v0.0.0-00010101000000-000000000000 github.com/Azure/ARO-HCP/frontend 
v0.0.0-20251215171147-ff127faefe61 github.com/Azure/ARO-HCP/internal v0.0.0-00010101000000-000000000000 @@ -21,6 +22,8 @@ require ( ) require ( + github.com/Azure/azure-kusto-go v0.16.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v6 v6.2.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/tracing/azotel v0.4.0 // indirect github.com/Azure/retry v0.0.0-20250221010952-92c9290cea0f // indirect github.com/cenkalti/backoff/v5 v5.0.3 // indirect @@ -41,7 +44,9 @@ require ( github.com/openshift-online/ocm-api-model/model v0.0.435 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/rivo/uniseg v0.4.7 // indirect + github.com/samber/lo v1.51.0 // indirect github.com/sanity-io/litter v1.5.8 // indirect + github.com/shopspring/decimal v1.4.0 // indirect github.com/spf13/cobra v1.10.2 // indirect github.com/spf13/pflag v1.0.10 // indirect github.com/vmihailenco/msgpack/v4 v4.3.13 // indirect @@ -122,6 +127,8 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect ) +replace github.com/Azure/ARO-HCP/admin/server => ../admin/server + replace github.com/Azure/ARO-HCP/internal => ../internal replace github.com/Azure/ARO-HCP/backend => ../backend diff --git a/test-integration/go.sum b/test-integration/go.sum index ad8be3d434..63d75346c0 100644 --- a/test-integration/go.sum +++ b/test-integration/go.sum @@ -2,6 +2,8 @@ dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/Azure/ARO-HCP/frontend v0.0.0-20251215171147-ff127faefe61 h1:uVWJr/c7ctjVoi/TzBzCOYxPmzFK46YVtLzgMPeVvpA= github.com/Azure/ARO-HCP/frontend v0.0.0-20251215171147-ff127faefe61/go.mod h1:b0G/0atqxIXTr3p8xOHeWzrR4U4CidyYil/H0pKypaQ= +github.com/Azure/azure-kusto-go v0.16.1 h1:vCBWcQghmC1qIErUUgVNWHxGhZVStu1U/hki6iBA14k= +github.com/Azure/azure-kusto-go v0.16.1/go.mod h1:9F2zvXH8B6eWzgI1S4k1ZXAIufnBZ1bv1cW1kB1n3D0= 
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc= @@ -14,6 +16,10 @@ github.com/Azure/azure-sdk-for-go/sdk/data/azcosmos v1.4.1 h1:ToPLhnXvatKVN4Zkcx github.com/Azure/azure-sdk-for-go/sdk/data/azcosmos v1.4.1/go.mod h1:Krtog/7tz27z75TwM5cIS8bxEH4dcBUezcq+kGVeZEo= github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA= github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v3 v3.1.0 h1:2qsIIvxVT+uE6yrNldntJKlLRgxGbZ85kgtz5SNBhMw= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v3 v3.1.0/go.mod h1:AW8VEadnhw9xox+VaVd9sP7NjzOAnaZBLRH6Tq3cJ38= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v6 v6.2.0 h1:HYGD75g0bQ3VO/Omedm54v4LrD3B1cGImuRF3AJ5wLo= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v6 v6.2.0/go.mod h1:ulHyBFJOI0ONiRL4vcJTmS7rx18jQQlEPmAgo80cRdM= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 h1:Dd+RhdJn0OTtVGaeDLZpcumkIVCtA/3/Fo42+eoYvVM= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0/go.mod h1:5kakwfW5CjC9KK+Q4wjXAg+ShuIm2mBMua0ZFj2C8PE= github.com/Azure/azure-sdk-for-go/sdk/tracing/azotel v0.4.0 h1:RTTsXUJWn0jumeX62Mb153wYXykqnrzYBYDeHp0kiuk= @@ -175,8 +181,12 @@ github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUc github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/russross/blackfriday/v2 v2.1.0/go.mod 
h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/samber/lo v1.51.0 h1:kysRYLbHy/MB7kQZf5DSN50JHmMsNEdeY24VzJFu7wI= +github.com/samber/lo v1.51.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0= github.com/sanity-io/litter v1.5.8 h1:uM/2lKrWdGbRXDrIq08Lh9XtVYoeGtcQxk9rtQ7+rYg= github.com/sanity-io/litter v1.5.8/go.mod h1:9gzJgR2i4ZpjZHsKvUXIRQVk7P+yM3e+jAF7bU2UI5U= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= @@ -189,6 +199,8 @@ github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRci github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk= +github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk= github.com/vmihailenco/msgpack/v4 v4.3.13 h1:A2wsiTbvp63ilDaWmsk2wjx6xZdxQOvpiNlKBGKKXKI= github.com/vmihailenco/msgpack/v4 v4.3.13/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= diff --git a/test-integration/hack/_emulator_handling.sh b/test-integration/hack/_emulator_handling.sh new file mode 100644 index 0000000000..9a4e5db628 --- /dev/null +++ b/test-integration/hack/_emulator_handling.sh @@ -0,0 +1,120 @@ +#!/bin/bash +# Shared functions for Cosmos DB emulator management + +# 
Constants +DEFAULT_COSMOS_ENDPOINT="https://localhost:8081" + +# Choose container runtime (prefer podman, fallback to docker) +get_container_runtime() { + if command -v podman >/dev/null 2>&1; then + echo "podman" + elif command -v docker >/dev/null 2>&1; then + echo "docker" + else + echo "Error: Neither podman nor docker found. Please install one of them." >&2 + exit 1 + fi +} +CONTAINER_RUNTIME=$(get_container_runtime) + +get_running_emulator_container_name() { + ${CONTAINER_RUNTIME} ps --filter "name=local-cosmos-emulator-*" --format "{{.Names}}" | head -n 1 +} + +# Stop and remove emulator containers +stop_emulator() { + local save_logs=${1:-false} + + echo "Stopping and removing existing container(s)..." + + if [ "${save_logs}" = "true" ]; then + # Save logs before stopping + local tmp_data_dir="${ARTIFACT_DIR:-/tmp}" + mkdir -p "$tmp_data_dir" + + local containers + containers=$(${CONTAINER_RUNTIME} ps -aq --filter "name=local-cosmos-emulator-*" 2>/dev/null || true) + + for container in $containers; do + local container_name + container_name=$(${CONTAINER_RUNTIME} inspect --format='{{.Name}}' "$container" | sed 's|^/||') + if ${CONTAINER_RUNTIME} ps -q --filter "id=$container" | grep -q .; then + echo "Saving logs for container: $container_name" + ${CONTAINER_RUNTIME} logs "$container" > "${tmp_data_dir}/${container_name}.log" 2>&1 || true + fi + done + fi + + ${CONTAINER_RUNTIME} ps -q --filter "name=local-cosmos-emulator-*" | xargs -r "${CONTAINER_RUNTIME}" stop + ${CONTAINER_RUNTIME} ps -aq --filter "name=local-cosmos-emulator-*" | xargs -r "${CONTAINER_RUNTIME}" rm +} + +# Start the emulator container and wait until ready (handles OS differences internally) +start_emulator() { + local container_name=$1 + local partition_count=$2 + local os_type + local container_image + local ready_log_message + + os_type=$(uname -s) + container_image="mcr.microsoft.com/cosmosdb/linux/azure-cosmos-emulator:latest" + ready_log_message="Started 
$((partition_count+1))/$((partition_count+1)) partitions" + + if [ "${os_type}" = "Darwin" ]; then + # on OSX we need to use the vnext-preview image because the regular one does not support ARM64 + # and also fails when running in qemu emulation mode under podman. + # vnext-preview docs: https://learn.microsoft.com/en-gb/azure/cosmos-db/emulator-linux#docker-commands + container_image="mcr.microsoft.com/cosmosdb/linux/azure-cosmos-emulator:vnext-preview" + # the vnext-preview image logs a different message when ready + ready_log_message="PostgreSQL and pgcosmos extension are ready" + fi + + echo "Starting Cosmos DB emulator with container name: ${container_name}" + ${CONTAINER_RUNTIME} run \ + --publish 8081:8081 \ + --publish 10250-10255:10250-10255 \ + -e AZURE_COSMOS_EMULATOR_IP_ADDRESS_OVERRIDE=127.0.0.1 \ + -e AZURE_COSMOS_EMULATOR_PARTITION_COUNT="${partition_count}" \ + -e PROTOCOL=https \ + --name "${container_name}" \ + --detach \ + "${container_image}" + + # Wait for emulator to be ready by checking logs + echo "Waiting for Cosmos DB emulator to be ready..." + for i in {1..60}; do + logs_output=$(${CONTAINER_RUNTIME} logs "${container_name}" 2>&1) + if echo "${logs_output}" | grep -q "${ready_log_message}"; then + echo "Cosmos DB emulator is ready!" + break + fi + if [ "$i" -eq 60 ]; then + echo "Timeout waiting for Cosmos DB emulator to be ready" + ${CONTAINER_RUNTIME} logs "${container_name}" + exit 1 + fi + echo "Attempt $i/60: Waiting for emulator to start..." + sleep 5 + done + + # Wait for HTTPS endpoint to be available + echo "Waiting for HTTPS endpoint to be available..." + for i in {1..30}; do + if curl --insecure -s "${DEFAULT_COSMOS_ENDPOINT}" >/dev/null 2>&1; then + echo "HTTPS endpoint is ready!" + break + fi + if [ "$i" -eq 30 ]; then + echo "Error: Timeout waiting for HTTPS endpoint to be available" + ${CONTAINER_RUNTIME} logs "${container_name}" + return 1 + fi + echo "Attempt $i/30: Waiting for HTTPS endpoint..." 
+    sleep 2 +  done + +  echo "Cosmos DB emulator started successfully!" +  echo "Container name: ${container_name}" +  echo "Endpoint: ${DEFAULT_COSMOS_ENDPOINT}" +} diff --git a/test-integration/hack/start-cosmos-emulator.sh b/test-integration/hack/start-cosmos-emulator.sh index bffff642c5..9082004ac2 100755 --- a/test-integration/hack/start-cosmos-emulator.sh +++ b/test-integration/hack/start-cosmos-emulator.sh @@ -2,80 +2,31 @@ set -o errexit set -o nounset set -o pipefail +set -x -set -x # Turn on command tracing +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +source "${SCRIPT_DIR}/_emulator_handling.sh" -# these are the default values of the emulator container. -DEFAULT_COSMOS_ENDPOINT="https://localhost:8081" +# Control whether to restart an existing emulator +RESTART_EXISTING_EMULATOR="${RESTART_EXISTING_EMULATOR:-false}" -echo "Starting Cosmos DB emulator..." +# Number of partitions to use for the emulator +# Increase if a lot of tests run in parallel and start failing with 503 errors +PARTITION_COUNT="${PARTITION_COUNT:-50}" -# Generate random container name -CONTAINER_NAME="local-cosmos-emulator-$(shuf -i 1000-9999 -n 1)" - -# Choose container runtime (prefer podman, fallback to docker) -CONTAINER_RUNTIME="" -if command -v podman >/dev/null 2>&1; then - CONTAINER_RUNTIME="podman" -elif command -v docker >/dev/null 2>&1; then - CONTAINER_RUNTIME="docker" -else - echo "Error: Neither podman nor docker found. Please install one of them." - exit 1 -fi - -echo "Using container runtime: ${CONTAINER_RUNTIME}" - -# Stop any existing emulator first -if ${CONTAINER_RUNTIME} ps -q --filter "name=local-cosmos-emulator-*" | grep -q .; then - echo "Found existing Cosmos DB emulator containers, stopping them..." 
- ${CONTAINER_RUNTIME} ps -q --filter "name=local-cosmos-emulator-*" | xargs -r ${CONTAINER_RUNTIME} stop - ${CONTAINER_RUNTIME} ps -aq --filter "name=local-cosmos-emulator-*" | xargs -r ${CONTAINER_RUNTIME} rm -fi - -echo "Starting Cosmos DB emulator with container name: ${CONTAINER_NAME}" -${CONTAINER_RUNTIME} run \ - --publish 8081:8081 \ - --publish 10250-10255:10250-10255 \ - -e AZURE_COSMOS_EMULATOR_IP_ADDRESS_OVERRIDE=127.0.0.1 \ - --name "${CONTAINER_NAME}" \ - --detach \ - mcr.microsoft.com/cosmosdb/linux/azure-cosmos-emulator:latest - -# Wait for emulator to be ready by checking logs -echo "Waiting for Cosmos DB emulator to be ready..." -for i in {1..60}; do - if ${CONTAINER_RUNTIME} logs "${CONTAINER_NAME}" 2>&1 | grep -q "Started 11/11 partitions"; then - echo "Cosmos DB emulator is ready!" - break - fi - if [ "$i" -eq 60 ]; then - echo "Timeout waiting for Cosmos DB emulator to be ready" - exit 1 +RUNNING_CONTAINER=$(get_running_emulator_container_name) +if [ -n "${RUNNING_CONTAINER}" ]; then + if [ "${RESTART_EXISTING_EMULATOR}" != "true" ]; then + echo "Cosmos DB emulator is already running." + echo "Container name: ${RUNNING_CONTAINER}" + echo "Endpoint: ${DEFAULT_COSMOS_ENDPOINT}" + exit 0 fi - echo "Attempt $i/60: Waiting for emulator to start all partitions..." - sleep 5 -done -netstat -anlp - -# Wait for HTTPS endpoint to be available -echo "Waiting for HTTPS endpoint to be available..." -for i in {1..30}; do - if curl --insecure -s "${DEFAULT_COSMOS_ENDPOINT}/_explorer/emulator.pem" >/dev/null 2>&1; then - echo "HTTPS endpoint is ready!" - break - fi - if [ "$i" -eq 30 ]; then - echo "Timeout waiting for HTTPS endpoint to be available, will continue and try anyway." - break - fi - echo "Attempt $i/30: Waiting for HTTPS endpoint..." - sleep 2 -done + echo "Found existing Cosmos DB emulator container: ${RUNNING_CONTAINER}" + stop_emulator + echo "Will start a new emulator container..." +fi -echo "✅ Cosmos DB emulator started successfully!" 
-echo "Container name: ${CONTAINER_NAME}" -echo "Endpoint: ${DEFAULT_COSMOS_ENDPOINT}" -echo "" -echo "To stop all Cosmos emulators, run: ./frontend/hack/stop-all-cosmos-emulators.sh" \ No newline at end of file +CONTAINER_NAME="local-cosmos-emulator-$(shuf -i 1000-9999 -n 1)" +start_emulator "${CONTAINER_NAME}" "${PARTITION_COUNT}" \ No newline at end of file diff --git a/test-integration/hack/stop-cosmos-emulator.sh b/test-integration/hack/stop-cosmos-emulator.sh index 6046d16622..176b47a2df 100755 --- a/test-integration/hack/stop-cosmos-emulator.sh +++ b/test-integration/hack/stop-cosmos-emulator.sh @@ -2,19 +2,15 @@ set -euo pipefail -echo "Stopping all Cosmos DB emulator containers..." +# Source shared emulator handling functions +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +# shellcheck source=test-integration/hack/_emulator_handling.sh +source "${SCRIPT_DIR}/_emulator_handling.sh" -# Choose container runtime (prefer podman, fallback to docker) -CONTAINER_RUNTIME="" -if command -v podman >/dev/null 2>&1; then - CONTAINER_RUNTIME="podman" -elif command -v docker >/dev/null 2>&1; then - CONTAINER_RUNTIME="docker" -else - echo "Error: Neither podman nor docker found. Please install one of them." - exit 1 -fi +echo "Stopping all Cosmos DB emulator containers..." 
+# Get container runtime +CONTAINER_RUNTIME=$(get_container_runtime) echo "Using container runtime: ${CONTAINER_RUNTIME}" # Find all cosmos emulator containers @@ -28,24 +24,8 @@ fi echo "Found Cosmos DB emulator containers:" ${CONTAINER_RUNTIME} ps -a --filter "name=local-cosmos-emulator-*" --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" -# Save logs from running containers before stopping them -TMP_DATA_DIR="${ARTIFACT_DIR:-/tmp}" -mkdir -p "$TMP_DATA_DIR" - -for container in $CONTAINERS; do - container_name=$(${CONTAINER_RUNTIME} inspect --format='{{.Name}}' "$container" | sed 's|^/||') - if ${CONTAINER_RUNTIME} ps -q --filter "id=$container" | grep -q .; then - echo "Saving logs for container: $container_name" - ${CONTAINER_RUNTIME} logs "$container" > "${TMP_DATA_DIR}/${container_name}.log" 2>&1 || true - fi -done - -# Stop and remove all containers -echo "Stopping containers..." -echo "$CONTAINERS" | xargs -r ${CONTAINER_RUNTIME} stop - -echo "Removing containers..." -echo "$CONTAINERS" | xargs -r ${CONTAINER_RUNTIME} rm -v +# Stop and remove all containers with log saving +stop_emulator "true" echo "✅ All Cosmos DB emulator containers stopped and removed." -echo "Container logs saved to: $TMP_DATA_DIR" \ No newline at end of file +echo "Container logs saved to: ${ARTIFACT_DIR:-/tmp}" \ No newline at end of file diff --git a/test-integration/utils/databasemutationhelpers/http_test_accessor.go b/test-integration/utils/databasemutationhelpers/http_test_accessor.go new file mode 100644 index 0000000000..73932eedac --- /dev/null +++ b/test-integration/utils/databasemutationhelpers/http_test_accessor.go @@ -0,0 +1,113 @@ +// Copyright 2026 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package databasemutationhelpers + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + + "github.com/Azure/ARO-HCP/internal/utils" +) + +type HTTPTestAccessor interface { + Get(ctx context.Context, resourceIDString string) (any, error) + List(ctx context.Context, parentResourceIDString string) ([]any, error) + CreateOrUpdate(ctx context.Context, resourceIDString string, content []byte) error + Patch(ctx context.Context, resourceIDString string, content []byte) error + Delete(ctx context.Context, resourceIDString string) error +} + +type httpHTTPTestAccessor struct { + url string + headers map[string]string +} + +func newHTTPTestAccessor(url string, headers map[string]string) *httpHTTPTestAccessor { + return &httpHTTPTestAccessor{ + url: url, + headers: headers, + } +} + +var _ HTTPTestAccessor = &httpHTTPTestAccessor{} + +func (a *httpHTTPTestAccessor) Get(ctx context.Context, resourceIDString string) (any, error) { + return a.doRequest(ctx, http.MethodGet, resourceIDString, nil) +} + +func (a *httpHTTPTestAccessor) List(ctx context.Context, parentResourceIDString string) ([]any, error) { + return nil, utils.TrackError(fmt.Errorf("not implemented yet")) +} + +func (a *httpHTTPTestAccessor) CreateOrUpdate(ctx context.Context, resourceIDString string, content []byte) error { + _, err := a.doRequest(ctx, http.MethodPut, resourceIDString, content) + return err +} + +func (a *httpHTTPTestAccessor) Patch(ctx context.Context, resourceIDString string, content []byte) error { + _, err := a.doRequest(ctx, http.MethodPatch, 
resourceIDString, content) + return err +} + +func (a *httpHTTPTestAccessor) Delete(ctx context.Context, resourceIDString string) error { + _, err := a.doRequest(ctx, http.MethodDelete, resourceIDString, nil) + return err +} + +func (a *httpHTTPTestAccessor) doRequest(ctx context.Context, method, path string, body []byte) (any, error) { + var reqBody io.Reader + if len(body) > 0 { + reqBody = bytes.NewReader(body) + } + + req, err := http.NewRequestWithContext(ctx, method, a.url+path, reqBody) + if err != nil { + return nil, utils.TrackError(err) + } + + for key, value := range a.headers { + req.Header.Set(key, value) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, utils.TrackError(err) + } + defer resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return nil, utils.TrackError(fmt.Errorf("HTTP %d", resp.StatusCode)) + } + + bodyBytes, err := io.ReadAll(resp.Body) + if err != nil { + return nil, utils.TrackError(err) + } + + if len(bodyBytes) == 0 { + return nil, nil + } + + var result map[string]any + if err := json.Unmarshal(bodyBytes, &result); err != nil { + return nil, utils.TrackError(err) + } + + return result, nil +} diff --git a/test-integration/utils/databasemutationhelpers/per_resource_http.go b/test-integration/utils/databasemutationhelpers/per_resource_http.go index de419d8fae..72ac04c01c 100644 --- a/test-integration/utils/databasemutationhelpers/per_resource_http.go +++ b/test-integration/utils/databasemutationhelpers/per_resource_http.go @@ -32,14 +32,6 @@ import ( hcpsdk20240610preview "github.com/Azure/ARO-HCP/test/sdk/v20240610preview/resourcemanager/redhatopenshifthcp/armredhatopenshifthcp" ) -type HTTPTestAccessor interface { - Get(ctx context.Context, resourceIDString string) (any, error) - List(ctx context.Context, parentResourceIDString string) ([]any, error) - CreateOrUpdate(ctx context.Context, resourceIDString string, content []byte) error - Patch(ctx context.Context, 
resourceIDString string, content []byte) error - Delete(ctx context.Context, resourceIDString string) error -} - type frontendHTTPTestAccessor struct { frontEndURL string frontendClient *hcpsdk20240610preview.ClientFactory diff --git a/test-integration/utils/databasemutationhelpers/resource_crud_test_util.go b/test-integration/utils/databasemutationhelpers/resource_crud_test_util.go index 453d711e90..1504d0be63 100644 --- a/test-integration/utils/databasemutationhelpers/resource_crud_test_util.go +++ b/test-integration/utils/databasemutationhelpers/resource_crud_test_util.go @@ -19,25 +19,21 @@ import ( "encoding/json" "fmt" "io/fs" - "net/http" "sort" "strconv" "strings" "testing" - "time" "github.com/neilotoole/slogt" "github.com/stretchr/testify/require" - "k8s.io/apimachinery/pkg/util/wait" - + azcorearm "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" "github.com/Azure/azure-sdk-for-go/sdk/data/azcosmos" "github.com/Azure/ARO-HCP/internal/api" "github.com/Azure/ARO-HCP/internal/database" "github.com/Azure/ARO-HCP/internal/utils" "github.com/Azure/ARO-HCP/test-integration/utils/integrationutils" - hcpsdk20240610preview "github.com/Azure/ARO-HCP/test/sdk/v20240610preview/resourcemanager/redhatopenshifthcp/armredhatopenshifthcp" ) type ResourceMutationTest struct { @@ -104,29 +100,19 @@ func (tt *ResourceMutationTest) RunTest(t *testing.T) { defer cancel() ctx = utils.ContextWithLogger(ctx, slogt.New(t, slogt.JSON())) - frontend, testInfo, err := integrationutils.NewFrontendFromTestingEnv(ctx, t) + testInfo, err := integrationutils.NewIntegrationTestInfoFromEnv(ctx, t) require.NoError(t, err) cleanupCtx := context.Background() cleanupCtx = utils.ContextWithLogger(cleanupCtx, slogt.New(t, slogt.JSON())) defer testInfo.Cleanup(cleanupCtx) - go frontend.Run(ctx, ctx.Done()) - - // wait for migration to complete to eliminate races with our test's second call migrateCosmos and to ensure the server is ready for testing - err = wait.PollUntilContextCancel(ctx, 
1*time.Second, true, func(ctx context.Context) (bool, error) { - _, err := http.Get(testInfo.FrontendURL) - if err != nil { - t.Log(err) - return false, nil - } - return true, nil - }) + err = testInfo.Start(ctx) require.NoError(t, err) stepInput := StepInput{ CosmosContainer: testInfo.CosmosResourcesContainer(), DBClient: testInfo.DBClient, - FrontendClient: testInfo.Get20240610ClientFactory, FrontendURL: testInfo.FrontendURL, + AdminURL: testInfo.AdminURL, ClusterServiceMockInfo: testInfo.ClusterServiceMock, } for _, step := range tt.steps { @@ -297,10 +283,20 @@ func readRawBytesInDir(dir fs.FS) ([][]byte, error) { } type StepInput struct { - CosmosContainer *azcosmos.ContainerClient - DBClient database.DBClient - FrontendClient func(subscriptionID string) *hcpsdk20240610preview.ClientFactory - FrontendURL string - + CosmosContainer *azcosmos.ContainerClient + DBClient database.DBClient ClusterServiceMockInfo *integrationutils.ClusterServiceMock + FrontendURL string + AdminURL string +} + +func (s StepInput) HTTPTestAccessor(key ResourceKey) HTTPTestAccessor { + if strings.HasPrefix(key.ResourceID, "/admin/") { + return newHTTPTestAccessor(s.AdminURL, map[string]string{ + "X-Ms-Client-Principal-Name": "test-user@example.com", + "Content-Type": "application/json", + }) + } + subscriptionID := api.Must(azcorearm.ParseResourceID(key.ResourceID)).SubscriptionID + return newFrontendHTTPTestAccessor(s.FrontendURL, integrationutils.Get20240610ClientFactory(s.FrontendURL, subscriptionID)) } diff --git a/test-integration/utils/databasemutationhelpers/step_completeoperation.go b/test-integration/utils/databasemutationhelpers/step_completeoperation.go index 6659cde21a..c379642bde 100644 --- a/test-integration/utils/databasemutationhelpers/step_completeoperation.go +++ b/test-integration/utils/databasemutationhelpers/step_completeoperation.go @@ -26,7 +26,7 @@ import ( type completeOperationStep struct { stepID StepID - key FrontendResourceKey + key ResourceKey } func 
newCompleteOperationStep(stepID StepID, stepDir fs.FS) (*completeOperationStep, error) { @@ -34,7 +34,7 @@ func newCompleteOperationStep(stepID StepID, stepDir fs.FS) (*completeOperationS if err != nil { return nil, fmt.Errorf("failed to read key.json: %w", err) } - var key FrontendResourceKey + var key ResourceKey if err := json.Unmarshal(keyBytes, &key); err != nil { return nil, fmt.Errorf("failed to unmarshal key.json: %w", err) } diff --git a/test-integration/utils/databasemutationhelpers/step_httpcreate.go b/test-integration/utils/databasemutationhelpers/step_httpcreate.go index 982736b17d..27457e85a1 100644 --- a/test-integration/utils/databasemutationhelpers/step_httpcreate.go +++ b/test-integration/utils/databasemutationhelpers/step_httpcreate.go @@ -24,15 +24,11 @@ import ( "testing" "github.com/stretchr/testify/require" - - azcorearm "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" - - "github.com/Azure/ARO-HCP/internal/api" ) type httpCreateStep struct { stepID StepID - key FrontendResourceKey + key ResourceKey resources [][]byte expectedError string @@ -43,7 +39,7 @@ func newHTTPCreateStep(stepID StepID, stepDir fs.FS) (*httpCreateStep, error) { if err != nil { return nil, fmt.Errorf("failed to read key.json: %w", err) } - var key FrontendResourceKey + var key ResourceKey if err := json.Unmarshal(keyBytes, &key); err != nil { return nil, fmt.Errorf("failed to unmarshal key.json: %w", err) } @@ -74,8 +70,7 @@ func (l *httpCreateStep) StepID() StepID { } func (l *httpCreateStep) RunTest(ctx context.Context, t *testing.T, stepInput StepInput) { - subscriptionID := api.Must(azcorearm.ParseResourceID(l.key.ResourceID)).SubscriptionID - accessor := newFrontendHTTPTestAccessor(stepInput.FrontendURL, stepInput.FrontendClient(subscriptionID)) + accessor := stepInput.HTTPTestAccessor(l.key) for _, resource := range l.resources { err := accessor.CreateOrUpdate(ctx, l.key.ResourceID, resource) diff --git 
a/test-integration/utils/databasemutationhelpers/step_httpget.go b/test-integration/utils/databasemutationhelpers/step_httpget.go index 4ec3c196e7..b1898712e0 100644 --- a/test-integration/utils/databasemutationhelpers/step_httpget.go +++ b/test-integration/utils/databasemutationhelpers/step_httpget.go @@ -24,19 +24,15 @@ import ( "testing" "github.com/stretchr/testify/require" - - azcorearm "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" - - "github.com/Azure/ARO-HCP/internal/api" ) -type FrontendResourceKey struct { +type ResourceKey struct { ResourceID string `json:"resourceId"` } type httpGetStep struct { stepID StepID - key FrontendResourceKey + key ResourceKey expectedResource map[string]any expectedError string @@ -47,7 +43,7 @@ func newHTTPGetStep(stepID StepID, stepDir fs.FS) (*httpGetStep, error) { if err != nil { return nil, fmt.Errorf("failed to read key.json: %w", err) } - var key FrontendResourceKey + var key ResourceKey if err := json.Unmarshal(keyBytes, &key); err != nil { return nil, fmt.Errorf("failed to unmarshal key.json: %w", err) } @@ -90,9 +86,7 @@ func (l *httpGetStep) StepID() StepID { } func (l *httpGetStep) RunTest(ctx context.Context, t *testing.T, stepInput StepInput) { - resourceID := api.Must(azcorearm.ParseResourceID(l.key.ResourceID)) - subscriptionID := resourceID.SubscriptionID - accessor := newFrontendHTTPTestAccessor(stepInput.FrontendURL, stepInput.FrontendClient(subscriptionID)) + accessor := stepInput.HTTPTestAccessor(l.key) actual, err := accessor.Get(ctx, l.key.ResourceID) switch { case len(l.expectedError) > 0: diff --git a/test-integration/utils/databasemutationhelpers/step_httplist.go b/test-integration/utils/databasemutationhelpers/step_httplist.go index e3454c7949..7ff8461d7b 100644 --- a/test-integration/utils/databasemutationhelpers/step_httplist.go +++ b/test-integration/utils/databasemutationhelpers/step_httplist.go @@ -22,15 +22,11 @@ import ( "testing" "github.com/stretchr/testify/require" - - azcorearm 
"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" - - "github.com/Azure/ARO-HCP/internal/api" ) type httpListStep struct { stepID StepID - key FrontendResourceKey + key ResourceKey expectedResources []*map[string]any } @@ -40,7 +36,7 @@ func newHTTPListStep(stepID StepID, stepDir fs.FS) (*httpListStep, error) { if err != nil { return nil, fmt.Errorf("failed to read key.json: %w", err) } - var key FrontendResourceKey + var key ResourceKey if err := json.Unmarshal(keyBytes, &key); err != nil { return nil, fmt.Errorf("failed to unmarshal key.json: %w", err) } @@ -64,9 +60,7 @@ func (l *httpListStep) StepID() StepID { } func (l *httpListStep) RunTest(ctx context.Context, t *testing.T, stepInput StepInput) { - resourceID := api.Must(azcorearm.ParseResourceID(l.key.ResourceID)) - subscriptionID := resourceID.SubscriptionID - accessor := newFrontendHTTPTestAccessor(stepInput.FrontendURL, stepInput.FrontendClient(subscriptionID)) + accessor := stepInput.HTTPTestAccessor(l.key) actualResources, err := accessor.List(ctx, l.key.ResourceID) require.NoError(t, err) diff --git a/test-integration/utils/integrationutils/frontend_testinfo.go b/test-integration/utils/integrationutils/frontend_testinfo.go index bd640b4c96..c4c6c8f81a 100644 --- a/test-integration/utils/integrationutils/frontend_testinfo.go +++ b/test-integration/utils/integrationutils/frontend_testinfo.go @@ -25,7 +25,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/data/azcosmos" - "github.com/Azure/ARO-HCP/frontend/pkg/frontend" "github.com/Azure/ARO-HCP/internal/api" "github.com/Azure/ARO-HCP/internal/api/arm" "github.com/Azure/ARO-HCP/internal/database" @@ -41,17 +40,19 @@ type CosmosIntegrationTestInfo struct { DatabaseName string } -type FrontendIntegrationTestInfo struct { +type IntegrationTestInfo struct { *CosmosIntegrationTestInfo *ClusterServiceMock ArtifactsDir string FrontendURL string - Frontend *frontend.Frontend + AdminURL string + + Start 
func(ctx context.Context) error } -func (s *FrontendIntegrationTestInfo) Get20240610ClientFactory(subscriptionID string) *hcpsdk20240610preview.ClientFactory { +func Get20240610ClientFactory(frontendURL string, subscriptionID string) *hcpsdk20240610preview.ClientFactory { return api.Must( hcpsdk20240610preview.NewClientFactory(subscriptionID, nil, &azcorearm.ClientOptions{ @@ -64,7 +65,7 @@ func (s *FrontendIntegrationTestInfo) Get20240610ClientFactory(subscriptionID st Services: map[cloud.ServiceName]cloud.ServiceConfiguration{ cloud.ResourceManager: { Audience: "https://management.core.windows.net/", - Endpoint: s.FrontendURL, + Endpoint: frontendURL, }, }, }, @@ -87,7 +88,7 @@ func (emptySystemData) Do(req *policy.Request) (*http.Response, error) { return req.Next() } -func (s *FrontendIntegrationTestInfo) Cleanup(ctx context.Context) { +func (s *IntegrationTestInfo) Cleanup(ctx context.Context) { s.CosmosIntegrationTestInfo.Cleanup(ctx) s.ClusterServiceMock.Cleanup(ctx) } diff --git a/test-integration/utils/integrationutils/mutation_test_utils.go b/test-integration/utils/integrationutils/mutation_test_utils.go index 39a8a0dc95..86db48f976 100644 --- a/test-integration/utils/integrationutils/mutation_test_utils.go +++ b/test-integration/utils/integrationutils/mutation_test_utils.go @@ -86,7 +86,7 @@ type GenericMutationTest struct { expectedErrors []expectedFieldError } -func (h *GenericMutationTest) Initialize(ctx context.Context, testInfo *FrontendIntegrationTestInfo) error { +func (h *GenericMutationTest) Initialize(ctx context.Context, testInfo *IntegrationTestInfo) error { if h.initialCosmosState != nil { err := testInfo.CreateInitialCosmosContent(ctx, h.initialCosmosState) if err != nil { diff --git a/test-integration/utils/integrationutils/utils.go b/test-integration/utils/integrationutils/utils.go index 690639d77a..4a97f98147 100644 --- a/test-integration/utils/integrationutils/utils.go +++ b/test-integration/utils/integrationutils/utils.go @@ -18,15 
+18,21 @@ import ( "context" "fmt" "net" + "net/http" + "net/http/httptest" "os" "sync" "testing" + "time" - // register the APIs. _ "github.com/Azure/ARO-HCP/internal/api/v20240610preview" "github.com/prometheus/client_golang/prometheus" + // register the APIs. + "k8s.io/apimachinery/pkg/util/wait" + + server "github.com/Azure/ARO-HCP/admin/server/cmd/server" "github.com/Azure/ARO-HCP/frontend/pkg/frontend" "github.com/Azure/ARO-HCP/internal/api/arm" "github.com/Azure/ARO-HCP/internal/audit" @@ -60,41 +66,84 @@ func getArtifactDir() string { return artifactDir } -func NewFrontendFromTestingEnv(ctx context.Context, t *testing.T) (*frontend.Frontend, *FrontendIntegrationTestInfo, error) { +type TestingEnvRunner func(ctx context.Context, t *testing.T) error + +func NewIntegrationTestInfoFromEnv(ctx context.Context, t *testing.T) (*IntegrationTestInfo, error) { + logger := utils.DefaultLogger() + + // cosmos setup cosmosTestEnv, err := NewCosmosFromTestingEnv(ctx, t) if err != nil { - return nil, nil, err + return nil, err } - logger := utils.DefaultLogger() + // cluster service setup + clusterServiceMockInfo := NewClusterServiceMock(t, cosmosTestEnv.ArtifactsDir) - listener, err := net.Listen("tcp4", "127.0.0.1:0") + // frontend setup + frontendListener, err := net.Listen("tcp4", "127.0.0.1:0") if err != nil { - return nil, nil, err + return nil, err } - - metricsListener, err := net.Listen("tcp4", "127.0.0.1:0") + frontendMetricsListener, err := net.Listen("tcp4", "127.0.0.1:0") if err != nil { - return nil, nil, err + return nil, err } - noOpAuditClient, err := audit.NewOtelAuditClient(audit.CreateConn(false)) if err != nil { - return nil, nil, err + return nil, err } - metricsRegistry := prometheus.NewRegistry() + aroHCPFrontend := frontend.NewFrontend(logger, frontendListener, frontendMetricsListener, metricsRegistry, cosmosTestEnv.DBClient, clusterServiceMockInfo.MockClusterServiceClient, noOpAuditClient, "fake-location") + + // admin setup + adminHandler := 
server.NewAdminHandler( + logger, + cosmosTestEnv.DBClient, + clusterServiceMockInfo.MockClusterServiceClient, + nil, + ) + adminListener, err := net.Listen("tcp4", "127.0.0.1:0") + if err != nil { + return nil, err + } - clusterServiceMockInfo := NewClusterServiceMock(t, cosmosTestEnv.ArtifactsDir) - - aroHCPFrontend := frontend.NewFrontend(logger, listener, metricsListener, metricsRegistry, cosmosTestEnv.DBClient, clusterServiceMockInfo.MockClusterServiceClient, noOpAuditClient, "fake-location") - testInfo := &FrontendIntegrationTestInfo{ + frontendURL := fmt.Sprintf("http://%s", frontendListener.Addr().String()) + adminURL := fmt.Sprintf("http://%s", adminListener.Addr().String()) + testInfo := &IntegrationTestInfo{ CosmosIntegrationTestInfo: cosmosTestEnv, ClusterServiceMock: clusterServiceMockInfo, ArtifactsDir: cosmosTestEnv.ArtifactsDir, - FrontendURL: fmt.Sprintf("http://%s", listener.Addr().String()), + FrontendURL: frontendURL, + AdminURL: adminURL, + Start: func(ctx context.Context) error { + go aroHCPFrontend.Run(ctx, ctx.Done()) + go runServer(ctx, adminListener, adminHandler) + serverUrls := []string{frontendURL, adminURL} + // frontend: wait for migration to complete to eliminate races with our test's second call migrateCosmos and to ensure the server is ready for testing + err = wait.PollUntilContextCancel(ctx, 1*time.Second, true, func(ctx context.Context) (bool, error) { + for _, url := range serverUrls { + _, err := http.Get(url) + if err != nil { + t.Log(err) + return false, nil + } + } + return true, nil + }) + return err + }, } - return aroHCPFrontend, testInfo, nil + return testInfo, nil +} + +func runServer(ctx context.Context, listener net.Listener, handler http.Handler) { + adminApiServer := httptest.NewUnstartedServer(handler) + adminApiServer.Listener = listener + adminApiServer.Start() + + <-ctx.Done() + adminApiServer.Close() } func MarkOperationsCompleteForName(ctx context.Context, dbClient database.DBClient, subscriptionID, 
resourceName string) error {