From da8e43ba81d934be2d83fa226dc7b27b9b940e3a Mon Sep 17 00:00:00 2001 From: Angel Marin Date: Tue, 27 Jan 2026 23:45:00 +0100 Subject: [PATCH] adapter-cli --- cmd/adapter/dev.go | 28 +++ cmd/adapter/dev_run.go | 137 +++++++++++++++ cmd/adapter/dev_validate.go | 106 ++++++++++++ cmd/adapter/main.go | 1 + internal/dev/dryrun_k8s.go | 220 ++++++++++++++++++++++++ internal/dev/event_source.go | 134 +++++++++++++++ internal/dev/mock_api.go | 247 +++++++++++++++++++++++++++ internal/dev/output.go | 200 ++++++++++++++++++++++ internal/dev/trace.go | 209 +++++++++++++++++++++++ internal/dev/types.go | 115 +++++++++++++ internal/dev/validation.go | 198 +++++++++++++++++++++ sample.yaml | 48 ++++++ test/cloudevent.example.cluster.json | 14 ++ 13 files changed, 1657 insertions(+) create mode 100644 cmd/adapter/dev.go create mode 100644 cmd/adapter/dev_run.go create mode 100644 cmd/adapter/dev_validate.go create mode 100644 internal/dev/dryrun_k8s.go create mode 100644 internal/dev/event_source.go create mode 100644 internal/dev/mock_api.go create mode 100644 internal/dev/output.go create mode 100644 internal/dev/trace.go create mode 100644 internal/dev/types.go create mode 100644 internal/dev/validation.go create mode 100644 sample.yaml create mode 100644 test/cloudevent.example.cluster.json diff --git a/cmd/adapter/dev.go b/cmd/adapter/dev.go new file mode 100644 index 0000000..ddaa8eb --- /dev/null +++ b/cmd/adapter/dev.go @@ -0,0 +1,28 @@ +package main + +import ( + "github.com/spf13/cobra" +) + +// devCmd is the parent command for development tools +var devCmd = &cobra.Command{ + Use: "dev", + Short: "Development tools for adapter testing and debugging", + Long: `Development tools for testing and debugging adapters locally. + +These commands help adapter developers: + - Validate configuration files offline + - Test event processing without a message broker + - Preview what would happen without connecting to Kubernetes + +Examples: + # Validate an adapter configuration + adapter dev validate --config ./adapter-config.yaml + + # Dry-run an event through the adapter + adapter dev run --config ./adapter-config.yaml --event ./test-event.json`, +} + +func init() { + // Subcommands are added in their respective files +} diff --git a/cmd/adapter/dev_run.go b/cmd/adapter/dev_run.go new file mode 100644 index 0000000..5f1540f --- /dev/null +++ b/cmd/adapter/dev_run.go @@ -0,0 +1,137 @@ +package main + +import ( + "context" + "fmt" + "os" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/dev" + "github.com/spf13/cobra" +) + +var ( + runConfigPath string + runEventPath string + runMockAPIResponses string + runEnvFile string + runVerbose bool + runOutput string + runShowManifests bool + runShowPayloads bool + runShowParams bool +) + +var runCmd = &cobra.Command{ + Use: "run", + Short: "Dry-run an event through the adapter", + Long: `Process an event through the adapter in dry-run mode. + +This command executes the full adapter pipeline without: + - Connecting to a real message broker + - Making actual Kubernetes API calls + - Making actual HyperFleet API calls + +It shows what WOULD happen if the event were processed. 
+ +Examples: + # Basic dry-run + adapter dev run --config ./adapter-config.yaml --event ./test-event.json + + # With mock API responses + adapter dev run --config ./adapter-config.yaml --event ./test-event.json \ + --mock-api-responses ./mock-responses.yaml + + # Show rendered manifests + adapter dev run --config ./adapter-config.yaml --event ./test-event.json \ + --show-manifests --verbose + + # Load environment from .env file + adapter dev run --config ./adapter-config.yaml --event ./test-event.json \ + --env-file .env.local`, + RunE: runDryRun, +} + +func init() { + runCmd.Flags().StringVarP(&runConfigPath, "config", "c", "", + "Path to adapter configuration file (required)") + runCmd.Flags().StringVarP(&runEventPath, "event", "e", "", + "Path to event file in JSON or YAML format (required)") + runCmd.Flags().StringVar(&runMockAPIResponses, "mock-api-responses", "", + "Path to YAML file with mock API responses") + runCmd.Flags().StringVar(&runEnvFile, "env-file", "", + "Path to .env file for environment variables") + runCmd.Flags().BoolVarP(&runVerbose, "verbose", "v", false, + "Show detailed execution trace") + runCmd.Flags().StringVarP(&runOutput, "output", "o", "text", + "Output format: text or json") + runCmd.Flags().BoolVar(&runShowManifests, "show-manifests", false, + "Display rendered Kubernetes manifests") + runCmd.Flags().BoolVar(&runShowPayloads, "show-payloads", false, + "Display built payloads") + runCmd.Flags().BoolVar(&runShowParams, "show-params", false, + "Display extracted parameters") + + _ = runCmd.MarkFlagRequired("config") + _ = runCmd.MarkFlagRequired("event") + + devCmd.AddCommand(runCmd) +} + +func runDryRun(cmd *cobra.Command, args []string) error { + // Load environment variables from .env file if provided + if runEnvFile != "" { + envVars, err := dev.LoadEnvFile(runEnvFile) + if err != nil { + return fmt.Errorf("failed to load env file: %w", err) + } + if err := dev.ApplyEnvVars(envVars); err != nil { + return fmt.Errorf("failed to apply env vars: %w", err) + } + } + + // Load the event + eventSource := dev.NewFileEventSource(runEventPath) + eventData, err := eventSource.LoadEvent() + if err != nil { + return fmt.Errorf("failed to load event: %w", err) + } + + // Configure run options + opts := &dev.RunOptions{ + MockAPIResponsesPath: runMockAPIResponses, + ShowManifests: runShowManifests, + ShowPayloads: runShowPayloads, + ShowParams: runShowParams, + EnvFilePath: runEnvFile, + } + + // Execute with trace + ctx := context.Background() + result, err := dev.ExecuteWithTrace(ctx, runConfigPath, eventData, opts) + if err != nil { + return fmt.Errorf("execution failed: %w", err) + } + + // Determine output format + format := dev.OutputFormatText + if runOutput == "json" { + format = dev.OutputFormatJSON + } + + // Enable verbose if show flags are set + verbose := runVerbose || runShowManifests || runShowPayloads || runShowParams + + writer := dev.NewOutputWriter(os.Stdout, format, verbose) + + // Write the results + if err := writer.WriteTraceResult(result); err != nil { + return fmt.Errorf("failed to write results: %w", err) + } + + // Exit with error code if execution failed + if !result.Success { + os.Exit(1) + } + + return nil +} diff --git a/cmd/adapter/dev_validate.go b/cmd/adapter/dev_validate.go new file mode 100644 index 0000000..3a65788 --- /dev/null +++ b/cmd/adapter/dev_validate.go @@ -0,0 +1,106 @@ +package main + +import ( + "fmt" + "os" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/dev" + "github.com/spf13/cobra" +) + +var ( + 
validateConfigPath string + validateVerbose bool + validateOutput string + validateStrict bool + validateEnvFile string +) + +var validateCmd = &cobra.Command{ + Use: "validate", + Short: "Validate adapter configuration file", + Long: `Validate an adapter configuration file offline. + +This command checks: + - YAML syntax and schema structure + - Parameter definitions and references + - CEL expression syntax + - Go template variable references + - Kubernetes manifest structure + +Examples: + # Basic validation + adapter dev validate --config ./adapter-config.yaml + + # Verbose output with details + adapter dev validate --config ./adapter-config.yaml --verbose + + # JSON output for CI pipelines + adapter dev validate --config ./adapter-config.yaml --output json + + # Strict mode (treat warnings as errors) + adapter dev validate --config ./adapter-config.yaml --strict + + # With environment variables from .env file + adapter dev validate --config ./adapter-config.yaml --env-file .env.local`, + RunE: runValidate, +} + +func init() { + validateCmd.Flags().StringVarP(&validateConfigPath, "config", "c", "", + "Path to adapter configuration file (required)") + validateCmd.Flags().BoolVarP(&validateVerbose, "verbose", "v", false, + "Show detailed validation results") + validateCmd.Flags().StringVarP(&validateOutput, "output", "o", "text", + "Output format: text or json") + validateCmd.Flags().BoolVar(&validateStrict, "strict", false, + "Treat warnings as errors") + validateCmd.Flags().StringVar(&validateEnvFile, "env-file", "", + "Path to .env file for required environment variables") + + _ = validateCmd.MarkFlagRequired("config") + + devCmd.AddCommand(validateCmd) +} + +func runValidate(cmd *cobra.Command, args []string) error { + // Load environment variables from .env file if provided + if validateEnvFile != "" { + envVars, err := dev.LoadEnvFile(validateEnvFile) + if err != nil { + return fmt.Errorf("failed to load env file: %w", err) + } + if err := dev.ApplyEnvVars(envVars); err != nil { + return fmt.Errorf("failed to apply env vars: %w", err) + } + } + + // Determine output format + format := dev.OutputFormatText + if validateOutput == "json" { + format = dev.OutputFormatJSON + } + + writer := dev.NewOutputWriter(os.Stdout, format, validateVerbose) + + // Validate the configuration + result, err := dev.ValidateConfigWithOpts(validateConfigPath, dev.ValidateOptions{ + Strict: validateStrict, + Verbose: validateVerbose, + }) + if err != nil { + return fmt.Errorf("validation error: %w", err) + } + + // Write the results + if err := writer.WriteValidationResult(result); err != nil { + return fmt.Errorf("failed to write results: %w", err) + } + + // Exit with error code if validation failed + if !result.Valid { + os.Exit(1) + } + + return nil +} diff --git a/cmd/adapter/main.go b/cmd/adapter/main.go index d4f8d04..de191ed 100644 --- a/cmd/adapter/main.go +++ b/cmd/adapter/main.go @@ -118,6 +118,7 @@ and HyperFleet API calls.`, // Add subcommands rootCmd.AddCommand(serveCmd) rootCmd.AddCommand(versionCmd) + rootCmd.AddCommand(devCmd) // Execute if err := rootCmd.Execute(); err != nil { diff --git a/internal/dev/dryrun_k8s.go b/internal/dev/dryrun_k8s.go new file mode 100644 index 0000000..7e4748d --- /dev/null +++ b/internal/dev/dryrun_k8s.go @@ -0,0 +1,220 @@ +package dev + +import ( + "context" + "fmt" + "sync" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/k8s_client" + "gopkg.in/yaml.v3" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + 
"k8s.io/apimachinery/pkg/runtime/schema" +) + +// DryRunK8sClient implements k8s_client.K8sClient for dry-run mode. +// It logs all operations without executing them and tracks what would happen. +type DryRunK8sClient struct { + mu sync.Mutex + // Operations records all operations that would be performed + Operations []K8sOperation + // ExistingResources can be pre-populated with resources that should "exist" + ExistingResources map[string]*unstructured.Unstructured + // SecretValues can be pre-populated with secret values for extraction + SecretValues map[string]string + // ConfigMapValues can be pre-populated with configmap values for extraction + ConfigMapValues map[string]string +} + +// K8sOperation represents a Kubernetes operation that would be performed +type K8sOperation struct { + // Type is the operation type (get, create, update, delete, discover) + Type string + // GVK is the GroupVersionKind of the resource + GVK schema.GroupVersionKind + // Namespace is the resource namespace + Namespace string + // Name is the resource name + Name string + // Resource is the resource object (for create/update) + Resource *unstructured.Unstructured + // Manifest is the YAML representation + Manifest string +} + +// NewDryRunK8sClient creates a new DryRunK8sClient +func NewDryRunK8sClient() *DryRunK8sClient { + return &DryRunK8sClient{ + Operations: make([]K8sOperation, 0), + ExistingResources: make(map[string]*unstructured.Unstructured), + SecretValues: make(map[string]string), + ConfigMapValues: make(map[string]string), + } +} + +// resourceKey generates a unique key for a resource +func resourceKey(gvk schema.GroupVersionKind, namespace, name string) string { + return fmt.Sprintf("%s/%s/%s/%s", gvk.Group, gvk.Kind, namespace, name) +} + +// GetResource retrieves a resource (returns from ExistingResources or nil) +func (c *DryRunK8sClient) GetResource(ctx context.Context, gvk schema.GroupVersionKind, namespace, name string) (*unstructured.Unstructured, error) { + c.mu.Lock() + defer c.mu.Unlock() + + c.Operations = append(c.Operations, K8sOperation{ + Type: "get", + GVK: gvk, + Namespace: namespace, + Name: name, + }) + + key := resourceKey(gvk, namespace, name) + if res, exists := c.ExistingResources[key]; exists { + return res.DeepCopy(), nil + } + + // Return "not found" by default + return nil, fmt.Errorf("resource %s not found (dry-run)", key) +} + +// CreateResource records a create operation +func (c *DryRunK8sClient) CreateResource(ctx context.Context, obj *unstructured.Unstructured) (*unstructured.Unstructured, error) { + c.mu.Lock() + defer c.mu.Unlock() + + manifest, _ := yaml.Marshal(obj.Object) + + c.Operations = append(c.Operations, K8sOperation{ + Type: "create", + GVK: obj.GroupVersionKind(), + Namespace: obj.GetNamespace(), + Name: obj.GetName(), + Resource: obj.DeepCopy(), + Manifest: string(manifest), + }) + + // Return a copy with some "server-generated" fields + result := obj.DeepCopy() + result.SetResourceVersion("1") + result.SetUID("dry-run-uid") + + // Store it so subsequent gets can find it + key := resourceKey(obj.GroupVersionKind(), obj.GetNamespace(), obj.GetName()) + c.ExistingResources[key] = result + + return result, nil +} + +// UpdateResource records an update operation +func (c *DryRunK8sClient) UpdateResource(ctx context.Context, obj *unstructured.Unstructured) (*unstructured.Unstructured, error) { + c.mu.Lock() + defer c.mu.Unlock() + + manifest, _ := yaml.Marshal(obj.Object) + + c.Operations = append(c.Operations, K8sOperation{ + Type: "update", + GVK: 
obj.GroupVersionKind(), + Namespace: obj.GetNamespace(), + Name: obj.GetName(), + Resource: obj.DeepCopy(), + Manifest: string(manifest), + }) + + // Return a copy with incremented resource version + result := obj.DeepCopy() + result.SetResourceVersion("2") + + // Update the stored resource + key := resourceKey(obj.GroupVersionKind(), obj.GetNamespace(), obj.GetName()) + c.ExistingResources[key] = result + + return result, nil +} + +// DeleteResource records a delete operation +func (c *DryRunK8sClient) DeleteResource(ctx context.Context, gvk schema.GroupVersionKind, namespace, name string) error { + c.mu.Lock() + defer c.mu.Unlock() + + c.Operations = append(c.Operations, K8sOperation{ + Type: "delete", + GVK: gvk, + Namespace: namespace, + Name: name, + }) + + // Remove from existing resources + key := resourceKey(gvk, namespace, name) + delete(c.ExistingResources, key) + + return nil +} + +// DiscoverResources records a discover operation +func (c *DryRunK8sClient) DiscoverResources(ctx context.Context, gvk schema.GroupVersionKind, discovery k8s_client.Discovery) (*unstructured.UnstructuredList, error) { + c.mu.Lock() + defer c.mu.Unlock() + + c.Operations = append(c.Operations, K8sOperation{ + Type: "discover", + GVK: gvk, + Namespace: discovery.GetNamespace(), + Name: discovery.GetName(), + }) + + // Return an empty list + return &unstructured.UnstructuredList{ + Items: []unstructured.Unstructured{}, + }, nil +} + +// ExtractFromSecret returns a pre-configured value or empty string +func (c *DryRunK8sClient) ExtractFromSecret(ctx context.Context, path string) (string, error) { + c.mu.Lock() + defer c.mu.Unlock() + + if val, exists := c.SecretValues[path]; exists { + return val, nil + } + return "", fmt.Errorf("secret %s not found (dry-run)", path) +} + +// ExtractFromConfigMap returns a pre-configured value or empty string +func (c *DryRunK8sClient) ExtractFromConfigMap(ctx context.Context, path string) (string, error) { + c.mu.Lock() + defer c.mu.Unlock() + + if val, exists := c.ConfigMapValues[path]; exists { + return val, nil + } + return "", fmt.Errorf("configmap %s not found (dry-run)", path) +} + +// GetOperations returns all recorded operations +func (c *DryRunK8sClient) GetOperations() []K8sOperation { + c.mu.Lock() + defer c.mu.Unlock() + return append([]K8sOperation{}, c.Operations...) 
+} + +// GetManifests returns all recorded manifests +func (c *DryRunK8sClient) GetManifests() map[string]string { + c.mu.Lock() + defer c.mu.Unlock() + + manifests := make(map[string]string) + for _, op := range c.Operations { + if op.Type == "create" || op.Type == "update" { + key := fmt.Sprintf("%s/%s", op.Namespace, op.Name) + if op.Namespace == "" { + key = op.Name + } + manifests[key] = op.Manifest + } + } + return manifests +} + +// Ensure DryRunK8sClient implements K8sClient +var _ k8s_client.K8sClient = (*DryRunK8sClient)(nil) diff --git a/internal/dev/event_source.go b/internal/dev/event_source.go new file mode 100644 index 0000000..810406f --- /dev/null +++ b/internal/dev/event_source.go @@ -0,0 +1,134 @@ +package dev + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + + "gopkg.in/yaml.v3" +) + +// EventSource provides events for development testing +type EventSource interface { + // LoadEvent loads an event and returns the raw data + LoadEvent() (map[string]interface{}, error) +} + +// FileEventSource loads events from JSON or YAML files +type FileEventSource struct { + path string +} + +// NewFileEventSource creates a new FileEventSource +func NewFileEventSource(path string) *FileEventSource { + return &FileEventSource{path: path} +} + +// LoadEvent loads the event from the file +func (s *FileEventSource) LoadEvent() (map[string]interface{}, error) { + data, err := os.ReadFile(s.path) + if err != nil { + return nil, fmt.Errorf("failed to read event file %s: %w", s.path, err) + } + + var event map[string]interface{} + + ext := strings.ToLower(filepath.Ext(s.path)) + switch ext { + case ".yaml", ".yml": + if err := yaml.Unmarshal(data, &event); err != nil { + return nil, fmt.Errorf("failed to parse YAML event file: %w", err) + } + case ".json": + if err := json.Unmarshal(data, &event); err != nil { + return nil, fmt.Errorf("failed to parse JSON event file: %w", err) + } + default: + // Try JSON first, then YAML + if err := json.Unmarshal(data, &event); err != nil { + if err := yaml.Unmarshal(data, &event); err != nil { + return nil, fmt.Errorf("failed to parse event file (tried JSON and YAML): %w", err) + } + } + } + + return event, nil +} + +// InlineEventSource parses events from inline JSON/YAML strings +type InlineEventSource struct { + data string +} + +// NewInlineEventSource creates a new InlineEventSource +func NewInlineEventSource(data string) *InlineEventSource { + return &InlineEventSource{data: data} +} + +// LoadEvent parses the inline event data +func (s *InlineEventSource) LoadEvent() (map[string]interface{}, error) { + var event map[string]interface{} + + // Try JSON first (more common for inline data) + if err := json.Unmarshal([]byte(s.data), &event); err != nil { + // Try YAML as fallback + if err := yaml.Unmarshal([]byte(s.data), &event); err != nil { + return nil, fmt.Errorf("failed to parse inline event data: %w", err) + } + } + + return event, nil +} + +// LoadEnvFile loads environment variables from a .env file +func LoadEnvFile(path string) (map[string]string, error) { + data, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("failed to read env file %s: %w", path, err) + } + + envVars := make(map[string]string) + lines := strings.Split(string(data), "\n") + + for lineNum, line := range lines { + line = strings.TrimSpace(line) + + // Skip empty lines and comments + if line == "" || strings.HasPrefix(line, "#") { + continue + } + + // Parse KEY=VALUE + parts := strings.SplitN(line, "=", 2) + if len(parts) != 2 { 
+ return nil, fmt.Errorf("invalid env file format at line %d: %s", lineNum+1, line) + } + + key := strings.TrimSpace(parts[0]) + value := strings.TrimSpace(parts[1]) + + // Remove surrounding quotes if present + if len(value) >= 2 { + if (value[0] == '"' && value[len(value)-1] == '"') || + (value[0] == '\'' && value[len(value)-1] == '\'') { + value = value[1 : len(value)-1] + } + } + + envVars[key] = value + } + + return envVars, nil +} + +// ApplyEnvVars sets environment variables from a map +func ApplyEnvVars(envVars map[string]string) error { + for key, value := range envVars { + if err := os.Setenv(key, value); err != nil { + return fmt.Errorf("failed to set env var %s: %w", key, err) + } + } + return nil +} diff --git a/internal/dev/mock_api.go b/internal/dev/mock_api.go new file mode 100644 index 0000000..62fdb71 --- /dev/null +++ b/internal/dev/mock_api.go @@ -0,0 +1,247 @@ +package dev + +import ( + "context" + "encoding/json" + "fmt" + "os" + "regexp" + "sync" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/hyperfleet_api" + "gopkg.in/yaml.v3" +) + +// MockAPIClient implements hyperfleet_api.Client for dry-run mode. +// It records all API calls and returns configurable mock responses. +type MockAPIClient struct { + mu sync.Mutex + // Requests records all API requests made + Requests []APIRequest + // CallRecords records all API calls with their responses + CallRecords []APICallRecord + // Responses maps URL patterns to mock responses + Responses map[string]*MockResponse + // DefaultResponse is returned when no matching pattern is found + DefaultResponse *MockResponse + // baseURL is the mock base URL + baseURL string +} + +// APIRequest represents a recorded API request +type APIRequest struct { + Method string + URL string + Body []byte + Headers map[string]string +} + +// APICallRecord represents a recorded API request with its response +type APICallRecord struct { + Request APIRequest + Response *MockResponse +} + +// MockResponse defines a mock response for API calls +type MockResponse struct { + StatusCode int `yaml:"statusCode" json:"statusCode"` + Body interface{} `yaml:"body" json:"body"` + Headers map[string]string `yaml:"headers" json:"headers"` +} + +// NewMockAPIClient creates a new MockAPIClient +func NewMockAPIClient() *MockAPIClient { + return &MockAPIClient{ + Requests: make([]APIRequest, 0), + CallRecords: make([]APICallRecord, 0), + Responses: make(map[string]*MockResponse), + DefaultResponse: &MockResponse{ + StatusCode: 200, + Body: map[string]interface{}{"status": "ok"}, + Headers: map[string]string{"Content-Type": "application/json"}, + }, + baseURL: "https://api.example.com", + } +} + +// SetResponse configures a mock response for a URL pattern +func (c *MockAPIClient) SetResponse(urlPattern string, resp *MockResponse) { + c.mu.Lock() + defer c.mu.Unlock() + c.Responses[urlPattern] = resp +} + +// LoadResponsesFromFile loads mock responses from a YAML or JSON file +func (c *MockAPIClient) LoadResponsesFromFile(path string) error { + data, err := os.ReadFile(path) + if err != nil { + return fmt.Errorf("failed to read mock responses file: %w", err) + } + + var responses map[string]*MockResponse + + // Try YAML first, then JSON + if err := yaml.Unmarshal(data, &responses); err != nil { + if err := json.Unmarshal(data, &responses); err != nil { + return fmt.Errorf("failed to parse mock responses file: %w", err) + } + } + + c.mu.Lock() + defer c.mu.Unlock() + for pattern, resp := range responses { + c.Responses[pattern] = resp + } + + return nil 
+} + +// findResponse finds a matching response for a URL +func (c *MockAPIClient) findResponse(url string) *MockResponse { + // Try exact match first + if resp, exists := c.Responses[url]; exists { + return resp + } + + // Try pattern matching + for pattern, resp := range c.Responses { + if matched, _ := regexp.MatchString(pattern, url); matched { + return resp + } + } + + return c.DefaultResponse +} + +// Do executes an HTTP request and returns a mock response +func (c *MockAPIClient) Do(ctx context.Context, req *hyperfleet_api.Request) (*hyperfleet_api.Response, error) { + c.mu.Lock() + defer c.mu.Unlock() + + // Record the request + apiReq := APIRequest{ + Method: req.Method, + URL: req.URL, + Body: req.Body, + Headers: req.Headers, + } + c.Requests = append(c.Requests, apiReq) + + // Find matching response + mockResp := c.findResponse(req.URL) + + // Record the call with its response + c.CallRecords = append(c.CallRecords, APICallRecord{ + Request: apiReq, + Response: mockResp, + }) + + // Convert body to JSON bytes + var bodyBytes []byte + if mockResp.Body != nil { + var err error + bodyBytes, err = json.Marshal(mockResp.Body) + if err != nil { + return nil, fmt.Errorf("failed to marshal mock response body: %w", err) + } + } + + // Build response headers + headers := make(map[string][]string) + for k, v := range mockResp.Headers { + headers[k] = []string{v} + } + + return &hyperfleet_api.Response{ + StatusCode: mockResp.StatusCode, + Status: fmt.Sprintf("%d OK", mockResp.StatusCode), + Headers: headers, + Body: bodyBytes, + Attempts: 1, + }, nil +} + +// Get performs a GET request +func (c *MockAPIClient) Get(ctx context.Context, url string, opts ...hyperfleet_api.RequestOption) (*hyperfleet_api.Response, error) { + req := &hyperfleet_api.Request{ + Method: "GET", + URL: url, + } + for _, opt := range opts { + opt(req) + } + return c.Do(ctx, req) +} + +// Post performs a POST request +func (c *MockAPIClient) Post(ctx context.Context, url string, body []byte, opts ...hyperfleet_api.RequestOption) (*hyperfleet_api.Response, error) { + req := &hyperfleet_api.Request{ + Method: "POST", + URL: url, + Body: body, + } + for _, opt := range opts { + opt(req) + } + return c.Do(ctx, req) +} + +// Put performs a PUT request +func (c *MockAPIClient) Put(ctx context.Context, url string, body []byte, opts ...hyperfleet_api.RequestOption) (*hyperfleet_api.Response, error) { + req := &hyperfleet_api.Request{ + Method: "PUT", + URL: url, + Body: body, + } + for _, opt := range opts { + opt(req) + } + return c.Do(ctx, req) +} + +// Patch performs a PATCH request +func (c *MockAPIClient) Patch(ctx context.Context, url string, body []byte, opts ...hyperfleet_api.RequestOption) (*hyperfleet_api.Response, error) { + req := &hyperfleet_api.Request{ + Method: "PATCH", + URL: url, + Body: body, + } + for _, opt := range opts { + opt(req) + } + return c.Do(ctx, req) +} + +// Delete performs a DELETE request +func (c *MockAPIClient) Delete(ctx context.Context, url string, opts ...hyperfleet_api.RequestOption) (*hyperfleet_api.Response, error) { + req := &hyperfleet_api.Request{ + Method: "DELETE", + URL: url, + } + for _, opt := range opts { + opt(req) + } + return c.Do(ctx, req) +} + +// BaseURL returns the mock base URL +func (c *MockAPIClient) BaseURL() string { + return c.baseURL +} + +// GetRequests returns all recorded requests +func (c *MockAPIClient) GetRequests() []APIRequest { + c.mu.Lock() + defer c.mu.Unlock() + return append([]APIRequest{}, c.Requests...) 
+} + +// GetCallRecords returns all recorded API calls with their responses +func (c *MockAPIClient) GetCallRecords() []APICallRecord { + c.mu.Lock() + defer c.mu.Unlock() + return append([]APICallRecord{}, c.CallRecords...) +} + +// Ensure MockAPIClient implements Client +var _ hyperfleet_api.Client = (*MockAPIClient)(nil) diff --git a/internal/dev/output.go b/internal/dev/output.go new file mode 100644 index 0000000..5549df6 --- /dev/null +++ b/internal/dev/output.go @@ -0,0 +1,200 @@ +package dev + +import ( + "encoding/json" + "fmt" + "io" + "strings" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/executor" +) + +// OutputFormat represents the output format +type OutputFormat string + +const ( + OutputFormatText OutputFormat = "text" + OutputFormatJSON OutputFormat = "json" +) + +// OutputWriter formats and writes results to output +type OutputWriter interface { + // WriteValidationResult writes validation results + WriteValidationResult(result *ValidationResult) error + // WriteTraceResult writes execution trace results + WriteTraceResult(result *TraceResult) error + // WriteError writes an error message + WriteError(err error) error +} + +// NewOutputWriter creates an OutputWriter for the given format +func NewOutputWriter(out io.Writer, format OutputFormat, verbose bool) OutputWriter { + switch format { + case OutputFormatJSON: + return &jsonOutputWriter{out: out} + default: + return &textOutputWriter{out: out, verbose: verbose} + } +} + +// textOutputWriter writes human-readable text output +type textOutputWriter struct { + out io.Writer + verbose bool +} + +func (w *textOutputWriter) WriteValidationResult(result *ValidationResult) error { + if result.Details != nil { + w.writeCategory("Schema Validation", result.Details.Schema) + w.writeCategory("Parameter Validation", result.Details.Params) + w.writeCategory("CEL Expressions", result.Details.CEL) + w.writeCategory("Go Templates", result.Details.Templates) + w.writeCategory("K8s Manifests", result.Details.Manifests) + _, _ = fmt.Fprintln(w.out) + } + + if result.Valid { + _, _ = fmt.Fprintln(w.out, "Validation: SUCCESS") + } else { + _, _ = fmt.Fprintln(w.out, "Validation: FAILED") + _, _ = fmt.Fprintln(w.out) + for _, err := range result.Errors { + _, _ = fmt.Fprintf(w.out, " [ERROR] %s: %s\n", err.Path, err.Message) + } + } + + if w.verbose && len(result.Warnings) > 0 { + _, _ = fmt.Fprintln(w.out) + for _, warn := range result.Warnings { + _, _ = fmt.Fprintf(w.out, " [WARN] %s: %s\n", warn.Path, warn.Message) + } + } + + return nil +} + +func (w *textOutputWriter) writeCategory(name string, cat ValidationCategory) { + status := "PASS" + if !cat.Passed { + status = "FAIL" + } + countStr := "" + if cat.Count > 0 { + countStr = fmt.Sprintf(" (%d items)", cat.Count) + } + _, _ = fmt.Fprintf(w.out, "%-25s %s%s\n", name+":", status, countStr) +} + +func (w *textOutputWriter) WriteTraceResult(result *TraceResult) error { + _, _ = fmt.Fprintf(w.out, "Dry-Run Execution Results\n") + _, _ = fmt.Fprintf(w.out, "%s\n\n", strings.Repeat("=", 25)) + + for _, phase := range result.Phases { + var status string + switch phase.Status { + case "failed": + status = "[FAILED]" + case "skipped": + status = "[SKIPPED]" + case "dry-run": + status = "[DRY-RUN]" + default: + status = "[SUCCESS]" + } + + phaseName := formatPhaseName(phase.Phase) + _, _ = fmt.Fprintf(w.out, "Phase: %-20s %s\n", phaseName, status) + + if phase.Error != nil { + _, _ = fmt.Fprintf(w.out, " Error: %v\n", phase.Error) + } + + if w.verbose && 
len(phase.Details) > 0 { + for key, val := range phase.Details { + _, _ = fmt.Fprintf(w.out, " %s: %v\n", key, val) + } + } + _, _ = fmt.Fprintln(w.out) + } + + // Show rendered outputs if available + if result.RenderedOutputs != nil { + if len(result.RenderedOutputs.Manifests) > 0 { + _, _ = fmt.Fprintln(w.out, "Rendered Manifests:") + for name, manifest := range result.RenderedOutputs.Manifests { + _, _ = fmt.Fprintf(w.out, " [%s]\n", name) + if w.verbose { + for _, line := range strings.Split(manifest, "\n") { + _, _ = fmt.Fprintf(w.out, " %s\n", line) + } + } + } + _, _ = fmt.Fprintln(w.out) + } + + if len(result.RenderedOutputs.APICalls) > 0 { + _, _ = fmt.Fprintln(w.out, "API Calls (simulated):") + for _, call := range result.RenderedOutputs.APICalls { + _, _ = fmt.Fprintf(w.out, " %s %s\n", call.Method, call.URL) + if w.verbose { + if call.Body != "" { + _, _ = fmt.Fprintf(w.out, " Request Body: %s\n", truncate(call.Body, 200)) + } + _, _ = fmt.Fprintf(w.out, " Response [%d]: %s\n", call.StatusCode, truncate(call.Response, 200)) + } + } + } + } + + return nil +} + +func (w *textOutputWriter) WriteError(err error) error { + _, _ = fmt.Fprintf(w.out, "Error: %v\n", err) + return nil +} + +func formatPhaseName(phase executor.ExecutionPhase) string { + switch phase { + case executor.PhaseParamExtraction: + return "Parameter Extraction" + case executor.PhasePreconditions: + return "Preconditions" + case executor.PhaseResources: + return "Resources" + case executor.PhasePostActions: + return "Post Actions" + default: + return string(phase) + } +} + +func truncate(s string, maxLen int) string { + if len(s) <= maxLen { + return s + } + return s[:maxLen] + "..." +} + +// jsonOutputWriter writes JSON output +type jsonOutputWriter struct { + out io.Writer +} + +func (w *jsonOutputWriter) WriteValidationResult(result *ValidationResult) error { + return w.writeJSON(result) +} + +func (w *jsonOutputWriter) WriteTraceResult(result *TraceResult) error { + return w.writeJSON(result) +} + +func (w *jsonOutputWriter) WriteError(err error) error { + return w.writeJSON(map[string]string{"error": err.Error()}) +} + +func (w *jsonOutputWriter) writeJSON(v interface{}) error { + enc := json.NewEncoder(w.out) + enc.SetIndent("", " ") + return enc.Encode(v) +} diff --git a/internal/dev/trace.go b/internal/dev/trace.go new file mode 100644 index 0000000..6e157fa --- /dev/null +++ b/internal/dev/trace.go @@ -0,0 +1,209 @@ +package dev + +import ( + "context" + "encoding/json" + "time" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/executor" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" +) + +// ExecuteWithTrace runs the adapter executor in dry-run mode and captures a detailed trace +func ExecuteWithTrace(ctx context.Context, configPath string, eventData map[string]interface{}, opts *RunOptions) (*TraceResult, error) { + startTime := time.Now() + + // Load the configuration + config, err := config_loader.Load(configPath) + if err != nil { + return nil, err + } + + // Create dry-run clients + k8sClient := NewDryRunK8sClient() + apiClient := NewMockAPIClient() + + // Load mock API responses if provided + if opts != nil && opts.MockAPIResponsesPath != "" { + if err := apiClient.LoadResponsesFromFile(opts.MockAPIResponsesPath); err != nil { + return nil, err + } + } + + // Create a test logger for dry-run (minimal output) + log := logger.NewTestLogger() + + // Build the executor + exec, err := 
executor.NewBuilder(). + WithAdapterConfig(config). + WithAPIClient(apiClient). + WithK8sClient(k8sClient). + WithLogger(log). + Build() + if err != nil { + return nil, err + } + + // Convert event data to JSON for the executor + eventBytes, err := json.Marshal(eventData) + if err != nil { + return nil, err + } + + // Execute + execResult := exec.Execute(ctx, eventBytes) + + // Build the trace result + trace := &TraceResult{ + Success: execResult.Status == executor.StatusSuccess, + ExecutionResult: execResult, + Duration: time.Since(startTime), + Phases: buildPhaseTraces(execResult), + RenderedOutputs: &RenderedOutputs{ + Manifests: k8sClient.GetManifests(), + Payloads: make(map[string]string), + APICalls: buildAPICallTraces(apiClient.GetCallRecords()), + }, + } + + return trace, nil +} + +// RunOptions configures the dry-run execution +type RunOptions struct { + // MockAPIResponsesPath is the path to mock API responses file + MockAPIResponsesPath string + // ShowManifests enables manifest output + ShowManifests bool + // ShowPayloads enables payload output + ShowPayloads bool + // ShowParams enables parameter output + ShowParams bool + // EnvFilePath is the path to .env file + EnvFilePath string +} + +// buildPhaseTraces builds phase traces from execution result +func buildPhaseTraces(result *executor.ExecutionResult) []PhaseTrace { + phases := []PhaseTrace{} + + // Phase 1: Parameter Extraction + paramDetails := make(map[string]interface{}) + if result.Params != nil { + paramDetails["count"] = len(result.Params) + paramDetails["params"] = result.Params + } + phases = append(phases, PhaseTrace{ + Phase: executor.PhaseParamExtraction, + Status: getPhaseStatus(executor.PhaseParamExtraction, result), + Details: paramDetails, + Error: result.Errors[executor.PhaseParamExtraction], + }) + + // Phase 2: Preconditions + precondDetails := make(map[string]interface{}) + if result.PreconditionResults != nil { + precondDetails["count"] = len(result.PreconditionResults) + matched := 0 + for _, r := range result.PreconditionResults { + if r.Matched { + matched++ + } + } + precondDetails["matched"] = matched + } + phases = append(phases, PhaseTrace{ + Phase: executor.PhasePreconditions, + Status: getPhaseStatus(executor.PhasePreconditions, result), + Details: precondDetails, + Error: result.Errors[executor.PhasePreconditions], + }) + + // Phase 3: Resources + resourceDetails := make(map[string]interface{}) + if result.ResourceResults != nil { + resourceDetails["count"] = len(result.ResourceResults) + operations := make(map[string]int) + for _, r := range result.ResourceResults { + operations[string(r.Operation)]++ + } + resourceDetails["operations"] = operations + } + if result.ResourcesSkipped { + resourceDetails["skipped"] = true + resourceDetails["skipReason"] = result.SkipReason + } + phases = append(phases, PhaseTrace{ + Phase: executor.PhaseResources, + Status: getResourcePhaseStatus(result), + Details: resourceDetails, + Error: result.Errors[executor.PhaseResources], + }) + + // Phase 4: Post Actions + postDetails := make(map[string]interface{}) + if result.PostActionResults != nil { + postDetails["count"] = len(result.PostActionResults) + executed := 0 + for _, r := range result.PostActionResults { + if r.APICallMade { + executed++ + } + } + postDetails["executed"] = executed + } + phases = append(phases, PhaseTrace{ + Phase: executor.PhasePostActions, + Status: getPhaseStatus(executor.PhasePostActions, result), + Details: postDetails, + Error: result.Errors[executor.PhasePostActions], + }) + + 
return phases +} + +// getPhaseStatus determines the status string for a phase +func getPhaseStatus(phase executor.ExecutionPhase, result *executor.ExecutionResult) string { + if result.Errors[phase] != nil { + return "failed" + } + return "success" +} + +// getResourcePhaseStatus determines the status string for the resource phase +func getResourcePhaseStatus(result *executor.ExecutionResult) string { + if result.Errors[executor.PhaseResources] != nil { + return "failed" + } + if result.ResourcesSkipped { + return "skipped" + } + return "dry-run" +} + +// buildAPICallTraces builds API call traces from recorded API calls +func buildAPICallTraces(records []APICallRecord) []APICallTrace { + traces := make([]APICallTrace, len(records)) + for i, record := range records { + trace := APICallTrace{ + Method: record.Request.Method, + URL: record.Request.URL, + Body: string(record.Request.Body), + StatusCode: 200, // Default status code + } + + // Include response data if available + if record.Response != nil { + trace.StatusCode = record.Response.StatusCode + if record.Response.Body != nil { + if bodyBytes, err := json.Marshal(record.Response.Body); err == nil { + trace.Response = string(bodyBytes) + } + } + } + + traces[i] = trace + } + return traces +} diff --git a/internal/dev/types.go b/internal/dev/types.go new file mode 100644 index 0000000..43983fb --- /dev/null +++ b/internal/dev/types.go @@ -0,0 +1,115 @@ +package dev + +import ( + "time" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/executor" +) + +// ValidationResult contains the result of config validation +type ValidationResult struct { + // Valid indicates overall validation success + Valid bool + // Errors contains validation errors + Errors []ValidationIssue + // Warnings contains validation warnings + Warnings []ValidationIssue + // Details contains component-specific validation results + Details *ValidationDetails +} + +// ValidationIssue represents a single validation error or warning +type ValidationIssue struct { + // Path is the location in the config (e.g., "spec.preconditions[0].expression") + Path string + // Message describes the issue + Message string + // Type indicates the kind of validation (schema, cel, template, manifest) + Type string +} + +// ValidationDetails contains detailed validation results by category +type ValidationDetails struct { + Schema ValidationCategory + Params ValidationCategory + CEL ValidationCategory + Templates ValidationCategory + Manifests ValidationCategory +} + +// ValidationCategory represents validation results for a specific category +type ValidationCategory struct { + Passed bool + Count int + Issues []ValidationIssue +} + +// TraceResult contains detailed execution trace for dry-run mode +type TraceResult struct { + // Success indicates if the overall execution succeeded + Success bool + // ExecutionResult is the underlying executor result + ExecutionResult *executor.ExecutionResult + // Phases contains trace information for each phase + Phases []PhaseTrace + // Duration is the total execution duration + Duration time.Duration + // RenderedOutputs contains rendered templates and manifests + RenderedOutputs *RenderedOutputs +} + +// PhaseTrace contains trace information for a single execution phase +type PhaseTrace struct { + // Phase is the execution phase + Phase executor.ExecutionPhase + // Status indicates success or failure + Status string + // Duration is how long the phase took + Duration time.Duration + // Details contains phase-specific details + Details 
map[string]interface{} + // Error contains any error that occurred + Error error +} + +// RenderedOutputs contains rendered templates and manifests for preview +type RenderedOutputs struct { + // Manifests maps resource name to rendered YAML + Manifests map[string]string + // Payloads maps payload name to rendered JSON + Payloads map[string]string + // APICalls contains simulated API call details + APICalls []APICallTrace +} + +// APICallTrace represents a traced API call +type APICallTrace struct { + // Method is the HTTP method + Method string + // URL is the target URL + URL string + // Body is the request body (if any) + Body string + // Response is the mock response (if configured) + Response string + // StatusCode is the mock status code + StatusCode int +} + +// ResourceTrace represents a traced resource operation +type ResourceTrace struct { + // Name is the resource config name + Name string + // Kind is the Kubernetes kind + Kind string + // APIVersion is the Kubernetes apiVersion + APIVersion string + // Namespace is the target namespace + Namespace string + // ResourceName is the Kubernetes resource name + ResourceName string + // Operation is what would happen (create, update, skip) + Operation string + // Manifest is the rendered manifest YAML + Manifest string +} diff --git a/internal/dev/validation.go b/internal/dev/validation.go new file mode 100644 index 0000000..25a190d --- /dev/null +++ b/internal/dev/validation.go @@ -0,0 +1,198 @@ +package dev + +import ( + "fmt" + "strings" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" +) + +// ValidateConfig performs detailed validation of an adapter configuration file +func ValidateConfig(configPath string) (*ValidationResult, error) { + result := &ValidationResult{ + Valid: true, + Errors: []ValidationIssue{}, + Warnings: []ValidationIssue{}, + Details: &ValidationDetails{ + Schema: ValidationCategory{Passed: true}, + Params: ValidationCategory{Passed: true}, + CEL: ValidationCategory{Passed: true}, + Templates: ValidationCategory{Passed: true}, + Manifests: ValidationCategory{Passed: true}, + }, + } + + // Load and validate the configuration + config, err := config_loader.Load(configPath) + if err != nil { + // Parse the error to categorize it + categorizeLoadError(err, result) + result.Valid = false + // Add the raw error to Errors for visibility + if len(result.Errors) == 0 { + result.Errors = append(result.Errors, ValidationIssue{ + Path: "config", + Message: err.Error(), + Type: "schema", + }) + } + return result, nil + } + + // Config loaded successfully - validation passed + result.Valid = true + + // Count items for details + countConfigItems(config, result) + + return result, nil +} + +// categorizeLoadError parses validation errors and categorizes them +func categorizeLoadError(err error, result *ValidationResult) { + errStr := err.Error() + + // Check if it's a validation errors collection + if strings.Contains(errStr, "validation failed") { + // Parse individual errors from the ValidationErrors + lines := strings.Split(errStr, "\n") + for _, line := range lines { + line = strings.TrimSpace(line) + if line == "" || strings.HasPrefix(line, "validation failed") { + continue + } + line = strings.TrimPrefix(line, "- ") + + issue := parseValidationLine(line) + result.Errors = append(result.Errors, issue) + categorizeIssue(issue, result) + } + } else { + // Single error - likely a file or parsing error + issue := ValidationIssue{ + Path: "config", + Message: errStr, + Type: "schema", + } + 
result.Errors = append(result.Errors, issue) + result.Details.Schema.Passed = false + result.Details.Schema.Issues = append(result.Details.Schema.Issues, issue) + } +} + +// parseValidationLine parses a validation error line into a ValidationIssue +func parseValidationLine(line string) ValidationIssue { + issue := ValidationIssue{ + Type: "unknown", + } + + // Format is typically "path: message" + parts := strings.SplitN(line, ": ", 2) + if len(parts) == 2 { + issue.Path = parts[0] + issue.Message = parts[1] + } else { + issue.Path = "config" + issue.Message = line + } + + // Determine type based on path or message content + pathLower := strings.ToLower(issue.Path) + msgLower := strings.ToLower(issue.Message) + + switch { + case strings.Contains(msgLower, "cel"): + issue.Type = "cel" + case strings.Contains(pathLower, "expression"): + issue.Type = "cel" + case strings.Contains(msgLower, "template variable"): + issue.Type = "template" + case strings.Contains(pathLower, "manifest"): + issue.Type = "manifest" + case strings.Contains(msgLower, "kubernetes"): + issue.Type = "manifest" + case strings.Contains(pathLower, "param"): + issue.Type = "param" + case strings.Contains(msgLower, "operator"): + issue.Type = "param" + default: + issue.Type = "schema" + } + + return issue +} + +// categorizeIssue adds an issue to the appropriate category +func categorizeIssue(issue ValidationIssue, result *ValidationResult) { + switch issue.Type { + case "cel": + result.Details.CEL.Passed = false + result.Details.CEL.Issues = append(result.Details.CEL.Issues, issue) + case "template": + result.Details.Templates.Passed = false + result.Details.Templates.Issues = append(result.Details.Templates.Issues, issue) + case "manifest": + result.Details.Manifests.Passed = false + result.Details.Manifests.Issues = append(result.Details.Manifests.Issues, issue) + case "param": + result.Details.Params.Passed = false + result.Details.Params.Issues = append(result.Details.Params.Issues, issue) + default: + result.Details.Schema.Passed = false + result.Details.Schema.Issues = append(result.Details.Schema.Issues, issue) + } +} + +// countConfigItems counts various items in the config for reporting +func countConfigItems(config *config_loader.AdapterConfig, result *ValidationResult) { + // Count parameters + result.Details.Params.Count = len(config.Spec.Params) + + // Count CEL expressions + celCount := 0 + for _, precond := range config.Spec.Preconditions { + if precond.Expression != "" { + celCount++ + } + } + result.Details.CEL.Count = celCount + + // Count resources (manifests) + result.Details.Manifests.Count = len(config.Spec.Resources) + + // Count template usages (approximate - just count resources and payloads) + templateCount := len(config.Spec.Resources) + if config.Spec.Post != nil { + templateCount += len(config.Spec.Post.Payloads) + } + result.Details.Templates.Count = templateCount +} + +// ValidateConfigWithOptions performs validation with additional options +type ValidateOptions struct { + // Strict treats warnings as errors + Strict bool + // Verbose includes additional checks + Verbose bool +} + +// ValidateConfigWithOpts validates config with options +func ValidateConfigWithOpts(configPath string, opts ValidateOptions) (*ValidationResult, error) { + result, err := ValidateConfig(configPath) + if err != nil { + return nil, err + } + + if opts.Strict && len(result.Warnings) > 0 { + result.Valid = false + for _, warn := range result.Warnings { + result.Errors = append(result.Errors, ValidationIssue{ + Path: 
warn.Path, + Message: fmt.Sprintf("[strict] %s", warn.Message), + Type: warn.Type, + }) + } + } + + return result, nil +} diff --git a/sample.yaml b/sample.yaml new file mode 100644 index 0000000..203b2d1 --- /dev/null +++ b/sample.yaml @@ -0,0 +1,48 @@ +# Mock API responses for adapter dry-run testing +# Keys are URL patterns (exact match or regex) + +# Exact URL match for cluster status endpoint +"https://api.example.com/api/hyperfleet/v1/clusters/cluster-123": + statusCode: 200 + body: + id: "cluster-123" + kind: "Cluster" + generation: 1 + status: + phase: "NotReady" + conditions: + - type: "Available" + status: "True" + reason: "ClusterReady" + cloudProvider: "aws" + region: "us-east-1" + vpcId: "vpc-12345" + headers: + Content-Type: "application/json" + +# Regex pattern to match any cluster ID +"https://api.example.com/api/hyperfleet/v1/clusters/[^/]+$": + statusCode: 200 + body: + phase: "Ready" + cloudProvider: "aws" + vpcId: "vpc-default" + +# Match cluster status updates (POST) +"https://api.example.com/api/hyperfleet/v1/clusters/.*/status": + statusCode: 200 + body: + message: "Status updated successfully" + +# Simulate a 404 for specific resource +"https://api.example.com/api/hyperfleet/v1/nodepools/missing": + statusCode: 404 + body: + error: "NodePool not found" + code: "NOT_FOUND" + +# Simulate a server error +"https://api.example.com/api/hyperfleet/v1/clusters/broken": + statusCode: 500 + body: + error: "Internal server error" diff --git a/test/cloudevent.example.cluster.json b/test/cloudevent.example.cluster.json new file mode 100644 index 0000000..ed12014 --- /dev/null +++ b/test/cloudevent.example.cluster.json @@ -0,0 +1,14 @@ +{ + "specversion": "1.0", + "type": "com.redhat.hyperfleet.cluster.reconcile.v1", + "source": "/hyperfleet/service/sentinel", + "id": "cluster-123", + "time": "2025-10-23T12:00:00Z", + "datacontenttype": "application/json", + "data": { + "id": "cluster-123", + "kind": "Cluster", + "href": "https://api.hyperfleet.com/v1/clusters/11111111-1111-1111-1111-111111111111", + "generation": 5 + } +}
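
Note (illustrative, not part of the diff): the --env-file flag on both "dev run" and
"dev validate" expects the plain KEY=VALUE format parsed by LoadEnvFile above — blank
lines and "#"-prefixed comments are skipped, values may be wrapped in single or double
quotes, and each pair is applied via os.Setenv. A minimal example file; the variable
names below are placeholders, not names this patch requires:

    # .env.local
    HYPERFLEET_API_TOKEN="replace-me"
    ADAPTER_NAMESPACE=hyperfleet-dev
    LOG_LEVEL='debug'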
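
Note (illustrative, not part of the diff): the sample files added by this patch can be
combined for a local end-to-end dry run; ./adapter-config.yaml is a stand-in path, as in
the help-text examples:

    adapter dev validate --config ./adapter-config.yaml --env-file .env.local
    adapter dev run --config ./adapter-config.yaml \
      --event ./test/cloudevent.example.cluster.json \
      --mock-api-responses ./sample.yaml \
      --show-manifests --verbose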