diff --git a/cmd/adapter-migrate/main.go b/cmd/adapter-migrate/main.go new file mode 100644 index 0000000..27e0171 --- /dev/null +++ b/cmd/adapter-migrate/main.go @@ -0,0 +1,332 @@ +// adapter-migrate is a CLI tool for migrating HyperFleet adapter configurations. +// It converts legacy AdapterConfig YAML files to native Serverless Workflow format. +package main + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/swf/converter" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/swf/loader" + "github.com/serverlessworkflow/sdk-go/v3/model" + "github.com/serverlessworkflow/sdk-go/v3/parser" + "github.com/spf13/cobra" + "sigs.k8s.io/yaml" +) + +// Build-time variables set via ldflags +var ( + version = "0.1.0" + commit = "none" + buildDate = "unknown" +) + +func main() { + rootCmd := &cobra.Command{ + Use: "adapter-migrate", + Short: "HyperFleet Adapter Migration Tool", + Long: `A migration tool for HyperFleet adapter configurations. + +This tool helps you migrate from legacy AdapterConfig YAML format to +native Serverless Workflow (SWF) format. + +Commands: + convert - Convert an AdapterConfig to SWF workflow format + validate - Validate a workflow file (either format) + detect - Detect the format of a configuration file`, + CompletionOptions: cobra.CompletionOptions{ + DisableDefaultCmd: true, + }, + } + + // Convert command + convertCmd := &cobra.Command{ + Use: "convert", + Short: "Convert AdapterConfig to Serverless Workflow format", + Long: `Convert a legacy AdapterConfig YAML file to native Serverless Workflow format. + +Example: + adapter-migrate convert --input adapter.yaml --output workflow.yaml + adapter-migrate convert -i adapter.yaml -o workflow.yaml + adapter-migrate convert -i adapter.yaml # prints to stdout`, + RunE: runConvert, + } + + var inputFile, outputFile string + var overwrite bool + + convertCmd.Flags().StringVarP(&inputFile, "input", "i", "", "Input AdapterConfig file (required)") + convertCmd.Flags().StringVarP(&outputFile, "output", "o", "", "Output SWF workflow file (prints to stdout if not specified)") + convertCmd.Flags().BoolVarP(&overwrite, "overwrite", "f", false, "Overwrite output file if it exists") + _ = convertCmd.MarkFlagRequired("input") + + // Validate command + validateCmd := &cobra.Command{ + Use: "validate", + Short: "Validate a workflow configuration file", + Long: `Validate a workflow configuration file (either AdapterConfig or SWF format). + +Example: + adapter-migrate validate --file adapter.yaml + adapter-migrate validate -f workflow.yaml`, + RunE: runValidate, + } + + var validateFile string + validateCmd.Flags().StringVarP(&validateFile, "file", "f", "", "Configuration file to validate (required)") + _ = validateCmd.MarkFlagRequired("file") + + // Detect command + detectCmd := &cobra.Command{ + Use: "detect", + Short: "Detect the format of a configuration file", + Long: `Detect whether a file is in AdapterConfig or Serverless Workflow format. 
+
+Example:
+  adapter-migrate detect --file config.yaml
+  adapter-migrate detect -f config.yaml`,
+		RunE: runDetect,
+	}
+
+	var detectFile string
+	detectCmd.Flags().StringVarP(&detectFile, "file", "f", "", "Configuration file to detect format (required)")
+	_ = detectCmd.MarkFlagRequired("file")
+
+	// Version command
+	versionCmd := &cobra.Command{
+		Use:   "version",
+		Short: "Print version information",
+		Run: func(cmd *cobra.Command, args []string) {
+			fmt.Printf("HyperFleet Adapter Migration Tool\n")
+			fmt.Printf("  Version: %s\n", version)
+			fmt.Printf("  Commit:  %s\n", commit)
+			fmt.Printf("  Built:   %s\n", buildDate)
+		},
+	}
+
+	// Add subcommands
+	rootCmd.AddCommand(convertCmd)
+	rootCmd.AddCommand(validateCmd)
+	rootCmd.AddCommand(detectCmd)
+	rootCmd.AddCommand(versionCmd)
+
+	if err := rootCmd.Execute(); err != nil {
+		os.Exit(1)
+	}
+}
+
+func runConvert(cmd *cobra.Command, args []string) error {
+	inputFile, _ := cmd.Flags().GetString("input")
+	outputFile, _ := cmd.Flags().GetString("output")
+	overwrite, _ := cmd.Flags().GetBool("overwrite")
+
+	// Read input file
+	data, err := os.ReadFile(inputFile)
+	if err != nil {
+		return fmt.Errorf("failed to read input file: %w", err)
+	}
+
+	// Detect format
+	format := loader.DetectFormat(data)
+	if format == loader.FormatSWF {
+		fmt.Fprintf(os.Stderr, "Note: %s is already in Serverless Workflow format\n", inputFile)
+		if outputFile == "" {
+			fmt.Print(string(data))
+		} else {
+			return writeOutput(outputFile, data, overwrite)
+		}
+		return nil
+	}
+
+	if format != loader.FormatAdapterConfig {
+		return fmt.Errorf("unable to detect format of %s: expected AdapterConfig or Serverless Workflow", inputFile)
+	}
+
+	// Parse AdapterConfig
+	absPath, _ := filepath.Abs(inputFile)
+	baseDir := filepath.Dir(absPath)
+
+	adapterConfig, err := config_loader.Parse(data,
+		config_loader.WithBaseDir(baseDir),
+		config_loader.WithSkipSemanticValidation(),
+	)
+	if err != nil {
+		return fmt.Errorf("failed to parse AdapterConfig: %w", err)
+	}
+
+	// Convert to SWF workflow
+	workflow, err := converter.ConvertAdapterConfig(adapterConfig)
+	if err != nil {
+		return fmt.Errorf("failed to convert to workflow: %w", err)
+	}
+
+	// Serialize to YAML via JSON (SDK uses custom JSON marshaling)
+	swfYAML, err := workflowToYAML(workflow)
+	if err != nil {
+		return fmt.Errorf("failed to serialize workflow: %w", err)
+	}
+
+	// Add header comment
+	header := fmt.Sprintf(`# Serverless Workflow converted from AdapterConfig
+# Original: %s
+# Converted by: adapter-migrate v%s
+#
+# NOTE: This is an auto-generated workflow. Custom task types (hf:*) require
+# the HyperFleet adapter runtime for execution.
+ +`, filepath.Base(inputFile), version) + + output := []byte(header + string(swfYAML)) + + if outputFile == "" { + fmt.Print(string(output)) + } else { + if err := writeOutput(outputFile, output, overwrite); err != nil { + return err + } + fmt.Fprintf(os.Stderr, "Converted %s to %s\n", inputFile, outputFile) + } + + return nil +} + +func runValidate(cmd *cobra.Command, args []string) error { + file, _ := cmd.Flags().GetString("file") + + // Read file + data, err := os.ReadFile(file) + if err != nil { + return fmt.Errorf("failed to read file: %w", err) + } + + format := loader.DetectFormat(data) + + switch format { + case loader.FormatSWF: + // Validate as SWF + workflow, err := parser.FromYAMLSource(data) + if err != nil { + fmt.Printf("INVALID: Serverless Workflow validation failed:\n %s\n", err) + return fmt.Errorf("validation failed") + } + fmt.Printf("VALID: Serverless Workflow\n") + fmt.Printf(" Name: %s\n", workflow.Document.Name) + fmt.Printf(" Version: %s\n", workflow.Document.Version) + fmt.Printf(" Namespace: %s\n", workflow.Document.Namespace) + if workflow.Do != nil { + fmt.Printf(" Tasks: %d\n", len(*workflow.Do)) + } + + case loader.FormatAdapterConfig: + // Validate as AdapterConfig + absPath, _ := filepath.Abs(file) + baseDir := filepath.Dir(absPath) + + adapterConfig, err := config_loader.Parse(data, config_loader.WithBaseDir(baseDir)) + if err != nil { + fmt.Printf("INVALID: AdapterConfig validation failed:\n %s\n", err) + return fmt.Errorf("validation failed") + } + fmt.Printf("VALID: AdapterConfig\n") + fmt.Printf(" Name: %s\n", adapterConfig.Metadata.Name) + fmt.Printf(" Namespace: %s\n", adapterConfig.Metadata.Namespace) + fmt.Printf(" Version: %s\n", adapterConfig.Spec.Adapter.Version) + fmt.Printf(" Params: %d\n", len(adapterConfig.Spec.Params)) + fmt.Printf(" Preconditions: %d\n", len(adapterConfig.Spec.Preconditions)) + fmt.Printf(" Resources: %d\n", len(adapterConfig.Spec.Resources)) + + default: + fmt.Printf("UNKNOWN: Unable to detect format\n") + fmt.Printf(" File must be either AdapterConfig (kind: AdapterConfig) or\n") + fmt.Printf(" Serverless Workflow (document.dsl)\n") + return fmt.Errorf("unknown format") + } + + return nil +} + +func runDetect(cmd *cobra.Command, args []string) error { + file, _ := cmd.Flags().GetString("file") + + // Read file + data, err := os.ReadFile(file) + if err != nil { + return fmt.Errorf("failed to read file: %w", err) + } + + format := loader.DetectFormat(data) + + switch format { + case loader.FormatSWF: + fmt.Printf("Format: Serverless Workflow (SWF)\n") + fmt.Printf("File: %s\n", file) + fmt.Printf("Status: Ready for use with HyperFleet adapter\n") + + case loader.FormatAdapterConfig: + fmt.Printf("Format: AdapterConfig (legacy)\n") + fmt.Printf("File: %s\n", file) + fmt.Printf("Status: Can be converted to SWF format\n") + fmt.Printf("\nTo convert, run:\n") + fmt.Printf(" adapter-migrate convert -i %s -o %s\n", file, suggestOutputName(file)) + + default: + fmt.Printf("Format: Unknown\n") + fmt.Printf("File: %s\n", file) + fmt.Printf("Status: Not a recognized configuration format\n") + return fmt.Errorf("unknown format") + } + + return nil +} + +func writeOutput(path string, data []byte, overwrite bool) error { + if !overwrite { + if _, err := os.Stat(path); err == nil { + return fmt.Errorf("output file %s already exists (use -f to overwrite)", path) + } + } + + if err := os.WriteFile(path, data, 0644); err != nil { + return fmt.Errorf("failed to write output file: %w", err) + } + + return nil +} + +func 
suggestOutputName(inputPath string) string { + ext := filepath.Ext(inputPath) + base := strings.TrimSuffix(inputPath, ext) + + // If it has "adapter" in the name, suggest "workflow" + if strings.Contains(strings.ToLower(base), "adapter") { + base = strings.Replace(base, "adapter", "workflow", 1) + base = strings.Replace(base, "Adapter", "Workflow", 1) + } else { + base = base + "-workflow" + } + + return base + ".yaml" +} + +// workflowToYAML serializes a workflow to YAML using the SDK's custom JSON marshaling. +// The SWF SDK uses custom MarshalJSON for TaskItem/TaskList types, so we serialize +// to JSON first, then convert to YAML to preserve the correct structure. +func workflowToYAML(workflow *model.Workflow) ([]byte, error) { + // Serialize to JSON (uses SDK's custom MarshalJSON) + jsonBytes, err := json.Marshal(workflow) + if err != nil { + return nil, fmt.Errorf("failed to marshal workflow to JSON: %w", err) + } + + // Convert JSON to YAML + yamlBytes, err := yaml.JSONToYAML(jsonBytes) + if err != nil { + return nil, fmt.Errorf("failed to convert JSON to YAML: %w", err) + } + + return yamlBytes, nil +} diff --git a/cmd/adapter/main.go b/cmd/adapter/main.go index d4f8d04..1c14173 100644 --- a/cmd/adapter/main.go +++ b/cmd/adapter/main.go @@ -10,9 +10,10 @@ import ( "time" "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" - "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/executor" "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/hyperfleet_api" "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/k8s_client" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/swf/loader" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/swf/runner" "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/health" "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/otel" @@ -161,33 +162,57 @@ func runServe() error { log.Infof(ctx, "Starting Hyperfleet Adapter version=%s commit=%s built=%s tag=%s", version, commit, buildDate, tag) - // Load adapter configuration - // If configPath flag is empty, config_loader.Load will read from ADAPTER_CONFIG_PATH env var - log.Info(ctx, "Loading adapter configuration...") - adapterConfig, err := config_loader.Load(configPath, config_loader.WithAdapterVersion(version)) + // Load workflow configuration (supports both AdapterConfig and native SWF formats) + // If configPath flag is empty, loader will read from ADAPTER_CONFIG_PATH env var + log.Info(ctx, "Loading workflow configuration...") + loadResult, err := loader.Load(configPath, loader.WithAdapterVersion(version)) if err != nil { errCtx := logger.WithErrorField(ctx, err) - log.Errorf(errCtx, "Failed to load adapter configuration") - return fmt.Errorf("failed to load adapter configuration: %w", err) + log.Errorf(errCtx, "Failed to load workflow configuration") + return fmt.Errorf("failed to load workflow configuration: %w", err) } - // Recreate logger with component name from adapter config - log, err = logger.NewLogger(buildLoggerConfig(adapterConfig.Metadata.Name)) + // Extract metadata based on format + var componentName, componentNamespace string + var apiConfig config_loader.HyperfleetAPIConfig + + if loadResult.Format == loader.FormatAdapterConfig && loadResult.AdapterConfig != nil { + // Legacy format - extract from AdapterConfig + componentName = loadResult.AdapterConfig.Metadata.Name + componentNamespace = loadResult.AdapterConfig.Metadata.Namespace + apiConfig 
= loadResult.AdapterConfig.Spec.HyperfleetAPI + } else { + // Native SWF format - extract from workflow document + componentName = loadResult.Workflow.Document.Name + if ns, ok := loadResult.Workflow.Document.Metadata["namespace"].(string); ok { + componentNamespace = ns + } else { + componentNamespace = loadResult.Workflow.Document.Namespace + } + // For native SWF, use default/environment-based API config + apiConfig = config_loader.HyperfleetAPIConfig{ + Timeout: "30s", + RetryAttempts: 3, + RetryBackoff: "exponential", + } + } + + // Recreate logger with component name from config + log, err = logger.NewLogger(buildLoggerConfig(componentName)) if err != nil { - return fmt.Errorf("failed to create logger with adapter config: %w", err) + return fmt.Errorf("failed to create logger with config: %w", err) } - log.Infof(ctx, "Adapter configuration loaded successfully: name=%s namespace=%s", - adapterConfig.Metadata.Name, adapterConfig.Metadata.Namespace) + log.Infof(ctx, "Workflow configuration loaded successfully: name=%s namespace=%s format=%s", + componentName, componentNamespace, loadResult.Format) log.Infof(ctx, "HyperFleet API client configured: timeout=%s retryAttempts=%d", - adapterConfig.Spec.HyperfleetAPI.Timeout, - adapterConfig.Spec.HyperfleetAPI.RetryAttempts) + apiConfig.Timeout, apiConfig.RetryAttempts) // Get trace sample ratio from environment (default: 10%) sampleRatio := otel.GetTraceSampleRatio(log, ctx) // Initialize OpenTelemetry for trace_id/span_id generation and HTTP propagation - tp, err := otel.InitTracer(adapterConfig.Metadata.Name, version, sampleRatio) + tp, err := otel.InitTracer(componentName, version, sampleRatio) if err != nil { errCtx := logger.WithErrorField(ctx, err) log.Errorf(errCtx, "Failed to initialize OpenTelemetry") @@ -203,7 +228,7 @@ func runServe() error { }() // Start health server immediately (readiness starts as false) - healthServer := health.NewServer(log, HealthServerPort, adapterConfig.Metadata.Name) + healthServer := health.NewServer(log, HealthServerPort, componentName) if err := healthServer.Start(ctx); err != nil { errCtx := logger.WithErrorField(ctx, err) log.Errorf(errCtx, "Failed to start health server") @@ -222,7 +247,7 @@ func runServe() error { // Start metrics server with build info metricsServer := health.NewMetricsServer(log, MetricsServerPort, health.MetricsConfig{ - Component: adapterConfig.Metadata.Name, + Component: componentName, Version: version, Commit: commit, }) @@ -242,7 +267,7 @@ func runServe() error { // Create HyperFleet API client from config log.Info(ctx, "Creating HyperFleet API client...") - apiClient, err := createAPIClient(adapterConfig.Spec.HyperfleetAPI, log) + apiClient, err := createAPIClient(apiConfig, log) if err != nil { errCtx := logger.WithErrorField(ctx, err) log.Errorf(errCtx, "Failed to create HyperFleet API client") @@ -259,27 +284,35 @@ func runServe() error { return fmt.Errorf("failed to create Kubernetes client: %w", err) } - // Create the executor using the builder pattern - log.Info(ctx, "Creating event executor...") - exec, err := executor.NewBuilder(). - WithAdapterConfig(adapterConfig). + // Create the SWF-based workflow runner + // This replaces the legacy executor with Serverless Workflow SDK execution + log.Info(ctx, "Creating SWF workflow runner...") + runnerBuilder := runner.NewBuilder(). WithAPIClient(apiClient). WithK8sClient(k8sClient). - WithLogger(log). 
- Build() + WithLogger(log) + + // Use workflow directly for native SWF, or AdapterConfig for legacy format + if loadResult.Format == loader.FormatAdapterConfig && loadResult.AdapterConfig != nil { + runnerBuilder = runnerBuilder.WithAdapterConfig(loadResult.AdapterConfig) + } else { + runnerBuilder = runnerBuilder.WithWorkflow(loadResult.Workflow) + } + + workflowRunner, err := runnerBuilder.Build() if err != nil { errCtx := logger.WithErrorField(ctx, err) - log.Errorf(errCtx, "Failed to create executor") - return fmt.Errorf("failed to create executor: %w", err) + log.Errorf(errCtx, "Failed to create workflow runner") + return fmt.Errorf("failed to create workflow runner: %w", err) } - // Create the event handler from the executor - // This handler will: - // 1. Extract params from event data - // 2. Execute preconditions (API calls, condition checks) - // 3. Create/update Kubernetes resources - // 4. Execute post actions (status reporting) - handler := exec.CreateHandler() + // Create the event handler from the workflow runner + // The SWF runner executes the 4-phase pipeline: + // 1. Extract params from event data (hf:extract) + // 2. Execute preconditions (hf:preconditions) + // 3. Create/update Kubernetes resources (hf:resources) + // 4. Execute post actions (hf:post) + handler := workflowRunner.CreateHandler() // Handle signals for graceful shutdown sigCh := make(chan os.Signal, 1) diff --git a/configs/workflow-config-template.yaml b/configs/workflow-config-template.yaml new file mode 100644 index 0000000..ee2101e --- /dev/null +++ b/configs/workflow-config-template.yaml @@ -0,0 +1,166 @@ +# Serverless Workflow converted from AdapterConfig +# Original: adapter-config-template.yaml +# Converted by: adapter-migrate v0.2.0 +# +# NOTE: This workflow uses native SWF constructs for preconditions and post phases. +# Only hf:resources requires the HyperFleet adapter runtime for Kubernetes operations. + +do: +- extract_params: + set: + clusterId: ${ .event.id } + hyperfleetApiBaseUrl: ${ .env.HYPERFLEET_API_BASE_URL } + hyperfleetApiVersion: ${ .env.HYPERFLEET_API_VERSION // "v1" } +- phase_preconditions: + do: + # Precondition: clusterStatus - fetch cluster and check phase + - cluster_status: + try: + - api: + call: http + with: + method: GET + endpoint: ${ .params.hyperfleetApiBaseUrl + "/api/hyperfleet/" + .params.hyperfleetApiVersion + "/clusters/" + .params.clusterId } + export: + as: |- + ${ . 
+ { + clusterStatus: .content, + clusterName: .content.name, + clusterPhase: .content.status.phase, + generationId: .content.generation, + cluster_status_ok: (.content.status.phase == "NotReady") + } } + catch: + retry: + limit: + attempt: + count: 3 + backoff: + exponential: {} + do: + - fail: + set: + cluster_status_ok: false + + # Final evaluation: compute allMatched and notMetReason + - evaluate: + set: + allMatched: ${ .cluster_status_ok // false } + notMetReason: |- + ${ if (.cluster_status_ok // false) == false then "clusterStatus failed" else "" end } + +- phase_resources: + if: ${ .allMatched == true } + call: hf:resources + with: + config: [] # Resources would be defined here from the original AdapterConfig +- phase_post: + do: + # Build the cluster status payload using jq expression + - build_cluster_status_payload: + set: + clusterStatusPayload: |- + ${ { + adapter: (.params.metadata.name // "unknown"), + conditions: [ + { + type: "Applied", + status: (if (.resources.clusterNamespace.status.phase // "") == "Active" then "True" else "False" end), + reason: (if (.resources.clusterNamespace.status.phase // "") == "Active" then "NamespaceCreated" else "NamespacePending" end), + message: (if (.resources.clusterNamespace.status.phase // "") == "Active" then "Namespace created successfully" else "Namespace creation in progress" end) + }, + { + type: "Available", + status: (if (.resources.clusterNamespace.status.phase // "") == "Active" then "True" else "False" end), + reason: (if (.resources.clusterNamespace.status.phase // "") == "Active" then "NamespaceReady" else "NamespaceNotReady" end), + message: (if (.resources.clusterNamespace.status.phase // "") == "Active" then "Namespace is active and ready" else "Namespace is not active and ready" end) + }, + { + type: "Health", + status: (if (.adapter.executionStatus // "") == "success" then "True" elif (.adapter.executionStatus // "") == "failed" then "False" else "Unknown" end), + reason: (if (.adapter.errorReason // "") != "" then .adapter.errorReason else "Healthy" end), + message: (if (.adapter.errorMessage // "") != "" then .adapter.errorMessage else "All adapter operations completed successfully" end) + } + ], + data: { + namespace: { + name: (.resources.clusterNamespace.metadata.name // ""), + status: (.resources.clusterNamespace.status.phase // "") + } + }, + observed_generation: .generationId, + observed_time: (now | strftime("%Y-%m-%dT%H:%M:%SZ")) + } } + ## this beeceptor service is useful for debugging HTTP requests + ## you can browse them at: https://app.beeceptor.com/console/hyperfleet + - test_script_log: + try: + - api: + call: http + with: + method: POST + endpoint: https://hyperfleet.free.beeceptor.com + headers: + Content-Type: application/json + body: ${ .clusterStatusPayload } + export: + as: |- + ${ . + { + report_cluster_status_response: .content, + report_cluster_status_status: .response.statusCode + } } + catch: + retry: + limit: + attempt: + count: 3 + backoff: + exponential: {} + do: + - error: + set: + report_cluster_status_failed: true + report_cluster_status_error: ${ .error.message // "API call failed" } + + # Report cluster status using native HTTP call with retry + - report_cluster_status: + try: + - api: + call: http + with: + method: POST + endpoint: ${ .params.hyperfleetApiBaseUrl + "/api/hyperfleet/" + .params.hyperfleetApiVersion + "/clusters/" + .params.clusterId + "/statuses" } + headers: + Content-Type: application/json + body: ${ .clusterStatusPayload } + export: + as: |- + ${ . 
+ { + report_cluster_status_response: .content, + report_cluster_status_status: .response.statusCode + } } + catch: + retry: + limit: + attempt: + count: 3 + backoff: + exponential: {} + do: + - error: + set: + report_cluster_status_failed: true + report_cluster_status_error: ${ .error.message // "API call failed" } +document: + dsl: 1.0.0 + metadata: + namespace: hyperfleet-system + originalAPIVersion: hyperfleet.redhat.com/v1alpha1 + originalKind: AdapterConfig + name: example-adapter + namespace: hyperfleet + tags: + hyperfleet.io/adapter-type: example + hyperfleet.io/component: adapter + title: 'Adapter: example-adapter' + version: 0.1.0 diff --git a/go.mod b/go.mod index f4fd06c..edde41c 100644 --- a/go.mod +++ b/go.mod @@ -10,6 +10,7 @@ require ( github.com/mitchellh/copystructure v1.2.0 github.com/openshift-hyperfleet/hyperfleet-broker v1.0.1 github.com/prometheus/client_golang v1.23.2 + github.com/serverlessworkflow/sdk-go/v3 v3.2.0 github.com/spf13/cobra v1.10.2 github.com/spf13/pflag v1.0.10 github.com/stretchr/testify v1.11.1 @@ -22,6 +23,7 @@ require ( k8s.io/apimachinery v0.34.1 k8s.io/client-go v0.34.1 sigs.k8s.io/controller-runtime v0.22.4 + sigs.k8s.io/yaml v1.6.0 ) require ( @@ -78,6 +80,8 @@ require ( github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/itchyny/gojq v0.12.17 // indirect + github.com/itchyny/timefmt-go v0.1.6 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.18.0 // indirect @@ -105,7 +109,7 @@ require ( github.com/opencontainers/image-spec v1.1.1 // indirect github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/common v0.66.1 // indirect @@ -122,9 +126,15 @@ require ( github.com/spf13/viper v1.21.0 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect + github.com/tidwall/gjson v1.18.0 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.1 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/x448/float16 v0.8.4 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect @@ -155,5 +165,4 @@ require ( sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect sigs.k8s.io/randfill v1.0.0 // indirect sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect - sigs.k8s.io/yaml v1.6.0 // indirect ) diff --git a/go.sum b/go.sum index 897782b..2739823 100644 --- a/go.sum +++ b/go.sum @@ -175,6 +175,10 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= 
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/itchyny/gojq v0.12.17 h1:8av8eGduDb5+rvEdaOO+zQUjA04MS0m3Ps8HiD+fceg= +github.com/itchyny/gojq v0.12.17/go.mod h1:WBrEMkgAfAGO1LUcGOckBl5O726KPp+OlkKug0I/FEY= +github.com/itchyny/timefmt-go v0.1.6 h1:ia3s54iciXDdzWzwaVKXZPbiXzxxnv1SPGFfM/myJ5Q= +github.com/itchyny/timefmt-go v0.1.6/go.mod h1:RRDZYC5s9ErkjQvTvvU7keJjxUYzIISJGxm9/mAERQg= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -248,8 +252,9 @@ github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0 github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= @@ -268,6 +273,8 @@ github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= +github.com/serverlessworkflow/sdk-go/v3 v3.2.0 h1:UapUYBkOxAQ6hPnyvZjMsZngMzGTuWfoVm4JXJTtAQU= +github.com/serverlessworkflow/sdk-go/v3 v3.2.0/go.mod h1:N/TVPogY5OsZ+NG7NeD9oZ30VO6oHahxAsoeBPnh/Nw= github.com/shirou/gopsutil/v4 v4.25.6 h1:kLysI2JsKorfaFPcYmcJqbzROzsBWEOAtw6A7dIfqXs= github.com/shirou/gopsutil/v4 v4.25.6/go.mod h1:PfybzyydfZcN+JMMjkF6Zb8Mq1A/VcogFFg7hj50W9c= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= @@ -305,12 +312,26 @@ github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8 github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/testcontainers/testcontainers-go v0.40.0 h1:pSdJYLOVgLE8YdUY2FHQ1Fxu+aMnb6JfVz1mxk7OeMU= github.com/testcontainers/testcontainers-go v0.40.0/go.mod h1:FSXV5KQtX2HAMlm7U3APNyLkkap35zNLxukw9oBi/MY= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/pretty v1.2.1 
h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= diff --git a/internal/swf/converter/adapter_to_swf.go b/internal/swf/converter/adapter_to_swf.go new file mode 100644 index 0000000..6e270d2 --- /dev/null +++ b/internal/swf/converter/adapter_to_swf.go @@ -0,0 +1,914 @@ +package converter + +import ( + "fmt" + "regexp" + "strings" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/swf/tasks" + "github.com/serverlessworkflow/sdk-go/v3/model" +) + +// ConvertAdapterConfig converts a legacy AdapterConfig to a Serverless Workflow model. +// This enables running existing adapter configurations through the SWF engine. 
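+//
+// The emitted workflow mirrors the adapter's 4-phase pipeline, roughly:
+//
+//	spec.params        -> extract_params / load_k8s_params / phase_params
+//	spec.preconditions -> phase_preconditions (do task ending in "evaluate")
+//	spec.resources     -> phase_resources (hf:resources, gated on .allMatched)
+//	spec.post          -> phase_post (set + native HTTP tasks)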
+func ConvertAdapterConfig(config *config_loader.AdapterConfig) (*model.Workflow, error) { + if config == nil { + return nil, fmt.Errorf("config cannot be nil") + } + + // Build workflow document + workflow := &model.Workflow{ + Document: model.Document{ + DSL: "1.0.0", + Namespace: "hyperfleet", + Name: sanitizeName(config.Metadata.Name), + Version: config.Spec.Adapter.Version, + Title: fmt.Sprintf("Adapter: %s", config.Metadata.Name), + Tags: config.Metadata.Labels, + Metadata: map[string]interface{}{ + "originalKind": config.Kind, + "originalAPIVersion": config.APIVersion, + "namespace": config.Metadata.Namespace, + }, + }, + } + + // Build task list for the 4-phase pipeline + taskList := make(model.TaskList, 0) + + // Phase 1: Parameter Extraction + if len(config.Spec.Params) > 0 { + paramsTask := convertParamsPhase(config.Spec.Params) + taskList = append(taskList, paramsTask) + } + + // Phase 2: Preconditions + var hasPreconditions bool + if len(config.Spec.Preconditions) > 0 { + hasPreconditions = true + preconditionsTask := convertPreconditionsPhase(config.Spec.Preconditions) + taskList = append(taskList, preconditionsTask) + } + + // Phase 3: Resources (with conditional on preconditions) + if len(config.Spec.Resources) > 0 { + resourcesTask := convertResourcesPhase(config.Spec.Resources, hasPreconditions) + taskList = append(taskList, resourcesTask) + } + + // Phase 4: Post-processing + if config.Spec.Post != nil { + postTask := convertPostPhase(config.Spec.Post) + taskList = append(taskList, postTask) + } + + workflow.Do = &taskList + + return workflow, nil +} + +// sanitizeName converts a name to a valid SWF name (hostname_rfc1123 compatible). +func sanitizeName(name string) string { + if name == "" { + return "unnamed-workflow" + } + // Replace underscores and other invalid characters with dashes + result := make([]byte, 0, len(name)) + for i := 0; i < len(name); i++ { + c := name[i] + if (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') || c == '-' { + result = append(result, c) + } else if c >= 'A' && c <= 'Z' { + result = append(result, c+32) // lowercase + } else { + result = append(result, '-') + } + } + return string(result) +} + +// convertParamsPhase converts the params configuration to native SWF set tasks +// for event and env sources, and hf:k8s-read for secrets/configmaps. 
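+//
+// For example, {name: clusterId, source: event.id} becomes the set entry
+// `clusterId: ${ .event.id }`, while {name: token, source: secret.<ref>}
+// is routed to an hf:k8s-read call instead.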
+func convertParamsPhase(params []config_loader.Parameter) *model.TaskItem { + // Separate params by source type + setValues := make(map[string]interface{}) + var k8sSecrets []map[string]interface{} + var k8sConfigMaps []map[string]interface{} + + for _, p := range params { + switch { + case strings.HasPrefix(p.Source, "event."): + // Native SWF: use jq expression to extract from event data + path := strings.TrimPrefix(p.Source, "event.") + expr := buildJQExpression(".event."+path, p.Default) + setValues[p.Name] = expr + + case strings.HasPrefix(p.Source, "env."): + // Native SWF: use jq expression to extract from injected env + envVar := strings.TrimPrefix(p.Source, "env.") + expr := buildJQExpression(".env."+envVar, p.Default) + setValues[p.Name] = expr + + case strings.HasPrefix(p.Source, "secret."): + // K8s secret: needs hf:k8s-read task + ref := strings.TrimPrefix(p.Source, "secret.") + secretDef := map[string]interface{}{ + "name": p.Name, + "ref": ref, + } + if p.Required { + secretDef["required"] = true + } + if p.Default != nil { + secretDef["default"] = p.Default + } + k8sSecrets = append(k8sSecrets, secretDef) + + case strings.HasPrefix(p.Source, "configmap."): + // K8s configmap: needs hf:k8s-read task + ref := strings.TrimPrefix(p.Source, "configmap.") + cmDef := map[string]interface{}{ + "name": p.Name, + "ref": ref, + } + if p.Required { + cmDef["required"] = true + } + if p.Default != nil { + cmDef["default"] = p.Default + } + k8sConfigMaps = append(k8sConfigMaps, cmDef) + + default: + // Unknown source type, try as event path + expr := buildJQExpression(".event."+p.Source, p.Default) + setValues[p.Name] = expr + } + } + + // If we have only set values (no K8s reads), return a native set task + if len(k8sSecrets) == 0 && len(k8sConfigMaps) == 0 { + return &model.TaskItem{ + Key: "extract_params", + Task: &model.SetTask{ + Set: setValues, + }, + } + } + + // If we have K8s reads but no set values, return just hf:k8s-read + if len(setValues) == 0 { + with := make(map[string]interface{}) + if len(k8sSecrets) > 0 { + with["secrets"] = k8sSecrets + } + if len(k8sConfigMaps) > 0 { + with["configmaps"] = k8sConfigMaps + } + return &model.TaskItem{ + Key: "load_k8s_params", + Task: &model.CallFunction{ + Call: tasks.TaskK8sRead, + With: with, + }, + } + } + + // Mixed case: we have both set values and K8s reads + // For now, return the legacy hf:extract to handle all cases + // TODO: Return a DoTask with both set and hf:k8s-read tasks in sequence + sources := make([]map[string]interface{}, 0, len(params)) + for _, p := range params { + sourceConfig := map[string]interface{}{ + "name": p.Name, + "source": p.Source, + } + if p.Type != "" { + sourceConfig["type"] = p.Type + } + if p.Required { + sourceConfig["required"] = true + } + if p.Default != nil { + sourceConfig["default"] = p.Default + } + sources = append(sources, sourceConfig) + } + + return &model.TaskItem{ + Key: "phase_params", + Task: &model.CallFunction{ + Call: tasks.TaskExtract, + With: map[string]interface{}{ + "sources": sources, + }, + }, + } +} + +// buildJQExpression creates a jq expression with optional default value. 
+// Example: buildJQExpression(".env.API_URL", "http://localhost") -> "${ .env.API_URL // \"http://localhost\" }" +func buildJQExpression(path string, defaultVal interface{}) string { + if defaultVal == nil { + return fmt.Sprintf("${ %s }", path) + } + + // Format default value based on type + switch v := defaultVal.(type) { + case string: + // String default: use quotes + return fmt.Sprintf("${ %s // \"%s\" }", path, v) + case bool: + return fmt.Sprintf("${ %s // %t }", path, v) + case int, int32, int64, float32, float64: + return fmt.Sprintf("${ %s // %v }", path, v) + default: + // For complex types, just use the path without default + return fmt.Sprintf("${ %s }", path) + } +} + +// convertPreconditionsPhase converts preconditions to native SWF tasks. +// Uses call:http with export for API calls and if conditions for short-circuiting. +func convertPreconditionsPhase(preconditions []config_loader.Precondition) *model.TaskItem { + if len(preconditions) == 0 { + return nil + } + + // Build a list of tasks for the preconditions phase + taskList := make(model.TaskList, 0, len(preconditions)+1) + + // Collect precondition names for the final evaluation + var precondNames []string + + for i, p := range preconditions { + precondNames = append(precondNames, p.Name) + + // Convert each precondition to native SWF task(s) + precondTask := convertSinglePrecondition(&p, i > 0, precondNames[:i]) + if precondTask != nil { + taskList = append(taskList, precondTask) + } + } + + // Add final evaluation task to compute allMatched and notMetReason + evaluateTask := buildEvaluateTask(precondNames) + taskList = append(taskList, evaluateTask) + + // Wrap all tasks in a DoTask + return &model.TaskItem{ + Key: "phase_preconditions", + Task: &model.DoTask{ + Do: &taskList, + }, + } +} + +// convertSinglePrecondition converts a single precondition to native SWF task(s). +// If hasIfCondition is true, the task is conditionally executed based on previous preconditions. +func convertSinglePrecondition(p *config_loader.Precondition, hasIfCondition bool, previousPrecondNames []string) *model.TaskItem { + taskName := toSnakeCase(p.Name) + + // Build if condition based on previous preconditions + var ifExpr string + if hasIfCondition && len(previousPrecondNames) > 0 { + // Only execute if all previous preconditions passed + ifExpr = "${ " + BuildAllMatchedExpr(previousPrecondNames) + " }" + } + + // If there's an API call, build HTTP task with optional retry + if p.APICall != nil { + return buildPreconditionWithAPICall(p, taskName, ifExpr) + } + + // If there's only an expression (CEL), use a set task + if p.Expression != "" { + return buildPreconditionWithExpression(p, taskName, ifExpr) + } + + // Fallback: just set _ok to true + okField := taskName + "_ok" + setTask := BuildSetTask(map[string]interface{}{ + okField: true, + }) + if ifExpr != "" { + setTask.If = model.NewExpr(ifExpr) + } + + return &model.TaskItem{ + Key: taskName, + Task: setTask, + } +} + +// buildPreconditionWithAPICall builds a precondition task that makes an API call. 
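+//
+// With retryAttempts > 0 the call is wrapped in a try task, roughly:
+//
+//	<name>:
+//	  try:
+//	    - api: { call: http, with: {...}, export: {...} }
+//	  catch:
+//	    retry: { limit: { attempt: { count: N } }, backoff: { exponential: {} } }
+//	    do:
+//	      - fail: { set: { <name>_ok: false } }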
+func buildPreconditionWithAPICall(p *config_loader.Precondition, taskName, ifExpr string) *model.TaskItem { + // Convert Go template URL to jq expression + url := ConvertGoTemplateToJQ(p.APICall.URL) + + // Convert headers + headers := make(map[string]string) + for _, h := range p.APICall.Headers { + headers[h.Name] = ConvertGoTemplateToJQ(h.Value) + } + + // Build export expression + exportExpr := BuildPreconditionExportExpr(p.Name, p.Capture, p.Conditions) + + // Build the HTTP call task + httpTask := BuildHTTPCallTaskWithExport(p.APICall.Method, url, headers, exportExpr) + + // If there's retry configuration, wrap in try/catch + if p.APICall.RetryAttempts > 0 { + // Build the inner HTTP task + innerTaskList := model.TaskList{ + &model.TaskItem{Key: "api", Task: httpTask}, + } + + // Build catch block that sets _ok to false + okField := taskName + "_ok" + catchTaskList := model.TaskList{ + BuildSetTaskItem("fail", map[string]interface{}{okField: false}), + } + + tryTask := BuildTryWithRetry(innerTaskList, p.APICall.RetryAttempts, catchTaskList) + + // Add if condition + if ifExpr != "" { + tryTask.If = model.NewExpr(ifExpr) + } + + return &model.TaskItem{ + Key: taskName, + Task: tryTask, + } + } + + // No retry - just use the HTTP task directly + if ifExpr != "" { + httpTask.If = model.NewExpr(ifExpr) + } + + return &model.TaskItem{ + Key: taskName, + Task: httpTask, + } +} + +// buildPreconditionWithExpression builds a precondition task that evaluates a CEL expression. +// Since we're using jq, we'll use hf:cel for complex expressions or convert simple ones. +func buildPreconditionWithExpression(p *config_loader.Precondition, taskName, ifExpr string) *model.TaskItem { + okField := taskName + "_ok" + + // For simple expressions, try to convert to jq + // For complex CEL expressions, fall back to hf:cel task + if isSimpleCELExpression(p.Expression) { + jqExpr := celToJQ(p.Expression) + setValues := map[string]interface{}{ + okField: "${ " + jqExpr + " }", + } + setTask := BuildSetTask(setValues) + if ifExpr != "" { + setTask.If = model.NewExpr(ifExpr) + } + return &model.TaskItem{ + Key: taskName, + Task: setTask, + } + } + + // Fall back to hf:cel for complex expressions + celTask := &model.CallFunction{ + Call: tasks.TaskCEL, + With: map[string]interface{}{ + "expression": p.Expression, + "resultKey": okField, + }, + } + if ifExpr != "" { + celTask.If = model.NewExpr(ifExpr) + } + + return &model.TaskItem{ + Key: taskName, + Task: celTask, + } +} + +// buildEvaluateTask builds the final evaluation task that computes allMatched and notMetReason. +func buildEvaluateTask(precondNames []string) *model.TaskItem { + allMatchedExpr := "${ " + BuildAllMatchedExpr(precondNames) + " }" + notMetReasonExpr := "${ " + BuildNotMetReasonExpr(precondNames) + " }" + + return BuildSetTaskItem("evaluate", map[string]interface{}{ + "allMatched": allMatchedExpr, + "notMetReason": notMetReasonExpr, + }) +} + +// isSimpleCELExpression checks if a CEL expression can be easily converted to jq. +func isSimpleCELExpression(expr string) bool { + // Simple expressions are field comparisons like "field == value" or "field != \"\"" + // Complex expressions contain function calls, ternary operators, etc. + complexPatterns := []string{"?.", ".orValue(", ".filter(", ".map(", ".exists(", ".all("} + for _, pattern := range complexPatterns { + if strings.Contains(expr, pattern) { + return false + } + } + return true +} + +// convertPreconditionsPhaseOld is the legacy conversion that uses hf:preconditions. 
+// Deprecated: Use convertPreconditionsPhase which generates native SWF tasks. +func convertPreconditionsPhaseOld(preconditions []config_loader.Precondition) *model.TaskItem { + configs := make([]map[string]interface{}, 0, len(preconditions)) + + for _, p := range preconditions { + precondConfig := map[string]interface{}{ + "name": p.Name, + } + + // Add API call if present + if p.APICall != nil { + apiCallConfig := map[string]interface{}{ + "method": p.APICall.Method, + "url": p.APICall.URL, + } + if p.APICall.Timeout != "" { + apiCallConfig["timeout"] = p.APICall.Timeout + } + if p.APICall.RetryAttempts > 0 { + apiCallConfig["retryAttempts"] = p.APICall.RetryAttempts + } + if p.APICall.RetryBackoff != "" { + apiCallConfig["retryBackoff"] = p.APICall.RetryBackoff + } + if len(p.APICall.Headers) > 0 { + headers := make(map[string]string) + for _, h := range p.APICall.Headers { + headers[h.Name] = h.Value + } + apiCallConfig["headers"] = headers + } + if p.APICall.Body != "" { + apiCallConfig["body"] = p.APICall.Body + } + precondConfig["apiCall"] = apiCallConfig + } + + // Add captures if present + if len(p.Capture) > 0 { + captures := make([]map[string]interface{}, 0, len(p.Capture)) + for _, c := range p.Capture { + captureConfig := map[string]interface{}{ + "name": c.Name, + } + if c.Field != "" { + captureConfig["field"] = c.Field + } + if c.Expression != "" { + captureConfig["expression"] = c.Expression + } + captures = append(captures, captureConfig) + } + precondConfig["capture"] = captures + } + + // Add conditions if present + if len(p.Conditions) > 0 { + conditions := make([]map[string]interface{}, 0, len(p.Conditions)) + for _, c := range p.Conditions { + conditions = append(conditions, map[string]interface{}{ + "field": c.Field, + "operator": c.Operator, + "value": c.Value, + }) + } + precondConfig["conditions"] = conditions + } + + // Add expression if present + if p.Expression != "" { + precondConfig["expression"] = p.Expression + } + + configs = append(configs, precondConfig) + } + + return &model.TaskItem{ + Key: "phase_preconditions", + Task: &model.CallFunction{ + Call: tasks.TaskPreconditions, + With: map[string]interface{}{ + "config": configs, + }, + }, + } +} + +// convertResourcesPhase converts resources to an hf:resources task. +// If hasPreconditions is true, adds an `if` condition to skip when preconditions fail. 
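+//
+// The emitted task looks like:
+//
+//	phase_resources:
+//	  if: ${ .allMatched == true }
+//	  call: hf:resources
+//	  with:
+//	    config: [...]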
+func convertResourcesPhase(resources []config_loader.Resource, hasPreconditions bool) *model.TaskItem { + resourceConfigs := make([]map[string]interface{}, 0, len(resources)) + + for _, r := range resources { + resourceConfig := map[string]interface{}{ + "name": r.Name, + "manifest": r.Manifest, + } + + if r.RecreateOnChange { + resourceConfig["recreateOnChange"] = true + } + + if r.Discovery != nil { + discoveryConfig := make(map[string]interface{}) + if r.Discovery.Namespace != "" { + discoveryConfig["namespace"] = r.Discovery.Namespace + } + if r.Discovery.ByName != "" { + discoveryConfig["byName"] = r.Discovery.ByName + } + if r.Discovery.BySelectors != nil && r.Discovery.BySelectors.LabelSelector != nil { + bySelectors := map[string]interface{}{ + "labelSelector": r.Discovery.BySelectors.LabelSelector, + } + discoveryConfig["bySelectors"] = bySelectors + } + resourceConfig["discovery"] = discoveryConfig + } + + resourceConfigs = append(resourceConfigs, resourceConfig) + } + + resourcesCallTask := &model.CallFunction{ + Call: tasks.TaskResources, + With: map[string]interface{}{ + "config": resourceConfigs, + }, + } + + // If there are preconditions, add an `if` condition to skip when allMatched is false + if hasPreconditions { + resourcesCallTask.If = model.NewExpr("${ .allMatched == true }") + } + + return &model.TaskItem{ + Key: "phase_resources", + Task: resourcesCallTask, + } +} + +// convertPostPhase converts post-processing configuration to native SWF tasks. +// Uses a hybrid approach: +// - Payload building: set tasks with jq for simple fields, hf:cel for complex CEL expressions +// - API calls: native call:http with try/catch for retry +func convertPostPhase(post *config_loader.PostConfig) *model.TaskItem { + taskList := make(model.TaskList, 0) + + // Step 1: Convert payloads to set tasks + for _, p := range post.Payloads { + payloadTask := convertPayloadToSetTask(&p) + if payloadTask != nil { + taskList = append(taskList, payloadTask) + } + } + + // Step 2: Convert post actions to native tasks + for _, a := range post.PostActions { + actionTasks := convertPostActionToTasks(&a) + taskList = append(taskList, actionTasks...) + } + + // If no tasks, return nil + if len(taskList) == 0 { + return nil + } + + // Wrap all tasks in a DoTask + return &model.TaskItem{ + Key: "phase_post", + Task: &model.DoTask{ + Do: &taskList, + }, + } +} + +// convertPayloadToSetTask converts a payload definition to a set task. +// For complex CEL expressions that can't be converted, falls back to hf:cel. +func convertPayloadToSetTask(p *config_loader.Payload) *model.TaskItem { + taskName := "build_" + toSnakeCase(p.Name) + + // Get the build definition + var buildDef interface{} + if p.Build != nil { + buildDef = p.Build + } else if p.BuildRefContent != nil { + buildDef = p.BuildRefContent + } else { + return nil + } + + // Try to convert the build definition to jq expressions + jqExpr, canConvert := convertBuildDefToJQ(buildDef) + + if canConvert { + // Use a set task with jq expression + return BuildSetTaskItem(taskName, map[string]interface{}{ + p.Name: "${ " + jqExpr + " }", + }) + } + + // Fall back to hf:cel for complex expressions + return &model.TaskItem{ + Key: taskName, + Task: &model.CallFunction{ + Call: tasks.TaskCEL, + With: map[string]interface{}{ + "build": buildDef, + "resultKey": p.Name, + }, + }, + } +} + +// convertBuildDefToJQ attempts to convert a build definition to a jq expression. +// Returns the jq expression and whether conversion was successful. 
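+//
+// For example, {adapter: "{{ .metadata.name }}", count: 3} yields (up to key
+// order) `{ adapter: .params.metadata.name, count: 3 }`; any value that cannot
+// be converted makes this return ok=false so the caller falls back to hf:cel.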
+func convertBuildDefToJQ(buildDef interface{}) (string, bool) {
+	switch v := buildDef.(type) {
+	case map[string]interface{}:
+		return convertMapToJQ(v)
+	case string:
+		// Simple string - might be a Go template
+		return convertGoTemplateToJQExpr(v), true
+	default:
+		return "", false
+	}
+}
+
+// convertMapToJQ converts a map build definition to a jq object expression.
+func convertMapToJQ(m map[string]interface{}) (string, bool) {
+	parts := make([]string, 0, len(m))
+
+	for key, value := range m {
+		valueExpr, ok := convertValueToJQ(value)
+		if !ok {
+			return "", false
+		}
+		parts = append(parts, fmt.Sprintf("%s: %s", key, valueExpr))
+	}
+
+	return "{ " + strings.Join(parts, ", ") + " }", true
+}
+
+// convertValueToJQ converts a value to a jq expression.
+func convertValueToJQ(value interface{}) (string, bool) {
+	switch v := value.(type) {
+	case string:
+		// Check if it's a Go template
+		if strings.Contains(v, "{{") {
+			return convertGoTemplateToJQExpr(v), true
+		}
+		return formatJQValue(v), true
+
+	case map[string]interface{}:
+		// Check if this is a field/expression definition
+		if _, hasField := v["field"]; hasField {
+			expr, ok := BuildPayloadFieldExpr(v)
+			return expr, ok
+		}
+		if _, hasExpr := v["expression"]; hasExpr {
+			expr, ok := BuildPayloadFieldExpr(v)
+			return expr, ok
+		}
+		// Nested object
+		return convertMapToJQ(v)
+
+	case []interface{}:
+		items := make([]string, 0, len(v))
+		for _, item := range v {
+			itemExpr, ok := convertValueToJQ(item)
+			if !ok {
+				return "", false
+			}
+			items = append(items, itemExpr)
+		}
+		return "[ " + strings.Join(items, ", ") + " ]", true
+
+	case bool, int, int64, float64:
+		return fmt.Sprintf("%v", v), true
+
+	case nil:
+		return "null", true
+
+	default:
+		return "", false
+	}
+}
+
+// convertGoTemplateToJQExpr converts a Go template string to a jq string expression.
+func convertGoTemplateToJQExpr(template string) string {
+	// Check if it's purely a template reference like {{ .field }}
+	re := regexp.MustCompile(`^\{\{\s*\.([^}]+)\s*\}\}$`)
+	if matches := re.FindStringSubmatch(template); len(matches) == 2 {
+		fieldPath := strings.TrimSpace(matches[1])
+		return ".params." + fieldPath
+	}
+
+	// For mixed strings, we need string interpolation
+	// jq uses \(.expr) for interpolation inside strings
+	converted := ConvertGoTemplateToJQ(template)
+	if strings.Contains(converted, "${") {
+		// Convert ${ .field } to jq string interpolation
+		reJQ := regexp.MustCompile(`\$\{\s*([^}]+)\s*\}`)
+		result := reJQ.ReplaceAllString(converted, `\($1)`)
+		return `"` + result + `"`
+	}
+
+	return formatJQValue(template)
+}
+
+// convertPostActionToTasks converts a post action to native SWF tasks.
+func convertPostActionToTasks(a *config_loader.PostAction) []*model.TaskItem {
+	// Named taskItems to avoid shadowing the imported tasks package.
+	taskItems := make([]*model.TaskItem, 0)
+
+	// Handle API call
+	if a.APICall != nil {
+		apiTask := convertPostAPICallToTask(a.Name, a.APICall)
+		if apiTask != nil {
+			taskItems = append(taskItems, apiTask)
+		}
+	}
+
+	// Log actions are handled at runtime (no task needed in workflow)
+	// They could be converted to set tasks that store log messages if needed
+
+	return taskItems
+}
+
+// convertPostAPICallToTask converts an API call to a native HTTP task with optional retry.
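+//
+// The generated task mirrors the report_cluster_status task in
+// configs/workflow-config-template.yaml: when retries are exhausted, the
+// catch block sets <name>_failed and <name>_error rather than failing the
+// workflow.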
+func convertPostAPICallToTask(name string, apiCall *config_loader.APICall) *model.TaskItem { + taskName := toSnakeCase(name) + + // Convert URL - handle Go templates + url := ConvertGoTemplateToJQ(apiCall.URL) + + // Convert headers + headers := make(map[string]string) + for _, h := range apiCall.Headers { + headers[h.Name] = ConvertGoTemplateToJQ(h.Value) + } + + // Build the HTTP task + httpTask := &model.CallHTTP{ + Call: "http", + With: model.HTTPArguments{ + Method: apiCall.Method, + Headers: headers, + Output: "content", + }, + } + + // Set endpoint + if isRuntimeExpression(url) { + httpTask.With.Endpoint = &model.Endpoint{ + RuntimeExpression: model.NewExpr(url), + } + } else { + httpTask.With.Endpoint = model.NewEndpoint(url) + } + + // Handle body - convert Go templates + if apiCall.Body != "" { + bodyExpr := ConvertGoTemplateToJQ(apiCall.Body) + // If body is a runtime expression, we need to handle it differently + if isRuntimeExpression(bodyExpr) { + // Store the expression reference - will be evaluated at runtime + httpTask.With.Body = []byte(bodyExpr) + } else { + httpTask.With.Body = []byte(apiCall.Body) + } + } + + // Add export to capture response + httpTask.Export = &model.Export{ + As: model.NewObjectOrRuntimeExpr(fmt.Sprintf("${ . + { %s_response: .content, %s_status: .response.statusCode } }", taskName, taskName)), + } + + // Wrap in try/catch if retry is configured + if apiCall.RetryAttempts > 0 { + innerTaskList := model.TaskList{ + &model.TaskItem{Key: "api", Task: httpTask}, + } + + // Build catch block + catchTaskList := model.TaskList{ + BuildSetTaskItem("error", map[string]interface{}{ + taskName + "_failed": true, + taskName + "_error": "${ .error.message // \"API call failed\" }", + }), + } + + tryTask := BuildTryWithRetry(innerTaskList, apiCall.RetryAttempts, catchTaskList) + + return &model.TaskItem{ + Key: taskName, + Task: tryTask, + } + } + + return &model.TaskItem{ + Key: taskName, + Task: httpTask, + } +} + +// convertPostPhaseOld is the legacy conversion that uses hf:post. +// Deprecated: Use convertPostPhase which generates native SWF tasks. 
+func convertPostPhaseOld(post *config_loader.PostConfig) *model.TaskItem { + postConfig := make(map[string]interface{}) + + // Convert payloads + if len(post.Payloads) > 0 { + payloads := make([]map[string]interface{}, 0, len(post.Payloads)) + for _, p := range post.Payloads { + payload := map[string]interface{}{ + "name": p.Name, + } + if p.Build != nil { + payload["build"] = p.Build + } + if p.BuildRef != "" { + payload["buildRef"] = p.BuildRef + } + if p.BuildRefContent != nil { + payload["buildRefContent"] = p.BuildRefContent + } + payloads = append(payloads, payload) + } + postConfig["payloads"] = payloads + } + + // Convert post actions + if len(post.PostActions) > 0 { + actions := make([]map[string]interface{}, 0, len(post.PostActions)) + for _, a := range post.PostActions { + action := map[string]interface{}{ + "name": a.Name, + } + + if a.APICall != nil { + apiCallConfig := map[string]interface{}{ + "method": a.APICall.Method, + "url": a.APICall.URL, + } + if a.APICall.Timeout != "" { + apiCallConfig["timeout"] = a.APICall.Timeout + } + if a.APICall.RetryAttempts > 0 { + apiCallConfig["retryAttempts"] = a.APICall.RetryAttempts + } + if a.APICall.RetryBackoff != "" { + apiCallConfig["retryBackoff"] = a.APICall.RetryBackoff + } + if len(a.APICall.Headers) > 0 { + headers := make(map[string]string) + for _, h := range a.APICall.Headers { + headers[h.Name] = h.Value + } + apiCallConfig["headers"] = headers + } + if a.APICall.Body != "" { + apiCallConfig["body"] = a.APICall.Body + } + action["apiCall"] = apiCallConfig + } + + if a.Log != nil { + action["log"] = map[string]interface{}{ + "message": a.Log.Message, + "level": a.Log.Level, + } + } + + actions = append(actions, action) + } + postConfig["postActions"] = actions + } + + return &model.TaskItem{ + Key: "phase_post", + Task: &model.CallFunction{ + Call: tasks.TaskPost, + With: postConfig, + }, + } +} + +// WorkflowFromConfig is a convenience function that converts and returns the workflow. +// Returns an error if conversion fails. 
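+//
+// A minimal sketch of the round trip (assuming a parsed config):
+//
+//	wf, err := converter.WorkflowFromConfig(cfg)
+//	if err != nil {
+//		return err
+//	}
+//	data, err := json.Marshal(wf) // SDK types use custom MarshalJSON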
+func WorkflowFromConfig(config *config_loader.AdapterConfig) (*model.Workflow, error) { + return ConvertAdapterConfig(config) +} diff --git a/internal/swf/converter/adapter_to_swf_test.go b/internal/swf/converter/adapter_to_swf_test.go new file mode 100644 index 0000000..8e85be3 --- /dev/null +++ b/internal/swf/converter/adapter_to_swf_test.go @@ -0,0 +1,531 @@ +package converter + +import ( + "testing" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" + "github.com/serverlessworkflow/sdk-go/v3/model" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestConvertAdapterConfig_Nil(t *testing.T) { + workflow, err := ConvertAdapterConfig(nil) + assert.Error(t, err) + assert.Contains(t, err.Error(), "config cannot be nil") + assert.Nil(t, workflow) +} + +func TestConvertAdapterConfig_Minimal(t *testing.T) { + config := &config_loader.AdapterConfig{ + Kind: "AdapterConfig", + APIVersion: "v1alpha1", + Metadata: config_loader.Metadata{ + Name: "test-adapter", + Namespace: "default", + }, + Spec: config_loader.AdapterConfigSpec{ + Adapter: config_loader.AdapterInfo{ + Version: "1.0.0", + }, + }, + } + + workflow, err := ConvertAdapterConfig(config) + require.NoError(t, err) + require.NotNil(t, workflow) + + assert.Equal(t, "1.0.0", workflow.Document.DSL) + assert.Equal(t, "hyperfleet", workflow.Document.Namespace) + assert.Equal(t, "test-adapter", workflow.Document.Name) + assert.Equal(t, "1.0.0", workflow.Document.Version) + assert.Equal(t, "Adapter: test-adapter", workflow.Document.Title) +} + +func TestConvertAdapterConfig_WithParams(t *testing.T) { + config := &config_loader.AdapterConfig{ + Metadata: config_loader.Metadata{ + Name: "params-adapter", + }, + Spec: config_loader.AdapterConfigSpec{ + Params: []config_loader.Parameter{ + { + Name: "clusterId", + Source: "event.id", + Type: "string", + Required: true, + }, + { + Name: "count", + Source: "event.count", + Type: "int", + Default: 10, + }, + }, + }, + } + + workflow, err := ConvertAdapterConfig(config) + require.NoError(t, err) + require.NotNil(t, workflow) + require.NotNil(t, workflow.Do) + + // Should have one task - extract_params (native SWF set task) + assert.Len(t, *workflow.Do, 1) + + task := (*workflow.Do)[0] + assert.Equal(t, "extract_params", task.Key) +} + +func TestConvertAdapterConfig_WithPreconditions(t *testing.T) { + config := &config_loader.AdapterConfig{ + Metadata: config_loader.Metadata{ + Name: "preconditions-adapter", + }, + Spec: config_loader.AdapterConfigSpec{ + Preconditions: []config_loader.Precondition{ + { + ActionBase: config_loader.ActionBase{ + Name: "check-cluster", + APICall: &config_loader.APICall{ + Method: "GET", + URL: "https://api.example.com/clusters/{{ .clusterId }}", + Timeout: "10s", + }, + }, + Conditions: []config_loader.Condition{ + { + Field: "status", + Operator: "equals", + Value: "ready", + }, + }, + }, + }, + }, + } + + workflow, err := ConvertAdapterConfig(config) + require.NoError(t, err) + require.NotNil(t, workflow) + require.NotNil(t, workflow.Do) + + // Should have one task - phase_preconditions (now a DoTask with nested tasks) + assert.Len(t, *workflow.Do, 1) + + task := (*workflow.Do)[0] + assert.Equal(t, "phase_preconditions", task.Key) + + // Verify it's a DoTask with nested tasks + doTask, ok := task.Task.(*model.DoTask) + require.True(t, ok, "phase_preconditions should be a DoTask") + require.NotNil(t, doTask.Do) + + // Should have 2 tasks: check_cluster (HTTP) and evaluate (set) + assert.Len(t, 
*doTask.Do, 2) + + // First task should be the HTTP call for check-cluster + checkClusterTask := (*doTask.Do)[0] + assert.Equal(t, "check_cluster", checkClusterTask.Key) + + // Last task should be evaluate + evaluateTask := (*doTask.Do)[1] + assert.Equal(t, "evaluate", evaluateTask.Key) +} + +func TestConvertAdapterConfig_WithResources(t *testing.T) { + config := &config_loader.AdapterConfig{ + Metadata: config_loader.Metadata{ + Name: "resources-adapter", + }, + Spec: config_loader.AdapterConfigSpec{ + Resources: []config_loader.Resource{ + { + Name: "my-configmap", + Manifest: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]interface{}{ + "name": "test-cm", + }, + }, + Discovery: &config_loader.DiscoveryConfig{ + ByName: "test-cm", + }, + }, + }, + }, + } + + workflow, err := ConvertAdapterConfig(config) + require.NoError(t, err) + require.NotNil(t, workflow) + require.NotNil(t, workflow.Do) + + // Should have one task - phase_resources + assert.Len(t, *workflow.Do, 1) + + task := (*workflow.Do)[0] + assert.Equal(t, "phase_resources", task.Key) +} + +func TestConvertAdapterConfig_WithPost(t *testing.T) { + config := &config_loader.AdapterConfig{ + Metadata: config_loader.Metadata{ + Name: "post-adapter", + }, + Spec: config_loader.AdapterConfigSpec{ + Post: &config_loader.PostConfig{ + PostActions: []config_loader.PostAction{ + { + ActionBase: config_loader.ActionBase{ + Name: "notify", + APICall: &config_loader.APICall{ + Method: "POST", + URL: "https://api.example.com/notify", + Body: `{"status": "completed"}`, + }, + }, + }, + }, + }, + }, + } + + workflow, err := ConvertAdapterConfig(config) + require.NoError(t, err) + require.NotNil(t, workflow) + require.NotNil(t, workflow.Do) + + // Should have one task - phase_post (now a DoTask with native HTTP call) + assert.Len(t, *workflow.Do, 1) + + task := (*workflow.Do)[0] + assert.Equal(t, "phase_post", task.Key) + + // Verify it's a DoTask with nested tasks + doTask, ok := task.Task.(*model.DoTask) + require.True(t, ok, "phase_post should be a DoTask") + require.NotNil(t, doTask.Do) + + // Should have one task: notify (HTTP call) + assert.Len(t, *doTask.Do, 1) + assert.Equal(t, "notify", (*doTask.Do)[0].Key) +} + +func TestConvertAdapterConfig_FullPipeline(t *testing.T) { + config := &config_loader.AdapterConfig{ + Kind: "AdapterConfig", + APIVersion: "v1alpha1", + Metadata: config_loader.Metadata{ + Name: "full-pipeline-adapter", + Namespace: "test-namespace", + Labels: map[string]string{ + "app": "test", + }, + }, + Spec: config_loader.AdapterConfigSpec{ + Adapter: config_loader.AdapterInfo{ + Version: "2.0.0", + }, + Params: []config_loader.Parameter{ + { + Name: "clusterId", + Source: "event.id", + }, + }, + Preconditions: []config_loader.Precondition{ + { + ActionBase: config_loader.ActionBase{ + Name: "check-exists", + }, + Expression: "clusterId != \"\"", + }, + }, + Resources: []config_loader.Resource{ + { + Name: "configmap", + Manifest: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + }, + Discovery: &config_loader.DiscoveryConfig{ + ByName: "test-cm", + }, + }, + }, + Post: &config_loader.PostConfig{ + PostActions: []config_loader.PostAction{ + { + ActionBase: config_loader.ActionBase{ + Name: "report-status", + APICall: &config_loader.APICall{ + Method: "POST", + URL: "https://api.example.com/status", + }, + }, + }, + }, + }, + }, + } + + workflow, err := ConvertAdapterConfig(config) + require.NoError(t, err) + require.NotNil(t, workflow) + require.NotNil(t, 
workflow.Do) + + // Should have 4 phases + assert.Len(t, *workflow.Do, 4) + + // Verify phase order (params now uses native set task with "extract_params" key) + assert.Equal(t, "extract_params", (*workflow.Do)[0].Key) + assert.Equal(t, "phase_preconditions", (*workflow.Do)[1].Key) + assert.Equal(t, "phase_resources", (*workflow.Do)[2].Key) + assert.Equal(t, "phase_post", (*workflow.Do)[3].Key) + + // Check metadata preservation + assert.Equal(t, "AdapterConfig", workflow.Document.Metadata["originalKind"]) + assert.Equal(t, "v1alpha1", workflow.Document.Metadata["originalAPIVersion"]) + assert.Equal(t, "test-namespace", workflow.Document.Metadata["namespace"]) + + // Check labels are preserved as tags + assert.Equal(t, map[string]string{"app": "test"}, workflow.Document.Tags) +} + +func TestSanitizeName(t *testing.T) { + tests := []struct { + name string + input string + expected string + }{ + { + name: "empty", + input: "", + expected: "unnamed-workflow", + }, + { + name: "already valid", + input: "my-adapter", + expected: "my-adapter", + }, + { + name: "uppercase", + input: "MyAdapter", + expected: "myadapter", + }, + { + name: "underscores", + input: "my_adapter_name", + expected: "my-adapter-name", + }, + { + name: "spaces", + input: "my adapter", + expected: "my-adapter", + }, + { + name: "special characters", + input: "my@adapter!name", + expected: "my-adapter-name", + }, + { + name: "numbers", + input: "adapter123", + expected: "adapter123", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := sanitizeName(tt.input) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestWorkflowFromConfig(t *testing.T) { + config := &config_loader.AdapterConfig{ + Metadata: config_loader.Metadata{ + Name: "convenience-test", + }, + } + + workflow, err := WorkflowFromConfig(config) + require.NoError(t, err) + require.NotNil(t, workflow) + assert.Equal(t, "convenience-test", workflow.Document.Name) +} + +func TestConvertAdapterConfig_PreconditionsWithCapture(t *testing.T) { + config := &config_loader.AdapterConfig{ + Metadata: config_loader.Metadata{ + Name: "capture-test", + }, + Spec: config_loader.AdapterConfigSpec{ + Preconditions: []config_loader.Precondition{ + { + ActionBase: config_loader.ActionBase{ + Name: "fetch-data", + APICall: &config_loader.APICall{ + Method: "GET", + URL: "https://api.example.com/data", + Headers: []config_loader.Header{ + {Name: "Authorization", Value: "Bearer {{ .token }}"}, + }, + }, + }, + Capture: []config_loader.CaptureField{ + { + Name: "dataId", + FieldExpressionDef: config_loader.FieldExpressionDef{ + Field: "id", + }, + }, + { + Name: "status", + FieldExpressionDef: config_loader.FieldExpressionDef{ + Expression: "data.status", + }, + }, + }, + }, + }, + }, + } + + workflow, err := ConvertAdapterConfig(config) + require.NoError(t, err) + require.NotNil(t, workflow) + require.NotNil(t, workflow.Do) + + assert.Len(t, *workflow.Do, 1) +} + +func TestConvertAdapterConfig_ResourcesWithDiscovery(t *testing.T) { + config := &config_loader.AdapterConfig{ + Metadata: config_loader.Metadata{ + Name: "discovery-test", + }, + Spec: config_loader.AdapterConfigSpec{ + Resources: []config_loader.Resource{ + { + Name: "update-cm", + RecreateOnChange: true, + Manifest: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + }, + Discovery: &config_loader.DiscoveryConfig{ + Namespace: "default", + ByName: "existing-cm", + }, + }, + }, + }, + } + + workflow, err := ConvertAdapterConfig(config) + require.NoError(t, err) 
+ require.NotNil(t, workflow) + require.NotNil(t, workflow.Do) + + assert.Len(t, *workflow.Do, 1) + assert.Equal(t, "phase_resources", (*workflow.Do)[0].Key) +} + +func TestConvertAdapterConfig_ResourcesWithPreconditionsHasIfCondition(t *testing.T) { + config := &config_loader.AdapterConfig{ + Metadata: config_loader.Metadata{ + Name: "if-condition-test", + }, + Spec: config_loader.AdapterConfigSpec{ + Preconditions: []config_loader.Precondition{ + { + ActionBase: config_loader.ActionBase{ + Name: "check", + }, + Expression: "true", + }, + }, + Resources: []config_loader.Resource{ + { + Name: "configmap", + Manifest: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + }, + Discovery: &config_loader.DiscoveryConfig{ + ByName: "test-cm", + }, + }, + }, + }, + } + + workflow, err := ConvertAdapterConfig(config) + require.NoError(t, err) + require.NotNil(t, workflow) + require.NotNil(t, workflow.Do) + + // Should have 2 tasks + assert.Len(t, *workflow.Do, 2) + + // The resources phase should have an `if` condition since there are preconditions + resourcesTask := (*workflow.Do)[1] + assert.Equal(t, "phase_resources", resourcesTask.Key) + + // Verify it's a CallFunction with an If condition + callFunc, ok := resourcesTask.Task.(*model.CallFunction) + require.True(t, ok, "phase_resources should be a CallFunction") + require.NotNil(t, callFunc.If, "phase_resources should have an If condition") + assert.Equal(t, "${ .allMatched == true }", callFunc.If.Value) +} + +func TestConvertAdapterConfig_PostWithPayloads(t *testing.T) { + config := &config_loader.AdapterConfig{ + Metadata: config_loader.Metadata{ + Name: "payloads-test", + }, + Spec: config_loader.AdapterConfigSpec{ + Post: &config_loader.PostConfig{ + Payloads: []config_loader.Payload{ + { + Name: "notification", + Build: map[string]interface{}{ + "message": "Cluster created", + "id": "{{ .clusterId }}", + }, + }, + { + Name: "status-update", + BuildRef: "statusPayload", + }, + }, + PostActions: []config_loader.PostAction{ + { + ActionBase: config_loader.ActionBase{ + Name: "send-notification", + APICall: &config_loader.APICall{ + Method: "POST", + URL: "https://api.example.com/notify", + Body: `{{ .notification }}`, + Timeout: "30s", + RetryAttempts: 3, + RetryBackoff: "exponential", + }, + }, + }, + }, + }, + }, + } + + workflow, err := ConvertAdapterConfig(config) + require.NoError(t, err) + require.NotNil(t, workflow) + require.NotNil(t, workflow.Do) + + assert.Len(t, *workflow.Do, 1) + assert.Equal(t, "phase_post", (*workflow.Do)[0].Key) +} diff --git a/internal/swf/converter/jq_builder.go b/internal/swf/converter/jq_builder.go new file mode 100644 index 0000000..bbe03cd --- /dev/null +++ b/internal/swf/converter/jq_builder.go @@ -0,0 +1,494 @@ +package converter + +import ( + "fmt" + "regexp" + "strings" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" +) + +// ConditionToJQ converts a single condition to a jq expression. +// Maps adapter config operators to jq equivalents. +func ConditionToJQ(field, operator string, value interface{}) string { + // Ensure field has a dot prefix if not already + if !strings.HasPrefix(field, ".") { + field = "." 
+ field + } + + switch strings.ToLower(operator) { + case "equals", "eq", "==": + return fmt.Sprintf("(%s == %s)", field, formatJQValue(value)) + + case "notequals", "neq", "!=": + return fmt.Sprintf("(%s != %s)", field, formatJQValue(value)) + + case "in": + // value should be an array + values := toStringSlice(value) + if len(values) == 0 { + return "false" + } + var conditions []string + for _, v := range values { + conditions = append(conditions, fmt.Sprintf("(%s == %s)", field, formatJQValue(v))) + } + return "(" + strings.Join(conditions, " or ") + ")" + + case "notin": + values := toStringSlice(value) + if len(values) == 0 { + return "true" + } + var conditions []string + for _, v := range values { + conditions = append(conditions, fmt.Sprintf("(%s != %s)", field, formatJQValue(v))) + } + return "(" + strings.Join(conditions, " and ") + ")" + + case "contains": + return fmt.Sprintf("(%s | contains(%s))", field, formatJQValue(value)) + + case "notcontains": + return fmt.Sprintf("(%s | contains(%s) | not)", field, formatJQValue(value)) + + case "startswith": + return fmt.Sprintf("(%s | startswith(%s))", field, formatJQValue(value)) + + case "endswith": + return fmt.Sprintf("(%s | endswith(%s))", field, formatJQValue(value)) + + case "greaterthan", "gt", ">": + return fmt.Sprintf("(%s > %s)", field, formatJQValue(value)) + + case "greaterthanorequals", "gte", ">=": + return fmt.Sprintf("(%s >= %s)", field, formatJQValue(value)) + + case "lessthan", "lt", "<": + return fmt.Sprintf("(%s < %s)", field, formatJQValue(value)) + + case "lessthanorequals", "lte", "<=": + return fmt.Sprintf("(%s <= %s)", field, formatJQValue(value)) + + case "exists": + return fmt.Sprintf("(%s != null)", field) + + case "notexists": + return fmt.Sprintf("(%s == null)", field) + + case "empty": + return fmt.Sprintf("((%s == null) or (%s == \"\") or (%s == []))", field, field, field) + + case "notempty": + return fmt.Sprintf("((%s != null) and (%s != \"\") and (%s != []))", field, field, field) + + case "matches": + // Regex match - jq uses test() for regex + return fmt.Sprintf("(%s | test(%s))", field, formatJQValue(value)) + + default: + // Default to equals + return fmt.Sprintf("(%s == %s)", field, formatJQValue(value)) + } +} + +// ConditionsToJQ combines multiple conditions with AND logic. +func ConditionsToJQ(conditions []config_loader.Condition) string { + if len(conditions) == 0 { + return "true" + } + + var jqConditions []string + for _, c := range conditions { + jqConditions = append(jqConditions, ConditionToJQ(c.Field, c.Operator, c.Value)) + } + + return strings.Join(jqConditions, " and ") +} + +// CaptureToJQ builds a jq expression to capture fields from an API response. +// Returns an expression that creates an object with the captured values. +func CaptureToJQ(captures []config_loader.CaptureField) string { + if len(captures) == 0 { + return "{}" + } + + var parts []string + for _, c := range captures { + if c.Expression != "" { + // CEL expression - we'll convert to jq approximation + parts = append(parts, fmt.Sprintf("%s: %s", c.Name, celToJQ(c.Expression))) + } else if c.Field != "" { + // Field path extraction + fieldPath := normalizeFieldPath(c.Field) + parts = append(parts, fmt.Sprintf("%s: .content%s", c.Name, fieldPath)) + } + } + + return "{ " + strings.Join(parts, ", ") + " }" +} + +// BuildPreconditionExportExpr builds the export expression for a precondition. +// This expression captures the response and evaluates the condition in one pass. 
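+//
+// For example, a precondition named "check-cluster" that captures status.phase
+// as "phase" and requires it to equal "Ready" produces (see jq_builder_test.go):
+//
+//	${ . + { check-cluster: .content, phase: .content.status.phase, check_cluster_ok: ((.content.status.phase == "Ready")) } }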
+func BuildPreconditionExportExpr(precondName string, captures []config_loader.CaptureField, conditions []config_loader.Condition) string {
+	var parts []string
+
+	// Store the full response under the precondition name
+	parts = append(parts, fmt.Sprintf("%s: .content", precondName))
+
+	// Add captured fields
+	for _, c := range captures {
+		if c.Expression != "" {
+			parts = append(parts, fmt.Sprintf("%s: %s", c.Name, celToJQ(c.Expression)))
+		} else if c.Field != "" {
+			fieldPath := normalizeFieldPath(c.Field)
+			parts = append(parts, fmt.Sprintf("%s: .content%s", c.Name, fieldPath))
+		}
+	}
+
+	// Add the _ok flag based on conditions
+	okField := toSnakeCase(precondName) + "_ok"
+	if len(conditions) > 0 {
+		condExpr := buildConditionExprForExport(conditions)
+		parts = append(parts, fmt.Sprintf("%s: %s", okField, condExpr))
+	} else {
+		// No conditions means always ok (API call succeeded)
+		parts = append(parts, fmt.Sprintf("%s: true", okField))
+	}
+
+	return "${ . + { " + strings.Join(parts, ", ") + " } }"
+}
+
+// buildConditionExprForExport builds condition expressions that work in export context.
+// In export context, captured fields are accessed from .content.
+func buildConditionExprForExport(conditions []config_loader.Condition) string {
+	if len(conditions) == 0 {
+		return "true"
+	}
+
+	var jqConditions []string
+	for _, c := range conditions {
+		field := c.Field
+		// In export context, fields come from .content
+		if !strings.HasPrefix(field, ".") {
+			field = ".content." + field
+		} else if !strings.HasPrefix(field, ".content") {
+			field = ".content" + field
+		}
+		jqConditions = append(jqConditions, ConditionToJQ(field, c.Operator, c.Value))
+	}
+
+	return "(" + strings.Join(jqConditions, " and ") + ")"
+}
+
+// BuildAllMatchedExpr builds the final allMatched expression from all precondition _ok flags.
+func BuildAllMatchedExpr(precondNames []string) string {
+	if len(precondNames) == 0 {
+		return "true"
+	}
+
+	var okChecks []string
+	for _, name := range precondNames {
+		okField := toSnakeCase(name) + "_ok"
+		okChecks = append(okChecks, fmt.Sprintf("(.%s // false)", okField))
+	}
+
+	return strings.Join(okChecks, " and ")
+}
+
+// BuildNotMetReasonExpr builds the notMetReason expression that identifies which precondition failed.
+func BuildNotMetReasonExpr(precondNames []string) string {
+	if len(precondNames) == 0 {
+		return `""`
+	}
+
+	// Build a cascading if-elif-else expression; jq uses elif (not "else if"),
+	// so a single trailing end closes the whole chain.
+	var expr strings.Builder
+	expr.WriteString("(")
+
+	for i, name := range precondNames {
+		okField := toSnakeCase(name) + "_ok"
+		if i > 0 {
+			expr.WriteString(" elif ")
+		} else {
+			expr.WriteString("if ")
+		}
+		expr.WriteString(fmt.Sprintf("(.%s // false) == false then \"%s failed\"", okField, name))
+	}
+
+	expr.WriteString(" else \"\" end)")
+	return expr.String()
+}
+
+// ConvertGoTemplateToJQ converts Go template syntax ({{ .field }}) to jq expressions (${ .params.field }).
+func ConvertGoTemplateToJQ(template string) string {
+	// Pattern to match Go template expressions
+	re := regexp.MustCompile(`\{\{\s*\.([^}]+)\s*\}\}`)
+
+	return re.ReplaceAllStringFunc(template, func(match string) string {
+		// Extract the field path
+		matches := re.FindStringSubmatch(match)
+		if len(matches) < 2 {
+			return match
+		}
+		fieldPath := strings.TrimSpace(matches[1])
+		return fmt.Sprintf("${ .params.%s }", fieldPath)
+	})
+}
+
+// formatJQValue formats a value for use in a jq expression.
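+//
+// Examples: "ready" -> "\"ready\"" (a quoted jq string, with inner quotes
+// escaped), 10 -> "10", true -> "true", nil -> "null".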
+func formatJQValue(value interface{}) string { + switch v := value.(type) { + case string: + // Escape quotes in string + escaped := strings.ReplaceAll(v, `"`, `\"`) + return fmt.Sprintf(`"%s"`, escaped) + case bool: + if v { + return "true" + } + return "false" + case nil: + return "null" + case int, int32, int64, float32, float64: + return fmt.Sprintf("%v", v) + default: + // For complex types, convert to string + return fmt.Sprintf(`"%v"`, v) + } +} + +// toStringSlice converts an interface{} to a slice of strings. +func toStringSlice(value interface{}) []string { + switch v := value.(type) { + case []string: + return v + case []interface{}: + var result []string + for _, item := range v { + result = append(result, fmt.Sprintf("%v", item)) + } + return result + case string: + return []string{v} + default: + return nil + } +} + +// normalizeFieldPath ensures a field path starts with a dot. +func normalizeFieldPath(field string) string { + // Handle JSONPath-style paths like {.status.phase} + field = strings.TrimPrefix(field, "{") + field = strings.TrimSuffix(field, "}") + + if !strings.HasPrefix(field, ".") { + return "." + field + } + return field +} + +// celToJQ converts a CEL expression to a jq approximation. +// This is a best-effort conversion for common patterns. +func celToJQ(celExpr string) string { + // Simple field access: response.status.phase -> .response.status.phase + if !strings.Contains(celExpr, "(") && !strings.Contains(celExpr, " ") && !strings.Contains(celExpr, "?") { + return "." + celExpr + } + + // Try to convert common CEL patterns to jq + return convertCELToJQ(celExpr) +} + +// convertCELToJQ converts common CEL expression patterns to jq equivalents. +// Handles: +// - Optional chaining: foo.?bar.?baz -> .foo.bar.baz // null +// - orValue(): foo.orValue("default") -> .foo // "default" +// - Ternary: condition ? trueVal : falseVal -> if condition then trueVal else falseVal end +// - Comparison operators: ==, !=, <, >, <=, >= +func convertCELToJQ(celExpr string) string { + expr := strings.TrimSpace(celExpr) + + // Handle ternary expressions: condition ? trueVal : falseVal + if ternaryResult := convertCELTernary(expr); ternaryResult != "" { + return ternaryResult + } + + // Handle .orValue() pattern: field.orValue("default") + expr = convertCELOrValue(expr) + + // Handle optional chaining: foo.?bar -> foo.bar + // In jq, we'll handle nulls with // operator + expr = convertCELOptionalChaining(expr) + + // Add leading dot if it's a field path + if len(expr) > 0 && expr[0] != '.' && expr[0] != '(' && expr[0] != '"' { + expr = "." + expr + } + + return expr +} + +// convertCELTernary converts CEL ternary expressions to jq if-then-else. +// Pattern: condition ? trueVal : falseVal -> if condition then trueVal else falseVal end +func convertCELTernary(expr string) string { + // Find the ? and : that form the ternary + questionIdx := findTernaryQuestion(expr) + if questionIdx == -1 { + return "" + } + + colonIdx := findTernaryColon(expr, questionIdx) + if colonIdx == -1 { + return "" + } + + condition := strings.TrimSpace(expr[:questionIdx]) + trueVal := strings.TrimSpace(expr[questionIdx+1 : colonIdx]) + falseVal := strings.TrimSpace(expr[colonIdx+1:]) + + // Recursively convert each part + conditionJQ := convertCELToJQ(condition) + trueValJQ := convertCELToJQ(trueVal) + falseValJQ := convertCELToJQ(falseVal) + + return fmt.Sprintf("(if %s then %s else %s end)", conditionJQ, trueValJQ, falseValJQ) +} + +// findTernaryQuestion finds the index of ? 
in a ternary expression, ignoring .? optional chaining.
+func findTernaryQuestion(expr string) int {
+	depth := 0
+	for i := 0; i < len(expr); i++ {
+		switch expr[i] {
+		case '(':
+			depth++
+		case ')':
+			depth--
+		case '?':
+			// Skip optional chaining (.?): the '?' is preceded by a dot
+			if i > 0 && expr[i-1] == '.' {
+				continue
+			}
+			if depth == 0 {
+				return i
+			}
+		}
+	}
+	return -1
+}
+
+// findTernaryColon finds the matching : for a ternary expression.
+func findTernaryColon(expr string, questionIdx int) int {
+	depth := 0
+	for i := questionIdx + 1; i < len(expr); i++ {
+		switch expr[i] {
+		case '(':
+			depth++
+		case ')':
+			depth--
+		case '?':
+			// Nested ternary - skip to its colon (ignore .? optional chaining)
+			if expr[i-1] != '.' {
+				depth++
+			}
+		case ':':
+			if depth == 0 {
+				return i
+			}
+			depth--
+		}
+	}
+	return -1
+}
+
+// convertCELOrValue converts .orValue("default") to jq's // operator.
+// Pattern: field.orValue("default") -> (field // "default")
+func convertCELOrValue(expr string) string {
+	re := regexp.MustCompile(`\.orValue\(([^)]+)\)`)
+	return re.ReplaceAllStringFunc(expr, func(match string) string {
+		// Extract the default value
+		matches := re.FindStringSubmatch(match)
+		if len(matches) < 2 {
+			return match
+		}
+		defaultVal := strings.TrimSpace(matches[1])
+		return " // " + defaultVal
+	})
+}
+
+// convertCELOptionalChaining converts CEL's .? to regular field access.
+// In jq, we handle null safety with the // operator at the end of the chain.
+func convertCELOptionalChaining(expr string) string {
+	// Replace .? with . (jq handles null traversal differently)
+	result := strings.ReplaceAll(expr, ".?", ".")
+
+	// If the expression had optional chaining, wrap with null coalescing
+	if strings.Contains(expr, ".?") && !strings.Contains(result, " // ") {
+		// Don't add // null if there's already a default
+		result = "(" + result + " // null)"
+	}
+
+	return result
+}
+
+// ConvertCELExpressionToJQ is the public entry point for CEL to jq conversion.
+// Returns the jq expression and a boolean indicating whether conversion succeeded.
+func ConvertCELExpressionToJQ(celExpr string) (string, bool) {
+	// Check for patterns we can't handle
+	unsupportedPatterns := []string{".filter(", ".map(", ".exists(", ".all(", ".size()", "has("}
+	for _, pattern := range unsupportedPatterns {
+		if strings.Contains(celExpr, pattern) {
+			return "", false
+		}
+	}
+
+	jqExpr := convertCELToJQ(celExpr)
+	return jqExpr, true
+}
+
+// BuildPayloadFieldExpr builds a jq expression for a payload field.
+// Handles both simple field references and CEL expressions.
+func BuildPayloadFieldExpr(fieldDef map[string]interface{}) (string, bool) {
+	// Check for field reference
+	if field, ok := fieldDef["field"].(string); ok && field != "" {
+		defaultVal := fieldDef["default"]
+		jqField := "." + strings.TrimPrefix(field, ".")
+		if defaultVal != nil {
+			return fmt.Sprintf("(%s // %s)", jqField, formatJQValue(defaultVal)), true
+		}
+		return jqField, true
+	}
+
+	// Check for expression
+	if expr, ok := fieldDef["expression"].(string); ok && expr != "" {
+		jqExpr, ok := ConvertCELExpressionToJQ(expr)
+		if !ok {
+			return "", false
+		}
+		defaultVal := fieldDef["default"]
+		if defaultVal != nil {
+			return fmt.Sprintf("(%s // %s)", jqExpr, formatJQValue(defaultVal)), true
+		}
+		return jqExpr, true
+	}
+
+	return "", false
+}
+
+// toSnakeCase converts a string to snake_case.
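+// Examples (mirroring jq_builder_test.go): "check-cluster" -> "check_cluster",
+// "checkCluster" -> "check_cluster", "CheckCluster" -> "check_cluster".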
+func toSnakeCase(s string) string { + // Replace hyphens with underscores + s = strings.ReplaceAll(s, "-", "_") + + // Insert underscores before uppercase letters and convert to lowercase + var result strings.Builder + for i, r := range s { + if i > 0 && r >= 'A' && r <= 'Z' { + result.WriteRune('_') + } + if r >= 'A' && r <= 'Z' { + result.WriteRune(r + 32) // Convert to lowercase + } else { + result.WriteRune(r) + } + } + return result.String() +} diff --git a/internal/swf/converter/jq_builder_test.go b/internal/swf/converter/jq_builder_test.go new file mode 100644 index 0000000..a7a572a --- /dev/null +++ b/internal/swf/converter/jq_builder_test.go @@ -0,0 +1,222 @@ +package converter + +import ( + "testing" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" + "github.com/stretchr/testify/assert" +) + +func TestConditionToJQ_Equals(t *testing.T) { + tests := []struct { + name string + field string + operator string + value interface{} + expected string + }{ + { + name: "equals string", + field: "status", + operator: "equals", + value: "ready", + expected: `(.status == "ready")`, + }, + { + name: "equals with dot prefix", + field: ".status", + operator: "equals", + value: "ready", + expected: `(.status == "ready")`, + }, + { + name: "equals number", + field: "count", + operator: "equals", + value: 10, + expected: `(.count == 10)`, + }, + { + name: "equals bool", + field: "enabled", + operator: "equals", + value: true, + expected: `(.enabled == true)`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := ConditionToJQ(tt.field, tt.operator, tt.value) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestConditionToJQ_NotEquals(t *testing.T) { + result := ConditionToJQ("status", "notEquals", "failed") + assert.Equal(t, `(.status != "failed")`, result) +} + +func TestConditionToJQ_GreaterThan(t *testing.T) { + result := ConditionToJQ("count", "greaterThan", 5) + assert.Equal(t, `(.count > 5)`, result) +} + +func TestConditionToJQ_LessThan(t *testing.T) { + result := ConditionToJQ("count", "lessThan", 10) + assert.Equal(t, `(.count < 10)`, result) +} + +func TestConditionToJQ_Contains(t *testing.T) { + result := ConditionToJQ("name", "contains", "cluster") + assert.Equal(t, `(.name | contains("cluster"))`, result) +} + +func TestConditionToJQ_Exists(t *testing.T) { + result := ConditionToJQ("metadata.name", "exists", nil) + assert.Equal(t, `(.metadata.name != null)`, result) +} + +func TestConditionToJQ_NotExists(t *testing.T) { + result := ConditionToJQ("metadata.name", "notExists", nil) + assert.Equal(t, `(.metadata.name == null)`, result) +} + +func TestConditionToJQ_In(t *testing.T) { + result := ConditionToJQ("status", "in", []string{"ready", "running", "active"}) + expected := `((.status == "ready") or (.status == "running") or (.status == "active"))` + assert.Equal(t, expected, result) +} + +func TestConditionsToJQ(t *testing.T) { + conditions := []config_loader.Condition{ + {Field: "status", Operator: "equals", Value: "ready"}, + {Field: "count", Operator: "greaterThan", Value: 0}, + } + + result := ConditionsToJQ(conditions) + expected := `(.status == "ready") and (.count > 0)` + assert.Equal(t, expected, result) +} + +func TestConditionsToJQ_Empty(t *testing.T) { + result := ConditionsToJQ(nil) + assert.Equal(t, "true", result) +} + +func TestCaptureToJQ(t *testing.T) { + captures := []config_loader.CaptureField{ + { + Name: "clusterPhase", + FieldExpressionDef: config_loader.FieldExpressionDef{ + Field: 
"status.phase", + }, + }, + { + Name: "clusterName", + FieldExpressionDef: config_loader.FieldExpressionDef{ + Field: "metadata.name", + }, + }, + } + + result := CaptureToJQ(captures) + assert.Contains(t, result, "clusterPhase: .content.status.phase") + assert.Contains(t, result, "clusterName: .content.metadata.name") +} + +func TestBuildPreconditionExportExpr(t *testing.T) { + captures := []config_loader.CaptureField{ + { + Name: "phase", + FieldExpressionDef: config_loader.FieldExpressionDef{ + Field: "status.phase", + }, + }, + } + conditions := []config_loader.Condition{ + {Field: "status.phase", Operator: "equals", Value: "Ready"}, + } + + result := BuildPreconditionExportExpr("check-cluster", captures, conditions) + + // Should contain the precondition name + assert.Contains(t, result, "check-cluster: .content") + // Should contain the captured field + assert.Contains(t, result, "phase: .content.status.phase") + // Should contain the _ok flag + assert.Contains(t, result, "check_cluster_ok:") +} + +func TestBuildAllMatchedExpr(t *testing.T) { + precondNames := []string{"check-cluster", "check-resources"} + result := BuildAllMatchedExpr(precondNames) + assert.Contains(t, result, ".check_cluster_ok") + assert.Contains(t, result, ".check_resources_ok") + assert.Contains(t, result, " and ") +} + +func TestBuildNotMetReasonExpr(t *testing.T) { + precondNames := []string{"check-cluster", "check-resources"} + result := BuildNotMetReasonExpr(precondNames) + assert.Contains(t, result, "check_cluster_ok") + assert.Contains(t, result, "check-cluster failed") + assert.Contains(t, result, "check-resources failed") +} + +func TestConvertGoTemplateToJQ(t *testing.T) { + tests := []struct { + name string + template string + expected string + }{ + { + name: "simple field", + template: "{{ .clusterId }}", + expected: "${ .params.clusterId }", + }, + { + name: "field in URL", + template: "https://api.example.com/clusters/{{ .clusterId }}/status", + expected: "https://api.example.com/clusters/${ .params.clusterId }/status", + }, + { + name: "multiple fields", + template: "{{ .namespace }}/{{ .name }}", + expected: "${ .params.namespace }/${ .params.name }", + }, + { + name: "no template", + template: "https://api.example.com/static", + expected: "https://api.example.com/static", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := ConvertGoTemplateToJQ(tt.template) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestToSnakeCase(t *testing.T) { + tests := []struct { + input string + expected string + }{ + {"check-cluster", "check_cluster"}, + {"checkCluster", "check_cluster"}, + {"CheckCluster", "check_cluster"}, + {"check_cluster", "check_cluster"}, + {"simple", "simple"}, + } + + for _, tt := range tests { + t.Run(tt.input, func(t *testing.T) { + result := toSnakeCase(tt.input) + assert.Equal(t, tt.expected, result) + }) + } +} diff --git a/internal/swf/converter/task_builder.go b/internal/swf/converter/task_builder.go new file mode 100644 index 0000000..61c16ad --- /dev/null +++ b/internal/swf/converter/task_builder.go @@ -0,0 +1,176 @@ +package converter + +import ( + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" + "github.com/serverlessworkflow/sdk-go/v3/model" +) + +// BuildHTTPCallTask creates a CallHTTP task for making API requests. 
+func BuildHTTPCallTask(method, endpointExpr string, headers map[string]string) *model.CallHTTP { + endpoint := model.NewEndpoint(endpointExpr) + + // If endpoint contains ${, it's a runtime expression + if isRuntimeExpression(endpointExpr) { + endpoint = &model.Endpoint{ + RuntimeExpression: model.NewExpr(endpointExpr), + } + } + + return &model.CallHTTP{ + Call: "http", + With: model.HTTPArguments{ + Method: method, + Endpoint: endpoint, + Headers: headers, + Output: "content", + }, + } +} + +// BuildHTTPCallTaskWithExport creates a CallHTTP task with export configuration. +func BuildHTTPCallTaskWithExport(method, endpointExpr string, headers map[string]string, exportExpr string) *model.CallHTTP { + task := BuildHTTPCallTask(method, endpointExpr, headers) + + if exportExpr != "" { + task.Export = &model.Export{ + As: model.NewObjectOrRuntimeExpr(exportExpr), + } + } + + return task +} + +// BuildHTTPCallTaskItem creates a TaskItem wrapping a CallHTTP task. +func BuildHTTPCallTaskItem(name, method, endpointExpr string, headers map[string]string, exportExpr string) *model.TaskItem { + task := BuildHTTPCallTaskWithExport(method, endpointExpr, headers, exportExpr) + return &model.TaskItem{ + Key: name, + Task: task, + } +} + +// BuildTryWithRetry wraps a task in a try/catch block with retry configuration. +func BuildTryWithRetry(innerTasks model.TaskList, retryAttempts int, catchTasks model.TaskList) *model.TryTask { + tryTask := &model.TryTask{ + Try: &innerTasks, + Catch: &model.TryTaskCatch{ + Retry: &model.RetryPolicy{ + Limit: model.RetryLimit{ + Attempt: &model.RetryLimitAttempt{ + Count: retryAttempts, + }, + }, + Backoff: &model.RetryBackoff{ + Exponential: &model.BackoffDefinition{}, + }, + }, + }, + } + + if len(catchTasks) > 0 { + tryTask.Catch.Do = &catchTasks + } + + return tryTask +} + +// BuildTryTaskItem creates a TaskItem wrapping a TryTask. +func BuildTryTaskItem(name string, innerTasks model.TaskList, retryAttempts int, catchTasks model.TaskList) *model.TaskItem { + return &model.TaskItem{ + Key: name, + Task: BuildTryWithRetry(innerTasks, retryAttempts, catchTasks), + } +} + +// BuildSetTask creates a SetTask for setting values in the workflow context. +func BuildSetTask(values map[string]interface{}) *model.SetTask { + return &model.SetTask{ + Set: values, + } +} + +// BuildSetTaskItem creates a TaskItem wrapping a SetTask. +func BuildSetTaskItem(name string, values map[string]interface{}) *model.TaskItem { + return &model.TaskItem{ + Key: name, + Task: BuildSetTask(values), + } +} + +// BuildDoTask creates a DoTask containing nested tasks. +func BuildDoTask(tasks model.TaskList) *model.DoTask { + return &model.DoTask{ + Do: &tasks, + } +} + +// BuildDoTaskItem creates a TaskItem wrapping a DoTask. +func BuildDoTaskItem(name string, tasks model.TaskList) *model.TaskItem { + return &model.TaskItem{ + Key: name, + Task: BuildDoTask(tasks), + } +} + +// BuildConditionalTask adds an `if` condition to a task. +func BuildConditionalTask(task model.Task, ifExpr string) model.Task { + if ifExpr == "" { + return task + } + + base := task.GetBase() + if base != nil { + base.If = model.NewExpr(ifExpr) + } + + return task +} + +// BuildConditionalTaskItem creates a TaskItem with an if condition. 
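+//
+// Usage sketch (illustrative): gate a phase on the preconditions result,
+// matching the converter output asserted in adapter_to_swf_test.go:
+//
+//	item := BuildConditionalTaskItem("phase_resources", task, "${ .allMatched == true }")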
+func BuildConditionalTaskItem(name string, task model.Task, ifExpr string) *model.TaskItem { + conditionalTask := BuildConditionalTask(task, ifExpr) + return &model.TaskItem{ + Key: name, + Task: conditionalTask, + } +} + +// BuildPreconditionHTTPTask builds the HTTP call task for a precondition. +func BuildPreconditionHTTPTask(precond *config_loader.Precondition) *model.TaskItem { + if precond.APICall == nil { + return nil + } + + // Convert Go template URL to jq expression + url := ConvertGoTemplateToJQ(precond.APICall.URL) + + // Convert headers + headers := make(map[string]string) + for _, h := range precond.APICall.Headers { + headers[h.Name] = ConvertGoTemplateToJQ(h.Value) + } + + // Build export expression that captures response and evaluates conditions + exportExpr := BuildPreconditionExportExpr(precond.Name, precond.Capture, precond.Conditions) + + return BuildHTTPCallTaskItem( + "api", + precond.APICall.Method, + url, + headers, + exportExpr, + ) +} + +// BuildPreconditionCatchSetTask builds the set task for the catch block. +func BuildPreconditionCatchSetTask(precondName string) *model.TaskItem { + okField := toSnakeCase(precondName) + "_ok" + return BuildSetTaskItem("fail", map[string]interface{}{ + okField: false, + }) +} + +// isRuntimeExpression checks if a string is a runtime expression (${ ... }). +func isRuntimeExpression(s string) bool { + return len(s) > 3 && s[0:2] == "${" && s[len(s)-1] == '}' +} diff --git a/internal/swf/loader/loader.go b/internal/swf/loader/loader.go new file mode 100644 index 0000000..2fb2b70 --- /dev/null +++ b/internal/swf/loader/loader.go @@ -0,0 +1,231 @@ +// Package loader provides unified loading of workflow configurations. +// It supports both legacy AdapterConfig format and native Serverless Workflow YAML. +package loader + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/swf/converter" + "github.com/serverlessworkflow/sdk-go/v3/model" + "github.com/serverlessworkflow/sdk-go/v3/parser" + "gopkg.in/yaml.v3" +) + +// WorkflowFormat represents the detected format of a workflow configuration. +type WorkflowFormat string + +const ( + // FormatAdapterConfig is the legacy HyperFleet adapter configuration format. + FormatAdapterConfig WorkflowFormat = "adapter-config" + // FormatSWF is the native Serverless Workflow format. + FormatSWF WorkflowFormat = "swf" + // FormatUnknown indicates the format could not be detected. + FormatUnknown WorkflowFormat = "unknown" +) + +// LoadResult contains the result of loading a workflow configuration. +type LoadResult struct { + // Workflow is the parsed Serverless Workflow model. + Workflow *model.Workflow + + // AdapterConfig is the original AdapterConfig (nil if loaded from SWF format). + AdapterConfig *config_loader.AdapterConfig + + // Format indicates which format was detected and loaded. + Format WorkflowFormat + + // FilePath is the path from which the config was loaded. + FilePath string +} + +// LoadOption configures the loader behavior. +type LoadOption func(*loaderConfig) + +type loaderConfig struct { + adapterVersion string + skipSemanticValidation bool +} + +// WithAdapterVersion validates config against expected adapter version (for AdapterConfig format). 
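+//
+// Usage sketch (illustrative):
+//
+//	result, err := Parse(data, "/path/to/adapter.yaml", WithAdapterVersion("2.0.0"))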
+func WithAdapterVersion(version string) LoadOption { + return func(c *loaderConfig) { + c.adapterVersion = version + } +} + +// WithSkipSemanticValidation skips semantic validation (for AdapterConfig format). +func WithSkipSemanticValidation() LoadOption { + return func(c *loaderConfig) { + c.skipSemanticValidation = true + } +} + +// Load loads a workflow from a file, automatically detecting the format. +// It supports both legacy AdapterConfig YAML and native Serverless Workflow YAML. +func Load(filePath string, opts ...LoadOption) (*LoadResult, error) { + if filePath == "" { + filePath = config_loader.ConfigPathFromEnv() + } + if filePath == "" { + return nil, fmt.Errorf("config file path is required (pass as parameter or set %s environment variable)", config_loader.EnvConfigPath) + } + + data, err := os.ReadFile(filePath) + if err != nil { + return nil, fmt.Errorf("failed to read config file %q: %w", filePath, err) + } + + absPath, err := filepath.Abs(filePath) + if err != nil { + return nil, fmt.Errorf("failed to get absolute path for %q: %w", filePath, err) + } + + return Parse(data, absPath, opts...) +} + +// Parse parses workflow configuration from YAML bytes. +func Parse(data []byte, filePath string, opts ...LoadOption) (*LoadResult, error) { + cfg := &loaderConfig{} + for _, opt := range opts { + opt(cfg) + } + + format := DetectFormat(data) + + switch format { + case FormatSWF: + return loadSWF(data, filePath) + case FormatAdapterConfig: + return loadAdapterConfig(data, filePath, cfg) + default: + return nil, fmt.Errorf("unable to detect config format: file must be either AdapterConfig (kind: AdapterConfig) or Serverless Workflow (document.dsl)") + } +} + +// DetectFormat determines whether the YAML data is AdapterConfig or SWF format. +func DetectFormat(data []byte) WorkflowFormat { + // Try to detect format by parsing minimal structure + var probe struct { + Kind string `yaml:"kind"` + Document struct { + DSL string `yaml:"dsl"` + } `yaml:"document"` + } + + if err := yaml.Unmarshal(data, &probe); err != nil { + return FormatUnknown + } + + // Check for SWF format (has document.dsl) + if probe.Document.DSL != "" { + return FormatSWF + } + + // Check for AdapterConfig format (has kind: AdapterConfig) + if probe.Kind == config_loader.ExpectedKind { + return FormatAdapterConfig + } + + // Additional heuristics + content := string(data) + + // Look for SWF indicators + if strings.Contains(content, "document:") && strings.Contains(content, "dsl:") { + return FormatSWF + } + + // Look for AdapterConfig indicators + if strings.Contains(content, "apiVersion:") && strings.Contains(content, "kind:") { + return FormatAdapterConfig + } + + return FormatUnknown +} + +// loadSWF loads a native Serverless Workflow YAML file. +func loadSWF(data []byte, filePath string) (*LoadResult, error) { + workflow, err := parser.FromYAMLSource(data) + if err != nil { + return nil, fmt.Errorf("failed to parse Serverless Workflow: %w", err) + } + + return &LoadResult{ + Workflow: workflow, + Format: FormatSWF, + FilePath: filePath, + }, nil +} + +// loadAdapterConfig loads a legacy AdapterConfig and converts it to SWF. 
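+// The pipeline is config_loader.Parse -> converter.ConvertAdapterConfig; the
+// original AdapterConfig is retained on the LoadResult alongside the workflow.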
+func loadAdapterConfig(data []byte, filePath string, cfg *loaderConfig) (*LoadResult, error) { + var loaderOpts []config_loader.LoaderOption + + if cfg.adapterVersion != "" { + loaderOpts = append(loaderOpts, config_loader.WithAdapterVersion(cfg.adapterVersion)) + } + if cfg.skipSemanticValidation { + loaderOpts = append(loaderOpts, config_loader.WithSkipSemanticValidation()) + } + + // Set base directory for resolving relative paths + baseDir := filepath.Dir(filePath) + loaderOpts = append([]config_loader.LoaderOption{config_loader.WithBaseDir(baseDir)}, loaderOpts...) + + adapterConfig, err := config_loader.Parse(data, loaderOpts...) + if err != nil { + return nil, fmt.Errorf("failed to parse AdapterConfig: %w", err) + } + + // Convert to SWF workflow + workflow, err := converter.ConvertAdapterConfig(adapterConfig) + if err != nil { + return nil, fmt.Errorf("failed to convert AdapterConfig to workflow: %w", err) + } + + return &LoadResult{ + Workflow: workflow, + AdapterConfig: adapterConfig, + Format: FormatAdapterConfig, + FilePath: filePath, + }, nil +} + +// LoadWorkflow is a convenience function that loads and returns just the workflow. +func LoadWorkflow(filePath string, opts ...LoadOption) (*model.Workflow, error) { + result, err := Load(filePath, opts...) + if err != nil { + return nil, err + } + return result.Workflow, nil +} + +// LoadWorkflowFromBytes parses workflow from bytes and returns just the workflow. +func LoadWorkflowFromBytes(data []byte, opts ...LoadOption) (*model.Workflow, error) { + result, err := Parse(data, "", opts...) + if err != nil { + return nil, err + } + return result.Workflow, nil +} + +// IsNativeWorkflow checks if a file contains a native SWF workflow. +func IsNativeWorkflow(filePath string) (bool, error) { + data, err := os.ReadFile(filePath) + if err != nil { + return false, err + } + return DetectFormat(data) == FormatSWF, nil +} + +// IsAdapterConfig checks if a file contains a legacy AdapterConfig. 
+func IsAdapterConfig(filePath string) (bool, error) { + data, err := os.ReadFile(filePath) + if err != nil { + return false, err + } + return DetectFormat(data) == FormatAdapterConfig, nil +} diff --git a/internal/swf/loader/loader_test.go b/internal/swf/loader/loader_test.go new file mode 100644 index 0000000..6cd7b00 --- /dev/null +++ b/internal/swf/loader/loader_test.go @@ -0,0 +1,361 @@ +package loader + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDetectFormat_SWF(t *testing.T) { + swfYAML := ` +document: + dsl: "1.0.0" + namespace: test + name: my-workflow + version: "1.0.0" +do: + - setValues: + set: + hello: world +` + format := DetectFormat([]byte(swfYAML)) + assert.Equal(t, FormatSWF, format) +} + +func TestDetectFormat_AdapterConfig(t *testing.T) { + adapterYAML := ` +apiVersion: hyperfleet.redhat.com/v1alpha1 +kind: AdapterConfig +metadata: + name: test-adapter + namespace: default +spec: + adapter: + version: "1.0.0" +` + format := DetectFormat([]byte(adapterYAML)) + assert.Equal(t, FormatAdapterConfig, format) +} + +func TestDetectFormat_Unknown(t *testing.T) { + unknownYAML := ` +some: + random: yaml + without: markers +` + format := DetectFormat([]byte(unknownYAML)) + assert.Equal(t, FormatUnknown, format) +} + +func TestDetectFormat_InvalidYAML(t *testing.T) { + invalidYAML := ` +not: valid: yaml: content +` + format := DetectFormat([]byte(invalidYAML)) + assert.Equal(t, FormatUnknown, format) +} + +func TestParse_SWF(t *testing.T) { + swfYAML := ` +document: + dsl: "1.0.0" + namespace: test + name: my-workflow + version: "1.0.0" +do: + - setValues: + set: + hello: world +` + result, err := Parse([]byte(swfYAML), "/tmp/test.yaml") + require.NoError(t, err) + require.NotNil(t, result) + + assert.Equal(t, FormatSWF, result.Format) + assert.NotNil(t, result.Workflow) + assert.Nil(t, result.AdapterConfig) + assert.Equal(t, "my-workflow", result.Workflow.Document.Name) + assert.Equal(t, "1.0.0", result.Workflow.Document.DSL) +} + +func TestParse_AdapterConfig(t *testing.T) { + adapterYAML := ` +apiVersion: hyperfleet.redhat.com/v1alpha1 +kind: AdapterConfig +metadata: + name: test-adapter + namespace: default +spec: + adapter: + version: "1.0.0" + hyperfleetApi: + timeout: "30s" + retryAttempts: 3 + retryBackoff: exponential + kubernetes: + apiVersion: v1 + params: + - name: clusterId + source: event.id +` + result, err := Parse([]byte(adapterYAML), "/tmp/test.yaml") + require.NoError(t, err) + require.NotNil(t, result) + + assert.Equal(t, FormatAdapterConfig, result.Format) + assert.NotNil(t, result.Workflow) + assert.NotNil(t, result.AdapterConfig) + assert.Equal(t, "test-adapter", result.Workflow.Document.Name) + assert.Equal(t, "test-adapter", result.AdapterConfig.Metadata.Name) +} + +func TestParse_Unknown(t *testing.T) { + unknownYAML := ` +some: + random: yaml +` + result, err := Parse([]byte(unknownYAML), "/tmp/test.yaml") + assert.Error(t, err) + assert.Contains(t, err.Error(), "unable to detect config format") + assert.Nil(t, result) +} + +func TestLoad_FileNotFound(t *testing.T) { + _, err := Load("/nonexistent/path/config.yaml") + assert.Error(t, err) + assert.Contains(t, err.Error(), "failed to read config file") +} + +func TestLoad_EmptyPathWithoutEnv(t *testing.T) { + // Temporarily unset the env var + original := os.Getenv("ADAPTER_CONFIG_PATH") + os.Unsetenv("ADAPTER_CONFIG_PATH") + defer func() { + if original != "" { + os.Setenv("ADAPTER_CONFIG_PATH", original) 
+ } + }() + + _, err := Load("") + assert.Error(t, err) + assert.Contains(t, err.Error(), "config file path is required") +} + +func TestLoad_SWFFile(t *testing.T) { + // Create a temporary SWF file + tempDir := t.TempDir() + filePath := filepath.Join(tempDir, "workflow.yaml") + + swfYAML := ` +document: + dsl: "1.0.0" + namespace: test + name: loaded-workflow + version: "1.0.0" +do: + - setValues: + set: + key: value +` + err := os.WriteFile(filePath, []byte(swfYAML), 0644) + require.NoError(t, err) + + result, err := Load(filePath) + require.NoError(t, err) + require.NotNil(t, result) + + assert.Equal(t, FormatSWF, result.Format) + assert.Equal(t, "loaded-workflow", result.Workflow.Document.Name) + assert.Equal(t, filePath, result.FilePath) +} + +func TestLoad_AdapterConfigFile(t *testing.T) { + // Create a temporary AdapterConfig file + tempDir := t.TempDir() + filePath := filepath.Join(tempDir, "adapter.yaml") + + adapterYAML := ` +apiVersion: hyperfleet.redhat.com/v1alpha1 +kind: AdapterConfig +metadata: + name: loaded-adapter + namespace: default +spec: + adapter: + version: "1.0.0" + hyperfleetApi: + timeout: "30s" + retryAttempts: 3 + retryBackoff: exponential + kubernetes: + apiVersion: v1 +` + err := os.WriteFile(filePath, []byte(adapterYAML), 0644) + require.NoError(t, err) + + result, err := Load(filePath) + require.NoError(t, err) + require.NotNil(t, result) + + assert.Equal(t, FormatAdapterConfig, result.Format) + assert.Equal(t, "loaded-adapter", result.Workflow.Document.Name) + assert.NotNil(t, result.AdapterConfig) +} + +func TestLoadWorkflow(t *testing.T) { + // Create a temporary SWF file + tempDir := t.TempDir() + filePath := filepath.Join(tempDir, "workflow.yaml") + + swfYAML := ` +document: + dsl: "1.0.0" + namespace: test + name: convenience-workflow + version: "1.0.0" +do: + - setValues: + set: + key: value +` + err := os.WriteFile(filePath, []byte(swfYAML), 0644) + require.NoError(t, err) + + workflow, err := LoadWorkflow(filePath) + require.NoError(t, err) + require.NotNil(t, workflow) + assert.Equal(t, "convenience-workflow", workflow.Document.Name) +} + +func TestLoadWorkflowFromBytes(t *testing.T) { + swfYAML := ` +document: + dsl: "1.0.0" + namespace: test + name: bytes-workflow + version: "1.0.0" +do: + - setValues: + set: + key: value +` + workflow, err := LoadWorkflowFromBytes([]byte(swfYAML)) + require.NoError(t, err) + require.NotNil(t, workflow) + assert.Equal(t, "bytes-workflow", workflow.Document.Name) +} + +func TestIsNativeWorkflow(t *testing.T) { + tempDir := t.TempDir() + + // Create SWF file + swfPath := filepath.Join(tempDir, "workflow.yaml") + swfYAML := ` +document: + dsl: "1.0.0" + namespace: test + name: test-workflow + version: "1.0.0" +` + err := os.WriteFile(swfPath, []byte(swfYAML), 0644) + require.NoError(t, err) + + // Create AdapterConfig file + adapterPath := filepath.Join(tempDir, "adapter.yaml") + adapterYAML := ` +apiVersion: hyperfleet.redhat.com/v1alpha1 +kind: AdapterConfig +metadata: + name: test-adapter +spec: + adapter: + version: "1.0.0" +` + err = os.WriteFile(adapterPath, []byte(adapterYAML), 0644) + require.NoError(t, err) + + // Test SWF file + isSWF, err := IsNativeWorkflow(swfPath) + require.NoError(t, err) + assert.True(t, isSWF) + + // Test AdapterConfig file + isSWF, err = IsNativeWorkflow(adapterPath) + require.NoError(t, err) + assert.False(t, isSWF) +} + +func TestIsAdapterConfig(t *testing.T) { + tempDir := t.TempDir() + + // Create AdapterConfig file + adapterPath := filepath.Join(tempDir, "adapter.yaml") + 
adapterYAML := ` +apiVersion: hyperfleet.redhat.com/v1alpha1 +kind: AdapterConfig +metadata: + name: test-adapter +spec: + adapter: + version: "1.0.0" +` + err := os.WriteFile(adapterPath, []byte(adapterYAML), 0644) + require.NoError(t, err) + + // Create SWF file + swfPath := filepath.Join(tempDir, "workflow.yaml") + swfYAML := ` +document: + dsl: "1.0.0" + namespace: test + name: test-workflow + version: "1.0.0" +` + err = os.WriteFile(swfPath, []byte(swfYAML), 0644) + require.NoError(t, err) + + // Test AdapterConfig file + isAdapter, err := IsAdapterConfig(adapterPath) + require.NoError(t, err) + assert.True(t, isAdapter) + + // Test SWF file + isAdapter, err = IsAdapterConfig(swfPath) + require.NoError(t, err) + assert.False(t, isAdapter) +} + +func TestWorkflowFormatConstants(t *testing.T) { + assert.Equal(t, WorkflowFormat("adapter-config"), FormatAdapterConfig) + assert.Equal(t, WorkflowFormat("swf"), FormatSWF) + assert.Equal(t, WorkflowFormat("unknown"), FormatUnknown) +} + +func TestParse_WithOptions(t *testing.T) { + adapterYAML := ` +apiVersion: hyperfleet.redhat.com/v1alpha1 +kind: AdapterConfig +metadata: + name: options-test + namespace: default +spec: + adapter: + version: "2.0.0" + hyperfleetApi: + timeout: "30s" + retryAttempts: 3 + retryBackoff: exponential + kubernetes: + apiVersion: v1 +` + result, err := Parse([]byte(adapterYAML), "/tmp/test.yaml", + WithAdapterVersion("2.0.0"), + WithSkipSemanticValidation(), + ) + require.NoError(t, err) + require.NotNil(t, result) + assert.Equal(t, FormatAdapterConfig, result.Format) +} diff --git a/internal/swf/runner/context.go b/internal/swf/runner/context.go new file mode 100644 index 0000000..3cf8f19 --- /dev/null +++ b/internal/swf/runner/context.go @@ -0,0 +1,283 @@ +// Package runner provides the HyperFleet workflow runner that extends +// the Serverless Workflow SDK with custom task execution capabilities. +package runner + +import ( + "context" + "time" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +// ExecutionPhase represents which phase of execution +type ExecutionPhase string + +const ( + PhaseParamExtraction ExecutionPhase = "param_extraction" + PhasePreconditions ExecutionPhase = "preconditions" + PhaseResources ExecutionPhase = "resources" + PhasePostActions ExecutionPhase = "post_actions" +) + +// ExecutionStatus represents the status of execution +type ExecutionStatus string + +const ( + StatusSuccess ExecutionStatus = "success" + StatusFailed ExecutionStatus = "failed" +) + +// WorkflowContext holds runtime context during workflow execution. +// This bridges the SWF SDK execution with HyperFleet-specific state. 
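+//
+// Construction sketch (illustrative event payload):
+//
+//	wfCtx := NewWorkflowContext(ctx, map[string]any{"id": "cluster-123"})
+//	wfCtx.SetParam("clusterId", "cluster-123")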
+type WorkflowContext struct { + // Ctx is the Go context for cancellation and deadlines + Ctx context.Context + + // EventData is the original CloudEvent data payload + EventData map[string]any + + // Params holds extracted parameters and captured fields from preconditions + Params map[string]any + + // Resources holds created/updated K8s resources keyed by resource name + Resources map[string]*unstructured.Unstructured + + // PreconditionResponses holds full API responses from preconditions + // keyed by precondition name (for use in CEL expressions) + PreconditionResponses map[string]any + + // Adapter holds adapter execution metadata + Adapter AdapterMetadata + + // CurrentPhase tracks the current execution phase + CurrentPhase ExecutionPhase + + // Errors tracks errors by phase + Errors map[ExecutionPhase]error + + // ResourcesSkipped indicates if resources were skipped + ResourcesSkipped bool + + // SkipReason explains why resources were skipped + SkipReason string +} + +// AdapterMetadata holds adapter execution metadata for CEL expressions +type AdapterMetadata struct { + ExecutionStatus string `json:"executionStatus"` + ErrorReason string `json:"errorReason,omitempty"` + ErrorMessage string `json:"errorMessage,omitempty"` + ResourcesSkipped bool `json:"resourcesSkipped,omitempty"` + SkipReason string `json:"skipReason,omitempty"` +} + +// NewWorkflowContext creates a new workflow execution context. +func NewWorkflowContext(ctx context.Context, eventData map[string]any) *WorkflowContext { + return &WorkflowContext{ + Ctx: ctx, + EventData: eventData, + Params: make(map[string]any), + Resources: make(map[string]*unstructured.Unstructured), + PreconditionResponses: make(map[string]any), + Errors: make(map[ExecutionPhase]error), + CurrentPhase: PhaseParamExtraction, + Adapter: AdapterMetadata{ + ExecutionStatus: string(StatusSuccess), + }, + } +} + +// SetParam sets a parameter value. +func (wc *WorkflowContext) SetParam(name string, value any) { + wc.Params[name] = value +} + +// GetParam retrieves a parameter value. +func (wc *WorkflowContext) GetParam(name string) (any, bool) { + v, ok := wc.Params[name] + return v, ok +} + +// SetResource stores a created/updated Kubernetes resource. +func (wc *WorkflowContext) SetResource(name string, resource *unstructured.Unstructured) { + wc.Resources[name] = resource +} + +// GetResource retrieves a stored Kubernetes resource. +func (wc *WorkflowContext) GetResource(name string) (*unstructured.Unstructured, bool) { + r, ok := wc.Resources[name] + return r, ok +} + +// SetPreconditionResponse stores the full API response from a precondition. +func (wc *WorkflowContext) SetPreconditionResponse(name string, response any) { + wc.PreconditionResponses[name] = response +} + +// SetError marks the execution as failed with an error. +func (wc *WorkflowContext) SetError(phase ExecutionPhase, reason, message string, err error) { + wc.Adapter.ExecutionStatus = string(StatusFailed) + wc.Adapter.ErrorReason = reason + wc.Adapter.ErrorMessage = message + wc.Errors[phase] = err +} + +// SetSkipped marks resources as skipped (not an error). +func (wc *WorkflowContext) SetSkipped(reason, message string) { + wc.ResourcesSkipped = true + wc.SkipReason = reason + wc.Adapter.ResourcesSkipped = true + wc.Adapter.SkipReason = message +} + +// GetCELVariables returns all variables for CEL expression evaluation. +// This includes params, adapter metadata, resources, and precondition responses. 
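+//
+// Shape of the returned map (illustrative pseudo-notation):
+//
+//	{ <params...>, "adapter": { executionStatus, errorReason, ... }, "resources": { <name>: <object> }, <preconditionName>: <response> }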
+func (wc *WorkflowContext) GetCELVariables() map[string]any { + result := make(map[string]any) + + // Copy all params + for k, v := range wc.Params { + result[k] = v + } + + // Add adapter metadata + result["adapter"] = map[string]any{ + "executionStatus": wc.Adapter.ExecutionStatus, + "errorReason": wc.Adapter.ErrorReason, + "errorMessage": wc.Adapter.ErrorMessage, + "resourcesSkipped": wc.Adapter.ResourcesSkipped, + "skipReason": wc.Adapter.SkipReason, + } + + // Add resources (convert unstructured to maps) + resources := make(map[string]any) + for name, resource := range wc.Resources { + if resource != nil { + resources[name] = resource.Object + } + } + result["resources"] = resources + + // Add precondition responses + for name, response := range wc.PreconditionResponses { + result[name] = response + } + + return result +} + +// ToWorkflowInput creates the input data structure for the SWF workflow. +// This combines event data, config, and context into a single map. +func (wc *WorkflowContext) ToWorkflowInput(config map[string]any) map[string]any { + return map[string]any{ + "event": wc.EventData, + "config": config, + "params": wc.Params, + "adapter": wc.adapterToMap(), + } +} + +func (wc *WorkflowContext) adapterToMap() map[string]any { + return map[string]any{ + "executionStatus": wc.Adapter.ExecutionStatus, + "errorReason": wc.Adapter.ErrorReason, + "errorMessage": wc.Adapter.ErrorMessage, + "resourcesSkipped": wc.Adapter.ResourcesSkipped, + "skipReason": wc.Adapter.SkipReason, + } +} + +// EvaluationRecord tracks a single condition evaluation during execution +type EvaluationRecord struct { + Phase ExecutionPhase + Name string + EvaluationType string + Expression string + Matched bool + Timestamp time.Time +} + +// WorkflowResult contains the result of workflow execution. +type WorkflowResult struct { + // Status is the overall execution status + Status ExecutionStatus + + // CurrentPhase is the phase where execution ended + CurrentPhase ExecutionPhase + + // Params contains the extracted parameters + Params map[string]any + + // PreconditionResults contains results of precondition evaluations + PreconditionResults []PreconditionResult + + // ResourceResults contains results of resource operations + ResourceResults []ResourceResult + + // PostActionResults contains results of post-action executions + PostActionResults []PostActionResult + + // Errors contains errors keyed by the phase where they occurred + Errors map[ExecutionPhase]error + + // ResourcesSkipped indicates if resources were skipped + ResourcesSkipped bool + + // SkipReason is why resources were skipped + SkipReason string + + // Output is the final workflow output + Output any +} + +// PreconditionResult contains the result of a single precondition evaluation. +type PreconditionResult struct { + Name string + Matched bool + APICallMade bool + CapturedFields map[string]any + Error error +} + +// ResourceResult contains the result of a single resource operation. +type ResourceResult struct { + Name string + Kind string + Namespace string + ResourceName string + Operation string // create, update, recreate, skip + OperationReason string + Resource *unstructured.Unstructured + Error error +} + +// PostActionResult contains the result of a single post-action execution. +type PostActionResult struct { + Name string + APICallMade bool + HTTPStatus int + Error error +} + +// GetOutput returns the workflow output as a map. +// If the output is not a map, returns an empty map. 
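+//
+// For example, Output == "some string" yields an empty map, while
+// Output == map[string]any{"k": "v"} is returned unchanged.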
+func (wr *WorkflowResult) GetOutput() map[string]any { + if wr.Output == nil { + return make(map[string]any) + } + if m, ok := wr.Output.(map[string]any); ok { + return m + } + return make(map[string]any) +} + +// GetTaskOutput retrieves the output of a specific task phase. +// This is a simplified implementation that looks for task outputs in the main output. +func (wr *WorkflowResult) GetTaskOutput(taskName string) (map[string]any, bool) { + output := wr.GetOutput() + if taskOutput, ok := output[taskName].(map[string]any); ok { + return taskOutput, true + } + // Return the full output if task-specific output not found + return output, true +} diff --git a/internal/swf/runner/context_test.go b/internal/swf/runner/context_test.go new file mode 100644 index 0000000..d2f588b --- /dev/null +++ b/internal/swf/runner/context_test.go @@ -0,0 +1,259 @@ +package runner + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +func TestNewWorkflowContext(t *testing.T) { + ctx := context.Background() + eventData := map[string]any{ + "id": "test-123", + "type": "cluster.created", + } + + wfCtx := NewWorkflowContext(ctx, eventData) + + require.NotNil(t, wfCtx) + assert.Equal(t, eventData, wfCtx.EventData) + assert.Equal(t, PhaseParamExtraction, wfCtx.CurrentPhase) + assert.Equal(t, string(StatusSuccess), wfCtx.Adapter.ExecutionStatus) + assert.NotNil(t, wfCtx.Params) + assert.NotNil(t, wfCtx.Resources) + assert.NotNil(t, wfCtx.PreconditionResponses) + assert.NotNil(t, wfCtx.Errors) +} + +func TestWorkflowContext_SetParam(t *testing.T) { + wfCtx := NewWorkflowContext(context.Background(), nil) + + wfCtx.SetParam("clusterId", "cluster-123") + wfCtx.SetParam("count", 42) + + val, ok := wfCtx.GetParam("clusterId") + assert.True(t, ok) + assert.Equal(t, "cluster-123", val) + + val, ok = wfCtx.GetParam("count") + assert.True(t, ok) + assert.Equal(t, 42, val) + + _, ok = wfCtx.GetParam("nonexistent") + assert.False(t, ok) +} + +func TestWorkflowContext_SetResource(t *testing.T) { + wfCtx := NewWorkflowContext(context.Background(), nil) + + resource := &unstructured.Unstructured{ + Object: map[string]any{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]any{ + "name": "test-cm", + "namespace": "default", + }, + }, + } + + wfCtx.SetResource("myConfigMap", resource) + + r, ok := wfCtx.GetResource("myConfigMap") + assert.True(t, ok) + require.NotNil(t, r) + assert.Equal(t, "ConfigMap", r.GetKind()) + + _, ok = wfCtx.GetResource("nonexistent") + assert.False(t, ok) +} + +func TestWorkflowContext_SetPreconditionResponse(t *testing.T) { + wfCtx := NewWorkflowContext(context.Background(), nil) + + response := map[string]any{ + "status": "active", + "data": []any{"a", "b"}, + } + + wfCtx.SetPreconditionResponse("checkCluster", response) + + assert.Equal(t, response, wfCtx.PreconditionResponses["checkCluster"]) +} + +func TestWorkflowContext_SetError(t *testing.T) { + wfCtx := NewWorkflowContext(context.Background(), nil) + + testErr := assert.AnError + wfCtx.SetError(PhasePreconditions, "API_ERROR", "Failed to call API", testErr) + + assert.Equal(t, string(StatusFailed), wfCtx.Adapter.ExecutionStatus) + assert.Equal(t, "API_ERROR", wfCtx.Adapter.ErrorReason) + assert.Equal(t, "Failed to call API", wfCtx.Adapter.ErrorMessage) + assert.Equal(t, testErr, wfCtx.Errors[PhasePreconditions]) +} + +func TestWorkflowContext_SetSkipped(t *testing.T) { + wfCtx := 
NewWorkflowContext(context.Background(), nil) + + wfCtx.SetSkipped("PRECONDITION_NOT_MET", "Cluster not in ready state") + + assert.True(t, wfCtx.ResourcesSkipped) + assert.Equal(t, "PRECONDITION_NOT_MET", wfCtx.SkipReason) + assert.True(t, wfCtx.Adapter.ResourcesSkipped) + assert.Equal(t, "Cluster not in ready state", wfCtx.Adapter.SkipReason) +} + +func TestWorkflowContext_GetCELVariables(t *testing.T) { + wfCtx := NewWorkflowContext(context.Background(), nil) + + // Set some params + wfCtx.SetParam("clusterId", "cluster-123") + wfCtx.SetParam("status", "ready") + + // Set a resource + resource := &unstructured.Unstructured{ + Object: map[string]any{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]any{ + "name": "test-cm", + }, + }, + } + wfCtx.SetResource("configMap", resource) + + // Set a precondition response + wfCtx.SetPreconditionResponse("checkStatus", map[string]any{ + "result": "ok", + }) + + vars := wfCtx.GetCELVariables() + + // Check params are present + assert.Equal(t, "cluster-123", vars["clusterId"]) + assert.Equal(t, "ready", vars["status"]) + + // Check adapter metadata + adapter, ok := vars["adapter"].(map[string]any) + require.True(t, ok) + assert.Equal(t, string(StatusSuccess), adapter["executionStatus"]) + + // Check resources + resources, ok := vars["resources"].(map[string]any) + require.True(t, ok) + assert.NotNil(t, resources["configMap"]) + + // Check precondition response + assert.Equal(t, map[string]any{"result": "ok"}, vars["checkStatus"]) +} + +func TestWorkflowContext_ToWorkflowInput(t *testing.T) { + eventData := map[string]any{"id": "event-123"} + wfCtx := NewWorkflowContext(context.Background(), eventData) + wfCtx.SetParam("clusterId", "cluster-456") + + config := map[string]any{ + "metadata": map[string]any{"name": "test-adapter"}, + } + + input := wfCtx.ToWorkflowInput(config) + + assert.Equal(t, eventData, input["event"]) + assert.Equal(t, config, input["config"]) + assert.Equal(t, wfCtx.Params, input["params"]) + + adapter, ok := input["adapter"].(map[string]any) + require.True(t, ok) + assert.Equal(t, string(StatusSuccess), adapter["executionStatus"]) +} + +func TestWorkflowResult_GetOutput(t *testing.T) { + tests := []struct { + name string + output any + expected map[string]any + }{ + { + name: "nil output", + output: nil, + expected: map[string]any{}, + }, + { + name: "map output", + output: map[string]any{ + "key": "value", + }, + expected: map[string]any{ + "key": "value", + }, + }, + { + name: "non-map output", + output: "string value", + expected: map[string]any{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := &WorkflowResult{Output: tt.output} + got := result.GetOutput() + assert.Equal(t, tt.expected, got) + }) + } +} + +func TestWorkflowResult_GetTaskOutput(t *testing.T) { + tests := []struct { + name string + result *WorkflowResult + taskName string + wantOK bool + }{ + { + name: "task output exists", + result: &WorkflowResult{ + Output: map[string]any{ + "phase_params": map[string]any{ + "clusterId": "123", + }, + }, + }, + taskName: "phase_params", + wantOK: true, + }, + { + name: "task output not found - returns full output", + result: &WorkflowResult{ + Output: map[string]any{ + "clusterId": "123", + }, + }, + taskName: "nonexistent", + wantOK: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, ok := tt.result.GetTaskOutput(tt.taskName) + assert.Equal(t, tt.wantOK, ok) + }) + } +} + +func TestExecutionPhaseConstants(t *testing.T) { + 
assert.Equal(t, ExecutionPhase("param_extraction"), PhaseParamExtraction) + assert.Equal(t, ExecutionPhase("preconditions"), PhasePreconditions) + assert.Equal(t, ExecutionPhase("resources"), PhaseResources) + assert.Equal(t, ExecutionPhase("post_actions"), PhasePostActions) +} + +func TestExecutionStatusConstants(t *testing.T) { + assert.Equal(t, ExecutionStatus("success"), StatusSuccess) + assert.Equal(t, ExecutionStatus("failed"), StatusFailed) +} diff --git a/internal/swf/runner/hyperfleet_runner.go b/internal/swf/runner/hyperfleet_runner.go new file mode 100644 index 0000000..c8b0cd0 --- /dev/null +++ b/internal/swf/runner/hyperfleet_runner.go @@ -0,0 +1,341 @@ +package runner + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/cloudevents/sdk-go/v2/event" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/hyperfleet_api" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/k8s_client" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/swf/converter" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/swf/tasks" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" + pkgotel "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/otel" + "github.com/serverlessworkflow/sdk-go/v3/model" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/trace" +) + +// HyperFleetRunner is the main entry point for running workflows in the HyperFleet context. +// It integrates the SWF runner with CloudEvents handling and OTel tracing. +type HyperFleetRunner struct { + workflow *model.Workflow + runner *Runner + config *HyperFleetConfig + log logger.Logger + k8sClient k8s_client.K8sClient + apiClient hyperfleet_api.Client +} + +// HyperFleetConfig holds configuration for the HyperFleet runner. +type HyperFleetConfig struct { + // AdapterConfig is the legacy adapter configuration (optional, will be converted) + AdapterConfig *config_loader.AdapterConfig + // Workflow is the native SWF workflow (takes precedence over AdapterConfig) + Workflow *model.Workflow + // K8sClient is the Kubernetes client + K8sClient k8s_client.K8sClient + // APIClient is the HyperFleet API client + APIClient hyperfleet_api.Client + // Logger is the logger instance + Logger logger.Logger +} + +// HyperFleetResult contains the result of processing an event. +type HyperFleetResult struct { + // Status is the overall execution status + Status ExecutionStatus + // Output is the final output from the workflow + Output map[string]any + // Error is the error if execution failed + Error error + // Phases contains results of each phase + Phases map[string]PhaseResult +} + +// PhaseResult contains the result of a single workflow phase. +type PhaseResult struct { + Name string + Status ExecutionStatus + Output map[string]any + Skipped bool +} + +// NewHyperFleetRunner creates a new HyperFleet runner. 
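+//
+// Construction sketch (illustrative values; only Logger plus one of
+// Workflow/AdapterConfig are validated here, so supply the clients the
+// workflow's tasks actually use):
+//
+//	runner, err := NewHyperFleetRunner(&HyperFleetConfig{
+//	    Workflow:  wf,
+//	    K8sClient: k8sClient,
+//	    APIClient: apiClient,
+//	    Logger:    log,
+//	})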
+func NewHyperFleetRunner(config *HyperFleetConfig) (*HyperFleetRunner, error) {
+    if config == nil {
+        return nil, fmt.Errorf("config is required")
+    }
+
+    if config.Logger == nil {
+        return nil, fmt.Errorf("logger is required")
+    }
+
+    // Determine the workflow to use
+    var workflow *model.Workflow
+    var err error
+
+    if config.Workflow != nil {
+        workflow = config.Workflow
+    } else if config.AdapterConfig != nil {
+        // Convert legacy AdapterConfig to SWF Workflow
+        workflow, err = converter.ConvertAdapterConfig(config.AdapterConfig)
+        if err != nil {
+            return nil, fmt.Errorf("failed to convert adapter config: %w", err)
+        }
+    } else {
+        return nil, fmt.Errorf("either Workflow or AdapterConfig is required")
+    }
+
+    // Create task registry with dependencies
+    deps := &tasks.Dependencies{
+        K8sClient: config.K8sClient,
+        APIClient: config.APIClient,
+        Logger:    config.Logger,
+    }
+
+    registry := tasks.NewRegistry()
+    if err := tasks.RegisterAllWithDeps(registry, deps); err != nil {
+        return nil, fmt.Errorf("failed to register tasks: %w", err)
+    }
+
+    // Create SWF runner
+    runner, err := NewRunner(&RunnerConfig{
+        Workflow:     workflow,
+        TaskRegistry: registry,
+        K8sClient:    config.K8sClient,
+        APIClient:    config.APIClient,
+        Logger:       config.Logger,
+    })
+    if err != nil {
+        return nil, fmt.Errorf("failed to create runner: %w", err)
+    }
+
+    return &HyperFleetRunner{
+        workflow:  workflow,
+        runner:    runner,
+        config:    config,
+        log:       config.Logger,
+        k8sClient: config.K8sClient,
+        apiClient: config.APIClient,
+    }, nil
+}
+
+// Execute processes event data according to the workflow configuration.
+func (r *HyperFleetRunner) Execute(ctx context.Context, data interface{}) *HyperFleetResult {
+    // Start OTel span and add trace context to logs
+    ctx, span := r.startTracedExecution(ctx)
+    defer span.End()
+
+    result := &HyperFleetResult{
+        Status: StatusSuccess,
+        Phases: make(map[string]PhaseResult),
+    }
+
+    // Parse event data
+    eventData, rawData, err := parseEventData(data)
+    if err != nil {
+        result.Status = StatusFailed
+        result.Error = fmt.Errorf("failed to parse event data: %w", err)
+        return result
+    }
+
+    // Set resource context for logging
+    if eventData.OwnedReference != nil {
+        ctx = logger.WithResourceType(ctx, eventData.Kind)
+        ctx = logger.WithDynamicResourceID(ctx, eventData.Kind, eventData.ID)
+        ctx = logger.WithDynamicResourceID(ctx, eventData.OwnedReference.Kind, eventData.OwnedReference.ID)
+    } else if eventData.ID != "" {
+        ctx = logger.WithDynamicResourceID(ctx, eventData.Kind, eventData.ID)
+    }
+
+    r.log.Info(ctx, "Processing event via SWF engine")
+
+    // Build initial input for workflow
+    input := map[string]any{
+        "event":  rawData,
+        "params": make(map[string]any),
+    }
+
+    // Run the workflow
+    wfResult, err := r.runner.Run(ctx, input)
+    if err != nil {
+        result.Status = StatusFailed
+        result.Error = fmt.Errorf("workflow execution failed: %w", err)
+        errCtx := logger.WithErrorField(ctx, err)
+        r.log.Errorf(errCtx, "Workflow execution failed")
+        return result
+    }
+
+    // Extract results from the workflow result
+    result.Output = wfResult.GetOutput()
+
+    // Extract phase results
+    for _, phase := range []string{"phase_params", "phase_preconditions", "phase_resources", "phase_post"} {
+        if phaseOutput, ok := wfResult.GetTaskOutput(phase); ok {
+            phaseResult := PhaseResult{
+                Name:   phase,
+                Status: StatusSuccess,
+                Output: phaseOutput,
+            }
+            if errMsg, hasErr := phaseOutput["error"].(string); hasErr && errMsg != "" {
+                phaseResult.Status = StatusFailed
+                result.Status = StatusFailed
+            }
+
result.Phases[phase] = phaseResult + } + } + + if result.Status == StatusSuccess { + r.log.Info(ctx, "Event execution finished: status=success") + } else { + r.log.Errorf(ctx, "Event execution finished: status=failed") + } + + return result +} + +// CreateHandler creates an event handler function for use with the broker subscriber. +// This matches the interface expected by broker_consumer. +func (r *HyperFleetRunner) CreateHandler() func(ctx context.Context, evt *event.Event) error { + return func(ctx context.Context, evt *event.Event) error { + // Add event ID to context for logging correlation + ctx = logger.WithEventID(ctx, evt.ID()) + + // Extract W3C trace context from CloudEvent extensions + ctx = pkgotel.ExtractTraceContextFromCloudEvent(ctx, evt) + + // Log event metadata + r.log.Infof(ctx, "Event received: id=%s type=%s source=%s time=%s", + evt.ID(), evt.Type(), evt.Source(), evt.Time()) + + // Execute the workflow + _ = r.Execute(ctx, evt.Data()) + + r.log.Infof(ctx, "Event processed: type=%s source=%s time=%s", + evt.Type(), evt.Source(), evt.Time()) + + return nil + } +} + +// startTracedExecution creates an OTel span and adds trace context to logs. +func (r *HyperFleetRunner) startTracedExecution(ctx context.Context) (context.Context, trace.Span) { + componentName := r.workflow.Document.Name + ctx, span := otel.Tracer(componentName).Start(ctx, "Execute") + + // Add trace_id and span_id to logger context + ctx = logger.WithOTelTraceContext(ctx) + + return ctx, span +} + +// GetWorkflow returns the underlying SWF workflow model. +func (r *HyperFleetRunner) GetWorkflow() *model.Workflow { + return r.workflow +} + +// ResourceRef represents a reference to a HyperFleet resource. +type ResourceRef struct { + ID string `json:"id,omitempty"` + Kind string `json:"kind,omitempty"` + Href string `json:"href,omitempty"` +} + +// EventData represents the data payload of a HyperFleet CloudEvent. +type EventData struct { + ID string `json:"id,omitempty"` + Kind string `json:"kind,omitempty"` + Href string `json:"href,omitempty"` + Generation int64 `json:"generation,omitempty"` + OwnedReference *ResourceRef `json:"owned_reference,omitempty"` +} + +// parseEventData parses event data from various input types. +func parseEventData(data interface{}) (*EventData, map[string]interface{}, error) { + if data == nil { + return &EventData{}, make(map[string]interface{}), nil + } + + var jsonBytes []byte + var err error + + switch v := data.(type) { + case []byte: + if len(v) == 0 { + return &EventData{}, make(map[string]interface{}), nil + } + jsonBytes = v + case map[string]interface{}: + jsonBytes, err = json.Marshal(v) + if err != nil { + return nil, nil, fmt.Errorf("failed to marshal map data: %w", err) + } + default: + jsonBytes, err = json.Marshal(v) + if err != nil { + return nil, nil, fmt.Errorf("failed to marshal data: %w", err) + } + } + + var eventData EventData + if err := json.Unmarshal(jsonBytes, &eventData); err != nil { + return nil, nil, fmt.Errorf("failed to unmarshal to EventData: %w", err) + } + + var rawData map[string]interface{} + if err := json.Unmarshal(jsonBytes, &rawData); err != nil { + return nil, nil, fmt.Errorf("failed to unmarshal to map: %w", err) + } + + return &eventData, rawData, nil +} + +// HyperFleetRunnerBuilder provides a fluent interface for building a HyperFleetRunner. +type HyperFleetRunnerBuilder struct { + config *HyperFleetConfig +} + +// NewBuilder creates a new HyperFleetRunnerBuilder. 
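+//
+// Typical fluent usage (sketch):
+//
+//	runner, err := NewBuilder().
+//	    WithWorkflow(wf).
+//	    WithK8sClient(k8sClient).
+//	    WithAPIClient(apiClient).
+//	    WithLogger(log).
+//	    Build()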
+func NewBuilder() *HyperFleetRunnerBuilder { + return &HyperFleetRunnerBuilder{ + config: &HyperFleetConfig{}, + } +} + +// WithAdapterConfig sets the legacy adapter configuration. +func (b *HyperFleetRunnerBuilder) WithAdapterConfig(config *config_loader.AdapterConfig) *HyperFleetRunnerBuilder { + b.config.AdapterConfig = config + return b +} + +// WithWorkflow sets the native SWF workflow. +func (b *HyperFleetRunnerBuilder) WithWorkflow(workflow *model.Workflow) *HyperFleetRunnerBuilder { + b.config.Workflow = workflow + return b +} + +// WithK8sClient sets the Kubernetes client. +func (b *HyperFleetRunnerBuilder) WithK8sClient(client k8s_client.K8sClient) *HyperFleetRunnerBuilder { + b.config.K8sClient = client + return b +} + +// WithAPIClient sets the HyperFleet API client. +func (b *HyperFleetRunnerBuilder) WithAPIClient(client hyperfleet_api.Client) *HyperFleetRunnerBuilder { + b.config.APIClient = client + return b +} + +// WithLogger sets the logger. +func (b *HyperFleetRunnerBuilder) WithLogger(log logger.Logger) *HyperFleetRunnerBuilder { + b.config.Logger = log + return b +} + +// Build creates the HyperFleetRunner. +func (b *HyperFleetRunnerBuilder) Build() (*HyperFleetRunner, error) { + return NewHyperFleetRunner(b.config) +} diff --git a/internal/swf/runner/runner.go b/internal/swf/runner/runner.go new file mode 100644 index 0000000..d121cd6 --- /dev/null +++ b/internal/swf/runner/runner.go @@ -0,0 +1,743 @@ +package runner + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "strings" + "time" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/hyperfleet_api" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/k8s_client" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/swf/tasks" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" + "github.com/serverlessworkflow/sdk-go/v3/impl/expr" + "github.com/serverlessworkflow/sdk-go/v3/model" +) + +// Runner executes Serverless Workflow definitions with HyperFleet custom tasks. +// It extends the SDK's workflow execution with custom task runners for +// Kubernetes operations, API calls, CEL expressions, and more. +type Runner struct { + workflow *model.Workflow + taskRegistry *tasks.Registry + deps *tasks.Dependencies + log logger.Logger + httpClient *http.Client +} + +// RunnerConfig holds configuration for creating a Runner. +type RunnerConfig struct { + // Workflow is the SWF workflow definition to execute + Workflow *model.Workflow + + // TaskRegistry is the registry of custom task runners (optional, uses default if nil) + TaskRegistry *tasks.Registry + + // K8sClient is the Kubernetes client for resource operations + K8sClient k8s_client.K8sClient + + // APIClient is the HyperFleet API client for HTTP calls + APIClient hyperfleet_api.Client + + // Logger is the logging interface + Logger logger.Logger +} + +// NewRunner creates a new workflow runner with the given configuration. 
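+//
+// Example (sketch; TaskRegistry falls back to tasks.DefaultRegistry when nil):
+//
+//	runner, err := NewRunner(&RunnerConfig{Workflow: wf, Logger: log})
+//	if err != nil {
+//	    return err
+//	}
+//	result, err := runner.Run(ctx, map[string]any{"event": eventData})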
+func NewRunner(cfg *RunnerConfig) (*Runner, error) { + if cfg == nil { + return nil, fmt.Errorf("config is required") + } + if cfg.Workflow == nil { + return nil, fmt.Errorf("workflow is required") + } + if cfg.Logger == nil { + return nil, fmt.Errorf("logger is required") + } + + registry := cfg.TaskRegistry + if registry == nil { + registry = tasks.DefaultRegistry() + } + + deps := &tasks.Dependencies{ + K8sClient: cfg.K8sClient, + APIClient: cfg.APIClient, + Logger: cfg.Logger, + } + + // Create HTTP client with sensible defaults for native SWF HTTP tasks + httpClient := &http.Client{ + Timeout: 30 * time.Second, + } + + return &Runner{ + workflow: cfg.Workflow, + taskRegistry: registry, + deps: deps, + log: cfg.Logger, + httpClient: httpClient, + }, nil +} + +// Run executes the workflow with the given input. +// The input should contain event data and configuration. +func (r *Runner) Run(ctx context.Context, input map[string]any) (*WorkflowResult, error) { + r.log.Info(ctx, "Starting workflow execution") + + // Inject environment variables into input for native SWF expression access + // This allows workflows to use ${ .env.HYPERFLEET_API_BASE_URL } syntax + input["env"] = r.collectEnvironmentVariables() + + // Create workflow context + eventData, _ := input["event"].(map[string]any) + if eventData == nil { + eventData = make(map[string]any) + } + wfCtx := NewWorkflowContext(ctx, eventData) + + // Execute workflow tasks + result, err := r.executeWorkflow(wfCtx, input) + if err != nil { + r.log.Errorf(ctx, "Workflow execution failed: %v", err) + return nil, err + } + + r.log.Info(ctx, "Workflow execution completed") + return result, nil +} + +// collectEnvironmentVariables reads HYPERFLEET_* prefixed environment variables. +// Returns a map with both the original name and a short version without prefix. +// Example: HYPERFLEET_API_BASE_URL is accessible as both +// .env.HYPERFLEET_API_BASE_URL and .env.API_BASE_URL +// +// Note: Returns map[string]any (not map[string]string) because gojq requires +// interface{} types for proper JSON-like traversal. +func (r *Runner) collectEnvironmentVariables() map[string]any { + envVars := make(map[string]any) + for _, env := range os.Environ() { + if strings.HasPrefix(env, "HYPERFLEET_") { + parts := strings.SplitN(env, "=", 2) + if len(parts) == 2 { + // Store with original name + envVars[parts[0]] = parts[1] + // Also store without prefix for cleaner access + key := strings.TrimPrefix(parts[0], "HYPERFLEET_") + envVars[key] = parts[1] + } + } + } + return envVars +} + +// executeWorkflow processes the workflow's task list. 
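+// Each task receives the previous task's output as its input, so data flows
+// through the task list in document order and the final task's output becomes
+// WorkflowResult.Output.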
+func (r *Runner) executeWorkflow(wfCtx *WorkflowContext, input map[string]any) (*WorkflowResult, error) { + result := &WorkflowResult{ + Status: StatusSuccess, + CurrentPhase: PhaseParamExtraction, + Params: make(map[string]any), + Errors: make(map[ExecutionPhase]error), + } + + if r.workflow.Do == nil { + return result, nil + } + + // Process each task in the workflow + currentOutput := input + for _, taskItem := range *r.workflow.Do { + taskOutput, err := r.executeTask(wfCtx, taskItem, currentOutput) + if err != nil { + result.Status = StatusFailed + result.Errors[wfCtx.CurrentPhase] = err + return result, err + } + + // Update output for next task + if taskOutput != nil { + currentOutput = taskOutput + } + } + + // Populate result from context + result.Params = wfCtx.Params + result.ResourcesSkipped = wfCtx.ResourcesSkipped + result.SkipReason = wfCtx.SkipReason + result.Output = currentOutput + + return result, nil +} + +// executeTask executes a single task from the workflow. +func (r *Runner) executeTask(wfCtx *WorkflowContext, taskItem *model.TaskItem, input map[string]any) (map[string]any, error) { + taskName := taskItem.Key + + r.log.Debugf(wfCtx.Ctx, "Executing task: %s", taskName) + + // Check if condition - skip task if evaluates to false + if taskItem.Task.GetBase() != nil && taskItem.Task.GetBase().If != nil { + shouldExecute, err := r.evaluateIfCondition(wfCtx.Ctx, taskItem.Task.GetBase().If, input) + if err != nil { + return nil, fmt.Errorf("failed to evaluate if condition for task %s: %w", taskName, err) + } + if !shouldExecute { + r.log.Debugf(wfCtx.Ctx, "Skipping task %s: if condition evaluated to false", taskName) + return input, nil + } + } + + // Check if this is a custom HyperFleet task (CallFunction with hf: prefix) + if callFunc, ok := taskItem.Task.(*model.CallFunction); ok && tasks.IsHyperFleetTask(callFunc.Call) { + return r.executeCustomTask(wfCtx, taskName, callFunc, input) + } + + // Handle other built-in task types + switch t := taskItem.Task.(type) { + case *model.SetTask: + return r.executeSetTask(wfCtx, taskName, t, input) + case *model.DoTask: + return r.executeDoTask(wfCtx, taskName, t, input) + case *model.ForTask: + return r.executeForTask(wfCtx, taskName, t, input) + case *model.SwitchTask: + return r.executeSwitchTask(wfCtx, taskName, t, input) + case *model.CallHTTP: + return r.executeHTTPTask(wfCtx, taskName, t, input) + case *model.TryTask: + return r.executeTryTask(wfCtx, taskName, t, input) + default: + return nil, fmt.Errorf("unsupported task type %T for task %s", t, taskName) + } +} + +// executeCustomTask executes a HyperFleet custom task. 
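+//
+// In the workflow document such a task is a CallFunction whose call name
+// carries the hf: prefix, e.g. (illustrative YAML; the task type and its
+// "with" arguments depend on what is registered):
+//
+//	- extractParams:
+//	    call: hf:extract
+//	    with:
+//	      source: event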
+func (r *Runner) executeCustomTask(wfCtx *WorkflowContext, taskName string, callFunc *model.CallFunction, input map[string]any) (map[string]any, error) { + // Get the task runner from registry + runner, err := r.taskRegistry.Create(callFunc.Call, r.deps) + if err != nil { + return nil, fmt.Errorf("failed to create task runner for %s: %w", callFunc.Call, err) + } + + // Prepare arguments from the 'with' field + args := callFunc.With + if args == nil { + args = make(map[string]any) + } + + // Execute the task + r.log.Debugf(wfCtx.Ctx, "Executing custom task: %s (type: %s)", taskName, callFunc.Call) + output, err := runner.Run(wfCtx.Ctx, args, input) + if err != nil { + return nil, fmt.Errorf("task %s failed: %w", taskName, err) + } + + // Handle export if specified + if callFunc.Export != nil && callFunc.Export.As != nil { + // Export output to workflow context + // This is simplified - full implementation would evaluate JQ expression + r.log.Debugf(wfCtx.Ctx, "Exporting task output: %s", taskName) + } + + return output, nil +} + +// executeSetTask handles the Set task type. +// It evaluates JQ runtime expressions (e.g., ${ .env.HYPERFLEET_API_BASE_URL }) +// and sets the resulting values in the workflow context. +// +// For compatibility with HyperFleet custom tasks (hf:preconditions, hf:post, etc.), +// values are also stored under output["params"] since those tasks expect params there. +func (r *Runner) executeSetTask(wfCtx *WorkflowContext, taskName string, task *model.SetTask, input map[string]any) (map[string]any, error) { + // Copy input to output + output := make(map[string]any) + for k, v := range input { + output[k] = v + } + + // Get or create the params map for HyperFleet task compatibility + params, _ := output["params"].(map[string]any) + if params == nil { + params = make(map[string]any) + output["params"] = params + } + + if task.Set != nil { + for k, v := range task.Set { + // Evaluate the value using the SDK's expression evaluator + // This handles runtime expressions like ${ .env.VAR_NAME // "default" } + evaluated, err := expr.TraverseAndEvaluate(v, input, wfCtx.Ctx) + if err != nil { + return nil, fmt.Errorf("failed to evaluate expression for '%s': %w", k, err) + } + + // Store at root level (standard SWF behavior) + output[k] = evaluated + // Also store in params for HyperFleet task compatibility + params[k] = evaluated + wfCtx.SetParam(k, evaluated) + r.log.Debugf(wfCtx.Ctx, "Set %s = %v", k, evaluated) + } + } + + return output, nil +} + +// executeDoTask handles nested task lists. +func (r *Runner) executeDoTask(wfCtx *WorkflowContext, taskName string, task *model.DoTask, input map[string]any) (map[string]any, error) { + if task.Do == nil { + return input, nil + } + + currentOutput := input + for _, nestedTask := range *task.Do { + output, err := r.executeTask(wfCtx, nestedTask, currentOutput) + if err != nil { + return nil, err + } + if output != nil { + currentOutput = output + } + } + + return currentOutput, nil +} + +// executeForTask handles iteration over collections. +func (r *Runner) executeForTask(wfCtx *WorkflowContext, taskName string, task *model.ForTask, input map[string]any) (map[string]any, error) { + // Simplified for loop implementation + // Full implementation would evaluate 'in' expression and iterate + + r.log.Debugf(wfCtx.Ctx, "For task %s: iteration not yet implemented", taskName) + return input, nil +} + +// executeSwitchTask handles conditional branching. 
+func (r *Runner) executeSwitchTask(wfCtx *WorkflowContext, taskName string, task *model.SwitchTask, input map[string]any) (map[string]any, error) { + // Simplified switch implementation + // Full implementation would evaluate 'when' expressions + + r.log.Debugf(wfCtx.Ctx, "Switch task %s: conditional branching not yet implemented", taskName) + return input, nil +} + +// evaluateIfCondition evaluates the `if` condition for a task. +// Returns true if the task should execute, false if it should be skipped. +func (r *Runner) evaluateIfCondition(ctx context.Context, ifExpr *model.RuntimeExpression, input map[string]any) (bool, error) { + if ifExpr == nil { + return true, nil + } + + result, err := expr.TraverseAndEvaluate(ifExpr.Value, input, ctx) + if err != nil { + return false, fmt.Errorf("if expression evaluation failed: %w", err) + } + + // Convert result to boolean + switch v := result.(type) { + case bool: + return v, nil + case nil: + return false, nil + default: + // Any non-nil, non-false value is truthy + return true, nil + } +} + +// executeHTTPTask executes a native SWF HTTP call task. +func (r *Runner) executeHTTPTask(wfCtx *WorkflowContext, taskName string, task *model.CallHTTP, input map[string]any) (map[string]any, error) { + r.log.Debugf(wfCtx.Ctx, "Executing HTTP task: %s", taskName) + + // Evaluate endpoint URL + endpointStr := task.With.Endpoint.String() + evaluatedEndpoint, err := expr.TraverseAndEvaluate(endpointStr, input, wfCtx.Ctx) + if err != nil { + return nil, fmt.Errorf("failed to evaluate endpoint for task %s: %w", taskName, err) + } + + urlStr, ok := evaluatedEndpoint.(string) + if !ok { + urlStr = endpointStr + } + + r.log.Debugf(wfCtx.Ctx, "HTTP %s %s", task.With.Method, urlStr) + + // Prepare request body + var bodyReader io.Reader + if task.With.Body != nil { + // First unmarshal the RawMessage to get the actual value + var bodyValue any + if err := json.Unmarshal(task.With.Body, &bodyValue); err != nil { + // If unmarshal fails, use raw bytes + bodyReader = bytes.NewReader(task.With.Body) + } else { + // Check if it's a string that might be a runtime expression + if bodyStr, ok := bodyValue.(string); ok { + // Evaluate as potential jq expression + bodyData, err := expr.TraverseAndEvaluate(bodyStr, input, wfCtx.Ctx) + if err != nil { + // Not an expression, use the string as-is + bodyReader = strings.NewReader(bodyStr) + } else { + switch b := bodyData.(type) { + case string: + bodyReader = strings.NewReader(b) + case []byte: + bodyReader = bytes.NewReader(b) + default: + jsonBody, err := json.Marshal(b) + if err != nil { + return nil, fmt.Errorf("failed to marshal request body: %w", err) + } + bodyReader = bytes.NewReader(jsonBody) + } + } + } else { + // It's already an object/array, marshal it back to JSON + jsonBody, err := json.Marshal(bodyValue) + if err != nil { + return nil, fmt.Errorf("failed to marshal request body: %w", err) + } + bodyReader = bytes.NewReader(jsonBody) + } + } + } + + // Create HTTP request + req, err := http.NewRequestWithContext(wfCtx.Ctx, strings.ToUpper(task.With.Method), urlStr, bodyReader) + if err != nil { + return nil, fmt.Errorf("failed to create HTTP request: %w", err) + } + + // Set headers + for key, value := range task.With.Headers { + evaluatedValue, err := expr.TraverseAndEvaluate(value, input, wfCtx.Ctx) + if err != nil { + req.Header.Set(key, value) + } else if strVal, ok := evaluatedValue.(string); ok { + req.Header.Set(key, strVal) + } else { + req.Header.Set(key, value) + } + } + + // Set Content-Type if not 
already set and we have a body + if bodyReader != nil && req.Header.Get("Content-Type") == "" { + req.Header.Set("Content-Type", "application/json") + } + + // Execute request + resp, err := r.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("HTTP request failed: %w", err) + } + defer resp.Body.Close() + + // Read response body + respBody, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed to read response body: %w", err) + } + + // Parse response as JSON if possible + var content any + if err := json.Unmarshal(respBody, &content); err != nil { + content = string(respBody) + } + + // Build response object based on output mode + httpResponse := map[string]any{ + "statusCode": resp.StatusCode, + "status": resp.Status, + "headers": headerToMap(resp.Header), + "body": string(respBody), + "content": content, + } + + // Determine output based on output mode + var output any + switch task.With.Output { + case "raw": + output = string(respBody) + case "content": + output = content + case "response": + output = httpResponse + default: + // Default is content + output = content + } + + // Copy input to result + result := make(map[string]any) + for k, v := range input { + result[k] = v + } + + // Store HTTP response in result + result["response"] = httpResponse + result["content"] = content + + // Apply export if specified + if task.Export != nil && task.Export.As != nil { + exportedResult, err := r.applyExport(wfCtx.Ctx, task.Export, result, output) + if err != nil { + return nil, fmt.Errorf("failed to apply export for task %s: %w", taskName, err) + } + return exportedResult, nil + } + + return result, nil +} + +// executeTryTask handles try/catch blocks with retry support. +func (r *Runner) executeTryTask(wfCtx *WorkflowContext, taskName string, task *model.TryTask, input map[string]any) (map[string]any, error) { + r.log.Debugf(wfCtx.Ctx, "Executing try task: %s", taskName) + + if task.Try == nil { + return input, nil + } + + var lastErr error + maxAttempts := 1 + + // Determine retry configuration + if task.Catch != nil && task.Catch.Retry != nil && task.Catch.Retry.Limit.Attempt != nil { + maxAttempts = task.Catch.Retry.Limit.Attempt.Count + if maxAttempts < 1 { + maxAttempts = 1 + } + } + + currentOutput := input + + for attempt := 1; attempt <= maxAttempts; attempt++ { + if attempt > 1 { + r.log.Debugf(wfCtx.Ctx, "Try task %s: retry attempt %d/%d", taskName, attempt, maxAttempts) + + // Apply backoff delay if configured + if task.Catch != nil && task.Catch.Retry != nil && task.Catch.Retry.Backoff != nil { + delay := r.calculateBackoffDelay(task.Catch.Retry.Backoff, attempt) + if delay > 0 { + select { + case <-time.After(delay): + case <-wfCtx.Ctx.Done(): + return nil, wfCtx.Ctx.Err() + } + } + } + } + + // Execute try block + var tryErr error + tryOutput := currentOutput + for _, tryTask := range *task.Try { + output, err := r.executeTask(wfCtx, tryTask, tryOutput) + if err != nil { + tryErr = err + break + } + if output != nil { + tryOutput = output + } + } + + if tryErr == nil { + // Success + return tryOutput, nil + } + + lastErr = tryErr + r.log.Debugf(wfCtx.Ctx, "Try task %s: attempt %d failed: %v", taskName, attempt, tryErr) + } + + // All retries exhausted, execute catch.do if present + if task.Catch != nil && task.Catch.Do != nil { + r.log.Debugf(wfCtx.Ctx, "Try task %s: executing catch block", taskName) + + // Set error information in context + catchInput := make(map[string]any) + for k, v := range currentOutput { + catchInput[k] = v + } + if 
task.Catch.As != "" { + catchInput[task.Catch.As] = map[string]any{ + "message": lastErr.Error(), + } + } + + for _, catchTask := range *task.Catch.Do { + output, err := r.executeTask(wfCtx, catchTask, catchInput) + if err != nil { + return nil, fmt.Errorf("catch block failed: %w", err) + } + if output != nil { + catchInput = output + } + } + return catchInput, nil + } + + return nil, fmt.Errorf("try task %s failed after %d attempts: %w", taskName, maxAttempts, lastErr) +} + +// calculateBackoffDelay calculates the delay for retry backoff. +func (r *Runner) calculateBackoffDelay(backoff *model.RetryBackoff, attempt int) time.Duration { + baseDelay := 1 * time.Second + + if backoff.Constant != nil { + return baseDelay + } + + if backoff.Linear != nil { + return baseDelay * time.Duration(attempt) + } + + if backoff.Exponential != nil { + // Exponential backoff: base * 2^(attempt-1) + return baseDelay * time.Duration(1< 0 { + result[k] = v[0] + } + } + return result +} + +// GetWorkflow returns the workflow definition. +func (r *Runner) GetWorkflow() *model.Workflow { + return r.workflow +} + +// RunnerBuilder provides a fluent interface for building a Runner. +type RunnerBuilder struct { + config *RunnerConfig +} + +// NewRunnerBuilder creates a new builder. +func NewRunnerBuilder() *RunnerBuilder { + return &RunnerBuilder{ + config: &RunnerConfig{}, + } +} + +// WithWorkflow sets the workflow definition. +func (b *RunnerBuilder) WithWorkflow(workflow *model.Workflow) *RunnerBuilder { + b.config.Workflow = workflow + return b +} + +// WithTaskRegistry sets a custom task registry. +func (b *RunnerBuilder) WithTaskRegistry(registry *tasks.Registry) *RunnerBuilder { + b.config.TaskRegistry = registry + return b +} + +// WithK8sClient sets the Kubernetes client. +func (b *RunnerBuilder) WithK8sClient(client k8s_client.K8sClient) *RunnerBuilder { + b.config.K8sClient = client + return b +} + +// WithAPIClient sets the HyperFleet API client. +func (b *RunnerBuilder) WithAPIClient(client hyperfleet_api.Client) *RunnerBuilder { + b.config.APIClient = client + return b +} + +// WithLogger sets the logger. +func (b *RunnerBuilder) WithLogger(log logger.Logger) *RunnerBuilder { + b.config.Logger = log + return b +} + +// Build creates the Runner. 
+func (b *RunnerBuilder) Build() (*Runner, error) { + return NewRunner(b.config) +} diff --git a/internal/swf/runner/runner_test.go b/internal/swf/runner/runner_test.go new file mode 100644 index 0000000..53af316 --- /dev/null +++ b/internal/swf/runner/runner_test.go @@ -0,0 +1,328 @@ +package runner + +import ( + "context" + "testing" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/swf/tasks" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" + "github.com/serverlessworkflow/sdk-go/v3/model" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewRunner(t *testing.T) { + log := logger.NewTestLogger() + + tests := []struct { + name string + config *RunnerConfig + expectError bool + errContains string + }{ + { + name: "nil config", + config: nil, + expectError: true, + errContains: "config is required", + }, + { + name: "missing workflow", + config: &RunnerConfig{ + Logger: log, + }, + expectError: true, + errContains: "workflow is required", + }, + { + name: "missing logger", + config: &RunnerConfig{ + Workflow: &model.Workflow{}, + }, + expectError: true, + errContains: "logger is required", + }, + { + name: "valid config - minimal", + config: &RunnerConfig{ + Workflow: &model.Workflow{}, + Logger: log, + }, + expectError: false, + }, + { + name: "valid config - with custom registry", + config: &RunnerConfig{ + Workflow: &model.Workflow{}, + Logger: log, + TaskRegistry: tasks.NewRegistry(), + }, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + runner, err := NewRunner(tt.config) + + if tt.expectError { + assert.Error(t, err) + if tt.errContains != "" { + assert.Contains(t, err.Error(), tt.errContains) + } + return + } + + require.NoError(t, err) + require.NotNil(t, runner) + }) + } +} + +func TestRunner_GetWorkflow(t *testing.T) { + log := logger.NewTestLogger() + workflow := &model.Workflow{ + Document: model.Document{ + Name: "test-workflow", + }, + } + + runner, err := NewRunner(&RunnerConfig{ + Workflow: workflow, + Logger: log, + }) + require.NoError(t, err) + + got := runner.GetWorkflow() + assert.Equal(t, workflow, got) + assert.Equal(t, "test-workflow", got.Document.Name) +} + +func TestRunner_Run_EmptyWorkflow(t *testing.T) { + log := logger.NewTestLogger() + workflow := &model.Workflow{} + + runner, err := NewRunner(&RunnerConfig{ + Workflow: workflow, + Logger: log, + }) + require.NoError(t, err) + + result, err := runner.Run(context.Background(), map[string]any{}) + require.NoError(t, err) + require.NotNil(t, result) + assert.Equal(t, StatusSuccess, result.Status) +} + +func TestRunner_Run_WithEventData(t *testing.T) { + log := logger.NewTestLogger() + workflow := &model.Workflow{ + Do: &model.TaskList{}, + } + + runner, err := NewRunner(&RunnerConfig{ + Workflow: workflow, + Logger: log, + }) + require.NoError(t, err) + + input := map[string]any{ + "event": map[string]any{ + "id": "cluster-123", + "type": "cluster.created", + }, + } + + result, err := runner.Run(context.Background(), input) + require.NoError(t, err) + require.NotNil(t, result) + assert.Equal(t, StatusSuccess, result.Status) +} + +func TestRunner_Run_SetTask(t *testing.T) { + log := logger.NewTestLogger() + + // Create a workflow with a Set task + taskList := model.TaskList{ + { + Key: "set_values", + Task: &model.SetTask{ + Set: map[string]any{ + "clusterId": "test-123", + "status": "ready", + }, + }, + }, + } + + workflow := &model.Workflow{ + Do: &taskList, + } + + runner, err := 
NewRunner(&RunnerConfig{ + Workflow: workflow, + Logger: log, + }) + require.NoError(t, err) + + result, err := runner.Run(context.Background(), map[string]any{}) + require.NoError(t, err) + require.NotNil(t, result) + assert.Equal(t, StatusSuccess, result.Status) + + // Check the params were set + assert.Equal(t, "test-123", result.Params["clusterId"]) + assert.Equal(t, "ready", result.Params["status"]) +} + +func TestRunner_Run_DoTask(t *testing.T) { + log := logger.NewTestLogger() + + // Create a workflow with a Do task containing nested tasks + nestedTasks := model.TaskList{ + { + Key: "inner_set", + Task: &model.SetTask{ + Set: map[string]any{ + "nestedValue": "from-nested", + }, + }, + }, + } + + taskList := model.TaskList{ + { + Key: "outer_do", + Task: &model.DoTask{ + Do: &nestedTasks, + }, + }, + } + + workflow := &model.Workflow{ + Do: &taskList, + } + + runner, err := NewRunner(&RunnerConfig{ + Workflow: workflow, + Logger: log, + }) + require.NoError(t, err) + + result, err := runner.Run(context.Background(), map[string]any{}) + require.NoError(t, err) + require.NotNil(t, result) + assert.Equal(t, StatusSuccess, result.Status) + assert.Equal(t, "from-nested", result.Params["nestedValue"]) +} + +func TestRunner_Run_CustomTask(t *testing.T) { + log := logger.NewTestLogger() + + // Create a registry with a test task + registry := tasks.NewRegistry() + err := registry.Register("hf:test", func(deps *tasks.Dependencies) (tasks.TaskRunner, error) { + return &testTaskRunner{}, nil + }) + require.NoError(t, err) + + // Create a workflow with a custom HyperFleet task + taskList := model.TaskList{ + { + Key: "custom_task", + Task: &model.CallFunction{ + Call: "hf:test", + With: map[string]any{ + "arg1": "value1", + }, + }, + }, + } + + workflow := &model.Workflow{ + Do: &taskList, + } + + runner, err := NewRunner(&RunnerConfig{ + Workflow: workflow, + TaskRegistry: registry, + Logger: log, + }) + require.NoError(t, err) + + result, err := runner.Run(context.Background(), map[string]any{}) + require.NoError(t, err) + require.NotNil(t, result) + assert.Equal(t, StatusSuccess, result.Status) +} + +func TestRunner_Run_CustomTaskNotFound(t *testing.T) { + log := logger.NewTestLogger() + + // Create a workflow with an unregistered custom task + taskList := model.TaskList{ + { + Key: "unknown_task", + Task: &model.CallFunction{ + Call: "hf:unknown", + With: map[string]any{}, + }, + }, + } + + workflow := &model.Workflow{ + Do: &taskList, + } + + // Use empty registry + registry := tasks.NewRegistry() + + runner, err := NewRunner(&RunnerConfig{ + Workflow: workflow, + TaskRegistry: registry, + Logger: log, + }) + require.NoError(t, err) + + result, err := runner.Run(context.Background(), map[string]any{}) + assert.Error(t, err) + assert.Contains(t, err.Error(), "no task runner registered") + assert.Nil(t, result) +} + +func TestRunnerBuilder(t *testing.T) { + log := logger.NewTestLogger() + workflow := &model.Workflow{} + registry := tasks.NewRegistry() + + runner, err := NewRunnerBuilder(). + WithWorkflow(workflow). + WithTaskRegistry(registry). + WithLogger(log). 
+ Build() + + require.NoError(t, err) + require.NotNil(t, runner) + assert.Equal(t, workflow, runner.GetWorkflow()) +} + +func TestRunnerBuilder_MissingRequired(t *testing.T) { + _, err := NewRunnerBuilder().Build() + assert.Error(t, err) +} + +// testTaskRunner is a simple task runner for testing +type testTaskRunner struct{} + +func (r *testTaskRunner) Name() string { + return "hf:test" +} + +func (r *testTaskRunner) Run(ctx context.Context, args map[string]any, input map[string]any) (map[string]any, error) { + output := make(map[string]any) + for k, v := range input { + output[k] = v + } + output["testRan"] = true + return output, nil +} diff --git a/internal/swf/tasks/base.go b/internal/swf/tasks/base.go new file mode 100644 index 0000000..f7dd3c6 --- /dev/null +++ b/internal/swf/tasks/base.go @@ -0,0 +1,68 @@ +// Package tasks provides custom task runners for HyperFleet workflows. +// These task runners extend the Serverless Workflow SDK with domain-specific +// operations for Kubernetes resource management, HyperFleet API calls, +// CEL expression evaluation, and more. +package tasks + +import ( + "context" +) + +// TaskRunner defines the interface for custom HyperFleet task runners. +// Each task runner handles a specific type of operation (e.g., hf:extract, hf:k8s). +type TaskRunner interface { + // Name returns the task type identifier (e.g., "hf:extract", "hf:k8s") + Name() string + + // Run executes the task with the given arguments and input data. + // The args map contains task-specific configuration from the workflow definition. + // The input map contains the current workflow data/context. + // Returns the task output and any error that occurred. + Run(ctx context.Context, args map[string]any, input map[string]any) (map[string]any, error) +} + +// TaskRunnerFactory creates a TaskRunner with the given dependencies. +// This allows task runners to be constructed with required services +// (e.g., K8s client, API client) without hardcoding dependencies. +type TaskRunnerFactory func(deps *Dependencies) (TaskRunner, error) + +// Dependencies contains shared services and clients used by task runners. +// These are injected when building the workflow runner. +type Dependencies struct { + // K8sClient is the Kubernetes client for resource operations + K8sClient any // Will be typed as k8s_client.K8sClient + + // APIClient is the HyperFleet API client for HTTP calls + APIClient any // Will be typed as hyperfleet_api.Client + + // Logger is the logging interface + Logger any // Will be typed as logger.Logger +} + +// TaskResult represents the outcome of a task execution. +type TaskResult struct { + // Output contains the task's output data + Output map[string]any + + // Error contains any error message if the task failed + Error string + + // Metadata contains additional information about the execution + Metadata map[string]any +} + +// NewTaskResult creates a successful task result with the given output. +func NewTaskResult(output map[string]any) *TaskResult { + return &TaskResult{ + Output: output, + Metadata: make(map[string]any), + } +} + +// NewTaskError creates a failed task result with the given error. 
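+//
+// For example, a task runner might return NewTaskError(err) when a required
+// argument is missing and NewTaskResult(out) on success.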
+func NewTaskError(err error) *TaskResult { + return &TaskResult{ + Output: make(map[string]any), + Error: err.Error(), + } +} diff --git a/internal/swf/tasks/cel.go b/internal/swf/tasks/cel.go new file mode 100644 index 0000000..efdd7d8 --- /dev/null +++ b/internal/swf/tasks/cel.go @@ -0,0 +1,227 @@ +package tasks + +import ( + "context" + "fmt" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/criteria" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" +) + +// CELTaskRunner implements the hf:cel task for CEL expression evaluation. +// It evaluates CEL expressions against the workflow context. +type CELTaskRunner struct { + log logger.Logger +} + +// NewCELTaskRunner creates a new CEL task runner. +func NewCELTaskRunner(deps *Dependencies) (TaskRunner, error) { + var log logger.Logger + if deps != nil && deps.Logger != nil { + var ok bool + log, ok = deps.Logger.(logger.Logger) + if !ok { + log = &noopLogger{} + } + } else { + log = &noopLogger{} + } + + return &CELTaskRunner{log: log}, nil +} + +func (r *CELTaskRunner) Name() string { + return TaskCEL +} + +// Run executes the CEL expression evaluation task. +// Args should contain: +// - expression: The CEL expression to evaluate +// - variables: Optional map of additional variables for evaluation +// +// Returns a map with: +// - value: The evaluation result +// - valueType: The CEL type of the result +// - matched: Boolean indicating if the result is truthy +// - error: Error message if evaluation failed +func (r *CELTaskRunner) Run(ctx context.Context, args map[string]any, input map[string]any) (map[string]any, error) { + expression, ok := args["expression"].(string) + if !ok || expression == "" { + return nil, fmt.Errorf("expression is required for hf:cel task") + } + + // Build evaluation context from input and additional variables + evalCtx := criteria.NewEvaluationContext() + + // Add input data + for k, v := range input { + evalCtx.Set(k, v) + } + + // Add explicit variables if provided + if vars, ok := args["variables"].(map[string]any); ok { + for k, v := range vars { + evalCtx.Set(k, v) + } + } + + // Create evaluator using the public API + evaluator, err := criteria.NewEvaluator(ctx, evalCtx, r.log) + if err != nil { + return nil, fmt.Errorf("failed to create evaluator: %w", err) + } + + // Evaluate the expression + result, err := evaluator.EvaluateCEL(expression) + if err != nil { + return nil, fmt.Errorf("CEL parse error: %w", err) + } + + output := map[string]any{ + "value": result.Value, + "valueType": result.ValueType, + "matched": result.Matched, + "expression": result.Expression, + } + + if result.Error != nil { + output["error"] = result.Error.Error() + } + + // Merge with input for workflow continuation + for k, v := range input { + if _, exists := output[k]; !exists { + output[k] = v + } + } + + return output, nil +} + +// CELConditionRunner implements the hf:cel task variant for condition evaluation. +// It evaluates structured conditions against the workflow context. +type CELConditionRunner struct { + log logger.Logger +} + +// NewCELConditionRunner creates a new condition runner. 
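+//
+// Example args accepted by Run (sketch):
+//
+//	map[string]any{
+//	    "conditions": []any{
+//	        map[string]any{"field": "status", "operator": "equals", "value": "active"},
+//	    },
+//	}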
+func NewCELConditionRunner(deps *Dependencies) (TaskRunner, error) { + var log logger.Logger + if deps != nil && deps.Logger != nil { + var ok bool + log, ok = deps.Logger.(logger.Logger) + if !ok { + log = &noopLogger{} + } + } else { + log = &noopLogger{} + } + + return &CELConditionRunner{log: log}, nil +} + +func (r *CELConditionRunner) Name() string { + return "hf:condition" +} + +// Run evaluates conditions against the workflow context. +// Args should contain: +// - conditions: Array of condition objects with field, operator, value +// +// Returns a map with: +// - matched: Boolean indicating if all conditions matched +// - results: Array of individual condition results +func (r *CELConditionRunner) Run(ctx context.Context, args map[string]any, input map[string]any) (map[string]any, error) { + conditionsRaw, ok := args["conditions"].([]any) + if !ok || len(conditionsRaw) == 0 { + // No conditions means matched + return map[string]any{ + "matched": true, + "results": []any{}, + }, nil + } + + // Build evaluation context + evalCtx := criteria.NewEvaluationContext() + for k, v := range input { + evalCtx.Set(k, v) + } + + // Parse conditions + var conditions []criteria.ConditionDef + for _, c := range conditionsRaw { + condMap, ok := c.(map[string]any) + if !ok { + continue + } + + field, _ := condMap["field"].(string) + operator, _ := condMap["operator"].(string) + value := condMap["value"] + + conditions = append(conditions, criteria.ConditionDef{ + Field: field, + Operator: criteria.Operator(operator), + Value: value, + }) + } + + // Create evaluator + evaluator, err := criteria.NewEvaluator(ctx, evalCtx, r.log) + if err != nil { + return nil, fmt.Errorf("failed to create evaluator: %w", err) + } + + // Evaluate conditions + result, err := evaluator.EvaluateConditions(conditions) + if err != nil { + return nil, fmt.Errorf("condition evaluation failed: %w", err) + } + + // Build results array + results := make([]any, 0, len(result.Results)) + for _, r := range result.Results { + results = append(results, map[string]any{ + "field": r.Field, + "operator": string(r.Operator), + "expectedValue": r.ExpectedValue, + "fieldValue": r.FieldValue, + "matched": r.Matched, + }) + } + + output := map[string]any{ + "matched": result.Matched, + "results": results, + } + + // Merge with input + for k, v := range input { + if _, exists := output[k]; !exists { + output[k] = v + } + } + + return output, nil +} + +// noopLogger is a minimal logger for use when real logging is not available. 
+type noopLogger struct{} + +func (l *noopLogger) Debug(ctx context.Context, msg string) {} +func (l *noopLogger) Debugf(ctx context.Context, format string, args ...any) {} +func (l *noopLogger) Info(ctx context.Context, msg string) {} +func (l *noopLogger) Infof(ctx context.Context, format string, args ...any) {} +func (l *noopLogger) Warn(ctx context.Context, msg string) {} +func (l *noopLogger) Warnf(ctx context.Context, format string, args ...any) {} +func (l *noopLogger) Error(ctx context.Context, msg string) {} +func (l *noopLogger) Errorf(ctx context.Context, format string, args ...any) {} +func (l *noopLogger) Fatal(ctx context.Context, msg string) {} +func (l *noopLogger) With(key string, value any) logger.Logger { return l } +func (l *noopLogger) WithFields(fields map[string]any) logger.Logger { return l } +func (l *noopLogger) Without(key string) logger.Logger { return l } + +func init() { + // Register the CEL task runner in the default registry + _ = RegisterDefault(TaskCEL, NewCELTaskRunner) +} diff --git a/internal/swf/tasks/cel_test.go b/internal/swf/tasks/cel_test.go new file mode 100644 index 0000000..301134f --- /dev/null +++ b/internal/swf/tasks/cel_test.go @@ -0,0 +1,375 @@ +package tasks + +import ( + "context" + "testing" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewCELTaskRunner(t *testing.T) { + runner, err := NewCELTaskRunner(nil) + require.NoError(t, err) + require.NotNil(t, runner) + assert.Equal(t, TaskCEL, runner.Name()) +} + +func TestNewCELTaskRunner_WithLogger(t *testing.T) { + deps := &Dependencies{ + Logger: logger.NewTestLogger(), + } + runner, err := NewCELTaskRunner(deps) + require.NoError(t, err) + require.NotNil(t, runner) +} + +func TestCELTaskRunner_Run(t *testing.T) { + runner, _ := NewCELTaskRunner(nil) + ctx := context.Background() + + tests := []struct { + name string + args map[string]any + input map[string]any + expectError bool + expectedMatch bool + expectedValue any + checkValueType bool + }{ + { + name: "missing expression", + args: map[string]any{}, + input: map[string]any{}, + expectError: true, + }, + { + name: "empty expression", + args: map[string]any{ + "expression": "", + }, + input: map[string]any{}, + expectError: true, + }, + { + name: "simple true expression", + args: map[string]any{ + "expression": "true", + }, + input: map[string]any{}, + expectedMatch: true, + expectedValue: true, + }, + { + name: "simple false expression", + args: map[string]any{ + "expression": "false", + }, + input: map[string]any{}, + expectedMatch: false, + expectedValue: false, + }, + { + name: "comparison expression", + args: map[string]any{ + "expression": "status == \"active\"", + }, + input: map[string]any{ + "status": "active", + }, + expectedMatch: true, + expectedValue: true, + }, + { + name: "comparison expression - false", + args: map[string]any{ + "expression": "status == \"active\"", + }, + input: map[string]any{ + "status": "inactive", + }, + expectedMatch: false, + expectedValue: false, + }, + { + name: "arithmetic expression", + args: map[string]any{ + "expression": "count + 10", + }, + input: map[string]any{ + "count": int64(5), + }, + expectedMatch: true, // Non-zero is truthy + expectedValue: int64(15), + }, + { + name: "string expression", + args: map[string]any{ + "expression": "name + \" World\"", + }, + input: map[string]any{ + "name": "Hello", + }, + expectedMatch: true, // Non-empty string is truthy + 
expectedValue: "Hello World", + }, + { + name: "nested field access", + args: map[string]any{ + "expression": "cluster.status == \"ready\"", + }, + input: map[string]any{ + "cluster": map[string]any{ + "status": "ready", + }, + }, + expectedMatch: true, + expectedValue: true, + }, + { + name: "with additional variables", + args: map[string]any{ + "expression": "a + b", + "variables": map[string]any{ + "a": int64(10), + "b": int64(20), + }, + }, + input: map[string]any{}, + expectedMatch: true, + expectedValue: int64(30), + }, + { + name: "variables override input", + args: map[string]any{ + "expression": "value", + "variables": map[string]any{ + "value": "from-variables", + }, + }, + input: map[string]any{ + "value": "from-input", + }, + expectedMatch: true, + expectedValue: "from-variables", + }, + { + name: "list contains check", + args: map[string]any{ + "expression": "\"a\" in items", + }, + input: map[string]any{ + "items": []any{"a", "b", "c"}, + }, + expectedMatch: true, + expectedValue: true, + }, + { + name: "size function", + args: map[string]any{ + "expression": "size(items) > 0", + }, + input: map[string]any{ + "items": []any{"a", "b"}, + }, + expectedMatch: true, + expectedValue: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + output, err := runner.Run(ctx, tt.args, tt.input) + + if tt.expectError { + assert.Error(t, err) + return + } + + require.NoError(t, err) + require.NotNil(t, output) + + assert.Equal(t, tt.expectedMatch, output["matched"], "matched mismatch") + assert.Equal(t, tt.expectedValue, output["value"], "value mismatch") + }) + } +} + +func TestCELTaskRunner_PreservesInput(t *testing.T) { + runner, _ := NewCELTaskRunner(nil) + ctx := context.Background() + + input := map[string]any{ + "existingKey": "existingValue", + "count": int64(42), + } + + args := map[string]any{ + "expression": "count * 2", + } + + output, err := runner.Run(ctx, args, input) + require.NoError(t, err) + + // Original input should be preserved + assert.Equal(t, "existingValue", output["existingKey"]) + assert.Equal(t, int64(42), output["count"]) + + // CEL results should be added + assert.NotNil(t, output["value"]) + assert.NotNil(t, output["matched"]) +} + +func TestNewCELConditionRunner(t *testing.T) { + runner, err := NewCELConditionRunner(nil) + require.NoError(t, err) + require.NotNil(t, runner) + assert.Equal(t, "hf:condition", runner.Name()) +} + +func TestCELConditionRunner_Run(t *testing.T) { + runner, _ := NewCELConditionRunner(nil) + ctx := context.Background() + + tests := []struct { + name string + args map[string]any + input map[string]any + expectError bool + expectedMatch bool + }{ + { + name: "no conditions - matches", + args: map[string]any{}, + input: map[string]any{}, + expectedMatch: true, + }, + { + name: "empty conditions - matches", + args: map[string]any{ + "conditions": []any{}, + }, + input: map[string]any{}, + expectedMatch: true, + }, + { + name: "single equals condition - matches", + args: map[string]any{ + "conditions": []any{ + map[string]any{ + "field": "status", + "operator": "equals", + "value": "active", + }, + }, + }, + input: map[string]any{ + "status": "active", + }, + expectedMatch: true, + }, + { + name: "single equals condition - no match", + args: map[string]any{ + "conditions": []any{ + map[string]any{ + "field": "status", + "operator": "equals", + "value": "active", + }, + }, + }, + input: map[string]any{ + "status": "inactive", + }, + expectedMatch: false, + }, + { + name: "multiple conditions - all match", + 
args: map[string]any{ + "conditions": []any{ + map[string]any{ + "field": "status", + "operator": "equals", + "value": "active", + }, + map[string]any{ + "field": "count", + "operator": "greaterThan", + "value": int64(0), + }, + }, + }, + input: map[string]any{ + "status": "active", + "count": int64(5), + }, + expectedMatch: true, + }, + { + name: "multiple conditions - one fails", + args: map[string]any{ + "conditions": []any{ + map[string]any{ + "field": "status", + "operator": "equals", + "value": "active", + }, + map[string]any{ + "field": "count", + "operator": "greaterThan", + "value": int64(10), + }, + }, + }, + input: map[string]any{ + "status": "active", + "count": int64(5), + }, + expectedMatch: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + output, err := runner.Run(ctx, tt.args, tt.input) + + if tt.expectError { + assert.Error(t, err) + return + } + + require.NoError(t, err) + require.NotNil(t, output) + + assert.Equal(t, tt.expectedMatch, output["matched"]) + }) + } +} + +func TestNoopLogger(t *testing.T) { + // Test that noopLogger implements all required methods without panicking + l := &noopLogger{} + ctx := context.Background() + + assert.NotPanics(t, func() { + l.Debug(ctx, "test") + l.Debugf(ctx, "test %s", "arg") + l.Info(ctx, "test") + l.Infof(ctx, "test %s", "arg") + l.Warn(ctx, "test") + l.Warnf(ctx, "test %s", "arg") + l.Error(ctx, "test") + l.Errorf(ctx, "test %s", "arg") + l.Fatal(ctx, "test") + _ = l.With("key", "value") + _ = l.WithFields(map[string]any{"key": "value"}) + _ = l.Without("key") + }) + + // With methods should return the same logger + assert.Equal(t, l, l.With("key", "value")) + assert.Equal(t, l, l.WithFields(map[string]any{})) + assert.Equal(t, l, l.Without("key")) +} diff --git a/internal/swf/tasks/extract.go b/internal/swf/tasks/extract.go new file mode 100644 index 0000000..c04e11e --- /dev/null +++ b/internal/swf/tasks/extract.go @@ -0,0 +1,332 @@ +package tasks + +import ( + "context" + "fmt" + "math" + "os" + "strconv" + "strings" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/k8s_client" +) + +// ExtractTaskRunner implements the hf:extract task for parameter extraction. +// It extracts parameters from event data, environment variables, secrets, and configmaps. +type ExtractTaskRunner struct { + k8sClient k8s_client.K8sClient +} + +// NewExtractTaskRunner creates a new extract task runner. +func NewExtractTaskRunner(deps *Dependencies) (TaskRunner, error) { + var k8sClient k8s_client.K8sClient + if deps != nil && deps.K8sClient != nil { + var ok bool + k8sClient, ok = deps.K8sClient.(k8s_client.K8sClient) + if !ok { + return nil, fmt.Errorf("invalid K8sClient type") + } + } + + return &ExtractTaskRunner{ + k8sClient: k8sClient, + }, nil +} + +func (r *ExtractTaskRunner) Name() string { + return TaskExtract +} + +// Run executes the parameter extraction task. +// Args should contain a "sources" array with parameter definitions. +// Returns extracted parameters as a map. 
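+//
+// A sketch of the expected args shape (the field names follow the parameter
+// handling below; the concrete values are illustrative):
+//
+//	sources:
+//	  - name: clusterId
+//	    source: event.id
+//	    type: string
+//	    required: true
+//	  - name: region
+//	    source: env.REGION
+//	    default: us-east-1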
+func (r *ExtractTaskRunner) Run(ctx context.Context, args map[string]any, input map[string]any) (map[string]any, error) { + // Get event data from input + eventData, _ := input["event"].(map[string]any) + if eventData == nil { + eventData = make(map[string]any) + } + + // Get parameter sources from args + sources, ok := args["sources"].([]any) + if !ok { + // Try getting from config.spec.params path + if config, ok := args["config"].(map[string]any); ok { + if spec, ok := config["spec"].(map[string]any); ok { + sources, _ = spec["params"].([]any) + } + } + } + + params := make(map[string]any) + + for _, src := range sources { + paramDef, ok := src.(map[string]any) + if !ok { + continue + } + + name, _ := paramDef["name"].(string) + source, _ := paramDef["source"].(string) + paramType, _ := paramDef["type"].(string) + required, _ := paramDef["required"].(bool) + defaultVal := paramDef["default"] + + if name == "" { + continue + } + + // Extract the parameter value + value, err := r.extractParam(ctx, source, eventData) + if err != nil { + if required { + return nil, fmt.Errorf("failed to extract required parameter '%s' from source '%s': %w", name, source, err) + } + // Use default for non-required params if extraction fails + if defaultVal != nil { + params[name] = defaultVal + } + continue + } + + // Apply default if value is nil or empty string + isEmpty := value == nil + if s, ok := value.(string); ok && s == "" { + isEmpty = true + } + if isEmpty && defaultVal != nil { + value = defaultVal + } + + // Apply type conversion if specified + if value != nil && paramType != "" { + converted, convErr := convertParamType(value, paramType) + if convErr != nil { + if required { + return nil, fmt.Errorf("failed to convert parameter '%s' to type '%s': %w", name, paramType, convErr) + } + // Use default for non-required params if conversion fails + if defaultVal != nil { + params[name] = defaultVal + } + continue + } + value = converted + } + + if value != nil { + params[name] = value + } + } + + // Add metadata if available in args + if metadata, ok := args["metadata"].(map[string]any); ok { + params["metadata"] = metadata + } + + // Return merged output with extracted params + output := make(map[string]any) + for k, v := range input { + output[k] = v + } + output["params"] = params + + return output, nil +} + +// extractParam extracts a single parameter based on its source. 
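+// Recognized source prefixes, per the dispatch below:
+//
+//	env.VAR_NAME          -> environment variable lookup
+//	event.path.to.field   -> dot-notation lookup in the event payload
+//	secret.<ref>          -> Kubernetes Secret (requires a configured client)
+//	configmap.<ref>       -> Kubernetes ConfigMap (requires a configured client)
+//
+// Anything else is treated as a direct path into the event data. The
+// "namespace/name.key" ref form is an assumption carried over from the
+// hf:k8s-read task documentation elsewhere in this change.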
+func (r *ExtractTaskRunner) extractParam(ctx context.Context, source string, eventData map[string]any) (any, error) { + switch { + case strings.HasPrefix(source, "env."): + return extractFromEnv(source[4:]) + case strings.HasPrefix(source, "event."): + return extractFromEvent(source[6:], eventData) + case strings.HasPrefix(source, "secret."): + return r.extractFromSecret(ctx, source[7:]) + case strings.HasPrefix(source, "configmap."): + return r.extractFromConfigMap(ctx, source[10:]) + case source == "": + return nil, nil + default: + // Try to extract from event data directly + return extractFromEvent(source, eventData) + } +} + +func (r *ExtractTaskRunner) extractFromSecret(ctx context.Context, path string) (any, error) { + if r.k8sClient == nil { + return nil, fmt.Errorf("kubernetes client not configured, cannot extract from secret") + } + return r.k8sClient.ExtractFromSecret(ctx, path) +} + +func (r *ExtractTaskRunner) extractFromConfigMap(ctx context.Context, path string) (any, error) { + if r.k8sClient == nil { + return nil, fmt.Errorf("kubernetes client not configured, cannot extract from configmap") + } + return r.k8sClient.ExtractFromConfigMap(ctx, path) +} + +// extractFromEnv extracts a value from environment variables. +func extractFromEnv(envVar string) (any, error) { + value, exists := os.LookupEnv(envVar) + if !exists { + return nil, fmt.Errorf("environment variable %s not set", envVar) + } + return value, nil +} + +// extractFromEvent extracts a value from event data using dot notation. +func extractFromEvent(path string, eventData map[string]any) (any, error) { + parts := strings.Split(path, ".") + var current any = eventData + + for i, part := range parts { + switch v := current.(type) { + case map[string]any: + val, ok := v[part] + if !ok { + return nil, fmt.Errorf("field '%s' not found at path '%s'", part, strings.Join(parts[:i+1], ".")) + } + current = val + case map[any]any: + val, ok := v[part] + if !ok { + return nil, fmt.Errorf("field '%s' not found at path '%s'", part, strings.Join(parts[:i+1], ".")) + } + current = val + default: + return nil, fmt.Errorf("cannot access field '%s': parent is not a map (got %T)", part, current) + } + } + + return current, nil +} + +// convertParamType converts a value to the specified type. 
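+// Supported targets are "string", "int"/"int64", "float"/"float64", and
+// "bool". For example, convertParamType("42", "int") yields int64(42),
+// and convertParamType(1, "bool") yields true (see the helpers below).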
+func convertParamType(value any, targetType string) (any, error) { + switch targetType { + case "string": + return convertToString(value) + case "int", "int64": + return convertToInt64(value) + case "float", "float64": + return convertToFloat64(value) + case "bool": + return convertToBool(value) + default: + return nil, fmt.Errorf("unsupported type: %s", targetType) + } +} + +func convertToString(value any) (string, error) { + switch v := value.(type) { + case string: + return v, nil + case int, int8, int16, int32, int64: + return fmt.Sprintf("%d", v), nil + case uint, uint8, uint16, uint32, uint64: + return fmt.Sprintf("%d", v), nil + case float32: + return strconv.FormatFloat(float64(v), 'f', -1, 32), nil + case float64: + return strconv.FormatFloat(v, 'f', -1, 64), nil + case bool: + return strconv.FormatBool(v), nil + default: + return fmt.Sprintf("%v", v), nil + } +} + +func convertToInt64(value any) (int64, error) { + switch v := value.(type) { + case int: + return int64(v), nil + case int64: + return v, nil + case float64: + return int64(v), nil + case string: + if i, err := strconv.ParseInt(v, 10, 64); err == nil { + return i, nil + } + if f, err := strconv.ParseFloat(v, 64); err == nil { + return int64(f), nil + } + return 0, fmt.Errorf("cannot convert string '%s' to int", v) + case bool: + if v { + return 1, nil + } + return 0, nil + case uint64: + if v > math.MaxInt64 { + return 0, fmt.Errorf("uint64 value %d overflows int64", v) + } + return int64(v), nil + default: + return 0, fmt.Errorf("cannot convert %T to int", value) + } +} + +func convertToFloat64(value any) (float64, error) { + switch v := value.(type) { + case float64: + return v, nil + case float32: + return float64(v), nil + case int: + return float64(v), nil + case int64: + return float64(v), nil + case string: + f, err := strconv.ParseFloat(v, 64) + if err != nil { + return 0, fmt.Errorf("cannot convert string '%s' to float: %w", v, err) + } + return f, nil + case bool: + if v { + return 1.0, nil + } + return 0.0, nil + default: + return 0, fmt.Errorf("cannot convert %T to float", value) + } +} + +func convertToBool(value any) (bool, error) { + switch v := value.(type) { + case bool: + return v, nil + case string: + if v == "" { + return false, nil + } + b, err := strconv.ParseBool(v) + if err != nil { + lower := strings.ToLower(v) + switch lower { + case "yes", "y", "on", "1": + return true, nil + case "no", "n", "off", "0": + return false, nil + } + return false, fmt.Errorf("cannot convert string '%s' to bool", v) + } + return b, nil + case int: + return v != 0, nil + case int64: + return v != 0, nil + case float64: + return v != 0, nil + default: + return false, fmt.Errorf("cannot convert %T to bool", value) + } +} + +func init() { + // Register the extract task runner in the default registry + _ = RegisterDefault(TaskExtract, NewExtractTaskRunner) +} diff --git a/internal/swf/tasks/extract_test.go b/internal/swf/tasks/extract_test.go new file mode 100644 index 0000000..eadee10 --- /dev/null +++ b/internal/swf/tasks/extract_test.go @@ -0,0 +1,408 @@ +package tasks + +import ( + "context" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewExtractTaskRunner(t *testing.T) { + runner, err := NewExtractTaskRunner(nil) + require.NoError(t, err) + require.NotNil(t, runner) + assert.Equal(t, TaskExtract, runner.Name()) +} + +func TestExtractTaskRunner_Run(t *testing.T) { + runner, _ := NewExtractTaskRunner(nil) + ctx := context.Background() + + tests := 
[]struct { + name string + args map[string]any + input map[string]any + setup func() + cleanup func() + expectError bool + checkParams func(t *testing.T, output map[string]any) + }{ + { + name: "no sources", + args: map[string]any{}, + input: map[string]any{}, + checkParams: func(t *testing.T, output map[string]any) { + params, ok := output["params"].(map[string]any) + require.True(t, ok) + assert.Empty(t, params) + }, + }, + { + name: "extract from event using dot notation", + args: map[string]any{ + "sources": []any{ + map[string]any{ + "name": "clusterId", + "source": "event.id", + }, + }, + }, + input: map[string]any{ + "event": map[string]any{ + "id": "cluster-123", + }, + }, + checkParams: func(t *testing.T, output map[string]any) { + params, ok := output["params"].(map[string]any) + require.True(t, ok) + assert.Equal(t, "cluster-123", params["clusterId"]) + }, + }, + { + name: "extract from nested event field", + args: map[string]any{ + "sources": []any{ + map[string]any{ + "name": "status", + "source": "event.cluster.status", + }, + }, + }, + input: map[string]any{ + "event": map[string]any{ + "cluster": map[string]any{ + "status": "ready", + }, + }, + }, + checkParams: func(t *testing.T, output map[string]any) { + params, ok := output["params"].(map[string]any) + require.True(t, ok) + assert.Equal(t, "ready", params["status"]) + }, + }, + { + name: "extract from env", + args: map[string]any{ + "sources": []any{ + map[string]any{ + "name": "testEnvVar", + "source": "env.TEST_EXTRACT_VAR", + }, + }, + }, + input: map[string]any{}, + setup: func() { + os.Setenv("TEST_EXTRACT_VAR", "env-value") + }, + cleanup: func() { + os.Unsetenv("TEST_EXTRACT_VAR") + }, + checkParams: func(t *testing.T, output map[string]any) { + params, ok := output["params"].(map[string]any) + require.True(t, ok) + assert.Equal(t, "env-value", params["testEnvVar"]) + }, + }, + { + name: "required field missing - uses default", + args: map[string]any{ + "sources": []any{ + map[string]any{ + "name": "missing", + "source": "event.nonexistent", + "required": false, + "default": "default-value", + }, + }, + }, + input: map[string]any{ + "event": map[string]any{}, + }, + checkParams: func(t *testing.T, output map[string]any) { + params, ok := output["params"].(map[string]any) + require.True(t, ok) + assert.Equal(t, "default-value", params["missing"]) + }, + }, + { + name: "multiple sources", + args: map[string]any{ + "sources": []any{ + map[string]any{ + "name": "id", + "source": "event.id", + }, + map[string]any{ + "name": "kind", + "source": "event.kind", + }, + }, + }, + input: map[string]any{ + "event": map[string]any{ + "id": "resource-123", + "kind": "Cluster", + }, + }, + checkParams: func(t *testing.T, output map[string]any) { + params, ok := output["params"].(map[string]any) + require.True(t, ok) + assert.Equal(t, "resource-123", params["id"]) + assert.Equal(t, "Cluster", params["kind"]) + }, + }, + { + name: "direct field access without prefix", + args: map[string]any{ + "sources": []any{ + map[string]any{ + "name": "directId", + "source": "id", + }, + }, + }, + input: map[string]any{ + "event": map[string]any{ + "id": "direct-value", + }, + }, + checkParams: func(t *testing.T, output map[string]any) { + params, ok := output["params"].(map[string]any) + require.True(t, ok) + assert.Equal(t, "direct-value", params["directId"]) + }, + }, + { + name: "type conversion to int", + args: map[string]any{ + "sources": []any{ + map[string]any{ + "name": "count", + "source": "event.count", + "type": "int", + }, + }, + 
}, + input: map[string]any{ + "event": map[string]any{ + "count": "42", + }, + }, + checkParams: func(t *testing.T, output map[string]any) { + params, ok := output["params"].(map[string]any) + require.True(t, ok) + assert.Equal(t, int64(42), params["count"]) + }, + }, + { + name: "type conversion to bool", + args: map[string]any{ + "sources": []any{ + map[string]any{ + "name": "enabled", + "source": "event.enabled", + "type": "bool", + }, + }, + }, + input: map[string]any{ + "event": map[string]any{ + "enabled": "true", + }, + }, + checkParams: func(t *testing.T, output map[string]any) { + params, ok := output["params"].(map[string]any) + require.True(t, ok) + assert.Equal(t, true, params["enabled"]) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.setup != nil { + tt.setup() + } + if tt.cleanup != nil { + defer tt.cleanup() + } + + output, err := runner.Run(ctx, tt.args, tt.input) + + if tt.expectError { + assert.Error(t, err) + return + } + + require.NoError(t, err) + require.NotNil(t, output) + + if tt.checkParams != nil { + tt.checkParams(t, output) + } + }) + } +} + +func TestExtractTaskRunner_RequiredFieldMissing(t *testing.T) { + runner, _ := NewExtractTaskRunner(nil) + ctx := context.Background() + + args := map[string]any{ + "sources": []any{ + map[string]any{ + "name": "required", + "source": "event.nonexistent", + "required": true, + }, + }, + } + + input := map[string]any{ + "event": map[string]any{}, + } + + _, err := runner.Run(ctx, args, input) + assert.Error(t, err) + assert.Contains(t, err.Error(), "failed to extract required parameter") +} + +func TestExtractTaskRunner_PreservesInput(t *testing.T) { + runner, _ := NewExtractTaskRunner(nil) + ctx := context.Background() + + input := map[string]any{ + "existingKey": "existingValue", + "event": map[string]any{ + "id": "test-id", + }, + } + + args := map[string]any{ + "sources": []any{ + map[string]any{ + "name": "eventId", + "source": "event.id", + }, + }, + } + + output, err := runner.Run(ctx, args, input) + require.NoError(t, err) + + // Original input should be preserved + assert.Equal(t, "existingValue", output["existingKey"]) + assert.Equal(t, input["event"], output["event"]) + + // Params should contain extracted value + params, ok := output["params"].(map[string]any) + require.True(t, ok) + assert.Equal(t, "test-id", params["eventId"]) +} + +func TestExtractFromEvent(t *testing.T) { + tests := []struct { + name string + path string + eventData map[string]any + expected any + expectError bool + }{ + { + name: "simple field", + path: "id", + eventData: map[string]any{"id": "123"}, + expected: "123", + }, + { + name: "nested field", + path: "cluster.status", + eventData: map[string]any{"cluster": map[string]any{"status": "ready"}}, + expected: "ready", + }, + { + name: "deeply nested", + path: "a.b.c.d", + eventData: map[string]any{"a": map[string]any{"b": map[string]any{"c": map[string]any{"d": "value"}}}}, + expected: "value", + }, + { + name: "missing field", + path: "nonexistent", + eventData: map[string]any{}, + expectError: true, + }, + { + name: "missing nested field", + path: "a.missing", + eventData: map[string]any{"a": map[string]any{}}, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := extractFromEvent(tt.path, tt.eventData) + + if tt.expectError { + assert.Error(t, err) + return + } + + require.NoError(t, err) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestConvertParamType(t *testing.T) 
{ + tests := []struct { + name string + value any + targetType string + expected any + expectError bool + }{ + // String conversions + {"string to string", "hello", "string", "hello", false}, + {"int to string", 42, "string", "42", false}, + {"bool to string", true, "string", "true", false}, + + // Int conversions + {"string to int", "123", "int", int64(123), false}, + {"float to int", 3.14, "int", int64(3), false}, + {"invalid string to int", "abc", "int", nil, true}, + + // Float conversions + {"string to float", "3.14", "float", 3.14, false}, + {"int to float", 42, "float", float64(42), false}, + + // Bool conversions + {"string to bool true", "true", "bool", true, false}, + {"string to bool yes", "yes", "bool", true, false}, + {"string to bool false", "false", "bool", false, false}, + {"string to bool no", "no", "bool", false, false}, + {"int to bool", 1, "bool", true, false}, + {"zero to bool", 0, "bool", false, false}, + + // Unsupported type + {"unsupported type", "value", "unsupported", nil, true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := convertParamType(tt.value, tt.targetType) + + if tt.expectError { + assert.Error(t, err) + return + } + + require.NoError(t, err) + assert.Equal(t, tt.expected, result) + }) + } +} diff --git a/internal/swf/tasks/http.go b/internal/swf/tasks/http.go new file mode 100644 index 0000000..665d419 --- /dev/null +++ b/internal/swf/tasks/http.go @@ -0,0 +1,187 @@ +package tasks + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/hyperfleet_api" +) + +// HTTPTaskRunner implements the hf:http task for HTTP API calls. +// It wraps the HyperFleet API client to make HTTP requests. +type HTTPTaskRunner struct { + apiClient hyperfleet_api.Client +} + +// NewHTTPTaskRunner creates a new HTTP task runner. +func NewHTTPTaskRunner(deps *Dependencies) (TaskRunner, error) { + var apiClient hyperfleet_api.Client + if deps != nil && deps.APIClient != nil { + var ok bool + apiClient, ok = deps.APIClient.(hyperfleet_api.Client) + if !ok { + return nil, fmt.Errorf("invalid APIClient type") + } + } + + return &HTTPTaskRunner{ + apiClient: apiClient, + }, nil +} + +func (r *HTTPTaskRunner) Name() string { + return TaskHTTP +} + +// Run executes an HTTP request. 
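+// The request is issued through the configured HyperFleet API client.
+// Transport-level failures are reported in the output map rather than as a
+// task error, so the workflow can branch on them.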
+// Args should contain: +// - method: HTTP method (GET, POST, PUT, PATCH, DELETE) +// - url: URL to call (can be relative if apiClient has baseURL) +// - headers: Optional map of headers +// - body: Optional request body (string or map) +// - timeout: Optional timeout duration string (e.g., "30s") +// - retryAttempts: Optional number of retry attempts +// - retryBackoff: Optional backoff strategy (exponential, linear, constant) +// +// Returns a map with: +// - statusCode: HTTP status code +// - status: HTTP status string +// - body: Response body (parsed as JSON if possible) +// - rawBody: Response body as string +// - duration: Request duration +// - attempts: Number of attempts made +func (r *HTTPTaskRunner) Run(ctx context.Context, args map[string]any, input map[string]any) (map[string]any, error) { + if r.apiClient == nil { + return nil, fmt.Errorf("API client not configured") + } + + // Extract method + method, ok := args["method"].(string) + if !ok || method == "" { + return nil, fmt.Errorf("method is required for hf:http task") + } + + // Extract URL + url, ok := args["url"].(string) + if !ok || url == "" { + return nil, fmt.Errorf("url is required for hf:http task") + } + + // Build request options + var opts []hyperfleet_api.RequestOption + + // Add headers + if headers, ok := args["headers"].(map[string]any); ok { + headerMap := make(map[string]string) + for k, v := range headers { + if s, ok := v.(string); ok { + headerMap[k] = s + } + } + opts = append(opts, hyperfleet_api.WithHeaders(headerMap)) + } + + // Add timeout + if timeoutStr, ok := args["timeout"].(string); ok && timeoutStr != "" { + if timeout, err := time.ParseDuration(timeoutStr); err == nil { + opts = append(opts, hyperfleet_api.WithRequestTimeout(timeout)) + } + } + + // Add retry attempts + if attempts, ok := args["retryAttempts"].(int); ok { + opts = append(opts, hyperfleet_api.WithRequestRetryAttempts(attempts)) + } else if attemptsFloat, ok := args["retryAttempts"].(float64); ok { + opts = append(opts, hyperfleet_api.WithRequestRetryAttempts(int(attemptsFloat))) + } + + // Add retry backoff + if backoff, ok := args["retryBackoff"].(string); ok && backoff != "" { + opts = append(opts, hyperfleet_api.WithRequestRetryBackoff(hyperfleet_api.BackoffStrategy(backoff))) + } + + // Prepare body + var body []byte + if bodyArg, ok := args["body"]; ok && bodyArg != nil { + switch v := bodyArg.(type) { + case string: + body = []byte(v) + case []byte: + body = v + case map[string]any: + jsonBody, err := json.Marshal(v) + if err != nil { + return nil, fmt.Errorf("failed to marshal body: %w", err) + } + body = jsonBody + } + } + + // Execute request + var resp *hyperfleet_api.Response + var err error + + switch method { + case "GET": + resp, err = r.apiClient.Get(ctx, url, opts...) + case "POST": + resp, err = r.apiClient.Post(ctx, url, body, opts...) + case "PUT": + resp, err = r.apiClient.Put(ctx, url, body, opts...) + case "PATCH": + resp, err = r.apiClient.Patch(ctx, url, body, opts...) + case "DELETE": + resp, err = r.apiClient.Delete(ctx, url, opts...) 
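+	// Any verb not matched above is rejected below; the comparison is
+	// case-sensitive, so callers should pass upper-case method names.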
+ default: + return nil, fmt.Errorf("unsupported HTTP method: %s", method) + } + + // Build output + output := make(map[string]any) + + // Copy input for continuation + for k, v := range input { + output[k] = v + } + + if err != nil { + output["error"] = err.Error() + if resp != nil { + output["statusCode"] = resp.StatusCode + output["status"] = resp.Status + } + return output, nil // Don't return error, let workflow handle it + } + + output["statusCode"] = resp.StatusCode + output["status"] = resp.Status + output["rawBody"] = resp.BodyString() + output["duration"] = resp.Duration.String() + output["attempts"] = resp.Attempts + output["success"] = resp.IsSuccess() + + // Try to parse body as JSON + if len(resp.Body) > 0 { + var jsonBody map[string]any + if err := json.Unmarshal(resp.Body, &jsonBody); err == nil { + output["body"] = jsonBody + } else { + // Try as array + var jsonArray []any + if err := json.Unmarshal(resp.Body, &jsonArray); err == nil { + output["body"] = jsonArray + } else { + output["body"] = resp.BodyString() + } + } + } + + return output, nil +} + +func init() { + // Register the HTTP task runner in the default registry + _ = RegisterDefault(TaskHTTP, NewHTTPTaskRunner) +} diff --git a/internal/swf/tasks/k8s.go b/internal/swf/tasks/k8s.go new file mode 100644 index 0000000..68ccc08 --- /dev/null +++ b/internal/swf/tasks/k8s.go @@ -0,0 +1,467 @@ +package tasks + +import ( + "context" + "fmt" + "time" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/k8s_client" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// K8sTaskRunner implements the hf:k8s task for Kubernetes resource management. +// It handles create/update/recreate/skip operations based on discovery and generation tracking. +type K8sTaskRunner struct { + k8sClient k8s_client.K8sClient + log logger.Logger +} + +// NewK8sTaskRunner creates a new Kubernetes task runner. +func NewK8sTaskRunner(deps *Dependencies) (TaskRunner, error) { + var k8sClient k8s_client.K8sClient + var log logger.Logger + + if deps != nil { + if deps.K8sClient != nil { + var ok bool + k8sClient, ok = deps.K8sClient.(k8s_client.K8sClient) + if !ok { + return nil, fmt.Errorf("invalid K8sClient type") + } + } + if deps.Logger != nil { + var ok bool + log, ok = deps.Logger.(logger.Logger) + if !ok { + log = &noopLogger{} + } + } else { + log = &noopLogger{} + } + } else { + log = &noopLogger{} + } + + return &K8sTaskRunner{ + k8sClient: k8sClient, + log: log, + }, nil +} + +func (r *K8sTaskRunner) Name() string { + return TaskK8s +} + +// Run executes a Kubernetes resource operation. 
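+// It renders Go templates in the manifest against the workflow params,
+// optionally discovers an existing copy of the resource, and then decides
+// between create, update, recreate, and skip using generation annotations.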
+// Args should contain: +// - name: Resource name (for tracking) +// - manifest: The Kubernetes manifest (map with apiVersion, kind, metadata, spec) +// - discovery: Optional discovery config (namespace, byName, bySelectors) +// - recreateOnChange: Optional boolean to recreate instead of update +// +// Returns a map with: +// - operation: The operation performed (create, update, recreate, skip) +// - operationReason: Why this operation was chosen +// - resource: The resulting resource object +// - error: Error message if operation failed +func (r *K8sTaskRunner) Run(ctx context.Context, args map[string]any, input map[string]any) (map[string]any, error) { + if r.k8sClient == nil { + return nil, fmt.Errorf("Kubernetes client not configured") + } + + name, _ := args["name"].(string) + if name == "" { + name = "unnamed" + } + + output := make(map[string]any) + for k, v := range input { + output[k] = v + } + + // Build manifest + manifestData, ok := args["manifest"].(map[string]any) + if !ok { + return nil, fmt.Errorf("manifest is required for hf:k8s task") + } + + // Render templates in manifest + params, _ := input["params"].(map[string]any) + if params == nil { + params = make(map[string]any) + } + + renderedManifest, err := renderManifestTemplates(manifestData, params) + if err != nil { + output["error"] = fmt.Sprintf("failed to render manifest: %v", err) + return output, nil + } + + manifest := &unstructured.Unstructured{Object: renderedManifest} + + // Validate manifest + if manifest.GetAPIVersion() == "" { + output["error"] = "manifest missing apiVersion" + return output, nil + } + if manifest.GetKind() == "" { + output["error"] = "manifest missing kind" + return output, nil + } + if manifest.GetName() == "" { + output["error"] = "manifest missing metadata.name" + return output, nil + } + + gvk := manifest.GroupVersionKind() + recreateOnChange, _ := args["recreateOnChange"].(bool) + + r.log.Infof(ctx, "K8s[%s] processing: %s/%s %s", name, manifest.GetNamespace(), manifest.GetName(), gvk.Kind) + + // Discovery + var existingResource *unstructured.Unstructured + if discoveryConfig, ok := args["discovery"].(map[string]any); ok { + existingResource, err = r.discoverResource(ctx, gvk, discoveryConfig, params) + if err != nil && !apierrors.IsNotFound(err) { + output["error"] = fmt.Sprintf("discovery failed: %v", err) + return output, nil + } + } + + // Determine operation + var operation string + var operationReason string + var resultResource *unstructured.Unstructured + + manifestGen := k8s_client.GetGenerationAnnotation(manifest) + + if existingResource != nil { + existingGen := k8s_client.GetGenerationAnnotation(existingResource) + + if existingGen == manifestGen { + operation = "skip" + operationReason = fmt.Sprintf("generation %d unchanged", existingGen) + resultResource = existingResource + } else if recreateOnChange { + operation = "recreate" + operationReason = fmt.Sprintf("generation changed %d->%d, recreateOnChange=true", existingGen, manifestGen) + } else { + operation = "update" + operationReason = fmt.Sprintf("generation changed %d->%d", existingGen, manifestGen) + } + } else { + operation = "create" + operationReason = "resource not found" + } + + r.log.Infof(ctx, "K8s[%s] operation=%s reason=%s", name, operation, operationReason) + + // Execute operation + switch operation { + case "create": + resultResource, err = r.k8sClient.CreateResource(ctx, manifest) + case "update": + manifest.SetResourceVersion(existingResource.GetResourceVersion()) + 
		manifest.SetUID(existingResource.GetUID())
+		resultResource, err = r.k8sClient.UpdateResource(ctx, manifest)
+	case "recreate":
+		resultResource, err = r.recreateResource(ctx, existingResource, manifest)
+	case "skip":
+		// Already set above
+	}
+
+	if err != nil {
+		output["error"] = err.Error()
+		output["operation"] = operation
+		output["operationReason"] = operationReason
+		return output, nil
+	}
+
+	output["operation"] = operation
+	output["operationReason"] = operationReason
+	output["success"] = true
+
+	if resultResource != nil {
+		output["resource"] = resultResource.Object
+		output["resourceName"] = resultResource.GetName()
+		output["resourceNamespace"] = resultResource.GetNamespace()
+		output["resourceKind"] = gvk.Kind
+
+		// Store in resources map
+		if resources, ok := output["resources"].(map[string]any); ok {
+			resources[name] = resultResource.Object
+		} else {
+			output["resources"] = map[string]any{
+				name: resultResource.Object,
+			}
+		}
+	}
+
+	return output, nil
+}
+
+// discoverResource discovers an existing resource using the discovery config.
+func (r *K8sTaskRunner) discoverResource(ctx context.Context, gvk schema.GroupVersionKind, discoveryConfig map[string]any, params map[string]any) (*unstructured.Unstructured, error) {
+	namespace, _ := discoveryConfig["namespace"].(string)
+	if namespace != "" {
+		// Render namespace template
+		rendered, err := RenderTemplate(namespace, params)
+		if err == nil {
+			namespace = rendered
+		}
+	}
+
+	// Discovery by name
+	if byName, ok := discoveryConfig["byName"].(string); ok && byName != "" {
+		name, err := RenderTemplate(byName, params)
+		if err != nil {
+			return nil, fmt.Errorf("failed to render byName: %w", err)
+		}
+		return r.k8sClient.GetResource(ctx, gvk, namespace, name)
+	}
+
+	// Discovery by label selector
+	if bySelectors, ok := discoveryConfig["bySelectors"].(map[string]any); ok {
+		if labelSelector, ok := bySelectors["labelSelector"].(map[string]any); ok {
+			// Render label keys and values
+			renderedLabels := make(map[string]string)
+			for k, v := range labelSelector {
+				val, _ := v.(string)
+				renderedKey, _ := RenderTemplate(k, params)
+				renderedVal, _ := RenderTemplate(val, params)
+				renderedLabels[renderedKey] = renderedVal
+			}
+
+			selectorStr := k8s_client.BuildLabelSelector(renderedLabels)
+			config := &k8s_client.DiscoveryConfig{
+				Namespace:     namespace,
+				LabelSelector: selectorStr,
+			}
+
+			list, err := r.k8sClient.DiscoverResources(ctx, gvk, config)
+			if err != nil {
+				return nil, err
+			}
+
+			if len(list.Items) == 0 {
+				return nil, apierrors.NewNotFound(schema.GroupResource{Group: gvk.Group, Resource: gvk.Kind}, "")
+			}
+
+			return k8s_client.GetLatestGenerationResource(list), nil
+		}
+	}
+
+	return nil, nil
+}
+
+// recreateResource deletes and recreates a resource.
+func (r *K8sTaskRunner) recreateResource(ctx context.Context, existing, manifest *unstructured.Unstructured) (*unstructured.Unstructured, error) {
+	gvk := existing.GroupVersionKind()
+	namespace := existing.GetNamespace()
+	name := existing.GetName()
+
+	// Delete
+	if err := r.k8sClient.DeleteResource(ctx, gvk, namespace, name); err != nil {
+		return nil, fmt.Errorf("failed to delete for recreation: %w", err)
+	}
+
+	// Wait for deletion
+	if err := r.waitForDeletion(ctx, gvk, namespace, name); err != nil {
+		return nil, fmt.Errorf("failed waiting for deletion: %w", err)
+	}
+
+	// Create
+	return r.k8sClient.CreateResource(ctx, manifest)
+}
+
+// waitForDeletion polls until the resource is deleted.
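+// It re-checks roughly every 100ms (pollInterval below) and returns nil once
+// GetResource reports NotFound, or an error if the context ends first.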
+func (r *K8sTaskRunner) waitForDeletion(ctx context.Context, gvk schema.GroupVersionKind, namespace, name string) error { + const pollInterval = 100 * time.Millisecond + + ticker := time.NewTicker(pollInterval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return fmt.Errorf("context cancelled: %w", ctx.Err()) + case <-ticker.C: + _, err := r.k8sClient.GetResource(ctx, gvk, namespace, name) + if apierrors.IsNotFound(err) { + return nil + } + if err != nil { + return fmt.Errorf("error checking deletion: %w", err) + } + } + } +} + +// renderManifestTemplates recursively renders Go templates in a manifest. +func renderManifestTemplates(data map[string]any, params map[string]any) (map[string]any, error) { + result := make(map[string]any) + + for k, v := range data { + renderedKey, err := RenderTemplate(k, params) + if err != nil { + return nil, fmt.Errorf("failed to render key '%s': %w", k, err) + } + + renderedValue, err := renderValue(v, params) + if err != nil { + return nil, fmt.Errorf("failed to render value for key '%s': %w", k, err) + } + + result[renderedKey] = renderedValue + } + + return result, nil +} + +func renderValue(v any, params map[string]any) (any, error) { + switch val := v.(type) { + case string: + return RenderTemplate(val, params) + case map[string]any: + return renderManifestTemplates(val, params) + case []any: + result := make([]any, len(val)) + for i, item := range val { + rendered, err := renderValue(item, params) + if err != nil { + return nil, err + } + result[i] = rendered + } + return result, nil + default: + return v, nil + } +} + +// ResourcesTaskRunner implements the hf:resources task. +// It manages multiple Kubernetes resources in sequence. +type ResourcesTaskRunner struct { + k8sClient k8s_client.K8sClient + log logger.Logger +} + +// NewResourcesTaskRunner creates a new resources task runner. +func NewResourcesTaskRunner(deps *Dependencies) (TaskRunner, error) { + var k8sClient k8s_client.K8sClient + var log logger.Logger + + if deps != nil { + if deps.K8sClient != nil { + var ok bool + k8sClient, ok = deps.K8sClient.(k8s_client.K8sClient) + if !ok { + return nil, fmt.Errorf("invalid K8sClient type") + } + } + if deps.Logger != nil { + var ok bool + log, ok = deps.Logger.(logger.Logger) + if !ok { + log = &noopLogger{} + } + } else { + log = &noopLogger{} + } + } else { + log = &noopLogger{} + } + + return &ResourcesTaskRunner{ + k8sClient: k8sClient, + log: log, + }, nil +} + +func (r *ResourcesTaskRunner) Name() string { + return TaskResources +} + +// Run manages multiple Kubernetes resources. 
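+// Each entry in config is executed through a single-resource hf:k8s runner
+// in order; processing stops at the first failed resource.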
+// Args should contain: +// - config: Array of resource configurations +// +// Returns a map with: +// - results: Array of individual resource results +// - success: Boolean indicating if all operations succeeded +func (r *ResourcesTaskRunner) Run(ctx context.Context, args map[string]any, input map[string]any) (map[string]any, error) { + output := make(map[string]any) + for k, v := range input { + output[k] = v + } + + resourceConfigs, ok := args["config"].([]any) + if !ok || len(resourceConfigs) == 0 { + output["results"] = []any{} + output["success"] = true + return output, nil + } + + results := make([]any, 0, len(resourceConfigs)) + allSuccess := true + resources := make(map[string]any) + + // Create single resource runner + singleRunner, err := NewK8sTaskRunner(&Dependencies{ + K8sClient: r.k8sClient, + Logger: r.log, + }) + if err != nil { + return nil, err + } + + for _, resourceConfig := range resourceConfigs { + resourceArgs, ok := resourceConfig.(map[string]any) + if !ok { + continue + } + + result, err := singleRunner.Run(ctx, resourceArgs, input) + if err != nil { + allSuccess = false + results = append(results, map[string]any{ + "name": resourceArgs["name"], + "error": err.Error(), + }) + break + } + + name, _ := resourceArgs["name"].(string) + results = append(results, map[string]any{ + "name": name, + "operation": result["operation"], + "operationReason": result["operationReason"], + "success": result["success"], + }) + + // Collect resources + if resource, ok := result["resource"].(map[string]any); ok && name != "" { + resources[name] = resource + } + + if success, ok := result["success"].(bool); !ok || !success { + allSuccess = false + break + } + } + + output["results"] = results + output["success"] = allSuccess + output["resources"] = resources + + return output, nil +} + +func init() { + _ = RegisterDefault(TaskK8s, NewK8sTaskRunner) + _ = RegisterDefault(TaskResources, NewResourcesTaskRunner) +} diff --git a/internal/swf/tasks/k8s_read.go b/internal/swf/tasks/k8s_read.go new file mode 100644 index 0000000..339b5f2 --- /dev/null +++ b/internal/swf/tasks/k8s_read.go @@ -0,0 +1,141 @@ +package tasks + +import ( + "context" + "fmt" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/k8s_client" +) + +// K8sReadTaskRunner implements the hf:k8s-read task for reading secrets and configmaps. +// This task is used when native SWF set tasks cannot access K8s resources directly. +// +// Example usage in workflow: +// +// call: hf:k8s-read +// with: +// secrets: +// - name: apiToken +// ref: hyperfleet-system/api-credentials.token +// configmaps: +// - name: apiEndpoint +// ref: hyperfleet-system/config.endpoint +type K8sReadTaskRunner struct { + k8sClient k8s_client.K8sClient +} + +// NewK8sReadTaskRunner creates a new K8s read task runner. +func NewK8sReadTaskRunner(deps *Dependencies) (TaskRunner, error) { + var k8sClient k8s_client.K8sClient + if deps != nil && deps.K8sClient != nil { + var ok bool + k8sClient, ok = deps.K8sClient.(k8s_client.K8sClient) + if !ok { + return nil, fmt.Errorf("invalid K8sClient type") + } + } + + return &K8sReadTaskRunner{ + k8sClient: k8sClient, + }, nil +} + +func (r *K8sReadTaskRunner) Name() string { + return TaskK8sRead +} + +// Run reads secrets and configmaps from Kubernetes. +// Args should contain: +// - secrets: array of {name, ref} where ref is "namespace/name.key" +// - configmaps: array of {name, ref} where ref is "namespace/name.key" +// +// Returns extracted values as a map merged with the input. 
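+//
+// Entries may also carry "required" and "default" keys, handled below. A
+// hypothetical args shape (names and refs are illustrative):
+//
+//	secrets:
+//	  - name: apiToken
+//	    ref: hyperfleet-system/api-credentials.token
+//	    required: true
+//	configmaps:
+//	  - name: apiEndpoint
+//	    ref: hyperfleet-system/config.endpoint
+//	    default: https://api.example.com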
+func (r *K8sReadTaskRunner) Run(ctx context.Context, args map[string]any, input map[string]any) (map[string]any, error) { + if r.k8sClient == nil { + return nil, fmt.Errorf("kubernetes client not configured, cannot read K8s resources") + } + + result := make(map[string]any) + + // Read secrets + if secrets, ok := args["secrets"].([]any); ok { + for _, s := range secrets { + secretDef, ok := s.(map[string]any) + if !ok { + continue + } + + name, _ := secretDef["name"].(string) + ref, _ := secretDef["ref"].(string) + + if name == "" || ref == "" { + continue + } + + value, err := r.k8sClient.ExtractFromSecret(ctx, ref) + if err != nil { + // Check if this secret is required + required, _ := secretDef["required"].(bool) + if required { + return nil, fmt.Errorf("failed to read required secret '%s' from '%s': %w", name, ref, err) + } + // Use default if provided + if defaultVal, hasDefault := secretDef["default"]; hasDefault { + result[name] = defaultVal + } + continue + } + + result[name] = value + } + } + + // Read configmaps + if configmaps, ok := args["configmaps"].([]any); ok { + for _, c := range configmaps { + cmDef, ok := c.(map[string]any) + if !ok { + continue + } + + name, _ := cmDef["name"].(string) + ref, _ := cmDef["ref"].(string) + + if name == "" || ref == "" { + continue + } + + value, err := r.k8sClient.ExtractFromConfigMap(ctx, ref) + if err != nil { + // Check if this configmap is required + required, _ := cmDef["required"].(bool) + if required { + return nil, fmt.Errorf("failed to read required configmap '%s' from '%s': %w", name, ref, err) + } + // Use default if provided + if defaultVal, hasDefault := cmDef["default"]; hasDefault { + result[name] = defaultVal + } + continue + } + + result[name] = value + } + } + + // Merge results into input + output := make(map[string]any) + for k, v := range input { + output[k] = v + } + for k, v := range result { + output[k] = v + } + + return output, nil +} + +func init() { + // Register the k8s-read task runner in the default registry + _ = RegisterDefault(TaskK8sRead, NewK8sReadTaskRunner) +} diff --git a/internal/swf/tasks/post.go b/internal/swf/tasks/post.go new file mode 100644 index 0000000..1505043 --- /dev/null +++ b/internal/swf/tasks/post.go @@ -0,0 +1,396 @@ +package tasks + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/criteria" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/hyperfleet_api" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" +) + +// DEPRECATION NOTICE: +// The hf:post task is deprecated in favor of native Serverless Workflow constructs. +// The converter now generates: +// - set tasks with jq expressions for simple payload fields +// - hf:cel tasks for complex CEL expressions in payload building +// - call:http tasks for API calls with native try/catch for retry +// +// This custom task is kept for backward compatibility with existing workflows +// but should not be used for new implementations. Use the native SWF approach +// generated by convertPostPhase() in converter/adapter_to_swf.go instead. + +// PostTaskRunner implements the hf:post task for post-processing actions. +// Deprecated: Use native SWF constructs (set, call:http, try/catch) instead. +// It handles payload building and API calls after resource creation. +type PostTaskRunner struct { + apiClient hyperfleet_api.Client + log logger.Logger +} + +// NewPostTaskRunner creates a new post task runner. 
+func NewPostTaskRunner(deps *Dependencies) (TaskRunner, error) { + var apiClient hyperfleet_api.Client + var log logger.Logger + + if deps != nil { + if deps.APIClient != nil { + var ok bool + apiClient, ok = deps.APIClient.(hyperfleet_api.Client) + if !ok { + return nil, fmt.Errorf("invalid APIClient type") + } + } + if deps.Logger != nil { + var ok bool + log, ok = deps.Logger.(logger.Logger) + if !ok { + log = &noopLogger{} + } + } else { + log = &noopLogger{} + } + } else { + log = &noopLogger{} + } + + return &PostTaskRunner{ + apiClient: apiClient, + log: log, + }, nil +} + +func (r *PostTaskRunner) Name() string { + return TaskPost +} + +// Run executes post-processing actions. +// Args should contain: +// - payloads: Array of payload configurations +// - postActions: Array of post-action configurations +// +// Returns a map with: +// - payloads: Map of built payloads +// - results: Array of post-action results +// - success: Boolean indicating if all actions succeeded +func (r *PostTaskRunner) Run(ctx context.Context, args map[string]any, input map[string]any) (map[string]any, error) { + output := make(map[string]any) + for k, v := range input { + output[k] = v + } + + params, _ := input["params"].(map[string]any) + if params == nil { + params = make(map[string]any) + } + + builtPayloads := make(map[string]any) + allSuccess := true + + // Step 1: Build payloads + if payloadsRaw, ok := args["payloads"].([]any); ok && len(payloadsRaw) > 0 { + r.log.Infof(ctx, "Post: building %d payloads", len(payloadsRaw)) + + // Create evaluation context + evalCtx := criteria.NewEvaluationContext() + for k, v := range input { + evalCtx.Set(k, v) + } + + evaluator, err := criteria.NewEvaluator(ctx, evalCtx, r.log) + if err != nil { + output["error"] = fmt.Sprintf("failed to create evaluator: %v", err) + output["success"] = false + return output, nil + } + + for _, payloadRaw := range payloadsRaw { + payload, ok := payloadRaw.(map[string]any) + if !ok { + continue + } + + name, _ := payload["name"].(string) + if name == "" { + continue + } + + // Determine build source + var buildDef any + if build, ok := payload["build"]; ok && build != nil { + buildDef = build + } else if buildRefContent, ok := payload["buildRefContent"].(map[string]any); ok { + buildDef = buildRefContent + } else { + output["error"] = fmt.Sprintf("payload '%s' has neither build nor buildRefContent", name) + output["success"] = false + return output, nil + } + + // Build the payload + builtPayload, err := r.buildPayload(ctx, buildDef, evaluator, params) + if err != nil { + output["error"] = fmt.Sprintf("failed to build payload '%s': %v", name, err) + output["success"] = false + return output, nil + } + + // Convert to JSON string for template rendering + jsonBytes, err := json.Marshal(builtPayload) + if err != nil { + output["error"] = fmt.Sprintf("failed to marshal payload '%s': %v", name, err) + output["success"] = false + return output, nil + } + + builtPayloads[name] = string(jsonBytes) + params[name] = string(jsonBytes) + + r.log.Debugf(ctx, "Post: built payload '%s'", name) + } + } + + output["payloads"] = builtPayloads + + // Step 2: Execute post actions + results := make([]any, 0) + if actionsRaw, ok := args["postActions"].([]any); ok && len(actionsRaw) > 0 { + r.log.Infof(ctx, "Post: executing %d post actions", len(actionsRaw)) + + for _, actionRaw := range actionsRaw { + action, ok := actionRaw.(map[string]any) + if !ok { + continue + } + + name, _ := action["name"].(string) + result := map[string]any{ + "name": name, + 
"status": "success", + } + + // Execute log action if present + if logConfig, ok := action["log"].(map[string]any); ok { + message, _ := logConfig["message"].(string) + level, _ := logConfig["level"].(string) + if level == "" { + level = "info" + } + + // Render template in message + renderedMessage, err := RenderTemplate(message, params) + if err == nil { + message = renderedMessage + } + + switch level { + case "debug": + r.log.Debugf(ctx, "PostAction[%s]: %s", name, message) + case "warning", "warn": + r.log.Warnf(ctx, "PostAction[%s]: %s", name, message) + case "error": + r.log.Errorf(ctx, "PostAction[%s]: %s", name, message) + default: + r.log.Infof(ctx, "PostAction[%s]: %s", name, message) + } + } + + // Execute API call if present + if apiCallConfig, ok := action["apiCall"].(map[string]any); ok { + apiResult, err := r.executeAPICall(ctx, apiCallConfig, params) + if err != nil { + result["status"] = "failed" + result["error"] = err.Error() + allSuccess = false + results = append(results, result) + break + } + result["apiCallMade"] = true + result["httpStatus"] = apiResult["statusCode"] + result["apiResponse"] = apiResult["body"] + } + + results = append(results, result) + r.log.Infof(ctx, "PostAction[%s]: SUCCESS", name) + } + } + + output["results"] = results + output["success"] = allSuccess + + return output, nil +} + +// buildPayload builds a payload from a build definition. +func (r *PostTaskRunner) buildPayload(ctx context.Context, build any, evaluator *criteria.Evaluator, params map[string]any) (any, error) { + switch v := build.(type) { + case map[string]any: + return r.buildMapPayload(ctx, v, evaluator, params) + default: + return build, nil + } +} + +// buildMapPayload builds a map payload, evaluating expressions as needed. +func (r *PostTaskRunner) buildMapPayload(ctx context.Context, m map[string]any, evaluator *criteria.Evaluator, params map[string]any) (map[string]any, error) { + result := make(map[string]any) + + for k, v := range m { + // Render the key + renderedKey, err := RenderTemplate(k, params) + if err != nil { + return nil, fmt.Errorf("failed to render key '%s': %w", k, err) + } + + // Process the value + processedValue, err := r.processValue(ctx, v, evaluator, params) + if err != nil { + return nil, fmt.Errorf("failed to process value for key '%s': %w", k, err) + } + + result[renderedKey] = processedValue + } + + return result, nil +} + +// processValue processes a value, evaluating expressions as needed. 
+func (r *PostTaskRunner) processValue(ctx context.Context, v any, evaluator *criteria.Evaluator, params map[string]any) (any, error) {
+	switch val := v.(type) {
+	case map[string]any:
+		// Check if this is a value definition with field or expression
+		if field, ok := val["field"].(string); ok && field != "" {
+			defaultVal := val["default"]
+			result, err := evaluator.ExtractValue(field, "")
+			if err != nil {
+				return nil, err
+			}
+			if result.Value == nil {
+				return defaultVal, nil
+			}
+			return result.Value, nil
+		}
+		if expr, ok := val["expression"].(string); ok && expr != "" {
+			defaultVal := val["default"]
+			result, err := evaluator.ExtractValue("", expr)
+			if err != nil {
+				return nil, err
+			}
+			if result.Value == nil {
+				return defaultVal, nil
+			}
+			return result.Value, nil
+		}
+
+		// Recursively process nested maps
+		return r.buildMapPayload(ctx, val, evaluator, params)
+
+	case []any:
+		result := make([]any, len(val))
+		for i, item := range val {
+			processed, err := r.processValue(ctx, item, evaluator, params)
+			if err != nil {
+				return nil, err
+			}
+			result[i] = processed
+		}
+		return result, nil
+
+	case string:
+		return RenderTemplate(val, params)
+
+	default:
+		return v, nil
+	}
+}
+
+// executeAPICall executes an API call for a post action.
+func (r *PostTaskRunner) executeAPICall(ctx context.Context, apiCallConfig map[string]any, params map[string]any) (map[string]any, error) {
+	if r.apiClient == nil {
+		return nil, fmt.Errorf("API client not configured")
+	}
+
+	method, _ := apiCallConfig["method"].(string)
+	url, _ := apiCallConfig["url"].(string)
+
+	// Render URL template
+	renderedURL, err := RenderTemplate(url, params)
+	if err == nil {
+		url = renderedURL
+	}
+
+	r.log.Infof(ctx, "Post: API call %s %s", method, url)
+
+	// Build request options
+	var opts []hyperfleet_api.RequestOption
+
+	// Add headers. Decoded YAML/JSON produces map[string]any, so convert
+	// string values; accept a pre-built map[string]string as well.
+	if headers, ok := apiCallConfig["headers"].(map[string]any); ok {
+		headerMap := make(map[string]string)
+		for k, v := range headers {
+			if s, ok := v.(string); ok {
+				headerMap[k] = s
+			}
+		}
+		opts = append(opts, hyperfleet_api.WithHeaders(headerMap))
+	} else if headers, ok := apiCallConfig["headers"].(map[string]string); ok {
+		opts = append(opts, hyperfleet_api.WithHeaders(headers))
+	}
+
+	// Prepare body
+	var body []byte
+	if bodyStr, ok := apiCallConfig["body"].(string); ok && bodyStr != "" {
+		renderedBody, err := RenderTemplate(bodyStr, params)
+		if err == nil {
+			body = []byte(renderedBody)
+		} else {
+			body = []byte(bodyStr)
+		}
+	}
+
+	// Execute request
+	var resp *hyperfleet_api.Response
+
+	switch strings.ToUpper(method) {
+	case "GET":
+		resp, err = r.apiClient.Get(ctx, url, opts...)
+	case "POST":
+		resp, err = r.apiClient.Post(ctx, url, body, opts...)
+	case "PUT":
+		resp, err = r.apiClient.Put(ctx, url, body, opts...)
+	case "PATCH":
+		resp, err = r.apiClient.Patch(ctx, url, body, opts...)
+	case "DELETE":
+		resp, err = r.apiClient.Delete(ctx, url, opts...)
+ default: + return nil, fmt.Errorf("unsupported HTTP method: %s", method) + } + + result := make(map[string]any) + + if err != nil { + result["error"] = err.Error() + if resp != nil { + result["statusCode"] = resp.StatusCode + } + return result, err + } + + result["statusCode"] = resp.StatusCode + result["status"] = resp.Status + result["success"] = resp.IsSuccess() + + // Try to parse body as JSON + if len(resp.Body) > 0 { + var jsonBody map[string]any + if err := json.Unmarshal(resp.Body, &jsonBody); err == nil { + result["body"] = jsonBody + } else { + result["body"] = resp.BodyString() + } + } + + if !resp.IsSuccess() { + return result, fmt.Errorf("API call returned status %d", resp.StatusCode) + } + + return result, nil +} + +func init() { + _ = RegisterDefault(TaskPost, NewPostTaskRunner) +} diff --git a/internal/swf/tasks/precondition.go b/internal/swf/tasks/precondition.go new file mode 100644 index 0000000..a045e54 --- /dev/null +++ b/internal/swf/tasks/precondition.go @@ -0,0 +1,413 @@ +package tasks + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/criteria" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/hyperfleet_api" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" +) + +// DEPRECATION NOTICE: +// The hf:precondition and hf:preconditions tasks are deprecated in favor of native +// Serverless Workflow constructs. The converter now generates: +// - call:http tasks for API calls +// - export expressions to capture fields and evaluate conditions +// - if conditions for short-circuit behavior +// - set tasks for final allMatched/notMetReason computation +// +// These custom tasks are kept for backward compatibility with existing workflows +// but should not be used for new implementations. Use the native SWF approach +// generated by convertPreconditionsPhase() in converter/adapter_to_swf.go instead. + +// PreconditionTaskRunner implements the hf:precondition task. +// Deprecated: Use native SWF call:http with export instead. +// It evaluates a single precondition including API calls, field capture, and conditions. +type PreconditionTaskRunner struct { + apiClient hyperfleet_api.Client + log logger.Logger +} + +// NewPreconditionTaskRunner creates a new precondition task runner. +func NewPreconditionTaskRunner(deps *Dependencies) (TaskRunner, error) { + var apiClient hyperfleet_api.Client + var log logger.Logger + + if deps != nil { + if deps.APIClient != nil { + var ok bool + apiClient, ok = deps.APIClient.(hyperfleet_api.Client) + if !ok { + return nil, fmt.Errorf("invalid APIClient type") + } + } + if deps.Logger != nil { + var ok bool + log, ok = deps.Logger.(logger.Logger) + if !ok { + log = &noopLogger{} + } + } else { + log = &noopLogger{} + } + } else { + log = &noopLogger{} + } + + return &PreconditionTaskRunner{ + apiClient: apiClient, + log: log, + }, nil +} + +func (r *PreconditionTaskRunner) Name() string { + return TaskPrecondition +} + +// Run evaluates a precondition. 
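+// A typical args payload as supplied by the workflow engine (keys follow
+// the list below; the concrete values, including the operator name, are
+// illustrative only):
+//
+//	args := map[string]any{
+//		"name": "cluster-exists",
+//		"apiCall": map[string]any{
+//			"method": "GET",
+//			"url":    "/api/clusters/{{ .clusterId }}",
+//		},
+//		"conditions": []any{
+//			map[string]any{"field": "status", "operator": "equals", "value": "ready"},
+//		},
+//	}
+//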
+// Args should contain: +// - name: Precondition name +// - apiCall: Optional API call config (method, url, headers, body, timeout, retryAttempts, retryBackoff) +// - capture: Optional array of fields to capture from API response +// - conditions: Optional array of structured conditions +// - expression: Optional CEL expression +// +// Returns a map with: +// - matched: Boolean indicating if the precondition matched +// - capturedFields: Map of captured field values +// - apiCallMade: Boolean indicating if an API call was made +// - error: Error message if evaluation failed +func (r *PreconditionTaskRunner) Run(ctx context.Context, args map[string]any, input map[string]any) (map[string]any, error) { + name, _ := args["name"].(string) + if name == "" { + name = "unnamed" + } + + output := make(map[string]any) + for k, v := range input { + output[k] = v + } + + capturedFields := make(map[string]any) + var apiResponseData map[string]any + + // Step 1: Execute API call if configured + if apiCallConfig, ok := args["apiCall"].(map[string]any); ok { + if r.apiClient == nil { + return nil, fmt.Errorf("API client not configured for precondition %s", name) + } + + method, _ := apiCallConfig["method"].(string) + url, _ := apiCallConfig["url"].(string) + + // Render URL template if needed + if params, ok := input["params"].(map[string]any); ok { + renderedURL, err := RenderTemplate(url, params) + if err == nil { + url = renderedURL + } + } + + r.log.Infof(ctx, "Precondition[%s] making API call: %s %s", name, method, url) + + // Build request options + var opts []hyperfleet_api.RequestOption + + if headers, ok := apiCallConfig["headers"].(map[string]any); ok { + headerMap := make(map[string]string) + for k, v := range headers { + if s, ok := v.(string); ok { + headerMap[k] = s + } + } + opts = append(opts, hyperfleet_api.WithHeaders(headerMap)) + } + + // Execute the API call + var resp *hyperfleet_api.Response + var err error + + switch strings.ToUpper(method) { + case "GET": + resp, err = r.apiClient.Get(ctx, url, opts...) + case "POST": + var body []byte + if bodyStr, ok := apiCallConfig["body"].(string); ok { + body = []byte(bodyStr) + } + resp, err = r.apiClient.Post(ctx, url, body, opts...) + default: + resp, err = r.apiClient.Get(ctx, url, opts...) 
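+			// Note: unlike the post runner, which rejects unknown methods,
+			// this falls back to GET, presumably because preconditions are
+			// read-only checks where a safe read is the lenient default.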
+ } + + output["apiCallMade"] = true + + if err != nil || resp == nil || !resp.IsSuccess() { + errMsg := "API call failed" + if err != nil { + errMsg = err.Error() + } else if resp != nil { + errMsg = fmt.Sprintf("HTTP %d: %s", resp.StatusCode, resp.Status) + } + output["matched"] = false + output["error"] = errMsg + return output, nil + } + + // Parse response + if err := json.Unmarshal(resp.Body, &apiResponseData); err != nil { + output["matched"] = false + output["error"] = fmt.Sprintf("failed to parse API response: %v", err) + return output, nil + } + + // Store response under precondition name + output[name] = apiResponseData + + // Capture fields + if captures, ok := args["capture"].([]any); ok && len(captures) > 0 { + captureCtx := criteria.NewEvaluationContext() + captureCtx.SetVariablesFromMap(apiResponseData) + + captureEvaluator, evalErr := criteria.NewEvaluator(ctx, captureCtx, r.log) + if evalErr == nil { + for _, c := range captures { + captureDef, ok := c.(map[string]any) + if !ok { + continue + } + captureName, _ := captureDef["name"].(string) + field, _ := captureDef["field"].(string) + expression, _ := captureDef["expression"].(string) + + if captureName == "" { + continue + } + + extractResult, err := captureEvaluator.ExtractValue(field, expression) + if err == nil && extractResult.Value != nil { + capturedFields[captureName] = extractResult.Value + r.log.Debugf(ctx, "Captured %s = %v", captureName, extractResult.Value) + } + } + } + } + } + + output["capturedFields"] = capturedFields + + // Merge captured fields into params + if params, ok := output["params"].(map[string]any); ok { + for k, v := range capturedFields { + params[k] = v + } + } + + // Step 2: Evaluate conditions + evalCtx := criteria.NewEvaluationContext() + for k, v := range input { + evalCtx.Set(k, v) + } + if apiResponseData != nil { + evalCtx.Set(name, apiResponseData) + } + for k, v := range capturedFields { + evalCtx.Set(k, v) + } + + evaluator, err := criteria.NewEvaluator(ctx, evalCtx, r.log) + if err != nil { + output["matched"] = false + output["error"] = fmt.Sprintf("failed to create evaluator: %v", err) + return output, nil + } + + matched := true + + // Evaluate structured conditions + if conditions, ok := args["conditions"].([]any); ok && len(conditions) > 0 { + var condDefs []criteria.ConditionDef + for _, c := range conditions { + condMap, ok := c.(map[string]any) + if !ok { + continue + } + field, _ := condMap["field"].(string) + operator, _ := condMap["operator"].(string) + value := condMap["value"] + + condDefs = append(condDefs, criteria.ConditionDef{ + Field: field, + Operator: criteria.Operator(operator), + Value: value, + }) + } + + condResult, err := evaluator.EvaluateConditions(condDefs) + if err != nil { + output["matched"] = false + output["error"] = fmt.Sprintf("condition evaluation failed: %v", err) + return output, nil + } + + matched = condResult.Matched + } else if expression, ok := args["expression"].(string); ok && expression != "" { + // Evaluate CEL expression + celResult, err := evaluator.EvaluateCEL(strings.TrimSpace(expression)) + if err != nil { + output["matched"] = false + output["error"] = fmt.Sprintf("CEL evaluation failed: %v", err) + return output, nil + } + matched = celResult.Matched + } + + output["matched"] = matched + + if matched { + r.log.Infof(ctx, "Precondition[%s] evaluated: MET", name) + } else { + r.log.Infof(ctx, "Precondition[%s] evaluated: NOT_MET", name) + } + + return output, nil +} + +// PreconditionsTaskRunner implements the hf:preconditions 
task. +// Deprecated: Use native SWF constructs generated by convertPreconditionsPhase() instead. +// It evaluates multiple preconditions in sequence, stopping on first failure. +type PreconditionsTaskRunner struct { + apiClient hyperfleet_api.Client + log logger.Logger +} + +// NewPreconditionsTaskRunner creates a new preconditions task runner. +func NewPreconditionsTaskRunner(deps *Dependencies) (TaskRunner, error) { + var apiClient hyperfleet_api.Client + var log logger.Logger + + if deps != nil { + if deps.APIClient != nil { + var ok bool + apiClient, ok = deps.APIClient.(hyperfleet_api.Client) + if !ok { + return nil, fmt.Errorf("invalid APIClient type") + } + } + if deps.Logger != nil { + var ok bool + log, ok = deps.Logger.(logger.Logger) + if !ok { + log = &noopLogger{} + } + } else { + log = &noopLogger{} + } + } else { + log = &noopLogger{} + } + + return &PreconditionsTaskRunner{ + apiClient: apiClient, + log: log, + }, nil +} + +func (r *PreconditionsTaskRunner) Name() string { + return TaskPreconditions +} + +// Run evaluates multiple preconditions in sequence. +// Args should contain: +// - config: Array of precondition configurations +// - context: Current workflow context +// +// Returns a map with: +// - allMatched: Boolean indicating if all preconditions matched +// - results: Array of individual precondition results +// - notMetReason: String explaining why preconditions weren't met (if applicable) +func (r *PreconditionsTaskRunner) Run(ctx context.Context, args map[string]any, input map[string]any) (map[string]any, error) { + output := make(map[string]any) + for k, v := range input { + output[k] = v + } + + preconditions, ok := args["config"].([]any) + if !ok || len(preconditions) == 0 { + output["allMatched"] = true + output["results"] = []any{} + return output, nil + } + + results := make([]any, 0, len(preconditions)) + allMatched := true + var notMetReason string + + // Create single precondition runner + singleRunner, err := NewPreconditionTaskRunner(&Dependencies{ + APIClient: r.apiClient, + Logger: r.log, + }) + if err != nil { + return nil, err + } + + for _, precondConfig := range preconditions { + precondArgs, ok := precondConfig.(map[string]any) + if !ok { + continue + } + + result, err := singleRunner.Run(ctx, precondArgs, input) + if err != nil { + allMatched = false + notMetReason = fmt.Sprintf("precondition evaluation error: %v", err) + results = append(results, map[string]any{ + "name": precondArgs["name"], + "matched": false, + "error": err.Error(), + }) + break + } + + matched, _ := result["matched"].(bool) + results = append(results, map[string]any{ + "name": precondArgs["name"], + "matched": matched, + "capturedFields": result["capturedFields"], + "apiCallMade": result["apiCallMade"], + }) + + // Merge captured fields into input for next precondition + if capturedFields, ok := result["capturedFields"].(map[string]any); ok { + if params, ok := input["params"].(map[string]any); ok { + for k, v := range capturedFields { + params[k] = v + } + } + } + + if !matched { + allMatched = false + name, _ := precondArgs["name"].(string) + notMetReason = fmt.Sprintf("precondition '%s' not met", name) + break + } + } + + output["allMatched"] = allMatched + output["results"] = results + if notMetReason != "" { + output["notMetReason"] = notMetReason + } + + return output, nil +} + +func init() { + _ = RegisterDefault(TaskPrecondition, NewPreconditionTaskRunner) + _ = RegisterDefault(TaskPreconditions, NewPreconditionsTaskRunner) +} diff --git 
a/internal/swf/tasks/registry.go b/internal/swf/tasks/registry.go new file mode 100644 index 0000000..522b6ae --- /dev/null +++ b/internal/swf/tasks/registry.go @@ -0,0 +1,137 @@ +package tasks + +import ( + "fmt" + "strings" + "sync" +) + +const ( + // TaskPrefix is the prefix for all HyperFleet custom tasks + TaskPrefix = "hf:" + + // Task type constants + TaskExtract = "hf:extract" + TaskHTTP = "hf:http" + TaskK8s = "hf:k8s" + TaskK8sRead = "hf:k8s-read" // Reads secrets and configmaps from K8s + TaskCEL = "hf:cel" + TaskTemplate = "hf:template" + TaskPrecondition = "hf:precondition" + TaskPreconditions = "hf:preconditions" + TaskResources = "hf:resources" + TaskPost = "hf:post" +) + +// Registry manages custom task runner factories. +// It provides thread-safe registration and lookup of task runners. +type Registry struct { + mu sync.RWMutex + factories map[string]TaskRunnerFactory +} + +// NewRegistry creates a new empty task registry. +func NewRegistry() *Registry { + return &Registry{ + factories: make(map[string]TaskRunnerFactory), + } +} + +// Register adds a task runner factory to the registry. +// Returns an error if a factory is already registered for the given name. +func (r *Registry) Register(name string, factory TaskRunnerFactory) error { + r.mu.Lock() + defer r.mu.Unlock() + + if _, exists := r.factories[name]; exists { + return fmt.Errorf("task runner already registered: %s", name) + } + + r.factories[name] = factory + return nil +} + +// MustRegister registers a task runner factory, panicking on error. +// Use this for registrations that should never fail (e.g., built-in tasks). +func (r *Registry) MustRegister(name string, factory TaskRunnerFactory) { + if err := r.Register(name, factory); err != nil { + panic(err) + } +} + +// Get retrieves a task runner factory by name. +// Returns the factory and true if found, nil and false otherwise. +func (r *Registry) Get(name string) (TaskRunnerFactory, bool) { + r.mu.RLock() + defer r.mu.RUnlock() + + factory, exists := r.factories[name] + return factory, exists +} + +// Create instantiates a task runner for the given task name using the provided dependencies. +// Returns an error if no factory is registered for the task name. +func (r *Registry) Create(name string, deps *Dependencies) (TaskRunner, error) { + factory, exists := r.Get(name) + if !exists { + return nil, fmt.Errorf("no task runner registered for: %s", name) + } + + return factory(deps) +} + +// IsHyperFleetTask checks if the given call name is a HyperFleet custom task. +func IsHyperFleetTask(callName string) bool { + return strings.HasPrefix(callName, TaskPrefix) +} + +// ListRegistered returns a list of all registered task names. +func (r *Registry) ListRegistered() []string { + r.mu.RLock() + defer r.mu.RUnlock() + + names := make([]string, 0, len(r.factories)) + for name := range r.factories { + names = append(names, name) + } + return names +} + +// defaultRegistry is the global registry instance used for built-in tasks. +var defaultRegistry = NewRegistry() + +// DefaultRegistry returns the global default registry. +func DefaultRegistry() *Registry { + return defaultRegistry +} + +// RegisterDefault registers a task runner factory in the default registry. +func RegisterDefault(name string, factory TaskRunnerFactory) error { + return defaultRegistry.Register(name, factory) +} + +// RegisterAllWithDeps registers all built-in HyperFleet tasks with the given registry. +// This is used to populate a custom registry with all available task runners. 
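+// Note that deps is not consumed at registration time; each runner receives
+// its Dependencies when instantiated via Registry.Create. A minimal wiring
+// sketch (client and log stand in for real implementations):
+//
+//	reg := NewRegistry()
+//	deps := &Dependencies{APIClient: client, Logger: log}
+//	if err := RegisterAllWithDeps(reg, deps); err != nil {
+//		return err
+//	}
+//	runner, err := reg.Create(TaskTemplate, deps)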
+func RegisterAllWithDeps(registry *Registry, deps *Dependencies) error { + // List of all built-in task factories + builtInTasks := map[string]TaskRunnerFactory{ + TaskExtract: NewExtractTaskRunner, + TaskHTTP: NewHTTPTaskRunner, + TaskK8s: NewK8sTaskRunner, + TaskK8sRead: NewK8sReadTaskRunner, + TaskCEL: NewCELTaskRunner, + TaskTemplate: NewTemplateTaskRunner, + TaskPrecondition: NewPreconditionTaskRunner, + TaskPreconditions: NewPreconditionsTaskRunner, + TaskResources: NewResourcesTaskRunner, + TaskPost: NewPostTaskRunner, + } + + for name, factory := range builtInTasks { + if err := registry.Register(name, factory); err != nil { + return fmt.Errorf("failed to register %s: %w", name, err) + } + } + + return nil +} diff --git a/internal/swf/tasks/registry_test.go b/internal/swf/tasks/registry_test.go new file mode 100644 index 0000000..1ba6176 --- /dev/null +++ b/internal/swf/tasks/registry_test.go @@ -0,0 +1,181 @@ +package tasks + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewRegistry(t *testing.T) { + r := NewRegistry() + require.NotNil(t, r) + assert.Empty(t, r.ListRegistered()) +} + +func TestRegistry_Register(t *testing.T) { + r := NewRegistry() + + // Create a simple test factory + factory := func(deps *Dependencies) (TaskRunner, error) { + return &testRunner{name: "test"}, nil + } + + // First registration should succeed + err := r.Register("test:task", factory) + require.NoError(t, err) + + // Second registration with same name should fail + err = r.Register("test:task", factory) + assert.Error(t, err) + assert.Contains(t, err.Error(), "already registered") + + // Different name should succeed + err = r.Register("test:task2", factory) + require.NoError(t, err) + + // Verify both are registered + registered := r.ListRegistered() + assert.Len(t, registered, 2) +} + +func TestRegistry_MustRegister(t *testing.T) { + r := NewRegistry() + + factory := func(deps *Dependencies) (TaskRunner, error) { + return &testRunner{name: "test"}, nil + } + + // First registration should not panic + assert.NotPanics(t, func() { + r.MustRegister("test:task", factory) + }) + + // Second registration should panic + assert.Panics(t, func() { + r.MustRegister("test:task", factory) + }) +} + +func TestRegistry_Get(t *testing.T) { + r := NewRegistry() + + factory := func(deps *Dependencies) (TaskRunner, error) { + return &testRunner{name: "test"}, nil + } + + _ = r.Register("test:task", factory) + + // Existing task + f, ok := r.Get("test:task") + assert.True(t, ok) + assert.NotNil(t, f) + + // Non-existing task + f, ok = r.Get("nonexistent") + assert.False(t, ok) + assert.Nil(t, f) +} + +func TestRegistry_Create(t *testing.T) { + r := NewRegistry() + + factory := func(deps *Dependencies) (TaskRunner, error) { + return &testRunner{name: "created"}, nil + } + + _ = r.Register("test:task", factory) + + // Create existing task + runner, err := r.Create("test:task", nil) + require.NoError(t, err) + require.NotNil(t, runner) + assert.Equal(t, "created", runner.Name()) + + // Create non-existing task + _, err = r.Create("nonexistent", nil) + assert.Error(t, err) + assert.Contains(t, err.Error(), "no task runner registered") +} + +func TestIsHyperFleetTask(t *testing.T) { + tests := []struct { + name string + taskName string + expected bool + }{ + {"hf:extract", "hf:extract", true}, + {"hf:k8s", "hf:k8s", true}, + {"hf:custom", "hf:custom", true}, + {"http", "http", false}, + {"openapi", "openapi", false}, + {"empty", "", 
false},
+		{"hf without colon", "hfextract", false},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := IsHyperFleetTask(tt.taskName)
+			assert.Equal(t, tt.expected, result)
+		})
+	}
+}
+
+func TestTaskConstants(t *testing.T) {
+	// Verify all task constants have the correct prefix
+	tasks := []string{
+		TaskExtract,
+		TaskHTTP,
+		TaskK8s,
+		TaskK8sRead,
+		TaskCEL,
+		TaskTemplate,
+		TaskPrecondition,
+		TaskPreconditions,
+		TaskResources,
+		TaskPost,
+	}
+
+	for _, task := range tasks {
+		assert.True(t, IsHyperFleetTask(task), "task %s should be a HyperFleet task", task)
+	}
+}
+
+func TestRegisterAllWithDeps(t *testing.T) {
+	r := NewRegistry()
+	deps := &Dependencies{}
+
+	err := RegisterAllWithDeps(r, deps)
+	require.NoError(t, err)
+
+	// Verify all expected tasks are registered
+	expectedTasks := []string{
+		TaskExtract,
+		TaskHTTP,
+		TaskK8s,
+		TaskK8sRead,
+		TaskCEL,
+		TaskTemplate,
+		TaskPrecondition,
+		TaskPreconditions,
+		TaskResources,
+		TaskPost,
+	}
+
+	registered := r.ListRegistered()
+	for _, task := range expectedTasks {
+		assert.Contains(t, registered, task, "expected task %s to be registered", task)
+	}
+}
+
+// testRunner is a simple TaskRunner implementation for testing
+type testRunner struct {
+	name string
+}
+
+func (r *testRunner) Name() string {
+	return r.name
+}
+
+func (r *testRunner) Run(ctx context.Context, args map[string]any, input map[string]any) (map[string]any, error) {
+	return input, nil
+}
diff --git a/internal/swf/tasks/template.go b/internal/swf/tasks/template.go
new file mode 100644
index 0000000..e350251
--- /dev/null
+++ b/internal/swf/tasks/template.go
@@ -0,0 +1,183 @@
+package tasks
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"strconv"
+	"strings"
+	"text/template"
+	"time"
+
+	"golang.org/x/text/cases"
+	"golang.org/x/text/language"
+)
+
+// TemplateTaskRunner implements the hf:template task for Go template rendering.
+// It renders Go templates with access to workflow context data.
+type TemplateTaskRunner struct{}
+
+// NewTemplateTaskRunner creates a new template task runner.
+func NewTemplateTaskRunner(deps *Dependencies) (TaskRunner, error) {
+	return &TemplateTaskRunner{}, nil
+}
+
+func (r *TemplateTaskRunner) Name() string {
+	return TaskTemplate
+}
+
+// Run executes the template rendering task.
+// Args should contain:
+// - template: The Go template string to render
+// - data: Optional map of data to use (defaults to input)
+//
+// Returns a map with:
+// - result: The rendered template string
+func (r *TemplateTaskRunner) Run(ctx context.Context, args map[string]any, input map[string]any) (map[string]any, error) {
+	templateStr, ok := args["template"].(string)
+	if !ok || templateStr == "" {
+		return nil, fmt.Errorf("template is required for hf:template task")
+	}
+
+	// Get data - use args["data"] if provided, otherwise use input
+	data := input
+	if argData, ok := args["data"].(map[string]any); ok {
+		data = argData
+	}
+
+	// Render the template
+	result, err := RenderTemplate(templateStr, data)
+	if err != nil {
+		return nil, fmt.Errorf("template rendering failed: %w", err)
+	}
+
+	output := make(map[string]any)
+
+	// Copy input for continuation
+	for k, v := range input {
+		output[k] = v
+	}
+
+	output["result"] = result
+
+	return output, nil
+}
+
+// templateFuncs provides common functions for Go templates.
+// This mirrors the functions available in executor/utils.go.
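+// Example renderings, assuming data of {"name": "ALICE", "nick": nil}:
+//
+//	{{ lower .name }}          -> "alice"
+//	{{ default "anon" .nick }} -> "anon" (nil and "" both take the default)
+//	{{ quote .name }}          -> "\"ALICE\""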
+var templateFuncs = template.FuncMap{ + // Time functions + "now": time.Now, + "date": func(layout string, t time.Time) string { + return t.Format(layout) + }, + "dateFormat": func(layout string, t time.Time) string { + return t.Format(layout) + }, + // String functions + "lower": strings.ToLower, + "upper": strings.ToUpper, + "title": func(s string) string { + return cases.Title(language.English).String(s) + }, + "trim": strings.TrimSpace, + "replace": strings.ReplaceAll, + "contains": strings.Contains, + "hasPrefix": strings.HasPrefix, + "hasSuffix": strings.HasSuffix, + // Default value function + "default": func(defaultVal, val any) any { + if val == nil || val == "" { + return defaultVal + } + return val + }, + // Quote function + "quote": func(s string) string { + return fmt.Sprintf("%q", s) + }, + // Type conversion functions + "int": func(v any) int { + switch val := v.(type) { + case int: + return val + case int64: + return int(val) + case float64: + return int(val) + case string: + i, _ := strconv.Atoi(val) + return i + default: + return 0 + } + }, + "int64": func(v any) int64 { + switch val := v.(type) { + case int: + return int64(val) + case int64: + return val + case float64: + return int64(val) + case string: + i, _ := strconv.ParseInt(val, 10, 64) + return i + default: + return 0 + } + }, + "float64": func(v any) float64 { + switch val := v.(type) { + case int: + return float64(val) + case int64: + return float64(val) + case float64: + return val + case string: + f, _ := strconv.ParseFloat(val, 64) + return f + default: + return 0 + } + }, + "string": func(v any) string { + return fmt.Sprintf("%v", v) + }, +} + +// RenderTemplate renders a Go template string with the given data. +// This is a shared utility function. +func RenderTemplate(templateStr string, data map[string]any) (string, error) { + // If no template delimiters, return as-is + if !strings.Contains(templateStr, "{{") { + return templateStr, nil + } + + tmpl, err := template.New("template").Funcs(templateFuncs).Option("missingkey=error").Parse(templateStr) + if err != nil { + return "", fmt.Errorf("failed to parse template: %w", err) + } + + var buf bytes.Buffer + if err := tmpl.Execute(&buf, data); err != nil { + return "", fmt.Errorf("failed to execute template: %w", err) + } + + return buf.String(), nil +} + +// RenderTemplateBytes renders a Go template string and returns bytes. 
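+// A convenience for call sites that need a []byte, e.g. an HTTP request
+// body (illustrative use, assuming params carries a clusterId key):
+//
+//	body, err := RenderTemplateBytes(`{"cluster": "{{ .clusterId }}"}`, params)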
+func RenderTemplateBytes(templateStr string, data map[string]any) ([]byte, error) { + result, err := RenderTemplate(templateStr, data) + if err != nil { + return nil, err + } + return []byte(result), nil +} + +func init() { + // Register the template task runner in the default registry + _ = RegisterDefault(TaskTemplate, NewTemplateTaskRunner) +} diff --git a/internal/swf/tasks/template_test.go b/internal/swf/tasks/template_test.go new file mode 100644 index 0000000..14dc452 --- /dev/null +++ b/internal/swf/tasks/template_test.go @@ -0,0 +1,251 @@ +package tasks + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewTemplateTaskRunner(t *testing.T) { + runner, err := NewTemplateTaskRunner(nil) + require.NoError(t, err) + require.NotNil(t, runner) + assert.Equal(t, TaskTemplate, runner.Name()) +} + +func TestTemplateTaskRunner_Run(t *testing.T) { + runner, _ := NewTemplateTaskRunner(nil) + ctx := context.Background() + + tests := []struct { + name string + args map[string]any + input map[string]any + expected string + expectError bool + }{ + { + name: "missing template", + args: map[string]any{}, + input: map[string]any{}, + expectError: true, + }, + { + name: "simple template without variables", + args: map[string]any{ + "template": "Hello World", + }, + input: map[string]any{}, + expected: "Hello World", + }, + { + name: "template with variable substitution", + args: map[string]any{ + "template": "Hello {{ .name }}!", + }, + input: map[string]any{ + "name": "Alice", + }, + expected: "Hello Alice!", + }, + { + name: "template with nested data", + args: map[string]any{ + "template": "Cluster: {{ .cluster.id }}", + }, + input: map[string]any{ + "cluster": map[string]any{ + "id": "cluster-123", + }, + }, + expected: "Cluster: cluster-123", + }, + { + name: "template with custom data arg", + args: map[string]any{ + "template": "Value: {{ .value }}", + "data": map[string]any{ + "value": "custom", + }, + }, + input: map[string]any{ + "value": "from-input", + }, + expected: "Value: custom", // data arg takes precedence + }, + { + name: "template with lower function", + args: map[string]any{ + "template": "{{ lower .name }}", + }, + input: map[string]any{ + "name": "ALICE", + }, + expected: "alice", + }, + { + name: "template with upper function", + args: map[string]any{ + "template": "{{ upper .name }}", + }, + input: map[string]any{ + "name": "alice", + }, + expected: "ALICE", + }, + { + name: "template with default function - nil value", + args: map[string]any{ + "template": "{{ default \"unknown\" .value }}", + }, + input: map[string]any{ + "value": nil, + }, + expected: "unknown", + }, + { + name: "template with default function - empty string", + args: map[string]any{ + "template": "{{ default \"unknown\" .value }}", + }, + input: map[string]any{ + "value": "", + }, + expected: "unknown", + }, + { + name: "template with default function - has value", + args: map[string]any{ + "template": "{{ default \"unknown\" .value }}", + }, + input: map[string]any{ + "value": "actual", + }, + expected: "actual", + }, + { + name: "no template delimiters returns as-is", + args: map[string]any{ + "template": "plain text without delimiters", + }, + input: map[string]any{}, + expected: "plain text without delimiters", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + output, err := runner.Run(ctx, tt.args, tt.input) + + if tt.expectError { + assert.Error(t, err) + return + } + + require.NoError(t, 
err) + require.NotNil(t, output) + assert.Equal(t, tt.expected, output["result"]) + }) + } +} + +func TestRenderTemplate(t *testing.T) { + tests := []struct { + name string + template string + data map[string]any + expected string + expectError bool + }{ + { + name: "no delimiters", + template: "plain text", + data: map[string]any{}, + expected: "plain text", + }, + { + name: "simple substitution", + template: "Hello {{ .name }}", + data: map[string]any{"name": "World"}, + expected: "Hello World", + }, + { + name: "missing key with missingkey=error", + template: "Hello {{ .missing }}", + data: map[string]any{}, + expectError: true, + }, + { + name: "trim function", + template: "{{ trim .value }}", + data: map[string]any{"value": " spaced "}, + expected: "spaced", + }, + { + name: "replace function", + template: "{{ replace .value \"-\" \"_\" }}", + data: map[string]any{"value": "a-b-c"}, + expected: "a_b_c", + }, + { + name: "contains function", + template: "{{ if contains .value \"test\" }}yes{{ else }}no{{ end }}", + data: map[string]any{"value": "this is a test"}, + expected: "yes", + }, + { + name: "int conversion", + template: "{{ int .value }}", + data: map[string]any{"value": "42"}, + expected: "42", + }, + { + name: "quote function", + template: "{{ quote .value }}", + data: map[string]any{"value": "hello"}, + expected: `"hello"`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := RenderTemplate(tt.template, tt.data) + + if tt.expectError { + assert.Error(t, err) + return + } + + require.NoError(t, err) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestTemplateTaskRunner_PreservesInput(t *testing.T) { + runner, _ := NewTemplateTaskRunner(nil) + ctx := context.Background() + + input := map[string]any{ + "existingKey": "existingValue", + "nested": map[string]any{ + "key": "value", + }, + } + + args := map[string]any{ + "template": "rendered", + } + + output, err := runner.Run(ctx, args, input) + require.NoError(t, err) + + // Original input should be preserved + assert.Equal(t, "existingValue", output["existingKey"]) + assert.Equal(t, input["nested"], output["nested"]) + + // Result should be added + assert.Equal(t, "rendered", output["result"]) +}
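+
+// TestRenderTemplateBytes is an editorial sketch covering the []byte
+// convenience wrapper, which the suite above leaves untested; it should
+// agree byte-for-byte with RenderTemplate and propagate its errors.
+func TestRenderTemplateBytes(t *testing.T) {
+	out, err := RenderTemplateBytes("Hello {{ .name }}", map[string]any{"name": "Bob"})
+	require.NoError(t, err)
+	assert.Equal(t, []byte("Hello Bob"), out)
+
+	// missingkey=error inside RenderTemplate surfaces here unchanged
+	_, err = RenderTemplateBytes("{{ .missing }}", map[string]any{})
+	assert.Error(t, err)
+}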