diff --git a/cmd/family-migrator/main.go b/cmd/family-migrator/main.go
index 2f19977..067c6df 100644
--- a/cmd/family-migrator/main.go
+++ b/cmd/family-migrator/main.go
@@ -31,6 +31,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"sigs.k8s.io/controller-runtime/pkg/log/zap"
 
+	"github.com/upbound/extensions-migration/pkg/cache"
 	"github.com/upbound/extensions-migration/pkg/converter/configuration"
 )
 
@@ -49,6 +50,8 @@ const (
 	providerAwsChoice   = "provider-aws"
 	providerAzureChoice = "provider-azure"
 	providerGcpChoice   = "provider-gcp"
+
+	cacheFileName = ".cache"
 )
 
 // Options represents the available options for the family-migrator.
@@ -68,7 +71,9 @@ type Options struct {
 		KubeConfig string `name:"kubeconfig" help:"Path to the kubeconfig to use."`
 	} `kong:"cmd"`
 
-	Execute struct{} `kong:"cmd"`
+	Execute struct {
+		LoadPlan int `name:"load-plan" help:"Start the execution of the migration plan from the specified step."`
+	} `kong:"cmd"`
 
 	PlanPath string `name:"plan-path" help:"Migration plan output path." survey:"plan-path"`
 
@@ -101,6 +106,39 @@ func main() {
 }
 
 func generatePlan(kongCtx *kong.Context, opts *Options, planDir string) {
+	r := registerConverters(kongCtx, opts)
+
+	fsSource, err := migration.NewFileSystemSource(opts.Generate.PackageRoot)
+	kongCtx.FatalIfErrorf(err, "Failed to initialize the migration FileSystem source from path: %s", opts.Generate.PackageRoot)
+
+	if len(opts.Generate.KubeConfig) == 0 {
+		homeDir, err := os.UserHomeDir()
+		kongCtx.FatalIfErrorf(err, "Failed to get user's home")
+		opts.Generate.KubeConfig = filepath.Join(homeDir, defaultKubeConfig)
+	}
+	kubeSource, err := migration.NewKubernetesSourceFromKubeConfig(opts.Generate.KubeConfig, migration.WithRegistry(r), migration.WithCategories([]migration.Category{migration.CategoryManaged}))
+	kongCtx.FatalIfErrorf(err, "Failed to initialize the migration Kubernetes source from kubeconfig: %s", opts.Generate.KubeConfig)
+
+	pg := migration.NewPlanGenerator(r, nil, migration.NewFileSystemTarget(migration.WithParentDirectory(planDir)), migration.WithEnableConfigurationMigrationSteps(), migration.WithMultipleSources(fsSource, kubeSource), migration.WithSkipGVKs(schema.GroupVersionKind{}))
+	kongCtx.FatalIfErrorf(pg.GeneratePlan(), "Failed to generate the migration plan for the provider families")
+
+	setPkgParameters(&pg.Plan, *opts)
+	buff, err := yaml.Marshal(pg.Plan)
+	kongCtx.FatalIfErrorf(err, "Failed to marshal the migration plan to YAML")
+	kongCtx.FatalIfErrorf(os.WriteFile(opts.PlanPath, buff, 0600), "Failed to store the migration plan at path: %s", opts.PlanPath)
+
+	var moveExecution bool
+	moveExecutionPhaseQuestion := &survey.Confirm{
+		Message: fmt.Sprintf("The migration plan has been generated at path: %s. "+
+			"The referred resource manifests and the patch documents can be found under: %s.\nWould you like to proceed to the execution phase?", opts.PlanPath, planDir),
+	}
+	kongCtx.FatalIfErrorf(survey.AskOne(moveExecutionPhaseQuestion, &moveExecution))
+	if moveExecution {
+		executePlan(kongCtx, planDir, opts)
+	}
+}
+
+func registerConverters(kongCtx *kong.Context, opts *Options) *migration.Registry {
 	r := migration.NewRegistry(runtime.NewScheme())
 	err := r.AddCrossplanePackageTypes()
 	kongCtx.FatalIfErrorf(err, "Failed to register the Provider package types with the migration registry")
@@ -156,35 +194,7 @@ func generatePlan(kongCtx *kong.Context, opts *Options, planDir string) {
 		PackageURL: opts.Generate.SourceConfigurationPackage,
 	})
 	kongCtx.FatalIfErrorf(r.AddCompositionTypes(), "Failed to register the Crossplane Composition types with the migration registry")
-
-	fsSource, err := migration.NewFileSystemSource(opts.Generate.PackageRoot)
-	kongCtx.FatalIfErrorf(err, "Failed to initialize the migration FileSystem source from path: %s", opts.Generate.PackageRoot)
-
-	if len(opts.Generate.KubeConfig) == 0 {
-		homeDir, err := os.UserHomeDir()
-		kongCtx.FatalIfErrorf(err, "Failed to get user's home")
-		opts.Generate.KubeConfig = filepath.Join(homeDir, defaultKubeConfig)
-	}
-	kubeSource, err := migration.NewKubernetesSourceFromKubeConfig(opts.Generate.KubeConfig, migration.WithRegistry(r), migration.WithCategories([]migration.Category{migration.CategoryManaged}))
-	kongCtx.FatalIfErrorf(err, "Failed to initialize the migration Kubernetes source from kubeconfig: %s", opts.Generate.KubeConfig)
-
-	pg := migration.NewPlanGenerator(r, nil, migration.NewFileSystemTarget(migration.WithParentDirectory(planDir)), migration.WithEnableConfigurationMigrationSteps(), migration.WithMultipleSources(fsSource, kubeSource), migration.WithSkipGVKs(schema.GroupVersionKind{}))
-	kongCtx.FatalIfErrorf(pg.GeneratePlan(), "Failed to generate the migration plan for the provider families")
-
-	setPkgParameters(&pg.Plan, *opts)
-	buff, err := yaml.Marshal(pg.Plan)
-	kongCtx.FatalIfErrorf(err, "Failed to marshal the migration plan to YAML")
-	kongCtx.FatalIfErrorf(os.WriteFile(opts.PlanPath, buff, 0600), "Failed to store the migration plan at path: %s", opts.PlanPath)
-
-	var moveExecution bool
-	moveExecutionPhaseQuestion := &survey.Confirm{
-		Message: fmt.Sprintf("The migration plan has been generated at path: %s. "+
-			"The referred resource manifests and the patch documents can be found under: %s.\nWould you like to proceed to the execution phase?", opts.PlanPath, planDir),
-	}
-	kongCtx.FatalIfErrorf(survey.AskOne(moveExecutionPhaseQuestion, &moveExecution))
-	if moveExecution {
-		executePlan(kongCtx, planDir, opts)
-	}
+	return r
 }
 
 func executePlan(kongCtx *kong.Context, planDir string, opts *Options) {
@@ -193,6 +203,35 @@
 	kongCtx.FatalIfErrorf(err, "Failed to read the migration plan from path: %s", opts.PlanPath)
 	kongCtx.FatalIfErrorf(yaml.Unmarshal(buff, plan), "Failed to unmarshal the migration plan: %s", opts.PlanPath)
 
+	var c cache.Cache
+	var startIndex int
+	readPlanHash, err := cache.CalculateHash(buff)
+	kongCtx.FatalIfErrorf(err, "Failed to calculate the hash of the migration plan")
+	cacheFilePath := filepath.Join(planDir, cacheFileName)
+
+	if opts.Execute.LoadPlan == 0 {
+		if cache.IsCacheExists(cacheFilePath) {
+			cacheBuff, err := os.ReadFile(cacheFilePath)
+			kongCtx.FatalIfErrorf(err, "Failed to read the cache file")
+			kongCtx.FatalIfErrorf(yaml.Unmarshal(cacheBuff, &c), "Failed to unmarshal the cache file")
+			if c.Hash == readPlanHash && cache.AskToContinueExecution(kongCtx) {
+				startIndex = c.Step
+			} else {
+				cache.ClearCache(cacheFilePath, kongCtx)
+			}
+		}
+	} else {
+		var continueToLoadPlan bool
+		kongCtx.FatalIfErrorf(survey.AskOne(&survey.Confirm{
+			Message: fmt.Sprintf("You specified a step index to execute the plan: %d. "+
+				"Would you like to start the execution from this step?", opts.Execute.LoadPlan),
+		}, &continueToLoadPlan))
+		if continueToLoadPlan {
+			startIndex = opts.Execute.LoadPlan
+		}
+	}
+	c.Hash = readPlanHash
+
 	stepByStep := askExecutionSteps(kongCtx, plan, opts, planDir)
 	zl := zap.New(zap.UseDevMode(opts.Debug))
 	log := logging.NewLogrLogger(zl.WithName("fork-executor"))
@@ -204,13 +243,18 @@
 		planExecutor = migration.NewPlanExecutor(*plan, []migration.Executor{executor},
 			migration.WithExecutorCallback(&executionCallback{
 				logger: logging.NewLogrLogger(zl.WithName("family-migrator")),
-			}))
+			}), migration.WithStartIndex(startIndex))
 	} else {
-		planExecutor = migration.NewPlanExecutor(*plan, []migration.Executor{executor})
+		planExecutor = migration.NewPlanExecutor(*plan, []migration.Executor{executor}, migration.WithStartIndex(startIndex))
 	}
 	backupDir := filepath.Join(planDir, "backup")
 	kongCtx.FatalIfErrorf(os.MkdirAll(backupDir, 0o700), "Failed to mkdir backup directory: %s", backupDir)
-	kongCtx.FatalIfErrorf(planExecutor.Execute(), "Failed to execute the migration plan at path: %s", opts.PlanPath)
+	err = planExecutor.Execute()
+	if err != nil {
+		c.Step = planExecutor.LastSuccessfulStep + 1
+		kongCtx.FatalIfErrorf(os.WriteFile(cacheFilePath, []byte(c.String()), 0600), "Failed to store the execution cache at path: %s", cacheFilePath)
+		kongCtx.FatalIfErrorf(err, "Failed to execute the migration plan at path: %s", opts.PlanPath)
+	}
 }
 
 func setPkgParameters(plan *migration.Plan, opts Options) {
@@ -337,10 +381,13 @@
 	}
 	kongCtx.FatalIfErrorf(survey.AskOne(manualExecutionSteps, &displaySteps))
 	if displaySteps {
-		for _, s := range plan.Spec.Steps {
+		for i, s := range plan.Spec.Steps {
+			buff := strings.Builder{}
+			buff.WriteString(fmt.Sprintf("%d. %s: %s\n", i+1, s.Name, s.Description))
 			for _, c := range s.ManualExecution {
-				fmt.Println(c)
+				buff.WriteString(fmt.Sprintf("$ %s\n", c))
 			}
+			fmt.Println(buff.String())
 		}
 	}
 
@@ -445,5 +492,4 @@ func (cb *executionCallback) StepFailed(s migration.Step, index int, diagnostics
 		cb.logger.Info("Step will be run again", "index", index, "name", s.Name, "err", err)
 		return migration.CallbackResult{Action: migration.ActionRepeat}
 	}
-
 }
diff --git a/go.mod b/go.mod
index 574456b..68bc836 100644
--- a/go.mod
+++ b/go.mod
@@ -89,3 +89,5 @@ require (
 	sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
 	sigs.k8s.io/yaml v1.3.0 // indirect
 )
+
+replace github.com/upbound/upjet => ../upjet
diff --git a/go.sum b/go.sum
index 9ec0a17..a863426 100644
--- a/go.sum
+++ b/go.sum
@@ -342,8 +342,6 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/upbound/upjet v0.9.0-rc.0.0.20230707151127-90af5e54611a h1:D0E7zn1NCZv9fiDgnG9p9wI4IV7kZYNMugt3xUlWutw=
-github.com/upbound/upjet v0.9.0-rc.0.0.20230707151127-90af5e54611a/go.mod h1:3wLiq0eRZ+o6i6TWvELJ0jHv3pSnHk7r/zC2zFRDif8=
 github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
 github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
 github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
diff --git a/pkg/cache/cache.go b/pkg/cache/cache.go
new file mode 100644
index 0000000..6cd2760
--- /dev/null
+++ b/pkg/cache/cache.go
@@ -0,0 +1,63 @@
+// Package cache persists the family-migrator's execution progress so that an
+// interrupted migration plan can be resumed from its last successful step.
+package cache
+
+import (
+	"bytes"
+	"fmt"
+	"hash/fnv"
+	"io"
+	"os"
+
+	"github.com/AlecAivazis/survey/v2"
+	"github.com/alecthomas/kong"
+	"github.com/pkg/errors"
+)
+
+// Cache holds the execution state of a migration plan: the hash of the plan
+// document and the index of the step to resume from.
+type Cache struct {
+	Hash uint32 `json:"hash"`
+	Step int    `json:"step"`
+}
+
+// IsCacheExists reports whether a cache file exists at the given path.
+func IsCacheExists(cacheFilePath string) bool {
+	_, err := os.Stat(cacheFilePath)
+	return !os.IsNotExist(err)
+}
+
+// AskToContinueExecution asks the user whether an uncompleted plan execution
+// should be resumed from where it left off.
+func AskToContinueExecution(kongCtx *kong.Context) bool {
+	var response bool
+	kongCtx.FatalIfErrorf(survey.AskOne(&survey.Confirm{
+		Message: "An uncompleted plan execution was found. Would you like to continue executing this plan from where you left off?",
+	}, &response), "")
+	return response
+}
+
+// ClearCache removes the cache file at the given path.
+func ClearCache(cacheFilePath string, kongCtx *kong.Context) {
+	err := os.Remove(cacheFilePath)
+	if err != nil {
+		kongCtx.FatalIfErrorf(err, "Failed to remove the cache file")
+	}
+}
+
+// CalculateHash returns the 32-bit FNV-1a hash of the given plan document.
+func CalculateHash(buff []byte) (uint32, error) {
+	h := fnv.New32a()
+	buffer := &bytes.Buffer{}
+	buffer.Write(buff)
+	if _, err := io.Copy(h, buffer); err != nil {
+		return 0, errors.Wrap(err, "cannot copy file content")
+	}
+	return h.Sum32(), nil
+}
+
+// String serializes the cache state in the YAML format that executePlan
+// writes to and reads from the cache file.
+func (c *Cache) String() string {
+	return fmt.Sprintf("hash: %d\nstep: %d", c.Hash, c.Step)
+}
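
The hunks above introduce a simple resume protocol: when plan execution fails, executePlan writes a Cache value (the plan's hash and the next step index) to the .cache file via Cache.String(), and a later run reads it back with yaml.Unmarshal and, if the plan hash still matches, feeds the stored step to migration.WithStartIndex. The standalone sketch below illustrates that round trip; it assumes the migrator's yaml package is a Kubernetes-style YAML library (such as sigs.k8s.io/yaml) that honors the struct's JSON tags, and the literal hash and step values are illustrative only.

package main

import (
	"fmt"

	"sigs.k8s.io/yaml"

	"github.com/upbound/extensions-migration/pkg/cache"
)

func main() {
	// State written by executePlan after a failed step: the plan document's
	// FNV-1a hash and the index of the step to resume from.
	written := cache.Cache{Hash: 2166136261, Step: 7}
	buf := []byte(written.String()) // "hash: 2166136261\nstep: 7"

	// On the next `family-migrator execute` run, the cache file is read back
	// and the stored step becomes the plan executor's start index.
	var read cache.Cache
	if err := yaml.Unmarshal(buf, &read); err != nil {
		panic(err)
	}
	fmt.Printf("resume execution at step %d (plan hash %d)\n", read.Step, read.Hash)
}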