diff --git a/cmd/reset.go b/cmd/reset.go
new file mode 100644
index 00000000..22f17204
--- /dev/null
+++ b/cmd/reset.go
@@ -0,0 +1,73 @@
+package cmd
+
+import (
+	"errors"
+	"fmt"
+	"os"
+
+	"github.com/localstack/lstk/internal/config"
+	"github.com/localstack/lstk/internal/emulator/aws"
+	"github.com/localstack/lstk/internal/endpoint"
+	"github.com/localstack/lstk/internal/env"
+	"github.com/localstack/lstk/internal/output"
+	"github.com/localstack/lstk/internal/reset"
+	"github.com/localstack/lstk/internal/runtime"
+	"github.com/localstack/lstk/internal/ui"
+	"github.com/spf13/cobra"
+)
+
+func newResetCmd(cfg *env.Env) *cobra.Command {
+	var force bool
+
+	cmd := &cobra.Command{
+		Use:   "reset",
+		Short: "Reset emulator state",
+		Long: `Reset the running emulator's in-memory state.
+
+All resources created in the emulator (S3 buckets, Lambda functions, etc.) are
+discarded. The emulator keeps running; only its state is cleared.
+
+To wipe the on-disk volume (certificates, persistence data, cached tools)
+instead, stop the emulator and run "lstk volume clear".`,
+		PreRunE: initConfig(nil),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			appConfig, err := config.Get()
+			if err != nil {
+				return fmt.Errorf("failed to get config: %w", err)
+			}
+
+			var awsContainer *config.ContainerConfig
+			for i, c := range appConfig.Containers {
+				if c.Type == config.EmulatorAWS {
+					awsContainer = &appConfig.Containers[i]
+					break
+				}
+			}
+			if awsContainer == nil {
+				return errors.New("reset is only supported for the AWS emulator")
+			}
+
+			interactive := isInteractiveMode(cfg)
+			if !interactive && !force {
+				return errors.New("reset requires confirmation; use --force to skip in non-interactive mode")
+			}
+
+			rt, err := runtime.NewDockerRuntime(cfg.DockerHost)
+			if err != nil {
+				return err
+			}
+			host, _ := endpoint.ResolveHost(cmd.Context(), awsContainer.Port, cfg.LocalStackHost)
+			resetter := aws.NewClient()
+
+			containers := []config.ContainerConfig{*awsContainer}
+
+			if interactive {
+				return ui.RunReset(cmd.Context(), rt, containers, resetter, host, force)
+			}
+			return reset.Reset(cmd.Context(), rt, containers, resetter, host, force, output.NewPlainSink(os.Stdout))
+		},
+	}
+
+	cmd.Flags().BoolVar(&force, "force", false, "Skip confirmation prompt")
+	return cmd
+}
diff --git a/cmd/root.go b/cmd/root.go
index 909203e4..c703bd25 100644
--- a/cmd/root.go
+++ b/cmd/root.go
@@ -79,6 +79,7 @@ func NewRootCmd(cfg *env.Env, tel *telemetry.Client, logger log.Logger) *cobra.C
 		newDocsCmd(),
 		newAWSCmd(cfg),
 		newSnapshotCmd(cfg),
+		newResetCmd(cfg),
 	)
 
 	return root
diff --git a/internal/emulator/aws/client.go b/internal/emulator/aws/client.go
index c5a23ca2..375883b2 100644
--- a/internal/emulator/aws/client.go
+++ b/internal/emulator/aws/client.go
@@ -133,6 +133,25 @@ func (c *Client) FetchResources(ctx context.Context, host string) ([]emulator.Re
 	return rows, nil
 }
 
+func (c *Client) ResetState(ctx context.Context, host string) error {
+	url := fmt.Sprintf("http://%s/_localstack/state/reset", host)
+	req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil)
+	if err != nil {
+		return fmt.Errorf("create request: %w", err)
+	}
+
+	resp, err := c.http.Do(req)
+	if err != nil {
+		return fmt.Errorf("connect to LocalStack: %w", err)
+	}
+	defer func() { _ = resp.Body.Close() }()
+
+	if resp.StatusCode != http.StatusOK {
+		return fmt.Errorf("LocalStack returned status %d", resp.StatusCode)
+	}
+	return nil
+}
+
 func (c *Client) ExportState(ctx context.Context, host string, dst io.Writer) error {
 	url := fmt.Sprintf("http://%s/_localstack/pods/state", host)
 	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
diff --git a/internal/reset/mock_state_resetter_test.go b/internal/reset/mock_state_resetter_test.go
new file mode 100644
index 00000000..a5e2d5ca
--- /dev/null
+++ b/internal/reset/mock_state_resetter_test.go
@@ -0,0 +1,55 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: reset.go
+//
+// Generated by this command:
+//
+//	mockgen -source=reset.go -destination=mock_state_resetter_test.go -package=reset_test
+//
+
+// Package reset_test is a generated GoMock package.
+package reset_test
+
+import (
+	context "context"
+	reflect "reflect"
+
+	gomock "go.uber.org/mock/gomock"
+)
+
+// MockStateResetter is a mock of StateResetter interface.
+type MockStateResetter struct {
+	ctrl     *gomock.Controller
+	recorder *MockStateResetterMockRecorder
+	isgomock struct{}
+}
+
+// MockStateResetterMockRecorder is the mock recorder for MockStateResetter.
+type MockStateResetterMockRecorder struct {
+	mock *MockStateResetter
+}
+
+// NewMockStateResetter creates a new mock instance.
+func NewMockStateResetter(ctrl *gomock.Controller) *MockStateResetter {
+	mock := &MockStateResetter{ctrl: ctrl}
+	mock.recorder = &MockStateResetterMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockStateResetter) EXPECT() *MockStateResetterMockRecorder {
+	return m.recorder
+}
+
+// ResetState mocks base method.
+func (m *MockStateResetter) ResetState(ctx context.Context, host string) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ResetState", ctx, host)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// ResetState indicates an expected call of ResetState.
+func (mr *MockStateResetterMockRecorder) ResetState(ctx, host any) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetState", reflect.TypeOf((*MockStateResetter)(nil).ResetState), ctx, host)
+}
diff --git a/internal/reset/reset.go b/internal/reset/reset.go
new file mode 100644
index 00000000..2ffde066
--- /dev/null
+++ b/internal/reset/reset.go
@@ -0,0 +1,75 @@
+package reset
+
+//go:generate mockgen -source=reset.go -destination=mock_state_resetter_test.go -package=reset_test
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/localstack/lstk/internal/config"
+	"github.com/localstack/lstk/internal/container"
+	"github.com/localstack/lstk/internal/output"
+	"github.com/localstack/lstk/internal/runtime"
+)
+
+// StateResetter clears state in the running LocalStack instance.
+type StateResetter interface {
+	ResetState(ctx context.Context, host string) error
+}
+
+func Reset(ctx context.Context, rt runtime.Runtime, containers []config.ContainerConfig, resetter StateResetter, host string, force bool, sink output.Sink) (retErr error) {
+	if err := rt.IsHealthy(ctx); err != nil {
+		rt.EmitUnhealthyError(sink, err)
+		return output.NewSilentError(fmt.Errorf("runtime not healthy: %w", err))
+	}
+
+	runningContainers, err := container.RunningEmulators(ctx, rt, containers)
+	if err != nil {
+		return fmt.Errorf("checking emulator status: %w", err)
+	}
+	if len(runningContainers) == 0 {
+		sink.Emit(output.ErrorEvent{
+			Title: "LocalStack is not running",
+			Actions: []output.ErrorAction{
+				{Label: "Start LocalStack:", Value: "lstk"},
+				{Label: "See help:", Value: "lstk -h"},
+			},
+		})
+		return output.NewSilentError(fmt.Errorf("LocalStack is not running"))
+	}
+
+	if !force {
+		responseCh := make(chan output.InputResponse, 1)
+		sink.Emit(output.UserInputRequestEvent{
+			Prompt: "Reset emulator state? All resources will be lost",
+			Options: []output.InputOption{
+				{Key: "y", Label: "Yes"},
+				{Key: "n", Label: "No"},
+			},
+			ResponseCh: responseCh,
+		})
+
+		select {
+		case resp := <-responseCh:
+			if resp.Cancelled || resp.SelectedKey != "y" {
+				sink.Emit(output.MessageEvent{Severity: output.SeverityNote, Text: "Cancelled"})
+				return nil
+			}
+		case <-ctx.Done():
+			return ctx.Err()
+		}
+	}
+
+	sink.Emit(output.SpinnerStart("Resetting state..."))
+	defer func() {
+		sink.Emit(output.SpinnerStop())
+		if retErr == nil {
+			sink.Emit(output.MessageEvent{Severity: output.SeveritySuccess, Text: "Emulator state reset"})
+		}
+	}()
+
+	if err := resetter.ResetState(ctx, host); err != nil {
+		return fmt.Errorf("reset state: %w", err)
+	}
+	return nil
+}
diff --git a/internal/reset/reset_test.go b/internal/reset/reset_test.go
new file mode 100644
index 00000000..a43372ca
--- /dev/null
+++ b/internal/reset/reset_test.go
@@ -0,0 +1,181 @@
+package reset_test
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"sync"
+	"testing"
+
+	"github.com/localstack/lstk/internal/config"
+	"github.com/localstack/lstk/internal/output"
+	"github.com/localstack/lstk/internal/reset"
+	"github.com/localstack/lstk/internal/runtime"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"go.uber.org/mock/gomock"
+)
+
+var awsContainers = []config.ContainerConfig{{Type: config.EmulatorAWS}}
+
+type recordedEvents struct {
+	mu     sync.Mutex
+	events []output.Event
+}
+
+func (r *recordedEvents) snapshot() []output.Event {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	return append([]output.Event(nil), r.events...)
+}
+
+// captureEvents returns a sink that records every event and a prompts channel
+// that yields each UserInputRequestEvent for tests to respond to.
+func captureEvents() (output.Sink, *recordedEvents, <-chan output.UserInputRequestEvent) {
+	rec := &recordedEvents{}
+	prompts := make(chan output.UserInputRequestEvent, 4)
+	sink := output.SinkFunc(func(event output.Event) {
+		rec.mu.Lock()
+		rec.events = append(rec.events, event)
+		rec.mu.Unlock()
+		if req, ok := event.(output.UserInputRequestEvent); ok {
+			prompts <- req
+		}
+	})
+	return sink, rec, prompts
+}
+
+func healthyRunningMock(t *testing.T) *runtime.MockRuntime {
+	t.Helper()
+	ctrl := gomock.NewController(t)
+	mockRT := runtime.NewMockRuntime(ctrl)
+	mockRT.EXPECT().IsHealthy(gomock.Any()).Return(nil)
+	mockRT.EXPECT().IsRunning(gomock.Any(), "localstack-aws").Return(true, nil)
+	return mockRT
+}
+
+func TestReset_Success(t *testing.T) {
+	t.Parallel()
+	ctrl := gomock.NewController(t)
+	resetter := NewMockStateResetter(ctrl)
+	resetter.EXPECT().ResetState(gomock.Any(), "host:4566").Return(nil)
+
+	sink, rec, _ := captureEvents()
+
+	err := reset.Reset(context.Background(), healthyRunningMock(t), awsContainers, resetter, "host:4566", true, sink)
+	require.NoError(t, err)
+
+	var spinnerStarted, spinnerStopped, succeeded bool
+	for _, e := range rec.snapshot() {
+		switch ev := e.(type) {
+		case output.SpinnerEvent:
+			if ev.Active {
+				spinnerStarted = true
+			} else {
+				spinnerStopped = true
+			}
+		case output.MessageEvent:
+			if ev.Severity == output.SeveritySuccess {
+				succeeded = true
+				assert.Contains(t, ev.Text, "reset")
+			}
+		}
+	}
+	assert.True(t, spinnerStarted, "spinner should have started")
+	assert.True(t, spinnerStopped, "spinner should have stopped")
+	assert.True(t, succeeded, "success event should have been emitted")
+}
+
+func TestReset_EmulatorNotRunning(t *testing.T) {
+	t.Parallel()
+	ctrl := gomock.NewController(t)
+	mockRT := runtime.NewMockRuntime(ctrl)
+	mockRT.EXPECT().IsHealthy(gomock.Any()).Return(nil)
+	mockRT.EXPECT().IsRunning(gomock.Any(), "localstack-aws").Return(false, nil)
+	mockRT.EXPECT().FindRunningByImage(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil)
+
+	resetter := NewMockStateResetter(ctrl)
+	sink, rec, _ := captureEvents()
+
+	err := reset.Reset(context.Background(), mockRT, awsContainers, resetter, "host:4566", true, sink)
+	require.Error(t, err)
+	assert.True(t, output.IsSilent(err))
+
+	var gotErrorEvent bool
+	for _, e := range rec.snapshot() {
+		if ev, ok := e.(output.ErrorEvent); ok {
+			gotErrorEvent = true
+			assert.Contains(t, ev.Title, "not running")
+			assert.NotEmpty(t, ev.Actions)
+		}
+	}
+	assert.True(t, gotErrorEvent, "ErrorEvent should have been emitted")
+}
+
+func TestReset_UnhealthyRuntime(t *testing.T) {
+	t.Parallel()
+	ctrl := gomock.NewController(t)
+	mockRT := runtime.NewMockRuntime(ctrl)
+	mockRT.EXPECT().IsHealthy(gomock.Any()).Return(fmt.Errorf("docker unavailable"))
+	mockRT.EXPECT().EmitUnhealthyError(gomock.Any(), gomock.Any())
+
+	resetter := NewMockStateResetter(ctrl)
+	sink := output.NewPlainSink(io.Discard)
+
+	err := reset.Reset(context.Background(), mockRT, awsContainers, resetter, "host:4566", true, sink)
+	require.Error(t, err)
+	assert.True(t, output.IsSilent(err))
+}
+
+func TestReset_ResetterError(t *testing.T) {
+	t.Parallel()
+	ctrl := gomock.NewController(t)
+	resetter := NewMockStateResetter(ctrl)
+	resetter.EXPECT().ResetState(gomock.Any(), gomock.Any()).Return(fmt.Errorf("connection refused"))
+	sink := output.NewPlainSink(io.Discard)
+
+	err := reset.Reset(context.Background(), healthyRunningMock(t), awsContainers, resetter, "host:4566", true, sink)
+	require.Error(t, err)
+	assert.Contains(t, err.Error(), "connection refused")
+}
+
+func TestReset_ConfirmYes(t *testing.T) {
+	t.Parallel()
+	ctrl := gomock.NewController(t)
+	resetter := NewMockStateResetter(ctrl)
+	resetter.EXPECT().ResetState(gomock.Any(), gomock.Any()).Return(nil)
+
+	sink, _, prompts := captureEvents()
+
+	go func() {
+		req := <-prompts
+		req.ResponseCh <- output.InputResponse{SelectedKey: "y"}
+	}()
+
+	err := reset.Reset(context.Background(), healthyRunningMock(t), awsContainers, resetter, "host:4566", false, sink)
+	require.NoError(t, err)
+}
+
+func TestReset_ConfirmNo(t *testing.T) {
+	t.Parallel()
+	ctrl := gomock.NewController(t)
+	resetter := NewMockStateResetter(ctrl)
+
+	sink, rec, prompts := captureEvents()
+
+	go func() {
+		req := <-prompts
+		req.ResponseCh <- output.InputResponse{SelectedKey: "n"}
+	}()
+
+	err := reset.Reset(context.Background(), healthyRunningMock(t), awsContainers, resetter, "host:4566", false, sink)
+	require.NoError(t, err)
+
+	var cancelled bool
+	for _, e := range rec.snapshot() {
+		if ev, ok := e.(output.MessageEvent); ok && ev.Severity == output.SeverityNote && ev.Text == "Cancelled" {
+			cancelled = true
+		}
+	}
+	assert.True(t, cancelled, "cancellation message should have been emitted")
+}
diff --git a/internal/ui/run_reset.go b/internal/ui/run_reset.go
new file mode 100644
index 00000000..12d9613b
--- /dev/null
+++ b/internal/ui/run_reset.go
@@ -0,0 +1,16 @@
+package ui
+
+import (
+	"context"
+
+	"github.com/localstack/lstk/internal/config"
+	"github.com/localstack/lstk/internal/output"
+	"github.com/localstack/lstk/internal/reset"
+	"github.com/localstack/lstk/internal/runtime"
+)
+
+func RunReset(parentCtx context.Context, rt runtime.Runtime, containers []config.ContainerConfig, resetter reset.StateResetter, host string, force bool) error {
+	return runWithTUI(parentCtx, withoutHeader(), func(ctx context.Context, sink output.Sink) error {
+		return reset.Reset(ctx, rt, containers, resetter, host, force, sink)
+	})
+}