author     简律纯 <hsiangnianian@outlook.com>  2023-04-28 01:36:44 +0800
committer  简律纯 <hsiangnianian@outlook.com>  2023-04-28 01:36:44 +0800
commit     dd84b9d64fb98746a230cd24233ff50a562c39c9 (patch)
tree       b583261ef00b3afe72ec4d6dacb31e57779a6faf /cli/internal
parent     0b46fcd72ac34382387b2bcf9095233efbcc52f4 (diff)
Diffstat (limited to 'cli/internal')
-rw-r--r--  cli/internal/analytics/analytics.go  175
-rw-r--r--  cli/internal/analytics/analytics_test.go  192
-rw-r--r--  cli/internal/cache/async_cache.go  82
-rw-r--r--  cli/internal/cache/cache.go  317
-rw-r--r--  cli/internal/cache/cache_fs.go  174
-rw-r--r--  cli/internal/cache/cache_fs_test.go  253
-rw-r--r--  cli/internal/cache/cache_http.go  375
-rw-r--r--  cli/internal/cache/cache_http_test.go  245
-rw-r--r--  cli/internal/cache/cache_noop.go  23
-rw-r--r--  cli/internal/cache/cache_signature_authentication.go  88
-rw-r--r--  cli/internal/cache/cache_signature_authentication_test.go  195
-rw-r--r--  cli/internal/cache/cache_test.go  318
-rw-r--r--  cli/internal/cacheitem/cacheitem.go  76
-rw-r--r--  cli/internal/cacheitem/create.go  119
-rw-r--r--  cli/internal/cacheitem/create_test.go  205
-rw-r--r--  cli/internal/cacheitem/create_unix_test.go  20
-rw-r--r--  cli/internal/cacheitem/create_windows_test.go  14
-rw-r--r--  cli/internal/cacheitem/filepath.go  162
-rw-r--r--  cli/internal/cacheitem/filepath_unix.go  14
-rw-r--r--  cli/internal/cacheitem/filepath_windows.go  50
-rw-r--r--  cli/internal/cacheitem/restore.go  200
-rw-r--r--  cli/internal/cacheitem/restore_directory.go  144
-rw-r--r--  cli/internal/cacheitem/restore_directory_test.go  103
-rw-r--r--  cli/internal/cacheitem/restore_regular.go  46
-rw-r--r--  cli/internal/cacheitem/restore_symlink.go  180
-rw-r--r--  cli/internal/cacheitem/restore_test.go  1493
-rw-r--r--  cli/internal/chrometracing/chrometracing.go  227
-rw-r--r--  cli/internal/chrometracing/chrometracing_close.go  26
-rw-r--r--  cli/internal/ci/ci.go  58
-rw-r--r--  cli/internal/ci/ci_test.go  105
-rw-r--r--  cli/internal/ci/vendors.go  253
-rw-r--r--  cli/internal/client/analytics.go  21
-rw-r--r--  cli/internal/client/cache.go  167
-rw-r--r--  cli/internal/client/client.go  309
-rw-r--r--  cli/internal/client/client_test.go  159
-rw-r--r--  cli/internal/cmd/root.go  157
-rw-r--r--  cli/internal/cmdutil/cmdutil.go  245
-rw-r--r--  cli/internal/cmdutil/cmdutil_test.go  109
-rw-r--r--  cli/internal/colorcache/colorcache.go  56
-rw-r--r--  cli/internal/config/config_file.go  192
-rw-r--r--  cli/internal/config/config_file_test.go  157
-rw-r--r--  cli/internal/context/context.go  480
-rw-r--r--  cli/internal/context/context_test.go  162
-rw-r--r--  cli/internal/context/testdata/dupe-workspace-names/apps/a/package.json  6
-rw-r--r--  cli/internal/context/testdata/dupe-workspace-names/apps/b/package.json  6
-rw-r--r--  cli/internal/context/testdata/dupe-workspace-names/package.json  7
-rw-r--r--  cli/internal/context/testdata/dupe-workspace-names/packages/ui/package.json  3
-rw-r--r--  cli/internal/context/testdata/dupe-workspace-names/pnpm-lock.yaml  21
-rw-r--r--  cli/internal/context/testdata/dupe-workspace-names/pnpm-workspace.yaml  3
-rw-r--r--  cli/internal/core/engine.go  591
-rw-r--r--  cli/internal/core/engine_test.go  88
-rw-r--r--  cli/internal/daemon/connector/connector.go  391
-rw-r--r--  cli/internal/daemon/connector/connector_test.go  256
-rw-r--r--  cli/internal/daemon/connector/fork.go  15
-rw-r--r--  cli/internal/daemon/connector/fork_windows.go  15
-rw-r--r--  cli/internal/daemon/daemon.go  307
-rw-r--r--  cli/internal/daemon/daemon_test.go  262
-rw-r--r--  cli/internal/daemonclient/daemonclient.go  70
-rw-r--r--  cli/internal/doublestar/doublestar.go  11
-rw-r--r--  cli/internal/doublestar/doublestar_test.go  557
-rw-r--r--  cli/internal/doublestar/glob.go  393
-rw-r--r--  cli/internal/doublestar/globwalk.go  277
-rw-r--r--  cli/internal/doublestar/match.go  377
-rw-r--r--  cli/internal/doublestar/utils.go  71
-rw-r--r--  cli/internal/doublestar/validate.go  83
-rw-r--r--  cli/internal/encoding/gitoutput/gitoutput.go  345
-rw-r--r--  cli/internal/encoding/gitoutput/gitoutput_test.go  377
-rw-r--r--  cli/internal/encoding/gitoutput/validators.go  148
-rw-r--r--  cli/internal/encoding/gitoutput/validators_test.go  514
-rw-r--r--  cli/internal/ffi/bindings.h  21
-rw-r--r--  cli/internal/ffi/ffi.go  224
-rw-r--r--  cli/internal/ffi/proto/messages.pb.go  1380
-rw-r--r--  cli/internal/filewatcher/backend.go  209
-rw-r--r--  cli/internal/filewatcher/backend_darwin.go  220
-rw-r--r--  cli/internal/filewatcher/cookie.go  160
-rw-r--r--  cli/internal/filewatcher/cookie_test.go  130
-rw-r--r--  cli/internal/filewatcher/filewatcher.go  167
-rw-r--r--  cli/internal/filewatcher/filewatcher_test.go  152
-rw-r--r--  cli/internal/fs/copy_file.go  81
-rw-r--r--  cli/internal/fs/copy_file_test.go  198
-rw-r--r--  cli/internal/fs/fs.go  191
-rw-r--r--  cli/internal/fs/fs_test.go  60
-rw-r--r--  cli/internal/fs/fs_windows_test.go  18
-rw-r--r--  cli/internal/fs/get_turbo_data_dir_go.go  16
-rw-r--r--  cli/internal/fs/get_turbo_data_dir_rust.go  16
-rw-r--r--  cli/internal/fs/hash.go  61
-rw-r--r--  cli/internal/fs/hash_test.go  53
-rw-r--r--  cli/internal/fs/lstat.go  74
-rw-r--r--  cli/internal/fs/package_json.go  142
-rw-r--r--  cli/internal/fs/package_json_test.go  174
-rw-r--r--  cli/internal/fs/path.go  113
-rw-r--r--  cli/internal/fs/testdata/both/package.json  7
-rw-r--r--  cli/internal/fs/testdata/both/turbo.json  18
-rw-r--r--  cli/internal/fs/testdata/correct/turbo.json  49
-rw-r--r--  cli/internal/fs/testdata/invalid-env-1/turbo.json  8
-rw-r--r--  cli/internal/fs/testdata/invalid-env-2/turbo.json  8
-rw-r--r--  cli/internal/fs/testdata/invalid-global-env/turbo.json  11
-rw-r--r--  cli/internal/fs/testdata/legacy-env/turbo.json  34
-rw-r--r--  cli/internal/fs/testdata/legacy-only/package.json  7
-rw-r--r--  cli/internal/fs/turbo_json.go  741
-rw-r--r--  cli/internal/fs/turbo_json_test.go  277
-rw-r--r--  cli/internal/globby/globby.go  187
-rw-r--r--  cli/internal/globby/globby_test.go  832
-rw-r--r--  cli/internal/globwatcher/globwatcher.go  210
-rw-r--r--  cli/internal/globwatcher/globwatcher_test.go  232
-rw-r--r--  cli/internal/graph/graph.go  274
-rw-r--r--  cli/internal/graph/graph_test.go  50
-rw-r--r--  cli/internal/graphvisualizer/graphvisualizer.go  205
-rw-r--r--  cli/internal/hashing/package_deps_hash.go  461
-rw-r--r--  cli/internal/hashing/package_deps_hash_test.go  386
-rw-r--r--  cli/internal/inference/inference.go  167
-rw-r--r--  cli/internal/inference/inference_test.go  97
-rw-r--r--  cli/internal/lockfile/berry_lockfile.go  709
-rw-r--r--  cli/internal/lockfile/berry_lockfile_test.go  273
-rw-r--r--  cli/internal/lockfile/lockfile.go  135
-rw-r--r--  cli/internal/lockfile/lockfile_test.go  25
-rw-r--r--  cli/internal/lockfile/npm_lockfile.go  107
-rw-r--r--  cli/internal/lockfile/pnpm_lockfile.go  579
-rw-r--r--  cli/internal/lockfile/pnpm_lockfile_test.go  405
-rw-r--r--  cli/internal/lockfile/testdata/berry.lock  3283
-rw-r--r--  cli/internal/lockfile/testdata/minimal-berry.lock  45
-rw-r--r--  cli/internal/lockfile/testdata/npm-lock-workspace-variation.json  186
-rw-r--r--  cli/internal/lockfile/testdata/npm-lock.json  6472
-rw-r--r--  cli/internal/lockfile/testdata/pnpm-absolute-v6.yaml  18
-rw-r--r--  cli/internal/lockfile/testdata/pnpm-absolute.yaml  38
-rw-r--r--  cli/internal/lockfile/testdata/pnpm-patch-v6.yaml  40
-rw-r--r--  cli/internal/lockfile/testdata/pnpm-patch.yaml  63
-rw-r--r--  cli/internal/lockfile/testdata/pnpm-peer-v6.yaml  67
-rw-r--r--  cli/internal/lockfile/testdata/pnpm-top-level-dupe.yaml  36
-rw-r--r--  cli/internal/lockfile/testdata/pnpm6-workspace.yaml  1704
-rw-r--r--  cli/internal/lockfile/testdata/pnpm7-workspace.yaml  3445
-rw-r--r--  cli/internal/lockfile/testdata/pnpm8.yaml  107
-rw-r--r--  cli/internal/lockfile/testdata/pnpm_override.yaml  24
-rw-r--r--  cli/internal/lockfile/testdata/yarn.lock  2304
-rw-r--r--  cli/internal/lockfile/yarn_lockfile.go  124
-rw-r--r--  cli/internal/lockfile/yarn_lockfile_test.go  51
-rw-r--r--  cli/internal/logstreamer/logstreamer.go  159
-rw-r--r--  cli/internal/logstreamer/logstreamer_test.go  114
-rw-r--r--  cli/internal/nodes/packagetask.go  45
-rw-r--r--  cli/internal/packagemanager/berry.go  156
-rw-r--r--  cli/internal/packagemanager/fixtures/package.json  7
-rw-r--r--  cli/internal/packagemanager/fixtures/pnpm-patches.json  11
-rw-r--r--  cli/internal/packagemanager/fixtures/pnpm-workspace.yaml  3
-rw-r--r--  cli/internal/packagemanager/infer_root.go  146
-rw-r--r--  cli/internal/packagemanager/infer_root_test.go  347
-rw-r--r--  cli/internal/packagemanager/npm.go  59
-rw-r--r--  cli/internal/packagemanager/packagemanager.go  197
-rw-r--r--  cli/internal/packagemanager/packagemanager_test.go  411
-rw-r--r--  cli/internal/packagemanager/pnpm.go  168
-rw-r--r--  cli/internal/packagemanager/pnpm6.go  63
-rw-r--r--  cli/internal/packagemanager/pnpm_test.go  57
-rw-r--r--  cli/internal/packagemanager/yarn.go  116
-rw-r--r--  cli/internal/process/child.go  406
-rw-r--r--  cli/internal/process/child_nix_test.go  190
-rw-r--r--  cli/internal/process/child_test.go  193
-rw-r--r--  cli/internal/process/manager.go  120
-rw-r--r--  cli/internal/process/manager_test.go  94
-rw-r--r--  cli/internal/process/sys_nix.go  23
-rw-r--r--  cli/internal/process/sys_windows.go  17
-rw-r--r--  cli/internal/prune/prune.go  314
-rw-r--r--  cli/internal/run/dry_run.go  122
-rw-r--r--  cli/internal/run/global_hash.go  164
-rw-r--r--  cli/internal/run/graph_run.go  46
-rw-r--r--  cli/internal/run/log_tag_go.go  11
-rw-r--r--  cli/internal/run/log_tag_rust.go  11
-rw-r--r--  cli/internal/run/real_run.go  420
-rw-r--r--  cli/internal/run/run.go  487
-rw-r--r--  cli/internal/run/run_spec.go  90
-rw-r--r--  cli/internal/run/run_spec_test.go  107
-rw-r--r--  cli/internal/runcache/output_watcher.go  32
-rw-r--r--  cli/internal/runcache/runcache.go  354
-rw-r--r--  cli/internal/runsummary/execution_summary.go  282
-rw-r--r--  cli/internal/runsummary/format_execution_summary.go  70
-rw-r--r--  cli/internal/runsummary/format_json.go  66
-rw-r--r--  cli/internal/runsummary/format_text.go  100
-rw-r--r--  cli/internal/runsummary/globalhash_summary.go  38
-rw-r--r--  cli/internal/runsummary/run_summary.go  320
-rw-r--r--  cli/internal/runsummary/spaces.go  96
-rw-r--r--  cli/internal/runsummary/task_summary.go  117
-rw-r--r--  cli/internal/scm/git_go.go  111
-rw-r--r--  cli/internal/scm/git_rust.go  34
-rw-r--r--  cli/internal/scm/scm.go  53
-rw-r--r--  cli/internal/scm/stub.go  14
-rw-r--r--  cli/internal/scope/filter/filter.go  421
-rw-r--r--  cli/internal/scope/filter/filter_test.go  614
-rw-r--r--  cli/internal/scope/filter/matcher.go  32
-rw-r--r--  cli/internal/scope/filter/matcher_test.go  65
-rw-r--r--  cli/internal/scope/filter/parse_target_selector.go  165
-rw-r--r--  cli/internal/scope/filter/parse_target_selector_test.go  311
-rw-r--r--  cli/internal/scope/scope.go  380
-rw-r--r--  cli/internal/scope/scope_test.go  550
-rw-r--r--  cli/internal/server/server.go  192
-rw-r--r--  cli/internal/server/server_test.go  73
-rw-r--r--  cli/internal/signals/signals.go  60
-rw-r--r--  cli/internal/spinner/spinner.go  89
-rw-r--r--  cli/internal/tarpatch/tar.go  92
-rw-r--r--  cli/internal/tarpatch/tar_unix.go  42
-rw-r--r--  cli/internal/tarpatch/tar_windows.go  27
-rw-r--r--  cli/internal/taskhash/taskhash.go  497
-rw-r--r--  cli/internal/taskhash/taskhash_test.go  138
-rw-r--r--  cli/internal/turbodprotocol/turbod.proto  53
-rw-r--r--  cli/internal/turbopath/absolute_system_path.go  258
-rw-r--r--  cli/internal/turbopath/absolute_system_path_darwin.go  23
-rw-r--r--  cli/internal/turbopath/absolute_system_path_notdarwin.go  13
-rw-r--r--  cli/internal/turbopath/absolute_system_path_test.go  174
-rw-r--r--  cli/internal/turbopath/anchored_system_path.go  75
-rw-r--r--  cli/internal/turbopath/anchored_unix_path.go  31
-rw-r--r--  cli/internal/turbopath/find_up.go  50
-rw-r--r--  cli/internal/turbopath/relative_system_path.go  44
-rw-r--r--  cli/internal/turbopath/relative_unix_path.go  31
-rw-r--r--  cli/internal/turbopath/turbopath.go  112
-rw-r--r--  cli/internal/turbostate/turbostate.go  141
-rw-r--r--  cli/internal/ui/charset.go  3
-rw-r--r--  cli/internal/ui/colors.go  54
-rw-r--r--  cli/internal/ui/spinner.go  80
-rw-r--r--  cli/internal/ui/term/cursor.go  73
-rw-r--r--  cli/internal/ui/term/cursor_test.go  43
-rw-r--r--  cli/internal/ui/ui.go  121
-rw-r--r--  cli/internal/util/backends.go  30
-rw-r--r--  cli/internal/util/browser/open.go  37
-rw-r--r--  cli/internal/util/closer.go  15
-rw-r--r--  cli/internal/util/cmd.go  24
-rw-r--r--  cli/internal/util/filter/filter.go  133
-rw-r--r--  cli/internal/util/filter/filter_test.go  116
-rw-r--r--  cli/internal/util/graph.go  35
-rw-r--r--  cli/internal/util/modulo.go  13
-rw-r--r--  cli/internal/util/parse_concurrency.go  39
-rw-r--r--  cli/internal/util/parse_concurrency_test.go  79
-rw-r--r--  cli/internal/util/printf.go  63
-rw-r--r--  cli/internal/util/run_opts.go  53
-rw-r--r--  cli/internal/util/semaphore.go  43
-rw-r--r--  cli/internal/util/set.go  147
-rw-r--r--  cli/internal/util/set_test.go  149
-rw-r--r--  cli/internal/util/status.go  47
-rw-r--r--  cli/internal/util/task_id.go  66
-rw-r--r--  cli/internal/util/task_output_mode.go  100
-rw-r--r--  cli/internal/workspace/workspace.go  10
-rw-r--r--  cli/internal/xxhash/xxhash.go  202
-rw-r--r--  cli/internal/yaml/apic.go  747
-rw-r--r--  cli/internal/yaml/decode.go  1000
-rw-r--r--  cli/internal/yaml/emitterc.go  2019
-rw-r--r--  cli/internal/yaml/encode.go  577
-rw-r--r--  cli/internal/yaml/parserc.go  1274
-rw-r--r--  cli/internal/yaml/readerc.go  434
-rw-r--r--  cli/internal/yaml/resolve.go  326
-rw-r--r--  cli/internal/yaml/scannerc.go  3040
-rw-r--r--  cli/internal/yaml/sorter.go  134
-rw-r--r--  cli/internal/yaml/writerc.go  48
-rw-r--r--  cli/internal/yaml/yaml.go  693
-rw-r--r--  cli/internal/yaml/yamlh.go  809
-rw-r--r--  cli/internal/yaml/yamlprivateh.go  198
251 files changed, 66089 insertions, 0 deletions
diff --git a/cli/internal/analytics/analytics.go b/cli/internal/analytics/analytics.go
new file mode 100644
index 0000000..8d9a3b6
--- /dev/null
+++ b/cli/internal/analytics/analytics.go
@@ -0,0 +1,175 @@
+package analytics
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "github.com/google/uuid"
+ "github.com/hashicorp/go-hclog"
+ "github.com/mitchellh/mapstructure"
+ "github.com/vercel/turbo/cli/internal/util"
+)
+
+// Events is a batch of analytics events, encoded as generic JSON-style maps.
+type Events = []map[string]interface{}
+
+// EventPayload is a single analytics event; it is flattened to a map before sending.
+type EventPayload = interface{}
+
+// Recorder accepts individual analytics events.
+type Recorder interface {
+ LogEvent(payload EventPayload)
+}
+
+// Client is a Recorder that batches events; it must be closed to flush the final batch.
+type Client interface {
+ Recorder
+ Close()
+ CloseWithTimeout(timeout time.Duration)
+}
+
+// Sink receives flushed batches of events for delivery.
+type Sink interface {
+ RecordAnalyticsEvents(events Events) error
+}
+
+type nullSink struct{}
+
+func (n *nullSink) RecordAnalyticsEvents(events Events) error {
+ return nil
+}
+
+// NullSink is an analytics sink to use in the event that we don't want to send
+// analytics
+var NullSink = &nullSink{}
+
+type client struct {
+ ch chan<- EventPayload
+ cancel func()
+
+ worker *worker
+}
+
+type worker struct {
+ buffer []EventPayload
+ ch <-chan EventPayload
+ ctx context.Context
+ doneSemaphore util.Semaphore
+ sessionID uuid.UUID
+ sink Sink
+ wg sync.WaitGroup
+ logger hclog.Logger
+}
+
+const bufferThreshold = 10
+const eventTimeout = 200 * time.Millisecond
+const noTimeout = 24 * time.Hour
+
+func newWorker(ctx context.Context, ch <-chan EventPayload, sink Sink, logger hclog.Logger) *worker {
+ buffer := []EventPayload{}
+ sessionID := uuid.New()
+ w := &worker{
+ buffer: buffer,
+ ch: ch,
+ ctx: ctx,
+ doneSemaphore: util.NewSemaphore(1),
+ sessionID: sessionID,
+ sink: sink,
+ logger: logger,
+ }
+ w.doneSemaphore.Acquire()
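+ // Held for the worker's lifetime; Wait() re-acquires it after the worker
+ // loop releases it on shutdown.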
+ go w.analyticsClient()
+ return w
+}
+
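+// NewClient returns a batching analytics client. Its worker flushes to sink
+// in batches and stops (flushing one final time) when parent is cancelled or
+// Close/CloseWithTimeout is called.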
+func NewClient(parent context.Context, sink Sink, logger hclog.Logger) Client {
+ ch := make(chan EventPayload)
+ ctx, cancel := context.WithCancel(parent)
+ // creates and starts the worker
+ worker := newWorker(ctx, ch, sink, logger)
+ s := &client{
+ ch: ch,
+ cancel: cancel,
+ worker: worker,
+ }
+ return s
+}
+
+func (s *client) LogEvent(event EventPayload) {
+ s.ch <- event
+}
+
+func (s *client) Close() {
+ s.cancel()
+ s.worker.Wait()
+}
+
+func (s *client) CloseWithTimeout(timeout time.Duration) {
+ ch := make(chan struct{})
+ go func() {
+ s.Close()
+ close(ch)
+ }()
+ select {
+ case <-ch:
+ case <-time.After(timeout):
+ }
+}
+
+func (w *worker) Wait() {
+ w.doneSemaphore.Acquire()
+ w.wg.Wait()
+}
+
+func (w *worker) analyticsClient() {
+ timeout := time.After(noTimeout)
+ for {
+ select {
+ case e := <-w.ch:
+ w.buffer = append(w.buffer, e)
+ if len(w.buffer) == bufferThreshold {
+ w.flush()
+ timeout = time.After(noTimeout)
+ } else {
+ timeout = time.After(eventTimeout)
+ }
+ case <-timeout:
+ w.flush()
+ timeout = time.After(noTimeout)
+ case <-w.ctx.Done():
+ w.flush()
+ w.doneSemaphore.Release()
+ return
+ }
+ }
+}
+
+func (w *worker) flush() {
+ if len(w.buffer) > 0 {
+ w.sendEvents(w.buffer)
+ w.buffer = []EventPayload{}
+ }
+}
+
+func (w *worker) sendEvents(events []EventPayload) {
+ w.wg.Add(1)
+ go func() {
+ payload, err := addSessionID(w.sessionID.String(), events)
+ if err != nil {
+ w.logger.Debug("failed to encode cache usage analytics", "error", err)
+ }
+ err = w.sink.RecordAnalyticsEvents(payload)
+ if err != nil {
+ w.logger.Debug("failed to record cache usage analytics", "error", err)
+ }
+ w.wg.Done()
+ }()
+}
+
+func addSessionID(sessionID string, events []EventPayload) (Events, error) {
+ eventMaps := []map[string]interface{}{}
+ err := mapstructure.Decode(events, &eventMaps)
+ if err != nil {
+ return nil, err
+ }
+ for _, event := range eventMaps {
+ event["sessionId"] = sessionID
+ }
+ return eventMaps, nil
+}
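
The file above implements a batch-and-flush pipeline: LogEvent pushes onto an unbuffered channel, and the worker flushes whenever the buffer reaches bufferThreshold (10 events) or eventTimeout (200ms) elapses without a new one. A minimal sketch of driving this API from elsewhere in the same module; the stdoutSink type is hypothetical, invented here for illustration, while NewClient, LogEvent, and CloseWithTimeout are the exported API above:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/hashicorp/go-hclog"
	"github.com/vercel/turbo/cli/internal/analytics"
)

// stdoutSink is a hypothetical Sink that just prints each flushed batch.
type stdoutSink struct{}

func (s *stdoutSink) RecordAnalyticsEvents(events analytics.Events) error {
	fmt.Printf("flushing %d events\n", len(events))
	return nil
}

func main() {
	c := analytics.NewClient(context.Background(), &stdoutSink{}, hclog.Default())
	for i := 0; i < 25; i++ {
		// Buffered until 10 accumulate or 200ms pass without a new event.
		c.LogEvent(map[string]interface{}{"event": "demo", "i": i})
	}
	// Close cancels the worker and waits for the final partial batch;
	// CloseWithTimeout bounds that wait.
	c.CloseWithTimeout(time.Second)
}

Note that dropping the client without closing it would lose whatever is still buffered; the final flush is driven by context cancellation inside Close.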
diff --git a/cli/internal/analytics/analytics_test.go b/cli/internal/analytics/analytics_test.go
new file mode 100644
index 0000000..0715fda
--- /dev/null
+++ b/cli/internal/analytics/analytics_test.go
@@ -0,0 +1,192 @@
+package analytics
+
+import (
+ "context"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/go-hclog"
+)
+
+type dummySink struct {
+ events []*Events
+ err error
+ mu sync.Mutex
+ ch chan struct{}
+}
+
+type evt struct {
+ I int
+}
+
+func newDummySink() *dummySink {
+ return &dummySink{
+ events: []*Events{},
+ ch: make(chan struct{}, 1),
+ }
+}
+
+func (d *dummySink) RecordAnalyticsEvents(events Events) error {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+ // Make a copy in case a test is holding a copy too
+ eventsCopy := make([]*Events, len(d.events))
+ copy(eventsCopy, d.events)
+ d.events = append(eventsCopy, &events)
+ d.ch <- struct{}{}
+ return d.err
+}
+
+func (d *dummySink) Events() []*Events {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+ return d.events
+}
+
+func (d *dummySink) ExpectImmediateMessage(t *testing.T) {
+ select {
+ case <-time.After(150 * time.Millisecond):
+ t.Errorf("expected to not wait out the flush timeout")
+ case <-d.ch:
+ }
+}
+
+func (d *dummySink) ExpectTimeoutThenMessage(t *testing.T) {
+ select {
+ case <-d.ch:
+ t.Errorf("Expected to wait out the flush timeout")
+ case <-time.After(150 * time.Millisecond):
+ }
+ <-d.ch
+}
+
+func Test_batching(t *testing.T) {
+ d := newDummySink()
+ ctx := context.Background()
+ c := NewClient(ctx, d, hclog.Default())
+ for i := 0; i < 2; i++ {
+ c.LogEvent(&evt{i})
+ }
+ found := d.Events()
+ if len(found) != 0 {
+ t.Errorf("got %v events, want 0 due to batching", len(found))
+ }
+ // Should timeout
+ d.ExpectTimeoutThenMessage(t)
+ found = d.Events()
+ if len(found) != 1 {
+ t.Errorf("got %v, want 1 batch to have been flushed", len(found))
+ }
+ payloads := *found[0]
+ if len(payloads) != 2 {
+ t.Errorf("got %v, want 2 payloads to have been flushed", len(payloads))
+ }
+}
+
+func Test_batchingAcrossTwoBatches(t *testing.T) {
+ d := newDummySink()
+ ctx := context.Background()
+ c := NewClient(ctx, d, hclog.Default())
+ for i := 0; i < 12; i++ {
+ c.LogEvent(&evt{i})
+ }
+ // We sent more than the batch size, expect a message immediately
+ d.ExpectImmediateMessage(t)
+ found := d.Events()
+ if len(found) != 1 {
+ t.Errorf("got %v, want 1 batch to have been flushed", len(found))
+ }
+ payloads := *found[0]
+ if len(payloads) != 10 {
+ t.Errorf("got %v, want 10 payloads to have been flushed", len(payloads))
+ }
+ // Should timeout second batch
+ d.ExpectTimeoutThenMessage(t)
+ found = d.Events()
+ if len(found) != 2 {
+ t.Errorf("got %v, want 2 batches to have been flushed", len(found))
+ }
+ payloads = *found[1]
+ if len(payloads) != 2 {
+ t.Errorf("got %v, want 2 payloads to have been flushed", len(payloads))
+ }
+}
+
+func Test_closing(t *testing.T) {
+ d := newDummySink()
+ ctx := context.Background()
+ c := NewClient(ctx, d, hclog.Default())
+ for i := 0; i < 2; i++ {
+ c.LogEvent(&evt{i})
+ }
+ found := d.Events()
+ if len(found) != 0 {
+ t.Errorf("got %v events, want 0 due to batching", len(found))
+ }
+ c.Close()
+ found = d.Events()
+ if len(found) != 1 {
+ t.Errorf("got %v, want 1 batch to have been flushed", len(found))
+ }
+ payloads := *found[0]
+ if len(payloads) != 2 {
+ t.Errorf("got %v, want 2 payloads to have been flushed", len(payloads))
+ }
+}
+
+func Test_closingByContext(t *testing.T) {
+ d := newDummySink()
+ ctx, cancel := context.WithCancel(context.Background())
+ c := NewClient(ctx, d, hclog.Default())
+ for i := 0; i < 2; i++ {
+ c.LogEvent(&evt{i})
+ }
+ found := d.Events()
+ if len(found) != 0 {
+ t.Errorf("got %v events, want 0 due to batching", len(found))
+ }
+ cancel()
+ d.ExpectImmediateMessage(t)
+ found = d.Events()
+ if len(found) != 1 {
+ t.Errorf("got %v, want 1 batch to have been flushed", len(found))
+ }
+ payloads := *found[0]
+ if len(payloads) != 2 {
+ t.Errorf("got %v, want 2 payloads to have been flushed", len(payloads))
+ }
+}
+
+func Test_addSessionId(t *testing.T) {
+ events := []struct {
+ Foo string `mapstructure:"foo"`
+ }{
+ {
+ Foo: "foo1",
+ },
+ {
+ Foo: "foo2",
+ },
+ }
+ arr := make([]interface{}, len(events))
+ for i, event := range events {
+ arr[i] = event
+ }
+ sessionID := "my-uuid"
+ output, err := addSessionID(sessionID, arr)
+ if err != nil {
+ t.Errorf("failed to encode analytics events: %v", err)
+ }
+ if len(output) != 2 {
+ t.Errorf("len output got %v, want 2", len(output))
+ }
+ if output[0]["foo"] != "foo1" {
+ t.Errorf("first event foo got %v, want foo1", output[0]["foo"])
+ }
+ for i, event := range output {
+ if event["sessionId"] != "my-uuid" {
+ t.Errorf("event %v sessionId got %v, want %v", i, event["sessionId"], sessionID)
+ }
+ }
+}
diff --git a/cli/internal/cache/async_cache.go b/cli/internal/cache/async_cache.go
new file mode 100644
index 0000000..0a8f467
--- /dev/null
+++ b/cli/internal/cache/async_cache.go
@@ -0,0 +1,82 @@
+// Adapted from https://github.com/thought-machine/please
+// Copyright Thought Machine, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+package cache
+
+import (
+ "sync"
+
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+// An asyncCache is a wrapper around a Cache interface that handles incoming
+// store requests asynchronously and attempts to return immediately.
+// Requests are handled on an internal queue; if the queue fills up,
+// incoming requests block until it drains.
+// Retrieval requests are still handled synchronously.
+type asyncCache struct {
+ requests chan cacheRequest
+ realCache Cache
+ wg sync.WaitGroup
+}
+
+// A cacheRequest models an incoming cache request on our queue.
+type cacheRequest struct {
+ anchor turbopath.AbsoluteSystemPath
+ key string
+ duration int
+ files []turbopath.AnchoredSystemPath
+}
+
+func newAsyncCache(realCache Cache, opts Opts) Cache {
+ c := &asyncCache{
+ requests: make(chan cacheRequest),
+ realCache: realCache,
+ }
+ c.wg.Add(opts.Workers)
+ for i := 0; i < opts.Workers; i++ {
+ go c.run()
+ }
+ return c
+}
+
+func (c *asyncCache) Put(anchor turbopath.AbsoluteSystemPath, key string, duration int, files []turbopath.AnchoredSystemPath) error {
+ c.requests <- cacheRequest{
+ anchor: anchor,
+ key: key,
+ files: files,
+ duration: duration,
+ }
+ return nil
+}
+
+func (c *asyncCache) Fetch(anchor turbopath.AbsoluteSystemPath, key string, files []string) (ItemStatus, []turbopath.AnchoredSystemPath, int, error) {
+ return c.realCache.Fetch(anchor, key, files)
+}
+
+func (c *asyncCache) Exists(key string) ItemStatus {
+ return c.realCache.Exists(key)
+}
+
+func (c *asyncCache) Clean(anchor turbopath.AbsoluteSystemPath) {
+ c.realCache.Clean(anchor)
+}
+
+func (c *asyncCache) CleanAll() {
+ c.realCache.CleanAll()
+}
+
+func (c *asyncCache) Shutdown() {
+ // fmt.Println("Shutting down cache workers...")
+ close(c.requests)
+ c.wg.Wait()
+ // fmt.Println("Shut down all cache workers")
+}
+
+// run implements the actual async logic.
+func (c *asyncCache) run() {
+ for r := range c.requests {
+ _ = c.realCache.Put(r.anchor, r.key, r.duration, r.files)
+ }
+ c.wg.Done()
+}
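
Since Put only enqueues and Shutdown closes the queue and waits for the workers, the intended lifecycle is enqueue-then-drain. A same-package sketch of that contract, assuming the Cache interface defined in cache.go below; nullBackingCache is hypothetical, standing in for a real fsCache or httpCache:

package cache

import "github.com/vercel/turbo/cli/internal/turbopath"

// nullBackingCache is a hypothetical no-op backing Cache.
type nullBackingCache struct{}

func (nullBackingCache) Put(turbopath.AbsoluteSystemPath, string, int, []turbopath.AnchoredSystemPath) error {
	return nil
}
func (nullBackingCache) Fetch(turbopath.AbsoluteSystemPath, string, []string) (ItemStatus, []turbopath.AnchoredSystemPath, int, error) {
	return ItemStatus{}, nil, 0, nil
}
func (nullBackingCache) Exists(string) ItemStatus           { return ItemStatus{} }
func (nullBackingCache) Clean(turbopath.AbsoluteSystemPath) {}
func (nullBackingCache) CleanAll()                          {}
func (nullBackingCache) Shutdown()                          {}

func exampleAsyncLifecycle() error {
	async := newAsyncCache(nullBackingCache{}, Opts{Workers: 4}) // 4 goroutines drain the queue
	// Put returns as soon as a worker picks the request up; the write itself
	// happens in the background.
	if err := async.Put("/repo", "some-hash", 0, nil); err != nil {
		return err
	}
	async.Shutdown() // close the queue and wait for in-flight Puts to finish
	return nil
}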
diff --git a/cli/internal/cache/cache.go b/cli/internal/cache/cache.go
new file mode 100644
index 0000000..8b74272
--- /dev/null
+++ b/cli/internal/cache/cache.go
@@ -0,0 +1,317 @@
+// Package cache abstracts storing and fetching previously run tasks
+//
+// Adapted from https://github.com/thought-machine/please
+// Copyright Thought Machine, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+package cache
+
+import (
+ "errors"
+ "sync"
+
+ "github.com/vercel/turbo/cli/internal/analytics"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "github.com/vercel/turbo/cli/internal/util"
+ "golang.org/x/sync/errgroup"
+)
+
+// Cache is an abstracted way to cache/fetch previously run tasks
+type Cache interface {
+ // Fetch returns true if there is a cache hit. It is expected to move files
+ // into their correct position as a side effect
+ Fetch(anchor turbopath.AbsoluteSystemPath, hash string, files []string) (ItemStatus, []turbopath.AnchoredSystemPath, int, error)
+ Exists(hash string) ItemStatus
+ // Put caches files for a given hash
+ Put(anchor turbopath.AbsoluteSystemPath, hash string, duration int, files []turbopath.AnchoredSystemPath) error
+ Clean(anchor turbopath.AbsoluteSystemPath)
+ CleanAll()
+ Shutdown()
+}
+
+// ItemStatus holds whether artifacts exists for a given hash on local
+// and/or remote caching server
+type ItemStatus struct {
+ Local bool `json:"local"`
+ Remote bool `json:"remote"`
+}
+
+const (
+ // CacheSourceFS is a constant to indicate local cache hit
+ CacheSourceFS = "LOCAL"
+ // CacheSourceRemote is a constant to indicate remote cache hit
+ CacheSourceRemote = "REMOTE"
+ // CacheEventHit is a constant to indicate a cache hit
+ CacheEventHit = "HIT"
+ // CacheEventMiss is a constant to indicate a cache miss
+ CacheEventMiss = "MISS"
+)
+
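+// CacheEvent is the analytics payload recorded for every cache fetch,
+// identifying the cache source and whether it was a hit or a miss.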
+type CacheEvent struct {
+ Source string `mapstructure:"source"`
+ Event string `mapstructure:"event"`
+ Hash string `mapstructure:"hash"`
+ Duration int `mapstructure:"duration"`
+}
+
+// DefaultLocation returns the default filesystem cache location, given a repo root
+func DefaultLocation(repoRoot turbopath.AbsoluteSystemPath) turbopath.AbsoluteSystemPath {
+ return repoRoot.UntypedJoin("node_modules", ".cache", "turbo")
+}
+
+// OnCacheRemoved defines a callback that the cache system calls if a particular cache
+// needs to be removed. In practice, this happens when Remote Caching has been disabled
+// the but CLI continues to try to use it.
+type OnCacheRemoved = func(cache Cache, err error)
+
+// ErrNoCachesEnabled is returned when both the filesystem and http cache are unavailable
+var ErrNoCachesEnabled = errors.New("no caches are enabled")
+
+// Opts holds configuration options for the cache
+// TODO(gsoltis): further refactor this into fs cache opts and http cache opts
+type Opts struct {
+ OverrideDir string
+ SkipRemote bool
+ SkipFilesystem bool
+ Workers int
+ RemoteCacheOpts fs.RemoteCacheOptions
+}
+
+// resolveCacheDir calculates the location turbo should use to cache artifacts,
+// based on the options supplied by the user.
+func (o *Opts) resolveCacheDir(repoRoot turbopath.AbsoluteSystemPath) turbopath.AbsoluteSystemPath {
+ if o.OverrideDir != "" {
+ return fs.ResolveUnknownPath(repoRoot, o.OverrideDir)
+ }
+ return DefaultLocation(repoRoot)
+}
+
+var _remoteOnlyHelp = `Ignore the local filesystem cache for all tasks. Only
+allow reading and caching artifacts using the remote cache.`
+
+// New creates a new cache
+func New(opts Opts, repoRoot turbopath.AbsoluteSystemPath, client client, recorder analytics.Recorder, onCacheRemoved OnCacheRemoved) (Cache, error) {
+ c, err := newSyncCache(opts, repoRoot, client, recorder, onCacheRemoved)
+ if err != nil && !errors.Is(err, ErrNoCachesEnabled) {
+ return nil, err
+ }
+ if opts.Workers > 0 {
+ return newAsyncCache(c, opts), err
+ }
+ return c, err
+}
+
+// newSyncCache can return an error with a usable noopCache.
+func newSyncCache(opts Opts, repoRoot turbopath.AbsoluteSystemPath, client client, recorder analytics.Recorder, onCacheRemoved OnCacheRemoved) (Cache, error) {
+ // Check to see if the user has turned off particular cache implementations.
+ useFsCache := !opts.SkipFilesystem
+ useHTTPCache := !opts.SkipRemote
+
+ // Since the above two flags are not mutually exclusive it is possible to configure
+ // yourself out of having a cache. We should tell you about it but we shouldn't fail
+ // your build for that reason.
+ //
+ // Further, since the httpCache can be removed at runtime, we need to insert a noopCache
+ // as a backup if you are configured to have *just* an httpCache.
+ //
+ // This is reduced from (!useFsCache && !useHTTPCache) || (!useFsCache && useHTTPCache)
+ useNoopCache := !useFsCache
+
+ // Build up an array of cache implementations, we can only ever have 1 or 2.
+ cacheImplementations := make([]Cache, 0, 2)
+
+ if useFsCache {
+ implementation, err := newFsCache(opts, recorder, repoRoot)
+ if err != nil {
+ return nil, err
+ }
+ cacheImplementations = append(cacheImplementations, implementation)
+ }
+
+ if useHTTPCache {
+ implementation := newHTTPCache(opts, client, recorder)
+ cacheImplementations = append(cacheImplementations, implementation)
+ }
+
+ if useNoopCache {
+ implementation := newNoopCache()
+ cacheImplementations = append(cacheImplementations, implementation)
+ }
+
+ // Precisely two cache implementations:
+ // fsCache and httpCache OR httpCache and noopCache
+ useMultiplexer := len(cacheImplementations) > 1
+ if useMultiplexer {
+ // We have early-returned any possible errors for this scenario.
+ return &cacheMultiplexer{
+ onCacheRemoved: onCacheRemoved,
+ opts: opts,
+ caches: cacheImplementations,
+ }, nil
+ }
+
+ // Precisely one cache implementation: fsCache OR noopCache
+ implementation := cacheImplementations[0]
+ _, isNoopCache := implementation.(*noopCache)
+
+ // We want to let the user know something is wonky, but we don't want
+ // to trigger their build to fail.
+ if isNoopCache {
+ return implementation, ErrNoCachesEnabled
+ }
+ return implementation, nil
+}
+
+// A cacheMultiplexer multiplexes several caches into one.
+// Used when we have several active (e.g. http, dir).
+type cacheMultiplexer struct {
+ caches []Cache
+ opts Opts
+ mu sync.RWMutex
+ onCacheRemoved OnCacheRemoved
+}
+
+func (mplex *cacheMultiplexer) Put(anchor turbopath.AbsoluteSystemPath, key string, duration int, files []turbopath.AnchoredSystemPath) error {
+ return mplex.storeUntil(anchor, key, duration, files, len(mplex.caches))
+}
+
+type cacheRemoval struct {
+ cache Cache
+ err *util.CacheDisabledError
+}
+
+// storeUntil stores artifacts into higher priority caches than the given one.
+// Used after artifact retrieval to ensure we have them in e.g. the directory cache after
+// downloading from the RPC cache.
+func (mplex *cacheMultiplexer) storeUntil(anchor turbopath.AbsoluteSystemPath, key string, duration int, files []turbopath.AnchoredSystemPath, stopAt int) error {
+ // Attempt to store on all caches simultaneously.
+ toRemove := make([]*cacheRemoval, stopAt)
+ g := &errgroup.Group{}
+ mplex.mu.RLock()
+ for i, cache := range mplex.caches {
+ if i == stopAt {
+ break
+ }
+ c := cache
+ i := i
+ g.Go(func() error {
+ err := c.Put(anchor, key, duration, files)
+ if err != nil {
+ cd := &util.CacheDisabledError{}
+ if errors.As(err, &cd) {
+ toRemove[i] = &cacheRemoval{
+ cache: c,
+ err: cd,
+ }
+ // we don't want this to cancel other cache actions
+ return nil
+ }
+ return err
+ }
+ return nil
+ })
+ }
+ mplex.mu.RUnlock()
+
+ if err := g.Wait(); err != nil {
+ return err
+ }
+
+ for _, removal := range toRemove {
+ if removal != nil {
+ mplex.removeCache(removal)
+ }
+ }
+ return nil
+}
+
+// removeCache takes a requested removal and tries to actually remove it. However,
+// multiple requests could result in concurrent requests to remove the same cache.
+// Let one of them win and propagate the error, the rest will no-op.
+func (mplex *cacheMultiplexer) removeCache(removal *cacheRemoval) {
+ mplex.mu.Lock()
+ defer mplex.mu.Unlock()
+ for i, cache := range mplex.caches {
+ if cache == removal.cache {
+ mplex.caches = append(mplex.caches[:i], mplex.caches[i+1:]...)
+ mplex.onCacheRemoved(cache, removal.err)
+ break
+ }
+ }
+}
+
+func (mplex *cacheMultiplexer) Fetch(anchor turbopath.AbsoluteSystemPath, key string, files []string) (ItemStatus, []turbopath.AnchoredSystemPath, int, error) {
+ // Make a shallow copy of the caches, since storeUntil can call removeCache
+ mplex.mu.RLock()
+ caches := make([]Cache, len(mplex.caches))
+ copy(caches, mplex.caches)
+ mplex.mu.RUnlock()
+
+ // We need to return a composite cache status from multiple caches
+ // Initialize the empty struct so we can assign values to it. This is similar
+ // to how the Exists() method works.
+ combinedCacheState := ItemStatus{}
+
+ // Retrieve from caches sequentially; if we did them simultaneously we could
+ // easily write the same file from two goroutines at once.
+ for i, cache := range caches {
+ itemStatus, actualFiles, duration, err := cache.Fetch(anchor, key, files)
+ ok := itemStatus.Local || itemStatus.Remote
+
+ if err != nil {
+ cd := &util.CacheDisabledError{}
+ if errors.As(err, &cd) {
+ mplex.removeCache(&cacheRemoval{
+ cache: cache,
+ err: cd,
+ })
+ }
+ // We're ignoring the error in the else case, since with this cache
+ // abstraction, we want to check lower priority caches rather than fail
+ // the operation. Future work that plumbs UI / Logging into the cache system
+ // should probably log this at least.
+ }
+ if ok {
+ // Store this into other caches. We can ignore errors here because we know
+ // we have previously successfully stored in a higher-priority cache, and so the overall
+ // result is a success at fetching. Storing in lower-priority caches is an optimization.
+ _ = mplex.storeUntil(anchor, key, duration, actualFiles, i)
+
+ // If another cache had already set this to true, we don't need to set it again from this cache
+ combinedCacheState.Local = combinedCacheState.Local || itemStatus.Local
+ combinedCacheState.Remote = combinedCacheState.Remote || itemStatus.Remote
+ return combinedCacheState, actualFiles, duration, err
+ }
+ }
+
+ return ItemStatus{Local: false, Remote: false}, nil, 0, nil
+}
+
+func (mplex *cacheMultiplexer) Exists(target string) ItemStatus {
+ syncCacheState := ItemStatus{}
+ for _, cache := range mplex.caches {
+ itemStatus := cache.Exists(target)
+ syncCacheState.Local = syncCacheState.Local || itemStatus.Local
+ syncCacheState.Remote = syncCacheState.Remote || itemStatus.Remote
+ }
+
+ return syncCacheState
+}
+
+func (mplex *cacheMultiplexer) Clean(anchor turbopath.AbsoluteSystemPath) {
+ for _, cache := range mplex.caches {
+ cache.Clean(anchor)
+ }
+}
+
+func (mplex *cacheMultiplexer) CleanAll() {
+ for _, cache := range mplex.caches {
+ cache.CleanAll()
+ }
+}
+
+func (mplex *cacheMultiplexer) Shutdown() {
+ for _, cache := range mplex.caches {
+ cache.Shutdown()
+ }
+}
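
New is the intended entry point: it assembles the sync cache (fs, http, noop, or a multiplexer of two of them) and, when Opts.Workers > 0, wraps the result in the asyncCache from the previous file. A hedged in-package sketch of wiring it up; offlineClient is hypothetical and, because SkipRemote is set here, is never actually contacted, but New still requires a value satisfying the client interface from cache_http.go below:

package cache

import (
	"net/http"

	"github.com/vercel/turbo/cli/internal/analytics"
	"github.com/vercel/turbo/cli/internal/turbopath"
)

// offlineClient is a hypothetical stub satisfying the client interface.
type offlineClient struct{}

func (offlineClient) PutArtifact(string, []byte, int, string) error { return nil }
func (offlineClient) FetchArtifact(string) (*http.Response, error) {
	return &http.Response{StatusCode: http.StatusNotFound, Body: http.NoBody}, nil
}
func (offlineClient) ArtifactExists(string) (*http.Response, error) {
	return &http.Response{StatusCode: http.StatusNotFound, Body: http.NoBody}, nil
}
func (offlineClient) GetTeamID() string { return "" }

func exampleNew(repoRoot turbopath.AbsoluteSystemPath, recorder analytics.Recorder) (Cache, error) {
	opts := Opts{
		SkipRemote: true, // filesystem cache only, so no multiplexer is built
		Workers:    8,    // non-zero, so New wraps the cache in an asyncCache
	}
	onRemoved := func(Cache, error) {} // nothing to tear down in this sketch
	return New(opts, repoRoot, offlineClient{}, recorder, onRemoved)
}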
diff --git a/cli/internal/cache/cache_fs.go b/cli/internal/cache/cache_fs.go
new file mode 100644
index 0000000..fb15a02
--- /dev/null
+++ b/cli/internal/cache/cache_fs.go
@@ -0,0 +1,174 @@
+// Adapted from https://github.com/thought-machine/please
+// Copyright Thought Machine, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+// Package cache implements our cache abstraction.
+package cache
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/vercel/turbo/cli/internal/analytics"
+ "github.com/vercel/turbo/cli/internal/cacheitem"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+// fsCache is a local filesystem cache
+type fsCache struct {
+ cacheDirectory turbopath.AbsoluteSystemPath
+ recorder analytics.Recorder
+}
+
+// newFsCache creates a new filesystem cache
+func newFsCache(opts Opts, recorder analytics.Recorder, repoRoot turbopath.AbsoluteSystemPath) (*fsCache, error) {
+ cacheDir := opts.resolveCacheDir(repoRoot)
+ if err := cacheDir.MkdirAll(0775); err != nil {
+ return nil, err
+ }
+ return &fsCache{
+ cacheDirectory: cacheDir,
+ recorder: recorder,
+ }, nil
+}
+
+// Fetch returns true if items are cached. It moves them into position as a side effect.
+func (f *fsCache) Fetch(anchor turbopath.AbsoluteSystemPath, hash string, _ []string) (ItemStatus, []turbopath.AnchoredSystemPath, int, error) {
+ uncompressedCachePath := f.cacheDirectory.UntypedJoin(hash + ".tar")
+ compressedCachePath := f.cacheDirectory.UntypedJoin(hash + ".tar.zst")
+
+ var actualCachePath turbopath.AbsoluteSystemPath
+ if uncompressedCachePath.FileExists() {
+ actualCachePath = uncompressedCachePath
+ } else if compressedCachePath.FileExists() {
+ actualCachePath = compressedCachePath
+ } else {
+ // It's not in the cache, bail now
+ f.logFetch(false, hash, 0)
+ return ItemStatus{Local: false}, nil, 0, nil
+ }
+
+ cacheItem, openErr := cacheitem.Open(actualCachePath)
+ if openErr != nil {
+ return ItemStatus{Local: false}, nil, 0, openErr
+ }
+
+ restoredFiles, restoreErr := cacheItem.Restore(anchor)
+ if restoreErr != nil {
+ _ = cacheItem.Close()
+ return ItemStatus{Local: false}, nil, 0, restoreErr
+ }
+
+ meta, err := ReadCacheMetaFile(f.cacheDirectory.UntypedJoin(hash + "-meta.json"))
+ if err != nil {
+ _ = cacheItem.Close()
+ return ItemStatus{Local: false}, nil, 0, fmt.Errorf("error reading cache metadata: %w", err)
+ }
+ f.logFetch(true, hash, meta.Duration)
+
+ // Surface any error from closing the archive, even though the restore itself succeeded.
+ closeErr := cacheItem.Close()
+ if closeErr != nil {
+ return ItemStatus{Local: false}, restoredFiles, 0, closeErr
+ }
+ return ItemStatus{Local: true}, restoredFiles, meta.Duration, nil
+}
+
+func (f *fsCache) Exists(hash string) ItemStatus {
+ uncompressedCachePath := f.cacheDirectory.UntypedJoin(hash + ".tar")
+ compressedCachePath := f.cacheDirectory.UntypedJoin(hash + ".tar.zst")
+
+ if compressedCachePath.FileExists() || uncompressedCachePath.FileExists() {
+ return ItemStatus{Local: true}
+ }
+
+ return ItemStatus{Local: false}
+}
+
+func (f *fsCache) logFetch(hit bool, hash string, duration int) {
+ var event string
+ if hit {
+ event = CacheEventHit
+ } else {
+ event = CacheEventMiss
+ }
+ payload := &CacheEvent{
+ Source: CacheSourceFS,
+ Event: event,
+ Hash: hash,
+ Duration: duration,
+ }
+ f.recorder.LogEvent(payload)
+}
+
+func (f *fsCache) Put(anchor turbopath.AbsoluteSystemPath, hash string, duration int, files []turbopath.AnchoredSystemPath) error {
+ cachePath := f.cacheDirectory.UntypedJoin(hash + ".tar.zst")
+ cacheItem, err := cacheitem.Create(cachePath)
+ if err != nil {
+ return err
+ }
+
+ for _, file := range files {
+ err := cacheItem.AddFile(anchor, file)
+ if err != nil {
+ _ = cacheItem.Close()
+ return err
+ }
+ }
+
+ writeErr := WriteCacheMetaFile(f.cacheDirectory.UntypedJoin(hash+"-meta.json"), &CacheMetadata{
+ Duration: duration,
+ Hash: hash,
+ })
+
+ if writeErr != nil {
+ _ = cacheItem.Close()
+ return writeErr
+ }
+
+ return cacheItem.Close()
+}
+
+func (f *fsCache) Clean(_ turbopath.AbsoluteSystemPath) {
+ fmt.Println("Not implemented yet")
+}
+
+func (f *fsCache) CleanAll() {
+ fmt.Println("Not implemented yet")
+}
+
+func (f *fsCache) Shutdown() {}
+
+// CacheMetadata stores duration and hash information for a cache entry so that aggregate Time Saved calculations
+// can be made from artifacts from various caches
+type CacheMetadata struct {
+ Hash string `json:"hash"`
+ Duration int `json:"duration"`
+}
+
+// WriteCacheMetaFile writes cache metadata file at a path
+func WriteCacheMetaFile(path turbopath.AbsoluteSystemPath, config *CacheMetadata) error {
+ jsonBytes, marshalErr := json.Marshal(config)
+ if marshalErr != nil {
+ return marshalErr
+ }
+ writeFileErr := path.WriteFile(jsonBytes, 0644)
+ if writeFileErr != nil {
+ return writeFileErr
+ }
+ return nil
+}
+
+// ReadCacheMetaFile reads cache metadata file at a path
+func ReadCacheMetaFile(path turbopath.AbsoluteSystemPath) (*CacheMetadata, error) {
+ jsonBytes, readFileErr := path.ReadFile()
+ if readFileErr != nil {
+ return nil, readFileErr
+ }
+ var config CacheMetadata
+ unmarshalErr := json.Unmarshal(jsonBytes, &config)
+ if unmarshalErr != nil {
+ return nil, unmarshalErr
+ }
+ return &config, nil
+}
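
WriteCacheMetaFile and ReadCacheMetaFile round-trip the duration metadata that Fetch later reports as time saved. A quick in-package sketch of the round trip, assuming cacheDir is a writable directory:

package cache

import "github.com/vercel/turbo/cli/internal/turbopath"

func exampleMetaRoundTrip(cacheDir turbopath.AbsoluteSystemPath) (int, error) {
	// The metadata sits next to the artifact as <hash>-meta.json.
	metaPath := cacheDir.UntypedJoin("the-hash-meta.json")
	err := WriteCacheMetaFile(metaPath, &CacheMetadata{Hash: "the-hash", Duration: 1200})
	if err != nil {
		return 0, err
	}
	meta, readErr := ReadCacheMetaFile(metaPath)
	if readErr != nil {
		return 0, readErr
	}
	return meta.Duration, nil // 1200, surfaced on a later cache hit
}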
diff --git a/cli/internal/cache/cache_fs_test.go b/cli/internal/cache/cache_fs_test.go
new file mode 100644
index 0000000..614ad86
--- /dev/null
+++ b/cli/internal/cache/cache_fs_test.go
@@ -0,0 +1,253 @@
+package cache
+
+import (
+ "path/filepath"
+ "testing"
+
+ "github.com/vercel/turbo/cli/internal/analytics"
+ "github.com/vercel/turbo/cli/internal/cacheitem"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "gotest.tools/v3/assert"
+)
+
+type dummyRecorder struct{}
+
+func (dr *dummyRecorder) LogEvent(payload analytics.EventPayload) {}
+
+func TestPut(t *testing.T) {
+ // Set up a test source and cache directory
+ // The "source" directory simulates a package
+ //
+ // <src>/
+ // b
+ // child/
+ // a
+ // link -> ../b
+ // broken -> missing
+ //
+ // Ensure we end up with a matching directory under a
+ // "cache" directory:
+ //
+ // <dst>/the-hash/<src>/...
+
+ src := turbopath.AbsoluteSystemPath(t.TempDir())
+ childDir := src.UntypedJoin("child")
+ err := childDir.MkdirAll(0775)
+ assert.NilError(t, err, "Mkdir")
+ aPath := childDir.UntypedJoin("a")
+ aFile, err := aPath.Create()
+ assert.NilError(t, err, "Create")
+ _, err = aFile.WriteString("hello")
+ assert.NilError(t, err, "WriteString")
+ assert.NilError(t, aFile.Close(), "Close")
+
+ bPath := src.UntypedJoin("b")
+ bFile, err := bPath.Create()
+ assert.NilError(t, err, "Create")
+ _, err = bFile.WriteString("bFile")
+ assert.NilError(t, err, "WriteString")
+ assert.NilError(t, bFile.Close(), "Close")
+
+ srcLinkPath := childDir.UntypedJoin("link")
+ linkTarget := filepath.FromSlash("../b")
+ assert.NilError(t, srcLinkPath.Symlink(linkTarget), "Symlink")
+
+ srcBrokenLinkPath := childDir.Join("broken")
+ assert.NilError(t, srcBrokenLinkPath.Symlink("missing"), "Symlink")
+ circlePath := childDir.Join("circle")
+ assert.NilError(t, circlePath.Symlink(filepath.FromSlash("../child")), "Symlink")
+
+ files := []turbopath.AnchoredSystemPath{
+ turbopath.AnchoredUnixPath("child/").ToSystemPath(), // childDir
+ turbopath.AnchoredUnixPath("child/a").ToSystemPath(), // aPath,
+ turbopath.AnchoredUnixPath("b").ToSystemPath(), // bPath,
+ turbopath.AnchoredUnixPath("child/link").ToSystemPath(), // srcLinkPath,
+ turbopath.AnchoredUnixPath("child/broken").ToSystemPath(), // srcBrokenLinkPath,
+ turbopath.AnchoredUnixPath("child/circle").ToSystemPath(), // circlePath
+ }
+
+ dst := turbopath.AbsoluteSystemPath(t.TempDir())
+ dr := &dummyRecorder{}
+
+ cache := &fsCache{
+ cacheDirectory: dst,
+ recorder: dr,
+ }
+
+ hash := "the-hash"
+ duration := 0
+ putErr := cache.Put(src, hash, duration, files)
+ assert.NilError(t, putErr, "Put")
+
+ // Verify that we got the files that we're expecting
+ dstCachePath := dst.UntypedJoin(hash)
+
+ // This test checks outputs, so we go ahead and pull things back out.
+ // Attempting to satisfy our beliefs that the change is viable with
+ // as few changes to the tests as possible.
+ cacheItem, openErr := cacheitem.Open(dst.UntypedJoin(hash + ".tar.zst"))
+ assert.NilError(t, openErr, "Open")
+
+ _, restoreErr := cacheItem.Restore(dstCachePath)
+ assert.NilError(t, restoreErr, "Restore")
+
+ dstAPath := dstCachePath.UntypedJoin("child", "a")
+ assertFileMatches(t, aPath, dstAPath)
+
+ dstBPath := dstCachePath.UntypedJoin("b")
+ assertFileMatches(t, bPath, dstBPath)
+
+ dstLinkPath := dstCachePath.UntypedJoin("child", "link")
+ target, err := dstLinkPath.Readlink()
+ assert.NilError(t, err, "Readlink")
+ if target != linkTarget {
+ t.Errorf("Readlink got %v, want %v", target, linkTarget)
+ }
+
+ dstBrokenLinkPath := dstCachePath.UntypedJoin("child", "broken")
+ target, err = dstBrokenLinkPath.Readlink()
+ assert.NilError(t, err, "Readlink")
+ if target != "missing" {
+ t.Errorf("Readlink got %v, want missing", target)
+ }
+
+ dstCirclePath := dstCachePath.UntypedJoin("child", "circle")
+ circleLinkDest, err := dstCirclePath.Readlink()
+ assert.NilError(t, err, "Readlink")
+ expectedCircleLinkDest := filepath.FromSlash("../child")
+ if circleLinkDest != expectedCircleLinkDest {
+ t.Errorf("Cache link got %v, want %v", circleLinkDest, expectedCircleLinkDest)
+ }
+
+ assert.NilError(t, cacheItem.Close(), "Close")
+}
+
+func assertFileMatches(t *testing.T, orig turbopath.AbsoluteSystemPath, copy turbopath.AbsoluteSystemPath) {
+ t.Helper()
+ origBytes, err := orig.ReadFile()
+ assert.NilError(t, err, "ReadFile")
+ copyBytes, err := copy.ReadFile()
+ assert.NilError(t, err, "ReadFile")
+ assert.DeepEqual(t, origBytes, copyBytes)
+ origStat, err := orig.Lstat()
+ assert.NilError(t, err, "Lstat")
+ copyStat, err := copy.Lstat()
+ assert.NilError(t, err, "Lstat")
+ assert.Equal(t, origStat.Mode(), copyStat.Mode())
+}
+
+func TestFetch(t *testing.T) {
+ // Set up a test cache directory and target output directory
+ // The "cacheDir" directory simulates a cached package
+ //
+ // <cacheDir>/
+ // the-hash-meta.json
+ // the-hash/
+ // some-package/
+ // b
+ // child/
+ // a
+ // link -> ../b
+ // broken -> missing
+ // circle -> ../child
+ //
+ // Ensure we end up with a matching directory under a
+ // "some-package" directory:
+ //
+ // "some-package"/...
+
+ cacheDir := turbopath.AbsoluteSystemPath(t.TempDir())
+ hash := "the-hash"
+ src := cacheDir.UntypedJoin(hash, "some-package")
+ err := src.MkdirAll(0775)
+ assert.NilError(t, err, "mkdirAll")
+
+ childDir := src.UntypedJoin("child")
+ err = childDir.MkdirAll(0775)
+ assert.NilError(t, err, "Mkdir")
+ aPath := childDir.UntypedJoin("a")
+ aFile, err := aPath.Create()
+ assert.NilError(t, err, "Create")
+ _, err = aFile.WriteString("hello")
+ assert.NilError(t, err, "WriteString")
+ assert.NilError(t, aFile.Close(), "Close")
+
+ bPath := src.UntypedJoin("b")
+ bFile, err := bPath.Create()
+ assert.NilError(t, err, "Create")
+ _, err = bFile.WriteString("bFile")
+ assert.NilError(t, err, "WriteString")
+ assert.NilError(t, bFile.Close(), "Close")
+
+ srcLinkPath := childDir.UntypedJoin("link")
+ linkTarget := filepath.FromSlash("../b")
+ assert.NilError(t, srcLinkPath.Symlink(linkTarget), "Symlink")
+
+ srcBrokenLinkPath := childDir.UntypedJoin("broken")
+ srcBrokenLinkTarget := turbopath.AnchoredUnixPath("missing").ToSystemPath()
+ assert.NilError(t, srcBrokenLinkPath.Symlink(srcBrokenLinkTarget.ToString()), "Symlink")
+
+ circlePath := childDir.Join("circle")
+ srcCircleLinkTarget := turbopath.AnchoredUnixPath("../child").ToSystemPath()
+ assert.NilError(t, circlePath.Symlink(srcCircleLinkTarget.ToString()), "Symlink")
+
+ metadataPath := cacheDir.UntypedJoin("the-hash-meta.json")
+ err = metadataPath.WriteFile([]byte(`{"hash":"the-hash","duration":0}`), 0777)
+ assert.NilError(t, err, "WriteFile")
+
+ dr := &dummyRecorder{}
+
+ cache := &fsCache{
+ cacheDirectory: cacheDir,
+ recorder: dr,
+ }
+
+ inputFiles := []turbopath.AnchoredSystemPath{
+ turbopath.AnchoredUnixPath("some-package/child/").ToSystemPath(), // childDir
+ turbopath.AnchoredUnixPath("some-package/child/a").ToSystemPath(), // aPath,
+ turbopath.AnchoredUnixPath("some-package/b").ToSystemPath(), // bPath,
+ turbopath.AnchoredUnixPath("some-package/child/link").ToSystemPath(), // srcLinkPath,
+ turbopath.AnchoredUnixPath("some-package/child/broken").ToSystemPath(), // srcBrokenLinkPath,
+ turbopath.AnchoredUnixPath("some-package/child/circle").ToSystemPath(), // circlePath
+ }
+
+ putErr := cache.Put(cacheDir.UntypedJoin(hash), hash, 0, inputFiles)
+ assert.NilError(t, putErr, "Put")
+
+ outputDir := turbopath.AbsoluteSystemPath(t.TempDir())
+ dstOutputPath := "some-package"
+ cacheStatus, files, _, err := cache.Fetch(outputDir, "the-hash", []string{})
+ assert.NilError(t, err, "Fetch")
+ hit := cacheStatus.Local || cacheStatus.Remote
+ if !hit {
+ t.Error("Fetch got false, want true")
+ }
+ if len(files) != len(inputFiles) {
+ t.Errorf("len(files) got %v, want %v", len(files), len(inputFiles))
+ }
+
+ dstAPath := outputDir.UntypedJoin(dstOutputPath, "child", "a")
+ assertFileMatches(t, aPath, dstAPath)
+
+ dstBPath := outputDir.UntypedJoin(dstOutputPath, "b")
+ assertFileMatches(t, bPath, dstBPath)
+
+ dstLinkPath := outputDir.UntypedJoin(dstOutputPath, "child", "link")
+ target, err := dstLinkPath.Readlink()
+ assert.NilError(t, err, "Readlink")
+ if target != linkTarget {
+ t.Errorf("Readlink got %v, want %v", target, linkTarget)
+ }
+
+ // Assert that we restore broken symlinks correctly
+ dstBrokenLinkPath := outputDir.UntypedJoin(dstOutputPath, "child", "broken")
+ target, readlinkErr := dstBrokenLinkPath.Readlink()
+ assert.NilError(t, readlinkErr, "Readlink")
+ assert.Equal(t, target, srcBrokenLinkTarget.ToString())
+
+ // Assert that we restore symlinks to directories correctly
+ dstCirclePath := outputDir.UntypedJoin(dstOutputPath, "child", "circle")
+ circleTarget, circleReadlinkErr := dstCirclePath.Readlink()
+ assert.NilError(t, circleReadlinkErr, "Circle Readlink")
+ assert.Equal(t, circleTarget, srcCircleLinkTarget.ToString())
+}
diff --git a/cli/internal/cache/cache_http.go b/cli/internal/cache/cache_http.go
new file mode 100644
index 0000000..1d345bf
--- /dev/null
+++ b/cli/internal/cache/cache_http.go
@@ -0,0 +1,375 @@
+// Adapted from https://github.com/thought-machine/please
+// Copyright Thought Machine, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+package cache
+
+import (
+ "archive/tar"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "os"
+ "path/filepath"
+ "strconv"
+ "time"
+
+ "github.com/DataDog/zstd"
+
+ "github.com/vercel/turbo/cli/internal/analytics"
+ "github.com/vercel/turbo/cli/internal/tarpatch"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+type client interface {
+ PutArtifact(hash string, body []byte, duration int, tag string) error
+ FetchArtifact(hash string) (*http.Response, error)
+ ArtifactExists(hash string) (*http.Response, error)
+ GetTeamID() string
+}
+
+type httpCache struct {
+ writable bool
+ client client
+ requestLimiter limiter
+ recorder analytics.Recorder
+ signerVerifier *ArtifactSignatureAuthentication
+ repoRoot turbopath.AbsoluteSystemPath
+}
+
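+// limiter is a counting semaphore built on a channel: acquire blocks once
+// the channel's capacity is exhausted, capping concurrent cache requests.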
+type limiter chan struct{}
+
+func (l limiter) acquire() {
+ l <- struct{}{}
+}
+
+func (l limiter) release() {
+ <-l
+}
+
+// mtime is the time we attach for the modification time of all files.
+var mtime = time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC)
+
+// nobody is the usual uid / gid of the 'nobody' user.
+const nobody = 65534
+
+func (cache *httpCache) Put(_ turbopath.AbsoluteSystemPath, hash string, duration int, files []turbopath.AnchoredSystemPath) error {
+ // if cache.writable {
+ cache.requestLimiter.acquire()
+ defer cache.requestLimiter.release()
+
+ r, w := io.Pipe()
+ go cache.write(w, hash, files)
+
+ // Read the entire artifact tar into memory so we can easily compute the signature.
+ // Note: retryablehttp.NewRequest reads the files into memory anyway, so there's no
+ // additional overhead by doing the ioutil.ReadAll here instead.
+ artifactBody, err := ioutil.ReadAll(r)
+ if err != nil {
+ return fmt.Errorf("failed to store files in HTTP cache: %w", err)
+ }
+ tag := ""
+ if cache.signerVerifier.isEnabled() {
+ tag, err = cache.signerVerifier.generateTag(hash, artifactBody)
+ if err != nil {
+ return fmt.Errorf("failed to store files in HTTP cache: %w", err)
+ }
+ }
+ return cache.client.PutArtifact(hash, artifactBody, duration, tag)
+}
+
+// write writes a series of files into the given Writer.
+func (cache *httpCache) write(w io.WriteCloser, hash string, files []turbopath.AnchoredSystemPath) {
+ defer func() { _ = w.Close() }()
+ zw := zstd.NewWriter(w)
+ defer func() { _ = zw.Close() }()
+ tw := tar.NewWriter(zw)
+ defer func() { _ = tw.Close() }()
+ for _, file := range files {
+ // log.Printf("caching file %v", file)
+ if err := cache.storeFile(tw, file); err != nil {
+ log.Printf("[ERROR] Error uploading artifact %s to HTTP cache due to: %s", file, err)
+ // TODO(jaredpalmer): How can we cancel the request at this point?
+ }
+ }
+}
+
+func (cache *httpCache) storeFile(tw *tar.Writer, repoRelativePath turbopath.AnchoredSystemPath) error {
+ absoluteFilePath := repoRelativePath.RestoreAnchor(cache.repoRoot)
+ info, err := absoluteFilePath.Lstat()
+ if err != nil {
+ return err
+ }
+ target := ""
+ if info.Mode()&os.ModeSymlink != 0 {
+ target, err = absoluteFilePath.Readlink()
+ if err != nil {
+ return err
+ }
+ }
+ hdr, err := tarpatch.FileInfoHeader(repoRelativePath.ToUnixPath(), info, filepath.ToSlash(target))
+ if err != nil {
+ return err
+ }
+ // Ensure posix path for filename written in header.
+ hdr.Name = repoRelativePath.ToUnixPath().ToString()
+ // Zero out all timestamps.
+ hdr.ModTime = mtime
+ hdr.AccessTime = mtime
+ hdr.ChangeTime = mtime
+ // Strip user/group ids.
+ hdr.Uid = nobody
+ hdr.Gid = nobody
+ hdr.Uname = "nobody"
+ hdr.Gname = "nobody"
+ if err := tw.WriteHeader(hdr); err != nil {
+ return err
+ } else if info.IsDir() || target != "" {
+ return nil // nothing to write
+ }
+ f, err := absoluteFilePath.Open()
+ if err != nil {
+ return err
+ }
+ defer func() { _ = f.Close() }()
+ _, err = io.Copy(tw, f)
+ if errors.Is(err, tar.ErrWriteTooLong) {
+ log.Printf("Error writing %v to tar file, info: %v, mode: %v, is regular: %v", repoRelativePath, info, info.Mode(), info.Mode().IsRegular())
+ }
+ return err
+}
+
+func (cache *httpCache) Fetch(_ turbopath.AbsoluteSystemPath, key string, _ []string) (ItemStatus, []turbopath.AnchoredSystemPath, int, error) {
+ cache.requestLimiter.acquire()
+ defer cache.requestLimiter.release()
+ hit, files, duration, err := cache.retrieve(key)
+ if err != nil {
+ // TODO: analytics event?
+ return ItemStatus{Remote: false}, files, duration, fmt.Errorf("failed to retrieve files from HTTP cache: %w", err)
+ }
+ cache.logFetch(hit, key, duration)
+ return ItemStatus{Remote: hit}, files, duration, err
+}
+
+func (cache *httpCache) Exists(key string) ItemStatus {
+ cache.requestLimiter.acquire()
+ defer cache.requestLimiter.release()
+ hit, err := cache.exists(key)
+ if err != nil {
+ return ItemStatus{Remote: false}
+ }
+ return ItemStatus{Remote: hit}
+}
+
+func (cache *httpCache) logFetch(hit bool, hash string, duration int) {
+ var event string
+ if hit {
+ event = CacheEventHit
+ } else {
+ event = CacheEventMiss
+ }
+ payload := &CacheEvent{
+ Source: CacheSourceRemote,
+ Event: event,
+ Hash: hash,
+ Duration: duration,
+ }
+ cache.recorder.LogEvent(payload)
+}
+
+func (cache *httpCache) exists(hash string) (bool, error) {
+ resp, err := cache.client.ArtifactExists(hash)
+ if err != nil {
+ return false, nil
+ }
+
+ defer func() { err = resp.Body.Close() }()
+
+ if resp.StatusCode == http.StatusNotFound {
+ return false, nil
+ } else if resp.StatusCode != http.StatusOK {
+ return false, fmt.Errorf("%s", strconv.Itoa(resp.StatusCode))
+ }
+ return true, err
+}
+
+func (cache *httpCache) retrieve(hash string) (bool, []turbopath.AnchoredSystemPath, int, error) {
+ resp, err := cache.client.FetchArtifact(hash)
+ if err != nil {
+ return false, nil, 0, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode == http.StatusNotFound {
+ return false, nil, 0, nil // doesn't exist - not an error
+ } else if resp.StatusCode != http.StatusOK {
+ b, _ := ioutil.ReadAll(resp.Body)
+ return false, nil, 0, fmt.Errorf("%s", string(b))
+ }
+ // If present, extract the duration from the response.
+ duration := 0
+ if resp.Header.Get("x-artifact-duration") != "" {
+ intVar, err := strconv.Atoi(resp.Header.Get("x-artifact-duration"))
+ if err != nil {
+ return false, nil, 0, fmt.Errorf("invalid x-artifact-duration header: %w", err)
+ }
+ duration = intVar
+ }
+ var tarReader io.Reader
+
+ if cache.signerVerifier.isEnabled() {
+ expectedTag := resp.Header.Get("x-artifact-tag")
+ if expectedTag == "" {
+ // If the verifier is enabled all incoming artifact downloads must have a signature
+ return false, nil, 0, errors.New("artifact verification failed: Downloaded artifact is missing required x-artifact-tag header")
+ }
+ b, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return false, nil, 0, fmt.Errorf("artifact verification failed: %w", err)
+ }
+ isValid, err := cache.signerVerifier.validate(hash, b, expectedTag)
+ if err != nil {
+ return false, nil, 0, fmt.Errorf("artifact verification failed: %w", err)
+ }
+ if !isValid {
+ err = fmt.Errorf("artifact verification failed: artifact tag does not match expected tag %s", expectedTag)
+ return false, nil, 0, err
+ }
+ // The artifact has been verified and the body can be read and untarred
+ tarReader = bytes.NewReader(b)
+ } else {
+ tarReader = resp.Body
+ }
+ files, err := restoreTar(cache.repoRoot, tarReader)
+ if err != nil {
+ return false, nil, 0, err
+ }
+ return true, files, duration, nil
+}
+
+// restoreTar returns posix-style repo-relative paths of the files it
+// restored. In the future, these should likely be repo-relative system paths
+// so that they are suitable for being fed into cache.Put for other caches.
+// For now this works because Windows also accepts /-delimited paths.
+func restoreTar(root turbopath.AbsoluteSystemPath, reader io.Reader) (_ []turbopath.AnchoredSystemPath, err error) {
+	files := []turbopath.AnchoredSystemPath{}
+	missingLinks := []*tar.Header{}
+	zr := zstd.NewReader(reader)
+	// Close the decompressor on the way out, surfacing its error only if
+	// nothing else failed. A deferred assignment to a plain local would run
+	// after the return values had already been copied and be silently dropped.
+	defer func() {
+		if closeErr := zr.Close(); err == nil {
+			err = closeErr
+		}
+	}()
+	tr := tar.NewReader(zr)
+ for {
+ hdr, err := tr.Next()
+ if err != nil {
+ if err == io.EOF {
+ for _, link := range missingLinks {
+ err := restoreSymlink(root, link, true)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+				return files, nil
+ }
+ return nil, err
+ }
+ // hdr.Name is always a posix-style path
+ // FIXME: THIS IS A BUG.
+ restoredName := turbopath.AnchoredUnixPath(hdr.Name)
+ files = append(files, restoredName.ToSystemPath())
+ filename := restoredName.ToSystemPath().RestoreAnchor(root)
+ if isChild, err := root.ContainsPath(filename); err != nil {
+ return nil, err
+ } else if !isChild {
+ return nil, fmt.Errorf("cannot untar file to %v", filename)
+ }
+ switch hdr.Typeflag {
+ case tar.TypeDir:
+ if err := filename.MkdirAll(0775); err != nil {
+ return nil, err
+ }
+ case tar.TypeReg:
+ if dir := filename.Dir(); dir != "." {
+ if err := dir.MkdirAll(0775); err != nil {
+ return nil, err
+ }
+ }
+			f, err := filename.OpenFile(os.O_WRONLY|os.O_TRUNC|os.O_CREATE, os.FileMode(hdr.Mode))
+			if err != nil {
+				return nil, err
+			}
+			if _, err := io.Copy(f, tr); err != nil {
+				// Close the handle before bailing so it doesn't leak.
+				_ = f.Close()
+				return nil, err
+			}
+			if err := f.Close(); err != nil {
+				return nil, err
+			}
+ case tar.TypeSymlink:
+ if err := restoreSymlink(root, hdr, false); errors.Is(err, errNonexistentLinkTarget) {
+ missingLinks = append(missingLinks, hdr)
+ } else if err != nil {
+ return nil, err
+ }
+ default:
+ log.Printf("Unhandled file type %d for %s", hdr.Typeflag, hdr.Name)
+ }
+ }
+}
+
+var errNonexistentLinkTarget = errors.New("the link target does not exist")
+
+func restoreSymlink(root turbopath.AbsoluteSystemPath, hdr *tar.Header, allowNonexistentTargets bool) error {
+ // Note that hdr.Linkname is really the link target
+ relativeLinkTarget := filepath.FromSlash(hdr.Linkname)
+ linkFilename := root.UntypedJoin(hdr.Name)
+ if err := linkFilename.EnsureDir(); err != nil {
+ return err
+ }
+
+ // TODO: check if this is an absolute path, or if we even care
+ linkTarget := linkFilename.Dir().UntypedJoin(relativeLinkTarget)
+ if _, err := linkTarget.Lstat(); err != nil {
+ if os.IsNotExist(err) {
+ if !allowNonexistentTargets {
+ return errNonexistentLinkTarget
+ }
+ // if we're allowing nonexistent link targets, proceed to creating the link
+ } else {
+ return err
+ }
+ }
+ // Ensure that the link we're about to create doesn't already exist
+ if err := linkFilename.Remove(); err != nil && !errors.Is(err, os.ErrNotExist) {
+ return err
+ }
+ if err := linkFilename.Symlink(relativeLinkTarget); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (cache *httpCache) Clean(_ turbopath.AbsoluteSystemPath) {
+ // Not possible; this implementation can only clean for a hash.
+}
+
+func (cache *httpCache) CleanAll() {
+ // Also not possible.
+}
+
+func (cache *httpCache) Shutdown() {}
+
+func newHTTPCache(opts Opts, client client, recorder analytics.Recorder) *httpCache {
+ return &httpCache{
+ writable: true,
+ client: client,
+ requestLimiter: make(limiter, 20),
+ recorder: recorder,
+ signerVerifier: &ArtifactSignatureAuthentication{
+ // TODO(Gaspar): this should use RemoteCacheOptions.TeamId once we start
+ // enforcing team restrictions for repositories.
+ teamId: client.GetTeamID(),
+ enabled: opts.RemoteCacheOpts.Signature,
+ },
+ }
+}
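+
+// A minimal wiring sketch, for illustration only (the real construction
+// happens in cache.New; `opts`, `apiClient`, `recorder`, and `repoRoot` are
+// hypothetical values):
+//
+//	hc := newHTTPCache(opts, apiClient, recorder)
+//	status, files, duration, err := hc.Fetch(repoRoot, "some-hash", nil)
+//	// status.Remote reports the hit; files holds the restored anchored paths.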
diff --git a/cli/internal/cache/cache_http_test.go b/cli/internal/cache/cache_http_test.go
new file mode 100644
index 0000000..d187931
--- /dev/null
+++ b/cli/internal/cache/cache_http_test.go
@@ -0,0 +1,245 @@
+package cache
+
+import (
+ "archive/tar"
+ "bytes"
+ "errors"
+ "net/http"
+ "testing"
+
+ "github.com/DataDog/zstd"
+
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "github.com/vercel/turbo/cli/internal/util"
+ "gotest.tools/v3/assert"
+)
+
+type errorResp struct {
+ err error
+}
+
+func (sr *errorResp) PutArtifact(hash string, body []byte, duration int, tag string) error {
+ return sr.err
+}
+
+func (sr *errorResp) FetchArtifact(hash string) (*http.Response, error) {
+ return nil, sr.err
+}
+
+func (sr *errorResp) ArtifactExists(hash string) (*http.Response, error) {
+ return nil, sr.err
+}
+
+func (sr *errorResp) GetTeamID() string {
+ return ""
+}
+
+func TestRemoteCachingDisabled(t *testing.T) {
+ clientErr := &util.CacheDisabledError{
+ Status: util.CachingStatusDisabled,
+ Message: "Remote Caching has been disabled for this team. A team owner can enable it here: $URL",
+ }
+ client := &errorResp{err: clientErr}
+ cache := &httpCache{
+ client: client,
+ requestLimiter: make(limiter, 20),
+ }
+ cd := &util.CacheDisabledError{}
+ _, _, _, err := cache.Fetch("unused-target", "some-hash", []string{"unused", "outputs"})
+ if !errors.As(err, &cd) {
+ t.Errorf("cache.Fetch err got %v, want a CacheDisabled error", err)
+ }
+ if cd.Status != util.CachingStatusDisabled {
+ t.Errorf("CacheDisabled.Status got %v, want %v", cd.Status, util.CachingStatusDisabled)
+ }
+}
+
+func makeValidTar(t *testing.T) *bytes.Buffer {
+ // <repoRoot>
+ // my-pkg/
+ // some-file
+ // link-to-extra-file -> ../extra-file
+ // broken-link -> ../../global-dep
+ // extra-file
+
+ t.Helper()
+ buf := &bytes.Buffer{}
+ zw := zstd.NewWriter(buf)
+ defer func() {
+ if err := zw.Close(); err != nil {
+			t.Fatalf("failed to close zstd: %v", err)
+ }
+ }()
+ tw := tar.NewWriter(zw)
+ defer func() {
+ if err := tw.Close(); err != nil {
+ t.Fatalf("failed to close tar: %v", err)
+ }
+ }()
+
+ // my-pkg
+ h := &tar.Header{
+ Name: "my-pkg/",
+ Mode: int64(0644),
+ Typeflag: tar.TypeDir,
+ }
+ if err := tw.WriteHeader(h); err != nil {
+ t.Fatalf("failed to write header: %v", err)
+ }
+ // my-pkg/some-file
+ contents := []byte("some-file-contents")
+ h = &tar.Header{
+ Name: "my-pkg/some-file",
+ Mode: int64(0644),
+ Typeflag: tar.TypeReg,
+ Size: int64(len(contents)),
+ }
+ if err := tw.WriteHeader(h); err != nil {
+ t.Fatalf("failed to write header: %v", err)
+ }
+ if _, err := tw.Write(contents); err != nil {
+ t.Fatalf("failed to write file: %v", err)
+ }
+ // my-pkg/link-to-extra-file
+ h = &tar.Header{
+ Name: "my-pkg/link-to-extra-file",
+ Mode: int64(0644),
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../extra-file",
+ }
+ if err := tw.WriteHeader(h); err != nil {
+ t.Fatalf("failed to write header: %v", err)
+ }
+ // my-pkg/broken-link
+ h = &tar.Header{
+ Name: "my-pkg/broken-link",
+ Mode: int64(0644),
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../../global-dep",
+ }
+ if err := tw.WriteHeader(h); err != nil {
+ t.Fatalf("failed to write header: %v", err)
+ }
+ // extra-file
+ contents = []byte("extra-file-contents")
+ h = &tar.Header{
+ Name: "extra-file",
+ Mode: int64(0644),
+ Typeflag: tar.TypeReg,
+ Size: int64(len(contents)),
+ }
+ if err := tw.WriteHeader(h); err != nil {
+ t.Fatalf("failed to write header: %v", err)
+ }
+ if _, err := tw.Write(contents); err != nil {
+ t.Fatalf("failed to write file: %v", err)
+ }
+
+ return buf
+}
+
+func makeInvalidTar(t *testing.T) *bytes.Buffer {
+ // contains a single file that traverses out
+ // ../some-file
+
+ t.Helper()
+ buf := &bytes.Buffer{}
+ zw := zstd.NewWriter(buf)
+ defer func() {
+ if err := zw.Close(); err != nil {
+			t.Fatalf("failed to close zstd: %v", err)
+ }
+ }()
+ tw := tar.NewWriter(zw)
+ defer func() {
+ if err := tw.Close(); err != nil {
+ t.Fatalf("failed to close tar: %v", err)
+ }
+ }()
+
+	// ../some-file
+ contents := []byte("some-file-contents")
+ h := &tar.Header{
+ Name: "../some-file",
+ Mode: int64(0644),
+ Typeflag: tar.TypeReg,
+ Size: int64(len(contents)),
+ }
+ if err := tw.WriteHeader(h); err != nil {
+ t.Fatalf("failed to write header: %v", err)
+ }
+ if _, err := tw.Write(contents); err != nil {
+ t.Fatalf("failed to write file: %v", err)
+ }
+ return buf
+}
+
+func TestRestoreTar(t *testing.T) {
+ root := fs.AbsoluteSystemPathFromUpstream(t.TempDir())
+
+ tar := makeValidTar(t)
+
+ expectedFiles := []turbopath.AnchoredSystemPath{
+ turbopath.AnchoredUnixPath("extra-file").ToSystemPath(),
+ turbopath.AnchoredUnixPath("my-pkg/").ToSystemPath(),
+ turbopath.AnchoredUnixPath("my-pkg/some-file").ToSystemPath(),
+ turbopath.AnchoredUnixPath("my-pkg/link-to-extra-file").ToSystemPath(),
+ turbopath.AnchoredUnixPath("my-pkg/broken-link").ToSystemPath(),
+ }
+ files, err := restoreTar(root, tar)
+ assert.NilError(t, err, "readTar")
+
+ expectedSet := make(util.Set)
+ for _, file := range expectedFiles {
+ expectedSet.Add(file.ToString())
+ }
+ gotSet := make(util.Set)
+ for _, file := range files {
+ gotSet.Add(file.ToString())
+ }
+ extraFiles := gotSet.Difference(expectedSet)
+ if extraFiles.Len() > 0 {
+ t.Errorf("got extra files: %v", extraFiles.UnsafeListOfStrings())
+ }
+ missingFiles := expectedSet.Difference(gotSet)
+ if missingFiles.Len() > 0 {
+ t.Errorf("missing expected files: %v", missingFiles.UnsafeListOfStrings())
+ }
+
+ // Verify file contents
+ extraFile := root.UntypedJoin("extra-file")
+ contents, err := extraFile.ReadFile()
+ assert.NilError(t, err, "ReadFile")
+ assert.DeepEqual(t, contents, []byte("extra-file-contents"))
+
+ someFile := root.UntypedJoin("my-pkg", "some-file")
+ contents, err = someFile.ReadFile()
+ assert.NilError(t, err, "ReadFile")
+ assert.DeepEqual(t, contents, []byte("some-file-contents"))
+}
+
+func TestRestoreInvalidTar(t *testing.T) {
+ root := fs.AbsoluteSystemPathFromUpstream(t.TempDir())
+ expectedContents := []byte("important-data")
+ someFile := root.UntypedJoin("some-file")
+ err := someFile.WriteFile(expectedContents, 0644)
+ assert.NilError(t, err, "WriteFile")
+
+ tar := makeInvalidTar(t)
+ // use a child directory so that blindly untarring will squash the file
+ // that we just wrote above.
+ repoRoot := root.UntypedJoin("repo")
+ _, err = restoreTar(repoRoot, tar)
+ if err == nil {
+ t.Error("expected error untarring invalid tar")
+ }
+
+ contents, err := someFile.ReadFile()
+ assert.NilError(t, err, "ReadFile")
+ assert.Equal(t, string(contents), string(expectedContents), "expected to not overwrite file")
+}
+
+// Note that testing Put would require mocking the filesystem, and it is not the
+// most interesting test yet: the implementation directly returns the error from
+// PutArtifact. We should still add the test once feasible to avoid breakage.
diff --git a/cli/internal/cache/cache_noop.go b/cli/internal/cache/cache_noop.go
new file mode 100644
index 0000000..80a3c23
--- /dev/null
+++ b/cli/internal/cache/cache_noop.go
@@ -0,0 +1,23 @@
+package cache
+
+import "github.com/vercel/turbo/cli/internal/turbopath"
+
+type noopCache struct{}
+
+func newNoopCache() *noopCache {
+ return &noopCache{}
+}
+
+func (c *noopCache) Put(_ turbopath.AbsoluteSystemPath, _ string, _ int, _ []turbopath.AnchoredSystemPath) error {
+ return nil
+}
+func (c *noopCache) Fetch(_ turbopath.AbsoluteSystemPath, _ string, _ []string) (ItemStatus, []turbopath.AnchoredSystemPath, int, error) {
+ return ItemStatus{Local: false, Remote: false}, nil, 0, nil
+}
+func (c *noopCache) Exists(_ string) ItemStatus {
+ return ItemStatus{}
+}
+
+func (c *noopCache) Clean(_ turbopath.AbsoluteSystemPath) {}
+func (c *noopCache) CleanAll() {}
+func (c *noopCache) Shutdown() {}
diff --git a/cli/internal/cache/cache_signature_authentication.go b/cli/internal/cache/cache_signature_authentication.go
new file mode 100644
index 0000000..f9fe4c0
--- /dev/null
+++ b/cli/internal/cache/cache_signature_authentication.go
@@ -0,0 +1,88 @@
+// Adapted from https://github.com/thought-machine/please
+// Copyright Thought Machine, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+package cache
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "hash"
+ "os"
+)
+
+type ArtifactSignatureAuthentication struct {
+ teamId string
+ enabled bool
+}
+
+func (asa *ArtifactSignatureAuthentication) isEnabled() bool {
+ return asa.enabled
+}
+
+// secretKey returns the signing secret from the TURBO_REMOTE_CACHE_SIGNATURE_KEY
+// environment variable. If the variable is unset or its value is empty, an
+// error is returned.
+func (asa *ArtifactSignatureAuthentication) secretKey() ([]byte, error) {
+ secret := os.Getenv("TURBO_REMOTE_CACHE_SIGNATURE_KEY")
+ if len(secret) == 0 {
+ return nil, errors.New("signature secret key not found. You must specify a secret key in the TURBO_REMOTE_CACHE_SIGNATURE_KEY environment variable")
+ }
+ return []byte(secret), nil
+}
+
+func (asa *ArtifactSignatureAuthentication) generateTag(hash string, artifactBody []byte) (string, error) {
+ tag, err := asa.getTagGenerator(hash)
+ if err != nil {
+ return "", err
+ }
+ tag.Write(artifactBody)
+ return base64.StdEncoding.EncodeToString(tag.Sum(nil)), nil
+}
+
+func (asa *ArtifactSignatureAuthentication) getTagGenerator(hash string) (hash.Hash, error) {
+ teamId := asa.teamId
+ secret, err := asa.secretKey()
+ if err != nil {
+ return nil, err
+ }
+ artifactMetadata := &struct {
+ Hash string `json:"hash"`
+ TeamId string `json:"teamId"`
+ }{
+ Hash: hash,
+ TeamId: teamId,
+ }
+ metadata, err := json.Marshal(artifactMetadata)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO(Gaspar) Support additional signing algorithms here
+ h := hmac.New(sha256.New, secret)
+ h.Write(metadata)
+ return h, nil
+}
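+
+// To make the construction concrete, the tag is computed equivalently to the
+// following sketch (illustrative only; the values are hypothetical):
+//
+//	metadata := []byte(`{"hash":"some-hash","teamId":"team_someid"}`)
+//	mac := hmac.New(sha256.New, secret)
+//	mac.Write(metadata)     // metadata first (see getTagGenerator)...
+//	mac.Write(artifactBody) // ...then the raw artifact bytes (see generateTag)
+//	tag := base64.StdEncoding.EncodeToString(mac.Sum(nil))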
+
+func (asa *ArtifactSignatureAuthentication) validate(hash string, artifactBody []byte, expectedTag string) (bool, error) {
+ computedTag, err := asa.generateTag(hash, artifactBody)
+ if err != nil {
+ return false, fmt.Errorf("failed to verify artifact tag: %w", err)
+ }
+ return hmac.Equal([]byte(computedTag), []byte(expectedTag)), nil
+}
+
+type StreamValidator struct {
+ currentHash hash.Hash
+}
+
+func (sv *StreamValidator) Validate(expectedTag string) bool {
+ computedTag := base64.StdEncoding.EncodeToString(sv.currentHash.Sum(nil))
+ return hmac.Equal([]byte(computedTag), []byte(expectedTag))
+}
+
+func (sv *StreamValidator) CurrentValue() string {
+ return base64.StdEncoding.EncodeToString(sv.currentHash.Sum(nil))
+}
diff --git a/cli/internal/cache/cache_signature_authentication_test.go b/cli/internal/cache/cache_signature_authentication_test.go
new file mode 100644
index 0000000..7f3f865
--- /dev/null
+++ b/cli/internal/cache/cache_signature_authentication_test.go
@@ -0,0 +1,195 @@
+// Adapted from https://github.com/thought-machine/please
+// Copyright Thought Machine, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+package cache
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_SecretKeySuccess(t *testing.T) {
+ teamId := "team_someid"
+ secretKeyEnvName := "TURBO_REMOTE_CACHE_SIGNATURE_KEY"
+ secretKeyEnvValue := "my-secret-key-env"
+ t.Setenv(secretKeyEnvName, secretKeyEnvValue)
+
+ cases := []struct {
+ name string
+ asa *ArtifactSignatureAuthentication
+ expectedSecretKey string
+ expectedSecretKeyError bool
+ }{
+ {
+ name: "Accepts secret key",
+ asa: &ArtifactSignatureAuthentication{
+ teamId: teamId,
+ enabled: true,
+ },
+ expectedSecretKey: secretKeyEnvValue,
+ expectedSecretKeyError: false,
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ secretKey, err := tc.asa.secretKey()
+ if tc.expectedSecretKeyError {
+ assert.Error(t, err)
+ } else {
+ assert.NoError(t, err)
+ assert.Equal(t, tc.expectedSecretKey, string(secretKey))
+ }
+ })
+ }
+}
+
+func Test_SecretKeyErrors(t *testing.T) {
+	teamId := "team_someid"
+	secretKeyEnvName := "TURBO_REMOTE_CACHE_SIGNATURE_KEY"
+
+	cases := []struct {
+		name                   string
+		asa                    *ArtifactSignatureAuthentication
+		envValue               *string
+		expectedSecretKey      string
+		expectedSecretKeyError bool
+	}{
+		{
+			name: "Secret key not defined errors",
+			asa: &ArtifactSignatureAuthentication{
+				teamId:  teamId,
+				enabled: true,
+			},
+			// TURBO_REMOTE_CACHE_SIGNATURE_KEY is left unset for this case.
+			envValue:               nil,
+			expectedSecretKey:      "",
+			expectedSecretKeyError: true,
+		},
+		{
+			name: "Secret key is empty errors",
+			asa: &ArtifactSignatureAuthentication{
+				teamId:  teamId,
+				enabled: true,
+			},
+			// Set, but zero-length.
+			envValue:               new(string),
+			expectedSecretKey:      "",
+			expectedSecretKeyError: true,
+		},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.name, func(t *testing.T) {
+			if tc.envValue != nil {
+				t.Setenv(secretKeyEnvName, *tc.envValue)
+			}
+			secretKey, err := tc.asa.secretKey()
+			if tc.expectedSecretKeyError {
+				assert.Error(t, err)
+			} else {
+				assert.NoError(t, err)
+				assert.Equal(t, tc.expectedSecretKey, string(secretKey))
+			}
+		})
+	}
+}
+
+func Test_GenerateTagAndValidate(t *testing.T) {
+ teamId := "team_someid"
+ hash := "the-artifact-hash"
+ artifactBody := []byte("the artifact body as bytes")
+ secretKeyEnvName := "TURBO_REMOTE_CACHE_SIGNATURE_KEY"
+ secretKeyEnvValue := "my-secret-key-env"
+ t.Setenv(secretKeyEnvName, secretKeyEnvValue)
+
+ cases := []struct {
+ name string
+ asa *ArtifactSignatureAuthentication
+ expectedTagMatches string
+ expectedTagDoesNotMatch string
+ }{
+ {
+ name: "Uses hash to generate tag",
+ asa: &ArtifactSignatureAuthentication{
+ teamId: teamId,
+ enabled: true,
+ },
+ expectedTagMatches: testUtilGetHMACTag(hash, teamId, artifactBody, secretKeyEnvValue),
+ expectedTagDoesNotMatch: testUtilGetHMACTag("wrong-hash", teamId, artifactBody, secretKeyEnvValue),
+ },
+ {
+ name: "Uses teamId to generate tag",
+ asa: &ArtifactSignatureAuthentication{
+ teamId: teamId,
+ enabled: true,
+ },
+ expectedTagMatches: testUtilGetHMACTag(hash, teamId, artifactBody, secretKeyEnvValue),
+ expectedTagDoesNotMatch: testUtilGetHMACTag(hash, "wrong-teamId", artifactBody, secretKeyEnvValue),
+ },
+ {
+ name: "Uses artifactBody to generate tag",
+ asa: &ArtifactSignatureAuthentication{
+ teamId: teamId,
+ enabled: true,
+ },
+ expectedTagMatches: testUtilGetHMACTag(hash, teamId, artifactBody, secretKeyEnvValue),
+ expectedTagDoesNotMatch: testUtilGetHMACTag(hash, teamId, []byte("wrong-artifact-body"), secretKeyEnvValue),
+ },
+ {
+ name: "Uses secret to generate tag",
+ asa: &ArtifactSignatureAuthentication{
+ teamId: teamId,
+ enabled: true,
+ },
+ expectedTagMatches: testUtilGetHMACTag(hash, teamId, artifactBody, secretKeyEnvValue),
+ expectedTagDoesNotMatch: testUtilGetHMACTag(hash, teamId, artifactBody, "wrong-secret"),
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ tag, err := tc.asa.generateTag(hash, artifactBody)
+ assert.NoError(t, err)
+
+ // validates the tag
+ assert.Equal(t, tc.expectedTagMatches, tag)
+ isValid, err := tc.asa.validate(hash, artifactBody, tc.expectedTagMatches)
+ assert.NoError(t, err)
+ assert.True(t, isValid)
+
+ // does not validate the tag
+ assert.NotEqual(t, tc.expectedTagDoesNotMatch, tag)
+ isValid, err = tc.asa.validate(hash, artifactBody, tc.expectedTagDoesNotMatch)
+ assert.NoError(t, err)
+ assert.False(t, isValid)
+		})
+ }
+}
+
+// Test utils
+
+// Return the Base64 encoded HMAC given the artifact metadata and artifact body
+func testUtilGetHMACTag(hash string, teamId string, artifactBody []byte, secret string) string {
+ artifactMetadata := &struct {
+ Hash string `json:"hash"`
+ TeamId string `json:"teamId"`
+ }{
+ Hash: hash,
+ TeamId: teamId,
+ }
+ metadata, _ := json.Marshal(artifactMetadata)
+ h := hmac.New(sha256.New, []byte(secret))
+ h.Write(metadata)
+ h.Write(artifactBody)
+ return base64.StdEncoding.EncodeToString(h.Sum(nil))
+}
+
+func Test_Utils(t *testing.T) {
+ teamId := "team_someid"
+ secret := "my-secret"
+ hash := "the-artifact-hash"
+ artifactBody := []byte("the artifact body as bytes")
+ testTag := testUtilGetHMACTag(hash, teamId, artifactBody, secret)
+ expectedTag := "9Fu8YniPZ2dEBolTPQoNlFWG0LNMW8EXrBsRmf/fEHk="
+ assert.True(t, hmac.Equal([]byte(testTag), []byte(expectedTag)))
+}
diff --git a/cli/internal/cache/cache_test.go b/cli/internal/cache/cache_test.go
new file mode 100644
index 0000000..3f17877
--- /dev/null
+++ b/cli/internal/cache/cache_test.go
@@ -0,0 +1,318 @@
+package cache
+
+import (
+ "net/http"
+ "reflect"
+ "sync/atomic"
+ "testing"
+
+ "github.com/vercel/turbo/cli/internal/analytics"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "github.com/vercel/turbo/cli/internal/util"
+)
+
+type testCache struct {
+ disabledErr *util.CacheDisabledError
+ entries map[string][]turbopath.AnchoredSystemPath
+}
+
+func (tc *testCache) Fetch(_ turbopath.AbsoluteSystemPath, hash string, _ []string) (ItemStatus, []turbopath.AnchoredSystemPath, int, error) {
+ if tc.disabledErr != nil {
+ return ItemStatus{}, nil, 0, tc.disabledErr
+ }
+ foundFiles, ok := tc.entries[hash]
+ if ok {
+ duration := 5
+ return ItemStatus{Local: true}, foundFiles, duration, nil
+ }
+ return ItemStatus{}, nil, 0, nil
+}
+
+func (tc *testCache) Exists(hash string) ItemStatus {
+ if tc.disabledErr != nil {
+ return ItemStatus{}
+ }
+ _, ok := tc.entries[hash]
+ if ok {
+ return ItemStatus{Local: true}
+ }
+ return ItemStatus{}
+}
+
+func (tc *testCache) Put(_ turbopath.AbsoluteSystemPath, hash string, _ int, files []turbopath.AnchoredSystemPath) error {
+ if tc.disabledErr != nil {
+ return tc.disabledErr
+ }
+ tc.entries[hash] = files
+ return nil
+}
+
+func (tc *testCache) Clean(_ turbopath.AbsoluteSystemPath) {}
+func (tc *testCache) CleanAll() {}
+func (tc *testCache) Shutdown() {}
+
+func newEnabledCache() *testCache {
+ return &testCache{
+ entries: make(map[string][]turbopath.AnchoredSystemPath),
+ }
+}
+
+func newDisabledCache() *testCache {
+ return &testCache{
+ disabledErr: &util.CacheDisabledError{
+ Status: util.CachingStatusDisabled,
+ Message: "remote caching is disabled",
+ },
+ }
+}
+
+func TestPutCachingDisabled(t *testing.T) {
+ disabledCache := newDisabledCache()
+ caches := []Cache{
+ newEnabledCache(),
+ disabledCache,
+ newEnabledCache(),
+ newEnabledCache(),
+ }
+ var removeCalled uint64
+ mplex := &cacheMultiplexer{
+ caches: caches,
+ onCacheRemoved: func(cache Cache, err error) {
+ atomic.AddUint64(&removeCalled, 1)
+ },
+ }
+
+ err := mplex.Put("unused-target", "some-hash", 5, []turbopath.AnchoredSystemPath{"a-file"})
+ if err != nil {
+ // don't leak the cache removal
+ t.Errorf("Put got error %v, want <nil>", err)
+ }
+
+ removes := atomic.LoadUint64(&removeCalled)
+ if removes != 1 {
+ t.Errorf("removes count: %v, want 1", removes)
+ }
+
+ mplex.mu.RLock()
+ if len(mplex.caches) != 3 {
+ t.Errorf("found %v caches, expected to have 3 after one was removed", len(mplex.caches))
+ }
+ for _, cache := range mplex.caches {
+ if cache == disabledCache {
+ t.Error("found disabled cache, expected it to be removed")
+ }
+ }
+ mplex.mu.RUnlock()
+
+ // subsequent Fetch should still work
+ cacheStatus, _, _, err := mplex.Fetch("unused-target", "some-hash", []string{"unused", "files"})
+ if err != nil {
+ t.Errorf("got error fetching files: %v", err)
+ }
+ hit := cacheStatus.Local || cacheStatus.Remote
+ if !hit {
+ t.Error("failed to find previously stored files")
+ }
+
+ removes = atomic.LoadUint64(&removeCalled)
+ if removes != 1 {
+ t.Errorf("removes count: %v, want 1", removes)
+ }
+}
+
+func TestExists(t *testing.T) {
+ caches := []Cache{
+ newEnabledCache(),
+ }
+
+ mplex := &cacheMultiplexer{
+ caches: caches,
+ }
+
+ itemStatus := mplex.Exists("some-hash")
+ if itemStatus.Local {
+ t.Error("did not expect file to exist")
+ }
+
+ err := mplex.Put("unused-target", "some-hash", 5, []turbopath.AnchoredSystemPath{"a-file"})
+ if err != nil {
+ // don't leak the cache removal
+ t.Errorf("Put got error %v, want <nil>", err)
+ }
+
+ itemStatus = mplex.Exists("some-hash")
+ if !itemStatus.Local {
+ t.Error("failed to find previously stored files")
+ }
+}
+
+type fakeClient struct{}
+
+// FetchArtifact implements client
+func (*fakeClient) FetchArtifact(hash string) (*http.Response, error) {
+ panic("unimplemented")
+}
+
+func (*fakeClient) ArtifactExists(hash string) (*http.Response, error) {
+ panic("unimplemented")
+}
+
+// GetTeamID implements client
+func (*fakeClient) GetTeamID() string {
+ return "fake-team-id"
+}
+
+// PutArtifact implements client
+func (*fakeClient) PutArtifact(hash string, body []byte, duration int, tag string) error {
+ panic("unimplemented")
+}
+
+var _ client = &fakeClient{}
+
+func TestFetchCachingDisabled(t *testing.T) {
+ disabledCache := newDisabledCache()
+ caches := []Cache{
+ newEnabledCache(),
+ disabledCache,
+ newEnabledCache(),
+ newEnabledCache(),
+ }
+ var removeCalled uint64
+ mplex := &cacheMultiplexer{
+ caches: caches,
+ onCacheRemoved: func(cache Cache, err error) {
+ atomic.AddUint64(&removeCalled, 1)
+ },
+ }
+
+ cacheStatus, _, _, err := mplex.Fetch("unused-target", "some-hash", []string{"unused", "files"})
+ if err != nil {
+ // don't leak the cache removal
+ t.Errorf("Fetch got error %v, want <nil>", err)
+ }
+ hit := cacheStatus.Local || cacheStatus.Remote
+ if hit {
+ t.Error("hit on empty cache, expected miss")
+ }
+
+ removes := atomic.LoadUint64(&removeCalled)
+ if removes != 1 {
+ t.Errorf("removes count: %v, want 1", removes)
+ }
+
+ mplex.mu.RLock()
+ if len(mplex.caches) != 3 {
+ t.Errorf("found %v caches, expected to have 3 after one was removed", len(mplex.caches))
+ }
+ for _, cache := range mplex.caches {
+ if cache == disabledCache {
+ t.Error("found disabled cache, expected it to be removed")
+ }
+ }
+ mplex.mu.RUnlock()
+}
+
+type nullRecorder struct{}
+
+func (nullRecorder) LogEvent(analytics.EventPayload) {}
+
+func TestNew(t *testing.T) {
+ // Test will bomb if this fails, no need to specially handle the error
+ repoRoot := fs.AbsoluteSystemPathFromUpstream(t.TempDir())
+ type args struct {
+ opts Opts
+ recorder analytics.Recorder
+ onCacheRemoved OnCacheRemoved
+ client fakeClient
+ }
+ tests := []struct {
+ name string
+ args args
+ want Cache
+ wantErr bool
+ }{
+ {
+ name: "With no caches configured, new returns a noopCache and an error",
+ args: args{
+ opts: Opts{
+ SkipFilesystem: true,
+ SkipRemote: true,
+ },
+ recorder: &nullRecorder{},
+ onCacheRemoved: func(Cache, error) {},
+ },
+ want: &noopCache{},
+ wantErr: true,
+ },
+ {
+ name: "With just httpCache configured, new returns an httpCache and a noopCache",
+ args: args{
+ opts: Opts{
+ SkipFilesystem: true,
+ RemoteCacheOpts: fs.RemoteCacheOptions{
+ Signature: true,
+ },
+ },
+ recorder: &nullRecorder{},
+ onCacheRemoved: func(Cache, error) {},
+ },
+ want: &cacheMultiplexer{
+ caches: []Cache{&httpCache{}, &noopCache{}},
+ },
+ wantErr: false,
+ },
+ {
+ name: "With just fsCache configured, new returns only an fsCache",
+ args: args{
+ opts: Opts{
+ SkipRemote: true,
+ },
+ recorder: &nullRecorder{},
+ onCacheRemoved: func(Cache, error) {},
+ },
+ want: &fsCache{},
+ },
+ {
+ name: "With both configured, new returns an fsCache and httpCache",
+ args: args{
+ opts: Opts{
+ RemoteCacheOpts: fs.RemoteCacheOptions{
+ Signature: true,
+ },
+ },
+ recorder: &nullRecorder{},
+ onCacheRemoved: func(Cache, error) {},
+ },
+ want: &cacheMultiplexer{
+ caches: []Cache{&fsCache{}, &httpCache{}},
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := New(tt.args.opts, repoRoot, &tt.args.client, tt.args.recorder, tt.args.onCacheRemoved)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("New() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ switch multiplexer := got.(type) {
+ case *cacheMultiplexer:
+ want := tt.want.(*cacheMultiplexer)
+ for i := range multiplexer.caches {
+ if reflect.TypeOf(multiplexer.caches[i]) != reflect.TypeOf(want.caches[i]) {
+ t.Errorf("New() = %v, want %v", reflect.TypeOf(multiplexer.caches[i]), reflect.TypeOf(want.caches[i]))
+ }
+ }
+ case *fsCache:
+ if reflect.TypeOf(got) != reflect.TypeOf(tt.want) {
+ t.Errorf("New() = %v, want %v", reflect.TypeOf(got), reflect.TypeOf(tt.want))
+ }
+ case *noopCache:
+ if reflect.TypeOf(got) != reflect.TypeOf(tt.want) {
+ t.Errorf("New() = %v, want %v", reflect.TypeOf(got), reflect.TypeOf(tt.want))
+ }
+ }
+ })
+ }
+}
diff --git a/cli/internal/cacheitem/cacheitem.go b/cli/internal/cacheitem/cacheitem.go
new file mode 100644
index 0000000..2fb2c3b
--- /dev/null
+++ b/cli/internal/cacheitem/cacheitem.go
@@ -0,0 +1,76 @@
+// Package cacheitem is an abstraction over the creation and restoration of a cache
+package cacheitem
+
+import (
+ "archive/tar"
+ "bufio"
+ "crypto/sha512"
+ "errors"
+ "io"
+ "os"
+
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+var (
+ errMissingSymlinkTarget = errors.New("symlink restoration is delayed")
+ errCycleDetected = errors.New("links in the cache are cyclic")
+ errTraversal = errors.New("tar attempts to write outside of directory")
+ errNameMalformed = errors.New("file name is malformed")
+ errNameWindowsUnsafe = errors.New("file name is not Windows-safe")
+ errUnsupportedFileType = errors.New("attempted to restore unsupported file type")
+)
+
+// CacheItem is a `tar` utility with a little bit extra.
+type CacheItem struct {
+ // Path is the location on disk for the CacheItem.
+ Path turbopath.AbsoluteSystemPath
+ // Anchor is the position on disk at which the CacheItem will be restored.
+ Anchor turbopath.AbsoluteSystemPath
+
+ // For creation.
+ tw *tar.Writer
+ zw io.WriteCloser
+ fileBuffer *bufio.Writer
+ handle *os.File
+ compressed bool
+}
+
+// Close any open pipes
+func (ci *CacheItem) Close() error {
+ if ci.tw != nil {
+ if err := ci.tw.Close(); err != nil {
+ return err
+ }
+ }
+
+ if ci.zw != nil {
+ if err := ci.zw.Close(); err != nil {
+ return err
+ }
+ }
+
+ if ci.fileBuffer != nil {
+ if err := ci.fileBuffer.Flush(); err != nil {
+ return err
+ }
+ }
+
+ if ci.handle != nil {
+ if err := ci.handle.Close(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// GetSha returns the SHA-512 hash for the CacheItem.
+func (ci *CacheItem) GetSha() ([]byte, error) {
+ sha := sha512.New()
+ if _, err := io.Copy(sha, ci.handle); err != nil {
+ return nil, err
+ }
+
+ return sha.Sum(nil), nil
+}
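+
+// The intended write-path lifecycle, sketched for illustration (error handling
+// elided; `archivePath`, `anchor`, and `somePath` are hypothetical):
+//
+//	item, _ := Create(archivePath)     // archivePath ends in .tar or .tar.zst
+//	_ = item.AddFile(anchor, somePath) // repeat for each cached output
+//	_ = item.Close()                   // tar and zstd both write footers on close
+//	reopened, _ := Open(archivePath)   // reopen before hashing
+//	sha, _ := reopened.GetSha()        // SHA-512 over the finished archive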
diff --git a/cli/internal/cacheitem/create.go b/cli/internal/cacheitem/create.go
new file mode 100644
index 0000000..ce5b1c8
--- /dev/null
+++ b/cli/internal/cacheitem/create.go
@@ -0,0 +1,119 @@
+package cacheitem
+
+import (
+ "archive/tar"
+ "bufio"
+ "io"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/DataDog/zstd"
+
+ "github.com/moby/sys/sequential"
+ "github.com/vercel/turbo/cli/internal/tarpatch"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+// Create makes a new CacheItem at the specified path.
+func Create(path turbopath.AbsoluteSystemPath) (*CacheItem, error) {
+ handle, err := path.OpenFile(os.O_WRONLY|os.O_CREATE|os.O_TRUNC|os.O_APPEND, 0644)
+ if err != nil {
+ return nil, err
+ }
+
+ cacheItem := &CacheItem{
+ Path: path,
+ handle: handle,
+ compressed: strings.HasSuffix(path.ToString(), ".zst"),
+ }
+
+ cacheItem.init()
+ return cacheItem, nil
+}
+
+// init prepares the CacheItem for writing.
+// Wires all the writers end-to-end:
+// tar.Writer -> zstd.Writer -> fileBuffer -> file
+func (ci *CacheItem) init() {
+	// Flush to disk in 1 MiB chunks. (Go's ^ is XOR, so the size must be 1<<20, not 2^20.)
+	fileBuffer := bufio.NewWriterSize(ci.handle, 1<<20)
+
+ var tw *tar.Writer
+ if ci.compressed {
+ zw := zstd.NewWriter(fileBuffer)
+ tw = tar.NewWriter(zw)
+ ci.zw = zw
+ } else {
+ tw = tar.NewWriter(fileBuffer)
+ }
+
+ ci.tw = tw
+ ci.fileBuffer = fileBuffer
+}
+
+// AddFile adds a user-cached item to the tar.
+func (ci *CacheItem) AddFile(fsAnchor turbopath.AbsoluteSystemPath, filePath turbopath.AnchoredSystemPath) error {
+ // Calculate the fully-qualified path to the file to read it.
+ sourcePath := filePath.RestoreAnchor(fsAnchor)
+
+ // We grab the FileInfo which tar.FileInfoHeader accepts.
+ fileInfo, lstatErr := sourcePath.Lstat()
+ if lstatErr != nil {
+ return lstatErr
+ }
+
+ // Determine if we need to populate the additional link argument to tar.FileInfoHeader.
+ var link string
+ if fileInfo.Mode()&os.ModeSymlink != 0 {
+ linkTarget, readlinkErr := sourcePath.Readlink()
+ if readlinkErr != nil {
+ return readlinkErr
+ }
+ link = linkTarget
+ }
+
+ // Normalize the path within the cache.
+ cacheDestinationName := filePath.ToUnixPath()
+
+	// Generate the header.
+	// We do not use header generation from the stdlib because it can error.
+ header, headerErr := tarpatch.FileInfoHeader(cacheDestinationName, fileInfo, link)
+ if headerErr != nil {
+ return headerErr
+ }
+
+ // Throw an error if trying to create a cache that contains a type we don't support.
+ if (header.Typeflag != tar.TypeReg) && (header.Typeflag != tar.TypeDir) && (header.Typeflag != tar.TypeSymlink) {
+ return errUnsupportedFileType
+ }
+
+ // Consistent creation.
+ header.Uid = 0
+ header.Gid = 0
+ header.AccessTime = time.Unix(0, 0)
+ header.ModTime = time.Unix(0, 0)
+ header.ChangeTime = time.Unix(0, 0)
+
+ // Always write the header.
+ if err := ci.tw.WriteHeader(header); err != nil {
+ return err
+ }
+
+ // If there is a body to be written, do so.
+ if header.Typeflag == tar.TypeReg && header.Size > 0 {
+ // Windows has a distinct "sequential read" opening mode.
+ // We use a library that will switch to this mode for Windows.
+ sourceFile, sourceErr := sequential.OpenFile(sourcePath.ToString(), os.O_RDONLY, 0777)
+ if sourceErr != nil {
+ return sourceErr
+ }
+
+ if _, err := io.Copy(ci.tw, sourceFile); err != nil {
+ return err
+ }
+
+ return sourceFile.Close()
+ }
+
+ return nil
+}
diff --git a/cli/internal/cacheitem/create_test.go b/cli/internal/cacheitem/create_test.go
new file mode 100644
index 0000000..97eeb01
--- /dev/null
+++ b/cli/internal/cacheitem/create_test.go
@@ -0,0 +1,205 @@
+package cacheitem
+
+import (
+ "encoding/hex"
+ "io/fs"
+ "os"
+ "runtime"
+ "testing"
+
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "gotest.tools/v3/assert"
+)
+
+type createFileDefinition struct {
+ Path turbopath.AnchoredSystemPath
+ Linkname string
+ fs.FileMode
+}
+
+func createEntry(t *testing.T, anchor turbopath.AbsoluteSystemPath, fileDefinition createFileDefinition) error {
+ t.Helper()
+ if fileDefinition.FileMode.IsDir() {
+ return createDir(t, anchor, fileDefinition)
+ } else if fileDefinition.FileMode&os.ModeSymlink != 0 {
+ return createSymlink(t, anchor, fileDefinition)
+ } else if fileDefinition.FileMode&os.ModeNamedPipe != 0 {
+ return createFifo(t, anchor, fileDefinition)
+ } else {
+ return createFile(t, anchor, fileDefinition)
+ }
+}
+
+func createDir(t *testing.T, anchor turbopath.AbsoluteSystemPath, fileDefinition createFileDefinition) error {
+ t.Helper()
+ path := fileDefinition.Path.RestoreAnchor(anchor)
+ mkdirAllErr := path.MkdirAllMode(fileDefinition.FileMode & 0777)
+ assert.NilError(t, mkdirAllErr, "MkdirAll")
+ return mkdirAllErr
+}
+func createFile(t *testing.T, anchor turbopath.AbsoluteSystemPath, fileDefinition createFileDefinition) error {
+ t.Helper()
+ path := fileDefinition.Path.RestoreAnchor(anchor)
+ writeErr := path.WriteFile([]byte("file contents"), fileDefinition.FileMode&0777)
+ assert.NilError(t, writeErr, "WriteFile")
+ return writeErr
+}
+func createSymlink(t *testing.T, anchor turbopath.AbsoluteSystemPath, fileDefinition createFileDefinition) error {
+ t.Helper()
+ path := fileDefinition.Path.RestoreAnchor(anchor)
+ symlinkErr := path.Symlink(fileDefinition.Linkname)
+ assert.NilError(t, symlinkErr, "Symlink")
+ lchmodErr := path.Lchmod(fileDefinition.FileMode & 0777)
+ assert.NilError(t, lchmodErr, "Lchmod")
+ return symlinkErr
+}
+
+func TestCreate(t *testing.T) {
+ tests := []struct {
+ name string
+ files []createFileDefinition
+ wantDarwin string
+ wantUnix string
+ wantWindows string
+ wantErr error
+ }{
+ {
+ name: "hello world",
+ files: []createFileDefinition{
+ {
+ Path: turbopath.AnchoredSystemPath("hello world.txt"),
+ FileMode: 0 | 0644,
+ },
+ },
+ wantDarwin: "4f39f1cab23906f3b89f313392ef7c26f2586e1c15fa6b577cce640c4781d082817927b4875a5413bc23e1248f0b198218998d70e7336e8b1244542ba446ca07",
+ wantUnix: "4f39f1cab23906f3b89f313392ef7c26f2586e1c15fa6b577cce640c4781d082817927b4875a5413bc23e1248f0b198218998d70e7336e8b1244542ba446ca07",
+ wantWindows: "e304d1ba8c51209f97bd11dabf27ca06996b70a850db592343942c49480de47bcbb4b7131fb3dd4d7564021d3bc0e648919e4876572b46ac1da97fca92b009c5",
+ },
+ {
+ name: "links",
+ files: []createFileDefinition{
+ {
+ Path: turbopath.AnchoredSystemPath("one"),
+ Linkname: "two",
+ FileMode: 0 | os.ModeSymlink | 0777,
+ },
+ {
+ Path: turbopath.AnchoredSystemPath("two"),
+ Linkname: "three",
+ FileMode: 0 | os.ModeSymlink | 0777,
+ },
+ {
+ Path: turbopath.AnchoredSystemPath("three"),
+ Linkname: "real",
+ FileMode: 0 | os.ModeSymlink | 0777,
+ },
+ {
+ Path: turbopath.AnchoredSystemPath("real"),
+ FileMode: 0 | 0644,
+ },
+ },
+ wantDarwin: "07278fdf37db4b212352367f391377bd6bac8f361dd834ae5522d809539bcf3b34d046873c1b45876d7372251446bb12c32f9fa9824914c4a1a01f6d7a206702",
+ wantUnix: "07278fdf37db4b212352367f391377bd6bac8f361dd834ae5522d809539bcf3b34d046873c1b45876d7372251446bb12c32f9fa9824914c4a1a01f6d7a206702",
+ wantWindows: "d4dac527e40860ee1ba3fdf2b9b12a1eba385050cf4f5877558dd531f0ecf2a06952fd5f88b852ad99e010943ed7b7f1437b727796369524e85f0c06f25d62c9",
+ },
+ {
+ name: "subdirectory",
+ files: []createFileDefinition{
+ {
+ Path: turbopath.AnchoredSystemPath("parent"),
+ FileMode: 0 | os.ModeDir | 0755,
+ },
+ {
+ Path: turbopath.AnchoredSystemPath("parent/child"),
+ FileMode: 0 | 0644,
+ },
+ },
+ wantDarwin: "b513eea231daa84245d1d23d99fc398ccf17166ca49754ffbdcc1a3269cd75b7ad176a9c7095ff2481f71dca9fc350189747035f13d53b3a864e4fe35165233f",
+ wantUnix: "b513eea231daa84245d1d23d99fc398ccf17166ca49754ffbdcc1a3269cd75b7ad176a9c7095ff2481f71dca9fc350189747035f13d53b3a864e4fe35165233f",
+ wantWindows: "a8c3cba54e4dc214d3b21c3fa284d4032fe317d2f88943159efd5d16f3551ab53fae5c92ebf8acdd1bdb85d1238510b7938772cb11a0daa1b72b5e0f2700b5c7",
+ },
+ {
+ name: "symlink permissions",
+ files: []createFileDefinition{
+ {
+ Path: turbopath.AnchoredSystemPath("one"),
+ Linkname: "two",
+ FileMode: 0 | os.ModeSymlink | 0644,
+ },
+ },
+ wantDarwin: "3ea9d8a4581a0c2ba77557c72447b240c5ac622edcdac570a0bf597c276c2917b4ea73e6c373bbac593a480e396845651fa4b51e049531ff5d44c0adb807c2d9",
+ wantUnix: "99d953cbe1c0d8545e6f8382208fcefe14bcbefe39872f7b6310da14ac195b9a1b04b6d7b4b56f01a27216176193344a92488f99e124fcd68693f313f7137a1c",
+ wantWindows: "a4b1dc5c296f8ac4c9124727c1d84d70f72872c7bb4ced6d83ee312889e822baf1eaa72f88e624fb1aac4339d0a1f766ede77eabd2e4524eb26e89f883dc479d",
+ },
+ {
+ name: "unsupported types error",
+ files: []createFileDefinition{
+ {
+ Path: turbopath.AnchoredSystemPath("fifo"),
+ FileMode: 0 | os.ModeNamedPipe | 0644,
+ },
+ },
+ wantErr: errUnsupportedFileType,
+ },
+ }
+ for _, tt := range tests {
+ getTestFunc := func(compressed bool) func(t *testing.T) {
+ return func(t *testing.T) {
+ inputDir := turbopath.AbsoluteSystemPath(t.TempDir())
+ archiveDir := turbopath.AbsoluteSystemPath(t.TempDir())
+ var archivePath turbopath.AbsoluteSystemPath
+ if compressed {
+ archivePath = turbopath.AnchoredSystemPath("out.tar.zst").RestoreAnchor(archiveDir)
+ } else {
+ archivePath = turbopath.AnchoredSystemPath("out.tar").RestoreAnchor(archiveDir)
+ }
+
+ cacheItem, cacheCreateErr := Create(archivePath)
+ assert.NilError(t, cacheCreateErr, "Cache Create")
+
+ for _, file := range tt.files {
+ createErr := createEntry(t, inputDir, file)
+ if createErr != nil {
+ assert.ErrorIs(t, createErr, tt.wantErr)
+ assert.NilError(t, cacheItem.Close(), "Close")
+ return
+ }
+
+ addFileError := cacheItem.AddFile(inputDir, file.Path)
+ if addFileError != nil {
+ assert.ErrorIs(t, addFileError, tt.wantErr)
+ assert.NilError(t, cacheItem.Close(), "Close")
+ return
+ }
+ }
+
+ assert.NilError(t, cacheItem.Close(), "Cache Close")
+
+ // We only check for repeatability on compressed caches.
+ if compressed {
+ openedCacheItem, openedCacheItemErr := Open(archivePath)
+ assert.NilError(t, openedCacheItemErr, "Cache Open")
+
+ // We actually only need to compare the generated SHA.
+ // That ensures we got the same output. (Effectively snapshots.)
+					// This must be called after `Close` because both `tar` and `zstd` have footers.
+ shaOne, shaOneErr := openedCacheItem.GetSha()
+ assert.NilError(t, shaOneErr, "GetSha")
+ snapshot := hex.EncodeToString(shaOne)
+
+ switch runtime.GOOS {
+ case "darwin":
+ assert.Equal(t, snapshot, tt.wantDarwin, "Got expected hash.")
+ case "windows":
+ assert.Equal(t, snapshot, tt.wantWindows, "Got expected hash.")
+ default:
+ assert.Equal(t, snapshot, tt.wantUnix, "Got expected hash.")
+ }
+ assert.NilError(t, openedCacheItem.Close(), "Close")
+ }
+ }
+ }
+ t.Run(tt.name, getTestFunc(false))
+ t.Run(tt.name+"zst", getTestFunc(true))
+ }
+}
diff --git a/cli/internal/cacheitem/create_unix_test.go b/cli/internal/cacheitem/create_unix_test.go
new file mode 100644
index 0000000..812d1eb
--- /dev/null
+++ b/cli/internal/cacheitem/create_unix_test.go
@@ -0,0 +1,20 @@
+//go:build darwin || linux
+// +build darwin linux
+
+package cacheitem
+
+import (
+ "syscall"
+ "testing"
+
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "gotest.tools/v3/assert"
+)
+
+func createFifo(t *testing.T, anchor turbopath.AbsoluteSystemPath, fileDefinition createFileDefinition) error {
+ t.Helper()
+ path := fileDefinition.Path.RestoreAnchor(anchor)
+ fifoErr := syscall.Mknod(path.ToString(), syscall.S_IFIFO|0666, 0)
+ assert.NilError(t, fifoErr, "FIFO")
+ return fifoErr
+}
diff --git a/cli/internal/cacheitem/create_windows_test.go b/cli/internal/cacheitem/create_windows_test.go
new file mode 100644
index 0000000..2cbb8b9
--- /dev/null
+++ b/cli/internal/cacheitem/create_windows_test.go
@@ -0,0 +1,14 @@
+//go:build windows
+// +build windows
+
+package cacheitem
+
+import (
+ "testing"
+
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+func createFifo(t *testing.T, anchor turbopath.AbsoluteSystemPath, fileDefinition createFileDefinition) error {
+ return errUnsupportedFileType
+}
diff --git a/cli/internal/cacheitem/filepath.go b/cli/internal/cacheitem/filepath.go
new file mode 100644
index 0000000..4fd1681
--- /dev/null
+++ b/cli/internal/cacheitem/filepath.go
@@ -0,0 +1,162 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cacheitem
+
+import "os"
+
+const _separator = os.PathSeparator
+
+// A lazybuf is a lazily constructed path buffer.
+// It supports append, reading previously appended bytes,
+// and retrieving the final string. It does not allocate a buffer
+// to hold the output until that output diverges from s.
+type lazybuf struct {
+ path string
+ buf []byte
+ w int
+ volAndPath string
+ volLen int
+}
+
+func (b *lazybuf) index(i int) byte {
+ if b.buf != nil {
+ return b.buf[i]
+ }
+ return b.path[i]
+}
+
+func (b *lazybuf) append(c byte) {
+ if b.buf == nil {
+ if b.w < len(b.path) && b.path[b.w] == c {
+ b.w++
+ return
+ }
+ b.buf = make([]byte, len(b.path))
+ copy(b.buf, b.path[:b.w])
+ }
+ b.buf[b.w] = c
+ b.w++
+}
+
+func (b *lazybuf) string() string {
+ if b.buf == nil {
+ return b.volAndPath[:b.volLen+b.w]
+ }
+ return b.volAndPath[:b.volLen] + string(b.buf[:b.w])
+}
+
+// Clean is extracted from the stdlib, with the stdlib version's `FromSlash`
+// processing removed.
+//
+// Clean returns the shortest path name equivalent to path
+// by purely lexical processing. It applies the following rules
+// iteratively until no further processing can be done:
+//
+// 1. Replace multiple Separator elements with a single one.
+// 2. Eliminate each . path name element (the current directory).
+// 3. Eliminate each inner .. path name element (the parent directory)
+// along with the non-.. element that precedes it.
+// 4. Eliminate .. elements that begin a rooted path:
+// that is, replace "/.." by "/" at the beginning of a path,
+// assuming Separator is '/'.
+//
+// The returned path ends in a slash only if it represents a root directory,
+// such as "/" on Unix or `C:\` on Windows.
+//
+// Finally, any occurrences of slash are replaced by Separator.
+//
+// If the result of this process is an empty string, Clean
+// returns the string ".".
+//
+// See also Rob Pike, “Lexical File Names in Plan 9 or
+// Getting Dot-Dot Right,”
+// https://9p.io/sys/doc/lexnames.html
+func Clean(path string) string {
+ originalPath := path
+ volLen := volumeNameLen(path)
+ path = path[volLen:]
+ if path == "" {
+ if volLen > 1 && originalPath[1] != ':' {
+ // should be UNC
+ // ORIGINAL: return FromSlash(originalPath)
+ return originalPath
+ }
+ return originalPath + "."
+ }
+ rooted := os.IsPathSeparator(path[0])
+
+ // Invariants:
+ // reading from path; r is index of next byte to process.
+ // writing to buf; w is index of next byte to write.
+ // dotdot is index in buf where .. must stop, either because
+ // it is the leading slash or it is a leading ../../.. prefix.
+ n := len(path)
+ out := lazybuf{path: path, volAndPath: originalPath, volLen: volLen}
+ r, dotdot := 0, 0
+ if rooted {
+ out.append(_separator)
+ r, dotdot = 1, 1
+ }
+
+ for r < n {
+ switch {
+ case os.IsPathSeparator(path[r]):
+ // empty path element
+ r++
+ case path[r] == '.' && r+1 == n:
+ // . element
+ r++
+ case path[r] == '.' && os.IsPathSeparator(path[r+1]):
+ // ./ element
+ r++
+
+ for r < len(path) && os.IsPathSeparator(path[r]) {
+ r++
+ }
+ if out.w == 0 && volumeNameLen(path[r:]) > 0 {
+ // When joining prefix "." and an absolute path on Windows,
+ // the prefix should not be removed.
+ out.append('.')
+ }
+ case path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])):
+ // .. element: remove to last separator
+ r += 2
+ switch {
+ case out.w > dotdot:
+ // can backtrack
+ out.w--
+ for out.w > dotdot && !os.IsPathSeparator(out.index(out.w)) {
+ out.w--
+ }
+ case !rooted:
+ // cannot backtrack, but not rooted, so append .. element.
+ if out.w > 0 {
+ out.append(_separator)
+ }
+ out.append('.')
+ out.append('.')
+ dotdot = out.w
+ }
+ default:
+ // real path element.
+ // add slash if needed
+ if rooted && out.w != 1 || !rooted && out.w != 0 {
+ out.append(_separator)
+ }
+ // copy element
+ for ; r < n && !os.IsPathSeparator(path[r]); r++ {
+ out.append(path[r])
+ }
+ }
+ }
+
+ // Turn empty string into "."
+ if out.w == 0 {
+ out.append('.')
+ }
+
+ // ORIGINAL: return FromSlash(out.string())
+ return out.string()
+}
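+
+// A few worked examples of the rules above (hypothetical inputs, shown with a
+// unix separator):
+//
+//	Clean("a/b/../c//./d") == "a/c/d" // collapse separators, drop ".", fold ".."
+//	Clean("/../x")         == "/x"    // ".." cannot escape a rooted path
+//	Clean("")              == "."     // the empty string becomes "."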
diff --git a/cli/internal/cacheitem/filepath_unix.go b/cli/internal/cacheitem/filepath_unix.go
new file mode 100644
index 0000000..d0f6786
--- /dev/null
+++ b/cli/internal/cacheitem/filepath_unix.go
@@ -0,0 +1,14 @@
+//go:build !windows
+// +build !windows
+
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cacheitem
+
+// volumeNameLen returns length of the leading volume name on Windows.
+// It returns 0 elsewhere.
+func volumeNameLen(path string) int {
+ return 0
+}
diff --git a/cli/internal/cacheitem/filepath_windows.go b/cli/internal/cacheitem/filepath_windows.go
new file mode 100644
index 0000000..2c3b852
--- /dev/null
+++ b/cli/internal/cacheitem/filepath_windows.go
@@ -0,0 +1,50 @@
+//go:build windows
+// +build windows
+
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cacheitem
+
+func isSlash(c uint8) bool {
+ return c == '\\' || c == '/'
+}
+
+// volumeNameLen returns length of the leading volume name on Windows.
+// It returns 0 elsewhere.
+func volumeNameLen(path string) int {
+ if len(path) < 2 {
+ return 0
+ }
+ // with drive letter
+ c := path[0]
+ if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') {
+ return 2
+ }
+ // is it UNC? https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
+ if l := len(path); l >= 5 && isSlash(path[0]) && isSlash(path[1]) &&
+ !isSlash(path[2]) && path[2] != '.' {
+		// first, a leading \\ whose next byte is not \: the server name.
+ for n := 3; n < l-1; n++ {
+ // second, next '\' shouldn't be repeated.
+ if isSlash(path[n]) {
+ n++
+				// third, followed by one or more non-slash characters: the share name.
+ if !isSlash(path[n]) {
+ if path[n] == '.' {
+ break
+ }
+ for ; n < l; n++ {
+ if isSlash(path[n]) {
+ break
+ }
+ }
+ return n
+ }
+ break
+ }
+ }
+ }
+ return 0
+}
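+
+// Illustrative results (hypothetical inputs):
+//
+//	volumeNameLen(`C:\foo`)           == 2  // drive letter plus colon
+//	volumeNameLen(`\\server\share\x`) == 14 // UNC prefix through the share name
+//	volumeNameLen(`relative\path`)    == 0  // no leading volume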
diff --git a/cli/internal/cacheitem/restore.go b/cli/internal/cacheitem/restore.go
new file mode 100644
index 0000000..347b996
--- /dev/null
+++ b/cli/internal/cacheitem/restore.go
@@ -0,0 +1,200 @@
+package cacheitem
+
+import (
+ "archive/tar"
+ "errors"
+ "io"
+ "os"
+ "runtime"
+ "strings"
+
+ "github.com/DataDog/zstd"
+
+ "github.com/moby/sys/sequential"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+// Open returns an existing CacheItem at the specified path.
+func Open(path turbopath.AbsoluteSystemPath) (*CacheItem, error) {
+ handle, err := sequential.OpenFile(path.ToString(), os.O_RDONLY, 0777)
+ if err != nil {
+ return nil, err
+ }
+
+ return &CacheItem{
+ Path: path,
+ handle: handle,
+ compressed: strings.HasSuffix(path.ToString(), ".zst"),
+ }, nil
+}
+
+// Restore extracts a cache to a specified disk location.
+func (ci *CacheItem) Restore(anchor turbopath.AbsoluteSystemPath) (restored []turbopath.AnchoredSystemPath, err error) {
+	var tr *tar.Reader
+
+	// We're reading a tar, possibly wrapped in zstd.
+	if ci.compressed {
+		zr := zstd.NewReader(ci.handle)
+
+		// The `Close` function for compression effectively just returns the singular
+		// error field on the decompressor instance. This is extremely unlikely to be
+		// set without triggering one of the numerous other errors, but we should
+		// still handle that edge case. Reporting it through the named return value
+		// (instead of a deferred assignment to a local, which would run after the
+		// return values were copied) keeps it from being silently dropped.
+		defer func() {
+			if closeErr := zr.Close(); err == nil {
+				err = closeErr
+			}
+		}()
+		tr = tar.NewReader(zr)
+	} else {
+		tr = tar.NewReader(ci.handle)
+	}
+
+	// On the first attempt to restore, it's possible that a symlink's target
+	// doesn't exist yet. Save those links and restore them in topological order.
+	var symlinks []*tar.Header
+
+	restored = make([]turbopath.AnchoredSystemPath, 0)
+
+ restorePointErr := anchor.MkdirAll(0755)
+ if restorePointErr != nil {
+ return nil, restorePointErr
+ }
+
+ // We're going to make the following two assumptions here for "fast" path restoration:
+ // - All directories are enumerated in the `tar`.
+ // - The contents of the tar are enumerated depth-first.
+ //
+ // This allows us to avoid:
+ // - Attempts at recursive creation of directories.
+ // - Repetitive `lstat` on restore of a file.
+ //
+ // Violating these assumptions won't cause things to break but we're only going to maintain
+ // an `lstat` cache for the current tree. If you violate these assumptions and the current
+ // cache does not apply for your path, it will clobber and re-start from the common
+ // shared prefix.
+ dirCache := &cachedDirTree{
+ anchorAtDepth: []turbopath.AbsoluteSystemPath{anchor},
+ }
+
+ for {
+ header, trErr := tr.Next()
+ if trErr == io.EOF {
+ // The end, time to restore any missing links.
+ symlinksRestored, symlinksErr := topologicallyRestoreSymlinks(dirCache, anchor, symlinks, tr)
+ restored = append(restored, symlinksRestored...)
+ if symlinksErr != nil {
+ return restored, symlinksErr
+ }
+
+ break
+ }
+ if trErr != nil {
+ return restored, trErr
+ }
+
+ // The reader will not advance until tr.Next is called.
+ // We can treat this as file metadata + body reader.
+
+ // Attempt to place the file on disk.
+ file, restoreErr := restoreEntry(dirCache, anchor, header, tr)
+ if restoreErr != nil {
+ if errors.Is(restoreErr, errMissingSymlinkTarget) {
+ // Links get one shot to be valid, then they're accumulated, DAG'd, and restored on delay.
+ symlinks = append(symlinks, header)
+ continue
+ }
+ return restored, restoreErr
+ }
+ restored = append(restored, file)
+ }
+
+	return restored, nil
+}
+
+// restoreEntry is the entry point for all things read from the tar.
+func restoreEntry(dirCache *cachedDirTree, anchor turbopath.AbsoluteSystemPath, header *tar.Header, reader *tar.Reader) (turbopath.AnchoredSystemPath, error) {
+ // We're permissive on creation, but restrictive on restoration.
+ // There is no need to prevent the cache creation in any case.
+ // And on restoration, if we fail, we simply run the task.
+ switch header.Typeflag {
+ case tar.TypeDir:
+ return restoreDirectory(dirCache, anchor, header)
+ case tar.TypeReg:
+ return restoreRegular(dirCache, anchor, header, reader)
+ case tar.TypeSymlink:
+ return restoreSymlink(dirCache, anchor, header)
+ default:
+ return "", errUnsupportedFileType
+ }
+}
+
+// canonicalizeName returns either an AnchoredSystemPath or an error.
+func canonicalizeName(name string) (turbopath.AnchoredSystemPath, error) {
+	// Assuming this was a `turbo`-created input, we currently have an AnchoredUnixPath.
+	// Assuming this is malicious input, we don't really care if we do the wrong thing.
+ wellFormed, windowsSafe := checkName(name)
+
+ // Determine if the future filename is a well-formed AnchoredUnixPath
+ if !wellFormed {
+ return "", errNameMalformed
+ }
+
+ // Determine if the AnchoredUnixPath is safe to be used on Windows
+ if runtime.GOOS == "windows" && !windowsSafe {
+ return "", errNameWindowsUnsafe
+ }
+
+ // Directories will have a trailing slash. Remove it.
+ noTrailingSlash := strings.TrimSuffix(name, "/")
+
+ // Okay, we're all set here.
+ return turbopath.AnchoredUnixPathFromUpstream(noTrailingSlash).ToSystemPath(), nil
+}
+
+// checkName returns `wellFormed, windowsSafe` via inspection of separators and traversal
+func checkName(name string) (bool, bool) {
+ length := len(name)
+
+ // Name is of length 0.
+ if length == 0 {
+ return false, false
+ }
+
+ wellFormed := true
+ windowsSafe := true
+
+ // Name is:
+ // - "."
+ // - ".."
+ if wellFormed && (name == "." || name == "..") {
+ wellFormed = false
+ }
+
+ // Name starts with:
+ // - `/`
+ // - `./`
+ // - `../`
+ if wellFormed && (strings.HasPrefix(name, "/") || strings.HasPrefix(name, "./") || strings.HasPrefix(name, "../")) {
+ wellFormed = false
+ }
+
+ // Name ends in:
+ // - `/.`
+ // - `/..`
+ if wellFormed && (strings.HasSuffix(name, "/.") || strings.HasSuffix(name, "/..")) {
+ wellFormed = false
+ }
+
+ // Name contains:
+ // - `//`
+ // - `/./`
+ // - `/../`
+ if wellFormed && (strings.Contains(name, "//") || strings.Contains(name, "/./") || strings.Contains(name, "/../")) {
+ wellFormed = false
+ }
+
+ // Name contains: `\`
+ if strings.ContainsRune(name, '\\') {
+ windowsSafe = false
+ }
+
+ return wellFormed, windowsSafe
+}
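+
+// Some illustrative inputs and their (wellFormed, windowsSafe) results:
+//
+//	checkName("my-pkg/some-file") // (true, true)
+//	checkName("../escape")        // (false, true): a leading "../" is traversal
+//	checkName("a//b")             // (false, true): repeated separators
+//	checkName(`dir\file`)         // (true, false): backslash is not Windows-safe
+//	checkName("")                 // (false, false): empty names are rejected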
diff --git a/cli/internal/cacheitem/restore_directory.go b/cli/internal/cacheitem/restore_directory.go
new file mode 100644
index 0000000..4704d66
--- /dev/null
+++ b/cli/internal/cacheitem/restore_directory.go
@@ -0,0 +1,144 @@
+package cacheitem
+
+import (
+ "archive/tar"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+// restoreDirectory restores a directory.
+func restoreDirectory(dirCache *cachedDirTree, anchor turbopath.AbsoluteSystemPath, header *tar.Header) (turbopath.AnchoredSystemPath, error) {
+ processedName, err := canonicalizeName(header.Name)
+ if err != nil {
+ return "", err
+ }
+
+	// We need to walk `processedName` segment by segment, split at
+	// `os.PathSeparator`, to make sure we don't end up following a symlink
+	// outside of the restore path.
+
+ // Create the directory.
+ if err := safeMkdirAll(dirCache, anchor, processedName, header.Mode); err != nil {
+ return "", err
+ }
+
+ return processedName, nil
+}
+
+type cachedDirTree struct {
+ anchorAtDepth []turbopath.AbsoluteSystemPath
+ prefix []turbopath.RelativeSystemPath
+}
+
+func (cr *cachedDirTree) getStartingPoint(path turbopath.AnchoredSystemPath) (turbopath.AbsoluteSystemPath, []turbopath.RelativeSystemPath) {
+ pathSegmentStrings := strings.Split(path.ToString(), string(os.PathSeparator))
+ pathSegments := make([]turbopath.RelativeSystemPath, len(pathSegmentStrings))
+ for index, pathSegmentString := range pathSegmentStrings {
+ pathSegments[index] = turbopath.RelativeSystemPathFromUpstream(pathSegmentString)
+ }
+
+ i := 0
+ for i = 0; i < len(cr.prefix) && i < len(pathSegments); i++ {
+ if pathSegments[i] != cr.prefix[i] {
+ break
+ }
+ }
+
+ // 0: root anchor, can't remove it.
+ cr.anchorAtDepth = cr.anchorAtDepth[:i+1]
+
+ // 0: first prefix.
+ cr.prefix = cr.prefix[:i]
+
+ return cr.anchorAtDepth[i], pathSegments[i:]
+}
+
+func (cr *cachedDirTree) Update(anchor turbopath.AbsoluteSystemPath, newSegment turbopath.RelativeSystemPath) {
+ cr.anchorAtDepth = append(cr.anchorAtDepth, anchor)
+ cr.prefix = append(cr.prefix, newSegment)
+}
+
+// safeMkdirAll creates all directories, assuming that the leaf node is a directory.
+// FIXME: Recheck the symlink cache before creating a directory.
+func safeMkdirAll(dirCache *cachedDirTree, anchor turbopath.AbsoluteSystemPath, processedName turbopath.AnchoredSystemPath, mode int64) error {
+ // Iterate through path segments (split at os.PathSeparator), appending them onto the anchor.
+ // Check to see if that path segment is a symlink with a target outside of anchor.
+
+ // Pull the iteration starting point from the directory cache.
+ calculatedAnchor, pathSegments := dirCache.getStartingPoint(processedName)
+ for _, segment := range pathSegments {
+ var checkPathErr error
+ calculatedAnchor, checkPathErr = checkPath(anchor, calculatedAnchor, segment)
+ // We hit an existing directory or absolute path that was invalid.
+ if checkPathErr != nil {
+ return checkPathErr
+ }
+
+ // Otherwise we continue and check the next segment.
+ dirCache.Update(calculatedAnchor, segment)
+ }
+
+ // If we have made it here we know that it is safe to call os.MkdirAll
+ // on the Join of anchor and processedName.
+ //
+ // This could _still_ error, but we don't care.
+ return processedName.RestoreAnchor(anchor).MkdirAll(os.FileMode(mode))
+}
+
+// checkPath ensures that the path being restored, resolved through any
+// symlinks already on disk, never traverses outside of the anchor.
+func checkPath(originalAnchor turbopath.AbsoluteSystemPath, accumulatedAnchor turbopath.AbsoluteSystemPath, segment turbopath.RelativeSystemPath) (turbopath.AbsoluteSystemPath, error) {
+ // Check if the segment itself is sneakily an absolute path...
+ // (looking at you, Windows. CON, AUX...)
+ if filepath.IsAbs(segment.ToString()) {
+ return "", errTraversal
+ }
+
+ // Find out if this portion of the path is a symlink.
+ combinedPath := accumulatedAnchor.Join(segment)
+ fileInfo, err := combinedPath.Lstat()
+
+ // Getting an error here means we failed to stat the path.
+ // Assume that means we're safe and continue.
+ if err != nil {
+ return combinedPath, nil
+ }
+
+ // Find out if we have a symlink.
+ isSymlink := fileInfo.Mode()&os.ModeSymlink != 0
+
+ // If we don't have a symlink it's safe.
+ if !isSymlink {
+ return combinedPath, nil
+ }
+
+ // Check to see if the symlink targets outside of the originalAnchor.
+ // We don't use filepath.EvalSymlinks because we could find ourselves in
+ // a totally different place.
+
+ // 1. Get the target.
+ linkTarget, readLinkErr := combinedPath.Readlink()
+ if readLinkErr != nil {
+ return "", readLinkErr
+ }
+
+ // 2. See if the target is absolute.
+ if filepath.IsAbs(linkTarget) {
+ absoluteLinkTarget := turbopath.AbsoluteSystemPathFromUpstream(linkTarget)
+ if originalAnchor.HasPrefix(absoluteLinkTarget) {
+ return absoluteLinkTarget, nil
+ }
+ return "", errTraversal
+ }
+
+ // 3. Target is relative (or absolute Windows on a Unix device)
+ relativeLinkTarget := turbopath.RelativeSystemPathFromUpstream(linkTarget)
+ computedTarget := accumulatedAnchor.UntypedJoin(linkTarget)
+ if computedTarget.HasPrefix(originalAnchor) {
+ // Need to recurse and make sure the target doesn't link out.
+ return checkPath(originalAnchor, accumulatedAnchor, relativeLinkTarget)
+ }
+ return "", errTraversal
+}
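Reviewer note: a short sketch (not part of this patch) of how restoreDirectory is driven per tar entry. The helper name is hypothetical; the dirCache seeding matches the fixtures in restore_directory_test.go below.

    func restoreOneDirectory(anchor turbopath.AbsoluteSystemPath, header *tar.Header) (turbopath.AnchoredSystemPath, error) {
        // Seed the cache at the anchor with no prefix consumed yet.
        dirCache := &cachedDirTree{
            anchorAtDepth: []turbopath.AbsoluteSystemPath{anchor},
            prefix:        []turbopath.RelativeSystemPath{},
        }
        // Surfaces errNameMalformed, errNameWindowsUnsafe, or errTraversal on bad input.
        return restoreDirectory(dirCache, anchor, header)
    }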
diff --git a/cli/internal/cacheitem/restore_directory_test.go b/cli/internal/cacheitem/restore_directory_test.go
new file mode 100644
index 0000000..f75bd47
--- /dev/null
+++ b/cli/internal/cacheitem/restore_directory_test.go
@@ -0,0 +1,103 @@
+package cacheitem
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+func Test_cachedDirTree_getStartingPoint(t *testing.T) {
+ testDir := turbopath.AbsoluteSystemPath("")
+ tests := []struct {
+ name string
+
+ // STATE
+ cachedDirTree cachedDirTree
+
+ // INPUT
+ path turbopath.AnchoredSystemPath
+
+ // OUTPUT
+ calculatedAnchor turbopath.AbsoluteSystemPath
+ pathSegments []turbopath.RelativeSystemPath
+ }{
+ {
+ name: "hello world",
+ cachedDirTree: cachedDirTree{
+ anchorAtDepth: []turbopath.AbsoluteSystemPath{testDir},
+ prefix: []turbopath.RelativeSystemPath{},
+ },
+ path: turbopath.AnchoredUnixPath("hello/world").ToSystemPath(),
+ calculatedAnchor: testDir,
+ pathSegments: []turbopath.RelativeSystemPath{"hello", "world"},
+ },
+ {
+ name: "has a cache",
+ cachedDirTree: cachedDirTree{
+ anchorAtDepth: []turbopath.AbsoluteSystemPath{
+ testDir,
+ testDir.UntypedJoin("hello"),
+ },
+ prefix: []turbopath.RelativeSystemPath{"hello"},
+ },
+ path: turbopath.AnchoredUnixPath("hello/world").ToSystemPath(),
+ calculatedAnchor: testDir.UntypedJoin("hello"),
+ pathSegments: []turbopath.RelativeSystemPath{"world"},
+ },
+ {
+ name: "ask for yourself",
+ cachedDirTree: cachedDirTree{
+ anchorAtDepth: []turbopath.AbsoluteSystemPath{
+ testDir,
+ testDir.UntypedJoin("hello"),
+ testDir.UntypedJoin("hello", "world"),
+ },
+ prefix: []turbopath.RelativeSystemPath{"hello", "world"},
+ },
+ path: turbopath.AnchoredUnixPath("hello/world").ToSystemPath(),
+ calculatedAnchor: testDir.UntypedJoin("hello", "world"),
+ pathSegments: []turbopath.RelativeSystemPath{},
+ },
+ {
+ name: "three layer cake",
+ cachedDirTree: cachedDirTree{
+ anchorAtDepth: []turbopath.AbsoluteSystemPath{
+ testDir,
+ testDir.UntypedJoin("hello"),
+ testDir.UntypedJoin("hello", "world"),
+ },
+ prefix: []turbopath.RelativeSystemPath{"hello", "world"},
+ },
+ path: turbopath.AnchoredUnixPath("hello/world/again").ToSystemPath(),
+ calculatedAnchor: testDir.UntypedJoin("hello", "world"),
+ pathSegments: []turbopath.RelativeSystemPath{"again"},
+ },
+ {
+ name: "outside of cache hierarchy",
+ cachedDirTree: cachedDirTree{
+ anchorAtDepth: []turbopath.AbsoluteSystemPath{
+ testDir,
+ testDir.UntypedJoin("hello"),
+ testDir.UntypedJoin("hello", "world"),
+ },
+ prefix: []turbopath.RelativeSystemPath{"hello", "world"},
+ },
+ path: turbopath.AnchoredUnixPath("somewhere/else").ToSystemPath(),
+ calculatedAnchor: testDir,
+ pathSegments: []turbopath.RelativeSystemPath{"somewhere", "else"},
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cr := tt.cachedDirTree
+ calculatedAnchor, pathSegments := cr.getStartingPoint(tt.path)
+ if !reflect.DeepEqual(calculatedAnchor, tt.calculatedAnchor) {
+ t.Errorf("cachedDirTree.getStartingPoint() calculatedAnchor = %v, want %v", calculatedAnchor, tt.calculatedAnchor)
+ }
+ if !reflect.DeepEqual(pathSegments, tt.pathSegments) {
+ t.Errorf("cachedDirTree.getStartingPoint() pathSegments = %v, want %v", pathSegments, tt.pathSegments)
+ }
+ })
+ }
+}
diff --git a/cli/internal/cacheitem/restore_regular.go b/cli/internal/cacheitem/restore_regular.go
new file mode 100644
index 0000000..ed8946e
--- /dev/null
+++ b/cli/internal/cacheitem/restore_regular.go
@@ -0,0 +1,46 @@
+package cacheitem
+
+import (
+ "archive/tar"
+ "io"
+ "os"
+
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+// restoreRegular restores a file.
+func restoreRegular(dirCache *cachedDirTree, anchor turbopath.AbsoluteSystemPath, header *tar.Header, reader *tar.Reader) (turbopath.AnchoredSystemPath, error) {
+ // Assuming this was a `turbo`-created input, we currently have an AnchoredUnixPath.
+ // Assuming this is malicious input we don't really care if we do the wrong thing.
+ processedName, err := canonicalizeName(header.Name)
+ if err != nil {
+ return "", err
+ }
+
+ // We need to traverse `processedName` from root to leaf, split at
+ // `os.PathSeparator`, to make sure we don't end up following a symlink
+ // outside of the restore path.
+ if err := safeMkdirFile(dirCache, anchor, processedName, header.Mode); err != nil {
+ return "", err
+ }
+
+ // Create the file.
+ if f, err := processedName.RestoreAnchor(anchor).OpenFile(os.O_WRONLY|os.O_TRUNC|os.O_CREATE, os.FileMode(header.Mode)); err != nil {
+ return "", err
+ } else if _, err := io.Copy(f, reader); err != nil {
+ return "", err
+ } else if err := f.Close(); err != nil {
+ return "", err
+ }
+ return processedName, nil
+}
+
+// safeMkdirFile creates all parent directories, assuming that the leaf node is a file.
+func safeMkdirFile(dirCache *cachedDirTree, anchor turbopath.AbsoluteSystemPath, processedName turbopath.AnchoredSystemPath, mode int64) error {
+ isRootFile := processedName.Dir() == "."
+ if !isRootFile {
+ return safeMkdirAll(dirCache, anchor, processedName.Dir(), 0755)
+ }
+
+ return nil
+}
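Reviewer note: a hypothetical in-package helper (not part of this patch) showing restoreRegular driven from a tar stream with a shared directory cache; errors and io are assumed imported.

    func restoreRegularFiles(anchor turbopath.AbsoluteSystemPath, tr *tar.Reader) ([]turbopath.AnchoredSystemPath, error) {
        dirCache := &cachedDirTree{anchorAtDepth: []turbopath.AbsoluteSystemPath{anchor}}
        var restored []turbopath.AnchoredSystemPath
        for {
            header, err := tr.Next()
            if errors.Is(err, io.EOF) {
                return restored, nil // end of archive
            }
            if err != nil {
                return restored, err
            }
            if header.Typeflag != tar.TypeReg {
                continue // this sketch only handles regular files
            }
            name, restoreErr := restoreRegular(dirCache, anchor, header, tr)
            if restoreErr != nil {
                return restored, restoreErr
            }
            restored = append(restored, name)
        }
    }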
diff --git a/cli/internal/cacheitem/restore_symlink.go b/cli/internal/cacheitem/restore_symlink.go
new file mode 100644
index 0000000..4cb29f5
--- /dev/null
+++ b/cli/internal/cacheitem/restore_symlink.go
@@ -0,0 +1,180 @@
+package cacheitem
+
+import (
+ "archive/tar"
+ "io/fs"
+ "os"
+ "path/filepath"
+
+ "github.com/pyr-sh/dag"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+// restoreSymlink restores a symlink and errors if the target is missing.
+func restoreSymlink(dirCache *cachedDirTree, anchor turbopath.AbsoluteSystemPath, header *tar.Header) (turbopath.AnchoredSystemPath, error) {
+ processedName, canonicalizeNameErr := canonicalizeName(header.Name)
+ if canonicalizeNameErr != nil {
+ return "", canonicalizeNameErr
+ }
+
+ // Check to see if the target exists.
+ processedLinkname := canonicalizeLinkname(anchor, processedName, header.Linkname)
+ if _, err := os.Lstat(processedLinkname); err != nil {
+ return "", errMissingSymlinkTarget
+ }
+
+ return actuallyRestoreSymlink(dirCache, anchor, processedName, header)
+}
+
+// restoreSymlinkMissingTarget restores a symlink and does not error if the target is missing.
+func restoreSymlinkMissingTarget(dirCache *cachedDirTree, anchor turbopath.AbsoluteSystemPath, header *tar.Header) (turbopath.AnchoredSystemPath, error) {
+ processedName, canonicalizeNameErr := canonicalizeName(header.Name)
+ if canonicalizeNameErr != nil {
+ return "", canonicalizeNameErr
+ }
+
+ return actuallyRestoreSymlink(dirCache, anchor, processedName, header)
+}
+
+func actuallyRestoreSymlink(dirCache *cachedDirTree, anchor turbopath.AbsoluteSystemPath, processedName turbopath.AnchoredSystemPath, header *tar.Header) (turbopath.AnchoredSystemPath, error) {
+ // We need to traverse `processedName` from root to leaf, split at
+ // `os.PathSeparator`, to make sure we don't end up following a symlink
+ // outside of the restore path.
+ if err := safeMkdirFile(dirCache, anchor, processedName, header.Mode); err != nil {
+ return "", err
+ }
+
+ // Specify where we are restoring this symlink.
+ symlinkFrom := processedName.RestoreAnchor(anchor)
+
+ // Remove any existing object at that location.
+ // If it errors we'll catch it on creation.
+ _ = symlinkFrom.Remove()
+
+ // Create the symlink.
+ // Explicitly uses the _original_ header.Linkname as the target.
+ // This does not support file names with `\` in them in a cross-platform manner.
+ symlinkErr := symlinkFrom.Symlink(header.Linkname)
+ if symlinkErr != nil {
+ return "", symlinkErr
+ }
+
+ // Darwin allows you to change the permissions of a symlink.
+ lchmodErr := symlinkFrom.Lchmod(fs.FileMode(header.Mode))
+ if lchmodErr != nil {
+ return "", lchmodErr
+ }
+
+ return processedName, nil
+}
+
+// topologicallyRestoreSymlinks ensures that targets of symlinks are created in advance
+// of the things that link to them. It does this by topologically sorting all
+// of the symlinks. This also enables us to ensure we do not create cycles.
+func topologicallyRestoreSymlinks(dirCache *cachedDirTree, anchor turbopath.AbsoluteSystemPath, symlinks []*tar.Header, tr *tar.Reader) ([]turbopath.AnchoredSystemPath, error) {
+ restored := make([]turbopath.AnchoredSystemPath, 0)
+ lookup := make(map[string]*tar.Header)
+
+ var g dag.AcyclicGraph
+ for _, header := range symlinks {
+ processedName, err := canonicalizeName(header.Name)
+ if err != nil {
+ return nil, err
+ }
+ processedSourcename := canonicalizeLinkname(anchor, processedName, processedName.ToString())
+ processedLinkname := canonicalizeLinkname(anchor, processedName, header.Linkname)
+ g.Add(processedSourcename)
+ g.Add(processedLinkname)
+ g.Connect(dag.BasicEdge(processedLinkname, processedSourcename))
+ lookup[processedSourcename] = header
+ }
+
+ cycles := g.Cycles()
+ if cycles != nil {
+ return restored, errCycleDetected
+ }
+
+ roots := make(dag.Set)
+ for _, v := range g.Vertices() {
+ if g.UpEdges(v).Len() == 0 {
+ roots.Add(v)
+ }
+ }
+
+ walkFunc := func(vertex dag.Vertex, depth int) error {
+ key, ok := vertex.(string)
+ if !ok {
+ return nil
+ }
+ header, exists := lookup[key]
+ if !exists {
+ return nil
+ }
+
+ file, restoreErr := restoreSymlinkMissingTarget(dirCache, anchor, header)
+ if restoreErr != nil {
+ return restoreErr
+ }
+
+ restored = append(restored, file)
+ return nil
+ }
+
+ walkError := g.DepthFirstWalk(roots, walkFunc)
+ if walkError != nil {
+ return restored, walkError
+ }
+
+ return restored, nil
+}
+
+// canonicalizeLinkname determines (lexically) what the resolved path on the
+// system will be when linkname is restored verbatim.
+func canonicalizeLinkname(anchor turbopath.AbsoluteSystemPath, processedName turbopath.AnchoredSystemPath, linkname string) string {
+ // We don't know _anything_ about linkname. It could be any of:
+ //
+ // - Absolute Unix Path
+ // - Absolute Windows Path
+ // - Relative Unix Path
+ // - Relative Windows Path
+ //
+ // We also can't _truly_ distinguish if the path is Unix or Windows.
+ // Take for example: `/Users/turbobot/weird-filenames/\foo\/lol`
+ // It is a valid file on Unix, but if we do slash conversion it breaks.
+ // Or `i\am\a\normal\unix\file\but\super\nested\on\windows`.
+ //
+ // We also can't safely assume that paths in link targets on one platform
+ // should be treated as targets for that platform. The author may be
+ // generating an artifact that should work on Windows on a Unix device.
+ //
+ // Given all of that, our best option is to restore link targets _verbatim_.
+ // No modification, no slash conversion.
+ //
+ // In order to DAG sort them, however, we do need to canonicalize them.
+ // We canonicalize them as if we're restoring them verbatim.
+ //
+ // 0. We've extracted a version of `Clean` from stdlib which does nothing but
+ // separator and traversal collapsing.
+ cleanedLinkname := Clean(linkname)
+
+ // 1. Check to see if the link target is absolute _on the current platform_.
+ // If it is an absolute path it's canonical by rule.
+ if filepath.IsAbs(cleanedLinkname) {
+ return cleanedLinkname
+ }
+
+ // Remaining options:
+ // - Absolute (other platform) Path
+ // - Relative Unix Path
+ // - Relative Windows Path
+ //
+ // At this point we simply assume that it's a relative path, no matter
+ // which separators appear in it or where they appear. We can't do
+ // anything else because the OS will also treat it that way when it is
+ // used as a link target.
+ //
+ // We manually join these to avoid calls to stdlib's `Clean`.
+ source := processedName.RestoreAnchor(anchor)
+ canonicalized := source.Dir().ToString() + string(os.PathSeparator) + cleanedLinkname
+ return Clean(canonicalized)
+}
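Reviewer note: a small sketch (not part of this patch) of the lexical canonicalization above; the values mirror Test_canonicalizeLinkname below, the function name is hypothetical, and fmt plus path/filepath are assumed imported.

    func exampleCanonicalizeLinkname() {
        anchor := turbopath.AbsoluteSystemPath(filepath.Join("path", "to", "anchor"))
        source := turbopath.AnchoredUnixPath("child/source").ToSystemPath()
        // Unix-style relative target: on Unix this prints "path/to/anchor/sibling/target".
        fmt.Println(canonicalizeLinkname(anchor, source, "../sibling/target"))
        // Windows-style separators are kept verbatim on Unix, so the `..\` does
        // not collapse there; it only collapses when running on Windows.
        fmt.Println(canonicalizeLinkname(anchor, source, `..\sibling\target`))
    }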
diff --git a/cli/internal/cacheitem/restore_test.go b/cli/internal/cacheitem/restore_test.go
new file mode 100644
index 0000000..a0a33d6
--- /dev/null
+++ b/cli/internal/cacheitem/restore_test.go
@@ -0,0 +1,1493 @@
+package cacheitem
+
+import (
+ "archive/tar"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "syscall"
+ "testing"
+
+ "github.com/DataDog/zstd"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "gotest.tools/v3/assert"
+)
+
+type tarFile struct {
+ Body string
+ *tar.Header
+}
+
+type restoreFile struct {
+ Name turbopath.AnchoredUnixPath
+ Linkname string
+ fs.FileMode
+}
+
+// generateTar is used specifically to generate tar files that Turborepo would
+// rarely or never encounter without malicious or pathological inputs. We use it
+// to make sure that we respond well in these scenarios during restore attempts.
+func generateTar(t *testing.T, files []tarFile) turbopath.AbsoluteSystemPath {
+ t.Helper()
+ testDir := turbopath.AbsoluteSystemPath(t.TempDir())
+ testArchivePath := testDir.UntypedJoin("out.tar")
+
+ handle, handleCreateErr := testArchivePath.Create()
+ assert.NilError(t, handleCreateErr, "os.Create")
+
+ tw := tar.NewWriter(handle)
+
+ for _, file := range files {
+ if file.Header.Typeflag == tar.TypeReg {
+ file.Header.Size = int64(len(file.Body))
+ }
+
+ writeHeaderErr := tw.WriteHeader(file.Header)
+ assert.NilError(t, writeHeaderErr, "tw.WriteHeader")
+
+ _, writeErr := tw.Write([]byte(file.Body))
+ assert.NilError(t, writeErr, "tw.Write")
+ }
+
+ twCloseErr := tw.Close()
+ assert.NilError(t, twCloseErr, "tw.Close")
+
+ handleCloseErr := handle.Close()
+ assert.NilError(t, handleCloseErr, "handle.Close")
+
+ return testArchivePath
+}
+
+// compressTar splits the compression of a tar file so that we don't
+// accidentally diverge in tar creation while still being able to test
+// restoration from tar and from .tar.zst.
+func compressTar(t *testing.T, archivePath turbopath.AbsoluteSystemPath) turbopath.AbsoluteSystemPath {
+ t.Helper()
+
+ inputHandle, inputHandleOpenErr := archivePath.Open()
+ assert.NilError(t, inputHandleOpenErr, "os.Open")
+
+ outputPath := archivePath + ".zst"
+ outputHandle, outputHandleCreateErr := outputPath.Create()
+ assert.NilError(t, outputHandleCreateErr, "os.Create")
+
+ zw := zstd.NewWriter(outputHandle)
+ _, copyError := io.Copy(zw, inputHandle)
+ assert.NilError(t, copyError, "io.Copy")
+
+ zwCloseErr := zw.Close()
+ assert.NilError(t, zwCloseErr, "zw.Close")
+
+ inputHandleCloseErr := inputHandle.Close()
+ assert.NilError(t, inputHandleCloseErr, "inputHandle.Close")
+
+ outputHandleCloseErr := outputHandle.Close()
+ assert.NilError(t, outputHandleCloseErr, "outputHandle.Close")
+
+ return outputPath
+}
+
+func generateAnchor(t *testing.T) turbopath.AbsoluteSystemPath {
+ t.Helper()
+ testDir := turbopath.AbsoluteSystemPath(t.TempDir())
+ anchorPoint := testDir.UntypedJoin("anchor")
+
+ mkdirErr := anchorPoint.Mkdir(0777)
+ assert.NilError(t, mkdirErr, "Mkdir")
+
+ return anchorPoint
+}
+
+func assertFileExists(t *testing.T, anchor turbopath.AbsoluteSystemPath, diskFile restoreFile) {
+ t.Helper()
+ // If we have gotten here we can assume this to be true.
+ processedName := diskFile.Name.ToSystemPath()
+ fullName := processedName.RestoreAnchor(anchor)
+ fileInfo, err := fullName.Lstat()
+ assert.NilError(t, err, "Lstat")
+
+ assert.Equal(t, fileInfo.Mode()&fs.ModePerm, diskFile.FileMode&fs.ModePerm, "File has the expected permissions: "+processedName)
+ assert.Equal(t, fileInfo.Mode()|fs.ModePerm, diskFile.FileMode|fs.ModePerm, "File has the expected mode.")
+
+ if diskFile.FileMode&os.ModeSymlink != 0 {
+ linkname, err := fullName.Readlink()
+ assert.NilError(t, err, "Readlink")
+
+ // We restore Linkname verbatim.
+ assert.Equal(t, linkname, diskFile.Linkname, "Link target matches.")
+ }
+}
+
+func TestOpen(t *testing.T) {
+ type wantErr struct {
+ unix error
+ windows error
+ }
+ type wantOutput struct {
+ unix []turbopath.AnchoredSystemPath
+ windows []turbopath.AnchoredSystemPath
+ }
+ type wantFiles struct {
+ unix []restoreFile
+ windows []restoreFile
+ }
+ tests := []struct {
+ name string
+ tarFiles []tarFile
+ wantOutput wantOutput
+ wantFiles wantFiles
+ wantErr wantErr
+ }{
+ {
+ name: "cache optimized",
+ tarFiles: []tarFile{
+ {
+ Header: &tar.Header{
+ Name: "one/",
+ Typeflag: tar.TypeDir,
+ Mode: 0755,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "one/two/",
+ Typeflag: tar.TypeDir,
+ Mode: 0755,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "one/two/three/",
+ Typeflag: tar.TypeDir,
+ Mode: 0755,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "one/two/three/file-one",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "one/two/three/file-two",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "one/two/a/",
+ Typeflag: tar.TypeDir,
+ Mode: 0755,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "one/two/a/file",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "one/two/b/",
+ Typeflag: tar.TypeDir,
+ Mode: 0755,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "one/two/b/file",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ },
+ wantFiles: wantFiles{
+ unix: []restoreFile{
+ {
+ Name: "one",
+ FileMode: 0 | os.ModeDir | 0755,
+ },
+ {
+ Name: "one/two",
+ FileMode: 0 | os.ModeDir | 0755,
+ },
+ {
+ Name: "one/two/three",
+ FileMode: 0 | os.ModeDir | 0755,
+ },
+ {
+ Name: "one/two/three/file-one",
+ FileMode: 0644,
+ },
+ {
+ Name: "one/two/three/file-two",
+ FileMode: 0644,
+ },
+ {
+ Name: "one/two/a",
+ FileMode: 0 | os.ModeDir | 0755,
+ },
+ {
+ Name: "one/two/a/file",
+ FileMode: 0644,
+ },
+ {
+ Name: "one/two/b",
+ FileMode: 0 | os.ModeDir | 0755,
+ },
+ {
+ Name: "one/two/b/file",
+ FileMode: 0644,
+ },
+ },
+ windows: []restoreFile{
+ {
+ Name: "one",
+ FileMode: 0 | os.ModeDir | 0777,
+ },
+ {
+ Name: "one/two",
+ FileMode: 0 | os.ModeDir | 0777,
+ },
+ {
+ Name: "one/two/three",
+ FileMode: 0 | os.ModeDir | 0777,
+ },
+ {
+ Name: "one/two/three/file-one",
+ FileMode: 0666,
+ },
+ {
+ Name: "one/two/three/file-two",
+ FileMode: 0666,
+ },
+ {
+ Name: "one/two/a",
+ FileMode: 0 | os.ModeDir | 0777,
+ },
+ {
+ Name: "one/two/a/file",
+ FileMode: 0666,
+ },
+ {
+ Name: "one/two/b",
+ FileMode: 0 | os.ModeDir | 0777,
+ },
+ {
+ Name: "one/two/b/file",
+ FileMode: 0666,
+ },
+ },
+ },
+ wantOutput: wantOutput{
+ unix: turbopath.AnchoredUnixPathArray{
+ "one",
+ "one/two",
+ "one/two/three",
+ "one/two/three/file-one",
+ "one/two/three/file-two",
+ "one/two/a",
+ "one/two/a/file",
+ "one/two/b",
+ "one/two/b/file",
+ }.ToSystemPathArray(),
+ },
+ },
+ {
+ name: "pathological cache works",
+ tarFiles: []tarFile{
+ {
+ Header: &tar.Header{
+ Name: "one/",
+ Typeflag: tar.TypeDir,
+ Mode: 0755,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "one/two/",
+ Typeflag: tar.TypeDir,
+ Mode: 0755,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "one/two/a/",
+ Typeflag: tar.TypeDir,
+ Mode: 0755,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "one/two/b/",
+ Typeflag: tar.TypeDir,
+ Mode: 0755,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "one/two/three/",
+ Typeflag: tar.TypeDir,
+ Mode: 0755,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "one/two/a/file",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "one/two/b/file",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "one/two/three/file-one",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "one/two/three/file-two",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ },
+ wantFiles: wantFiles{
+ unix: []restoreFile{
+ {
+ Name: "one",
+ FileMode: 0 | os.ModeDir | 0755,
+ },
+ {
+ Name: "one/two",
+ FileMode: 0 | os.ModeDir | 0755,
+ },
+ {
+ Name: "one/two/three",
+ FileMode: 0 | os.ModeDir | 0755,
+ },
+ {
+ Name: "one/two/three/file-one",
+ FileMode: 0644,
+ },
+ {
+ Name: "one/two/three/file-two",
+ FileMode: 0644,
+ },
+ {
+ Name: "one/two/a",
+ FileMode: 0 | os.ModeDir | 0755,
+ },
+ {
+ Name: "one/two/a/file",
+ FileMode: 0644,
+ },
+ {
+ Name: "one/two/b",
+ FileMode: 0 | os.ModeDir | 0755,
+ },
+ {
+ Name: "one/two/b/file",
+ FileMode: 0644,
+ },
+ },
+ windows: []restoreFile{
+ {
+ Name: "one",
+ FileMode: 0 | os.ModeDir | 0777,
+ },
+ {
+ Name: "one/two",
+ FileMode: 0 | os.ModeDir | 0777,
+ },
+ {
+ Name: "one/two/three",
+ FileMode: 0 | os.ModeDir | 0777,
+ },
+ {
+ Name: "one/two/three/file-one",
+ FileMode: 0666,
+ },
+ {
+ Name: "one/two/three/file-two",
+ FileMode: 0666,
+ },
+ {
+ Name: "one/two/a",
+ FileMode: 0 | os.ModeDir | 0777,
+ },
+ {
+ Name: "one/two/a/file",
+ FileMode: 0666,
+ },
+ {
+ Name: "one/two/b",
+ FileMode: 0 | os.ModeDir | 0777,
+ },
+ {
+ Name: "one/two/b/file",
+ FileMode: 0666,
+ },
+ },
+ },
+ wantOutput: wantOutput{
+ unix: turbopath.AnchoredUnixPathArray{
+ "one",
+ "one/two",
+ "one/two/a",
+ "one/two/b",
+ "one/two/three",
+ "one/two/a/file",
+ "one/two/b/file",
+ "one/two/three/file-one",
+ "one/two/three/file-two",
+ }.ToSystemPathArray(),
+ },
+ },
+ {
+ name: "hello world",
+ tarFiles: []tarFile{
+ {
+ Header: &tar.Header{
+ Name: "target",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ Body: "target",
+ },
+ {
+ Header: &tar.Header{
+ Name: "source",
+ Linkname: "target",
+ Typeflag: tar.TypeSymlink,
+ Mode: 0777,
+ },
+ },
+ },
+ wantFiles: wantFiles{
+ unix: []restoreFile{
+ {
+ Name: "source",
+ Linkname: "target",
+ FileMode: 0 | os.ModeSymlink | 0777,
+ },
+ {
+ Name: "target",
+ FileMode: 0644,
+ },
+ },
+ windows: []restoreFile{
+ {
+ Name: "source",
+ Linkname: "target",
+ FileMode: 0 | os.ModeSymlink | 0666,
+ },
+ {
+ Name: "target",
+ FileMode: 0666,
+ },
+ },
+ },
+ wantOutput: wantOutput{
+ unix: turbopath.AnchoredUnixPathArray{"target", "source"}.ToSystemPathArray(),
+ },
+ },
+ {
+ name: "nested file",
+ tarFiles: []tarFile{
+ {
+ Header: &tar.Header{
+ Name: "folder/",
+ Typeflag: tar.TypeDir,
+ Mode: 0755,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "folder/file",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ Body: "file",
+ },
+ },
+ wantFiles: wantFiles{
+ unix: []restoreFile{
+ {
+ Name: "folder",
+ FileMode: 0 | os.ModeDir | 0755,
+ },
+ {
+ Name: "folder/file",
+ FileMode: 0644,
+ },
+ },
+ windows: []restoreFile{
+ {
+ Name: "folder",
+ FileMode: 0 | os.ModeDir | 0777,
+ },
+ {
+ Name: "folder/file",
+ FileMode: 0666,
+ },
+ },
+ },
+ wantOutput: wantOutput{
+ unix: turbopath.AnchoredUnixPathArray{"folder", "folder/file"}.ToSystemPathArray(),
+ },
+ },
+ {
+ name: "nested symlink",
+ tarFiles: []tarFile{
+ {
+ Header: &tar.Header{
+ Name: "folder/",
+ Typeflag: tar.TypeDir,
+ Mode: 0755,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "folder/symlink",
+ Linkname: "../",
+ Typeflag: tar.TypeSymlink,
+ Mode: 0777,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "folder/symlink/folder-sibling",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ Body: "folder-sibling",
+ },
+ },
+ wantFiles: wantFiles{
+ unix: []restoreFile{
+ {
+ Name: "folder",
+ FileMode: 0 | os.ModeDir | 0755,
+ },
+ {
+ Name: "folder/symlink",
+ FileMode: 0 | os.ModeSymlink | 0777,
+ Linkname: "../",
+ },
+ {
+ Name: "folder/symlink/folder-sibling",
+ FileMode: 0644,
+ },
+ {
+ Name: "folder-sibling",
+ FileMode: 0644,
+ },
+ },
+ windows: []restoreFile{
+ {
+ Name: "folder",
+ FileMode: 0 | os.ModeDir | 0777,
+ },
+ {
+ Name: "folder/symlink",
+ FileMode: 0 | os.ModeSymlink | 0666,
+ Linkname: "..\\",
+ },
+ {
+ Name: "folder/symlink/folder-sibling",
+ FileMode: 0666,
+ },
+ {
+ Name: "folder-sibling",
+ FileMode: 0666,
+ },
+ },
+ },
+ wantOutput: wantOutput{
+ unix: turbopath.AnchoredUnixPathArray{"folder", "folder/symlink", "folder/symlink/folder-sibling"}.ToSystemPathArray(),
+ },
+ },
+ {
+ name: "pathological symlinks",
+ tarFiles: []tarFile{
+ {
+ Header: &tar.Header{
+ Name: "one",
+ Linkname: "two",
+ Typeflag: tar.TypeSymlink,
+ Mode: 0777,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "two",
+ Linkname: "three",
+ Typeflag: tar.TypeSymlink,
+ Mode: 0777,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "three",
+ Linkname: "real",
+ Typeflag: tar.TypeSymlink,
+ Mode: 0777,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "real",
+ Typeflag: tar.TypeReg,
+ Mode: 0755,
+ },
+ Body: "real",
+ },
+ },
+ wantFiles: wantFiles{
+ unix: []restoreFile{
+ {
+ Name: "one",
+ Linkname: "two",
+ FileMode: 0 | os.ModeSymlink | 0777,
+ },
+ {
+ Name: "two",
+ Linkname: "three",
+ FileMode: 0 | os.ModeSymlink | 0777,
+ },
+ {
+ Name: "three",
+ Linkname: "real",
+ FileMode: 0 | os.ModeSymlink | 0777,
+ },
+ {
+ Name: "real",
+ FileMode: 0 | 0755,
+ },
+ },
+ windows: []restoreFile{
+ {
+ Name: "one",
+ Linkname: "two",
+ FileMode: 0 | os.ModeSymlink | 0666,
+ },
+ {
+ Name: "two",
+ Linkname: "three",
+ FileMode: 0 | os.ModeSymlink | 0666,
+ },
+ {
+ Name: "three",
+ Linkname: "real",
+ FileMode: 0 | os.ModeSymlink | 0666,
+ },
+ {
+ Name: "real",
+ FileMode: 0 | 0666,
+ },
+ },
+ },
+ wantOutput: wantOutput{
+ unix: turbopath.AnchoredUnixPathArray{"real", "three", "two", "one"}.ToSystemPathArray(),
+ },
+ },
+ {
+ name: "place file at dir location",
+ tarFiles: []tarFile{
+ {
+ Header: &tar.Header{
+ Name: "folder-not-file/",
+ Typeflag: tar.TypeDir,
+ Mode: 0755,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "folder-not-file/subfile",
+ Typeflag: tar.TypeReg,
+ Mode: 0755,
+ },
+ Body: "subfile",
+ },
+ {
+ Header: &tar.Header{
+ Name: "folder-not-file",
+ Typeflag: tar.TypeReg,
+ Mode: 0755,
+ },
+ Body: "this shouldn't work",
+ },
+ },
+ wantFiles: wantFiles{
+ unix: []restoreFile{
+ {
+ Name: "folder-not-file",
+ FileMode: 0 | os.ModeDir | 0755,
+ },
+ {
+ Name: "folder-not-file/subfile",
+ FileMode: 0755,
+ },
+ },
+ windows: []restoreFile{
+ {
+ Name: "folder-not-file",
+ FileMode: 0 | os.ModeDir | 0777,
+ },
+ {
+ Name: "folder-not-file/subfile",
+ FileMode: 0666,
+ },
+ },
+ },
+ wantOutput: wantOutput{
+ unix: turbopath.AnchoredUnixPathArray{"folder-not-file", "folder-not-file/subfile"}.ToSystemPathArray(),
+ },
+ wantErr: wantErr{
+ unix: syscall.EISDIR,
+ windows: syscall.EISDIR,
+ },
+ },
+ // {
+ // name: "missing symlink with file at subdir",
+ // tarFiles: []tarFile{
+ // {
+ // Header: &tar.Header{
+ // Name: "one",
+ // Linkname: "two",
+ // Typeflag: tar.TypeSymlink,
+ // Mode: 0777,
+ // },
+ // },
+ // {
+ // Header: &tar.Header{
+ // Name: "one/file",
+ // Typeflag: tar.TypeReg,
+ // Mode: 0755,
+ // },
+ // Body: "file",
+ // },
+ // },
+ // wantFiles: wantFiles{
+ // unix: []restoreFile{
+ // {
+ // Name: "one",
+ // Linkname: "two",
+ // FileMode: 0 | os.ModeSymlink | 0777,
+ // },
+ // },
+ // },
+ // wantOutput: wantOutput{
+ // unix: turbopath.AnchoredUnixPathArray{"one"}.ToSystemPathArray(),
+ // windows: nil,
+ // },
+ // wantErr: wantErr{
+ // unix: os.ErrExist,
+ // windows: os.ErrExist,
+ // },
+ // },
+ {
+ name: "symlink cycle",
+ tarFiles: []tarFile{
+ {
+ Header: &tar.Header{
+ Name: "one",
+ Linkname: "two",
+ Typeflag: tar.TypeSymlink,
+ Mode: 0777,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "two",
+ Linkname: "three",
+ Typeflag: tar.TypeSymlink,
+ Mode: 0777,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "three",
+ Linkname: "one",
+ Typeflag: tar.TypeSymlink,
+ Mode: 0777,
+ },
+ },
+ },
+ wantFiles: wantFiles{
+ unix: []restoreFile{},
+ },
+ wantOutput: wantOutput{
+ unix: []turbopath.AnchoredSystemPath{},
+ },
+ wantErr: wantErr{
+ unix: errCycleDetected,
+ windows: errCycleDetected,
+ },
+ },
+ {
+ name: "symlink clobber",
+ tarFiles: []tarFile{
+ {
+ Header: &tar.Header{
+ Name: "one",
+ Linkname: "two",
+ Typeflag: tar.TypeSymlink,
+ Mode: 0777,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "one",
+ Linkname: "three",
+ Typeflag: tar.TypeSymlink,
+ Mode: 0777,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "one",
+ Linkname: "real",
+ Typeflag: tar.TypeSymlink,
+ Mode: 0777,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "real",
+ Typeflag: tar.TypeReg,
+ Mode: 0755,
+ },
+ Body: "real",
+ },
+ },
+ wantFiles: wantFiles{
+ unix: []restoreFile{
+ {
+ Name: "one",
+ Linkname: "real",
+ FileMode: 0 | os.ModeSymlink | 0777,
+ },
+ {
+ Name: "real",
+ FileMode: 0755,
+ },
+ },
+ windows: []restoreFile{
+ {
+ Name: "one",
+ Linkname: "real",
+ FileMode: 0 | os.ModeSymlink | 0666,
+ },
+ {
+ Name: "real",
+ FileMode: 0666,
+ },
+ },
+ },
+ wantOutput: wantOutput{
+ unix: turbopath.AnchoredUnixPathArray{"real", "one"}.ToSystemPathArray(),
+ },
+ },
+ {
+ name: "symlink traversal",
+ tarFiles: []tarFile{
+ {
+ Header: &tar.Header{
+ Name: "escape",
+ Linkname: "../",
+ Typeflag: tar.TypeSymlink,
+ Mode: 0777,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "escape/file",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ Body: "file",
+ },
+ },
+ wantFiles: wantFiles{
+ unix: []restoreFile{
+ {
+ Name: "escape",
+ Linkname: "../",
+ FileMode: 0 | os.ModeSymlink | 0777,
+ },
+ },
+ windows: []restoreFile{
+ {
+ Name: "escape",
+ Linkname: "..\\",
+ FileMode: 0 | os.ModeSymlink | 0666,
+ },
+ },
+ },
+ wantOutput: wantOutput{
+ unix: turbopath.AnchoredUnixPathArray{"escape"}.ToSystemPathArray(),
+ },
+ wantErr: wantErr{
+ unix: errTraversal,
+ windows: errTraversal,
+ },
+ },
+ {
+ name: "Double indirection: file",
+ tarFiles: []tarFile{
+ {
+ Header: &tar.Header{
+ Name: "up",
+ Linkname: "../",
+ Typeflag: tar.TypeSymlink,
+ Mode: 0777,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "link",
+ Linkname: "up",
+ Typeflag: tar.TypeSymlink,
+ Mode: 0777,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "link/outside-file",
+ Typeflag: tar.TypeReg,
+ Mode: 0755,
+ },
+ },
+ },
+ wantErr: wantErr{unix: errTraversal, windows: errTraversal},
+ wantOutput: wantOutput{
+ unix: turbopath.AnchoredUnixPathArray{
+ "up",
+ "link",
+ }.ToSystemPathArray(),
+ },
+ },
+ {
+ name: "Double indirection: folder",
+ tarFiles: []tarFile{
+ {
+ Header: &tar.Header{
+ Name: "up",
+ Linkname: "../",
+ Typeflag: tar.TypeSymlink,
+ Mode: 0777,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "link",
+ Linkname: "up",
+ Typeflag: tar.TypeSymlink,
+ Mode: 0777,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "link/level-one/level-two/",
+ Typeflag: tar.TypeDir,
+ Mode: 0755,
+ },
+ },
+ },
+ wantErr: wantErr{unix: errTraversal, windows: errTraversal},
+ wantOutput: wantOutput{
+ unix: turbopath.AnchoredUnixPathArray{
+ "up",
+ "link",
+ }.ToSystemPathArray(),
+ },
+ },
+ {
+ name: "name traversal",
+ tarFiles: []tarFile{
+ {
+ Header: &tar.Header{
+ Name: "../escape",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ Body: "file",
+ },
+ },
+ wantFiles: wantFiles{
+ unix: []restoreFile{},
+ },
+ wantOutput: wantOutput{
+ unix: []turbopath.AnchoredSystemPath{},
+ },
+ wantErr: wantErr{
+ unix: errNameMalformed,
+ windows: errNameMalformed,
+ },
+ },
+ {
+ name: "windows unsafe",
+ tarFiles: []tarFile{
+ {
+ Header: &tar.Header{
+ Name: "back\\slash\\file",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ Body: "file",
+ },
+ },
+ wantFiles: wantFiles{
+ unix: []restoreFile{
+ {
+ Name: "back\\slash\\file",
+ FileMode: 0644,
+ },
+ },
+ windows: []restoreFile{},
+ },
+ wantOutput: wantOutput{
+ unix: turbopath.AnchoredUnixPathArray{"back\\slash\\file"}.ToSystemPathArray(),
+ windows: turbopath.AnchoredUnixPathArray{}.ToSystemPathArray(),
+ },
+ wantErr: wantErr{
+ unix: nil,
+ windows: errNameWindowsUnsafe,
+ },
+ },
+ {
+ name: "fifo (and others) unsupported",
+ tarFiles: []tarFile{
+ {
+ Header: &tar.Header{
+ Name: "fifo",
+ Typeflag: tar.TypeFifo,
+ },
+ },
+ },
+ wantFiles: wantFiles{
+ unix: []restoreFile{},
+ },
+ wantOutput: wantOutput{
+ unix: []turbopath.AnchoredSystemPath{},
+ },
+ wantErr: wantErr{
+ unix: errUnsupportedFileType,
+ windows: errUnsupportedFileType,
+ },
+ },
+ }
+ for _, tt := range tests {
+ getTestFunc := func(compressed bool) func(t *testing.T) {
+ return func(t *testing.T) {
+ var archivePath turbopath.AbsoluteSystemPath
+ if compressed {
+ archivePath = compressTar(t, generateTar(t, tt.tarFiles))
+ } else {
+ archivePath = generateTar(t, tt.tarFiles)
+ }
+ anchor := generateAnchor(t)
+
+ cacheItem, err := Open(archivePath)
+ assert.NilError(t, err, "Open")
+
+ restoreOutput, restoreErr := cacheItem.Restore(anchor)
+ var desiredErr error
+ if runtime.GOOS == "windows" {
+ desiredErr = tt.wantErr.windows
+ } else {
+ desiredErr = tt.wantErr.unix
+ }
+ if desiredErr != nil {
+ if !errors.Is(restoreErr, desiredErr) {
+ t.Errorf("wanted err: %v, got err: %v", tt.wantErr, restoreErr)
+ }
+ } else {
+ assert.NilError(t, restoreErr, "Restore")
+ }
+
+ outputComparison := tt.wantOutput.unix
+ if runtime.GOOS == "windows" && tt.wantOutput.windows != nil {
+ outputComparison = tt.wantOutput.windows
+ }
+
+ if !reflect.DeepEqual(restoreOutput, outputComparison) {
+ t.Errorf("Restore() = %v, want %v", restoreOutput, outputComparison)
+ }
+
+ // Check files on disk.
+ filesComparison := tt.wantFiles.unix
+ if runtime.GOOS == "windows" && tt.wantFiles.windows != nil {
+ filesComparison = tt.wantFiles.windows
+ }
+ for _, diskFile := range filesComparison {
+ assertFileExists(t, anchor, diskFile)
+ }
+
+ assert.NilError(t, cacheItem.Close(), "Close")
+ }
+ }
+ t.Run(tt.name+"zst", getTestFunc(true))
+ t.Run(tt.name, getTestFunc(false))
+ }
+}
+
+func Test_checkName(t *testing.T) {
+ tests := []struct {
+ path string
+ wellFormed bool
+ windowsSafe bool
+ }{
+ // Empty
+ {
+ path: "",
+ wellFormed: false,
+ windowsSafe: false,
+ },
+ // Bad prefix
+ {
+ path: ".",
+ wellFormed: false,
+ windowsSafe: true,
+ },
+ {
+ path: "..",
+ wellFormed: false,
+ windowsSafe: true,
+ },
+ {
+ path: "/",
+ wellFormed: false,
+ windowsSafe: true,
+ },
+ {
+ path: "./",
+ wellFormed: false,
+ windowsSafe: true,
+ },
+ {
+ path: "../",
+ wellFormed: false,
+ windowsSafe: true,
+ },
+ // Bad prefix, suffixed
+ {
+ path: "/a",
+ wellFormed: false,
+ windowsSafe: true,
+ },
+ {
+ path: "./a",
+ wellFormed: false,
+ windowsSafe: true,
+ },
+ {
+ path: "../a",
+ wellFormed: false,
+ windowsSafe: true,
+ },
+ // Bad Suffix
+ {
+ path: "/.",
+ wellFormed: false,
+ windowsSafe: true,
+ },
+ {
+ path: "/..",
+ wellFormed: false,
+ windowsSafe: true,
+ },
+ // Bad Suffix, with prefix
+ {
+ path: "a/.",
+ wellFormed: false,
+ windowsSafe: true,
+ },
+ {
+ path: "a/..",
+ wellFormed: false,
+ windowsSafe: true,
+ },
+ // Bad middle
+ {
+ path: "//",
+ wellFormed: false,
+ windowsSafe: true,
+ },
+ {
+ path: "/./",
+ wellFormed: false,
+ windowsSafe: true,
+ },
+ {
+ path: "/../",
+ wellFormed: false,
+ windowsSafe: true,
+ },
+ // Bad middle, prefixed
+ {
+ path: "a//",
+ wellFormed: false,
+ windowsSafe: true,
+ },
+ {
+ path: "a/./",
+ wellFormed: false,
+ windowsSafe: true,
+ },
+ {
+ path: "a/../",
+ wellFormed: false,
+ windowsSafe: true,
+ },
+ // Bad middle, suffixed
+ {
+ path: "//a",
+ wellFormed: false,
+ windowsSafe: true,
+ },
+ {
+ path: "/./a",
+ wellFormed: false,
+ windowsSafe: true,
+ },
+ {
+ path: "/../a",
+ wellFormed: false,
+ windowsSafe: true,
+ },
+ // Bad middle, wrapped
+ {
+ path: "a//a",
+ wellFormed: false,
+ windowsSafe: true,
+ },
+ {
+ path: "a/./a",
+ wellFormed: false,
+ windowsSafe: true,
+ },
+ {
+ path: "a/../a",
+ wellFormed: false,
+ windowsSafe: true,
+ },
+ // False positive tests
+ {
+ path: "...",
+ wellFormed: true,
+ windowsSafe: true,
+ },
+ {
+ path: ".../a",
+ wellFormed: true,
+ windowsSafe: true,
+ },
+ {
+ path: "a/...",
+ wellFormed: true,
+ windowsSafe: true,
+ },
+ {
+ path: "a/.../a",
+ wellFormed: true,
+ windowsSafe: true,
+ },
+ {
+ path: ".../...",
+ wellFormed: true,
+ windowsSafe: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(fmt.Sprintf("Path: \"%v\"", tt.path), func(t *testing.T) {
+ wellFormed, windowsSafe := checkName(tt.path)
+ if wellFormed != tt.wellFormed || windowsSafe != tt.windowsSafe {
+ t.Errorf("\nwantOutput: checkName(\"%v\") wellFormed = %v, windowsSafe %v\ngot: checkName(\"%v\") wellFormed = %v, windowsSafe %v", tt.path, tt.wellFormed, tt.windowsSafe, tt.path, wellFormed, windowsSafe)
+ }
+ })
+ }
+}
+
+func Test_canonicalizeLinkname(t *testing.T) {
+ // We're lying that this thing is absolute, but that's not relevant for tests.
+ anchor := turbopath.AbsoluteSystemPath(filepath.Join("path", "to", "anchor"))
+
+ tests := []struct {
+ name string
+ processedName turbopath.AnchoredSystemPath
+ linkname string
+ canonicalUnix string
+ canonicalWindows string
+ }{
+ {
+ name: "hello world",
+ processedName: turbopath.AnchoredSystemPath("source"),
+ linkname: "target",
+ canonicalUnix: "path/to/anchor/target",
+ canonicalWindows: "path\\to\\anchor\\target",
+ },
+ {
+ name: "Unix path subdirectory traversal",
+ processedName: turbopath.AnchoredUnixPath("child/source").ToSystemPath(),
+ linkname: "../sibling/target",
+ canonicalUnix: "path/to/anchor/sibling/target",
+ canonicalWindows: "path\\to\\anchor\\sibling\\target",
+ },
+ {
+ name: "Windows path subdirectory traversal",
+ processedName: turbopath.AnchoredUnixPath("child/source").ToSystemPath(),
+ linkname: "..\\sibling\\target",
+ canonicalUnix: "path/to/anchor/child/..\\sibling\\target",
+ canonicalWindows: "path\\to\\anchor\\sibling\\target",
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ canonical := tt.canonicalUnix
+ if runtime.GOOS == "windows" {
+ canonical = tt.canonicalWindows
+ }
+ if got := canonicalizeLinkname(anchor, tt.processedName, tt.linkname); got != canonical {
+ t.Errorf("canonicalizeLinkname() = %v, want %v", got, canonical)
+ }
+ })
+ }
+}
+
+func Test_canonicalizeName(t *testing.T) {
+ tests := []struct {
+ name string
+ fileName string
+ want turbopath.AnchoredSystemPath
+ wantErr error
+ }{
+ {
+ name: "hello world",
+ fileName: "test.txt",
+ want: "test.txt",
+ },
+ {
+ name: "directory",
+ fileName: "something/",
+ want: "something",
+ },
+ {
+ name: "malformed name",
+ fileName: "//",
+ want: "",
+ wantErr: errNameMalformed,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := canonicalizeName(tt.fileName)
+ if tt.wantErr != nil && !errors.Is(err, tt.wantErr) {
+ t.Errorf("canonicalizeName() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("canonicalizeName() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func TestCacheItem_Restore(t *testing.T) {
+ tests := []struct {
+ name string
+ tarFiles []tarFile
+ want []turbopath.AnchoredSystemPath
+ }{
+ {
+ name: "duplicate restores",
+ tarFiles: []tarFile{
+ {
+ Header: &tar.Header{
+ Name: "target",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ Body: "target",
+ },
+ {
+ Header: &tar.Header{
+ Name: "source",
+ Linkname: "target",
+ Typeflag: tar.TypeSymlink,
+ Mode: 0777,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "one/",
+ Typeflag: tar.TypeDir,
+ Mode: 0755,
+ },
+ },
+ {
+ Header: &tar.Header{
+ Name: "one/two/",
+ Typeflag: tar.TypeDir,
+ Mode: 0755,
+ },
+ },
+ },
+ want: turbopath.AnchoredUnixPathArray{"target", "source", "one", "one/two"}.ToSystemPathArray(),
+ },
+ }
+ for _, tt := range tests {
+ getTestFunc := func(compressed bool) func(t *testing.T) {
+ return func(t *testing.T) {
+ var archivePath turbopath.AbsoluteSystemPath
+ if compressed {
+ archivePath = compressTar(t, generateTar(t, tt.tarFiles))
+ } else {
+ archivePath = generateTar(t, tt.tarFiles)
+ }
+ anchor := generateAnchor(t)
+
+ cacheItem, err := Open(archivePath)
+ assert.NilError(t, err, "Open")
+
+ restoreOutput, restoreErr := cacheItem.Restore(anchor)
+ if !reflect.DeepEqual(restoreOutput, tt.want) {
+ t.Errorf("#1 CacheItem.Restore() = %v, want %v", restoreOutput, tt.want)
+ }
+ assert.NilError(t, restoreErr, "Restore #1")
+ assert.NilError(t, cacheItem.Close(), "Close")
+
+ cacheItem2, err2 := Open(archivePath)
+ assert.NilError(t, err2, "Open")
+
+ restoreOutput2, restoreErr2 := cacheItem2.Restore(anchor)
+ if !reflect.DeepEqual(restoreOutput2, tt.want) {
+ t.Errorf("#2 CacheItem.Restore() = %v, want %v", restoreOutput2, tt.want)
+ }
+ assert.NilError(t, restoreErr2, "Restore #2")
+ assert.NilError(t, cacheItem2.Close(), "Close")
+ }
+ }
+ t.Run(tt.name+"zst", getTestFunc(true))
+ t.Run(tt.name, getTestFunc(false))
+ }
+}
diff --git a/cli/internal/chrometracing/chrometracing.go b/cli/internal/chrometracing/chrometracing.go
new file mode 100644
index 0000000..d9325fd
--- /dev/null
+++ b/cli/internal/chrometracing/chrometracing.go
@@ -0,0 +1,227 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package chrometracing writes per-process Chrome trace_event files that can be
+// loaded into chrome://tracing.
+package chrometracing
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/google/chrometracing/traceinternal"
+)
+
+var trace = struct {
+ start time.Time
+ pid uint64
+
+ fileMu sync.Mutex
+ file *os.File
+}{
+ pid: uint64(os.Getpid()),
+}
+
+var out = setup(false)
+
+// Path returns the full path of the chrome://tracing trace_event file for
+// display in log messages.
+func Path() string { return out }
+
+// EnableTracing turns on tracing, regardless of running in a test or
+// not. Tracing is enabled by default if the CHROMETRACING_DIR environment
+// variable is present and non-empty.
+func EnableTracing() {
+ trace.fileMu.Lock()
+ alreadyEnabled := trace.file != nil
+ trace.fileMu.Unlock()
+ if alreadyEnabled {
+ return
+ }
+ out = setup(true)
+}
+
+func setup(overrideEnable bool) string {
+ inTest := os.Getenv("TEST_TMPDIR") != ""
+ explicitlyEnabled := os.Getenv("CHROMETRACING_DIR") != ""
+ enableTracing := inTest || explicitlyEnabled || overrideEnable
+ if !enableTracing {
+ return ""
+ }
+
+ var err error
+ dir := os.Getenv("TEST_UNDECLARED_OUTPUTS_DIR")
+ if dir == "" {
+ dir = os.Getenv("CHROMETRACING_DIR")
+ }
+ if dir == "" {
+ dir = os.TempDir()
+ }
+ fn := filepath.Join(dir, fmt.Sprintf("%s.%d.trace", filepath.Base(os.Args[0]), trace.pid))
+ trace.file, err = os.OpenFile(fn, os.O_WRONLY|os.O_CREATE|os.O_TRUNC|os.O_EXCL, 0644)
+ if err != nil {
+ // Using the log package from func init results in an error message
+ // being printed.
+ fmt.Fprintf(os.Stderr, "continuing without tracing: %v\n", err)
+ return ""
+ }
+
+ // We only ever open a JSON array. Ending the array is optional as per
+ // go/trace_event, so that traces which did not finish cleanly can still be read.
+ trace.file.Write([]byte{'['})
+ trace.start = time.Now()
+
+ writeEvent(&traceinternal.ViewerEvent{
+ Name: "process_name",
+ Phase: "M", // Metadata Event
+ Pid: trace.pid,
+ Tid: trace.pid,
+ Arg: struct {
+ Name string `json:"name"`
+ }{
+ Name: strings.Join(os.Args, " "),
+ },
+ })
+ return fn
+}
+
+func writeEvent(ev *traceinternal.ViewerEvent) {
+ b, err := json.Marshal(&ev)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "%v\n", err)
+ return
+ }
+ trace.fileMu.Lock()
+ defer trace.fileMu.Unlock()
+ if _, err = trace.file.Write(b); err != nil {
+ fmt.Fprintf(os.Stderr, "%v\n", err)
+ return
+ }
+ if _, err = trace.file.Write([]byte{',', '\n'}); err != nil {
+ fmt.Fprintf(os.Stderr, "%v\n", err)
+ return
+ }
+}
+
+const (
+ begin = "B"
+ end = "E"
+)
+
+// A PendingEvent represents an ongoing unit of work. The begin trace event has
+// already been written, and calling Done will write the end trace event.
+type PendingEvent struct {
+ name string
+ tid uint64
+}
+
+// Done writes the end trace event for this unit of work.
+func (pe *PendingEvent) Done() {
+ if pe == nil || pe.name == "" || trace.file == nil {
+ return
+ }
+ writeEvent(&traceinternal.ViewerEvent{
+ Name: pe.name,
+ Phase: end,
+ Pid: trace.pid,
+ Tid: pe.tid,
+ Time: float64(time.Since(trace.start).Microseconds()),
+ })
+ releaseTid(pe.tid)
+}
+
+// Event logs a unit of work. To instrument a Go function, use e.g.:
+//
+// func calcPi() {
+// defer chrometracing.Event("calculate pi").Done()
+// // …
+// }
+//
+// For more finely-granular traces, use e.g.:
+//
+// for _, cmd := range commands {
+// ev := chrometracing.Event("initialize " + cmd.Name)
+// cmd.Init()
+// ev.Done()
+// }
+func Event(name string) *PendingEvent {
+ if trace.file == nil {
+ return &PendingEvent{}
+ }
+ tid := tid()
+ writeEvent(&traceinternal.ViewerEvent{
+ Name: name,
+ Phase: begin,
+ Pid: trace.pid,
+ Tid: tid,
+ Time: float64(time.Since(trace.start).Microseconds()),
+ })
+ return &PendingEvent{
+ name: name,
+ tid: tid,
+ }
+}
+
+// tids is a chrome://tracing thread id pool. Go does not permit accessing the
+// goroutine id, so we need to maintain our own identifier. The chrome://tracing
+// file format requires a numeric thread id, so we just increment whenever we
+// need a thread id, and reuse the ones no longer in use.
+//
+// In practice, parallelized sections of the code (many goroutines) end up using
+// only as few thread ids as are concurrently in use, and the rest of the events
+// mirror the code call stack nicely. See e.g. http://screen/7MPcAcvXQNUE3JZ
+var tids struct {
+ sync.Mutex
+
+ // We allocate chrome://tracing thread ids based on the index of the
+ // corresponding entry in the used slice.
+ used []bool
+
+ // next points to the earliest unused tid to consider for the next tid to
+ // hand out. This is purely a performance optimization to avoid O(n) slice
+ // iteration.
+ next int
+}
+
+func tid() uint64 {
+ tids.Lock()
+ defer tids.Unlock()
+ // re-use released tids if any
+ for t := tids.next; t < len(tids.used); t++ {
+ if !tids.used[t] {
+ tids.used[t] = true
+ tids.next = t + 1
+ return uint64(t)
+ }
+ }
+ // allocate a new tid
+ t := len(tids.used)
+ tids.used = append(tids.used, true)
+ tids.next = t + 1
+ return uint64(t)
+}
+
+func releaseTid(t uint64) {
+ tids.Lock()
+ defer tids.Unlock()
+ tids.used[int(t)] = false
+ if tids.next > int(t) {
+ tids.next = int(t)
+ }
+}
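Reviewer note: a usage sketch (not part of this patch) for concurrent instrumentation; tids are handed out per in-flight event and recycled by Done. The helper is hypothetical and sync is assumed imported.

    func traceAll(names []string) {
        var wg sync.WaitGroup
        for _, name := range names {
            wg.Add(1)
            go func(name string) {
                defer wg.Done()
                // Begin and end events pair up through the returned PendingEvent.
                defer Event("process " + name).Done()
                // ... unit of work ...
            }(name)
        }
        wg.Wait()
    }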
diff --git a/cli/internal/chrometracing/chrometracing_close.go b/cli/internal/chrometracing/chrometracing_close.go
new file mode 100644
index 0000000..1b3a7b9
--- /dev/null
+++ b/cli/internal/chrometracing/chrometracing_close.go
@@ -0,0 +1,26 @@
+package chrometracing
+
+// Close overwrites the trailing (,\n) with (]\n) and closes the trace file.
+// Close is implemented in a separate file to keep a separation between custom
+// code and upstream from github.com/google/chrometracing. Additionally, we can
+// enable linting for code we author, while leaving upstream code alone.
+func Close() error {
+ trace.fileMu.Lock()
+ defer trace.fileMu.Unlock()
+ // If tracing was never enabled there is no file to finalize.
+ if trace.file == nil {
+ return nil
+ }
+ // Seek backwards two bytes (,\n)
+ if _, err := trace.file.Seek(-2, 1); err != nil {
+ return err
+ }
+ // Write 1 byte, ']', leaving the trailing '\n' in place
+ if _, err := trace.file.Write([]byte{']'}); err != nil {
+ return err
+ }
+ // Force the filesystem to write to disk
+ if err := trace.file.Sync(); err != nil {
+ return err
+ }
+ if err := trace.file.Close(); err != nil {
+ return err
+ }
+ return nil
+}
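Reviewer note: a lifecycle sketch (not part of this patch) from a hypothetical caller outside this package; with LIFO defer ordering the final Done fires before Close finalizes the JSON array.

    func runWithTracing() error {
        chrometracing.EnableTracing()
        defer func() {
            if err := chrometracing.Close(); err != nil {
                fmt.Fprintf(os.Stderr, "closing trace %s: %v\n", chrometracing.Path(), err)
            }
        }()
        defer chrometracing.Event("run").Done()
        // ... actual work ...
        return nil
    }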
diff --git a/cli/internal/ci/ci.go b/cli/internal/ci/ci.go
new file mode 100644
index 0000000..a22ad78
--- /dev/null
+++ b/cli/internal/ci/ci.go
@@ -0,0 +1,58 @@
+// Package ci is a simple utility to check whether a program is executing under a common CI/CD/PaaS vendor.
+// This is a partial port of https://github.com/watson/ci-info
+package ci
+
+import "os"
+
+var isCI = os.Getenv("BUILD_ID") != "" || os.Getenv("BUILD_NUMBER") != "" || os.Getenv("CI") != "" || os.Getenv("CI_APP_ID") != "" || os.Getenv("CI_BUILD_ID") != "" || os.Getenv("CI_BUILD_NUMBER") != "" || os.Getenv("CI_NAME") != "" || os.Getenv("CONTINUOUS_INTEGRATION") != "" || os.Getenv("RUN_ID") != "" || os.Getenv("TEAMCITY_VERSION") != ""
+
+// IsCi returns true if the program is executing in a CI/CD environment
+func IsCi() bool {
+ return isCI
+}
+
+// Name returns the name of the CI vendor
+func Name() string {
+ return Info().Name
+}
+
+// Constant returns the name of the CI vendor as a constant
+func Constant() string {
+ return Info().Constant
+}
+
+// Info returns information about a CI vendor
+func Info() Vendor {
+ // check both the env var key and value
+ for _, env := range Vendors {
+ if env.EvalEnv != nil {
+ for name, value := range env.EvalEnv {
+ if os.Getenv(name) == value {
+ return env
+ }
+ }
+ } else {
+ // check for any of the listed env var keys, with any value
+ if len(env.Env.Any) > 0 {
+ for _, envVar := range env.Env.Any {
+ if os.Getenv(envVar) != "" {
+ return env
+ }
+ }
+ // check for all of the listed env var keys, with any value
+ } else if len(env.Env.All) > 0 {
+ all := true
+ for _, envVar := range env.Env.All {
+ if os.Getenv(envVar) == "" {
+ all = false
+ break
+ }
+ }
+ if all {
+ return env
+ }
+ }
+ }
+ }
+ return Vendor{}
+}
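Reviewer note: a consumption sketch (not part of this patch); the helper and its log line are illustrative only, and fmt is assumed imported.

    func logCIEnvironment() {
        if ci.IsCi() {
            fmt.Printf("detected CI vendor: %s (constant %s)\n", ci.Name(), ci.Constant())
        }
    }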
diff --git a/cli/internal/ci/ci_test.go b/cli/internal/ci/ci_test.go
new file mode 100644
index 0000000..333ff61
--- /dev/null
+++ b/cli/internal/ci/ci_test.go
@@ -0,0 +1,105 @@
+package ci
+
+import (
+ "os"
+ "reflect"
+ "strings"
+ "testing"
+)
+
+func getVendor(name string) Vendor {
+ for _, v := range Vendors {
+ if v.Name == name {
+ return v
+ }
+ }
+ return Vendor{}
+}
+
+func TestInfo(t *testing.T) {
+ tests := []struct {
+ name string
+ setEnv []string
+ want Vendor
+ }{
+ {
+ name: "AppVeyor",
+ setEnv: []string{"APPVEYOR"},
+ want: getVendor("AppVeyor"),
+ },
+ {
+ name: "Vercel",
+ setEnv: []string{"VERCEL", "NOW_BUILDER"},
+ want: getVendor("Vercel"),
+ },
+ {
+ name: "Render",
+ setEnv: []string{"RENDER"},
+ want: getVendor("Render"),
+ },
+ {
+ name: "Netlify",
+ setEnv: []string{"NETLIFY"},
+ want: getVendor("Netlify CI"),
+ },
+ {
+ name: "Jenkins",
+ setEnv: []string{"BUILD_ID", "JENKINS_URL"},
+ want: getVendor("Jenkins"),
+ },
+ {
+ name: "Jenkins - failing",
+ setEnv: []string{"BUILD_ID"},
+ want: getVendor(""),
+ },
+ {
+ name: "GitHub Actions",
+ setEnv: []string{"GITHUB_ACTIONS"},
+ want: getVendor("GitHub Actions"),
+ },
+ {
+ name: "Codeship",
+ setEnv: []string{"CI_NAME=codeship"},
+ want: getVendor("Codeship"),
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ // unset existing envs
+ liveCi := ""
+ if Name() == "GitHub Actions" {
+ liveCi = os.Getenv("GITHUB_ACTIONS")
+ err := os.Unsetenv("GITHUB_ACTIONS")
+ if err != nil {
+ t.Errorf("Error un-setting GITHUB_ACTIONS env: %s", err)
+ }
+ }
+ // set envs
+ for _, env := range tt.setEnv {
+ envParts := strings.Split(env, "=")
+ val := "some value"
+ if len(envParts) > 1 {
+ val = envParts[1]
+ }
+ err := os.Setenv(envParts[0], val)
+ if err != nil {
+ t.Errorf("Error setting %s for %s test", envParts[0], tt.name)
+ }
+ defer os.Unsetenv(envParts[0]) //nolint errcheck
+
+ }
+ // run test
+ if got := Info(); !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("Info() = %v, want %v", got, tt.want)
+ }
+
+ // reset env
+ if Name() == "GitHub Actions" {
+ err := os.Setenv("GITHUB_ACTIONS", liveCi)
+ if err != nil {
+ t.Errorf("Error re-setting GITHUB_ACTIONS env: %s", err)
+ }
+ }
+ })
+ }
+}
diff --git a/cli/internal/ci/vendors.go b/cli/internal/ci/vendors.go
new file mode 100644
index 0000000..13bce77
--- /dev/null
+++ b/cli/internal/ci/vendors.go
@@ -0,0 +1,253 @@
+package ci
+
+type vendorEnvs struct {
+ Any []string
+ All []string
+}
+
+// Vendor describes a CI/CD vendor execution environment
+type Vendor struct {
+ // Name is the name of the vendor
+ Name string
+ // Constant is the environment variable prefix used by the vendor
+ Constant string
+ // Env is one or many environment variables that can be used to quickly determine the vendor (using a simple os.Getenv check)
+ Env vendorEnvs
+ // EvalEnv is key/value map of environment variables that can be used to quickly determine the vendor
+ EvalEnv map[string]string
+}
+
+// Vendors is a list of common CI/CD vendors (from https://github.com/watson/ci-info/blob/master/vendors.json)
+var Vendors = []Vendor{
+ {
+ Name: "Appcircle",
+ Constant: "APPCIRCLE",
+ Env: vendorEnvs{Any: []string{"AC_APPCIRCLE"}},
+ },
+ {
+ Name: "AppVeyor",
+ Constant: "APPVEYOR",
+ Env: vendorEnvs{Any: []string{"APPVEYOR"}},
+ },
+ {
+ Name: "AWS CodeBuild",
+ Constant: "CODEBUILD",
+ Env: vendorEnvs{Any: []string{"CODEBUILD_BUILD_ARN"}},
+ },
+ {
+ Name: "Azure Pipelines",
+ Constant: "AZURE_PIPELINES",
+ Env: vendorEnvs{Any: []string{"SYSTEM_TEAMFOUNDATIONCOLLECTIONURI"}},
+ },
+ {
+ Name: "Bamboo",
+ Constant: "BAMBOO",
+ Env: vendorEnvs{Any: []string{"bamboo_planKey"}},
+ },
+ {
+ Name: "Bitbucket Pipelines",
+ Constant: "BITBUCKET",
+ Env: vendorEnvs{Any: []string{"BITBUCKET_COMMIT"}},
+ },
+ {
+ Name: "Bitrise",
+ Constant: "BITRISE",
+ Env: vendorEnvs{Any: []string{"BITRISE_IO"}},
+ },
+ {
+ Name: "Buddy",
+ Constant: "BUDDY",
+ Env: vendorEnvs{Any: []string{"BUDDY_WORKSPACE_ID"}},
+ },
+ {
+ Name: "Buildkite",
+ Constant: "BUILDKITE",
+ Env: vendorEnvs{Any: []string{"BUILDKITE"}},
+ },
+ {
+ Name: "CircleCI",
+ Constant: "CIRCLE",
+ Env: vendorEnvs{Any: []string{"CIRCLECI"}},
+ },
+ {
+ Name: "Cirrus CI",
+ Constant: "CIRRUS",
+ Env: vendorEnvs{Any: []string{"CIRRUS_CI"}},
+ },
+ {
+ Name: "Codefresh",
+ Constant: "CODEFRESH",
+ Env: vendorEnvs{Any: []string{"CF_BUILD_ID"}},
+ },
+ {
+ Name: "Codemagic",
+ Constant: "CODEMAGIC",
+ Env: vendorEnvs{Any: []string{"CM_BUILD_ID"}},
+ },
+ {
+ Name: "Codeship",
+ Constant: "CODESHIP",
+ EvalEnv: map[string]string{
+ "CI_NAME": "codeship",
+ },
+ },
+ {
+ Name: "Drone",
+ Constant: "DRONE",
+ Env: vendorEnvs{Any: []string{"DRONE"}},
+ },
+ {
+ Name: "dsari",
+ Constant: "DSARI",
+ Env: vendorEnvs{Any: []string{"DSARI"}},
+ },
+ {
+ Name: "Expo Application Services",
+ Constant: "EAS",
+ Env: vendorEnvs{Any: []string{"EAS_BUILD"}},
+ },
+ {
+ Name: "GitHub Actions",
+ Constant: "GITHUB_ACTIONS",
+ Env: vendorEnvs{Any: []string{"GITHUB_ACTIONS"}},
+ },
+ {
+ Name: "GitLab CI",
+ Constant: "GITLAB",
+ Env: vendorEnvs{Any: []string{"GITLAB_CI"}},
+ },
+ {
+ Name: "GoCD",
+ Constant: "GOCD",
+ Env: vendorEnvs{Any: []string{"GO_PIPELINE_LABEL"}},
+ },
+ {
+ Name: "Google Cloud Build",
+ Constant: "GOOGLE_CLOUD_BUILD",
+ Env: vendorEnvs{Any: []string{"BUILDER_OUTPUT"}},
+ },
+ {
+ Name: "LayerCI",
+ Constant: "LAYERCI",
+ Env: vendorEnvs{Any: []string{"LAYERCI"}},
+ },
+ {
+ Name: "Gerrit",
+ Constant: "GERRIT",
+ Env: vendorEnvs{Any: []string{"GERRIT_PROJECT"}},
+ },
+ {
+ Name: "Hudson",
+ Constant: "HUDSON",
+ Env: vendorEnvs{Any: []string{"HUDSON"}},
+ },
+ {
+ Name: "Jenkins",
+ Constant: "JENKINS",
+ Env: vendorEnvs{All: []string{"JENKINS_URL", "BUILD_ID"}},
+ },
+ {
+ Name: "Magnum CI",
+ Constant: "MAGNUM",
+ Env: vendorEnvs{Any: []string{"MAGNUM"}},
+ },
+ {
+ Name: "Netlify CI",
+ Constant: "NETLIFY",
+ Env: vendorEnvs{Any: []string{"NETLIFY"}},
+ },
+ {
+ Name: "Nevercode",
+ Constant: "NEVERCODE",
+ Env: vendorEnvs{Any: []string{"NEVERCODE"}},
+ },
+ {
+ Name: "ReleaseHub",
+ Constant: "RELEASEHUB",
+ Env: vendorEnvs{Any: []string{"RELEASE_BUILD_ID"}},
+ },
+ {
+ Name: "Render",
+ Constant: "RENDER",
+ Env: vendorEnvs{Any: []string{"RENDER"}},
+ },
+ {
+ Name: "Sail CI",
+ Constant: "SAIL",
+ Env: vendorEnvs{Any: []string{"SAILCI"}},
+ },
+ {
+ Name: "Screwdriver",
+ Constant: "SCREWDRIVER",
+ Env: vendorEnvs{Any: []string{"SCREWDRIVER"}},
+ },
+ {
+ Name: "Semaphore",
+ Constant: "SEMAPHORE",
+ Env: vendorEnvs{Any: []string{"SEMAPHORE"}},
+ },
+ {
+ Name: "Shippable",
+ Constant: "SHIPPABLE",
+ Env: vendorEnvs{Any: []string{"SHIPPABLE"}},
+ },
+ {
+ Name: "Solano CI",
+ Constant: "SOLANO",
+ Env: vendorEnvs{Any: []string{"TDDIUM"}},
+ },
+ {
+ Name: "Sourcehut",
+ Constant: "SOURCEHUT",
+ EvalEnv: map[string]string{
+ "CI_NAME": "sourcehut",
+ },
+ },
+ {
+ Name: "Strider CD",
+ Constant: "STRIDER",
+ Env: vendorEnvs{Any: []string{"STRIDER"}},
+ },
+ {
+ Name: "TaskCluster",
+ Constant: "TASKCLUSTER",
+ Env: vendorEnvs{All: []string{"TASK_ID", "RUN_ID"}},
+ },
+ {
+ Name: "TeamCity",
+ Constant: "TEAMCITY",
+ Env: vendorEnvs{Any: []string{"TEAMCITY_VERSION"}},
+ },
+ {
+ Name: "Travis CI",
+ Constant: "TRAVIS",
+ Env: vendorEnvs{Any: []string{"TRAVIS"}},
+ },
+ {
+ Name: "Vercel",
+ Constant: "VERCEL",
+ Env: vendorEnvs{Any: []string{"NOW_BUILDER", "VERCEL"}},
+ },
+ {
+ Name: "Visual Studio App Center",
+ Constant: "APPCENTER",
+ Env: vendorEnvs{Any: []string{"APPCENTER"}},
+ },
+ {
+ Name: "Woodpecker",
+ Constant: "WOODPECKER",
+ EvalEnv: map[string]string{
+ "CI": "woodpecker",
+ },
+ },
+ {
+ Name: "Xcode Cloud",
+ Constant: "XCODE_CLOUD",
+ Env: vendorEnvs{Any: []string{"CI_XCODE_PROJECT"}},
+ },
+ {
+ Name: "Xcode Server",
+ Constant: "XCODE_SERVER",
+ Env: vendorEnvs{Any: []string{"XCS"}},
+ },
+}
diff --git a/cli/internal/client/analytics.go b/cli/internal/client/analytics.go
new file mode 100644
index 0000000..71381f0
--- /dev/null
+++ b/cli/internal/client/analytics.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "encoding/json"
+)
+
+// RecordAnalyticsEvents is a specific method for POSTing events to Vercel
+func (c *APIClient) RecordAnalyticsEvents(events []map[string]interface{}) error {
+ body, err := json.Marshal(events)
+ if err != nil {
+ return err
+ }
+
+ // We don't care about the response here
+ if _, err := c.JSONPost("/v8/artifacts/events", body); err != nil {
+ return err
+ }
+
+ return nil
+}
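+
+// Each event is an arbitrary JSON object; for example (shape taken from the
+// tests, not a fixed schema):
+//
+//	{"sessionId": "...", "hash": "foo", "source": "LOCAL", "event": "hit"}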
diff --git a/cli/internal/client/cache.go b/cli/internal/client/cache.go
new file mode 100644
index 0000000..11ad87a
--- /dev/null
+++ b/cli/internal/client/cache.go
@@ -0,0 +1,167 @@
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/hashicorp/go-retryablehttp"
+ "github.com/vercel/turbo/cli/internal/ci"
+ "github.com/vercel/turbo/cli/internal/util"
+)
+
+// PutArtifact uploads an artifact associated with a given hash string to the remote cache
+func (c *APIClient) PutArtifact(hash string, artifactBody []byte, duration int, tag string) error {
+ if err := c.okToRequest(); err != nil {
+ return err
+ }
+ params := url.Values{}
+ c.addTeamParam(&params)
+ // only add a ? if it's actually needed (makes logging cleaner)
+ encoded := params.Encode()
+ if encoded != "" {
+ encoded = "?" + encoded
+ }
+
+ requestURL := c.makeURL("/v8/artifacts/" + hash + encoded)
+ allowAuth := true
+ if c.usePreflight {
+ resp, latestRequestURL, err := c.doPreflight(requestURL, http.MethodPut, "Content-Type, x-artifact-duration, Authorization, User-Agent, x-artifact-tag")
+ if err != nil {
+ return fmt.Errorf("pre-flight request failed before trying to store in HTTP cache: %w", err)
+ }
+ requestURL = latestRequestURL
+ headers := resp.Header.Get("Access-Control-Allow-Headers")
+ allowAuth = strings.Contains(strings.ToLower(headers), strings.ToLower("Authorization"))
+ }
+
+ req, err := retryablehttp.NewRequest(http.MethodPut, requestURL, artifactBody)
+ // Check the error before using req; NewRequest returns a nil request on failure.
+ if err != nil {
+ return fmt.Errorf("[WARNING] Invalid cache URL: %w", err)
+ }
+ req.Header.Set("Content-Type", "application/octet-stream")
+ req.Header.Set("x-artifact-duration", fmt.Sprintf("%v", duration))
+ if allowAuth {
+ req.Header.Set("Authorization", "Bearer "+c.token)
+ }
+ req.Header.Set("User-Agent", c.userAgent())
+ if ci.IsCi() {
+ req.Header.Set("x-artifact-client-ci", ci.Constant())
+ }
+ if tag != "" {
+ req.Header.Set("x-artifact-tag", tag)
+ }
+
+ resp, err := c.HTTPClient.Do(req)
+ if err != nil {
+ return fmt.Errorf("[ERROR] Failed to store files in HTTP cache: %w", err)
+ }
+ defer func() { _ = resp.Body.Close() }()
+ if resp.StatusCode == http.StatusForbidden {
+ return c.handle403(resp.Body)
+ }
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("[ERROR] Failed to store files in HTTP cache: %s against URL %s", resp.Status, requestURL)
+ }
+ return nil
+}
+
+// FetchArtifact attempts to retrieve the build artifact with the given hash from the remote cache
+func (c *APIClient) FetchArtifact(hash string) (*http.Response, error) {
+ return c.getArtifact(hash, http.MethodGet)
+}
+
+// ArtifactExists attempts to determine if the build artifact with the given hash exists in the Remote Caching server
+func (c *APIClient) ArtifactExists(hash string) (*http.Response, error) {
+ return c.getArtifact(hash, http.MethodHead)
+}
+
+// getArtifact attempts to retrieve the build artifact with the given hash from the remote cache
+func (c *APIClient) getArtifact(hash string, httpMethod string) (*http.Response, error) {
+ if httpMethod != http.MethodHead && httpMethod != http.MethodGet {
+ return nil, fmt.Errorf("invalid httpMethod %v, expected GET or HEAD", httpMethod)
+ }
+
+ if err := c.okToRequest(); err != nil {
+ return nil, err
+ }
+ params := url.Values{}
+ c.addTeamParam(&params)
+ // only add a ? if it's actually needed (makes logging cleaner)
+ encoded := params.Encode()
+ if encoded != "" {
+ encoded = "?" + encoded
+ }
+
+ requestURL := c.makeURL("/v8/artifacts/" + hash + encoded)
+ allowAuth := true
+ if c.usePreflight {
+ resp, latestRequestURL, err := c.doPreflight(requestURL, http.MethodGet, "Authorization, User-Agent")
+ if err != nil {
+ return nil, fmt.Errorf("pre-flight request failed before trying to fetch files in HTTP cache: %w", err)
+ }
+ requestURL = latestRequestURL
+ headers := resp.Header.Get("Access-Control-Allow-Headers")
+ allowAuth = strings.Contains(strings.ToLower(headers), strings.ToLower("Authorization"))
+ }
+
+ req, err := retryablehttp.NewRequest(httpMethod, requestURL, nil)
+ // Check the error before using req; NewRequest returns a nil request on failure.
+ if err != nil {
+ return nil, fmt.Errorf("invalid cache URL: %w", err)
+ }
+ if allowAuth {
+ req.Header.Set("Authorization", "Bearer "+c.token)
+ }
+ req.Header.Set("User-Agent", c.userAgent())
+
+ resp, err := c.HTTPClient.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to fetch artifact: %v", err)
+ } else if resp.StatusCode == http.StatusForbidden {
+ err = c.handle403(resp.Body)
+ _ = resp.Body.Close()
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *APIClient) handle403(body io.Reader) error {
+ raw, err := ioutil.ReadAll(body)
+ if err != nil {
+ return fmt.Errorf("failed to read response %v", err)
+ }
+ apiError := &apiError{}
+ err = json.Unmarshal(raw, apiError)
+ if err != nil {
+ return fmt.Errorf("failed to read response (%v): %v", string(raw), err)
+ }
+ disabledErr, err := apiError.cacheDisabled()
+ if err != nil {
+ return err
+ }
+ return disabledErr
+}
+
+type apiError struct {
+ Code string `json:"code"`
+ Message string `json:"message"`
+}
+
+func (ae *apiError) cacheDisabled() (*util.CacheDisabledError, error) {
+ if strings.HasPrefix(ae.Code, "remote_caching_") {
+ statusString := ae.Code[len("remote_caching_"):]
+ status, err := util.CachingStatusFromString(statusString)
+ if err != nil {
+ return nil, err
+ }
+ return &util.CacheDisabledError{
+ Status: status,
+ Message: ae.Message,
+ }, nil
+ }
+ return nil, fmt.Errorf("unknown status %v: %v", ae.Code, ae.Message)
+}
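+
+// For example, a 403 response body of
+//
+//	{"code": "remote_caching_disabled", "message": "caching disabled"}
+//
+// is converted into a *util.CacheDisabledError carrying the "disabled" status.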
diff --git a/cli/internal/client/client.go b/cli/internal/client/client.go
new file mode 100644
index 0000000..822b2df
--- /dev/null
+++ b/cli/internal/client/client.go
@@ -0,0 +1,309 @@
+// Package client implements some interfaces and convenience methods to interact with Vercel APIs and Remote Cache
+package client
+
+import (
+ "context"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "runtime"
+ "strings"
+ "sync/atomic"
+ "time"
+
+ "github.com/hashicorp/go-hclog"
+ "github.com/hashicorp/go-retryablehttp"
+ "github.com/vercel/turbo/cli/internal/ci"
+)
+
+// APIClient is the main interface for making network requests to Vercel
+type APIClient struct {
+ // baseURL is the API's base URL
+ baseURL string
+ token string
+ turboVersion string
+
+ // Must be used via atomic package
+ currentFailCount uint64
+ HTTPClient *retryablehttp.Client
+ teamID string
+ teamSlug string
+ // Whether or not to send preflight requests before uploads
+ usePreflight bool
+}
+
+// ErrTooManyFailures is returned from remote cache API methods after `maxRemoteFailCount` errors have occurred
+var ErrTooManyFailures = errors.New("skipping HTTP Request, too many failures have occurred")
+
+// _maxRemoteFailCount is the number of failed requests before we stop trying to upload/download
+// artifacts to the remote cache
+const _maxRemoteFailCount = uint64(3)
+
+// SetToken updates the APIClient's Token
+func (c *APIClient) SetToken(token string) {
+ c.token = token
+}
+
+// RemoteConfig holds the authentication and endpoint details for the API client
+type RemoteConfig struct {
+ Token string
+ TeamID string
+ TeamSlug string
+ APIURL string
+}
+
+// Opts holds values for configuring the behavior of the API client
+type Opts struct {
+ UsePreflight bool
+ Timeout uint64
+}
+
+// ClientTimeout is the default client timeout in seconds; exported for use in run.go
+const ClientTimeout uint64 = 20
+
+// NewClient creates a new APIClient
+func NewClient(remoteConfig RemoteConfig, logger hclog.Logger, turboVersion string, opts Opts) *APIClient {
+ client := &APIClient{
+ baseURL: remoteConfig.APIURL,
+ turboVersion: turboVersion,
+ HTTPClient: &retryablehttp.Client{
+ HTTPClient: &http.Client{
+ Timeout: time.Duration(opts.Timeout) * time.Second,
+ },
+ RetryWaitMin: 2 * time.Second,
+ RetryWaitMax: 10 * time.Second,
+ RetryMax: 2,
+ Backoff: retryablehttp.DefaultBackoff,
+ Logger: logger,
+ },
+ token: remoteConfig.Token,
+ teamID: remoteConfig.TeamID,
+ teamSlug: remoteConfig.TeamSlug,
+ usePreflight: opts.UsePreflight,
+ }
+ client.HTTPClient.CheckRetry = client.checkRetry
+ return client
+}
+
+// hasUser returns true if we have credentials for a user
+func (c *APIClient) hasUser() bool {
+ return c.token != ""
+}
+
+// IsLinked returns true if we have a user and linked team
+func (c *APIClient) IsLinked() bool {
+ return c.hasUser() && (c.teamID != "" || c.teamSlug != "")
+}
+
+// GetTeamID returns the currently configured team id
+func (c *APIClient) GetTeamID() string {
+ return c.teamID
+}
+
+func (c *APIClient) retryCachePolicy(resp *http.Response, err error) (bool, error) {
+ if err != nil {
+ if errors.As(err, &x509.UnknownAuthorityError{}) {
+ // Don't retry if the error was due to TLS cert verification failure.
+ atomic.AddUint64(&c.currentFailCount, 1)
+ return false, err
+ }
+ atomic.AddUint64(&c.currentFailCount, 1)
+ return true, nil
+ }
+
+ // 429 Too Many Requests is recoverable. The server may also set a
+ // Retry-After response header to indicate when it will be available
+ // to start processing requests from the client.
+ if resp.StatusCode == http.StatusTooManyRequests {
+ atomic.AddUint64(&c.currentFailCount, 1)
+ return true, nil
+ }
+
+ // Check the response code. We retry on 500-range responses to allow
+ // the server time to recover, as 500's are typically not permanent
+ // errors and may relate to outages on the server side. This will catch
+ // invalid response codes as well, like 0 and 999.
+ if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != 501) {
+ atomic.AddUint64(&c.currentFailCount, 1)
+ return true, fmt.Errorf("unexpected HTTP status %s", resp.Status)
+ }
+
+ // swallow the error and stop retrying
+ return false, nil
+}
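+
+// In short: transport errors (other than TLS certificate verification
+// failures) and 429/5xx responses (except 501) are retried and count toward
+// the failure limit; anything else stops the retry loop.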
+
+func (c *APIClient) checkRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
+ // do not retry on context.Canceled or context.DeadlineExceeded
+ if ctx.Err() != nil {
+ atomic.AddUint64(&c.currentFailCount, 1)
+ return false, ctx.Err()
+ }
+
+ // we're squashing the error from the request and substituting any error that might come
+ // from our retry policy.
+ shouldRetry, err := c.retryCachePolicy(resp, err)
+ if shouldRetry {
+ // Our policy says it's ok to retry, but we need to check the failure count
+ if retryErr := c.okToRequest(); retryErr != nil {
+ return false, retryErr
+ }
+ }
+ return shouldRetry, err
+}
+
+// okToRequest returns nil if it's ok to make a request, and returns the error to
+// return to the caller if a request is not allowed
+func (c *APIClient) okToRequest() error {
+ if atomic.LoadUint64(&c.currentFailCount) < _maxRemoteFailCount {
+ return nil
+ }
+ return ErrTooManyFailures
+}
+
+func (c *APIClient) makeURL(endpoint string) string {
+ return fmt.Sprintf("%v%v", c.baseURL, endpoint)
+}
+
+func (c *APIClient) userAgent() string {
+ return fmt.Sprintf("turbo %v %v %v (%v)", c.turboVersion, runtime.Version(), runtime.GOOS, runtime.GOARCH)
+}
+
+// doPreflight returns response with closed body, latest request url, and any errors to the caller
+func (c *APIClient) doPreflight(requestURL string, requestMethod string, requestHeaders string) (*http.Response, string, error) {
+ req, err := retryablehttp.NewRequest(http.MethodOptions, requestURL, nil)
+ // Check the error before using req; NewRequest returns a nil request on failure.
+ if err != nil {
+ return nil, requestURL, fmt.Errorf("[WARNING] Invalid cache URL: %w", err)
+ }
+ req.Header.Set("User-Agent", c.userAgent())
+ req.Header.Set("Access-Control-Request-Method", requestMethod)
+ req.Header.Set("Access-Control-Request-Headers", requestHeaders)
+ req.Header.Set("Authorization", "Bearer "+c.token)
+
+ // If resp is not nil, ignore any error: it is most likely unimportant
+ // for the preflight request to handle, and the follow-up request will
+ // surface it.
+ resp, err := c.HTTPClient.Do(req)
+ if resp == nil {
+ return resp, requestURL, err
+ }
+ defer resp.Body.Close() //nolint:golint,errcheck // nothing to do
+ // The client will continue following 307, 308 redirects until it hits
+ // max redirects, gets an error, or gets a normal response.
+ // Get the url from the Location header or get the url used in the last
+ // request (could have changed after following redirects).
+ // Note that net/http client does not continue redirecting the preflight
+ // request with the OPTIONS method for 301, 302, and 303 redirects.
+ // See golang/go Issue 18570.
+ if locationURL, err := resp.Location(); err == nil {
+ requestURL = locationURL.String()
+ } else {
+ requestURL = resp.Request.URL.String()
+ }
+ return resp, requestURL, nil
+}
+
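+// addTeamParam adds a teamId query parameter when the configured team ID
+// looks like an ID (i.e. has the "team_" prefix), and a slug parameter when
+// a team slug is configured.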
+func (c *APIClient) addTeamParam(params *url.Values) {
+ if c.teamID != "" && strings.HasPrefix(c.teamID, "team_") {
+ params.Add("teamId", c.teamID)
+ }
+ if c.teamSlug != "" {
+ params.Add("slug", c.teamSlug)
+ }
+}
+
+// JSONPatch sends a byte array (json.marshalled payload) to a given endpoint with PATCH
+func (c *APIClient) JSONPatch(endpoint string, body []byte) ([]byte, error) {
+ resp, err := c.request(endpoint, http.MethodPatch, body)
+ if err != nil {
+ return nil, err
+ }
+
+ rawResponse, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read response %v", err)
+ }
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("%s", string(rawResponse))
+ }
+
+ return rawResponse, nil
+}
+
+// JSONPost sends a byte array (json.marshalled payload) to a given endpoint with POST
+func (c *APIClient) JSONPost(endpoint string, body []byte) ([]byte, error) {
+ resp, err := c.request(endpoint, http.MethodPost, body)
+ if err != nil {
+ return nil, err
+ }
+
+ rawResponse, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read response %v", err)
+ }
+
+ // For non 200/201 status codes, return the response body as an error
+ if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
+ return nil, fmt.Errorf("%s", string(rawResponse))
+ }
+
+ return rawResponse, nil
+}
+
+func (c *APIClient) request(endpoint string, method string, body []byte) (*http.Response, error) {
+ if err := c.okToRequest(); err != nil {
+ return nil, err
+ }
+
+ params := url.Values{}
+ c.addTeamParam(&params)
+ encoded := params.Encode()
+ if encoded != "" {
+ encoded = "?" + encoded
+ }
+
+ requestURL := c.makeURL(endpoint + encoded)
+
+ allowAuth := true
+ if c.usePreflight {
+ resp, latestRequestURL, err := c.doPreflight(requestURL, method, "Authorization, User-Agent")
+ if err != nil {
+ return nil, fmt.Errorf("pre-flight request failed before trying to fetch files in HTTP cache: %w", err)
+ }
+
+ requestURL = latestRequestURL
+ headers := resp.Header.Get("Access-Control-Allow-Headers")
+ allowAuth = strings.Contains(strings.ToLower(headers), strings.ToLower("Authorization"))
+ }
+
+ req, err := retryablehttp.NewRequest(method, requestURL, body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Set headers
+ req.Header.Set("Content-Type", "application/json")
+ req.Header.Set("User-Agent", c.userAgent())
+
+ if allowAuth {
+ req.Header.Set("Authorization", "Bearer "+c.token)
+ }
+
+ if ci.IsCi() {
+ req.Header.Set("x-artifact-client-ci", ci.Constant())
+ }
+
+ resp, err := c.HTTPClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+
+ // If there isn't a response, something else probably went wrong
+ if resp == nil {
+ return nil, fmt.Errorf("response from %s is nil, something went wrong", requestURL)
+ }
+
+ return resp, nil
+}
diff --git a/cli/internal/client/client_test.go b/cli/internal/client/client_test.go
new file mode 100644
index 0000000..36ff3fb
--- /dev/null
+++ b/cli/internal/client/client_test.go
@@ -0,0 +1,159 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "reflect"
+ "testing"
+
+ "github.com/google/uuid"
+ "github.com/hashicorp/go-hclog"
+ "github.com/vercel/turbo/cli/internal/util"
+)
+
+func Test_sendToServer(t *testing.T) {
+ ch := make(chan []byte, 1)
+ ts := httptest.NewServer(
+ http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ defer req.Body.Close()
+ b, err := ioutil.ReadAll(req.Body)
+ if err != nil {
+ t.Errorf("failed to read request %v", err)
+ }
+ ch <- b
+ w.WriteHeader(200)
+ _, _ = w.Write([]byte{})
+ }))
+ defer ts.Close()
+
+ remoteConfig := RemoteConfig{
+ TeamSlug: "my-team-slug",
+ APIURL: ts.URL,
+ Token: "my-token",
+ }
+ apiClient := NewClient(remoteConfig, hclog.Default(), "v1", Opts{})
+
+ myUUID, err := uuid.NewUUID()
+ if err != nil {
+ t.Errorf("failed to create uuid %v", err)
+ }
+ events := []map[string]interface{}{
+ {
+ "sessionId": myUUID.String(),
+ "hash": "foo",
+ "source": "LOCAL",
+ "event": "hit",
+ },
+ {
+ "sessionId": myUUID.String(),
+ "hash": "bar",
+ "source": "REMOTE",
+ "event": "MISS",
+ },
+ }
+
+ if err := apiClient.RecordAnalyticsEvents(events); err != nil {
+ t.Errorf("RecordAnalyticsEvents returned error %v", err)
+ }
+
+ body := <-ch
+
+ result := []map[string]interface{}{}
+ err = json.Unmarshal(body, &result)
+ if err != nil {
+ t.Errorf("unmarshalling body %v", err)
+ }
+ if !reflect.DeepEqual(events, result) {
+ t.Errorf("roundtrip got %v, want %v", result, events)
+ }
+}
+
+func Test_PutArtifact(t *testing.T) {
+ ch := make(chan []byte, 1)
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ defer req.Body.Close()
+ b, err := ioutil.ReadAll(req.Body)
+ if err != nil {
+ t.Errorf("failed to read request %v", err)
+ }
+ ch <- b
+ w.WriteHeader(200)
+ _, _ = w.Write([]byte{})
+ }))
+ defer ts.Close()
+
+ // Set up test expected values
+ remoteConfig := RemoteConfig{
+ TeamSlug: "my-team-slug",
+ APIURL: ts.URL,
+ Token: "my-token",
+ }
+ apiClient := NewClient(remoteConfig, hclog.Default(), "v1", Opts{})
+ expectedArtifactBody := []byte("My string artifact")
+
+ // Test Put Artifact
+ if err := apiClient.PutArtifact("hash", expectedArtifactBody, 500, ""); err != nil {
+ t.Errorf("PutArtifact returned error %v", err)
+ }
+ testBody := <-ch
+ if !bytes.Equal(expectedArtifactBody, testBody) {
+ t.Errorf("Handler read '%v', wants '%v'", testBody, expectedArtifactBody)
+ }
+}
+
+func Test_PutWhenCachingDisabled(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ defer func() { _ = req.Body.Close() }()
+ w.WriteHeader(403)
+ _, _ = w.Write([]byte("{\"code\": \"remote_caching_disabled\",\"message\":\"caching disabled\"}"))
+ }))
+ defer ts.Close()
+
+ // Set up test expected values
+ remoteConfig := RemoteConfig{
+ TeamSlug: "my-team-slug",
+ APIURL: ts.URL,
+ Token: "my-token",
+ }
+ apiClient := NewClient(remoteConfig, hclog.Default(), "v1", Opts{})
+ expectedArtifactBody := []byte("My string artifact")
+ // Test Put Artifact
+ err := apiClient.PutArtifact("hash", expectedArtifactBody, 500, "")
+ cd := &util.CacheDisabledError{}
+ if !errors.As(err, &cd) {
+ t.Errorf("expected cache disabled error, got %v", err)
+ }
+ if cd.Status != util.CachingStatusDisabled {
+ t.Errorf("caching status: expected %v, got %v", util.CachingStatusDisabled, cd.Status)
+ }
+}
+
+func Test_FetchWhenCachingDisabled(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ defer func() { _ = req.Body.Close() }()
+ w.WriteHeader(403)
+ _, _ = w.Write([]byte("{\"code\": \"remote_caching_disabled\",\"message\":\"caching disabled\"}"))
+ }))
+ defer ts.Close()
+
+ // Set up test expected values
+ remoteConfig := RemoteConfig{
+ TeamSlug: "my-team-slug",
+ APIURL: ts.URL,
+ Token: "my-token",
+ }
+ apiClient := NewClient(remoteConfig, hclog.Default(), "v1", Opts{})
+ // Test Fetch Artifact
+ resp, err := apiClient.FetchArtifact("hash")
+ cd := &util.CacheDisabledError{}
+ if !errors.As(err, &cd) {
+ t.Errorf("expected cache disabled error, got %v", err)
+ }
+ if cd.Status != util.CachingStatusDisabled {
+ t.Errorf("caching status: expected %v, got %v", util.CachingStatusDisabled, cd.Status)
+ }
+ if resp != nil {
+ t.Errorf("response got %v, want <nil>", resp)
+ }
+}
diff --git a/cli/internal/cmd/root.go b/cli/internal/cmd/root.go
new file mode 100644
index 0000000..d8d0e33
--- /dev/null
+++ b/cli/internal/cmd/root.go
@@ -0,0 +1,157 @@
+// Package cmd holds the root cobra command for turbo
+package cmd
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "runtime/pprof"
+ "runtime/trace"
+
+ "github.com/pkg/errors"
+ "github.com/vercel/turbo/cli/internal/cmdutil"
+ "github.com/vercel/turbo/cli/internal/daemon"
+ "github.com/vercel/turbo/cli/internal/process"
+ "github.com/vercel/turbo/cli/internal/prune"
+ "github.com/vercel/turbo/cli/internal/run"
+ "github.com/vercel/turbo/cli/internal/signals"
+ "github.com/vercel/turbo/cli/internal/turbostate"
+ "github.com/vercel/turbo/cli/internal/util"
+)
+
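+// initializeOutputFiles starts any profiling requested via the parsed
+// arguments (execution trace, heap profile, CPU profile) and registers the
+// matching cleanup handlers on the helper.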
+func initializeOutputFiles(helper *cmdutil.Helper, parsedArgs *turbostate.ParsedArgsFromRust) error {
+ if parsedArgs.Trace != "" {
+ cleanup, err := createTraceFile(parsedArgs.Trace)
+ if err != nil {
+ return fmt.Errorf("failed to create trace file: %v", err)
+ }
+ helper.RegisterCleanup(cleanup)
+ }
+ if parsedArgs.Heap != "" {
+ cleanup, err := createHeapFile(parsedArgs.Heap)
+ if err != nil {
+ return fmt.Errorf("failed to create heap file: %v", err)
+ }
+ helper.RegisterCleanup(cleanup)
+ }
+ if parsedArgs.CPUProfile != "" {
+ cleanup, err := createCpuprofileFile(parsedArgs.CPUProfile)
+ if err != nil {
+ return fmt.Errorf("failed to create CPU profile file: %v", err)
+ }
+ helper.RegisterCleanup(cleanup)
+ }
+
+ return nil
+}
+
+// RunWithArgs runs turbo with the ParsedArgsFromRust that is passed from the Rust side.
+func RunWithArgs(args *turbostate.ParsedArgsFromRust, turboVersion string) int {
+ util.InitPrintf()
+ // TODO: replace this with a context
+ signalWatcher := signals.NewWatcher()
+ helper := cmdutil.NewHelper(turboVersion, args)
+ ctx := context.Background()
+
+ err := initializeOutputFiles(helper, args)
+ if err != nil {
+ fmt.Printf("%v", err)
+ return 1
+ }
+ defer helper.Cleanup(args)
+
+ doneCh := make(chan struct{})
+ var execErr error
+ go func() {
+ command := args.Command
+ if command.Daemon != nil {
+ execErr = daemon.ExecuteDaemon(ctx, helper, signalWatcher, args)
+ } else if command.Prune != nil {
+ execErr = prune.ExecutePrune(helper, args)
+ } else if command.Run != nil {
+ execErr = run.ExecuteRun(ctx, helper, signalWatcher, args)
+ } else {
+ execErr = fmt.Errorf("unknown command: %v", command)
+ }
+
+ close(doneCh)
+ }()
+
+ // Wait for either our command to finish, in which case we need to clean up,
+ // or to receive a signal, in which case the signal handler above does the cleanup
+ select {
+ case <-doneCh:
+ // We finished whatever task we were running
+ signalWatcher.Close()
+ exitErr := &process.ChildExit{}
+ if errors.As(execErr, &exitErr) {
+ return exitErr.ExitCode
+ } else if execErr != nil {
+ fmt.Printf("Turbo error: %v\n", execErr)
+ return 1
+ }
+ return 0
+ case <-signalWatcher.Done():
+ // We caught a signal, which already called the close handlers
+ return 1
+ }
+}
+
+type profileCleanup func() error
+
+// Close implements io.Close for profileCleanup
+func (pc profileCleanup) Close() error {
+ return pc()
+}
+
+// To view an execution trace, use "go tool trace [file]". Note that the trace
+// viewer doesn't work under Windows Subsystem for Linux for some reason.
+func createTraceFile(traceFile string) (profileCleanup, error) {
+ f, err := os.Create(traceFile)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to create trace file: %v", traceFile)
+ }
+ if err := trace.Start(f); err != nil {
+ return nil, errors.Wrap(err, "failed to start tracing")
+ }
+ return func() error {
+ trace.Stop()
+ return f.Close()
+ }, nil
+}
+
+// To view a heap trace, use "go tool pprof [file]" and type "top". You can
+// also drop it into https://speedscope.app and use the "left heavy" or
+// "sandwich" view modes.
+func createHeapFile(heapFile string) (profileCleanup, error) {
+ f, err := os.Create(heapFile)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to create heap file: %v", heapFile)
+ }
+ return func() error {
+ if err := pprof.WriteHeapProfile(f); err != nil {
+ // we don't care if we fail to close the file we just failed to write to
+ _ = f.Close()
+ return errors.Wrapf(err, "failed to write heap file: %v", heapFile)
+ }
+ return f.Close()
+ }, nil
+}
+
+// To view a CPU profile, drop the file into https://speedscope.app.
+// Note: Running the CPU profiler doesn't work under Windows subsystem for
+// Linux. The profiler has to be built for native Windows and run using the
+// command prompt instead.
+func createCpuprofileFile(cpuprofileFile string) (profileCleanup, error) {
+ f, err := os.Create(cpuprofileFile)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to create cpuprofile file: %v", cpuprofileFile)
+ }
+ if err := pprof.StartCPUProfile(f); err != nil {
+ return nil, errors.Wrap(err, "failed to start CPU profiling")
+ }
+ return func() error {
+ pprof.StopCPUProfile()
+ return f.Close()
+ }, nil
+}
diff --git a/cli/internal/cmdutil/cmdutil.go b/cli/internal/cmdutil/cmdutil.go
new file mode 100644
index 0000000..0b02392
--- /dev/null
+++ b/cli/internal/cmdutil/cmdutil.go
@@ -0,0 +1,245 @@
+// Package cmdutil holds functionality to run turbo via cobra. That includes flag parsing and configuration
+// of components common to all subcommands
+package cmdutil
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strconv"
+ "sync"
+
+ "github.com/hashicorp/go-hclog"
+
+ "github.com/fatih/color"
+ "github.com/mitchellh/cli"
+ "github.com/vercel/turbo/cli/internal/client"
+ "github.com/vercel/turbo/cli/internal/config"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "github.com/vercel/turbo/cli/internal/turbostate"
+ "github.com/vercel/turbo/cli/internal/ui"
+)
+
+const (
+ // _envLogLevel is the environment log level
+ _envLogLevel = "TURBO_LOG_LEVEL"
+)
+
+// Helper is a struct used to hold configuration values passed via flag, env vars,
+// config files, etc. It is not intended for direct use by turbo commands, it drives
+// the creation of CmdBase, which is then used by the commands themselves.
+type Helper struct {
+ // TurboVersion is the version of turbo that is currently executing
+ TurboVersion string
+
+ // for logging
+ verbosity int
+
+ rawRepoRoot string
+
+ clientOpts client.Opts
+
+ // UserConfigPath is the path to where we expect to find
+ // a user-specific config file, if one is present. Public
+ // to allow overrides in tests
+ UserConfigPath turbopath.AbsoluteSystemPath
+
+ cleanupsMu sync.Mutex
+ cleanups []io.Closer
+}
+
+// RegisterCleanup saves a function to be run after turbo execution,
+// even if the command that runs returns an error
+func (h *Helper) RegisterCleanup(cleanup io.Closer) {
+ h.cleanupsMu.Lock()
+ defer h.cleanupsMu.Unlock()
+ h.cleanups = append(h.cleanups, cleanup)
+}
+
+// Cleanup runs the registered cleanup handlers. It requires the flags
+// to the root command so that it can construct a UI if necessary
+func (h *Helper) Cleanup(cliConfig *turbostate.ParsedArgsFromRust) {
+ h.cleanupsMu.Lock()
+ defer h.cleanupsMu.Unlock()
+ var ui cli.Ui
+ for _, cleanup := range h.cleanups {
+ if err := cleanup.Close(); err != nil {
+ if ui == nil {
+ ui = h.getUI(cliConfig)
+ }
+ ui.Warn(fmt.Sprintf("failed cleanup: %v", err))
+ }
+ }
+}
+
+func (h *Helper) getUI(cliConfig *turbostate.ParsedArgsFromRust) cli.Ui {
+ colorMode := ui.GetColorModeFromEnv()
+ if cliConfig.GetNoColor() {
+ colorMode = ui.ColorModeSuppressed
+ }
+ if cliConfig.GetColor() {
+ colorMode = ui.ColorModeForced
+ }
+ return ui.BuildColoredUi(colorMode)
+}
+
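+// getLogger maps the verbosity flag to an hclog level: 0 defers to the
+// TURBO_LOG_LEVEL environment variable (or disables logging entirely),
+// 1 means Info, 2 means Debug, and 3 or higher means Trace.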
+func (h *Helper) getLogger() (hclog.Logger, error) {
+ var level hclog.Level
+ switch h.verbosity {
+ case 0:
+ if v := os.Getenv(_envLogLevel); v != "" {
+ level = hclog.LevelFromString(v)
+ if level == hclog.NoLevel {
+ return nil, fmt.Errorf("%s value %q is not a valid log level", _envLogLevel, v)
+ }
+ } else {
+ level = hclog.NoLevel
+ }
+ case 1:
+ level = hclog.Info
+ case 2:
+ level = hclog.Debug
+ case 3:
+ level = hclog.Trace
+ default:
+ level = hclog.Trace
+ }
+ // Default output is nowhere unless we enable logging.
+ output := ioutil.Discard
+ color := hclog.ColorOff
+ if level != hclog.NoLevel {
+ output = os.Stderr
+ color = hclog.AutoColor
+ }
+
+ return hclog.New(&hclog.LoggerOptions{
+ Name: "turbo",
+ Level: level,
+ Color: color,
+ Output: output,
+ }), nil
+}
+
+// NewHelper returns a new helper instance to hold configuration values for the root
+// turbo command.
+func NewHelper(turboVersion string, args *turbostate.ParsedArgsFromRust) *Helper {
+ return &Helper{
+ TurboVersion: turboVersion,
+ UserConfigPath: config.DefaultUserConfigPath(),
+ verbosity: args.Verbosity,
+ }
+}
+
+// GetCmdBase returns a CmdBase instance configured with values from this helper.
+func (h *Helper) GetCmdBase(cliConfig *turbostate.ParsedArgsFromRust) (*CmdBase, error) {
+ // terminal is for color/no-color output
+ terminal := h.getUI(cliConfig)
+ // logger is configured with verbosity level using --verbosity flag from end users
+ logger, err := h.getLogger()
+ if err != nil {
+ return nil, err
+ }
+ cwdRaw, err := cliConfig.GetCwd()
+ if err != nil {
+ return nil, err
+ }
+ cwd, err := fs.GetCwd(cwdRaw)
+ if err != nil {
+ return nil, err
+ }
+ repoRoot := fs.ResolveUnknownPath(cwd, h.rawRepoRoot)
+ repoRoot, err = repoRoot.EvalSymlinks()
+ if err != nil {
+ return nil, err
+ }
+ repoConfig, err := config.ReadRepoConfigFile(config.GetRepoConfigPath(repoRoot), cliConfig)
+ if err != nil {
+ return nil, err
+ }
+ userConfig, err := config.ReadUserConfigFile(h.UserConfigPath, cliConfig)
+ if err != nil {
+ return nil, err
+ }
+ remoteConfig := repoConfig.GetRemoteConfig(userConfig.Token())
+ if remoteConfig.Token == "" && ui.IsCI {
+ vercelArtifactsToken := os.Getenv("VERCEL_ARTIFACTS_TOKEN")
+ vercelArtifactsOwner := os.Getenv("VERCEL_ARTIFACTS_OWNER")
+ if vercelArtifactsToken != "" {
+ remoteConfig.Token = vercelArtifactsToken
+ }
+ if vercelArtifactsOwner != "" {
+ remoteConfig.TeamID = vercelArtifactsOwner
+ }
+ }
+
+ // Primacy: Arg > Env
+ timeout, err := cliConfig.GetRemoteCacheTimeout()
+ if err == nil {
+ h.clientOpts.Timeout = timeout
+ } else {
+ val, ok := os.LookupEnv("TURBO_REMOTE_CACHE_TIMEOUT")
+ if ok {
+ number, err := strconv.ParseUint(val, 10, 64)
+ if err == nil {
+ h.clientOpts.Timeout = number
+ }
+ }
+ }
+
+ apiClient := client.NewClient(
+ remoteConfig,
+ logger,
+ h.TurboVersion,
+ h.clientOpts,
+ )
+
+ return &CmdBase{
+ UI: terminal,
+ Logger: logger,
+ RepoRoot: repoRoot,
+ APIClient: apiClient,
+ RepoConfig: repoConfig,
+ UserConfig: userConfig,
+ RemoteConfig: remoteConfig,
+ TurboVersion: h.TurboVersion,
+ }, nil
+}
+
+// CmdBase encompasses configured components common to all turbo commands.
+type CmdBase struct {
+ UI cli.Ui
+ Logger hclog.Logger
+ RepoRoot turbopath.AbsoluteSystemPath
+ APIClient *client.APIClient
+ RepoConfig *config.RepoConfig
+ UserConfig *config.UserConfig
+ RemoteConfig client.RemoteConfig
+ TurboVersion string
+}
+
+// LogError prints an error to the UI
+func (b *CmdBase) LogError(format string, args ...interface{}) {
+ err := fmt.Errorf(format, args...)
+ b.Logger.Error("error", err)
+ b.UI.Error(fmt.Sprintf("%s%s", ui.ERROR_PREFIX, color.RedString(" %v", err)))
+}
+
+// LogWarning logs an error and outputs it to the UI.
+func (b *CmdBase) LogWarning(prefix string, err error) {
+ b.Logger.Warn(prefix, "warning", err)
+
+ if prefix != "" {
+ prefix = " " + prefix + ": "
+ }
+
+ b.UI.Warn(fmt.Sprintf("%s%s%s", ui.WARNING_PREFIX, prefix, color.YellowString(" %v", err)))
+}
+
+// LogInfo logs a message and outputs it to the UI.
+func (b *CmdBase) LogInfo(msg string) {
+ b.Logger.Info(msg)
+ b.UI.Info(fmt.Sprintf("%s%s", ui.InfoPrefix, color.WhiteString(" %v", msg)))
+}
diff --git a/cli/internal/cmdutil/cmdutil_test.go b/cli/internal/cmdutil/cmdutil_test.go
new file mode 100644
index 0000000..4e6cf70
--- /dev/null
+++ b/cli/internal/cmdutil/cmdutil_test.go
@@ -0,0 +1,109 @@
+package cmdutil
+
+import (
+ "os"
+ "testing"
+ "time"
+
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/turbostate"
+ "gotest.tools/v3/assert"
+)
+
+func TestTokenEnvVar(t *testing.T) {
+ // Set up an empty config so we're just testing environment variables
+ userConfigPath := fs.AbsoluteSystemPathFromUpstream(t.TempDir()).UntypedJoin("turborepo", "config.json")
+ expectedPrefix := "my-token"
+ vars := []string{"TURBO_TOKEN", "VERCEL_ARTIFACTS_TOKEN"}
+ for _, v := range vars {
+ t.Run(v, func(t *testing.T) {
+ t.Cleanup(func() {
+ _ = os.Unsetenv(v)
+ })
+ args := &turbostate.ParsedArgsFromRust{
+ CWD: "",
+ }
+ h := NewHelper("test-version", args)
+ h.UserConfigPath = userConfigPath
+
+ expectedToken := expectedPrefix + v
+ err := os.Setenv(v, expectedToken)
+ if err != nil {
+ t.Fatalf("setenv %v", err)
+ }
+
+ base, err := h.GetCmdBase(args)
+ if err != nil {
+ t.Fatalf("failed to get command base %v", err)
+ }
+ assert.Equal(t, base.RemoteConfig.Token, expectedToken)
+ })
+ }
+}
+
+func TestRemoteCacheTimeoutEnvVar(t *testing.T) {
+ key := "TURBO_REMOTE_CACHE_TIMEOUT"
+ expectedTimeout := "600"
+ t.Run(key, func(t *testing.T) {
+ t.Cleanup(func() {
+ _ = os.Unsetenv(key)
+ })
+ args := &turbostate.ParsedArgsFromRust{
+ CWD: "",
+ }
+ h := NewHelper("test-version", args)
+
+ err := os.Setenv(key, expectedTimeout)
+ if err != nil {
+ t.Fatalf("setenv %v", err)
+ }
+
+ base, err := h.GetCmdBase(args)
+ if err != nil {
+ t.Fatalf("failed to get command base %v", err)
+ }
+ assert.Equal(t, base.APIClient.HTTPClient.HTTPClient.Timeout, time.Duration(600)*time.Second)
+ })
+}
+
+func TestRemoteCacheTimeoutFlag(t *testing.T) {
+ args := &turbostate.ParsedArgsFromRust{
+ CWD: "",
+ RemoteCacheTimeout: 599,
+ }
+ h := NewHelper("test-version", args)
+
+ base, err := h.GetCmdBase(args)
+ if err != nil {
+ t.Fatalf("failed to get command base %v", err)
+ }
+
+ assert.Equal(t, base.APIClient.HTTPClient.HTTPClient.Timeout, time.Duration(599)*time.Second)
+}
+
+func TestRemoteCacheTimeoutPrimacy(t *testing.T) {
+ key := "TURBO_REMOTE_CACHE_TIMEOUT"
+ value := "2"
+
+ t.Run(key, func(t *testing.T) {
+ t.Cleanup(func() {
+ _ = os.Unsetenv(key)
+ })
+ args := &turbostate.ParsedArgsFromRust{
+ CWD: "",
+ RemoteCacheTimeout: 1,
+ }
+ h := NewHelper("test-version", args)
+
+ err := os.Setenv(key, value)
+ if err != nil {
+ t.Fatalf("setenv %v", err)
+ }
+
+ base, err := h.GetCmdBase(args)
+ if err != nil {
+ t.Fatalf("failed to get command base %v", err)
+ }
+ assert.Equal(t, base.APIClient.HTTPClient.HTTPClient.Timeout, time.Duration(1)*time.Second)
+ })
+}
diff --git a/cli/internal/colorcache/colorcache.go b/cli/internal/colorcache/colorcache.go
new file mode 100644
index 0000000..08a15e8
--- /dev/null
+++ b/cli/internal/colorcache/colorcache.go
@@ -0,0 +1,56 @@
+package colorcache
+
+import (
+ "sync"
+
+ "github.com/vercel/turbo/cli/internal/util"
+
+ "github.com/fatih/color"
+)
+
+type colorFn = func(format string, a ...interface{}) string
+
+func getTerminalPackageColors() []colorFn {
+ return []colorFn{color.CyanString, color.MagentaString, color.GreenString, color.YellowString, color.BlueString}
+}
+
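+// ColorCache assigns a terminal color to each key on first use and remembers
+// it, so the same package is always prefixed with the same color.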
+type ColorCache struct {
+ mu sync.Mutex
+ index int
+ TermColors []colorFn
+ Cache map[interface{}]colorFn
+}
+
+// New creates an instance of ColorCache with helpers for adding colors to task outputs
+func New() *ColorCache {
+ return &ColorCache{
+ TermColors: getTerminalPackageColors(),
+ index: 0,
+ Cache: make(map[interface{}]colorFn),
+ }
+}
+
+// colorForKey returns a color function for a given package name
+func (c *ColorCache) colorForKey(key string) colorFn {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ colorFn, ok := c.Cache[key]
+ if ok {
+ return colorFn
+ }
+ c.index++
+ colorFn = c.TermColors[util.PositiveMod(c.index, len(c.TermColors))] // 5 possible colors
+ c.Cache[key] = colorFn
+ return colorFn
+}
+
+// PrefixWithColor returns a string consisting of the provided prefix in a consistent
+// color based on the cacheKey
+func (c *ColorCache) PrefixWithColor(cacheKey string, prefix string) string {
+ colorFn := c.colorForKey(cacheKey)
+ if prefix != "" {
+ return colorFn("%s: ", prefix)
+ }
+
+ return ""
+}
diff --git a/cli/internal/config/config_file.go b/cli/internal/config/config_file.go
new file mode 100644
index 0000000..d3118b8
--- /dev/null
+++ b/cli/internal/config/config_file.go
@@ -0,0 +1,192 @@
+package config
+
+import (
+ "os"
+
+ "github.com/spf13/viper"
+ "github.com/vercel/turbo/cli/internal/client"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "github.com/vercel/turbo/cli/internal/turbostate"
+)
+
+// RepoConfig is a configuration object for repo-level turbo settings such as
+// the API URL, login URL, and linked team
+type RepoConfig struct {
+ repoViper *viper.Viper
+ path turbopath.AbsoluteSystemPath
+}
+
+// LoginURL returns the configured URL for authenticating the user
+func (rc *RepoConfig) LoginURL() string {
+ return rc.repoViper.GetString("loginurl")
+}
+
+// SetTeamID sets the teamID and clears the slug, since it may have been from an old team
+func (rc *RepoConfig) SetTeamID(teamID string) error {
+ // Note that we can't use viper.Set to set a nil value, we have to merge it in
+ newVals := map[string]interface{}{
+ "teamid": teamID,
+ "teamslug": nil,
+ }
+ if err := rc.repoViper.MergeConfigMap(newVals); err != nil {
+ return err
+ }
+ return rc.write()
+}
+
+// GetRemoteConfig produces the necessary values for an API client configuration
+func (rc *RepoConfig) GetRemoteConfig(token string) client.RemoteConfig {
+ return client.RemoteConfig{
+ Token: token,
+ TeamID: rc.repoViper.GetString("teamid"),
+ TeamSlug: rc.repoViper.GetString("teamslug"),
+ APIURL: rc.repoViper.GetString("apiurl"),
+ }
+}
+
+// Internal call to save this config data to the repo config file.
+func (rc *RepoConfig) write() error {
+ if err := rc.path.EnsureDir(); err != nil {
+ return err
+ }
+ return rc.repoViper.WriteConfig()
+}
+
+// Delete deletes the config file. This repo config shouldn't be used
+// afterwards, it needs to be re-initialized
+func (rc *RepoConfig) Delete() error {
+ return rc.path.Remove()
+}
+
+// UserConfig is a wrapper around the user-specific configuration values
+// for Turborepo.
+type UserConfig struct {
+ userViper *viper.Viper
+ path turbopath.AbsoluteSystemPath
+}
+
+// Token returns the Bearer token for this user if it exists
+func (uc *UserConfig) Token() string {
+ return uc.userViper.GetString("token")
+}
+
+// SetToken saves a Bearer token for this user, writing it to the
+// user config file, creating it if necessary
+func (uc *UserConfig) SetToken(token string) error {
+ // Technically Set works here, due to how overrides work, but use merge for consistency
+ if err := uc.userViper.MergeConfigMap(map[string]interface{}{"token": token}); err != nil {
+ return err
+ }
+ return uc.write()
+}
+
+// Internal call to save this config data to the user config file.
+func (uc *UserConfig) write() error {
+ if err := uc.path.EnsureDir(); err != nil {
+ return err
+ }
+ return uc.userViper.WriteConfig()
+}
+
+// Delete deletes the config file. This user config shouldn't be used
+// afterwards, it needs to be re-initialized
+func (uc *UserConfig) Delete() error {
+ return uc.path.Remove()
+}
+
+// ReadUserConfigFile creates a UserConfig using the
+// specified path as the user config file. Note that the path or its parents
+// do not need to exist. On a write to this configuration, they will be created.
+func ReadUserConfigFile(path turbopath.AbsoluteSystemPath, cliConfig *turbostate.ParsedArgsFromRust) (*UserConfig, error) {
+ userViper := viper.New()
+ userViper.SetConfigFile(path.ToString())
+ userViper.SetConfigType("json")
+ userViper.SetEnvPrefix("turbo")
+ userViper.MustBindEnv("token")
+
+ token, err := cliConfig.GetToken()
+ if err != nil {
+ return nil, err
+ }
+ if token != "" {
+ userViper.Set("token", token)
+ }
+
+ if err := userViper.ReadInConfig(); err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+ return &UserConfig{
+ userViper: userViper,
+ path: path,
+ }, nil
+}
+
+// DefaultUserConfigPath returns the default platform-dependent place that
+// we store the user-specific configuration.
+func DefaultUserConfigPath() turbopath.AbsoluteSystemPath {
+ return fs.GetUserConfigDir().UntypedJoin("config.json")
+}
+
+const (
+ _defaultAPIURL = "https://vercel.com/api"
+ _defaultLoginURL = "https://vercel.com"
+)
+
+// ReadRepoConfigFile creates a RepoConfig using the
+// specified path as the repo config file. Note that the path or its
+// parents do not need to exist. On a write to this configuration, they
+// will be created.
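+// Values follow viper's precedence rules: explicitly set CLI values win over
+// the bound TURBO_* environment variables, which win over the config file,
+// which wins over the defaults.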
+func ReadRepoConfigFile(path turbopath.AbsoluteSystemPath, cliConfig *turbostate.ParsedArgsFromRust) (*RepoConfig, error) {
+ repoViper := viper.New()
+ repoViper.SetConfigFile(path.ToString())
+ repoViper.SetConfigType("json")
+ repoViper.SetEnvPrefix("turbo")
+ repoViper.MustBindEnv("apiurl", "TURBO_API")
+ repoViper.MustBindEnv("loginurl", "TURBO_LOGIN")
+ repoViper.MustBindEnv("teamslug", "TURBO_TEAM")
+ repoViper.MustBindEnv("teamid")
+ repoViper.SetDefault("apiurl", _defaultAPIURL)
+ repoViper.SetDefault("loginurl", _defaultLoginURL)
+
+ login, err := cliConfig.GetLogin()
+ if err != nil {
+ return nil, err
+ }
+ if login != "" {
+ repoViper.Set("loginurl", login)
+ }
+
+ api, err := cliConfig.GetAPI()
+ if err != nil {
+ return nil, err
+ }
+ if api != "" {
+ repoViper.Set("apiurl", api)
+ }
+
+ team, err := cliConfig.GetTeam()
+ if err != nil {
+ return nil, err
+ }
+ if team != "" {
+ repoViper.Set("teamslug", team)
+ }
+
+ if err := repoViper.ReadInConfig(); err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+ // If team was set via commandline, don't read the teamId from the config file, as it
+ // won't necessarily match.
+ if team != "" {
+ repoViper.Set("teamid", "")
+ }
+ return &RepoConfig{
+ repoViper: repoViper,
+ path: path,
+ }, nil
+}
+
+// GetRepoConfigPath returns the path to the repo-specific configuration file
+func GetRepoConfigPath(repoRoot turbopath.AbsoluteSystemPath) turbopath.AbsoluteSystemPath {
+ return repoRoot.UntypedJoin(".turbo", "config.json")
+}
diff --git a/cli/internal/config/config_file_test.go b/cli/internal/config/config_file_test.go
new file mode 100644
index 0000000..7a19108
--- /dev/null
+++ b/cli/internal/config/config_file_test.go
@@ -0,0 +1,157 @@
+package config
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/turbostate"
+ "gotest.tools/v3/assert"
+)
+
+func TestReadRepoConfigWhenMissing(t *testing.T) {
+ testDir := fs.AbsoluteSystemPathFromUpstream(t.TempDir()).UntypedJoin("config.json")
+ args := &turbostate.ParsedArgsFromRust{
+ CWD: "",
+ }
+
+ config, err := ReadRepoConfigFile(testDir, args)
+ if err != nil {
+ t.Errorf("got error reading non-existent config file: %v, want <nil>", err)
+ }
+ if config == nil {
+ t.Error("got <nil>, wanted config value")
+ }
+}
+
+func TestReadRepoConfigSetTeamAndAPIFlag(t *testing.T) {
+ testConfigFile := fs.AbsoluteSystemPathFromUpstream(t.TempDir()).UntypedJoin("turborepo", "config.json")
+
+ slug := "my-team-slug"
+ apiURL := "http://my-login-url"
+ args := &turbostate.ParsedArgsFromRust{
+ CWD: "",
+ Team: slug,
+ API: apiURL,
+ }
+
+ teamID := "some-id"
+ assert.NilError(t, testConfigFile.EnsureDir(), "EnsureDir")
+ assert.NilError(t, testConfigFile.WriteFile([]byte(fmt.Sprintf(`{"teamId":"%v"}`, teamID)), 0644), "WriteFile")
+
+ config, err := ReadRepoConfigFile(testConfigFile, args)
+ if err != nil {
+ t.Errorf("ReadRepoConfigFile err got %v, want <nil>", err)
+ }
+ remoteConfig := config.GetRemoteConfig("")
+ if remoteConfig.TeamID != "" {
+ t.Errorf("TeamID got %v, want <empty string>", remoteConfig.TeamID)
+ }
+ if remoteConfig.TeamSlug != slug {
+ t.Errorf("TeamSlug got %v, want %v", remoteConfig.TeamSlug, slug)
+ }
+ if remoteConfig.APIURL != apiURL {
+ t.Errorf("APIURL got %v, want %v", remoteConfig.APIURL, apiURL)
+ }
+}
+
+func TestRepoConfigIncludesDefaults(t *testing.T) {
+ testConfigFile := fs.AbsoluteSystemPathFromUpstream(t.TempDir()).UntypedJoin("turborepo", "config.json")
+ args := &turbostate.ParsedArgsFromRust{
+ CWD: "",
+ }
+
+ expectedTeam := "my-team"
+
+ assert.NilError(t, testConfigFile.EnsureDir(), "EnsureDir")
+ assert.NilError(t, testConfigFile.WriteFile([]byte(fmt.Sprintf(`{"teamSlug":"%v"}`, expectedTeam)), 0644), "WriteFile")
+
+ config, err := ReadRepoConfigFile(testConfigFile, args)
+ if err != nil {
+ t.Errorf("ReadRepoConfigFile err got %v, want <nil>", err)
+ }
+
+ remoteConfig := config.GetRemoteConfig("")
+ if remoteConfig.APIURL != _defaultAPIURL {
+ t.Errorf("api url got %v, want %v", remoteConfig.APIURL, _defaultAPIURL)
+ }
+ if remoteConfig.TeamSlug != expectedTeam {
+ t.Errorf("team slug got %v, want %v", remoteConfig.TeamSlug, expectedTeam)
+ }
+}
+
+func TestWriteRepoConfig(t *testing.T) {
+ repoRoot := fs.AbsoluteSystemPathFromUpstream(t.TempDir())
+ testConfigFile := repoRoot.UntypedJoin(".turbo", "config.json")
+ args := &turbostate.ParsedArgsFromRust{
+ CWD: "",
+ }
+
+ expectedTeam := "my-team"
+
+ assert.NilError(t, testConfigFile.EnsureDir(), "EnsureDir")
+ assert.NilError(t, testConfigFile.WriteFile([]byte(fmt.Sprintf(`{"teamSlug":"%v"}`, expectedTeam)), 0644), "WriteFile")
+
+ initial, err := ReadRepoConfigFile(testConfigFile, args)
+ assert.NilError(t, err, "GetRepoConfig")
+ // setting the teamID should clear the slug, since it may have been from an old team
+ expectedTeamID := "my-team-id"
+ err = initial.SetTeamID(expectedTeamID)
+ assert.NilError(t, err, "SetTeamID")
+
+ config, err := ReadRepoConfigFile(testConfigFile, args)
+ if err != nil {
+ t.Errorf("ReadRepoConfig err got %v, want <nil>", err)
+ }
+
+ remoteConfig := config.GetRemoteConfig("")
+ if remoteConfig.TeamSlug != "" {
+ t.Errorf("Expected TeamSlug to be cleared, got %v", remoteConfig.TeamSlug)
+ }
+ if remoteConfig.TeamID != expectedTeamID {
+ t.Errorf("TeamID got %v, want %v", remoteConfig.TeamID, expectedTeamID)
+ }
+}
+
+func TestWriteUserConfig(t *testing.T) {
+ configPath := fs.AbsoluteSystemPathFromUpstream(t.TempDir()).UntypedJoin("turborepo", "config.json")
+ args := &turbostate.ParsedArgsFromRust{
+ CWD: "",
+ }
+
+ // Non-existent config file should get empty values
+ userConfig, err := ReadUserConfigFile(configPath, args)
+ assert.NilError(t, err, "readUserConfigFile")
+ assert.Equal(t, userConfig.Token(), "")
+ assert.Equal(t, userConfig.path, configPath)
+
+ expectedToken := "my-token"
+ err = userConfig.SetToken(expectedToken)
+ assert.NilError(t, err, "SetToken")
+
+ config, err := ReadUserConfigFile(configPath, args)
+ assert.NilError(t, err, "readUserConfigFile")
+ assert.Equal(t, config.Token(), expectedToken)
+
+ err = config.Delete()
+ assert.NilError(t, err, "deleteConfigFile")
+ assert.Equal(t, configPath.FileExists(), false, "config file should be deleted")
+
+ final, err := ReadUserConfigFile(configPath, args)
+ assert.NilError(t, err, "readUserConfigFile")
+ assert.Equal(t, final.Token(), "")
+ assert.Equal(t, configPath.FileExists(), false, "config file should be deleted")
+}
+
+func TestUserConfigFlags(t *testing.T) {
+ configPath := fs.AbsoluteSystemPathFromUpstream(t.TempDir()).UntypedJoin("turborepo", "config.json")
+ args := &turbostate.ParsedArgsFromRust{
+ CWD: "",
+ Token: "my-token",
+ }
+
+ userConfig, err := ReadUserConfigFile(configPath, args)
+ assert.NilError(t, err, "readUserConfigFile")
+ assert.Equal(t, userConfig.Token(), "my-token")
+ assert.Equal(t, userConfig.path, configPath)
+}
diff --git a/cli/internal/context/context.go b/cli/internal/context/context.go
new file mode 100644
index 0000000..2376d2d
--- /dev/null
+++ b/cli/internal/context/context.go
@@ -0,0 +1,480 @@
+package context
+
+import (
+ "fmt"
+ "path/filepath"
+ "sort"
+ "strings"
+ "sync"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/vercel/turbo/cli/internal/core"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/lockfile"
+ "github.com/vercel/turbo/cli/internal/packagemanager"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "github.com/vercel/turbo/cli/internal/util"
+ "github.com/vercel/turbo/cli/internal/workspace"
+
+ "github.com/Masterminds/semver"
+ mapset "github.com/deckarep/golang-set"
+ "github.com/pyr-sh/dag"
+ "golang.org/x/sync/errgroup"
+)
+
+// Warnings is an error type for errors that don't prevent the creation of a functional Context
+type Warnings struct {
+ warns *multierror.Error
+ mu sync.Mutex
+}
+
+var _ error = (*Warnings)(nil)
+
+func (w *Warnings) Error() string {
+ return w.warns.Error()
+}
+
+func (w *Warnings) errorOrNil() error {
+ if w.warns != nil {
+ return w
+ }
+ return nil
+}
+
+func (w *Warnings) append(err error) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ w.warns = multierror.Append(w.warns, err)
+}
+
+// Context of the CLI
+type Context struct {
+ // WorkspaceInfos contains the contents of package.json for every workspace
+ // TODO(gsoltis): should the RootPackageJSON be included in WorkspaceInfos?
+ WorkspaceInfos workspace.Catalog
+
+ // WorkspaceNames is all the names of the workspaces
+ WorkspaceNames []string
+
+ // WorkspaceGraph is a graph of workspace dependencies
+ // (based on package.json dependencies and devDependencies)
+ WorkspaceGraph dag.AcyclicGraph
+
+ // RootNode is a sigil identifying the root workspace
+ RootNode string
+
+ // Lockfile is a struct to read the lockfile based on the package manager
+ Lockfile lockfile.Lockfile
+
+ // PackageManager is an abstraction for all the info a package manager
+ // can give us about the repo.
+ PackageManager *packagemanager.PackageManager
+
+ // Used to arbitrate access to the graph. We parallelize most build operations,
+ // and Go maps aren't natively thread-safe, so this is needed.
+ mutex sync.Mutex
+}
+
+// Splits "npm:^1.2.3" and "github:foo/bar.git" into a protocol part and a version part.
+func parseDependencyProtocol(version string) (string, string) {
+ parts := strings.Split(version, ":")
+ if len(parts) == 1 {
+ return "", parts[0]
+ }
+
+ return parts[0], strings.Join(parts[1:], ":")
+}
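+
+// For example, parseDependencyProtocol("npm:^1.2.3") returns ("npm", "^1.2.3"),
+// while a bare "^1.2.3" returns ("", "^1.2.3").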
+
+func isProtocolExternal(protocol string) bool {
+ // The npm protocol for yarn by default still uses the workspace package if the workspace
+ // version is in a compatible semver range. See https://github.com/yarnpkg/berry/discussions/4015
+ // For now, we will just assume if the npm protocol is being used and the version matches
+ // its an internal dependency which matches the existing behavior before this additional
+ // logic was added.
+
+ // TODO: extend this to support the `enableTransparentWorkspaces` yarn option
+ return protocol != "" && protocol != "npm"
+}
+
+func isWorkspaceReference(packageVersion string, dependencyVersion string, cwd string, rootpath string) bool {
+ protocol, dependencyVersion := parseDependencyProtocol(dependencyVersion)
+
+ if protocol == "workspace" {
+ // TODO: Since support at the moment is non-existent for workspaces that contain multiple
+ // versions of the same package name, just assume it's a match and don't check the range
+ // for an exact match.
+ return true
+ } else if protocol == "file" || protocol == "link" {
+ abs, err := filepath.Abs(filepath.Join(cwd, dependencyVersion))
+ if err != nil {
+ // Default to internal if we have the package but somehow cannot get the path
+ // TODO(gsoltis): log this?
+ return true
+ }
+ isWithinRepo, err := fs.DirContainsPath(rootpath, filepath.FromSlash(abs))
+ if err != nil {
+ // Default to internal if we have the package but somehow cannot get the path
+ // TODO(gsoltis): log this?
+ return true
+ }
+ return isWithinRepo
+ } else if isProtocolExternal(protocol) {
+ // Other protocols are assumed to be external references ("github:", etc)
+ return false
+ } else if dependencyVersion == "*" {
+ return true
+ }
+
+ // If we got this far, then we need to check whether the workspace package's version satisfies
+ // the dependency's range to determine whether it's an internal or external dependency.
+
+ constraint, constraintErr := semver.NewConstraint(dependencyVersion)
+ pkgVersion, packageVersionErr := semver.NewVersion(packageVersion)
+ if constraintErr != nil || packageVersionErr != nil {
+ // For backwards compatibility with existing behavior, if we can't parse the version then we
+ // treat the dependency as an internal package reference and swallow the error.
+
+ // TODO: some package managers also support tags like "latest". Does extra handling need to be
+ // added for this corner case?
+ return true
+ }
+
+ return constraint.Check(pkgVersion)
+}
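+
+// For example, "workspace:*" is always treated as internal and "github:foo/bar.git"
+// as external, while a bare "^1.2.3" is internal only if the workspace package's
+// own version satisfies that range.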
+
+// SinglePackageGraph constructs a Context instance from a single package.
+func SinglePackageGraph(repoRoot turbopath.AbsoluteSystemPath, rootPackageJSON *fs.PackageJSON) (*Context, error) {
+ workspaceInfos := workspace.Catalog{
+ PackageJSONs: map[string]*fs.PackageJSON{util.RootPkgName: rootPackageJSON},
+ TurboConfigs: map[string]*fs.TurboJSON{},
+ }
+ c := &Context{
+ WorkspaceInfos: workspaceInfos,
+ RootNode: core.ROOT_NODE_NAME,
+ }
+ c.WorkspaceGraph.Connect(dag.BasicEdge(util.RootPkgName, core.ROOT_NODE_NAME))
+ packageManager, err := packagemanager.GetPackageManager(repoRoot, rootPackageJSON)
+ if err != nil {
+ return nil, err
+ }
+ c.PackageManager = packageManager
+ return c, nil
+}
+
+// BuildPackageGraph constructs a Context instance with information about the package dependency graph
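+//
+// A minimal usage sketch (the "my-app" workspace name is illustrative):
+//
+//	ctx, err := BuildPackageGraph(repoRoot, rootPackageJSON)
+//	if err != nil {
+//		// handle err; note that accumulated warnings also surface here via errorOrNil
+//	}
+//	internal, _ := ctx.InternalDependencies([]string{"my-app"})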
+func BuildPackageGraph(repoRoot turbopath.AbsoluteSystemPath, rootPackageJSON *fs.PackageJSON) (*Context, error) {
+ c := &Context{}
+ rootpath := repoRoot.ToStringDuringMigration()
+ c.WorkspaceInfos = workspace.Catalog{
+ PackageJSONs: map[string]*fs.PackageJSON{},
+ TurboConfigs: map[string]*fs.TurboJSON{},
+ }
+ c.RootNode = core.ROOT_NODE_NAME
+
+ var warnings Warnings
+
+ packageManager, err := packagemanager.GetPackageManager(repoRoot, rootPackageJSON)
+ if err != nil {
+ return nil, err
+ }
+ c.PackageManager = packageManager
+
+ if lockfile, err := c.PackageManager.ReadLockfile(repoRoot, rootPackageJSON); err != nil {
+ warnings.append(err)
+ } else {
+ c.Lockfile = lockfile
+ }
+
+ if err := c.resolveWorkspaceRootDeps(rootPackageJSON, &warnings); err != nil {
+ // TODO(Gaspar) was this the intended return error?
+ return nil, fmt.Errorf("could not resolve workspaces: %w", err)
+ }
+
+ // Get the workspaces from the package manager.
+ // Workspace paths are absolute.
+ workspaces, err := c.PackageManager.GetWorkspaces(repoRoot)
+
+ if err != nil {
+ return nil, fmt.Errorf("workspace configuration error: %w", err)
+ }
+
+ // We will parse all package.json files simultaneously. We use an
+ // error group because we cannot fully populate the graph (the next step)
+ // until all parsing is complete.
+ parseJSONWaitGroup := &errgroup.Group{}
+ for _, workspace := range workspaces {
+ pkgJSONPath := fs.UnsafeToAbsoluteSystemPath(workspace)
+ parseJSONWaitGroup.Go(func() error {
+ return c.parsePackageJSON(repoRoot, pkgJSONPath)
+ })
+ }
+
+ if err := parseJSONWaitGroup.Wait(); err != nil {
+ return nil, err
+ }
+ populateGraphWaitGroup := &errgroup.Group{}
+ for _, pkg := range c.WorkspaceInfos.PackageJSONs {
+ pkg := pkg
+ populateGraphWaitGroup.Go(func() error {
+ return c.populateWorkspaceGraphForPackageJSON(pkg, rootpath, pkg.Name, &warnings)
+ })
+ }
+
+ if err := populateGraphWaitGroup.Wait(); err != nil {
+ return nil, err
+ }
+ // Resolve dependencies for the root package. We override the vertexName in the graph
+ // for the root package, since it can have an arbitrary name. We need it to have our
+ // RootPkgName so that we can identify it as the root later on.
+ err = c.populateWorkspaceGraphForPackageJSON(rootPackageJSON, rootpath, util.RootPkgName, &warnings)
+ if err != nil {
+ return nil, fmt.Errorf("failed to resolve dependencies for root package: %v", err)
+ }
+ c.WorkspaceInfos.PackageJSONs[util.RootPkgName] = rootPackageJSON
+
+ return c, warnings.errorOrNil()
+}
+
+func (c *Context) resolveWorkspaceRootDeps(rootPackageJSON *fs.PackageJSON, warnings *Warnings) error {
+ pkg := rootPackageJSON
+ pkg.UnresolvedExternalDeps = make(map[string]string)
+ for dep, version := range pkg.DevDependencies {
+ pkg.UnresolvedExternalDeps[dep] = version
+ }
+ for dep, version := range pkg.OptionalDependencies {
+ pkg.UnresolvedExternalDeps[dep] = version
+ }
+ for dep, version := range pkg.Dependencies {
+ pkg.UnresolvedExternalDeps[dep] = version
+ }
+ if c.Lockfile != nil {
+ depSet, err := lockfile.TransitiveClosure(
+ pkg.Dir.ToUnixPath(),
+ pkg.UnresolvedExternalDeps,
+ c.Lockfile,
+ )
+ if err != nil {
+ warnings.append(err)
+ // Return early to skip using results of incomplete dep graph resolution
+ return nil
+ }
+ pkg.TransitiveDeps = make([]lockfile.Package, 0, depSet.Cardinality())
+ for _, v := range depSet.ToSlice() {
+ dep := v.(lockfile.Package)
+ pkg.TransitiveDeps = append(pkg.TransitiveDeps, dep)
+ }
+ sort.Sort(lockfile.ByKey(pkg.TransitiveDeps))
+ hashOfExternalDeps, err := fs.HashObject(pkg.TransitiveDeps)
+ if err != nil {
+ return err
+ }
+ pkg.ExternalDepsHash = hashOfExternalDeps
+ } else {
+ pkg.TransitiveDeps = []lockfile.Package{}
+ pkg.ExternalDepsHash = ""
+ }
+
+ return nil
+}
+
+// populateWorkspaceGraphForPackageJSON fills in the edges for the dependencies of the given package
+// that are within the monorepo, as well as collecting and hashing the dependencies of the package
+// that are not within the monorepo. The vertexName is used to override the package name in the graph.
+// This can happen when adding the root package, which can have an arbitrary name.
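+// Graph edges are added from the dependent package to its dependency (vertexName -> depName).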
+func (c *Context) populateWorkspaceGraphForPackageJSON(pkg *fs.PackageJSON, rootpath string, vertexName string, warnings *Warnings) error {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+ depMap := make(map[string]string)
+ internalDepsSet := make(dag.Set)
+ externalUnresolvedDepsSet := make(dag.Set)
+ pkg.UnresolvedExternalDeps = make(map[string]string)
+
+ for dep, version := range pkg.DevDependencies {
+ depMap[dep] = version
+ }
+
+ for dep, version := range pkg.OptionalDependencies {
+ depMap[dep] = version
+ }
+
+ for dep, version := range pkg.Dependencies {
+ depMap[dep] = version
+ }
+
+ // split out internal vs. external deps
+ for depName, depVersion := range depMap {
+ if item, ok := c.WorkspaceInfos.PackageJSONs[depName]; ok && isWorkspaceReference(item.Version, depVersion, pkg.Dir.ToStringDuringMigration(), rootpath) {
+ internalDepsSet.Add(depName)
+ c.WorkspaceGraph.Connect(dag.BasicEdge(vertexName, depName))
+ } else {
+ externalUnresolvedDepsSet.Add(depName)
+ }
+ }
+
+ for _, name := range externalUnresolvedDepsSet.List() {
+ name := name.(string)
+ if item, ok := pkg.DevDependencies[name]; ok {
+ pkg.UnresolvedExternalDeps[name] = item
+ }
+
+ if item, ok := pkg.OptionalDependencies[name]; ok {
+ pkg.UnresolvedExternalDeps[name] = item
+ }
+
+ if item, ok := pkg.Dependencies[name]; ok {
+ pkg.UnresolvedExternalDeps[name] = item
+ }
+ }
+
+ externalDeps, err := lockfile.TransitiveClosure(
+ pkg.Dir.ToUnixPath(),
+ pkg.UnresolvedExternalDeps,
+ c.Lockfile,
+ )
+ if err != nil {
+ warnings.append(err)
+ // Fall back to an empty set so we don't use the results of an incomplete resolution
+ externalDeps = mapset.NewSet()
+ }
+
+ // When there are no internal dependencies, we still need to add these leaves to the graph
+ if internalDepsSet.Len() == 0 {
+ c.WorkspaceGraph.Connect(dag.BasicEdge(pkg.Name, core.ROOT_NODE_NAME))
+ }
+ pkg.TransitiveDeps = make([]lockfile.Package, 0, externalDeps.Cardinality())
+ for _, dependency := range externalDeps.ToSlice() {
+ dependency := dependency.(lockfile.Package)
+ pkg.TransitiveDeps = append(pkg.TransitiveDeps, dependency)
+ }
+ pkg.InternalDeps = make([]string, 0, internalDepsSet.Len())
+ for _, v := range internalDepsSet.List() {
+ pkg.InternalDeps = append(pkg.InternalDeps, fmt.Sprintf("%v", v))
+ }
+ sort.Strings(pkg.InternalDeps)
+ sort.Sort(lockfile.ByKey(pkg.TransitiveDeps))
+ hashOfExternalDeps, err := fs.HashObject(pkg.TransitiveDeps)
+ if err != nil {
+ return err
+ }
+ pkg.ExternalDepsHash = hashOfExternalDeps
+ return nil
+}
+
+func (c *Context) parsePackageJSON(repoRoot turbopath.AbsoluteSystemPath, pkgJSONPath turbopath.AbsoluteSystemPath) error {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+
+ if pkgJSONPath.FileExists() {
+ pkg, err := fs.ReadPackageJSON(pkgJSONPath)
+ if err != nil {
+ return fmt.Errorf("parsing %s: %w", pkgJSONPath, err)
+ }
+
+ relativePkgJSONPath, err := repoRoot.PathTo(pkgJSONPath)
+ if err != nil {
+ return err
+ }
+ c.WorkspaceGraph.Add(pkg.Name)
+ pkg.PackageJSONPath = turbopath.AnchoredSystemPathFromUpstream(relativePkgJSONPath)
+ pkg.Dir = turbopath.AnchoredSystemPathFromUpstream(filepath.Dir(relativePkgJSONPath))
+ if c.WorkspaceInfos.PackageJSONs[pkg.Name] != nil {
+ existing := c.WorkspaceInfos.PackageJSONs[pkg.Name]
+ return fmt.Errorf("Failed to add workspace \"%s\" from %s, it already exists at %s", pkg.Name, pkg.Dir, existing.Dir)
+ }
+ c.WorkspaceInfos.PackageJSONs[pkg.Name] = pkg
+ c.WorkspaceNames = append(c.WorkspaceNames, pkg.Name)
+ }
+ return nil
+}
+
+// InternalDependencies finds all dependencies required by the slice of starting
+// packages, as well as the starting packages themselves.
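+// For example, given workspace edges a -> b -> c, InternalDependencies([]string{"a"})
+// returns ["a", "b", "c"] (sorted alphabetically).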
+func (c *Context) InternalDependencies(start []string) ([]string, error) {
+ vertices := make(dag.Set)
+ for _, v := range start {
+ vertices.Add(v)
+ }
+ s := make(dag.Set)
+ memoFunc := func(v dag.Vertex, d int) error {
+ s.Add(v)
+ return nil
+ }
+
+ if err := c.WorkspaceGraph.DepthFirstWalk(vertices, memoFunc); err != nil {
+ return nil, err
+ }
+
+ // Use a for loop so we can assert each element to string.
+ // .List() returns a list of interface{} values, but
+ // we know they are strings.
+ targets := make([]string, 0, s.Len())
+ for _, dep := range s.List() {
+ targets = append(targets, dep.(string))
+ }
+ sort.Strings(targets)
+
+ return targets, nil
+}
+
+// ChangedPackages returns a list of changed packages based on the contents of a previous lockfile.
+// This assumes that none of the package.json files in the workspace have changed; it is
+// the responsibility of the caller to verify this.
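+// For example, if only workspace "libA" (illustrative) resolves to a different set of
+// external dependencies under the two lockfiles, the result is ["libA"]; a global
+// change, or a change to the root package, marks every workspace as changed.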
+func (c *Context) ChangedPackages(previousLockfile lockfile.Lockfile) ([]string, error) {
+ if lockfile.IsNil(previousLockfile) || lockfile.IsNil(c.Lockfile) {
+ return nil, fmt.Errorf("Cannot detect changed packages without previous and current lockfile")
+ }
+
+ didPackageChange := func(pkgName string, pkg *fs.PackageJSON) bool {
+ previousDeps, err := lockfile.TransitiveClosure(
+ pkg.Dir.ToUnixPath(),
+ pkg.UnresolvedExternalDeps,
+ previousLockfile,
+ )
+ if err != nil || previousDeps.Cardinality() != len(pkg.TransitiveDeps) {
+ return true
+ }
+
+ prevExternalDeps := make([]lockfile.Package, 0, previousDeps.Cardinality())
+ for _, d := range previousDeps.ToSlice() {
+ prevExternalDeps = append(prevExternalDeps, d.(lockfile.Package))
+ }
+ sort.Sort(lockfile.ByKey(prevExternalDeps))
+
+ for i := range prevExternalDeps {
+ if prevExternalDeps[i] != pkg.TransitiveDeps[i] {
+ return true
+ }
+ }
+ return false
+ }
+
+ changedPkgs := make([]string, 0, len(c.WorkspaceInfos.PackageJSONs))
+
+ // Check whether prev and current lockfiles have "global" changes, e.g. a lockfile version bump
+ globalChange := c.Lockfile.GlobalChange(previousLockfile)
+
+ for pkgName, pkg := range c.WorkspaceInfos.PackageJSONs {
+ if globalChange {
+ break
+ }
+ if didPackageChange(pkgName, pkg) {
+ if pkgName == util.RootPkgName {
+ globalChange = true
+ } else {
+ changedPkgs = append(changedPkgs, pkgName)
+ }
+ }
+ }
+
+ if globalChange {
+ changedPkgs = make([]string, 0, len(c.WorkspaceInfos.PackageJSONs))
+ for pkgName := range c.WorkspaceInfos.PackageJSONs {
+ changedPkgs = append(changedPkgs, pkgName)
+ }
+ sort.Strings(changedPkgs)
+ return changedPkgs, nil
+ }
+
+ sort.Strings(changedPkgs)
+ return changedPkgs, nil
+}
diff --git a/cli/internal/context/context_test.go b/cli/internal/context/context_test.go
new file mode 100644
index 0000000..692c0a8
--- /dev/null
+++ b/cli/internal/context/context_test.go
@@ -0,0 +1,162 @@
+package context
+
+import (
+ "os"
+ "path/filepath"
+ "regexp"
+ "testing"
+
+ testifyAssert "github.com/stretchr/testify/assert"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+func Test_isWorkspaceReference(t *testing.T) {
+ rootpath, err := filepath.Abs(filepath.FromSlash("/some/repo"))
+ if err != nil {
+ t.Fatalf("failed to create absolute root path %v", err)
+ }
+ pkgDir, err := filepath.Abs(filepath.FromSlash("/some/repo/packages/libA"))
+ if err != nil {
+ t.Fatalf("failed to create absolute pkgDir %v", err)
+ }
+ tests := []struct {
+ name string
+ packageVersion string
+ dependencyVersion string
+ want bool
+ }{
+ {
+ name: "handles exact match",
+ packageVersion: "1.2.3",
+ dependencyVersion: "1.2.3",
+ want: true,
+ },
+ {
+ name: "handles semver range satisfied",
+ packageVersion: "1.2.3",
+ dependencyVersion: "^1.0.0",
+ want: true,
+ },
+ {
+ name: "handles semver range not-satisfied",
+ packageVersion: "2.3.4",
+ dependencyVersion: "^1.0.0",
+ want: false,
+ },
+ {
+ name: "handles workspace protocol with version",
+ packageVersion: "1.2.3",
+ dependencyVersion: "workspace:1.2.3",
+ want: true,
+ },
+ {
+ name: "handles workspace protocol with relative path",
+ packageVersion: "1.2.3",
+ dependencyVersion: "workspace:../other-package/",
+ want: true,
+ },
+ {
+ name: "handles npm protocol with satisfied semver range",
+ packageVersion: "1.2.3",
+ dependencyVersion: "npm:^1.2.3",
+ want: true, // default in yarn is to use the workspace version unless `enableTransparentWorkspaces: true`. This isn't currently being checked.
+ },
+ {
+ name: "handles npm protocol with non-satisfied semver range",
+ packageVersion: "2.3.4",
+ dependencyVersion: "npm:^1.2.3",
+ want: false,
+ },
+ {
+ name: "handles pre-release versions",
+ packageVersion: "1.2.3",
+ dependencyVersion: "1.2.2-alpha-1234abcd.0",
+ want: false,
+ },
+ {
+ name: "handles non-semver package version",
+ packageVersion: "sometag",
+ dependencyVersion: "1.2.3",
+ want: true, // for backwards compatibility with the code before versions were verified
+ },
+ {
+ name: "handles non-semver dependency version",
+ packageVersion: "1.2.3",
+ dependencyVersion: "sometag",
+ want: true, // for backwards compatibility with the code before versions were verified
+ },
+ {
+ name: "handles file:... inside repo",
+ packageVersion: "1.2.3",
+ dependencyVersion: "file:../libB",
+ want: true, // this is a sibling package
+ },
+ {
+ name: "handles file:... outside repo",
+ packageVersion: "1.2.3",
+ dependencyVersion: "file:../../../otherproject",
+ want: false, // this is not within the repo root
+ },
+ {
+ name: "handles link:... inside repo",
+ packageVersion: "1.2.3",
+ dependencyVersion: "link:../libB",
+ want: true, // this is a sibling package
+ },
+ {
+ name: "handles link:... outside repo",
+ packageVersion: "1.2.3",
+ dependencyVersion: "link:../../../otherproject",
+ want: false, // this is not within the repo root
+ },
+ {
+ name: "handles development versions",
+ packageVersion: "0.0.0-development",
+ dependencyVersion: "*",
+ want: true, // "*" should always match
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got := isWorkspaceReference(tt.packageVersion, tt.dependencyVersion, pkgDir, rootpath)
+ if got != tt.want {
+ t.Errorf("isWorkspaceReference(%v, %v, %v, %v) got = %v, want %v", tt.packageVersion, tt.dependencyVersion, pkgDir, rootpath, got, tt.want)
+ }
+ })
+ }
+}
+
+func TestBuildPackageGraph_DuplicateNames(t *testing.T) {
+ path := getTestDir(t, "dupe-workspace-names")
+ pkgJSON := &fs.PackageJSON{
+ Name: "dupe-workspace-names",
+ PackageManager: "pnpm@7.15.0",
+ }
+
+ _, actualErr := BuildPackageGraph(path, pkgJSON)
+
+ // Not asserting the full error message, because it includes a path with slashes and backslashes;
+ // getting the regex incantation to check that is not worth it.
+ // We have to use a regex because the actual error may differ depending on which workspace was
+ // added first and which one was added second, causing the error.
+ testifyAssert.Regexp(t, regexp.MustCompile("^Failed to add workspace \"same-name\".+$"), actualErr)
+}
+
+// This is duplicated from fs.turbo_json_test.go.
+// I wasn't able to pull it into a shared helper file/package because
+// it requires the `fs` package, which would cause cyclic dependencies
+// when used in turbo_json_test.go; fixing that would require more changes.
+func getTestDir(t *testing.T, testName string) turbopath.AbsoluteSystemPath {
+ defaultCwd, err := os.Getwd()
+ if err != nil {
+ t.Errorf("failed to get cwd: %v", err)
+ }
+ cwd, err := fs.CheckedToAbsoluteSystemPath(defaultCwd)
+ if err != nil {
+ t.Fatalf("cwd is not an absolute directory %v: %v", defaultCwd, err)
+ }
+
+ return cwd.UntypedJoin("testdata", testName)
+}
diff --git a/cli/internal/context/testdata/dupe-workspace-names/apps/a/package.json b/cli/internal/context/testdata/dupe-workspace-names/apps/a/package.json
new file mode 100644
index 0000000..94301a3
--- /dev/null
+++ b/cli/internal/context/testdata/dupe-workspace-names/apps/a/package.json
@@ -0,0 +1,6 @@
+{
+ "name": "same-name",
+ "dependencies": {
+ "ui": "workspace:*"
+ }
+}
diff --git a/cli/internal/context/testdata/dupe-workspace-names/apps/b/package.json b/cli/internal/context/testdata/dupe-workspace-names/apps/b/package.json
new file mode 100644
index 0000000..94301a3
--- /dev/null
+++ b/cli/internal/context/testdata/dupe-workspace-names/apps/b/package.json
@@ -0,0 +1,6 @@
+{
+ "name": "same-name",
+ "dependencies": {
+ "ui": "workspace:*"
+ }
+}
diff --git a/cli/internal/context/testdata/dupe-workspace-names/package.json b/cli/internal/context/testdata/dupe-workspace-names/package.json
new file mode 100644
index 0000000..3bf7403
--- /dev/null
+++ b/cli/internal/context/testdata/dupe-workspace-names/package.json
@@ -0,0 +1,7 @@
+{
+ "name": "dupe-workspace-names",
+ "workspaces": [
+ "apps/*"
+ ],
+ "packageManager": "pnpm@7.15.0"
+}
diff --git a/cli/internal/context/testdata/dupe-workspace-names/packages/ui/package.json b/cli/internal/context/testdata/dupe-workspace-names/packages/ui/package.json
new file mode 100644
index 0000000..1cd75b5
--- /dev/null
+++ b/cli/internal/context/testdata/dupe-workspace-names/packages/ui/package.json
@@ -0,0 +1,3 @@
+{
+ "name": "ui"
+}
diff --git a/cli/internal/context/testdata/dupe-workspace-names/pnpm-lock.yaml b/cli/internal/context/testdata/dupe-workspace-names/pnpm-lock.yaml
new file mode 100644
index 0000000..0909cde
--- /dev/null
+++ b/cli/internal/context/testdata/dupe-workspace-names/pnpm-lock.yaml
@@ -0,0 +1,21 @@
+lockfileVersion: 5.4
+
+importers:
+
+ .:
+ specifiers: {}
+
+ apps/a:
+ specifiers:
+ ui: workspace:*
+ dependencies:
+ ui: link:../../packages/ui
+
+ apps/b:
+ specifiers:
+ ui: workspace:*
+ dependencies:
+ ui: link:../../packages/ui
+
+ packages/ui:
+ specifiers: {}
diff --git a/cli/internal/context/testdata/dupe-workspace-names/pnpm-workspace.yaml b/cli/internal/context/testdata/dupe-workspace-names/pnpm-workspace.yaml
new file mode 100644
index 0000000..3ff5faa
--- /dev/null
+++ b/cli/internal/context/testdata/dupe-workspace-names/pnpm-workspace.yaml
@@ -0,0 +1,3 @@
+packages:
+ - "apps/*"
+ - "packages/*"
diff --git a/cli/internal/core/engine.go b/cli/internal/core/engine.go
new file mode 100644
index 0000000..7f08ea8
--- /dev/null
+++ b/cli/internal/core/engine.go
@@ -0,0 +1,591 @@
+package core
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "sort"
+ "strings"
+ "sync/atomic"
+
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/graph"
+ "github.com/vercel/turbo/cli/internal/util"
+
+ "github.com/pyr-sh/dag"
+)
+
+const ROOT_NODE_NAME = "___ROOT___"
+
+// Task is a higher level struct that contains the underlying TaskDefinition
+// but also some adjustments to it, based on business logic.
+type Task struct {
+ Name string
+ // TaskDefinition contains the config for the task from turbo.json
+ TaskDefinition fs.TaskDefinition
+}
+
+type Visitor = func(taskID string) error
+
+// Engine contains the DAG of package-tasks and implements the methods to build and execute it
+type Engine struct {
+ // TaskGraph is a graph of package-tasks
+ TaskGraph *dag.AcyclicGraph
+ PackageTaskDeps map[string][]string
+ rootEnabledTasks util.Set
+
+ // completeGraph is the CompleteGraph. We need this to look up the Pipeline, etc.
+ completeGraph *graph.CompleteGraph
+ // isSinglePackage is used to load turbo.json correctly
+ isSinglePackage bool
+}
+
+// NewEngine creates a new engine, given the complete graph of workspace packages
+func NewEngine(
+ completeGraph *graph.CompleteGraph,
+ isSinglePackage bool,
+) *Engine {
+ return &Engine{
+ completeGraph: completeGraph,
+ TaskGraph: &dag.AcyclicGraph{},
+ PackageTaskDeps: map[string][]string{},
+ rootEnabledTasks: make(util.Set),
+ isSinglePackage: isSinglePackage,
+ }
+}
+
+// EngineBuildingOptions help construct the TaskGraph
+type EngineBuildingOptions struct {
+ // Packages in the execution scope, if nil, all packages will be considered in scope
+ Packages []string
+ // TaskNames in the execution scope, if nil, all tasks will be executed
+ TaskNames []string
+ // Restrict execution to only the listed task names
+ TasksOnly bool
+}
+
+// EngineExecutionOptions controls a single walk of the task graph
+type EngineExecutionOptions struct {
+ // Parallel is whether to run tasks in parallel
+ Parallel bool
+ // Concurrency is the number of concurrent tasks that can be executed
+ Concurrency int
+}
+
+// Execute walks the task graph constructed by Prepare, invoking the visitor for each task.
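+//
+// A typical invocation (mirroring engine_test.go below) looks like:
+//
+//	errs := engine.Execute(func(taskID string) error {
+//		// run the task identified by taskID ("package#task")
+//		return nil
+//	}, EngineExecutionOptions{Concurrency: 10})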
+func (e *Engine) Execute(visitor Visitor, opts EngineExecutionOptions) []error {
+ var sema = util.NewSemaphore(opts.Concurrency)
+ var errored int32
+ return e.TaskGraph.Walk(func(v dag.Vertex) error {
+ // If something has already errored, short-circuit.
+ // There is a race here between concurrent tasks. However, if there is no
+ // dependency edge between them, we are not required to impose a strict order,
+ // so a failed task may fail to short-circuit a concurrent task that happened
+ // to start at the same time.
+ if atomic.LoadInt32(&errored) != 0 {
+ return nil
+ }
+ // Each vertex in the graph is a taskID (package#task format)
+ taskID := dag.VertexName(v)
+
+ // Always return early if this is the root node
+ if strings.Contains(taskID, ROOT_NODE_NAME) {
+ return nil
+ }
+
+ // Acquire the semaphore unless parallel
+ if !opts.Parallel {
+ sema.Acquire()
+ defer sema.Release()
+ }
+
+ if err := visitor(taskID); err != nil {
+ // We only ever flip from false to true, so we don't need to compare and swap the atomic
+ atomic.StoreInt32(&errored, 1)
+ return err
+ }
+ return nil
+ })
+}
+
+// MissingTaskError is a specialized error returned when we can't find a task.
+// We want to allow this error when getting task definitions, so we have to special-case it.
+type MissingTaskError struct {
+ workspaceName string
+ taskID string
+ taskName string
+}
+
+func (m *MissingTaskError) Error() string {
+ return fmt.Sprintf("Could not find \"%s\" or \"%s\" in workspace \"%s\"", m.taskName, m.taskID, m.workspaceName)
+}
+
+func (e *Engine) getTaskDefinition(pkg string, taskName string, taskID string) (*Task, error) {
+ pipeline, err := e.completeGraph.GetPipelineFromWorkspace(pkg, e.isSinglePackage)
+
+ if err != nil {
+ if pkg != util.RootPkgName {
+ // If there was no turbo.json in the workspace, fall back to the root turbo.json
+ if errors.Is(err, os.ErrNotExist) {
+ return e.getTaskDefinition(util.RootPkgName, taskName, taskID)
+ }
+
+ // otherwise bubble it up
+ return nil, err
+ }
+
+ return nil, err
+ }
+
+ if task, ok := pipeline[taskID]; ok {
+ return &Task{
+ Name: taskName,
+ TaskDefinition: task.GetTaskDefinition(),
+ }, nil
+ }
+
+ if task, ok := pipeline[taskName]; ok {
+ return &Task{
+ Name: taskName,
+ TaskDefinition: task.GetTaskDefinition(),
+ }, nil
+ }
+
+ // Getting here means turbo.json exists, but didn't define the task.
+ // Fall back to the root pipeline to find the task.
+ if pkg != util.RootPkgName {
+ return e.getTaskDefinition(util.RootPkgName, taskName, taskID)
+ }
+
+ // Return this as a custom type so we can ignore it specifically
+ return nil, &MissingTaskError{
+ taskName: taskName,
+ taskID: taskID,
+ workspaceName: pkg,
+ }
+}
+
+// Prepare constructs the Task Graph for a list of packages and tasks
+func (e *Engine) Prepare(options *EngineBuildingOptions) error {
+ pkgs := options.Packages
+ taskNames := options.TaskNames
+ tasksOnly := options.TasksOnly
+
+ // If there are no affected packages, we don't need to go through all this work;
+ // we can just exit early.
+ // TODO(mehulkar): but we still need to validate bad task names?
+ if len(pkgs) == 0 {
+ return nil
+ }
+
+ traversalQueue := []string{}
+
+ // Get the set of taskNames passed in; we'll remove the ones that have a definition
+ missing := util.SetFromStrings(taskNames)
+
+ // Get a list of entry points into our TaskGraph.
+ // We do this by taking the input taskNames, and pkgs
+ // and creating a queue of taskIDs that we can traverse and gather dependencies from.
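+ // (A taskID takes the "package#task" form, e.g. "web#build"; "web" is illustrative.)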
+ for _, pkg := range pkgs {
+ for _, taskName := range taskNames {
+ taskID := util.GetTaskId(pkg, taskName)
+
+ // Look up the task in the package
+ foundTask, err := e.getTaskDefinition(pkg, taskName, taskID)
+
+ // We can skip MissingTaskErrors because we'll validate against them later
+ // Return all other errors
+ if err != nil {
+ var e *MissingTaskError
+ if errors.As(err, &e) {
+ // Initially, non-package tasks are not required to exist, as long as some
+ // package in the list of packages defines it as a package-task. Dependencies
+ // *are* required to have a definition.
+ continue
+ }
+
+ return err
+ }
+
+ // If we found a task definition, remove it from the missing list
+ if foundTask != nil {
+ // delete taskName if it was found
+ missing.Delete(taskName)
+
+ // Even if a task definition was found, we _only_ want to add it as an entry point to
+ // the task graph (i.e. the traversalQueue) if it's:
+ // - A task from a non-root workspace (i.e. tasks from every other workspace)
+ // - A task that we *know* is a rootEnabled task (in which case, the root workspace is acceptable)
+ isRootPkg := pkg == util.RootPkgName
+ if !isRootPkg || e.rootEnabledTasks.Includes(taskName) {
+ traversalQueue = append(traversalQueue, taskID)
+ }
+ }
+ }
+ }
+
+ visited := make(util.Set)
+
+ // validate that all tasks passed were found
+ missingList := missing.UnsafeListOfStrings()
+ sort.Strings(missingList)
+
+ if len(missingList) > 0 {
+ return fmt.Errorf("Could not find the following tasks in project: %s", strings.Join(missingList, ", "))
+ }
+
+ // Things get appended to traversalQueue inside this loop, so we use the len() check instead of range.
+ for len(traversalQueue) > 0 {
+ // pop off the first item from the traversalQueue
+ taskID := traversalQueue[0]
+ traversalQueue = traversalQueue[1:]
+
+ pkg, taskName := util.GetPackageTaskFromId(taskID)
+
+ if pkg == util.RootPkgName && !e.rootEnabledTasks.Includes(taskName) {
+ return fmt.Errorf("%v needs an entry in turbo.json before it can be depended on because it is a task run from the root package", taskID)
+ }
+
+ if pkg != ROOT_NODE_NAME {
+ if _, ok := e.completeGraph.WorkspaceInfos.PackageJSONs[pkg]; !ok {
+ // If we have a pkg it should be in WorkspaceInfos.
+ // If we're hitting this error something has gone wrong earlier when building WorkspaceInfos
+ // or the workspace really doesn't exist and turbo.json is misconfigured.
+ return fmt.Errorf("Could not find workspace \"%s\" from task \"%s\" in project", pkg, taskID)
+ }
+ }
+
+ taskDefinitions, err := e.getTaskDefinitionChain(taskID, taskName)
+ if err != nil {
+ return err
+ }
+
+ taskDefinition, err := fs.MergeTaskDefinitions(taskDefinitions)
+ if err != nil {
+ return err
+ }
+
+ // Skip this iteration of the loop if we've already seen this taskID
+ if visited.Includes(taskID) {
+ continue
+ }
+
+ visited.Add(taskID)
+
+ // Put this taskDefinition into the Graph so we can look it up later during execution.
+ e.completeGraph.TaskDefinitions[taskID] = taskDefinition
+
+ topoDeps := util.SetFromStrings(taskDefinition.TopologicalDependencies)
+ deps := make(util.Set)
+ isPackageTask := util.IsPackageTask(taskName)
+
+ for _, dependency := range taskDefinition.TaskDependencies {
+ // If the current task is a workspace-specific task (including root Task)
+ // and its dependency is _also_ a workspace-specific task, we need to add
+ // a reference to this dependency directly into the engine.
+ // TODO @mehulkar: Why do we need this?
+ if isPackageTask && util.IsPackageTask(dependency) {
+ if err := e.AddDep(dependency, taskName); err != nil {
+ return err
+ }
+ } else {
+ // For non-workspace-specific dependencies, we attach a reference to
+ // the task that is added into the engine.
+ deps.Add(dependency)
+ }
+ }
+
+ // Filter down the tasks if there's a filter in place
+ // https://turbo.build/repo/docs/reference/command-line-reference#--only
+ if tasksOnly {
+ deps = deps.Filter(func(d interface{}) bool {
+ for _, target := range taskNames {
+ if fmt.Sprintf("%v", d) == target {
+ return true
+ }
+ }
+ return false
+ })
+ topoDeps = topoDeps.Filter(func(d interface{}) bool {
+ for _, target := range taskNames {
+ if fmt.Sprintf("%v", d) == target {
+ return true
+ }
+ }
+ return false
+ })
+ }
+
+ toTaskID := taskID
+
+ // hasTopoDeps will be true if the task depends on any tasks from dependency packages
+ // E.g. `dev: { dependsOn: [^dev] }`
+ hasTopoDeps := topoDeps.Len() > 0 && e.completeGraph.WorkspaceGraph.DownEdges(pkg).Len() > 0
+
+ // hasDeps will be true if the task depends on any tasks from its own package
+ // E.g. `build: { dependsOn: [dev] }`
+ hasDeps := deps.Len() > 0
+
+ // hasPackageTaskDeps will be true if this is a workspace-specific task, and
+ // it depends on other workspace-specific tasks
+ // E.g. `my-package#build: { dependsOn: [my-package#beforebuild] }`.
+ hasPackageTaskDeps := false
+ if _, ok := e.PackageTaskDeps[toTaskID]; ok {
+ hasPackageTaskDeps = true
+ }
+
+ if hasTopoDeps {
+ depPkgs := e.completeGraph.WorkspaceGraph.DownEdges(pkg)
+ for _, from := range topoDeps.UnsafeListOfStrings() {
+ // add task dep from all the package deps within repo
+ for depPkg := range depPkgs {
+ fromTaskID := util.GetTaskId(depPkg, from)
+ e.TaskGraph.Add(fromTaskID)
+ e.TaskGraph.Add(toTaskID)
+ e.TaskGraph.Connect(dag.BasicEdge(toTaskID, fromTaskID))
+ traversalQueue = append(traversalQueue, fromTaskID)
+ }
+ }
+ }
+
+ if hasDeps {
+ for _, from := range deps.UnsafeListOfStrings() {
+ fromTaskID := util.GetTaskId(pkg, from)
+ e.TaskGraph.Add(fromTaskID)
+ e.TaskGraph.Add(toTaskID)
+ e.TaskGraph.Connect(dag.BasicEdge(toTaskID, fromTaskID))
+ traversalQueue = append(traversalQueue, fromTaskID)
+ }
+ }
+
+ if hasPackageTaskDeps {
+ if pkgTaskDeps, ok := e.PackageTaskDeps[toTaskID]; ok {
+ for _, fromTaskID := range pkgTaskDeps {
+ e.TaskGraph.Add(fromTaskID)
+ e.TaskGraph.Add(toTaskID)
+ e.TaskGraph.Connect(dag.BasicEdge(toTaskID, fromTaskID))
+ traversalQueue = append(traversalQueue, fromTaskID)
+ }
+ }
+ }
+
+ // Add the root node into the graph
+ if !hasDeps && !hasTopoDeps && !hasPackageTaskDeps {
+ e.TaskGraph.Add(ROOT_NODE_NAME)
+ e.TaskGraph.Add(toTaskID)
+ e.TaskGraph.Connect(dag.BasicEdge(toTaskID, ROOT_NODE_NAME))
+ }
+ }
+
+ return nil
+}
+
+// AddTask adds root tasks to the engine so they can be looked up later.
+func (e *Engine) AddTask(taskName string) {
+ if util.IsPackageTask(taskName) {
+ pkg, taskName := util.GetPackageTaskFromId(taskName)
+ if pkg == util.RootPkgName {
+ e.rootEnabledTasks.Add(taskName)
+ }
+ }
+}
+
+// AddDep records a from -> to dependency between two package-task IDs so that the
+// corresponding edge can be added to the task graph later.
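+// For example, AddDep("my-package#beforebuild", "my-package#build") makes
+// my-package#build depend on my-package#beforebuild.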
+func (e *Engine) AddDep(fromTaskID string, toTaskID string) error {
+ fromPkg, _ := util.GetPackageTaskFromId(fromTaskID)
+ if fromPkg != ROOT_NODE_NAME && fromPkg != util.RootPkgName && !e.completeGraph.WorkspaceGraph.HasVertex(fromPkg) {
+ return fmt.Errorf("found reference to unknown package: %v in task %v", fromPkg, fromTaskID)
+ }
+
+ if _, ok := e.PackageTaskDeps[toTaskID]; !ok {
+ e.PackageTaskDeps[toTaskID] = []string{}
+ }
+
+ e.PackageTaskDeps[toTaskID] = append(e.PackageTaskDeps[toTaskID], fromTaskID)
+
+ return nil
+}
+
+// ValidatePersistentDependencies returns an error if any task depends on a persistent
+// task that is actually implemented (i.e. the dependency's package defines the script)
+func (e *Engine) ValidatePersistentDependencies(graph *graph.CompleteGraph, concurrency int) error {
+ var validationError error
+ persistentCount := 0
+
+ // Adding in a lock because otherwise walking the graph can introduce a data race
+ // (reproducible with `go test -race`)
+ var sema = util.NewSemaphore(1)
+
+ errs := e.TaskGraph.Walk(func(v dag.Vertex) error {
+ vertexName := dag.VertexName(v) // vertexName is a taskID
+
+ // No need to check the root node if that's where we are.
+ if strings.Contains(vertexName, ROOT_NODE_NAME) {
+ return nil
+ }
+
+ // Acquire a lock, because otherwise walking this graph can cause a race condition
+ // writing to the same validationError var defined outside the Walk(). This shows
+ // up when running tests with the `-race` flag.
+ sema.Acquire()
+ defer sema.Release()
+
+ currentTaskDefinition, currentTaskExists := e.completeGraph.TaskDefinitions[vertexName]
+ if currentTaskExists && currentTaskDefinition.Persistent {
+ persistentCount++
+ }
+
+ currentPackageName, currentTaskName := util.GetPackageTaskFromId(vertexName)
+
+ // For each "downEdge" (i.e. each task that _this_ task dependsOn)
+ // check if the downEdge is a Persistent task, and if it actually has the script implemented
+ // in that package's package.json
+ for dep := range e.TaskGraph.DownEdges(vertexName) {
+ depTaskID := dep.(string)
+ // No need to check the root node
+ if strings.Contains(depTaskID, ROOT_NODE_NAME) {
+ return nil
+ }
+
+ // Parse the taskID of this dependency task
+ packageName, taskName := util.GetPackageTaskFromId(depTaskID)
+
+ // Get the Task Definition so we can check if it is Persistent
+ depTaskDefinition, taskExists := e.completeGraph.TaskDefinitions[depTaskID]
+
+ if !taskExists {
+ return fmt.Errorf("Cannot find task definition for %v in package %v", depTaskID, packageName)
+ }
+
+ // Get information about the package
+ pkg, pkgExists := graph.WorkspaceInfos.PackageJSONs[packageName]
+ if !pkgExists {
+ return fmt.Errorf("Cannot find package %v", packageName)
+ }
+ _, hasScript := pkg.Scripts[taskName]
+
+ // If both conditions are true, set the validation error and break out of checking the dependencies
+ if depTaskDefinition.Persistent && hasScript {
+ validationError = fmt.Errorf(
+ "\"%s\" is a persistent task, \"%s\" cannot depend on it",
+ util.GetTaskId(packageName, taskName),
+ util.GetTaskId(currentPackageName, currentTaskName),
+ )
+
+ break
+ }
+ }
+
+ return nil
+ })
+
+ for _, err := range errs {
+ return fmt.Errorf("Validation failed: %v", err)
+ }
+
+ if validationError != nil {
+ return validationError
+ } else if persistentCount >= concurrency {
+ return fmt.Errorf("You have %v persistent tasks but `turbo` is configured for concurrency of %v. Set --concurrency to at least %v", persistentCount, concurrency, persistentCount+1)
+ }
+
+ return nil
+}
+
+// getTaskDefinitionChain gets a set of TaskDefinitions that apply to the taskID.
+// These definitions should be merged by the consumer.
+func (e *Engine) getTaskDefinitionChain(taskID string, taskName string) ([]fs.BookkeepingTaskDefinition, error) {
+ // Start a list of TaskDefinitions we've found for this TaskID
+ taskDefinitions := []fs.BookkeepingTaskDefinition{}
+
+ rootPipeline, err := e.completeGraph.GetPipelineFromWorkspace(util.RootPkgName, e.isSinglePackage)
+ if err != nil {
+ // It should be very unlikely that we can't find a root pipeline. Even for single package repos
+ // the pipeline is synthesized from package.json, so there should be _something_ here.
+ return nil, err
+ }
+
+ // Look for the taskDefinition in the root pipeline.
+ if rootTaskDefinition, err := rootPipeline.GetTask(taskID, taskName); err == nil {
+ taskDefinitions = append(taskDefinitions, *rootTaskDefinition)
+ }
+
+ // If we're in a single package repo, we can just exit with the TaskDefinition in the root pipeline
+ // since there are no workspaces, and we don't need to follow any extends keys.
+ if e.isSinglePackage {
+ if len(taskDefinitions) == 0 {
+ return nil, fmt.Errorf("Could not find \"%s\" in root turbo.json", taskID)
+ }
+ return taskDefinitions, nil
+ }
+
+ // If the taskID is a root task (e.g. //#build), we don't need to look
+ // for a workspace task, since these can only be defined in the root turbo.json.
+ taskIDPackage, _ := util.GetPackageTaskFromId(taskID)
+ if taskIDPackage != util.RootPkgName && taskIDPackage != ROOT_NODE_NAME {
+ // If there is an error, we can ignore it, since turbo.json config is not required in the workspace.
+ if workspaceTurboJSON, err := e.completeGraph.GetTurboConfigFromWorkspace(taskIDPackage, e.isSinglePackage); err != nil {
+ // swallow the error where the config file doesn't exist, but bubble up other things
+ if !errors.Is(err, os.ErrNotExist) {
+ return nil, err
+ }
+ } else {
+ // Run some validations on a workspace turbo.json. Note that these validations are on
+ // the whole struct, and not relevant to the taskID we're looking at right now.
+ validationErrors := workspaceTurboJSON.Validate([]fs.TurboJSONValidation{
+ validateNoPackageTaskSyntax,
+ validateExtends,
+ })
+
+ if len(validationErrors) > 0 {
+ fullError := errors.New("Invalid turbo.json")
+ for _, validationErr := range validationErrors {
+ fullError = fmt.Errorf("%w\n - %s", fullError, validationErr)
+ }
+
+ return nil, fullError
+ }
+
+ // If there are no errors, we can (try to) add the TaskDefinition to our list.
+ if workspaceDefinition, ok := workspaceTurboJSON.Pipeline[taskName]; ok {
+ taskDefinitions = append(taskDefinitions, workspaceDefinition)
+ }
+ }
+ }
+
+ if len(taskDefinitions) == 0 {
+ return nil, fmt.Errorf("Could not find \"%s\" in root turbo.json or \"%s\" workspace", taskID, taskIDPackage)
+ }
+
+ return taskDefinitions, nil
+}
+
+func validateNoPackageTaskSyntax(turboJSON *fs.TurboJSON) []error {
+ errors := []error{}
+
+ for taskIDOrName := range turboJSON.Pipeline {
+ if util.IsPackageTask(taskIDOrName) {
+ taskName := util.StripPackageName(taskIDOrName)
+ errors = append(errors, fmt.Errorf("\"%s\". Use \"%s\" instead", taskIDOrName, taskName))
+ }
+ }
+
+ return errors
+}
+
+func validateExtends(turboJSON *fs.TurboJSON) []error {
+ extendErrors := []error{}
+ extends := turboJSON.Extends
+ // TODO(mehulkar): Enable extending from more than one workspace.
+ if len(extends) > 1 {
+ extendErrors = append(extendErrors, fmt.Errorf("You can only extend from the root workspace"))
+ }
+
+ // We don't support this right now
+ if len(extends) == 0 {
+ extendErrors = append(extendErrors, fmt.Errorf("No \"extends\" key found"))
+ }
+
+ // TODO(mehulkar): Enable extending from non-root workspace.
+ if len(extends) == 1 && extends[0] != util.RootPkgName {
+ extendErrors = append(extendErrors, fmt.Errorf("You can only extend from the root workspace"))
+ }
+
+ return extendErrors
+}
diff --git a/cli/internal/core/engine_test.go b/cli/internal/core/engine_test.go
new file mode 100644
index 0000000..a92264d
--- /dev/null
+++ b/cli/internal/core/engine_test.go
@@ -0,0 +1,88 @@
+package core
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/graph"
+ "github.com/vercel/turbo/cli/internal/workspace"
+ "gotest.tools/v3/assert"
+
+ "github.com/pyr-sh/dag"
+)
+
+func TestShortCircuiting(t *testing.T) {
+ var workspaceGraph dag.AcyclicGraph
+ workspaceGraph.Add("a")
+ workspaceGraph.Add("b")
+ workspaceGraph.Add("c")
+ // Dependencies: a -> b -> c
+ workspaceGraph.Connect(dag.BasicEdge("a", "b"))
+ workspaceGraph.Connect(dag.BasicEdge("b", "c"))
+
+ buildTask := &fs.BookkeepingTaskDefinition{}
+ err := buildTask.UnmarshalJSON([]byte("{\"dependsOn\": [\"^build\"]}"))
+ assert.NilError(t, err, "BookkeepingTaskDefinition unmarshall")
+
+ pipeline := map[string]fs.BookkeepingTaskDefinition{
+ "build": *buildTask,
+ }
+
+ p := NewEngine(&graph.CompleteGraph{
+ WorkspaceGraph: workspaceGraph,
+ Pipeline: pipeline,
+ TaskDefinitions: map[string]*fs.TaskDefinition{},
+ WorkspaceInfos: workspace.Catalog{
+ PackageJSONs: map[string]*fs.PackageJSON{
+ "//": {},
+ "a": {},
+ "b": {},
+ "c": {},
+ },
+ TurboConfigs: map[string]*fs.TurboJSON{
+ "//": {
+ Pipeline: pipeline,
+ },
+ },
+ },
+ }, false)
+
+ p.AddTask("build")
+
+ err = p.Prepare(&EngineBuildingOptions{
+ Packages: []string{"a", "b", "c"},
+ TaskNames: []string{"build"},
+ TasksOnly: false,
+ })
+
+ if err != nil {
+ t.Fatalf("%v", err)
+ }
+
+ executed := map[string]bool{
+ "a#build": false,
+ "b#build": false,
+ "c#build": false,
+ }
+ expectedErr := errors.New("an error occurred")
+ // b#build is going to error, we expect to not execute a#build, which depends on b
+ testVisitor := func(taskID string) error {
+ println(taskID)
+ executed[taskID] = true
+ if taskID == "b#build" {
+ return expectedErr
+ }
+ return nil
+ }
+
+ errs := p.Execute(testVisitor, EngineExecutionOptions{
+ Concurrency: 10,
+ })
+ assert.Equal(t, len(errs), 1)
+ assert.Equal(t, errs[0], expectedErr)
+
+ assert.Equal(t, executed["c#build"], true)
+ assert.Equal(t, executed["b#build"], true)
+ assert.Equal(t, executed["a#build"], false)
+}
diff --git a/cli/internal/daemon/connector/connector.go b/cli/internal/daemon/connector/connector.go
new file mode 100644
index 0000000..d05ef59
--- /dev/null
+++ b/cli/internal/daemon/connector/connector.go
@@ -0,0 +1,391 @@
+package connector
+
+import (
+ "context"
+ "fmt"
+ "io/fs"
+ "os"
+ "os/exec"
+ "time"
+
+ "github.com/cenkalti/backoff/v4"
+ "github.com/hashicorp/go-hclog"
+ "github.com/nightlyone/lockfile"
+ "github.com/pkg/errors"
+ "github.com/vercel/turbo/cli/internal/turbodprotocol"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/credentials/insecure"
+ "google.golang.org/grpc/status"
+)
+
+var (
+ // ErrFailedToStart is returned when the daemon process cannot be started
+ ErrFailedToStart = errors.New("daemon could not be started")
+ // ErrVersionMismatch is returned when the daemon process was spawned by a different version than the connecting client
+ ErrVersionMismatch = errors.New("daemon version does not match client version")
+ errConnectionFailure = errors.New("could not connect to daemon")
+ // ErrTooManyAttempts is returned when the client fails to connect too many times
+ ErrTooManyAttempts = errors.New("reached maximum number of attempts contacting daemon")
+ // ErrDaemonNotRunning is returned when the client cannot contact the daemon and has
+ // been instructed not to attempt to start a new daemon
+ ErrDaemonNotRunning = errors.New("the daemon is not running")
+)
+
+// Opts is the set of configurable options for the client connection,
+// including some options to be passed through to the daemon process if
+// it needs to be started.
+type Opts struct {
+ ServerTimeout time.Duration
+ DontStart bool // if true, don't attempt to start the daemon
+ DontKill bool // if true, don't attempt to kill the daemon
+}
+
+// Client represents a connection to the daemon process
+type Client struct {
+ turbodprotocol.TurbodClient
+ *grpc.ClientConn
+ SockPath turbopath.AbsoluteSystemPath
+ PidPath turbopath.AbsoluteSystemPath
+ LogPath turbopath.AbsoluteSystemPath
+}
+
+// Connector instances are used to create a connection to turbo's daemon process.
+// The daemon will be started, or killed and restarted, if necessary.
+type Connector struct {
+ Logger hclog.Logger
+ Bin string
+ Opts Opts
+ SockPath turbopath.AbsoluteSystemPath
+ PidPath turbopath.AbsoluteSystemPath
+ LogPath turbopath.AbsoluteSystemPath
+ TurboVersion string
+}
+
+// ConnectionError is returned in the error case from connect. It wraps the underlying
+// cause and adds a message with the relevant files for the user to check.
+type ConnectionError struct {
+ SockPath turbopath.AbsoluteSystemPath
+ PidPath turbopath.AbsoluteSystemPath
+ LogPath turbopath.AbsoluteSystemPath
+ cause error
+}
+
+func (ce *ConnectionError) Error() string {
+ return fmt.Sprintf(`connection to turbo daemon process failed. Please ensure the following:
+ - the process identified by the pid in the file at %v is not running, and remove %v
+ - check the logs at %v
+ - the unix domain socket at %v has been removed
+ You can also run without the daemon process by passing --no-daemon`, ce.PidPath, ce.PidPath, ce.LogPath, ce.SockPath)
+}
+
+// Unwrap allows a connection error to work with standard library "errors" and compatible packages
+func (ce *ConnectionError) Unwrap() error {
+ return ce.cause
+}
+
+func (c *Connector) wrapConnectionError(err error) error {
+ return &ConnectionError{
+ SockPath: c.SockPath,
+ PidPath: c.PidPath,
+ LogPath: c.LogPath,
+ cause: err,
+ }
+}
+
+// lockFile returns a pointer to where a lockfile should be.
+// lockfile.New does not perform IO, and the only error it produces
+// is when a non-absolute path is provided. We're guaranteeing a
+// turbopath.AbsoluteSystemPath, so an error here indicates a bug and
+// we should crash.
+func (c *Connector) lockFile() lockfile.Lockfile {
+ lockFile, err := lockfile.New(c.PidPath.ToString())
+ if err != nil {
+ panic(err)
+ }
+ return lockFile
+}
+
+func (c *Connector) addr() string {
+ // grpc special-cases parsing of unix:<path> urls
+ // to avoid url.Parse. This lets us pass through our absolute
+ // paths unmodified, even on windows.
+ // See code here: https://github.com/grpc/grpc-go/blob/d83070ec0d9043f713b6a63e1963c593b447208c/internal/transport/http_util.go#L392
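+ // For example, a socket at /tmp/turbod.sock (path illustrative) yields
+ // the address "unix:/tmp/turbod.sock".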
+ return fmt.Sprintf("unix:%v", c.SockPath.ToString())
+}
+
+// We defer to the daemon's pid file as the locking mechanism.
+// If it doesn't exist, we will attempt to start the daemon.
+// If the daemon has a different version, ask it to shut down.
+// If the pid file exists but we can't connect, try to kill
+// the daemon.
+// If we can't cause the daemon to remove the pid file, report
+// an error to the user that includes the file location so that
+// they can resolve it.
+const (
+ _maxAttempts = 3
+ _shutdownTimeout = 1 * time.Second
+ _socketPollTimeout = 1 * time.Second
+)
+
+// killLiveServer tells a running server to shut down. This method is also responsible
+// for closing this client connection.
+func (c *Connector) killLiveServer(ctx context.Context, client *Client, serverPid int) error {
+ defer func() { _ = client.Close() }()
+
+ _, err := client.Shutdown(ctx, &turbodprotocol.ShutdownRequest{})
+ if err != nil {
+ c.Logger.Error(fmt.Sprintf("failed to shutdown running daemon. attempting to force it closed: %v", err))
+ return c.killDeadServer(serverPid)
+ }
+ // Wait for the server to gracefully exit
+ err = backoff.Retry(func() error {
+ lockFile := c.lockFile()
+ owner, err := lockFile.GetOwner()
+ if os.IsNotExist(err) {
+ // If the pid file no longer exists, we can conclude that the daemon successfully
+ // exited and cleaned up after itself.
+ return nil
+ } else if err != nil {
+ // some other error occurred getting the lockfile owner
+ return backoff.Permanent(err)
+ } else if owner.Pid == serverPid {
+ // We're still waiting for the server to shut down
+ return errNeedsRetry
+ }
+ // if there's no error and the lockfile has a new pid, someone else must've started a new daemon.
+ // Consider the old one killed and move on.
+ return nil
+ }, backoffWithTimeout(_shutdownTimeout))
+ if errors.Is(err, errNeedsRetry) {
+ c.Logger.Error(fmt.Sprintf("daemon did not exit after %v, attempting to force it closed", _shutdownTimeout.String()))
+ return c.killDeadServer(serverPid)
+ } else if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (c *Connector) killDeadServer(pid int) error {
+ // currently the only error that this constructor returns is
+ // in the case that you don't provide an absolute path.
+ // Given that we require an absolute path as input, this should
+ // hopefully never happen.
+ lockFile := c.lockFile()
+ process, err := lockFile.GetOwner()
+ if err == nil {
+ // Check that this is the same process that we failed to connect to.
+ // Otherwise, connectInternal will loop around again and start with whatever
+ // new process has the pid file.
+ if process.Pid == pid {
+ // we have a process that we need to kill
+ // TODO(gsoltis): graceful kill? the process is already not responding to requests,
+ // but it could be in the middle of a graceful shutdown. Probably should let it clean
+ // itself up, and report an error and defer to a force-kill by the user
+ if err := process.Kill(); err != nil {
+ return err
+ }
+ }
+ return nil
+ } else if errors.Is(err, os.ErrNotExist) {
+ // There's no pid file. Someone else killed it. Returning no error will cause the
+ // connectInternal to loop around and try the connection again.
+ return nil
+ }
+ return err
+}
+
+// Connect attempts to create a connection to a turbo daemon.
+// Retries and daemon restarts are built in. If this fails,
+// it is unlikely to succeed after an automated retry.
+func (c *Connector) Connect(ctx context.Context) (*Client, error) {
+ client, err := c.connectInternal(ctx)
+ if err != nil {
+ return nil, c.wrapConnectionError(err)
+ }
+ return client, nil
+}
+
+func (c *Connector) connectInternal(ctx context.Context) (*Client, error) {
+ // for each attempt, we:
+ // 1. try to find or start a daemon process, getting its pid
+ // 2. wait for the unix domain socket file to appear
+ // 3. connect to the unix domain socket. Note that this connection is not validated
+ // 4. send a hello message. This validates the connection as a side effect of
+ // negotiating versions, which currently requires exact match.
+ // In the event of a live, but incompatible server, we attempt to shut it down and start
+ // a new one. In the event of an unresponsive server, we attempt to kill the process
+ // identified by the pid file, with the hope that it will clean up after itself.
+ // Failures include details about where to find logs, the pid file, and the socket file.
+ for i := 0; i < _maxAttempts; i++ {
+ serverPid, err := c.getOrStartDaemon()
+ if err != nil {
+ // If we fail to even start the daemon process, return immediately, we're unlikely
+ // to succeed without user intervention
+ return nil, err
+ }
+ if err := c.waitForSocket(); errors.Is(err, ErrFailedToStart) {
+ // If we didn't see the socket file, try again. It's possible that
+ // the daemon encountered a transitory error
+ continue
+ } else if err != nil {
+ return nil, err
+ }
+ client, err := c.getClientConn()
+ if err != nil {
+ return nil, err
+ }
+ if err := c.sendHello(ctx, client); err == nil {
+ // We connected and negotiated a version, we're all set
+ return client, nil
+ } else if errors.Is(err, ErrVersionMismatch) {
+ // We don't want to knock down a perfectly fine daemon in a status check.
+ if c.Opts.DontKill {
+ return nil, err
+ }
+
+ // We now know we aren't going to return this client,
+ // but killLiveServer still needs it to send the Shutdown request.
+ // killLiveServer will close the client when it is done with it.
+ if err := c.killLiveServer(ctx, client, serverPid); err != nil {
+ return nil, err
+ }
+ // Loops back around and tries again.
+ } else if errors.Is(err, errConnectionFailure) {
+ // close the client, see if we can kill the stale daemon
+ _ = client.Close()
+ if err := c.killDeadServer(serverPid); err != nil {
+ return nil, err
+ }
+ // if we successfully killed the dead server, loop around and try again
+ } else if err != nil {
+ // Some other error occurred, close the client and
+ // report the error to the user
+ if closeErr := client.Close(); closeErr != nil {
+ // In the event that we fail to close the client, bundle that error along also.
+ // Keep the original error in the error chain, as it's more likely to be useful
+ // or needed for matching on later.
+ err = errors.Wrapf(err, "also failed to close client connection: %v", closeErr)
+ }
+ return nil, err
+ }
+ }
+ return nil, ErrTooManyAttempts
+}
+
+// getOrStartDaemon returns the PID of the daemon process on success. It may start
+// the daemon if it doesn't find one running.
+func (c *Connector) getOrStartDaemon() (int, error) {
+ lockFile := c.lockFile()
+ daemonProcess, getDaemonProcessErr := lockFile.GetOwner()
+ if getDaemonProcessErr != nil {
+ // If we're in a clean state this isn't an "error" per se.
+ // We attempt to start a daemon.
+ if errors.Is(getDaemonProcessErr, fs.ErrNotExist) {
+ if c.Opts.DontStart {
+ return 0, ErrDaemonNotRunning
+ }
+ pid, startDaemonErr := c.startDaemon()
+ if startDaemonErr != nil {
+ return 0, startDaemonErr
+ }
+ return pid, nil
+ }
+
+ // We could have hit any number of errors.
+ // - Failed to read the file for permission reasons.
+ // - User emptied the file's contents.
+ // - etc.
+ return 0, errors.Wrapf(getDaemonProcessErr, "An issue was encountered with the pid file. Please remove it and try again: %v", c.PidPath)
+ }
+
+ return daemonProcess.Pid, nil
+}
+
+func (c *Connector) getClientConn() (*Client, error) {
+ creds := insecure.NewCredentials()
+ conn, err := grpc.Dial(c.addr(), grpc.WithTransportCredentials(creds))
+ if err != nil {
+ return nil, err
+ }
+ tc := turbodprotocol.NewTurbodClient(conn)
+ return &Client{
+ TurbodClient: tc,
+ ClientConn: conn,
+ SockPath: c.SockPath,
+ PidPath: c.PidPath,
+ LogPath: c.LogPath,
+ }, nil
+}
+
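+// sendHello performs the version handshake with the daemon: a FailedPrecondition
+// status maps to ErrVersionMismatch, Unavailable maps to errConnectionFailure, and
+// any other non-OK status is returned as-is.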
+func (c *Connector) sendHello(ctx context.Context, client turbodprotocol.TurbodClient) error {
+ _, err := client.Hello(ctx, &turbodprotocol.HelloRequest{
+ Version: c.TurboVersion,
+ // TODO: add session id
+ })
+ status := status.Convert(err)
+ switch status.Code() {
+ case codes.OK:
+ return nil
+ case codes.FailedPrecondition:
+ return ErrVersionMismatch
+ case codes.Unavailable:
+ return errConnectionFailure
+ default:
+ return err
+ }
+}
+
+var errNeedsRetry = errors.New("retry the operation")
+
+// backoffWithTimeout returns an exponential backoff, starting at 2ms and doubling until
+// the specific timeout has elapsed. Note that backoff instances are stateful, so we need
+// a new one each time we do a Retry.
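+// For example, a 1s timeout yields retry intervals of roughly 2ms, 4ms, 8ms, and so
+// on, until about one second has elapsed.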
+func backoffWithTimeout(timeout time.Duration) *backoff.ExponentialBackOff {
+ return &backoff.ExponentialBackOff{
+ Multiplier: 2,
+ InitialInterval: 2 * time.Millisecond,
+ MaxElapsedTime: timeout,
+ Clock: backoff.SystemClock,
+ Stop: backoff.Stop,
+ }
+}
+
+// waitForSocket waits for the unix domain socket to appear
+func (c *Connector) waitForSocket() error {
+ // Note that we don't care if this is our daemon
+ // or not. We started a process, but someone else could beat
+ // us to listening. That's fine, we'll check the version
+ // later.
+ err := backoff.Retry(func() error {
+ if !c.SockPath.FileExists() {
+ return errNeedsRetry
+ }
+ return nil
+ }, backoffWithTimeout(_socketPollTimeout))
+ if errors.Is(err, errNeedsRetry) {
+ return ErrFailedToStart
+ } else if err != nil {
+ return err
+ }
+ return nil
+}
+
+// startDaemon starts the daemon and returns the pid for the new process
+func (c *Connector) startDaemon() (int, error) {
+ args := []string{"daemon"}
+ if c.Opts.ServerTimeout != 0 {
+ args = append(args, fmt.Sprintf("--idle-time=%v", c.Opts.ServerTimeout.String()))
+ }
+ c.Logger.Debug(fmt.Sprintf("starting turbod binary %v", c.Bin))
+ cmd := exec.Command(c.Bin, args...)
+ // Give the daemon its own process group id so that any attempts
+ // to kill it and its process tree don't kill this client.
+ cmd.SysProcAttr = getSysProcAttrs()
+ err := cmd.Start()
+ if err != nil {
+ return 0, err
+ }
+ return cmd.Process.Pid, nil
+}
diff --git a/cli/internal/daemon/connector/connector_test.go b/cli/internal/daemon/connector/connector_test.go
new file mode 100644
index 0000000..62b4504
--- /dev/null
+++ b/cli/internal/daemon/connector/connector_test.go
@@ -0,0 +1,256 @@
+package connector
+
+import (
+ "context"
+ "errors"
+ "net"
+ "os/exec"
+ "runtime"
+ "strconv"
+ "testing"
+
+ "github.com/hashicorp/go-hclog"
+ "github.com/nightlyone/lockfile"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/turbodprotocol"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/credentials/insecure"
+ "google.golang.org/grpc/status"
+ "google.golang.org/grpc/test/bufconn"
+ "gotest.tools/v3/assert"
+)
+
+// testBin returns a platform-appropriate executable to run node.
+// Node works here as an arbitrary process to start, since it's
+// required for turbo development. It will obviously not implement
+// our grpc service; use a mockServer instance where that's needed.
+func testBin() string {
+ if runtime.GOOS == "windows" {
+ return "node.exe"
+ }
+ return "node"
+}
+
+func getUnixSocket(dir turbopath.AbsoluteSystemPath) turbopath.AbsoluteSystemPath {
+ return dir.UntypedJoin("turbod-test.sock")
+}
+
+func getPidFile(dir turbopath.AbsoluteSystemPath) turbopath.AbsoluteSystemPath {
+ return dir.UntypedJoin("turbod-test.pid")
+}
+
+func TestGetOrStartDaemonInvalidPIDFile(t *testing.T) {
+ logger := hclog.Default()
+ dir := t.TempDir()
+ dirPath := fs.AbsoluteSystemPathFromUpstream(dir)
+
+ pidPath := getPidFile(dirPath)
+ writeFileErr := pidPath.WriteFile(nil, 0777)
+ assert.NilError(t, writeFileErr, "WriteFile")
+
+ c := &Connector{
+ Logger: logger,
+ Opts: Opts{},
+ PidPath: pidPath,
+ }
+
+ pid, err := c.getOrStartDaemon()
+ assert.Equal(t, pid, 0)
+ assert.ErrorContains(t, err, "issue was encountered with the pid file")
+}
+
+func TestConnectFailsWithoutGrpcServer(t *testing.T) {
+ // We aren't starting a server that is going to write
+ // to our socket file, so we should see a series of connection
+ // failures, followed by ErrTooManyAttempts
+ logger := hclog.Default()
+ dir := t.TempDir()
+ dirPath := fs.AbsoluteSystemPathFromUpstream(dir)
+
+ sockPath := getUnixSocket(dirPath)
+ pidPath := getPidFile(dirPath)
+ ctx := context.Background()
+ bin := testBin()
+ c := &Connector{
+ Logger: logger,
+ Bin: bin,
+ Opts: Opts{},
+ SockPath: sockPath,
+ PidPath: pidPath,
+ }
+ // Note that we expect ~3s here, for 3 attempts with a timeout of 1s
+ _, err := c.connectInternal(ctx)
+ assert.ErrorIs(t, err, ErrTooManyAttempts)
+}
+
+func TestKillDeadServerNoPid(t *testing.T) {
+ logger := hclog.Default()
+ dir := t.TempDir()
+ dirPath := fs.AbsoluteSystemPathFromUpstream(dir)
+
+ sockPath := getUnixSocket(dirPath)
+ pidPath := getPidFile(dirPath)
+ c := &Connector{
+ Logger: logger,
+ Bin: "nonexistent",
+ Opts: Opts{},
+ SockPath: sockPath,
+ PidPath: pidPath,
+ }
+
+ err := c.killDeadServer(99999)
+ assert.NilError(t, err, "killDeadServer")
+}
+
+func TestKillDeadServerNoProcess(t *testing.T) {
+ logger := hclog.Default()
+ dir := t.TempDir()
+ dirPath := fs.AbsoluteSystemPathFromUpstream(dir)
+
+ sockPath := getUnixSocket(dirPath)
+ pidPath := getPidFile(dirPath)
+ // Simulate the socket already existing, with no live daemon
+ err := sockPath.WriteFile([]byte("junk"), 0644)
+ assert.NilError(t, err, "WriteFile")
+ err = pidPath.WriteFile([]byte("99999"), 0644)
+ assert.NilError(t, err, "WriteFile")
+ c := &Connector{
+ Logger: logger,
+ Bin: "nonexistent",
+ Opts: Opts{},
+ SockPath: sockPath,
+ PidPath: pidPath,
+ }
+
+ err = c.killDeadServer(99999)
+ assert.ErrorIs(t, err, lockfile.ErrDeadOwner)
+ stillExists := pidPath.FileExists()
+ if !stillExists {
+ t.Error("pidPath should still exist, expected the user to clean it up")
+ }
+}
+
+func TestKillDeadServerWithProcess(t *testing.T) {
+ logger := hclog.Default()
+ dir := t.TempDir()
+ dirPath := fs.AbsoluteSystemPathFromUpstream(dir)
+
+ sockPath := getUnixSocket(dirPath)
+ pidPath := getPidFile(dirPath)
+ // Simulate the socket already existing, with no live daemon
+ err := sockPath.WriteFile([]byte("junk"), 0644)
+ assert.NilError(t, err, "WriteFile")
+ bin := testBin()
+ cmd := exec.Command(bin)
+ err = cmd.Start()
+ assert.NilError(t, err, "cmd.Start")
+ pid := cmd.Process.Pid
+ if pid == 0 {
+ t.Fatalf("failed to start process %v", bin)
+ }
+
+ err = pidPath.WriteFile([]byte(strconv.Itoa(pid)), 0644)
+ assert.NilError(t, err, "WriteFile")
+ c := &Connector{
+ Logger: logger,
+ Bin: "nonexistent",
+ Opts: Opts{},
+ SockPath: sockPath,
+ PidPath: pidPath,
+ }
+
+ err = c.killDeadServer(pid)
+ assert.NilError(t, err, "killDeadServer")
+ stillExists := pidPath.FileExists()
+ if !stillExists {
+ t.Error("pidPath no longer exists, expected client to not clean it up")
+ }
+ err = cmd.Wait()
+ exitErr := &exec.ExitError{}
+ if !errors.As(err, &exitErr) {
+ t.Errorf("expected an exit error from %v, got %v", bin, err)
+ }
+}
+
+type mockServer struct {
+ turbodprotocol.UnimplementedTurbodServer
+ helloErr error
+ shutdownResp *turbodprotocol.ShutdownResponse
+ pidFile turbopath.AbsoluteSystemPath
+}
+
+ // Shutdown simulates the server exiting by cleaning up the pid file
+func (s *mockServer) Shutdown(ctx context.Context, req *turbodprotocol.ShutdownRequest) (*turbodprotocol.ShutdownResponse, error) {
+ if err := s.pidFile.Remove(); err != nil {
+ return nil, err
+ }
+ return s.shutdownResp, nil
+}
+
+func (s *mockServer) Hello(ctx context.Context, req *turbodprotocol.HelloRequest) (*turbodprotocol.HelloResponse, error) {
+ if req.Version == "" {
+ return nil, errors.New("missing version")
+ }
+ return nil, s.helloErr
+}
+
+func TestKillLiveServer(t *testing.T) {
+ logger := hclog.Default()
+ dir := t.TempDir()
+ dirPath := fs.AbsoluteSystemPathFromUpstream(dir)
+
+ sockPath := getUnixSocket(dirPath)
+ pidPath := getPidFile(dirPath)
+ err := pidPath.WriteFile([]byte("99999"), 0644)
+ assert.NilError(t, err, "WriteFile")
+
+ ctx := context.Background()
+ c := &Connector{
+ Logger: logger,
+ Bin: "nonexistent",
+ Opts: Opts{},
+ SockPath: sockPath,
+ PidPath: pidPath,
+ TurboVersion: "some-version",
+ }
+
+ st := status.New(codes.FailedPrecondition, "version mismatch")
+ mock := &mockServer{
+ shutdownResp: &turbodprotocol.ShutdownResponse{},
+ helloErr: st.Err(),
+ pidFile: pidPath,
+ }
+ lis := bufconn.Listen(1024 * 1024)
+ grpcServer := grpc.NewServer()
+ turbodprotocol.RegisterTurbodServer(grpcServer, mock)
+ go func(t *testing.T) {
+ if err := grpcServer.Serve(lis); err != nil {
+ t.Logf("server closed: %v", err)
+ }
+ }(t)
+
+ conn, err := grpc.DialContext(ctx, "bufnet", grpc.WithContextDialer(func(ctx context.Context, s string) (net.Conn, error) {
+ return lis.Dial()
+ }), grpc.WithTransportCredentials(insecure.NewCredentials()))
+ assert.NilError(t, err, "DialContext")
+ turboClient := turbodprotocol.NewTurbodClient(conn)
+ client := &Client{
+ TurbodClient: turboClient,
+ ClientConn: conn,
+ }
+ err = c.sendHello(ctx, client)
+ if !errors.Is(err, ErrVersionMismatch) {
+ t.Errorf("sendHello error got %v, want %v", err, ErrVersionMismatch)
+ }
+ err = c.killLiveServer(ctx, client, 99999)
+ assert.NilError(t, err, "killLiveServer")
+ // Expect the pid file and socket files to have been cleaned up
+ if pidPath.FileExists() {
+ t.Errorf("expected pid file to have been deleted: %v", pidPath)
+ }
+ if sockPath.FileExists() {
+ t.Errorf("expected socket file to have been deleted: %v", sockPath)
+ }
+}
diff --git a/cli/internal/daemon/connector/fork.go b/cli/internal/daemon/connector/fork.go
new file mode 100644
index 0000000..8a6d01d
--- /dev/null
+++ b/cli/internal/daemon/connector/fork.go
@@ -0,0 +1,15 @@
+//go:build !windows
+// +build !windows
+
+package connector
+
+import "syscall"
+
+// getSysProcAttrs returns the platform-specific attributes we want to
+// use while forking the daemon process. Currently this is limited to
+// forcing a new process group
+func getSysProcAttrs() *syscall.SysProcAttr {
+ return &syscall.SysProcAttr{
+ Setpgid: true,
+ }
+}
diff --git a/cli/internal/daemon/connector/fork_windows.go b/cli/internal/daemon/connector/fork_windows.go
new file mode 100644
index 0000000..b9d6e77
--- /dev/null
+++ b/cli/internal/daemon/connector/fork_windows.go
@@ -0,0 +1,15 @@
+//go:build windows
+// +build windows
+
+package connector
+
+import "syscall"
+
+// getSysProcAttrs returns the platform-specific attributes we want to
+// use while forking the daemon process. Currently this is limited to
+// forcing a new process group
+func getSysProcAttrs() *syscall.SysProcAttr {
+ return &syscall.SysProcAttr{
+ CreationFlags: syscall.CREATE_NEW_PROCESS_GROUP,
+ }
+}
diff --git a/cli/internal/daemon/daemon.go b/cli/internal/daemon/daemon.go
new file mode 100644
index 0000000..81d5283
--- /dev/null
+++ b/cli/internal/daemon/daemon.go
@@ -0,0 +1,307 @@
+package daemon
+
+import (
+ "context"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery"
+ "github.com/hashicorp/go-hclog"
+ "github.com/nightlyone/lockfile"
+ "github.com/pkg/errors"
+ "github.com/vercel/turbo/cli/internal/cmdutil"
+ "github.com/vercel/turbo/cli/internal/daemon/connector"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/server"
+ "github.com/vercel/turbo/cli/internal/signals"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "github.com/vercel/turbo/cli/internal/turbostate"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+type daemon struct {
+ logger hclog.Logger
+ repoRoot turbopath.AbsoluteSystemPath
+ timeout time.Duration
+ reqCh chan struct{}
+ timedOutCh chan struct{}
+}
+
+func getRepoHash(repoRoot turbopath.AbsoluteSystemPath) string {
+ pathHash := sha256.Sum256([]byte(repoRoot.ToString()))
+ // We grab a substring of the hash because there is a 108-character limit on the length
+ // of a filepath for a unix domain socket.
+ return hex.EncodeToString(pathHash[:])[:16]
+}
+
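A hedged aside, not part of the patch: truncating to 16 hex characters keeps the final socket path well under that limit. A toy check, using imports daemon.go already has (the repo path is made up):

// For a repo at /home/user/project, the daemon directory becomes
// $TMPDIR/turbod/<first 16 hex chars of the sha256 digest>, so the
// full ".../turbod.sock" path stays far below ~108 bytes.
func ExampleGetRepoHash() {
	sum := sha256.Sum256([]byte("/home/user/project"))
	fmt.Println(len(hex.EncodeToString(sum[:])[:16]))
	// Output: 16
}
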
+func getDaemonFileRoot(repoRoot turbopath.AbsoluteSystemPath) turbopath.AbsoluteSystemPath {
+ tempDir := fs.TempDir("turbod")
+ hexHash := getRepoHash(repoRoot)
+ return tempDir.UntypedJoin(hexHash)
+}
+
+func getLogFilePath(repoRoot turbopath.AbsoluteSystemPath) (turbopath.AbsoluteSystemPath, error) {
+ hexHash := getRepoHash(repoRoot)
+ base := repoRoot.Base()
+ logFilename := fmt.Sprintf("%v-%v.log", hexHash, base)
+
+ logsDir := fs.GetTurboDataDir().UntypedJoin("logs")
+ return logsDir.UntypedJoin(logFilename), nil
+}
+
+func getUnixSocket(repoRoot turbopath.AbsoluteSystemPath) turbopath.AbsoluteSystemPath {
+ root := getDaemonFileRoot(repoRoot)
+ return root.UntypedJoin("turbod.sock")
+}
+
+func getPidFile(repoRoot turbopath.AbsoluteSystemPath) turbopath.AbsoluteSystemPath {
+ root := getDaemonFileRoot(repoRoot)
+ return root.UntypedJoin("turbod.pid")
+}
+
+ // logError logs an error to the daemon's log output.
+func (d *daemon) logError(err error) {
+ d.logger.Error(fmt.Sprintf("error %v", err))
+}
+
+// we're only appending, and we're creating the file if it doesn't exist.
+// we do not need to read the log file.
+var _logFileFlags = os.O_WRONLY | os.O_APPEND | os.O_CREATE
+
+// ExecuteDaemon executes the root daemon command
+func ExecuteDaemon(ctx context.Context, helper *cmdutil.Helper, signalWatcher *signals.Watcher, args *turbostate.ParsedArgsFromRust) error {
+ base, err := helper.GetCmdBase(args)
+ if err != nil {
+ return err
+ }
+ if args.TestRun {
+ base.UI.Info("Daemon test run successful")
+ return nil
+ }
+
+ idleTimeout := 4 * time.Hour
+ if args.Command.Daemon.IdleTimeout != "" {
+ idleTimeout, err = time.ParseDuration(args.Command.Daemon.IdleTimeout)
+ if err != nil {
+ return err
+ }
+ }
+
+ logFilePath, err := getLogFilePath(base.RepoRoot)
+ if err != nil {
+ return err
+ }
+ if err := logFilePath.EnsureDir(); err != nil {
+ return err
+ }
+ logFile, err := logFilePath.OpenFile(_logFileFlags, 0644)
+ if err != nil {
+ return err
+ }
+ defer func() { _ = logFile.Close() }()
+ logger := hclog.New(&hclog.LoggerOptions{
+ Output: io.MultiWriter(logFile, os.Stdout),
+ Level: hclog.Info,
+ Color: hclog.ColorOff,
+ Name: "turbod",
+ })
+
+ d := &daemon{
+ logger: logger,
+ repoRoot: base.RepoRoot,
+ timeout: idleTimeout,
+ reqCh: make(chan struct{}),
+ timedOutCh: make(chan struct{}),
+ }
+ serverName := getRepoHash(base.RepoRoot)
+ turboServer, err := server.New(serverName, d.logger.Named("rpc server"), base.RepoRoot, base.TurboVersion, logFilePath)
+ if err != nil {
+ d.logError(err)
+ return err
+ }
+ defer func() { _ = turboServer.Close() }()
+ err = d.runTurboServer(ctx, turboServer, signalWatcher)
+ if err != nil {
+ d.logError(err)
+ return err
+ }
+ return nil
+}
+
+var errInactivityTimeout = errors.New("turbod shut down from inactivity")
+
+ // tryAcquirePidfileLock attempts to ensure that only one daemon is running against the given pid file
+ // path at a time. If this process fails to write its PID to the lockfile, it must exit.
+func tryAcquirePidfileLock(pidPath turbopath.AbsoluteSystemPath) (lockfile.Lockfile, error) {
+ if err := pidPath.EnsureDir(); err != nil {
+ return "", err
+ }
+ lockFile, err := lockfile.New(pidPath.ToString())
+ if err != nil {
+ // lockfile.New should only return an error if it wasn't given an absolute path.
+ // We are attempting to use the type system to enforce that we are passing an
+ // absolute path. An error here likely means a bug, and we should crash.
+ panic(err)
+ }
+ if err := lockFile.TryLock(); err != nil {
+ return "", err
+ }
+ return lockFile, nil
+}
+
+type rpcServer interface {
+ Register(grpcServer server.GRPCServer)
+}
+
+func (d *daemon) runTurboServer(parentContext context.Context, rpcServer rpcServer, signalWatcher *signals.Watcher) error {
+ ctx, cancel := context.WithCancel(parentContext)
+ defer cancel()
+ pidPath := getPidFile(d.repoRoot)
+ lock, err := tryAcquirePidfileLock(pidPath)
+ if err != nil {
+ return errors.Wrapf(err, "failed to lock the pid file at %v. Is another turbo daemon running?", lock)
+ }
+ // When we're done serving, clean up the pid file.
+ // Also, if *this* goroutine panics, make sure we unlock the pid file.
+ defer func() {
+ if err := lock.Unlock(); err != nil {
+ d.logger.Error(errors.Wrapf(err, "failed unlocking pid file at %v", lock).Error())
+ }
+ }()
+ // This handler runs in request goroutines. If a request causes a panic,
+ // this handler will get called after a call to recover(), meaning we are
+ // no longer panicking. We return a server error and cancel our context,
+ // which triggers a shutdown of the server.
+ panicHandler := func(thePanic interface{}) error {
+ cancel()
+ d.logger.Error(fmt.Sprintf("Caught panic %v", thePanic))
+ return status.Error(codes.Internal, "server panicked")
+ }
+
+ // If we have the lock, assume that we are the owners of the socket file,
+ // whether it already exists or not. That means we are free to remove it.
+ sockPath := getUnixSocket(d.repoRoot)
+ if err := sockPath.Remove(); err != nil && !errors.Is(err, os.ErrNotExist) {
+ return err
+ }
+ d.logger.Debug(fmt.Sprintf("Using socket path %v (%v)\n", sockPath, len(sockPath)))
+ lis, err := net.Listen("unix", sockPath.ToString())
+ if err != nil {
+ return err
+ }
+ // We don't need to explicitly close 'lis', the grpc server will handle that
+ s := grpc.NewServer(
+ grpc.ChainUnaryInterceptor(
+ d.onRequest,
+ grpc_recovery.UnaryServerInterceptor(grpc_recovery.WithRecoveryHandler(panicHandler)),
+ ),
+ )
+ go d.timeoutLoop(ctx)
+
+ rpcServer.Register(s)
+ errCh := make(chan error)
+ go func(errCh chan<- error) {
+ if err := s.Serve(lis); err != nil {
+ errCh <- err
+ }
+ close(errCh)
+ }(errCh)
+
+ // Note that we aren't deferring s.GracefulStop here because we also need
+ // to drain the error channel, which isn't guaranteed to happen until
+ // the server has stopped. That in turn may depend on GracefulStop being
+ // called.
+ // Future work could restructure this to make that simpler.
+ var exitErr error
+ select {
+ case err, ok := <-errCh:
+ // The server exited
+ if ok {
+ exitErr = err
+ }
+ case <-d.timedOutCh:
+ // This is the inactivity timeout case
+ exitErr = errInactivityTimeout
+ s.GracefulStop()
+ case <-ctx.Done():
+ // If a request handler panics, it will cancel this context
+ s.GracefulStop()
+ case <-signalWatcher.Done():
+ // This is fired if we caught a signal
+ s.GracefulStop()
+ }
+ // Wait for the server to exit, if it hasn't already.
+ // When it does, this channel will close. We don't
+ // care about the error in this scenario because we've
+ // either requested a close by cancelling the context,
+ // hit the inactivity timeout, or caught a signal.
+ for range errCh {
+ }
+ return exitErr
+}
+
+func (d *daemon) onRequest(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
+ d.reqCh <- struct{}{}
+ return handler(ctx, req)
+}
+
+func (d *daemon) timeoutLoop(ctx context.Context) {
+ timeoutCh := time.After(d.timeout)
+outer:
+ for {
+ select {
+ case <-d.reqCh:
+ timeoutCh = time.After(d.timeout)
+ case <-timeoutCh:
+ close(d.timedOutCh)
+ break outer
+ case <-ctx.Done():
+ break outer
+ }
+ }
+}
+
+ // ClientOpts re-exports connector.Opts to encapsulate the connector package
+type ClientOpts = connector.Opts
+
+// Client re-exports connector.Client to encapsulate the connector package
+type Client = connector.Client
+
+// GetClient returns a client that can be used to interact with the daemon
+func GetClient(ctx context.Context, repoRoot turbopath.AbsoluteSystemPath, logger hclog.Logger, turboVersion string, opts ClientOpts) (*Client, error) {
+ sockPath := getUnixSocket(repoRoot)
+ pidPath := getPidFile(repoRoot)
+ logPath, err := getLogFilePath(repoRoot)
+ if err != nil {
+ return nil, err
+ }
+ bin, err := os.Executable()
+ if err != nil {
+ return nil, err
+ }
+ // The Go binary can no longer be called directly, so we need to route back to the rust wrapper
+ if strings.HasSuffix(bin, "go-turbo") {
+ bin = filepath.Join(filepath.Dir(bin), "turbo")
+ } else if strings.HasSuffix(bin, "go-turbo.exe") {
+ bin = filepath.Join(filepath.Dir(bin), "turbo.exe")
+ }
+ c := &connector.Connector{
+ Logger: logger.Named("TurbodClient"),
+ Bin: bin,
+ Opts: opts,
+ SockPath: sockPath,
+ PidPath: pidPath,
+ LogPath: logPath,
+ TurboVersion: turboVersion,
+ }
+ return c.Connect(ctx)
+}
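A hedged usage sketch, not part of the patch: obtaining a client for a repo root and releasing the underlying grpc connection when done. withDaemon is a made-up name, and the Close call assumes it is reachable through the embedded *grpc.ClientConn on connector.Client, as the connector tests suggest.

func withDaemon(ctx context.Context, repoRoot turbopath.AbsoluteSystemPath, version string) error {
	client, err := GetClient(ctx, repoRoot, hclog.Default(), version, ClientOpts{})
	if err != nil {
		return err
	}
	// Close is assumed to be promoted from the embedded grpc connection.
	defer func() { _ = client.Close() }()
	// ... issue turbodprotocol RPCs through client ...
	return nil
}
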
diff --git a/cli/internal/daemon/daemon_test.go b/cli/internal/daemon/daemon_test.go
new file mode 100644
index 0000000..66a714d
--- /dev/null
+++ b/cli/internal/daemon/daemon_test.go
@@ -0,0 +1,262 @@
+package daemon
+
+import (
+ "context"
+ "errors"
+ "os/exec"
+ "runtime"
+ "strconv"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/go-hclog"
+ "github.com/nightlyone/lockfile"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/server"
+ "github.com/vercel/turbo/cli/internal/signals"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials/insecure"
+ "google.golang.org/grpc/test/grpc_testing"
+ "gotest.tools/v3/assert"
+)
+
+// testBin returns a platform-appropriate node binary.
+// We need some process to be running and findable by the
+// lockfile library, and we don't particularly care what it is.
+// Since node is required for turbo development, it makes a decent
+// candidate.
+func testBin() string {
+ if runtime.GOOS == "windows" {
+ return "node.exe"
+ }
+ return "node"
+}
+
+func TestPidFileLock(t *testing.T) {
+ repoRoot := fs.AbsoluteSystemPathFromUpstream(t.TempDir())
+
+ pidPath := getPidFile(repoRoot)
+ // the lockfile library handles removing pids from dead owners
+ _, err := tryAcquirePidfileLock(pidPath)
+ assert.NilError(t, err, "acquirePidLock")
+
+ // Start up a node process and fake a pid file for it.
+ // Ensure that we can't start the daemon while the node process is live
+ bin := testBin()
+ node := exec.Command(bin)
+ err = node.Start()
+ assert.NilError(t, err, "Start")
+ stopNode := func() error {
+ if err := node.Process.Kill(); err != nil {
+ return err
+ }
+ // We expect an error from node, we just sent a kill signal
+ _ = node.Wait()
+ return nil
+ }
+ // In case we fail the test, still try to kill the node process
+ t.Cleanup(func() { _ = stopNode() })
+ nodePid := node.Process.Pid
+ err = pidPath.WriteFile([]byte(strconv.Itoa(nodePid)), 0644)
+ assert.NilError(t, err, "WriteFile")
+
+ _, err = tryAcquirePidfileLock(pidPath)
+ assert.ErrorIs(t, err, lockfile.ErrBusy)
+
+ // Stop the node process, but leave the pid file there
+ // This simulates a crash
+ err = stopNode()
+ assert.NilError(t, err, "stopNode")
+ // the lockfile library handles removing pids from dead owners
+ _, err = tryAcquirePidfileLock(pidPath)
+ assert.NilError(t, err, "acquirePidLock")
+}
+
+type testRPCServer struct {
+ grpc_testing.UnimplementedTestServiceServer
+ registered chan struct{}
+}
+
+func (ts *testRPCServer) EmptyCall(ctx context.Context, req *grpc_testing.Empty) (*grpc_testing.Empty, error) {
+ panic("intended to panic")
+}
+
+func (ts *testRPCServer) Register(grpcServer server.GRPCServer) {
+ grpc_testing.RegisterTestServiceServer(grpcServer, ts)
+ ts.registered <- struct{}{}
+}
+
+func newTestRPCServer() *testRPCServer {
+ return &testRPCServer{
+ registered: make(chan struct{}, 1),
+ }
+}
+
+func waitForFile(t *testing.T, filename turbopath.AbsoluteSystemPath, timeout time.Duration) {
+ t.Helper()
+ deadline := time.After(timeout)
+outer:
+ for !filename.FileExists() {
+ select {
+ case <-deadline:
+ break outer
+ case <-time.After(10 * time.Millisecond):
+ }
+ }
+ if !filename.FileExists() {
+ t.Errorf("timed out waiting for %v to exist after %v", filename, timeout)
+ }
+}
+
+func TestDaemonLifecycle(t *testing.T) {
+ logger := hclog.Default()
+ logger.SetLevel(hclog.Debug)
+ repoRoot := fs.AbsoluteSystemPathFromUpstream(t.TempDir())
+
+ ts := newTestRPCServer()
+ watcher := signals.NewWatcher()
+ ctx, cancel := context.WithCancel(context.Background())
+
+ d := &daemon{
+ logger: logger,
+ repoRoot: repoRoot,
+ timeout: 10 * time.Second,
+ reqCh: make(chan struct{}),
+ timedOutCh: make(chan struct{}),
+ }
+
+ var serverErr error
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+ go func() {
+ serverErr = d.runTurboServer(ctx, ts, watcher)
+ wg.Done()
+ }()
+
+ sockPath := getUnixSocket(repoRoot)
+ waitForFile(t, sockPath, 30*time.Second)
+ pidPath := getPidFile(repoRoot)
+ waitForFile(t, pidPath, 1*time.Second)
+ cancel()
+ wg.Wait()
+ assert.NilError(t, serverErr, "runTurboServer")
+ if sockPath.FileExists() {
+ t.Errorf("%v still exists, should have been cleaned up", sockPath)
+ }
+ if pidPath.FileExists() {
+ t.Errorf("%v still exists, should have been cleaned up", sockPath)
+ }
+}
+
+func TestTimeout(t *testing.T) {
+ logger := hclog.Default()
+ logger.SetLevel(hclog.Debug)
+ repoRoot := fs.AbsoluteSystemPathFromUpstream(t.TempDir())
+
+ ts := newTestRPCServer()
+ watcher := signals.NewWatcher()
+ ctx := context.Background()
+
+ d := &daemon{
+ logger: logger,
+ repoRoot: repoRoot,
+ timeout: 5 * time.Millisecond,
+ reqCh: make(chan struct{}),
+ timedOutCh: make(chan struct{}),
+ }
+ err := d.runTurboServer(ctx, ts, watcher)
+ if !errors.Is(err, errInactivityTimeout) {
+ t.Errorf("server error got %v, want %v", err, errInactivityTimeout)
+ }
+}
+
+func TestCaughtSignal(t *testing.T) {
+ logger := hclog.Default()
+ logger.SetLevel(hclog.Debug)
+ repoRoot := fs.AbsoluteSystemPathFromUpstream(t.TempDir())
+
+ ts := newTestRPCServer()
+ watcher := signals.NewWatcher()
+ ctx := context.Background()
+
+ d := &daemon{
+ logger: logger,
+ repoRoot: repoRoot,
+ timeout: 5 * time.Second,
+ reqCh: make(chan struct{}),
+ timedOutCh: make(chan struct{}),
+ }
+ errCh := make(chan error)
+ go func() {
+ err := d.runTurboServer(ctx, ts, watcher)
+ errCh <- err
+ }()
+ <-ts.registered
+ // grpc doesn't provide a signal to know when the server is serving.
+ // So while this call to Close can race with the call to grpc.Server.Serve, if we've
+ // registered with the turboserver, we've registered all of our
+ // signal handlers as well. We just may or may not be serving when Close()
+ // is called. It shouldn't matter for the purposes of this test:
+ // Either we are serving, and Serve will return with nil when GracefulStop is
+ // called, or we aren't serving yet, and the subsequent call to Serve will
+ // immediately return with grpc.ErrServerStopped. So, both nil and grpc.ErrServerStopped
+ // are acceptable outcomes for runTurboServer. Any other error, or a timeout, is a
+ // failure.
+ watcher.Close()
+
+ err := <-errCh
+ pidPath := getPidFile(repoRoot)
+ if pidPath.FileExists() {
+ t.Errorf("expected to clean up %v, but it still exists", pidPath)
+ }
+ // We'll either get nil or ErrServerStopped, depending on whether
+ // or not we close the signal watcher before grpc.Server.Serve was
+ // called.
+ if err != nil && !errors.Is(err, grpc.ErrServerStopped) {
+ t.Errorf("runTurboServer got err %v, want nil or ErrServerStopped", err)
+ }
+}
+
+func TestCleanupOnPanic(t *testing.T) {
+ logger := hclog.Default()
+ logger.SetLevel(hclog.Debug)
+ repoRoot := fs.AbsoluteSystemPathFromUpstream(t.TempDir())
+
+ ts := newTestRPCServer()
+ watcher := signals.NewWatcher()
+ ctx := context.Background()
+
+ d := &daemon{
+ logger: logger,
+ repoRoot: repoRoot,
+ timeout: 5 * time.Second,
+ reqCh: make(chan struct{}),
+ timedOutCh: make(chan struct{}),
+ }
+ errCh := make(chan error)
+ go func() {
+ err := d.runTurboServer(ctx, ts, watcher)
+ errCh <- err
+ }()
+ <-ts.registered
+
+ creds := insecure.NewCredentials()
+ sockFile := getUnixSocket(repoRoot)
+ conn, err := grpc.Dial("unix://"+sockFile.ToString(), grpc.WithTransportCredentials(creds))
+ assert.NilError(t, err, "Dial")
+
+ client := grpc_testing.NewTestServiceClient(conn)
+ _, err = client.EmptyCall(ctx, &grpc_testing.Empty{})
+ if err == nil {
+ t.Error("nil error")
+ }
+ // wait for the server to finish
+ <-errCh
+
+ pidPath := getPidFile(repoRoot)
+ if pidPath.FileExists() {
+ t.Errorf("expected to clean up %v, but it still exists", pidPath)
+ }
+}
diff --git a/cli/internal/daemonclient/daemonclient.go b/cli/internal/daemonclient/daemonclient.go
new file mode 100644
index 0000000..c415cd3
--- /dev/null
+++ b/cli/internal/daemonclient/daemonclient.go
@@ -0,0 +1,70 @@
+// Package daemonclient is a wrapper around a grpc client
+// to talk to turbod
+package daemonclient
+
+import (
+ "context"
+
+ "github.com/vercel/turbo/cli/internal/daemon/connector"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/turbodprotocol"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+// DaemonClient provides access to higher-level functionality from the daemon to a turbo run.
+type DaemonClient struct {
+ client *connector.Client
+}
+
+// Status provides details about the daemon's status
+type Status struct {
+ UptimeMs uint64 `json:"uptimeMs"`
+ LogFile turbopath.AbsoluteSystemPath `json:"logFile"`
+ PidFile turbopath.AbsoluteSystemPath `json:"pidFile"`
+ SockFile turbopath.AbsoluteSystemPath `json:"sockFile"`
+}
+
+// New creates a new instance of a DaemonClient.
+func New(client *connector.Client) *DaemonClient {
+ return &DaemonClient{
+ client: client,
+ }
+}
+
+// GetChangedOutputs implements runcache.OutputWatcher.GetChangedOutputs
+func (d *DaemonClient) GetChangedOutputs(ctx context.Context, hash string, repoRelativeOutputGlobs []string) ([]string, error) {
+ resp, err := d.client.GetChangedOutputs(ctx, &turbodprotocol.GetChangedOutputsRequest{
+ Hash: hash,
+ OutputGlobs: repoRelativeOutputGlobs,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return resp.ChangedOutputGlobs, nil
+}
+
+// NotifyOutputsWritten implements runcache.OutputWatcher.NotifyOutputsWritten
+func (d *DaemonClient) NotifyOutputsWritten(ctx context.Context, hash string, repoRelativeOutputGlobs fs.TaskOutputs) error {
+ _, err := d.client.NotifyOutputsWritten(ctx, &turbodprotocol.NotifyOutputsWrittenRequest{
+ Hash: hash,
+ OutputGlobs: repoRelativeOutputGlobs.Inclusions,
+ OutputExclusionGlobs: repoRelativeOutputGlobs.Exclusions,
+ })
+ return err
+}
+
+// Status returns the DaemonStatus from the daemon
+func (d *DaemonClient) Status(ctx context.Context) (*Status, error) {
+ resp, err := d.client.Status(ctx, &turbodprotocol.StatusRequest{})
+ if err != nil {
+ return nil, err
+ }
+ daemonStatus := resp.DaemonStatus
+ return &Status{
+ UptimeMs: daemonStatus.UptimeMsec,
+ LogFile: d.client.LogPath,
+ PidFile: d.client.PidPath,
+ SockFile: d.client.SockPath,
+ }, nil
+}
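A hedged sketch of how a run might consult the daemon through this wrapper, not part of the patch; changedSince is a made-up name, and the hash and glob are placeholders.

func changedSince(ctx context.Context, conn *connector.Client, taskHash string) ([]string, error) {
	dc := New(conn)
	// Ask the daemon which of the task's output globs have changed
	// since outputs were last written for this hash.
	return dc.GetChangedOutputs(ctx, taskHash, []string{"dist/**"})
}
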
diff --git a/cli/internal/doublestar/doublestar.go b/cli/internal/doublestar/doublestar.go
new file mode 100644
index 0000000..6fa05f1
--- /dev/null
+++ b/cli/internal/doublestar/doublestar.go
@@ -0,0 +1,11 @@
+// Package doublestar is adapted from https://github.com/bmatcuk/doublestar
+// Copyright Bob Matcuk. All Rights Reserved.
+// SPDX-License-Identifier: MIT
+package doublestar
+
+import (
+ "path"
+)
+
+// ErrBadPattern indicates a pattern was malformed.
+var ErrBadPattern = path.ErrBadPattern
diff --git a/cli/internal/doublestar/doublestar_test.go b/cli/internal/doublestar/doublestar_test.go
new file mode 100644
index 0000000..512f8b7
--- /dev/null
+++ b/cli/internal/doublestar/doublestar_test.go
@@ -0,0 +1,557 @@
+// Package doublestar is adapted from https://github.com/bmatcuk/doublestar
+// Copyright Bob Matcuk. All Rights Reserved.
+// SPDX-License-Identifier: MIT
+
+// This file is mostly copied from Go's path/match_test.go
+
+package doublestar
+
+import (
+ "io/fs"
+ "log"
+ "os"
+ "path"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+type MatchTest struct {
+ pattern, testPath string // a pattern and path to test the pattern on
+ shouldMatch bool // true if the pattern should match the path
+ expectedErr error // an expected error
+ isStandard bool // pattern doesn't use any doublestar features
+ testOnDisk bool // true: test pattern against files in "test" directory
+ numResults int // number of glob results if testing on disk
+ winNumResults int // number of glob results on Windows
+}
+
+// Tests which contain escapes and symlinks will not work on Windows
+var onWindows = runtime.GOOS == "windows"
+
+var matchTests = []MatchTest{
+ {"*", "", true, nil, true, false, 0, 0},
+ {"*", "/", false, nil, true, false, 0, 0},
+ {"/*", "/", true, nil, true, false, 0, 0},
+ {"/*", "/debug/", false, nil, true, false, 0, 0},
+ {"/*", "//", false, nil, true, false, 0, 0},
+ {"abc", "abc", true, nil, true, true, 1, 1},
+ {"*", "abc", true, nil, true, true, 19, 15},
+ {"*c", "abc", true, nil, true, true, 2, 2},
+ {"*/", "a/", true, nil, true, false, 0, 0},
+ {"a*", "a", true, nil, true, true, 9, 9},
+ {"a*", "abc", true, nil, true, true, 9, 9},
+ {"a*", "ab/c", false, nil, true, true, 9, 9},
+ {"a*/b", "abc/b", true, nil, true, true, 2, 2},
+ {"a*/b", "a/c/b", false, nil, true, true, 2, 2},
+ {"a*b*c*d*e*", "axbxcxdxe", true, nil, true, true, 3, 3},
+ {"a*b*c*d*e*/f", "axbxcxdxe/f", true, nil, true, true, 2, 2},
+ {"a*b*c*d*e*/f", "axbxcxdxexxx/f", true, nil, true, true, 2, 2},
+ {"a*b*c*d*e*/f", "axbxcxdxe/xxx/f", false, nil, true, true, 2, 2},
+ {"a*b*c*d*e*/f", "axbxcxdxexxx/fff", false, nil, true, true, 2, 2},
+ {"a*b?c*x", "abxbbxdbxebxczzx", true, nil, true, true, 2, 2},
+ {"a*b?c*x", "abxbbxdbxebxczzy", false, nil, true, true, 2, 2},
+ {"ab[c]", "abc", true, nil, true, true, 1, 1},
+ {"ab[b-d]", "abc", true, nil, true, true, 1, 1},
+ {"ab[e-g]", "abc", false, nil, true, true, 0, 0},
+ {"ab[^c]", "abc", false, nil, true, true, 0, 0},
+ {"ab[^b-d]", "abc", false, nil, true, true, 0, 0},
+ {"ab[^e-g]", "abc", true, nil, true, true, 1, 1},
+ {"a\\*b", "ab", false, nil, true, true, 0, 0},
+ {"a?b", "a☺b", true, nil, true, true, 1, 1},
+ {"a[^a]b", "a☺b", true, nil, true, true, 1, 1},
+ {"a[!a]b", "a☺b", true, nil, false, true, 1, 1},
+ {"a???b", "a☺b", false, nil, true, true, 0, 0},
+ {"a[^a][^a][^a]b", "a☺b", false, nil, true, true, 0, 0},
+ {"[a-ζ]*", "α", true, nil, true, true, 17, 15},
+ {"*[a-ζ]", "A", false, nil, true, true, 17, 15},
+ {"a?b", "a/b", false, nil, true, true, 1, 1},
+ {"a*b", "a/b", false, nil, true, true, 1, 1},
+ {"[\\]a]", "]", true, nil, true, !onWindows, 2, 2},
+ {"[\\-]", "-", true, nil, true, !onWindows, 1, 1},
+ {"[x\\-]", "x", true, nil, true, !onWindows, 2, 2},
+ {"[x\\-]", "-", true, nil, true, !onWindows, 2, 2},
+ {"[x\\-]", "z", false, nil, true, !onWindows, 2, 2},
+ {"[\\-x]", "x", true, nil, true, !onWindows, 2, 2},
+ {"[\\-x]", "-", true, nil, true, !onWindows, 2, 2},
+ {"[\\-x]", "a", false, nil, true, !onWindows, 2, 2},
+ {"[]a]", "]", false, ErrBadPattern, true, true, 0, 0},
+ // doublestar, like bash, allows these when path.Match() does not
+ {"[-]", "-", true, nil, false, !onWindows, 1, 0},
+ {"[x-]", "x", true, nil, false, true, 2, 1},
+ {"[x-]", "-", true, nil, false, !onWindows, 2, 1},
+ {"[x-]", "z", false, nil, false, true, 2, 1},
+ {"[-x]", "x", true, nil, false, true, 2, 1},
+ {"[-x]", "-", true, nil, false, !onWindows, 2, 1},
+ {"[-x]", "a", false, nil, false, true, 2, 1},
+ {"[a-b-d]", "a", true, nil, false, true, 3, 2},
+ {"[a-b-d]", "b", true, nil, false, true, 3, 2},
+ {"[a-b-d]", "-", true, nil, false, !onWindows, 3, 2},
+ {"[a-b-d]", "c", false, nil, false, true, 3, 2},
+ {"[a-b-x]", "x", true, nil, false, true, 4, 3},
+ {"\\", "a", false, ErrBadPattern, true, !onWindows, 0, 0},
+ {"[", "a", false, ErrBadPattern, true, true, 0, 0},
+ {"[^", "a", false, ErrBadPattern, true, true, 0, 0},
+ {"[^bc", "a", false, ErrBadPattern, true, true, 0, 0},
+ {"a[", "a", false, ErrBadPattern, true, true, 0, 0},
+ {"a[", "ab", false, ErrBadPattern, true, true, 0, 0},
+ {"ad[", "ab", false, ErrBadPattern, true, true, 0, 0},
+ {"*x", "xxx", true, nil, true, true, 4, 4},
+ {"[abc]", "b", true, nil, true, true, 3, 3},
+ {"**", "", true, nil, false, false, 38, 38},
+ {"a/**", "a", true, nil, false, true, 7, 7},
+ {"a/**", "a/", true, nil, false, false, 7, 7},
+ {"a/**", "a/b", true, nil, false, true, 7, 7},
+ {"a/**", "a/b/c", true, nil, false, true, 7, 7},
+ // These tests differ since we've disabled walking symlinks
+ {"**/c", "c", true, nil, false, true, 4, 4},
+ {"**/c", "b/c", true, nil, false, true, 4, 4},
+ {"**/c", "a/b/c", true, nil, false, true, 4, 4},
+ {"**/c", "a/b", false, nil, false, true, 4, 4},
+ {"**/c", "abcd", false, nil, false, true, 4, 4},
+ {"**/c", "a/abc", false, nil, false, true, 4, 4},
+ {"a/**/b", "a/b", true, nil, false, true, 2, 2},
+ {"a/**/c", "a/b/c", true, nil, false, true, 2, 2},
+ {"a/**/d", "a/b/c/d", true, nil, false, true, 1, 1},
+ {"a/\\**", "a/b/c", false, nil, false, !onWindows, 0, 0},
+ {"a/\\[*\\]", "a/bc", false, nil, true, !onWindows, 0, 0},
+ // this is an odd case: filepath.Glob() will return results
+ {"a//b/c", "a/b/c", false, nil, true, false, 0, 0},
+ {"a/b/c", "a/b//c", false, nil, true, true, 1, 1},
+ // also odd: Glob + filepath.Glob return results
+ {"a/", "a", false, nil, true, false, 0, 0},
+ {"ab{c,d}", "abc", true, nil, false, true, 1, 1},
+ {"ab{c,d,*}", "abcde", true, nil, false, true, 5, 5},
+ {"ab{c,d}[", "abcd", false, ErrBadPattern, false, true, 0, 0},
+ {"a{,bc}", "a", true, nil, false, true, 2, 2},
+ {"a{,bc}", "abc", true, nil, false, true, 2, 2},
+ {"a/{b/c,c/b}", "a/b/c", true, nil, false, true, 2, 2},
+ {"a/{b/c,c/b}", "a/c/b", true, nil, false, true, 2, 2},
+ {"{a/{b,c},abc}", "a/b", true, nil, false, true, 3, 3},
+ {"{a/{b,c},abc}", "a/c", true, nil, false, true, 3, 3},
+ {"{a/{b,c},abc}", "abc", true, nil, false, true, 3, 3},
+ {"{a/{b,c},abc}", "a/b/c", false, nil, false, true, 3, 3},
+ {"{a/ab*}", "a/abc", true, nil, false, true, 1, 1},
+ {"{a/*}", "a/b", true, nil, false, true, 3, 3},
+ {"{a/abc}", "a/abc", true, nil, false, true, 1, 1},
+ {"{a/b,a/c}", "a/c", true, nil, false, true, 2, 2},
+ {"abc/**", "abc/b", true, nil, false, true, 3, 3},
+ {"**/abc", "abc", true, nil, false, true, 2, 2},
+ {"abc**", "abc/b", false, nil, false, true, 3, 3},
+ {"**/*.txt", "abc/【test】.txt", true, nil, false, true, 1, 1},
+ {"**/【*", "abc/【test】.txt", true, nil, false, true, 1, 1},
+ // unfortunately, io/fs can't handle this, so neither can Glob =(
+ {"broken-symlink", "broken-symlink", true, nil, true, false, 1, 1},
+ // We don't care about matching a particular file, we want to verify
+ // that we don't traverse the symlink
+ {"working-symlink/c/*", "working-symlink/c/d", true, nil, true, !onWindows, 1, 1},
+ {"working-sym*/*", "irrelevant", false, nil, false, !onWindows, 0, 0},
+ {"b/**/f", "irrelevant", false, nil, false, !onWindows, 0, 0},
+}
+
+func TestValidatePattern(t *testing.T) {
+ for idx, tt := range matchTests {
+ testValidatePatternWith(t, idx, tt)
+ }
+}
+
+func testValidatePatternWith(t *testing.T, idx int, tt MatchTest) {
+ defer func() {
+ if r := recover(); r != nil {
+ t.Errorf("#%v. Validate(%#q) panicked: %#v", idx, tt.pattern, r)
+ }
+ }()
+
+ result := ValidatePattern(tt.pattern)
+ if result != (tt.expectedErr == nil) {
+ t.Errorf("#%v. ValidatePattern(%#q) = %v want %v", idx, tt.pattern, result, !result)
+ }
+}
+
+func TestMatch(t *testing.T) {
+ for idx, tt := range matchTests {
+ // Since Match() always uses "/" as the separator, we
+ // don't need to worry about the tt.testOnDisk flag
+ testMatchWith(t, idx, tt)
+ }
+}
+
+func testMatchWith(t *testing.T, idx int, tt MatchTest) {
+ defer func() {
+ if r := recover(); r != nil {
+ t.Errorf("#%v. Match(%#q, %#q) panicked: %#v", idx, tt.pattern, tt.testPath, r)
+ }
+ }()
+
+ // Match() always uses "/" as the separator
+ ok, err := Match(tt.pattern, tt.testPath)
+ if ok != tt.shouldMatch || err != tt.expectedErr {
+ t.Errorf("#%v. Match(%#q, %#q) = %v, %v want %v, %v", idx, tt.pattern, tt.testPath, ok, err, tt.shouldMatch, tt.expectedErr)
+ }
+
+ if tt.isStandard {
+ stdOk, stdErr := path.Match(tt.pattern, tt.testPath)
+ if ok != stdOk || !compareErrors(err, stdErr) {
+ t.Errorf("#%v. Match(%#q, %#q) != path.Match(...). Got %v, %v want %v, %v", idx, tt.pattern, tt.testPath, ok, err, stdOk, stdErr)
+ }
+ }
+}
+
+func BenchmarkMatch(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ for _, tt := range matchTests {
+ if tt.isStandard {
+ _, _ = Match(tt.pattern, tt.testPath)
+ }
+ }
+ }
+}
+
+func BenchmarkGoMatch(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ for _, tt := range matchTests {
+ if tt.isStandard {
+ _, _ = path.Match(tt.pattern, tt.testPath)
+ }
+ }
+ }
+}
+
+func TestPathMatch(t *testing.T) {
+ for idx, tt := range matchTests {
+ // Even though we aren't actually matching paths on disk, we are using
+ // PathMatch() which will use the system's separator. As a result, any
+ // patterns that might cause problems on-disk need to also be avoided
+ // here in this test.
+ if tt.testOnDisk {
+ testPathMatchWith(t, idx, tt)
+ }
+ }
+}
+
+func testPathMatchWith(t *testing.T, idx int, tt MatchTest) {
+ defer func() {
+ if r := recover(); r != nil {
+ t.Errorf("#%v. Match(%#q, %#q) panicked: %#v", idx, tt.pattern, tt.testPath, r)
+ }
+ }()
+
+ pattern := filepath.FromSlash(tt.pattern)
+ testPath := filepath.FromSlash(tt.testPath)
+ ok, err := PathMatch(pattern, testPath)
+ if ok != tt.shouldMatch || err != tt.expectedErr {
+ t.Errorf("#%v. PathMatch(%#q, %#q) = %v, %v want %v, %v", idx, pattern, testPath, ok, err, tt.shouldMatch, tt.expectedErr)
+ }
+
+ if tt.isStandard {
+ stdOk, stdErr := filepath.Match(pattern, testPath)
+ if ok != stdOk || !compareErrors(err, stdErr) {
+ t.Errorf("#%v. PathMatch(%#q, %#q) != filepath.Match(...). Got %v, %v want %v, %v", idx, pattern, testPath, ok, err, stdOk, stdErr)
+ }
+ }
+}
+
+func TestPathMatchFake(t *testing.T) {
+ // This test fakes that our path separator is `\\` so we can test what it
+ // would be like on Windows - obviously, we don't need to do that if we
+ // actually _are_ on Windows, since TestPathMatch will cover it.
+ if onWindows {
+ return
+ }
+
+ for idx, tt := range matchTests {
+ // Even though we aren't actually matching paths on disk, we are using
+ // PathMatch() which will use the system's separator. As a result, any
+ // patterns that might cause problems on-disk need to also be avoided
+ // here in this test.
+ if tt.testOnDisk && tt.pattern != "\\" {
+ testPathMatchFakeWith(t, idx, tt)
+ }
+ }
+}
+
+func testPathMatchFakeWith(t *testing.T, idx int, tt MatchTest) {
+ defer func() {
+ if r := recover(); r != nil {
+ t.Errorf("#%v. Match(%#q, %#q) panicked: %#v", idx, tt.pattern, tt.testPath, r)
+ }
+ }()
+
+ pattern := strings.ReplaceAll(tt.pattern, "/", "\\")
+ testPath := strings.ReplaceAll(tt.testPath, "/", "\\")
+ ok, err := matchWithSeparator(pattern, testPath, '\\', true)
+ if ok != tt.shouldMatch || err != tt.expectedErr {
+ t.Errorf("#%v. PathMatch(%#q, %#q) = %v, %v want %v, %v", idx, pattern, testPath, ok, err, tt.shouldMatch, tt.expectedErr)
+ }
+}
+
+func BenchmarkPathMatch(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ for _, tt := range matchTests {
+ if tt.isStandard && tt.testOnDisk {
+ pattern := filepath.FromSlash(tt.pattern)
+ testPath := filepath.FromSlash(tt.testPath)
+ _, _ = PathMatch(pattern, testPath)
+ }
+ }
+ }
+}
+
+func BenchmarkGoPathMatch(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ for _, tt := range matchTests {
+ if tt.isStandard && tt.testOnDisk {
+ pattern := filepath.FromSlash(tt.pattern)
+ testPath := filepath.FromSlash(tt.testPath)
+ _, _ = filepath.Match(pattern, testPath)
+ }
+ }
+ }
+}
+
+func TestGlob(t *testing.T) {
+ fsys := os.DirFS("test")
+ for idx, tt := range matchTests {
+ if tt.testOnDisk {
+ testGlobWith(t, idx, tt, fsys)
+ }
+ }
+}
+
+func testGlobWith(t *testing.T, idx int, tt MatchTest, fsys fs.FS) {
+ defer func() {
+ if r := recover(); r != nil {
+ t.Errorf("#%v. Glob(%#q) panicked: %#v", idx, tt.pattern, r)
+ }
+ }()
+
+ matches, err := Glob(fsys, tt.pattern)
+ verifyGlobResults(t, idx, "Glob", tt, fsys, matches, err)
+}
+
+func TestGlobWalk(t *testing.T) {
+ fsys := os.DirFS("test")
+ for idx, tt := range matchTests {
+ if tt.testOnDisk {
+ testGlobWalkWith(t, idx, tt, fsys)
+ }
+ }
+}
+
+func testGlobWalkWith(t *testing.T, idx int, tt MatchTest, fsys fs.FS) {
+ defer func() {
+ if r := recover(); r != nil {
+ t.Errorf("#%v. Glob(%#q) panicked: %#v", idx, tt.pattern, r)
+ }
+ }()
+
+ var matches []string
+ err := GlobWalk(fsys, tt.pattern, func(p string, d fs.DirEntry) error {
+ matches = append(matches, p)
+ return nil
+ })
+ verifyGlobResults(t, idx, "GlobWalk", tt, fsys, matches, err)
+}
+
+func verifyGlobResults(t *testing.T, idx int, fn string, tt MatchTest, fsys fs.FS, matches []string, err error) {
+ numResults := tt.numResults
+ if onWindows {
+ numResults = tt.winNumResults
+ }
+ if len(matches) != numResults {
+ t.Errorf("#%v. %v(%#q) = %#v - should have %#v results", idx, fn, tt.pattern, matches, tt.numResults)
+ }
+ if inSlice(tt.testPath, matches) != tt.shouldMatch {
+ if tt.shouldMatch {
+ t.Errorf("#%v. %v(%#q) = %#v - doesn't contain %v, but should", idx, fn, tt.pattern, matches, tt.testPath)
+ } else {
+ t.Errorf("#%v. %v(%#q) = %#v - contains %v, but shouldn't", idx, fn, tt.pattern, matches, tt.testPath)
+ }
+ }
+ if err != tt.expectedErr {
+ t.Errorf("#%v. %v(%#q) has error %v, but should be %v", idx, fn, tt.pattern, err, tt.expectedErr)
+ }
+
+ if tt.isStandard {
+ stdMatches, stdErr := fs.Glob(fsys, tt.pattern)
+ if !compareSlices(matches, stdMatches) || !compareErrors(err, stdErr) {
+ t.Errorf("#%v. %v(%#q) != fs.Glob(...). Got %#v, %v want %#v, %v", idx, fn, tt.pattern, matches, err, stdMatches, stdErr)
+ }
+ }
+}
+
+func BenchmarkGlob(b *testing.B) {
+ fsys := os.DirFS("test")
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ for _, tt := range matchTests {
+ if tt.isStandard && tt.testOnDisk {
+ _, _ = Glob(fsys, tt.pattern)
+ }
+ }
+ }
+}
+
+func BenchmarkGlobWalk(b *testing.B) {
+ fsys := os.DirFS("test")
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ for _, tt := range matchTests {
+ if tt.isStandard && tt.testOnDisk {
+ _ = GlobWalk(fsys, tt.pattern, func(p string, d fs.DirEntry) error {
+ return nil
+ })
+ }
+ }
+ }
+}
+
+func BenchmarkGoGlob(b *testing.B) {
+ fsys := os.DirFS("test")
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ for _, tt := range matchTests {
+ if tt.isStandard && tt.testOnDisk {
+ _, _ = fs.Glob(fsys, tt.pattern)
+ }
+ }
+ }
+}
+
+func compareErrors(a, b error) bool {
+ if a == nil {
+ return b == nil
+ }
+ return b != nil
+}
+
+func inSlice(s string, a []string) bool {
+ for _, i := range a {
+ if i == s {
+ return true
+ }
+ }
+ return false
+}
+
+func compareSlices(a, b []string) bool {
+ if len(a) != len(b) {
+ return false
+ }
+
+ diff := make(map[string]int, len(a))
+
+ for _, x := range a {
+ diff[x]++
+ }
+
+ for _, y := range b {
+ if _, ok := diff[y]; !ok {
+ return false
+ }
+
+ diff[y]--
+ if diff[y] == 0 {
+ delete(diff, y)
+ }
+ }
+
+ return len(diff) == 0
+}
+
+func mkdirp(parts ...string) {
+ dirs := path.Join(parts...)
+ err := os.MkdirAll(dirs, 0755)
+ if err != nil {
+ log.Fatalf("Could not create test directories %v: %v\n", dirs, err)
+ }
+}
+
+func touch(parts ...string) {
+ filename := path.Join(parts...)
+ f, err := os.Create(filename)
+ if err != nil {
+ log.Fatalf("Could not create test file %v: %v\n", filename, err)
+ }
+ _ = f.Close()
+}
+
+func symlink(oldname, newname string) {
+ // since this will only run on non-windows, we can assume "/" as path separator
+ err := os.Symlink(oldname, newname)
+ if err != nil && !os.IsExist(err) {
+ log.Fatalf("Could not create symlink %v -> %v: %v\n", oldname, newname, err)
+ }
+}
+
+func TestGlobSorted(t *testing.T) {
+ fsys := os.DirFS("test")
+ expected := []string{"a", "abc", "abcd", "abcde", "abxbbxdbxebxczzx", "abxbbxdbxebxczzy", "axbxcxdxe", "axbxcxdxexxx", "a☺b"}
+ matches, err := Glob(fsys, "a*")
+ if err != nil {
+ t.Errorf("Unexpected error %v", err)
+ return
+ }
+
+ if len(matches) != len(expected) {
+ t.Errorf("Glob returned %#v; expected %#v", matches, expected)
+ return
+ }
+ for idx, match := range matches {
+ if match != expected[idx] {
+ t.Errorf("Glob returned %#v; expected %#v", matches, expected)
+ return
+ }
+ }
+}
+
+func TestMain(m *testing.M) {
+ // create the test directory
+ mkdirp("test", "a", "b", "c")
+ mkdirp("test", "a", "c")
+ mkdirp("test", "abc")
+ mkdirp("test", "axbxcxdxe", "xxx")
+ mkdirp("test", "axbxcxdxexxx")
+ mkdirp("test", "b")
+
+ // create test files
+ touch("test", "a", "abc")
+ touch("test", "a", "b", "c", "d")
+ touch("test", "a", "c", "b")
+ touch("test", "abc", "b")
+ touch("test", "abcd")
+ touch("test", "abcde")
+ touch("test", "abxbbxdbxebxczzx")
+ touch("test", "abxbbxdbxebxczzy")
+ touch("test", "axbxcxdxe", "f")
+ touch("test", "axbxcxdxe", "xxx", "f")
+ touch("test", "axbxcxdxexxx", "f")
+ touch("test", "axbxcxdxexxx", "fff")
+ touch("test", "a☺b")
+ touch("test", "b", "c")
+ touch("test", "c")
+ touch("test", "x")
+ touch("test", "xxx")
+ touch("test", "z")
+ touch("test", "α")
+ touch("test", "abc", "【test】.txt")
+
+ if !onWindows {
+ // these files/symlinks won't work on Windows
+ touch("test", "-")
+ touch("test", "]")
+ symlink("../axbxcxdxe/", "test/b/symlink-dir")
+ symlink("/tmp/nonexistant-file-20160902155705", "test/broken-symlink")
+ symlink("a/b", "test/working-symlink")
+ }
+
+ // os.Exit(m.Run())
+ exitCode := m.Run()
+ _ = os.RemoveAll("test")
+ os.Exit(exitCode)
+}
diff --git a/cli/internal/doublestar/glob.go b/cli/internal/doublestar/glob.go
new file mode 100644
index 0000000..eee8920
--- /dev/null
+++ b/cli/internal/doublestar/glob.go
@@ -0,0 +1,393 @@
+// Package doublestar is adapted from https://github.com/bmatcuk/doublestar
+// Copyright Bob Matcuk. All Rights Reserved.
+// SPDX-License-Identifier: MIT
+package doublestar
+
+import (
+ "io/fs"
+ "path"
+)
+
+// Glob returns the names of all files matching pattern or nil if there is no
+// matching file. The syntax of pattern is the same as in Match(). The pattern
+// may describe hierarchical names such as usr/*/bin/ed.
+//
+// Glob ignores file system errors such as I/O errors reading directories.
+// The only possible returned error is ErrBadPattern, reporting that the
+// pattern is malformed.
+//
+// Note: this is meant as a drop-in replacement for io/fs.Glob(). Like
+// io/fs.Glob(), this function assumes that your pattern uses `/` as the path
+// separator even if that's not correct for your OS (like Windows). If you
+// aren't sure if that's the case, you can use filepath.ToSlash() on your
+// pattern before calling Glob().
+//
+// Like `io/fs.Glob()`, patterns containing `/./`, `/../`, or starting with `/`
+// will return no results and no errors. You can use SplitPattern to divide a
+// pattern into a base path (to initialize an `FS` object) and pattern.
+func Glob(fsys fs.FS, pattern string) ([]string, error) {
+ if !ValidatePattern(pattern) {
+ return nil, ErrBadPattern
+ }
+ if hasMidDoubleStar(pattern) {
+ // If the pattern has a `**` anywhere but the very end, GlobWalk is more
+ // performant because it can get away with less allocations. If the pattern
+ // ends in a `**`, both methods are pretty much the same, but Glob has a
+ // _very_ slight advantage because of lower function call overhead.
+ var matches []string
+ err := doGlobWalk(fsys, pattern, true, func(p string, d fs.DirEntry) error {
+ matches = append(matches, p)
+ return nil
+ })
+ return matches, err
+ }
+ return doGlob(fsys, pattern, nil, true)
+}
+
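An illustrative call, not part of the patch, mirroring the doc comment's note that patterns always use `/` separators; it assumes an os import for DirFS.

// findGoFiles lists every .go file under dir, at any depth.
func findGoFiles(dir string) ([]string, error) {
	return Glob(os.DirFS(dir), "**/*.go")
}
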
+// Does the actual globbin'
+func doGlob(fsys fs.FS, pattern string, m []string, firstSegment bool) ([]string, error) {
+ matches := m
+ patternStart := indexMeta(pattern)
+ if patternStart == -1 {
+ // pattern doesn't contain any meta characters - does a file matching the
+ // pattern exist?
+ if exists(fsys, pattern) {
+ matches = append(matches, pattern)
+ }
+ return matches, nil
+ }
+
+ dir := "."
+ splitIdx := lastIndexSlashOrAlt(pattern)
+ if splitIdx != -1 {
+ if pattern[splitIdx] == '}' {
+ openingIdx := indexMatchedOpeningAlt(pattern[:splitIdx])
+ if openingIdx == -1 {
+ // if there's no matching opening index, technically Match() will treat
+ // an unmatched `}` as nothing special, so... we will, too!
+ splitIdx = lastIndexSlash(pattern[:splitIdx])
+ } else {
+ // otherwise, we have to handle the alts:
+ return globAlts(fsys, pattern, openingIdx, splitIdx, matches, firstSegment)
+ }
+ }
+
+ dir = pattern[:splitIdx]
+ pattern = pattern[splitIdx+1:]
+ }
+
+ // if `splitIdx` is less than `patternStart`, we know `dir` has no meta
+ // characters. They would be equal if they are both -1, which means `dir`
+ // will be ".", and we know that doesn't have meta characters either.
+ if splitIdx <= patternStart {
+ return globDir(fsys, dir, pattern, matches, firstSegment)
+ }
+
+ var dirs []string
+ var err error
+ dirs, err = doGlob(fsys, dir, matches, false)
+ if err != nil {
+ return nil, err
+ }
+ for _, d := range dirs {
+ matches, err = globDir(fsys, d, pattern, matches, firstSegment)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return matches, nil
+}
+
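To make the splitting above concrete, a hedged trace for one pattern, not part of the patch:

// doGlob(fsys, "a/b*/c", nil, true):
//   indexMeta("a/b*/c")           == 3  (the '*')
//   lastIndexSlashOrAlt("a/b*/c") == 4  (the slash before 'c')
//   dir = "a/b*", pattern = "c"
// splitIdx (4) > patternStart (3), so dir still contains meta characters:
// doGlob recurses on "a/b*" first, then calls globDir(fsys, d, "c", ...)
// for each directory d that matched.
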
+// handle alts in the glob pattern - `openingIdx` and `closingIdx` are the
+// indexes of `{` and `}`, respectively
+func globAlts(fsys fs.FS, pattern string, openingIdx, closingIdx int, m []string, firstSegment bool) ([]string, error) {
+ matches := m
+
+ var dirs []string
+ startIdx := 0
+ afterIdx := closingIdx + 1
+ splitIdx := lastIndexSlashOrAlt(pattern[:openingIdx])
+ if splitIdx == -1 || pattern[splitIdx] == '}' {
+ // no common prefix
+ dirs = []string{""}
+ } else {
+ // our alts have a common prefix that we can process first
+ var err error
+ dirs, err = doGlob(fsys, pattern[:splitIdx], matches, false)
+ if err != nil {
+ return nil, err
+ }
+
+ startIdx = splitIdx + 1
+ }
+
+ for _, d := range dirs {
+ patIdx := openingIdx + 1
+ altResultsStartIdx := len(matches)
+ thisResultStartIdx := altResultsStartIdx
+ for patIdx < closingIdx {
+ nextIdx := indexNextAlt(pattern[patIdx:closingIdx], true)
+ if nextIdx == -1 {
+ nextIdx = closingIdx
+ } else {
+ nextIdx += patIdx
+ }
+
+ alt := buildAlt(d, pattern, startIdx, openingIdx, patIdx, nextIdx, afterIdx)
+ var err error
+ matches, err = doGlob(fsys, alt, matches, firstSegment)
+ if err != nil {
+ return nil, err
+ }
+
+ matchesLen := len(matches)
+ if altResultsStartIdx != thisResultStartIdx && thisResultStartIdx != matchesLen {
+ // Alts can result in matches that aren't sorted, or, worse, duplicates
+ // (consider the trivial pattern `path/to/{a,*}`). Since doGlob returns
+ // sorted results, we can do a sort of in-place merge and remove
+ // duplicates. But, we only need to do this if this isn't the first alt
+ // (ie, `altResultsStartIdx != thisResultsStartIdx`) and if the latest
+ // alt actually added some matches (`thisResultStartIdx !=
+ // len(matches)`)
+ matches = sortAndRemoveDups(matches, altResultsStartIdx, thisResultStartIdx, matchesLen)
+
+ // length of matches may have changed
+ thisResultStartIdx = len(matches)
+ } else {
+ thisResultStartIdx = matchesLen
+ }
+
+ patIdx = nextIdx + 1
+ }
+ }
+
+ return matches, nil
+}
+
+// find files/subdirectories in the given `dir` that match `pattern`
+func globDir(fsys fs.FS, dir, pattern string, matches []string, canMatchFiles bool) ([]string, error) {
+ m := matches
+
+ if pattern == "" {
+ // pattern can be an empty string if the original pattern ended in a slash,
+ // in which case, we should just return dir, but only if it actually exists
+ // and it's a directory (or a symlink to a directory)
+ if isPathDir(fsys, dir) {
+ m = append(m, dir)
+ }
+ return m, nil
+ }
+
+ if pattern == "**" {
+ m = globDoubleStar(fsys, dir, m, canMatchFiles)
+ return m, nil
+ }
+
+ dirs, err := fs.ReadDir(fsys, dir)
+ if err != nil {
+ // ignore IO errors
+ return m, nil
+ }
+
+ var matched bool
+ for _, info := range dirs {
+ name := info.Name()
+ if canMatchFiles || isDir(fsys, dir, name, info) {
+ matched, err = matchWithSeparator(pattern, name, '/', false)
+ if err != nil {
+ return nil, err
+ }
+ if matched {
+ m = append(m, path.Join(dir, name))
+ }
+ }
+ }
+
+ return m, nil
+}
+
+func globDoubleStar(fsys fs.FS, dir string, matches []string, canMatchFiles bool) []string {
+ dirs, err := fs.ReadDir(fsys, dir)
+ if err != nil {
+ // ignore IO errors
+ return matches
+ }
+
+ // `**` can match *this* dir, so add it
+ matches = append(matches, dir)
+ for _, info := range dirs {
+ name := info.Name()
+ if isDir(fsys, dir, name, info) {
+ matches = globDoubleStar(fsys, path.Join(dir, name), matches, canMatchFiles)
+ } else if canMatchFiles {
+ matches = append(matches, path.Join(dir, name))
+ }
+ }
+
+ return matches
+}
+
+// Returns true if the pattern has a doublestar in the middle of the pattern.
+// In this case, GlobWalk is faster because it can get away with less
+// allocations. However, Glob has a _very_ slight edge if the pattern ends in
+// `**`.
+func hasMidDoubleStar(p string) bool {
+ // subtract 3: 2 because we want to return false if the pattern ends in `**`
+ // (Glob is _very_ slightly faster in that case), and the extra 1 because our
+ // loop checks p[i] and p[i+1].
+ l := len(p) - 3
+ for i := 0; i < l; i++ {
+ if p[i] == '\\' {
+ // escape next byte
+ i++
+ } else if p[i] == '*' && p[i+1] == '*' {
+ return true
+ }
+ }
+ return false
+}
+
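A few illustrative cases, not part of the patch:

// hasMidDoubleStar("a/**/b") == true   // `**` mid-pattern: GlobWalk is used
// hasMidDoubleStar("a/**")   == false  // trailing `**`: plain doGlob path
// hasMidDoubleStar("a/*/b")  == false  // a single star is not a doublestar
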
+// Returns the index of the first unescaped meta character, or negative 1.
+func indexMeta(s string) int {
+ var c byte
+ l := len(s)
+ for i := 0; i < l; i++ {
+ c = s[i]
+ if c == '*' || c == '?' || c == '[' || c == '{' {
+ return i
+ } else if c == '\\' {
+ // skip next byte
+ i++
+ }
+ }
+ return -1
+}
+
+// Returns the index of the last unescaped slash or closing alt (`}`) in the
+// string, or negative 1.
+func lastIndexSlashOrAlt(s string) int {
+ for i := len(s) - 1; i >= 0; i-- {
+ if (s[i] == '/' || s[i] == '}') && (i == 0 || s[i-1] != '\\') {
+ return i
+ }
+ }
+ return -1
+}
+
+// Returns the index of the last unescaped slash in the string, or negative 1.
+func lastIndexSlash(s string) int {
+ for i := len(s) - 1; i >= 0; i-- {
+ if s[i] == '/' && (i == 0 || s[i-1] != '\\') {
+ return i
+ }
+ }
+ return -1
+}
+
+// Assuming the byte after the end of `s` is a closing `}`, this function will
+// find the index of the matching `{`. That is, it'll skip over any nested `{}`
+// and account for escaping.
+func indexMatchedOpeningAlt(s string) int {
+ alts := 1
+ for i := len(s) - 1; i >= 0; i-- {
+ if s[i] == '}' && (i == 0 || s[i-1] != '\\') {
+ alts++
+ } else if s[i] == '{' && (i == 0 || s[i-1] != '\\') {
+ if alts--; alts == 0 {
+ return i
+ }
+ }
+ }
+ return -1
+}
+
+// Returns true if the path exists
+func exists(fsys fs.FS, name string) bool {
+ if _, err := fs.Stat(fsys, name); err != nil {
+ return false
+ }
+ return true
+}
+
+// Returns true if the path is a directory, or a symlink to a directory
+func isPathDir(fsys fs.FS, name string) bool {
+ info, err := fs.Stat(fsys, name)
+ if err != nil {
+ return false
+ }
+ return info.IsDir()
+}
+
+// Returns whether or not the given DirEntry is a directory. If the DirEntry
+ // represents a symbolic link, it returns false.
+func isDir(fsys fs.FS, dir string, name string, info fs.DirEntry) bool {
+ if (info.Type() & fs.ModeSymlink) > 0 {
+ return false
+ }
+ return info.IsDir()
+}
+
+// Builds a string from an alt
+func buildAlt(prefix, pattern string, startIdx, openingIdx, currentIdx, nextIdx, afterIdx int) string {
+ // pattern:
+ // ignored/start{alts,go,here}remaining - len = 36
+ // | | | | ^--- afterIdx = 27
+ // | | | \--------- nextIdx = 21
+ // | | \----------- currentIdx = 19
+ // | \----------------- openingIdx = 13
+ // \---------------------- startIdx = 8
+ //
+ // result:
+ // prefix/startgoremaining - len = 7 + 5 + 2 + 9 = 23
+ var buf []byte
+ patLen := len(pattern)
+ size := (openingIdx - startIdx) + (nextIdx - currentIdx) + (patLen - afterIdx)
+ if prefix != "" {
+ buf = make([]byte, 0, size+len(prefix)+1)
+ buf = append(buf, prefix...)
+ buf = append(buf, '/')
+ } else {
+ buf = make([]byte, 0, size)
+ }
+ buf = append(buf, pattern[startIdx:openingIdx]...)
+ buf = append(buf, pattern[currentIdx:nextIdx]...)
+ if afterIdx < patLen {
+ buf = append(buf, pattern[afterIdx:]...)
+ }
+ return string(buf)
+}
+
+// Running alts can produce results that are not sorted, and, worse, can cause
+// duplicates (consider the trivial pattern `path/to/{a,*}`). Since we know
+// each run of doGlob is sorted, we can basically do the "merge" step of a
+// merge sort in-place.
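+// For example, merging the sorted runs `[a b]` and `[b c]` in place yields
+// `[a b c]`.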
+func sortAndRemoveDups(matches []string, idx1, idx2, l int) []string {
+ var tmp string
+ for ; idx1 < idx2; idx1++ {
+ if matches[idx1] < matches[idx2] {
+ // order is correct
+ continue
+ } else if matches[idx1] > matches[idx2] {
+ // need to swap and then re-sort matches above idx2
+ tmp = matches[idx1]
+ matches[idx1] = matches[idx2]
+
+ shft := idx2 + 1
+ for ; shft < l && matches[shft] < tmp; shft++ {
+ matches[shft-1] = matches[shft]
+ }
+ matches[shft-1] = tmp
+ } else {
+ // duplicate - shift matches above idx2 down one and decrement l
+ for shft := idx2 + 1; shft < l; shft++ {
+ matches[shft-1] = matches[shft]
+ }
+ if l--; idx2 == l {
+ // nothing left to do... matches[idx2:] must have been full of dups
+ break
+ }
+ }
+ }
+ return matches[:l]
+}
diff --git a/cli/internal/doublestar/globwalk.go b/cli/internal/doublestar/globwalk.go
new file mode 100644
index 0000000..6caec3e
--- /dev/null
+++ b/cli/internal/doublestar/globwalk.go
@@ -0,0 +1,277 @@
+// Package doublestar is adapted from https://github.com/bmatcuk/doublestar
+// Copyright Bob Matcuk. All Rights Reserved.
+// SPDX-License-Identifier: MIT
+package doublestar
+
+import (
+ "io/fs"
+ "path"
+)
+
+// GlobWalkFunc is a callback function for GlobWalk(). If the function returns an error, GlobWalk
+// will end immediately and return the same error.
+type GlobWalkFunc func(path string, d fs.DirEntry) error
+
+// GlobWalk calls the callback function `fn` for every file matching pattern.
+// The syntax of pattern is the same as in Match() and the behavior is the same
+// as Glob(), with regard to limitations (such as patterns containing `/./`,
+// `/../`, or starting with `/`). The pattern may describe hierarchical names
+// such as usr/*/bin/ed.
+//
+// GlobWalk may have a small performance benefit over Glob if you do not need a
+// slice of matches because it can avoid allocating memory for the matches.
+// Additionally, GlobWalk gives you access to the `fs.DirEntry` objects for
+// each match, and lets you quit early by returning a non-nil error from your
+// callback function.
+//
+// GlobWalk ignores file system errors such as I/O errors reading directories.
+// GlobWalk may return ErrBadPattern, reporting that the pattern is malformed.
+// Additionally, if the callback function `fn` returns an error, GlobWalk will
+// exit immediately and return that error.
+//
+// Like Glob(), this function assumes that your pattern uses `/` as the path
+// separator even if that's not correct for your OS (like Windows). If you
+// aren't sure if that's the case, you can use filepath.ToSlash() on your
+// pattern before calling GlobWalk().
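+//
+// A minimal usage sketch (illustrative only; the filesystem and pattern are
+// arbitrary):
+//
+// err := GlobWalk(os.DirFS("."), "**/*.go", func(p string, d fs.DirEntry) error {
+//   fmt.Println(p)
+//   return nil
+// })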
+func GlobWalk(fsys fs.FS, pattern string, fn GlobWalkFunc) error {
+ if !ValidatePattern(pattern) {
+ return ErrBadPattern
+ }
+ return doGlobWalk(fsys, pattern, true, fn)
+}
+
+// Actually execute GlobWalk
+func doGlobWalk(fsys fs.FS, pattern string, firstSegment bool, fn GlobWalkFunc) error {
+ patternStart := indexMeta(pattern)
+ if patternStart == -1 {
+ // pattern doesn't contain any meta characters - does a file matching the
+ // pattern exist?
+ info, err := fs.Stat(fsys, pattern)
+ if err == nil {
+ err = fn(pattern, newDirEntryFromFileInfo(info))
+ return err
+ }
+ // ignore IO errors
+ return nil
+ }
+
+ dir := "."
+ splitIdx := lastIndexSlashOrAlt(pattern)
+ if splitIdx != -1 {
+ if pattern[splitIdx] == '}' {
+ openingIdx := indexMatchedOpeningAlt(pattern[:splitIdx])
+ if openingIdx == -1 {
+ // if there's no matching opening index, technically Match() will treat
+ // an unmatched `}` as nothing special, so... we will, too!
+ splitIdx = lastIndexSlash(pattern[:splitIdx])
+ } else {
+ // otherwise, we have to handle the alts:
+ return globAltsWalk(fsys, pattern, openingIdx, splitIdx, firstSegment, fn)
+ }
+ }
+
+ dir = pattern[:splitIdx]
+ pattern = pattern[splitIdx+1:]
+ }
+
+ // if `splitIdx` is less than `patternStart`, we know `dir` has no meta
+ // characters. They would be equal if they are both -1, which means `dir`
+ // will be ".", and we know that doesn't have meta characters either.
+ if splitIdx <= patternStart {
+ return globDirWalk(fsys, dir, pattern, firstSegment, fn)
+ }
+
+ return doGlobWalk(fsys, dir, false, func(p string, d fs.DirEntry) error {
+ return globDirWalk(fsys, p, pattern, firstSegment, fn)
+ })
+}
+
+// handle alts in the glob pattern - `openingIdx` and `closingIdx` are the
+// indexes of `{` and `}`, respectively
+func globAltsWalk(fsys fs.FS, pattern string, openingIdx, closingIdx int, firstSegment bool, fn GlobWalkFunc) error {
+ var matches []dirEntryWithFullPath
+ startIdx := 0
+ afterIdx := closingIdx + 1
+ splitIdx := lastIndexSlashOrAlt(pattern[:openingIdx])
+ if splitIdx == -1 || pattern[splitIdx] == '}' {
+ // no common prefix
+ var err error
+ matches, err = doGlobAltsWalk(fsys, "", pattern, startIdx, openingIdx, closingIdx, afterIdx, firstSegment, matches)
+ if err != nil {
+ return err
+ }
+ } else {
+ // our alts have a common prefix that we can process first
+ startIdx = splitIdx + 1
+ err := doGlobWalk(fsys, pattern[:splitIdx], false, func(p string, d fs.DirEntry) (e error) {
+ matches, e = doGlobAltsWalk(fsys, p, pattern, startIdx, openingIdx, closingIdx, afterIdx, firstSegment, matches)
+ return e
+ })
+ if err != nil {
+ return err
+ }
+ }
+
+ for _, m := range matches {
+ if err := fn(m.Path, m.Entry); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// runs actual matching for alts
+func doGlobAltsWalk(fsys fs.FS, d, pattern string, startIdx, openingIdx, closingIdx, afterIdx int, firstSegment bool, m []dirEntryWithFullPath) ([]dirEntryWithFullPath, error) {
+ matches := m
+ matchesLen := len(m)
+ patIdx := openingIdx + 1
+ for patIdx < closingIdx {
+ nextIdx := indexNextAlt(pattern[patIdx:closingIdx], true)
+ if nextIdx == -1 {
+ nextIdx = closingIdx
+ } else {
+ nextIdx += patIdx
+ }
+
+ alt := buildAlt(d, pattern, startIdx, openingIdx, patIdx, nextIdx, afterIdx)
+ err := doGlobWalk(fsys, alt, firstSegment, func(p string, d fs.DirEntry) error {
+ // insertion sort, ignoring dups
+ insertIdx := matchesLen
+ for insertIdx > 0 && matches[insertIdx-1].Path > p {
+ insertIdx--
+ }
+ if insertIdx > 0 && matches[insertIdx-1].Path == p {
+ // dup
+ return nil
+ }
+
+ // append to grow the slice, then insert
+ entry := dirEntryWithFullPath{d, p}
+ matches = append(matches, entry)
+ for i := matchesLen; i > insertIdx; i-- {
+ matches[i] = matches[i-1]
+ }
+ matches[insertIdx] = entry
+ matchesLen++
+
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ patIdx = nextIdx + 1
+ }
+
+ return matches, nil
+}
+
+func globDirWalk(fsys fs.FS, dir, pattern string, canMatchFiles bool, fn GlobWalkFunc) error {
+ if pattern == "" {
+ // pattern can be an empty string if the original pattern ended in a slash,
+ // in which case, we should just return dir, but only if it actually exists
+ // and it's a directory (or a symlink to a directory)
+ info, err := fs.Stat(fsys, dir)
+ if err != nil || !info.IsDir() {
+ return nil
+ }
+ return fn(dir, newDirEntryFromFileInfo(info))
+ }
+
+ if pattern == "**" {
+ // `**` can match *this* dir
+ info, err := fs.Stat(fsys, dir)
+ if err != nil || !info.IsDir() {
+ return nil
+ }
+ if err = fn(dir, newDirEntryFromFileInfo(info)); err != nil {
+ return err
+ }
+ return globDoubleStarWalk(fsys, dir, canMatchFiles, fn)
+ }
+
+ dirs, err := fs.ReadDir(fsys, dir)
+ if err != nil {
+ // ignore IO errors
+ return nil
+ }
+
+ var matched bool
+ for _, info := range dirs {
+ name := info.Name()
+ if canMatchFiles || isDir(fsys, dir, name, info) {
+ matched, err = matchWithSeparator(pattern, name, '/', false)
+ if err != nil {
+ return err
+ }
+ if matched {
+ if err = fn(path.Join(dir, name), info); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+func globDoubleStarWalk(fsys fs.FS, dir string, canMatchFiles bool, fn GlobWalkFunc) error {
+ dirs, err := fs.ReadDir(fsys, dir)
+ if err != nil {
+ // ignore IO errors
+ return nil
+ }
+
+ // `**` matches everything under this dir: report each subdirectory and
+ // recurse into it; report files only when `canMatchFiles` is set
+ for _, info := range dirs {
+ name := info.Name()
+ if isDir(fsys, dir, name, info) {
+ p := path.Join(dir, name)
+ if e := fn(p, info); e != nil {
+ return e
+ }
+ if e := globDoubleStarWalk(fsys, p, canMatchFiles, fn); e != nil {
+ return e
+ }
+ } else if canMatchFiles {
+ if e := fn(path.Join(dir, name), info); e != nil {
+ return e
+ }
+ }
+ }
+
+ return nil
+}
+
+type dirEntryFromFileInfo struct {
+ fi fs.FileInfo
+}
+
+func (d *dirEntryFromFileInfo) Name() string {
+ return d.fi.Name()
+}
+
+func (d *dirEntryFromFileInfo) IsDir() bool {
+ return d.fi.IsDir()
+}
+
+func (d *dirEntryFromFileInfo) Type() fs.FileMode {
+ return d.fi.Mode().Type()
+}
+
+func (d *dirEntryFromFileInfo) Info() (fs.FileInfo, error) {
+ return d.fi, nil
+}
+
+func newDirEntryFromFileInfo(fi fs.FileInfo) fs.DirEntry {
+ return &dirEntryFromFileInfo{fi}
+}
+
+type dirEntryWithFullPath struct {
+ Entry fs.DirEntry
+ Path string
+}
diff --git a/cli/internal/doublestar/match.go b/cli/internal/doublestar/match.go
new file mode 100644
index 0000000..d8c9536
--- /dev/null
+++ b/cli/internal/doublestar/match.go
@@ -0,0 +1,377 @@
+// Package doublestar is adapted from https://github.com/bmatcuk/doublestar
+// Copyright Bob Matcuk. All Rights Reserved.
+// SPDX-License-Identifier: MIT
+package doublestar
+
+import (
+ "path/filepath"
+ "unicode/utf8"
+)
+
+// Match reports whether name matches the shell pattern.
+// The pattern syntax is:
+//
+// pattern:
+// { term }
+// term:
+// '*' matches any sequence of non-path-separators
+// '/**/' matches zero or more directories
+// '?' matches any single non-path-separator character
+// '[' [ '^' '!' ] { character-range } ']'
+// character class (must be non-empty)
+// starting with `^` or `!` negates the class
+// '{' { term } [ ',' { term } ... ] '}'
+// alternatives
+// c matches character c (c != '*', '?', '\\', '[')
+// '\\' c matches character c
+//
+// character-range:
+// c matches character c (c != '\\', '-', ']')
+// '\\' c matches character c
+// lo '-' hi matches character c for lo <= c <= hi
+//
+// Match returns true if `name` matches the file name `pattern`. `name` and
+// `pattern` are split on forward slash (`/`) characters and may be relative or
+// absolute.
+//
+// Match requires pattern to match all of name, not just a substring.
+// The only possible returned error is ErrBadPattern, when pattern
+// is malformed.
+//
+// A doublestar (`**`) should appear surrounded by path separators such as
+// `/**/`. A mid-pattern doublestar (`**`) behaves like bash's globstar
+// option: a pattern such as `path/to/**.txt` would return the same results as
+// `path/to/*.txt`. The pattern you're looking for is `path/to/**/*.txt`.
+//
+// Note: this is meant as a drop-in replacement for path.Match() which
+// always uses '/' as the path separator. If you want to support systems
+// which use a different path separator (such as Windows), what you want
+// is PathMatch(). Alternatively, you can run filepath.ToSlash() on both
+// pattern and name and then use this function.
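+//
+// For example (illustrative values):
+//
+// matched, err := Match("path/to/**/*.txt", "path/to/a/b/file.txt") // true, nil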
+func Match(pattern, name string) (bool, error) {
+ return matchWithSeparator(pattern, name, '/', true)
+}
+
+// PathMatch returns true if `name` matches the file name `pattern`. The
+// difference between Match and PathMatch is that PathMatch will automatically
+// use your system's path separator to split `name` and `pattern`. On systems
+// where the path separator is `'\'`, escaping will be disabled.
+//
+// Note: this is meant as a drop-in replacement for filepath.Match(). It
+// assumes that both `pattern` and `name` are using the system's path
+// separator. If you can't be sure of that, use filepath.ToSlash() on both
+// `pattern` and `name`, and then use the Match() function instead.
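+//
+// A minimal sketch (illustrative; assumes path/filepath is imported):
+//
+// matched, err := PathMatch(filepath.Join("a", "*"), filepath.Join("a", "b")) // true on any OS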
+func PathMatch(pattern, name string) (bool, error) {
+ return matchWithSeparator(pattern, name, filepath.Separator, true)
+}
+
+func matchWithSeparator(pattern, name string, separator rune, validate bool) (matched bool, err error) {
+ doublestarPatternBacktrack := -1
+ doublestarNameBacktrack := -1
+ starPatternBacktrack := -1
+ starNameBacktrack := -1
+ patIdx := 0
+ nameIdx := 0
+ patLen := len(pattern)
+ nameLen := len(name)
+ startOfSegment := true
+MATCH:
+ for nameIdx < nameLen {
+ if patIdx < patLen {
+ switch pattern[patIdx] {
+ case '*':
+ if patIdx++; patIdx < patLen && pattern[patIdx] == '*' {
+ // doublestar - must begin with a path separator, otherwise we'll
+ // treat it like a single star like bash
+ patIdx++
+ if startOfSegment {
+ if patIdx >= patLen {
+ // pattern ends in `/**`: return true
+ return true, nil
+ }
+
+ // doublestar must also end with a path separator, otherwise we're
+ // just going to treat the doublestar as a single star like bash
+ patRune, patRuneLen := utf8.DecodeRuneInString(pattern[patIdx:])
+ if patRune == separator {
+ patIdx += patRuneLen
+
+ doublestarPatternBacktrack = patIdx
+ doublestarNameBacktrack = nameIdx
+ starPatternBacktrack = -1
+ starNameBacktrack = -1
+ continue
+ }
+ }
+ }
+ startOfSegment = false
+
+ starPatternBacktrack = patIdx
+ starNameBacktrack = nameIdx
+ continue
+
+ case '?':
+ startOfSegment = false
+ nameRune, nameRuneLen := utf8.DecodeRuneInString(name[nameIdx:])
+ if nameRune == separator {
+ // `?` cannot match the separator
+ break
+ }
+
+ patIdx++
+ nameIdx += nameRuneLen
+ continue
+
+ case '[':
+ startOfSegment = false
+ if patIdx++; patIdx >= patLen {
+ // class didn't end
+ return false, ErrBadPattern
+ }
+ nameRune, nameRuneLen := utf8.DecodeRuneInString(name[nameIdx:])
+
+ matched := false
+ negate := pattern[patIdx] == '!' || pattern[patIdx] == '^'
+ if negate {
+ patIdx++
+ }
+
+ if patIdx >= patLen || pattern[patIdx] == ']' {
+ // class didn't end or empty character class
+ return false, ErrBadPattern
+ }
+
+ last := utf8.MaxRune
+ for patIdx < patLen && pattern[patIdx] != ']' {
+ patRune, patRuneLen := utf8.DecodeRuneInString(pattern[patIdx:])
+ patIdx += patRuneLen
+
+ // match a range
+ if last < utf8.MaxRune && patRune == '-' && patIdx < patLen && pattern[patIdx] != ']' {
+ if pattern[patIdx] == '\\' {
+ // next character is escaped
+ patIdx++
+ }
+ patRune, patRuneLen = utf8.DecodeRuneInString(pattern[patIdx:])
+ patIdx += patRuneLen
+
+ if last <= nameRune && nameRune <= patRune {
+ matched = true
+ break
+ }
+
+ // didn't match range - reset `last`
+ last = utf8.MaxRune
+ continue
+ }
+
+ // not a range - check if the next rune is escaped
+ if patRune == '\\' {
+ patRune, patRuneLen = utf8.DecodeRuneInString(pattern[patIdx:])
+ patIdx += patRuneLen
+ }
+
+ // check if the rune matches
+ if patRune == nameRune {
+ matched = true
+ break
+ }
+
+ // no matches yet
+ last = patRune
+ }
+
+ if matched == negate {
+ // failed to match - if we reached the end of the pattern, that means
+ // we never found a closing `]`
+ if patIdx >= patLen {
+ return false, ErrBadPattern
+ }
+ break
+ }
+
+ closingIdx := indexUnescapedByte(pattern[patIdx:], ']', true)
+ if closingIdx == -1 {
+ // no closing `]`
+ return false, ErrBadPattern
+ }
+
+ patIdx += closingIdx + 1
+ nameIdx += nameRuneLen
+ continue
+
+ case '{':
+ // Note: 'startOfSegment = false' was removed here. This block is
+ // guaranteed to return, so the assignment was dead code and was
+ // triggering a lint error
+ patIdx++
+ closingIdx := indexMatchedClosingAlt(pattern[patIdx:], separator != '\\')
+ if closingIdx == -1 {
+ // no closing `}`
+ return false, ErrBadPattern
+ }
+ closingIdx += patIdx
+
+ for {
+ commaIdx := indexNextAlt(pattern[patIdx:closingIdx], separator != '\\')
+ if commaIdx == -1 {
+ break
+ }
+ commaIdx += patIdx
+
+ result, err := matchWithSeparator(pattern[patIdx:commaIdx]+pattern[closingIdx+1:], name[nameIdx:], separator, validate)
+ if result || err != nil {
+ return result, err
+ }
+
+ patIdx = commaIdx + 1
+ }
+ return matchWithSeparator(pattern[patIdx:closingIdx]+pattern[closingIdx+1:], name[nameIdx:], separator, validate)
+
+ case '\\':
+ if separator != '\\' {
+ // next rune is "escaped" in the pattern - literal match
+ if patIdx++; patIdx >= patLen {
+ // pattern ended
+ return false, ErrBadPattern
+ }
+ }
+ fallthrough
+
+ default:
+ patRune, patRuneLen := utf8.DecodeRuneInString(pattern[patIdx:])
+ nameRune, nameRuneLen := utf8.DecodeRuneInString(name[nameIdx:])
+ if patRune != nameRune {
+ if separator != '\\' && patIdx > 0 && pattern[patIdx-1] == '\\' {
+ // if this rune was meant to be escaped, we need to move patIdx
+ // back to the backslash before backtracking or validating below
+ patIdx--
+ }
+ break
+ }
+
+ patIdx += patRuneLen
+ nameIdx += nameRuneLen
+ startOfSegment = patRune == separator
+ continue
+ }
+ }
+
+ if starPatternBacktrack >= 0 {
+ // `*` backtrack, but only if the `name` rune isn't the separator
+ nameRune, nameRuneLen := utf8.DecodeRuneInString(name[starNameBacktrack:])
+ if nameRune != separator {
+ starNameBacktrack += nameRuneLen
+ patIdx = starPatternBacktrack
+ nameIdx = starNameBacktrack
+ startOfSegment = false
+ continue
+ }
+ }
+
+ if doublestarPatternBacktrack >= 0 {
+ // `**` backtrack, advance `name` past next separator
+ nameIdx = doublestarNameBacktrack
+ for nameIdx < nameLen {
+ nameRune, nameRuneLen := utf8.DecodeRuneInString(name[nameIdx:])
+ nameIdx += nameRuneLen
+ if nameRune == separator {
+ doublestarNameBacktrack = nameIdx
+ patIdx = doublestarPatternBacktrack
+ startOfSegment = true
+ continue MATCH
+ }
+ }
+ }
+
+ if validate && patIdx < patLen && !doValidatePattern(pattern[patIdx:], separator) {
+ return false, ErrBadPattern
+ }
+ return false, nil
+ }
+
+ if nameIdx < nameLen {
+ // we reached the end of `pattern` before the end of `name`
+ return false, nil
+ }
+
+ // we've reached the end of `name`; we've successfully matched if we've also
+ // reached the end of `pattern`, or if the rest of `pattern` can match a
+ // zero-length string
+ return isZeroLengthPattern(pattern[patIdx:], separator)
+}
+
+func isZeroLengthPattern(pattern string, separator rune) (ret bool, err error) {
+ // `/**` is a special case - a pattern such as `path/to/a/**` *should* match
+ // `path/to/a` because `a` might be a directory
+ if pattern == "" || pattern == "*" || pattern == "**" || pattern == string(separator)+"**" {
+ return true, nil
+ }
+
+ if pattern[0] == '{' {
+ closingIdx := indexMatchedClosingAlt(pattern[1:], separator != '\\')
+ if closingIdx == -1 {
+ // no closing '}'
+ return false, ErrBadPattern
+ }
+ closingIdx++
+
+ patIdx := 1
+ for {
+ commaIdx := indexNextAlt(pattern[patIdx:closingIdx], separator != '\\')
+ if commaIdx == -1 {
+ break
+ }
+ commaIdx += patIdx
+
+ ret, err = isZeroLengthPattern(pattern[patIdx:commaIdx]+pattern[closingIdx+1:], separator)
+ if ret || err != nil {
+ return
+ }
+
+ patIdx = commaIdx + 1
+ }
+ return isZeroLengthPattern(pattern[patIdx:closingIdx]+pattern[closingIdx+1:], separator)
+ }
+
+ // no luck - validate the rest of the pattern
+ if !doValidatePattern(pattern, separator) {
+ return false, ErrBadPattern
+ }
+ return false, nil
+}
+
+// Finds the index of the first unescaped byte `c`, or -1.
+func indexUnescapedByte(s string, c byte, allowEscaping bool) int {
+ l := len(s)
+ for i := 0; i < l; i++ {
+ if allowEscaping && s[i] == '\\' {
+ // skip next byte
+ i++
+ } else if s[i] == c {
+ return i
+ }
+ }
+ return -1
+}
+
+// Assuming the byte before the beginning of `s` is an opening `{`, this
+// function will find the index of the matching `}`. That is, it'll skip over
+// any nested `{}` and account for escaping
+func indexMatchedClosingAlt(s string, allowEscaping bool) int {
+ alts := 1
+ l := len(s)
+ for i := 0; i < l; i++ {
+ if allowEscaping && s[i] == '\\' {
+ // skip next byte
+ i++
+ } else if s[i] == '{' {
+ alts++
+ } else if s[i] == '}' {
+ if alts--; alts == 0 {
+ return i
+ }
+ }
+ }
+ return -1
+}
diff --git a/cli/internal/doublestar/utils.go b/cli/internal/doublestar/utils.go
new file mode 100644
index 0000000..7236cd0
--- /dev/null
+++ b/cli/internal/doublestar/utils.go
@@ -0,0 +1,71 @@
+// Package doublestar is adapted from https://github.com/bmatcuk/doublestar
+// Copyright Bob Matcuk. All Rights Reserved.
+// SPDX-License-Identifier: MIT
+package doublestar
+
+// SplitPattern is a utility function. Given a pattern, SplitPattern will
+// return two strings: the first string is everything up to the last slash
+// (`/`) that appears _before_ any unescaped "meta" characters (i.e., `*?[{`).
+// The second string is everything after that slash. For example, given the
+// pattern:
+//
+// ../../path/to/meta*/**
+// ^----------- split here
+//
+// SplitPattern returns "../../path/to" and "meta*/**". This is useful for
+// initializing os.DirFS() to call Glob() because Glob() will silently fail if
+// your pattern includes `/./` or `/../`. For example:
+//
+// base, pattern := SplitPattern("../../path/to/meta*/**")
+// fsys := os.DirFS(base)
+// matches, err := Glob(fsys, pattern)
+//
+// If SplitPattern cannot find somewhere to split the pattern (for example,
+// `meta*/**`), it will return "." and the unaltered pattern (`meta*/**` in
+// this example).
+//
+// Of course, it is your responsibility to decide if the returned base path is
+// "safe" in the context of your application. Perhaps you could use Match() to
+// validate against a list of approved base directories?
+func SplitPattern(p string) (string, string) {
+ base := "."
+ pattern := p
+
+ splitIdx := -1
+ for i := 0; i < len(p); i++ {
+ c := p[i]
+ if c == '\\' {
+ i++
+ } else if c == '/' {
+ splitIdx = i
+ } else if c == '*' || c == '?' || c == '[' || c == '{' {
+ break
+ }
+ }
+
+ if splitIdx >= 0 {
+ return p[:splitIdx], p[splitIdx+1:]
+ }
+
+ return base, pattern
+}
+
+// Finds the next comma, but ignores any commas that appear inside nested `{}`.
+// Assumes that each opening bracket has a corresponding closing bracket.
+func indexNextAlt(s string, allowEscaping bool) int {
+ alts := 1
+ l := len(s)
+ for i := 0; i < l; i++ {
+ if allowEscaping && s[i] == '\\' {
+ // skip next byte
+ i++
+ } else if s[i] == '{' {
+ alts++
+ } else if s[i] == '}' {
+ alts--
+ } else if s[i] == ',' && alts == 1 {
+ return i
+ }
+ }
+ return -1
+}
diff --git a/cli/internal/doublestar/validate.go b/cli/internal/doublestar/validate.go
new file mode 100644
index 0000000..225fc5e
--- /dev/null
+++ b/cli/internal/doublestar/validate.go
@@ -0,0 +1,83 @@
+// Package doublestar is adapted from https://github.com/bmatcuk/doublestar
+// Copyright Bob Matcuk. All Rights Reserved.
+// SPDX-License-Identifier: MIT
+package doublestar
+
+import "path/filepath"
+
+// ValidatePattern validates a pattern. Patterns are validated while they run in Match(),
+// PathMatch(), and Glob(), so you normally wouldn't need to call this.
+// However, there are cases where this might be useful: for example, if your
+// program allows a user to enter a pattern that you'll run at a later time,
+// you might want to validate it.
+//
+// ValidatePattern assumes your pattern uses '/' as the path separator.
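+//
+// For example:
+//
+// ValidatePattern("a/{b,c}/**") // true
+// ValidatePattern("a/[")        // false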
+func ValidatePattern(s string) bool {
+ return doValidatePattern(s, '/')
+}
+
+// ValidatePathPattern is like ValidatePattern, but uses your OS path
+// separator. In other words, use ValidatePattern if you would normally use
+// Match() or Glob(), and use ValidatePathPattern if you would normally use
+// PathMatch(). Keep in mind,
+// Glob() requires '/' separators, even if your OS uses something else.
+func ValidatePathPattern(s string) bool {
+ return doValidatePattern(s, filepath.Separator)
+}
+
+func doValidatePattern(s string, separator rune) bool {
+ altDepth := 0
+ l := len(s)
+VALIDATE:
+ for i := 0; i < l; i++ {
+ switch s[i] {
+ case '\\':
+ if separator != '\\' {
+ // skip the next byte - return false if there is no next byte
+ if i++; i >= l {
+ return false
+ }
+ }
+ continue
+
+ case '[':
+ if i++; i >= l {
+ // class didn't end
+ return false
+ }
+ if s[i] == '^' || s[i] == '!' {
+ i++
+ }
+ if i >= l || s[i] == ']' {
+ // class didn't end or empty character class
+ return false
+ }
+
+ for ; i < l; i++ {
+ if separator != '\\' && s[i] == '\\' {
+ i++
+ } else if s[i] == ']' {
+ // looks good
+ continue VALIDATE
+ }
+ }
+
+ // class didn't end
+ return false
+
+ case '{':
+ altDepth++
+ continue
+
+ case '}':
+ if altDepth == 0 {
+ // alt end without a corresponding start
+ return false
+ }
+ altDepth--
+ continue
+ }
+ }
+
+ // valid as long as all alts are closed
+ return altDepth == 0
+}
diff --git a/cli/internal/encoding/gitoutput/gitoutput.go b/cli/internal/encoding/gitoutput/gitoutput.go
new file mode 100644
index 0000000..1c2ad4f
--- /dev/null
+++ b/cli/internal/encoding/gitoutput/gitoutput.go
@@ -0,0 +1,345 @@
+// Package gitoutput reads the output of calls to `git`.
+package gitoutput
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// These describe the structure of fields in the output of `git` commands.
+var (
+ LsTreeFields = []Field{ObjectMode, ObjectType, ObjectName, Path}
+ LsFilesFields = []Field{ObjectMode, ObjectName, ObjectStage, Path}
+ StatusFields = []Field{StatusX, StatusY, Path}
+)
+
+var _lsTreeFieldToIndex = map[Field]int{
+ ObjectMode: 0,
+ ObjectType: 1,
+ ObjectName: 2,
+ Path: 3,
+}
+
+var _lsFilesFieldToIndex = map[Field]int{
+ ObjectMode: 0,
+ ObjectName: 1,
+ ObjectStage: 2,
+ Path: 3,
+}
+
+var _statusFieldToIndex = map[Field]int{
+ StatusX: 0,
+ StatusY: 1,
+ Path: 2,
+}
+
+// Field is the type for fields available in outputs to `git`.
+// Used for naming and sensible call sites.
+type Field int
+
+const (
+ // ObjectMode is the mode field from `git` outputs, e.g. 100644.
+ ObjectMode Field = iota + 1
+ // ObjectType is the set of allowed types from `git` outputs: blob, tree, commit
+ ObjectType
+ // ObjectName is the 40-character SHA hash
+ ObjectName
+ // ObjectStage is a value 0-3.
+ ObjectStage
+ // StatusX is the first character of the two-character output from `git status`.
+ StatusX
+ // StatusY is the second character of the two-character output from `git status`.
+ StatusY
+ // Path is the file path under version control in `git`.
+ Path
+)
+
+// LsTreeEntry is the result from calling `git ls-tree`
+type LsTreeEntry []string
+
+// LsFilesEntry is the result from calling `git ls-files`
+type LsFilesEntry []string
+
+// StatusEntry is the result from calling `git status`
+type StatusEntry []string
+
+// GetField returns the value of the specified field.
+func (e LsTreeEntry) GetField(field Field) string {
+ value, exists := _lsTreeFieldToIndex[field]
+ if !exists {
+ panic("Received an invalid field for LsTreeEntry.")
+ }
+ return e[value]
+}
+
+// GetField returns the value of the specified field.
+func (e LsFilesEntry) GetField(field Field) string {
+ value, exists := _lsFilesFieldToIndex[field]
+ if !exists {
+ panic("Received an invalid field for LsFilesEntry.")
+ }
+ return e[value]
+}
+
+// GetField returns the value of the specified field.
+func (e StatusEntry) GetField(field Field) string {
+ value, exists := _statusFieldToIndex[field]
+ if !exists {
+ panic("Received an invalid field for StatusEntry.")
+ }
+ return e[value]
+}
+
+// Separators that appear in the output of `git` commands.
+const (
+ _space = ' '
+ _tab = '\t'
+ _nul = '\000'
+)
+
+// A ParseError is returned for parsing errors.
+// Entries and columns are both 1-indexed.
+type ParseError struct {
+ Entry int // Entry where the error occurred
+ Column int // Column where the error occurred
+ Err error // The actual error
+}
+
+// Error creates a string for a parse error.
+func (e *ParseError) Error() string {
+ return fmt.Sprintf("parse error on entry %d, column %d: %v", e.Entry, e.Column, e.Err)
+}
+
+// Unwrap returns the raw error.
+func (e *ParseError) Unwrap() error { return e.Err }
+
+// These are the errors that can be returned in ParseError.Err.
+var (
+ ErrInvalidObjectMode = errors.New("object mode is not valid")
+ ErrInvalidObjectType = errors.New("object type is not valid")
+ ErrInvalidObjectName = errors.New("object name is not valid")
+ ErrInvalidObjectStage = errors.New("object stage is not valid")
+ ErrInvalidObjectStatusX = errors.New("object status x is not valid")
+ ErrInvalidObjectStatusY = errors.New("object status y is not valid")
+ ErrInvalidPath = errors.New("path is not valid")
+ ErrUnknownField = errors.New("unknown field")
+)
+
+// A Reader reads records from `git`'s output.
+type Reader struct {
+ // ReuseRecord controls whether calls to Read may return a slice sharing
+ // the backing array of the previous call's returned slice for performance.
+ // By default, each call to Read returns newly allocated memory owned by the caller.
+ ReuseRecord bool
+
+ // Fields specifies the type of each field.
+ Fields []Field
+
+ reader *bufio.Reader
+
+ // numEntry is the current entry being read in the `git` output.
+ numEntry int
+
+ // rawBuffer is an entry buffer only used by the readEntry method.
+ rawBuffer []byte
+
+ // recordBuffer holds the unescaped fields, one after another.
+ // The fields can be accessed by using the indexes in fieldIndexes.
+ recordBuffer []byte
+
+ // fieldIndexes is an index of fields inside recordBuffer.
+ // The i'th field ends at offset fieldIndexes[i] in recordBuffer.
+ fieldIndexes []int
+
+ // fieldPositions is an index of field positions for the
+ // last record returned by Read.
+ fieldPositions []position
+
+ // lastRecord is a record cache and only used when ReuseRecord == true.
+ lastRecord []string
+}
+
+// NewLSTreeReader returns a new Reader that reads from reader.
+func NewLSTreeReader(reader io.Reader) *Reader {
+ return &Reader{
+ reader: bufio.NewReader(reader),
+ Fields: LsTreeFields,
+ }
+}
+
+// NewLSFilesReader returns a new Reader that reads from reader.
+func NewLSFilesReader(reader io.Reader) *Reader {
+ return &Reader{
+ reader: bufio.NewReader(reader),
+ Fields: LsFilesFields,
+ }
+}
+
+// NewStatusReader returns a new Reader that reads from reader.
+func NewStatusReader(reader io.Reader) *Reader {
+ return &Reader{
+ reader: bufio.NewReader(reader),
+ Fields: StatusFields,
+ }
+}
+
+// Read reads one record from `reader`.
+// Read always returns either a non-nil record or a non-nil error,
+// but not both.
+//
+// If there is no data left to be read, Read returns nil, io.EOF.
+//
+// If ReuseRecord is true, the returned slice may be shared
+// between multiple calls to Read.
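+//
+// A minimal read-loop sketch (`out` is hypothetical, e.g. the stdout of a
+// `git ls-tree -z` invocation):
+//
+// r := NewLSTreeReader(out)
+// for {
+//   record, err := r.Read()
+//   if err == io.EOF {
+//     break
+//   }
+//   if err != nil {
+//     return err
+//   }
+//   fmt.Println(LsTreeEntry(record).GetField(Path))
+// }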
+func (r *Reader) Read() (record []string, err error) {
+ if r.ReuseRecord {
+ record, err = r.readRecord(r.lastRecord)
+ r.lastRecord = record
+ } else {
+ record, err = r.readRecord(nil)
+ }
+ return record, err
+}
+
+// FieldPos returns the entry and column corresponding to
+// the start of the field with the given index in the slice most recently
+// returned by Read. Numbering of entries and columns starts at 1;
+// columns are counted in bytes, not runes.
+//
+// If this is called with an out-of-bounds index, it panics.
+func (r *Reader) FieldPos(field int) (entry int, column int) {
+ if field < 0 || field >= len(r.fieldPositions) {
+ panic("out of range index passed to FieldPos")
+ }
+ p := &r.fieldPositions[field]
+ return p.entry, p.col
+}
+
+// position holds the position of a field in the current entry.
+type position struct {
+ entry, col int
+}
+
+// ReadAll reads all the records from reader until EOF.
+//
+// A successful call returns err == nil, not err == io.EOF. Because ReadAll is
+// defined to read until EOF, it does not treat end of file as an error to be
+// reported.
+func (r *Reader) ReadAll() (records [][]string, err error) {
+ for {
+ record, err := r.readRecord(nil)
+ if err == io.EOF {
+ return records, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+ records = append(records, record)
+ }
+}
+
+// readEntry reads the next entry (with the trailing NUL).
+// If EOF is hit without a trailing NUL, one is appended.
+// If some bytes were read then the error is never io.EOF.
+// The result is only valid until the next call to readEntry.
+func (r *Reader) readEntry() ([]byte, error) {
+ entry, err := r.reader.ReadSlice('\000')
+ if err == bufio.ErrBufferFull {
+ r.rawBuffer = append(r.rawBuffer[:0], entry...)
+ for err == bufio.ErrBufferFull {
+ entry, err = r.reader.ReadSlice('\000')
+ r.rawBuffer = append(r.rawBuffer, entry...)
+ }
+ entry = r.rawBuffer
+ }
+ if len(entry) > 0 && err == io.EOF {
+ entry = append(entry, '\000')
+ err = nil
+ }
+ r.numEntry++
+
+ return entry, err
+}
+
+// getFieldLength returns the field length and the separator length for advancing.
+func getFieldLength(fieldType Field, fieldNumber int, fieldCount int, entry *[]byte) (int, int) {
+ switch fieldType {
+ case StatusX:
+ return 1, 0
+ case StatusY:
+ return 1, 1
+ default:
+ return bytes.IndexRune(*entry, getSeparator(fieldNumber, fieldCount)), 1
+ }
+}
+
+// getSeparator returns the separator between the current field and the next field.
+// Since field separators follow a regular pattern, they are computed rather
+// than hard-coded.
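+// For example, with 4 fields the separators are, in order: ' ', ' ', '\t',
+// and '\000'.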
+func getSeparator(fieldNumber int, fieldCount int) rune {
+ remaining := fieldCount - fieldNumber
+
+ switch remaining {
+ default:
+ return _space
+ case 2:
+ return _tab
+ case 1:
+ return _nul
+ }
+}
+
+// readRecord reads a single record.
+func (r *Reader) readRecord(dst []string) ([]string, error) {
+ entry, errRead := r.readEntry()
+ if errRead == io.EOF {
+ return nil, errRead
+ }
+
+ // Parse each field in the record.
+ r.recordBuffer = r.recordBuffer[:0]
+ r.fieldIndexes = r.fieldIndexes[:0]
+ r.fieldPositions = r.fieldPositions[:0]
+ pos := position{entry: r.numEntry, col: 1}
+
+ fieldCount := len(r.Fields)
+
+ for fieldNumber, fieldType := range r.Fields {
+ length, advance := getFieldLength(fieldType, fieldNumber, fieldCount, &entry)
+ field := entry[:length]
+
+ fieldError := checkValid(fieldType, field)
+ if fieldError != nil {
+ return nil, &ParseError{
+ Entry: pos.entry,
+ Column: pos.col,
+ Err: fieldError,
+ }
+ }
+
+ offset := length + advance
+ entry = entry[offset:]
+ r.recordBuffer = append(r.recordBuffer, field...)
+ r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer))
+ r.fieldPositions = append(r.fieldPositions, pos)
+ pos.col += offset
+ }
+
+ // Create a single string and create slices out of it.
+ // This pins the memory of the fields together, but allocates once.
+ str := string(r.recordBuffer) // Convert to string once to batch allocations
+ dst = dst[:0]
+ if cap(dst) < len(r.fieldIndexes) {
+ dst = make([]string, len(r.fieldIndexes))
+ }
+ dst = dst[:len(r.fieldIndexes)]
+ var preIdx int
+ for i, idx := range r.fieldIndexes {
+ dst[i] = str[preIdx:idx]
+ preIdx = idx
+ }
+
+ return dst, nil
+}
diff --git a/cli/internal/encoding/gitoutput/gitoutput_test.go b/cli/internal/encoding/gitoutput/gitoutput_test.go
new file mode 100644
index 0000000..19ab056
--- /dev/null
+++ b/cli/internal/encoding/gitoutput/gitoutput_test.go
@@ -0,0 +1,377 @@
+package gitoutput
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "testing"
+ "unicode/utf8"
+)
+
+type readTest struct {
+ Name string
+ Input string
+ Output [][]string
+ Reader func(io.Reader) *Reader
+ Positions [][][2]int
+ Errors []error
+
+ // These fields are copied into the Reader
+ ReuseRecord bool
+}
+
+// In these tests, the § and ∑ characters in readTest.Input are used to denote
+// the start of a field and the position of an error respectively.
+// They are removed before parsing and are used to verify the position
+// information reported by FieldPos.
+
+var lsTreeTests = []readTest{
+ {
+ Name: "simple",
+ Input: "§100644 §blob §e69de29bb2d1d6434b8b29ae775ad8c2e48c5391\t§package.json\000",
+ Output: [][]string{{"100644", "blob", "e69de29bb2d1d6434b8b29ae775ad8c2e48c5391", "package.json"}},
+ Reader: NewLSTreeReader,
+ },
+ {
+ Name: "no trailing nul",
+ Input: "§100644 §blob §e69de29bb2d1d6434b8b29ae775ad8c2e48c5391\t§package.json",
+ Output: [][]string{{"100644", "blob", "e69de29bb2d1d6434b8b29ae775ad8c2e48c5391", "package.json"}},
+ Reader: NewLSTreeReader,
+ },
+ {
+ Name: "weird file names",
+ Input: "§100644 §blob §e69de29bb2d1d6434b8b29ae775ad8c2e48c5391\t§\t\000§100644 §blob §e69de29bb2d1d6434b8b29ae775ad8c2e48c5391\t§\"\000§100644 §blob §5b999efa470b056e329b4c23a73904e0794bdc2f\t§\n\000§100644 §blob §f44f57fff95196c5f7139dfa0b96875f1e9650a9\t§.gitignore\000§100644 §blob §33dbaf21275ca2a5f460249d941cbc27d5da3121\t§README.md\000§040000 §tree §7360f2d292aec95907cebdcbb412a6bf2bd10f8a\t§apps\000§100644 §blob §9ec2879b24ce2c817296eebe2cb3846f8e4751ea\t§package.json\000§040000 §tree §5759aadaea2cde55468a61e7104eb0a9d86c1d30\t§packages\000§100644 §blob §33d0621ee2f4da4a2f6f6bdd51a42618d181e337\t§turbo.json\000",
+ Output: [][]string{
+ {"100644", "blob", "e69de29bb2d1d6434b8b29ae775ad8c2e48c5391", "\t"},
+ {"100644", "blob", "e69de29bb2d1d6434b8b29ae775ad8c2e48c5391", "\""},
+ {"100644", "blob", "5b999efa470b056e329b4c23a73904e0794bdc2f", "\n"},
+ {"100644", "blob", "f44f57fff95196c5f7139dfa0b96875f1e9650a9", ".gitignore"},
+ {"100644", "blob", "33dbaf21275ca2a5f460249d941cbc27d5da3121", "README.md"},
+ {"040000", "tree", "7360f2d292aec95907cebdcbb412a6bf2bd10f8a", "apps"},
+ {"100644", "blob", "9ec2879b24ce2c817296eebe2cb3846f8e4751ea", "package.json"},
+ {"040000", "tree", "5759aadaea2cde55468a61e7104eb0a9d86c1d30", "packages"},
+ {"100644", "blob", "33d0621ee2f4da4a2f6f6bdd51a42618d181e337", "turbo.json"},
+ },
+ Reader: NewLSTreeReader,
+ },
+ {
+ Name: "invalid object mode",
+ Input: "∑888888 §blob §5b999efa470b056e329b4c23a73904e0794bdc2f\t§.eslintrc.js\000",
+ Output: [][]string{},
+ Reader: NewLSTreeReader,
+ Errors: []error{&ParseError{Err: ErrInvalidObjectMode}},
+ },
+ {
+ Name: "invalid object type",
+ Input: "§100644 ∑bush §5b999efa470b056e329b4c23a73904e0794bdc2f\t§.eslintrc.js\000",
+ Output: [][]string{},
+ Reader: NewLSTreeReader,
+ Errors: []error{&ParseError{Err: ErrInvalidObjectType}},
+ },
+ {
+ Name: "invalid object name",
+ Input: "§100644 §blob ∑Zb999efa470b056e329b4c23a73904e0794bdc2f\t§.eslintrc.js\000",
+ Output: [][]string{},
+ Reader: NewLSTreeReader,
+ Errors: []error{&ParseError{Err: ErrInvalidObjectName}},
+ },
+ {
+ Name: "invalid path",
+ Input: "§100644 §blob §5b999efa470b056e329b4c23a73904e0794bdc2f\t∑\000",
+ Output: [][]string{},
+ Reader: NewLSTreeReader,
+ Errors: []error{&ParseError{Err: ErrInvalidPath}},
+ },
+}
+
+var lsFilesTests = []readTest{
+ {
+ Name: "simple",
+ Input: "§100644 §e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 §0\t§package.json\000",
+ Output: [][]string{{"100644", "e69de29bb2d1d6434b8b29ae775ad8c2e48c5391", "0", "package.json"}},
+ Reader: NewLSFilesReader,
+ },
+ {
+ Name: "no trailing nul",
+ Input: "§100644 §e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 §0\t§package.json",
+ Output: [][]string{{"100644", "e69de29bb2d1d6434b8b29ae775ad8c2e48c5391", "0", "package.json"}},
+ Reader: NewLSFilesReader,
+ },
+ {
+ Name: "invalid object mode",
+ Input: "∑888888 §e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 §0\t§package.json",
+ Output: [][]string{},
+ Reader: NewLSFilesReader,
+ Errors: []error{&ParseError{Err: ErrInvalidObjectMode}},
+ },
+ {
+ Name: "invalid object name",
+ Input: "§100644 ∑Z69de29bb2d1d6434b8b29ae775ad8c2e48c5391 §0\t§package.json",
+ Output: [][]string{},
+ Reader: NewLSFilesReader,
+ Errors: []error{&ParseError{Err: ErrInvalidObjectName}},
+ },
+ {
+ Name: "invalid object stage",
+ Input: "§100644 §e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 ∑4\t§package.json",
+ Output: [][]string{},
+ Reader: NewLSFilesReader,
+ Errors: []error{&ParseError{Err: ErrInvalidObjectStage}},
+ },
+ {
+ Name: "invalid path",
+ Input: "§100644 §e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 §0\t∑",
+ Output: [][]string{},
+ Reader: NewLSFilesReader,
+ Errors: []error{&ParseError{Err: ErrInvalidPath}},
+ },
+}
+
+var statusTests = []readTest{
+ {
+ Name: "simple",
+ Input: "§A§D §package.json\000",
+ Output: [][]string{{"A", "D", "package.json"}},
+ Reader: NewStatusReader,
+ },
+ {
+ Name: "no trailing nul",
+ Input: "§A§D §package.json",
+ Output: [][]string{{"A", "D", "package.json"}},
+ Reader: NewStatusReader,
+ },
+ {
+ Name: "invalid status X",
+ Input: "∑~§D §package.json\000",
+ Output: [][]string{},
+ Reader: NewStatusReader,
+ Errors: []error{&ParseError{Err: ErrInvalidObjectStatusX}},
+ },
+ {
+ Name: "invalid status Y",
+ Input: "§D∑~ §package.json\000",
+ Output: [][]string{},
+ Reader: NewStatusReader,
+ Errors: []error{&ParseError{Err: ErrInvalidObjectStatusY}},
+ },
+ {
+ Name: "invalid path",
+ Input: "§A§D ∑\000",
+ Output: [][]string{},
+ Reader: NewStatusReader,
+ Errors: []error{&ParseError{Err: ErrInvalidPath}},
+ },
+}
+
+func TestRead(t *testing.T) {
+ newReader := func(tt readTest) (*Reader, [][][2]int, map[int][2]int) {
+ positions, errPositions, input := makePositions(tt.Input)
+ r := tt.Reader(strings.NewReader(input))
+
+ r.ReuseRecord = tt.ReuseRecord
+ return r, positions, errPositions
+ }
+
+ allTests := []readTest{}
+ allTests = append(allTests, lsTreeTests...)
+ allTests = append(allTests, lsFilesTests...)
+ allTests = append(allTests, statusTests...)
+
+ for _, tt := range allTests {
+ t.Run(tt.Name, func(t *testing.T) {
+ r, positions, errPositions := newReader(tt)
+ out, err := r.ReadAll()
+ if wantErr := firstError(tt.Errors, positions, errPositions); wantErr != nil {
+ if !reflect.DeepEqual(err, wantErr) {
+ t.Fatalf("ReadAll() error mismatch:\ngot %v (%#v)\nwant %v (%#v)", err, err, wantErr, wantErr)
+ }
+ if out != nil {
+ t.Fatalf("ReadAll() output:\ngot %q\nwant nil", out)
+ }
+ } else {
+ if err != nil {
+ t.Fatalf("unexpected Readall() error: %v", err)
+ }
+ if !reflect.DeepEqual(out, tt.Output) {
+ t.Fatalf("ReadAll() output:\ngot %q\nwant %q", out, tt.Output)
+ }
+ }
+
+ // Check field and error positions.
+ r, _, _ = newReader(tt)
+ for recNum := 0; ; recNum++ {
+ rec, err := r.Read()
+ var wantErr error
+ if recNum < len(tt.Errors) && tt.Errors[recNum] != nil {
+ wantErr = errorWithPosition(tt.Errors[recNum], recNum, positions, errPositions)
+ } else if recNum >= len(tt.Output) {
+ wantErr = io.EOF
+ }
+ if !reflect.DeepEqual(err, wantErr) {
+ t.Fatalf("Read() error at record %d:\ngot %v (%#v)\nwant %v (%#v)", recNum, err, err, wantErr, wantErr)
+ }
+ if err != nil {
+ if recNum < len(tt.Output) {
+ t.Fatalf("need more records; got %d want %d", recNum, len(tt.Output))
+ }
+ break
+ }
+ if got, want := rec, tt.Output[recNum]; !reflect.DeepEqual(got, want) {
+ t.Errorf("Read vs ReadAll mismatch;\ngot %q\nwant %q", got, want)
+ }
+ pos := positions[recNum]
+ if len(pos) != len(rec) {
+ t.Fatalf("mismatched position length at record %d", recNum)
+ }
+ for i := range rec {
+ entry, col := r.FieldPos(i)
+ if got, want := [2]int{entry, col}, pos[i]; got != want {
+ t.Errorf("position mismatch at record %d, field %d;\ngot %v\nwant %v", recNum, i, got, want)
+ }
+ }
+ }
+ })
+ }
+}
+
+// firstError returns the first non-nil error in errs,
+// with the position adjusted according to the error's
+// index inside positions.
+func firstError(errs []error, positions [][][2]int, errPositions map[int][2]int) error {
+ for i, err := range errs {
+ if err != nil {
+ return errorWithPosition(err, i, positions, errPositions)
+ }
+ }
+ return nil
+}
+
+func errorWithPosition(err error, recNum int, positions [][][2]int, errPositions map[int][2]int) error {
+ parseErr, ok := err.(*ParseError)
+ if !ok {
+ return err
+ }
+ if recNum >= len(positions) {
+ panic(fmt.Errorf("no positions found for error at record %d", recNum))
+ }
+ errPos, ok := errPositions[recNum]
+ if !ok {
+ panic(fmt.Errorf("no error position found for error at record %d", recNum))
+ }
+ parseErr1 := *parseErr
+ parseErr1.Entry = errPos[0]
+ parseErr1.Column = errPos[1]
+ return &parseErr1
+}
+
+// makePositions returns the expected field positions of all the fields in text,
+// the positions of any errors, and the text with the position markers removed.
+//
+// The start of each field is marked with a § symbol;
+// Error positions are marked with ∑ symbols.
+func makePositions(text string) ([][][2]int, map[int][2]int, string) {
+ buf := make([]byte, 0, len(text))
+ var positions [][][2]int
+ errPositions := make(map[int][2]int)
+ entry, col := 1, 1
+ recNum := 0
+
+ for len(text) > 0 {
+ r, size := utf8.DecodeRuneInString(text)
+ switch r {
+ case '\000':
+ col = 1
+ buf = append(buf, '\000')
+ positions = append(positions, [][2]int{})
+ entry++
+ recNum++
+ case '§':
+ if len(positions) == 0 {
+ positions = append(positions, [][2]int{})
+ }
+ positions[len(positions)-1] = append(positions[len(positions)-1], [2]int{entry, col})
+ case '∑':
+ errPositions[recNum] = [2]int{entry, col}
+ default:
+ buf = append(buf, text[:size]...)
+ col += size
+ }
+ text = text[size:]
+ }
+ return positions, errPositions, string(buf)
+}
+
+// nTimes is an io.Reader which yields the string s n times.
+type nTimes struct {
+ s string
+ n int
+ off int
+}
+
+func (r *nTimes) Read(p []byte) (n int, err error) {
+ for {
+ if r.n <= 0 || r.s == "" {
+ return n, io.EOF
+ }
+ n0 := copy(p, r.s[r.off:])
+ p = p[n0:]
+ n += n0
+ r.off += n0
+ if r.off == len(r.s) {
+ r.off = 0
+ r.n--
+ }
+ if len(p) == 0 {
+ return
+ }
+ }
+}
+
+// TODO: track other types.
+// benchmarkRead measures reading the provided data.
+// initReader, if non-nil, modifies the Reader before it's used.
+func benchmarkRead(b *testing.B, getReader func(reader io.Reader) *Reader, initReader func(*Reader), rows string) {
+ b.ReportAllocs()
+ r := getReader(&nTimes{s: rows, n: b.N})
+ if initReader != nil {
+ initReader(r)
+ }
+ for {
+ _, err := r.Read()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+const benchmarkLSTreeData = `100644 blob e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 \000100644 blob e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 "\000100644 blob 5b999efa470b056e329b4c23a73904e0794bdc2f .eslintrc.js\000100644 blob f44f57fff95196c5f7139dfa0b96875f1e9650a9 .gitignore\000100644 blob 33dbaf21275ca2a5f460249d941cbc27d5da3121 README.md\000040000 tree 7360f2d292aec95907cebdcbb412a6bf2bd10f8a apps\000100644 blob 9ec2879b24ce2c817296eebe2cb3846f8e4751ea package.json\000040000 tree 5759aadaea2cde55468a61e7104eb0a9d86c1d30 packages\000100644 blob 33d0621ee2f4da4a2f6f6bdd51a42618d181e337 turbo.json\000`
+const benchmarkLSFilesData = `100644 13e399637190f1edb7f034b4281ecfafb5dab9e2 0 Makefile\000100644 6c1c500409989499db51f1eff37b38b857547fdc 0 cmd/turbo/main.go\000100644 2d2b9a2c3ba82f6b806f58c7f7d5eb55fefa837e 0 cmd/turbo/main_utils.go\000100644 3329c8a7f6edee487caeeaf56c600f7c85fc69e7 0 cmd/turbo/signals.go\000100644 e81df7b6ed9a277c30dd35e3524d00e8b13cf584 0 cmd/turbo/version.go\000100644 8992ebf37df05fc5ff64c0f811a3259adff10d70 0 go.mod\000100644 3da872301c79986673d6a12914fbd48c924f5999 0 go.sum\000100644 d7b2d20a037aa9bf8b48eef451eb5f9ba5904237 0 internal/analytics/analytics.go\000`
+const benchmarkStatusData = ` M cli/internal/encoding/gitoutput/gitoutput.go\000 M cli/internal/encoding/gitoutput/gitoutput_test.go\000?? NOTICES.md\000 M cli/internal/encoding/gitoutput/gitoutput.go\000 M cli/internal/encoding/gitoutput/gitoutput_test.go\000?? NOTICES.md\000 M cli/internal/encoding/gitoutput/gitoutput.go\000 M cli/internal/encoding/gitoutput/gitoutput_test.go\000?? NOTICES.md\000 M cli/internal/encoding/gitoutput/gitoutput.go\000 M cli/internal/encoding/gitoutput/gitoutput_test.go\000?? NOTICES.md\000 M cli/internal/encoding/gitoutput/gitoutput.go\000 M cli/internal/encoding/gitoutput/gitoutput_test.go\000`
+
+func BenchmarkLSTreeRead(b *testing.B) {
+ benchmarkRead(b, NewLSTreeReader, nil, benchmarkLSTreeData)
+}
+
+func BenchmarkLSTreeReadReuseRecord(b *testing.B) {
+ benchmarkRead(b, NewLSTreeReader, func(r *Reader) { r.ReuseRecord = true }, benchmarkLSTreeData)
+}
+
+func BenchmarkLSFilesRead(b *testing.B) {
+ benchmarkRead(b, NewLSFilesReader, nil, benchmarkLSFilesData)
+}
+
+func BenchmarkLSFilesReadReuseRecord(b *testing.B) {
+ benchmarkRead(b, NewLSFilesReader, func(r *Reader) { r.ReuseRecord = true }, benchmarkLSFilesData)
+}
+
+func BenchmarkStatusRead(b *testing.B) {
+ benchmarkRead(b, NewStatusReader, nil, benchmarkStatusData)
+}
+
+func BenchmarkStatusReadReuseRecord(b *testing.B) {
+ benchmarkRead(b, NewStatusReader, func(r *Reader) { r.ReuseRecord = true }, benchmarkStatusData)
+}
diff --git a/cli/internal/encoding/gitoutput/validators.go b/cli/internal/encoding/gitoutput/validators.go
new file mode 100644
index 0000000..e13c2d5
--- /dev/null
+++ b/cli/internal/encoding/gitoutput/validators.go
@@ -0,0 +1,148 @@
+package gitoutput
+
+import "bytes"
+
+var _allowedObjectType = []byte(" blob tree commit ")
+var _allowedStatusChars = []byte(" MTADRCU?!")
+
+// checkValid provides a uniform interface for calling `gitoutput` validators.
+func checkValid(fieldType Field, value []byte) error {
+ switch fieldType {
+ case ObjectMode:
+ return checkObjectMode(value)
+ case ObjectType:
+ return checkObjectType(value)
+ case ObjectName:
+ return CheckObjectName(value)
+ case ObjectStage:
+ return checkObjectStage(value)
+ case StatusX:
+ return checkStatusX(value)
+ case StatusY:
+ return checkStatusY(value)
+ case Path:
+ return checkPath(value)
+ default:
+ return ErrUnknownField
+ }
+}
+
+// checkObjectMode asserts that a byte slice is a six digit octal string (100644).
+// It does not attempt to ensure that the values in particular positions are reasonable.
+func checkObjectMode(value []byte) error {
+ if len(value) != 6 {
+ return ErrInvalidObjectMode
+ }
+
+ // 0-7 are 0x30 - 0x37
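+ // XOR-ing a byte with 0x30 maps '0'..'7' to 0..7; any other byte yields
+ // a value greater than 7, so a single comparison suffices.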
+ for _, currentByte := range value {
+ if (currentByte ^ 0x30) > 7 {
+ return ErrInvalidObjectMode
+ }
+ }
+
+ // length of 6, 0-7
+ return nil
+}
+
+// checkObjectType asserts that a byte slice is a valid possibility (blob, tree, commit).
+func checkObjectType(value []byte) error {
+ typeLength := len(value)
+ // Based upon:
+ // min(len("blob"), len("tree"), len("commit"))
+ // max(len("blob"), len("tree"), len("commit"))
+ if typeLength < 4 || typeLength > 6 {
+ return ErrInvalidObjectType
+ }
+
+ // Because fields are space-separated, a value can never contain a space.
+ // We exploit that to enable fast lookups in _allowedObjectType.
+ index := bytes.Index(_allowedObjectType, value)
+
+ // Impossible to match at 0, not found is -1.
+ if index < 1 {
+ return ErrInvalidObjectType
+ }
+
+ // Followed by a space.
+ if _allowedObjectType[index-1] != byte(_space) {
+ return ErrInvalidObjectType
+ }
+
+ // Preceded by a space.
+ if _allowedObjectType[index+typeLength] != byte(_space) {
+ return ErrInvalidObjectType
+ }
+ return nil
+}
+
+// CheckObjectName asserts that a byte slice looks like a SHA hash.
+func CheckObjectName(value []byte) error {
+ if len(value) != 40 {
+ return ErrInvalidObjectName
+ }
+
+ // 0-9 are 0x30 - 0x39
+ // a-f are 0x61 - 0x66
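+ // XOR-ing with 0x30 maps '0'..'9' to 0..9, and XOR-ing with 0x60 maps
+ // 'a'..'f' to 1..6, so the checks below accept exactly lowercase hex.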
+ for _, currentByte := range value {
+ isNumber := (currentByte ^ 0x30) < 10
+ numericAlpha := (currentByte ^ 0x60)
+ isAlpha := (numericAlpha < 7) && (numericAlpha > 0)
+ if !(isNumber || isAlpha) {
+ return ErrInvalidObjectName
+ }
+ }
+
+ // length of 40, hex
+ return nil
+}
+
+// checkObjectStage asserts that a byte slice is a valid possibility (0-3).
+func checkObjectStage(value []byte) error {
+ // 0-3 are 0x30 - 0x33
+ if len(value) != 1 {
+ return ErrInvalidObjectStage
+ }
+
+ currentByte := value[0]
+ if (currentByte ^ 0x30) >= 4 {
+ return ErrInvalidObjectStage
+ }
+
+ return nil
+}
+
+// checkStatusX asserts that a byte slice is a valid possibility (" MTADRCU?!").
+func checkStatusX(value []byte) error {
+ if len(value) != 1 {
+ return ErrInvalidObjectStatusX
+ }
+
+ index := bytes.Index(_allowedStatusChars, value)
+ if index == -1 {
+ return ErrInvalidObjectStatusX
+ }
+ return nil
+}
+
+// checkStatusY asserts that a byte slice is a valid possibility (" MTADRCU?!").
+func checkStatusY(value []byte) error {
+ if len(value) != 1 {
+ return ErrInvalidObjectStatusY
+ }
+
+ index := bytes.Index(_allowedStatusChars, value)
+ if index == -1 {
+ return ErrInvalidObjectStatusY
+ }
+ return nil
+}
+
+// checkPath asserts that a byte slice is non-empty.
+func checkPath(value []byte) error {
+ // Exists at all. This is best-effort, as trying to be fully compatible is silly.
+ if len(value) == 0 {
+ return ErrInvalidPath
+ }
+ return nil
+}
diff --git a/cli/internal/encoding/gitoutput/validators_test.go b/cli/internal/encoding/gitoutput/validators_test.go
new file mode 100644
index 0000000..29e1274
--- /dev/null
+++ b/cli/internal/encoding/gitoutput/validators_test.go
@@ -0,0 +1,514 @@
+package gitoutput
+
+import (
+ "testing"
+)
+
+func Test_checkValid(t *testing.T) {
+ type args struct {
+ fieldType Field
+ value []byte
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ {
+ name: "ObjectMode",
+ args: args{
+ fieldType: ObjectMode,
+ value: []byte("100644"),
+ },
+ wantErr: false,
+ },
+ {
+ name: "ObjectType",
+ args: args{
+ fieldType: ObjectType,
+ value: []byte("blob"),
+ },
+ wantErr: false,
+ },
+ {
+ name: "ObjectName",
+ args: args{
+ fieldType: ObjectName,
+ value: []byte("8992ebf37df05fc5ff64c0f811a3259adff10d70"),
+ },
+ wantErr: false,
+ },
+ {
+ name: "ObjectStage",
+ args: args{
+ fieldType: ObjectStage,
+ value: []byte("0"),
+ },
+ wantErr: false,
+ },
+ {
+ name: "StatusX",
+ args: args{
+ fieldType: StatusX,
+ value: []byte("!"),
+ },
+ wantErr: false,
+ },
+ {
+ name: "StatusY",
+ args: args{
+ fieldType: StatusY,
+ value: []byte("?"),
+ },
+ wantErr: false,
+ },
+ {
+ name: "Path",
+ args: args{
+ fieldType: Path,
+ value: []byte("/hello/world"),
+ },
+ wantErr: false,
+ },
+ {
+ name: "Unknown",
+ args: args{
+ fieldType: Field(12),
+ value: []byte("unused"),
+ },
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if err := checkValid(tt.args.fieldType, tt.args.value); (err != nil) != tt.wantErr {
+ t.Errorf("checkValid() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
+
+func Test_checkObjectMode(t *testing.T) {
+ type args struct {
+ value []byte
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ {
+ name: "Simple",
+ args: args{
+ value: []byte("100644"),
+ },
+ wantErr: false,
+ },
+ {
+ name: "All sevens",
+ args: args{
+ value: []byte("777777"),
+ },
+ wantErr: false,
+ },
+ {
+ name: "All zeroes",
+ args: args{
+ value: []byte("000000"),
+ },
+ wantErr: false,
+ },
+ {
+ name: "Non-octal chars",
+ args: args{
+ value: []byte("sixsix"),
+ },
+ wantErr: true,
+ },
+ {
+ name: "nul",
+ args: args{
+ value: []byte("\000\000\000\000\000\000"),
+ },
+ wantErr: true,
+ },
+ {
+ name: "too long",
+ args: args{
+ value: []byte("1234567"),
+ },
+ wantErr: true,
+ },
+ {
+ name: "off by plus one",
+ args: args{
+ value: []byte("888888"),
+ },
+ wantErr: true,
+ },
+ {
+ name: "off by minus one",
+ args: args{
+ value: []byte("//////"),
+ },
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if err := checkObjectMode(tt.args.value); (err != nil) != tt.wantErr {
+ t.Errorf("checkObjectMode() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
+
+func Test_checkObjectType(t *testing.T) {
+ type args struct {
+ value []byte
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ {
+ name: "Finds blob",
+ args: args{
+ value: []byte("blob"),
+ },
+ wantErr: false,
+ },
+ {
+ name: "Finds tree",
+ args: args{
+ value: []byte("tree"),
+ },
+ wantErr: false,
+ },
+ {
+ name: "Finds commit",
+ args: args{
+ value: []byte("commit"),
+ },
+ wantErr: false,
+ },
+ {
+ name: "nonsense input",
+ args: args{
+ value: []byte("input"),
+ },
+ wantErr: true,
+ },
+ {
+ name: "Knows too much about the implementation details (all 3)",
+ args: args{
+ value: []byte("blob tree commit"),
+ },
+ wantErr: true,
+ },
+ {
+ name: "Knows too much about the implementation details (first two)",
+ args: args{
+ value: []byte("blob tree"),
+ },
+ wantErr: true,
+ },
+ {
+ name: "Knows too much about the implementation details (last two)",
+ args: args{
+ value: []byte("tree commit"),
+ },
+ wantErr: true,
+ },
+ {
+ name: "Knows too much about the implementation details (arbitrary substring)",
+ args: args{
+ value: []byte("tree c"),
+ },
+ wantErr: true,
+ },
+ {
+ name: "Knows too much about the implementation details (space)",
+ args: args{
+ value: []byte(" "),
+ },
+ wantErr: true,
+ },
+ {
+ name: "Knows too much about the implementation details (empty string)",
+ args: args{
+ value: []byte(""),
+ },
+ wantErr: true,
+ },
+ {
+ name: "Knows too much about the implementation details (leading space)",
+ args: args{
+ value: []byte(" tree"),
+ },
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if err := checkObjectType(tt.args.value); (err != nil) != tt.wantErr {
+ t.Errorf("checkObjectType() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
+
+func TestCheckObjectName(t *testing.T) {
+ type args struct {
+ value []byte
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ {
+ name: "Simple",
+ args: args{
+ value: []byte("8992ebf37df05fc5ff64c0f811a3259adff10d70"),
+ },
+ wantErr: false,
+ },
+ {
+ name: "Too short",
+ args: args{
+ value: []byte("8992ebf37df05fc5ff64"),
+ },
+ wantErr: true,
+ },
+ {
+ name: "Too long",
+ args: args{
+ value: []byte("8992ebf37df05fc5ff64c0f811a3259adff10d708992ebf37df05fc5ff64c0f811a3259adff10d70"),
+ },
+ wantErr: true,
+ },
+ {
+ name: "Not hex",
+ args: args{
+ value: []byte("z992ebf37df05fc5ff64c0f811a3259adff10d70"),
+ },
+ wantErr: true,
+ },
+ {
+ name: "Not lowercase",
+ args: args{
+ value: []byte("8992EBF37DF05FC5FF64C0F811A3259ADFF10D70"),
+ },
+ wantErr: true,
+ },
+ {
+ name: "Off by plus one in the ASCII table (a-f).",
+ args: args{
+ value: []byte("gggggggggggggggggggggggggggggggggggggggg"),
+ },
+ wantErr: true,
+ },
+ {
+ name: "Off by minus one in the ASCII table (a-f).",
+ args: args{
+ value: []byte("````````````````````````````````````````"),
+ },
+ wantErr: true,
+ },
+ {
+ name: "Off by minus one in the ASCII table (0-9).",
+ args: args{
+ value: []byte("////////////////////////////////////////"),
+ },
+ wantErr: true,
+ },
+ {
+ name: "Off by plus one in the ASCII table (0-9).",
+ args: args{
+ value: []byte("::::::::::::::::::::::::::::::::::::::::"),
+ },
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if err := CheckObjectName(tt.args.value); (err != nil) != tt.wantErr {
+ t.Errorf("CheckObjectName() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
+
+func Test_checkObjectStage(t *testing.T) {
+ type args struct {
+ value []byte
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ {
+ name: "0",
+ args: args{
+ value: []byte("0"),
+ },
+ wantErr: false,
+ },
+ {
+ name: "1",
+ args: args{
+ value: []byte("1"),
+ },
+ wantErr: false,
+ },
+ {
+ name: "2",
+ args: args{
+ value: []byte("2"),
+ },
+ wantErr: false,
+ },
+ {
+ name: "3",
+ args: args{
+ value: []byte("3"),
+ },
+ wantErr: false,
+ },
+ {
+ name: "/",
+ args: args{
+ value: []byte("/"),
+ },
+ wantErr: true,
+ },
+ {
+ name: "4",
+ args: args{
+ value: []byte("4"),
+ },
+ wantErr: true,
+ },
+ {
+ name: "00",
+ args: args{
+ value: []byte("00"),
+ },
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if err := checkObjectStage(tt.args.value); (err != nil) != tt.wantErr {
+ t.Errorf("checkObjectStage() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
+
+func Test_checkStatus(t *testing.T) {
+ type args struct {
+ value []byte
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ {
+ name: "Simple",
+ args: args{
+ value: []byte("D"),
+ },
+ wantErr: false,
+ },
+ {
+ name: "Space",
+ args: args{
+ value: []byte(" "),
+ },
+ wantErr: false,
+ },
+ {
+ name: "Empty",
+ args: args{
+ value: []byte(""),
+ },
+ wantErr: true,
+ },
+ {
+ name: "Too long",
+ args: args{
+ value: []byte("?!"),
+ },
+ wantErr: true,
+ },
+ {
+ name: "nul",
+ args: args{
+ value: []byte("\000"),
+ },
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if err := checkStatusX(tt.args.value); (err != nil) != tt.wantErr {
+ t.Errorf("checkStatusX() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ if err := checkStatusY(tt.args.value); (err != nil) != tt.wantErr {
+ t.Errorf("checkStatusY() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
+
+func Test_checkPath(t *testing.T) {
+ type args struct {
+ value []byte
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ {
+ name: "Simple",
+ args: args{
+ value: []byte("./"),
+ },
+ wantErr: false,
+ },
+ {
+ name: "newline",
+ args: args{
+ value: []byte("has\nnewline"),
+ },
+ wantErr: false,
+ },
+ {
+ name: "Empty",
+ args: args{
+ value: []byte(""),
+ },
+ wantErr: true,
+ },
+ {
+ name: "newline",
+ args: args{
+ value: []byte("\n"),
+ },
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if err := checkPath(tt.args.value); (err != nil) != tt.wantErr {
+ t.Errorf("checkPath() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
diff --git a/cli/internal/ffi/bindings.h b/cli/internal/ffi/bindings.h
new file mode 100644
index 0000000..c2bbcea
--- /dev/null
+++ b/cli/internal/ffi/bindings.h
@@ -0,0 +1,21 @@
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+typedef struct Buffer {
+ uint32_t len;
+ uint8_t *data;
+} Buffer;
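+
+/* NOTE (illustrative, not generated): Buffer is the ownership-passing
+   envelope for every FFI call below. The memory behind `data` is freed by
+   whichever side allocated it; see the SAFETY notes in ffi.go for the
+   matching rules. */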
+
+void free_buffer(struct Buffer buffer);
+
+struct Buffer get_turbo_data_dir(void);
+
+struct Buffer changed_files(struct Buffer buffer);
+
+struct Buffer previous_content(struct Buffer buffer);
+
+struct Buffer npm_transitive_closure(struct Buffer buf);
+
+struct Buffer npm_subgraph(struct Buffer buf);
diff --git a/cli/internal/ffi/ffi.go b/cli/internal/ffi/ffi.go
new file mode 100644
index 0000000..7ac15e4
--- /dev/null
+++ b/cli/internal/ffi/ffi.go
@@ -0,0 +1,224 @@
+package ffi
+
+// ffi
+//
+// Please read the notes about safety (marked with `SAFETY`) in both this file
+// and in turborepo-ffi/lib.rs before modifying this file.
+
+// #include "bindings.h"
+//
+// #cgo darwin,arm64 LDFLAGS: -L${SRCDIR} -lturborepo_ffi_darwin_arm64 -lz -liconv
+// #cgo darwin,amd64 LDFLAGS: -L${SRCDIR} -lturborepo_ffi_darwin_amd64 -lz -liconv
+// #cgo linux,arm64,staticbinary LDFLAGS: -L${SRCDIR} -lturborepo_ffi_linux_arm64 -lunwind
+// #cgo linux,amd64,staticbinary LDFLAGS: -L${SRCDIR} -lturborepo_ffi_linux_amd64 -lunwind
+// #cgo linux,arm64,!staticbinary LDFLAGS: -L${SRCDIR} -lturborepo_ffi_linux_arm64 -lz
+// #cgo linux,amd64,!staticbinary LDFLAGS: -L${SRCDIR} -lturborepo_ffi_linux_amd64 -lz
+// #cgo windows,amd64 LDFLAGS: -L${SRCDIR} -lturborepo_ffi_windows_amd64 -lole32 -lbcrypt -lws2_32 -luserenv
+import "C"
+
+import (
+ "errors"
+ "reflect"
+ "unsafe"
+
+ ffi_proto "github.com/vercel/turbo/cli/internal/ffi/proto"
+ "google.golang.org/protobuf/proto"
+)
+
+// Unmarshal consumes a buffer and parses it into a proto.Message
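+//
+// EXAMPLE: an illustrative call site, decoding a response buffer handed back
+// from the rust side (a sketch; `respBuf` stands for any such buffer):
+//
+//	resp := ffi_proto.ChangedFilesResp{}
+//	if err := Unmarshal(respBuf, resp.ProtoReflect().Interface()); err != nil {
+//		panic(err)
+//	}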
+func Unmarshal[M proto.Message](b C.Buffer, c M) error {
+ bytes := toBytes(b)
+ if err := proto.Unmarshal(bytes, c); err != nil {
+ return err
+ }
+
+ // free the buffer on the rust side
+ //
+ // SAFETY: do not use `C.free_buffer` to free a buffer that has been allocated
+ // on the go side. If you happen to accidentally use the wrong one, you can
+ // expect a segfault on some platforms. This is the only valid callsite.
+ C.free_buffer(b)
+
+ return nil
+}
+
+// Marshal consumes a proto.Message and returns a buffer
+//
+// NOTE: the buffer must be freed by calling `Free` on it
+func Marshal[M proto.Message](c M) C.Buffer {
+ bytes, err := proto.Marshal(c)
+ if err != nil {
+ panic(err)
+ }
+
+ return toBuffer(bytes)
+}
+
+// Free frees a buffer that has been allocated *on the go side*.
+//
+// SAFETY: this is not the same as `C.free_buffer`, which frees a buffer that
+// has been allocated *on the rust side*. If you happen to accidentally use
+// the wrong one, you can expect a segfault on some platforms.
+//
+// EXAMPLE: it is recommended to use this function via a `defer` statement, like so:
+//
+// reqBuf := Marshal(&req)
+// defer reqBuf.Free()
+func (c C.Buffer) Free() {
+ C.free(unsafe.Pointer(c.data))
+}
+
+// rather than use C.GoBytes, we use this function to avoid copying the bytes,
+// since it is going to be immediately Unmarshalled into a proto.Message
+//
+// SAFETY: Go slices contain a pointer to an underlying buffer with a length.
+// If the buffer is known to the garbage collector, dropping the last slice
+// causes the memory to be freed. This memory is owned by the rust side (and
+// is not known to the garbage collector), so dropping the slice does nothing.
+func toBytes(b C.Buffer) []byte {
+ var out []byte
+
+ n := (uint32)(b.len)
+
+ sh := (*reflect.SliceHeader)(unsafe.Pointer(&out))
+ sh.Data = uintptr(unsafe.Pointer(b.data))
+ sh.Len = int(n)
+ sh.Cap = int(n)
+
+ return out
+}
+
+func toBuffer(bytes []byte) C.Buffer {
+ b := C.Buffer{}
+ b.len = C.uint(len(bytes))
+ b.data = (*C.uchar)(C.CBytes(bytes))
+ return b
+}
+
+// GetTurboDataDir returns the path to the Turbo data directory
+func GetTurboDataDir() string {
+ buffer := C.get_turbo_data_dir()
+ resp := ffi_proto.TurboDataDirResp{}
+ if err := Unmarshal(buffer, resp.ProtoReflect().Interface()); err != nil {
+ panic(err)
+ }
+ return resp.Dir
+}
+
+// Go convention is to use an empty string for an uninitialized or null-valued
+// string. Rust convention is to use an Option<String> for the same purpose, which
+// is encoded on the Go side as *string. This converts between the two.
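+//
+// EXAMPLE: stringToRef("") yields nil (Rust None), while stringToRef("HEAD")
+// yields a pointer to "HEAD" (Rust Some("HEAD")).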
+func stringToRef(s string) *string {
+ if s == "" {
+ return nil
+ }
+ return &s
+}
+
+// ChangedFiles returns the files changed between two commits, along with changes in the working directory and the index, and optionally untracked files
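+//
+// EXAMPLE: a sketch with hypothetical paths, diffing HEAD~1 against HEAD:
+//
+//	files, err := ChangedFiles("/repo", "/repo", "HEAD~1", "HEAD")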
+func ChangedFiles(gitRoot string, turboRoot string, fromCommit string, toCommit string) ([]string, error) {
+ fromCommitRef := stringToRef(fromCommit)
+ toCommitRef := stringToRef(toCommit)
+
+ req := ffi_proto.ChangedFilesReq{
+ GitRoot: gitRoot,
+ FromCommit: fromCommitRef,
+ ToCommit: toCommitRef,
+ TurboRoot: turboRoot,
+ }
+
+ reqBuf := Marshal(&req)
+ defer reqBuf.Free()
+
+ respBuf := C.changed_files(reqBuf)
+
+ resp := ffi_proto.ChangedFilesResp{}
+ if err := Unmarshal(respBuf, resp.ProtoReflect().Interface()); err != nil {
+ panic(err)
+ }
+ if err := resp.GetError(); err != "" {
+ return nil, errors.New(err)
+ }
+
+ return resp.GetFiles().GetFiles(), nil
+}
+
+// PreviousContent returns the content of a file at a previous commit
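+//
+// EXAMPLE: a sketch with hypothetical arguments:
+//
+//	content, err := PreviousContent("/repo", "HEAD~1", "package.json")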
+func PreviousContent(gitRoot, fromCommit, filePath string) ([]byte, error) {
+ req := ffi_proto.PreviousContentReq{
+ GitRoot: gitRoot,
+ FromCommit: fromCommit,
+ FilePath: filePath,
+ }
+
+ reqBuf := Marshal(&req)
+ defer reqBuf.Free()
+
+ respBuf := C.previous_content(reqBuf)
+
+ resp := ffi_proto.PreviousContentResp{}
+ if err := Unmarshal(respBuf, resp.ProtoReflect().Interface()); err != nil {
+ panic(err)
+ }
+ content := resp.GetContent()
+ if err := resp.GetError(); err != "" {
+ return nil, errors.New(err)
+ }
+
+ return []byte(content), nil
+}
+
+// NpmTransitiveDeps returns the transitive external dependencies of a given package, resolved from the given lockfile contents and unresolved dependency specifiers
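+//
+// EXAMPLE: a sketch; `lockfile` holds raw package-lock.json bytes and the map
+// carries the package's unresolved specifiers:
+//
+//	pkgs, err := NpmTransitiveDeps(lockfile, "apps/web", map[string]string{"lodash": "^4.17.21"})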
+func NpmTransitiveDeps(content []byte, pkgDir string, unresolvedDeps map[string]string) ([]*ffi_proto.LockfilePackage, error) {
+ return transitiveDeps(npmTransitiveDeps, content, pkgDir, unresolvedDeps)
+}
+
+func npmTransitiveDeps(buf C.Buffer) C.Buffer {
+ return C.npm_transitive_closure(buf)
+}
+
+func transitiveDeps(cFunc func(C.Buffer) C.Buffer, content []byte, pkgDir string, unresolvedDeps map[string]string) ([]*ffi_proto.LockfilePackage, error) {
+ req := ffi_proto.TransitiveDepsRequest{
+ Contents: content,
+ WorkspaceDir: pkgDir,
+ UnresolvedDeps: unresolvedDeps,
+ }
+ reqBuf := Marshal(&req)
+ resBuf := cFunc(reqBuf)
+ reqBuf.Free()
+
+ resp := ffi_proto.TransitiveDepsResponse{}
+ if err := Unmarshal(resBuf, resp.ProtoReflect().Interface()); err != nil {
+ panic(err)
+ }
+
+ if err := resp.GetError(); err != "" {
+ return nil, errors.New(err)
+ }
+
+ list := resp.GetPackages()
+ return list.GetList(), nil
+}
+
+// NpmSubgraph returns the contents of an npm lockfile subgraph
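+//
+// EXAMPLE: a sketch with hypothetical arguments, pruning the lockfile to one
+// workspace and the packages it needs:
+//
+//	pruned, err := NpmSubgraph(lockfile, []string{"apps/web"}, []string{"lodash@4.17.21"})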
+func NpmSubgraph(content []byte, workspaces []string, packages []string) ([]byte, error) {
+ req := ffi_proto.SubgraphRequest{
+ Contents: content,
+ Workspaces: workspaces,
+ Packages: packages,
+ }
+ reqBuf := Marshal(&req)
+ resBuf := C.npm_subgraph(reqBuf)
+ reqBuf.Free()
+
+ resp := ffi_proto.SubgraphResponse{}
+ if err := Unmarshal(resBuf, resp.ProtoReflect().Interface()); err != nil {
+ panic(err)
+ }
+
+ if err := resp.GetError(); err != "" {
+ return nil, errors.New(err)
+ }
+
+ return resp.GetContents(), nil
+}
diff --git a/cli/internal/ffi/proto/messages.pb.go b/cli/internal/ffi/proto/messages.pb.go
new file mode 100644
index 0000000..22992d3
--- /dev/null
+++ b/cli/internal/ffi/proto/messages.pb.go
@@ -0,0 +1,1380 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.12
+// source: turborepo-ffi/messages.proto
+
+package proto
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type TurboDataDirResp struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Dir string `protobuf:"bytes,1,opt,name=dir,proto3" json:"dir,omitempty"`
+}
+
+func (x *TurboDataDirResp) Reset() {
+ *x = TurboDataDirResp{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_turborepo_ffi_messages_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TurboDataDirResp) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TurboDataDirResp) ProtoMessage() {}
+
+func (x *TurboDataDirResp) ProtoReflect() protoreflect.Message {
+ mi := &file_turborepo_ffi_messages_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TurboDataDirResp.ProtoReflect.Descriptor instead.
+func (*TurboDataDirResp) Descriptor() ([]byte, []int) {
+ return file_turborepo_ffi_messages_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *TurboDataDirResp) GetDir() string {
+ if x != nil {
+ return x.Dir
+ }
+ return ""
+}
+
+type GlobReq struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ BasePath string `protobuf:"bytes,1,opt,name=base_path,json=basePath,proto3" json:"base_path,omitempty"`
+ IncludePatterns []string `protobuf:"bytes,2,rep,name=include_patterns,json=includePatterns,proto3" json:"include_patterns,omitempty"`
+ ExcludePatterns []string `protobuf:"bytes,3,rep,name=exclude_patterns,json=excludePatterns,proto3" json:"exclude_patterns,omitempty"`
+ FilesOnly bool `protobuf:"varint,4,opt,name=files_only,json=filesOnly,proto3" json:"files_only,omitempty"` // note that the default for a bool is false
+}
+
+func (x *GlobReq) Reset() {
+ *x = GlobReq{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_turborepo_ffi_messages_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GlobReq) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GlobReq) ProtoMessage() {}
+
+func (x *GlobReq) ProtoReflect() protoreflect.Message {
+ mi := &file_turborepo_ffi_messages_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GlobReq.ProtoReflect.Descriptor instead.
+func (*GlobReq) Descriptor() ([]byte, []int) {
+ return file_turborepo_ffi_messages_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *GlobReq) GetBasePath() string {
+ if x != nil {
+ return x.BasePath
+ }
+ return ""
+}
+
+func (x *GlobReq) GetIncludePatterns() []string {
+ if x != nil {
+ return x.IncludePatterns
+ }
+ return nil
+}
+
+func (x *GlobReq) GetExcludePatterns() []string {
+ if x != nil {
+ return x.ExcludePatterns
+ }
+ return nil
+}
+
+func (x *GlobReq) GetFilesOnly() bool {
+ if x != nil {
+ return x.FilesOnly
+ }
+ return false
+}
+
+type GlobResp struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Response:
+ // *GlobResp_Files
+ // *GlobResp_Error
+ Response isGlobResp_Response `protobuf_oneof:"response"`
+}
+
+func (x *GlobResp) Reset() {
+ *x = GlobResp{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_turborepo_ffi_messages_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GlobResp) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GlobResp) ProtoMessage() {}
+
+func (x *GlobResp) ProtoReflect() protoreflect.Message {
+ mi := &file_turborepo_ffi_messages_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GlobResp.ProtoReflect.Descriptor instead.
+func (*GlobResp) Descriptor() ([]byte, []int) {
+ return file_turborepo_ffi_messages_proto_rawDescGZIP(), []int{2}
+}
+
+func (m *GlobResp) GetResponse() isGlobResp_Response {
+ if m != nil {
+ return m.Response
+ }
+ return nil
+}
+
+func (x *GlobResp) GetFiles() *GlobRespList {
+ if x, ok := x.GetResponse().(*GlobResp_Files); ok {
+ return x.Files
+ }
+ return nil
+}
+
+func (x *GlobResp) GetError() string {
+ if x, ok := x.GetResponse().(*GlobResp_Error); ok {
+ return x.Error
+ }
+ return ""
+}
+
+type isGlobResp_Response interface {
+ isGlobResp_Response()
+}
+
+type GlobResp_Files struct {
+ Files *GlobRespList `protobuf:"bytes,1,opt,name=files,proto3,oneof"`
+}
+
+type GlobResp_Error struct {
+ Error string `protobuf:"bytes,2,opt,name=error,proto3,oneof"`
+}
+
+func (*GlobResp_Files) isGlobResp_Response() {}
+
+func (*GlobResp_Error) isGlobResp_Response() {}
+
+type GlobRespList struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Files []string `protobuf:"bytes,1,rep,name=files,proto3" json:"files,omitempty"`
+}
+
+func (x *GlobRespList) Reset() {
+ *x = GlobRespList{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_turborepo_ffi_messages_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GlobRespList) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GlobRespList) ProtoMessage() {}
+
+func (x *GlobRespList) ProtoReflect() protoreflect.Message {
+ mi := &file_turborepo_ffi_messages_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GlobRespList.ProtoReflect.Descriptor instead.
+func (*GlobRespList) Descriptor() ([]byte, []int) {
+ return file_turborepo_ffi_messages_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *GlobRespList) GetFiles() []string {
+ if x != nil {
+ return x.Files
+ }
+ return nil
+}
+
+type ChangedFilesReq struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ GitRoot string `protobuf:"bytes,1,opt,name=git_root,json=gitRoot,proto3" json:"git_root,omitempty"`
+ TurboRoot string `protobuf:"bytes,2,opt,name=turbo_root,json=turboRoot,proto3" json:"turbo_root,omitempty"`
+ FromCommit *string `protobuf:"bytes,3,opt,name=from_commit,json=fromCommit,proto3,oneof" json:"from_commit,omitempty"`
+ ToCommit *string `protobuf:"bytes,4,opt,name=to_commit,json=toCommit,proto3,oneof" json:"to_commit,omitempty"`
+}
+
+func (x *ChangedFilesReq) Reset() {
+ *x = ChangedFilesReq{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_turborepo_ffi_messages_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ChangedFilesReq) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ChangedFilesReq) ProtoMessage() {}
+
+func (x *ChangedFilesReq) ProtoReflect() protoreflect.Message {
+ mi := &file_turborepo_ffi_messages_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ChangedFilesReq.ProtoReflect.Descriptor instead.
+func (*ChangedFilesReq) Descriptor() ([]byte, []int) {
+ return file_turborepo_ffi_messages_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *ChangedFilesReq) GetGitRoot() string {
+ if x != nil {
+ return x.GitRoot
+ }
+ return ""
+}
+
+func (x *ChangedFilesReq) GetTurboRoot() string {
+ if x != nil {
+ return x.TurboRoot
+ }
+ return ""
+}
+
+func (x *ChangedFilesReq) GetFromCommit() string {
+ if x != nil && x.FromCommit != nil {
+ return *x.FromCommit
+ }
+ return ""
+}
+
+func (x *ChangedFilesReq) GetToCommit() string {
+ if x != nil && x.ToCommit != nil {
+ return *x.ToCommit
+ }
+ return ""
+}
+
+type ChangedFilesResp struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Response:
+ // *ChangedFilesResp_Files
+ // *ChangedFilesResp_Error
+ Response isChangedFilesResp_Response `protobuf_oneof:"response"`
+}
+
+func (x *ChangedFilesResp) Reset() {
+ *x = ChangedFilesResp{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_turborepo_ffi_messages_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ChangedFilesResp) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ChangedFilesResp) ProtoMessage() {}
+
+func (x *ChangedFilesResp) ProtoReflect() protoreflect.Message {
+ mi := &file_turborepo_ffi_messages_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ChangedFilesResp.ProtoReflect.Descriptor instead.
+func (*ChangedFilesResp) Descriptor() ([]byte, []int) {
+ return file_turborepo_ffi_messages_proto_rawDescGZIP(), []int{5}
+}
+
+func (m *ChangedFilesResp) GetResponse() isChangedFilesResp_Response {
+ if m != nil {
+ return m.Response
+ }
+ return nil
+}
+
+func (x *ChangedFilesResp) GetFiles() *ChangedFilesList {
+ if x, ok := x.GetResponse().(*ChangedFilesResp_Files); ok {
+ return x.Files
+ }
+ return nil
+}
+
+func (x *ChangedFilesResp) GetError() string {
+ if x, ok := x.GetResponse().(*ChangedFilesResp_Error); ok {
+ return x.Error
+ }
+ return ""
+}
+
+type isChangedFilesResp_Response interface {
+ isChangedFilesResp_Response()
+}
+
+type ChangedFilesResp_Files struct {
+ Files *ChangedFilesList `protobuf:"bytes,1,opt,name=files,proto3,oneof"`
+}
+
+type ChangedFilesResp_Error struct {
+ Error string `protobuf:"bytes,2,opt,name=error,proto3,oneof"`
+}
+
+func (*ChangedFilesResp_Files) isChangedFilesResp_Response() {}
+
+func (*ChangedFilesResp_Error) isChangedFilesResp_Response() {}
+
+type ChangedFilesList struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Files []string `protobuf:"bytes,1,rep,name=files,proto3" json:"files,omitempty"`
+}
+
+func (x *ChangedFilesList) Reset() {
+ *x = ChangedFilesList{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_turborepo_ffi_messages_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ChangedFilesList) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ChangedFilesList) ProtoMessage() {}
+
+func (x *ChangedFilesList) ProtoReflect() protoreflect.Message {
+ mi := &file_turborepo_ffi_messages_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ChangedFilesList.ProtoReflect.Descriptor instead.
+func (*ChangedFilesList) Descriptor() ([]byte, []int) {
+ return file_turborepo_ffi_messages_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *ChangedFilesList) GetFiles() []string {
+ if x != nil {
+ return x.Files
+ }
+ return nil
+}
+
+type PreviousContentReq struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ GitRoot string `protobuf:"bytes,1,opt,name=git_root,json=gitRoot,proto3" json:"git_root,omitempty"`
+ FromCommit string `protobuf:"bytes,2,opt,name=from_commit,json=fromCommit,proto3" json:"from_commit,omitempty"`
+ FilePath string `protobuf:"bytes,3,opt,name=file_path,json=filePath,proto3" json:"file_path,omitempty"`
+}
+
+func (x *PreviousContentReq) Reset() {
+ *x = PreviousContentReq{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_turborepo_ffi_messages_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PreviousContentReq) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PreviousContentReq) ProtoMessage() {}
+
+func (x *PreviousContentReq) ProtoReflect() protoreflect.Message {
+ mi := &file_turborepo_ffi_messages_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PreviousContentReq.ProtoReflect.Descriptor instead.
+func (*PreviousContentReq) Descriptor() ([]byte, []int) {
+ return file_turborepo_ffi_messages_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *PreviousContentReq) GetGitRoot() string {
+ if x != nil {
+ return x.GitRoot
+ }
+ return ""
+}
+
+func (x *PreviousContentReq) GetFromCommit() string {
+ if x != nil {
+ return x.FromCommit
+ }
+ return ""
+}
+
+func (x *PreviousContentReq) GetFilePath() string {
+ if x != nil {
+ return x.FilePath
+ }
+ return ""
+}
+
+type PreviousContentResp struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Response:
+ // *PreviousContentResp_Content
+ // *PreviousContentResp_Error
+ Response isPreviousContentResp_Response `protobuf_oneof:"response"`
+}
+
+func (x *PreviousContentResp) Reset() {
+ *x = PreviousContentResp{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_turborepo_ffi_messages_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PreviousContentResp) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PreviousContentResp) ProtoMessage() {}
+
+func (x *PreviousContentResp) ProtoReflect() protoreflect.Message {
+ mi := &file_turborepo_ffi_messages_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PreviousContentResp.ProtoReflect.Descriptor instead.
+func (*PreviousContentResp) Descriptor() ([]byte, []int) {
+ return file_turborepo_ffi_messages_proto_rawDescGZIP(), []int{8}
+}
+
+func (m *PreviousContentResp) GetResponse() isPreviousContentResp_Response {
+ if m != nil {
+ return m.Response
+ }
+ return nil
+}
+
+func (x *PreviousContentResp) GetContent() []byte {
+ if x, ok := x.GetResponse().(*PreviousContentResp_Content); ok {
+ return x.Content
+ }
+ return nil
+}
+
+func (x *PreviousContentResp) GetError() string {
+ if x, ok := x.GetResponse().(*PreviousContentResp_Error); ok {
+ return x.Error
+ }
+ return ""
+}
+
+type isPreviousContentResp_Response interface {
+ isPreviousContentResp_Response()
+}
+
+type PreviousContentResp_Content struct {
+ Content []byte `protobuf:"bytes,1,opt,name=content,proto3,oneof"`
+}
+
+type PreviousContentResp_Error struct {
+ Error string `protobuf:"bytes,2,opt,name=error,proto3,oneof"`
+}
+
+func (*PreviousContentResp_Content) isPreviousContentResp_Response() {}
+
+func (*PreviousContentResp_Error) isPreviousContentResp_Response() {}
+
+type TransitiveDepsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Contents []byte `protobuf:"bytes,1,opt,name=contents,proto3" json:"contents,omitempty"`
+ WorkspaceDir string `protobuf:"bytes,2,opt,name=workspace_dir,json=workspaceDir,proto3" json:"workspace_dir,omitempty"`
+ UnresolvedDeps map[string]string `protobuf:"bytes,3,rep,name=unresolved_deps,json=unresolvedDeps,proto3" json:"unresolved_deps,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *TransitiveDepsRequest) Reset() {
+ *x = TransitiveDepsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_turborepo_ffi_messages_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TransitiveDepsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TransitiveDepsRequest) ProtoMessage() {}
+
+func (x *TransitiveDepsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_turborepo_ffi_messages_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TransitiveDepsRequest.ProtoReflect.Descriptor instead.
+func (*TransitiveDepsRequest) Descriptor() ([]byte, []int) {
+ return file_turborepo_ffi_messages_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *TransitiveDepsRequest) GetContents() []byte {
+ if x != nil {
+ return x.Contents
+ }
+ return nil
+}
+
+func (x *TransitiveDepsRequest) GetWorkspaceDir() string {
+ if x != nil {
+ return x.WorkspaceDir
+ }
+ return ""
+}
+
+func (x *TransitiveDepsRequest) GetUnresolvedDeps() map[string]string {
+ if x != nil {
+ return x.UnresolvedDeps
+ }
+ return nil
+}
+
+type TransitiveDepsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Response:
+ // *TransitiveDepsResponse_Packages
+ // *TransitiveDepsResponse_Error
+ Response isTransitiveDepsResponse_Response `protobuf_oneof:"response"`
+}
+
+func (x *TransitiveDepsResponse) Reset() {
+ *x = TransitiveDepsResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_turborepo_ffi_messages_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TransitiveDepsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TransitiveDepsResponse) ProtoMessage() {}
+
+func (x *TransitiveDepsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_turborepo_ffi_messages_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TransitiveDepsResponse.ProtoReflect.Descriptor instead.
+func (*TransitiveDepsResponse) Descriptor() ([]byte, []int) {
+ return file_turborepo_ffi_messages_proto_rawDescGZIP(), []int{10}
+}
+
+func (m *TransitiveDepsResponse) GetResponse() isTransitiveDepsResponse_Response {
+ if m != nil {
+ return m.Response
+ }
+ return nil
+}
+
+func (x *TransitiveDepsResponse) GetPackages() *LockfilePackageList {
+ if x, ok := x.GetResponse().(*TransitiveDepsResponse_Packages); ok {
+ return x.Packages
+ }
+ return nil
+}
+
+func (x *TransitiveDepsResponse) GetError() string {
+ if x, ok := x.GetResponse().(*TransitiveDepsResponse_Error); ok {
+ return x.Error
+ }
+ return ""
+}
+
+type isTransitiveDepsResponse_Response interface {
+ isTransitiveDepsResponse_Response()
+}
+
+type TransitiveDepsResponse_Packages struct {
+ Packages *LockfilePackageList `protobuf:"bytes,1,opt,name=packages,proto3,oneof"`
+}
+
+type TransitiveDepsResponse_Error struct {
+ Error string `protobuf:"bytes,2,opt,name=error,proto3,oneof"`
+}
+
+func (*TransitiveDepsResponse_Packages) isTransitiveDepsResponse_Response() {}
+
+func (*TransitiveDepsResponse_Error) isTransitiveDepsResponse_Response() {}
+
+type LockfilePackage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+ Found bool `protobuf:"varint,3,opt,name=found,proto3" json:"found,omitempty"`
+}
+
+func (x *LockfilePackage) Reset() {
+ *x = LockfilePackage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_turborepo_ffi_messages_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LockfilePackage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LockfilePackage) ProtoMessage() {}
+
+func (x *LockfilePackage) ProtoReflect() protoreflect.Message {
+ mi := &file_turborepo_ffi_messages_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LockfilePackage.ProtoReflect.Descriptor instead.
+func (*LockfilePackage) Descriptor() ([]byte, []int) {
+ return file_turborepo_ffi_messages_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *LockfilePackage) GetKey() string {
+ if x != nil {
+ return x.Key
+ }
+ return ""
+}
+
+func (x *LockfilePackage) GetVersion() string {
+ if x != nil {
+ return x.Version
+ }
+ return ""
+}
+
+func (x *LockfilePackage) GetFound() bool {
+ if x != nil {
+ return x.Found
+ }
+ return false
+}
+
+type LockfilePackageList struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ List []*LockfilePackage `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
+}
+
+func (x *LockfilePackageList) Reset() {
+ *x = LockfilePackageList{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_turborepo_ffi_messages_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LockfilePackageList) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LockfilePackageList) ProtoMessage() {}
+
+func (x *LockfilePackageList) ProtoReflect() protoreflect.Message {
+ mi := &file_turborepo_ffi_messages_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LockfilePackageList.ProtoReflect.Descriptor instead.
+func (*LockfilePackageList) Descriptor() ([]byte, []int) {
+ return file_turborepo_ffi_messages_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *LockfilePackageList) GetList() []*LockfilePackage {
+ if x != nil {
+ return x.List
+ }
+ return nil
+}
+
+type SubgraphRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Contents []byte `protobuf:"bytes,1,opt,name=contents,proto3" json:"contents,omitempty"`
+ Workspaces []string `protobuf:"bytes,2,rep,name=workspaces,proto3" json:"workspaces,omitempty"`
+ Packages []string `protobuf:"bytes,3,rep,name=packages,proto3" json:"packages,omitempty"`
+}
+
+func (x *SubgraphRequest) Reset() {
+ *x = SubgraphRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_turborepo_ffi_messages_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SubgraphRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SubgraphRequest) ProtoMessage() {}
+
+func (x *SubgraphRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_turborepo_ffi_messages_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SubgraphRequest.ProtoReflect.Descriptor instead.
+func (*SubgraphRequest) Descriptor() ([]byte, []int) {
+ return file_turborepo_ffi_messages_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *SubgraphRequest) GetContents() []byte {
+ if x != nil {
+ return x.Contents
+ }
+ return nil
+}
+
+func (x *SubgraphRequest) GetWorkspaces() []string {
+ if x != nil {
+ return x.Workspaces
+ }
+ return nil
+}
+
+func (x *SubgraphRequest) GetPackages() []string {
+ if x != nil {
+ return x.Packages
+ }
+ return nil
+}
+
+type SubgraphResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Response:
+ // *SubgraphResponse_Contents
+ // *SubgraphResponse_Error
+ Response isSubgraphResponse_Response `protobuf_oneof:"response"`
+}
+
+func (x *SubgraphResponse) Reset() {
+ *x = SubgraphResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_turborepo_ffi_messages_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SubgraphResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SubgraphResponse) ProtoMessage() {}
+
+func (x *SubgraphResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_turborepo_ffi_messages_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SubgraphResponse.ProtoReflect.Descriptor instead.
+func (*SubgraphResponse) Descriptor() ([]byte, []int) {
+ return file_turborepo_ffi_messages_proto_rawDescGZIP(), []int{14}
+}
+
+func (m *SubgraphResponse) GetResponse() isSubgraphResponse_Response {
+ if m != nil {
+ return m.Response
+ }
+ return nil
+}
+
+func (x *SubgraphResponse) GetContents() []byte {
+ if x, ok := x.GetResponse().(*SubgraphResponse_Contents); ok {
+ return x.Contents
+ }
+ return nil
+}
+
+func (x *SubgraphResponse) GetError() string {
+ if x, ok := x.GetResponse().(*SubgraphResponse_Error); ok {
+ return x.Error
+ }
+ return ""
+}
+
+type isSubgraphResponse_Response interface {
+ isSubgraphResponse_Response()
+}
+
+type SubgraphResponse_Contents struct {
+ Contents []byte `protobuf:"bytes,1,opt,name=contents,proto3,oneof"`
+}
+
+type SubgraphResponse_Error struct {
+ Error string `protobuf:"bytes,2,opt,name=error,proto3,oneof"`
+}
+
+func (*SubgraphResponse_Contents) isSubgraphResponse_Response() {}
+
+func (*SubgraphResponse_Error) isSubgraphResponse_Response() {}
+
+var File_turborepo_ffi_messages_proto protoreflect.FileDescriptor
+
+var file_turborepo_ffi_messages_proto_rawDesc = []byte{
+ 0x0a, 0x1c, 0x74, 0x75, 0x72, 0x62, 0x6f, 0x72, 0x65, 0x70, 0x6f, 0x2d, 0x66, 0x66, 0x69, 0x2f,
+ 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x24,
+ 0x0a, 0x10, 0x54, 0x75, 0x72, 0x62, 0x6f, 0x44, 0x61, 0x74, 0x61, 0x44, 0x69, 0x72, 0x52, 0x65,
+ 0x73, 0x70, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x69, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x03, 0x64, 0x69, 0x72, 0x22, 0x9b, 0x01, 0x0a, 0x07, 0x47, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x71,
+ 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x08, 0x62, 0x61, 0x73, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x29, 0x0a,
+ 0x10, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e,
+ 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65,
+ 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x63, 0x6c,
+ 0x75, 0x64, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x0f, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x61, 0x74, 0x74, 0x65,
+ 0x72, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x5f, 0x6f, 0x6e, 0x6c,
+ 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x4f, 0x6e,
+ 0x6c, 0x79, 0x22, 0x55, 0x0a, 0x08, 0x47, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x12, 0x25,
+ 0x0a, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e,
+ 0x47, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x05,
+ 0x66, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x42, 0x0a, 0x0a,
+ 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x24, 0x0a, 0x0c, 0x47, 0x6c, 0x6f,
+ 0x62, 0x52, 0x65, 0x73, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x6c,
+ 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x22,
+ 0xb1, 0x01, 0x0a, 0x0f, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x73,
+ 0x52, 0x65, 0x71, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x69, 0x74, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x67, 0x69, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x1d,
+ 0x0a, 0x0a, 0x74, 0x75, 0x72, 0x62, 0x6f, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x09, 0x74, 0x75, 0x72, 0x62, 0x6f, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x24, 0x0a,
+ 0x0b, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x09, 0x48, 0x00, 0x52, 0x0a, 0x66, 0x72, 0x6f, 0x6d, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74,
+ 0x88, 0x01, 0x01, 0x12, 0x20, 0x0a, 0x09, 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x08, 0x74, 0x6f, 0x43, 0x6f, 0x6d, 0x6d,
+ 0x69, 0x74, 0x88, 0x01, 0x01, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x63,
+ 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x6d,
+ 0x6d, 0x69, 0x74, 0x22, 0x61, 0x0a, 0x10, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x46, 0x69,
+ 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x12, 0x29, 0x0a, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64,
+ 0x46, 0x69, 0x6c, 0x65, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x05, 0x66, 0x69, 0x6c,
+ 0x65, 0x73, 0x12, 0x16, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x42, 0x0a, 0x0a, 0x08, 0x72, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x28, 0x0a, 0x10, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65,
+ 0x64, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69,
+ 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73,
+ 0x22, 0x6d, 0x0a, 0x12, 0x50, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x43, 0x6f, 0x6e, 0x74,
+ 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x69, 0x74, 0x5f, 0x72, 0x6f,
+ 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x67, 0x69, 0x74, 0x52, 0x6f, 0x6f,
+ 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x66, 0x72, 0x6f, 0x6d, 0x43, 0x6f, 0x6d, 0x6d,
+ 0x69, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x50, 0x61, 0x74, 0x68, 0x22,
+ 0x55, 0x0a, 0x13, 0x50, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65,
+ 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1a, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
+ 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65,
+ 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x42, 0x0a, 0x0a, 0x08, 0x72, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xf0, 0x01, 0x0a, 0x15, 0x54, 0x72, 0x61, 0x6e, 0x73,
+ 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0c, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0d,
+ 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x64, 0x69, 0x72, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x69,
+ 0x72, 0x12, 0x53, 0x0a, 0x0f, 0x75, 0x6e, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x64, 0x5f,
+ 0x64, 0x65, 0x70, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x54, 0x72, 0x61,
+ 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x2e, 0x55, 0x6e, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x64, 0x44, 0x65, 0x70,
+ 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x75, 0x6e, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76,
+ 0x65, 0x64, 0x44, 0x65, 0x70, 0x73, 0x1a, 0x41, 0x0a, 0x13, 0x55, 0x6e, 0x72, 0x65, 0x73, 0x6f,
+ 0x6c, 0x76, 0x65, 0x64, 0x44, 0x65, 0x70, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x70, 0x0a, 0x16, 0x54, 0x72, 0x61,
+ 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x73, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x65,
+ 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x70,
+ 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x42,
+ 0x0a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x53, 0x0a, 0x0f, 0x4c,
+ 0x6f, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x65, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x10,
+ 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
+ 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f,
+ 0x75, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x75, 0x6e, 0x64,
+ 0x22, 0x3b, 0x0a, 0x13, 0x4c, 0x6f, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x65, 0x50, 0x61, 0x63, 0x6b,
+ 0x61, 0x67, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x24, 0x0a, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x65,
+ 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x52, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x22, 0x69, 0x0a,
+ 0x0f, 0x53, 0x75, 0x62, 0x67, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0c, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x1e, 0x0a, 0x0a,
+ 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09,
+ 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08,
+ 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08,
+ 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x73, 0x22, 0x54, 0x0a, 0x10, 0x53, 0x75, 0x62, 0x67,
+ 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x08,
+ 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00,
+ 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x05, 0x65, 0x72,
+ 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, 0x72,
+ 0x6f, 0x72, 0x42, 0x0a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x0b,
+ 0x5a, 0x09, 0x66, 0x66, 0x69, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_turborepo_ffi_messages_proto_rawDescOnce sync.Once
+ file_turborepo_ffi_messages_proto_rawDescData = file_turborepo_ffi_messages_proto_rawDesc
+)
+
+func file_turborepo_ffi_messages_proto_rawDescGZIP() []byte {
+ file_turborepo_ffi_messages_proto_rawDescOnce.Do(func() {
+ file_turborepo_ffi_messages_proto_rawDescData = protoimpl.X.CompressGZIP(file_turborepo_ffi_messages_proto_rawDescData)
+ })
+ return file_turborepo_ffi_messages_proto_rawDescData
+}
+
+var file_turborepo_ffi_messages_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
+var file_turborepo_ffi_messages_proto_goTypes = []interface{}{
+ (*TurboDataDirResp)(nil), // 0: TurboDataDirResp
+ (*GlobReq)(nil), // 1: GlobReq
+ (*GlobResp)(nil), // 2: GlobResp
+ (*GlobRespList)(nil), // 3: GlobRespList
+ (*ChangedFilesReq)(nil), // 4: ChangedFilesReq
+ (*ChangedFilesResp)(nil), // 5: ChangedFilesResp
+ (*ChangedFilesList)(nil), // 6: ChangedFilesList
+ (*PreviousContentReq)(nil), // 7: PreviousContentReq
+ (*PreviousContentResp)(nil), // 8: PreviousContentResp
+ (*TransitiveDepsRequest)(nil), // 9: TransitiveDepsRequest
+ (*TransitiveDepsResponse)(nil), // 10: TransitiveDepsResponse
+ (*LockfilePackage)(nil), // 11: LockfilePackage
+ (*LockfilePackageList)(nil), // 12: LockfilePackageList
+ (*SubgraphRequest)(nil), // 13: SubgraphRequest
+ (*SubgraphResponse)(nil), // 14: SubgraphResponse
+ nil, // 15: TransitiveDepsRequest.UnresolvedDepsEntry
+}
+var file_turborepo_ffi_messages_proto_depIdxs = []int32{
+ 3, // 0: GlobResp.files:type_name -> GlobRespList
+ 6, // 1: ChangedFilesResp.files:type_name -> ChangedFilesList
+ 15, // 2: TransitiveDepsRequest.unresolved_deps:type_name -> TransitiveDepsRequest.UnresolvedDepsEntry
+ 12, // 3: TransitiveDepsResponse.packages:type_name -> LockfilePackageList
+ 11, // 4: LockfilePackageList.list:type_name -> LockfilePackage
+ 5, // [5:5] is the sub-list for method output_type
+ 5, // [5:5] is the sub-list for method input_type
+ 5, // [5:5] is the sub-list for extension type_name
+ 5, // [5:5] is the sub-list for extension extendee
+ 0, // [0:5] is the sub-list for field type_name
+}
+
+func init() { file_turborepo_ffi_messages_proto_init() }
+func file_turborepo_ffi_messages_proto_init() {
+ if File_turborepo_ffi_messages_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_turborepo_ffi_messages_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TurboDataDirResp); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_turborepo_ffi_messages_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GlobReq); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_turborepo_ffi_messages_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GlobResp); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_turborepo_ffi_messages_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GlobRespList); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_turborepo_ffi_messages_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ChangedFilesReq); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_turborepo_ffi_messages_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ChangedFilesResp); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_turborepo_ffi_messages_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ChangedFilesList); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_turborepo_ffi_messages_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PreviousContentReq); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_turborepo_ffi_messages_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PreviousContentResp); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_turborepo_ffi_messages_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TransitiveDepsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_turborepo_ffi_messages_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TransitiveDepsResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_turborepo_ffi_messages_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LockfilePackage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_turborepo_ffi_messages_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LockfilePackageList); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_turborepo_ffi_messages_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SubgraphRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_turborepo_ffi_messages_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SubgraphResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_turborepo_ffi_messages_proto_msgTypes[2].OneofWrappers = []interface{}{
+ (*GlobResp_Files)(nil),
+ (*GlobResp_Error)(nil),
+ }
+ file_turborepo_ffi_messages_proto_msgTypes[4].OneofWrappers = []interface{}{}
+ file_turborepo_ffi_messages_proto_msgTypes[5].OneofWrappers = []interface{}{
+ (*ChangedFilesResp_Files)(nil),
+ (*ChangedFilesResp_Error)(nil),
+ }
+ file_turborepo_ffi_messages_proto_msgTypes[8].OneofWrappers = []interface{}{
+ (*PreviousContentResp_Content)(nil),
+ (*PreviousContentResp_Error)(nil),
+ }
+ file_turborepo_ffi_messages_proto_msgTypes[10].OneofWrappers = []interface{}{
+ (*TransitiveDepsResponse_Packages)(nil),
+ (*TransitiveDepsResponse_Error)(nil),
+ }
+ file_turborepo_ffi_messages_proto_msgTypes[14].OneofWrappers = []interface{}{
+ (*SubgraphResponse_Contents)(nil),
+ (*SubgraphResponse_Error)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_turborepo_ffi_messages_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 16,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_turborepo_ffi_messages_proto_goTypes,
+ DependencyIndexes: file_turborepo_ffi_messages_proto_depIdxs,
+ MessageInfos: file_turborepo_ffi_messages_proto_msgTypes,
+ }.Build()
+ File_turborepo_ffi_messages_proto = out.File
+ file_turborepo_ffi_messages_proto_rawDesc = nil
+ file_turborepo_ffi_messages_proto_goTypes = nil
+ file_turborepo_ffi_messages_proto_depIdxs = nil
+}
diff --git a/cli/internal/filewatcher/backend.go b/cli/internal/filewatcher/backend.go
new file mode 100644
index 0000000..b8b7fa8
--- /dev/null
+++ b/cli/internal/filewatcher/backend.go
@@ -0,0 +1,209 @@
+//go:build !darwin
+// +build !darwin
+
+package filewatcher
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "sync"
+
+ "github.com/fsnotify/fsnotify"
+ "github.com/hashicorp/go-hclog"
+ "github.com/karrick/godirwalk"
+ "github.com/pkg/errors"
+ "github.com/vercel/turbo/cli/internal/doublestar"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+// watchAddMode is used to indicate whether watchRecursively should synthesize events
+// for existing files.
+type watchAddMode int
+
+const (
+ dontSynthesizeEvents watchAddMode = iota
+ synthesizeEvents
+)
+
+type fsNotifyBackend struct {
+ watcher *fsnotify.Watcher
+ events chan Event
+ errors chan error
+ logger hclog.Logger
+
+ mu sync.Mutex
+ allExcludes []string
+ closed bool
+}
+
+func (f *fsNotifyBackend) Events() <-chan Event {
+ return f.events
+}
+
+func (f *fsNotifyBackend) Errors() <-chan error {
+ return f.errors
+}
+
+func (f *fsNotifyBackend) Close() error {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ if f.closed {
+ return ErrFilewatchingClosed
+ }
+ f.closed = true
+ close(f.events)
+ close(f.errors)
+ if err := f.watcher.Close(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// onFileAdded helps us paper over cross-platform inconsistencies in fsnotify.
+// Some fsnotify backends automatically add the contents of directories. Some do
+// not. Adding a watch is idempotent, so anytime any file we care about gets added,
+// watch it.
+func (f *fsNotifyBackend) onFileAdded(name turbopath.AbsoluteSystemPath) error {
+ info, err := name.Lstat()
+ if err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ // We can race with a file being added and removed. Ignore it
+ return nil
+ }
+ return errors.Wrapf(err, "error checking lstat of new file %v", name)
+ }
+ if info.IsDir() {
+ // If a directory has been added, we need to synthesize events for everything it contains
+ if err := f.watchRecursively(name, []string{}, synthesizeEvents); err != nil {
+ return errors.Wrapf(err, "failed recursive watch of %v", name)
+ }
+ } else {
+ if err := f.watcher.Add(name.ToString()); err != nil {
+ return errors.Wrapf(err, "failed adding watch to %v", name)
+ }
+ }
+ return nil
+}
+
+func (f *fsNotifyBackend) watchRecursively(root turbopath.AbsoluteSystemPath, excludePatterns []string, addMode watchAddMode) error {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ err := fs.WalkMode(root.ToString(), func(name string, isDir bool, info os.FileMode) error {
+ for _, excludePattern := range excludePatterns {
+ excluded, err := doublestar.Match(excludePattern, filepath.ToSlash(name))
+ if err != nil {
+ return err
+ }
+ if excluded {
+ return godirwalk.SkipThis
+ }
+ }
+ if info.IsDir() && (info&os.ModeSymlink == 0) {
+ if err := f.watcher.Add(name); err != nil {
+ return errors.Wrapf(err, "failed adding watch to %v", name)
+ }
+ f.logger.Debug(fmt.Sprintf("watching directory %v", name))
+ }
+ if addMode == synthesizeEvents {
+ f.events <- Event{
+ Path: fs.AbsoluteSystemPathFromUpstream(name),
+ EventType: FileAdded,
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ f.allExcludes = append(f.allExcludes, excludePatterns...)
+
+ return nil
+}
+
+func (f *fsNotifyBackend) watch() {
+outer:
+ for {
+ select {
+ case ev, ok := <-f.watcher.Events:
+ if !ok {
+ break outer
+ }
+ eventType := toFileEvent(ev.Op)
+ path := fs.AbsoluteSystemPathFromUpstream(ev.Name)
+ if eventType == FileAdded {
+ if err := f.onFileAdded(path); err != nil {
+ f.errors <- err
+ }
+ }
+ f.events <- Event{
+ Path: path,
+ EventType: eventType,
+ }
+ case err, ok := <-f.watcher.Errors:
+ if !ok {
+ break outer
+ }
+ f.errors <- err
+ }
+ }
+}
+
+var _modifiedMask = fsnotify.Chmod | fsnotify.Write
+
+func toFileEvent(op fsnotify.Op) FileEvent {
+ if op&fsnotify.Create != 0 {
+ return FileAdded
+ } else if op&fsnotify.Remove != 0 {
+ return FileDeleted
+ } else if op&_modifiedMask != 0 {
+ return FileModified
+ } else if op&fsnotify.Rename != 0 {
+ return FileRenamed
+ }
+ return FileOther
+}
+
+func (f *fsNotifyBackend) Start() error {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ if f.closed {
+ return ErrFilewatchingClosed
+ }
+ for _, dir := range f.watcher.WatchList() {
+ for _, excludePattern := range f.allExcludes {
+ excluded, err := doublestar.Match(excludePattern, filepath.ToSlash(dir))
+ if err != nil {
+ return err
+ }
+ if excluded {
+ if err := f.watcher.Remove(dir); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ go f.watch()
+ return nil
+}
+
+func (f *fsNotifyBackend) AddRoot(root turbopath.AbsoluteSystemPath, excludePatterns ...string) error {
+ // We don't synthesize events for the initial watch
+ return f.watchRecursively(root, excludePatterns, dontSynthesizeEvents)
+}
+
+// GetPlatformSpecificBackend returns a filewatching backend appropriate for the OS we are
+// running on.
+func GetPlatformSpecificBackend(logger hclog.Logger) (Backend, error) {
+ watcher, err := fsnotify.NewWatcher()
+ if err != nil {
+ return nil, err
+ }
+ return &fsNotifyBackend{
+ watcher: watcher,
+ events: make(chan Event),
+ errors: make(chan error),
+ logger: logger.Named("fsnotify"),
+ }, nil
+}
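A minimal sketch of driving this backend directly, assuming a throwaway directory path; real callers go through the FileWatcher wrapper introduced later in this diff:

package main

import (
	"fmt"

	"github.com/hashicorp/go-hclog"
	"github.com/vercel/turbo/cli/internal/filewatcher"
	"github.com/vercel/turbo/cli/internal/fs"
)

func main() {
	backend, err := filewatcher.GetPlatformSpecificBackend(hclog.Default())
	if err != nil {
		panic(err)
	}
	root := fs.AbsoluteSystemPathFromUpstream("/tmp/watched-dir") // assumed path
	if err := backend.AddRoot(root); err != nil {
		panic(err)
	}
	if err := backend.Start(); err != nil {
		panic(err)
	}
	defer backend.Close()
	// Drain both channels; they are closed when the backend is closed.
	for {
		select {
		case ev, ok := <-backend.Events():
			if !ok {
				return
			}
			fmt.Printf("event %v at %v\n", ev.EventType, ev.Path)
		case err, ok := <-backend.Errors():
			if !ok {
				return
			}
			fmt.Printf("watch error: %v\n", err)
		}
	}
}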
diff --git a/cli/internal/filewatcher/backend_darwin.go b/cli/internal/filewatcher/backend_darwin.go
new file mode 100644
index 0000000..4c029c4
--- /dev/null
+++ b/cli/internal/filewatcher/backend_darwin.go
@@ -0,0 +1,220 @@
+//go:build darwin
+// +build darwin
+
+package filewatcher
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/pkg/errors"
+ "github.com/yookoala/realpath"
+
+ "github.com/fsnotify/fsevents"
+ "github.com/hashicorp/go-hclog"
+ "github.com/vercel/turbo/cli/internal/doublestar"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+type fseventsBackend struct {
+ events chan Event
+ errors chan error
+ logger hclog.Logger
+ mu sync.Mutex
+ streams []*fsevents.EventStream
+ closed bool
+}
+
+func (f *fseventsBackend) Events() <-chan Event {
+ return f.events
+}
+
+func (f *fseventsBackend) Errors() <-chan error {
+ return f.errors
+}
+
+func (f *fseventsBackend) Close() error {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ if f.closed {
+ return ErrFilewatchingClosed
+ }
+ f.closed = true
+ for _, stream := range f.streams {
+ stream.Stop()
+ }
+ close(f.events)
+ close(f.errors)
+ return nil
+}
+
+func (f *fseventsBackend) Start() error {
+ return nil
+}
+
+var (
+ _eventLatency = 10 * time.Millisecond
+ _cookieTimeout = 500 * time.Millisecond
+)
+
+// AddRoot starts watching a new directory hierarchy. Events matching the provided excludePatterns
+// will not be forwarded.
+func (f *fseventsBackend) AddRoot(someRoot turbopath.AbsoluteSystemPath, excludePatterns ...string) error {
+ // We need to resolve the real path to the hierarchy that we are going to watch
+ realRoot, err := realpath.Realpath(someRoot.ToString())
+ if err != nil {
+ return err
+ }
+ root := fs.AbsoluteSystemPathFromUpstream(realRoot)
+ dev, err := fsevents.DeviceForPath(root.ToString())
+ if err != nil {
+ return err
+ }
+
+ // Optimistically set up and start a stream, assuming the watch is still valid.
+ s := &fsevents.EventStream{
+ Paths: []string{root.ToString()},
+ Latency: _eventLatency,
+ Device: dev,
+ Flags: fsevents.FileEvents | fsevents.WatchRoot,
+ }
+ s.Start()
+ events := s.Events
+
+ // fsevents delivers events for all existing files first, so use a cookie to detect when we're ready for new events
+ if err := waitForCookie(root, events, _cookieTimeout); err != nil {
+ s.Stop()
+ return err
+ }
+
+ // Now try to persist the stream.
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ if f.closed {
+ s.Stop()
+ return ErrFilewatchingClosed
+ }
+ f.streams = append(f.streams, s)
+ f.logger.Debug(fmt.Sprintf("watching root %v, excluding %v", root, excludePatterns))
+
+ go func() {
+ for evs := range events {
+ for _, ev := range evs {
+ isExcluded := false
+
+ // 1. Ensure that we have a `/`-prefixed path from the event.
+ var eventPath string
+ if !strings.HasPrefix("/", ev.Path) {
+ eventPath = "/" + ev.Path
+ } else {
+ eventPath = ev.Path
+ }
+
+ // 2. We're getting events from the real path, but we need to translate
+ // back to the path we were provided since that's what the caller will
+ // expect in terms of event paths.
+ watchRootRelativePath := eventPath[len(realRoot):]
+ processedEventPath := someRoot.UntypedJoin(watchRootRelativePath)
+
+ // 3. Compare the event to all exclude patterns, short-circuit if we know
+ // we are not watching this file.
+ processedPathString := processedEventPath.ToString() // loop invariant
+ for _, pattern := range excludePatterns {
+ matches, err := doublestar.Match(pattern, processedPathString)
+ if err != nil {
+ f.errors <- err
+ } else if matches {
+ isExcluded = true
+ break
+ }
+ }
+
+ // 4. Report the file events we care about.
+ if !isExcluded {
+ f.events <- Event{
+ Path: processedEventPath,
+ EventType: toFileEvent(ev.Flags),
+ }
+ }
+ }
+ }
+ }()
+
+ return nil
+}
+
+func waitForCookie(root turbopath.AbsoluteSystemPath, events <-chan []fsevents.Event, timeout time.Duration) error {
+ // This cookie needs to be in a location that we're watching, and at this point we can't guarantee
+ // what the root is, or if something like "node_modules/.cache/turbo" would make sense. As a compromise, ensure
+ // that we clean it up even in the event of a failure.
+ cookiePath := root.UntypedJoin(".turbo-cookie")
+ if err := cookiePath.WriteFile([]byte("cookie"), 0755); err != nil {
+ return err
+ }
+ expected := cookiePath.ToString()[1:] // trim leading slash
+ if err := waitForEvent(events, expected, fsevents.ItemCreated, timeout); err != nil {
+ // Attempt to not leave the cookie file lying around.
+ // Ignore the error, since there's not much we can do with it.
+ _ = cookiePath.Remove()
+ return err
+ }
+ if err := cookiePath.Remove(); err != nil {
+ return err
+ }
+ if err := waitForEvent(events, expected, fsevents.ItemRemoved, timeout); err != nil {
+ return err
+ }
+ return nil
+}
+
+func waitForEvent(events <-chan []fsevents.Event, path string, flag fsevents.EventFlags, timeout time.Duration) error {
+ ch := make(chan struct{})
+ go func() {
+ for evs := range events {
+ for _, ev := range evs {
+ if ev.Path == path && ev.Flags&flag != 0 {
+ close(ch)
+ return
+ }
+ }
+ }
+ }()
+ select {
+ case <-time.After(timeout):
+ return errors.Wrap(ErrFailedToStart, "timed out waiting for initial fsevents cookie")
+ case <-ch:
+ return nil
+ }
+}
+
+var _modifiedMask = fsevents.ItemModified | fsevents.ItemInodeMetaMod | fsevents.ItemFinderInfoMod | fsevents.ItemChangeOwner | fsevents.ItemXattrMod
+
+func toFileEvent(flags fsevents.EventFlags) FileEvent {
+ if flags&fsevents.ItemCreated != 0 {
+ return FileAdded
+ } else if flags&fsevents.ItemRemoved != 0 {
+ return FileDeleted
+ } else if flags&_modifiedMask != 0 {
+ return FileModified
+ } else if flags&fsevents.ItemRenamed != 0 {
+ return FileRenamed
+ } else if flags&fsevents.RootChanged != 0 {
+ // count this as a delete, something affected the path to the root
+ // of the stream
+ return FileDeleted
+ }
+ return FileOther
+}
+
+// GetPlatformSpecificBackend returns a filewatching backend appropriate for the OS we are
+// running on.
+func GetPlatformSpecificBackend(logger hclog.Logger) (Backend, error) {
+ return &fseventsBackend{
+ events: make(chan Event),
+ errors: make(chan error),
+ logger: logger.Named("fsevents"),
+ }, nil
+}
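One subtle step above is translating event paths from the resolved real root back onto the root the caller supplied. A standalone restatement of that translation, under the same assumption as the event loop (paths from fsevents may lack a leading slash); the helper name is hypothetical:

package filewatcher

import (
	"strings"

	"github.com/vercel/turbo/cli/internal/turbopath"
)

// translateEventPath is a hypothetical helper restating steps 1-2 of the event
// loop: normalize the fsevents path to be "/"-prefixed, then rebase it from
// the resolved real root onto the root the caller originally asked to watch.
func translateEventPath(evPath string, realRoot string, someRoot turbopath.AbsoluteSystemPath) turbopath.AbsoluteSystemPath {
	if !strings.HasPrefix(evPath, "/") {
		evPath = "/" + evPath
	}
	watchRootRelative := evPath[len(realRoot):]
	return someRoot.UntypedJoin(watchRootRelative)
}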
diff --git a/cli/internal/filewatcher/cookie.go b/cli/internal/filewatcher/cookie.go
new file mode 100644
index 0000000..7a4931e
--- /dev/null
+++ b/cli/internal/filewatcher/cookie.go
@@ -0,0 +1,160 @@
+package filewatcher
+
+import (
+ "fmt"
+ "os"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/pkg/errors"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+// CookieWaiter is the interface used by clients that need to wait
+// for a roundtrip through the filewatching API.
+type CookieWaiter interface {
+ WaitForCookie() error
+}
+
+var (
+ // ErrCookieTimeout is returned when we did not see our cookie file within the given time constraints
+ ErrCookieTimeout = errors.New("timed out waiting for cookie")
+ // ErrCookieWatchingClosed is returned when the underlying filewatching has been closed.
+ ErrCookieWatchingClosed = errors.New("filewatching has closed, cannot watch cookies")
+)
+
+// CookieJar is used for tracking roundtrips through the filesystem watching API
+type CookieJar struct {
+ timeout time.Duration
+ dir turbopath.AbsoluteSystemPath
+ serial uint64
+ mu sync.Mutex
+ cookies map[turbopath.AbsoluteSystemPath]chan error
+ closed bool
+}
+
+// NewCookieJar returns a new instance of a CookieJar. There should only ever be a single
+// instance live per cookieDir, since they expect to have full control over that directory.
+func NewCookieJar(cookieDir turbopath.AbsoluteSystemPath, timeout time.Duration) (*CookieJar, error) {
+ if err := cookieDir.RemoveAll(); err != nil {
+ return nil, err
+ }
+ if err := cookieDir.MkdirAll(0775); err != nil {
+ return nil, err
+ }
+ return &CookieJar{
+ timeout: timeout,
+ dir: cookieDir,
+ cookies: make(map[turbopath.AbsoluteSystemPath]chan error),
+ }, nil
+}
+
+// removeAllCookiesWithError sends the error to every channel, closes every channel,
+// and attempts to remove every cookie file. Must be called while cj.mu is held.
+// If the cookie jar is going to be reused afterwards, the cookies map must be reinitialized.
+func (cj *CookieJar) removeAllCookiesWithError(err error) {
+ for p, ch := range cj.cookies {
+ _ = p.Remove()
+ ch <- err
+ close(ch)
+ }
+ // Drop all of the references so they can be cleaned up
+ cj.cookies = nil
+}
+
+// OnFileWatchClosed handles the case where filewatching had to close for some reason
+// We send an error to all of our cookies and stop accepting new ones.
+func (cj *CookieJar) OnFileWatchClosed() {
+ cj.mu.Lock()
+ defer cj.mu.Unlock()
+ cj.closed = true
+ cj.removeAllCookiesWithError(ErrCookieWatchingClosed)
+}
+
+// OnFileWatchError handles when filewatching has encountered an error.
+// In the error case, we remove all cookies and send them errors. We remain
+// available for later cookies.
+func (cj *CookieJar) OnFileWatchError(err error) {
+ // We are now in an inconsistent state. Drop all of our cookies,
+ // but we still allow new ones to be created
+ cj.mu.Lock()
+ defer cj.mu.Unlock()
+ cj.removeAllCookiesWithError(err)
+ cj.cookies = make(map[turbopath.AbsoluteSystemPath]chan error)
+}
+
+// OnFileWatchEvent determines if the specified event is relevant
+// for cookie watching and notifies the appropriate cookie if so.
+func (cj *CookieJar) OnFileWatchEvent(ev Event) {
+ if ev.EventType == FileAdded {
+ isCookie, err := fs.DirContainsPath(cj.dir.ToStringDuringMigration(), ev.Path.ToStringDuringMigration())
+ if err != nil {
+ cj.OnFileWatchError(errors.Wrapf(err, "failed to determine if path is a cookie: %v", ev.Path))
+ } else if isCookie {
+ cj.notifyCookie(ev.Path, nil)
+ }
+ }
+}
+
+// WaitForCookie touches a unique file, then waits for it to show up in filesystem notifications.
+// This provides a theoretical bound on filesystem operations, although it's possible
+// that underlying filewatch mechanisms don't respect this ordering.
+func (cj *CookieJar) WaitForCookie() error {
+ // we're only ever going to send a single error on the channel, add a buffer so that we never
+ // block sending it.
+ ch := make(chan error, 1)
+ serial := atomic.AddUint64(&cj.serial, 1)
+ cookiePath := cj.dir.UntypedJoin(fmt.Sprintf("%v.cookie", serial))
+ cj.mu.Lock()
+ if cj.closed {
+ cj.mu.Unlock()
+ return ErrCookieWatchingClosed
+ }
+ cj.cookies[cookiePath] = ch
+ cj.mu.Unlock()
+ if err := touchCookieFile(cookiePath); err != nil {
+ cj.notifyCookie(cookiePath, err)
+ return err
+ }
+ select {
+ case <-time.After(cj.timeout):
+ return ErrCookieTimeout
+ case err, ok := <-ch:
+ if !ok {
+ // the channel closed without an error, we're all set
+ return nil
+ }
+ // the channel didn't close, meaning we got some error.
+ // We don't need to wait on channel close, it's going to be closed
+ // immediately by whoever sent the error. Return the error directly
+ return err
+ }
+}
+
+func (cj *CookieJar) notifyCookie(cookie turbopath.AbsoluteSystemPath, err error) {
+ cj.mu.Lock()
+ ch, ok := cj.cookies[cookie]
+ // delete is a no-op if the key doesn't exist
+ delete(cj.cookies, cookie)
+ cj.mu.Unlock()
+ if ok {
+ if err != nil {
+ ch <- err
+ }
+ close(ch)
+ }
+}
+
+func touchCookieFile(cookie turbopath.AbsoluteSystemPath) error {
+ f, err := cookie.OpenFile(os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0700)
+ if err != nil {
+ return err
+ }
+ if err := f.Close(); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/cli/internal/filewatcher/cookie_test.go b/cli/internal/filewatcher/cookie_test.go
new file mode 100644
index 0000000..96241b4
--- /dev/null
+++ b/cli/internal/filewatcher/cookie_test.go
@@ -0,0 +1,130 @@
+package filewatcher
+
+import (
+ "testing"
+ "time"
+
+ "github.com/hashicorp/go-hclog"
+ "github.com/pkg/errors"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "gotest.tools/v3/assert"
+)
+
+func TestWaitForCookie(t *testing.T) {
+ logger := hclog.Default()
+ cookieDir := fs.AbsoluteSystemPathFromUpstream(t.TempDir())
+ repoRoot := fs.AbsoluteSystemPathFromUpstream(t.TempDir())
+
+ jar, err := NewCookieJar(cookieDir, 5*time.Second)
+ assert.NilError(t, err, "NewCookieJar")
+
+ watcher, err := GetPlatformSpecificBackend(logger)
+ assert.NilError(t, err, "NewWatcher")
+ fw := New(logger, repoRoot, watcher)
+ err = fw.Start()
+ assert.NilError(t, err, "Start")
+ fw.AddClient(jar)
+ err = fw.AddRoot(cookieDir)
+ assert.NilError(t, err, "Add")
+
+ err = jar.WaitForCookie()
+ assert.NilError(t, err, "failed to roundtrip cookie")
+}
+
+func TestWaitForCookieAfterClose(t *testing.T) {
+ logger := hclog.Default()
+ cookieDir := fs.AbsoluteSystemPathFromUpstream(t.TempDir())
+ repoRoot := fs.AbsoluteSystemPathFromUpstream(t.TempDir())
+
+ jar, err := NewCookieJar(cookieDir, 5*time.Second)
+ assert.NilError(t, err, "NewCookieJar")
+
+ watcher, err := GetPlatformSpecificBackend(logger)
+ assert.NilError(t, err, "NewWatcher")
+ fw := New(logger, repoRoot, watcher)
+ err = fw.Start()
+ assert.NilError(t, err, "Start")
+ fw.AddClient(jar)
+ err = fw.AddRoot(cookieDir)
+ assert.NilError(t, err, "Add")
+
+ err = fw.Close()
+ assert.NilError(t, err, "Close")
+ err = jar.WaitForCookie()
+ assert.ErrorIs(t, err, ErrCookieWatchingClosed)
+}
+
+func TestWaitForCookieTimeout(t *testing.T) {
+ logger := hclog.Default()
+ cookieDir := fs.AbsoluteSystemPathFromUpstream(t.TempDir())
+ repoRoot := fs.AbsoluteSystemPathFromUpstream(t.TempDir())
+
+ jar, err := NewCookieJar(cookieDir, 10*time.Millisecond)
+ assert.NilError(t, err, "NewCookieJar")
+
+ watcher, err := GetPlatformSpecificBackend(logger)
+ assert.NilError(t, err, "NewWatcher")
+ fw := New(logger, repoRoot, watcher)
+ err = fw.Start()
+ assert.NilError(t, err, "Start")
+ fw.AddClient(jar)
+
+ // NOTE: don't call fw.Add here so that no file event gets delivered
+
+ err = jar.WaitForCookie()
+ assert.ErrorIs(t, err, ErrCookieTimeout)
+}
+
+func TestWaitForCookieWithError(t *testing.T) {
+ logger := hclog.Default()
+ cookieDir := fs.AbsoluteSystemPathFromUpstream(t.TempDir())
+ repoRoot := fs.AbsoluteSystemPathFromUpstream(t.TempDir())
+
+ jar, err := NewCookieJar(cookieDir, 10*time.Second)
+ assert.NilError(t, err, "NewCookieJar")
+
+ watcher, err := GetPlatformSpecificBackend(logger)
+ assert.NilError(t, err, "NewWatcher")
+ fw := New(logger, repoRoot, watcher)
+ err = fw.Start()
+ assert.NilError(t, err, "Start")
+ fw.AddClient(jar)
+
+ // NOTE: don't call fw.Add here so that no file event gets delivered
+ myErr := errors.New("an error")
+ ch := make(chan error)
+ go func() {
+ if err := jar.WaitForCookie(); err != nil {
+ ch <- err
+ }
+ close(ch)
+ }()
+ // wait for the cookie to be registered in the jar
+ for {
+ found := false
+ jar.mu.Lock()
+ if len(jar.cookies) == 1 {
+ found = true
+ }
+ jar.mu.Unlock()
+ if found {
+ break
+ }
+ <-time.After(10 * time.Millisecond)
+ }
+ jar.OnFileWatchError(myErr)
+
+ err, ok := <-ch
+ if !ok {
+ t.Error("expected to get an error from cookie watching")
+ }
+ assert.ErrorIs(t, err, myErr)
+
+ // ensure waiting for a new cookie still works.
+ // Add the filewatch root so that cookies can work normally
+ err = fw.AddRoot(cookieDir)
+ assert.NilError(t, err, "Add")
+
+ err = jar.WaitForCookie()
+ assert.NilError(t, err, "WaitForCookie")
+}
diff --git a/cli/internal/filewatcher/filewatcher.go b/cli/internal/filewatcher/filewatcher.go
new file mode 100644
index 0000000..4f79495
--- /dev/null
+++ b/cli/internal/filewatcher/filewatcher.go
@@ -0,0 +1,167 @@
+// Package filewatcher is used to handle watching for file changes inside the monorepo
+package filewatcher
+
+import (
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "github.com/hashicorp/go-hclog"
+ "github.com/pkg/errors"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+// _ignores is the set of paths we exempt from file-watching
+var _ignores = []string{".git", "node_modules"}
+
+// FileWatchClient defines the callbacks used by the file watching loop.
+// All methods are called from the same goroutine so they:
+// 1) do not need synchronization
+// 2) should minimize the work they are doing when called, if possible
+type FileWatchClient interface {
+ OnFileWatchEvent(ev Event)
+ OnFileWatchError(err error)
+ OnFileWatchClosed()
+}
+
+// FileEvent is an enum covering the kinds of things that can happen
+// to files that we might be interested in
+type FileEvent int
+
+const (
+ // FileAdded - this is a new file
+ FileAdded FileEvent = iota + 1
+ // FileDeleted - this file has been removed
+ FileDeleted
+ // FileModified - this file has been changed in some way
+ FileModified
+ // FileRenamed - a file's name has changed
+ FileRenamed
+ // FileOther - some other backend-specific event has happened
+ FileOther
+)
+
+var (
+ // ErrFilewatchingClosed is returned when filewatching has been closed
+ ErrFilewatchingClosed = errors.New("Close() has already been called for filewatching")
+ // ErrFailedToStart is returned when filewatching fails to start up
+ ErrFailedToStart = errors.New("filewatching failed to start")
+)
+
+// Event is the backend-independent information about a file change
+type Event struct {
+ Path turbopath.AbsoluteSystemPath
+ EventType FileEvent
+}
+
+// Backend is the interface that describes what an underlying filesystem watching backend
+// must provide.
+type Backend interface {
+ AddRoot(root turbopath.AbsoluteSystemPath, excludePatterns ...string) error
+ Events() <-chan Event
+ Errors() <-chan error
+ Close() error
+ Start() error
+}
+
+// FileWatcher handles watching all of the files in the monorepo.
+// We currently ignore .git and top-level node_modules. We can revisit
+// if necessary.
+type FileWatcher struct {
+ backend Backend
+
+ logger hclog.Logger
+ repoRoot turbopath.AbsoluteSystemPath
+ excludePattern string
+
+ clientsMu sync.RWMutex
+ clients []FileWatchClient
+ closed bool
+}
+
+// New returns a new FileWatcher instance
+func New(logger hclog.Logger, repoRoot turbopath.AbsoluteSystemPath, backend Backend) *FileWatcher {
+ excludes := make([]string, len(_ignores))
+ for i, ignore := range _ignores {
+ excludes[i] = filepath.ToSlash(repoRoot.UntypedJoin(ignore).ToString() + "/**")
+ }
+ excludePattern := "{" + strings.Join(excludes, ",") + "}"
+ return &FileWatcher{
+ backend: backend,
+ logger: logger,
+ repoRoot: repoRoot,
+ excludePattern: excludePattern,
+ }
+}
+
+// Close shuts down filewatching
+func (fw *FileWatcher) Close() error {
+ return fw.backend.Close()
+}
+
+// Start recursively watches all directories under the repo root, skipping the excluded ones,
+// then fires off a goroutine to respond to filesystem events.
+func (fw *FileWatcher) Start() error {
+ if err := fw.backend.AddRoot(fw.repoRoot, fw.excludePattern); err != nil {
+ return err
+ }
+ if err := fw.backend.Start(); err != nil {
+ return err
+ }
+ go fw.watch()
+ return nil
+}
+
+// AddRoot registers the root of a filesystem hierarchy to be watched for changes. Events are *not*
+// fired for existing files when AddRoot is called, only for subsequent changes.
+// NOTE: if it appears helpful, we could change this behavior so that we provide a stream of initial
+// events.
+func (fw *FileWatcher) AddRoot(root turbopath.AbsoluteSystemPath, excludePatterns ...string) error {
+ return fw.backend.AddRoot(root, excludePatterns...)
+}
+
+// watch is the main file-watching loop: it fans backend events and errors out
+// to registered clients until the backend channels close. Note that the fsnotify
+// backend is not recursive, so it manually re-watches newly added directories.
+func (fw *FileWatcher) watch() {
+outer:
+ for {
+ select {
+ case ev, ok := <-fw.backend.Events():
+ if !ok {
+ fw.logger.Info("Events channel closed. Exiting watch loop")
+ break outer
+ }
+ fw.clientsMu.RLock()
+ for _, client := range fw.clients {
+ client.OnFileWatchEvent(ev)
+ }
+ fw.clientsMu.RUnlock()
+ case err, ok := <-fw.backend.Errors():
+ if !ok {
+ fw.logger.Info("Errors channel closed. Exiting watch loop")
+ break outer
+ }
+ fw.clientsMu.RLock()
+ for _, client := range fw.clients {
+ client.OnFileWatchError(err)
+ }
+ fw.clientsMu.RUnlock()
+ }
+ }
+ fw.clientsMu.Lock()
+ fw.closed = true
+ for _, client := range fw.clients {
+ client.OnFileWatchClosed()
+ }
+ fw.clientsMu.Unlock()
+}
+
+// AddClient registers a client for filesystem events
+func (fw *FileWatcher) AddClient(client FileWatchClient) {
+ fw.clientsMu.Lock()
+ defer fw.clientsMu.Unlock()
+ fw.clients = append(fw.clients, client)
+ if fw.closed {
+ client.OnFileWatchClosed()
+ }
+}
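A minimal FileWatchClient implementation, assuming a scratch repo-root path; since all callbacks arrive from the single watch goroutine, the client needs no locking of its own:

package main

import (
	"fmt"

	"github.com/hashicorp/go-hclog"
	"github.com/vercel/turbo/cli/internal/filewatcher"
	"github.com/vercel/turbo/cli/internal/fs"
)

// loggingClient is a minimal FileWatchClient: it just prints each callback.
type loggingClient struct{}

func (loggingClient) OnFileWatchEvent(ev filewatcher.Event) {
	fmt.Printf("event %v at %v\n", ev.EventType, ev.Path)
}
func (loggingClient) OnFileWatchError(err error) { fmt.Printf("error: %v\n", err) }
func (loggingClient) OnFileWatchClosed()         { fmt.Println("watching closed") }

func main() {
	logger := hclog.Default()
	backend, err := filewatcher.GetPlatformSpecificBackend(logger)
	if err != nil {
		panic(err)
	}
	repoRoot := fs.AbsoluteSystemPathFromUpstream("/tmp/demo-repo") // assumed path
	fw := filewatcher.New(logger, repoRoot, backend)
	if err := fw.Start(); err != nil {
		panic(err)
	}
	defer fw.Close()
	fw.AddClient(loggingClient{})
	select {} // block forever; events arrive via callbacks
}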
diff --git a/cli/internal/filewatcher/filewatcher_test.go b/cli/internal/filewatcher/filewatcher_test.go
new file mode 100644
index 0000000..72b48ba
--- /dev/null
+++ b/cli/internal/filewatcher/filewatcher_test.go
@@ -0,0 +1,152 @@
+package filewatcher
+
+import (
+ "fmt"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/go-hclog"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "gotest.tools/v3/assert"
+)
+
+type testClient struct {
+ mu sync.Mutex
+ createEvents []Event
+ notify chan Event
+}
+
+func (c *testClient) OnFileWatchEvent(ev Event) {
+ if ev.EventType == FileAdded {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.createEvents = append(c.createEvents, ev)
+ c.notify <- ev
+ }
+}
+
+func (c *testClient) OnFileWatchError(err error) {}
+
+func (c *testClient) OnFileWatchClosed() {}
+
+func expectFilesystemEvent(t *testing.T, ch <-chan Event, expected Event) {
+ // mark this method as a helper
+ t.Helper()
+ timeout := time.After(1 * time.Second)
+ for {
+ select {
+ case ev := <-ch:
+ t.Logf("got event %v", ev)
+ if ev.Path == expected.Path && ev.EventType == expected.EventType {
+ return
+ }
+ case <-timeout:
+ t.Errorf("Timed out waiting for filesystem event at %v", expected.Path)
+ return
+ }
+ }
+}
+
+func expectNoFilesystemEvent(t *testing.T, ch <-chan Event) {
+ // mark this method as a helper
+ t.Helper()
+ select {
+ case ev, ok := <-ch:
+ if ok {
+ t.Errorf("got unexpected filesystem event %v", ev)
+ } else {
+ t.Error("filewatching closed unexpectedly")
+ }
+ case <-time.After(500 * time.Millisecond):
+ return
+ }
+}
+
+func expectWatching(t *testing.T, c *testClient, dirs []turbopath.AbsoluteSystemPath) {
+ t.Helper()
+ now := time.Now()
+ filename := fmt.Sprintf("test-%v", now.UnixMilli())
+ for _, dir := range dirs {
+ file := dir.UntypedJoin(filename)
+ err := file.WriteFile([]byte("hello"), 0755)
+ assert.NilError(t, err, "WriteFile")
+ expectFilesystemEvent(t, c.notify, Event{
+ Path: file,
+ EventType: FileAdded,
+ })
+ }
+}
+
+func TestFileWatching(t *testing.T) {
+ logger := hclog.Default()
+ logger.SetLevel(hclog.Debug)
+ repoRoot := fs.AbsoluteSystemPathFromUpstream(t.TempDir())
+ err := repoRoot.UntypedJoin(".git").MkdirAll(0775)
+ assert.NilError(t, err, "MkdirAll")
+ err = repoRoot.UntypedJoin("node_modules", "some-dep").MkdirAll(0775)
+ assert.NilError(t, err, "MkdirAll")
+ err = repoRoot.UntypedJoin("parent", "child").MkdirAll(0775)
+ assert.NilError(t, err, "MkdirAll")
+ err = repoRoot.UntypedJoin("parent", "sibling").MkdirAll(0775)
+ assert.NilError(t, err, "MkdirAll")
+
+ // Directory layout:
+ // <repoRoot>/
+ // .git/
+ // node_modules/
+ // some-dep/
+ // parent/
+ // child/
+ // sibling/
+
+ watcher, err := GetPlatformSpecificBackend(logger)
+ assert.NilError(t, err, "GetPlatformSpecificBackend")
+ fw := New(logger, repoRoot, watcher)
+ err = fw.Start()
+ assert.NilError(t, err, "fw.Start")
+
+ // Add a client
+ ch := make(chan Event, 1)
+ c := &testClient{
+ notify: ch,
+ }
+ fw.AddClient(c)
+ expectedWatching := []turbopath.AbsoluteSystemPath{
+ repoRoot,
+ repoRoot.UntypedJoin("parent"),
+ repoRoot.UntypedJoin("parent", "child"),
+ repoRoot.UntypedJoin("parent", "sibling"),
+ }
+ expectWatching(t, c, expectedWatching)
+
+ fooPath := repoRoot.UntypedJoin("parent", "child", "foo")
+ err = fooPath.WriteFile([]byte("hello"), 0644)
+ assert.NilError(t, err, "WriteFile")
+ expectFilesystemEvent(t, ch, Event{
+ EventType: FileAdded,
+ Path: fooPath,
+ })
+
+ deepPath := repoRoot.UntypedJoin("parent", "sibling", "deep", "path")
+ err = deepPath.MkdirAll(0775)
+ assert.NilError(t, err, "MkdirAll")
+ // We'll catch an event for "deep", but not "deep/path" since
+ // we don't have a recursive watch
+ expectFilesystemEvent(t, ch, Event{
+ Path: repoRoot.UntypedJoin("parent", "sibling", "deep"),
+ EventType: FileAdded,
+ })
+ expectFilesystemEvent(t, ch, Event{
+ Path: repoRoot.UntypedJoin("parent", "sibling", "deep", "path"),
+ EventType: FileAdded,
+ })
+ expectedWatching = append(expectedWatching, deepPath, repoRoot.UntypedJoin("parent", "sibling", "deep"))
+ expectWatching(t, c, expectedWatching)
+
+ gitFilePath := repoRoot.UntypedJoin(".git", "git-file")
+ err = gitFilePath.WriteFile([]byte("nope"), 0644)
+ assert.NilError(t, err, "WriteFile")
+ expectNoFilesystemEvent(t, ch)
+}
diff --git a/cli/internal/fs/copy_file.go b/cli/internal/fs/copy_file.go
new file mode 100644
index 0000000..e7619de
--- /dev/null
+++ b/cli/internal/fs/copy_file.go
@@ -0,0 +1,81 @@
+// Adapted from https://github.com/thought-machine/please
+// Copyright Thought Machine, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+package fs
+
+import (
+ "errors"
+ "os"
+ "path/filepath"
+
+ "github.com/karrick/godirwalk"
+)
+
+// RecursiveCopy copies either a single file or a directory.
+// Permissions and modes are copied from the source entries.
+func RecursiveCopy(from string, to string) error {
+ // Verified all callers are passing in absolute paths for from (and to)
+ statedFrom := LstatCachedFile{Path: UnsafeToAbsoluteSystemPath(from)}
+ fromType, err := statedFrom.GetType()
+ if err != nil {
+ return err
+ }
+
+ if fromType.IsDir() {
+ return WalkMode(statedFrom.Path.ToStringDuringMigration(), func(name string, isDir bool, fileType os.FileMode) error {
+ dest := filepath.Join(to, name[len(statedFrom.Path.ToString()):])
+ // name is absolute (it originates from godirwalk)
+ src := LstatCachedFile{Path: UnsafeToAbsoluteSystemPath(name), fileType: &fileType}
+ if isDir {
+ mode, err := src.GetMode()
+ if err != nil {
+ return err
+ }
+ return os.MkdirAll(dest, mode)
+ }
+ return CopyFile(&src, dest)
+ })
+ }
+ return CopyFile(&statedFrom, to)
+}
+
+// Walk implements an equivalent to filepath.Walk.
+// It's implemented on top of github.com/karrick/godirwalk, but the callback
+// signature hides that dependency to keep call sites a little easier to handle.
+func Walk(rootPath string, callback func(name string, isDir bool) error) error {
+ return WalkMode(rootPath, func(name string, isDir bool, mode os.FileMode) error {
+ return callback(name, isDir)
+ })
+}
+
+// WalkMode is like Walk but the callback receives an additional type specifying the file mode type.
+// N.B. This only includes the bits of the mode that determine the mode type, not the permissions.
+func WalkMode(rootPath string, callback func(name string, isDir bool, mode os.FileMode) error) error {
+ return godirwalk.Walk(rootPath, &godirwalk.Options{
+ Callback: func(name string, info *godirwalk.Dirent) error {
+ // currently we support symlinked files, but not symlinked directories:
+ // For copying, we Mkdir and bail if we encounter a symlink to a directory
+ // For finding packages, we enumerate the symlink, but don't follow inside
+ isDir, err := info.IsDirOrSymlinkToDir()
+ if err != nil {
+ pathErr := &os.PathError{}
+ if errors.As(err, &pathErr) {
+ // If we have a broken link, skip this entry
+ return godirwalk.SkipThis
+ }
+ return err
+ }
+ return callback(name, isDir, info.ModeType())
+ },
+ ErrorCallback: func(pathname string, err error) godirwalk.ErrorAction {
+ pathErr := &os.PathError{}
+ if errors.As(err, &pathErr) {
+ return godirwalk.SkipNode
+ }
+ return godirwalk.Halt
+ },
+ Unsorted: true,
+ AllowNonDirectory: true,
+ FollowSymbolicLinks: false,
+ })
+}
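A small sketch of the Walk helper, counting entries under an assumed root; symlinks to directories are reported as directories but never descended into (FollowSymbolicLinks is false), matching the copy semantics above:

package main

import (
	"fmt"

	"github.com/vercel/turbo/cli/internal/fs"
)

func main() {
	var files, dirs int
	err := fs.Walk("/tmp/some-tree", func(name string, isDir bool) error { // assumed root
		if isDir {
			dirs++
		} else {
			files++
		}
		return nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d files across %d directories\n", files, dirs)
}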
diff --git a/cli/internal/fs/copy_file_test.go b/cli/internal/fs/copy_file_test.go
new file mode 100644
index 0000000..6a61576
--- /dev/null
+++ b/cli/internal/fs/copy_file_test.go
@@ -0,0 +1,198 @@
+package fs
+
+import (
+ "errors"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "gotest.tools/v3/assert"
+ "gotest.tools/v3/fs"
+)
+
+func TestCopyFile(t *testing.T) {
+ srcTmpDir := turbopath.AbsoluteSystemPath(t.TempDir())
+ destTmpDir := turbopath.AbsoluteSystemPath(t.TempDir())
+ srcFilePath := srcTmpDir.UntypedJoin("src")
+ destFilePath := destTmpDir.UntypedJoin("dest")
+ from := &LstatCachedFile{Path: srcFilePath}
+
+ // The src file doesn't exist, will error.
+ err := CopyFile(from, destFilePath.ToString())
+ pathErr := &os.PathError{}
+ if !errors.As(err, &pathErr) {
+ t.Errorf("got %v, want PathError", err)
+ }
+
+ // Create the src file.
+ srcFile, err := srcFilePath.Create()
+ assert.NilError(t, err, "Create")
+ _, err = srcFile.WriteString("src")
+ assert.NilError(t, err, "WriteString")
+ assert.NilError(t, srcFile.Close(), "Close")
+
+ // Copy the src to the dest.
+ err = CopyFile(from, destFilePath.ToString())
+ assert.NilError(t, err, "src exists dest does not, should not error.")
+
+ // Now test for symlinks.
+ symlinkSrcDir := turbopath.AbsoluteSystemPath(t.TempDir())
+ symlinkTargetDir := turbopath.AbsoluteSystemPath(t.TempDir())
+ symlinkDestDir := turbopath.AbsoluteSystemPath(t.TempDir())
+ symlinkSrcPath := symlinkSrcDir.UntypedJoin("symlink")
+ symlinkTargetPath := symlinkTargetDir.UntypedJoin("target")
+ symlinkDestPath := symlinkDestDir.UntypedJoin("dest")
+ fromSymlink := &LstatCachedFile{Path: symlinkSrcPath}
+
+ // Create the symlink target.
+ symlinkTargetFile, err := symlinkTargetPath.Create()
+ assert.NilError(t, err, "Create")
+ _, err = symlinkTargetFile.WriteString("Target")
+ assert.NilError(t, err, "WriteString")
+ assert.NilError(t, symlinkTargetFile.Close(), "Close")
+
+ // Link things up.
+ err = symlinkSrcPath.Symlink(symlinkTargetPath.ToString())
+ assert.NilError(t, err, "Symlink")
+
+ // Run the test.
+ err = CopyFile(fromSymlink, symlinkDestPath.ToString())
+ assert.NilError(t, err, "Copying a valid symlink does not error.")
+
+ // Break the symlink.
+ err = symlinkTargetPath.Remove()
+ assert.NilError(t, err, "breaking the symlink")
+
+ // Remove the existing copy.
+ err = symlinkDestPath.Remove()
+ assert.NilError(t, err, "existing copy is removed")
+
+ // Try copying the now-broken symlink.
+ err = CopyFile(fromSymlink, symlinkDestPath.ToString())
+ assert.NilError(t, err, "CopyFile")
+
+ // Confirm that it copied
+ target, err := symlinkDestPath.Readlink()
+ assert.NilError(t, err, "Readlink")
+ assert.Equal(t, target, symlinkTargetPath.ToString())
+}
+
+func TestCopyOrLinkFileWithPerms(t *testing.T) {
+ // Directory layout:
+ //
+ // <src>/
+ // foo
+ readonlyMode := os.FileMode(0444)
+ srcDir := turbopath.AbsoluteSystemPath(t.TempDir())
+ dstDir := turbopath.AbsoluteSystemPath(t.TempDir())
+ srcFilePath := srcDir.UntypedJoin("src")
+ dstFilePath := dstDir.UntypedJoin("dst")
+ srcFile, err := srcFilePath.Create()
+ defer func() { _ = srcFile.Close() }()
+ assert.NilError(t, err, "Create")
+ err = srcFile.Chmod(readonlyMode)
+ assert.NilError(t, err, "Chmod")
+ err = CopyFile(&LstatCachedFile{Path: srcFilePath}, dstFilePath.ToStringDuringMigration())
+ assert.NilError(t, err, "CopyOrLinkFile")
+ info, err := dstFilePath.Lstat()
+ assert.NilError(t, err, "Lstat")
+ assert.Equal(t, info.Mode(), readonlyMode, "expected dest to have matching permissions")
+}
+
+func TestRecursiveCopy(t *testing.T) {
+ // Directory layout:
+ //
+ // <src>/
+ // b
+ // child/
+ // a
+ // link -> ../b
+ // broken -> missing
+ // circle -> ../child
+ src := fs.NewDir(t, "recursive-copy-or-link")
+ dst := fs.NewDir(t, "recursive-copy-or-link-dist")
+ childDir := filepath.Join(src.Path(), "child")
+ err := os.Mkdir(childDir, os.ModeDir|0777)
+ assert.NilError(t, err, "Mkdir")
+ aPath := filepath.Join(childDir, "a")
+ aFile, err := os.Create(aPath)
+ assert.NilError(t, err, "Create")
+ _, err = aFile.WriteString("hello")
+ assert.NilError(t, err, "WriteString")
+ assert.NilError(t, aFile.Close(), "Close")
+
+ bPath := filepath.Join(src.Path(), "b")
+ bFile, err := os.Create(bPath)
+ assert.NilError(t, err, "Create")
+ _, err = bFile.WriteString("bFile")
+ assert.NilError(t, err, "WriteString")
+ assert.NilError(t, bFile.Close(), "Close")
+
+ srcLinkPath := filepath.Join(childDir, "link")
+ assert.NilError(t, os.Symlink(filepath.FromSlash("../b"), srcLinkPath), "Symlink")
+
+ srcBrokenLinkPath := filepath.Join(childDir, "broken")
+ assert.NilError(t, os.Symlink("missing", srcBrokenLinkPath), "Symlink")
+ circlePath := filepath.Join(childDir, "circle")
+ assert.NilError(t, os.Symlink(filepath.FromSlash("../child"), circlePath), "Symlink")
+
+ err = RecursiveCopy(src.Path(), dst.Path())
+ assert.NilError(t, err, "RecursiveCopy")
+ // Ensure that copying a second time over the same destination does not break
+ err = RecursiveCopy(src.Path(), dst.Path())
+ assert.NilError(t, err, "RecursiveCopy")
+
+ dstChildDir := filepath.Join(dst.Path(), "child")
+ assertDirMatches(t, childDir, dstChildDir)
+ dstAPath := filepath.Join(dst.Path(), "child", "a")
+ assertFileMatches(t, aPath, dstAPath)
+ dstBPath := filepath.Join(dst.Path(), "b")
+ assertFileMatches(t, bPath, dstBPath)
+ dstLinkPath := filepath.Join(dst.Path(), "child", "link")
+ dstLinkDest, err := os.Readlink(dstLinkPath)
+ assert.NilError(t, err, "Readlink")
+ expectedLinkDest := filepath.FromSlash("../b")
+ if dstLinkDest != expectedLinkDest {
+ t.Errorf("Readlink got %v, want %v", dstLinkDest, expectedLinkDest)
+ }
+ dstBrokenLinkPath := filepath.Join(dst.Path(), "child", "broken")
+ brokenLinkExists := PathExists(dstBrokenLinkPath)
+ if brokenLinkExists {
+ t.Errorf("We cached a broken link at %v", dstBrokenLinkPath)
+ }
+ // Currently, we convert symlink-to-directory to empty-directory
+ // This is very likely not ideal behavior, but leaving this test here to verify
+ // that it is what we expect at this point in time.
+ dstCirclePath := filepath.Join(dst.Path(), "child", "circle")
+ circleStat, err := os.Lstat(dstCirclePath)
+ assert.NilError(t, err, "Lstat")
+ assert.Equal(t, circleStat.IsDir(), true)
+ entries, err := os.ReadDir(dstCirclePath)
+ assert.NilError(t, err, "ReadDir")
+ assert.Equal(t, len(entries), 0)
+}
+
+func assertFileMatches(t *testing.T, orig string, copy string) {
+ t.Helper()
+ origBytes, err := ioutil.ReadFile(orig)
+ assert.NilError(t, err, "ReadFile")
+ copyBytes, err := ioutil.ReadFile(copy)
+ assert.NilError(t, err, "ReadFile")
+ assert.DeepEqual(t, origBytes, copyBytes)
+ origStat, err := os.Lstat(orig)
+ assert.NilError(t, err, "Lstat")
+ copyStat, err := os.Lstat(copy)
+ assert.NilError(t, err, "Lstat")
+ assert.Equal(t, origStat.Mode(), copyStat.Mode())
+}
+
+func assertDirMatches(t *testing.T, orig string, copy string) {
+ t.Helper()
+ origStat, err := os.Lstat(orig)
+ assert.NilError(t, err, "Lstat")
+ copyStat, err := os.Lstat(copy)
+ assert.NilError(t, err, "Lstat")
+ assert.Equal(t, origStat.Mode(), copyStat.Mode())
+}
diff --git a/cli/internal/fs/fs.go b/cli/internal/fs/fs.go
new file mode 100644
index 0000000..77804c0
--- /dev/null
+++ b/cli/internal/fs/fs.go
@@ -0,0 +1,191 @@
+package fs
+
+import (
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "github.com/pkg/errors"
+ "github.com/vercel/turbo/cli/internal/util"
+)
+
+// https://github.com/thought-machine/please/blob/master/src/fs/fs.go
+
+// DirPermissions are the default permission bits we apply to directories.
+const DirPermissions = os.ModeDir | 0775
+
+// EnsureDir ensures that the directory of the given file has been created.
+func EnsureDir(filename string) error {
+ dir := filepath.Dir(filename)
+ err := os.MkdirAll(dir, DirPermissions)
+ if err != nil && FileExists(dir) {
+ // It looks like this is a file and not a directory. Attempt to remove it; this can
+ // happen in some cases if you change a rule from outputting a file to a directory.
+ log.Printf("Attempting to remove file %s; a subdirectory is required", dir)
+ if err2 := os.Remove(dir); err2 == nil {
+ err = os.MkdirAll(dir, DirPermissions)
+ } else {
+ return err
+ }
+ }
+ return err
+}
+
+var nonRelativeSentinel = ".." + string(filepath.Separator)
+
+// DirContainsPath returns true if the path 'target' is contained within 'dir'
+// Expects both paths to be absolute and does not verify that either path exists.
+func DirContainsPath(dir string, target string) (bool, error) {
+ // On windows, trying to get a relative path between files on different volumes
+ // is an error. We don't care about the error, it's good enough for us to say
+ // that one path doesn't contain the other if they're on different volumes.
+ if runtime.GOOS == "windows" && filepath.VolumeName(dir) != filepath.VolumeName(target) {
+ return false, nil
+ }
+ // In Go, filepath.Rel can return a path that starts with "../" or equivalent.
+ // Checking filesystem-level contains can get extremely complicated
+ // (see https://github.com/golang/dep/blob/f13583b555deaa6742f141a9c1185af947720d60/internal/fs/fs.go#L33)
+ // As a compromise, rely on the stdlib to generate a relative path and then check
+ // if the first step is "../".
+ rel, err := filepath.Rel(dir, target)
+ if err != nil {
+ return false, err
+ }
+ return !strings.HasPrefix(rel, nonRelativeSentinel), nil
+}
+
+// PathExists returns true if the given path exists, as a file or a directory.
+func PathExists(filename string) bool {
+ _, err := os.Lstat(filename)
+ return err == nil
+}
+
+// FileExists returns true if the given path exists and is a file.
+func FileExists(filename string) bool {
+ info, err := os.Lstat(filename)
+ return err == nil && !info.IsDir()
+}
+
+// CopyFile copies a file from 'from' to 'to', with an attempt to perform a copy & rename
+// to avoid chaos if anything goes wrong partway.
+func CopyFile(from *LstatCachedFile, to string) error {
+ fromMode, err := from.GetMode()
+ if err != nil {
+ return errors.Wrapf(err, "getting mode for %v", from.Path)
+ }
+ if fromMode&os.ModeSymlink != 0 {
+ target, err := from.Path.Readlink()
+ if err != nil {
+ return errors.Wrapf(err, "reading link target for %v", from.Path)
+ }
+ if err := EnsureDir(to); err != nil {
+ return err
+ }
+ if _, err := os.Lstat(to); err == nil {
+ // target link file exist, should remove it first
+ err := os.Remove(to)
+ if err != nil {
+ return err
+ }
+ }
+ return os.Symlink(target, to)
+ }
+ fromFile, err := from.Path.Open()
+ if err != nil {
+ return err
+ }
+ defer util.CloseAndIgnoreError(fromFile)
+ return writeFileFromStream(fromFile, to, fromMode)
+}
+
+// writeFileFromStream writes data from a reader to the file named 'to', with an attempt to perform
+// a copy & rename to avoid chaos if anything goes wrong partway.
+func writeFileFromStream(fromFile io.Reader, to string, mode os.FileMode) error {
+ dir, file := filepath.Split(to)
+ if dir != "" {
+ if err := os.MkdirAll(dir, DirPermissions); err != nil {
+ return err
+ }
+ }
+ tempFile, err := ioutil.TempFile(dir, file)
+ if err != nil {
+ return err
+ }
+ if _, err := io.Copy(tempFile, fromFile); err != nil {
+ return err
+ }
+ if err := tempFile.Close(); err != nil {
+ return err
+ }
+ // OK, now file is written; adjust permissions appropriately.
+ if mode == 0 {
+ mode = 0664
+ }
+ if err := os.Chmod(tempFile.Name(), mode); err != nil {
+ return err
+ }
+ // And move it to its final destination.
+ return renameFile(tempFile.Name(), to)
+}
+
+// IsDirectory checks if a given path is a directory
+func IsDirectory(path string) bool {
+ info, err := os.Stat(path)
+ return err == nil && info.IsDir()
+}
+
+// renameFile tries os.Rename first and falls back to copy-and-delete, since
+// os.Rename does not work across filesystems and on most Linux systems /tmp
+// is mounted as tmpfs.
+func renameFile(from, to string) (err error) {
+ err = os.Rename(from, to)
+ if err == nil {
+ return nil
+ }
+ err = copyFile(from, to)
+ if err != nil {
+ return err
+ }
+ err = os.RemoveAll(from)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func copyFile(from, to string) (err error) {
+ in, err := os.Open(from)
+ if err != nil {
+ return err
+ }
+ defer in.Close()
+
+ out, err := os.Create(to)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if e := out.Close(); e != nil {
+ err = e
+ }
+ }()
+
+ _, err = io.Copy(out, in)
+ if err != nil {
+ return err
+ }
+
+ si, err := os.Stat(from)
+ if err != nil {
+ return err
+ }
+ err = os.Chmod(to, si.Mode())
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
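A usage sketch for CopyFile with assumed paths. The destination mode mirrors the source, symlinks are recreated rather than dereferenced, and regular files go through a temp-file-plus-rename so a partial copy never lands at the final path:

package main

import (
	"github.com/vercel/turbo/cli/internal/fs"
)

func main() {
	src := &fs.LstatCachedFile{Path: fs.UnsafeToAbsoluteSystemPath("/tmp/src.txt")} // assumed path
	if err := fs.CopyFile(src, "/tmp/dst.txt"); err != nil {
		panic(err)
	}
}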
diff --git a/cli/internal/fs/fs_test.go b/cli/internal/fs/fs_test.go
new file mode 100644
index 0000000..0598d43
--- /dev/null
+++ b/cli/internal/fs/fs_test.go
@@ -0,0 +1,60 @@
+package fs
+
+import (
+ "path/filepath"
+ "testing"
+)
+
+func Test_DirContainsPath(t *testing.T) {
+ parent, err := filepath.Abs(filepath.Join("some", "path"))
+ if err != nil {
+ t.Fatalf("failed to construct parent path %v", err)
+ }
+ testcases := []struct {
+ target []string
+ want bool
+ }{
+ {
+ []string{"..", "elsewhere"},
+ false,
+ },
+ {
+ []string{"sibling"},
+ false,
+ },
+ {
+ // The same path as parent
+ []string{"some", "path"},
+ true,
+ },
+ {
+ []string{"some", "path", "..", "path", "inside", "parent"},
+ true,
+ },
+ {
+ []string{"some", "path", "inside", "..", "inside", "parent"},
+ true,
+ },
+ {
+ []string{"some", "path", "inside", "..", "..", "outside", "parent"},
+ false,
+ },
+ {
+ []string{"some", "pathprefix"},
+ false,
+ },
+ }
+ for _, tc := range testcases {
+ target, err := filepath.Abs(filepath.Join(tc.target...))
+ if err != nil {
+ t.Fatalf("failed to construct path for %v: %v", tc.target, err)
+ }
+ got, err := DirContainsPath(parent, target)
+ if err != nil {
+ t.Fatalf("failed to check ")
+ }
+ if got != tc.want {
+ t.Errorf("DirContainsPath(%v, %v) got %v, want %v", parent, target, got, tc.want)
+ }
+ }
+}
diff --git a/cli/internal/fs/fs_windows_test.go b/cli/internal/fs/fs_windows_test.go
new file mode 100644
index 0000000..4e71e2c
--- /dev/null
+++ b/cli/internal/fs/fs_windows_test.go
@@ -0,0 +1,18 @@
+//go:build windows
+// +build windows
+
+package fs
+
+import "testing"
+
+func TestDifferentVolumes(t *testing.T) {
+ p1 := "C:\\some\\path"
+ p2 := "D:\\other\\path"
+ contains, err := DirContainsPath(p1, p2)
+ if err != nil {
+ t.Errorf("DirContainsPath got error %v, want <nil>", err)
+ }
+ if contains {
+ t.Errorf("DirContainsPath got true, want false")
+ }
+}
diff --git a/cli/internal/fs/get_turbo_data_dir_go.go b/cli/internal/fs/get_turbo_data_dir_go.go
new file mode 100644
index 0000000..2cf459a
--- /dev/null
+++ b/cli/internal/fs/get_turbo_data_dir_go.go
@@ -0,0 +1,16 @@
+//go:build go || !rust
+// +build go !rust
+
+package fs
+
+import (
+ "github.com/adrg/xdg"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+// GetTurboDataDir returns a directory outside of the repo
+// where turbo can store data files related to turbo.
+func GetTurboDataDir() turbopath.AbsoluteSystemPath {
+ dataHome := AbsoluteSystemPathFromUpstream(xdg.DataHome)
+ return dataHome.UntypedJoin("turborepo")
+}
diff --git a/cli/internal/fs/get_turbo_data_dir_rust.go b/cli/internal/fs/get_turbo_data_dir_rust.go
new file mode 100644
index 0000000..dbc80f3
--- /dev/null
+++ b/cli/internal/fs/get_turbo_data_dir_rust.go
@@ -0,0 +1,16 @@
+//go:build rust
+// +build rust
+
+package fs
+
+import (
+ "github.com/vercel/turbo/cli/internal/ffi"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+// GetTurboDataDir returns a directory outside of the repo
+// where turbo can store data files related to turbo.
+func GetTurboDataDir() turbopath.AbsoluteSystemPath {
+ dir := ffi.GetTurboDataDir()
+ return turbopath.AbsoluteSystemPathFromUpstream(dir)
+}
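The pair of files above selects an implementation at build time via Go build tags: a default `go build` (or `-tags go`) compiles the XDG-based version, while `go build -tags rust` swaps in the FFI-backed one; exactly one of the two files is compiled into any given binary.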
diff --git a/cli/internal/fs/hash.go b/cli/internal/fs/hash.go
new file mode 100644
index 0000000..fed7d87
--- /dev/null
+++ b/cli/internal/fs/hash.go
@@ -0,0 +1,61 @@
+package fs
+
+import (
+ "crypto/sha1"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+
+ "github.com/vercel/turbo/cli/internal/xxhash"
+)
+
+// HashObject returns the hex-encoded xxhash of an arbitrary Go value,
+// computed over its fmt "%v" rendering.
+func HashObject(i interface{}) (string, error) {
+ hash := xxhash.New()
+
+ _, err := hash.Write([]byte(fmt.Sprintf("%v", i)))
+
+ return hex.EncodeToString(hash.Sum(nil)), err
+}
+
+// HashFile returns the hex-encoded xxhash of the contents of the file at filePath.
+func HashFile(filePath string) (string, error) {
+ file, err := os.Open(filePath)
+ if err != nil {
+ return "", err
+ }
+ defer file.Close()
+
+ hash := xxhash.New()
+ if _, err := io.Copy(hash, file); err != nil {
+ return "", err
+ }
+
+ return hex.EncodeToString(hash.Sum(nil)), nil
+}
+
+// GitLikeHashFile mimics how Git calculates the SHA1 for a file
+// (in Git terms, a "blob"), without shelling out to git.
+func GitLikeHashFile(filePath string) (string, error) {
+ file, err := os.Open(filePath)
+ if err != nil {
+ return "", err
+ }
+ defer file.Close()
+
+ stat, err := file.Stat()
+ if err != nil {
+ return "", err
+ }
+ hash := sha1.New()
+ hash.Write([]byte("blob"))
+ hash.Write([]byte(" "))
+ hash.Write([]byte(strconv.FormatInt(stat.Size(), 10)))
+ hash.Write([]byte{0})
+
+ if _, err := io.Copy(hash, file); err != nil {
+ return "", err
+ }
+
+ return hex.EncodeToString(hash.Sum(nil)), nil
+}
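A quick check of the blob framing, assuming a temp file: for the six-byte content "hello\n" the digest matches `git hash-object`, because both hash the byte string "blob 6\x00hello\n":

package main

import (
	"fmt"
	"os"

	"github.com/vercel/turbo/cli/internal/fs"
)

func main() {
	// Write a small file and hash it the way git would hash the blob.
	f, err := os.CreateTemp("", "blob-*")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	if _, err := f.WriteString("hello\n"); err != nil {
		panic(err)
	}
	if err := f.Close(); err != nil {
		panic(err)
	}
	sum, err := fs.GitLikeHashFile(f.Name())
	if err != nil {
		panic(err)
	}
	fmt.Println(sum) // ce013625030ba8dba906f756967f9e9ca394464a for "hello\n"
}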
diff --git a/cli/internal/fs/hash_test.go b/cli/internal/fs/hash_test.go
new file mode 100644
index 0000000..dd2fa84
--- /dev/null
+++ b/cli/internal/fs/hash_test.go
@@ -0,0 +1,53 @@
+package fs
+
+import (
+ "testing"
+
+ "gotest.tools/v3/assert"
+)
+
+const _numOfRuns = 20
+
+func Test_HashObjectStability(t *testing.T) {
+ type TestCase struct {
+ name string
+ obj interface{}
+ }
+ type complexStruct struct {
+ nested TaskOutputs
+ foo string
+ bar []string
+ }
+
+ testCases := []TestCase{
+ {
+ name: "task object",
+ obj: TaskOutputs{
+ Inclusions: []string{"foo", "bar"},
+ Exclusions: []string{"baz"},
+ },
+ },
+ {
+ name: "complex struct",
+ obj: complexStruct{
+ nested: TaskOutputs{
+ Exclusions: []string{"bar", "baz"},
+ Inclusions: []string{"foo"},
+ },
+ foo: "a",
+ bar: []string{"b", "c"},
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ expectedHash, err := HashObject(tc.obj)
+ assert.NilError(t, err, tc.name)
+
+ for n := 0; n < _numOfRuns; n++ {
+ hash, err := HashObject(tc.obj)
+ assert.NilError(t, err, tc.name)
+ assert.Equal(t, expectedHash, hash, tc.name)
+ }
+ }
+}
diff --git a/cli/internal/fs/lstat.go b/cli/internal/fs/lstat.go
new file mode 100644
index 0000000..eff0810
--- /dev/null
+++ b/cli/internal/fs/lstat.go
@@ -0,0 +1,74 @@
+package fs
+
+import (
+ "io/fs"
+ "os"
+
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+// LstatCachedFile maintains a cache of file info, mode and type for the given Path
+type LstatCachedFile struct {
+ Path turbopath.AbsoluteSystemPath
+ fileInfo fs.FileInfo
+ fileMode *fs.FileMode
+ fileType *fs.FileMode
+}
+
+// GetInfo returns and caches the file info for LstatCachedFile.Path
+func (file *LstatCachedFile) GetInfo() (fs.FileInfo, error) {
+ if file.fileInfo != nil {
+ return file.fileInfo, nil
+ }
+
+ err := file.lstat()
+ if err != nil {
+ return nil, err
+ }
+
+ return file.fileInfo, nil
+}
+
+// GetMode returns and caches the file mode for LstatCachedFile.Path
+func (file *LstatCachedFile) GetMode() (fs.FileMode, error) {
+ if file.fileMode != nil {
+ return *file.fileMode, nil
+ }
+
+ err := file.lstat()
+ if err != nil {
+ return 0, err
+ }
+
+ return *file.fileMode, nil
+}
+
+// GetType returns and caches the type bits (FileMode & os.ModeType) for LstatCachedFile.Path
+func (file *LstatCachedFile) GetType() (fs.FileMode, error) {
+ if file.fileType != nil {
+ return *file.fileType, nil
+ }
+
+ err := file.lstat()
+ if err != nil {
+ return 0, err
+ }
+
+ return *file.fileType, nil
+}
+
+func (file *LstatCachedFile) lstat() error {
+ fileInfo, err := file.Path.Lstat()
+ if err != nil {
+ return err
+ }
+
+ fileMode := fileInfo.Mode()
+ fileModeType := fileMode & os.ModeType
+
+ file.fileInfo = fileInfo
+ file.fileMode = &fileMode
+ file.fileType = &fileModeType
+
+ return nil
+}
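A short sketch, with an assumed path, showing that a single lstat(2) populates all three caches, so later Get* calls are free:

package main

import (
	"fmt"
	"os"

	"github.com/vercel/turbo/cli/internal/fs"
)

func main() {
	file := &fs.LstatCachedFile{Path: fs.UnsafeToAbsoluteSystemPath("/tmp/src.txt")} // assumed path
	t, err := file.GetType() // performs the lstat and fills info, mode, and type
	if err != nil {
		panic(err)
	}
	mode, err := file.GetMode() // served from cache, no second syscall
	if err != nil {
		panic(err)
	}
	fmt.Printf("symlink=%v mode=%v\n", t&os.ModeSymlink != 0, mode)
}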
diff --git a/cli/internal/fs/package_json.go b/cli/internal/fs/package_json.go
new file mode 100644
index 0000000..883f7a4
--- /dev/null
+++ b/cli/internal/fs/package_json.go
@@ -0,0 +1,142 @@
+package fs
+
+import (
+ "bytes"
+ "encoding/json"
+ "sync"
+
+ "github.com/vercel/turbo/cli/internal/lockfile"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+// PackageJSON represents NodeJS package.json
+type PackageJSON struct {
+ Name string `json:"name"`
+ Version string `json:"version"`
+ Scripts map[string]string `json:"scripts"`
+ Dependencies map[string]string `json:"dependencies"`
+ DevDependencies map[string]string `json:"devDependencies"`
+ OptionalDependencies map[string]string `json:"optionalDependencies"`
+ PeerDependencies map[string]string `json:"peerDependencies"`
+ PackageManager string `json:"packageManager"`
+ Os []string `json:"os"`
+ Workspaces Workspaces `json:"workspaces"`
+ Private bool `json:"private"`
+ // Exact JSON object stored in package.json including unknown fields
+ // During marshalling struct fields will take priority over raw fields
+ RawJSON map[string]interface{} `json:"-"`
+
+ // relative path from repo root to the package.json file
+ PackageJSONPath turbopath.AnchoredSystemPath `json:"-"`
+ // relative path from repo root to the package
+ Dir turbopath.AnchoredSystemPath `json:"-"`
+ InternalDeps []string `json:"-"`
+ UnresolvedExternalDeps map[string]string `json:"-"`
+ TransitiveDeps []lockfile.Package `json:"-"`
+ LegacyTurboConfig *TurboJSON `json:"turbo"`
+ Mu sync.Mutex `json:"-"`
+ ExternalDepsHash string `json:"-"`
+}
+
+// Workspaces is the normalized list of workspace globs from package.json.
+type Workspaces []string
+
+// WorkspacesAlt captures the alternative object form: {"workspaces": {"packages": [...]}}.
+type WorkspacesAlt struct {
+ Packages []string `json:"packages,omitempty"`
+}
+
+func (r *Workspaces) UnmarshalJSON(data []byte) error {
+ var tmp = &WorkspacesAlt{}
+ if err := json.Unmarshal(data, tmp); err == nil {
+ *r = Workspaces(tmp.Packages)
+ return nil
+ }
+ var tempstr = []string{}
+ if err := json.Unmarshal(data, &tempstr); err != nil {
+ return err
+ }
+ *r = tempstr
+ return nil
+}
+
+// ReadPackageJSON reads and parses the package.json file at the given path
+func ReadPackageJSON(path turbopath.AbsoluteSystemPath) (*PackageJSON, error) {
+ b, err := path.ReadFile()
+ if err != nil {
+ return nil, err
+ }
+ return UnmarshalPackageJSON(b)
+}
+
+// UnmarshalPackageJSON decodes a byte slice into a PackageJSON struct
+func UnmarshalPackageJSON(data []byte) (*PackageJSON, error) {
+ var rawJSON map[string]interface{}
+ if err := json.Unmarshal(data, &rawJSON); err != nil {
+ return nil, err
+ }
+
+ pkgJSON := &PackageJSON{}
+ if err := json.Unmarshal(data, &pkgJSON); err != nil {
+ return nil, err
+ }
+ pkgJSON.RawJSON = rawJSON
+
+ return pkgJSON, nil
+}
+
+// MarshalPackageJSON serializes a PackageJSON to a byte slice
+func MarshalPackageJSON(pkgJSON *PackageJSON) ([]byte, error) {
+ structuredContent, err := json.Marshal(pkgJSON)
+ if err != nil {
+ return nil, err
+ }
+ var structuredFields map[string]interface{}
+ if err := json.Unmarshal(structuredContent, &structuredFields); err != nil {
+ return nil, err
+ }
+
+ fieldsToSerialize := make(map[string]interface{}, len(pkgJSON.RawJSON))
+
+ // copy pkgJSON.RawJSON
+ for key, value := range pkgJSON.RawJSON {
+ fieldsToSerialize[key] = value
+ }
+
+ for key, value := range structuredFields {
+ if isEmpty(value) {
+ delete(fieldsToSerialize, key)
+ } else {
+ fieldsToSerialize[key] = value
+ }
+ }
+
+ var b bytes.Buffer
+ encoder := json.NewEncoder(&b)
+ encoder.SetEscapeHTML(false)
+ encoder.SetIndent("", " ")
+ if err := encoder.Encode(fieldsToSerialize); err != nil {
+ return nil, err
+ }
+
+ return b.Bytes(), nil
+}
+
+func isEmpty(value interface{}) bool {
+ if value == nil {
+ return true
+ }
+ switch s := value.(type) {
+ case string:
+ return s == ""
+ case bool:
+ return !s
+ case []string:
+ return len(s) == 0
+ case map[string]interface{}:
+ return len(s) == 0
+ case Workspaces:
+ return len(s) == 0
+ default:
+ // Assume any unknown types aren't empty
+ return false
+ }
+}
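+
+// Example (illustrative sketch with hypothetical field values): unknown
+// fields survive an Unmarshal/Marshal round trip via RawJSON, and
+// structured fields win on conflict.
+func examplePackageJSONRoundTrip() ([]byte, error) {
+ pkgJSON, err := UnmarshalPackageJSON([]byte(`{"name":"foo","version":"0.0.1","custom-field":true}`))
+ if err != nil {
+ return nil, err
+ }
+ // The struct field takes priority over the stale value in RawJSON.
+ pkgJSON.Version = "1.0.0"
+ // The output keeps "custom-field" and serializes "version" as "1.0.0";
+ // empty struct fields are dropped entirely.
+ return MarshalPackageJSON(pkgJSON)
+}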
diff --git a/cli/internal/fs/package_json_test.go b/cli/internal/fs/package_json_test.go
new file mode 100644
index 0000000..3c16620
--- /dev/null
+++ b/cli/internal/fs/package_json_test.go
@@ -0,0 +1,174 @@
+package fs
+
+import (
+ "testing"
+
+ "gotest.tools/v3/assert"
+)
+
+func Test_UnmarshalPackageJSON(t *testing.T) {
+ type Case struct {
+ name string
+ json string
+ expectedFields *PackageJSON
+ }
+
+ testCases := []Case{
+ {
+ name: "basic types are in raw and processed",
+ json: `{"name":"foo","version":"1.2.3"}`,
+ expectedFields: &PackageJSON{
+ Name: "foo",
+ Version: "1.2.3",
+ RawJSON: map[string]interface{}{
+ "name": "foo",
+ "version": "1.2.3",
+ },
+ },
+ },
+ {
+ name: "map types get copied",
+ json: `{"dependencies":{"foo":"1.2.3"},"devDependencies":{"bar": "^1.0.0"}}`,
+ expectedFields: &PackageJSON{
+ Dependencies: map[string]string{"foo": "1.2.3"},
+ DevDependencies: map[string]string{"bar": "^1.0.0"},
+ RawJSON: map[string]interface{}{
+ "dependencies": map[string]interface{}{"foo": "1.2.3"},
+ "devDependencies": map[string]interface{}{"bar": "^1.0.0"},
+ },
+ },
+ },
+ {
+ name: "array types get copied",
+ json: `{"os":["linux", "windows"]}`,
+ expectedFields: &PackageJSON{
+ Os: []string{"linux", "windows"},
+ RawJSON: map[string]interface{}{
+ "os": []interface{}{"linux", "windows"},
+ },
+ },
+ },
+ }
+
+ for _, testCase := range testCases {
+ actual, err := UnmarshalPackageJSON([]byte(testCase.json))
+ assert.NilError(t, err, testCase.name)
+ assertPackageJSONEqual(t, actual, testCase.expectedFields)
+ }
+}
+
+func Test_MarshalPackageJSON(t *testing.T) {
+ type TestCase struct {
+ name string
+ input *PackageJSON
+ expected *PackageJSON
+ }
+
+ testCases := []TestCase{
+ {
+ name: "roundtrip should have no effect",
+ input: &PackageJSON{
+ Name: "foo",
+ Version: "1.2.3",
+ RawJSON: map[string]interface{}{
+ "name": "foo",
+ "version": "1.2.3",
+ },
+ },
+ expected: &PackageJSON{
+ Name: "foo",
+ Version: "1.2.3",
+ RawJSON: map[string]interface{}{
+ "name": "foo",
+ "version": "1.2.3",
+ },
+ },
+ },
+ {
+ name: "structured fields should take priority over raw values",
+ input: &PackageJSON{
+ Name: "foo",
+ Version: "2.3.4",
+ RawJSON: map[string]interface{}{
+ "name": "foo",
+ "version": "1.2.3",
+ },
+ },
+ expected: &PackageJSON{
+ Name: "foo",
+ Version: "2.3.4",
+ RawJSON: map[string]interface{}{
+ "name": "foo",
+ "version": "2.3.4",
+ },
+ },
+ },
+ {
+ name: "empty structured fields don't get serialized",
+ input: &PackageJSON{
+ Name: "foo",
+ Version: "",
+ RawJSON: map[string]interface{}{
+ "name": "foo",
+ "version": "1.2.3",
+ },
+ },
+ expected: &PackageJSON{
+ Name: "foo",
+ Version: "",
+ RawJSON: map[string]interface{}{
+ "name": "foo",
+ },
+ },
+ },
+ {
+ name: "unstructured fields survive the round trip",
+ input: &PackageJSON{
+ Name: "foo",
+ RawJSON: map[string]interface{}{
+ "name": "foo",
+ "special-field": "special-value",
+ "special-config": map[string]interface{}{
+ "flag": true,
+ "value": "toggled",
+ },
+ },
+ },
+ expected: &PackageJSON{
+ Name: "foo",
+ RawJSON: map[string]interface{}{
+ "name": "foo",
+ "special-field": "special-value",
+ "special-config": map[string]interface{}{
+ "flag": true,
+ "value": "toggled",
+ },
+ },
+ },
+ },
+ }
+
+ for _, testCase := range testCases {
+ serializedInput, err := MarshalPackageJSON(testCase.input)
+ assert.NilError(t, err, testCase.name)
+ actual, err := UnmarshalPackageJSON(serializedInput)
+ assert.NilError(t, err, testCase.name)
+ assertPackageJSONEqual(t, actual, testCase.expected)
+ }
+}
+
+// assertPackageJSONEqual asserts that the data sections of two PackageJSON structs are equal
+func assertPackageJSONEqual(t *testing.T, x *PackageJSON, y *PackageJSON) {
+ t.Helper()
+ assert.Equal(t, x.Name, y.Name)
+ assert.Equal(t, x.Version, y.Version)
+ assert.DeepEqual(t, x.Scripts, y.Scripts)
+ assert.DeepEqual(t, x.Dependencies, y.Dependencies)
+ assert.DeepEqual(t, x.DevDependencies, y.DevDependencies)
+ assert.DeepEqual(t, x.OptionalDependencies, y.OptionalDependencies)
+ assert.DeepEqual(t, x.PeerDependencies, y.PeerDependencies)
+ assert.Equal(t, x.PackageManager, y.PackageManager)
+ assert.DeepEqual(t, x.Workspaces, y.Workspaces)
+ assert.DeepEqual(t, x.Private, y.Private)
+ assert.DeepEqual(t, x.RawJSON, y.RawJSON)
+}
diff --git a/cli/internal/fs/path.go b/cli/internal/fs/path.go
new file mode 100644
index 0000000..2023d69
--- /dev/null
+++ b/cli/internal/fs/path.go
@@ -0,0 +1,113 @@
+package fs
+
+import (
+ "fmt"
+ iofs "io/fs"
+ "os"
+ "path/filepath"
+ "reflect"
+
+ "github.com/adrg/xdg"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+// CheckedToAbsoluteSystemPath converts a string to an AbsoluteSystemPath, returning an error if it is not an absolute path.
+func CheckedToAbsoluteSystemPath(s string) (turbopath.AbsoluteSystemPath, error) {
+ if filepath.IsAbs(s) {
+ return turbopath.AbsoluteSystemPath(s), nil
+ }
+ return "", fmt.Errorf("%v is not an absolute path", s)
+}
+
+// ResolveUnknownPath returns unknown if it is an absolute path; otherwise it
+// assumes unknown is a path relative to the given root.
+func ResolveUnknownPath(root turbopath.AbsoluteSystemPath, unknown string) turbopath.AbsoluteSystemPath {
+ if filepath.IsAbs(unknown) {
+ return turbopath.AbsoluteSystemPath(unknown)
+ }
+ return root.UntypedJoin(unknown)
+}
+
+// UnsafeToAbsoluteSystemPath directly converts a string to an AbsoluteSystemPath
+func UnsafeToAbsoluteSystemPath(s string) turbopath.AbsoluteSystemPath {
+ return turbopath.AbsoluteSystemPath(s)
+}
+
+// UnsafeToAnchoredSystemPath directly converts a string to an AnchoredSystemPath
+func UnsafeToAnchoredSystemPath(s string) turbopath.AnchoredSystemPath {
+ return turbopath.AnchoredSystemPath(s)
+}
+
+// AbsoluteSystemPathFromUpstream is used to mark return values from APIs that we
+// expect to give us absolute paths. No checking is performed.
+// Prefer to use this over a cast to maintain the search-ability of interfaces
+// into and out of the turbopath.AbsoluteSystemPath type.
+func AbsoluteSystemPathFromUpstream(s string) turbopath.AbsoluteSystemPath {
+ return turbopath.AbsoluteSystemPath(s)
+}
+
+// GetCwd returns the calculated working directory after traversing symlinks.
+func GetCwd(cwdRaw string) (turbopath.AbsoluteSystemPath, error) {
+ if cwdRaw == "" {
+ var err error
+ cwdRaw, err = os.Getwd()
+ if err != nil {
+ return "", err
+ }
+ }
+ // We evaluate symlinks here because the package managers
+ // we support do the same.
+ cwdRaw, err := filepath.EvalSymlinks(cwdRaw)
+ if err != nil {
+ return "", fmt.Errorf("evaluating symlinks in cwd: %w", err)
+ }
+ cwd, err := CheckedToAbsoluteSystemPath(cwdRaw)
+ if err != nil {
+ return "", fmt.Errorf("cwd is not an absolute path %v: %v", cwdRaw, err)
+ }
+ return cwd, nil
+}
+
+// GetVolumeRoot returns the root directory given an absolute path.
+func GetVolumeRoot(absolutePath string) string {
+ return filepath.VolumeName(absolutePath) + string(os.PathSeparator)
+}
+
+// CreateDirFSAtRoot creates an `os.dirFS` instance at the root of the
+// volume containing the specified path.
+func CreateDirFSAtRoot(absolutePath string) iofs.FS {
+ return os.DirFS(GetVolumeRoot(absolutePath))
+}
+
+// GetDirFSRootPath returns the root path of an os.dirFS.
+func GetDirFSRootPath(fsys iofs.FS) string {
+ // We can't typecheck fsys to enforce using an `os.dirFS` because the
+ // type isn't exported from `os`. So instead, reflection. 🤷‍♂️
+
+ fsysType := reflect.TypeOf(fsys).Name()
+ if fsysType != "dirFS" {
+ // This is not a user error, fail fast
+ panic("GetDirFSRootPath must receive an os.dirFS")
+ }
+
+ // The underlying type is a string; this is the original path passed in.
+ return reflect.ValueOf(fsys).String()
+}
+
+// IofsRelativePath calculates a `os.dirFS`-friendly path from an absolute system path.
+func IofsRelativePath(fsysRoot string, absolutePath string) (string, error) {
+ return filepath.Rel(fsysRoot, absolutePath)
+}
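+
+// Example (illustrative sketch with a hypothetical Unix path): the dirFS
+// helpers translate an absolute system path into an io/fs-relative path
+// anchored at the volume root.
+func exampleDirFSRelativePath() (string, error) {
+ absolutePath := "/repos/some-app/package.json"
+ fsys := CreateDirFSAtRoot(absolutePath) // os.DirFS rooted at "/" on Unix
+ fsysRoot := GetDirFSRootPath(fsys)      // "/"
+ // Yields "repos/some-app/package.json", suitable for fs.FS APIs.
+ return IofsRelativePath(fsysRoot, absolutePath)
+}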
+
+// TempDir returns the absolute path of a directory with the given name
+// under the system's default temp directory location
+func TempDir(subDir string) turbopath.AbsoluteSystemPath {
+ return turbopath.AbsoluteSystemPath(os.TempDir()).UntypedJoin(subDir)
+}
+
+// GetUserConfigDir returns the platform-specific common location
+// for configuration files that belong to a user.
+func GetUserConfigDir() turbopath.AbsoluteSystemPath {
+ configHome := AbsoluteSystemPathFromUpstream(xdg.ConfigHome)
+ return configHome.UntypedJoin("turborepo")
+}
diff --git a/cli/internal/fs/testdata/both/package.json b/cli/internal/fs/testdata/both/package.json
new file mode 100644
index 0000000..03534b7
--- /dev/null
+++ b/cli/internal/fs/testdata/both/package.json
@@ -0,0 +1,7 @@
+{
+ "turbo": {
+ "pipeline": {
+ "build": {}
+ }
+ }
+}
diff --git a/cli/internal/fs/testdata/both/turbo.json b/cli/internal/fs/testdata/both/turbo.json
new file mode 100644
index 0000000..721e897
--- /dev/null
+++ b/cli/internal/fs/testdata/both/turbo.json
@@ -0,0 +1,18 @@
+// mocked test comment
+{
+ "pipeline": {
+ "build": {
+ // mocked test comment
+ "dependsOn": [
+ // mocked test comment
+ "^build"
+ ],
+ "outputs": ["dist/**", ".next/**", "!dist/assets/**"],
+ "outputMode": "new-only"
+ } // mocked test comment
+ },
+ "remoteCache": {
+ "teamId": "team_id",
+ "signature": true
+ }
+}
diff --git a/cli/internal/fs/testdata/correct/turbo.json b/cli/internal/fs/testdata/correct/turbo.json
new file mode 100644
index 0000000..e22cde2
--- /dev/null
+++ b/cli/internal/fs/testdata/correct/turbo.json
@@ -0,0 +1,49 @@
+// mocked test comment
+{
+ "pipeline": {
+ "build": {
+ "experimentalPassthroughEnv": ["GITHUB_TOKEN"],
+ // mocked test comment
+ "dependsOn": [
+ // mocked test comment
+ "^build"
+ ],
+ "outputs": ["dist/**", "!dist/assets/**", ".next/**"],
+ "outputMode": "new-only"
+ }, // mocked test comment
+ "lint": {
+ "outputs": [],
+ "dependsOn": ["$MY_VAR"],
+ "cache": true,
+ "outputMode": "new-only"
+ },
+ "dev": {
+ "cache": false,
+ "outputMode": "full"
+ },
+ /* mocked test comment */
+ "publish": {
+ "outputs": ["dist/**"],
+ "inputs": [
+ /*
+ mocked test comment
+ */
+ "build/**/*"
+ ],
+ "dependsOn": [
+ /* mocked test comment */ "^publish",
+ "^build",
+ "build",
+ "admin#lint"
+ ],
+ "cache": false
+ }
+ },
+ "globalDependencies": ["some-file", "../another-dir/**", "$GLOBAL_ENV_VAR"],
+ "globlaEnv": ["SOME_VAR", "ANOTHER_VAR"],
+ "experimentalGlobalPassThroughEnv": ["AWS_SECRET_KEY"],
+ "remoteCache": {
+ "teamId": "team_id",
+ "signature": true
+ }
+}
diff --git a/cli/internal/fs/testdata/invalid-env-1/turbo.json b/cli/internal/fs/testdata/invalid-env-1/turbo.json
new file mode 100644
index 0000000..e4a6517
--- /dev/null
+++ b/cli/internal/fs/testdata/invalid-env-1/turbo.json
@@ -0,0 +1,8 @@
+{
+ "pipeline": {
+ "task1": {
+ // all invalid value
+ "env": ["$A", "$B"]
+ }
+ }
+}
diff --git a/cli/internal/fs/testdata/invalid-env-2/turbo.json b/cli/internal/fs/testdata/invalid-env-2/turbo.json
new file mode 100644
index 0000000..92eec96
--- /dev/null
+++ b/cli/internal/fs/testdata/invalid-env-2/turbo.json
@@ -0,0 +1,8 @@
+{
+ "pipeline": {
+ "task1": {
+ // Mixed values
+ "env": ["$A", "B"]
+ }
+ }
+}
diff --git a/cli/internal/fs/testdata/invalid-global-env/turbo.json b/cli/internal/fs/testdata/invalid-global-env/turbo.json
new file mode 100644
index 0000000..2ae9ff9
--- /dev/null
+++ b/cli/internal/fs/testdata/invalid-global-env/turbo.json
@@ -0,0 +1,11 @@
+{
+ // Both global declarations with duplicates
+ "globalDependencies": ["$FOO", "$BAR", "somefile.txt", "somefile.txt"],
+ // some invalid values
+ "globalEnv": ["FOO", "BAZ", "$QUX"],
+ "pipeline": {
+ "task1": {
+ "dependsOn": ["$A"]
+ }
+ }
+}
diff --git a/cli/internal/fs/testdata/legacy-env/turbo.json b/cli/internal/fs/testdata/legacy-env/turbo.json
new file mode 100644
index 0000000..6b082c4
--- /dev/null
+++ b/cli/internal/fs/testdata/legacy-env/turbo.json
@@ -0,0 +1,34 @@
+// mocked test comment
+{
+ // Both global declarations, with duplicates and with legacy $-prefixed env vars
+ "globalDependencies": ["$FOO", "$BAR", "somefile.txt", "somefile.txt"],
+ "globalEnv": ["FOO", "BAZ", "QUX"],
+ "pipeline": {
+ // Only legacy declaration
+ "task1": {
+ "dependsOn": ["$A"]
+ },
+ // Only new declaration
+ "task2": {
+ "env": ["A"]
+ },
+ // Same var declared in both
+ "task3": {
+ "dependsOn": ["$A"],
+ "env": ["A"]
+ },
+ // Different vars declared in both
+ "task4": {
+ "dependsOn": ["$A"],
+ "env": ["B"]
+ },
+
+ // some edge cases
+ "task6": { "env": ["A", "B", "C"], "dependsOn": ["$D", "$E", "$F"] },
+ "task7": { "env": ["A", "B", "C"], "dependsOn": ["$A", "$B", "$C"] },
+ "task8": { "env": ["A", "B", "C"], "dependsOn": ["A", "B", "C"] },
+ "task9": { "env": [], "dependsOn": ["$A"] },
+ "task10": { "env": ["A", "A"], "dependsOn": ["$A", "$A"] },
+ "task11": { "env": ["A", "A"], "dependsOn": ["$B", "$B"] }
+ }
+}
diff --git a/cli/internal/fs/testdata/legacy-only/package.json b/cli/internal/fs/testdata/legacy-only/package.json
new file mode 100644
index 0000000..03534b7
--- /dev/null
+++ b/cli/internal/fs/testdata/legacy-only/package.json
@@ -0,0 +1,7 @@
+{
+ "turbo": {
+ "pipeline": {
+ "build": {}
+ }
+ }
+}
diff --git a/cli/internal/fs/turbo_json.go b/cli/internal/fs/turbo_json.go
new file mode 100644
index 0000000..71ef29d
--- /dev/null
+++ b/cli/internal/fs/turbo_json.go
@@ -0,0 +1,741 @@
+package fs
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "github.com/muhammadmuzzammil1998/jsonc"
+ "github.com/pkg/errors"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "github.com/vercel/turbo/cli/internal/util"
+)
+
+const (
+ configFile = "turbo.json"
+ envPipelineDelimiter = "$"
+ topologicalPipelineDelimiter = "^"
+)
+
+type rawTurboJSON struct {
+ // Global root filesystem dependencies
+ GlobalDependencies []string `json:"globalDependencies,omitempty"`
+ // Global env
+ GlobalEnv []string `json:"globalEnv,omitempty"`
+
+ // Global passthrough env
+ GlobalPassthroughEnv []string `json:"experimentalGlobalPassThroughEnv,omitempty"`
+
+ // Pipeline is a map of Turbo pipeline entries which define the task graph
+ // and cache behavior on a per task or per package-task basis.
+ Pipeline Pipeline `json:"pipeline"`
+ // Configuration options when interfacing with the remote cache
+ RemoteCacheOptions RemoteCacheOptions `json:"remoteCache,omitempty"`
+
+ // Extends can be the name of another workspace
+ Extends []string `json:"extends,omitempty"`
+}
+
+// pristineTurboJSON is used when marshaling a TurboJSON object into a turbo.json string
+// Notably, it includes a PristinePipeline instead of the regular Pipeline. (i.e. TaskDefinition
+// instead of BookkeepingTaskDefinition.)
+type pristineTurboJSON struct {
+ GlobalDependencies []string `json:"globalDependencies,omitempty"`
+ GlobalEnv []string `json:"globalEnv,omitempty"`
+ GlobalPassthroughEnv []string `json:"experimentalGlobalPassThroughEnv,omitempty"`
+ Pipeline PristinePipeline `json:"pipeline"`
+ RemoteCacheOptions RemoteCacheOptions `json:"remoteCache,omitempty"`
+ Extends []string `json:"extends,omitempty"`
+}
+
+// TurboJSON represents a turbo.json configuration file
+type TurboJSON struct {
+ GlobalDeps []string
+ GlobalEnv []string
+ GlobalPassthroughEnv []string
+ Pipeline Pipeline
+ RemoteCacheOptions RemoteCacheOptions
+
+ // A list of Workspace names
+ Extends []string
+}
+
+// RemoteCacheOptions is a struct for deserializing .remoteCache of configFile
+type RemoteCacheOptions struct {
+ TeamID string `json:"teamId,omitempty"`
+ Signature bool `json:"signature,omitempty"`
+}
+
+// rawTaskWithDefaults exists to Marshal (i.e. turn a TaskDefinition into json).
+// We use this for printing ResolvedTaskConfiguration, because we _want_ to show
+// the user the default values for keys they have not configured.
+type rawTaskWithDefaults struct {
+ Outputs []string `json:"outputs"`
+ Cache *bool `json:"cache"`
+ DependsOn []string `json:"dependsOn"`
+ Inputs []string `json:"inputs"`
+ OutputMode util.TaskOutputMode `json:"outputMode"`
+ PassthroughEnv []string `json:"experimentalPassThroughEnv,omitempty"`
+ Env []string `json:"env"`
+ Persistent bool `json:"persistent"`
+}
+
+// rawTask exists to Unmarshal from json. When fields are omitted, we _want_
+// them to be missing, so that we can distinguish a missing field from an empty value.
+type rawTask struct {
+ Outputs []string `json:"outputs,omitempty"`
+ Cache *bool `json:"cache,omitempty"`
+ DependsOn []string `json:"dependsOn,omitempty"`
+ Inputs []string `json:"inputs,omitempty"`
+ OutputMode *util.TaskOutputMode `json:"outputMode,omitempty"`
+ Env []string `json:"env,omitempty"`
+ PassthroughEnv []string `json:"experimentalPassthroughEnv,omitempty"`
+ Persistent *bool `json:"persistent,omitempty"`
+}
+
+// taskDefinitionHashable exists as a definition for PristinePipeline, which is used
+// downstream for calculating the global hash. We want to exclude experimental fields here
+// because we don't want experimental fields to be part of the global hash.
+type taskDefinitionHashable struct {
+ Outputs TaskOutputs
+ ShouldCache bool
+ EnvVarDependencies []string
+ TopologicalDependencies []string
+ TaskDependencies []string
+ Inputs []string
+ OutputMode util.TaskOutputMode
+ Persistent bool
+}
+
+// taskDefinitionExperiments is a list of config fields in a task definition that are considered
+// experimental. We keep these separated so we can compute a global hash without these.
+type taskDefinitionExperiments struct {
+ PassthroughEnv []string
+}
+
+// PristinePipeline is a map of task names to TaskDefinition or taskDefinitionHashable.
+// Depending on whether any experimental fields are defined, we will use either struct.
+// The purpose is to omit experimental fields when making a pristine version, so that
+// they don't show up in --dry/--summarize output or affect the global hash.
+type PristinePipeline map[string]interface{}
+
+// Pipeline is a struct for deserializing .pipeline in configFile
+type Pipeline map[string]BookkeepingTaskDefinition
+
+// BookkeepingTaskDefinition holds the underlying TaskDefinition and some bookkeeping data
+// about the TaskDefinition. This wrapper struct allows us to leave TaskDefinition untouched.
+type BookkeepingTaskDefinition struct {
+ definedFields util.Set
+ experimentalFields util.Set
+ experimental taskDefinitionExperiments
+ TaskDefinition taskDefinitionHashable
+}
+
+// TaskDefinition is a representation of the configFile pipeline for further computation.
+type TaskDefinition struct {
+ Outputs TaskOutputs
+ ShouldCache bool
+
+ // This field is custom-marshalled from rawTask.Env and rawTask.DependsOn
+ EnvVarDependencies []string
+
+ // rawTask.PassthroughEnv
+ PassthroughEnv []string
+
+ // TopologicalDependencies are tasks from package dependencies.
+ // E.g. "build" is a topological dependency in:
+ // dependsOn: ['^build'].
+ // This field is custom-marshalled from rawTask.DependsOn
+ TopologicalDependencies []string
+
+ // TaskDependencies are anything that is not a topological dependency
+ // E.g. both something and //whatever are TaskDependencies in:
+ // dependsOn: ['something', '//whatever']
+ // This field is custom-marshalled from rawTask.DependsOn
+ TaskDependencies []string
+
+ // Inputs indicate the list of files this Task depends on. If any of those files change
+ // we can conclude that any cached outputs or logs for this Task should be invalidated.
+ Inputs []string
+
+ // OutputMode determines how we should log the output.
+ OutputMode util.TaskOutputMode
+
+ // Persistent indicates whether the Task is expected to exit or not
+ // Tasks marked Persistent do not exit (e.g. --watch mode or dev servers)
+ Persistent bool
+}
+
+// GetTask returns a TaskDefinition based on the ID (package#task format) or name (e.g. "build")
+func (pc Pipeline) GetTask(taskID string, taskName string) (*BookkeepingTaskDefinition, error) {
+ // first check for package-tasks
+ taskDefinition, ok := pc[taskID]
+ if !ok {
+ // then check for regular tasks
+ fallbackTaskDefinition, ok := pc[taskName]
+ // if neither exists, bail with an error
+ if !ok {
+ return nil, fmt.Errorf("Could not find task \"%s\" in pipeline", taskID)
+ }
+
+ // fall back to the task-name definition
+ taskDefinition = fallbackTaskDefinition
+ }
+ }
+
+ return &taskDefinition, nil
+}
+
+// LoadTurboConfig loads, or optionally, synthesizes a TurboJSON instance
+func LoadTurboConfig(dir turbopath.AbsoluteSystemPath, rootPackageJSON *PackageJSON, includeSynthesizedFromRootPackageJSON bool) (*TurboJSON, error) {
+ // If the root package.json still has a `turbo` key, log a warning and remove it.
+ if rootPackageJSON.LegacyTurboConfig != nil {
+ log.Printf("[WARNING] \"turbo\" in package.json is no longer supported. Migrate to %s by running \"npx @turbo/codemod create-turbo-config\"\n", configFile)
+ rootPackageJSON.LegacyTurboConfig = nil
+ }
+
+ var turboJSON *TurboJSON
+ turboFromFiles, err := readTurboConfig(dir.UntypedJoin(configFile))
+
+ if !includeSynthesizedFromRootPackageJSON && err != nil {
+ // If the file didn't exist, throw a custom error here instead of propagating
+ if errors.Is(err, os.ErrNotExist) {
+ return nil, errors.Wrap(err, fmt.Sprintf("Could not find %s. Follow directions at https://turbo.build/repo/docs to create one", configFile))
+
+ }
+
+ // There was an error, and we don't have any chance of recovering
+ // because we aren't synthesizing anything
+ return nil, err
+ } else if !includeSynthesizedFromRootPackageJSON {
+ // We're not synthesizing anything and there was no error, we're done
+ return turboFromFiles, nil
+ } else if errors.Is(err, os.ErrNotExist) {
+ // turbo.json doesn't exist, but we're going to try to synthesize something
+ turboJSON = &TurboJSON{
+ Pipeline: make(Pipeline),
+ }
+ } else if err != nil {
+ // some other error happened, and we can't recover
+ return nil, err
+ } else {
+ // we're synthesizing, but we have a starting point
+ // Note: this will have to change to support task inference in a monorepo
+ // for now, we're going to error on any "root" tasks and turn non-root tasks into root tasks
+ pipeline := make(Pipeline)
+ for taskID, taskDefinition := range turboFromFiles.Pipeline {
+ if util.IsPackageTask(taskID) {
+ return nil, fmt.Errorf("Package tasks (<package>#<task>) are not allowed in single-package repositories: found %v", taskID)
+ }
+ pipeline[util.RootTaskID(taskID)] = taskDefinition
+ }
+ turboJSON = turboFromFiles
+ turboJSON.Pipeline = pipeline
+ }
+
+ for scriptName := range rootPackageJSON.Scripts {
+ if !turboJSON.Pipeline.HasTask(scriptName) {
+ taskName := util.RootTaskID(scriptName)
+ // Explicitly set ShouldCache to false in this definition and add the bookkeeping fields
+ // so downstream we can pretend that it was set on purpose (as if read from a config file)
+ // rather than defaulting to the 0-value of a boolean field.
+ turboJSON.Pipeline[taskName] = BookkeepingTaskDefinition{
+ definedFields: util.SetFromStrings([]string{"ShouldCache"}),
+ TaskDefinition: taskDefinitionHashable{
+ ShouldCache: false,
+ },
+ }
+ }
+ }
+ return turboJSON, nil
+}
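+
+// Example (illustrative sketch, in-package): with synthesis enabled and no
+// turbo.json on disk, each root package.json script becomes an uncached
+// root task. Assumes util.RootTaskID renders "build" as "//#build".
+func exampleSynthesizedConfig(repoRoot turbopath.AbsoluteSystemPath) (bool, error) {
+ rootPackageJSON := &PackageJSON{Scripts: map[string]string{"build": "tsc"}}
+ turboJSON, err := LoadTurboConfig(repoRoot, rootPackageJSON, true)
+ if err != nil {
+ return false, err
+ }
+ taskDefinition, ok := turboJSON.Pipeline.GetTaskDefinition("//#build")
+ // Reports true: the synthesized task exists and has caching disabled.
+ return ok && !taskDefinition.ShouldCache, nil
+}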
+
+// TurboJSONValidation is the signature for a validation function passed to Validate()
+type TurboJSONValidation func(*TurboJSON) []error
+
+// Validate calls an array of validation functions on the TurboJSON struct.
+// The validations can be customized by the caller.
+func (tj *TurboJSON) Validate(validations []TurboJSONValidation) []error {
+ allErrors := []error{}
+ for _, validation := range validations {
+ errors := validation(tj)
+ allErrors = append(allErrors, errors...)
+ }
+
+ return allErrors
+}
+
+// TaskOutputs represents the patterns for including and excluding files from outputs
+type TaskOutputs struct {
+ Inclusions []string
+ Exclusions []string
+}
+
+// Sort returns a copy of the TaskOutputs with sorted inclusions and exclusions
+func (to TaskOutputs) Sort() TaskOutputs {
+ // Allocate destination slices before copying; copying into nil slices copies nothing.
+ inclusions := make([]string, len(to.Inclusions))
+ exclusions := make([]string, len(to.Exclusions))
+ copy(inclusions, to.Inclusions)
+ copy(exclusions, to.Exclusions)
+ sort.Strings(inclusions)
+ sort.Strings(exclusions)
+ return TaskOutputs{Inclusions: inclusions, Exclusions: exclusions}
+}
+
+// readTurboConfig reads turbo.json from a provided path
+func readTurboConfig(turboJSONPath turbopath.AbsoluteSystemPath) (*TurboJSON, error) {
+ // If the configFile exists, use that
+ if turboJSONPath.FileExists() {
+ turboJSON, err := readTurboJSON(turboJSONPath)
+ if err != nil {
+ return nil, fmt.Errorf("%s: %w", configFile, err)
+ }
+
+ return turboJSON, nil
+ }
+
+ // If there's no turbo.json, return an error.
+ return nil, os.ErrNotExist
+}
+
+// readTurboJSON reads the configFile into a struct
+func readTurboJSON(path turbopath.AbsoluteSystemPath) (*TurboJSON, error) {
+ file, err := path.Open()
+ if err != nil {
+ return nil, err
+ }
+ var turboJSON *TurboJSON
+ data, err := ioutil.ReadAll(file)
+ if err != nil {
+ return nil, err
+ }
+
+ err = jsonc.Unmarshal(data, &turboJSON)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return turboJSON, nil
+}
+
+// GetTaskDefinition returns a TaskDefinition from a serialized definition in configFile
+func (pc Pipeline) GetTaskDefinition(taskID string) (TaskDefinition, bool) {
+ if entry, ok := pc[taskID]; ok {
+ return entry.GetTaskDefinition(), true
+ }
+ _, task := util.GetPackageTaskFromId(taskID)
+ entry, ok := pc[task]
+ return entry.GetTaskDefinition(), ok
+}
+
+// HasTask returns true if the given task is defined in the pipeline, either directly or
+// via a package task (`pkg#task`)
+func (pc Pipeline) HasTask(task string) bool {
+ for key := range pc {
+ if key == task {
+ return true
+ }
+ if util.IsPackageTask(key) {
+ _, taskName := util.GetPackageTaskFromId(key)
+ if taskName == task {
+ return true
+ }
+ }
+ }
+ return false
+}
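+
+// Example (illustrative sketch, in-package): HasTask matches both plain task
+// keys and the task half of package-task keys.
+func exampleHasTask() (bool, bool, bool) {
+ pipeline := Pipeline{
+ "build":      BookkeepingTaskDefinition{},
+ "web#deploy": BookkeepingTaskDefinition{},
+ }
+ // true via the direct key, true via "web#deploy", false otherwise.
+ return pipeline.HasTask("build"), pipeline.HasTask("deploy"), pipeline.HasTask("test")
+}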
+
+// Pristine returns a PristinePipeline; it is used for printing to the console and for pruning
+func (pc Pipeline) Pristine() PristinePipeline {
+ pristine := PristinePipeline{}
+ for taskName, taskDef := range pc {
+ // If there are any experimental fields, we will include them with 0-values
+ // if there aren't, we will omit them entirely
+ if taskDef.hasExperimentalFields() {
+ pristine[taskName] = taskDef.GetTaskDefinition() // merges experimental fields in
+ } else {
+ pristine[taskName] = taskDef.TaskDefinition // has no experimental fields
+ }
+ }
+ return pristine
+}
+
+// hasField checks the internal bookkeeping definedFields field to
+// see whether a field was actually in the underlying turbo.json
+// or whether it was initialized with its 0-value.
+func (btd BookkeepingTaskDefinition) hasField(fieldName string) bool {
+ return btd.definedFields.Includes(fieldName) || btd.experimentalFields.Includes(fieldName)
+}
+
+// hasExperimentalFields reports whether any experimental fields were defined
+func (btd BookkeepingTaskDefinition) hasExperimentalFields() bool {
+ return len(btd.experimentalFields) > 0
+}
+
+// GetTaskDefinition gets a TaskDefinition by merging the experimental and non-experimental fields
+// into a single representation to use downstream.
+func (btd BookkeepingTaskDefinition) GetTaskDefinition() TaskDefinition {
+ return TaskDefinition{
+ Outputs: btd.TaskDefinition.Outputs,
+ ShouldCache: btd.TaskDefinition.ShouldCache,
+ EnvVarDependencies: btd.TaskDefinition.EnvVarDependencies,
+ TopologicalDependencies: btd.TaskDefinition.TopologicalDependencies,
+ TaskDependencies: btd.TaskDefinition.TaskDependencies,
+ Inputs: btd.TaskDefinition.Inputs,
+ OutputMode: btd.TaskDefinition.OutputMode,
+ Persistent: btd.TaskDefinition.Persistent,
+ // From experimental fields
+ PassthroughEnv: btd.experimental.PassthroughEnv,
+ }
+}
+
+// MergeTaskDefinitions accepts an array of BookkeepingTaskDefinitions and merges them into
+// a single TaskDefinition. It uses the bookkeeping definedFields to determine which fields should
+// be overwritten and when 0-values should be respected.
+func MergeTaskDefinitions(taskDefinitions []BookkeepingTaskDefinition) (*TaskDefinition, error) {
+ // Start with an empty definition
+ mergedTaskDefinition := &TaskDefinition{}
+
+ // Set the default, because the 0-value will be false, and if no turbo.jsons had
+ // this field set for this task, we want it to be true.
+ mergedTaskDefinition.ShouldCache = true
+
+ // For each of the TaskDefinitions we know of, merge them in
+ for _, bookkeepingTaskDef := range taskDefinitions {
+ taskDef := bookkeepingTaskDef.GetTaskDefinition()
+
+ if bookkeepingTaskDef.hasField("Outputs") {
+ mergedTaskDefinition.Outputs = taskDef.Outputs
+ }
+
+ if bookkeepingTaskDef.hasField("ShouldCache") {
+ mergedTaskDefinition.ShouldCache = taskDef.ShouldCache
+ }
+
+ if bookkeepingTaskDef.hasField("EnvVarDependencies") {
+ mergedTaskDefinition.EnvVarDependencies = taskDef.EnvVarDependencies
+ }
+
+ if bookkeepingTaskDef.hasField("PassthroughEnv") {
+ mergedTaskDefinition.PassthroughEnv = taskDef.PassthroughEnv
+ }
+
+ if bookkeepingTaskDef.hasField("DependsOn") {
+ mergedTaskDefinition.TopologicalDependencies = taskDef.TopologicalDependencies
+ }
+
+ if bookkeepingTaskDef.hasField("DependsOn") {
+ mergedTaskDefinition.TaskDependencies = taskDef.TaskDependencies
+ }
+
+ if bookkeepingTaskDef.hasField("Inputs") {
+ mergedTaskDefinition.Inputs = taskDef.Inputs
+ }
+
+ if bookkeepingTaskDef.hasField("OutputMode") {
+ mergedTaskDefinition.OutputMode = taskDef.OutputMode
+ }
+ if bookkeepingTaskDef.hasField("Persistent") {
+ mergedTaskDefinition.Persistent = taskDef.Persistent
+ }
+ }
+
+ return mergedTaskDefinition, nil
+}
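+
+// Example (illustrative sketch, in-package): later definitions only override
+// fields they actually declared, so an explicit "cache": false survives while
+// omitted fields fall through to earlier configs.
+func exampleMergeTaskDefinitions() (*TaskDefinition, error) {
+ var base, override BookkeepingTaskDefinition
+ if err := json.Unmarshal([]byte(`{"outputs": ["dist/**"], "cache": false}`), &base); err != nil {
+ return nil, err
+ }
+ if err := json.Unmarshal([]byte(`{"dependsOn": ["^build"]}`), &override); err != nil {
+ return nil, err
+ }
+ // Outputs and ShouldCache come from base; the override contributes only
+ // the topological dependency it declared.
+ return MergeTaskDefinitions([]BookkeepingTaskDefinition{base, override})
+}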
+
+// UnmarshalJSON deserializes a single task definition from
+// turbo.json into a TaskDefinition struct
+func (btd *BookkeepingTaskDefinition) UnmarshalJSON(data []byte) error {
+ task := rawTask{}
+ if err := json.Unmarshal(data, &task); err != nil {
+ return err
+ }
+
+ btd.definedFields = util.Set{}
+ btd.experimentalFields = util.Set{}
+
+ if task.Outputs != nil {
+ var inclusions []string
+ var exclusions []string
+ // Assign a bookkeeping field so we know that there really were
+ // outputs configured in the underlying config file.
+ btd.definedFields.Add("Outputs")
+
+ for _, glob := range task.Outputs {
+ if strings.HasPrefix(glob, "!") {
+ if filepath.IsAbs(glob[1:]) {
+ log.Printf("[WARNING] Using an absolute path in \"outputs\" (%v) will not work and will be an error in a future version", glob)
+ }
+ exclusions = append(exclusions, glob[1:])
+ } else {
+ if filepath.IsAbs(glob) {
+ log.Printf("[WARNING] Using an absolute path in \"outputs\" (%v) will not work and will be an error in a future version", glob)
+ }
+ inclusions = append(inclusions, glob)
+ }
+ }
+
+ btd.TaskDefinition.Outputs = TaskOutputs{
+ Inclusions: inclusions,
+ Exclusions: exclusions,
+ }
+
+ sort.Strings(btd.TaskDefinition.Outputs.Inclusions)
+ sort.Strings(btd.TaskDefinition.Outputs.Exclusions)
+ }
+
+ if task.Cache == nil {
+ btd.TaskDefinition.ShouldCache = true
+ } else {
+ btd.definedFields.Add("ShouldCache")
+ btd.TaskDefinition.ShouldCache = *task.Cache
+ }
+
+ envVarDependencies := make(util.Set)
+ envVarPassthroughs := make(util.Set)
+
+ btd.TaskDefinition.TopologicalDependencies = []string{} // TODO @mehulkar: this should be a set
+ btd.TaskDefinition.TaskDependencies = []string{} // TODO @mehulkar: this should be a set
+
+ // If there was a dependsOn field, add the bookkeeping
+ // we don't care what's in the field, just that it was there
+ // We'll use this marker to overwrite while merging TaskDefinitions.
+ if task.DependsOn != nil {
+ btd.definedFields.Add("DependsOn")
+ }
+
+ for _, dependency := range task.DependsOn {
+ if strings.HasPrefix(dependency, envPipelineDelimiter) {
+ log.Printf("[DEPRECATED] Declaring an environment variable in \"dependsOn\" is deprecated, found %s. Use the \"env\" key or use `npx @turbo/codemod migrate-env-var-dependencies`.\n", dependency)
+ envVarDependencies.Add(strings.TrimPrefix(dependency, envPipelineDelimiter))
+ } else if strings.HasPrefix(dependency, topologicalPipelineDelimiter) {
+ // "^"-prefixed entries are topological dependencies: the same task in dependency packages
+ btd.TaskDefinition.TopologicalDependencies = append(btd.TaskDefinition.TopologicalDependencies, strings.TrimPrefix(dependency, topologicalPipelineDelimiter))
+ } else {
+ btd.TaskDefinition.TaskDependencies = append(btd.TaskDefinition.TaskDependencies, dependency)
+ }
+ }
+
+ sort.Strings(btd.TaskDefinition.TaskDependencies)
+ sort.Strings(btd.TaskDefinition.TopologicalDependencies)
+
+ // Append env key into EnvVarDependencies
+ if task.Env != nil {
+ btd.definedFields.Add("EnvVarDependencies")
+ if err := gatherEnvVars(task.Env, "env", &envVarDependencies); err != nil {
+ return err
+ }
+ }
+
+ btd.TaskDefinition.EnvVarDependencies = envVarDependencies.UnsafeListOfStrings()
+
+ sort.Strings(btd.TaskDefinition.EnvVarDependencies)
+
+ if task.PassthroughEnv != nil {
+ btd.experimentalFields.Add("PassthroughEnv")
+ if err := gatherEnvVars(task.PassthroughEnv, "experimentalPassthroughEnv", &envVarPassthroughs); err != nil {
+ return err
+ }
+ }
+
+ btd.experimental.PassthroughEnv = envVarPassthroughs.UnsafeListOfStrings()
+ sort.Strings(btd.experimental.PassthroughEnv)
+
+ if task.Inputs != nil {
+ // Note that we don't require Inputs to be sorted, we're going to
+ // hash the resulting files and sort that instead
+ btd.definedFields.Add("Inputs")
+ // TODO: during rust port, this should be moved to a post-parse validation step
+ for _, input := range task.Inputs {
+ if filepath.IsAbs(input) {
+ log.Printf("[WARNING] Using an absolute path in \"inputs\" (%v) will not work and will be an error in a future version", input)
+ }
+ }
+ btd.TaskDefinition.Inputs = task.Inputs
+ }
+
+ if task.OutputMode != nil {
+ btd.definedFields.Add("OutputMode")
+ btd.TaskDefinition.OutputMode = *task.OutputMode
+ }
+
+ if task.Persistent != nil {
+ btd.definedFields.Add("Persistent")
+ btd.TaskDefinition.Persistent = *task.Persistent
+ } else {
+ btd.TaskDefinition.Persistent = false
+ }
+ return nil
+}
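+
+// Example (illustrative sketch, in-package): "!"-prefixed globs are split
+// into exclusions, and both lists come back sorted.
+func exampleOutputsSplit() (TaskOutputs, error) {
+ var taskDef BookkeepingTaskDefinition
+ if err := json.Unmarshal([]byte(`{"outputs": ["dist/**", "!dist/assets/**", ".next/**"]}`), &taskDef); err != nil {
+ return TaskOutputs{}, err
+ }
+ // Inclusions: [".next/**", "dist/**"]; Exclusions: ["dist/assets/**"].
+ return taskDef.TaskDefinition.Outputs, nil
+}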
+
+// MarshalJSON serializes taskDefinitionHashable struct into json
+func (c taskDefinitionHashable) MarshalJSON() ([]byte, error) {
+ task := makeRawTask(
+ c.Persistent,
+ c.ShouldCache,
+ c.OutputMode,
+ c.Inputs,
+ c.Outputs,
+ c.EnvVarDependencies,
+ c.TaskDependencies,
+ c.TopologicalDependencies,
+ )
+ return json.Marshal(task)
+}
+
+// MarshalJSON serializes TaskDefinition struct into json
+func (c TaskDefinition) MarshalJSON() ([]byte, error) {
+ task := makeRawTask(
+ c.Persistent,
+ c.ShouldCache,
+ c.OutputMode,
+ c.Inputs,
+ c.Outputs,
+ c.EnvVarDependencies,
+ c.TaskDependencies,
+ c.TopologicalDependencies,
+ )
+
+ if len(c.PassthroughEnv) > 0 {
+ task.PassthroughEnv = append(task.PassthroughEnv, c.PassthroughEnv...)
+ }
+ sort.Strings(task.PassthroughEnv)
+
+ return json.Marshal(task)
+}
+
+// UnmarshalJSON deserializes the contents of turbo.json into a TurboJSON struct
+func (c *TurboJSON) UnmarshalJSON(data []byte) error {
+ raw := &rawTurboJSON{}
+ if err := json.Unmarshal(data, &raw); err != nil {
+ return err
+ }
+
+ envVarDependencies := make(util.Set)
+ envVarPassthroughs := make(util.Set)
+ globalFileDependencies := make(util.Set)
+
+ if err := gatherEnvVars(raw.GlobalEnv, "globalEnv", &envVarDependencies); err != nil {
+ return err
+ }
+ if err := gatherEnvVars(raw.GlobalPassthroughEnv, "experimentalGlobalPassThroughEnv", &envVarPassthroughs); err != nil {
+ return err
+ }
+
+ // TODO: In the rust port, warnings should be refactored to a post-parse validation step
+ for _, value := range raw.GlobalDependencies {
+ if strings.HasPrefix(value, envPipelineDelimiter) {
+ log.Printf("[DEPRECATED] Declaring an environment variable in \"globalDependencies\" is deprecated, found %s. Use the \"globalEnv\" key or use `npx @turbo/codemod migrate-env-var-dependencies`.\n", value)
+ envVarDependencies.Add(strings.TrimPrefix(value, envPipelineDelimiter))
+ } else {
+ if filepath.IsAbs(value) {
+ log.Printf("[WARNING] Using an absolute path in \"globalDependencies\" (%v) will not work and will be an error in a future version", value)
+ }
+ globalFileDependencies.Add(value)
+ }
+ }
+
+ // turn the set into an array and assign to the TurboJSON struct fields.
+ c.GlobalEnv = envVarDependencies.UnsafeListOfStrings()
+ sort.Strings(c.GlobalEnv)
+
+ if raw.GlobalPassthroughEnv != nil {
+ c.GlobalPassthroughEnv = envVarPassthroughs.UnsafeListOfStrings()
+ sort.Strings(c.GlobalPassthroughEnv)
+ }
+
+ c.GlobalDeps = globalFileDependencies.UnsafeListOfStrings()
+ sort.Strings(c.GlobalDeps)
+
+ // copy these over, we don't need any changes here.
+ c.Pipeline = raw.Pipeline
+ c.RemoteCacheOptions = raw.RemoteCacheOptions
+ c.Extends = raw.Extends
+
+ return nil
+}
+
+// MarshalJSON converts a TurboJSON into the equivalent json object in bytes
+// note: we go via pristineTurboJSON so that the output format is correct.
+// This is used by `turbo prune` to generate a pruned turbo.json
+// and also by --summarize & --dry=json to serialize the known config
+// into something we can print to screen
+func (c *TurboJSON) MarshalJSON() ([]byte, error) {
+ raw := pristineTurboJSON{}
+ raw.GlobalDependencies = c.GlobalDeps
+ raw.GlobalEnv = c.GlobalEnv
+ raw.GlobalPassthroughEnv = c.GlobalPassthroughEnv
+ raw.Pipeline = c.Pipeline.Pristine()
+ raw.RemoteCacheOptions = c.RemoteCacheOptions
+
+ return json.Marshal(&raw)
+}
+
+func makeRawTask(persistent bool, shouldCache bool, outputMode util.TaskOutputMode, inputs []string, outputs TaskOutputs, envVarDependencies []string, taskDependencies []string, topologicalDependencies []string) *rawTaskWithDefaults {
+ // Initialize with empty arrays, so we get empty arrays serialized into JSON
+ task := &rawTaskWithDefaults{
+ Outputs: []string{},
+ Inputs: []string{},
+ Env: []string{},
+ PassthroughEnv: []string{},
+ DependsOn: []string{},
+ }
+
+ task.Persistent = persistent
+ task.Cache = &shouldCache
+ task.OutputMode = outputMode
+
+ if len(inputs) > 0 {
+ task.Inputs = inputs
+ }
+
+ if len(envVarDependencies) > 0 {
+ task.Env = append(task.Env, envVarDependencies...)
+ }
+
+ if len(outputs.Inclusions) > 0 {
+ task.Outputs = append(task.Outputs, outputs.Inclusions...)
+ }
+
+ for _, i := range outputs.Exclusions {
+ task.Outputs = append(task.Outputs, "!"+i)
+ }
+
+ if len(taskDependencies) > 0 {
+ task.DependsOn = append(task.DependsOn, taskDependencies...)
+ }
+
+ for _, i := range topologicalDependencies {
+ task.DependsOn = append(task.DependsOn, "^"+i)
+ }
+
+ // These _should_ already be sorted when the TaskDefinition struct was unmarshaled,
+ // but we want to ensure they're sorted on the way out also, just in case something
+ // in the middle mutates the items.
+ sort.Strings(task.DependsOn)
+ sort.Strings(task.Outputs)
+ sort.Strings(task.Env)
+ sort.Strings(task.Inputs)
+ return task
+}
+
+// gatherEnvVars puts env vars into the provided set as long as they don't have an invalid value.
+func gatherEnvVars(vars []string, key string, into *util.Set) error {
+ for _, value := range vars {
+ if strings.HasPrefix(value, envPipelineDelimiter) {
+ // Hard error to help people specify this correctly during migration.
+ // TODO: Remove this error after we have run summary.
+ return fmt.Errorf("You specified \"%s\" in the \"%s\" key. You should not prefix your environment variables with \"%s\"", value, key, envPipelineDelimiter)
+ }
+
+ into.Add(value)
+ }
+
+ return nil
+}
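+
+// Example (illustrative sketch, in-package): a "$"-prefixed entry in an env
+// key is a hard error directing the user to the un-prefixed form.
+func exampleGatherEnvVars() error {
+ envVars := make(util.Set)
+ // Returns: You specified "$BAR" in the "env" key. You should not prefix
+ // your environment variables with "$"
+ return gatherEnvVars([]string{"FOO", "$BAR"}, "env", &envVars)
+}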
diff --git a/cli/internal/fs/turbo_json_test.go b/cli/internal/fs/turbo_json_test.go
new file mode 100644
index 0000000..1d384d5
--- /dev/null
+++ b/cli/internal/fs/turbo_json_test.go
@@ -0,0 +1,277 @@
+package fs
+
+import (
+ "os"
+ "reflect"
+ "sort"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "github.com/vercel/turbo/cli/internal/util"
+ "gotest.tools/v3/assert/cmp"
+)
+
+func assertIsSorted(t *testing.T, arr []string, msg string) {
+ t.Helper()
+ if arr == nil {
+ return
+ }
+
+ copied := make([]string, len(arr))
+ copy(copied, arr)
+ sort.Strings(copied)
+ if !reflect.DeepEqual(arr, copied) {
+ t.Errorf("Expected sorted, got %v: %v", arr, msg)
+ }
+}
+
+func Test_ReadTurboConfig(t *testing.T) {
+ testDir := getTestDir(t, "correct")
+ turboJSON, turboJSONReadErr := readTurboConfig(testDir.UntypedJoin("turbo.json"))
+
+ if turboJSONReadErr != nil {
+ t.Fatalf("invalid parse: %#v", turboJSONReadErr)
+ }
+
+ assert.EqualValues(t, []string{"AWS_SECRET_KEY"}, turboJSON.GlobalPassthroughEnv)
+
+ pipelineExpected := map[string]BookkeepingTaskDefinition{
+ "build": {
+ definedFields: util.SetFromStrings([]string{"Outputs", "OutputMode", "DependsOn"}),
+ experimentalFields: util.SetFromStrings([]string{"PassthroughEnv"}),
+ experimental: taskDefinitionExperiments{
+ PassthroughEnv: []string{"GITHUB_TOKEN"},
+ },
+ TaskDefinition: taskDefinitionHashable{
+ Outputs: TaskOutputs{Inclusions: []string{".next/**", "dist/**"}, Exclusions: []string{"dist/assets/**"}},
+ TopologicalDependencies: []string{"build"},
+ EnvVarDependencies: []string{},
+ TaskDependencies: []string{},
+ ShouldCache: true,
+ OutputMode: util.NewTaskOutput,
+ },
+ },
+ "lint": {
+ definedFields: util.SetFromStrings([]string{"Outputs", "OutputMode", "ShouldCache", "DependsOn"}),
+ experimentalFields: util.SetFromStrings([]string{}),
+ experimental: taskDefinitionExperiments{
+ PassthroughEnv: []string{},
+ },
+ TaskDefinition: taskDefinitionHashable{
+ Outputs: TaskOutputs{},
+ TopologicalDependencies: []string{},
+ EnvVarDependencies: []string{"MY_VAR"},
+ TaskDependencies: []string{},
+ ShouldCache: true,
+ OutputMode: util.NewTaskOutput,
+ },
+ },
+ "dev": {
+ definedFields: util.SetFromStrings([]string{"OutputMode", "ShouldCache"}),
+ experimentalFields: util.SetFromStrings([]string{}),
+ experimental: taskDefinitionExperiments{
+ PassthroughEnv: []string{},
+ },
+ TaskDefinition: taskDefinitionHashable{
+ Outputs: TaskOutputs{},
+ TopologicalDependencies: []string{},
+ EnvVarDependencies: []string{},
+ TaskDependencies: []string{},
+ ShouldCache: false,
+ OutputMode: util.FullTaskOutput,
+ },
+ },
+ "publish": {
+ definedFields: util.SetFromStrings([]string{"Inputs", "Outputs", "DependsOn", "ShouldCache"}),
+ experimentalFields: util.SetFromStrings([]string{}),
+ experimental: taskDefinitionExperiments{
+ PassthroughEnv: []string{},
+ },
+ TaskDefinition: taskDefinitionHashable{
+ Outputs: TaskOutputs{Inclusions: []string{"dist/**"}},
+ TopologicalDependencies: []string{"build", "publish"},
+ EnvVarDependencies: []string{},
+ TaskDependencies: []string{"admin#lint", "build"},
+ ShouldCache: false,
+ Inputs: []string{"build/**/*"},
+ OutputMode: util.FullTaskOutput,
+ },
+ },
+ }
+
+ validateOutput(t, turboJSON, pipelineExpected)
+ remoteCacheOptionsExpected := RemoteCacheOptions{"team_id", true}
+ assert.EqualValues(t, remoteCacheOptionsExpected, turboJSON.RemoteCacheOptions)
+}
+
+func Test_LoadTurboConfig_Legacy(t *testing.T) {
+ testDir := getTestDir(t, "legacy-only")
+ packageJSONPath := testDir.UntypedJoin("package.json")
+ rootPackageJSON, pkgJSONReadErr := ReadPackageJSON(packageJSONPath)
+
+ if pkgJSONReadErr != nil {
+ t.Fatalf("invalid parse: %#v", pkgJSONReadErr)
+ }
+
+ _, turboJSONReadErr := LoadTurboConfig(testDir, rootPackageJSON, false)
+ expectedErrorMsg := "Could not find turbo.json. Follow directions at https://turbo.build/repo/docs to create one: file does not exist"
+ assert.EqualErrorf(t, turboJSONReadErr, expectedErrorMsg, "Error should be: %v, got: %v", expectedErrorMsg, turboJSONReadErr)
+}
+
+func Test_LoadTurboConfig_BothCorrectAndLegacy(t *testing.T) {
+ testDir := getTestDir(t, "both")
+
+ packageJSONPath := testDir.UntypedJoin("package.json")
+ rootPackageJSON, pkgJSONReadErr := ReadPackageJSON(packageJSONPath)
+
+ if pkgJSONReadErr != nil {
+ t.Fatalf("invalid parse: %#v", pkgJSONReadErr)
+ }
+
+ turboJSON, turboJSONReadErr := LoadTurboConfig(testDir, rootPackageJSON, false)
+
+ if turboJSONReadErr != nil {
+ t.Fatalf("invalid parse: %#v", turboJSONReadErr)
+ }
+
+ pipelineExpected := map[string]BookkeepingTaskDefinition{
+ "build": {
+ definedFields: util.SetFromStrings([]string{"Outputs", "OutputMode", "DependsOn"}),
+ experimentalFields: util.SetFromStrings([]string{}),
+ experimental: taskDefinitionExperiments{
+ PassthroughEnv: []string{},
+ },
+ TaskDefinition: taskDefinitionHashable{
+ Outputs: TaskOutputs{Inclusions: []string{".next/**", "dist/**"}, Exclusions: []string{"dist/assets/**"}},
+ TopologicalDependencies: []string{"build"},
+ EnvVarDependencies: []string{},
+ TaskDependencies: []string{},
+ ShouldCache: true,
+ OutputMode: util.NewTaskOutput,
+ },
+ },
+ }
+
+ validateOutput(t, turboJSON, pipelineExpected)
+
+ remoteCacheOptionsExpected := RemoteCacheOptions{"team_id", true}
+ assert.EqualValues(t, remoteCacheOptionsExpected, turboJSON.RemoteCacheOptions)
+ assert.Equal(t, rootPackageJSON.LegacyTurboConfig == nil, true)
+}
+
+func Test_ReadTurboConfig_InvalidEnvDeclarations1(t *testing.T) {
+ testDir := getTestDir(t, "invalid-env-1")
+ _, turboJSONReadErr := readTurboConfig(testDir.UntypedJoin("turbo.json"))
+
+ expectedErrorMsg := "turbo.json: You specified \"$A\" in the \"env\" key. You should not prefix your environment variables with \"$\""
+ assert.EqualErrorf(t, turboJSONReadErr, expectedErrorMsg, "Error should be: %v, got: %v", expectedErrorMsg, turboJSONReadErr)
+}
+
+func Test_ReadTurboConfig_InvalidEnvDeclarations2(t *testing.T) {
+ testDir := getTestDir(t, "invalid-env-2")
+ _, turboJSONReadErr := readTurboConfig(testDir.UntypedJoin("turbo.json"))
+ expectedErrorMsg := "turbo.json: You specified \"$A\" in the \"env\" key. You should not prefix your environment variables with \"$\""
+ assert.EqualErrorf(t, turboJSONReadErr, expectedErrorMsg, "Error should be: %v, got: %v", expectedErrorMsg, turboJSONReadErr)
+}
+
+func Test_ReadTurboConfig_InvalidGlobalEnvDeclarations(t *testing.T) {
+ testDir := getTestDir(t, "invalid-global-env")
+ _, turboJSONReadErr := readTurboConfig(testDir.UntypedJoin("turbo.json"))
+ expectedErrorMsg := "turbo.json: You specified \"$QUX\" in the \"globalEnv\" key. You should not prefix your environment variables with \"$\""
+ assert.EqualErrorf(t, turboJSONReadErr, expectedErrorMsg, "Error should be: %v, got: %v", expectedErrorMsg, turboJSONReadErr)
+}
+
+func Test_ReadTurboConfig_EnvDeclarations(t *testing.T) {
+ testDir := getTestDir(t, "legacy-env")
+ turboJSON, turboJSONReadErr := readTurboConfig(testDir.UntypedJoin("turbo.json"))
+
+ if turboJSONReadErr != nil {
+ t.Fatalf("invalid parse: %#v", turboJSONReadErr)
+ }
+
+ pipeline := turboJSON.Pipeline
+ assert.EqualValues(t, pipeline["task1"].TaskDefinition.EnvVarDependencies, sortedArray([]string{"A"}))
+ assert.EqualValues(t, pipeline["task2"].TaskDefinition.EnvVarDependencies, sortedArray([]string{"A"}))
+ assert.EqualValues(t, pipeline["task3"].TaskDefinition.EnvVarDependencies, sortedArray([]string{"A"}))
+ assert.EqualValues(t, pipeline["task4"].TaskDefinition.EnvVarDependencies, sortedArray([]string{"A", "B"}))
+ assert.EqualValues(t, pipeline["task6"].TaskDefinition.EnvVarDependencies, sortedArray([]string{"A", "B", "C", "D", "E", "F"}))
+ assert.EqualValues(t, pipeline["task7"].TaskDefinition.EnvVarDependencies, sortedArray([]string{"A", "B", "C"}))
+ assert.EqualValues(t, pipeline["task8"].TaskDefinition.EnvVarDependencies, sortedArray([]string{"A", "B", "C"}))
+ assert.EqualValues(t, pipeline["task9"].TaskDefinition.EnvVarDependencies, sortedArray([]string{"A"}))
+ assert.EqualValues(t, pipeline["task10"].TaskDefinition.EnvVarDependencies, sortedArray([]string{"A"}))
+ assert.EqualValues(t, pipeline["task11"].TaskDefinition.EnvVarDependencies, sortedArray([]string{"A", "B"}))
+
+ // check global env vars also
+ assert.EqualValues(t, sortedArray([]string{"FOO", "BAR", "BAZ", "QUX"}), sortedArray(turboJSON.GlobalEnv))
+ assert.EqualValues(t, sortedArray([]string{"somefile.txt"}), sortedArray(turboJSON.GlobalDeps))
+}
+
+func Test_TaskOutputsSort(t *testing.T) {
+ inclusions := []string{"foo/**", "bar"}
+ exclusions := []string{"special-file", ".hidden/**"}
+ taskOutputs := TaskOutputs{Inclusions: inclusions, Exclusions: exclusions}
+ sortedOutputs := taskOutputs.Sort()
+ assertIsSorted(t, sortedOutputs.Inclusions, "Inclusions")
+ assertIsSorted(t, sortedOutputs.Exclusions, "Exclusions")
+ assert.False(t, cmp.DeepEqual(taskOutputs, sortedOutputs)().Success())
+}
+
+// Helpers
+func validateOutput(t *testing.T, turboJSON *TurboJSON, expectedPipeline Pipeline) {
+ t.Helper()
+ assertIsSorted(t, turboJSON.GlobalDeps, "Global Deps")
+ assertIsSorted(t, turboJSON.GlobalEnv, "Global Env")
+ validatePipeline(t, turboJSON.Pipeline, expectedPipeline)
+}
+
+func validatePipeline(t *testing.T, actual Pipeline, expected Pipeline) {
+ t.Helper()
+ // check top level keys
+ if len(actual) != len(expected) {
+ expectedKeys := []string{}
+ for k := range expected {
+ expectedKeys = append(expectedKeys, k)
+ }
+ actualKeys := []string{}
+ for k := range actual {
+ actualKeys = append(actualKeys, k)
+ }
+ t.Errorf("pipeline tasks mismatch. got %v, want %v", strings.Join(actualKeys, ","), strings.Join(expectedKeys, ","))
+ }
+
+ // check individual task definitions
+ for taskName, expectedTaskDefinition := range expected {
+ bookkeepingTaskDef, ok := actual[taskName]
+ if !ok {
+ t.Errorf("missing expected task: %v", taskName)
+ }
+ actualTaskDefinition := bookkeepingTaskDef.GetTaskDefinition()
+ assertIsSorted(t, actualTaskDefinition.Outputs.Inclusions, "Task output inclusions")
+ assertIsSorted(t, actualTaskDefinition.Outputs.Exclusions, "Task output exclusions")
+ assertIsSorted(t, actualTaskDefinition.EnvVarDependencies, "Task env vars")
+ assertIsSorted(t, actualTaskDefinition.PassthroughEnv, "Task passthrough env vars")
+ assertIsSorted(t, actualTaskDefinition.TopologicalDependencies, "Topo deps")
+ assertIsSorted(t, actualTaskDefinition.TaskDependencies, "Task deps")
+ assert.EqualValuesf(t, expectedTaskDefinition, bookkeepingTaskDef, "task definition mismatch for %v", taskName)
+ }
+}
+
+func getTestDir(t *testing.T, testName string) turbopath.AbsoluteSystemPath {
+ defaultCwd, err := os.Getwd()
+ if err != nil {
+ t.Errorf("failed to get cwd: %v", err)
+ }
+ cwd, err := CheckedToAbsoluteSystemPath(defaultCwd)
+ if err != nil {
+ t.Fatalf("cwd is not an absolute directory %v: %v", defaultCwd, err)
+ }
+
+ return cwd.UntypedJoin("testdata", testName)
+}
+
+func sortedArray(arr []string) []string {
+ sort.Strings(arr)
+ return arr
+}
diff --git a/cli/internal/globby/globby.go b/cli/internal/globby/globby.go
new file mode 100644
index 0000000..14c40d9
--- /dev/null
+++ b/cli/internal/globby/globby.go
@@ -0,0 +1,187 @@
+package globby
+
+import (
+ "fmt"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ iofs "io/fs"
+
+ "github.com/vercel/turbo/cli/internal/fs"
+
+ "github.com/vercel/turbo/cli/internal/doublestar"
+ "github.com/vercel/turbo/cli/internal/util"
+)
+
+// GlobAll returns an array of files and folders that match the specified set of glob patterns.
+// The returned files and folders are absolute paths, assuming that basePath is an absolute path.
+func GlobAll(basePath string, includePatterns []string, excludePatterns []string) ([]string, error) {
+ fsys := fs.CreateDirFSAtRoot(basePath)
+ fsysRoot := fs.GetDirFSRootPath(fsys)
+ output, err := globAllFs(fsys, fsysRoot, basePath, includePatterns, excludePatterns)
+
+ // Because this is coming out of a map, the output is in no way ordered.
+ // Sorting will put the files in a depth-first order.
+ sort.Strings(output)
+ return output, err
+}
+
+// GlobFiles returns an array of files that match the specified set of glob patterns.
+// The returned files are absolute paths, assuming that basePath is an absolute path.
+func GlobFiles(basePath string, includePatterns []string, excludePatterns []string) ([]string, error) {
+ fsys := fs.CreateDirFSAtRoot(basePath)
+ fsysRoot := fs.GetDirFSRootPath(fsys)
+ output, err := globFilesFs(fsys, fsysRoot, basePath, includePatterns, excludePatterns)
+
+ // Because this is coming out of a map, the output is in no way ordered.
+ // Sorting will put the files in a depth-first order.
+ sort.Strings(output)
+ return output, err
+}
+
+// checkRelativePath ensures that the requested file path is a child of `from`.
+func checkRelativePath(from string, to string) error {
+ relativePath, err := filepath.Rel(from, to)
+
+ if err != nil {
+ return err
+ }
+
+ if strings.HasPrefix(relativePath, "..") {
+ return fmt.Errorf("the path you are attempting to specify (%s) is outside of the root", to)
+ }
+
+ return nil
+}
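+
+// Example (illustrative sketch with hypothetical paths): traversal outside
+// the base path is rejected before any filesystem work happens.
+func exampleCheckRelativePath() (error, error) {
+ inside := checkRelativePath("/repo", "/repo/dist/out.txt") // nil
+ outside := checkRelativePath("/repo", "/etc/passwd")       // "... outside of the root"
+ return inside, outside
+}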
+
+// globFilesFs searches the specified file system to enumerate all files to include.
+func globFilesFs(fsys iofs.FS, fsysRoot string, basePath string, includePatterns []string, excludePatterns []string) ([]string, error) {
+ return globWalkFs(fsys, fsysRoot, basePath, includePatterns, excludePatterns, false)
+}
+
+// globAllFs searches the specified file system to enumerate all files to include.
+func globAllFs(fsys iofs.FS, fsysRoot string, basePath string, includePatterns []string, excludePatterns []string) ([]string, error) {
+ return globWalkFs(fsys, fsysRoot, basePath, includePatterns, excludePatterns, true)
+}
+
+// globWalkFs searches the specified file system to enumerate all files and folders to include.
+func globWalkFs(fsys iofs.FS, fsysRoot string, basePath string, includePatterns []string, excludePatterns []string, includeDirs bool) ([]string, error) {
+ var processedIncludes []string
+ var processedExcludes []string
+ result := make(util.Set)
+
+ for _, includePattern := range includePatterns {
+ includePath := filepath.Join(basePath, includePattern)
+ err := checkRelativePath(basePath, includePath)
+
+ if err != nil {
+ return nil, err
+ }
+
+ // fs.FS paths may not include leading separators. Calculate the
+ // correct path for this relative to the filesystem root.
+ // This will not error as it follows the call to checkRelativePath.
+ iofsRelativePath, _ := fs.IofsRelativePath(fsysRoot, includePath)
+
+ // Includes only operate on files.
+ processedIncludes = append(processedIncludes, iofsRelativePath)
+ }
+
+ for _, excludePattern := range excludePatterns {
+ excludePath := filepath.Join(basePath, excludePattern)
+ err := checkRelativePath(basePath, excludePath)
+
+ if err != nil {
+ return nil, err
+ }
+
+ // fs.FS paths may not include leading separators. Calculate the
+ // correct path for this relative to the filesystem root.
+ // This will not error as it follows the call to checkRelativePath.
+ iofsRelativePath, _ := fs.IofsRelativePath(fsysRoot, excludePath)
+
+ // In case this is a file pattern and not a directory, add the exact pattern.
+ // If the user already specified a trailing "/**", skip the exact pattern to
+ // avoid duplicating the recursive exclude appended below.
+ if !strings.HasSuffix(iofsRelativePath, string(filepath.Separator)+"**") {
+ processedExcludes = append(processedExcludes, iofsRelativePath)
+ }
+ // TODO: we need to either document or change this behavior
+ // Excludes operate on entire folders, so we also exclude everything under this in case it represents a directory
+ processedExcludes = append(processedExcludes, filepath.Join(iofsRelativePath, "**"))
+ }
+
+ // We start from a naive includePattern
+ includePattern := ""
+ includeCount := len(processedIncludes)
+
+ // Do not use alternation if unnecessary.
+ if includeCount == 1 {
+ includePattern = processedIncludes[0]
+ } else if includeCount > 1 {
+ // We use alternation from the very root of the path. This avoids fs.Stat of the basePath.
+ includePattern = "{" + strings.Join(processedIncludes, ",") + "}"
+ }
+
+ // We start with an empty string excludePattern which we only use if excludeCount > 0.
+ excludePattern := ""
+ excludeCount := len(processedExcludes)
+
+ // Do not use alternation if unnecessary.
+ if excludeCount == 1 {
+ excludePattern = processedExcludes[0]
+ } else if excludeCount > 1 {
+ // We use alternation from the very root of the path. This avoids fs.Stat of the basePath.
+ excludePattern = "{" + strings.Join(processedExcludes, ",") + "}"
+ }
+
+ // GlobWalk expects that everything uses Unix path conventions.
+ includePattern = filepath.ToSlash(includePattern)
+ excludePattern = filepath.ToSlash(excludePattern)
+
+ err := doublestar.GlobWalk(fsys, includePattern, func(path string, dirEntry iofs.DirEntry) error {
+ if !includeDirs && dirEntry.IsDir() {
+ return nil
+ }
+
+ // All files that are returned by doublestar.GlobWalk are relative to
+ // the fsys root. Go, however, has decided that `fs.FS` filesystems do
+ // not address the root of the file system using `/` and instead use
+ // paths without leading separators.
+ //
+ // We need to track where the `fsys` root is so that when we hand paths back
+ // we hand them back as the path addressable in the actual OS filesystem.
+ //
+ // As a consequence, when processing, we need to *restore* the original
+ // root to the file path after returning. This works because when we create
+ // the `os.dirFS` filesystem we do so at the root of the current volume.
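+ // For example (hypothetically): fsysRoot "/" plus the walked path
+ // "repo/src/index.ts" is handed back as "/repo/src/index.ts".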
+ if excludeCount == 0 {
+ // Reconstruct via string concatenation since the root is already pre-composed.
+ result.Add(fsysRoot + path)
+ return nil
+ }
+
+ isExcluded, err := doublestar.Match(excludePattern, filepath.ToSlash(path))
+ if err != nil {
+ return err
+ }
+
+ if !isExcluded {
+ // Reconstruct via string concatenation since the root is already pre-composed.
+ result.Add(fsysRoot + path)
+ }
+
+ return nil
+ })
+
+ // GlobWalk returned an error.
+ if err != nil {
+ return nil, err
+ }
+
+ // Never capture the root folder itself: the glob rewriting above
+ // could otherwise cause it to appear in the results.
+ result.Delete(strings.TrimSuffix(basePath, "/"))
+
+ return result.UnsafeListOfStrings(), nil
+}
diff --git a/cli/internal/globby/globby_test.go b/cli/internal/globby/globby_test.go
new file mode 100644
index 0000000..2fdd613
--- /dev/null
+++ b/cli/internal/globby/globby_test.go
@@ -0,0 +1,832 @@
+package globby
+
+import (
+ "io/fs"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "testing"
+
+ "testing/fstest"
+)
+
+// setup prepares the test file system contents and returns the file system.
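+// The fsysRoot argument is currently unused: fstest.MapFS paths are rooted
+// implicitly, so only the stripped relative paths matter here.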
+func setup(fsysRoot string, files []string) fs.FS {
+ fsys := fstest.MapFS{}
+ for _, file := range files {
+ // We're populating a `fs.FS` filesystem which requires paths to have no
+ // leading slash. As a consequence we strip it during creation.
+ iofsRelativePath := file[1:]
+
+ fsys[iofsRelativePath] = &fstest.MapFile{Mode: 0666}
+ }
+
+ return fsys
+}
+
+func TestGlobFilesFs(t *testing.T) {
+ type args struct {
+ basePath string
+ includePatterns []string
+ excludePatterns []string
+ }
+ tests := []struct {
+ name string
+ files []string
+ args args
+ wantAll []string
+ wantFiles []string
+ wantErr bool
+ }{
+ {
+ name: "hello world",
+ files: []string{"/test.txt"},
+ args: args{
+ basePath: "/",
+ includePatterns: []string{"*.txt"},
+ excludePatterns: []string{},
+ },
+ wantAll: []string{"/test.txt"},
+ wantFiles: []string{"/test.txt"},
+ },
+ {
+ name: "bullet files",
+ files: []string{
+ "/test.txt",
+ "/subdir/test.txt",
+ "/other/test.txt",
+ },
+ args: args{
+ basePath: "/",
+ includePatterns: []string{"subdir/test.txt", "test.txt"},
+ excludePatterns: []string{},
+ },
+ wantAll: []string{
+ "/subdir/test.txt",
+ "/test.txt",
+ },
+ wantFiles: []string{
+ "/subdir/test.txt",
+ "/test.txt",
+ },
+ },
+ {
+ name: "finding workspace package.json files",
+ files: []string{
+ "/external/file.txt",
+ "/repos/some-app/apps/docs/package.json",
+ "/repos/some-app/apps/web/package.json",
+ "/repos/some-app/bower_components/readline/package.json",
+ "/repos/some-app/examples/package.json",
+ "/repos/some-app/node_modules/gulp/bower_components/readline/package.json",
+ "/repos/some-app/node_modules/react/package.json",
+ "/repos/some-app/package.json",
+ "/repos/some-app/packages/colors/package.json",
+ "/repos/some-app/packages/faker/package.json",
+ "/repos/some-app/packages/left-pad/package.json",
+ "/repos/some-app/test/mocks/kitchen-sink/package.json",
+ "/repos/some-app/tests/mocks/kitchen-sink/package.json",
+ },
+ args: args{
+ basePath: "/repos/some-app/",
+ includePatterns: []string{"packages/*/package.json", "apps/*/package.json"},
+ excludePatterns: []string{"**/node_modules/", "**/bower_components/", "**/test/", "**/tests/"},
+ },
+ wantAll: []string{
+ "/repos/some-app/apps/docs/package.json",
+ "/repos/some-app/apps/web/package.json",
+ "/repos/some-app/packages/colors/package.json",
+ "/repos/some-app/packages/faker/package.json",
+ "/repos/some-app/packages/left-pad/package.json",
+ },
+ wantFiles: []string{
+ "/repos/some-app/apps/docs/package.json",
+ "/repos/some-app/apps/web/package.json",
+ "/repos/some-app/packages/colors/package.json",
+ "/repos/some-app/packages/faker/package.json",
+ "/repos/some-app/packages/left-pad/package.json",
+ },
+ },
+ {
+ name: "excludes unexpected workspace package.json files",
+ files: []string{
+ "/external/file.txt",
+ "/repos/some-app/apps/docs/package.json",
+ "/repos/some-app/apps/web/package.json",
+ "/repos/some-app/bower_components/readline/package.json",
+ "/repos/some-app/examples/package.json",
+ "/repos/some-app/node_modules/gulp/bower_components/readline/package.json",
+ "/repos/some-app/node_modules/react/package.json",
+ "/repos/some-app/package.json",
+ "/repos/some-app/packages/colors/package.json",
+ "/repos/some-app/packages/faker/package.json",
+ "/repos/some-app/packages/left-pad/package.json",
+ "/repos/some-app/test/mocks/spanish-inquisition/package.json",
+ "/repos/some-app/tests/mocks/spanish-inquisition/package.json",
+ },
+ args: args{
+ basePath: "/repos/some-app/",
+ includePatterns: []string{"**/package.json"},
+ excludePatterns: []string{"**/node_modules/", "**/bower_components/", "**/test/", "**/tests/"},
+ },
+ wantAll: []string{
+ "/repos/some-app/apps/docs/package.json",
+ "/repos/some-app/apps/web/package.json",
+ "/repos/some-app/examples/package.json",
+ "/repos/some-app/package.json",
+ "/repos/some-app/packages/colors/package.json",
+ "/repos/some-app/packages/faker/package.json",
+ "/repos/some-app/packages/left-pad/package.json",
+ },
+ wantFiles: []string{
+ "/repos/some-app/apps/docs/package.json",
+ "/repos/some-app/apps/web/package.json",
+ "/repos/some-app/examples/package.json",
+ "/repos/some-app/package.json",
+ "/repos/some-app/packages/colors/package.json",
+ "/repos/some-app/packages/faker/package.json",
+ "/repos/some-app/packages/left-pad/package.json",
+ },
+ },
+ {
+ name: "nested packages work",
+ files: []string{
+ "/external/file.txt",
+ "/repos/some-app/apps/docs/package.json",
+ "/repos/some-app/apps/web/package.json",
+ "/repos/some-app/bower_components/readline/package.json",
+ "/repos/some-app/examples/package.json",
+ "/repos/some-app/node_modules/gulp/bower_components/readline/package.json",
+ "/repos/some-app/node_modules/react/package.json",
+ "/repos/some-app/package.json",
+ "/repos/some-app/packages/xzibit/package.json",
+ "/repos/some-app/packages/xzibit/node_modules/street-legal/package.json",
+ "/repos/some-app/packages/xzibit/node_modules/paint-colors/package.json",
+ "/repos/some-app/packages/xzibit/packages/yo-dawg/package.json",
+ "/repos/some-app/packages/xzibit/packages/yo-dawg/node_modules/meme/package.json",
+ "/repos/some-app/packages/xzibit/packages/yo-dawg/node_modules/yo-dawg/package.json",
+ "/repos/some-app/packages/colors/package.json",
+ "/repos/some-app/packages/faker/package.json",
+ "/repos/some-app/packages/left-pad/package.json",
+ "/repos/some-app/test/mocks/spanish-inquisition/package.json",
+ "/repos/some-app/tests/mocks/spanish-inquisition/package.json",
+ },
+ args: args{
+ basePath: "/repos/some-app/",
+ includePatterns: []string{"packages/**/package.json"},
+ excludePatterns: []string{"**/node_modules/", "**/bower_components/", "**/test/", "**/tests/"},
+ },
+ wantAll: []string{
+ "/repos/some-app/packages/colors/package.json",
+ "/repos/some-app/packages/faker/package.json",
+ "/repos/some-app/packages/left-pad/package.json",
+ "/repos/some-app/packages/xzibit/package.json",
+ "/repos/some-app/packages/xzibit/packages/yo-dawg/package.json",
+ },
+ wantFiles: []string{
+ "/repos/some-app/packages/colors/package.json",
+ "/repos/some-app/packages/faker/package.json",
+ "/repos/some-app/packages/left-pad/package.json",
+ "/repos/some-app/packages/xzibit/package.json",
+ "/repos/some-app/packages/xzibit/packages/yo-dawg/package.json",
+ },
+ },
+ {
+ name: "includes do not override excludes",
+ files: []string{
+ "/external/file.txt",
+ "/repos/some-app/apps/docs/package.json",
+ "/repos/some-app/apps/web/package.json",
+ "/repos/some-app/bower_components/readline/package.json",
+ "/repos/some-app/examples/package.json",
+ "/repos/some-app/node_modules/gulp/bower_components/readline/package.json",
+ "/repos/some-app/node_modules/react/package.json",
+ "/repos/some-app/package.json",
+ "/repos/some-app/packages/xzibit/package.json",
+ "/repos/some-app/packages/xzibit/node_modules/street-legal/package.json",
+ "/repos/some-app/packages/xzibit/node_modules/paint-colors/package.json",
+ "/repos/some-app/packages/xzibit/packages/yo-dawg/package.json",
+ "/repos/some-app/packages/xzibit/packages/yo-dawg/node_modules/meme/package.json",
+ "/repos/some-app/packages/xzibit/packages/yo-dawg/node_modules/yo-dawg/package.json",
+ "/repos/some-app/packages/colors/package.json",
+ "/repos/some-app/packages/faker/package.json",
+ "/repos/some-app/packages/left-pad/package.json",
+ "/repos/some-app/test/mocks/spanish-inquisition/package.json",
+ "/repos/some-app/tests/mocks/spanish-inquisition/package.json",
+ },
+ args: args{
+ basePath: "/repos/some-app/",
+ includePatterns: []string{"packages/**/package.json", "tests/mocks/*/package.json"},
+ excludePatterns: []string{"**/node_modules/", "**/bower_components/", "**/test/", "**/tests/"},
+ },
+ wantAll: []string{
+ "/repos/some-app/packages/colors/package.json",
+ "/repos/some-app/packages/faker/package.json",
+ "/repos/some-app/packages/left-pad/package.json",
+ "/repos/some-app/packages/xzibit/package.json",
+ "/repos/some-app/packages/xzibit/packages/yo-dawg/package.json",
+ },
+ wantFiles: []string{
+ "/repos/some-app/packages/colors/package.json",
+ "/repos/some-app/packages/faker/package.json",
+ "/repos/some-app/packages/left-pad/package.json",
+ "/repos/some-app/packages/xzibit/package.json",
+ "/repos/some-app/packages/xzibit/packages/yo-dawg/package.json",
+ },
+ },
+ {
+ name: "output globbing grabs the desired content",
+ files: []string{
+ "/external/file.txt",
+ "/repos/some-app/src/index.js",
+ "/repos/some-app/public/src/css/index.css",
+ "/repos/some-app/.turbo/turbo-build.log",
+ "/repos/some-app/.turbo/somebody-touched-this-file-into-existence.txt",
+ "/repos/some-app/.next/log.txt",
+ "/repos/some-app/.next/cache/db6a76a62043520e7aaadd0bb2104e78.txt",
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ "/repos/some-app/public/dist/css/index.css",
+ "/repos/some-app/public/dist/images/rick_astley.jpg",
+ },
+ args: args{
+ basePath: "/repos/some-app/",
+ includePatterns: []string{".turbo/turbo-build.log", "dist/**", ".next/**", "public/dist/**"},
+ excludePatterns: []string{},
+ },
+ wantAll: []string{
+ "/repos/some-app/.next",
+ "/repos/some-app/.next/cache",
+ "/repos/some-app/.next/cache/db6a76a62043520e7aaadd0bb2104e78.txt",
+ "/repos/some-app/.next/log.txt",
+ "/repos/some-app/.turbo/turbo-build.log",
+ "/repos/some-app/dist",
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ "/repos/some-app/public/dist",
+ "/repos/some-app/public/dist/css",
+ "/repos/some-app/public/dist/css/index.css",
+ "/repos/some-app/public/dist/images",
+ "/repos/some-app/public/dist/images/rick_astley.jpg",
+ },
+ wantFiles: []string{
+ "/repos/some-app/.next/cache/db6a76a62043520e7aaadd0bb2104e78.txt",
+ "/repos/some-app/.next/log.txt",
+ "/repos/some-app/.turbo/turbo-build.log",
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ "/repos/some-app/public/dist/css/index.css",
+ "/repos/some-app/public/dist/images/rick_astley.jpg",
+ },
+ },
+ {
+ name: "passing ** captures all children",
+ files: []string{
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ },
+ args: args{
+ basePath: "/repos/some-app/",
+ includePatterns: []string{"dist/**"},
+ excludePatterns: []string{},
+ },
+ wantAll: []string{
+ "/repos/some-app/dist",
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ },
+ wantFiles: []string{
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ },
+ },
+ {
+ name: "passing just a directory captures no children",
+ files: []string{
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ },
+ args: args{
+ basePath: "/repos/some-app/",
+ includePatterns: []string{"dist"},
+ excludePatterns: []string{},
+ },
+ wantAll: []string{"/repos/some-app/dist"},
+ wantFiles: []string{},
+ },
+ {
+ name: "redundant includes do not duplicate",
+ files: []string{
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ },
+ args: args{
+ basePath: "/repos/some-app/",
+ includePatterns: []string{"**/*", "dist/**"},
+ excludePatterns: []string{},
+ },
+ wantAll: []string{
+ "/repos/some-app/dist",
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ },
+ wantFiles: []string{
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ },
+ },
+ {
+ name: "exclude everything, include everything",
+ files: []string{
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ },
+ args: args{
+ basePath: "/repos/some-app/",
+ includePatterns: []string{"**"},
+ excludePatterns: []string{"**"},
+ },
+ wantAll: []string{},
+ wantFiles: []string{},
+ },
+ {
+ name: "passing just a directory to exclude prevents capture of children",
+ files: []string{
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ },
+ args: args{
+ basePath: "/repos/some-app/",
+ includePatterns: []string{"dist/**"},
+ excludePatterns: []string{"dist/js"},
+ },
+ wantAll: []string{
+ "/repos/some-app/dist",
+ "/repos/some-app/dist/index.html",
+ },
+ wantFiles: []string{
+ "/repos/some-app/dist/index.html",
+ },
+ },
+ {
+ name: "passing ** to exclude prevents capture of children",
+ files: []string{
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ },
+ args: args{
+ basePath: "/repos/some-app/",
+ includePatterns: []string{"dist/**"},
+ excludePatterns: []string{"dist/js/**"},
+ },
+ wantAll: []string{
+ "/repos/some-app/dist",
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js",
+ },
+ wantFiles: []string{
+ "/repos/some-app/dist/index.html",
+ },
+ },
+ {
+ name: "exclude everything with folder . applies at base path",
+ files: []string{
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ },
+ args: args{
+ basePath: "/repos/some-app/",
+ includePatterns: []string{"**"},
+ excludePatterns: []string{"./"},
+ },
+ wantAll: []string{},
+ wantFiles: []string{},
+ },
+ {
+ name: "exclude everything with traversal applies at a non-base path",
+ files: []string{
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ },
+ args: args{
+ basePath: "/repos/some-app/",
+ includePatterns: []string{"**"},
+ excludePatterns: []string{"./dist"},
+ },
+ wantAll: []string{},
+ wantFiles: []string{},
+ },
+ {
+ name: "exclude everything with folder traversal (..) applies at base path",
+ files: []string{
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ },
+ args: args{
+ basePath: "/repos/some-app/",
+ includePatterns: []string{"**"},
+ excludePatterns: []string{"dist/../"},
+ },
+ wantAll: []string{},
+ wantFiles: []string{},
+ },
+ {
+ name: "how do globs even work bad glob microformat",
+ files: []string{
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ },
+ args: args{
+ basePath: "/repos/some-app/",
+ includePatterns: []string{"**/**/**"},
+ excludePatterns: []string{},
+ },
+ wantAll: []string{
+ "/repos/some-app/dist",
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ },
+ wantFiles: []string{
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ },
+ },
+ {
+ name: "directory traversal stops at base path",
+ files: []string{
+ "/repos/spanish-inquisition/index.html",
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ },
+ args: args{
+ basePath: "/repos/some-app/",
+ includePatterns: []string{"../spanish-inquisition/**", "dist/**"},
+ excludePatterns: []string{},
+ },
+ wantAll: []string{},
+ wantFiles: []string{},
+ wantErr: true,
+ },
+ {
+ name: "globs and traversal and globs do not cross base path",
+ files: []string{
+ "/repos/spanish-inquisition/index.html",
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ },
+ args: args{
+ basePath: "/repos/some-app/",
+ includePatterns: []string{"**/../../spanish-inquisition/**"},
+ excludePatterns: []string{},
+ },
+ wantAll: []string{},
+ wantFiles: []string{},
+ wantErr: true,
+ },
+ {
+ name: "traversal works within base path",
+ files: []string{
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ },
+ args: args{
+ basePath: "/repos/some-app/",
+ includePatterns: []string{"dist/js/../**"},
+ excludePatterns: []string{},
+ },
+ wantAll: []string{
+ "/repos/some-app/dist",
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ },
+ wantFiles: []string{
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ },
+ },
+ {
+ name: "self-references (.) work",
+ files: []string{
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ },
+ args: args{
+ basePath: "/repos/some-app/",
+ includePatterns: []string{"dist/./././**"},
+ excludePatterns: []string{},
+ },
+ wantAll: []string{
+ "/repos/some-app/dist",
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ },
+ wantFiles: []string{
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ },
+ },
+ {
+ name: "depth of 1 includes handles folders properly",
+ files: []string{
+ "/repos/some-app/package.json",
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ },
+ args: args{
+ basePath: "/repos/some-app/",
+ includePatterns: []string{"*"},
+ excludePatterns: []string{},
+ },
+ wantAll: []string{
+ "/repos/some-app/dist",
+ "/repos/some-app/package.json",
+ },
+ wantFiles: []string{"/repos/some-app/package.json"},
+ },
+ {
+ name: "depth of 1 excludes prevents capturing folders",
+ files: []string{
+ "/repos/some-app/package.json",
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ },
+ args: args{
+ basePath: "/repos/some-app/",
+ includePatterns: []string{"**"},
+ excludePatterns: []string{"dist/*"},
+ },
+ wantAll: []string{
+ "/repos/some-app/dist",
+ "/repos/some-app/package.json",
+ },
+ wantFiles: []string{"/repos/some-app/package.json"},
+ },
+ {
+ name: "No-trailing slash basePath works",
+ files: []string{
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ },
+ args: args{
+ basePath: "/repos/some-app",
+ includePatterns: []string{"dist/**"},
+ excludePatterns: []string{},
+ },
+ wantAll: []string{
+ "/repos/some-app/dist",
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ },
+ wantFiles: []string{
+ "/repos/some-app/dist/index.html",
+ "/repos/some-app/dist/js/index.js",
+ "/repos/some-app/dist/js/lib.js",
+ "/repos/some-app/dist/js/node_modules/browserify.js",
+ },
+ },
+ {
+ name: "exclude single file",
+ files: []string{
+ "/repos/some-app/included.txt",
+ "/repos/some-app/excluded.txt",
+ },
+ args: args{
+ basePath: "/repos/some-app",
+ includePatterns: []string{"*.txt"},
+ excludePatterns: []string{"excluded.txt"},
+ },
+ wantAll: []string{
+ "/repos/some-app/included.txt",
+ },
+ wantFiles: []string{
+ "/repos/some-app/included.txt",
+ },
+ },
+ {
+ name: "exclude nested single file",
+ files: []string{
+ "/repos/some-app/one/included.txt",
+ "/repos/some-app/one/two/included.txt",
+ "/repos/some-app/one/two/three/included.txt",
+ "/repos/some-app/one/excluded.txt",
+ "/repos/some-app/one/two/excluded.txt",
+ "/repos/some-app/one/two/three/excluded.txt",
+ },
+ args: args{
+ basePath: "/repos/some-app",
+ includePatterns: []string{"**"},
+ excludePatterns: []string{"**/excluded.txt"},
+ },
+ wantAll: []string{
+ "/repos/some-app/one/included.txt",
+ "/repos/some-app/one/two/included.txt",
+ "/repos/some-app/one/two/three/included.txt",
+ "/repos/some-app/one",
+ "/repos/some-app/one/two",
+ "/repos/some-app/one/two/three",
+ },
+ wantFiles: []string{
+ "/repos/some-app/one/included.txt",
+ "/repos/some-app/one/two/included.txt",
+ "/repos/some-app/one/two/three/included.txt",
+ },
+ },
+ {
+ name: "exclude everything",
+ files: []string{
+ "/repos/some-app/one/included.txt",
+ "/repos/some-app/one/two/included.txt",
+ "/repos/some-app/one/two/three/included.txt",
+ "/repos/some-app/one/excluded.txt",
+ "/repos/some-app/one/two/excluded.txt",
+ "/repos/some-app/one/two/three/excluded.txt",
+ },
+ args: args{
+ basePath: "/repos/some-app",
+ includePatterns: []string{"**"},
+ excludePatterns: []string{"**"},
+ },
+ wantAll: []string{},
+ wantFiles: []string{},
+ },
+ {
+ name: "exclude everything with slash",
+ files: []string{
+ "/repos/some-app/one/included.txt",
+ "/repos/some-app/one/two/included.txt",
+ "/repos/some-app/one/two/three/included.txt",
+ "/repos/some-app/one/excluded.txt",
+ "/repos/some-app/one/two/excluded.txt",
+ "/repos/some-app/one/two/three/excluded.txt",
+ },
+ args: args{
+ basePath: "/repos/some-app",
+ includePatterns: []string{"**"},
+ excludePatterns: []string{"**/"},
+ },
+ wantAll: []string{},
+ wantFiles: []string{},
+ },
+ {
+ name: "exclude everything with leading **",
+ files: []string{
+ "/repos/some-app/foo/bar",
+ "/repos/some-app/some-foo",
+ "/repos/some-app/some-foo/bar",
+ "/repos/some-app/included",
+ },
+ args: args{
+ basePath: "/repos/some-app",
+ includePatterns: []string{"**"},
+ excludePatterns: []string{"**foo"},
+ },
+ wantAll: []string{
+ "/repos/some-app/included",
+ },
+ wantFiles: []string{
+ "/repos/some-app/included",
+ },
+ },
+ {
+ name: "exclude everything with trailing **",
+ files: []string{
+ "/repos/some-app/foo/bar",
+ "/repos/some-app/foo-file",
+ "/repos/some-app/foo-dir/bar",
+ "/repos/some-app/included",
+ },
+ args: args{
+ basePath: "/repos/some-app",
+ includePatterns: []string{"**"},
+ excludePatterns: []string{"foo**"},
+ },
+ wantAll: []string{
+ "/repos/some-app/included",
+ },
+ wantFiles: []string{
+ "/repos/some-app/included",
+ },
+ },
+ }
+ for _, tt := range tests {
+ fsysRoot := "/"
+ fsys := setup(fsysRoot, tt.files)
+
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := globFilesFs(fsys, fsysRoot, tt.args.basePath, tt.args.includePatterns, tt.args.excludePatterns)
+
+ if (err != nil) != tt.wantErr {
+ t.Errorf("globFilesFs() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+
+ gotToSlash := make([]string, len(got))
+ for index, path := range got {
+ gotToSlash[index] = filepath.ToSlash(path)
+ }
+
+ sort.Strings(gotToSlash)
+
+ if !reflect.DeepEqual(gotToSlash, tt.wantFiles) {
+ t.Errorf("globFilesFs() = %v, want %v", gotToSlash, tt.wantFiles)
+ }
+ })
+
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := globAllFs(fsys, fsysRoot, tt.args.basePath, tt.args.includePatterns, tt.args.excludePatterns)
+
+ if (err != nil) != tt.wantErr {
+ t.Errorf("globAllFs() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+
+ gotToSlash := make([]string, len(got))
+ for index, path := range got {
+ gotToSlash[index] = filepath.ToSlash(path)
+ }
+
+ sort.Strings(gotToSlash)
+ sort.Strings(tt.wantAll)
+
+ if !reflect.DeepEqual(gotToSlash, tt.wantAll) {
+ t.Errorf("globAllFs() = %v, want %v", gotToSlash, tt.wantAll)
+ }
+ })
+ }
+}
diff --git a/cli/internal/globwatcher/globwatcher.go b/cli/internal/globwatcher/globwatcher.go
new file mode 100644
index 0000000..9226cfa
--- /dev/null
+++ b/cli/internal/globwatcher/globwatcher.go
@@ -0,0 +1,210 @@
+package globwatcher
+
+import (
+ "errors"
+ "fmt"
+ "path/filepath"
+ "sync"
+
+ "github.com/hashicorp/go-hclog"
+ "github.com/vercel/turbo/cli/internal/doublestar"
+ "github.com/vercel/turbo/cli/internal/filewatcher"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "github.com/vercel/turbo/cli/internal/util"
+)
+
+// ErrClosed is returned when attempting to get changed globs after glob watching has closed
+var ErrClosed = errors.New("glob watching is closed")
+
+type globs struct {
+ Inclusions util.Set
+ Exclusions util.Set
+}
+
+// GlobWatcher is used to track unchanged globs by hash. Once a glob registers a file change
+// it is no longer tracked until a new hash requests it. Once all globs for a particular hash
+// have changed, that hash is no longer tracked.
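+//
+// As a hypothetical illustration: after WatchGlobs("h1", outputs) with the
+// single inclusion "dist/**", hashGlobs maps "h1" -> {"dist/**"} and globStatus
+// maps "dist/**" -> {"h1"}; a matching file change removes both entries, after
+// which "h1" is reported as fully changed.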
+type GlobWatcher struct {
+ logger hclog.Logger
+ repoRoot turbopath.AbsoluteSystemPath
+ cookieWaiter filewatcher.CookieWaiter
+
+ mu sync.RWMutex // protects the fields below
+ hashGlobs map[string]globs
+ globStatus map[string]util.Set // glob -> hashes where this glob hasn't changed
+
+ closed bool
+}
+
+// New returns a new GlobWatcher instance
+func New(logger hclog.Logger, repoRoot turbopath.AbsoluteSystemPath, cookieWaiter filewatcher.CookieWaiter) *GlobWatcher {
+ return &GlobWatcher{
+ logger: logger,
+ repoRoot: repoRoot,
+ cookieWaiter: cookieWaiter,
+ hashGlobs: make(map[string]globs),
+ globStatus: make(map[string]util.Set),
+ }
+}
+
+func (g *GlobWatcher) setClosed() {
+ g.mu.Lock()
+ g.closed = true
+ g.mu.Unlock()
+}
+
+func (g *GlobWatcher) isClosed() bool {
+ g.mu.RLock()
+ defer g.mu.RUnlock()
+ return g.closed
+}
+
+// WatchGlobs registers the given set of globs to be watched for changes and grouped
+// under the given hash. This method pairs with GetChangedGlobs to determine which globs
+// out of a set of candidates have changed since WatchGlobs was called for the same hash.
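+//
+// A minimal (hypothetical) sketch of the pairing, where gw and outputs are
+// placeholder names rather than anything defined in this package:
+//
+//	_ = gw.WatchGlobs("h1", outputs)                           // outputs: fs.TaskOutputs
+//	changed, _ := gw.GetChangedGlobs("h1", outputs.Inclusions) // empty until a watched file changes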
+func (g *GlobWatcher) WatchGlobs(hash string, globsToWatch fs.TaskOutputs) error {
+ if g.isClosed() {
+ return ErrClosed
+ }
+ // Wait for a cookie here
+ // that will ensure that we have seen all filesystem writes
+ // *by the calling client*. Other tasks _could_ write to the
+ // same output directories, however we are relying on task
+ // execution dependencies to prevent that.
+ if err := g.cookieWaiter.WaitForCookie(); err != nil {
+ return err
+ }
+ g.mu.Lock()
+ defer g.mu.Unlock()
+ g.hashGlobs[hash] = globs{
+ Inclusions: util.SetFromStrings(globsToWatch.Inclusions),
+ Exclusions: util.SetFromStrings(globsToWatch.Exclusions),
+ }
+
+ for _, glob := range globsToWatch.Inclusions {
+ existing, ok := g.globStatus[glob]
+ if !ok {
+ existing = make(util.Set)
+ }
+ existing.Add(hash)
+ g.globStatus[glob] = existing
+ }
+ return nil
+}
+
+// GetChangedGlobs returns the subset of the given candidates that we are not currently
+// tracking as "unchanged".
+func (g *GlobWatcher) GetChangedGlobs(hash string, candidates []string) ([]string, error) {
+ if g.isClosed() {
+ // If filewatching has crashed, return all candidates as changed.
+ return candidates, nil
+ }
+ // Wait for a cookie here
+ // that will ensure that we have seen all filesystem writes
+ // *by the calling client*. Other tasks _could_ write to the
+ // same output directories, however we are relying on task
+ // execution dependencies to prevent that.
+ if err := g.cookieWaiter.WaitForCookie(); err != nil {
+ return nil, err
+ }
+ // hashGlobs tracks all of the unchanged globs for a given hash
+ // If hashGlobs doesn't have our hash, either everything has changed,
+ // or we were never tracking it. Either way, consider all the candidates
+ // to be changed globs.
+ g.mu.RLock()
+ defer g.mu.RUnlock()
+ globsToCheck, ok := g.hashGlobs[hash]
+ if !ok {
+ return candidates, nil
+ }
+ allGlobs := util.SetFromStrings(candidates)
+ diff := allGlobs.Difference(globsToCheck.Inclusions)
+
+ return diff.UnsafeListOfStrings(), nil
+}
+
+// OnFileWatchEvent implements FileWatchClient.OnFileWatchEvent
+// On a file change, check if we have a glob that matches this file. Invalidate
+// any matching globs, and remove them from the set of unchanged globs for the corresponding
+// hashes. If this is the last glob for a hash, remove the hash from being tracked.
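+//
+// Worked example (hypothetical): if hash "h1" tracks inclusion "dist/**" and an
+// event arrives for dist/main.js, the glob matches, "h1" is removed from
+// globStatus["dist/**"], and once "h1" has no inclusions left hashGlobs["h1"]
+// is deleted, so later GetChangedGlobs calls report all candidates as changed.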
+func (g *GlobWatcher) OnFileWatchEvent(ev filewatcher.Event) {
+ // At this point, we don't care what the Op is, any Op represents a change
+ // that should invalidate matching globs
+ g.logger.Trace(fmt.Sprintf("Got fsnotify event %v", ev))
+ absolutePath := ev.Path
+ repoRelativePath, err := g.repoRoot.RelativePathString(absolutePath.ToStringDuringMigration())
+ if err != nil {
+ g.logger.Debug(fmt.Sprintf("could not get relative path from %v to %v: %v", g.repoRoot, absolutePath, err))
+ return
+ }
+ g.mu.Lock()
+ defer g.mu.Unlock()
+ for glob, hashStatus := range g.globStatus {
+ matches, err := doublestar.Match(glob, filepath.ToSlash(repoRelativePath))
+ if err != nil {
+ g.logger.Error(fmt.Sprintf("failed to check path %v against glob %v: %v", repoRelativePath, glob, err))
+ continue
+ }
+ // If this glob matches, we know that it has changed for every hash that included this glob
+ // and is not excluded by a hash's exclusion globs.
+ // So, we can delete this glob from every hash tracking it as well as stop watching this glob.
+ // To stop watching, we unref each of the directories corresponding to this glob.
+ if matches {
+ for hashUntyped := range hashStatus {
+ hash := hashUntyped.(string)
+ hashGlobs, ok := g.hashGlobs[hash]
+
+ if !ok {
+ g.logger.Warn(fmt.Sprintf("failed to find hash %v referenced from glob %v", hash, glob))
+ continue
+ }
+
+ isExcluded := false
+ // Check if we've excluded this path by going through exclusion globs
+ for exclusionGlob := range hashGlobs.Exclusions {
+ matches, err := doublestar.Match(exclusionGlob.(string), filepath.ToSlash(repoRelativePath))
+ if err != nil {
+ g.logger.Error(fmt.Sprintf("failed to check path %v against exclusion glob %v: %v", repoRelativePath, exclusionGlob, err))
+ continue
+ }
+
+ if matches {
+ isExcluded = true
+ break
+ }
+ }
+
+ // If we have excluded this path, then we skip it
+ if isExcluded {
+ continue
+ }
+
+ // We delete hash from the globStatus entry
+ g.globStatus[glob].Delete(hash)
+
+ // If we've deleted the last hash for a glob in globStatus, delete the whole glob entry
+ if len(g.globStatus[glob]) == 0 {
+ delete(g.globStatus, glob)
+ }
+
+ hashGlobs.Inclusions.Delete(glob)
+ // If we've deleted the last glob for a hash, delete the whole hash entry
+ if hashGlobs.Inclusions.Len() == 0 {
+ delete(g.hashGlobs, hash)
+ }
+ }
+ }
+ }
+}
+
+// OnFileWatchError implements FileWatchClient.OnFileWatchError
+func (g *GlobWatcher) OnFileWatchError(err error) {
+ g.logger.Error(fmt.Sprintf("file watching received an error: %v", err))
+}
+
+// OnFileWatchClosed implements FileWatchClient.OnFileWatchClosed
+func (g *GlobWatcher) OnFileWatchClosed() {
+ g.setClosed()
+ g.logger.Warn("GlobWatching is closing due to file watching closing")
+}
diff --git a/cli/internal/globwatcher/globwatcher_test.go b/cli/internal/globwatcher/globwatcher_test.go
new file mode 100644
index 0000000..6fb89a7
--- /dev/null
+++ b/cli/internal/globwatcher/globwatcher_test.go
@@ -0,0 +1,232 @@
+package globwatcher
+
+import (
+ "testing"
+
+ "github.com/hashicorp/go-hclog"
+ "github.com/vercel/turbo/cli/internal/filewatcher"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "gotest.tools/v3/assert"
+)
+
+func setup(t *testing.T, repoRoot turbopath.AbsoluteSystemPath) {
+ // Directory layout:
+ // <repoRoot>/
+ // my-pkg/
+ // irrelevant
+ // dist/
+ // dist-file
+ // distChild/
+ // child-file
+ // .next/
+ // next-file
+ distPath := repoRoot.UntypedJoin("my-pkg", "dist")
+ childFilePath := distPath.UntypedJoin("distChild", "child-file")
+ err := childFilePath.EnsureDir()
+ assert.NilError(t, err, "EnsureDir")
+ f, err := childFilePath.Create()
+ assert.NilError(t, err, "Create")
+ err = f.Close()
+ assert.NilError(t, err, "Close")
+ distFilePath := repoRoot.UntypedJoin("my-pkg", "dist", "dist-file")
+ f, err = distFilePath.Create()
+ assert.NilError(t, err, "Create")
+ err = f.Close()
+ assert.NilError(t, err, "Close")
+ nextFilePath := repoRoot.UntypedJoin("my-pkg", ".next", "next-file")
+ err = nextFilePath.EnsureDir()
+ assert.NilError(t, err, "EnsureDir")
+ f, err = nextFilePath.Create()
+ assert.NilError(t, err, "Create")
+ err = f.Close()
+ assert.NilError(t, err, "Close")
+ irrelevantPath := repoRoot.UntypedJoin("my-pkg", "irrelevant")
+ f, err = irrelevantPath.Create()
+ assert.NilError(t, err, "Create")
+ err = f.Close()
+ assert.NilError(t, err, "Close")
+}
+
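+// noopCookieWaiter satisfies filewatcher.CookieWaiter without performing any
+// synchronization, which is enough for these single-goroutine tests.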
+type noopCookieWaiter struct{}
+
+func (*noopCookieWaiter) WaitForCookie() error {
+ return nil
+}
+
+var _noopCookieWaiter = &noopCookieWaiter{}
+
+func TestTrackOutputs(t *testing.T) {
+ logger := hclog.Default()
+
+ repoRootRaw := t.TempDir()
+ repoRoot := fs.AbsoluteSystemPathFromUpstream(repoRootRaw)
+
+ setup(t, repoRoot)
+
+ globWatcher := New(logger, repoRoot, _noopCookieWaiter)
+
+ globs := fs.TaskOutputs{
+ Inclusions: []string{
+ "my-pkg/dist/**",
+ "my-pkg/.next/**",
+ },
+ Exclusions: []string{"my-pkg/.next/cache/**"},
+ }
+
+ hash := "the-hash"
+ err := globWatcher.WatchGlobs(hash, globs)
+ assert.NilError(t, err, "WatchGlobs")
+
+ changed, err := globWatcher.GetChangedGlobs(hash, globs.Inclusions)
+ assert.NilError(t, err, "GetChangedGlobs")
+ assert.Equal(t, 0, len(changed), "Expected no changed paths")
+
+ // Make an irrelevant change
+ globWatcher.OnFileWatchEvent(filewatcher.Event{
+ EventType: filewatcher.FileAdded,
+ Path: repoRoot.UntypedJoin("my-pkg", "irrelevant"),
+ })
+
+ changed, err = globWatcher.GetChangedGlobs(hash, globs.Inclusions)
+ assert.NilError(t, err, "GetChangedGlobs")
+ assert.Equal(t, 0, len(changed), "Expected no changed paths")
+
+ // Make an excluded change
+ globWatcher.OnFileWatchEvent(filewatcher.Event{
+ EventType: filewatcher.FileAdded,
+ Path: repoRoot.Join("my-pkg", ".next", "cache", "foo"),
+ })
+
+ changed, err = globWatcher.GetChangedGlobs(hash, globs.Inclusions)
+ assert.NilError(t, err, "GetChangedGlobs")
+ assert.Equal(t, 0, len(changed), "Expected no changed paths")
+
+ // Make a relevant change
+ globWatcher.OnFileWatchEvent(filewatcher.Event{
+ EventType: filewatcher.FileAdded,
+ Path: repoRoot.UntypedJoin("my-pkg", "dist", "foo"),
+ })
+
+ changed, err = globWatcher.GetChangedGlobs(hash, globs.Inclusions)
+ assert.NilError(t, err, "GetChangedGlobs")
+ assert.Equal(t, 1, len(changed), "Expected one changed path remaining")
+ expected := "my-pkg/dist/**"
+ assert.Equal(t, expected, changed[0], "Expected dist glob to have changed")
+
+ // Change a file matching the other glob
+ globWatcher.OnFileWatchEvent(filewatcher.Event{
+ EventType: filewatcher.FileAdded,
+ Path: repoRoot.UntypedJoin("my-pkg", ".next", "foo"),
+ })
+ // We should no longer be watching anything, since both globs have
+ // registered changes
+ if len(globWatcher.hashGlobs) != 0 {
+ t.Errorf("expected to not track any hashes, found %v", globWatcher.hashGlobs)
+ }
+
+ // Both globs have changed, we should have stopped tracking
+ // this hash
+ changed, err = globWatcher.GetChangedGlobs(hash, globs.Inclusions)
+ assert.NilError(t, err, "GetChangedGlobs")
+ assert.DeepEqual(t, globs.Inclusions, changed)
+}
+
+func TestTrackMultipleHashes(t *testing.T) {
+ logger := hclog.Default()
+
+ repoRootRaw := t.TempDir()
+ repoRoot := fs.AbsoluteSystemPathFromUpstream(repoRootRaw)
+
+ setup(t, repoRoot)
+
+ globWatcher := New(logger, repoRoot, _noopCookieWaiter)
+
+ globs := fs.TaskOutputs{
+ Inclusions: []string{
+ "my-pkg/dist/**",
+ "my-pkg/.next/**",
+ },
+ }
+
+ hash := "the-hash"
+ err := globWatcher.WatchGlobs(hash, globs)
+ assert.NilError(t, err, "WatchGlobs")
+
+ secondGlobs := fs.TaskOutputs{
+ Inclusions: []string{
+ "my-pkg/.next/**",
+ },
+ Exclusions: []string{"my-pkg/.next/cache/**"},
+ }
+
+ secondHash := "the-second-hash"
+ err = globWatcher.WatchGlobs(secondHash, secondGlobs)
+ assert.NilError(t, err, "WatchGlobs")
+
+ changed, err := globWatcher.GetChangedGlobs(hash, globs.Inclusions)
+ assert.NilError(t, err, "GetChangedGlobs")
+ assert.Equal(t, 0, len(changed), "Expected no changed paths")
+
+ changed, err = globWatcher.GetChangedGlobs(secondHash, secondGlobs.Inclusions)
+ assert.NilError(t, err, "GetChangedGlobs")
+ assert.Equal(t, 0, len(changed), "Expected no changed paths")
+
+ // Make a change that is excluded in one of the hashes but not in the other
+ globWatcher.OnFileWatchEvent(filewatcher.Event{
+ EventType: filewatcher.FileAdded,
+ Path: repoRoot.UntypedJoin("my-pkg", ".next", "cache", "foo"),
+ })
+
+ changed, err = globWatcher.GetChangedGlobs(hash, globs.Inclusions)
+ assert.NilError(t, err, "GetChangedGlobs")
+ assert.Equal(t, 1, len(changed), "Expected one changed path remaining")
+
+ changed, err = globWatcher.GetChangedGlobs(secondHash, secondGlobs.Inclusions)
+ assert.NilError(t, err, "GetChangedGlobs")
+ assert.Equal(t, 0, len(changed), "Expected no changed paths")
+
+ assert.Equal(t, 1, len(globWatcher.globStatus["my-pkg/.next/**"]), "Expected to be still watching `my-pkg/.next/**`")
+
+ // Make a change for secondHash
+ globWatcher.OnFileWatchEvent(filewatcher.Event{
+ EventType: filewatcher.FileAdded,
+ Path: repoRoot.UntypedJoin("my-pkg", ".next", "bar"),
+ })
+
+ assert.Equal(t, 0, len(globWatcher.globStatus["my-pkg/.next/**"]), "Expected to be no longer watching `my-pkg/.next/**`")
+}
+
+func TestWatchSingleFile(t *testing.T) {
+ logger := hclog.Default()
+
+ repoRoot := fs.AbsoluteSystemPathFromUpstream(t.TempDir())
+
+ setup(t, repoRoot)
+
+ globWatcher := New(logger, repoRoot, _noopCookieWaiter)
+ globs := fs.TaskOutputs{
+ Inclusions: []string{"my-pkg/.next/next-file"},
+ Exclusions: []string{},
+ }
+ hash := "the-hash"
+ err := globWatcher.WatchGlobs(hash, globs)
+ assert.NilError(t, err, "WatchGlobs")
+
+ assert.Equal(t, 1, len(globWatcher.hashGlobs))
+
+ // A change to an irrelevant file
+ globWatcher.OnFileWatchEvent(filewatcher.Event{
+ EventType: filewatcher.FileAdded,
+ Path: repoRoot.UntypedJoin("my-pkg", ".next", "foo"),
+ })
+ assert.Equal(t, 1, len(globWatcher.hashGlobs))
+
+ // Change the watched file
+ globWatcher.OnFileWatchEvent(filewatcher.Event{
+ EventType: filewatcher.FileAdded,
+ Path: repoRoot.UntypedJoin("my-pkg", ".next", "next-file"),
+ })
+ assert.Equal(t, 0, len(globWatcher.hashGlobs))
+}
diff --git a/cli/internal/graph/graph.go b/cli/internal/graph/graph.go
new file mode 100644
index 0000000..480dec9
--- /dev/null
+++ b/cli/internal/graph/graph.go
@@ -0,0 +1,274 @@
+// Package graph contains the CompleteGraph struct and some methods around it
+package graph
+
+import (
+ gocontext "context"
+ "fmt"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strings"
+
+ "github.com/hashicorp/go-hclog"
+ "github.com/pyr-sh/dag"
+ "github.com/vercel/turbo/cli/internal/env"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/nodes"
+ "github.com/vercel/turbo/cli/internal/runsummary"
+ "github.com/vercel/turbo/cli/internal/taskhash"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "github.com/vercel/turbo/cli/internal/util"
+ "github.com/vercel/turbo/cli/internal/workspace"
+)
+
+// CompleteGraph represents the common state inferred from the filesystem and pipeline.
+// It is not intended to include information specific to a particular run.
+type CompleteGraph struct {
+ // WorkspaceGraph expresses the dependencies between packages
+ WorkspaceGraph dag.AcyclicGraph
+
+ // Pipeline is config from turbo.json
+ Pipeline fs.Pipeline
+
+ // WorkspaceInfos stores the package.json contents by package name
+ WorkspaceInfos workspace.Catalog
+
+ // GlobalHash is the hash of all global dependencies
+ GlobalHash string
+
+ RootNode string
+
+ // Map of TaskDefinitions by taskID
+ TaskDefinitions map[string]*fs.TaskDefinition
+ RepoRoot turbopath.AbsoluteSystemPath
+
+ TaskHashTracker *taskhash.Tracker
+}
+
+// GetPackageTaskVisitor wraps a `visitor` function that is used for walking the TaskGraph
+// during execution (or dry-runs). The function returned here does not execute any tasks itself,
+// but it helps curry some data from the CompleteGraph and pass it into the visitor function.
+func (g *CompleteGraph) GetPackageTaskVisitor(
+ ctx gocontext.Context,
+ taskGraph *dag.AcyclicGraph,
+ globalEnvMode util.EnvMode,
+ getArgs func(taskID string) []string,
+ logger hclog.Logger,
+ execFunc func(ctx gocontext.Context, packageTask *nodes.PackageTask, taskSummary *runsummary.TaskSummary) error,
+) func(taskID string) error {
+ return func(taskID string) error {
+ packageName, taskName := util.GetPackageTaskFromId(taskID)
+ pkg, ok := g.WorkspaceInfos.PackageJSONs[packageName]
+ if !ok {
+ return fmt.Errorf("cannot find package %v for task %v", packageName, taskID)
+ }
+
+ // Check for root task
+ var command string
+ if cmd, ok := pkg.Scripts[taskName]; ok {
+ command = cmd
+ }
+
+ if packageName == util.RootPkgName && commandLooksLikeTurbo(command) {
+ return fmt.Errorf("root task %v (%v) looks like it invokes turbo and might cause a loop", taskName, command)
+ }
+
+ taskDefinition, ok := g.TaskDefinitions[taskID]
+ if !ok {
+ return fmt.Errorf("Could not find definition for task")
+ }
+
+ // Task env mode is only independent when global env mode is `infer`.
+ taskEnvMode := globalEnvMode
+ useOldTaskHashable := false
+ if taskEnvMode == util.Infer {
+ if taskDefinition.PassthroughEnv != nil {
+ taskEnvMode = util.Strict
+ } else {
+ // If we're in infer mode, we have just detected that strict env vars are not in use.
+ // Since we haven't stabilized this behavior, we don't want to break the user's cache.
+ useOldTaskHashable = true
+
+ // But our old behavior's actual meaning of this state is `loose`.
+ taskEnvMode = util.Loose
+ }
+ }
+
+ // TODO: maybe we can remove this PackageTask struct at some point
+ packageTask := &nodes.PackageTask{
+ TaskID: taskID,
+ Task: taskName,
+ PackageName: packageName,
+ Pkg: pkg,
+ EnvMode: taskEnvMode,
+ Dir: pkg.Dir.ToString(),
+ TaskDefinition: taskDefinition,
+ Outputs: taskDefinition.Outputs.Inclusions,
+ ExcludedOutputs: taskDefinition.Outputs.Exclusions,
+ }
+
+ passThruArgs := getArgs(taskName)
+ hash, err := g.TaskHashTracker.CalculateTaskHash(
+ packageTask,
+ taskGraph.DownEdges(taskID),
+ logger,
+ passThruArgs,
+ useOldTaskHashable,
+ )
+
+ // Not being able to construct the task hash is a hard error
+ if err != nil {
+ return fmt.Errorf("Hashing error: %v", err)
+ }
+
+ pkgDir := pkg.Dir
+ packageTask.Hash = hash
+ envVars := g.TaskHashTracker.GetEnvVars(taskID)
+ expandedInputs := g.TaskHashTracker.GetExpandedInputs(packageTask)
+ framework := g.TaskHashTracker.GetFramework(taskID)
+
+ logFile := repoRelativeLogFile(pkgDir, taskName)
+ packageTask.LogFile = logFile
+ packageTask.Command = command
+
+ var envVarPassthroughMap env.EnvironmentVariableMap
+ if taskDefinition.PassthroughEnv != nil {
+ if envVarPassthroughDetailedMap, err := env.GetHashableEnvVars(taskDefinition.PassthroughEnv, nil, ""); err == nil {
+ envVarPassthroughMap = envVarPassthroughDetailedMap.BySource.Explicit
+ }
+ }
+
+ summary := &runsummary.TaskSummary{
+ TaskID: taskID,
+ Task: taskName,
+ Hash: hash,
+ Package: packageName,
+ Dir: pkgDir.ToString(),
+ Outputs: taskDefinition.Outputs.Inclusions,
+ ExcludedOutputs: taskDefinition.Outputs.Exclusions,
+ LogFile: logFile,
+ ResolvedTaskDefinition: taskDefinition,
+ ExpandedInputs: expandedInputs,
+ ExpandedOutputs: []turbopath.AnchoredSystemPath{},
+ Command: command,
+ CommandArguments: passThruArgs,
+ Framework: framework,
+ EnvMode: taskEnvMode,
+ EnvVars: runsummary.TaskEnvVarSummary{
+ Configured: envVars.BySource.Explicit.ToSecretHashable(),
+ Inferred: envVars.BySource.Matching.ToSecretHashable(),
+ Passthrough: envVarPassthroughMap.ToSecretHashable(),
+ },
+ ExternalDepsHash: pkg.ExternalDepsHash,
+ }
+
+ if ancestors, err := g.getTaskGraphAncestors(taskGraph, packageTask.TaskID); err == nil {
+ summary.Dependencies = ancestors
+ }
+ if descendents, err := g.getTaskGraphDescendants(taskGraph, packageTask.TaskID); err == nil {
+ summary.Dependents = descendents
+ }
+
+ return execFunc(ctx, packageTask, summary)
+ }
+}
+
+// GetPipelineFromWorkspace returns the unmarshaled fs.Pipeline from turbo.json in the given workspace.
+func (g *CompleteGraph) GetPipelineFromWorkspace(workspaceName string, isSinglePackage bool) (fs.Pipeline, error) {
+ turboConfig, err := g.GetTurboConfigFromWorkspace(workspaceName, isSinglePackage)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return turboConfig.Pipeline, nil
+}
+
+// GetTurboConfigFromWorkspace returns the unmarshaled fs.TurboJSON from turbo.json in the given workspace.
+func (g *CompleteGraph) GetTurboConfigFromWorkspace(workspaceName string, isSinglePackage bool) (*fs.TurboJSON, error) {
+ cachedTurboConfig, ok := g.WorkspaceInfos.TurboConfigs[workspaceName]
+
+ if ok {
+ return cachedTurboConfig, nil
+ }
+
+ var workspacePackageJSON *fs.PackageJSON
+ if pkgJSON, err := g.GetPackageJSONFromWorkspace(workspaceName); err == nil {
+ workspacePackageJSON = pkgJSON
+ } else {
+ return nil, err
+ }
+
+ // Note: pkgJSON.Dir for the root workspace will be an empty string, and for
+ // other workspaces, it will be a relative path.
+ workspaceAbsolutePath := workspacePackageJSON.Dir.RestoreAnchor(g.RepoRoot)
+ turboConfig, err := fs.LoadTurboConfig(workspaceAbsolutePath, workspacePackageJSON, isSinglePackage)
+
+ // If we failed to load a TurboConfig, bubble up the error
+ if err != nil {
+ return nil, err
+ }
+
+ // add to cache
+ g.WorkspaceInfos.TurboConfigs[workspaceName] = turboConfig
+
+ return g.WorkspaceInfos.TurboConfigs[workspaceName], nil
+}
+
+// GetPackageJSONFromWorkspace returns the unmarshaled package.json struct for the given workspace
+func (g *CompleteGraph) GetPackageJSONFromWorkspace(workspaceName string) (*fs.PackageJSON, error) {
+ if pkgJSON, ok := g.WorkspaceInfos.PackageJSONs[workspaceName]; ok {
+ return pkgJSON, nil
+ }
+
+ return nil, fmt.Errorf("No package.json for %s", workspaceName)
+}
+
+// repoRelativeLogFile returns the path to the log file for this task execution as a
+// relative path from the root of the monorepo.
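+// For example (hypothetical values): dir "apps/web" and taskName "build"
+// yield "apps/web/.turbo/turbo-build.log".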
+func repoRelativeLogFile(dir turbopath.AnchoredSystemPath, taskName string) string {
+ return filepath.Join(dir.ToStringDuringMigration(), ".turbo", fmt.Sprintf("turbo-%v.log", taskName))
+}
+
+// getTaskGraphAncestors gets all the ancestors for a given task in the graph.
+// "ancestors" are all tasks that the given task depends on.
+func (g *CompleteGraph) getTaskGraphAncestors(taskGraph *dag.AcyclicGraph, taskID string) ([]string, error) {
+ ancestors, err := taskGraph.Ancestors(taskID)
+ if err != nil {
+ return nil, err
+ }
+ stringAncestors := []string{}
+ for _, dep := range ancestors {
+ // Don't leak out internal root node names, which are just placeholders
+ if !strings.Contains(dep.(string), g.RootNode) {
+ stringAncestors = append(stringAncestors, dep.(string))
+ }
+ }
+
+ sort.Strings(stringAncestors)
+ return stringAncestors, nil
+}
+
+// getTaskGraphDescendants gets all the descendants for a given task in the graph.
+// "descendants" are all tasks that depend on the given taskID.
+func (g *CompleteGraph) getTaskGraphDescendants(taskGraph *dag.AcyclicGraph, taskID string) ([]string, error) {
+ descendents, err := taskGraph.Descendents(taskID)
+ if err != nil {
+ return nil, err
+ }
+ stringDescendents := []string{}
+ for _, dep := range descendents {
+ // Don't leak out internal root node name, which are just placeholders
+ if !strings.Contains(dep.(string), g.RootNode) {
+ stringDescendents = append(stringDescendents, dep.(string))
+ }
+ }
+ sort.Strings(stringDescendents)
+ return stringDescendents, nil
+}
+
+var _isTurbo = regexp.MustCompile(`(?:^|\s)turbo(?:$|\s)`)
+
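+// commandLooksLikeTurbo reports whether a script command invokes turbo as a
+// standalone word: "npx turbo run foo" matches, while a direct path such as
+// "./node_modules/.bin/turbo foo" deliberately does not (see graph_test.go).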
+func commandLooksLikeTurbo(command string) bool {
+ return _isTurbo.MatchString(command)
+}
diff --git a/cli/internal/graph/graph_test.go b/cli/internal/graph/graph_test.go
new file mode 100644
index 0000000..9323e19
--- /dev/null
+++ b/cli/internal/graph/graph_test.go
@@ -0,0 +1,50 @@
+package graph
+
+import (
+ "testing"
+
+ "gotest.tools/v3/assert"
+)
+
+func Test_CommandsInvokingTurbo(t *testing.T) {
+ type testCase struct {
+ command string
+ match bool
+ }
+ testCases := []testCase{
+ {
+ "turbo run foo",
+ true,
+ },
+ {
+ "rm -rf ~/Library/Caches/pnpm && turbo run foo && rm -rf ~/.npm",
+ true,
+ },
+ {
+ "FLAG=true turbo run foo",
+ true,
+ },
+ {
+ "npx turbo run foo",
+ true,
+ },
+ {
+ "echo starting; turbo foo; echo done",
+ true,
+ },
+ // We deliberately don't catch this: if people invoke the turbo binary
+ // directly, they'll always be able to work around us anyway.
+ {
+ "./node_modules/.bin/turbo foo",
+ false,
+ },
+ {
+ "rm -rf ~/Library/Caches/pnpm && rm -rf ~/Library/Caches/turbo && rm -rf ~/.npm && rm -rf ~/.pnpm-store && rm -rf ~/.turbo",
+ false,
+ },
+ }
+
+ for _, tc := range testCases {
+ assert.Equal(t, commandLooksLikeTurbo(tc.command), tc.match, tc.command)
+ }
+}
diff --git a/cli/internal/graphvisualizer/graphvisualizer.go b/cli/internal/graphvisualizer/graphvisualizer.go
new file mode 100644
index 0000000..4e134b2
--- /dev/null
+++ b/cli/internal/graphvisualizer/graphvisualizer.go
@@ -0,0 +1,205 @@
+package graphvisualizer
+
+import (
+ "fmt"
+ "io"
+ "math/rand"
+ "os/exec"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "github.com/fatih/color"
+ "github.com/mitchellh/cli"
+ "github.com/pyr-sh/dag"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "github.com/vercel/turbo/cli/internal/ui"
+ "github.com/vercel/turbo/cli/internal/util"
+ "github.com/vercel/turbo/cli/internal/util/browser"
+)
+
+// GraphVisualizer holds the pieces needed to render and export the task graph
+type GraphVisualizer struct {
+ repoRoot turbopath.AbsoluteSystemPath
+ ui cli.Ui
+ TaskGraph *dag.AcyclicGraph
+}
+
+// hasGraphViz checks for the presence of https://graphviz.org/
+func hasGraphViz() bool {
+ err := exec.Command("dot", "-V").Run()
+ return err == nil
+}
+
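+// getRandChar returns one random uppercase ASCII letter in the range 'A'..'Y'
+// (rand.Intn(25) yields 0-24, which maps onto code points 65-89).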
+func getRandChar() string {
+ i := rand.Intn(25) + 65
+ return string(rune(i))
+}
+
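+// getRandID returns a four-letter random identifier used to label graph nodes
+// in the Mermaid output.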
+func getRandID() string {
+ return getRandChar() + getRandChar() + getRandChar() + getRandChar()
+}
+
+// New creates a GraphVisualizer for rendering the given task graph
+func New(repoRoot turbopath.AbsoluteSystemPath, ui cli.Ui, TaskGraph *dag.AcyclicGraph) *GraphVisualizer {
+ return &GraphVisualizer{
+ repoRoot: repoRoot,
+ ui: ui,
+ TaskGraph: TaskGraph,
+ }
+}
+
+// generateDotString converts the TaskGraph DAG into a Graphviz dot-format string
+func (g *GraphVisualizer) generateDotString() string {
+ return string(g.TaskGraph.Dot(&dag.DotOpts{
+ Verbose: true,
+ DrawCycles: true,
+ }))
+}
+
+// Outputs a warning when a file was requested, but graphviz is not available
+func (g *GraphVisualizer) graphVizWarnUI() {
+ g.ui.Warn(color.New(color.FgYellow, color.Bold, color.ReverseVideo).Sprint(" WARNING ") + color.YellowString(" `turbo` uses Graphviz to generate an image of your\ngraph, but Graphviz isn't installed on this machine.\n\nYou can download Graphviz from https://graphviz.org/download.\n\nIn the meantime, you can use this string output with an\nonline Dot graph viewer."))
+}
+
+// RenderDotGraph renders a dot graph string for the current TaskGraph
+func (g *GraphVisualizer) RenderDotGraph() {
+ g.ui.Output("")
+ g.ui.Output(g.generateDotString())
+}
+
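+// nameCache maps full vertex names to short generated aliases so that each
+// vertex keeps a single stable ID for the duration of one render.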
+type nameCache map[string]string
+
+func (nc nameCache) getName(in string) string {
+ if existing, ok := nc[in]; ok {
+ return existing
+ }
+ newName := getRandID()
+ nc[in] = newName
+ return newName
+}
+
+type sortableEdge dag.Edge
+type sortableEdges []sortableEdge
+
+// methods mostly copied from marshalEdges in the dag library
+func (e sortableEdges) Less(i, j int) bool {
+ iSrc := dag.VertexName(e[i].Source())
+ jSrc := dag.VertexName(e[j].Source())
+ if iSrc < jSrc {
+ return true
+ } else if iSrc > jSrc {
+ return false
+ }
+ return dag.VertexName(e[i].Target()) < dag.VertexName(e[j].Target())
+}
+func (e sortableEdges) Len() int { return len(e) }
+func (e sortableEdges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
+
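+// generateMermaid writes the task graph as a Mermaid "graph TD" diagram. A
+// hypothetical single-edge graph renders roughly as:
+//
+//	graph TD
+//		QKZP("web#build") --> XLMR("web#lint")
+//
+// with the four-letter node IDs coming from getRandID via nameCache.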
+func (g *GraphVisualizer) generateMermaid(out io.StringWriter) error {
+ if _, err := out.WriteString("graph TD\n"); err != nil {
+ return err
+ }
+ cache := make(nameCache)
+ // cast edges to our custom type so we can sort them
+ // this allows us to generate the same graph every time
+ var edges sortableEdges
+ for _, edge := range g.TaskGraph.Edges() {
+ edges = append(edges, sortableEdge(edge))
+ }
+ sort.Sort(edges)
+ for _, edge := range edges {
+ left := dag.VertexName(edge.Source())
+ right := dag.VertexName(edge.Target())
+ leftName := cache.getName(left)
+ rightName := cache.getName(right)
+ if _, err := out.WriteString(fmt.Sprintf("\t%v(\"%v\") --> %v(\"%v\")\n", leftName, left, rightName, right)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GenerateGraphFile saves a visualization of the TaskGraph to a file (or renders a dot graph as a fallback)
+func (g *GraphVisualizer) GenerateGraphFile(outputName string) error {
+ outputFilename := g.repoRoot.UntypedJoin(outputName)
+ ext := outputFilename.Ext()
+ // use .jpg as default extension if none is provided
+ if ext == "" {
+ ext = ".jpg"
+ outputFilename = g.repoRoot.UntypedJoin(outputName + ext)
+ }
+ if ext == ".mermaid" {
+ f, err := outputFilename.Create()
+ if err != nil {
+ return fmt.Errorf("error creating file: %w", err)
+ }
+ defer util.CloseAndIgnoreError(f)
+ if err := g.generateMermaid(f); err != nil {
+ return err
+ }
+ g.ui.Output(fmt.Sprintf("✔ Generated task graph in %s", ui.Bold(outputFilename.ToString())))
+ return nil
+ }
+ graphString := g.generateDotString()
+ if ext == ".html" {
+ f, err := outputFilename.Create()
+ if err != nil {
+ return fmt.Errorf("error creating file: %w", err)
+ }
+ defer util.CloseAndIgnoreError(f)
+ _, writeErr1 := f.WriteString(`<!DOCTYPE html>
+ <html>
+ <head>
+ <meta charset="utf-8">
+ <title>Graph</title>
+ </head>
+ <body>
+ <script src="https://cdn.jsdelivr.net/npm/viz.js@2.1.2-pre.1/viz.js"></script>
+ <script src="https://cdn.jsdelivr.net/npm/viz.js@2.1.2-pre.1/full.render.js"></script>
+ <script>`)
+ if writeErr1 != nil {
+ return fmt.Errorf("error writing graph contents: %w", writeErr1)
+ }
+
+ _, writeErr2 := f.WriteString("const s = `" + graphString + "`.replace(/\\_\\_\\_ROOT\\_\\_\\_/g, \"Root\").replace(/\\[root\\]/g, \"\");new Viz().renderSVGElement(s).then(el => document.body.appendChild(el)).catch(e => console.error(e));")
+ if writeErr2 != nil {
+ return fmt.Errorf("error creating file: %w", writeErr2)
+ }
+
+ _, writeErr3 := f.WriteString(`
+ </script>
+ </body>
+ </html>`)
+ if writeErr3 != nil {
+ return fmt.Errorf("error creating file: %w", writeErr3)
+ }
+
+ g.ui.Output("")
+ g.ui.Output(fmt.Sprintf("✔ Generated task graph in %s", ui.Bold(outputFilename.ToString())))
+ if ui.IsTTY {
+ if err := browser.OpenBrowser(outputFilename.ToString()); err != nil {
+ g.ui.Warn(color.New(color.FgYellow, color.Bold, color.ReverseVideo).Sprintf("failed to open browser. Please navigate to file://%v", filepath.ToSlash(outputFilename.ToString())))
+ }
+ }
+ return nil
+ }
+ hasDot := hasGraphViz()
+ if hasDot {
+ dotArgs := []string{"-T" + ext[1:], "-o", outputFilename.ToString()}
+ cmd := exec.Command("dot", dotArgs...)
+ cmd.Stdin = strings.NewReader(graphString)
+ if err := cmd.Run(); err != nil {
+ return fmt.Errorf("could not generate task graphfile %v: %w", outputFilename, err)
+ }
+ g.ui.Output("")
+ g.ui.Output(fmt.Sprintf("✔ Generated task graph in %s", ui.Bold(outputFilename.ToString())))
+
+ } else {
+ g.ui.Output("")
+ // User requested a file, but we're falling back to console here so warn about installing graphViz correctly
+ g.graphVizWarnUI()
+ g.RenderDotGraph()
+ }
+ return nil
+}
diff --git a/cli/internal/hashing/package_deps_hash.go b/cli/internal/hashing/package_deps_hash.go
new file mode 100644
index 0000000..517cddd
--- /dev/null
+++ b/cli/internal/hashing/package_deps_hash.go
@@ -0,0 +1,461 @@
+package hashing
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "github.com/pkg/errors"
+ "github.com/vercel/turbo/cli/internal/encoding/gitoutput"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/globby"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "github.com/vercel/turbo/cli/internal/util"
+)
+
+// PackageDepsOptions are parameters for getting git hashes for a filesystem
+type PackageDepsOptions struct {
+ // PackagePath is the folder path to derive the package dependencies from. This is typically the folder
+ // containing package.json. If omitted, the default value is the current working directory.
+ PackagePath turbopath.AnchoredSystemPath
+
+ InputPatterns []string
+}
+
+// GetPackageDeps builds an object containing git hashes for the files under the specified `packagePath` folder.
+func GetPackageDeps(rootPath turbopath.AbsoluteSystemPath, p *PackageDepsOptions) (map[turbopath.AnchoredUnixPath]string, error) {
+ pkgPath := rootPath.UntypedJoin(p.PackagePath.ToStringDuringMigration())
+ // Add all the checked in hashes.
+ var result map[turbopath.AnchoredUnixPath]string
+
+ // make a copy of the inputPatterns array, because we may be appending to it later.
+ calculatedInputs := make([]string, len(p.InputPatterns))
+ copy(calculatedInputs, p.InputPatterns)
+
+ if len(calculatedInputs) == 0 {
+ gitLsTreeOutput, err := gitLsTree(pkgPath)
+ if err != nil {
+ return nil, fmt.Errorf("could not get git hashes for files in package %s: %w", p.PackagePath, err)
+ }
+ result = gitLsTreeOutput
+
+ // Update the checked in hashes with the current repo status
+ // The paths returned from this call are anchored at the package directory
+ gitStatusOutput, err := gitStatus(pkgPath, calculatedInputs)
+ if err != nil {
+ return nil, fmt.Errorf("Could not get git hashes from git status: %v", err)
+ }
+
+ var filesToHash []turbopath.AnchoredSystemPath
+ for filePath, status := range gitStatusOutput {
+ if status.isDelete() {
+ delete(result, filePath)
+ } else {
+ filesToHash = append(filesToHash, filePath.ToSystemPath())
+ }
+ }
+
+ hashes, err := gitHashObject(turbopath.AbsoluteSystemPathFromUpstream(pkgPath.ToString()), filesToHash)
+ if err != nil {
+ return nil, err
+ }
+
+ // Zip up file paths and hashes together
+ for filePath, hash := range hashes {
+ result[filePath] = hash
+ }
+ } else {
+ // Add in package.json and turbo.json to input patterns. Both file paths are relative to pkgPath
+ //
+ // - package.json is an input because if the `scripts` in
+ // the package.json change (i.e. the tasks that turbo executes), we want
+ // a cache miss, since any existing cache could be invalid.
+ // - turbo.json because it's the definition of the tasks themselves. The root turbo.json
+ // is similarly included in the global hash. This file may not exist in the workspace, but
+ // that is ok, because it will get ignored downstream.
+ calculatedInputs = append(calculatedInputs, "package.json")
+ calculatedInputs = append(calculatedInputs, "turbo.json")
+
+ // The input patterns are relative to the package.
+ // However, we need to change the globbing to be relative to the repo root.
+ // Prepend the package path to each of the input patterns.
+ prefixedInputPatterns := []string{}
+ prefixedExcludePatterns := []string{}
+ for _, pattern := range calculatedInputs {
+ if len(pattern) > 0 && pattern[0] == '!' {
+ rerooted, err := rootPath.PathTo(pkgPath.UntypedJoin(pattern[1:]))
+ if err != nil {
+ return nil, err
+ }
+ prefixedExcludePatterns = append(prefixedExcludePatterns, rerooted)
+ } else {
+ rerooted, err := rootPath.PathTo(pkgPath.UntypedJoin(pattern))
+ if err != nil {
+ return nil, err
+ }
+ prefixedInputPatterns = append(prefixedInputPatterns, rerooted)
+ }
+ }
+ absoluteFilesToHash, err := globby.GlobFiles(rootPath.ToStringDuringMigration(), prefixedInputPatterns, prefixedExcludePatterns)
+
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to resolve input globs %v", calculatedInputs)
+ }
+
+ filesToHash := make([]turbopath.AnchoredSystemPath, len(absoluteFilesToHash))
+ for i, rawPath := range absoluteFilesToHash {
+ relativePathString, err := pkgPath.RelativePathString(rawPath)
+
+ if err != nil {
+ return nil, errors.Wrapf(err, "not relative to package: %v", rawPath)
+ }
+
+ filesToHash[i] = turbopath.AnchoredSystemPathFromUpstream(relativePathString)
+ }
+
+ hashes, err := gitHashObject(turbopath.AbsoluteSystemPathFromUpstream(pkgPath.ToStringDuringMigration()), filesToHash)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed hashing resolved inputs globs")
+ }
+ result = hashes
+ // Note that in this scenario, we don't need to check git status, we're using hash-object directly which
+ // hashes the current state, not state at a commit
+ }
+
+ return result, nil
+}
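+
+// A hypothetical call sketch (not part of this change) showing how the two
+// branches above are selected: with no InputPatterns the git index drives the
+// hash set; with patterns, globbing plus `git hash-object` does.
+//
+//	deps, err := GetPackageDeps(repoRoot, &PackageDepsOptions{
+//		PackagePath:   turbopath.AnchoredSystemPath("my-pkg"),
+//		InputPatterns: []string{"src/**/*.ts"}, // omit to hash everything tracked
+//	})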
+
+func manuallyHashFiles(rootPath turbopath.AbsoluteSystemPath, files []turbopath.AnchoredSystemPath) (map[turbopath.AnchoredUnixPath]string, error) {
+ hashObject := make(map[turbopath.AnchoredUnixPath]string)
+ for _, file := range files {
+ hash, err := fs.GitLikeHashFile(file.ToString())
+ if err != nil {
+ return nil, fmt.Errorf("could not hash file %v. \n%w", file.ToString(), err)
+ }
+
+ hashObject[file.ToUnixPath()] = hash
+ }
+ return hashObject, nil
+}
+
+// GetHashableDeps hashes the list of given files, then returns a map of normalized path to hash
+// this map is suitable for cross-platform caching.
+func GetHashableDeps(rootPath turbopath.AbsoluteSystemPath, files []turbopath.AbsoluteSystemPath) (map[turbopath.AnchoredUnixPath]string, error) {
+ output := make([]turbopath.AnchoredSystemPath, len(files))
+ convertedRootPath := turbopath.AbsoluteSystemPathFromUpstream(rootPath.ToString())
+
+ for index, file := range files {
+ anchoredSystemPath, err := file.RelativeTo(convertedRootPath)
+ if err != nil {
+ return nil, err
+ }
+ output[index] = anchoredSystemPath
+ }
+ hashObject, err := gitHashObject(convertedRootPath, output)
+ if err != nil {
+ manuallyHashedObject, err := manuallyHashFiles(convertedRootPath, output)
+ if err != nil {
+ return nil, err
+ }
+ hashObject = manuallyHashedObject
+ }
+
+ return hashObject, nil
+}
+
+// gitHashObject returns a map of paths to their SHA hashes calculated by passing the paths to `git hash-object`.
+// `git hash-object` expects paths to use Unix separators, even on Windows.
+//
+// Note: paths of files to hash passed to `git hash-object` are processed as relative to the given anchor.
+// For that reason we convert all input paths and make them relative to the anchor prior to passing them
+// to `git hash-object`.
+func gitHashObject(anchor turbopath.AbsoluteSystemPath, filesToHash []turbopath.AnchoredSystemPath) (map[turbopath.AnchoredUnixPath]string, error) {
+ fileCount := len(filesToHash)
+ output := make(map[turbopath.AnchoredUnixPath]string, fileCount)
+
+ if fileCount > 0 {
+ cmd := exec.Command(
+ "git", // Using `git` from $PATH,
+ "hash-object", // hash a file,
+ "--stdin-paths", // using a list of newline-separated paths from stdin.
+ )
+ cmd.Dir = anchor.ToString() // Start at this directory.
+
+ // The functionality for gitHashObject is different enough that it isn't reasonable to
+ // generalize the behavior for `runGitCmd`. In fact, it doesn't even use the `gitoutput`
+ // encoding library, instead relying on its own separate `bufio.Scanner`.
+
+ // We're going to send the list of files in via `stdin`, so we grab that pipe.
+ // This prevents a huge number of encoding issues and shell compatibility issues
+ // before they even start.
+ stdinPipe, stdinPipeError := cmd.StdinPipe()
+ if stdinPipeError != nil {
+ return nil, stdinPipeError
+ }
+
+ // Kick the processing off in a goroutine so while that is doing its thing we can go ahead
+ // and wire up the consumer of `stdout`.
+ go func() {
+ defer util.CloseAndIgnoreError(stdinPipe)
+
+ // `git hash-object` understands all relative paths to be relative to the repository.
+ // This function's result needs to be relative to `rootPath`.
+ // We convert all files to absolute paths and assume that they will be inside of the repository.
+ for _, file := range filesToHash {
+ converted := file.RestoreAnchor(anchor)
+
+ // `git hash-object` expects paths to use Unix separators, even on Windows.
+ // `git hash-object` expects paths to be one per line so we must escape newlines.
+ // In order to understand the escapes, the path must be quoted.
+ // In order to quote the path, the quotes in the path must be escaped.
+ // Other than that, we just write everything with full Unicode.
+ stringPath := converted.ToString()
+ toSlashed := filepath.ToSlash(stringPath)
+ escapedNewLines := strings.ReplaceAll(toSlashed, "\n", "\\n")
+ escapedQuotes := strings.ReplaceAll(escapedNewLines, "\"", "\\\"")
+ prepared := fmt.Sprintf("\"%s\"\n", escapedQuotes)
+ _, err := io.WriteString(stdinPipe, prepared)
+ if err != nil {
+ return
+ }
+ }
+ }()
+
+ // This gives us an io.ReadCloser so that we never have to read the entire input in
+ // at a single time. It is doing stream processing instead of string processing.
+ stdoutPipe, stdoutPipeError := cmd.StdoutPipe()
+ if stdoutPipeError != nil {
+ return nil, fmt.Errorf("failed to read `git hash-object`: %w", stdoutPipeError)
+ }
+
+ startError := cmd.Start()
+ if startError != nil {
+ return nil, fmt.Errorf("failed to read `git hash-object`: %w", startError)
+ }
+
+ // The output of `git hash-object` is a 40-character SHA per input, then a newline.
+ // We need to track the SHA that corresponds to the input file path.
+ index := 0
+ hashes := make([]string, len(filesToHash))
+ scanner := bufio.NewScanner(stdoutPipe)
+
+ // Read the output line by line (newline being the separator) until exhausted.
+ for scanner.Scan() {
+ bytes := scanner.Bytes()
+
+ scanError := scanner.Err()
+ if scanError != nil {
+ return nil, fmt.Errorf("failed to read `git hash-object`: %w", scanError)
+ }
+
+ hashError := gitoutput.CheckObjectName(bytes)
+ if hashError != nil {
+ return nil, fmt.Errorf("failed to read `git hash-object`: %s", "invalid hash received")
+ }
+
+ // Worked, save it off.
+ hashes[index] = string(bytes)
+ index++
+ }
+
+ // Waits until stdout is closed before proceeding.
+ waitErr := cmd.Wait()
+ if waitErr != nil {
+ return nil, fmt.Errorf("failed to read `git hash-object`: %w", waitErr)
+ }
+
+ // Make sure we end up with a matching number of files and hashes.
+ hashCount := len(hashes)
+ if fileCount != hashCount {
+ return nil, fmt.Errorf("failed to read `git hash-object`: %d files %d hashes", fileCount, hashCount)
+ }
+
+ // The API of this method specifies that we return a `map[turbopath.AnchoredUnixPath]string`.
+ for i, hash := range hashes {
+ filePath := filesToHash[i]
+ output[filePath.ToUnixPath()] = hash
+ }
+ }
+
+ return output, nil
+}
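+
+// To make the stdin/stdout protocol above concrete (illustrative values): a
+// file whose name contains a newline is written as one quoted, escaped line,
+// and git answers with one 40-hex-character object name per input path:
+//
+//	stdin:  "/repo/my-pkg/new\nline"
+//	stdout: e69de29bb2d1d6434b8b29ae775ad8c2e48c5391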
+
+// runGitCommand provides boilerplate command handling for `ls-tree`, `ls-files`, and `status`
+// Rather than doing string processing, it does stream processing of `stdout`.
+func runGitCommand(cmd *exec.Cmd, commandName string, handler func(io.Reader) *gitoutput.Reader) ([][]string, error) {
+ stdoutPipe, pipeError := cmd.StdoutPipe()
+ if pipeError != nil {
+ return nil, fmt.Errorf("failed to read `git %s`: %w", commandName, pipeError)
+ }
+
+ startError := cmd.Start()
+ if startError != nil {
+ return nil, fmt.Errorf("failed to read `git %s`: %w", commandName, startError)
+ }
+
+ reader := handler(stdoutPipe)
+ entries, readErr := reader.ReadAll()
+ if readErr != nil {
+ return nil, fmt.Errorf("failed to read `git %s`: %w", commandName, readErr)
+ }
+
+ waitErr := cmd.Wait()
+ if waitErr != nil {
+ return nil, fmt.Errorf("failed to read `git %s`: %w", commandName, waitErr)
+ }
+
+ return entries, nil
+}
+
+// gitLsTree returns a map of paths to their SHA hashes starting at a particular directory
+// that are present in the `git` index at a particular revision.
+func gitLsTree(rootPath turbopath.AbsoluteSystemPath) (map[turbopath.AnchoredUnixPath]string, error) {
+ cmd := exec.Command(
+ "git", // Using `git` from $PATH,
+ "ls-tree", // list the contents of the git index,
+ "-r", // recursively,
+ "-z", // with each file path relative to the invocation directory and \000-terminated,
+ "HEAD", // at this specified version.
+ )
+ cmd.Dir = rootPath.ToString() // Include files only from this directory.
+
+ entries, err := runGitCommand(cmd, "ls-tree", gitoutput.NewLSTreeReader)
+ if err != nil {
+ return nil, err
+ }
+
+ output := make(map[turbopath.AnchoredUnixPath]string, len(entries))
+
+ for _, entry := range entries {
+ lsTreeEntry := gitoutput.LsTreeEntry(entry)
+ output[turbopath.AnchoredUnixPathFromUpstream(lsTreeEntry.GetField(gitoutput.Path))] = lsTreeEntry.GetField(gitoutput.ObjectName)
+ }
+
+ return output, nil
+}
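+
+// For orientation, each NUL-terminated record the LSTreeReader consumes from
+// `git ls-tree -r -z HEAD` has this shape (the object name is what we keep):
+//
+//	<mode> <type> <object name>\t<path>\0
+//	100644 blob e69de29bb2d1d6434b8b29ae775ad8c2e48c5391	my-pkg/package.json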
+
+// getTraversePath gets the distance of the current working directory to the repository root.
+// This is used to convert repo-relative paths to cwd-relative paths.
+//
+// `git rev-parse --show-cdup` always returns Unix paths, even on Windows.
+func getTraversePath(rootPath turbopath.AbsoluteSystemPath) (turbopath.RelativeUnixPath, error) {
+ cmd := exec.Command("git", "rev-parse", "--show-cdup")
+ cmd.Dir = rootPath.ToString()
+
+ traversePath, err := cmd.Output()
+ if err != nil {
+ return "", err
+ }
+
+ trimmedTraversePath := strings.TrimSuffix(string(traversePath), "\n")
+
+ return turbopath.RelativeUnixPathFromUpstream(trimmedTraversePath), nil
+}
+
+// memoizeGetTraversePath wraps getTraversePath so that we don't shell out to
+// git when we already know where a given root sits in the repository.
+// `memoize` is a good candidate for generics.
+func memoizeGetTraversePath() func(turbopath.AbsoluteSystemPath) (turbopath.RelativeUnixPath, error) {
+ cacheMutex := &sync.RWMutex{}
+ cachedResult := map[turbopath.AbsoluteSystemPath]turbopath.RelativeUnixPath{}
+ cachedError := map[turbopath.AbsoluteSystemPath]error{}
+
+ return func(rootPath turbopath.AbsoluteSystemPath) (turbopath.RelativeUnixPath, error) {
+ cacheMutex.RLock()
+ result, resultExists := cachedResult[rootPath]
+ err, errExists := cachedError[rootPath]
+ cacheMutex.RUnlock()
+
+ if resultExists && errExists {
+ return result, err
+ }
+
+ invokedResult, invokedErr := getTraversePath(rootPath)
+ cacheMutex.Lock()
+ cachedResult[rootPath] = invokedResult
+ cachedError[rootPath] = invokedErr
+ cacheMutex.Unlock()
+
+ return invokedResult, invokedErr
+ }
+}
+
+var memoizedGetTraversePath = memoizeGetTraversePath()
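+
+// Usage sketch: callers go through the package-level memoizedGetTraversePath;
+// only the first call per root shells out to git.
+//
+//	cdup, err := memoizedGetTraversePath(repoRoot) // "" when rootPath is the repo root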
+
+// statusCode represents the two-letter status code from `git status` with two "named" fields, x & y.
+// They have different meanings based upon the actual state of the working tree. Using x & y maps
+// to upstream behavior.
+type statusCode struct {
+ x string
+ y string
+}
+
+func (s statusCode) isDelete() bool {
+ return s.x == "D" || s.y == "D"
+}
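+
+// Examples of the two-letter codes as `git status -z` emits them, x being the
+// index side and y the working-tree side: "D " is deleted and staged, " D" is
+// deleted but not staged, "??" is untracked. isDelete treats a "D" on either
+// side as a deletion.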
+
+// gitStatus returns a map of paths to their `git` status code. This can be used to identify what should
+// be done with files that do not currently match what is in the index.
+//
+// Note: `git status -z`'s relative path results are relative to the repository's location.
+// We need to calculate where the repository's location is in order to determine what the full path is
+// before we can return those paths relative to the calling directory, normalizing to the behavior of
+// `ls-files` and `ls-tree`.
+func gitStatus(rootPath turbopath.AbsoluteSystemPath, patterns []string) (map[turbopath.AnchoredUnixPath]statusCode, error) {
+ cmd := exec.Command(
+ "git", // Using `git` from $PATH,
+ "status", // tell me about the status of the working tree,
+ "--untracked-files", // including information about untracked files,
+ "--no-renames", // do not detect renames,
+ "-z", // with each file path relative to the repository root and \000-terminated,
+ "--", // and any additional argument you see is a path, promise.
+ )
+ if len(patterns) == 0 {
+ cmd.Args = append(cmd.Args, ".") // Operate in the current directory instead of the root of the working tree.
+ } else {
+ // FIXME: Globbing is using `git`'s globbing rules which are not consistent with `doublestar``.
+ cmd.Args = append(cmd.Args, patterns...) // Pass in input patterns as arguments.
+ }
+ cmd.Dir = rootPath.ToString() // Include files only from this directory.
+
+ entries, err := runGitCommand(cmd, "status", gitoutput.NewStatusReader)
+ if err != nil {
+ return nil, err
+ }
+
+ output := make(map[turbopath.AnchoredUnixPath]statusCode, len(entries))
+ convertedRootPath := turbopath.AbsoluteSystemPathFromUpstream(rootPath.ToString())
+
+ traversePath, err := memoizedGetTraversePath(convertedRootPath)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, entry := range entries {
+ statusEntry := gitoutput.StatusEntry(entry)
+ // Anchored at repository.
+ pathFromStatus := turbopath.AnchoredUnixPathFromUpstream(statusEntry.GetField(gitoutput.Path))
+ var outputPath turbopath.AnchoredUnixPath
+
+ if len(traversePath) > 0 {
+ repositoryPath := convertedRootPath.Join(traversePath.ToSystemPath())
+ fileFullPath := pathFromStatus.ToSystemPath().RestoreAnchor(repositoryPath)
+
+ relativePath, err := fileFullPath.RelativeTo(convertedRootPath)
+ if err != nil {
+ return nil, err
+ }
+
+ outputPath = relativePath.ToUnixPath()
+ } else {
+ outputPath = pathFromStatus
+ }
+
+ output[outputPath] = statusCode{x: statusEntry.GetField(gitoutput.StatusX), y: statusEntry.GetField(gitoutput.StatusY)}
+ }
+
+ return output, nil
+}
diff --git a/cli/internal/hashing/package_deps_hash_test.go b/cli/internal/hashing/package_deps_hash_test.go
new file mode 100644
index 0000000..8f68d38
--- /dev/null
+++ b/cli/internal/hashing/package_deps_hash_test.go
@@ -0,0 +1,386 @@
+package hashing
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strings"
+ "testing"
+
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "gotest.tools/v3/assert"
+)
+
+func getFixture(id int) turbopath.AbsoluteSystemPath {
+ cwd, _ := os.Getwd()
+ root := turbopath.AbsoluteSystemPath(filepath.VolumeName(cwd) + string(os.PathSeparator))
+ checking := turbopath.AbsoluteSystemPath(cwd)
+
+ for checking != root {
+ fixtureDirectory := checking.Join("fixtures")
+ _, err := os.Stat(fixtureDirectory.ToString())
+ if !errors.Is(err, os.ErrNotExist) {
+ // Found the fixture directory!
+ files, _ := os.ReadDir(fixtureDirectory.ToString())
+
+ // Grab the specified fixture.
+ for _, file := range files {
+ fileName := turbopath.RelativeSystemPath(file.Name())
+ if strings.HasPrefix(fileName.ToString(), fmt.Sprintf("%02d-", id)) {
+ return turbopath.AbsoluteSystemPath(fixtureDirectory.Join(fileName))
+ }
+ }
+ }
+ checking = checking.Join("..")
+ }
+
+ panic("fixtures not found!")
+}
+
+func TestSpecialCharacters(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ return
+ }
+
+ fixturePath := getFixture(1)
+ newlinePath := turbopath.AnchoredUnixPath("new\nline").ToSystemPath()
+ quotePath := turbopath.AnchoredUnixPath("\"quote\"").ToSystemPath()
+ newline := newlinePath.RestoreAnchor(fixturePath)
+ quote := quotePath.RestoreAnchor(fixturePath)
+
+ // Setup
+ one := os.WriteFile(newline.ToString(), []byte{}, 0644)
+ two := os.WriteFile(quote.ToString(), []byte{}, 0644)
+
+ // Cleanup
+ defer func() {
+ one := os.Remove(newline.ToString())
+ two := os.Remove(quote.ToString())
+
+ if one != nil || two != nil {
+ t.Logf("cleanup: %v, %v", one, two)
+ }
+ }()
+
+ // Setup error check: fail loudly rather than silently passing
+ if one != nil || two != nil {
+ t.Fatalf("failed to create test files: %v, %v", one, two)
+ }
+
+ tests := []struct {
+ name string
+ rootPath turbopath.AbsoluteSystemPath
+ filesToHash []turbopath.AnchoredSystemPath
+ want map[turbopath.AnchoredUnixPath]string
+ wantErr bool
+ }{
+ {
+ name: "Quotes",
+ rootPath: fixturePath,
+ filesToHash: []turbopath.AnchoredSystemPath{
+ quotePath,
+ },
+ want: map[turbopath.AnchoredUnixPath]string{
+ quotePath.ToUnixPath(): "e69de29bb2d1d6434b8b29ae775ad8c2e48c5391",
+ },
+ },
+ {
+ name: "Newlines",
+ rootPath: fixturePath,
+ filesToHash: []turbopath.AnchoredSystemPath{
+ newlinePath,
+ },
+ want: map[turbopath.AnchoredUnixPath]string{
+ newlinePath.ToUnixPath(): "e69de29bb2d1d6434b8b29ae775ad8c2e48c5391",
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := gitHashObject(tt.rootPath, tt.filesToHash)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("gitHashObject() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("gitHashObject() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_gitHashObject(t *testing.T) {
+ fixturePath := getFixture(1)
+ traversePath, err := getTraversePath(fixturePath)
+ if err != nil {
+ return
+ }
+
+ tests := []struct {
+ name string
+ rootPath turbopath.AbsoluteSystemPath
+ filesToHash []turbopath.AnchoredSystemPath
+ want map[turbopath.AnchoredUnixPath]string
+ wantErr bool
+ }{
+ {
+ name: "No paths",
+ rootPath: fixturePath,
+ filesToHash: []turbopath.AnchoredSystemPath{},
+ want: map[turbopath.AnchoredUnixPath]string{},
+ },
+ {
+ name: "Absolute paths come back relative to rootPath",
+ rootPath: fixturePath.Join("child"),
+ filesToHash: []turbopath.AnchoredSystemPath{
+ turbopath.AnchoredUnixPath("../root.json").ToSystemPath(),
+ turbopath.AnchoredUnixPath("child.json").ToSystemPath(),
+ turbopath.AnchoredUnixPath("grandchild/grandchild.json").ToSystemPath(),
+ },
+ want: map[turbopath.AnchoredUnixPath]string{
+ "../root.json": "e69de29bb2d1d6434b8b29ae775ad8c2e48c5391",
+ "child.json": "e69de29bb2d1d6434b8b29ae775ad8c2e48c5391",
+ "grandchild/grandchild.json": "e69de29bb2d1d6434b8b29ae775ad8c2e48c5391",
+ },
+ },
+ {
+ name: "Traverse outside of the repo",
+ rootPath: fixturePath.Join(traversePath.ToSystemPath(), ".."),
+ filesToHash: []turbopath.AnchoredSystemPath{
+ turbopath.AnchoredUnixPath("null.json").ToSystemPath(),
+ },
+ want: nil,
+ wantErr: true,
+ },
+ {
+ name: "Nonexistent file",
+ rootPath: fixturePath,
+ filesToHash: []turbopath.AnchoredSystemPath{
+ turbopath.AnchoredUnixPath("nonexistent.json").ToSystemPath(),
+ },
+ want: nil,
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := gitHashObject(tt.rootPath, tt.filesToHash)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("gitHashObject() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("gitHashObject() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_getTraversePath(t *testing.T) {
+ fixturePath := getFixture(1)
+
+ tests := []struct {
+ name string
+ rootPath turbopath.AbsoluteSystemPath
+ want turbopath.RelativeUnixPath
+ wantErr bool
+ }{
+ {
+ name: "From fixture location",
+ rootPath: fixturePath,
+ want: turbopath.RelativeUnixPath("../../../"),
+ wantErr: false,
+ },
+ {
+ name: "Traverse out of git repo",
+ rootPath: fixturePath.UntypedJoin("..", "..", "..", ".."),
+ want: "",
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := getTraversePath(tt.rootPath)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("getTraversePath() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("getTraversePath() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func requireGitCmd(t *testing.T, repoRoot turbopath.AbsoluteSystemPath, args ...string) {
+ t.Helper()
+ cmd := exec.Command("git", args...)
+ cmd.Dir = repoRoot.ToString()
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("git commit failed: %v %v", err, string(out))
+ }
+}
+
+func TestGetPackageDeps(t *testing.T) {
+ // Directory structure:
+ // <root>/
+ // new-root-file <- new file not added to git
+ // my-pkg/
+ // committed-file
+ // deleted-file
+ // uncommitted-file <- new file not added to git
+ // dir/
+ // nested-file
+
+ repoRoot := fs.AbsoluteSystemPathFromUpstream(t.TempDir())
+ myPkgDir := repoRoot.UntypedJoin("my-pkg")
+
+ // create the dir first
+ err := myPkgDir.MkdirAll(0775)
+ assert.NilError(t, err, "CreateDir")
+
+ // create file 1
+ committedFilePath := myPkgDir.UntypedJoin("committed-file")
+ err = committedFilePath.WriteFile([]byte("committed bytes"), 0644)
+ assert.NilError(t, err, "WriteFile")
+
+ // create file 2
+ deletedFilePath := myPkgDir.UntypedJoin("deleted-file")
+ err = deletedFilePath.WriteFile([]byte("delete-me"), 0644)
+ assert.NilError(t, err, "WriteFile")
+
+ // create file 3
+ nestedPath := myPkgDir.UntypedJoin("dir", "nested-file")
+ assert.NilError(t, nestedPath.EnsureDir(), "EnsureDir")
+ assert.NilError(t, nestedPath.WriteFile([]byte("nested"), 0644), "WriteFile")
+
+ // create a package.json
+ packageJSONPath := myPkgDir.UntypedJoin("package.json")
+ err = packageJSONPath.WriteFile([]byte("{}"), 0644)
+ assert.NilError(t, err, "WriteFile")
+
+ // set up git repo and commit all
+ requireGitCmd(t, repoRoot, "init", ".")
+ requireGitCmd(t, repoRoot, "config", "--local", "user.name", "test")
+ requireGitCmd(t, repoRoot, "config", "--local", "user.email", "test@example.com")
+ requireGitCmd(t, repoRoot, "add", ".")
+ requireGitCmd(t, repoRoot, "commit", "-m", "foo")
+
+ // remove a file
+ err = deletedFilePath.Remove()
+ assert.NilError(t, err, "Remove")
+
+ // create another untracked file in git
+ uncommittedFilePath := myPkgDir.UntypedJoin("uncommitted-file")
+ err = uncommittedFilePath.WriteFile([]byte("uncommitted bytes"), 0644)
+ assert.NilError(t, err, "WriteFile")
+
+ // create an untracked file in git up a level
+ rootFilePath := repoRoot.UntypedJoin("new-root-file")
+ err = rootFilePath.WriteFile([]byte("new-root bytes"), 0644)
+ assert.NilError(t, err, "WriteFile")
+
+ tests := []struct {
+ opts *PackageDepsOptions
+ expected map[turbopath.AnchoredUnixPath]string
+ }{
+ // base case: when inputs aren't specified, all file hashes are computed
+ {
+ opts: &PackageDepsOptions{
+ PackagePath: "my-pkg",
+ },
+ expected: map[turbopath.AnchoredUnixPath]string{
+ "committed-file": "3a29e62ea9ba15c4a4009d1f605d391cdd262033",
+ "uncommitted-file": "4e56ad89387e6379e4e91ddfe9872cf6a72c9976",
+ "package.json": "9e26dfeeb6e641a33dae4961196235bdb965b21b",
+ "dir/nested-file": "bfe53d766e64d78f80050b73cd1c88095bc70abb",
+ },
+ },
+ // with inputs, only the specified inputs are hashed
+ {
+ opts: &PackageDepsOptions{
+ PackagePath: "my-pkg",
+ InputPatterns: []string{"uncommitted-file"},
+ },
+ expected: map[turbopath.AnchoredUnixPath]string{
+ "package.json": "9e26dfeeb6e641a33dae4961196235bdb965b21b",
+ "uncommitted-file": "4e56ad89387e6379e4e91ddfe9872cf6a72c9976",
+ },
+ },
+ // inputs with a glob pattern also work
+ {
+ opts: &PackageDepsOptions{
+ PackagePath: "my-pkg",
+ InputPatterns: []string{"**/*-file"},
+ },
+ expected: map[turbopath.AnchoredUnixPath]string{
+ "committed-file": "3a29e62ea9ba15c4a4009d1f605d391cdd262033",
+ "uncommitted-file": "4e56ad89387e6379e4e91ddfe9872cf6a72c9976",
+ "package.json": "9e26dfeeb6e641a33dae4961196235bdb965b21b",
+ "dir/nested-file": "bfe53d766e64d78f80050b73cd1c88095bc70abb",
+ },
+ },
+ // inputs with traversal work
+ {
+ opts: &PackageDepsOptions{
+ PackagePath: "my-pkg",
+ InputPatterns: []string{"../**/*-file"},
+ },
+ expected: map[turbopath.AnchoredUnixPath]string{
+ "../new-root-file": "8906ddcdd634706188bd8ef1c98ac07b9be3425e",
+ "committed-file": "3a29e62ea9ba15c4a4009d1f605d391cdd262033",
+ "uncommitted-file": "4e56ad89387e6379e4e91ddfe9872cf6a72c9976",
+ "package.json": "9e26dfeeb6e641a33dae4961196235bdb965b21b",
+ "dir/nested-file": "bfe53d766e64d78f80050b73cd1c88095bc70abb",
+ },
+ },
+ // inputs with another glob pattern also work
+ {
+ opts: &PackageDepsOptions{
+ PackagePath: "my-pkg",
+ InputPatterns: []string{"**/{uncommitted,committed}-file"},
+ },
+ expected: map[turbopath.AnchoredUnixPath]string{
+ "committed-file": "3a29e62ea9ba15c4a4009d1f605d391cdd262033",
+ "package.json": "9e26dfeeb6e641a33dae4961196235bdb965b21b",
+ "uncommitted-file": "4e56ad89387e6379e4e91ddfe9872cf6a72c9976",
+ },
+ },
+ // inputs with another glob pattern + traversal work
+ {
+ opts: &PackageDepsOptions{
+ PackagePath: "my-pkg",
+ InputPatterns: []string{"../**/{new-root,uncommitted,committed}-file"},
+ },
+ expected: map[turbopath.AnchoredUnixPath]string{
+ "../new-root-file": "8906ddcdd634706188bd8ef1c98ac07b9be3425e",
+ "committed-file": "3a29e62ea9ba15c4a4009d1f605d391cdd262033",
+ "package.json": "9e26dfeeb6e641a33dae4961196235bdb965b21b",
+ "uncommitted-file": "4e56ad89387e6379e4e91ddfe9872cf6a72c9976",
+ },
+ },
+ }
+ for _, tt := range tests {
+ got, err := GetPackageDeps(repoRoot, tt.opts)
+ if err != nil {
+ t.Errorf("GetPackageDeps got error %v", err)
+ continue
+ }
+ assert.DeepEqual(t, got, tt.expected)
+ }
+}
+
+func Test_memoizedGetTraversePath(t *testing.T) {
+ fixturePath := getFixture(1)
+
+ gotOne, _ := memoizedGetTraversePath(fixturePath)
+ gotTwo, _ := memoizedGetTraversePath(fixturePath)
+
+ assert.Check(t, gotOne == gotTwo, "The strings are identical.")
+}
diff --git a/cli/internal/inference/inference.go b/cli/internal/inference/inference.go
new file mode 100644
index 0000000..5d6d34f
--- /dev/null
+++ b/cli/internal/inference/inference.go
@@ -0,0 +1,167 @@
+package inference
+
+import "github.com/vercel/turbo/cli/internal/fs"
+
+// Framework is an identifier for something that we wish to inference against.
+type Framework struct {
+ Slug string
+ EnvMatcher string
+ DependencyMatch matcher
+}
+
+type matcher struct {
+ strategy matchStrategy
+ dependencies []string
+}
+
+type matchStrategy int
+
+const (
+ all matchStrategy = iota + 1
+ some
+)
+
+var _frameworks = []Framework{
+ {
+ Slug: "blitzjs",
+ EnvMatcher: "^NEXT_PUBLIC_",
+ DependencyMatch: matcher{
+ strategy: all,
+ dependencies: []string{"blitz"},
+ },
+ },
+ {
+ Slug: "nextjs",
+ EnvMatcher: "^NEXT_PUBLIC_",
+ DependencyMatch: matcher{
+ strategy: all,
+ dependencies: []string{"next"},
+ },
+ },
+ {
+ Slug: "gatsby",
+ EnvMatcher: "^GATSBY_",
+ DependencyMatch: matcher{
+ strategy: all,
+ dependencies: []string{"gatsby"},
+ },
+ },
+ {
+ Slug: "astro",
+ EnvMatcher: "^PUBLIC_",
+ DependencyMatch: matcher{
+ strategy: all,
+ dependencies: []string{"astro"},
+ },
+ },
+ {
+ Slug: "solidstart",
+ EnvMatcher: "^VITE_",
+ DependencyMatch: matcher{
+ strategy: all,
+ dependencies: []string{"solid-js", "solid-start"},
+ },
+ },
+ {
+ Slug: "vue",
+ EnvMatcher: "^VUE_APP_",
+ DependencyMatch: matcher{
+ strategy: all,
+ dependencies: []string{"@vue/cli-service"},
+ },
+ },
+ {
+ Slug: "sveltekit",
+ EnvMatcher: "^VITE_",
+ DependencyMatch: matcher{
+ strategy: all,
+ dependencies: []string{"@sveltejs/kit"},
+ },
+ },
+ {
+ Slug: "create-react-app",
+ EnvMatcher: "^REACT_APP_",
+ DependencyMatch: matcher{
+ strategy: some,
+ dependencies: []string{"react-scripts", "react-dev-utils"},
+ },
+ },
+ {
+ Slug: "nuxtjs",
+ EnvMatcher: "^NUXT_ENV_",
+ DependencyMatch: matcher{
+ strategy: some,
+ dependencies: []string{"nuxt", "nuxt-edge", "nuxt3", "nuxt3-edge"},
+ },
+ },
+ {
+ Slug: "redwoodjs",
+ EnvMatcher: "^REDWOOD_ENV_",
+ DependencyMatch: matcher{
+ strategy: all,
+ dependencies: []string{"@redwoodjs/core"},
+ },
+ },
+ {
+ Slug: "vite",
+ EnvMatcher: "^VITE_",
+ DependencyMatch: matcher{
+ strategy: all,
+ dependencies: []string{"vite"},
+ },
+ },
+ {
+ Slug: "sanity",
+ EnvMatcher: "^SANITY_STUDIO_",
+ DependencyMatch: matcher{
+ strategy: all,
+ dependencies: []string{"@sanity/cli"},
+ },
+ },
+}
+
+func (m matcher) match(pkg *fs.PackageJSON) bool {
+ deps := pkg.UnresolvedExternalDeps
+ // only check dependencies if we're in a non-monorepo
+ if pkg.Workspaces != nil && len(pkg.Workspaces) == 0 {
+ deps = pkg.Dependencies
+ }
+
+ if m.strategy == all {
+ for _, dependency := range m.dependencies {
+ _, exists := deps[dependency]
+ if !exists {
+ return false
+ }
+ }
+ return true
+ }
+
+ // m.strategy == some
+ for _, dependency := range m.dependencies {
+ _, exists := deps[dependency]
+ if exists {
+ return true
+ }
+ }
+ return false
+}
+
+func (f Framework) match(pkg *fs.PackageJSON) bool {
+ return f.DependencyMatch.match(pkg)
+}
+
+// InferFramework returns a reference to a matched framework
+func InferFramework(pkg *fs.PackageJSON) *Framework {
+ if pkg == nil {
+ return nil
+ }
+
+ for _, candidateFramework := range _frameworks {
+ if candidateFramework.match(pkg) {
+ return &candidateFramework
+ }
+ }
+
+ return nil
+}
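+
+// Hypothetical usage sketch: inference is keyed off the package.json
+// dependency set, so a minimal call looks like
+//
+//	fw := InferFramework(&fs.PackageJSON{
+//		UnresolvedExternalDeps: map[string]string{"next": "*"},
+//	})
+//	if fw != nil {
+//		fmt.Println(fw.Slug, fw.EnvMatcher) // nextjs ^NEXT_PUBLIC_
+//	}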
diff --git a/cli/internal/inference/inference_test.go b/cli/internal/inference/inference_test.go
new file mode 100644
index 0000000..ed82ecc
--- /dev/null
+++ b/cli/internal/inference/inference_test.go
@@ -0,0 +1,97 @@
+package inference
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/vercel/turbo/cli/internal/fs"
+)
+
+func getFrameworkBySlug(slug string) *Framework {
+ for _, framework := range _frameworks {
+ if framework.Slug == slug {
+ return &framework
+ }
+ }
+ panic("that framework doesn't exist")
+}
+
+func TestInferFramework(t *testing.T) {
+ tests := []struct {
+ name string
+ pkg *fs.PackageJSON
+ want *Framework
+ }{
+ {
+ name: "Hello world",
+ pkg: nil,
+ want: nil,
+ },
+ {
+ name: "Empty dependencies",
+ pkg: &fs.PackageJSON{UnresolvedExternalDeps: map[string]string{}},
+ want: nil,
+ },
+ {
+ name: "Finds Blitz",
+ pkg: &fs.PackageJSON{UnresolvedExternalDeps: map[string]string{
+ "blitz": "*",
+ }},
+ want: getFrameworkBySlug("blitzjs"),
+ },
+ {
+ name: "Order is preserved (returns blitz, not next)",
+ pkg: &fs.PackageJSON{UnresolvedExternalDeps: map[string]string{
+ "blitz": "*",
+ "next": "*",
+ }},
+ want: getFrameworkBySlug("blitzjs"),
+ },
+ {
+ name: "Finds next without blitz",
+ pkg: &fs.PackageJSON{UnresolvedExternalDeps: map[string]string{
+ "next": "*",
+ }},
+ want: getFrameworkBySlug("nextjs"),
+ },
+ {
+ name: "match strategy of all works (solid)",
+ pkg: &fs.PackageJSON{UnresolvedExternalDeps: map[string]string{
+ "solid-js": "*",
+ "solid-start": "*",
+ }},
+ want: getFrameworkBySlug("solidstart"),
+ },
+ {
+ name: "match strategy of some works (nuxt)",
+ pkg: &fs.PackageJSON{UnresolvedExternalDeps: map[string]string{
+ "nuxt3": "*",
+ }},
+ want: getFrameworkBySlug("nuxtjs"),
+ },
+ {
+ name: "match strategy of some works (c-r-a)",
+ pkg: &fs.PackageJSON{UnresolvedExternalDeps: map[string]string{
+ "react-scripts": "*",
+ }},
+ want: getFrameworkBySlug("create-react-app"),
+ },
+ {
+ name: "Finds next in non monorepo",
+ pkg: &fs.PackageJSON{
+ Dependencies: map[string]string{
+ "next": "*",
+ },
+ Workspaces: []string{},
+ },
+ want: getFrameworkBySlug("nextjs"),
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := InferFramework(tt.pkg); !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("InferFramework() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/cli/internal/lockfile/berry_lockfile.go b/cli/internal/lockfile/berry_lockfile.go
new file mode 100644
index 0000000..e76f230
--- /dev/null
+++ b/cli/internal/lockfile/berry_lockfile.go
@@ -0,0 +1,709 @@
+package lockfile
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/Masterminds/semver"
+ "github.com/andybalholm/crlf"
+ "github.com/pkg/errors"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "github.com/vercel/turbo/cli/internal/yaml"
+)
+
+var _multipleKeyRegex = regexp.MustCompile(" *, *")
+
+// A tag cannot start with a "v". Go's RE2 syntax has no character-class
+// subtraction, so the leading class spells out the allowed ranges without 'v'.
+var _tagRegex = regexp.MustCompile("^[a-uw-zA-Z0-9._-][a-zA-Z0-9._-]*$")
+
+var _metadataKey = "__metadata"
+
+type _void struct{}
+
+// BerryLockfileEntry package information from yarn lockfile
+// Full Definition at https://github.com/yarnpkg/berry/blob/master/packages/yarnpkg-core/sources/Manifest.ts
+// Only a subset of full definition are written to the lockfile
+type BerryLockfileEntry struct {
+ Version string `yaml:"version"`
+ LanguageName string `yaml:"languageName,omitempty"`
+
+ Dependencies map[string]string `yaml:"dependencies,omitempty"`
+ PeerDependencies map[string]string `yaml:"peerDependencies,omitempty"`
+
+ DependenciesMeta map[string]BerryDependencyMetaEntry `yaml:"dependenciesMeta,omitempty"`
+ PeerDependenciesMeta map[string]BerryDependencyMetaEntry `yaml:"peerDependenciesMeta,omitempty"`
+
+ Bin map[string]string `yaml:"bin,omitempty"`
+
+ LinkType string `yaml:"linkType,omitempty"`
+ Resolution string `yaml:"resolution,omitempty"`
+ Checksum string `yaml:"checksum,omitempty"`
+ Conditions string `yaml:"conditions,omitempty"`
+
+ // Only used for metadata entry
+ CacheKey string `yaml:"cacheKey,omitempty"`
+}
+
+// possibleDescriptors returns a list of descriptors that this entry possibly uses
+func (b *BerryLockfileEntry) possibleDescriptors() []_Descriptor {
+ descriptors := []_Descriptor{}
+ addDescriptor := func(name, version string) {
+ descriptors = append(descriptors, berryPossibleKeys(name, version)...)
+ }
+
+ for dep, version := range b.Dependencies {
+ addDescriptor(dep, version)
+ }
+
+ return descriptors
+}
+
+// BerryLockfile representation of berry lockfile
+type BerryLockfile struct {
+ packages map[_Locator]*BerryLockfileEntry
+ version int
+ cacheKey string
+ // Mapping descriptors (lodash@npm:^4.17.21) to their resolutions (lodash@npm:4.17.21)
+ descriptors map[_Descriptor]_Locator
+ // Mapping regular package locators to patched package locators
+ patches map[_Locator]_Locator
+ // Descriptors that are only used by package extensions
+ packageExtensions map[_Descriptor]_void
+ hasCRLF bool
+}
+
+// BerryDependencyMetaEntry Structure for holding if a package is optional or not
+type BerryDependencyMetaEntry struct {
+ Optional bool `yaml:"optional,omitempty"`
+ Unplugged bool `yaml:"unplugged,omitempty"`
+}
+
+var _ Lockfile = (*BerryLockfile)(nil)
+
+// ResolvePackage Given a package and version returns the key, resolved version, and if it was found
+func (l *BerryLockfile) ResolvePackage(_workspace turbopath.AnchoredUnixPath, name string, version string) (Package, error) {
+ for _, key := range berryPossibleKeys(name, version) {
+ if locator, ok := l.descriptors[key]; ok {
+ entry := l.packages[locator]
+ return Package{
+ Found: true,
+ Key: locator.String(),
+ Version: entry.Version,
+ }, nil
+ }
+ }
+
+ return Package{}, nil
+}
+
+// AllDependencies Given a lockfile key return all (dev/optional/peer) dependencies of that package
+func (l *BerryLockfile) AllDependencies(key string) (map[string]string, bool) {
+ deps := map[string]string{}
+ var locator _Locator
+ if err := locator.parseLocator(key); err != nil {
+ // We should never hit this as we have already vetted all entries in the lockfile
+ // during the creation of the lockfile struct
+ panic(fmt.Sprintf("invalid locator string: %s", key))
+ }
+ entry, ok := l.packages[locator]
+ if !ok {
+ return deps, false
+ }
+
+ for name, version := range entry.Dependencies {
+ deps[name] = version
+ }
+
+ return deps, true
+}
+
+// Subgraph Given a list of lockfile keys returns a Lockfile based off the original one that only contains the packages given
+func (l *BerryLockfile) Subgraph(workspacePackages []turbopath.AnchoredSystemPath, packages []string) (Lockfile, error) {
+ prunedPackages := make(map[_Locator]*BerryLockfileEntry, len(packages))
+ prunedDescriptors := make(map[_Descriptor]_Locator, len(prunedPackages))
+ patches := make(map[_Locator]_Locator, len(l.patches))
+ reverseLookup := l.locatorToDescriptors()
+
+ // add workspace package entries
+ for locator, pkg := range l.packages {
+ if locator.reference == "workspace:." {
+ prunedPackages[locator] = pkg
+ descriptor := _Descriptor{locator._Ident, locator.reference}
+ prunedDescriptors[descriptor] = locator
+ for desc := range reverseLookup[locator] {
+ prunedDescriptors[desc] = locator
+ }
+ }
+ }
+ for _, workspacePackage := range workspacePackages {
+ expectedReference := fmt.Sprintf("workspace:%s", workspacePackage.ToUnixPath().ToString())
+ for locator, pkg := range l.packages {
+ if locator.reference == expectedReference {
+ prunedPackages[locator] = pkg
+ descriptor := _Descriptor{locator._Ident, locator.reference}
+ prunedDescriptors[descriptor] = locator
+ }
+ }
+ }
+
+ for _, key := range packages {
+ var locator _Locator
+ if err := locator.parseLocator(key); err != nil {
+ // We should never hit this as we have already vetted all entries in the lockfile
+ // during the creation of the lockfile struct
+ panic(fmt.Sprintf("invalid locator string: %s", key))
+ }
+ entry, ok := l.packages[locator]
+ if ok {
+ prunedPackages[locator] = entry
+ }
+ // If a package has a patch it should be included in the subgraph
+ patchLocator, ok := l.patches[locator]
+ if ok {
+ patches[locator] = patchLocator
+ prunedPackages[patchLocator] = l.packages[patchLocator]
+ }
+ }
+
+ for _, entry := range prunedPackages {
+ for _, desc := range entry.possibleDescriptors() {
+ locator, ok := l.descriptors[desc]
+ if ok {
+ prunedDescriptors[desc] = locator
+ }
+ }
+ }
+
+ // For each patch we find all descriptors for the primary package and patched package
+ for primaryLocator, patchLocator := range patches {
+ primaryDescriptors := reverseLookup[primaryLocator]
+ patchDescriptors := reverseLookup[patchLocator]
+
+ // For each patch descriptor we extract the primary descriptor that each patch descriptor targets
+ // and check if that descriptor is present in the pruned map and add it if it is present
+ for patch := range patchDescriptors {
+ primaryVersion, _ := patch.primaryVersion()
+ primaryDescriptor := _Descriptor{patch._Ident, primaryVersion}
+ _, isPresent := primaryDescriptors[primaryDescriptor]
+ if !isPresent {
+ panic(fmt.Sprintf("Unable to find primary descriptor %s", &primaryDescriptor))
+ }
+
+ // Only include the patch descriptor if the descriptor it patches survived pruning
+ if _, ok := prunedDescriptors[primaryDescriptor]; ok {
+ prunedDescriptors[patch] = patchLocator
+ }
+ }
+ }
+
+ // Add any descriptors used by package extensions
+ for descriptor := range l.packageExtensions {
+ locator := l.descriptors[descriptor]
+ _, ok := prunedPackages[locator]
+ if ok {
+ prunedDescriptors[descriptor] = locator
+ }
+ }
+
+ // berry only includes a cache key in the lockfile if there are entries with a checksum
+ cacheKey := ""
+ for _, entry := range prunedPackages {
+ if entry.Checksum != "" {
+ cacheKey = l.cacheKey
+ break
+ }
+ }
+
+ return &BerryLockfile{
+ packages: prunedPackages,
+ version: l.version,
+ cacheKey: cacheKey,
+ descriptors: prunedDescriptors,
+ patches: patches,
+ packageExtensions: l.packageExtensions,
+ hasCRLF: l.hasCRLF,
+ }, nil
+}
+
+// Encode encode the lockfile representation and write it to the given writer
+func (l *BerryLockfile) Encode(w io.Writer) error {
+ // Map all resolved packages to the descriptors that match them
+ reverseLookup := l.locatorToDescriptors()
+
+ lockfile := make(map[string]*BerryLockfileEntry, len(l.packages))
+
+ lockfile[_metadataKey] = &BerryLockfileEntry{
+ Version: fmt.Sprintf("%d", l.version),
+ CacheKey: l.cacheKey,
+ }
+
+ for locator, descriptors := range reverseLookup {
+ sortedDescriptors := make([]string, len(descriptors))
+ i := 0
+ for descriptor := range descriptors {
+ sortedDescriptors[i] = descriptor.String()
+ i++
+ }
+ sort.Strings(sortedDescriptors)
+
+ key := strings.Join(sortedDescriptors, ", ")
+
+ entry, ok := l.packages[locator]
+ if !ok {
+ return fmt.Errorf("Unable to find entry for %s", &locator)
+ }
+
+ lockfile[key] = entry
+ }
+
+ if l.hasCRLF {
+ w = crlf.NewWriter(w)
+ }
+
+ _, err := io.WriteString(w, `# This file is generated by running "yarn install" inside your project.
+# Manual changes might be lost - proceed with caution!
+`)
+ if err != nil {
+ return errors.Wrap(err, "unable to write header to lockfile")
+ }
+
+ return _writeBerryLockfile(w, lockfile)
+}
+
+// Invert the descriptor to locator map
+func (l *BerryLockfile) locatorToDescriptors() map[_Locator]map[_Descriptor]_void {
+ reverseLookup := make(map[_Locator]map[_Descriptor]_void, len(l.packages))
+ for descriptor, locator := range l.descriptors {
+ descriptors, ok := reverseLookup[locator]
+ if !ok {
+ reverseLookup[locator] = map[_Descriptor]_void{descriptor: {}}
+ } else {
+ descriptors[descriptor] = _void{}
+ }
+ }
+
+ return reverseLookup
+}
+
+// Patches return a list of patches used in the lockfile
+func (l *BerryLockfile) Patches() []turbopath.AnchoredUnixPath {
+ patches := []turbopath.AnchoredUnixPath{}
+
+ for _, patchLocator := range l.patches {
+ patchPath, isPatch := patchLocator.patchPath()
+
+ if isPatch && !strings.HasPrefix(patchPath, "~") && !_builtinRegexp.MatchString(patchPath) {
+ patches = append(patches, turbopath.AnchoredUnixPath(patchPath))
+ }
+ }
+
+ if len(patches) == 0 {
+ return nil
+ }
+
+ return patches
+}
+
+// DecodeBerryLockfile Takes the contents of a berry lockfile and returns a struct representation
+func DecodeBerryLockfile(contents []byte) (*BerryLockfile, error) {
+ var packages map[string]*BerryLockfileEntry
+
+ hasCRLF := bytes.HasSuffix(contents, _crlfLiteral)
+ err := yaml.Unmarshal(contents, &packages)
+ if err != nil {
+ return &BerryLockfile{}, fmt.Errorf("could not unmarshal lockfile: %w", err)
+ }
+
+ metadata, ok := packages[_metadataKey]
+ if !ok {
+ return nil, errors.New("No __metadata entry found when decoding yarn.lock")
+ }
+ version, err := strconv.Atoi(metadata.Version)
+ if err != nil {
+ return nil, errors.Wrap(err, "yarn lockfile version isn't valid integer")
+ }
+ delete(packages, _metadataKey)
+
+ locatorToPackage := map[_Locator]*BerryLockfileEntry{}
+ descriptorToLocator := map[_Descriptor]_Locator{}
+ // A map from packages to their patch entries
+ patches := map[_Locator]_Locator{}
+
+ for key, data := range packages {
+ var locator _Locator
+ if err := locator.parseLocator(data.Resolution); err != nil {
+ return nil, errors.Wrap(err, "unable to parse entry")
+ }
+
+ if _, isPatch := locator.patchPath(); isPatch {
+ // A patch will have the same identifier and version allowing us to construct the non-patch entry
+ originalLocator := _Locator{locator._Ident, fmt.Sprintf("npm:%s", data.Version)}
+ patches[originalLocator] = locator
+ }
+
+ // Before storing cacheKey set it to "" so we know it's invalid
+ data.CacheKey = ""
+
+ locatorToPackage[locator] = data
+
+ // All descriptors that resolve to a single locator are grouped into a single key
+ for _, entry := range _multipleKeyRegex.Split(key, -1) {
+ descriptor := _Descriptor{}
+ if err := descriptor.parseDescriptor(entry); err != nil {
+ return nil, errors.Wrap(err, "Bad entry key found")
+ }
+
+ // Before lockfile version 6 descriptors could be missing the npm protocol
+ if version <= 6 && descriptor.versionRange != "*" {
+ _, err := semver.NewConstraint(descriptor.versionRange)
+ if err == nil || _tagRegex.MatchString(descriptor.versionRange) {
+ descriptor.versionRange = fmt.Sprintf("npm:%s", descriptor.versionRange)
+ }
+ }
+
+ descriptorToLocator[descriptor] = locator
+ }
+ }
+
+ // Build up list of all descriptors in the file
+ packageExtensions := make(map[_Descriptor]_void, len(descriptorToLocator))
+ for descriptor := range descriptorToLocator {
+ if descriptor.protocol() == "npm" {
+ packageExtensions[descriptor] = _void{}
+ }
+ }
+ // Remove any that are found in the lockfile entries
+ for _, entry := range packages {
+ for _, descriptor := range entry.possibleDescriptors() {
+ delete(packageExtensions, descriptor)
+ }
+ }
+
+ lockfile := BerryLockfile{
+ packages: locatorToPackage,
+ version: version,
+ cacheKey: metadata.CacheKey,
+ descriptors: descriptorToLocator,
+ patches: patches,
+ packageExtensions: packageExtensions,
+ hasCRLF: hasCRLF,
+ }
+ return &lockfile, nil
+}
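+
+// Round-trip sketch (assuming `contents` holds the bytes of a yarn.lock):
+//
+//	lf, err := DecodeBerryLockfile(contents)
+//	if err != nil {
+//		return err
+//	}
+//	var buf bytes.Buffer
+//	err = lf.Encode(&buf) // re-emits entries sorted, with __metadata first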
+
+// GlobalChange checks if there are any differences between lockfiles that would completely invalidate
+// the cache.
+func (l *BerryLockfile) GlobalChange(other Lockfile) bool {
+ otherBerry, ok := other.(*BerryLockfile)
+ return !ok ||
+ l.cacheKey != otherBerry.cacheKey ||
+ l.version != otherBerry.version ||
+ // This is probably overly cautious, but getting it correct will be hard
+ !reflect.DeepEqual(l.patches, otherBerry.patches)
+}
+
+// Fields shared between _Locator and _Descriptor
+type _Ident struct {
+ // Scope of package without leading @
+ scope string
+ // Name of package
+ name string
+}
+
+type _Locator struct {
+ _Ident
+ // Resolved version e.g. 1.2.3
+ reference string
+}
+
+type _Descriptor struct {
+ _Ident
+ // Version range e.g. ^1.0.0
+ // Can be prefixed with the protocol e.g. npm, workspace, patch,
+ versionRange string
+}
+
+func (i _Ident) String() string {
+ if i.scope == "" {
+ return i.name
+ }
+ return fmt.Sprintf("@%s/%s", i.scope, i.name)
+}
+
+var _locatorRegexp = regexp.MustCompile("^(?:@([^/]+?)/)?([^/]+?)(?:@(.+))$")
+
+func (l *_Locator) parseLocator(data string) error {
+ matches := _locatorRegexp.FindStringSubmatch(data)
+ if len(matches) != 4 {
+ return fmt.Errorf("%s is not a valid locator string", data)
+ }
+ l.scope = matches[1]
+ l.name = matches[2]
+ l.reference = matches[3]
+
+ return nil
+}
+
+func (l *_Locator) String() string {
+ if l.scope == "" {
+ return fmt.Sprintf("%s@%s", l.name, l.reference)
+ }
+ return fmt.Sprintf("@%s/%s@%s", l.scope, l.name, l.reference)
+}
+
+var _builtinRegexp = regexp.MustCompile("^builtin<([^>]+)>$")
+
+func (l *_Locator) patchPath() (string, bool) {
+ if strings.HasPrefix(l.reference, "patch:") {
+ patchFileIndex := strings.Index(l.reference, "#")
+ paramIndex := strings.LastIndex(l.reference, "::")
+ if patchFileIndex == -1 || paramIndex == -1 {
+ // TODO: return an error instead of panicking on a malformed patch reference
+ panic("Unable to extract patch file path from lockfile entry")
+ }
+ patchPath := strings.TrimPrefix(l.reference[patchFileIndex+1:paramIndex], "./")
+
+ return patchPath, true
+ }
+
+ return "", false
+}
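+
+// For example, a locator reference of the form
+//
+//	patch:lodash@npm%3A4.17.21#./.yarn/patches/lodash.patch::version=4.17.21
+//
+// yields the patch path ".yarn/patches/lodash.patch" (the leading "./" is
+// trimmed so the path stays anchored at the repo root).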
+
+var _descriptorRegexp = regexp.MustCompile("^(?:@([^/]+?)/)?([^/]+?)(?:@(.+))?$")
+
+func (d *_Descriptor) parseDescriptor(data string) error {
+ matches := _descriptorRegexp.FindStringSubmatch(data)
+ if len(matches) != 4 {
+ return fmt.Errorf("%s is not a valid descriptor string", data)
+ }
+
+ d.scope = matches[1]
+ d.name = matches[2]
+ d.versionRange = matches[3]
+
+ return nil
+}
+
+// If the descriptor is for a patch it will return the primary descriptor that it patches
+func (d *_Descriptor) primaryVersion() (string, bool) {
+ if !strings.HasPrefix(d.versionRange, "patch:") {
+ return "", false
+ }
+ patchFileIndex := strings.Index(d.versionRange, "#")
+ versionRangeIndex := strings.Index(d.versionRange, "@")
+ if patchFileIndex < 0 || versionRangeIndex < 0 {
+ panic("Patch reference is missing required markers")
+ }
+ // The ':' following npm protocol gets encoded as '%3A' in the patch string
+ version := strings.Replace(d.versionRange[versionRangeIndex+1:patchFileIndex], "%3A", ":", 1)
+ if !strings.HasPrefix(version, "npm:") {
+ version = fmt.Sprintf("npm:%s", version)
+ }
+
+ return version, true
+}
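+
+// Worked example: for a patch descriptor versionRange such as
+//
+//	patch:lodash@npm%3A4.17.21#./.yarn/patches/lodash.patch
+//
+// the slice between '@' and '#' is "npm%3A4.17.21", which decodes to
+// "npm:4.17.21", the primary descriptor version that the patch targets.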
+
+// Returns the protocol of the descriptor
+func (d *_Descriptor) protocol() string {
+ if index := strings.Index(d.versionRange, ":"); index > 0 {
+ return d.versionRange[:index]
+ }
+ return ""
+}
+
+func (d *_Descriptor) String() string {
+ if d.scope == "" {
+ return fmt.Sprintf("%s@%s", d.name, d.versionRange)
+ }
+ return fmt.Sprintf("@%s/%s@%s", d.scope, d.name, d.versionRange)
+}
+
+func berryPossibleKeys(name string, version string) []_Descriptor {
+ makeDescriptor := func(protocol string) _Descriptor {
+ descriptorString := fmt.Sprintf("%s@%s%s", name, protocol, version)
+ var descriptor _Descriptor
+ if err := descriptor.parseDescriptor(descriptorString); err != nil {
+ panic("Generated invalid descriptor")
+ }
+ return descriptor
+ }
+ return []_Descriptor{
+ makeDescriptor(""),
+ makeDescriptor("npm:"),
+ makeDescriptor("file:"),
+ makeDescriptor("workspace:"),
+ makeDescriptor("yarn:"),
+ }
+}
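+
+// Concretely, berryPossibleKeys("lodash", "^4.17.21") produces the candidate
+// descriptors tried against the lockfile, in order:
+//
+//	lodash@^4.17.21
+//	lodash@npm:^4.17.21
+//	lodash@file:^4.17.21
+//	lodash@workspace:^4.17.21
+//	lodash@yarn:^4.17.21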
+
+func _writeBerryLockfile(w io.Writer, lockfile map[string]*BerryLockfileEntry) error {
+ keys := make([]string, len(lockfile))
+ i := 0
+ for key := range lockfile {
+ keys[i] = key
+ i++
+ }
+
+ // The __metadata key gets hoisted to the top
+ sort.Slice(keys, func(i, j int) bool {
+ if keys[i] == _metadataKey {
+ return true
+ } else if keys[j] == _metadataKey {
+ return false
+ }
+ return keys[i] < keys[j]
+ })
+
+ for _, key := range keys {
+ value, ok := lockfile[key]
+ if !ok {
+ panic(fmt.Sprintf("Unable to find entry for %s", key))
+ }
+
+ wrappedKey := _wrapString(key)
+ wrappedValue := _stringifyEntry(*value, 1)
+
+ var keyPart string
+ if len(wrappedKey) > 1024 {
+ keyPart = fmt.Sprintf("? %s\n:", wrappedKey)
+ } else {
+ keyPart = fmt.Sprintf("%s:", wrappedKey)
+ }
+
+ _, err := io.WriteString(w, fmt.Sprintf("\n%s\n%s\n", keyPart, wrappedValue))
+ if err != nil {
+ return errors.Wrap(err, "unable to write to lockfile")
+ }
+ }
+
+ return nil
+}
+
+var _simpleStringPattern = regexp.MustCompile("^[^-?:,\\][{}#&*!|>'\"%@` \t\r\n]([ \t]*[^,\\][{}:# \t\r\n])*$")
+
+func _wrapString(str string) string {
+ if !_simpleStringPattern.MatchString(str) {
+ var b bytes.Buffer
+ encoder := json.NewEncoder(&b)
+ encoder.SetEscapeHTML(false)
+ err := encoder.Encode(str)
+ if err != nil {
+ panic("Unexpected error wrapping key")
+ }
+
+ return strings.TrimRight(b.String(), "\n")
+ }
+ return str
+}
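+
+// Example of the rule above: a plain scalar key such as `commander` is
+// emitted as-is, while a typical descriptor key such as `ms@npm:2.1.2`
+// contains ':' and is therefore JSON-quoted to "ms@npm:2.1.2" (HTML escaping
+// is disabled so characters like '<' survive verbatim).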
+
+func _stringifyEntry(entry BerryLockfileEntry, indentLevel int) string {
+ lines := []string{}
+ addLine := func(field, value string, inline bool) {
+ var line string
+ if inline {
+ line = fmt.Sprintf(" %s: %s", field, value)
+ } else {
+ line = fmt.Sprintf(" %s:\n%s", field, value)
+ }
+ lines = append(lines, line)
+ }
+
+ if entry.Version != "" {
+ addLine("version", _wrapString(entry.Version), true)
+ }
+ if entry.Resolution != "" {
+ addLine("resolution", _wrapString(entry.Resolution), true)
+ }
+ if len(entry.Dependencies) > 0 {
+ addLine("dependencies", _stringifyDeps(entry.Dependencies), false)
+ }
+ if len(entry.PeerDependencies) > 0 {
+ addLine("peerDependencies", _stringifyDeps(entry.PeerDependencies), false)
+ }
+ if len(entry.DependenciesMeta) > 0 {
+ addLine("dependenciesMeta", _stringifyDepsMeta(entry.DependenciesMeta), false)
+ }
+ if len(entry.PeerDependenciesMeta) > 0 {
+ addLine("peerDependenciesMeta", _stringifyDepsMeta(entry.PeerDependenciesMeta), false)
+ }
+
+ if len(entry.Bin) > 0 {
+ addLine("bin", _stringifyDeps(entry.Bin), false)
+ }
+ if entry.Checksum != "" {
+ addLine("checksum", _wrapString(entry.Checksum), true)
+ }
+ if entry.Conditions != "" {
+ addLine("conditions", _wrapString(entry.Conditions), true)
+ }
+ if entry.LanguageName != "" {
+ addLine("languageName", _wrapString(entry.LanguageName), true)
+ }
+ if entry.LinkType != "" {
+ addLine("linkType", _wrapString(entry.LinkType), true)
+ }
+ if entry.CacheKey != "" {
+ addLine("cacheKey", _wrapString(entry.CacheKey), true)
+ }
+
+ return strings.Join(lines, "\n")
+}
+
+func _stringifyDeps(deps map[string]string) string {
+ keys := make([]string, len(deps))
+ i := 0
+ for key := range deps {
+ keys[i] = key
+ i++
+ }
+ sort.Strings(keys)
+
+ lines := make([]string, 0, len(deps))
+ addLine := func(name, version string) {
+ lines = append(lines, fmt.Sprintf(" %s: %s", _wrapString(name), _wrapString(version)))
+ }
+
+ for _, name := range keys {
+ version := deps[name]
+ addLine(name, version)
+ }
+
+ return strings.Join(lines, "\n")
+}
+
+func _stringifyDepsMeta(meta map[string]BerryDependencyMetaEntry) string {
+ keys := make([]string, len(meta))
+ i := 0
+ for key := range meta {
+ keys[i] = key
+ i++
+ }
+ sort.Strings(keys)
+
+ lines := make([]string, 0, len(meta))
+ addLine := func(name string, key string) {
+ lines = append(lines, fmt.Sprintf(" %s:\n %s: true", _wrapString(name), key))
+ }
+
+ for _, name := range keys {
+ optional := meta[name]
+ if optional.Optional {
+ addLine(name, "optional")
+ }
+ if optional.Unplugged {
+ addLine(name, "unplugged")
+ }
+ }
+
+ return strings.Join(lines, "\n")
+}
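+
+// _exampleStringifyEntry is an illustrative sketch (not part of the original
+// change) showing how the helpers above compose: a minimal entry renders into
+// the same block shape found in yarn.lock, with scalar fields written inline.
+func _exampleStringifyEntry() string {
+ entry := BerryLockfileEntry{
+ Version: "4.0.0",
+ Resolution: "js-tokens@npm:4.0.0",
+ LanguageName: "node",
+ LinkType: "hard",
+ }
+ return _stringifyEntry(entry, 1)
+}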
diff --git a/cli/internal/lockfile/berry_lockfile_test.go b/cli/internal/lockfile/berry_lockfile_test.go
new file mode 100644
index 0000000..afcbe46
--- /dev/null
+++ b/cli/internal/lockfile/berry_lockfile_test.go
@@ -0,0 +1,273 @@
+package lockfile
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "gotest.tools/v3/assert"
+)
+
+func getBerryLockfile(t *testing.T, filename string) *BerryLockfile {
+ content, err := getFixture(t, filename)
+ if err != nil {
+ t.Fatal(err)
+ }
+ lockfile, err := DecodeBerryLockfile(content)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return lockfile
+}
+
+func Test_DecodingBerryLockfile(t *testing.T) {
+ lockfile := getBerryLockfile(t, "berry.lock")
+ assert.Equal(t, lockfile.version, 6)
+ assert.Equal(t, lockfile.cacheKey, "8c0")
+}
+
+func Test_ResolvePackage(t *testing.T) {
+ lockfile := getBerryLockfile(t, "berry.lock")
+
+ type Case struct {
+ name string
+ semver string
+ key string
+ version string
+ found bool
+ }
+
+ cases := map[string]Case{
+ "can resolve '||' semver syntax": {
+ name: "js-tokens",
+ semver: "^3.0.0 || ^4.0.0",
+ key: "js-tokens@npm:4.0.0",
+ version: "4.0.0",
+ found: true,
+ },
+ "handles packages with multiple descriptors": {
+ name: "js-tokens",
+ semver: "^4.0.0",
+ key: "js-tokens@npm:4.0.0",
+ version: "4.0.0",
+ found: true,
+ },
+ "doesn't find nonexistent descriptors": {
+ name: "@babel/code-frame",
+ semver: "^7.12.11",
+ found: false,
+ },
+ "handles workspace packages": {
+ name: "eslint-config-custom",
+ semver: "*",
+ key: "eslint-config-custom@workspace:packages/eslint-config-custom",
+ version: "0.0.0-use.local",
+ found: true,
+ },
+ }
+
+ for testName, testCase := range cases {
+ pkg, err := lockfile.ResolvePackage("some-pkg", testCase.name, testCase.semver)
+ assert.NilError(t, err)
+ if testCase.found {
+ assert.Equal(t, pkg.Key, testCase.key, testName)
+ assert.Equal(t, pkg.Version, testCase.version, testName)
+ }
+ assert.Equal(t, pkg.Found, testCase.found, testName)
+ }
+}
+
+func Test_AllDependencies(t *testing.T) {
+ lockfile := getBerryLockfile(t, "berry.lock")
+
+ pkg, err := lockfile.ResolvePackage("some-pkg", "react-dom", "18.2.0")
+ assert.NilError(t, err)
+ assert.Assert(t, pkg.Found, "expected to find react-dom")
+ deps, found := lockfile.AllDependencies(pkg.Key)
+ assert.Assert(t, found, "expected lockfile key for react-dom to be present")
+ assert.Equal(t, len(deps), 2, "expected to find all react-dom direct dependencies")
+ for pkgName, version := range deps {
+ pkg, err := lockfile.ResolvePackage("some-pkg", pkgName, version)
+ assert.NilError(t, err, "error finding %s@%s", pkgName, version)
+ assert.Assert(t, pkg.Found, "expected to find lockfile entry for %s@%s", pkgName, version)
+ }
+}
+
+func Test_BerryPatchList(t *testing.T) {
+ lockfile := getBerryLockfile(t, "berry.lock")
+
+ var locator _Locator
+ if err := locator.parseLocator("resolve@npm:2.0.0-next.4"); err != nil {
+ t.Error(err)
+ }
+
+ patchLocator, ok := lockfile.patches[locator]
+ assert.Assert(t, ok, "Expected to find patch locator")
+ patch, ok := lockfile.packages[patchLocator]
+ assert.Assert(t, ok, "Expected to find patch")
+ assert.Equal(t, patch.Version, "2.0.0-next.4")
+}
+
+func Test_PackageExtensions(t *testing.T) {
+ lockfile := getBerryLockfile(t, "berry.lock")
+
+ expectedExtensions := map[_Descriptor]_void{}
+ for _, extension := range []string{"@babel/types@npm:^7.8.3", "lodash@npm:4.17.21"} {
+ var extensionDescriptor _Descriptor
+ if err := extensionDescriptor.parseDescriptor(extension); err != nil {
+ t.Error(err)
+ }
+ expectedExtensions[extensionDescriptor] = _void{}
+ }
+
+ assert.DeepEqual(t, lockfile.packageExtensions, expectedExtensions)
+}
+
+func Test_StringifyMetadata(t *testing.T) {
+ metadata := BerryLockfileEntry{
+ Version: "6",
+ CacheKey: "8c0",
+ }
+ lockfile := map[string]*BerryLockfileEntry{"__metadata": &metadata}
+
+ var b bytes.Buffer
+ err := _writeBerryLockfile(&b, lockfile)
+ assert.Assert(t, err == nil)
+ assert.Equal(t, b.String(), `
+__metadata:
+ version: 6
+ cacheKey: 8c0
+`)
+}
+
+func Test_BerryRoundtrip(t *testing.T) {
+ content, err := getFixture(t, "berry.lock")
+ if err != nil {
+ t.Error(err)
+ }
+ lockfile, err := DecodeBerryLockfile(content)
+ if err != nil {
+ t.Error(err)
+ }
+
+ var b bytes.Buffer
+ if err := lockfile.Encode(&b); err != nil {
+ t.Error(err)
+ }
+
+ assert.Equal(t, b.String(), string(content))
+}
+
+func Test_PatchPathExtraction(t *testing.T) {
+ type Case struct {
+ locator string
+ patchPath string
+ isPatch bool
+ }
+ cases := []Case{
+ {
+ locator: "lodash@patch:lodash@npm%3A4.17.21#./.yarn/patches/lodash-npm-4.17.21-6382451519.patch::version=4.17.21&hash=2c6e9e&locator=berry-patch%40workspace%3A.",
+ patchPath: ".yarn/patches/lodash-npm-4.17.21-6382451519.patch",
+ isPatch: true,
+ },
+ {
+ locator: "lodash@npm:4.17.21",
+ isPatch: false,
+ },
+ {
+ locator: "resolve@patch:resolve@npm%3A2.0.0-next.4#~builtin<compat/resolve>::version=2.0.0-next.4&hash=07638b",
+ patchPath: "~builtin<compat/resolve>",
+ isPatch: true,
+ },
+ }
+
+ for _, testCase := range cases {
+ var locator _Locator
+ err := locator.parseLocator(testCase.locator)
+ if err != nil {
+ t.Error(err)
+ }
+ patchPath, isPatch := locator.patchPath()
+ assert.Equal(t, isPatch, testCase.isPatch, locator)
+ assert.Equal(t, patchPath, testCase.patchPath, locator)
+ }
+}
+
+func Test_PatchPrimaryVersion(t *testing.T) {
+ // TODO: write tests to make sure extraction actually works
+ type TestCase struct {
+ descriptor string
+ version string
+ isPatch bool
+ }
+ testCases := []TestCase{
+ {
+ descriptor: "lodash@patch:lodash@npm%3A4.17.21#./.yarn/patches/lodash-npm-4.17.21-6382451519.patch::locator=berry-patch%40workspace%3A.",
+ version: "npm:4.17.21",
+ isPatch: true,
+ },
+ {
+ descriptor: "typescript@patch:typescript@^4.5.2#~builtin<compat/typescript>",
+ version: "npm:^4.5.2",
+ isPatch: true,
+ },
+ {
+ descriptor: "react@npm:18.2.0",
+ isPatch: false,
+ },
+ }
+
+ for _, testCase := range testCases {
+ var d _Descriptor
+ err := d.parseDescriptor(testCase.descriptor)
+ assert.NilError(t, err, testCase.descriptor)
+ actual, isPatch := d.primaryVersion()
+ assert.Equal(t, isPatch, testCase.isPatch, testCase)
+ if testCase.isPatch {
+ assert.Equal(t, actual, testCase.version, testCase.descriptor)
+ }
+ }
+}
+
+func Test_BerryPruneDescriptors(t *testing.T) {
+ lockfile := getBerryLockfile(t, "minimal-berry.lock")
+ prunedLockfile, err := lockfile.Subgraph(
+ []turbopath.AnchoredSystemPath{
+ turbopath.AnchoredUnixPath("packages/a").ToSystemPath(),
+ turbopath.AnchoredUnixPath("packages/c").ToSystemPath(),
+ },
+ []string{"lodash@npm:4.17.21"},
+ )
+ if err != nil {
+ t.Error(err)
+ }
+ lockfileA := prunedLockfile.(*BerryLockfile)
+
+ prunedLockfile, err = lockfile.Subgraph(
+ []turbopath.AnchoredSystemPath{
+ turbopath.AnchoredUnixPath("packages/b").ToSystemPath(),
+ turbopath.AnchoredUnixPath("packages/c").ToSystemPath(),
+ },
+ []string{"lodash@npm:4.17.21"},
+ )
+ if err != nil {
+ t.Error(err)
+ }
+ lockfileB := prunedLockfile.(*BerryLockfile)
+
+ lodashIdent := _Ident{name: "lodash"}
+ lodashA := _Descriptor{lodashIdent, "npm:^4.17.0"}
+ lodashB := _Descriptor{lodashIdent, "npm:^3.0.0 || ^4.0.0"}
+
+ lodashEntryA, hasLodashA := lockfileA.descriptors[lodashA]
+ lodashEntryB, hasLodashB := lockfileB.descriptors[lodashB]
+
+ assert.Assert(t, hasLodashA, "Expected lockfile a to have descriptor used by a")
+ assert.Assert(t, hasLodashB, "Expected lockfile b to have descriptor used by b")
+ assert.DeepEqual(t, lodashEntryA.reference, lodashEntryB.reference)
+
+ _, lockfileAHasB := lockfileA.descriptors[lodashB]
+ _, lockfileBHasA := lockfileB.descriptors[lodashA]
+ assert.Assert(t, !lockfileAHasB, "Expected lockfile a not to have descriptor used by b")
+ assert.Assert(t, !lockfileBHasA, "Expected lockfile b not to have descriptor used by a")
+}
diff --git a/cli/internal/lockfile/lockfile.go b/cli/internal/lockfile/lockfile.go
new file mode 100644
index 0000000..bb36eda
--- /dev/null
+++ b/cli/internal/lockfile/lockfile.go
@@ -0,0 +1,135 @@
+// Package lockfile provides the lockfile interface and implementations for the various package managers
+package lockfile
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+
+ mapset "github.com/deckarep/golang-set"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "golang.org/x/sync/errgroup"
+)
+
+// Lockfile Interface for general operations that work across all lockfiles
+type Lockfile interface {
+ // ResolvePackage Given a workspace, the name of a package it imports, and the requested
+ // version, returns the key, resolved version, and whether it was found
+ ResolvePackage(workspacePath turbopath.AnchoredUnixPath, name string, version string) (Package, error)
+ // AllDependencies Given a lockfile key returns all (dev/optional/peer) dependencies of that package
+ AllDependencies(key string) (map[string]string, bool)
+ // Subgraph Given a list of workspaces and lockfile keys returns a Lockfile based off the original one that only contains the packages given
+ Subgraph(workspacePackages []turbopath.AnchoredSystemPath, packages []string) (Lockfile, error)
+ // Encode encodes the lockfile representation and writes it to the given writer
+ Encode(w io.Writer) error
+ // Patches returns a list of patches used in the lockfile
+ Patches() []turbopath.AnchoredUnixPath
+ // GlobalChange checks if there are any differences between lockfiles that would completely invalidate
+ // the cache.
+ GlobalChange(other Lockfile) bool
+}
+
+// IsNil checks if lockfile is nil
+func IsNil(l Lockfile) bool {
+ return l == nil || reflect.ValueOf(l).IsNil()
+}
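+
+// pruneAndEncode is an illustrative sketch (not part of the original change)
+// of the intended call pattern for the interface above; the workspace path
+// and lockfile key are hypothetical.
+func pruneAndEncode(lf Lockfile, w io.Writer) error {
+ if IsNil(lf) {
+ return fmt.Errorf("no lockfile to prune")
+ }
+ pruned, err := lf.Subgraph(
+ []turbopath.AnchoredSystemPath{turbopath.AnchoredUnixPath("packages/a").ToSystemPath()},
+ []string{"/foo/1.0.0"},
+ )
+ if err != nil {
+ return err
+ }
+ return pruned.Encode(w)
+}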
+
+// Package represents a possibly-resolved package entry in a lockfile
+type Package struct {
+ // Key used to lookup a package in the lockfile
+ Key string
+ // The resolved version of a package as it appears in the lockfile
+ Version string
+ // Set to true iff Key and Version are set
+ Found bool
+}
+
+// ByKey sort package structures by key
+type ByKey []Package
+
+func (p ByKey) Len() int {
+ return len(p)
+}
+
+func (p ByKey) Swap(i, j int) {
+ p[i], p[j] = p[j], p[i]
+}
+
+func (p ByKey) Less(i, j int) bool {
+ return p[i].Key+p[i].Version < p[j].Key+p[j].Version
+}
+
+var _ (sort.Interface) = (*ByKey)(nil)
+
+// TransitiveClosure computes the set of all lockfile keys reachable from a workspace's unresolved dependencies
+func TransitiveClosure(
+ workspaceDir turbopath.AnchoredUnixPath,
+ unresolvedDeps map[string]string,
+ lockFile Lockfile,
+) (mapset.Set, error) {
+ if lf, ok := lockFile.(*NpmLockfile); ok {
+ // We special-case npm as the Rust implementation has its own dep crawl
+ return npmTransitiveDeps(lf, workspaceDir, unresolvedDeps)
+ }
+ return transitiveClosure(workspaceDir, unresolvedDeps, lockFile)
+}
+
+func transitiveClosure(
+ workspaceDir turbopath.AnchoredUnixPath,
+ unresolvedDeps map[string]string,
+ lockFile Lockfile,
+) (mapset.Set, error) {
+ if IsNil(lockFile) {
+ return nil, fmt.Errorf("No lockfile available to do analysis on")
+ }
+
+ resolvedPkgs := mapset.NewSet()
+ lockfileEg := &errgroup.Group{}
+
+ transitiveClosureHelper(lockfileEg, workspaceDir, lockFile, unresolvedDeps, resolvedPkgs)
+
+ if err := lockfileEg.Wait(); err != nil {
+ return nil, err
+ }
+
+ return resolvedPkgs, nil
+}
+
+func transitiveClosureHelper(
+ wg *errgroup.Group,
+ workspacePath turbopath.AnchoredUnixPath,
+ lockfile Lockfile,
+ unresolvedDirectDeps map[string]string,
+ resolvedDeps mapset.Set,
+) {
+ for directDepName, unresolvedVersion := range unresolvedDirectDeps {
+ // Capture the range variables for use in the goroutine below
+ directDepName := directDepName
+ unresolvedVersion := unresolvedVersion
+ wg.Go(func() error {
+ lockfilePkg, err := lockfile.ResolvePackage(workspacePath, directDepName, unresolvedVersion)
+
+ if err != nil {
+ return err
+ }
+
+ if !lockfilePkg.Found || resolvedDeps.Contains(lockfilePkg) {
+ return nil
+ }
+
+ resolvedDeps.Add(lockfilePkg)
+
+ allDeps, ok := lockfile.AllDependencies(lockfilePkg.Key)
+
+ if !ok {
+ panic(fmt.Sprintf("Unable to find entry for %s", lockfilePkg.Key))
+ }
+
+ if len(allDeps) > 0 {
+ transitiveClosureHelper(wg, workspacePath, lockfile, allDeps, resolvedDeps)
+ }
+
+ return nil
+ })
+ }
+}
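+
+// countTransitiveDeps is an illustrative sketch (not part of the original
+// change): it runs the crawl above for a single hypothetical workspace and
+// dependency and reports how many lockfile keys were reached.
+func countTransitiveDeps(lf Lockfile) (int, error) {
+ closure, err := TransitiveClosure(
+ turbopath.AnchoredUnixPath("packages/a"),
+ map[string]string{"lodash": "^4.17.21"},
+ lf,
+ )
+ if err != nil {
+ return 0, err
+ }
+ return closure.Cardinality(), nil
+}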
diff --git a/cli/internal/lockfile/lockfile_test.go b/cli/internal/lockfile/lockfile_test.go
new file mode 100644
index 0000000..7c666cc
--- /dev/null
+++ b/cli/internal/lockfile/lockfile_test.go
@@ -0,0 +1,25 @@
+package lockfile
+
+import (
+ "sort"
+ "testing"
+
+ "gotest.tools/v3/assert"
+)
+
+func Test_ByKeySortIsStable(t *testing.T) {
+ packagesA := []Package{
+ {"/foo/1.2.3", "1.2.3", true},
+ {"/baz/1.0.9", "/baz/1.0.9", true},
+ {"/bar/1.2.3", "1.2.3", true},
+ {"/foo/1.2.3", "/foo/1.2.3", true},
+ {"/baz/1.0.9", "1.0.9", true},
+ }
+ packagesB := make([]Package, len(packagesA))
+ copy(packagesB, packagesA)
+
+ sort.Sort(ByKey(packagesA))
+ sort.Sort(ByKey(packagesB))
+
+ assert.DeepEqual(t, packagesA, packagesB)
+}
diff --git a/cli/internal/lockfile/npm_lockfile.go b/cli/internal/lockfile/npm_lockfile.go
new file mode 100644
index 0000000..67cd32a
--- /dev/null
+++ b/cli/internal/lockfile/npm_lockfile.go
@@ -0,0 +1,107 @@
+package lockfile
+
+import (
+ "encoding/json"
+ "io"
+
+ mapset "github.com/deckarep/golang-set"
+ "github.com/vercel/turbo/cli/internal/ffi"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+// NpmLockfile representation of package-lock.json
+type NpmLockfile struct {
+ // We just store the entire lockfile in memory and pass it for every call
+ contents []byte
+}
+
+// ResolvePackage Given a workspace, the name of a package it imports, and the requested version, returns the key, resolved version, and whether it was found
+func (l *NpmLockfile) ResolvePackage(workspacePath turbopath.AnchoredUnixPath, name string, version string) (Package, error) {
+ // This is only used when calculating the transitive deps, and the Rust
+ // implementation does that calculation on the Rust side.
+ panic("Unreachable")
+}
+
+// AllDependencies Given a lockfile key returns all (dev/optional/peer) dependencies of that package
+func (l *NpmLockfile) AllDependencies(key string) (map[string]string, bool) {
+ // This is only used when calculating the transitive deps, and the Rust
+ // implementation does that calculation on the Rust side.
+ panic("Unreachable")
+}
+
+// Subgraph Given a list of workspaces and lockfile keys returns a Lockfile based off the original one that only contains the packages given
+func (l *NpmLockfile) Subgraph(workspacePackages []turbopath.AnchoredSystemPath, packages []string) (Lockfile, error) {
+ workspaces := make([]string, len(workspacePackages))
+ for i, workspace := range workspacePackages {
+ workspaces[i] = workspace.ToUnixPath().ToString()
+ }
+ contents, err := ffi.NpmSubgraph(l.contents, workspaces, packages)
+ if err != nil {
+ return nil, err
+ }
+ return &NpmLockfile{contents: contents}, nil
+}
+
+// Encode the lockfile representation and write it to the given writer
+func (l *NpmLockfile) Encode(w io.Writer) error {
+ _, err := w.Write(l.contents)
+ return err
+}
+
+// Patches returns a list of patches used in the lockfile
+func (l *NpmLockfile) Patches() []turbopath.AnchoredUnixPath {
+ return nil
+}
+
+// GlobalChange checks if there are any differences between lockfiles that would completely invalidate
+// the cache.
+func (l *NpmLockfile) GlobalChange(other Lockfile) bool {
+ o, ok := other.(*NpmLockfile)
+ if !ok {
+ return true
+ }
+
+ // We just grab the few global fields and check if they've changed
+ type minimalJSON struct {
+ LockfileVersion float64 `json:"lockfileVersion"`
+ Requires bool `json:"requires"`
+ }
+ var self minimalJSON
+ var otherJSON minimalJSON
+ if err := json.Unmarshal(o.contents, &otherJSON); err != nil {
+ return true
+ }
+ if err := json.Unmarshal(l.contents, &self); err != nil {
+ return true
+ }
+
+ return self.LockfileVersion != otherJSON.LockfileVersion ||
+ self.Requires != otherJSON.Requires
+}
+
+var _ (Lockfile) = (*NpmLockfile)(nil)
+
+// DecodeNpmLockfile Parse contents of package-lock.json into NpmLockfile
+func DecodeNpmLockfile(contents []byte) (Lockfile, error) {
+ return &NpmLockfile{contents: contents}, nil
+}
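+
+// _npmRoundtrip is an illustrative sketch (not part of the original change):
+// because NpmLockfile keeps the raw package-lock.json bytes, decoding and
+// re-encoding is a byte-for-byte passthrough.
+func _npmRoundtrip(contents []byte, w io.Writer) error {
+ lf, err := DecodeNpmLockfile(contents)
+ if err != nil {
+ return err
+ }
+ return lf.Encode(w)
+}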
+
+func npmTransitiveDeps(lockfile *NpmLockfile, workspacePath turbopath.AnchoredUnixPath, unresolvedDeps map[string]string) (mapset.Set, error) {
+ pkgDir := workspacePath.ToString()
+
+ packages, err := ffi.NpmTransitiveDeps(lockfile.contents, pkgDir, unresolvedDeps)
+ if err != nil {
+ return nil, err
+ }
+
+ deps := make([]interface{}, len(packages))
+ for i, pkg := range packages {
+ deps[i] = Package{
+ Found: pkg.Found,
+ Key: pkg.Key,
+ Version: pkg.Version,
+ }
+ }
+
+ return mapset.NewSetFromSlice(deps), nil
+}
diff --git a/cli/internal/lockfile/pnpm_lockfile.go b/cli/internal/lockfile/pnpm_lockfile.go
new file mode 100644
index 0000000..a51b36e
--- /dev/null
+++ b/cli/internal/lockfile/pnpm_lockfile.go
@@ -0,0 +1,579 @@
+package lockfile
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strings"
+
+ "github.com/pkg/errors"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "github.com/vercel/turbo/cli/internal/yaml"
+)
+
+// PnpmLockfile Go representation of the contents of 'pnpm-lock.yaml'
+// Reference https://github.com/pnpm/pnpm/blob/main/packages/lockfile-types/src/index.ts
+type PnpmLockfile struct {
+ isV6 bool
+ // Before 6.0 version was stored as a float, but as of 6.0+ it's a string
+ Version interface{} `yaml:"lockfileVersion"`
+ NeverBuiltDependencies []string `yaml:"neverBuiltDependencies,omitempty"`
+ OnlyBuiltDependencies []string `yaml:"onlyBuiltDependencies,omitempty"`
+ Overrides map[string]string `yaml:"overrides,omitempty"`
+ PackageExtensionsChecksum string `yaml:"packageExtensionsChecksum,omitempty"`
+ PatchedDependencies map[string]PatchFile `yaml:"patchedDependencies,omitempty"`
+ Importers map[string]ProjectSnapshot `yaml:"importers"`
+ Packages map[string]PackageSnapshot `yaml:"packages,omitempty"`
+ Time map[string]string `yaml:"time,omitempty"`
+}
+
+var _ Lockfile = (*PnpmLockfile)(nil)
+
+// ProjectSnapshot Snapshot used to represent projects in the importers section
+type ProjectSnapshot struct {
+ // For v6 the map is nil and gets omitted;
+ // for pre-v6 an empty (non-nil) map must still be serialized
+ Specifiers SpecifierMap `yaml:"specifiers,omitempty"`
+
+ // The values of these maps will be string if lockfileVersion <6 or DependencyV6 if 6+
+ Dependencies map[string]yaml.Node `yaml:"dependencies,omitempty"`
+ OptionalDependencies map[string]yaml.Node `yaml:"optionalDependencies,omitempty"`
+ DevDependencies map[string]yaml.Node `yaml:"devDependencies,omitempty"`
+
+ DependenciesMeta map[string]DependenciesMeta `yaml:"dependenciesMeta,omitempty"`
+ PublishDirectory string `yaml:"publishDirectory,omitempty"`
+}
+
+// SpecifierMap is a type wrapper that overrides IsZero for Golang's map
+// to match the behavior that pnpm expects
+type SpecifierMap map[string]string
+
+// IsZero is used to check whether an object is zero to
+// determine whether it should be omitted when marshaling
+// with the omitempty flag.
+func (m SpecifierMap) IsZero() bool {
+ return m == nil
+}
+
+var _ (yaml.IsZeroer) = (*SpecifierMap)(nil)
+
+// DependencyV6 are dependency entries for lockfileVersion 6.0+
+type DependencyV6 struct {
+ Specifier string `yaml:"specifier"`
+ Version string `yaml:"version"`
+}
+
+// Will try to find a resolution in any of the dependency fields
+func (p *ProjectSnapshot) findResolution(dependency string) (DependencyV6, bool, error) {
+ var getResolution func(yaml.Node) (DependencyV6, bool, error)
+ if len(p.Specifiers) > 0 {
+ getResolution = func(node yaml.Node) (DependencyV6, bool, error) {
+ specifier, ok := p.Specifiers[dependency]
+ if !ok {
+ return DependencyV6{}, false, nil
+ }
+ var version string
+ if err := node.Decode(&version); err != nil {
+ return DependencyV6{}, false, err
+ }
+ return DependencyV6{Version: version, Specifier: specifier}, true, nil
+ }
+ } else {
+ getResolution = func(node yaml.Node) (DependencyV6, bool, error) {
+ var resolution DependencyV6
+ if err := node.Decode(&resolution); err != nil {
+ return DependencyV6{}, false, err
+ }
+ return resolution, true, nil
+ }
+ }
+ if resolution, ok := p.Dependencies[dependency]; ok {
+ return getResolution(resolution)
+ }
+ if resolution, ok := p.DevDependencies[dependency]; ok {
+ return getResolution(resolution)
+ }
+ if resolution, ok := p.OptionalDependencies[dependency]; ok {
+ return getResolution(resolution)
+ }
+ return DependencyV6{}, false, nil
+}
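+
+// Illustrative sketch (not part of the original change): the two shapes that
+// findResolution handles. Pre-v6 keeps the specifier in a separate map and
+// stores a bare version string per dependency; v6 inlines both into one node:
+//
+// v5: specifiers: {foo: ^1.0.0}, dependencies: {foo: 1.0.0}
+// v6: dependencies: {foo: {specifier: ^1.0.0, version: 1.0.0}}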
+
+// PackageSnapshot Snapshot used to represent a package in the packages section
+type PackageSnapshot struct {
+ Resolution PackageResolution `yaml:"resolution,flow"`
+ ID string `yaml:"id,omitempty"`
+
+ // only needed for packages that aren't in npm
+ Name string `yaml:"name,omitempty"`
+ Version string `yaml:"version,omitempty"`
+
+ Engines struct {
+ Node string `yaml:"node"`
+ NPM string `yaml:"npm,omitempty"`
+ } `yaml:"engines,omitempty,flow"`
+ CPU []string `yaml:"cpu,omitempty,flow"`
+ Os []string `yaml:"os,omitempty,flow"`
+ LibC []string `yaml:"libc,omitempty"`
+
+ Deprecated string `yaml:"deprecated,omitempty"`
+ HasBin bool `yaml:"hasBin,omitempty"`
+ Prepare bool `yaml:"prepare,omitempty"`
+ RequiresBuild bool `yaml:"requiresBuild,omitempty"`
+
+ BundledDependencies []string `yaml:"bundledDependencies,omitempty"`
+ PeerDependencies map[string]string `yaml:"peerDependencies,omitempty"`
+ PeerDependenciesMeta map[string]struct {
+ Optional bool `yaml:"optional"`
+ } `yaml:"peerDependenciesMeta,omitempty"`
+
+ Dependencies map[string]string `yaml:"dependencies,omitempty"`
+ OptionalDependencies map[string]string `yaml:"optionalDependencies,omitempty"`
+
+ TransitivePeerDependencies []string `yaml:"transitivePeerDependencies,omitempty"`
+ Dev bool `yaml:"dev"`
+ Optional bool `yaml:"optional,omitempty"`
+ Patched bool `yaml:"patched,omitempty"`
+}
+
+// PackageResolution Various resolution strategies for packages
+type PackageResolution struct {
+ Type string `yaml:"type,omitempty"`
+ // For npm or tarball
+ Integrity string `yaml:"integrity,omitempty"`
+
+ // For tarball
+ Tarball string `yaml:"tarball,omitempty"`
+
+ // For local directory
+ Dir string `yaml:"directory,omitempty"`
+
+ // For git repo
+ Repo string `yaml:"repo,omitempty"`
+ Commit string `yaml:"commit,omitempty"`
+}
+
+// PatchFile represent a patch applied to a package
+type PatchFile struct {
+ Path string `yaml:"path"`
+ Hash string `yaml:"hash"`
+}
+
+func isSupportedVersion(version interface{}) error {
+ switch version.(type) {
+ case string:
+ if version == "6.0" {
+ return nil
+ }
+ case float64:
+ if version == 5.3 || version == 5.4 {
+ return nil
+ }
+ default:
+ return fmt.Errorf("lockfileVersion of type %T is invalid", version)
+ }
+ supportedVersions := []string{"5.3", "5.4", "6.0"}
+ return errors.Errorf("Unable to generate pnpm-lock.yaml with lockfileVersion: %s. Supported lockfile versions are %v", version, supportedVersions)
+}
+
+// DependenciesMeta metadata for dependencies
+type DependenciesMeta struct {
+ Injected bool `yaml:"injected,omitempty"`
+ Node string `yaml:"node,omitempty"`
+ Patch string `yaml:"patch,omitempty"`
+}
+
+// DecodePnpmLockfile parse a pnpm lockfile
+func DecodePnpmLockfile(contents []byte) (*PnpmLockfile, error) {
+ var lockfile PnpmLockfile
+ err := yaml.Unmarshal(contents, &lockfile)
+ if err != nil {
+ return nil, errors.Wrap(err, "could not unmarshal lockfile: ")
+ }
+
+ switch lockfile.Version.(type) {
+ case float64:
+ lockfile.isV6 = false
+ case string:
+ lockfile.isV6 = true
+ default:
+ return nil, fmt.Errorf("Unexpected type of lockfileVersion: '%T', expected float64 or string", lockfile.Version)
+ }
+ return &lockfile, nil
+}
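+
+// Illustrative sketch (not part of the original change): the version sniffing
+// above keys off YAML scalar types. pnpm quotes lockfileVersion from 6.0
+// onwards, so it decodes as a string; older lockfiles decode it as a float64.
+//
+// v5, _ := DecodePnpmLockfile([]byte("lockfileVersion: 5.4\nimporters: {}\n"))
+// v6, _ := DecodePnpmLockfile([]byte("lockfileVersion: '6.0'\nimporters: {}\n"))
+// // v5.isV6 == false, v6.isV6 == true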
+
+// ResolvePackage Given a package and version returns the key, resolved version, and whether it was found
+func (p *PnpmLockfile) ResolvePackage(workspacePath turbopath.AnchoredUnixPath, name string, version string) (Package, error) {
+ // Check if version is a key
+ if _, ok := p.Packages[version]; ok {
+ return Package{Key: version, Version: p.extractVersion(version), Found: true}, nil
+ }
+
+ resolvedVersion, ok, err := p.resolveSpecifier(workspacePath, name, version)
+ if !ok || err != nil {
+ return Package{}, err
+ }
+ key := p.formatKey(name, resolvedVersion)
+ if entry, ok := (p.Packages)[key]; ok {
+ var version string
+ if entry.Version != "" {
+ version = entry.Version
+ } else {
+ version = resolvedVersion
+ }
+ return Package{Key: key, Version: version, Found: true}, nil
+ }
+
+ if entry, ok := p.Packages[resolvedVersion]; ok {
+ var version string
+ if entry.Version != "" {
+ version = entry.Version
+ } else {
+ // If there isn't a version field in the entry then the version is
+ // encoded in the key and we can omit the name from the version.
+ version = p.extractVersion(resolvedVersion)
+ }
+ return Package{Key: resolvedVersion, Version: version, Found: true}, nil
+ }
+
+ return Package{}, nil
+}
+
+// AllDependencies Given a lockfile key returns all (dev/optional/peer) dependencies of that package
+func (p *PnpmLockfile) AllDependencies(key string) (map[string]string, bool) {
+ deps := map[string]string{}
+ entry, ok := p.Packages[key]
+ if !ok {
+ return deps, false
+ }
+
+ for name, version := range entry.Dependencies {
+ deps[name] = version
+ }
+
+ for name, version := range entry.OptionalDependencies {
+ deps[name] = version
+ }
+
+ // Peer dependencies appear in the Dependencies map resolved
+
+ return deps, true
+}
+
+// Subgraph Given a list of workspaces and lockfile keys returns a Lockfile based off the original one that only contains the packages given
+func (p *PnpmLockfile) Subgraph(workspacePackages []turbopath.AnchoredSystemPath, packages []string) (Lockfile, error) {
+ lockfilePackages := make(map[string]PackageSnapshot, len(packages))
+ for _, key := range packages {
+ entry, ok := p.Packages[key]
+ if ok {
+ lockfilePackages[key] = entry
+ } else {
+ return nil, fmt.Errorf("Unable to find lockfile entry for %s", key)
+ }
+ }
+
+ importers, err := pruneImporters(p.Importers, workspacePackages)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, importer := range importers {
+ for dependency, meta := range importer.DependenciesMeta {
+ if meta.Injected {
+ resolution, ok, err := importer.findResolution(dependency)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Unable to decode reference to %s", dependency)
+ }
+ if !ok {
+ return nil, fmt.Errorf("Unable to find %s other than reference in dependenciesMeta", dependency)
+ }
+ entry, ok := p.Packages[resolution.Version]
+ if !ok {
+ return nil, fmt.Errorf("Unable to find package entry for %s", resolution)
+ }
+
+ lockfilePackages[resolution.Version] = entry
+ }
+ }
+ }
+
+ lockfile := PnpmLockfile{
+ Version: p.Version,
+ Packages: lockfilePackages,
+ NeverBuiltDependencies: p.NeverBuiltDependencies,
+ OnlyBuiltDependencies: p.OnlyBuiltDependencies,
+ Overrides: p.Overrides,
+ PackageExtensionsChecksum: p.PackageExtensionsChecksum,
+ PatchedDependencies: p.prunePatches(p.PatchedDependencies, lockfilePackages),
+ Importers: importers,
+ }
+
+ return &lockfile, nil
+}
+
+// Prune importers down to the root importer plus those for the given workspace packages
+func pruneImporters(importers map[string]ProjectSnapshot, workspacePackages []turbopath.AnchoredSystemPath) (map[string]ProjectSnapshot, error) {
+ prunedImporters := map[string]ProjectSnapshot{}
+
+ // Copy over root level importer
+ if root, ok := importers["."]; ok {
+ prunedImporters["."] = root
+ }
+
+ for _, workspacePath := range workspacePackages {
+ workspace := workspacePath.ToUnixPath().ToString()
+ importer, ok := importers[workspace]
+
+ // If a workspace has no dependencies *and* it is only depended on by the
+ // workspace root it will not show up as an importer.
+ if ok {
+ prunedImporters[workspace] = importer
+ }
+
+ }
+
+ return prunedImporters, nil
+}
+
+func (p *PnpmLockfile) prunePatches(patches map[string]PatchFile, packages map[string]PackageSnapshot) map[string]PatchFile {
+ if len(patches) == 0 {
+ return nil
+ }
+
+ patchPackages := make(map[string]PatchFile, len(patches))
+ for dependency := range packages {
+ if p.isV6 {
+ // Internally pnpm partially converts the new path format to the old
+ // format in order for existing parsing logic to work.
+ dependency = convertNewToOldDepPath(dependency)
+ }
+ dp := parseDepPath(dependency)
+ patchKey := fmt.Sprintf("%s@%s", dp.name, dp.version)
+
+ if patch, ok := patches[patchKey]; ok && patch.Hash == dp.patchHash() {
+ patchPackages[patchKey] = patch
+ }
+ }
+
+ return patchPackages
+}
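+
+// Illustrative sketch (not part of the original change): how a pruned v6 key
+// such as /lodash@4.17.21(patch_hash=lgum37zgng4nfkynzh3cs7wdeq) is matched
+// back to its patch. The key is converted to the old path format, parsed into
+// name "lodash" and version "4.17.21" to build the patch key
+// "lodash@4.17.21", and the patch is kept only when its hash matches the
+// key's patch_hash suffix.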
+
+// Encode encodes the lockfile representation and writes it to the given writer
+func (p *PnpmLockfile) Encode(w io.Writer) error {
+ if err := isSupportedVersion(p.Version); err != nil {
+ return err
+ }
+
+ encoder := yaml.NewEncoder(w)
+ encoder.SetIndent(2)
+
+ if err := encoder.Encode(p); err != nil {
+ return errors.Wrap(err, "unable to encode pnpm lockfile")
+ }
+ return nil
+}
+
+// Patches returns a list of patches used in the lockfile
+func (p *PnpmLockfile) Patches() []turbopath.AnchoredUnixPath {
+ if len(p.PatchedDependencies) == 0 {
+ return nil
+ }
+ patches := make([]string, len(p.PatchedDependencies))
+ i := 0
+ for _, patch := range p.PatchedDependencies {
+ patches[i] = patch.Path
+ i++
+ }
+ sort.Strings(patches)
+
+ patchPaths := make([]turbopath.AnchoredUnixPath, len(p.PatchedDependencies))
+ for i, patch := range patches {
+ patchPaths[i] = turbopath.AnchoredUnixPath(patch)
+ }
+ return patchPaths
+}
+
+// GlobalChange checks if there are any differences between lockfiles that would completely invalidate
+// the cache.
+func (p *PnpmLockfile) GlobalChange(other Lockfile) bool {
+ o, ok := other.(*PnpmLockfile)
+ return !ok ||
+ p.Version != o.Version ||
+ p.PackageExtensionsChecksum != o.PackageExtensionsChecksum ||
+ !reflect.DeepEqual(p.Overrides, o.Overrides) ||
+ !reflect.DeepEqual(p.PatchedDependencies, o.PatchedDependencies)
+}
+
+func (p *PnpmLockfile) resolveSpecifier(workspacePath turbopath.AnchoredUnixPath, name string, specifier string) (string, bool, error) {
+ pnpmWorkspacePath := workspacePath.ToString()
+ if pnpmWorkspacePath == "" {
+ // For pnpm, the root is named "."
+ pnpmWorkspacePath = "."
+ }
+ importer, ok := p.Importers[pnpmWorkspacePath]
+ if !ok {
+ return "", false, fmt.Errorf("no workspace '%v' found in lockfile", workspacePath)
+ }
+ resolution, ok, err := importer.findResolution(name)
+ if err != nil {
+ return "", false, err
+ }
+ // Verify that the specifier in the importer matches the one given
+ if !ok {
+ // Check if the specifier is already a resolved version
+ if _, ok := p.Packages[p.formatKey(name, specifier)]; ok {
+ return specifier, true, nil
+ }
+ return "", false, fmt.Errorf("Unable to find resolved version for %s@%s in %s", name, specifier, workspacePath)
+ }
+ overrideSpecifier := p.applyOverrides(name, specifier)
+ if resolution.Specifier != overrideSpecifier {
+ if _, ok := p.Packages[p.formatKey(name, overrideSpecifier)]; ok {
+ return overrideSpecifier, true, nil
+ }
+ return "", false, nil
+ }
+ return resolution.Version, true, nil
+}
+
+// Apply pnpm overrides to specifier, see https://pnpm.io/package_json#pnpmoverrides
+// Note this is barebones support and only supports global overrides;
+// future work will support semver ranges and selector filtering.
+func (p *PnpmLockfile) applyOverrides(name string, specifier string) string {
+ if len(p.Overrides) > 0 {
+ if override, ok := p.Overrides[name]; ok {
+ return override
+ }
+ }
+ return specifier
+}
+
+// Formatter of the lockfile key given a package name and version
+func (p *PnpmLockfile) formatKey(name string, version string) string {
+ if p.isV6 {
+ return fmt.Sprintf("/%s@%s", name, version)
+ }
+ return fmt.Sprintf("/%s/%s", name, version)
+}
+
+// Extracts version from lockfile key
+func (p *PnpmLockfile) extractVersion(key string) string {
+ if p.isV6 {
+ key = convertNewToOldDepPath(key)
+ }
+ dp := parseDepPath(key)
+ if dp.peerSuffix != "" {
+ sep := ""
+ if !p.isV6 {
+ sep = "_"
+ }
+ return fmt.Sprintf("%s%s%s", dp.version, sep, dp.peerSuffix)
+ }
+ return dp.version
+}
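+
+// Illustrative sketch (not part of the original change): the two key formats
+// extractVersion handles and the versions it produces:
+//
+// v5: /foo/1.0.0_bar@1.0.0 => "1.0.0_bar@1.0.0"
+// v6: /foo@1.0.0(bar@1.0.0) => "1.0.0(bar@1.0.0)"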
+
+// Parsed representation of a pnpm lockfile key
+type depPath struct {
+ host string
+ name string
+ version string
+ peerSuffix string
+}
+
+func parseDepPath(dependency string) depPath {
+ // See https://github.com/pnpm/pnpm/blob/185ab01adfc927ea23d2db08a14723bf51d0025f/packages/dependency-path/src/index.ts#L96
+ var dp depPath
+ parts := strings.Split(dependency, "/")
+ shift := func() string {
+ if len(parts) == 0 {
+ return ""
+ }
+ val := parts[0]
+ parts = parts[1:]
+ return val
+ }
+
+ isAbsolute := dependency[0] != '/'
+ // Skip leading '/'
+ if !isAbsolute {
+ shift()
+ }
+
+ if isAbsolute {
+ dp.host = shift()
+ }
+
+ if len(parts) == 0 {
+ return dp
+ }
+
+ if strings.HasPrefix(parts[0], "@") {
+ dp.name = fmt.Sprintf("%s/%s", shift(), shift())
+ } else {
+ dp.name = shift()
+ }
+
+ version := strings.Join(parts, "/")
+ if len(version) > 0 {
+ var peerSuffixIndex int
+ if strings.Contains(version, "(") && strings.HasSuffix(version, ")") {
+ // v6 encodes peer deps using (name@version)
+ // and also encodes patches using (patch_hash=hash)
+ peerSuffixIndex = strings.Index(version, "(")
+ dp.peerSuffix = version[peerSuffixIndex:]
+ dp.version = version[0:peerSuffixIndex]
+ } else {
+ // pre v6 uses _ to separate version from peer dependencies
+ // if a dependency is patched and has peer dependencies its version will
+ // be encoded as version_patchHash_peerDepsHash
+ peerSuffixIndex = strings.Index(version, "_")
+ if peerSuffixIndex != -1 {
+ dp.peerSuffix = version[peerSuffixIndex+1:]
+ dp.version = version[0:peerSuffixIndex]
+ }
+ }
+ if peerSuffixIndex == -1 {
+ dp.version = version
+ }
+ }
+
+ return dp
+}
+
+var _patchHashKey = "patch_hash="
+
+func (d depPath) patchHash() string {
+ if strings.HasPrefix(d.peerSuffix, "(") && strings.HasSuffix(d.peerSuffix, ")") {
+ for _, part := range strings.Split(d.peerSuffix, "(") {
+ if strings.HasPrefix(part, _patchHashKey) {
+ // drop the enclosing ')'
+ return part[len(_patchHashKey) : len(part)-1]
+ }
+ }
+ // no patch entry found
+ return ""
+ }
+
+ sepIndex := strings.Index(d.peerSuffix, "_")
+ if sepIndex != -1 {
+ return d.peerSuffix[:sepIndex]
+ }
+ // if a dependency just has a single suffix we can't tell if it's a patch or peer hash
+ // return it in case it's a patch hash
+ return d.peerSuffix
+}
+
+// Used to convert v6's dep path of /name@version to v5's /name/version
+// See https://github.com/pnpm/pnpm/blob/185ab01adfc927ea23d2db08a14723bf51d0025f/lockfile/lockfile-file/src/experiments/inlineSpecifiersLockfileConverters.ts#L162
+func convertNewToOldDepPath(newPath string) string {
+ if len(newPath) > 2 && !strings.Contains(newPath[2:], "@") {
+ return newPath
+ }
+ searchStartIndex := strings.Index(newPath, "/@") + 2
+ index := strings.Index(newPath[searchStartIndex:], "@") + searchStartIndex
+ if strings.Contains(newPath, "(") && index > strings.Index(newPath, "(") {
+ return newPath
+ }
+ return fmt.Sprintf("%s/%s", newPath[0:index], newPath[index+1:])
+}
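+
+// Illustrative sketch (not part of the original change): the conversion only
+// rewrites the separator between name and version, leaving peer and patch
+// suffixes intact:
+//
+// convertNewToOldDepPath("/lodash@4.17.21") // => /lodash/4.17.21
+// convertNewToOldDepPath("/@babel/core@7.21.0") // => /@babel/core/7.21.0
+// convertNewToOldDepPath("/foo@1.0.0(bar@1.0.0)") // => /foo/1.0.0(bar@1.0.0)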
diff --git a/cli/internal/lockfile/pnpm_lockfile_test.go b/cli/internal/lockfile/pnpm_lockfile_test.go
new file mode 100644
index 0000000..b4c8475
--- /dev/null
+++ b/cli/internal/lockfile/pnpm_lockfile_test.go
@@ -0,0 +1,405 @@
+package lockfile
+
+import (
+ "bytes"
+ "os"
+ "sort"
+ "testing"
+
+ "github.com/google/go-cmp/cmp/cmpopts"
+ "github.com/pkg/errors"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "github.com/vercel/turbo/cli/internal/yaml"
+ "gotest.tools/v3/assert"
+)
+
+func getFixture(t *testing.T, name string) ([]byte, error) {
+ defaultCwd, err := os.Getwd()
+ if err != nil {
+ t.Errorf("failed to get cwd: %v", err)
+ }
+ cwd := turbopath.AbsoluteSystemPath(defaultCwd)
+ lockfilePath := cwd.UntypedJoin("testdata", name)
+ if !lockfilePath.FileExists() {
+ return nil, errors.Errorf("unable to find 'testdata/%s'", name)
+ }
+ return os.ReadFile(lockfilePath.ToString())
+}
+
+func Test_Roundtrip(t *testing.T) {
+ lockfiles := []string{"pnpm6-workspace.yaml", "pnpm7-workspace.yaml", "pnpm8.yaml"}
+
+ for _, lockfilePath := range lockfiles {
+ lockfileContent, err := getFixture(t, lockfilePath)
+ if err != nil {
+ t.Errorf("failure getting fixture: %s", err)
+ }
+ lockfile, err := DecodePnpmLockfile(lockfileContent)
+ if err != nil {
+ t.Errorf("decoding failed %s", err)
+ }
+ var b bytes.Buffer
+ if err := lockfile.Encode(&b); err != nil {
+ t.Errorf("encoding failed %s", err)
+ }
+ newLockfile, err := DecodePnpmLockfile(b.Bytes())
+ if err != nil {
+ t.Errorf("decoding failed %s", err)
+ }
+
+ assert.DeepEqual(
+ t,
+ lockfile,
+ newLockfile,
+ // Skip over fields that don't get serialized
+ cmpopts.IgnoreUnexported(PnpmLockfile{}),
+ cmpopts.IgnoreTypes(yaml.Node{}),
+ )
+ }
+}
+
+func Test_SpecifierResolution(t *testing.T) {
+ contents, err := getFixture(t, "pnpm7-workspace.yaml")
+ if err != nil {
+ t.Error(err)
+ }
+ lockfile, err := DecodePnpmLockfile(contents)
+ if err != nil {
+ t.Errorf("failure decoding lockfile: %v", err)
+ }
+
+ type Case struct {
+ workspacePath turbopath.AnchoredUnixPath
+ pkg string
+ specifier string
+ version string
+ found bool
+ err string
+ }
+
+ cases := []Case{
+ {workspacePath: "apps/docs", pkg: "next", specifier: "12.2.5", version: "12.2.5_ir3quccc6i62x6qn6jjhyjjiey", found: true},
+ {workspacePath: "apps/web", pkg: "next", specifier: "12.2.5", version: "12.2.5_ir3quccc6i62x6qn6jjhyjjiey", found: true},
+ {workspacePath: "apps/web", pkg: "typescript", specifier: "^4.5.3", version: "4.8.3", found: true},
+ {workspacePath: "apps/web", pkg: "lodash", specifier: "bad-tag", version: "", found: false},
+ {workspacePath: "apps/web", pkg: "lodash", specifier: "^4.17.21", version: "4.17.21_ehchni3mpmovsvjxesffg2i5a4", found: true},
+ {workspacePath: "apps/docs", pkg: "dashboard-icons", specifier: "github:peerigon/dashboard-icons", version: "github.com/peerigon/dashboard-icons/ce27ef933144e09cef3911025f3649040a8571b6", found: true},
+ {workspacePath: "", pkg: "turbo", specifier: "latest", version: "1.4.6", found: true},
+ {workspacePath: "apps/bad_workspace", pkg: "turbo", specifier: "latest", version: "1.4.6", err: "no workspace 'apps/bad_workspace' found in lockfile"},
+ }
+
+ for _, testCase := range cases {
+ actualVersion, actualFound, err := lockfile.resolveSpecifier(testCase.workspacePath, testCase.pkg, testCase.specifier)
+ if testCase.err != "" {
+ assert.Error(t, err, testCase.err)
+ } else {
+ assert.Equal(t, actualFound, testCase.found, "%s@%s", testCase.pkg, testCase.version)
+ assert.Equal(t, actualVersion, testCase.version, "%s@%s", testCase.pkg, testCase.version)
+ }
+ }
+}
+
+func Test_SpecifierResolutionV6(t *testing.T) {
+ contents, err := getFixture(t, "pnpm8.yaml")
+ if err != nil {
+ t.Error(err)
+ }
+ lockfile, err := DecodePnpmLockfile(contents)
+ if err != nil {
+ t.Errorf("failure decoding lockfile: %v", err)
+ }
+
+ type Case struct {
+ workspacePath turbopath.AnchoredUnixPath
+ pkg string
+ specifier string
+ version string
+ found bool
+ err string
+ }
+
+ cases := []Case{
+ {workspacePath: "packages/a", pkg: "c", specifier: "workspace:*", version: "link:../c", found: true},
+ {workspacePath: "packages/a", pkg: "is-odd", specifier: "^3.0.1", version: "3.0.1", found: true},
+ {workspacePath: "packages/b", pkg: "is-odd", specifier: "^3.0.1", version: "3.0.1", err: "Unable to find resolved version for is-odd@^3.0.1 in packages/b"},
+ {workspacePath: "apps/bad_workspace", pkg: "turbo", specifier: "latest", version: "1.4.6", err: "no workspace 'apps/bad_workspace' found in lockfile"},
+ }
+
+ for _, testCase := range cases {
+ actualVersion, actualFound, err := lockfile.resolveSpecifier(testCase.workspacePath, testCase.pkg, testCase.specifier)
+ if testCase.err != "" {
+ assert.Error(t, err, testCase.err)
+ } else {
+ assert.Equal(t, actualFound, testCase.found, "%s@%s", testCase.pkg, testCase.version)
+ assert.Equal(t, actualVersion, testCase.version, "%s@%s", testCase.pkg, testCase.version)
+ }
+ }
+}
+
+func Test_SubgraphInjectedPackages(t *testing.T) {
+ contents, err := getFixture(t, "pnpm7-workspace.yaml")
+ if err != nil {
+ t.Error(err)
+ }
+ lockfile, err := DecodePnpmLockfile(contents)
+ assert.NilError(t, err, "decode lockfile")
+
+ packageWithInjectedPackage := turbopath.AnchoredUnixPath("apps/docs").ToSystemPath()
+
+ prunedLockfile, err := lockfile.Subgraph([]turbopath.AnchoredSystemPath{packageWithInjectedPackage}, []string{})
+ assert.NilError(t, err, "prune lockfile")
+
+ pnpmLockfile, ok := prunedLockfile.(*PnpmLockfile)
+ assert.Assert(t, ok, "got different lockfile impl")
+
+ _, hasInjectedPackage := pnpmLockfile.Packages["file:packages/ui"]
+
+ assert.Assert(t, hasInjectedPackage, "pruned lockfile is missing injected package")
+}
+
+func Test_GitPackages(t *testing.T) {
+ contents, err := getFixture(t, "pnpm7-workspace.yaml")
+ if err != nil {
+ t.Error(err)
+ }
+ lockfile, err := DecodePnpmLockfile(contents)
+ assert.NilError(t, err, "decode lockfile")
+
+ pkg, err := lockfile.ResolvePackage(turbopath.AnchoredUnixPath("apps/docs"), "dashboard-icons", "github:peerigon/dashboard-icons")
+ assert.NilError(t, err, "failure to find package")
+ assert.Assert(t, pkg.Found)
+ assert.DeepEqual(t, pkg.Key, "github.com/peerigon/dashboard-icons/ce27ef933144e09cef3911025f3649040a8571b6")
+ assert.DeepEqual(t, pkg.Version, "1.0.0")
+ // TODO: make sure subgraph produces the git dep
+}
+
+func Test_DecodePnpmUnquotedURL(t *testing.T) {
+ resolutionWithQuestionMark := `{integrity: sha512-deadbeef, tarball: path/to/tarball?foo=bar}`
+ var resolution map[string]interface{}
+ err := yaml.Unmarshal([]byte(resolutionWithQuestionMark), &resolution)
+ assert.NilError(t, err, "valid package entry should be able to be decoded")
+ assert.Equal(t, resolution["tarball"], "path/to/tarball?foo=bar")
+}
+
+func Test_PnpmLockfilePatches(t *testing.T) {
+ contents, err := getFixture(t, "pnpm-patch.yaml")
+ assert.NilError(t, err)
+
+ lockfile, err := DecodePnpmLockfile(contents)
+ assert.NilError(t, err)
+
+ patches := lockfile.Patches()
+ assert.Equal(t, len(patches), 3)
+ assert.Equal(t, patches[0], turbopath.AnchoredUnixPath("patches/@babel__core@7.20.12.patch"))
+ assert.Equal(t, patches[1], turbopath.AnchoredUnixPath("patches/is-odd@3.0.1.patch"))
+}
+
+func Test_PnpmPrunePatches(t *testing.T) {
+ contents, err := getFixture(t, "pnpm-patch.yaml")
+ assert.NilError(t, err)
+
+ lockfile, err := DecodePnpmLockfile(contents)
+ assert.NilError(t, err)
+
+ prunedLockfile, err := lockfile.Subgraph(
+ []turbopath.AnchoredSystemPath{turbopath.AnchoredSystemPath("packages/dependency")},
+ []string{"/is-odd/3.0.1_nrrwwz7lemethtlvvm75r5bmhq", "/is-number/6.0.0", "/@babel/core/7.20.12_3hyn7hbvzkemudbydlwjmrb65y", "/moleculer/0.14.28_5pk7ojv7qbqha75ozglk4y4f74_kumip57h7zlinbhp4gz3jrbqry"},
+ )
+ assert.NilError(t, err)
+
+ assert.Equal(t, len(prunedLockfile.Patches()), 3)
+}
+
+func Test_PnpmPrunePatchesV6(t *testing.T) {
+ contents, err := getFixture(t, "pnpm-patch-v6.yaml")
+ assert.NilError(t, err)
+
+ lockfile, err := DecodePnpmLockfile(contents)
+ assert.NilError(t, err)
+
+ prunedLockfile, err := lockfile.Subgraph(
+ []turbopath.AnchoredSystemPath{turbopath.AnchoredSystemPath("packages/a")},
+ []string{"/lodash@4.17.21(patch_hash=lgum37zgng4nfkynzh3cs7wdeq)"},
+ )
+ assert.NilError(t, err)
+
+ assert.Equal(t, len(prunedLockfile.Patches()), 1)
+
+ prunedLockfile, err = lockfile.Subgraph(
+ []turbopath.AnchoredSystemPath{turbopath.AnchoredSystemPath("packages/b")},
+ []string{"/@babel/helper-string-parser@7.19.4(patch_hash=wjhgmpzh47qmycrzgpeyoyh3ce)(@babel/core@7.21.0)"},
+ )
+ assert.NilError(t, err)
+
+ assert.Equal(t, len(prunedLockfile.Patches()), 1)
+}
+
+func Test_PnpmAbsoluteDependency(t *testing.T) {
+ type testCase struct {
+ fixture string
+ key string
+ }
+ testcases := []testCase{
+ {"pnpm-absolute.yaml", "/@scope/child/1.0.0"},
+ {"pnpm-absolute-v6.yaml", "/@scope/child@1.0.0"},
+ }
+ for _, tc := range testcases {
+ contents, err := getFixture(t, tc.fixture)
+ assert.NilError(t, err, tc.fixture)
+
+ lockfile, err := DecodePnpmLockfile(contents)
+ assert.NilError(t, err, tc.fixture)
+
+ pkg, err := lockfile.ResolvePackage(turbopath.AnchoredUnixPath("packages/a"), "child", tc.key)
+ assert.NilError(t, err, "resolve")
+ assert.Assert(t, pkg.Found, tc.fixture)
+ assert.DeepEqual(t, pkg.Key, tc.key)
+ assert.DeepEqual(t, pkg.Version, "1.0.0")
+ }
+}
+
+func Test_LockfilePeer(t *testing.T) {
+ contents, err := getFixture(t, "pnpm-peer-v6.yaml")
+ if err != nil {
+ t.Error(err)
+ }
+ assert.NilError(t, err, "read fixture")
+ lockfile, err := DecodePnpmLockfile(contents)
+ assert.NilError(t, err, "parse lockfile")
+
+ pkg, err := lockfile.ResolvePackage(turbopath.AnchoredUnixPath("apps/web"), "next", "13.0.4")
+ assert.NilError(t, err, "read lockfile")
+ assert.Assert(t, pkg.Found)
+ assert.DeepEqual(t, pkg.Version, "13.0.4(react-dom@18.2.0)(react@18.2.0)")
+ assert.DeepEqual(t, pkg.Key, "/next@13.0.4(react-dom@18.2.0)(react@18.2.0)")
+}
+
+func Test_LockfileTopLevelOverride(t *testing.T) {
+ contents, err := getFixture(t, "pnpm-top-level-dupe.yaml")
+ if err != nil {
+ t.Error(err)
+ }
+ lockfile, err := DecodePnpmLockfile(contents)
+ assert.NilError(t, err, "decode lockfile")
+
+ pkg, err := lockfile.ResolvePackage(turbopath.AnchoredUnixPath("packages/a"), "ci-info", "3.7.1")
+ assert.NilError(t, err, "resolve package")
+
+ assert.Assert(t, pkg.Found)
+ assert.DeepEqual(t, pkg.Key, "/ci-info/3.7.1")
+ assert.DeepEqual(t, pkg.Version, "3.7.1")
+}
+
+func Test_PnpmOverride(t *testing.T) {
+ contents, err := getFixture(t, "pnpm_override.yaml")
+ if err != nil {
+ t.Error(err)
+ }
+ lockfile, err := DecodePnpmLockfile(contents)
+ assert.NilError(t, err, "decode lockfile")
+
+ pkg, err := lockfile.ResolvePackage(
+ turbopath.AnchoredUnixPath("config/hardhat"),
+ "@nomiclabs/hardhat-ethers",
+ "npm:hardhat-deploy-ethers@0.3.0-beta.13",
+ )
+ assert.NilError(t, err, "failure to find package")
+ assert.Assert(t, pkg.Found)
+ assert.DeepEqual(t, pkg.Key, "/hardhat-deploy-ethers/0.3.0-beta.13_yab2ug5tvye2kp6e24l5x3z7uy")
+ assert.DeepEqual(t, pkg.Version, "0.3.0-beta.13_yab2ug5tvye2kp6e24l5x3z7uy")
+}
+
+func Test_DepPathParsing(t *testing.T) {
+ type testCase struct {
+ input string
+ dp depPath
+ }
+ testCases := []testCase{
+ {
+ "/foo/1.0.0",
+ depPath{
+ name: "foo",
+ version: "1.0.0",
+ },
+ },
+ {
+ "/@foo/bar/1.0.0",
+ depPath{
+ name: "@foo/bar",
+ version: "1.0.0",
+ },
+ },
+ {
+ "example.org/foo/1.0.0",
+ depPath{
+ host: "example.org",
+ name: "foo",
+ version: "1.0.0",
+ },
+ },
+ {
+ "/foo/1.0.0_bar@1.0.0",
+ depPath{
+ name: "foo",
+ version: "1.0.0",
+ peerSuffix: "bar@1.0.0",
+ },
+ },
+ {
+ "/foo/1.0.0(bar@1.0.0)",
+ depPath{
+ name: "foo",
+ version: "1.0.0",
+ peerSuffix: "(bar@1.0.0)",
+ },
+ },
+ {
+ "/foo/1.0.0_patchHash_peerHash",
+ depPath{
+ name: "foo",
+ version: "1.0.0",
+ peerSuffix: "patchHash_peerHash",
+ },
+ },
+ {
+ "/@babel/helper-string-parser/7.19.4(patch_hash=wjhgmpzh47qmycrzgpeyoyh3ce)(@babel/core@7.21.0)",
+ depPath{
+ name: "@babel/helper-string-parser",
+ version: "7.19.4",
+ peerSuffix: "(patch_hash=wjhgmpzh47qmycrzgpeyoyh3ce)(@babel/core@7.21.0)",
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ assert.Equal(t, parseDepPath(tc.input), tc.dp, tc.input)
+ }
+}
+
+func Test_PnpmAliasesOverlap(t *testing.T) {
+ contents, err := getFixture(t, "pnpm-absolute.yaml")
+ assert.NilError(t, err)
+
+ lockfile, err := DecodePnpmLockfile(contents)
+ assert.NilError(t, err)
+
+ closure, err := transitiveClosure("packages/a", map[string]string{"@scope/parent": "^1.0.0", "another": "^1.0.0", "special": "npm:Special@1.2.3"}, lockfile)
+ assert.NilError(t, err)
+
+ deps := []Package{}
+
+ for _, v := range closure.ToSlice() {
+ dep := v.(Package)
+ deps = append(deps, dep)
+ }
+ sort.Sort(ByKey(deps))
+
+ assert.DeepEqual(t, deps, []Package{
+ {"/@scope/child/1.0.0", "1.0.0", true},
+ {"/@scope/parent/1.0.0", "1.0.0", true},
+ {"/Special/1.2.3", "1.2.3", true},
+ {"/another/1.0.0", "1.0.0", true},
+ {"/foo/1.0.0", "1.0.0", true},
+ })
+}
diff --git a/cli/internal/lockfile/testdata/berry.lock b/cli/internal/lockfile/testdata/berry.lock
new file mode 100644
index 0000000..f4436e4
--- /dev/null
+++ b/cli/internal/lockfile/testdata/berry.lock
@@ -0,0 +1,3283 @@
+# This file is generated by running "yarn install" inside your project.
+# Manual changes might be lost - proceed with caution!
+
+__metadata:
+ version: 6
+ cacheKey: 8c0
+
+"@ampproject/remapping@npm:^2.1.0":
+ version: 2.2.0
+ resolution: "@ampproject/remapping@npm:2.2.0"
+ dependencies:
+ "@jridgewell/gen-mapping": ^0.1.0
+ "@jridgewell/trace-mapping": ^0.3.9
+ checksum: d74d170d06468913921d72430259424b7e4c826b5a7d39ff839a29d547efb97dc577caa8ba3fb5cf023624e9af9d09651afc3d4112a45e2050328abc9b3a2292
+ languageName: node
+ linkType: hard
+
+"@babel/code-frame@npm:7.12.11":
+ version: 7.12.11
+ resolution: "@babel/code-frame@npm:7.12.11"
+ dependencies:
+ "@babel/highlight": ^7.10.4
+ checksum: 3963eff3ebfb0e091c7e6f99596ef4b258683e4ba8a134e4e95f77afe85be5c931e184fff6435fb4885d12eba04a5e25532f7fbc292ca13b48e7da943474e2f3
+ languageName: node
+ linkType: hard
+
+"@babel/code-frame@npm:^7.18.6":
+ version: 7.18.6
+ resolution: "@babel/code-frame@npm:7.18.6"
+ dependencies:
+ "@babel/highlight": ^7.18.6
+ checksum: 195e2be3172d7684bf95cff69ae3b7a15a9841ea9d27d3c843662d50cdd7d6470fd9c8e64be84d031117e4a4083486effba39f9aef6bbb2c89f7f21bcfba33ba
+ languageName: node
+ linkType: hard
+
+"@babel/compat-data@npm:^7.19.1":
+ version: 7.19.1
+ resolution: "@babel/compat-data@npm:7.19.1"
+ checksum: f985887ea08a140e4af87a94d3fb17af0345491eb97f5a85b1840255c2e2a97429f32a8fd12a7aae9218af5f1024f1eb12a5cd280d2d69b2337583c17ea506ba
+ languageName: node
+ linkType: hard
+
+"@babel/core@npm:^7.0.0":
+ version: 7.19.1
+ resolution: "@babel/core@npm:7.19.1"
+ dependencies:
+ "@ampproject/remapping": ^2.1.0
+ "@babel/code-frame": ^7.18.6
+ "@babel/generator": ^7.19.0
+ "@babel/helper-compilation-targets": ^7.19.1
+ "@babel/helper-module-transforms": ^7.19.0
+ "@babel/helpers": ^7.19.0
+ "@babel/parser": ^7.19.1
+ "@babel/template": ^7.18.10
+ "@babel/traverse": ^7.19.1
+ "@babel/types": ^7.19.0
+ convert-source-map: ^1.7.0
+ debug: ^4.1.0
+ gensync: ^1.0.0-beta.2
+ json5: ^2.2.1
+ semver: ^6.3.0
+ checksum: 941c8c119b80bdba5fafc80bbaa424d51146b6d3c30b8fae35879358dd37c11d3d0926bc7e970a0861229656eedaa8c884d4a3a25cc904086eb73b827a2f1168
+ languageName: node
+ linkType: hard
+
+"@babel/generator@npm:^7.19.0":
+ version: 7.19.0
+ resolution: "@babel/generator@npm:7.19.0"
+ dependencies:
+ "@babel/types": ^7.19.0
+ "@jridgewell/gen-mapping": ^0.3.2
+ jsesc: ^2.5.1
+ checksum: aa3d5785cf8f8e81672dcc61aef351188efeadb20d9f66d79113d82cbcf3bbbdeb829989fa14582108572ddbc4e4027bdceb06ccaf5ec40fa93c2dda8fbcd4aa
+ languageName: node
+ linkType: hard
+
+"@babel/helper-compilation-targets@npm:^7.19.1":
+ version: 7.19.1
+ resolution: "@babel/helper-compilation-targets@npm:7.19.1"
+ dependencies:
+ "@babel/compat-data": ^7.19.1
+ "@babel/helper-validator-option": ^7.18.6
+ browserslist: ^4.21.3
+ semver: ^6.3.0
+ peerDependencies:
+ "@babel/core": ^7.0.0
+ checksum: c2d3039265e498b341a6b597f855f2fcef02659050fefedf36ad4e6815e6aafe1011a761214cc80d98260ed07ab15a8cbe959a0458e97bec5f05a450e1b1741b
+ languageName: node
+ linkType: hard
+
+"@babel/helper-environment-visitor@npm:^7.18.9":
+ version: 7.18.9
+ resolution: "@babel/helper-environment-visitor@npm:7.18.9"
+ checksum: b25101f6162ddca2d12da73942c08ad203d7668e06663df685634a8fde54a98bc015f6f62938e8554457a592a024108d45b8f3e651fd6dcdb877275b73cc4420
+ languageName: node
+ linkType: hard
+
+"@babel/helper-function-name@npm:^7.19.0":
+ version: 7.19.0
+ resolution: "@babel/helper-function-name@npm:7.19.0"
+ dependencies:
+ "@babel/template": ^7.18.10
+ "@babel/types": ^7.19.0
+ checksum: eac1f5db428ba546270c2b8d750c24eb528b8fcfe50c81de2e0bdebf0e20f24bec688d4331533b782e4a907fad435244621ca2193cfcf80a86731299840e0f6e
+ languageName: node
+ linkType: hard
+
+"@babel/helper-hoist-variables@npm:^7.18.6":
+ version: 7.18.6
+ resolution: "@babel/helper-hoist-variables@npm:7.18.6"
+ dependencies:
+ "@babel/types": ^7.18.6
+ checksum: fd9c35bb435fda802bf9ff7b6f2df06308a21277c6dec2120a35b09f9de68f68a33972e2c15505c1a1a04b36ec64c9ace97d4a9e26d6097b76b4396b7c5fa20f
+ languageName: node
+ linkType: hard
+
+"@babel/helper-module-imports@npm:^7.18.6":
+ version: 7.18.6
+ resolution: "@babel/helper-module-imports@npm:7.18.6"
+ dependencies:
+ "@babel/types": ^7.18.6
+ checksum: f393f8a3b3304b1b7a288a38c10989de754f01d29caf62ce7c4e5835daf0a27b81f3ac687d9d2780d39685aae7b55267324b512150e7b2be967b0c493b6a1def
+ languageName: node
+ linkType: hard
+
+"@babel/helper-module-transforms@npm:^7.19.0":
+ version: 7.19.0
+ resolution: "@babel/helper-module-transforms@npm:7.19.0"
+ dependencies:
+ "@babel/helper-environment-visitor": ^7.18.9
+ "@babel/helper-module-imports": ^7.18.6
+ "@babel/helper-simple-access": ^7.18.6
+ "@babel/helper-split-export-declaration": ^7.18.6
+ "@babel/helper-validator-identifier": ^7.18.6
+ "@babel/template": ^7.18.10
+ "@babel/traverse": ^7.19.0
+ "@babel/types": ^7.19.0
+ checksum: 4483276c66f56cf3b5b063634092ad9438c2593725de5c143ba277dda82f1501e6d73b311c1b28036f181dbe36eaeff29f24726cde37a599d4e735af294e5359
+ languageName: node
+ linkType: hard
+
+"@babel/helper-simple-access@npm:^7.18.6":
+ version: 7.18.6
+ resolution: "@babel/helper-simple-access@npm:7.18.6"
+ dependencies:
+ "@babel/types": ^7.18.6
+ checksum: 37cd36eef199e0517845763c1e6ff6ea5e7876d6d707a6f59c9267c547a50aa0e84260ba9285d49acfaf2cfa0a74a772d92967f32ac1024c961517d40b6c16a5
+ languageName: node
+ linkType: hard
+
+"@babel/helper-split-export-declaration@npm:^7.18.6":
+ version: 7.18.6
+ resolution: "@babel/helper-split-export-declaration@npm:7.18.6"
+ dependencies:
+ "@babel/types": ^7.18.6
+ checksum: c6d3dede53878f6be1d869e03e9ffbbb36f4897c7cc1527dc96c56d127d834ffe4520a6f7e467f5b6f3c2843ea0e81a7819d66ae02f707f6ac057f3d57943a2b
+ languageName: node
+ linkType: hard
+
+"@babel/helper-string-parser@npm:^7.18.10":
+ version: 7.18.10
+ resolution: "@babel/helper-string-parser@npm:7.18.10"
+ checksum: d554a4393365b624916b5c00a4cc21c990c6617e7f3fe30be7d9731f107f12c33229a7a3db9d829bfa110d2eb9f04790745d421640e3bd245bb412dc0ea123c1
+ languageName: node
+ linkType: hard
+
+"@babel/helper-validator-identifier@npm:^7.18.6":
+ version: 7.19.1
+ resolution: "@babel/helper-validator-identifier@npm:7.19.1"
+ checksum: 0eca5e86a729162af569b46c6c41a63e18b43dbe09fda1d2a3c8924f7d617116af39cac5e4cd5d431bb760b4dca3c0970e0c444789b1db42bcf1fa41fbad0a3a
+ languageName: node
+ linkType: hard
+
+"@babel/helper-validator-option@npm:^7.18.6":
+ version: 7.18.6
+ resolution: "@babel/helper-validator-option@npm:7.18.6"
+ checksum: f9cc6eb7cc5d759c5abf006402180f8d5e4251e9198197428a97e05d65eb2f8ae5a0ce73b1dfd2d35af41d0eb780627a64edf98a4e71f064eeeacef8de58f2cf
+ languageName: node
+ linkType: hard
+
+"@babel/helpers@npm:^7.19.0":
+ version: 7.19.0
+ resolution: "@babel/helpers@npm:7.19.0"
+ dependencies:
+ "@babel/template": ^7.18.10
+ "@babel/traverse": ^7.19.0
+ "@babel/types": ^7.19.0
+ checksum: e50e78e0dbb0435075fa3f85021a6bcae529589800bca0292721afd7f7c874bea54508d6dc57eca16e5b8224f8142c6b0e32e3b0140029dc09865da747da4623
+ languageName: node
+ linkType: hard
+
+"@babel/highlight@npm:^7.10.4, @babel/highlight@npm:^7.18.6":
+ version: 7.18.6
+ resolution: "@babel/highlight@npm:7.18.6"
+ dependencies:
+ "@babel/helper-validator-identifier": ^7.18.6
+ chalk: ^2.0.0
+ js-tokens: ^4.0.0
+ checksum: 92d8ee61549de5ff5120e945e774728e5ccd57fd3b2ed6eace020ec744823d4a98e242be1453d21764a30a14769ecd62170fba28539b211799bbaf232bbb2789
+ languageName: node
+ linkType: hard
+
+"@babel/parser@npm:^7.18.10, @babel/parser@npm:^7.19.1":
+ version: 7.19.1
+ resolution: "@babel/parser@npm:7.19.1"
+ bin:
+ parser: ./bin/babel-parser.js
+ checksum: b1e0acb346b2a533c857e1e97ac0886cdcbd76aafef67835a2b23f760c10568eb53ad8a27dd5f862d8ba4e583742e6067f107281ccbd68959d61bc61e4ddaa51
+ languageName: node
+ linkType: hard
+
+"@babel/runtime-corejs3@npm:^7.10.2":
+ version: 7.19.1
+ resolution: "@babel/runtime-corejs3@npm:7.19.1"
+ dependencies:
+ core-js-pure: ^3.25.1
+ regenerator-runtime: ^0.13.4
+ checksum: 38a1e8fcd2ba1f76c951259c98a5a11052123923adbf30ec8b2fec202dbbe38c6db61658ef9398e00c30f799e2e54ea036e56a09f43229261918bf5ec1b7d03a
+ languageName: node
+ linkType: hard
+
+"@babel/runtime@npm:^7.10.2, @babel/runtime@npm:^7.18.9":
+ version: 7.19.0
+ resolution: "@babel/runtime@npm:7.19.0"
+ dependencies:
+ regenerator-runtime: ^0.13.4
+ checksum: fa69c351bb05e1db3ceb9a02fdcf620c234180af68cdda02152d3561015f6d55277265d3109815992f96d910f3db709458cae4f8df1c3def66f32e0867d82294
+ languageName: node
+ linkType: hard
+
+"@babel/template@npm:^7.18.10":
+ version: 7.18.10
+ resolution: "@babel/template@npm:7.18.10"
+ dependencies:
+ "@babel/code-frame": ^7.18.6
+ "@babel/parser": ^7.18.10
+ "@babel/types": ^7.18.10
+ checksum: 93a6aa094af5f355a72bd55f67fa1828a046c70e46f01b1606e6118fa1802b6df535ca06be83cc5a5e834022be95c7b714f0a268b5f20af984465a71e28f1473
+ languageName: node
+ linkType: hard
+
+"@babel/traverse@npm:^7.19.0, @babel/traverse@npm:^7.19.1":
+ version: 7.19.1
+ resolution: "@babel/traverse@npm:7.19.1"
+ dependencies:
+ "@babel/code-frame": ^7.18.6
+ "@babel/generator": ^7.19.0
+ "@babel/helper-environment-visitor": ^7.18.9
+ "@babel/helper-function-name": ^7.19.0
+ "@babel/helper-hoist-variables": ^7.18.6
+ "@babel/helper-split-export-declaration": ^7.18.6
+ "@babel/parser": ^7.19.1
+ "@babel/types": ^7.19.0
+ debug: ^4.1.0
+ globals: ^11.1.0
+ checksum: 9d782b5089ebc989e54c2406814ed1206cb745ed2734e6602dee3e23d4b6ebbb703ff86e536276630f8de83fda6cde99f0634e3c3d847ddb40572d0303ba8800
+ languageName: node
+ linkType: hard
+
+"@babel/types@npm:^7.18.10, @babel/types@npm:^7.18.6, @babel/types@npm:^7.19.0, @babel/types@npm:^7.8.3":
+ version: 7.19.0
+ resolution: "@babel/types@npm:7.19.0"
+ dependencies:
+ "@babel/helper-string-parser": ^7.18.10
+ "@babel/helper-validator-identifier": ^7.18.6
+ to-fast-properties: ^2.0.0
+ checksum: 9b346715a68aeede70ba9c685a144b0b26c53bcd595d448e24c8fa8df4d5956a5712e56ebadb7c85dcc32f218ee42788e37b93d50d3295c992072224cb3ef3fe
+ languageName: node
+ linkType: hard
+
+"@eslint/eslintrc@npm:^0.4.3":
+ version: 0.4.3
+ resolution: "@eslint/eslintrc@npm:0.4.3"
+ dependencies:
+ ajv: ^6.12.4
+ debug: ^4.1.1
+ espree: ^7.3.0
+ globals: ^13.9.0
+ ignore: ^4.0.6
+ import-fresh: ^3.2.1
+ js-yaml: ^3.13.1
+ minimatch: ^3.0.4
+ strip-json-comments: ^3.1.1
+ checksum: 03a7704150b868c318aab6a94d87a33d30dc2ec579d27374575014f06237ba1370ae11178db772f985ef680d469dc237e7b16a1c5d8edaaeb8c3733e7a95a6d3
+ languageName: node
+ linkType: hard
+
+"@humanwhocodes/config-array@npm:^0.5.0":
+ version: 0.5.0
+ resolution: "@humanwhocodes/config-array@npm:0.5.0"
+ dependencies:
+ "@humanwhocodes/object-schema": ^1.2.0
+ debug: ^4.1.1
+ minimatch: ^3.0.4
+ checksum: 44ee6a9f05d93dd9d5935a006b17572328ba9caff8002442f601736cbda79c580cc0f5a49ce9eb88fbacc5c3a6b62098357c2e95326cd17bb9f1a6c61d6e95e7
+ languageName: node
+ linkType: hard
+
+"@humanwhocodes/object-schema@npm:^1.2.0":
+ version: 1.2.1
+ resolution: "@humanwhocodes/object-schema@npm:1.2.1"
+ checksum: a824a1ec31591231e4bad5787641f59e9633827d0a2eaae131a288d33c9ef0290bd16fda8da6f7c0fcb014147865d12118df10db57f27f41e20da92369fcb3f1
+ languageName: node
+ linkType: hard
+
+"@jridgewell/gen-mapping@npm:^0.1.0":
+ version: 0.1.1
+ resolution: "@jridgewell/gen-mapping@npm:0.1.1"
+ dependencies:
+ "@jridgewell/set-array": ^1.0.0
+ "@jridgewell/sourcemap-codec": ^1.4.10
+ checksum: 3bcc21fe786de6ffbf35c399a174faab05eb23ce6a03e8769569de28abbf4facc2db36a9ddb0150545ae23a8d35a7cf7237b2aa9e9356a7c626fb4698287d5cc
+ languageName: node
+ linkType: hard
+
+"@jridgewell/gen-mapping@npm:^0.3.2":
+ version: 0.3.2
+ resolution: "@jridgewell/gen-mapping@npm:0.3.2"
+ dependencies:
+ "@jridgewell/set-array": ^1.0.1
+ "@jridgewell/sourcemap-codec": ^1.4.10
+ "@jridgewell/trace-mapping": ^0.3.9
+ checksum: 1832707a1c476afebe4d0fbbd4b9434fdb51a4c3e009ab1e9938648e21b7a97049fa6009393bdf05cab7504108413441df26d8a3c12193996e65493a4efb6882
+ languageName: node
+ linkType: hard
+
+"@jridgewell/resolve-uri@npm:^3.0.3":
+ version: 3.1.0
+ resolution: "@jridgewell/resolve-uri@npm:3.1.0"
+ checksum: b5ceaaf9a110fcb2780d1d8f8d4a0bfd216702f31c988d8042e5f8fbe353c55d9b0f55a1733afdc64806f8e79c485d2464680ac48a0d9fcadb9548ee6b81d267
+ languageName: node
+ linkType: hard
+
+"@jridgewell/set-array@npm:^1.0.0, @jridgewell/set-array@npm:^1.0.1":
+ version: 1.1.2
+ resolution: "@jridgewell/set-array@npm:1.1.2"
+ checksum: 69a84d5980385f396ff60a175f7177af0b8da4ddb81824cb7016a9ef914eee9806c72b6b65942003c63f7983d4f39a5c6c27185bbca88eb4690b62075602e28e
+ languageName: node
+ linkType: hard
+
+"@jridgewell/sourcemap-codec@npm:^1.4.10":
+ version: 1.4.14
+ resolution: "@jridgewell/sourcemap-codec@npm:1.4.14"
+ checksum: 61100637b6d173d3ba786a5dff019e1a74b1f394f323c1fee337ff390239f053b87266c7a948777f4b1ee68c01a8ad0ab61e5ff4abb5a012a0b091bec391ab97
+ languageName: node
+ linkType: hard
+
+"@jridgewell/trace-mapping@npm:^0.3.9":
+ version: 0.3.15
+ resolution: "@jridgewell/trace-mapping@npm:0.3.15"
+ dependencies:
+ "@jridgewell/resolve-uri": ^3.0.3
+ "@jridgewell/sourcemap-codec": ^1.4.10
+ checksum: 38917e9c2b014d469a9f51c016ed506acbe44dd16ec2f6f99b553ebf3764d22abadbf992f2367b6d2b3511f3eae8ed3a8963f6c1030093fda23efd35ecab2bae
+ languageName: node
+ linkType: hard
+
+"@next/env@npm:12.2.5":
+ version: 12.2.5
+ resolution: "@next/env@npm:12.2.5"
+ checksum: a44939e59b46d5951831529a43dba9daa2e4e467e8680ea96e21ae127d1bf7f11757aaf3a6cff8a51273abfe7af782903e1304405a481361c7ba3e66d47e3238
+ languageName: node
+ linkType: hard
+
+"@next/eslint-plugin-next@npm:12.3.0":
+ version: 12.3.0
+ resolution: "@next/eslint-plugin-next@npm:12.3.0"
+ dependencies:
+ glob: 7.1.7
+ checksum: f08582b36ff01a776183b3c33d6d81be3a110c1c3c39c81a33aff91277ea822aa4a952d4f2271a08ce56692ca5c58c9e958aaf4e08348c10cc45a85213b208f0
+ languageName: node
+ linkType: hard
+
+"@next/swc-android-arm-eabi@npm:12.2.5":
+ version: 12.2.5
+ resolution: "@next/swc-android-arm-eabi@npm:12.2.5"
+ conditions: os=android & cpu=arm
+ languageName: node
+ linkType: hard
+
+"@next/swc-android-arm64@npm:12.2.5":
+ version: 12.2.5
+ resolution: "@next/swc-android-arm64@npm:12.2.5"
+ conditions: os=android & cpu=arm64
+ languageName: node
+ linkType: hard
+
+"@next/swc-darwin-arm64@npm:12.2.5":
+ version: 12.2.5
+ resolution: "@next/swc-darwin-arm64@npm:12.2.5"
+ conditions: os=darwin & cpu=arm64
+ languageName: node
+ linkType: hard
+
+"@next/swc-darwin-x64@npm:12.2.5":
+ version: 12.2.5
+ resolution: "@next/swc-darwin-x64@npm:12.2.5"
+ conditions: os=darwin & cpu=x64
+ languageName: node
+ linkType: hard
+
+"@next/swc-freebsd-x64@npm:12.2.5":
+ version: 12.2.5
+ resolution: "@next/swc-freebsd-x64@npm:12.2.5"
+ conditions: os=freebsd & cpu=x64
+ languageName: node
+ linkType: hard
+
+"@next/swc-linux-arm-gnueabihf@npm:12.2.5":
+ version: 12.2.5
+ resolution: "@next/swc-linux-arm-gnueabihf@npm:12.2.5"
+ conditions: os=linux & cpu=arm
+ languageName: node
+ linkType: hard
+
+"@next/swc-linux-arm64-gnu@npm:12.2.5":
+ version: 12.2.5
+ resolution: "@next/swc-linux-arm64-gnu@npm:12.2.5"
+ conditions: os=linux & cpu=arm64 & libc=glibc
+ languageName: node
+ linkType: hard
+
+"@next/swc-linux-arm64-musl@npm:12.2.5":
+ version: 12.2.5
+ resolution: "@next/swc-linux-arm64-musl@npm:12.2.5"
+ conditions: os=linux & cpu=arm64 & libc=musl
+ languageName: node
+ linkType: hard
+
+"@next/swc-linux-x64-gnu@npm:12.2.5":
+ version: 12.2.5
+ resolution: "@next/swc-linux-x64-gnu@npm:12.2.5"
+ conditions: os=linux & cpu=x64 & libc=glibc
+ languageName: node
+ linkType: hard
+
+"@next/swc-linux-x64-musl@npm:12.2.5":
+ version: 12.2.5
+ resolution: "@next/swc-linux-x64-musl@npm:12.2.5"
+ conditions: os=linux & cpu=x64 & libc=musl
+ languageName: node
+ linkType: hard
+
+"@next/swc-win32-arm64-msvc@npm:12.2.5":
+ version: 12.2.5
+ resolution: "@next/swc-win32-arm64-msvc@npm:12.2.5"
+ conditions: os=win32 & cpu=arm64
+ languageName: node
+ linkType: hard
+
+"@next/swc-win32-ia32-msvc@npm:12.2.5":
+ version: 12.2.5
+ resolution: "@next/swc-win32-ia32-msvc@npm:12.2.5"
+ conditions: os=win32 & cpu=ia32
+ languageName: node
+ linkType: hard
+
+"@next/swc-win32-x64-msvc@npm:12.2.5":
+ version: 12.2.5
+ resolution: "@next/swc-win32-x64-msvc@npm:12.2.5"
+ conditions: os=win32 & cpu=x64
+ languageName: node
+ linkType: hard
+
+"@nodelib/fs.scandir@npm:2.1.5":
+ version: 2.1.5
+ resolution: "@nodelib/fs.scandir@npm:2.1.5"
+ dependencies:
+ "@nodelib/fs.stat": 2.0.5
+ run-parallel: ^1.1.9
+ checksum: a970d595bd23c66c880e0ef1817791432dbb7acbb8d44b7e7d0e7a22f4521260d4a83f7f9fd61d44fda4610105577f8f58a60718105fb38352baed612fd79e59
+ languageName: node
+ linkType: hard
+
+"@nodelib/fs.stat@npm:2.0.5, @nodelib/fs.stat@npm:^2.0.2":
+ version: 2.0.5
+ resolution: "@nodelib/fs.stat@npm:2.0.5"
+ checksum: 012480b5ca9d97bff9261571dbbec7bbc6033f69cc92908bc1ecfad0792361a5a1994bc48674b9ef76419d056a03efadfce5a6cf6dbc0a36559571a7a483f6f0
+ languageName: node
+ linkType: hard
+
+"@nodelib/fs.walk@npm:^1.2.3":
+ version: 1.2.8
+ resolution: "@nodelib/fs.walk@npm:1.2.8"
+ dependencies:
+ "@nodelib/fs.scandir": 2.1.5
+ fastq: ^1.6.0
+ checksum: 190c643f156d8f8f277bf2a6078af1ffde1fd43f498f187c2db24d35b4b4b5785c02c7dc52e356497b9a1b65b13edc996de08de0b961c32844364da02986dc53
+ languageName: node
+ linkType: hard
+
+"@rushstack/eslint-patch@npm:^1.1.3":
+ version: 1.2.0
+ resolution: "@rushstack/eslint-patch@npm:1.2.0"
+ checksum: faa749faae0e83c26ae9eb00ad36a897ac78f3cf27da8e8ff21c00bcf7973b598d823d8f2b3957ef66079288bcf577f94df831eae2d65f3f68d8ca32f18b6aff
+ languageName: node
+ linkType: hard
+
+"@swc/helpers@npm:0.4.3":
+ version: 0.4.3
+ resolution: "@swc/helpers@npm:0.4.3"
+ dependencies:
+ tslib: ^2.4.0
+ checksum: 5c2f173e950dd3929d84ae48b3586a274d5a874e7cf2013b3d8081e4f8c723fa3a4d4e63b263e84bb7f06431f87b640e91a12655410463c81a3dc2bbc15eceda
+ languageName: node
+ linkType: hard
+
+"@types/json5@npm:^0.0.29":
+ version: 0.0.29
+ resolution: "@types/json5@npm:0.0.29"
+ checksum: e60b153664572116dfea673c5bda7778dbff150498f44f998e34b5886d8afc47f16799280e4b6e241c0472aef1bc36add771c569c68fc5125fc2ae519a3eb9ac
+ languageName: node
+ linkType: hard
+
+"@types/node@npm:^17.0.12":
+ version: 17.0.45
+ resolution: "@types/node@npm:17.0.45"
+ checksum: aa04366b9103b7d6cfd6b2ef64182e0eaa7d4462c3f817618486ea0422984c51fc69fd0d436eae6c9e696ddfdbec9ccaa27a917f7c2e8c75c5d57827fe3d95e8
+ languageName: node
+ linkType: hard
+
+"@types/prop-types@npm:*":
+ version: 15.7.5
+ resolution: "@types/prop-types@npm:15.7.5"
+ checksum: 5b43b8b15415e1f298243165f1d44390403bb2bd42e662bca3b5b5633fdd39c938e91b7fce3a9483699db0f7a715d08cef220c121f723a634972fdf596aec980
+ languageName: node
+ linkType: hard
+
+"@types/react-dom@npm:^17.0.11":
+ version: 17.0.17
+ resolution: "@types/react-dom@npm:17.0.17"
+ dependencies:
+ "@types/react": ^17
+ checksum: 23caf98aa03e968811560f92a2c8f451694253ebe16b670929b24eaf0e7fa62ba549abe9db0ac028a9d8a9086acd6ab9c6c773f163fa21224845edbc00ba6232
+ languageName: node
+ linkType: hard
+
+"@types/react@npm:18.0.17":
+ version: 18.0.17
+ resolution: "@types/react@npm:18.0.17"
+ dependencies:
+ "@types/prop-types": "*"
+ "@types/scheduler": "*"
+ csstype: ^3.0.2
+ checksum: 18cae64f5bfd6bb58fbd8ee2ba52ec82de844f114254e26de7b513e4b86621f643f9b71d7066958cd571b0d78cb86cbceda449c5289f9349ca573df29ab69252
+ languageName: node
+ linkType: hard
+
+"@types/react@npm:^17, @types/react@npm:^17.0.37":
+ version: 17.0.50
+ resolution: "@types/react@npm:17.0.50"
+ dependencies:
+ "@types/prop-types": "*"
+ "@types/scheduler": "*"
+ csstype: ^3.0.2
+ checksum: b5629dff7c2f3e9fcba95a19b2b3bfd78d7cacc33ba5fc26413dba653d34afcac3b93ddabe563e8062382688a1eac7db68e93739bb8e712d27637a03aaafbbb8
+ languageName: node
+ linkType: hard
+
+"@types/scheduler@npm:*":
+ version: 0.16.2
+ resolution: "@types/scheduler@npm:0.16.2"
+ checksum: b6b4dcfeae6deba2e06a70941860fb1435730576d3689225a421280b7742318d1548b3d22c1f66ab68e414f346a9542f29240bc955b6332c5b11e561077583bc
+ languageName: node
+ linkType: hard
+
+"@typescript-eslint/parser@npm:^5.21.0":
+ version: 5.37.0
+ resolution: "@typescript-eslint/parser@npm:5.37.0"
+ dependencies:
+ "@typescript-eslint/scope-manager": 5.37.0
+ "@typescript-eslint/types": 5.37.0
+ "@typescript-eslint/typescript-estree": 5.37.0
+ debug: ^4.3.4
+ peerDependencies:
+ eslint: ^6.0.0 || ^7.0.0 || ^8.0.0
+ peerDependenciesMeta:
+ typescript:
+ optional: true
+ checksum: 33343e27c9602820d43ee12de9797365d97a5cf3f716e750fa44de760f2a2c6800f3bc4fa54931ac70c0e0ede77a92224f8151da7f30fed3bf692a029d6659af
+ languageName: node
+ linkType: hard
+
+"@typescript-eslint/scope-manager@npm:5.37.0":
+ version: 5.37.0
+ resolution: "@typescript-eslint/scope-manager@npm:5.37.0"
+ dependencies:
+ "@typescript-eslint/types": 5.37.0
+ "@typescript-eslint/visitor-keys": 5.37.0
+ checksum: 1c439e21ffa63ebaadb8c8363e9d668132a835a28203e5b779366bfa56772f332e5dedb50d63dffb836839b9d9c4e66aa9e3ea47b8c59465b18a0cbd063ec7a3
+ languageName: node
+ linkType: hard
+
+"@typescript-eslint/types@npm:5.37.0":
+ version: 5.37.0
+ resolution: "@typescript-eslint/types@npm:5.37.0"
+ checksum: 899e59e7775fa95c2d9fcac5cc02cc49d83af5f1ffc706df495046c3b3733f79d5489568b01bfaf8c9ae4636e057056866adc783113036f774580086d0189f21
+ languageName: node
+ linkType: hard
+
+"@typescript-eslint/typescript-estree@npm:5.37.0":
+ version: 5.37.0
+ resolution: "@typescript-eslint/typescript-estree@npm:5.37.0"
+ dependencies:
+ "@typescript-eslint/types": 5.37.0
+ "@typescript-eslint/visitor-keys": 5.37.0
+ debug: ^4.3.4
+ globby: ^11.1.0
+ is-glob: ^4.0.3
+ semver: ^7.3.7
+ tsutils: ^3.21.0
+ peerDependenciesMeta:
+ typescript:
+ optional: true
+ checksum: 80365a50fa11ed39bf54d9ef06e264fbbf3bdbcc55b7d7d555ef0be915edae40ec30e98d08b3f6ef048e1874450cbcb1e7d9f429d4f420dacbbde45d3376a7bc
+ languageName: node
+ linkType: hard
+
+"@typescript-eslint/visitor-keys@npm:5.37.0":
+ version: 5.37.0
+ resolution: "@typescript-eslint/visitor-keys@npm:5.37.0"
+ dependencies:
+ "@typescript-eslint/types": 5.37.0
+ eslint-visitor-keys: ^3.3.0
+ checksum: d6193550f77413aead0cb267e058df80b80a488c8fb4e39beb5f0a70b971c41682a6391903fbc5f3dd859a872016288c434d631b8efc3ac5a04edbdb7b63b5f6
+ languageName: node
+ linkType: hard
+
+"acorn-jsx@npm:^5.3.1":
+ version: 5.3.2
+ resolution: "acorn-jsx@npm:5.3.2"
+ peerDependencies:
+ acorn: ^6.0.0 || ^7.0.0 || ^8.0.0
+ checksum: c3d3b2a89c9a056b205b69530a37b972b404ee46ec8e5b341666f9513d3163e2a4f214a71f4dfc7370f5a9c07472d2fd1c11c91c3f03d093e37637d95da98950
+ languageName: node
+ linkType: hard
+
+"acorn@npm:^7.4.0":
+ version: 7.4.1
+ resolution: "acorn@npm:7.4.1"
+ bin:
+ acorn: bin/acorn
+ checksum: 1860f23c2107c910c6177b7b7be71be350db9e1080d814493fae143ae37605189504152d1ba8743ba3178d0b37269ce1ffc42b101547fdc1827078f82671e407
+ languageName: node
+ linkType: hard
+
+"ajv@npm:^6.10.0, ajv@npm:^6.12.4":
+ version: 6.12.6
+ resolution: "ajv@npm:6.12.6"
+ dependencies:
+ fast-deep-equal: ^3.1.1
+ fast-json-stable-stringify: ^2.0.0
+ json-schema-traverse: ^0.4.1
+ uri-js: ^4.2.2
+ checksum: 874972efe5c4202ab0a68379481fbd3d1b5d0a7bd6d3cc21d40d3536ebff3352a2a1fabb632d4fd2cc7fe4cbdcd5ed6782084c9bbf7f32a1536d18f9da5007d4
+ languageName: node
+ linkType: hard
+
+"ajv@npm:^8.0.1":
+ version: 8.11.0
+ resolution: "ajv@npm:8.11.0"
+ dependencies:
+ fast-deep-equal: ^3.1.1
+ json-schema-traverse: ^1.0.0
+ require-from-string: ^2.0.2
+ uri-js: ^4.2.2
+ checksum: 5e0ff226806763be73e93dd7805b634f6f5921e3e90ca04acdf8db81eed9d8d3f0d4c5f1213047f45ebbf8047ffe0c840fa1ef2ec42c3a644899f69aa72b5bef
+ languageName: node
+ linkType: hard
+
+"ansi-colors@npm:^4.1.1":
+ version: 4.1.3
+ resolution: "ansi-colors@npm:4.1.3"
+ checksum: a9c2ec842038a1fabc7db9ece7d3177e2fe1c5dc6f0c51ecfbf5f39911427b89c00b5dc6b8bd95f82a26e9b16aaae2e83d45f060e98070ce4d1333038edceb0e
+ languageName: node
+ linkType: hard
+
+"ansi-regex@npm:^5.0.1":
+ version: 5.0.1
+ resolution: "ansi-regex@npm:5.0.1"
+ checksum: 2aa4bb54caf2d622f1afdad09441695af2a83aa3fe8b8afa581d205e57ed4261c183c4d3877cee25794443fde5876417d859c108078ab788d6af7e4fe52eb66b
+ languageName: node
+ linkType: hard
+
+"ansi-styles@npm:^3.2.1":
+ version: 3.2.1
+ resolution: "ansi-styles@npm:3.2.1"
+ dependencies:
+ color-convert: ^1.9.0
+ checksum: d85ade01c10e5dd77b6c89f34ed7531da5830d2cb5882c645f330079975b716438cd7ebb81d0d6e6b4f9c577f19ae41ab55f07f19786b02f9dfd9e0377395665
+ languageName: node
+ linkType: hard
+
+"ansi-styles@npm:^4.0.0, ansi-styles@npm:^4.1.0":
+ version: 4.3.0
+ resolution: "ansi-styles@npm:4.3.0"
+ dependencies:
+ color-convert: ^2.0.1
+ checksum: 513b44c3b2105dd14cc42a19271e80f386466c4be574bccf60b627432f9198571ebf4ab1e4c3ba17347658f4ee1711c163d574248c0c1cdc2d5917a0ad582ec4
+ languageName: node
+ linkType: hard
+
+"argparse@npm:^1.0.7":
+ version: 1.0.10
+ resolution: "argparse@npm:1.0.10"
+ dependencies:
+ sprintf-js: ~1.0.2
+ checksum: 7ca6e45583a28de7258e39e13d81e925cfa25d7d4aacbf806a382d3c02fcb13403a07fb8aeef949f10a7cfe4a62da0e2e807b348a5980554cc28ee573ef95945
+ languageName: node
+ linkType: hard
+
+"aria-query@npm:^4.2.2":
+ version: 4.2.2
+ resolution: "aria-query@npm:4.2.2"
+ dependencies:
+ "@babel/runtime": ^7.10.2
+ "@babel/runtime-corejs3": ^7.10.2
+ checksum: 38401a9a400f26f3dcc24b84997461a16b32869a9893d323602bed8da40a8bcc0243b8d2880e942249a1496cea7a7de769e93d21c0baa439f01e1ee936fed665
+ languageName: node
+ linkType: hard
+
+"array-includes@npm:^3.1.4, array-includes@npm:^3.1.5":
+ version: 3.1.5
+ resolution: "array-includes@npm:3.1.5"
+ dependencies:
+ call-bind: ^1.0.2
+ define-properties: ^1.1.4
+ es-abstract: ^1.19.5
+ get-intrinsic: ^1.1.1
+ is-string: ^1.0.7
+ checksum: f6f24d834179604656b7bec3e047251d5cc87e9e87fab7c175c61af48e80e75acd296017abcde21fb52292ab6a2a449ab2ee37213ee48c8709f004d75983f9c5
+ languageName: node
+ linkType: hard
+
+"array-union@npm:^2.1.0":
+ version: 2.1.0
+ resolution: "array-union@npm:2.1.0"
+ checksum: 5bee12395cba82da674931df6d0fea23c4aa4660cb3b338ced9f828782a65caa232573e6bf3968f23e0c5eb301764a382cef2f128b170a9dc59de0e36c39f98d
+ languageName: node
+ linkType: hard
+
+"array.prototype.flat@npm:^1.2.5":
+ version: 1.3.0
+ resolution: "array.prototype.flat@npm:1.3.0"
+ dependencies:
+ call-bind: ^1.0.2
+ define-properties: ^1.1.3
+ es-abstract: ^1.19.2
+ es-shim-unscopables: ^1.0.0
+ checksum: 2a652b3e8dc0bebb6117e42a5ab5738af0203a14c27341d7bb2431467bdb4b348e2c5dc555dfcda8af0a5e4075c400b85311ded73861c87290a71a17c3e0a257
+ languageName: node
+ linkType: hard
+
+"array.prototype.flatmap@npm:^1.3.0":
+ version: 1.3.0
+ resolution: "array.prototype.flatmap@npm:1.3.0"
+ dependencies:
+ call-bind: ^1.0.2
+ define-properties: ^1.1.3
+ es-abstract: ^1.19.2
+ es-shim-unscopables: ^1.0.0
+ checksum: 818538f39409c4045d874be85df0dbd195e1446b14d22f95bdcfefea44ae77db44e42dcd89a559254ec5a7c8b338cfc986cc6d641e3472f9a5326b21eb2976a2
+ languageName: node
+ linkType: hard
+
+"ast-types-flow@npm:^0.0.7":
+ version: 0.0.7
+ resolution: "ast-types-flow@npm:0.0.7"
+ checksum: a26dcc2182ffee111cad7c471759b0bda22d3b7ebacf27c348b22c55f16896b18ab0a4d03b85b4020dce7f3e634b8f00b593888f622915096ea1927fa51866c4
+ languageName: node
+ linkType: hard
+
+"astral-regex@npm:^2.0.0":
+ version: 2.0.0
+ resolution: "astral-regex@npm:2.0.0"
+ checksum: 876231688c66400473ba505731df37ea436e574dd524520294cc3bbc54ea40334865e01fa0d074d74d036ee874ee7e62f486ea38bc421ee8e6a871c06f011766
+ languageName: node
+ linkType: hard
+
+"axe-core@npm:^4.4.3":
+ version: 4.4.3
+ resolution: "axe-core@npm:4.4.3"
+ checksum: c3ea000d9ace3ba0bc747c8feafc24b0de62a0f7d93021d0f77b19c73fca15341843510f6170da563d51535d6cfb7a46c5fc0ea36170549dbb44b170208450a2
+ languageName: node
+ linkType: hard
+
+"axobject-query@npm:^2.2.0":
+ version: 2.2.0
+ resolution: "axobject-query@npm:2.2.0"
+ checksum: 96b8c7d807ca525f41ad9b286186e2089b561ba63a6d36c3e7d73dc08150714660995c7ad19cda05784458446a0793b45246db45894631e13853f48c1aa3117f
+ languageName: node
+ linkType: hard
+
+"balanced-match@npm:^1.0.0":
+ version: 1.0.2
+ resolution: "balanced-match@npm:1.0.2"
+ checksum: 9706c088a283058a8a99e0bf91b0a2f75497f185980d9ffa8b304de1d9e58ebda7c72c07ebf01dadedaac5b2907b2c6f566f660d62bd336c3468e960403b9d65
+ languageName: node
+ linkType: hard
+
+"berry-patch@workspace:.":
+ version: 0.0.0-use.local
+ resolution: "berry-patch@workspace:."
+ dependencies:
+ eslint-config-custom: "*"
+ prettier: latest
+ turbo: latest
+ languageName: unknown
+ linkType: soft
+
+"brace-expansion@npm:^1.1.7":
+ version: 1.1.11
+ resolution: "brace-expansion@npm:1.1.11"
+ dependencies:
+ balanced-match: ^1.0.0
+ concat-map: 0.0.1
+ checksum: faf34a7bb0c3fcf4b59c7808bc5d2a96a40988addf2e7e09dfbb67a2251800e0d14cd2bfc1aa79174f2f5095c54ff27f46fb1289fe2d77dac755b5eb3434cc07
+ languageName: node
+ linkType: hard
+
+"braces@npm:^3.0.2":
+ version: 3.0.2
+ resolution: "braces@npm:3.0.2"
+ dependencies:
+ fill-range: ^7.0.1
+ checksum: e2a8e769a863f3d4ee887b5fe21f63193a891c68b612ddb4b68d82d1b5f3ff9073af066c343e9867a393fe4c2555dcb33e89b937195feb9c1613d259edfcd459
+ languageName: node
+ linkType: hard
+
+"browserslist@npm:^4.21.3":
+ version: 4.21.4
+ resolution: "browserslist@npm:4.21.4"
+ dependencies:
+ caniuse-lite: ^1.0.30001400
+ electron-to-chromium: ^1.4.251
+ node-releases: ^2.0.6
+ update-browserslist-db: ^1.0.9
+ bin:
+ browserslist: cli.js
+ checksum: 4af3793704dbb4615bcd29059ab472344dc7961c8680aa6c4bb84f05340e14038d06a5aead58724eae69455b8fade8b8c69f1638016e87e5578969d74c078b79
+ languageName: node
+ linkType: hard
+
+"call-bind@npm:^1.0.0, call-bind@npm:^1.0.2":
+ version: 1.0.2
+ resolution: "call-bind@npm:1.0.2"
+ dependencies:
+ function-bind: ^1.1.1
+ get-intrinsic: ^1.0.2
+ checksum: f8e31de9d19988a4b80f3e704788c4a2d6b6f3d17cfec4f57dc29ced450c53a49270dc66bf0fbd693329ee948dd33e6c90a329519aef17474a4d961e8d6426b0
+ languageName: node
+ linkType: hard
+
+"callsites@npm:^3.0.0":
+ version: 3.1.0
+ resolution: "callsites@npm:3.1.0"
+ checksum: 072d17b6abb459c2ba96598918b55868af677154bec7e73d222ef95a8fdb9bbf7dae96a8421085cdad8cd190d86653b5b6dc55a4484f2e5b2e27d5e0c3fc15b3
+ languageName: node
+ linkType: hard
+
+"caniuse-lite@npm:^1.0.30001332, caniuse-lite@npm:^1.0.30001400":
+ version: 1.0.30001400
+ resolution: "caniuse-lite@npm:1.0.30001400"
+ checksum: 984e29d3c02fd02a59cc92ef4a5e9390fce250de3791056362347cf901f0d91041246961a57cfa8fed800538d03ee341bc4f7eaed19bf7be0ef8a181d94cd848
+ languageName: node
+ linkType: hard
+
+"chalk@npm:^2.0.0":
+ version: 2.4.2
+ resolution: "chalk@npm:2.4.2"
+ dependencies:
+ ansi-styles: ^3.2.1
+ escape-string-regexp: ^1.0.5
+ supports-color: ^5.3.0
+ checksum: ec3661d38fe77f681200f878edbd9448821924e0f93a9cefc0e26a33b145f1027a2084bf19967160d11e1f03bfe4eaffcabf5493b89098b2782c3fe0b03d80c2
+ languageName: node
+ linkType: hard
+
+"chalk@npm:^4.0.0":
+ version: 4.1.2
+ resolution: "chalk@npm:4.1.2"
+ dependencies:
+ ansi-styles: ^4.1.0
+ supports-color: ^7.1.0
+ checksum: fe75c9d5c76a7a98d45495b91b2172fa3b7a09e0cc9370e5c8feb1c567b85c4288e2b3fded7cfdd7359ac28d6b3844feb8b82b8686842e93d23c827c417e83fc
+ languageName: node
+ linkType: hard
+
+"color-convert@npm:^1.9.0":
+ version: 1.9.3
+ resolution: "color-convert@npm:1.9.3"
+ dependencies:
+ color-name: 1.1.3
+ checksum: fd7a64a17cde98fb923b1dd05c5f2e6f7aefda1b60d67e8d449f9328b4e53b228a428fd38bfeaeb2db2ff6b6503a776a996150b80cdf224062af08a5c8a3a203
+ languageName: node
+ linkType: hard
+
+"color-convert@npm:^2.0.1":
+ version: 2.0.1
+ resolution: "color-convert@npm:2.0.1"
+ dependencies:
+ color-name: ~1.1.4
+ checksum: 79e6bdb9fd479a205c71d89574fccfb22bd9053bd98c6c4d870d65c132e5e904e6034978e55b43d69fcaa7433af2016ee203ce76eeba9cfa554b373e7f7db336
+ languageName: node
+ linkType: hard
+
+"color-name@npm:1.1.3":
+ version: 1.1.3
+ resolution: "color-name@npm:1.1.3"
+ checksum: 09c5d3e33d2105850153b14466501f2bfb30324a2f76568a408763a3b7433b0e50e5b4ab1947868e65cb101bb7cb75029553f2c333b6d4b8138a73fcc133d69d
+ languageName: node
+ linkType: hard
+
+"color-name@npm:~1.1.4":
+ version: 1.1.4
+ resolution: "color-name@npm:1.1.4"
+ checksum: b0445859521eb4021cd0fb0cc1a75cecf67fceecae89b63f62b201cca8d345baf8b952c966862a9d9a2632987d4f6581f0ec8d957dfacece86f0a7919316f610
+ languageName: node
+ linkType: hard
+
+"concat-map@npm:0.0.1":
+ version: 0.0.1
+ resolution: "concat-map@npm:0.0.1"
+ checksum: 902a9f5d8967a3e2faf138d5cb784b9979bad2e6db5357c5b21c568df4ebe62bcb15108af1b2253744844eb964fc023fbd9afbbbb6ddd0bcc204c6fb5b7bf3af
+ languageName: node
+ linkType: hard
+
+"convert-source-map@npm:^1.7.0":
+ version: 1.8.0
+ resolution: "convert-source-map@npm:1.8.0"
+ dependencies:
+ safe-buffer: ~5.1.1
+ checksum: 985d974a2d33e1a2543ada51c93e1ba2f73eaed608dc39f229afc78f71dcc4c8b7d7c684aa647e3c6a3a204027444d69e53e169ce94e8d1fa8d7dee80c9c8fed
+ languageName: node
+ linkType: hard
+
+"core-js-pure@npm:^3.25.1":
+ version: 3.25.1
+ resolution: "core-js-pure@npm:3.25.1"
+ checksum: 0123131ec7ab3a1e56f0b4df4ae659de03d9c245ce281637d4d0f18f9839d8e0cfbfa989bd577ce1b67826f889a7dcc734421f697cf1bbe59f605f29c537a678
+ languageName: node
+ linkType: hard
+
+"cross-spawn@npm:^7.0.2":
+ version: 7.0.3
+ resolution: "cross-spawn@npm:7.0.3"
+ dependencies:
+ path-key: ^3.1.0
+ shebang-command: ^2.0.0
+ which: ^2.0.1
+ checksum: 671cc7c7288c3a8406f3c69a3ae2fc85555c04169e9d611def9a675635472614f1c0ed0ef80955d5b6d4e724f6ced67f0ad1bb006c2ea643488fcfef994d7f52
+ languageName: node
+ linkType: hard
+
+"csstype@npm:^3.0.2":
+ version: 3.1.1
+ resolution: "csstype@npm:3.1.1"
+ checksum: 1f7b4f5fdd955b7444b18ebdddf3f5c699159f13e9cf8ac9027ae4a60ae226aef9bbb14a6e12ca7dba3358b007cee6354b116e720262867c398de6c955ea451d
+ languageName: node
+ linkType: hard
+
+"damerau-levenshtein@npm:^1.0.8":
+ version: 1.0.8
+ resolution: "damerau-levenshtein@npm:1.0.8"
+ checksum: d240b7757544460ae0586a341a53110ab0a61126570ef2d8c731e3eab3f0cb6e488e2609e6a69b46727635de49be20b071688698744417ff1b6c1d7ccd03e0de
+ languageName: node
+ linkType: hard
+
+"debug@npm:^2.6.9":
+ version: 2.6.9
+ resolution: "debug@npm:2.6.9"
+ dependencies:
+ ms: 2.0.0
+ checksum: d2f51589ca66df60bf36e1fa6e4386b318c3f1e06772280eea5b1ae9fd3d05e9c2b7fd8a7d862457d00853c75b00451aa2d7459b924629ee385287a650f58fe6
+ languageName: node
+ linkType: hard
+
+"debug@npm:^3.2.7":
+ version: 3.2.7
+ resolution: "debug@npm:3.2.7"
+ dependencies:
+ ms: ^2.1.1
+ checksum: b3d8c5940799914d30314b7c3304a43305fd0715581a919dacb8b3176d024a782062368405b47491516d2091d6462d4d11f2f4974a405048094f8bfebfa3071c
+ languageName: node
+ linkType: hard
+
+"debug@npm:^4.0.1, debug@npm:^4.1.0, debug@npm:^4.1.1, debug@npm:^4.3.4":
+ version: 4.3.4
+ resolution: "debug@npm:4.3.4"
+ dependencies:
+ ms: 2.1.2
+ peerDependenciesMeta:
+ supports-color:
+ optional: true
+ checksum: 3dbad3f94ea64f34431a9cbf0bafb61853eda57bff2880036153438f50fb5a84f27683ba0d8e5426bf41a8c6ff03879488120cf5b3a761e77953169c0600a708
+ languageName: node
+ linkType: hard
+
+"deep-is@npm:^0.1.3":
+ version: 0.1.4
+ resolution: "deep-is@npm:0.1.4"
+ checksum: edb65dd0d7d1b9c40b2f50219aef30e116cedd6fc79290e740972c132c09106d2e80aa0bc8826673dd5a00222d4179c84b36a790eef63a4c4bca75a37ef90804
+ languageName: node
+ linkType: hard
+
+"define-properties@npm:^1.1.3, define-properties@npm:^1.1.4":
+ version: 1.1.4
+ resolution: "define-properties@npm:1.1.4"
+ dependencies:
+ has-property-descriptors: ^1.0.0
+ object-keys: ^1.1.1
+ checksum: ce0aef3f9eb193562b5cfb79b2d2c86b6a109dfc9fdcb5f45d680631a1a908c06824ddcdb72b7573b54e26ace07f0a23420aaba0d5c627b34d2c1de8ef527e2b
+ languageName: node
+ linkType: hard
+
+"dir-glob@npm:^3.0.1":
+ version: 3.0.1
+ resolution: "dir-glob@npm:3.0.1"
+ dependencies:
+ path-type: ^4.0.0
+ checksum: fa05e18324510d7283f55862f3161c6759a3f2f8dbce491a2fc14c8324c498286c54282c1f0e933cb930da8419b30679389499b919122952a4f8592362ef4615
+ languageName: node
+ linkType: hard
+
+"docs@workspace:apps/docs":
+ version: 0.0.0-use.local
+ resolution: "docs@workspace:apps/docs"
+ dependencies:
+ "@babel/core": ^7.0.0
+ "@types/node": ^17.0.12
+ "@types/react": 18.0.17
+ eslint: 7.32.0
+ eslint-config-custom: "*"
+ lodash: ^4.17.21
+ next: 12.2.5
+ next-transpile-modules: 9.0.0
+ react: 18.2.0
+ react-dom: 18.2.0
+ tsconfig: "*"
+ typescript: ^4.5.3
+ ui: "*"
+ languageName: unknown
+ linkType: soft
+
+"doctrine@npm:^2.1.0":
+ version: 2.1.0
+ resolution: "doctrine@npm:2.1.0"
+ dependencies:
+ esutils: ^2.0.2
+ checksum: a45e277f7feaed309fe658ace1ff286c6e2002ac515af0aaf37145b8baa96e49899638c7cd47dccf84c3d32abfc113246625b3ac8f552d1046072adee13b0dc8
+ languageName: node
+ linkType: hard
+
+"doctrine@npm:^3.0.0":
+ version: 3.0.0
+ resolution: "doctrine@npm:3.0.0"
+ dependencies:
+ esutils: ^2.0.2
+ checksum: fd7673ca77fe26cd5cba38d816bc72d641f500f1f9b25b83e8ce28827fe2da7ad583a8da26ab6af85f834138cf8dae9f69b0cd6ab925f52ddab1754db44d99ce
+ languageName: node
+ linkType: hard
+
+"electron-to-chromium@npm:^1.4.251":
+ version: 1.4.251
+ resolution: "electron-to-chromium@npm:1.4.251"
+ checksum: 470a04dfe1d34814f8bc7e1dde606851b6f787a6d78655a57df063844fc71feb64ce793c52a3a130ceac1fc368b8d3e25a4c55c847a1e9c02c3090f9dcbf40ac
+ languageName: node
+ linkType: hard
+
+"emoji-regex@npm:^8.0.0":
+ version: 8.0.0
+ resolution: "emoji-regex@npm:8.0.0"
+ checksum: d4c5c39d5a9868b5fa152f00cada8a936868fd3367f33f71be515ecee4c803132d11b31a6222b2571b1e5f7e13890156a94880345594d0ce7e3c9895f560f192
+ languageName: node
+ linkType: hard
+
+"emoji-regex@npm:^9.2.2":
+ version: 9.2.2
+ resolution: "emoji-regex@npm:9.2.2"
+ checksum: 8487182da74aabd810ac6d6f1994111dfc0e331b01271ae01ec1eb0ad7b5ecc2bbbbd2f053c05cb55a1ac30449527d819bbfbf0e3de1023db308cbcb47f86601
+ languageName: node
+ linkType: hard
+
+"enhanced-resolve@npm:^5.7.0":
+ version: 5.10.0
+ resolution: "enhanced-resolve@npm:5.10.0"
+ dependencies:
+ graceful-fs: ^4.2.4
+ tapable: ^2.2.0
+ checksum: 0bb9830704db271610f900e8d79d70a740ea16f251263362b0c91af545576d09fe50103496606c1300a05e588372d6f9780a9bc2e30ce8ef9b827ec8f44687ff
+ languageName: node
+ linkType: hard
+
+"enquirer@npm:^2.3.5":
+ version: 2.3.6
+ resolution: "enquirer@npm:2.3.6"
+ dependencies:
+ ansi-colors: ^4.1.1
+ checksum: 1c0911e14a6f8d26721c91e01db06092a5f7675159f0261d69c403396a385afd13dd76825e7678f66daffa930cfaa8d45f506fb35f818a2788463d022af1b884
+ languageName: node
+ linkType: hard
+
+"es-abstract@npm:^1.19.0, es-abstract@npm:^1.19.1, es-abstract@npm:^1.19.2, es-abstract@npm:^1.19.5":
+ version: 1.20.2
+ resolution: "es-abstract@npm:1.20.2"
+ dependencies:
+ call-bind: ^1.0.2
+ es-to-primitive: ^1.2.1
+ function-bind: ^1.1.1
+ function.prototype.name: ^1.1.5
+ get-intrinsic: ^1.1.2
+ get-symbol-description: ^1.0.0
+ has: ^1.0.3
+ has-property-descriptors: ^1.0.0
+ has-symbols: ^1.0.3
+ internal-slot: ^1.0.3
+ is-callable: ^1.2.4
+ is-negative-zero: ^2.0.2
+ is-regex: ^1.1.4
+ is-shared-array-buffer: ^1.0.2
+ is-string: ^1.0.7
+ is-weakref: ^1.0.2
+ object-inspect: ^1.12.2
+ object-keys: ^1.1.1
+ object.assign: ^4.1.4
+ regexp.prototype.flags: ^1.4.3
+ string.prototype.trimend: ^1.0.5
+ string.prototype.trimstart: ^1.0.5
+ unbox-primitive: ^1.0.2
+ checksum: ab893dd1f849250f5a2da82656b4e21b511f76429b25a4aea5c8b2a3007ff01cb8e112987d0dd7693b9ad9e6399f8f7be133285d6196a5ebd1b13a4ee2258f70
+ languageName: node
+ linkType: hard
+
+"es-shim-unscopables@npm:^1.0.0":
+ version: 1.0.0
+ resolution: "es-shim-unscopables@npm:1.0.0"
+ dependencies:
+ has: ^1.0.3
+ checksum: 83e95cadbb6ee44d3644dfad60dcad7929edbc42c85e66c3e99aefd68a3a5c5665f2686885cddb47dfeabfd77bd5ea5a7060f2092a955a729bbd8834f0d86fa1
+ languageName: node
+ linkType: hard
+
+"es-to-primitive@npm:^1.2.1":
+ version: 1.2.1
+ resolution: "es-to-primitive@npm:1.2.1"
+ dependencies:
+ is-callable: ^1.1.4
+ is-date-object: ^1.0.1
+ is-symbol: ^1.0.2
+ checksum: 4ead6671a2c1402619bdd77f3503991232ca15e17e46222b0a41a5d81aebc8740a77822f5b3c965008e631153e9ef0580540007744521e72de8e33599fca2eed
+ languageName: node
+ linkType: hard
+
+"escalade@npm:^3.1.1":
+ version: 3.1.1
+ resolution: "escalade@npm:3.1.1"
+ checksum: a3e2a99f07acb74b3ad4989c48ca0c3140f69f923e56d0cba0526240ee470b91010f9d39001f2a4a313841d237ede70a729e92125191ba5d21e74b106800b133
+ languageName: node
+ linkType: hard
+
+"escape-string-regexp@npm:^1.0.5":
+ version: 1.0.5
+ resolution: "escape-string-regexp@npm:1.0.5"
+ checksum: 6092fda75c63b110c706b6a9bfde8a612ad595b628f0bd2147eea1d3406723020810e591effc7db1da91d80a71a737a313567c5abb3813e8d9c71f4aa595b410
+ languageName: node
+ linkType: hard
+
+"escape-string-regexp@npm:^4.0.0":
+ version: 4.0.0
+ resolution: "escape-string-regexp@npm:4.0.0"
+ checksum: 98b48897d93060f2322108bf29db0feba7dd774be96cd069458d1453347b25ce8682ecc39859d4bca2203cc0ab19c237bcc71755eff49a0f8d90beadeeba5cc5
+ languageName: node
+ linkType: hard
+
+"eslint-config-custom@*, eslint-config-custom@workspace:packages/eslint-config-custom":
+ version: 0.0.0-use.local
+ resolution: "eslint-config-custom@workspace:packages/eslint-config-custom"
+ dependencies:
+ eslint: ^7.23.0
+ eslint-config-next: ^12.0.8
+ eslint-config-prettier: ^8.3.0
+ eslint-config-turbo: latest
+ eslint-plugin-react: 7.31.7
+ typescript: ^4.7.4
+ languageName: unknown
+ linkType: soft
+
+"eslint-config-next@npm:^12.0.8":
+ version: 12.3.0
+ resolution: "eslint-config-next@npm:12.3.0"
+ dependencies:
+ "@next/eslint-plugin-next": 12.3.0
+ "@rushstack/eslint-patch": ^1.1.3
+ "@typescript-eslint/parser": ^5.21.0
+ eslint-import-resolver-node: ^0.3.6
+ eslint-import-resolver-typescript: ^2.7.1
+ eslint-plugin-import: ^2.26.0
+ eslint-plugin-jsx-a11y: ^6.5.1
+ eslint-plugin-react: ^7.29.4
+ eslint-plugin-react-hooks: ^4.5.0
+ peerDependencies:
+ eslint: ^7.23.0 || ^8.0.0
+ typescript: ">=3.3.1"
+ peerDependenciesMeta:
+ typescript:
+ optional: true
+ checksum: 50a2e43c515350c689cd848973b953c1d058303b84e05ecba5b5bf0f8feffe3935011de3b574ba35d48de8a5d7d5c42567d21d1a17f02189a701edeb6d76a8e0
+ languageName: node
+ linkType: hard
+
+"eslint-config-prettier@npm:^8.3.0":
+ version: 8.5.0
+ resolution: "eslint-config-prettier@npm:8.5.0"
+ peerDependencies:
+ eslint: ">=7.0.0"
+ bin:
+ eslint-config-prettier: bin/cli.js
+ checksum: 0d0f5c32e7a0ad91249467ce71ca92394ccd343178277d318baf32063b79ea90216f4c81d1065d60f96366fdc60f151d4d68ae7811a58bd37228b84c2083f893
+ languageName: node
+ linkType: hard
+
+eslint-config-turbo@latest:
+ version: 0.0.3
+ resolution: "eslint-config-turbo@npm:0.0.3"
+ dependencies:
+ eslint-plugin-turbo: 0.0.3
+ peerDependencies:
+ eslint: ^7.23.0 || ^8.0.0
+ checksum: c92255e91dd0865faeebc857eb3a862e8ca2ccb37fc54ffce93b73cd41e95ad456826ae6634772450dfa9c705b67c288f476e8e413fab3d8194dc271754528e2
+ languageName: node
+ linkType: hard
+
+"eslint-import-resolver-node@npm:^0.3.6":
+ version: 0.3.6
+ resolution: "eslint-import-resolver-node@npm:0.3.6"
+ dependencies:
+ debug: ^3.2.7
+ resolve: ^1.20.0
+ checksum: 6266733af1e112970e855a5bcc2d2058fb5ae16ad2a6d400705a86b29552b36131ffc5581b744c23d550de844206fb55e9193691619ee4dbf225c4bde526b1c8
+ languageName: node
+ linkType: hard
+
+"eslint-import-resolver-typescript@npm:^2.7.1":
+ version: 2.7.1
+ resolution: "eslint-import-resolver-typescript@npm:2.7.1"
+ dependencies:
+ debug: ^4.3.4
+ glob: ^7.2.0
+ is-glob: ^4.0.3
+ resolve: ^1.22.0
+ tsconfig-paths: ^3.14.1
+ peerDependencies:
+ eslint: "*"
+ eslint-plugin-import: "*"
+ checksum: 1d81b657b1f73bf95b8f0b745c0305574b91630c1db340318f3ca8918e206fce20a933b95e7c419338cc4452cb80bb2b2d92acaf01b6aa315c78a332d832545c
+ languageName: node
+ linkType: hard
+
+"eslint-module-utils@npm:^2.7.3":
+ version: 2.7.4
+ resolution: "eslint-module-utils@npm:2.7.4"
+ dependencies:
+ debug: ^3.2.7
+ dependenciesMeta:
+ debug@4.3.4:
+ unplugged: true
+ peerDependenciesMeta:
+ eslint:
+ optional: true
+ checksum: 5da13645daff145a5c922896b258f8bba560722c3767254e458d894ff5fbb505d6dfd945bffa932a5b0ae06714da2379bd41011c4c20d2d59cc83e23895360f7
+ languageName: node
+ linkType: hard
+
+"eslint-plugin-import@npm:^2.26.0":
+ version: 2.26.0
+ resolution: "eslint-plugin-import@npm:2.26.0"
+ dependencies:
+ array-includes: ^3.1.4
+ array.prototype.flat: ^1.2.5
+ debug: ^2.6.9
+ doctrine: ^2.1.0
+ eslint-import-resolver-node: ^0.3.6
+ eslint-module-utils: ^2.7.3
+ has: ^1.0.3
+ is-core-module: ^2.8.1
+ is-glob: ^4.0.3
+ minimatch: ^3.1.2
+ object.values: ^1.1.5
+ resolve: ^1.22.0
+ tsconfig-paths: ^3.14.1
+ peerDependencies:
+ eslint: ^2 || ^3 || ^4 || ^5 || ^6 || ^7.2.0 || ^8
+ checksum: 0bf77ad80339554481eafa2b1967449e1f816b94c7a6f9614ce33fb4083c4e6c050f10d241dd50b4975d47922880a34de1e42ea9d8e6fd663ebb768baa67e655
+ languageName: node
+ linkType: hard
+
+"eslint-plugin-jsx-a11y@npm:^6.5.1":
+ version: 6.6.1
+ resolution: "eslint-plugin-jsx-a11y@npm:6.6.1"
+ dependencies:
+ "@babel/runtime": ^7.18.9
+ aria-query: ^4.2.2
+ array-includes: ^3.1.5
+ ast-types-flow: ^0.0.7
+ axe-core: ^4.4.3
+ axobject-query: ^2.2.0
+ damerau-levenshtein: ^1.0.8
+ emoji-regex: ^9.2.2
+ has: ^1.0.3
+ jsx-ast-utils: ^3.3.2
+ language-tags: ^1.0.5
+ minimatch: ^3.1.2
+ semver: ^6.3.0
+ peerDependencies:
+ eslint: ^3 || ^4 || ^5 || ^6 || ^7 || ^8
+ checksum: baae7377f0e25a0cc9b34dc333a3dc6ead9ee8365e445451eff554c3ca267a0a6cb88127fe90395c578ab1b92cfed246aef7dc8d2b48b603389e10181799e144
+ languageName: node
+ linkType: hard
+
+"eslint-plugin-react-hooks@npm:^4.5.0":
+ version: 4.6.0
+ resolution: "eslint-plugin-react-hooks@npm:4.6.0"
+ peerDependencies:
+ eslint: ^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0
+ checksum: 23001801f14c1d16bf0a837ca7970d9dd94e7b560384b41db378b49b6e32dc43d6e2790de1bd737a652a86f81a08d6a91f402525061b47719328f586a57e86c3
+ languageName: node
+ linkType: hard
+
+"eslint-plugin-react@npm:7.31.7":
+ version: 7.31.7
+ resolution: "eslint-plugin-react@npm:7.31.7"
+ dependencies:
+ array-includes: ^3.1.5
+ array.prototype.flatmap: ^1.3.0
+ doctrine: ^2.1.0
+ estraverse: ^5.3.0
+ jsx-ast-utils: ^2.4.1 || ^3.0.0
+ minimatch: ^3.1.2
+ object.entries: ^1.1.5
+ object.fromentries: ^2.0.5
+ object.hasown: ^1.1.1
+ object.values: ^1.1.5
+ prop-types: ^15.8.1
+ resolve: ^2.0.0-next.3
+ semver: ^6.3.0
+ string.prototype.matchall: ^4.0.7
+ peerDependencies:
+ eslint: ^3 || ^4 || ^5 || ^6 || ^7 || ^8
+ checksum: 582d422f531d7d3894fc09ac941ef8b6ad595782cfca5e1d52af5895ce117def7a0ff8afeea0166bff7b6ceae8baec2313614b1571754f539575cfa9351cd2da
+ languageName: node
+ linkType: hard
+
+"eslint-plugin-react@npm:^7.29.4":
+ version: 7.31.8
+ resolution: "eslint-plugin-react@npm:7.31.8"
+ dependencies:
+ array-includes: ^3.1.5
+ array.prototype.flatmap: ^1.3.0
+ doctrine: ^2.1.0
+ estraverse: ^5.3.0
+ jsx-ast-utils: ^2.4.1 || ^3.0.0
+ minimatch: ^3.1.2
+ object.entries: ^1.1.5
+ object.fromentries: ^2.0.5
+ object.hasown: ^1.1.1
+ object.values: ^1.1.5
+ prop-types: ^15.8.1
+ resolve: ^2.0.0-next.3
+ semver: ^6.3.0
+ string.prototype.matchall: ^4.0.7
+ peerDependencies:
+ eslint: ^3 || ^4 || ^5 || ^6 || ^7 || ^8
+ checksum: 0683e2a624a4df6f08264a3f6bc614a81e8f961c83173bdf2d8d3523f84ed5d234cddc976dbc6815913e007c5984df742ba61be0c0592b27c3daabe0f68165a3
+ languageName: node
+ linkType: hard
+
+"eslint-plugin-turbo@npm:0.0.3":
+ version: 0.0.3
+ resolution: "eslint-plugin-turbo@npm:0.0.3"
+ peerDependencies:
+ eslint: ^7.23.0 || ^8.0.0
+ checksum: 18e2b13ede03eee7635d0c67ca792cf46483e90443143bdc06555bf231045fb5f70b2f6f1d67492365b7fe47620408eea22f7548879f3afcb07ccc070aec5c15
+ languageName: node
+ linkType: hard
+
+"eslint-scope@npm:^5.1.1":
+ version: 5.1.1
+ resolution: "eslint-scope@npm:5.1.1"
+ dependencies:
+ esrecurse: ^4.3.0
+ estraverse: ^4.1.1
+ checksum: 47e4b6a3f0cc29c7feedee6c67b225a2da7e155802c6ea13bbef4ac6b9e10c66cd2dcb987867ef176292bf4e64eccc680a49e35e9e9c669f4a02bac17e86abdb
+ languageName: node
+ linkType: hard
+
+"eslint-utils@npm:^2.1.0":
+ version: 2.1.0
+ resolution: "eslint-utils@npm:2.1.0"
+ dependencies:
+ eslint-visitor-keys: ^1.1.0
+ checksum: 27500938f348da42100d9e6ad03ae29b3de19ba757ae1a7f4a087bdcf83ac60949bbb54286492ca61fac1f5f3ac8692dd21537ce6214240bf95ad0122f24d71d
+ languageName: node
+ linkType: hard
+
+"eslint-visitor-keys@npm:^1.1.0, eslint-visitor-keys@npm:^1.3.0":
+ version: 1.3.0
+ resolution: "eslint-visitor-keys@npm:1.3.0"
+ checksum: 37a19b712f42f4c9027e8ba98c2b06031c17e0c0a4c696cd429bd9ee04eb43889c446f2cd545e1ff51bef9593fcec94ecd2c2ef89129fcbbf3adadbef520376a
+ languageName: node
+ linkType: hard
+
+"eslint-visitor-keys@npm:^2.0.0":
+ version: 2.1.0
+ resolution: "eslint-visitor-keys@npm:2.1.0"
+ checksum: e3081d7dd2611a35f0388bbdc2f5da60b3a3c5b8b6e928daffff7391146b434d691577aa95064c8b7faad0b8a680266bcda0a42439c18c717b80e6718d7e267d
+ languageName: node
+ linkType: hard
+
+"eslint-visitor-keys@npm:^3.3.0":
+ version: 3.3.0
+ resolution: "eslint-visitor-keys@npm:3.3.0"
+ checksum: d59e68a7c5a6d0146526b0eec16ce87fbf97fe46b8281e0d41384224375c4e52f5ffb9e16d48f4ea50785cde93f766b0c898e31ab89978d88b0e1720fbfb7808
+ languageName: node
+ linkType: hard
+
+"eslint@npm:7.32.0, eslint@npm:^7.23.0, eslint@npm:^7.32.0":
+ version: 7.32.0
+ resolution: "eslint@npm:7.32.0"
+ dependencies:
+ "@babel/code-frame": 7.12.11
+ "@eslint/eslintrc": ^0.4.3
+ "@humanwhocodes/config-array": ^0.5.0
+ ajv: ^6.10.0
+ chalk: ^4.0.0
+ cross-spawn: ^7.0.2
+ debug: ^4.0.1
+ doctrine: ^3.0.0
+ enquirer: ^2.3.5
+ escape-string-regexp: ^4.0.0
+ eslint-scope: ^5.1.1
+ eslint-utils: ^2.1.0
+ eslint-visitor-keys: ^2.0.0
+ espree: ^7.3.1
+ esquery: ^1.4.0
+ esutils: ^2.0.2
+ fast-deep-equal: ^3.1.3
+ file-entry-cache: ^6.0.1
+ functional-red-black-tree: ^1.0.1
+ glob-parent: ^5.1.2
+ globals: ^13.6.0
+ ignore: ^4.0.6
+ import-fresh: ^3.0.0
+ imurmurhash: ^0.1.4
+ is-glob: ^4.0.0
+ js-yaml: ^3.13.1
+ json-stable-stringify-without-jsonify: ^1.0.1
+ levn: ^0.4.1
+ lodash.merge: ^4.6.2
+ minimatch: ^3.0.4
+ natural-compare: ^1.4.0
+ optionator: ^0.9.1
+ progress: ^2.0.0
+ regexpp: ^3.1.0
+ semver: ^7.2.1
+ strip-ansi: ^6.0.0
+ strip-json-comments: ^3.1.0
+ table: ^6.0.9
+ text-table: ^0.2.0
+ v8-compile-cache: ^2.0.3
+ bin:
+ eslint: bin/eslint.js
+ checksum: cc85af9985a3a11085c011f3d27abe8111006d34cc274291b3c4d7bea51a4e2ff6135780249becd919ba7f6d6d1ecc38a6b73dacb6a7be08d38453b344dc8d37
+ languageName: node
+ linkType: hard
+
+"espree@npm:^7.3.0, espree@npm:^7.3.1":
+ version: 7.3.1
+ resolution: "espree@npm:7.3.1"
+ dependencies:
+ acorn: ^7.4.0
+ acorn-jsx: ^5.3.1
+ eslint-visitor-keys: ^1.3.0
+ checksum: aa9b50dcce883449af2e23bc2b8d9abb77118f96f4cb313935d6b220f77137eaef7724a83c3f6243b96bc0e4ab14766198e60818caad99f9519ae5a336a39b45
+ languageName: node
+ linkType: hard
+
+"esprima@npm:^4.0.0":
+ version: 4.0.1
+ resolution: "esprima@npm:4.0.1"
+ bin:
+ esparse: ./bin/esparse.js
+ esvalidate: ./bin/esvalidate.js
+ checksum: b45bc805a613dbea2835278c306b91aff6173c8d034223fa81498c77dcbce3b2931bf6006db816f62eacd9fd4ea975dfd85a5b7f3c6402cfd050d4ca3c13a628
+ languageName: node
+ linkType: hard
+
+"esquery@npm:^1.4.0":
+ version: 1.4.0
+ resolution: "esquery@npm:1.4.0"
+ dependencies:
+ estraverse: ^5.1.0
+ checksum: a0807e17abd7fbe5fbd4fab673038d6d8a50675cdae6b04fbaa520c34581be0c5fa24582990e8acd8854f671dd291c78bb2efb9e0ed5b62f33bac4f9cf820210
+ languageName: node
+ linkType: hard
+
+"esrecurse@npm:^4.3.0":
+ version: 4.3.0
+ resolution: "esrecurse@npm:4.3.0"
+ dependencies:
+ estraverse: ^5.2.0
+ checksum: ebc17b1a33c51cef46fdc28b958994b1dc43cd2e86237515cbc3b4e5d2be6a811b2315d0a1a4d9d340b6d2308b15322f5c8291059521cc5f4802f65e7ec32837
+ languageName: node
+ linkType: hard
+
+"estraverse@npm:^4.1.1":
+ version: 4.3.0
+ resolution: "estraverse@npm:4.3.0"
+ checksum: a6299491f9940bb246124a8d44b7b7a413a8336f5436f9837aaa9330209bd9ee8af7e91a654a3545aee9c54b3308e78ee360cef1d777d37cfef77d2fa33b5827
+ languageName: node
+ linkType: hard
+
+"estraverse@npm:^5.1.0, estraverse@npm:^5.2.0, estraverse@npm:^5.3.0":
+ version: 5.3.0
+ resolution: "estraverse@npm:5.3.0"
+ checksum: 072780882dc8416ad144f8fe199628d2b3e7bbc9989d9ed43795d2c90309a2047e6bc5979d7e2322a341163d22cfad9e21f4110597fe487519697389497e4e2b
+ languageName: node
+ linkType: hard
+
+"esutils@npm:^2.0.2":
+ version: 2.0.3
+ resolution: "esutils@npm:2.0.3"
+ checksum: 22b5b08f74737379a840b8ed2036a5fb35826c709ab000683b092d9054e5c2a82c27818f12604bfc2a9a76b90b6834ef081edbc1c7ae30d1627012e067c6ec87
+ languageName: node
+ linkType: hard
+
+"fast-deep-equal@npm:^3.1.1, fast-deep-equal@npm:^3.1.3":
+ version: 3.1.3
+ resolution: "fast-deep-equal@npm:3.1.3"
+ checksum: e21a9d8d84f53493b6aa15efc9cfd53dd5b714a1f23f67fb5dc8f574af80df889b3bce25dc081887c6d25457cce704e636395333abad896ccdec03abaf1f3f9d
+ languageName: node
+ linkType: hard
+
+"fast-glob@npm:^3.2.9":
+ version: 3.2.12
+ resolution: "fast-glob@npm:3.2.12"
+ dependencies:
+ "@nodelib/fs.stat": ^2.0.2
+ "@nodelib/fs.walk": ^1.2.3
+ glob-parent: ^5.1.2
+ merge2: ^1.3.0
+ micromatch: ^4.0.4
+ checksum: 0b1990f6ce831c7e28c4d505edcdaad8e27e88ab9fa65eedadb730438cfc7cde4910d6c975d6b7b8dc8a73da4773702ebcfcd6e3518e73938bb1383badfe01c2
+ languageName: node
+ linkType: hard
+
+"fast-json-stable-stringify@npm:^2.0.0":
+ version: 2.1.0
+ resolution: "fast-json-stable-stringify@npm:2.1.0"
+ checksum: b191531e36c607977e5b1c47811158733c34ccb3bfde92c44798929e9b4154884378536d26ad90dfecd32e1ffc09c545d23535ad91b3161a27ddbb8ebe0cbecb
+ languageName: node
+ linkType: hard
+
+"fast-levenshtein@npm:^2.0.6":
+ version: 2.0.6
+ resolution: "fast-levenshtein@npm:2.0.6"
+ checksum: 92cfec0a8dfafd9c7a15fba8f2cc29cd0b62b85f056d99ce448bbcd9f708e18ab2764bda4dd5158364f4145a7c72788538994f0d1787b956ef0d1062b0f7c24c
+ languageName: node
+ linkType: hard
+
+"fastq@npm:^1.6.0":
+ version: 1.13.0
+ resolution: "fastq@npm:1.13.0"
+ dependencies:
+ reusify: ^1.0.4
+ checksum: 32cf15c29afe622af187d12fc9cd93e160a0cb7c31a3bb6ace86b7dea3b28e7b72acde89c882663f307b2184e14782c6c664fa315973c03626c7d4bff070bb0b
+ languageName: node
+ linkType: hard
+
+"file-entry-cache@npm:^6.0.1":
+ version: 6.0.1
+ resolution: "file-entry-cache@npm:6.0.1"
+ dependencies:
+ flat-cache: ^3.0.4
+ checksum: f49701feaa6314c8127c3c2f6173cfefff17612f5ed2daaafc6da13b5c91fd43e3b2a58fd0d63f9f94478a501b167615931e7200e31485e320f74a33885a9c74
+ languageName: node
+ linkType: hard
+
+"fill-range@npm:^7.0.1":
+ version: 7.0.1
+ resolution: "fill-range@npm:7.0.1"
+ dependencies:
+ to-regex-range: ^5.0.1
+ checksum: cc283f4e65b504259e64fd969bcf4def4eb08d85565e906b7d36516e87819db52029a76b6363d0f02d0d532f0033c9603b9e2d943d56ee3b0d4f7ad3328ff917
+ languageName: node
+ linkType: hard
+
+"flat-cache@npm:^3.0.4":
+ version: 3.0.4
+ resolution: "flat-cache@npm:3.0.4"
+ dependencies:
+ flatted: ^3.1.0
+ rimraf: ^3.0.2
+ checksum: 4fdd10ecbcbf7d520f9040dd1340eb5dfe951e6f0ecf2252edeec03ee68d989ec8b9a20f4434270e71bcfd57800dc09b3344fca3966b2eb8f613072c7d9a2365
+ languageName: node
+ linkType: hard
+
+"flatted@npm:^3.1.0":
+ version: 3.2.7
+ resolution: "flatted@npm:3.2.7"
+ checksum: 427633049d55bdb80201c68f7eb1cbd533e03eac541f97d3aecab8c5526f12a20ccecaeede08b57503e772c769e7f8680b37e8d482d1e5f8d7e2194687f9ea35
+ languageName: node
+ linkType: hard
+
+"fs.realpath@npm:^1.0.0":
+ version: 1.0.0
+ resolution: "fs.realpath@npm:1.0.0"
+ checksum: 99ddea01a7e75aa276c250a04eedeffe5662bce66c65c07164ad6264f9de18fb21be9433ead460e54cff20e31721c811f4fb5d70591799df5f85dce6d6746fd0
+ languageName: node
+ linkType: hard
+
+"function-bind@npm:^1.1.1":
+ version: 1.1.1
+ resolution: "function-bind@npm:1.1.1"
+ checksum: b32fbaebb3f8ec4969f033073b43f5c8befbb58f1a79e12f1d7490358150359ebd92f49e72ff0144f65f2c48ea2a605bff2d07965f548f6474fd8efd95bf361a
+ languageName: node
+ linkType: hard
+
+"function.prototype.name@npm:^1.1.5":
+ version: 1.1.5
+ resolution: "function.prototype.name@npm:1.1.5"
+ dependencies:
+ call-bind: ^1.0.2
+ define-properties: ^1.1.3
+ es-abstract: ^1.19.0
+ functions-have-names: ^1.2.2
+ checksum: acd21d733a9b649c2c442f067567743214af5fa248dbeee69d8278ce7df3329ea5abac572be9f7470b4ec1cd4d8f1040e3c5caccf98ebf2bf861a0deab735c27
+ languageName: node
+ linkType: hard
+
+"functional-red-black-tree@npm:^1.0.1":
+ version: 1.0.1
+ resolution: "functional-red-black-tree@npm:1.0.1"
+ checksum: ca6c170f37640e2d94297da8bb4bf27a1d12bea3e00e6a3e007fd7aa32e37e000f5772acf941b4e4f3cf1c95c3752033d0c509af157ad8f526e7f00723b9eb9f
+ languageName: node
+ linkType: hard
+
+"functions-have-names@npm:^1.2.2":
+ version: 1.2.3
+ resolution: "functions-have-names@npm:1.2.3"
+ checksum: c3f1f5ba20f4e962efb71344ce0a40722163e85bee2101ce25f88214e78182d2d2476aa85ef37950c579eb6cf6ee811c17b3101bb84004bb75655f3e33f3fdb5
+ languageName: node
+ linkType: hard
+
+"gensync@npm:^1.0.0-beta.2":
+ version: 1.0.0-beta.2
+ resolution: "gensync@npm:1.0.0-beta.2"
+ checksum: a7437e58c6be12aa6c90f7730eac7fa9833dc78872b4ad2963d2031b00a3367a93f98aec75f9aaac7220848e4026d67a8655e870b24f20a543d103c0d65952ec
+ languageName: node
+ linkType: hard
+
+"get-intrinsic@npm:^1.0.2, get-intrinsic@npm:^1.1.0, get-intrinsic@npm:^1.1.1, get-intrinsic@npm:^1.1.2":
+ version: 1.1.3
+ resolution: "get-intrinsic@npm:1.1.3"
+ dependencies:
+ function-bind: ^1.1.1
+ has: ^1.0.3
+ has-symbols: ^1.0.3
+ checksum: 152d79e87251d536cf880ba75cfc3d6c6c50e12b3a64e1ea960e73a3752b47c69f46034456eae1b0894359ce3bc64c55c186f2811f8a788b75b638b06fab228a
+ languageName: node
+ linkType: hard
+
+"get-symbol-description@npm:^1.0.0":
+ version: 1.0.0
+ resolution: "get-symbol-description@npm:1.0.0"
+ dependencies:
+ call-bind: ^1.0.2
+ get-intrinsic: ^1.1.1
+ checksum: 9ceff8fe968f9270a37a1f73bf3f1f7bda69ca80f4f80850670e0e7b9444ff99323f7ac52f96567f8b5f5fbe7ac717a0d81d3407c7313e82810c6199446a5247
+ languageName: node
+ linkType: hard
+
+"glob-parent@npm:^5.1.2":
+ version: 5.1.2
+ resolution: "glob-parent@npm:5.1.2"
+ dependencies:
+ is-glob: ^4.0.1
+ checksum: f4f2bfe2425296e8a47e36864e4f42be38a996db40420fe434565e4480e3322f18eb37589617a98640c5dc8fdec1a387007ee18dbb1f3f5553409c34d17f425e
+ languageName: node
+ linkType: hard
+
+"glob@npm:7.1.7":
+ version: 7.1.7
+ resolution: "glob@npm:7.1.7"
+ dependencies:
+ fs.realpath: ^1.0.0
+ inflight: ^1.0.4
+ inherits: 2
+ minimatch: ^3.0.4
+ once: ^1.3.0
+ path-is-absolute: ^1.0.0
+ checksum: b61f48973bbdcf5159997b0874a2165db572b368b931135832599875919c237fc05c12984e38fe828e69aa8a921eb0e8a4997266211c517c9cfaae8a93988bb8
+ languageName: node
+ linkType: hard
+
+"glob@npm:^7.1.3, glob@npm:^7.2.0":
+ version: 7.2.3
+ resolution: "glob@npm:7.2.3"
+ dependencies:
+ fs.realpath: ^1.0.0
+ inflight: ^1.0.4
+ inherits: 2
+ minimatch: ^3.1.1
+ once: ^1.3.0
+ path-is-absolute: ^1.0.0
+ checksum: 29452e97b38fa704dabb1d1045350fb2467cf0277e155aa9ff7077e90ad81d1ea9d53d3ee63bd37c05b09a065e90f16aec4a65f5b8de401d1dac40bc5605d133
+ languageName: node
+ linkType: hard
+
+"globals@npm:^11.1.0":
+ version: 11.12.0
+ resolution: "globals@npm:11.12.0"
+ checksum: 67051a45eca3db904aee189dfc7cd53c20c7d881679c93f6146ddd4c9f4ab2268e68a919df740d39c71f4445d2b38ee360fc234428baea1dbdfe68bbcb46979e
+ languageName: node
+ linkType: hard
+
+"globals@npm:^13.6.0, globals@npm:^13.9.0":
+ version: 13.17.0
+ resolution: "globals@npm:13.17.0"
+ dependencies:
+ type-fest: ^0.20.2
+ checksum: fbaf4112e59b92c9f5575e85ce65e9e17c0b82711196ec5f58beb08599bbd92fd72703d6dfc9b080381fd35b644e1b11dcf25b38cc2341ec21df942594cbc8ce
+ languageName: node
+ linkType: hard
+
+"globby@npm:^11.1.0":
+ version: 11.1.0
+ resolution: "globby@npm:11.1.0"
+ dependencies:
+ array-union: ^2.1.0
+ dir-glob: ^3.0.1
+ fast-glob: ^3.2.9
+ ignore: ^5.2.0
+ merge2: ^1.4.1
+ slash: ^3.0.0
+ checksum: b4be8885e0cfa018fc783792942d53926c35c50b3aefd3fdcfb9d22c627639dc26bd2327a40a0b74b074100ce95bb7187bfeae2f236856aa3de183af7a02aea6
+ languageName: node
+ linkType: hard
+
+"graceful-fs@npm:^4.2.4":
+ version: 4.2.10
+ resolution: "graceful-fs@npm:4.2.10"
+ checksum: 3f109d70ae123951905d85032ebeae3c2a5a7a997430df00ea30df0e3a6c60cf6689b109654d6fdacd28810a053348c4d14642da1d075049e6be1ba5216218da
+ languageName: node
+ linkType: hard
+
+"has-bigints@npm:^1.0.1, has-bigints@npm:^1.0.2":
+ version: 1.0.2
+ resolution: "has-bigints@npm:1.0.2"
+ checksum: 390e31e7be7e5c6fe68b81babb73dfc35d413604d7ee5f56da101417027a4b4ce6a27e46eff97ad040c835b5d228676eae99a9b5c3bc0e23c8e81a49241ff45b
+ languageName: node
+ linkType: hard
+
+"has-flag@npm:^3.0.0":
+ version: 3.0.0
+ resolution: "has-flag@npm:3.0.0"
+ checksum: 4a15638b454bf086c8148979aae044dd6e39d63904cd452d970374fa6a87623423da485dfb814e7be882e05c096a7ccf1ebd48e7e7501d0208d8384ff4dea73b
+ languageName: node
+ linkType: hard
+
+"has-flag@npm:^4.0.0":
+ version: 4.0.0
+ resolution: "has-flag@npm:4.0.0"
+ checksum: 261a1357037ead75e338156b1f9452c016a37dcd3283a972a30d9e4a87441ba372c8b81f818cd0fbcd9c0354b4ae7e18b9e1afa1971164aef6d18c2b6095a8ad
+ languageName: node
+ linkType: hard
+
+"has-property-descriptors@npm:^1.0.0":
+ version: 1.0.0
+ resolution: "has-property-descriptors@npm:1.0.0"
+ dependencies:
+ get-intrinsic: ^1.1.1
+ checksum: a6d3f0a266d0294d972e354782e872e2fe1b6495b321e6ef678c9b7a06a40408a6891817350c62e752adced73a94ac903c54734fee05bf65b1905ee1368194bb
+ languageName: node
+ linkType: hard
+
+"has-symbols@npm:^1.0.2, has-symbols@npm:^1.0.3":
+ version: 1.0.3
+ resolution: "has-symbols@npm:1.0.3"
+ checksum: a054c40c631c0d5741a8285010a0777ea0c068f99ed43e5d6eb12972da223f8af553a455132fdb0801bdcfa0e0f443c0c03a68d8555aa529b3144b446c3f2410
+ languageName: node
+ linkType: hard
+
+"has-tostringtag@npm:^1.0.0":
+ version: 1.0.0
+ resolution: "has-tostringtag@npm:1.0.0"
+ dependencies:
+ has-symbols: ^1.0.2
+ checksum: cc12eb28cb6ae22369ebaad3a8ab0799ed61270991be88f208d508076a1e99abe4198c965935ce85ea90b60c94ddda73693b0920b58e7ead048b4a391b502c1c
+ languageName: node
+ linkType: hard
+
+"has@npm:^1.0.3":
+ version: 1.0.3
+ resolution: "has@npm:1.0.3"
+ dependencies:
+ function-bind: ^1.1.1
+ checksum: b9ad53d53be4af90ce5d1c38331e712522417d017d5ef1ebd0507e07c2fbad8686fffb8e12ddecd4c39ca9b9b47431afbb975b8abf7f3c3b82c98e9aad052792
+ languageName: node
+ linkType: hard
+
+"ignore@npm:^4.0.6":
+ version: 4.0.6
+ resolution: "ignore@npm:4.0.6"
+ checksum: 248f82e50a430906f9ee7f35e1158e3ec4c3971451dd9f99c9bc1548261b4db2b99709f60ac6c6cac9333494384176cc4cc9b07acbe42d52ac6a09cad734d800
+ languageName: node
+ linkType: hard
+
+"ignore@npm:^5.2.0":
+ version: 5.2.0
+ resolution: "ignore@npm:5.2.0"
+ checksum: 6b1f926792d614f64c6c83da3a1f9c83f6196c2839aa41e1e32dd7b8d174cef2e329d75caabb62cb61ce9dc432f75e67d07d122a037312db7caa73166a1bdb77
+ languageName: node
+ linkType: hard
+
+"import-fresh@npm:^3.0.0, import-fresh@npm:^3.2.1":
+ version: 3.3.0
+ resolution: "import-fresh@npm:3.3.0"
+ dependencies:
+ parent-module: ^1.0.0
+ resolve-from: ^4.0.0
+ checksum: 2cacfad06e652b1edc50be650f7ec3be08c5e5a6f6d12d035c440a42a8cc028e60a5b99ca08a77ab4d6b1346da7d971915828f33cdab730d3d42f08242d09baa
+ languageName: node
+ linkType: hard
+
+"imurmurhash@npm:^0.1.4":
+ version: 0.1.4
+ resolution: "imurmurhash@npm:0.1.4"
+ checksum: 7cae75c8cd9a50f57dadd77482359f659eaebac0319dd9368bcd1714f55e65badd6929ca58569da2b6494ef13fdd5598cd700b1eba23f8b79c5f19d195a3ecf7
+ languageName: node
+ linkType: hard
+
+"inflight@npm:^1.0.4":
+ version: 1.0.6
+ resolution: "inflight@npm:1.0.6"
+ dependencies:
+ once: ^1.3.0
+ wrappy: 1
+ checksum: f4f76aa072ce19fae87ce1ef7d221e709afb59d445e05d47fba710e85470923a75de35bfae47da6de1b18afc3ce83d70facf44cfb0aff89f0a3f45c0a0244dfd
+ languageName: node
+ linkType: hard
+
+"inherits@npm:2":
+ version: 2.0.4
+ resolution: "inherits@npm:2.0.4"
+ checksum: 4a48a733847879d6cf6691860a6b1e3f0f4754176e4d71494c41f3475553768b10f84b5ce1d40fbd0e34e6bfbb864ee35858ad4dd2cf31e02fc4a154b724d7f1
+ languageName: node
+ linkType: hard
+
+"internal-slot@npm:^1.0.3":
+ version: 1.0.3
+ resolution: "internal-slot@npm:1.0.3"
+ dependencies:
+ get-intrinsic: ^1.1.0
+ has: ^1.0.3
+ side-channel: ^1.0.4
+ checksum: 1944f92e981e47aebc98a88ff0db579fd90543d937806104d0b96557b10c1f170c51fb777b97740a8b6ddeec585fca8c39ae99fd08a8e058dfc8ab70937238bf
+ languageName: node
+ linkType: hard
+
+"is-bigint@npm:^1.0.1":
+ version: 1.0.4
+ resolution: "is-bigint@npm:1.0.4"
+ dependencies:
+ has-bigints: ^1.0.1
+ checksum: c56edfe09b1154f8668e53ebe8252b6f185ee852a50f9b41e8d921cb2bed425652049fbe438723f6cb48a63ca1aa051e948e7e401e093477c99c84eba244f666
+ languageName: node
+ linkType: hard
+
+"is-boolean-object@npm:^1.1.0":
+ version: 1.1.2
+ resolution: "is-boolean-object@npm:1.1.2"
+ dependencies:
+ call-bind: ^1.0.2
+ has-tostringtag: ^1.0.0
+ checksum: c03b23dbaacadc18940defb12c1c0e3aaece7553ef58b162a0f6bba0c2a7e1551b59f365b91e00d2dbac0522392d576ef322628cb1d036a0fe51eb466db67222
+ languageName: node
+ linkType: hard
+
+"is-callable@npm:^1.1.4, is-callable@npm:^1.2.4":
+ version: 1.2.6
+ resolution: "is-callable@npm:1.2.6"
+ checksum: 7667d6a6be66df00741cfa18c657877c46a00139ea7ea7765251e9db0182745c9ee173506941a329d6914e34e59e9cc80029fb3f68bbf8c22a6c155ee6ea77b3
+ languageName: node
+ linkType: hard
+
+"is-core-module@npm:^2.8.1, is-core-module@npm:^2.9.0":
+ version: 2.10.0
+ resolution: "is-core-module@npm:2.10.0"
+ dependencies:
+ has: ^1.0.3
+ checksum: 0f3f77811f430af3256fa7bbc806f9639534b140f8ee69476f632c3e1eb4e28a38be0b9d1b8ecf596179c841b53576129279df95e7051d694dac4ceb6f967593
+ languageName: node
+ linkType: hard
+
+"is-date-object@npm:^1.0.1":
+ version: 1.0.5
+ resolution: "is-date-object@npm:1.0.5"
+ dependencies:
+ has-tostringtag: ^1.0.0
+ checksum: baa9077cdf15eb7b58c79398604ca57379b2fc4cf9aa7a9b9e295278648f628c9b201400c01c5e0f7afae56507d741185730307cbe7cad3b9f90a77e5ee342fc
+ languageName: node
+ linkType: hard
+
+"is-extglob@npm:^2.1.1":
+ version: 2.1.1
+ resolution: "is-extglob@npm:2.1.1"
+ checksum: df033653d06d0eb567461e58a7a8c9f940bd8c22274b94bf7671ab36df5719791aae15eef6d83bbb5e23283967f2f984b8914559d4449efda578c775c4be6f85
+ languageName: node
+ linkType: hard
+
+"is-fullwidth-code-point@npm:^3.0.0":
+ version: 3.0.0
+ resolution: "is-fullwidth-code-point@npm:3.0.0"
+ checksum: 44a30c29457c7fb8f00297bce733f0a64cd22eca270f83e58c105e0d015e45c019491a4ab2faef91ab51d4738c670daff901c799f6a700e27f7314029e99e348
+ languageName: node
+ linkType: hard
+
+"is-glob@npm:^4.0.0, is-glob@npm:^4.0.1, is-glob@npm:^4.0.3":
+ version: 4.0.3
+ resolution: "is-glob@npm:4.0.3"
+ dependencies:
+ is-extglob: ^2.1.1
+ checksum: d381c1319fcb69d341cc6e6c7cd588e17cd94722d9a32dbd60660b993c4fb7d0f19438674e68dfec686d09b7c73139c9166b47597f846af387450224a8101ab4
+ languageName: node
+ linkType: hard
+
+"is-negative-zero@npm:^2.0.2":
+ version: 2.0.2
+ resolution: "is-negative-zero@npm:2.0.2"
+ checksum: f3232194c47a549da60c3d509c9a09be442507616b69454716692e37ae9f37c4dea264fb208ad0c9f3efd15a796a46b79df07c7e53c6227c32170608b809149a
+ languageName: node
+ linkType: hard
+
+"is-number-object@npm:^1.0.4":
+ version: 1.0.7
+ resolution: "is-number-object@npm:1.0.7"
+ dependencies:
+ has-tostringtag: ^1.0.0
+ checksum: d1e8d01bb0a7134c74649c4e62da0c6118a0bfc6771ea3c560914d52a627873e6920dd0fd0ebc0e12ad2ff4687eac4c308f7e80320b973b2c8a2c8f97a7524f7
+ languageName: node
+ linkType: hard
+
+"is-number@npm:^7.0.0":
+ version: 7.0.0
+ resolution: "is-number@npm:7.0.0"
+ checksum: 456ac6f8e0f3111ed34668a624e45315201dff921e5ac181f8ec24923b99e9f32ca1a194912dc79d539c97d33dba17dc635202ff0b2cf98326f608323276d27a
+ languageName: node
+ linkType: hard
+
+"is-regex@npm:^1.1.4":
+ version: 1.1.4
+ resolution: "is-regex@npm:1.1.4"
+ dependencies:
+ call-bind: ^1.0.2
+ has-tostringtag: ^1.0.0
+ checksum: 362399b33535bc8f386d96c45c9feb04cf7f8b41c182f54174c1a45c9abbbe5e31290bbad09a458583ff6bf3b2048672cdb1881b13289569a7c548370856a652
+ languageName: node
+ linkType: hard
+
+"is-shared-array-buffer@npm:^1.0.2":
+ version: 1.0.2
+ resolution: "is-shared-array-buffer@npm:1.0.2"
+ dependencies:
+ call-bind: ^1.0.2
+ checksum: 9508929cf14fdc1afc9d61d723c6e8d34f5e117f0bffda4d97e7a5d88c3a8681f633a74f8e3ad1fe92d5113f9b921dc5ca44356492079612f9a247efbce7032a
+ languageName: node
+ linkType: hard
+
+"is-string@npm:^1.0.5, is-string@npm:^1.0.7":
+ version: 1.0.7
+ resolution: "is-string@npm:1.0.7"
+ dependencies:
+ has-tostringtag: ^1.0.0
+ checksum: 323b3d04622f78d45077cf89aab783b2f49d24dc641aa89b5ad1a72114cfeff2585efc8c12ef42466dff32bde93d839ad321b26884cf75e5a7892a938b089989
+ languageName: node
+ linkType: hard
+
+"is-symbol@npm:^1.0.2, is-symbol@npm:^1.0.3":
+ version: 1.0.4
+ resolution: "is-symbol@npm:1.0.4"
+ dependencies:
+ has-symbols: ^1.0.2
+ checksum: 92805812ef590738d9de49d677cd17dfd486794773fb6fa0032d16452af46e9b91bb43ffe82c983570f015b37136f4b53b28b8523bfb10b0ece7a66c31a54510
+ languageName: node
+ linkType: hard
+
+"is-weakref@npm:^1.0.2":
+ version: 1.0.2
+ resolution: "is-weakref@npm:1.0.2"
+ dependencies:
+ call-bind: ^1.0.2
+ checksum: 95bd9a57cdcb58c63b1c401c60a474b0f45b94719c30f548c891860f051bc2231575c290a6b420c6bc6e7ed99459d424c652bd5bf9a1d5259505dc35b4bf83de
+ languageName: node
+ linkType: hard
+
+"isexe@npm:^2.0.0":
+ version: 2.0.0
+ resolution: "isexe@npm:2.0.0"
+ checksum: 26bf6c5480dda5161c820c5b5c751ae1e766c587b1f951ea3fcfc973bafb7831ae5b54a31a69bd670220e42e99ec154475025a468eae58ea262f813fdc8d1c62
+ languageName: node
+ linkType: hard
+
+"js-tokens@npm:^3.0.0 || ^4.0.0, js-tokens@npm:^4.0.0":
+ version: 4.0.0
+ resolution: "js-tokens@npm:4.0.0"
+ checksum: 8a95213a5a77deb6cbe94d86340e8d9ace2b93bc367790b260101d2f36a2eaf4e4e22d9fa9cf459b38af3a32fb4190e638024cf82ec95ef708680e405ea7cc78
+ languageName: node
+ linkType: hard
+
+"js-yaml@npm:^3.13.1":
+ version: 3.14.1
+ resolution: "js-yaml@npm:3.14.1"
+ dependencies:
+ argparse: ^1.0.7
+ esprima: ^4.0.0
+ bin:
+ js-yaml: bin/js-yaml.js
+ checksum: bef146085f472d44dee30ec34e5cf36bf89164f5d585435a3d3da89e52622dff0b188a580e4ad091c3341889e14cb88cac6e4deb16dc5b1e9623bb0601fc255c
+ languageName: node
+ linkType: hard
+
+"jsesc@npm:^2.5.1":
+ version: 2.5.2
+ resolution: "jsesc@npm:2.5.2"
+ bin:
+ jsesc: bin/jsesc
+ checksum: 4dc190771129e12023f729ce20e1e0bfceac84d73a85bc3119f7f938843fe25a4aeccb54b6494dce26fcf263d815f5f31acdefac7cc9329efb8422a4f4d9fa9d
+ languageName: node
+ linkType: hard
+
+"json-schema-traverse@npm:^0.4.1":
+ version: 0.4.1
+ resolution: "json-schema-traverse@npm:0.4.1"
+ checksum: 7486074d3ba247769fda17d5181b345c9fb7d12e0da98b22d1d71a5db9698d8b4bd900a3ec1a4ffdd60846fc2556274a5c894d0c48795f14cb03aeae7b55260b
+ languageName: node
+ linkType: hard
+
+"json-schema-traverse@npm:^1.0.0":
+ version: 1.0.0
+ resolution: "json-schema-traverse@npm:1.0.0"
+ checksum: 02f2f466cdb0362558b2f1fd5e15cce82ef55d60cd7f8fa828cf35ba74330f8d767fcae5c5c2adb7851fa811766c694b9405810879bc4e1ddd78a7c0e03658ad
+ languageName: node
+ linkType: hard
+
+"json-stable-stringify-without-jsonify@npm:^1.0.1":
+ version: 1.0.1
+ resolution: "json-stable-stringify-without-jsonify@npm:1.0.1"
+ checksum: cff44156ddce9c67c44386ad5cddf91925fe06b1d217f2da9c4910d01f358c6e3989c4d5a02683c7a5667f9727ff05831f7aa8ae66c8ff691c556f0884d49215
+ languageName: node
+ linkType: hard
+
+"json5@npm:^1.0.1":
+ version: 1.0.1
+ resolution: "json5@npm:1.0.1"
+ dependencies:
+ minimist: ^1.2.0
+ bin:
+ json5: lib/cli.js
+ checksum: e76ea23dbb8fc1348c143da628134a98adf4c5a4e8ea2adaa74a80c455fc2cdf0e2e13e6398ef819bfe92306b610ebb2002668ed9fc1af386d593691ef346fc3
+ languageName: node
+ linkType: hard
+
+"json5@npm:^2.2.1":
+ version: 2.2.1
+ resolution: "json5@npm:2.2.1"
+ bin:
+ json5: lib/cli.js
+ checksum: 74b8a23b102a6f2bf2d224797ae553a75488b5adbaee9c9b6e5ab8b510a2fc6e38f876d4c77dea672d4014a44b2399e15f2051ac2b37b87f74c0c7602003543b
+ languageName: node
+ linkType: hard
+
+"jsx-ast-utils@npm:^2.4.1 || ^3.0.0, jsx-ast-utils@npm:^3.3.2":
+ version: 3.3.3
+ resolution: "jsx-ast-utils@npm:3.3.3"
+ dependencies:
+ array-includes: ^3.1.5
+ object.assign: ^4.1.3
+ checksum: a2ed78cac49a0f0c4be8b1eafe3c5257a1411341d8e7f1ac740debae003de04e5f6372bfcfbd9d082e954ffd99aac85bcda85b7c6bc11609992483f4cdc0f745
+ languageName: node
+ linkType: hard
+
+"language-subtag-registry@npm:~0.3.2":
+ version: 0.3.22
+ resolution: "language-subtag-registry@npm:0.3.22"
+ checksum: 8ab70a7e0e055fe977ac16ea4c261faec7205ac43db5e806f72e5b59606939a3b972c4bd1e10e323b35d6ffa97c3e1c4c99f6553069dad2dfdd22020fa3eb56a
+ languageName: node
+ linkType: hard
+
+"language-tags@npm:^1.0.5":
+ version: 1.0.5
+ resolution: "language-tags@npm:1.0.5"
+ dependencies:
+ language-subtag-registry: ~0.3.2
+ checksum: c81b5d8b9f5f9cfd06ee71ada6ddfe1cf83044dd5eeefcd1e420ad491944da8957688db4a0a9bc562df4afdc2783425cbbdfd152c01d93179cf86888903123cf
+ languageName: node
+ linkType: hard
+
+"levn@npm:^0.4.1":
+ version: 0.4.1
+ resolution: "levn@npm:0.4.1"
+ dependencies:
+ prelude-ls: ^1.2.1
+ type-check: ~0.4.0
+ checksum: 12c5021c859bd0f5248561bf139121f0358285ec545ebf48bb3d346820d5c61a4309535c7f387ed7d84361cf821e124ce346c6b7cef8ee09a67c1473b46d0fc4
+ languageName: node
+ linkType: hard
+
+"lodash.merge@npm:^4.6.2":
+ version: 4.6.2
+ resolution: "lodash.merge@npm:4.6.2"
+ checksum: ad580b4bdbb7ca1f7abf7e1bce63a9a0b98e370cf40194b03380a46b4ed799c9573029599caebc1b14e3f24b111aef72b96674a56cfa105e0f5ac70546cdc005
+ languageName: node
+ linkType: hard
+
+"lodash.truncate@npm:^4.4.2":
+ version: 4.4.2
+ resolution: "lodash.truncate@npm:4.4.2"
+ checksum: b463d8a382cfb5f0e71c504dcb6f807a7bd379ff1ea216669aa42c52fc28c54e404bfbd96791aa09e6df0de2c1d7b8f1b7f4b1a61f324d38fe98bc535aeee4f5
+ languageName: node
+ linkType: hard
+
+"lodash@npm:4.17.21":
+ version: 4.17.21
+ resolution: "lodash@npm:4.17.21"
+ checksum: eb835a2e51d381e561e508ce932ea50a8e5a68f4ebdd771ea240d3048244a8d13658acbd502cd4829768c56f2e16bdd4340b9ea141297d472517b83868e677f7
+ languageName: node
+ linkType: hard
+
+"lodash@patch:lodash@npm%3A4.17.21#./.yarn/patches/lodash-npm-4.17.21-6382451519.patch::locator=berry-patch%40workspace%3A.":
+ version: 4.17.21
+ resolution: "lodash@patch:lodash@npm%3A4.17.21#./.yarn/patches/lodash-npm-4.17.21-6382451519.patch::version=4.17.21&hash=2c6e9e&locator=berry-patch%40workspace%3A."
+ checksum: 0f54b5291a5cfa3322cc3cb85716df4e23503535b79a341f12a41231513baaa6285fd9808d9894100dcea8b36bf91644360c4f783db1814719a4e103a04f59f3
+ languageName: node
+ linkType: hard
+
+"loose-envify@npm:^1.1.0, loose-envify@npm:^1.4.0":
+ version: 1.4.0
+ resolution: "loose-envify@npm:1.4.0"
+ dependencies:
+ js-tokens: ^3.0.0 || ^4.0.0
+ bin:
+ loose-envify: cli.js
+ checksum: 6517e24e0cad87ec9888f500c5b5947032cdfe6ef65e1c1936a0c48a524b81e65542c9c3edc91c97d5bddc806ee2a985dbc79be89215d613b1de5db6d1cfe6f4
+ languageName: node
+ linkType: hard
+
+"lru-cache@npm:^6.0.0":
+ version: 6.0.0
+ resolution: "lru-cache@npm:6.0.0"
+ dependencies:
+ yallist: ^4.0.0
+ checksum: f97f499f898f23e4585742138a22f22526254fdba6d75d41a1c2526b3b6cc5747ef59c5612ba7375f42aca4f8461950e925ba08c991ead0651b4918b7c978297
+ languageName: node
+ linkType: hard
+
+"merge2@npm:^1.3.0, merge2@npm:^1.4.1":
+ version: 1.4.1
+ resolution: "merge2@npm:1.4.1"
+ checksum: 7268db63ed5169466540b6fb947aec313200bcf6d40c5ab722c22e242f651994619bcd85601602972d3c85bd2cc45a358a4c61937e9f11a061919a1da569b0c2
+ languageName: node
+ linkType: hard
+
+"micromatch@npm:^4.0.4":
+ version: 4.0.5
+ resolution: "micromatch@npm:4.0.5"
+ dependencies:
+ braces: ^3.0.2
+ picomatch: ^2.3.1
+ checksum: 02a17b671c06e8fefeeb6ef996119c1e597c942e632a21ef589154f23898c9c6a9858526246abb14f8bca6e77734aa9dcf65476fca47cedfb80d9577d52843fc
+ languageName: node
+ linkType: hard
+
+"minimatch@npm:^3.0.4, minimatch@npm:^3.1.1, minimatch@npm:^3.1.2":
+ version: 3.1.2
+ resolution: "minimatch@npm:3.1.2"
+ dependencies:
+ brace-expansion: ^1.1.7
+ checksum: c154e566406683e7bcb746e000b84d74465b3a832c45d59912b9b55cd50dee66e5c4b1e5566dba26154040e51672f9aa450a9aef0c97cfc7336b78b7afb9540a
+ languageName: node
+ linkType: hard
+
+"minimist@npm:^1.2.0, minimist@npm:^1.2.6":
+ version: 1.2.6
+ resolution: "minimist@npm:1.2.6"
+ checksum: d15428cd1e11eb14e1233bcfb88ae07ed7a147de251441d61158619dfb32c4d7e9061d09cab4825fdee18ecd6fce323228c8c47b5ba7cd20af378ca4048fb3fb
+ languageName: node
+ linkType: hard
+
+"ms@npm:2.0.0":
+ version: 2.0.0
+ resolution: "ms@npm:2.0.0"
+ checksum: 0e6a22b8b746d2e0b65a430519934fefd41b6db0682e3477c10f60c76e947c4c0ad06f63ffdf1d78d335f83edee8c0aa928aa66a36c7cd95b69b26f468d527f4
+ languageName: node
+ linkType: hard
+
+"ms@npm:2.1.2":
+ version: 2.1.2
+ resolution: "ms@npm:2.1.2"
+ checksum: 673cdb2c3133eb050c745908d8ce632ed2c02d85640e2edb3ace856a2266a813b30c613569bf3354fdf4ea7d1a1494add3bfa95e2713baa27d0c2c71fc44f58f
+ languageName: node
+ linkType: hard
+
+"ms@npm:^2.1.1":
+ version: 2.1.3
+ resolution: "ms@npm:2.1.3"
+ checksum: aa92de608021b242401676e35cfa5aa42dd70cbdc082b916da7fb925c542173e36bce97ea3e804923fe92c0ad991434e4a38327e15a1b5b5f945d66df615ae6d
+ languageName: node
+ linkType: hard
+
+"nanoid@npm:^3.3.4":
+ version: 3.3.4
+ resolution: "nanoid@npm:3.3.4"
+ bin:
+ nanoid: bin/nanoid.cjs
+ checksum: 2fddd6dee994b7676f008d3ffa4ab16035a754f4bb586c61df5a22cf8c8c94017aadd360368f47d653829e0569a92b129979152ff97af23a558331e47e37cd9c
+ languageName: node
+ linkType: hard
+
+"natural-compare@npm:^1.4.0":
+ version: 1.4.0
+ resolution: "natural-compare@npm:1.4.0"
+ checksum: 23ad088b08f898fc9b53011d7bb78ec48e79de7627e01ab5518e806033861bef68d5b0cd0e2205c2f36690ac9571ff6bcb05eb777ced2eeda8d4ac5b44592c3d
+ languageName: node
+ linkType: hard
+
+"next-transpile-modules@npm:9.0.0":
+ version: 9.0.0
+ resolution: "next-transpile-modules@npm:9.0.0"
+ dependencies:
+ enhanced-resolve: ^5.7.0
+ escalade: ^3.1.1
+ checksum: 9a5d86d80cedc2404b2b1d5bd4994f2f7bf60e5e20f24e8cc5cfec34da1418b4a439916f37a95ca336bcf6d81094c3647354ac6a0c6737b3df59e62b6380507d
+ languageName: node
+ linkType: hard
+
+"next@npm:12.2.5":
+ version: 12.2.5
+ resolution: "next@npm:12.2.5"
+ dependencies:
+ "@next/env": 12.2.5
+ "@next/swc-android-arm-eabi": 12.2.5
+ "@next/swc-android-arm64": 12.2.5
+ "@next/swc-darwin-arm64": 12.2.5
+ "@next/swc-darwin-x64": 12.2.5
+ "@next/swc-freebsd-x64": 12.2.5
+ "@next/swc-linux-arm-gnueabihf": 12.2.5
+ "@next/swc-linux-arm64-gnu": 12.2.5
+ "@next/swc-linux-arm64-musl": 12.2.5
+ "@next/swc-linux-x64-gnu": 12.2.5
+ "@next/swc-linux-x64-musl": 12.2.5
+ "@next/swc-win32-arm64-msvc": 12.2.5
+ "@next/swc-win32-ia32-msvc": 12.2.5
+ "@next/swc-win32-x64-msvc": 12.2.5
+ "@swc/helpers": 0.4.3
+ caniuse-lite: ^1.0.30001332
+ postcss: 8.4.14
+ styled-jsx: 5.0.4
+ use-sync-external-store: 1.2.0
+ peerDependencies:
+ fibers: ">= 3.1.0"
+ node-sass: ^6.0.0 || ^7.0.0
+ react: ^17.0.2 || ^18.0.0-0
+ react-dom: ^17.0.2 || ^18.0.0-0
+ sass: ^1.3.0
+ dependenciesMeta:
+ "@next/swc-android-arm-eabi":
+ optional: true
+ "@next/swc-android-arm64":
+ optional: true
+ "@next/swc-darwin-arm64":
+ optional: true
+ "@next/swc-darwin-x64":
+ optional: true
+ "@next/swc-freebsd-x64":
+ optional: true
+ "@next/swc-linux-arm-gnueabihf":
+ optional: true
+ "@next/swc-linux-arm64-gnu":
+ optional: true
+ "@next/swc-linux-arm64-musl":
+ optional: true
+ "@next/swc-linux-x64-gnu":
+ optional: true
+ "@next/swc-linux-x64-musl":
+ optional: true
+ "@next/swc-win32-arm64-msvc":
+ optional: true
+ "@next/swc-win32-ia32-msvc":
+ optional: true
+ "@next/swc-win32-x64-msvc":
+ optional: true
+ peerDependenciesMeta:
+ fibers:
+ optional: true
+ node-sass:
+ optional: true
+ sass:
+ optional: true
+ bin:
+ next: dist/bin/next
+ checksum: e8fcbd93d74fda81640fd174a9d380f22db404d3ce0893730db3db806317ae18c86d1dbb502e63e47c92fb21a93812de62639c2f1204330cb569fdac4d3d0573
+ languageName: node
+ linkType: hard
+
+"node-releases@npm:^2.0.6":
+ version: 2.0.6
+ resolution: "node-releases@npm:2.0.6"
+ checksum: e86a926dc9fbb3b41b4c4a89d998afdf140e20a4e8dbe6c0a807f7b2948b42ea97d7fd3ad4868041487b6e9ee98409829c6e4d84a734a4215dff060a7fbeb4bf
+ languageName: node
+ linkType: hard
+
+"object-assign@npm:^4.1.1":
+ version: 4.1.1
+ resolution: "object-assign@npm:4.1.1"
+ checksum: fcc6e4ea8c7fe48abfbb552578b1c53e0d194086e2e6bbbf59e0a536381a292f39943c6e9628af05b5528aa5e3318bb30d6b2e53cadaf5b8fe9e12c4b69af23f
+ languageName: node
+ linkType: hard
+
+"object-inspect@npm:^1.12.2, object-inspect@npm:^1.9.0":
+ version: 1.12.2
+ resolution: "object-inspect@npm:1.12.2"
+ checksum: a534fc1b8534284ed71f25ce3a496013b7ea030f3d1b77118f6b7b1713829262be9e6243acbcb3ef8c626e2b64186112cb7f6db74e37b2789b9c789ca23048b2
+ languageName: node
+ linkType: hard
+
+"object-keys@npm:^1.1.1":
+ version: 1.1.1
+ resolution: "object-keys@npm:1.1.1"
+ checksum: b363c5e7644b1e1b04aa507e88dcb8e3a2f52b6ffd0ea801e4c7a62d5aa559affe21c55a07fd4b1fd55fc03a33c610d73426664b20032405d7b92a1414c34d6a
+ languageName: node
+ linkType: hard
+
+"object.assign@npm:^4.1.3, object.assign@npm:^4.1.4":
+ version: 4.1.4
+ resolution: "object.assign@npm:4.1.4"
+ dependencies:
+ call-bind: ^1.0.2
+ define-properties: ^1.1.4
+ has-symbols: ^1.0.3
+ object-keys: ^1.1.1
+ checksum: 76cab513a5999acbfe0ff355f15a6a125e71805fcf53de4e9d4e082e1989bdb81d1e329291e1e4e0ae7719f0e4ef80e88fb2d367ae60500d79d25a6224ac8864
+ languageName: node
+ linkType: hard
+
+"object.entries@npm:^1.1.5":
+ version: 1.1.5
+ resolution: "object.entries@npm:1.1.5"
+ dependencies:
+ call-bind: ^1.0.2
+ define-properties: ^1.1.3
+ es-abstract: ^1.19.1
+ checksum: d658696f74fd222060d8428d2a9fda2ce736b700cb06f6bdf4a16a1892d145afb746f453502b2fa55d1dca8ead6f14ddbcf66c545df45adadea757a6c4cd86c7
+ languageName: node
+ linkType: hard
+
+"object.fromentries@npm:^2.0.5":
+ version: 2.0.5
+ resolution: "object.fromentries@npm:2.0.5"
+ dependencies:
+ call-bind: ^1.0.2
+ define-properties: ^1.1.3
+ es-abstract: ^1.19.1
+ checksum: 61a0b565ded97b76df9e30b569729866e1824cce902f98e90bb106e84f378aea20163366f66dc75c9000e2aad2ed0caf65c6f530cb2abc4c0c0f6c982102db4b
+ languageName: node
+ linkType: hard
+
+"object.hasown@npm:^1.1.1":
+ version: 1.1.1
+ resolution: "object.hasown@npm:1.1.1"
+ dependencies:
+ define-properties: ^1.1.4
+ es-abstract: ^1.19.5
+ checksum: d8ed4907ce57f48b93e3b53c418fd6787bf226a51e8d698c91e39b78e80fe5b124cb6282f6a9d5be21cf9e2c7829ab10206dcc6112b7748860eefe641880c793
+ languageName: node
+ linkType: hard
+
+"object.values@npm:^1.1.5":
+ version: 1.1.5
+ resolution: "object.values@npm:1.1.5"
+ dependencies:
+ call-bind: ^1.0.2
+ define-properties: ^1.1.3
+ es-abstract: ^1.19.1
+ checksum: 0f17e99741ebfbd0fa55ce942f6184743d3070c61bd39221afc929c8422c4907618c8da694c6915bc04a83ab3224260c779ba37fc07bb668bdc5f33b66a902a4
+ languageName: node
+ linkType: hard
+
+"once@npm:^1.3.0":
+ version: 1.4.0
+ resolution: "once@npm:1.4.0"
+ dependencies:
+ wrappy: 1
+ checksum: cd0a88501333edd640d95f0d2700fbde6bff20b3d4d9bdc521bdd31af0656b5706570d6c6afe532045a20bb8dc0849f8332d6f2a416e0ba6d3d3b98806c7db68
+ languageName: node
+ linkType: hard
+
+"optionator@npm:^0.9.1":
+ version: 0.9.1
+ resolution: "optionator@npm:0.9.1"
+ dependencies:
+ deep-is: ^0.1.3
+ fast-levenshtein: ^2.0.6
+ levn: ^0.4.1
+ prelude-ls: ^1.2.1
+ type-check: ^0.4.0
+ word-wrap: ^1.2.3
+ checksum: dbc6fa065604b24ea57d734261914e697bd73b69eff7f18e967e8912aa2a40a19a9f599a507fa805be6c13c24c4eae8c71306c239d517d42d4c041c942f508a0
+ languageName: node
+ linkType: hard
+
+"parent-module@npm:^1.0.0":
+ version: 1.0.1
+ resolution: "parent-module@npm:1.0.1"
+ dependencies:
+ callsites: ^3.0.0
+ checksum: 6ba8b255145cae9470cf5551eb74be2d22281587af787a2626683a6c20fbb464978784661478dd2a3f1dad74d1e802d403e1b03c1a31fab310259eec8ac560ff
+ languageName: node
+ linkType: hard
+
+"path-is-absolute@npm:^1.0.0":
+ version: 1.0.1
+ resolution: "path-is-absolute@npm:1.0.1"
+ checksum: 060840f92cf8effa293bcc1bea81281bd7d363731d214cbe5c227df207c34cd727430f70c6037b5159c8a870b9157cba65e775446b0ab06fd5ecc7e54615a3b8
+ languageName: node
+ linkType: hard
+
+"path-key@npm:^3.1.0":
+ version: 3.1.1
+ resolution: "path-key@npm:3.1.1"
+ checksum: 55cd7a9dd4b343412a8386a743f9c746ef196e57c823d90ca3ab917f90ab9f13dd0ded27252ba49dbdfcab2b091d998bc446f6220cd3cea65db407502a740020
+ languageName: node
+ linkType: hard
+
+"path-parse@npm:^1.0.7":
+ version: 1.0.7
+ resolution: "path-parse@npm:1.0.7"
+ checksum: 49abf3d81115642938a8700ec580da6e830dde670be21893c62f4e10bd7dd4c3742ddc603fe24f898cba7eb0c6bc1777f8d9ac14185d34540c6d4d80cd9cae8a
+ languageName: node
+ linkType: hard
+
+"path-type@npm:^4.0.0":
+ version: 4.0.0
+ resolution: "path-type@npm:4.0.0"
+ checksum: 5b1e2daa247062061325b8fdbfd1fb56dde0a448fb1455453276ea18c60685bdad23a445dc148cf87bc216be1573357509b7d4060494a6fd768c7efad833ee45
+ languageName: node
+ linkType: hard
+
+"picocolors@npm:^1.0.0":
+ version: 1.0.0
+ resolution: "picocolors@npm:1.0.0"
+ checksum: a2e8092dd86c8396bdba9f2b5481032848525b3dc295ce9b57896f931e63fc16f79805144321f72976383fc249584672a75cc18d6777c6b757603f372f745981
+ languageName: node
+ linkType: hard
+
+"picomatch@npm:^2.3.1":
+ version: 2.3.1
+ resolution: "picomatch@npm:2.3.1"
+ checksum: 050c865ce81119c4822c45d3c84f1ced46f93a0126febae20737bd05ca20589c564d6e9226977df859ed5e03dc73f02584a2b0faad36e896936238238b0446cf
+ languageName: node
+ linkType: hard
+
+"postcss@npm:8.4.14":
+ version: 8.4.14
+ resolution: "postcss@npm:8.4.14"
+ dependencies:
+ nanoid: ^3.3.4
+ picocolors: ^1.0.0
+ source-map-js: ^1.0.2
+ checksum: fe58766ff32e4becf65a7d57678995cfd239df6deed2fe0557f038b47c94e4132e7e5f68b5aa820c13adfec32e523b693efaeb65798efb995ce49ccd83953816
+ languageName: node
+ linkType: hard
+
+"prelude-ls@npm:^1.2.1":
+ version: 1.2.1
+ resolution: "prelude-ls@npm:1.2.1"
+ checksum: cd192ec0d0a8e4c6da3bb80e4f62afe336df3f76271ac6deb0e6a36187133b6073a19e9727a1ff108cd8b9982e4768850d413baa71214dd80c7979617dca827a
+ languageName: node
+ linkType: hard
+
+prettier@latest:
+ version: 2.7.1
+ resolution: "prettier@npm:2.7.1"
+ bin:
+ prettier: bin-prettier.js
+ checksum: 55a4409182260866ab31284d929b3cb961e5fdb91fe0d2e099dac92eaecec890f36e524b4c19e6ceae839c99c6d7195817579cdffc8e2c80da0cb794463a748b
+ languageName: node
+ linkType: hard
+
+"progress@npm:^2.0.0":
+ version: 2.0.3
+ resolution: "progress@npm:2.0.3"
+ checksum: f67403fe7b34912148d9252cb7481266a354bd99ce82c835f79070643bb3c6583d10dbcfda4d41e04bbc1d8437e9af0fb1e1f2135727878f5308682a579429b7
+ languageName: node
+ linkType: hard
+
+"prop-types@npm:^15.8.1":
+ version: 15.8.1
+ resolution: "prop-types@npm:15.8.1"
+ dependencies:
+ loose-envify: ^1.4.0
+ object-assign: ^4.1.1
+ react-is: ^16.13.1
+ checksum: c056d3f1c057cb7ff8344c645450e14f088a915d078dcda795041765047fa080d38e5d626560ccaac94a4e16e3aa15f3557c1a9a8d1174530955e992c675e459
+ languageName: node
+ linkType: hard
+
+"punycode@npm:^2.1.0":
+ version: 2.1.1
+ resolution: "punycode@npm:2.1.1"
+ checksum: 823bf443c6dd14f669984dea25757b37993f67e8d94698996064035edd43bed8a5a17a9f12e439c2b35df1078c6bec05a6c86e336209eb1061e8025c481168e8
+ languageName: node
+ linkType: hard
+
+"queue-microtask@npm:^1.2.2":
+ version: 1.2.3
+ resolution: "queue-microtask@npm:1.2.3"
+ checksum: b676f8c040cdc5b12723ad2f91414d267605b26419d5c821ff03befa817ddd10e238d22b25d604920340fd73efd8ba795465a0377c4adf45a4a41e4234e42dc4
+ languageName: node
+ linkType: hard
+
+"react-dom@npm:18.2.0":
+ version: 18.2.0
+ resolution: "react-dom@npm:18.2.0"
+ dependencies:
+ loose-envify: ^1.1.0
+ scheduler: ^0.23.0
+ peerDependencies:
+ react: ^18.2.0
+ checksum: 7d323310bea3a91be2965f9468d552f201b1c27891e45ddc2d6b8f717680c95a75ae0bc1e3f5cf41472446a2589a75aed4483aee8169287909fcd59ad149e8cc
+ languageName: node
+ linkType: hard
+
+"react-is@npm:^16.13.1":
+ version: 16.13.1
+ resolution: "react-is@npm:16.13.1"
+ checksum: f7a19ac3496de32ca9ae12aa030f00f14a3d45374f1ceca0af707c831b2a6098ef0d6bdae51bd437b0a306d7f01d4677fcc8de7c0d331eb47ad0f46130e53c5f
+ languageName: node
+ linkType: hard
+
+"react@npm:18.2.0, react@npm:^18.2.0":
+ version: 18.2.0
+ resolution: "react@npm:18.2.0"
+ dependencies:
+ loose-envify: ^1.1.0
+ checksum: 88e38092da8839b830cda6feef2e8505dec8ace60579e46aa5490fc3dc9bba0bd50336507dc166f43e3afc1c42939c09fe33b25fae889d6f402721dcd78fca1b
+ languageName: node
+ linkType: hard
+
+"regenerator-runtime@npm:^0.13.4":
+ version: 0.13.9
+ resolution: "regenerator-runtime@npm:0.13.9"
+ checksum: 65ed455fe5afd799e2897baf691ca21c2772e1a969d19bb0c4695757c2d96249eb74ee3553ea34a91062b2a676beedf630b4c1551cc6299afb937be1426ec55e
+ languageName: node
+ linkType: hard
+
+"regexp.prototype.flags@npm:^1.4.1, regexp.prototype.flags@npm:^1.4.3":
+ version: 1.4.3
+ resolution: "regexp.prototype.flags@npm:1.4.3"
+ dependencies:
+ call-bind: ^1.0.2
+ define-properties: ^1.1.3
+ functions-have-names: ^1.2.2
+ checksum: 51228bae732592adb3ededd5e15426be25f289e9c4ef15212f4da73f4ec3919b6140806374b8894036a86020d054a8d2657d3fee6bb9b4d35d8939c20030b7a6
+ languageName: node
+ linkType: hard
+
+"regexpp@npm:^3.1.0":
+ version: 3.2.0
+ resolution: "regexpp@npm:3.2.0"
+ checksum: a78dc5c7158ad9ddcfe01aa9144f46e192ddbfa7b263895a70a5c6c73edd9ce85faf7c0430e59ac38839e1734e275b9c3de5c57ee3ab6edc0e0b1bdebefccef8
+ languageName: node
+ linkType: hard
+
+"require-from-string@npm:^2.0.2":
+ version: 2.0.2
+ resolution: "require-from-string@npm:2.0.2"
+ checksum: a03ef6895445f33a4015300c426699bc66b2b044ba7b670aa238610381b56d3f07c686251740d575e22f4c87531ba662d06937508f0f3c0f1ddc04db3130560b
+ languageName: node
+ linkType: hard
+
+"resolve-from@npm:^4.0.0":
+ version: 4.0.0
+ resolution: "resolve-from@npm:4.0.0"
+ checksum: f4ba0b8494846a5066328ad33ef8ac173801a51739eb4d63408c847da9a2e1c1de1e6cbbf72699211f3d13f8fc1325648b169bd15eb7da35688e30a5fb0e4a7f
+ languageName: node
+ linkType: hard
+
+"resolve@npm:^1.20.0, resolve@npm:^1.22.0":
+ version: 1.22.1
+ resolution: "resolve@npm:1.22.1"
+ dependencies:
+ is-core-module: ^2.9.0
+ path-parse: ^1.0.7
+ supports-preserve-symlinks-flag: ^1.0.0
+ bin:
+ resolve: bin/resolve
+ checksum: 07af5fc1e81aa1d866cbc9e9460fbb67318a10fa3c4deadc35c3ad8a898ee9a71a86a65e4755ac3195e0ea0cfbe201eb323ebe655ce90526fd61917313a34e4e
+ languageName: node
+ linkType: hard
+
+"resolve@npm:^2.0.0-next.3":
+ version: 2.0.0-next.4
+ resolution: "resolve@npm:2.0.0-next.4"
+ dependencies:
+ is-core-module: ^2.9.0
+ path-parse: ^1.0.7
+ supports-preserve-symlinks-flag: ^1.0.0
+ bin:
+ resolve: bin/resolve
+ checksum: c438ac9a650f2030fd074219d7f12ceb983b475da2d89ad3d6dd05fbf6b7a0a8cd37d4d10b43cb1f632bc19f22246ab7f36ebda54d84a29bfb2910a0680906d3
+ languageName: node
+ linkType: hard
+
+"resolve@patch:resolve@^1.20.0#~builtin<compat/resolve>, resolve@patch:resolve@^1.22.0#~builtin<compat/resolve>":
+ version: 1.22.1
+ resolution: "resolve@patch:resolve@npm%3A1.22.1#~builtin<compat/resolve>::version=1.22.1&hash=07638b"
+ dependencies:
+ is-core-module: ^2.9.0
+ path-parse: ^1.0.7
+ supports-preserve-symlinks-flag: ^1.0.0
+ bin:
+ resolve: bin/resolve
+ checksum: 5656f4d0bedcf8eb52685c1abdf8fbe73a1603bb1160a24d716e27a57f6cecbe2432ff9c89c2bd57542c3a7b9d14b1882b73bfe2e9d7849c9a4c0b8b39f02b8b
+ languageName: node
+ linkType: hard
+
+"resolve@patch:resolve@^2.0.0-next.3#~builtin<compat/resolve>":
+ version: 2.0.0-next.4
+ resolution: "resolve@patch:resolve@npm%3A2.0.0-next.4#~builtin<compat/resolve>::version=2.0.0-next.4&hash=07638b"
+ dependencies:
+ is-core-module: ^2.9.0
+ path-parse: ^1.0.7
+ supports-preserve-symlinks-flag: ^1.0.0
+ bin:
+ resolve: bin/resolve
+ checksum: 4bf9f4f8a458607af90518ff73c67a4bc1a38b5a23fef2bb0ccbd45e8be89820a1639b637b0ba377eb2be9eedfb1739a84cde24fe4cd670c8207d8fea922b011
+ languageName: node
+ linkType: hard
+
+"reusify@npm:^1.0.4":
+ version: 1.0.4
+ resolution: "reusify@npm:1.0.4"
+ checksum: c3076ebcc22a6bc252cb0b9c77561795256c22b757f40c0d8110b1300723f15ec0fc8685e8d4ea6d7666f36c79ccc793b1939c748bf36f18f542744a4e379fcc
+ languageName: node
+ linkType: hard
+
+"rimraf@npm:^3.0.2":
+ version: 3.0.2
+ resolution: "rimraf@npm:3.0.2"
+ dependencies:
+ glob: ^7.1.3
+ bin:
+ rimraf: bin.js
+ checksum: 87f4164e396f0171b0a3386cc1877a817f572148ee13a7e113b238e48e8a9f2f31d009a92ec38a591ff1567d9662c6b67fd8818a2dbbaed74bc26a87a2a4a9a0
+ languageName: node
+ linkType: hard
+
+"run-parallel@npm:^1.1.9":
+ version: 1.2.0
+ resolution: "run-parallel@npm:1.2.0"
+ dependencies:
+ queue-microtask: ^1.2.2
+ checksum: cb4f97ad25a75ebc11a8ef4e33bb962f8af8516bb2001082ceabd8902e15b98f4b84b4f8a9b222e5d57fc3bd1379c483886ed4619367a7680dad65316993021d
+ languageName: node
+ linkType: hard
+
+"safe-buffer@npm:~5.1.1":
+ version: 5.1.2
+ resolution: "safe-buffer@npm:5.1.2"
+ checksum: f2f1f7943ca44a594893a852894055cf619c1fbcb611237fc39e461ae751187e7baf4dc391a72125e0ac4fb2d8c5c0b3c71529622e6a58f46b960211e704903c
+ languageName: node
+ linkType: hard
+
+"scheduler@npm:^0.23.0":
+ version: 0.23.0
+ resolution: "scheduler@npm:0.23.0"
+ dependencies:
+ loose-envify: ^1.1.0
+ checksum: d79192eeaa12abef860c195ea45d37cbf2bbf5f66e3c4dcd16f54a7da53b17788a70d109ee3d3dde1a0fd50e6a8fc171f4300356c5aee4fc0171de526bf35f8a
+ languageName: node
+ linkType: hard
+
+"semver@npm:^6.3.0":
+ version: 6.3.0
+ resolution: "semver@npm:6.3.0"
+ bin:
+ semver: ./bin/semver.js
+ checksum: 1b26ecf6db9e8292dd90df4e781d91875c0dcc1b1909e70f5d12959a23c7eebb8f01ea581c00783bbee72ceeaad9505797c381756326073850dc36ed284b21b9
+ languageName: node
+ linkType: hard
+
+"semver@npm:^7.2.1, semver@npm:^7.3.7":
+ version: 7.3.7
+ resolution: "semver@npm:7.3.7"
+ dependencies:
+ lru-cache: ^6.0.0
+ bin:
+ semver: bin/semver.js
+ checksum: 2fa3e877568cd6ce769c75c211beaed1f9fce80b28338cadd9d0b6c40f2e2862bafd62c19a6cff42f3d54292b7c623277bcab8816a2b5521cf15210d43e75232
+ languageName: node
+ linkType: hard
+
+"shebang-command@npm:^2.0.0":
+ version: 2.0.0
+ resolution: "shebang-command@npm:2.0.0"
+ dependencies:
+ shebang-regex: ^3.0.0
+ checksum: 6b52fe87271c12968f6a054e60f6bde5f0f3d2db483a1e5c3e12d657c488a15474121a1d55cd958f6df026a54374ec38a4a963988c213b7570e1d51575cea7fa
+ languageName: node
+ linkType: hard
+
+"shebang-regex@npm:^3.0.0":
+ version: 3.0.0
+ resolution: "shebang-regex@npm:3.0.0"
+ checksum: 1a2bcae50de99034fcd92ad4212d8e01eedf52c7ec7830eedcf886622804fe36884278f2be8be0ea5fde3fd1c23911643a4e0f726c8685b61871c8908af01222
+ languageName: node
+ linkType: hard
+
+"side-channel@npm:^1.0.4":
+ version: 1.0.4
+ resolution: "side-channel@npm:1.0.4"
+ dependencies:
+ call-bind: ^1.0.0
+ get-intrinsic: ^1.0.2
+ object-inspect: ^1.9.0
+ checksum: 351e41b947079c10bd0858364f32bb3a7379514c399edb64ab3dce683933483fc63fb5e4efe0a15a2e8a7e3c436b6a91736ddb8d8c6591b0460a24bb4a1ee245
+ languageName: node
+ linkType: hard
+
+"slash@npm:^3.0.0":
+ version: 3.0.0
+ resolution: "slash@npm:3.0.0"
+ checksum: 94a93fff615f25a999ad4b83c9d5e257a7280c90a32a7cb8b4a87996e4babf322e469c42b7f649fd5796edd8687652f3fb452a86dc97a816f01113183393f11c
+ languageName: node
+ linkType: hard
+
+"slice-ansi@npm:^4.0.0":
+ version: 4.0.0
+ resolution: "slice-ansi@npm:4.0.0"
+ dependencies:
+ ansi-styles: ^4.0.0
+ astral-regex: ^2.0.0
+ is-fullwidth-code-point: ^3.0.0
+ checksum: 4a82d7f085b0e1b070e004941ada3c40d3818563ac44766cca4ceadd2080427d337554f9f99a13aaeb3b4a94d9964d9466c807b3d7b7541d1ec37ee32d308756
+ languageName: node
+ linkType: hard
+
+"source-map-js@npm:^1.0.2":
+ version: 1.0.2
+ resolution: "source-map-js@npm:1.0.2"
+ checksum: c049a7fc4deb9a7e9b481ae3d424cc793cb4845daa690bc5a05d428bf41bf231ced49b4cf0c9e77f9d42fdb3d20d6187619fc586605f5eabe995a316da8d377c
+ languageName: node
+ linkType: hard
+
+"sprintf-js@npm:~1.0.2":
+ version: 1.0.3
+ resolution: "sprintf-js@npm:1.0.3"
+ checksum: 19d79aec211f09b99ec3099b5b2ae2f6e9cdefe50bc91ac4c69144b6d3928a640bb6ae5b3def70c2e85a2c3d9f5ec2719921e3a59d3ca3ef4b2fd1a4656a0df3
+ languageName: node
+ linkType: hard
+
+"string-width@npm:^4.2.3":
+ version: 4.2.3
+ resolution: "string-width@npm:4.2.3"
+ dependencies:
+ emoji-regex: ^8.0.0
+ is-fullwidth-code-point: ^3.0.0
+ strip-ansi: ^6.0.1
+ checksum: e52c10dc3fbfcd6c3a15f159f54a90024241d0f149cf8aed2982a2d801d2e64df0bf1dc351cf8e95c3319323f9f220c16e740b06faecd53e2462df1d2b5443fb
+ languageName: node
+ linkType: hard
+
+"string.prototype.matchall@npm:^4.0.7":
+ version: 4.0.7
+ resolution: "string.prototype.matchall@npm:4.0.7"
+ dependencies:
+ call-bind: ^1.0.2
+ define-properties: ^1.1.3
+ es-abstract: ^1.19.1
+ get-intrinsic: ^1.1.1
+ has-symbols: ^1.0.3
+ internal-slot: ^1.0.3
+ regexp.prototype.flags: ^1.4.1
+ side-channel: ^1.0.4
+ checksum: fc09f3ccbfb325de0472bcc87a6be0598a7499e0b4a31db5789676155b15754a4cc4bb83924f15fc9ed48934dac7366ee52c8b9bd160bed6fd072c93b489e75c
+ languageName: node
+ linkType: hard
+
+"string.prototype.trimend@npm:^1.0.5":
+ version: 1.0.5
+ resolution: "string.prototype.trimend@npm:1.0.5"
+ dependencies:
+ call-bind: ^1.0.2
+ define-properties: ^1.1.4
+ es-abstract: ^1.19.5
+ checksum: d44f543833112f57224e79182debadc9f4f3bf9d48a0414d6f0cbd2a86f2b3e8c0ca1f95c3f8e5b32ae83e91554d79d932fc746b411895f03f93d89ed3dfb6bc
+ languageName: node
+ linkType: hard
+
+"string.prototype.trimstart@npm:^1.0.5":
+ version: 1.0.5
+ resolution: "string.prototype.trimstart@npm:1.0.5"
+ dependencies:
+ call-bind: ^1.0.2
+ define-properties: ^1.1.4
+ es-abstract: ^1.19.5
+ checksum: a4857c5399ad709d159a77371eeaa8f9cc284469a0b5e1bfe405de16f1fd4166a8ea6f4180e55032f348d1b679b1599fd4301fbc7a8b72bdb3e795e43f7b1048
+ languageName: node
+ linkType: hard
+
+"strip-ansi@npm:^6.0.0, strip-ansi@npm:^6.0.1":
+ version: 6.0.1
+ resolution: "strip-ansi@npm:6.0.1"
+ dependencies:
+ ansi-regex: ^5.0.1
+ checksum: f3cd25890aef3ba6e1a74e20896c21a46f482e93df4a06567cebf2b57edabb15133f1f94e57434e0a958d61186087b1008e89c94875d019910a213181a14fc8c
+ languageName: node
+ linkType: hard
+
+"strip-bom@npm:^3.0.0":
+ version: 3.0.0
+ resolution: "strip-bom@npm:3.0.0"
+ checksum: 8d50ff27b7ebe5ecc78f1fe1e00fcdff7af014e73cf724b46fb81ef889eeb1015fc5184b64e81a2efe002180f3ba431bdd77e300da5c6685d702780fbf0c8d5b
+ languageName: node
+ linkType: hard
+
+"strip-json-comments@npm:^3.1.0, strip-json-comments@npm:^3.1.1":
+ version: 3.1.1
+ resolution: "strip-json-comments@npm:3.1.1"
+ checksum: 492f73e27268f9b1c122733f28ecb0e7e8d8a531a6662efbd08e22cccb3f9475e90a1b82cab06a392f6afae6d2de636f977e231296400d0ec5304ba70f166443
+ languageName: node
+ linkType: hard
+
+"styled-jsx@npm:5.0.4":
+ version: 5.0.4
+ resolution: "styled-jsx@npm:5.0.4"
+ peerDependencies:
+ react: ">= 16.8.0 || 17.x.x || ^18.0.0-0"
+ peerDependenciesMeta:
+ "@babel/core":
+ optional: true
+ babel-plugin-macros:
+ optional: true
+ checksum: db7530155626e5eebc9d80ca117ea5aed6219b0a65469196b0b5727550fbe743117d7eea1499d80511ccb312d31f4a1027a58d1f94a83f0986c9acfdcce8bdd1
+ languageName: node
+ linkType: hard
+
+"supports-color@npm:^5.3.0":
+ version: 5.5.0
+ resolution: "supports-color@npm:5.5.0"
+ dependencies:
+ has-flag: ^3.0.0
+ checksum: 95f6f4ba5afdf92f495b5a912d4abee8dcba766ae719b975c56c084f5004845f6f5a5f7769f52d53f40e21952a6d87411bafe34af4a01e65f9926002e38e1dac
+ languageName: node
+ linkType: hard
+
+"supports-color@npm:^7.1.0":
+ version: 7.2.0
+ resolution: "supports-color@npm:7.2.0"
+ dependencies:
+ has-flag: ^4.0.0
+ checksum: 3dda818de06ebbe5b9653e07842d9479f3555ebc77e9a0280caf5a14fb877ffee9ed57007c3b78f5a6324b8dbeec648d9e97a24e2ed9fdb81ddc69ea07100f4a
+ languageName: node
+ linkType: hard
+
+"supports-preserve-symlinks-flag@npm:^1.0.0":
+ version: 1.0.0
+ resolution: "supports-preserve-symlinks-flag@npm:1.0.0"
+ checksum: 53b1e247e68e05db7b3808b99b892bd36fb096e6fba213a06da7fab22045e97597db425c724f2bbd6c99a3c295e1e73f3e4de78592289f38431049e1277ca0ae
+ languageName: node
+ linkType: hard
+
+"table@npm:^6.0.9":
+ version: 6.8.0
+ resolution: "table@npm:6.8.0"
+ dependencies:
+ ajv: ^8.0.1
+ lodash.truncate: ^4.4.2
+ slice-ansi: ^4.0.0
+ string-width: ^4.2.3
+ strip-ansi: ^6.0.1
+ checksum: 5b07fe462ee03d2e1fac02cbb578efd2e0b55ac07e3d3db2e950aa9570ade5a4a2b8d3c15e9f25c89e4e50b646bc4269934601ee1eef4ca7968ad31960977690
+ languageName: node
+ linkType: hard
+
+"tapable@npm:^2.2.0":
+ version: 2.2.1
+ resolution: "tapable@npm:2.2.1"
+ checksum: 3b7a1b4d86fa940aad46d9e73d1e8739335efd4c48322cb37d073eb6f80f5281889bf0320c6d8ffcfa1a0dd5bfdbd0f9d037e252ef972aca595330538aac4d51
+ languageName: node
+ linkType: hard
+
+"text-table@npm:^0.2.0":
+ version: 0.2.0
+ resolution: "text-table@npm:0.2.0"
+ checksum: b6937a38c80c7f84d9c11dd75e49d5c44f71d95e810a3250bd1f1797fc7117c57698204adf676b71497acc205d769d65c16ae8fa10afad832ae1322630aef10a
+ languageName: node
+ linkType: hard
+
+"to-fast-properties@npm:^2.0.0":
+ version: 2.0.0
+ resolution: "to-fast-properties@npm:2.0.0"
+ checksum: be2de62fe58ead94e3e592680052683b1ec986c72d589e7b21e5697f8744cdbf48c266fa72f6c15932894c10187b5f54573a3bcf7da0bfd964d5caf23d436168
+ languageName: node
+ linkType: hard
+
+"to-regex-range@npm:^5.0.1":
+ version: 5.0.1
+ resolution: "to-regex-range@npm:5.0.1"
+ dependencies:
+ is-number: ^7.0.0
+ checksum: f76fa01b3d5be85db6a2a143e24df9f60dd047d151062d0ba3df62953f2f697b16fe5dad9b0ac6191c7efc7b1d9dcaa4b768174b7b29da89d4428e64bc0a20ed
+ languageName: node
+ linkType: hard
+
+"tsconfig-paths@npm:^3.14.1":
+ version: 3.14.1
+ resolution: "tsconfig-paths@npm:3.14.1"
+ dependencies:
+ "@types/json5": ^0.0.29
+ json5: ^1.0.1
+ minimist: ^1.2.6
+ strip-bom: ^3.0.0
+ checksum: 8afa01c673ebb4782ba53d3a12df97fa837ce524f8ad38ee4e2b2fd57f5ac79abc21c574e9e9eb014d93efe7fe8214001b96233b5c6ea75bd1ea82afe17a4c6d
+ languageName: node
+ linkType: hard
+
+"tsconfig@*, tsconfig@workspace:packages/tsconfig":
+ version: 0.0.0-use.local
+ resolution: "tsconfig@workspace:packages/tsconfig"
+ languageName: unknown
+ linkType: soft
+
+"tslib@npm:^1.8.1":
+ version: 1.14.1
+ resolution: "tslib@npm:1.14.1"
+ checksum: dbe628ef87f66691d5d2959b3e41b9ca0045c3ee3c7c7b906cc1e328b39f199bb1ad9e671c39025bd56122ac57dfbf7385a94843b1cc07c60a4db74795829acd
+ languageName: node
+ linkType: hard
+
+"tslib@npm:^2.4.0":
+ version: 2.4.0
+ resolution: "tslib@npm:2.4.0"
+ checksum: 8c4aa6a3c5a754bf76aefc38026134180c053b7bd2f81338cb5e5ebf96fefa0f417bff221592bf801077f5bf990562f6264fecbc42cd3309b33872cb6fc3b113
+ languageName: node
+ linkType: hard
+
+"tsutils@npm:^3.21.0":
+ version: 3.21.0
+ resolution: "tsutils@npm:3.21.0"
+ dependencies:
+ tslib: ^1.8.1
+ peerDependencies:
+ typescript: ">=2.8.0 || >= 3.2.0-dev || >= 3.3.0-dev || >= 3.4.0-dev || >= 3.5.0-dev || >= 3.6.0-dev || >= 3.6.0-beta || >= 3.7.0-dev || >= 3.7.0-beta"
+ checksum: 1843f4c1b2e0f975e08c4c21caa4af4f7f65a12ac1b81b3b8489366826259323feb3fc7a243123453d2d1a02314205a7634e048d4a8009921da19f99755cdc48
+ languageName: node
+ linkType: hard
+
+"turbo-android-arm64@npm:1.4.6":
+ version: 1.4.6
+ resolution: "turbo-android-arm64@npm:1.4.6"
+ conditions: os=android & cpu=arm64
+ languageName: node
+ linkType: hard
+
+"turbo-darwin-64@npm:1.4.6":
+ version: 1.4.6
+ resolution: "turbo-darwin-64@npm:1.4.6"
+ conditions: os=darwin & cpu=x64
+ languageName: node
+ linkType: hard
+
+"turbo-darwin-arm64@npm:1.4.6":
+ version: 1.4.6
+ resolution: "turbo-darwin-arm64@npm:1.4.6"
+ conditions: os=darwin & cpu=arm64
+ languageName: node
+ linkType: hard
+
+"turbo-freebsd-64@npm:1.4.6":
+ version: 1.4.6
+ resolution: "turbo-freebsd-64@npm:1.4.6"
+ conditions: os=freebsd & cpu=x64
+ languageName: node
+ linkType: hard
+
+"turbo-freebsd-arm64@npm:1.4.6":
+ version: 1.4.6
+ resolution: "turbo-freebsd-arm64@npm:1.4.6"
+ conditions: os=freebsd & cpu=arm64
+ languageName: node
+ linkType: hard
+
+"turbo-linux-32@npm:1.4.6":
+ version: 1.4.6
+ resolution: "turbo-linux-32@npm:1.4.6"
+ conditions: os=linux & cpu=ia32
+ languageName: node
+ linkType: hard
+
+"turbo-linux-64@npm:1.4.6":
+ version: 1.4.6
+ resolution: "turbo-linux-64@npm:1.4.6"
+ conditions: os=linux & cpu=x64
+ languageName: node
+ linkType: hard
+
+"turbo-linux-arm64@npm:1.4.6":
+ version: 1.4.6
+ resolution: "turbo-linux-arm64@npm:1.4.6"
+ conditions: os=linux & cpu=arm64
+ languageName: node
+ linkType: hard
+
+"turbo-linux-arm@npm:1.4.6":
+ version: 1.4.6
+ resolution: "turbo-linux-arm@npm:1.4.6"
+ conditions: os=linux & cpu=arm
+ languageName: node
+ linkType: hard
+
+"turbo-linux-mips64le@npm:1.4.6":
+ version: 1.4.6
+ resolution: "turbo-linux-mips64le@npm:1.4.6"
+ conditions: os=linux & cpu=mipsel
+ languageName: node
+ linkType: hard
+
+"turbo-linux-ppc64le@npm:1.4.6":
+ version: 1.4.6
+ resolution: "turbo-linux-ppc64le@npm:1.4.6"
+ conditions: os=linux & cpu=ppc64
+ languageName: node
+ linkType: hard
+
+"turbo-windows-32@npm:1.4.6":
+ version: 1.4.6
+ resolution: "turbo-windows-32@npm:1.4.6"
+ conditions: os=win32 & cpu=ia32
+ languageName: node
+ linkType: hard
+
+"turbo-windows-64@npm:1.4.6":
+ version: 1.4.6
+ resolution: "turbo-windows-64@npm:1.4.6"
+ conditions: os=win32 & cpu=x64
+ languageName: node
+ linkType: hard
+
+"turbo-windows-arm64@npm:1.4.6":
+ version: 1.4.6
+ resolution: "turbo-windows-arm64@npm:1.4.6"
+ conditions: os=win32 & cpu=arm64
+ languageName: node
+ linkType: hard
+
+turbo@latest:
+ version: 1.4.6
+ resolution: "turbo@npm:1.4.6"
+ dependencies:
+ turbo-android-arm64: 1.4.6
+ turbo-darwin-64: 1.4.6
+ turbo-darwin-arm64: 1.4.6
+ turbo-freebsd-64: 1.4.6
+ turbo-freebsd-arm64: 1.4.6
+ turbo-linux-32: 1.4.6
+ turbo-linux-64: 1.4.6
+ turbo-linux-arm: 1.4.6
+ turbo-linux-arm64: 1.4.6
+ turbo-linux-mips64le: 1.4.6
+ turbo-linux-ppc64le: 1.4.6
+ turbo-windows-32: 1.4.6
+ turbo-windows-64: 1.4.6
+ turbo-windows-arm64: 1.4.6
+ dependenciesMeta:
+ turbo-android-arm64:
+ optional: true
+ turbo-darwin-64:
+ optional: true
+ turbo-darwin-arm64:
+ optional: true
+ turbo-freebsd-64:
+ optional: true
+ turbo-freebsd-arm64:
+ optional: true
+ turbo-linux-32:
+ optional: true
+ turbo-linux-64:
+ optional: true
+ turbo-linux-arm:
+ optional: true
+ turbo-linux-arm64:
+ optional: true
+ turbo-linux-mips64le:
+ optional: true
+ turbo-linux-ppc64le:
+ optional: true
+ turbo-windows-32:
+ optional: true
+ turbo-windows-64:
+ optional: true
+ turbo-windows-arm64:
+ optional: true
+ bin:
+ turbo: bin/turbo
+ checksum: f7191f36e0abddf6dc88eb9a83a007a8616ebed1edd44c37f9b19e0451f3ce90c4406699f6166a99c0a6f8d39cc1f24d96513b7ef16b21747863827538b9c966
+ languageName: node
+ linkType: hard
+
+"type-check@npm:^0.4.0, type-check@npm:~0.4.0":
+ version: 0.4.0
+ resolution: "type-check@npm:0.4.0"
+ dependencies:
+ prelude-ls: ^1.2.1
+ checksum: ec688ebfc9c45d0c30412e41ca9c0cdbd704580eb3a9ccf07b9b576094d7b86a012baebc95681999dd38f4f444afd28504cb3a89f2ef16b31d4ab61a0739025a
+ languageName: node
+ linkType: hard
+
+"type-fest@npm:^0.20.2":
+ version: 0.20.2
+ resolution: "type-fest@npm:0.20.2"
+ checksum: 4fb3272df21ad1c552486f8a2f8e115c09a521ad7a8db3d56d53718d0c907b62c6e9141ba5f584af3f6830d0872c521357e512381f24f7c44acae583ad517d73
+ languageName: node
+ linkType: hard
+
+"typescript@npm:^4.5.2, typescript@npm:^4.5.3, typescript@npm:^4.7.4":
+ version: 4.8.3
+ resolution: "typescript@npm:4.8.3"
+ bin:
+ tsc: bin/tsc
+ tsserver: bin/tsserver
+ checksum: 8286a5edcaf3d68e65c451aa1e7150ad1cf53ee0813c07ec35b7abdfdb10f355ecaa13c6a226a694ae7a67785fd7eeebf89f845da0b4f7e4a35561ddc459aba0
+ languageName: node
+ linkType: hard
+
+"typescript@patch:typescript@^4.5.2#~builtin<compat/typescript>, typescript@patch:typescript@^4.5.3#~builtin<compat/typescript>, typescript@patch:typescript@^4.7.4#~builtin<compat/typescript>":
+ version: 4.8.3
+ resolution: "typescript@patch:typescript@npm%3A4.8.3#~builtin<compat/typescript>::version=4.8.3&hash=a1c5e5"
+ bin:
+ tsc: bin/tsc
+ tsserver: bin/tsserver
+ checksum: 2222d2382fb3146089b1d27ce2b55e9d1f99cc64118f1aba75809b693b856c5d3c324f052f60c75b577947fc538bc1c27bad0eb76cbdba9a63a253489504ba7e
+ languageName: node
+ linkType: hard
+
+"ui@*, ui@workspace:packages/ui":
+ version: 0.0.0-use.local
+ resolution: "ui@workspace:packages/ui"
+ dependencies:
+ "@types/react": ^17.0.37
+ "@types/react-dom": ^17.0.11
+ eslint: ^7.32.0
+ eslint-config-custom: "*"
+ react: ^18.2.0
+ tsconfig: "*"
+ typescript: ^4.5.2
+ languageName: unknown
+ linkType: soft
+
+"unbox-primitive@npm:^1.0.2":
+ version: 1.0.2
+ resolution: "unbox-primitive@npm:1.0.2"
+ dependencies:
+ call-bind: ^1.0.2
+ has-bigints: ^1.0.2
+ has-symbols: ^1.0.3
+ which-boxed-primitive: ^1.0.2
+ checksum: b7a1cf5862b5e4b5deb091672ffa579aa274f648410009c81cca63fed3b62b610c4f3b773f912ce545bb4e31edc3138975b5bc777fc6e4817dca51affb6380e9
+ languageName: node
+ linkType: hard
+
+"update-browserslist-db@npm:^1.0.9":
+ version: 1.0.9
+ resolution: "update-browserslist-db@npm:1.0.9"
+ dependencies:
+ escalade: ^3.1.1
+ picocolors: ^1.0.0
+ peerDependencies:
+ browserslist: ">= 4.21.0"
+ bin:
+ browserslist-lint: cli.js
+ checksum: f625899b236f6a4d7f62b56be1b8da230c5563d1fef84d3ef148f2e1a3f11a5a4b3be4fd7e3703e51274c116194017775b10afb4de09eb2c0d09d36b90f1f578
+ languageName: node
+ linkType: hard
+
+"uri-js@npm:^4.2.2":
+ version: 4.4.1
+ resolution: "uri-js@npm:4.4.1"
+ dependencies:
+ punycode: ^2.1.0
+ checksum: 7167432de6817fe8e9e0c9684f1d2de2bb688c94388f7569f7dbdb1587c9f4ca2a77962f134ec90be0cc4d004c939ff0d05acc9f34a0db39a3c797dada262633
+ languageName: node
+ linkType: hard
+
+"use-sync-external-store@npm:1.2.0":
+ version: 1.2.0
+ resolution: "use-sync-external-store@npm:1.2.0"
+ peerDependencies:
+ react: ^16.8.0 || ^17.0.0 || ^18.0.0
+ checksum: 5c639e0f8da3521d605f59ce5be9e094ca772bd44a4ce7322b055a6f58eeed8dda3c94cabd90c7a41fb6fa852210092008afe48f7038792fd47501f33299116a
+ languageName: node
+ linkType: hard
+
+"v8-compile-cache@npm:^2.0.3":
+ version: 2.3.0
+ resolution: "v8-compile-cache@npm:2.3.0"
+ checksum: adb0a271eaa2297f2f4c536acbfee872d0dd26ec2d76f66921aa7fc437319132773483344207bdbeee169225f4739016d8d2dbf0553913a52bb34da6d0334f8e
+ languageName: node
+ linkType: hard
+
+"web@workspace:apps/web":
+ version: 0.0.0-use.local
+ resolution: "web@workspace:apps/web"
+ dependencies:
+ "@babel/core": ^7.0.0
+ "@types/node": ^17.0.12
+ "@types/react": 18.0.17
+ eslint: 7.32.0
+ eslint-config-custom: "*"
+ next: 12.2.5
+ next-transpile-modules: 9.0.0
+ react: 18.2.0
+ react-dom: 18.2.0
+ tsconfig: "*"
+ typescript: ^4.5.3
+ ui: "*"
+ languageName: unknown
+ linkType: soft
+
+"which-boxed-primitive@npm:^1.0.2":
+ version: 1.0.2
+ resolution: "which-boxed-primitive@npm:1.0.2"
+ dependencies:
+ is-bigint: ^1.0.1
+ is-boolean-object: ^1.1.0
+ is-number-object: ^1.0.4
+ is-string: ^1.0.5
+ is-symbol: ^1.0.3
+ checksum: 53ce774c7379071729533922adcca47220228405e1895f26673bbd71bdf7fb09bee38c1d6399395927c6289476b5ae0629863427fd151491b71c4b6cb04f3a5e
+ languageName: node
+ linkType: hard
+
+"which@npm:^2.0.1":
+ version: 2.0.2
+ resolution: "which@npm:2.0.2"
+ dependencies:
+ isexe: ^2.0.0
+ bin:
+ node-which: ./bin/node-which
+ checksum: 1a5c563d3c1b52d5f893c8b61afe11abc3bab4afac492e8da5bde69d550de701cf9806235f20a47b5c8fa8a1d6a9135841de2596535e998027a54589000e66d1
+ languageName: node
+ linkType: hard
+
+"word-wrap@npm:^1.2.3":
+ version: 1.2.3
+ resolution: "word-wrap@npm:1.2.3"
+ checksum: 30b48f91fcf12106ed3186ae4fa86a6a1842416df425be7b60485de14bec665a54a68e4b5156647dec3a70f25e84d270ca8bc8cd23182ed095f5c7206a938c1f
+ languageName: node
+ linkType: hard
+
+"wrappy@npm:1":
+ version: 1.0.2
+ resolution: "wrappy@npm:1.0.2"
+ checksum: 159da4805f7e84a3d003d8841557196034155008f817172d4e986bd591f74aa82aa7db55929a54222309e01079a65a92a9e6414da5a6aa4b01ee44a511ac3ee5
+ languageName: node
+ linkType: hard
+
+"yallist@npm:^4.0.0":
+ version: 4.0.0
+ resolution: "yallist@npm:4.0.0"
+ checksum: 343617202af32df2a15a3be36a5a8c0c8545208f3d3dfbc6bb7c3e3b7e8c6f8e7485432e4f3b88da3031a6e20afa7c711eded32ddfb122896ac5d914e75848d5
+ languageName: node
+ linkType: hard
diff --git a/cli/internal/lockfile/testdata/minimal-berry.lock b/cli/internal/lockfile/testdata/minimal-berry.lock
new file mode 100644
index 0000000..3844ce3
--- /dev/null
+++ b/cli/internal/lockfile/testdata/minimal-berry.lock
@@ -0,0 +1,45 @@
+# This file is generated by running "yarn install" inside your project.
+# Manual changes might be lost - proceed with caution!
+
+__metadata:
+ version: 6
+ cacheKey: 8c8
+
+"a@workspace:packages/a":
+ version: 0.0.0-use.local
+ resolution: "a@workspace:packages/a"
+ dependencies:
+ c: "*"
+ lodash: ^4.17.0
+ peerDependencies:
+ lodash: ^3.0.0 || ^4.0.0
+ languageName: unknown
+ linkType: soft
+
+"b@workspace:packages/b":
+ version: 0.0.0-use.local
+ resolution: "b@workspace:packages/b"
+ dependencies:
+ c: "*"
+ lodash: ^3.0.0 || ^4.0.0
+ languageName: unknown
+ linkType: soft
+
+"c@*, c@workspace:packages/c":
+ version: 0.0.0-use.local
+ resolution: "c@workspace:packages/c"
+ languageName: unknown
+ linkType: soft
+
+"lodash@npm:^3.0.0 || ^4.0.0, lodash@npm:^4.17.0":
+ version: 4.17.21
+ resolution: "lodash@npm:4.17.21"
+ checksum: eb835a2e51d381e561e508ce932ea50a8e5a68f4ebdd771ea240d3048244a8d13658acbd502cd4829768c56f2e16bdd4340b9ea141297d472517b83868e677f7
+ languageName: node
+ linkType: hard
+
+"minimal-berry@workspace:.":
+ version: 0.0.0-use.local
+ resolution: "minimal-berry@workspace:."
+ languageName: unknown
+ linkType: soft
diff --git a/cli/internal/lockfile/testdata/npm-lock-workspace-variation.json b/cli/internal/lockfile/testdata/npm-lock-workspace-variation.json
new file mode 100644
index 0000000..4dcfc2d
--- /dev/null
+++ b/cli/internal/lockfile/testdata/npm-lock-workspace-variation.json
@@ -0,0 +1,186 @@
+{
+ "name": "npm-prune-workspace-variation",
+ "version": "0.0.0",
+ "lockfileVersion": 2,
+ "requires": true,
+ "packages": {
+ "": {
+ "name": "npm-prune",
+ "version": "0.0.0",
+ "workspaces": { "packages": ["apps/*", "packages/*"] },
+ "devDependencies": {
+ "eslint-config-custom": "*",
+ "prettier": "latest",
+ "turbo": "latest"
+ },
+ "engines": {
+ "node": ">=14.0.0"
+ }
+ },
+ "apps/docs": {
+ "version": "0.0.0",
+ "dependencies": {
+ "lodash": "^3.0.0",
+ "next": "12.3.0",
+ "react": "18.2.0",
+ "react-dom": "18.2.0",
+ "ui": "*"
+ },
+ "devDependencies": {
+ "@babel/core": "^7.0.0",
+ "@types/node": "^17.0.12",
+ "@types/react": "18.0.17",
+ "eslint": "7.32.0",
+ "eslint-config-custom": "*",
+ "next-transpile-modules": "9.0.0",
+ "tsconfig": "*",
+ "typescript": "^4.5.3"
+ }
+ },
+ "apps/web": {
+ "version": "0.0.0",
+ "dependencies": {
+ "lodash": "^4.17.21",
+ "next": "12.3.0",
+ "react": "18.2.0",
+ "react-dom": "18.2.0",
+ "ui": "*"
+ },
+ "devDependencies": {
+ "@babel/core": "^7.0.0",
+ "@types/node": "^17.0.12",
+ "@types/react": "18.0.17",
+ "eslint": "7.32.0",
+ "eslint-config-custom": "*",
+ "next-transpile-modules": "9.0.0",
+ "tsconfig": "*",
+ "typescript": "^4.5.3"
+ }
+ },
+ "apps/web/node_modules/lodash": {
+ "version": "4.17.21",
+ "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
+ "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==",
+ "engines": ["node >= 0.8.0"]
+ },
+ "node_modules/@ampproject/remapping": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.0.tgz",
+ "integrity": "sha512-qRmjj8nj9qmLTQXXmaR1cck3UXSRMPrbsLJAasZpF+t3riI71BXed5ebIOYwQntykeZuhjsdweEc9BxH5Jc26w==",
+ "dev": true,
+ "dependencies": {
+ "@jridgewell/gen-mapping": "^0.1.0",
+ "@jridgewell/trace-mapping": "^0.3.9"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@babel/code-frame": {
+ "version": "7.18.6",
+ "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.18.6.tgz",
+ "integrity": "sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q==",
+ "dev": true,
+ "dependencies": {
+ "@babel/highlight": "^7.18.6"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/compat-data": {
+ "version": "7.19.3",
+ "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.19.3.tgz",
+ "integrity": "sha512-prBHMK4JYYK+wDjJF1q99KK4JLL+egWS4nmNqdlMUgCExMZ+iZW0hGhyC3VEbsPjvaN0TBhW//VIFwBrk8sEiw==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/core": {
+ "version": "7.19.3",
+ "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.19.3.tgz",
+ "integrity": "sha512-WneDJxdsjEvyKtXKsaBGbDeiyOjR5vYq4HcShxnIbG0qixpoHjI3MqeZM9NDvsojNCEBItQE4juOo/bU6e72gQ==",
+ "dev": true,
+ "dependencies": {
+ "@ampproject/remapping": "^2.1.0",
+ "@babel/code-frame": "^7.18.6",
+ "@babel/generator": "^7.19.3",
+ "@babel/helper-compilation-targets": "^7.19.3",
+ "@babel/helper-module-transforms": "^7.19.0",
+ "@babel/helpers": "^7.19.0",
+ "@babel/parser": "^7.19.3",
+ "@babel/template": "^7.18.10",
+ "@babel/traverse": "^7.19.3",
+ "@babel/types": "^7.19.3",
+ "convert-source-map": "^1.7.0",
+ "debug": "^4.1.0",
+ "gensync": "^1.0.0-beta.2",
+ "json5": "^2.2.1",
+ "semver": "^6.3.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/babel"
+ }
+ },
+ "node_modules/@babel/generator": {
+ "version": "7.19.3",
+ "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.19.3.tgz",
+ "integrity": "sha512-fqVZnmp1ncvZU757UzDheKZpfPgatqY59XtW2/j/18H7u76akb8xqvjw82f+i2UKd/ksYsSick/BCLQUUtJ/qQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.19.3",
+ "@jridgewell/gen-mapping": "^0.3.2",
+ "jsesc": "^2.5.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/generator/node_modules/@jridgewell/gen-mapping": {
+ "version": "0.3.2",
+ "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.2.tgz",
+ "integrity": "sha512-mh65xKQAzI6iBcFzwv28KVWSmCkdRBWoOh+bYQGW3+6OZvbbN3TqMGo5hqYxQniRcH9F2VZIoJCm4pa3BPDK/A==",
+ "dev": true,
+ "dependencies": {
+ "@jridgewell/set-array": "^1.0.1",
+ "@jridgewell/sourcemap-codec": "^1.4.10",
+ "@jridgewell/trace-mapping": "^0.3.9"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@babel/helper-compilation-targets": {
+ "version": "7.19.3",
+ "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.19.3.tgz",
+ "integrity": "sha512-65ESqLGyGmLvgR0mst5AdW1FkNlj9rQsCKduzEoEPhBCDFGXvz2jW6bXFG6i0/MrV2s7hhXjjb2yAzcPuQlLwg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/compat-data": "^7.19.3",
+ "@babel/helper-validator-option": "^7.18.6",
+ "browserslist": "^4.21.3",
+ "semver": "^6.3.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/helper-environment-visitor": {
+ "version": "7.18.9",
+ "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.18.9.tgz",
+ "integrity": "sha512-3r/aACDJ3fhQ/EVgFy0hpj8oHyHpQc+LPtJoY9SzTThAsStm4Ptegq92vqKoE3vD706ZVFWITnMnxucw+S9Ipg==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ }
+ }
+}
diff --git a/cli/internal/lockfile/testdata/npm-lock.json b/cli/internal/lockfile/testdata/npm-lock.json
new file mode 100644
index 0000000..c5607f1
--- /dev/null
+++ b/cli/internal/lockfile/testdata/npm-lock.json
@@ -0,0 +1,6472 @@
+{
+ "name": "npm-prune",
+ "version": "0.0.0",
+ "lockfileVersion": 2,
+ "requires": true,
+ "packages": {
+ "": {
+ "name": "npm-prune",
+ "version": "0.0.0",
+ "workspaces": ["apps/*", "packages/*"],
+ "devDependencies": {
+ "eslint-config-custom": "*",
+ "prettier": "latest",
+ "turbo": "latest"
+ },
+ "engines": {
+ "node": ">=14.0.0"
+ }
+ },
+ "apps/docs": {
+ "version": "0.0.0",
+ "dependencies": {
+ "lodash": "^3.0.0",
+ "next": "12.3.0",
+ "react": "18.2.0",
+ "react-dom": "18.2.0",
+ "ui": "*"
+ },
+ "devDependencies": {
+ "@babel/core": "^7.0.0",
+ "@types/node": "^17.0.12",
+ "@types/react": "18.0.17",
+ "eslint": "7.32.0",
+ "eslint-config-custom": "*",
+ "next-transpile-modules": "9.0.0",
+ "tsconfig": "*",
+ "typescript": "^4.5.3"
+ }
+ },
+ "apps/web": {
+ "version": "0.0.0",
+ "dependencies": {
+ "lodash": "^4.17.21",
+ "next": "12.3.0",
+ "react": "18.2.0",
+ "react-dom": "18.2.0",
+ "ui": "*"
+ },
+ "devDependencies": {
+ "@babel/core": "^7.0.0",
+ "@types/node": "^17.0.12",
+ "@types/react": "18.0.17",
+ "eslint": "7.32.0",
+ "eslint-config-custom": "*",
+ "next-transpile-modules": "9.0.0",
+ "tsconfig": "*",
+ "typescript": "^4.5.3"
+ }
+ },
+ "apps/web/node_modules/lodash": {
+ "version": "4.17.21",
+ "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
+ "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==",
+ "engines": ["node >= 0.8.0"]
+ },
+ "node_modules/@ampproject/remapping": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.0.tgz",
+ "integrity": "sha512-qRmjj8nj9qmLTQXXmaR1cck3UXSRMPrbsLJAasZpF+t3riI71BXed5ebIOYwQntykeZuhjsdweEc9BxH5Jc26w==",
+ "dev": true,
+ "dependencies": {
+ "@jridgewell/gen-mapping": "^0.1.0",
+ "@jridgewell/trace-mapping": "^0.3.9"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@babel/code-frame": {
+ "version": "7.18.6",
+ "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.18.6.tgz",
+ "integrity": "sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q==",
+ "dev": true,
+ "dependencies": {
+ "@babel/highlight": "^7.18.6"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/compat-data": {
+ "version": "7.19.3",
+ "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.19.3.tgz",
+ "integrity": "sha512-prBHMK4JYYK+wDjJF1q99KK4JLL+egWS4nmNqdlMUgCExMZ+iZW0hGhyC3VEbsPjvaN0TBhW//VIFwBrk8sEiw==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/core": {
+ "version": "7.19.3",
+ "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.19.3.tgz",
+ "integrity": "sha512-WneDJxdsjEvyKtXKsaBGbDeiyOjR5vYq4HcShxnIbG0qixpoHjI3MqeZM9NDvsojNCEBItQE4juOo/bU6e72gQ==",
+ "dev": true,
+ "dependencies": {
+ "@ampproject/remapping": "^2.1.0",
+ "@babel/code-frame": "^7.18.6",
+ "@babel/generator": "^7.19.3",
+ "@babel/helper-compilation-targets": "^7.19.3",
+ "@babel/helper-module-transforms": "^7.19.0",
+ "@babel/helpers": "^7.19.0",
+ "@babel/parser": "^7.19.3",
+ "@babel/template": "^7.18.10",
+ "@babel/traverse": "^7.19.3",
+ "@babel/types": "^7.19.3",
+ "convert-source-map": "^1.7.0",
+ "debug": "^4.1.0",
+ "gensync": "^1.0.0-beta.2",
+ "json5": "^2.2.1",
+ "semver": "^6.3.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/babel"
+ }
+ },
+ "node_modules/@babel/generator": {
+ "version": "7.19.3",
+ "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.19.3.tgz",
+ "integrity": "sha512-fqVZnmp1ncvZU757UzDheKZpfPgatqY59XtW2/j/18H7u76akb8xqvjw82f+i2UKd/ksYsSick/BCLQUUtJ/qQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.19.3",
+ "@jridgewell/gen-mapping": "^0.3.2",
+ "jsesc": "^2.5.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/generator/node_modules/@jridgewell/gen-mapping": {
+ "version": "0.3.2",
+ "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.2.tgz",
+ "integrity": "sha512-mh65xKQAzI6iBcFzwv28KVWSmCkdRBWoOh+bYQGW3+6OZvbbN3TqMGo5hqYxQniRcH9F2VZIoJCm4pa3BPDK/A==",
+ "dev": true,
+ "dependencies": {
+ "@jridgewell/set-array": "^1.0.1",
+ "@jridgewell/sourcemap-codec": "^1.4.10",
+ "@jridgewell/trace-mapping": "^0.3.9"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@babel/helper-compilation-targets": {
+ "version": "7.19.3",
+ "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.19.3.tgz",
+ "integrity": "sha512-65ESqLGyGmLvgR0mst5AdW1FkNlj9rQsCKduzEoEPhBCDFGXvz2jW6bXFG6i0/MrV2s7hhXjjb2yAzcPuQlLwg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/compat-data": "^7.19.3",
+ "@babel/helper-validator-option": "^7.18.6",
+ "browserslist": "^4.21.3",
+ "semver": "^6.3.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/helper-environment-visitor": {
+ "version": "7.18.9",
+ "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.18.9.tgz",
+ "integrity": "sha512-3r/aACDJ3fhQ/EVgFy0hpj8oHyHpQc+LPtJoY9SzTThAsStm4Ptegq92vqKoE3vD706ZVFWITnMnxucw+S9Ipg==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-function-name": {
+ "version": "7.19.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.19.0.tgz",
+ "integrity": "sha512-WAwHBINyrpqywkUH0nTnNgI5ina5TFn85HKS0pbPDfxFfhyR/aNQEn4hGi1P1JyT//I0t4OgXUlofzWILRvS5w==",
+ "dev": true,
+ "dependencies": {
+ "@babel/template": "^7.18.10",
+ "@babel/types": "^7.19.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-hoist-variables": {
+ "version": "7.18.6",
+ "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.18.6.tgz",
+ "integrity": "sha512-UlJQPkFqFULIcyW5sbzgbkxn2FKRgwWiRexcuaR8RNJRy8+LLveqPjwZV/bwrLZCN0eUHD/x8D0heK1ozuoo6Q==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.18.6"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-module-imports": {
+ "version": "7.18.6",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.18.6.tgz",
+ "integrity": "sha512-0NFvs3VkuSYbFi1x2Vd6tKrywq+z/cLeYC/RJNFrIX/30Bf5aiGYbtvGXolEktzJH8o5E5KJ3tT+nkxuuZFVlA==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.18.6"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-module-transforms": {
+ "version": "7.19.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.19.0.tgz",
+ "integrity": "sha512-3HBZ377Fe14RbLIA+ac3sY4PTgpxHVkFrESaWhoI5PuyXPBBX8+C34qblV9G89ZtycGJCmCI/Ut+VUDK4bltNQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-environment-visitor": "^7.18.9",
+ "@babel/helper-module-imports": "^7.18.6",
+ "@babel/helper-simple-access": "^7.18.6",
+ "@babel/helper-split-export-declaration": "^7.18.6",
+ "@babel/helper-validator-identifier": "^7.18.6",
+ "@babel/template": "^7.18.10",
+ "@babel/traverse": "^7.19.0",
+ "@babel/types": "^7.19.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-simple-access": {
+ "version": "7.18.6",
+ "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.18.6.tgz",
+ "integrity": "sha512-iNpIgTgyAvDQpDj76POqg+YEt8fPxx3yaNBg3S30dxNKm2SWfYhD0TGrK/Eu9wHpUW63VQU894TsTg+GLbUa1g==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.18.6"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-split-export-declaration": {
+ "version": "7.18.6",
+ "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.18.6.tgz",
+ "integrity": "sha512-bde1etTx6ZyTmobl9LLMMQsaizFVZrquTEHOqKeQESMKo4PlObf+8+JA25ZsIpZhT/WEd39+vOdLXAFG/nELpA==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.18.6"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-string-parser": {
+ "version": "7.18.10",
+ "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.18.10.tgz",
+ "integrity": "sha512-XtIfWmeNY3i4t7t4D2t02q50HvqHybPqW2ki1kosnvWCwuCMeo81Jf0gwr85jy/neUdg5XDdeFE/80DXiO+njw==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-validator-identifier": {
+ "version": "7.19.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.19.1.tgz",
+ "integrity": "sha512-awrNfaMtnHUr653GgGEs++LlAvW6w+DcPrOliSMXWCKo597CwL5Acf/wWdNkf/tfEQE3mjkeD1YOVZOUV/od1w==",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-validator-option": {
+ "version": "7.18.6",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.18.6.tgz",
+ "integrity": "sha512-XO7gESt5ouv/LRJdrVjkShckw6STTaB7l9BrpBaAHDeF5YZT+01PCwmR0SJHnkW6i8OwW/EVWRShfi4j2x+KQw==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helpers": {
+ "version": "7.19.0",
+ "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.19.0.tgz",
+ "integrity": "sha512-DRBCKGwIEdqY3+rPJgG/dKfQy9+08rHIAJx8q2p+HSWP87s2HCrQmaAMMyMll2kIXKCW0cO1RdQskx15Xakftg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/template": "^7.18.10",
+ "@babel/traverse": "^7.19.0",
+ "@babel/types": "^7.19.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/highlight": {
+ "version": "7.18.6",
+ "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.18.6.tgz",
+ "integrity": "sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g==",
+ "dependencies": {
+ "@babel/helper-validator-identifier": "^7.18.6",
+ "chalk": "^2.0.0",
+ "js-tokens": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/parser": {
+ "version": "7.19.3",
+ "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.19.3.tgz",
+ "integrity": "sha512-pJ9xOlNWHiy9+FuFP09DEAFbAn4JskgRsVcc169w2xRBC3FRGuQEwjeIMMND9L2zc0iEhO/tGv4Zq+km+hxNpQ==",
+ "dev": true,
+ "bin": {
+ "parser": "bin/babel-parser.js"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@babel/runtime": {
+ "version": "7.19.0",
+ "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.19.0.tgz",
+ "integrity": "sha512-eR8Lo9hnDS7tqkO7NsV+mKvCmv5boaXFSZ70DnfhcgiEne8hv9oCEd36Klw74EtizEqLsy4YnW8UWwpBVolHZA==",
+ "dependencies": {
+ "regenerator-runtime": "^0.13.4"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/runtime-corejs3": {
+ "version": "7.19.1",
+ "resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.19.1.tgz",
+ "integrity": "sha512-j2vJGnkopRzH+ykJ8h68wrHnEUmtK//E723jjixiAl/PPf6FhqY/vYRcMVlNydRKQjQsTsYEjpx+DZMIvnGk/g==",
+ "dependencies": {
+ "core-js-pure": "^3.25.1",
+ "regenerator-runtime": "^0.13.4"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/template": {
+ "version": "7.18.10",
+ "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.18.10.tgz",
+ "integrity": "sha512-TI+rCtooWHr3QJ27kJxfjutghu44DLnasDMwpDqCXVTal9RLp3RSYNh4NdBrRP2cQAoG9A8juOQl6P6oZG4JxA==",
+ "dev": true,
+ "dependencies": {
+ "@babel/code-frame": "^7.18.6",
+ "@babel/parser": "^7.18.10",
+ "@babel/types": "^7.18.10"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/traverse": {
+ "version": "7.19.3",
+ "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.19.3.tgz",
+ "integrity": "sha512-qh5yf6149zhq2sgIXmwjnsvmnNQC2iw70UFjp4olxucKrWd/dvlUsBI88VSLUsnMNF7/vnOiA+nk1+yLoCqROQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/code-frame": "^7.18.6",
+ "@babel/generator": "^7.19.3",
+ "@babel/helper-environment-visitor": "^7.18.9",
+ "@babel/helper-function-name": "^7.19.0",
+ "@babel/helper-hoist-variables": "^7.18.6",
+ "@babel/helper-split-export-declaration": "^7.18.6",
+ "@babel/parser": "^7.19.3",
+ "@babel/types": "^7.19.3",
+ "debug": "^4.1.0",
+ "globals": "^11.1.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/types": {
+ "version": "7.19.3",
+ "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.19.3.tgz",
+ "integrity": "sha512-hGCaQzIY22DJlDh9CH7NOxgKkFjBk0Cw9xDO1Xmh2151ti7wiGfQ3LauXzL4HP1fmFlTX6XjpRETTpUcv7wQLw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-string-parser": "^7.18.10",
+ "@babel/helper-validator-identifier": "^7.19.1",
+ "to-fast-properties": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@eslint/eslintrc": {
+ "version": "0.4.3",
+ "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-0.4.3.tgz",
+ "integrity": "sha512-J6KFFz5QCYUJq3pf0mjEcCJVERbzv71PUIDczuh9JkwGEzced6CO5ADLHB1rbf/+oPBtoPfMYNOpGDzCANlbXw==",
+ "dependencies": {
+ "ajv": "^6.12.4",
+ "debug": "^4.1.1",
+ "espree": "^7.3.0",
+ "globals": "^13.9.0",
+ "ignore": "^4.0.6",
+ "import-fresh": "^3.2.1",
+ "js-yaml": "^3.13.1",
+ "minimatch": "^3.0.4",
+ "strip-json-comments": "^3.1.1"
+ },
+ "engines": {
+ "node": "^10.12.0 || >=12.0.0"
+ }
+ },
+ "node_modules/@eslint/eslintrc/node_modules/globals": {
+ "version": "13.17.0",
+ "resolved": "https://registry.npmjs.org/globals/-/globals-13.17.0.tgz",
+ "integrity": "sha512-1C+6nQRb1GwGMKm2dH/E7enFAMxGTmGI7/dEdhy/DNelv85w9B72t3uc5frtMNXIbzrarJJ/lTCjcaZwbLJmyw==",
+ "dependencies": {
+ "type-fest": "^0.20.2"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/@humanwhocodes/config-array": {
+ "version": "0.5.0",
+ "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.5.0.tgz",
+ "integrity": "sha512-FagtKFz74XrTl7y6HCzQpwDfXP0yhxe9lHLD1UZxjvZIcbyRz8zTFF/yYNfSfzU414eDwZ1SrO0Qvtyf+wFMQg==",
+ "dependencies": {
+ "@humanwhocodes/object-schema": "^1.2.0",
+ "debug": "^4.1.1",
+ "minimatch": "^3.0.4"
+ },
+ "engines": {
+ "node": ">=10.10.0"
+ }
+ },
+ "node_modules/@humanwhocodes/object-schema": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-1.2.1.tgz",
+ "integrity": "sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA=="
+ },
+ "node_modules/@jridgewell/gen-mapping": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.1.1.tgz",
+ "integrity": "sha512-sQXCasFk+U8lWYEe66WxRDOE9PjVz4vSM51fTu3Hw+ClTpUSQb718772vH3pyS5pShp6lvQM7SxgIDXXXmOX7w==",
+ "dev": true,
+ "dependencies": {
+ "@jridgewell/set-array": "^1.0.0",
+ "@jridgewell/sourcemap-codec": "^1.4.10"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@jridgewell/resolve-uri": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz",
+ "integrity": "sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@jridgewell/set-array": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz",
+ "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@jridgewell/sourcemap-codec": {
+ "version": "1.4.14",
+ "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz",
+ "integrity": "sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==",
+ "dev": true
+ },
+ "node_modules/@jridgewell/trace-mapping": {
+ "version": "0.3.15",
+ "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.15.tgz",
+ "integrity": "sha512-oWZNOULl+UbhsgB51uuZzglikfIKSUBO/M9W2OfEjn7cmqoAiCgmv9lyACTUacZwBz0ITnJ2NqjU8Tx0DHL88g==",
+ "dev": true,
+ "dependencies": {
+ "@jridgewell/resolve-uri": "^3.0.3",
+ "@jridgewell/sourcemap-codec": "^1.4.10"
+ }
+ },
+ "node_modules/@next/env": {
+ "version": "12.3.0",
+ "resolved": "https://registry.npmjs.org/@next/env/-/env-12.3.0.tgz",
+ "integrity": "sha512-PTJpjAFVbzBQ9xXpzMTroShvD5YDIIy46jQ7d4LrWpY+/5a8H90Tm8hE3Hvkc5RBRspVo7kvEOnqQms0A+2Q6w=="
+ },
+ "node_modules/@next/eslint-plugin-next": {
+ "version": "12.3.1",
+ "resolved": "https://registry.npmjs.org/@next/eslint-plugin-next/-/eslint-plugin-next-12.3.1.tgz",
+ "integrity": "sha512-sw+lTf6r6P0j+g/n9y4qdWWI2syPqZx+uc0+B/fRENqfR3KpSid6MIKqc9gNwGhJASazEQ5b3w8h4cAET213jw==",
+ "dependencies": {
+ "glob": "7.1.7"
+ }
+ },
+ "node_modules/@next/swc-android-arm-eabi": {
+ "version": "12.3.0",
+ "resolved": "https://registry.npmjs.org/@next/swc-android-arm-eabi/-/swc-android-arm-eabi-12.3.0.tgz",
+ "integrity": "sha512-/PuirPnAKsYBw93w/7Q9hqy+KGOU9mjYprZ/faxMUJh/dc6v3rYLxkZKNG9nFPIW4QKNTCnhP40xF9hLnxO+xg==",
+ "cpu": ["arm"],
+ "optional": true,
+ "os": ["android"],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@next/swc-android-arm64": {
+ "version": "12.3.0",
+ "resolved": "https://registry.npmjs.org/@next/swc-android-arm64/-/swc-android-arm64-12.3.0.tgz",
+ "integrity": "sha512-OaI+FhAM6P9B6Ybwbn0Zl8YwWido0lLwhDBi9WiYCh4RQmIXAyVIoIJPHo4fP05+mXaJ/k1trvDvuURvHOq2qw==",
+ "cpu": ["arm64"],
+ "optional": true,
+ "os": ["android"],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@next/swc-darwin-arm64": {
+ "version": "12.3.0",
+ "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-12.3.0.tgz",
+ "integrity": "sha512-9s4d3Mhii+WFce8o8Jok7WC3Bawkr9wEUU++SJRptjU1L5tsfYJMrSYCACHLhZujziNDLyExe4Hwwsccps1sfg==",
+ "cpu": ["arm64"],
+ "optional": true,
+ "os": ["darwin"],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@next/swc-darwin-x64": {
+ "version": "12.3.0",
+ "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-12.3.0.tgz",
+ "integrity": "sha512-2scC4MqUTwGwok+wpVxP+zWp7WcCAVOtutki2E1n99rBOTnUOX6qXkgxSy083yBN6GqwuC/dzHeN7hIKjavfRA==",
+ "cpu": ["x64"],
+ "optional": true,
+ "os": ["darwin"],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@next/swc-freebsd-x64": {
+ "version": "12.3.0",
+ "resolved": "https://registry.npmjs.org/@next/swc-freebsd-x64/-/swc-freebsd-x64-12.3.0.tgz",
+ "integrity": "sha512-xAlruUREij/bFa+qsE1tmsP28t7vz02N4ZDHt2lh3uJUniE0Ne9idyIDLc1Ed0IF2RjfgOp4ZVunuS3OM0sngw==",
+ "cpu": ["x64"],
+ "optional": true,
+ "os": ["freebsd"],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@next/swc-linux-arm-gnueabihf": {
+ "version": "12.3.0",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-arm-gnueabihf/-/swc-linux-arm-gnueabihf-12.3.0.tgz",
+ "integrity": "sha512-jin2S4VT/cugc2dSZEUIabhYDJNgrUh7fufbdsaAezgcQzqfdfJqfxl4E9GuafzB4cbRPTaqA0V5uqbp0IyGkQ==",
+ "cpu": ["arm"],
+ "optional": true,
+ "os": ["linux"],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@next/swc-linux-arm64-gnu": {
+ "version": "12.3.0",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-12.3.0.tgz",
+ "integrity": "sha512-RqJHDKe0WImeUrdR0kayTkRWgp4vD/MS7g0r6Xuf8+ellOFH7JAAJffDW3ayuVZeMYOa7RvgNFcOoWnrTUl9Nw==",
+ "cpu": ["arm64"],
+ "optional": true,
+ "os": ["linux"],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@next/swc-linux-arm64-musl": {
+ "version": "12.3.0",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-12.3.0.tgz",
+ "integrity": "sha512-nvNWoUieMjvDjpYJ/4SQe9lQs2xMj6ZRs8N+bmTrVu9leY2Fg3WD6W9p/1uU9hGO8u+OdF13wc4iRShu/WYIHg==",
+ "cpu": ["arm64"],
+ "optional": true,
+ "os": ["linux"],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@next/swc-linux-x64-gnu": {
+ "version": "12.3.0",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-12.3.0.tgz",
+ "integrity": "sha512-4ajhIuVU9PeQCMMhdDgZTLrHmjbOUFuIyg6J19hZqwEwDTSqQyrSLkbJs2Nd7IRiM6Ul/XyrtEFCpk4k+xD2+w==",
+ "cpu": ["x64"],
+ "optional": true,
+ "os": ["linux"],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@next/swc-linux-x64-musl": {
+ "version": "12.3.0",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-12.3.0.tgz",
+ "integrity": "sha512-U092RBYbaGxoMAwpauePJEu2PuZSEoUCGJBvsptQr2/2XIMwAJDYM4c/M5NfYEsBr+yjvsYNsOpYfeQ88D82Yg==",
+ "cpu": ["x64"],
+ "optional": true,
+ "os": ["linux"],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@next/swc-win32-arm64-msvc": {
+ "version": "12.3.0",
+ "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-12.3.0.tgz",
+ "integrity": "sha512-pzSzaxjDEJe67bUok9Nxf9rykbJfHXW0owICFsPBsqHyc+cr8vpF7g9e2APTCddtVhvjkga9ILoZJ9NxWS7Yiw==",
+ "cpu": ["arm64"],
+ "optional": true,
+ "os": ["win32"],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@next/swc-win32-ia32-msvc": {
+ "version": "12.3.0",
+ "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-12.3.0.tgz",
+ "integrity": "sha512-MQGUpMbYhQmTZ06a9e0hPQJnxFMwETo2WtyAotY3GEzbNCQVbCGhsvqEKcl+ZEHgShlHXUWvSffq1ZscY6gK7A==",
+ "cpu": ["ia32"],
+ "optional": true,
+ "os": ["win32"],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@next/swc-win32-x64-msvc": {
+ "version": "12.3.0",
+ "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-12.3.0.tgz",
+ "integrity": "sha512-C/nw6OgQpEULWqs+wgMHXGvlJLguPRFFGqR2TAqWBerQ8J+Sg3z1ZTqwelkSi4FoqStGuZ2UdFHIDN1ySmR1xA==",
+ "cpu": ["x64"],
+ "optional": true,
+ "os": ["win32"],
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/@nodelib/fs.scandir": {
+ "version": "2.1.5",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
+ "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==",
+ "dependencies": {
+ "@nodelib/fs.stat": "2.0.5",
+ "run-parallel": "^1.1.9"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/@nodelib/fs.stat": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz",
+ "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==",
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/@nodelib/fs.walk": {
+ "version": "1.2.8",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz",
+ "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==",
+ "dependencies": {
+ "@nodelib/fs.scandir": "2.1.5",
+ "fastq": "^1.6.0"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/@rushstack/eslint-patch": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/@rushstack/eslint-patch/-/eslint-patch-1.2.0.tgz",
+ "integrity": "sha512-sXo/qW2/pAcmT43VoRKOJbDOfV3cYpq3szSVfIThQXNt+E4DfKj361vaAt3c88U5tPUxzEswam7GW48PJqtKAg=="
+ },
+ "node_modules/@swc/helpers": {
+ "version": "0.4.11",
+ "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.4.11.tgz",
+ "integrity": "sha512-rEUrBSGIoSFuYxwBYtlUFMlE2CwGhmW+w9355/5oduSw8e5h2+Tj4UrAGNNgP9915++wj5vkQo0UuOBqOAq4nw==",
+ "dependencies": {
+ "tslib": "^2.4.0"
+ }
+ },
+ "node_modules/@types/json5": {
+ "version": "0.0.29",
+ "resolved": "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz",
+ "integrity": "sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ=="
+ },
+ "node_modules/@types/node": {
+ "version": "17.0.45",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.45.tgz",
+ "integrity": "sha512-w+tIMs3rq2afQdsPJlODhoUEKzFP1ayaoyl1CcnwtIlsVe7K7bA1NGm4s3PraqTLlXnbIN84zuBlxBWo1u9BLw==",
+ "dev": true
+ },
+ "node_modules/@types/prop-types": {
+ "version": "15.7.5",
+ "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.5.tgz",
+ "integrity": "sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w==",
+ "dev": true
+ },
+ "node_modules/@types/react": {
+ "version": "18.0.17",
+ "resolved": "https://registry.npmjs.org/@types/react/-/react-18.0.17.tgz",
+ "integrity": "sha512-38ETy4tL+rn4uQQi7mB81G7V1g0u2ryquNmsVIOKUAEIDK+3CUjZ6rSRpdvS99dNBnkLFL83qfmtLacGOTIhwQ==",
+ "dev": true,
+ "dependencies": {
+ "@types/prop-types": "*",
+ "@types/scheduler": "*",
+ "csstype": "^3.0.2"
+ }
+ },
+ "node_modules/@types/react-dom": {
+ "version": "17.0.17",
+ "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-17.0.17.tgz",
+ "integrity": "sha512-VjnqEmqGnasQKV0CWLevqMTXBYG9GbwuE6x3VetERLh0cq2LTptFE73MrQi2S7GkKXCf2GgwItB/melLnxfnsg==",
+ "dev": true,
+ "dependencies": {
+ "@types/react": "^17"
+ }
+ },
+ "node_modules/@types/react-dom/node_modules/@types/react": {
+ "version": "17.0.50",
+ "resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.50.tgz",
+ "integrity": "sha512-ZCBHzpDb5skMnc1zFXAXnL3l1FAdi+xZvwxK+PkglMmBrwjpp9nKaWuEvrGnSifCJmBFGxZOOFuwC6KH/s0NuA==",
+ "dev": true,
+ "dependencies": {
+ "@types/prop-types": "*",
+ "@types/scheduler": "*",
+ "csstype": "^3.0.2"
+ }
+ },
+ "node_modules/@types/scheduler": {
+ "version": "0.16.2",
+ "resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.2.tgz",
+ "integrity": "sha512-hppQEBDmlwhFAXKJX2KnWLYu5yMfi91yazPb2l+lbJiwW+wdo1gNeRA+3RgNSO39WYX2euey41KEwnqesU2Jew==",
+ "dev": true
+ },
+ "node_modules/@typescript-eslint/parser": {
+ "version": "5.39.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.39.0.tgz",
+ "integrity": "sha512-PhxLjrZnHShe431sBAGHaNe6BDdxAASDySgsBCGxcBecVCi8NQWxQZMcizNA4g0pN51bBAn/FUfkWG3SDVcGlA==",
+ "dependencies": {
+ "@typescript-eslint/scope-manager": "5.39.0",
+ "@typescript-eslint/types": "5.39.0",
+ "@typescript-eslint/typescript-estree": "5.39.0",
+ "debug": "^4.3.4"
+ },
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ },
+ "peerDependencies": {
+ "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0"
+ },
+ "peerDependenciesMeta": {
+ "typescript": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@typescript-eslint/scope-manager": {
+ "version": "5.39.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.39.0.tgz",
+ "integrity": "sha512-/I13vAqmG3dyqMVSZPjsbuNQlYS082Y7OMkwhCfLXYsmlI0ca4nkL7wJ/4gjX70LD4P8Hnw1JywUVVAwepURBw==",
+ "dependencies": {
+ "@typescript-eslint/types": "5.39.0",
+ "@typescript-eslint/visitor-keys": "5.39.0"
+ },
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ }
+ },
+ "node_modules/@typescript-eslint/types": {
+ "version": "5.39.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.39.0.tgz",
+ "integrity": "sha512-gQMZrnfEBFXK38hYqt8Lkwt8f4U6yq+2H5VDSgP/qiTzC8Nw8JO3OuSUOQ2qW37S/dlwdkHDntkZM6SQhKyPhw==",
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ }
+ },
+ "node_modules/@typescript-eslint/typescript-estree": {
+ "version": "5.39.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.39.0.tgz",
+ "integrity": "sha512-qLFQP0f398sdnogJoLtd43pUgB18Q50QSA+BTE5h3sUxySzbWDpTSdgt4UyxNSozY/oDK2ta6HVAzvGgq8JYnA==",
+ "dependencies": {
+ "@typescript-eslint/types": "5.39.0",
+ "@typescript-eslint/visitor-keys": "5.39.0",
+ "debug": "^4.3.4",
+ "globby": "^11.1.0",
+ "is-glob": "^4.0.3",
+ "semver": "^7.3.7",
+ "tsutils": "^3.21.0"
+ },
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ },
+ "peerDependenciesMeta": {
+ "typescript": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@typescript-eslint/typescript-estree/node_modules/semver": {
+ "version": "7.3.7",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.7.tgz",
+ "integrity": "sha512-QlYTucUYOews+WeEujDoEGziz4K6c47V/Bd+LjSSYcA94p+DmINdf7ncaUinThfvZyu13lN9OY1XDxt8C0Tw0g==",
+ "dependencies": {
+ "lru-cache": "^6.0.0"
+ },
+ "bin": {
+ "semver": "bin/semver.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@typescript-eslint/visitor-keys": {
+ "version": "5.39.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.39.0.tgz",
+ "integrity": "sha512-yyE3RPwOG+XJBLrhvsxAidUgybJVQ/hG8BhiJo0k8JSAYfk/CshVcxf0HwP4Jt7WZZ6vLmxdo1p6EyN3tzFTkg==",
+ "dependencies": {
+ "@typescript-eslint/types": "5.39.0",
+ "eslint-visitor-keys": "^3.3.0"
+ },
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ }
+ },
+ "node_modules/@typescript-eslint/visitor-keys/node_modules/eslint-visitor-keys": {
+ "version": "3.3.0",
+ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.3.0.tgz",
+ "integrity": "sha512-mQ+suqKJVyeuwGYHAdjMFqjCyfl8+Ldnxuyp3ldiMBFKkvytrXUZWaiPCEav8qDHKty44bD+qV1IP4T+w+xXRA==",
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ }
+ },
+ "node_modules/acorn": {
+ "version": "7.4.1",
+ "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.4.1.tgz",
+ "integrity": "sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==",
+ "bin": {
+ "acorn": "bin/acorn"
+ },
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/acorn-jsx": {
+ "version": "5.3.2",
+ "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz",
+ "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==",
+ "peerDependencies": {
+ "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0"
+ }
+ },
+ "node_modules/ajv": {
+ "version": "6.12.6",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
+ "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
+ "dependencies": {
+ "fast-deep-equal": "^3.1.1",
+ "fast-json-stable-stringify": "^2.0.0",
+ "json-schema-traverse": "^0.4.1",
+ "uri-js": "^4.2.2"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/epoberezkin"
+ }
+ },
+ "node_modules/ansi-colors": {
+ "version": "4.1.3",
+ "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.3.tgz",
+ "integrity": "sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/ansi-regex": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
+ "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/ansi-styles": {
+ "version": "3.2.1",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
+ "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
+ "dependencies": {
+ "color-convert": "^1.9.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/argparse": {
+ "version": "1.0.10",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz",
+ "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==",
+ "dependencies": {
+ "sprintf-js": "~1.0.2"
+ }
+ },
+ "node_modules/aria-query": {
+ "version": "4.2.2",
+ "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-4.2.2.tgz",
+ "integrity": "sha512-o/HelwhuKpTj/frsOsbNLNgnNGVIFsVP/SW2BSF14gVl7kAfMOJ6/8wUAUvG1R1NHKrfG+2sHZTu0yauT1qBrA==",
+ "dependencies": {
+ "@babel/runtime": "^7.10.2",
+ "@babel/runtime-corejs3": "^7.10.2"
+ },
+ "engines": {
+ "node": ">=6.0"
+ }
+ },
+ "node_modules/array-includes": {
+ "version": "3.1.5",
+ "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.5.tgz",
+ "integrity": "sha512-iSDYZMMyTPkiFasVqfuAQnWAYcvO/SeBSCGKePoEthjp4LEMTe4uLc7b025o4jAZpHhihh8xPo99TNWUWWkGDQ==",
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.4",
+ "es-abstract": "^1.19.5",
+ "get-intrinsic": "^1.1.1",
+ "is-string": "^1.0.7"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/array-union": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz",
+ "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/array.prototype.flat": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.3.0.tgz",
+ "integrity": "sha512-12IUEkHsAhA4DY5s0FPgNXIdc8VRSqD9Zp78a5au9abH/SOBrsp082JOWFNTjkMozh8mqcdiKuaLGhPeYztxSw==",
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.3",
+ "es-abstract": "^1.19.2",
+ "es-shim-unscopables": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/array.prototype.flatmap": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.3.0.tgz",
+ "integrity": "sha512-PZC9/8TKAIxcWKdyeb77EzULHPrIX/tIZebLJUQOMR1OwYosT8yggdfWScfTBCDj5utONvOuPQQumYsU2ULbkg==",
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.3",
+ "es-abstract": "^1.19.2",
+ "es-shim-unscopables": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/ast-types-flow": {
+ "version": "0.0.7",
+ "resolved": "https://registry.npmjs.org/ast-types-flow/-/ast-types-flow-0.0.7.tgz",
+ "integrity": "sha512-eBvWn1lvIApYMhzQMsu9ciLfkBY499mFZlNqG+/9WR7PVlroQw0vG30cOQQbaKz3sCEc44TAOu2ykzqXSNnwag=="
+ },
+ "node_modules/astral-regex": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-2.0.0.tgz",
+ "integrity": "sha512-Z7tMw1ytTXt5jqMcOP+OQteU1VuNK9Y02uuJtKQ1Sv69jXQKKg5cibLwGJow8yzZP+eAc18EmLGPal0bp36rvQ==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/axe-core": {
+ "version": "4.4.3",
+ "resolved": "https://registry.npmjs.org/axe-core/-/axe-core-4.4.3.tgz",
+ "integrity": "sha512-32+ub6kkdhhWick/UjvEwRchgoetXqTK14INLqbGm5U2TzBkBNF3nQtLYm8ovxSkQWArjEQvftCKryjZaATu3w==",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/axobject-query": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/axobject-query/-/axobject-query-2.2.0.tgz",
+ "integrity": "sha512-Td525n+iPOOyUQIeBfcASuG6uJsDOITl7Mds5gFyerkWiX7qhUTdYUBlSgNMyVqtSJqwpt1kXGLdUt6SykLMRA=="
+ },
+ "node_modules/balanced-match": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
+ "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="
+ },
+ "node_modules/brace-expansion": {
+ "version": "1.1.11",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
+ "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
+ "dependencies": {
+ "balanced-match": "^1.0.0",
+ "concat-map": "0.0.1"
+ }
+ },
+ "node_modules/braces": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz",
+ "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==",
+ "dependencies": {
+ "fill-range": "^7.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/browserslist": {
+ "version": "4.21.4",
+ "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.21.4.tgz",
+ "integrity": "sha512-CBHJJdDmgjl3daYjN5Cp5kbTf1mUhZoS+beLklHIvkOWscs83YAhLlF3Wsh/lciQYAcbBJgTOD44VtG31ZM4Hw==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/browserslist"
+ }
+ ],
+ "dependencies": {
+ "caniuse-lite": "^1.0.30001400",
+ "electron-to-chromium": "^1.4.251",
+ "node-releases": "^2.0.6",
+ "update-browserslist-db": "^1.0.9"
+ },
+ "bin": {
+ "browserslist": "cli.js"
+ },
+ "engines": {
+ "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7"
+ }
+ },
+ "node_modules/call-bind": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz",
+ "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==",
+ "dependencies": {
+ "function-bind": "^1.1.1",
+ "get-intrinsic": "^1.0.2"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/callsites": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz",
+ "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/caniuse-lite": {
+ "version": "1.0.30001414",
+ "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001414.tgz",
+ "integrity": "sha512-t55jfSaWjCdocnFdKQoO+d2ct9C59UZg4dY3OnUlSZ447r8pUtIKdp0hpAzrGFultmTC+Us+KpKi4GZl/LXlFg==",
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/caniuse-lite"
+ }
+ ]
+ },
+ "node_modules/chalk": {
+ "version": "2.4.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
+ "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
+ "dependencies": {
+ "ansi-styles": "^3.2.1",
+ "escape-string-regexp": "^1.0.5",
+ "supports-color": "^5.3.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/color-convert": {
+ "version": "1.9.3",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
+ "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
+ "dependencies": {
+ "color-name": "1.1.3"
+ }
+ },
+ "node_modules/color-name": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
+ "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw=="
+ },
+ "node_modules/concat-map": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
+ "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg=="
+ },
+ "node_modules/convert-source-map": {
+ "version": "1.8.0",
+ "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.8.0.tgz",
+ "integrity": "sha512-+OQdjP49zViI/6i7nIJpA8rAl4sV/JdPfU9nZs3VqOwGIgizICvuN2ru6fMd+4llL0tar18UYJXfZ/TWtmhUjA==",
+ "dev": true,
+ "dependencies": {
+ "safe-buffer": "~5.1.1"
+ }
+ },
+ "node_modules/core-js-pure": {
+ "version": "3.25.5",
+ "resolved": "https://registry.npmjs.org/core-js-pure/-/core-js-pure-3.25.5.tgz",
+ "integrity": "sha512-oml3M22pHM+igfWHDfdLVq2ShWmjM2V4L+dQEBs0DWVIqEm9WHCwGAlZ6BmyBQGy5sFrJmcx+856D9lVKyGWYg==",
+ "hasInstallScript": true,
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/core-js"
+ }
+ },
+ "node_modules/cross-spawn": {
+ "version": "7.0.3",
+ "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz",
+ "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==",
+ "dependencies": {
+ "path-key": "^3.1.0",
+ "shebang-command": "^2.0.0",
+ "which": "^2.0.1"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/csstype": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.1.tgz",
+ "integrity": "sha512-DJR/VvkAvSZW9bTouZue2sSxDwdTN92uHjqeKVm+0dAqdfNykRzQ95tay8aXMBAAPpUiq4Qcug2L7neoRh2Egw==",
+ "dev": true
+ },
+ "node_modules/damerau-levenshtein": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/damerau-levenshtein/-/damerau-levenshtein-1.0.8.tgz",
+ "integrity": "sha512-sdQSFB7+llfUcQHUQO3+B8ERRj0Oa4w9POWMI/puGtuf7gFywGmkaLCElnudfTiKZV+NvHqL0ifzdrI8Ro7ESA=="
+ },
+ "node_modules/debug": {
+ "version": "4.3.4",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz",
+ "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==",
+ "dependencies": {
+ "ms": "2.1.2"
+ },
+ "engines": {
+ "node": ">=6.0"
+ },
+ "peerDependenciesMeta": {
+ "supports-color": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/deep-is": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz",
+ "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ=="
+ },
+ "node_modules/define-properties": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.4.tgz",
+ "integrity": "sha512-uckOqKcfaVvtBdsVkdPv3XjveQJsNQqmhXgRi8uhvWWuPYZCNlzT8qAyblUgNoXdHdjMTzAqeGjAoli8f+bzPA==",
+ "dependencies": {
+ "has-property-descriptors": "^1.0.0",
+ "object-keys": "^1.1.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/dir-glob": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz",
+ "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==",
+ "dependencies": {
+ "path-type": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/docs": {
+ "resolved": "apps/docs",
+ "link": true
+ },
+ "node_modules/doctrine": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz",
+ "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==",
+ "dependencies": {
+ "esutils": "^2.0.2"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/electron-to-chromium": {
+ "version": "1.4.270",
+ "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.270.tgz",
+ "integrity": "sha512-KNhIzgLiJmDDC444dj9vEOpZEgsV96ult9Iff98Vanumn+ShJHd5se8aX6KeVxdc0YQeqdrezBZv89rleDbvSg==",
+ "dev": true
+ },
+ "node_modules/emoji-regex": {
+ "version": "9.2.2",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz",
+ "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg=="
+ },
+ "node_modules/enhanced-resolve": {
+ "version": "5.10.0",
+ "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.10.0.tgz",
+ "integrity": "sha512-T0yTFjdpldGY8PmuXXR0PyQ1ufZpEGiHVrp7zHKB7jdR4qlmZHhONVM5AQOAWXuF/w3dnHbEQVrNptJgt7F+cQ==",
+ "dev": true,
+ "dependencies": {
+ "graceful-fs": "^4.2.4",
+ "tapable": "^2.2.0"
+ },
+ "engines": {
+ "node": ">=10.13.0"
+ }
+ },
+ "node_modules/enquirer": {
+ "version": "2.3.6",
+ "resolved": "https://registry.npmjs.org/enquirer/-/enquirer-2.3.6.tgz",
+ "integrity": "sha512-yjNnPr315/FjS4zIsUxYguYUPP2e1NK4d7E7ZOLiyYCcbFBiTMyID+2wvm2w6+pZ/odMA7cRkjhsPbltwBOrLg==",
+ "dependencies": {
+ "ansi-colors": "^4.1.1"
+ },
+ "engines": {
+ "node": ">=8.6"
+ }
+ },
+ "node_modules/es-abstract": {
+ "version": "1.20.3",
+ "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.20.3.tgz",
+ "integrity": "sha512-AyrnaKVpMzljIdwjzrj+LxGmj8ik2LckwXacHqrJJ/jxz6dDDBcZ7I7nlHM0FvEW8MfbWJwOd+yT2XzYW49Frw==",
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "es-to-primitive": "^1.2.1",
+ "function-bind": "^1.1.1",
+ "function.prototype.name": "^1.1.5",
+ "get-intrinsic": "^1.1.3",
+ "get-symbol-description": "^1.0.0",
+ "has": "^1.0.3",
+ "has-property-descriptors": "^1.0.0",
+ "has-symbols": "^1.0.3",
+ "internal-slot": "^1.0.3",
+ "is-callable": "^1.2.6",
+ "is-negative-zero": "^2.0.2",
+ "is-regex": "^1.1.4",
+ "is-shared-array-buffer": "^1.0.2",
+ "is-string": "^1.0.7",
+ "is-weakref": "^1.0.2",
+ "object-inspect": "^1.12.2",
+ "object-keys": "^1.1.1",
+ "object.assign": "^4.1.4",
+ "regexp.prototype.flags": "^1.4.3",
+ "safe-regex-test": "^1.0.0",
+ "string.prototype.trimend": "^1.0.5",
+ "string.prototype.trimstart": "^1.0.5",
+ "unbox-primitive": "^1.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/es-shim-unscopables": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.0.0.tgz",
+ "integrity": "sha512-Jm6GPcCdC30eMLbZ2x8z2WuRwAws3zTBBKuusffYVUrNj/GVSUAZ+xKMaUpfNDR5IbyNA5LJbaecoUVbmUcB1w==",
+ "dependencies": {
+ "has": "^1.0.3"
+ }
+ },
+ "node_modules/es-to-primitive": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz",
+ "integrity": "sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==",
+ "dependencies": {
+ "is-callable": "^1.1.4",
+ "is-date-object": "^1.0.1",
+ "is-symbol": "^1.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/escalade": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz",
+ "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/escape-string-regexp": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
+ "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==",
+ "engines": {
+ "node": ">=0.8.0"
+ }
+ },
+ "node_modules/eslint": {
+ "version": "7.32.0",
+ "resolved": "https://registry.npmjs.org/eslint/-/eslint-7.32.0.tgz",
+ "integrity": "sha512-VHZ8gX+EDfz+97jGcgyGCyRia/dPOd6Xh9yPv8Bl1+SoaIwD+a/vlrOmGRUyOYu7MwUhc7CxqeaDZU13S4+EpA==",
+ "dependencies": {
+ "@babel/code-frame": "7.12.11",
+ "@eslint/eslintrc": "^0.4.3",
+ "@humanwhocodes/config-array": "^0.5.0",
+ "ajv": "^6.10.0",
+ "chalk": "^4.0.0",
+ "cross-spawn": "^7.0.2",
+ "debug": "^4.0.1",
+ "doctrine": "^3.0.0",
+ "enquirer": "^2.3.5",
+ "escape-string-regexp": "^4.0.0",
+ "eslint-scope": "^5.1.1",
+ "eslint-utils": "^2.1.0",
+ "eslint-visitor-keys": "^2.0.0",
+ "espree": "^7.3.1",
+ "esquery": "^1.4.0",
+ "esutils": "^2.0.2",
+ "fast-deep-equal": "^3.1.3",
+ "file-entry-cache": "^6.0.1",
+ "functional-red-black-tree": "^1.0.1",
+ "glob-parent": "^5.1.2",
+ "globals": "^13.6.0",
+ "ignore": "^4.0.6",
+ "import-fresh": "^3.0.0",
+ "imurmurhash": "^0.1.4",
+ "is-glob": "^4.0.0",
+ "js-yaml": "^3.13.1",
+ "json-stable-stringify-without-jsonify": "^1.0.1",
+ "levn": "^0.4.1",
+ "lodash.merge": "^4.6.2",
+ "minimatch": "^3.0.4",
+ "natural-compare": "^1.4.0",
+ "optionator": "^0.9.1",
+ "progress": "^2.0.0",
+ "regexpp": "^3.1.0",
+ "semver": "^7.2.1",
+ "strip-ansi": "^6.0.0",
+ "strip-json-comments": "^3.1.0",
+ "table": "^6.0.9",
+ "text-table": "^0.2.0",
+ "v8-compile-cache": "^2.0.3"
+ },
+ "bin": {
+ "eslint": "bin/eslint.js"
+ },
+ "engines": {
+ "node": "^10.12.0 || >=12.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/eslint"
+ }
+ },
+ "node_modules/eslint-config-custom": {
+ "resolved": "packages/eslint-config-custom",
+ "link": true
+ },
+ "node_modules/eslint-config-next": {
+ "version": "12.3.1",
+ "resolved": "https://registry.npmjs.org/eslint-config-next/-/eslint-config-next-12.3.1.tgz",
+ "integrity": "sha512-EN/xwKPU6jz1G0Qi6Bd/BqMnHLyRAL0VsaQaWA7F3KkjAgZHi4f1uL1JKGWNxdQpHTW/sdGONBd0bzxUka/DJg==",
+ "dependencies": {
+ "@next/eslint-plugin-next": "12.3.1",
+ "@rushstack/eslint-patch": "^1.1.3",
+ "@typescript-eslint/parser": "^5.21.0",
+ "eslint-import-resolver-node": "^0.3.6",
+ "eslint-import-resolver-typescript": "^2.7.1",
+ "eslint-plugin-import": "^2.26.0",
+ "eslint-plugin-jsx-a11y": "^6.5.1",
+ "eslint-plugin-react": "^7.31.7",
+ "eslint-plugin-react-hooks": "^4.5.0"
+ },
+ "peerDependencies": {
+ "eslint": "^7.23.0 || ^8.0.0",
+ "typescript": ">=3.3.1"
+ },
+ "peerDependenciesMeta": {
+ "typescript": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/eslint-config-prettier": {
+ "version": "8.5.0",
+ "resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-8.5.0.tgz",
+ "integrity": "sha512-obmWKLUNCnhtQRKc+tmnYuQl0pFU1ibYJQ5BGhTVB08bHe9wC8qUeG7c08dj9XX+AuPj1YSGSQIHl1pnDHZR0Q==",
+ "bin": {
+ "eslint-config-prettier": "bin/cli.js"
+ },
+ "peerDependencies": {
+ "eslint": ">=7.0.0"
+ }
+ },
+ "node_modules/eslint-config-turbo": {
+ "version": "0.0.4",
+ "resolved": "https://registry.npmjs.org/eslint-config-turbo/-/eslint-config-turbo-0.0.4.tgz",
+ "integrity": "sha512-HErPS/wfWkSdV9Yd2dDkhZt3W2B78Ih/aWPFfaHmCMjzPalh+5KxRRGTf8MOBQLCebcWJX0lP1Zvc1rZIHlXGg==",
+ "dependencies": {
+ "eslint-plugin-turbo": "0.0.4"
+ },
+ "peerDependencies": {
+ "eslint": "^7.23.0 || ^8.0.0"
+ }
+ },
+ "node_modules/eslint-import-resolver-node": {
+ "version": "0.3.6",
+ "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.6.tgz",
+ "integrity": "sha512-0En0w03NRVMn9Uiyn8YRPDKvWjxCWkslUEhGNTdGx15RvPJYQ+lbOlqrlNI2vEAs4pDYK4f/HN2TbDmk5TP0iw==",
+ "dependencies": {
+ "debug": "^3.2.7",
+ "resolve": "^1.20.0"
+ }
+ },
+ "node_modules/eslint-import-resolver-node/node_modules/debug": {
+ "version": "3.2.7",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz",
+ "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==",
+ "dependencies": {
+ "ms": "^2.1.1"
+ }
+ },
+ "node_modules/eslint-import-resolver-typescript": {
+ "version": "2.7.1",
+ "resolved": "https://registry.npmjs.org/eslint-import-resolver-typescript/-/eslint-import-resolver-typescript-2.7.1.tgz",
+ "integrity": "sha512-00UbgGwV8bSgUv34igBDbTOtKhqoRMy9bFjNehT40bXg6585PNIct8HhXZ0SybqB9rWtXj9crcku8ndDn/gIqQ==",
+ "dependencies": {
+ "debug": "^4.3.4",
+ "glob": "^7.2.0",
+ "is-glob": "^4.0.3",
+ "resolve": "^1.22.0",
+ "tsconfig-paths": "^3.14.1"
+ },
+ "engines": {
+ "node": ">=4"
+ },
+ "peerDependencies": {
+ "eslint": "*",
+ "eslint-plugin-import": "*"
+ }
+ },
+ "node_modules/eslint-import-resolver-typescript/node_modules/glob": {
+ "version": "7.2.3",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
+ "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
+ "dependencies": {
+ "fs.realpath": "^1.0.0",
+ "inflight": "^1.0.4",
+ "inherits": "2",
+ "minimatch": "^3.1.1",
+ "once": "^1.3.0",
+ "path-is-absolute": "^1.0.0"
+ },
+ "engines": {
+ "node": "*"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/eslint-module-utils": {
+ "version": "2.7.4",
+ "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.7.4.tgz",
+ "integrity": "sha512-j4GT+rqzCoRKHwURX7pddtIPGySnX9Si/cgMI5ztrcqOPtk5dDEeZ34CQVPphnqkJytlc97Vuk05Um2mJ3gEQA==",
+ "dependencies": {
+ "debug": "^3.2.7"
+ },
+ "engines": {
+ "node": ">=4"
+ },
+ "peerDependenciesMeta": {
+ "eslint": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/eslint-module-utils/node_modules/debug": {
+ "version": "3.2.7",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz",
+ "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==",
+ "dependencies": {
+ "ms": "^2.1.1"
+ }
+ },
+ "node_modules/eslint-plugin-import": {
+ "version": "2.26.0",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.26.0.tgz",
+ "integrity": "sha512-hYfi3FXaM8WPLf4S1cikh/r4IxnO6zrhZbEGz2b660EJRbuxgpDS5gkCuYgGWg2xxh2rBuIr4Pvhve/7c31koA==",
+ "dependencies": {
+ "array-includes": "^3.1.4",
+ "array.prototype.flat": "^1.2.5",
+ "debug": "^2.6.9",
+ "doctrine": "^2.1.0",
+ "eslint-import-resolver-node": "^0.3.6",
+ "eslint-module-utils": "^2.7.3",
+ "has": "^1.0.3",
+ "is-core-module": "^2.8.1",
+ "is-glob": "^4.0.3",
+ "minimatch": "^3.1.2",
+ "object.values": "^1.1.5",
+ "resolve": "^1.22.0",
+ "tsconfig-paths": "^3.14.1"
+ },
+ "engines": {
+ "node": ">=4"
+ },
+ "peerDependencies": {
+ "eslint": "^2 || ^3 || ^4 || ^5 || ^6 || ^7.2.0 || ^8"
+ }
+ },
+ "node_modules/eslint-plugin-import/node_modules/debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dependencies": {
+ "ms": "2.0.0"
+ }
+ },
+ "node_modules/eslint-plugin-import/node_modules/doctrine": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz",
+ "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==",
+ "dependencies": {
+ "esutils": "^2.0.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/eslint-plugin-import/node_modules/ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
+ },
+ "node_modules/eslint-plugin-jsx-a11y": {
+ "version": "6.6.1",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-jsx-a11y/-/eslint-plugin-jsx-a11y-6.6.1.tgz",
+ "integrity": "sha512-sXgFVNHiWffBq23uiS/JaP6eVR622DqwB4yTzKvGZGcPq6/yZ3WmOZfuBks/vHWo9GaFOqC2ZK4i6+C35knx7Q==",
+ "dependencies": {
+ "@babel/runtime": "^7.18.9",
+ "aria-query": "^4.2.2",
+ "array-includes": "^3.1.5",
+ "ast-types-flow": "^0.0.7",
+ "axe-core": "^4.4.3",
+ "axobject-query": "^2.2.0",
+ "damerau-levenshtein": "^1.0.8",
+ "emoji-regex": "^9.2.2",
+ "has": "^1.0.3",
+ "jsx-ast-utils": "^3.3.2",
+ "language-tags": "^1.0.5",
+ "minimatch": "^3.1.2",
+ "semver": "^6.3.0"
+ },
+ "engines": {
+ "node": ">=4.0"
+ },
+ "peerDependencies": {
+ "eslint": "^3 || ^4 || ^5 || ^6 || ^7 || ^8"
+ }
+ },
+ "node_modules/eslint-plugin-react": {
+ "version": "7.31.8",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.31.8.tgz",
+ "integrity": "sha512-5lBTZmgQmARLLSYiwI71tiGVTLUuqXantZM6vlSY39OaDSV0M7+32K5DnLkmFrwTe+Ksz0ffuLUC91RUviVZfw==",
+ "dependencies": {
+ "array-includes": "^3.1.5",
+ "array.prototype.flatmap": "^1.3.0",
+ "doctrine": "^2.1.0",
+ "estraverse": "^5.3.0",
+ "jsx-ast-utils": "^2.4.1 || ^3.0.0",
+ "minimatch": "^3.1.2",
+ "object.entries": "^1.1.5",
+ "object.fromentries": "^2.0.5",
+ "object.hasown": "^1.1.1",
+ "object.values": "^1.1.5",
+ "prop-types": "^15.8.1",
+ "resolve": "^2.0.0-next.3",
+ "semver": "^6.3.0",
+ "string.prototype.matchall": "^4.0.7"
+ },
+ "engines": {
+ "node": ">=4"
+ },
+ "peerDependencies": {
+ "eslint": "^3 || ^4 || ^5 || ^6 || ^7 || ^8"
+ }
+ },
+ "node_modules/eslint-plugin-react-hooks": {
+ "version": "4.6.0",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-4.6.0.tgz",
+ "integrity": "sha512-oFc7Itz9Qxh2x4gNHStv3BqJq54ExXmfC+a1NjAta66IAN87Wu0R/QArgIS9qKzX3dXKPI9H5crl9QchNMY9+g==",
+ "engines": {
+ "node": ">=10"
+ },
+ "peerDependencies": {
+ "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0"
+ }
+ },
+ "node_modules/eslint-plugin-react/node_modules/doctrine": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz",
+ "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==",
+ "dependencies": {
+ "esutils": "^2.0.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/eslint-plugin-react/node_modules/resolve": {
+ "version": "2.0.0-next.4",
+ "resolved": "https://registry.npmjs.org/resolve/-/resolve-2.0.0-next.4.tgz",
+ "integrity": "sha512-iMDbmAWtfU+MHpxt/I5iWI7cY6YVEZUQ3MBgPQ++XD1PELuJHIl82xBmObyP2KyQmkNB2dsqF7seoQQiAn5yDQ==",
+ "dependencies": {
+ "is-core-module": "^2.9.0",
+ "path-parse": "^1.0.7",
+ "supports-preserve-symlinks-flag": "^1.0.0"
+ },
+ "bin": {
+ "resolve": "bin/resolve"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/eslint-plugin-turbo": {
+ "version": "0.0.4",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-turbo/-/eslint-plugin-turbo-0.0.4.tgz",
+ "integrity": "sha512-dfmYE/iPvoJInQq+5E/0mj140y/rYwKtzZkn3uVK8+nvwC5zmWKQ6ehMWrL4bYBkGzSgpOndZM+jOXhPQ2m8Cg==",
+ "peerDependencies": {
+ "eslint": "^7.23.0 || ^8.0.0"
+ }
+ },
+ "node_modules/eslint-scope": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz",
+ "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==",
+ "dependencies": {
+ "esrecurse": "^4.3.0",
+ "estraverse": "^4.1.1"
+ },
+ "engines": {
+ "node": ">=8.0.0"
+ }
+ },
+ "node_modules/eslint-scope/node_modules/estraverse": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz",
+ "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==",
+ "engines": {
+ "node": ">=4.0"
+ }
+ },
+ "node_modules/eslint-utils": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-2.1.0.tgz",
+ "integrity": "sha512-w94dQYoauyvlDc43XnGB8lU3Zt713vNChgt4EWwhXAP2XkBvndfxF0AgIqKOOasjPIPzj9JqgwkwbCYD0/V3Zg==",
+ "dependencies": {
+ "eslint-visitor-keys": "^1.1.0"
+ },
+ "engines": {
+ "node": ">=6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/mysticatea"
+ }
+ },
+ "node_modules/eslint-utils/node_modules/eslint-visitor-keys": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz",
+ "integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/eslint-visitor-keys": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-2.1.0.tgz",
+ "integrity": "sha512-0rSmRBzXgDzIsD6mGdJgevzgezI534Cer5L/vyMX0kHzT/jiB43jRhd9YUlMGYLQy2zprNmoT8qasCGtY+QaKw==",
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/eslint/node_modules/@babel/code-frame": {
+ "version": "7.12.11",
+ "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.12.11.tgz",
+ "integrity": "sha512-Zt1yodBx1UcyiePMSkWnU4hPqhwq7hGi2nFL1LeA3EUl+q2LQx16MISgJ0+z7dnmgvP9QtIleuETGOiOH1RcIw==",
+ "dependencies": {
+ "@babel/highlight": "^7.10.4"
+ }
+ },
+ "node_modules/eslint/node_modules/ansi-styles": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+ "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "dependencies": {
+ "color-convert": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+ }
+ },
+ "node_modules/eslint/node_modules/chalk": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
+ "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
+ "dependencies": {
+ "ansi-styles": "^4.1.0",
+ "supports-color": "^7.1.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/chalk?sponsor=1"
+ }
+ },
+ "node_modules/eslint/node_modules/color-convert": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+ "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "dependencies": {
+ "color-name": "~1.1.4"
+ },
+ "engines": {
+ "node": ">=7.0.0"
+ }
+ },
+ "node_modules/eslint/node_modules/color-name": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="
+ },
+ "node_modules/eslint/node_modules/escape-string-regexp": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz",
+ "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/eslint/node_modules/globals": {
+ "version": "13.17.0",
+ "resolved": "https://registry.npmjs.org/globals/-/globals-13.17.0.tgz",
+ "integrity": "sha512-1C+6nQRb1GwGMKm2dH/E7enFAMxGTmGI7/dEdhy/DNelv85w9B72t3uc5frtMNXIbzrarJJ/lTCjcaZwbLJmyw==",
+ "dependencies": {
+ "type-fest": "^0.20.2"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/eslint/node_modules/has-flag": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
+ "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/eslint/node_modules/semver": {
+ "version": "7.3.7",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.7.tgz",
+ "integrity": "sha512-QlYTucUYOews+WeEujDoEGziz4K6c47V/Bd+LjSSYcA94p+DmINdf7ncaUinThfvZyu13lN9OY1XDxt8C0Tw0g==",
+ "dependencies": {
+ "lru-cache": "^6.0.0"
+ },
+ "bin": {
+ "semver": "bin/semver.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/eslint/node_modules/supports-color": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
+ "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
+ "dependencies": {
+ "has-flag": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/espree": {
+ "version": "7.3.1",
+ "resolved": "https://registry.npmjs.org/espree/-/espree-7.3.1.tgz",
+ "integrity": "sha512-v3JCNCE64umkFpmkFGqzVKsOT0tN1Zr+ueqLZfpV1Ob8e+CEgPWa+OxCoGH3tnhimMKIaBm4m/vaRpJ/krRz2g==",
+ "dependencies": {
+ "acorn": "^7.4.0",
+ "acorn-jsx": "^5.3.1",
+ "eslint-visitor-keys": "^1.3.0"
+ },
+ "engines": {
+ "node": "^10.12.0 || >=12.0.0"
+ }
+ },
+ "node_modules/espree/node_modules/eslint-visitor-keys": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz",
+ "integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/esprima": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz",
+ "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==",
+ "bin": {
+ "esparse": "bin/esparse.js",
+ "esvalidate": "bin/esvalidate.js"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/esquery": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.4.0.tgz",
+ "integrity": "sha512-cCDispWt5vHHtwMY2YrAQ4ibFkAL8RbH5YGBnZBc90MolvvfkkQcJro/aZiAQUlQ3qgrYS6D6v8Gc5G5CQsc9w==",
+ "dependencies": {
+ "estraverse": "^5.1.0"
+ },
+ "engines": {
+ "node": ">=0.10"
+ }
+ },
+ "node_modules/esrecurse": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz",
+ "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==",
+ "dependencies": {
+ "estraverse": "^5.2.0"
+ },
+ "engines": {
+ "node": ">=4.0"
+ }
+ },
+ "node_modules/estraverse": {
+ "version": "5.3.0",
+ "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz",
+ "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==",
+ "engines": {
+ "node": ">=4.0"
+ }
+ },
+ "node_modules/esutils": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz",
+ "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/fast-deep-equal": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
+ "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q=="
+ },
+ "node_modules/fast-glob": {
+ "version": "3.2.12",
+ "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.12.tgz",
+ "integrity": "sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==",
+ "dependencies": {
+ "@nodelib/fs.stat": "^2.0.2",
+ "@nodelib/fs.walk": "^1.2.3",
+ "glob-parent": "^5.1.2",
+ "merge2": "^1.3.0",
+ "micromatch": "^4.0.4"
+ },
+ "engines": {
+ "node": ">=8.6.0"
+ }
+ },
+ "node_modules/fast-json-stable-stringify": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz",
+ "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw=="
+ },
+ "node_modules/fast-levenshtein": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz",
+ "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw=="
+ },
+ "node_modules/fastq": {
+ "version": "1.13.0",
+ "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.13.0.tgz",
+ "integrity": "sha512-YpkpUnK8od0o1hmeSc7UUs/eB/vIPWJYjKck2QKIzAf71Vm1AAQ3EbuZB3g2JIy+pg+ERD0vqI79KyZiB2e2Nw==",
+ "dependencies": {
+ "reusify": "^1.0.4"
+ }
+ },
+ "node_modules/file-entry-cache": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz",
+ "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==",
+ "dependencies": {
+ "flat-cache": "^3.0.4"
+ },
+ "engines": {
+ "node": "^10.12.0 || >=12.0.0"
+ }
+ },
+ "node_modules/fill-range": {
+ "version": "7.0.1",
+ "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz",
+ "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==",
+ "dependencies": {
+ "to-regex-range": "^5.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/flat-cache": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.0.4.tgz",
+ "integrity": "sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==",
+ "dependencies": {
+ "flatted": "^3.1.0",
+ "rimraf": "^3.0.2"
+ },
+ "engines": {
+ "node": "^10.12.0 || >=12.0.0"
+ }
+ },
+ "node_modules/flatted": {
+ "version": "3.2.7",
+ "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.2.7.tgz",
+ "integrity": "sha512-5nqDSxl8nn5BSNxyR3n4I6eDmbolI6WT+QqR547RwxQapgjQBmtktdP+HTBb/a/zLsbzERTONyUB5pefh5TtjQ=="
+ },
+ "node_modules/fs.realpath": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
+ "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw=="
+ },
+ "node_modules/function-bind": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz",
+ "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A=="
+ },
+ "node_modules/function.prototype.name": {
+ "version": "1.1.5",
+ "resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.5.tgz",
+ "integrity": "sha512-uN7m/BzVKQnCUF/iW8jYea67v++2u7m5UgENbHRtdDVclOUP+FMPlCNdmk0h/ysGyo2tavMJEDqJAkJdRa1vMA==",
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.3",
+ "es-abstract": "^1.19.0",
+ "functions-have-names": "^1.2.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/functional-red-black-tree": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz",
+ "integrity": "sha512-dsKNQNdj6xA3T+QlADDA7mOSlX0qiMINjn0cgr+eGHGsbSHzTabcIogz2+p/iqP1Xs6EP/sS2SbqH+brGTbq0g=="
+ },
+ "node_modules/functions-have-names": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/functions-have-names/-/functions-have-names-1.2.3.tgz",
+ "integrity": "sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==",
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/gensync": {
+ "version": "1.0.0-beta.2",
+ "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz",
+ "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/get-intrinsic": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.3.tgz",
+ "integrity": "sha512-QJVz1Tj7MS099PevUG5jvnt9tSkXN8K14dxQlikJuPt4uD9hHAHjLyLBiLR5zELelBdD9QNRAXZzsJx0WaDL9A==",
+ "dependencies": {
+ "function-bind": "^1.1.1",
+ "has": "^1.0.3",
+ "has-symbols": "^1.0.3"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/get-symbol-description": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.0.0.tgz",
+ "integrity": "sha512-2EmdH1YvIQiZpltCNgkuiUnyukzxM/R6NDJX31Ke3BG1Nq5b0S2PhX59UKi9vZpPDQVdqn+1IcaAwnzTT5vCjw==",
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "get-intrinsic": "^1.1.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/glob": {
+ "version": "7.1.7",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.7.tgz",
+ "integrity": "sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==",
+ "dependencies": {
+ "fs.realpath": "^1.0.0",
+ "inflight": "^1.0.4",
+ "inherits": "2",
+ "minimatch": "^3.0.4",
+ "once": "^1.3.0",
+ "path-is-absolute": "^1.0.0"
+ },
+ "engines": {
+ "node": "*"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/glob-parent": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
+ "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
+ "dependencies": {
+ "is-glob": "^4.0.1"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/globals": {
+ "version": "11.12.0",
+ "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz",
+ "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==",
+ "dev": true,
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/globby": {
+ "version": "11.1.0",
+ "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz",
+ "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==",
+ "dependencies": {
+ "array-union": "^2.1.0",
+ "dir-glob": "^3.0.1",
+ "fast-glob": "^3.2.9",
+ "ignore": "^5.2.0",
+ "merge2": "^1.4.1",
+ "slash": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/globby/node_modules/ignore": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.0.tgz",
+ "integrity": "sha512-CmxgYGiEPCLhfLnpPp1MoRmifwEIOgjcHXxOBjv7mY96c+eWScsOP9c112ZyLdWHi0FxHjI+4uVhKYp/gcdRmQ==",
+ "engines": {
+ "node": ">= 4"
+ }
+ },
+ "node_modules/graceful-fs": {
+ "version": "4.2.10",
+ "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.10.tgz",
+ "integrity": "sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==",
+ "dev": true
+ },
+ "node_modules/has": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz",
+ "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==",
+ "dependencies": {
+ "function-bind": "^1.1.1"
+ },
+ "engines": {
+ "node": ">= 0.4.0"
+ }
+ },
+ "node_modules/has-bigints": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.2.tgz",
+ "integrity": "sha512-tSvCKtBr9lkF0Ex0aQiP9N+OpV4zi2r/Nee5VkRDbaqv35RLYMzbwQfFSZZH0kR+Rd6302UJZ2p/bJCEoR3VoQ==",
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-flag": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
+ "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/has-property-descriptors": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.0.tgz",
+ "integrity": "sha512-62DVLZGoiEBDHQyqG4w9xCuZ7eJEwNmJRWw2VY84Oedb7WFcA27fiEVe8oUQx9hAUJ4ekurquucTGwsyO1XGdQ==",
+ "dependencies": {
+ "get-intrinsic": "^1.1.1"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-symbols": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz",
+ "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-tostringtag": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.0.tgz",
+ "integrity": "sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==",
+ "dependencies": {
+ "has-symbols": "^1.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/ignore": {
+ "version": "4.0.6",
+ "resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz",
+ "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==",
+ "engines": {
+ "node": ">= 4"
+ }
+ },
+ "node_modules/import-fresh": {
+ "version": "3.3.0",
+ "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz",
+ "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==",
+ "dependencies": {
+ "parent-module": "^1.0.0",
+ "resolve-from": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/imurmurhash": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz",
+ "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==",
+ "engines": {
+ "node": ">=0.8.19"
+ }
+ },
+ "node_modules/inflight": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
+ "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==",
+ "dependencies": {
+ "once": "^1.3.0",
+ "wrappy": "1"
+ }
+ },
+ "node_modules/inherits": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
+ "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
+ },
+ "node_modules/internal-slot": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.0.3.tgz",
+ "integrity": "sha512-O0DB1JC/sPyZl7cIo78n5dR7eUSwwpYPiXRhTzNxZVAMUuB8vlnRFyLxdrVToks6XPLVnFfbzaVd5WLjhgg+vA==",
+ "dependencies": {
+ "get-intrinsic": "^1.1.0",
+ "has": "^1.0.3",
+ "side-channel": "^1.0.4"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/is-bigint": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.4.tgz",
+ "integrity": "sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg==",
+ "dependencies": {
+ "has-bigints": "^1.0.1"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-boolean-object": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.2.tgz",
+ "integrity": "sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==",
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "has-tostringtag": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-callable": {
+ "version": "1.2.7",
+ "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz",
+ "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-core-module": {
+ "version": "2.10.0",
+ "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.10.0.tgz",
+ "integrity": "sha512-Erxj2n/LDAZ7H8WNJXd9tw38GYM3dv8rk8Zcs+jJuxYTW7sozH+SS8NtrSjVL1/vpLvWi1hxy96IzjJ3EHTJJg==",
+ "dependencies": {
+ "has": "^1.0.3"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-date-object": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.5.tgz",
+ "integrity": "sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==",
+ "dependencies": {
+ "has-tostringtag": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-extglob": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
+ "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-fullwidth-code-point": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
+ "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/is-glob": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz",
+ "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==",
+ "dependencies": {
+ "is-extglob": "^2.1.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-negative-zero": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.2.tgz",
+ "integrity": "sha512-dqJvarLawXsFbNDeJW7zAz8ItJ9cd28YufuuFzh0G8pNHjJMnY08Dv7sYX2uF5UpQOwieAeOExEYAWWfu7ZZUA==",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-number": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
+ "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
+ "engines": {
+ "node": ">=0.12.0"
+ }
+ },
+ "node_modules/is-number-object": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.0.7.tgz",
+ "integrity": "sha512-k1U0IRzLMo7ZlYIfzRu23Oh6MiIFasgpb9X76eqfFZAqwH44UI4KTBvBYIZ1dSL9ZzChTB9ShHfLkR4pdW5krQ==",
+ "dependencies": {
+ "has-tostringtag": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-regex": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.4.tgz",
+ "integrity": "sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==",
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "has-tostringtag": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-shared-array-buffer": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.2.tgz",
+ "integrity": "sha512-sqN2UDu1/0y6uvXyStCOzyhAjCSlHceFoMKJW8W9EU9cvic/QdsZ0kEU93HEy3IUEFZIiH/3w+AH/UQbPHNdhA==",
+ "dependencies": {
+ "call-bind": "^1.0.2"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-string": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.7.tgz",
+ "integrity": "sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==",
+ "dependencies": {
+ "has-tostringtag": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-symbol": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.4.tgz",
+ "integrity": "sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==",
+ "dependencies": {
+ "has-symbols": "^1.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-weakref": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-weakref/-/is-weakref-1.0.2.tgz",
+ "integrity": "sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==",
+ "dependencies": {
+ "call-bind": "^1.0.2"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/isexe": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
+ "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw=="
+ },
+ "node_modules/js-tokens": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
+ "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ=="
+ },
+ "node_modules/js-yaml": {
+ "version": "3.14.1",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz",
+ "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==",
+ "dependencies": {
+ "argparse": "^1.0.7",
+ "esprima": "^4.0.0"
+ },
+ "bin": {
+ "js-yaml": "bin/js-yaml.js"
+ }
+ },
+ "node_modules/jsesc": {
+ "version": "2.5.2",
+ "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz",
+ "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==",
+ "dev": true,
+ "bin": {
+ "jsesc": "bin/jsesc"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/json-schema-traverse": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz",
+ "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg=="
+ },
+ "node_modules/json-stable-stringify-without-jsonify": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz",
+ "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw=="
+ },
+ "node_modules/json5": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.1.tgz",
+ "integrity": "sha512-1hqLFMSrGHRHxav9q9gNjJ5EXznIxGVO09xQRrwplcS8qs28pZ8s8hupZAmqDwZUmVZ2Qb2jnyPOWcDH8m8dlA==",
+ "dev": true,
+ "bin": {
+ "json5": "lib/cli.js"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/jsx-ast-utils": {
+ "version": "3.3.3",
+ "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.3.3.tgz",
+ "integrity": "sha512-fYQHZTZ8jSfmWZ0iyzfwiU4WDX4HpHbMCZ3gPlWYiCl3BoeOTsqKBqnTVfH2rYT7eP5c3sVbeSPHnnJOaTrWiw==",
+ "dependencies": {
+ "array-includes": "^3.1.5",
+ "object.assign": "^4.1.3"
+ },
+ "engines": {
+ "node": ">=4.0"
+ }
+ },
+ "node_modules/language-subtag-registry": {
+ "version": "0.3.22",
+ "resolved": "https://registry.npmjs.org/language-subtag-registry/-/language-subtag-registry-0.3.22.tgz",
+ "integrity": "sha512-tN0MCzyWnoz/4nHS6uxdlFWoUZT7ABptwKPQ52Ea7URk6vll88bWBVhodtnlfEuCcKWNGoc+uGbw1cwa9IKh/w=="
+ },
+ "node_modules/language-tags": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/language-tags/-/language-tags-1.0.5.tgz",
+ "integrity": "sha512-qJhlO9cGXi6hBGKoxEG/sKZDAHD5Hnu9Hs4WbOY3pCWXDhw0N8x1NenNzm2EnNLkLkk7J2SdxAkDSbb6ftT+UQ==",
+ "dependencies": {
+ "language-subtag-registry": "~0.3.2"
+ }
+ },
+ "node_modules/levn": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz",
+ "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==",
+ "dependencies": {
+ "prelude-ls": "^1.2.1",
+ "type-check": "~0.4.0"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/lodash": {
+ "version": "3.10.1",
+ "resolved": "https://registry.npmjs.org/lodash/-/lodash-3.10.1.tgz",
+ "integrity": "sha512-9mDDwqVIma6OZX79ZlDACZl8sBm0TEnkf99zV3iMA4GzkIT/9hiqP5mY0HoT1iNLCrKc/R1HByV+yJfRWVJryQ=="
+ },
+ "node_modules/lodash.merge": {
+ "version": "4.6.2",
+ "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz",
+ "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ=="
+ },
+ "node_modules/lodash.truncate": {
+ "version": "4.4.2",
+ "resolved": "https://registry.npmjs.org/lodash.truncate/-/lodash.truncate-4.4.2.tgz",
+ "integrity": "sha512-jttmRe7bRse52OsWIMDLaXxWqRAmtIUccAQ3garviCqJjafXOfNMO0yMfNpdD6zbGaTU0P5Nz7e7gAT6cKmJRw=="
+ },
+ "node_modules/loose-envify": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz",
+ "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==",
+ "dependencies": {
+ "js-tokens": "^3.0.0 || ^4.0.0"
+ },
+ "bin": {
+ "loose-envify": "cli.js"
+ }
+ },
+ "node_modules/lru-cache": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz",
+ "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==",
+ "dependencies": {
+ "yallist": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/merge2": {
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz",
+ "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==",
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/micromatch": {
+ "version": "4.0.5",
+ "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz",
+ "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==",
+ "dependencies": {
+ "braces": "^3.0.2",
+ "picomatch": "^2.3.1"
+ },
+ "engines": {
+ "node": ">=8.6"
+ }
+ },
+ "node_modules/minimatch": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
+ "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
+ "dependencies": {
+ "brace-expansion": "^1.1.7"
+ },
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/minimist": {
+ "version": "1.2.6",
+ "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz",
+ "integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q=="
+ },
+ "node_modules/ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
+ },
+ "node_modules/nanoid": {
+ "version": "3.3.4",
+ "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.4.tgz",
+ "integrity": "sha512-MqBkQh/OHTS2egovRtLk45wEyNXwF+cokD+1YPf9u5VfJiRdAiRwB2froX5Co9Rh20xs4siNPm8naNotSD6RBw==",
+ "bin": {
+ "nanoid": "bin/nanoid.cjs"
+ },
+ "engines": {
+ "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1"
+ }
+ },
+ "node_modules/natural-compare": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz",
+ "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw=="
+ },
+ "node_modules/next": {
+ "version": "12.3.0",
+ "resolved": "https://registry.npmjs.org/next/-/next-12.3.0.tgz",
+ "integrity": "sha512-GpzI6me9V1+XYtfK0Ae9WD0mKqHyzQlGq1xH1rzNIYMASo4Tkl4rTe9jSqtBpXFhOS33KohXs9ZY38Akkhdciw==",
+ "dependencies": {
+ "@next/env": "12.3.0",
+ "@swc/helpers": "0.4.11",
+ "caniuse-lite": "^1.0.30001332",
+ "postcss": "8.4.14",
+ "styled-jsx": "5.0.6",
+ "use-sync-external-store": "1.2.0"
+ },
+ "bin": {
+ "next": "dist/bin/next"
+ },
+ "engines": {
+ "node": ">=12.22.0"
+ },
+ "optionalDependencies": {
+ "@next/swc-android-arm-eabi": "12.3.0",
+ "@next/swc-android-arm64": "12.3.0",
+ "@next/swc-darwin-arm64": "12.3.0",
+ "@next/swc-darwin-x64": "12.3.0",
+ "@next/swc-freebsd-x64": "12.3.0",
+ "@next/swc-linux-arm-gnueabihf": "12.3.0",
+ "@next/swc-linux-arm64-gnu": "12.3.0",
+ "@next/swc-linux-arm64-musl": "12.3.0",
+ "@next/swc-linux-x64-gnu": "12.3.0",
+ "@next/swc-linux-x64-musl": "12.3.0",
+ "@next/swc-win32-arm64-msvc": "12.3.0",
+ "@next/swc-win32-ia32-msvc": "12.3.0",
+ "@next/swc-win32-x64-msvc": "12.3.0"
+ },
+ "peerDependencies": {
+ "fibers": ">= 3.1.0",
+ "node-sass": "^6.0.0 || ^7.0.0",
+ "react": "^17.0.2 || ^18.0.0-0",
+ "react-dom": "^17.0.2 || ^18.0.0-0",
+ "sass": "^1.3.0"
+ },
+ "peerDependenciesMeta": {
+ "fibers": {
+ "optional": true
+ },
+ "node-sass": {
+ "optional": true
+ },
+ "sass": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/next-transpile-modules": {
+ "version": "9.0.0",
+ "resolved": "https://registry.npmjs.org/next-transpile-modules/-/next-transpile-modules-9.0.0.tgz",
+ "integrity": "sha512-VCNFOazIAnXn1hvgYYSTYMnoWgKgwlYh4lm1pKbSfiB3kj5ZYLcKVhfh3jkPOg1cnd9DP+pte9yCUocdPEUBTQ==",
+ "dev": true,
+ "dependencies": {
+ "enhanced-resolve": "^5.7.0",
+ "escalade": "^3.1.1"
+ }
+ },
+ "node_modules/node-releases": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.6.tgz",
+ "integrity": "sha512-PiVXnNuFm5+iYkLBNeq5211hvO38y63T0i2KKh2KnUs3RpzJ+JtODFjkD8yjLwnDkTYF1eKXheUwdssR+NRZdg==",
+ "dev": true
+ },
+ "node_modules/object-assign": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
+ "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/object-inspect": {
+ "version": "1.12.2",
+ "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.2.tgz",
+ "integrity": "sha512-z+cPxW0QGUp0mcqcsgQyLVRDoXFQbXOwBaqyF7VIgI4TWNQsDHrBpUQslRmIfAoYWdYzs6UlKJtB2XJpTaNSpQ==",
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/object-keys": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz",
+ "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/object.assign": {
+ "version": "4.1.4",
+ "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.4.tgz",
+ "integrity": "sha512-1mxKf0e58bvyjSCtKYY4sRe9itRk3PJpquJOjeIkz885CczcI4IvJJDLPS72oowuSh+pBxUFROpX+TU++hxhZQ==",
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.4",
+ "has-symbols": "^1.0.3",
+ "object-keys": "^1.1.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/object.entries": {
+ "version": "1.1.5",
+ "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.5.tgz",
+ "integrity": "sha512-TyxmjUoZggd4OrrU1W66FMDG6CuqJxsFvymeyXI51+vQLN67zYfZseptRge703kKQdo4uccgAKebXFcRCzk4+g==",
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.3",
+ "es-abstract": "^1.19.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/object.fromentries": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.5.tgz",
+ "integrity": "sha512-CAyG5mWQRRiBU57Re4FKoTBjXfDoNwdFVH2Y1tS9PqCsfUTymAohOkEMSG3aRNKmv4lV3O7p1et7c187q6bynw==",
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.3",
+ "es-abstract": "^1.19.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/object.hasown": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/object.hasown/-/object.hasown-1.1.1.tgz",
+ "integrity": "sha512-LYLe4tivNQzq4JdaWW6WO3HMZZJWzkkH8fnI6EebWl0VZth2wL2Lovm74ep2/gZzlaTdV62JZHEqHQ2yVn8Q/A==",
+ "dependencies": {
+ "define-properties": "^1.1.4",
+ "es-abstract": "^1.19.5"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/object.values": {
+ "version": "1.1.5",
+ "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.5.tgz",
+ "integrity": "sha512-QUZRW0ilQ3PnPpbNtgdNV1PDbEqLIiSFB3l+EnGtBQ/8SUTLj1PZwtQHABZtLgwpJZTSZhuGLOGk57Drx2IvYg==",
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.3",
+ "es-abstract": "^1.19.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/once": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
+ "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
+ "dependencies": {
+ "wrappy": "1"
+ }
+ },
+ "node_modules/optionator": {
+ "version": "0.9.1",
+ "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.1.tgz",
+ "integrity": "sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw==",
+ "dependencies": {
+ "deep-is": "^0.1.3",
+ "fast-levenshtein": "^2.0.6",
+ "levn": "^0.4.1",
+ "prelude-ls": "^1.2.1",
+ "type-check": "^0.4.0",
+ "word-wrap": "^1.2.3"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/parent-module": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz",
+ "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==",
+ "dependencies": {
+ "callsites": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/path-is-absolute": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
+ "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/path-key": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
+ "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/path-parse": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
+ "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw=="
+ },
+ "node_modules/path-type": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz",
+ "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/picocolors": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz",
+ "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ=="
+ },
+ "node_modules/picomatch": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
+ "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
+ "engines": {
+ "node": ">=8.6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/jonschlinkert"
+ }
+ },
+ "node_modules/postcss": {
+ "version": "8.4.14",
+ "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.14.tgz",
+ "integrity": "sha512-E398TUmfAYFPBSdzgeieK2Y1+1cpdxJx8yXbK/m57nRhKSmk1GB2tO4lbLBtlkfPQTDKfe4Xqv1ASWPpayPEig==",
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/postcss"
+ }
+ ],
+ "dependencies": {
+ "nanoid": "^3.3.4",
+ "picocolors": "^1.0.0",
+ "source-map-js": "^1.0.2"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14"
+ }
+ },
+ "node_modules/prelude-ls": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz",
+ "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==",
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/prettier": {
+ "version": "2.7.1",
+ "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.7.1.tgz",
+ "integrity": "sha512-ujppO+MkdPqoVINuDFDRLClm7D78qbDt0/NR+wp5FqEZOoTNAjPHWj17QRhu7geIHJfcNhRk1XVQmF8Bp3ye+g==",
+ "dev": true,
+ "bin": {
+ "prettier": "bin-prettier.js"
+ },
+ "engines": {
+ "node": ">=10.13.0"
+ },
+ "funding": {
+ "url": "https://github.com/prettier/prettier?sponsor=1"
+ }
+ },
+ "node_modules/progress": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz",
+ "integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==",
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/prop-types": {
+ "version": "15.8.1",
+ "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz",
+ "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==",
+ "dependencies": {
+ "loose-envify": "^1.4.0",
+ "object-assign": "^4.1.1",
+ "react-is": "^16.13.1"
+ }
+ },
+ "node_modules/punycode": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz",
+ "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/queue-microtask": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz",
+ "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ]
+ },
+ "node_modules/react": {
+ "version": "18.2.0",
+ "resolved": "https://registry.npmjs.org/react/-/react-18.2.0.tgz",
+ "integrity": "sha512-/3IjMdb2L9QbBdWiW5e3P2/npwMBaU9mHCSCUzNln0ZCYbcfTsGbTJrU/kGemdH2IWmB2ioZ+zkxtmq6g09fGQ==",
+ "dependencies": {
+ "loose-envify": "^1.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/react-dom": {
+ "version": "18.2.0",
+ "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.2.0.tgz",
+ "integrity": "sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g==",
+ "dependencies": {
+ "loose-envify": "^1.1.0",
+ "scheduler": "^0.23.0"
+ },
+ "peerDependencies": {
+ "react": "^18.2.0"
+ }
+ },
+ "node_modules/react-is": {
+ "version": "16.13.1",
+ "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz",
+ "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ=="
+ },
+ "node_modules/regenerator-runtime": {
+ "version": "0.13.9",
+ "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.9.tgz",
+ "integrity": "sha512-p3VT+cOEgxFsRRA9X4lkI1E+k2/CtnKtU4gcxyaCUreilL/vqI6CdZ3wxVUx3UOUg+gnUOQQcRI7BmSI656MYA=="
+ },
+ "node_modules/regexp.prototype.flags": {
+ "version": "1.4.3",
+ "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.4.3.tgz",
+ "integrity": "sha512-fjggEOO3slI6Wvgjwflkc4NFRCTZAu5CnNfBd5qOMYhWdn67nJBBu34/TkD++eeFmd8C9r9jfXJ27+nSiRkSUA==",
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.3",
+ "functions-have-names": "^1.2.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/regexpp": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-3.2.0.tgz",
+ "integrity": "sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg==",
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/mysticatea"
+ }
+ },
+ "node_modules/require-from-string": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz",
+ "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/resolve": {
+ "version": "1.22.1",
+ "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.1.tgz",
+ "integrity": "sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==",
+ "dependencies": {
+ "is-core-module": "^2.9.0",
+ "path-parse": "^1.0.7",
+ "supports-preserve-symlinks-flag": "^1.0.0"
+ },
+ "bin": {
+ "resolve": "bin/resolve"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/resolve-from": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz",
+ "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/reusify": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz",
+ "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==",
+ "engines": {
+ "iojs": ">=1.0.0",
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/rimraf": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz",
+ "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==",
+ "dependencies": {
+ "glob": "^7.1.3"
+ },
+ "bin": {
+ "rimraf": "bin.js"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/run-parallel": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz",
+ "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ],
+ "dependencies": {
+ "queue-microtask": "^1.2.2"
+ }
+ },
+ "node_modules/safe-buffer": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
+ "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==",
+ "dev": true
+ },
+ "node_modules/safe-regex-test": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.0.0.tgz",
+ "integrity": "sha512-JBUUzyOgEwXQY1NuPtvcj/qcBDbDmEvWufhlnXZIm75DEHp+afM1r1ujJpJsV/gSM4t59tpDyPi1sd6ZaPFfsA==",
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "get-intrinsic": "^1.1.3",
+ "is-regex": "^1.1.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/scheduler": {
+ "version": "0.23.0",
+ "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.0.tgz",
+ "integrity": "sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw==",
+ "dependencies": {
+ "loose-envify": "^1.1.0"
+ }
+ },
+ "node_modules/semver": {
+ "version": "6.3.0",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz",
+ "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==",
+ "bin": {
+ "semver": "bin/semver.js"
+ }
+ },
+ "node_modules/shebang-command": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
+ "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
+ "dependencies": {
+ "shebang-regex": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/shebang-regex": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
+ "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/side-channel": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz",
+ "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==",
+ "dependencies": {
+ "call-bind": "^1.0.0",
+ "get-intrinsic": "^1.0.2",
+ "object-inspect": "^1.9.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/slash": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz",
+ "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/slice-ansi": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-4.0.0.tgz",
+ "integrity": "sha512-qMCMfhY040cVHT43K9BFygqYbUPFZKHOg7K73mtTWJRb8pyP3fzf4Ixd5SzdEJQ6MRUg/WBnOLxghZtKKurENQ==",
+ "dependencies": {
+ "ansi-styles": "^4.0.0",
+ "astral-regex": "^2.0.0",
+ "is-fullwidth-code-point": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/slice-ansi?sponsor=1"
+ }
+ },
+ "node_modules/slice-ansi/node_modules/ansi-styles": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+ "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "dependencies": {
+ "color-convert": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+ }
+ },
+ "node_modules/slice-ansi/node_modules/color-convert": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+ "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "dependencies": {
+ "color-name": "~1.1.4"
+ },
+ "engines": {
+ "node": ">=7.0.0"
+ }
+ },
+ "node_modules/slice-ansi/node_modules/color-name": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="
+ },
+ "node_modules/source-map-js": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz",
+ "integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/sprintf-js": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz",
+ "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g=="
+ },
+ "node_modules/string-width": {
+ "version": "4.2.3",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
+ "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
+ "dependencies": {
+ "emoji-regex": "^8.0.0",
+ "is-fullwidth-code-point": "^3.0.0",
+ "strip-ansi": "^6.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/string-width/node_modules/emoji-regex": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
+ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="
+ },
+ "node_modules/string.prototype.matchall": {
+ "version": "4.0.7",
+ "resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.7.tgz",
+ "integrity": "sha512-f48okCX7JiwVi1NXCVWcFnZgADDC/n2vePlQ/KUCNqCikLLilQvwjMO8+BHVKvgzH0JB0J9LEPgxOGT02RoETg==",
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.3",
+ "es-abstract": "^1.19.1",
+ "get-intrinsic": "^1.1.1",
+ "has-symbols": "^1.0.3",
+ "internal-slot": "^1.0.3",
+ "regexp.prototype.flags": "^1.4.1",
+ "side-channel": "^1.0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/string.prototype.trimend": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.5.tgz",
+ "integrity": "sha512-I7RGvmjV4pJ7O3kdf+LXFpVfdNOxtCW/2C8f6jNiW4+PQchwxkCDzlk1/7p+Wl4bqFIZeF47qAHXLuHHWKAxog==",
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.4",
+ "es-abstract": "^1.19.5"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/string.prototype.trimstart": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.5.tgz",
+ "integrity": "sha512-THx16TJCGlsN0o6dl2o6ncWUsdgnLRSA23rRE5pyGBw/mLr3Ej/R2LaqCtgP8VNMGZsvMWnf9ooZPyY2bHvUFg==",
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.4",
+ "es-abstract": "^1.19.5"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/strip-ansi": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
+ "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
+ "dependencies": {
+ "ansi-regex": "^5.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/strip-bom": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz",
+ "integrity": "sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/strip-json-comments": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz",
+ "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==",
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/styled-jsx": {
+ "version": "5.0.6",
+ "resolved": "https://registry.npmjs.org/styled-jsx/-/styled-jsx-5.0.6.tgz",
+ "integrity": "sha512-xOeROtkK5MGMDimBQ3J6iPId8q0t/BDoG5XN6oKkZClVz9ISF/hihN8OCn2LggMU6N32aXnrXBdn3auSqNS9fA==",
+ "engines": {
+ "node": ">= 12.0.0"
+ },
+ "peerDependencies": {
+ "react": ">= 16.8.0 || 17.x.x || ^18.0.0-0"
+ },
+ "peerDependenciesMeta": {
+ "@babel/core": {
+ "optional": true
+ },
+ "babel-plugin-macros": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/supports-color": {
+ "version": "5.5.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
+ "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
+ "dependencies": {
+ "has-flag": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/supports-preserve-symlinks-flag": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz",
+ "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/table": {
+ "version": "6.8.0",
+ "resolved": "https://registry.npmjs.org/table/-/table-6.8.0.tgz",
+ "integrity": "sha512-s/fitrbVeEyHKFa7mFdkuQMWlH1Wgw/yEXMt5xACT4ZpzWFluehAxRtUUQKPuWhaLAWhFcVx6w3oC8VKaUfPGA==",
+ "dependencies": {
+ "ajv": "^8.0.1",
+ "lodash.truncate": "^4.4.2",
+ "slice-ansi": "^4.0.0",
+ "string-width": "^4.2.3",
+ "strip-ansi": "^6.0.1"
+ },
+ "engines": {
+ "node": ">=10.0.0"
+ }
+ },
+ "node_modules/table/node_modules/ajv": {
+ "version": "8.11.0",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.11.0.tgz",
+ "integrity": "sha512-wGgprdCvMalC0BztXvitD2hC04YffAvtsUn93JbGXYLAtCUO4xd17mCCZQxUOItiBwZvJScWo8NIvQMQ71rdpg==",
+ "dependencies": {
+ "fast-deep-equal": "^3.1.1",
+ "json-schema-traverse": "^1.0.0",
+ "require-from-string": "^2.0.2",
+ "uri-js": "^4.2.2"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/epoberezkin"
+ }
+ },
+ "node_modules/table/node_modules/json-schema-traverse": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
+ "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="
+ },
+ "node_modules/tapable": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz",
+ "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/text-table": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz",
+ "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw=="
+ },
+ "node_modules/to-fast-properties": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz",
+ "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==",
+ "dev": true,
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/to-regex-range": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
+ "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
+ "dependencies": {
+ "is-number": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=8.0"
+ }
+ },
+ "node_modules/tsconfig": {
+ "resolved": "packages/tsconfig",
+ "link": true
+ },
+ "node_modules/tsconfig-paths": {
+ "version": "3.14.1",
+ "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.14.1.tgz",
+ "integrity": "sha512-fxDhWnFSLt3VuTwtvJt5fpwxBHg5AdKWMsgcPOOIilyjymcYVZoCQF8fvFRezCNfblEXmi+PcM1eYHeOAgXCOQ==",
+ "dependencies": {
+ "@types/json5": "^0.0.29",
+ "json5": "^1.0.1",
+ "minimist": "^1.2.6",
+ "strip-bom": "^3.0.0"
+ }
+ },
+ "node_modules/tsconfig-paths/node_modules/json5": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz",
+ "integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==",
+ "dependencies": {
+ "minimist": "^1.2.0"
+ },
+ "bin": {
+ "json5": "lib/cli.js"
+ }
+ },
+ "node_modules/tslib": {
+ "version": "2.4.0",
+ "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.4.0.tgz",
+ "integrity": "sha512-d6xOpEDfsi2CZVlPQzGeux8XMwLT9hssAsaPYExaQMuYskwb+x1x7J371tWlbBdWHroy99KnVB6qIkUbs5X3UQ=="
+ },
+ "node_modules/tsutils": {
+ "version": "3.21.0",
+ "resolved": "https://registry.npmjs.org/tsutils/-/tsutils-3.21.0.tgz",
+ "integrity": "sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==",
+ "dependencies": {
+ "tslib": "^1.8.1"
+ },
+ "engines": {
+ "node": ">= 6"
+ },
+ "peerDependencies": {
+ "typescript": ">=2.8.0 || >= 3.2.0-dev || >= 3.3.0-dev || >= 3.4.0-dev || >= 3.5.0-dev || >= 3.6.0-dev || >= 3.6.0-beta || >= 3.7.0-dev || >= 3.7.0-beta"
+ }
+ },
+ "node_modules/tsutils/node_modules/tslib": {
+ "version": "1.14.1",
+ "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz",
+ "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg=="
+ },
+ "node_modules/turbo": {
+ "version": "1.5.5",
+ "resolved": "https://registry.npmjs.org/turbo/-/turbo-1.5.5.tgz",
+ "integrity": "sha512-PVQSDl0STC9WXIyHcYUWs9gXsf8JjQig/FuHfuB8N6+XlgCGB3mPbfMEE6zrChGz2hufH4/guKRX1XJuNL6XTA==",
+ "dev": true,
+ "hasInstallScript": true,
+ "bin": {
+ "turbo": "bin/turbo"
+ },
+ "optionalDependencies": {
+ "turbo-darwin-64": "1.5.5",
+ "turbo-darwin-arm64": "1.5.5",
+ "turbo-linux-64": "1.5.5",
+ "turbo-linux-arm64": "1.5.5",
+ "turbo-windows-64": "1.5.5",
+ "turbo-windows-arm64": "1.5.5"
+ }
+ },
+ "node_modules/turbo-darwin-64": {
+ "version": "1.5.5",
+ "resolved": "https://registry.npmjs.org/turbo-darwin-64/-/turbo-darwin-64-1.5.5.tgz",
+ "integrity": "sha512-HvEn6P2B+NXDekq9LRpRgUjcT9/oygLTcK47U0qsAJZXRBSq/2hvD7lx4nAwgY/4W3rhYJeWtHTzbhoN6BXqGQ==",
+ "cpu": ["x64"],
+ "dev": true,
+ "optional": true,
+ "os": ["darwin"]
+ },
+ "node_modules/turbo-darwin-arm64": {
+ "version": "1.5.5",
+ "resolved": "https://registry.npmjs.org/turbo-darwin-arm64/-/turbo-darwin-arm64-1.5.5.tgz",
+ "integrity": "sha512-Dmxr09IUy6M0nc7/xWod9galIO2DD500B75sJSkHeT+CCdJOWnlinux0ZPF8CSygNqymwYO8AO2l15/6yxcycg==",
+ "cpu": ["arm64"],
+ "dev": true,
+ "optional": true,
+ "os": ["darwin"]
+ },
+ "node_modules/turbo-linux-64": {
+ "version": "1.5.5",
+ "resolved": "https://registry.npmjs.org/turbo-linux-64/-/turbo-linux-64-1.5.5.tgz",
+ "integrity": "sha512-wd07TZ4zXXWjzZE00FcFMLmkybQQK/NV9ff66vvAV0vdiuacSMBCNLrD6Mm4ncfrUPW/rwFW5kU/7hyuEqqtDw==",
+ "cpu": ["x64"],
+ "dev": true,
+ "optional": true,
+ "os": ["linux"]
+ },
+ "node_modules/turbo-linux-arm64": {
+ "version": "1.5.5",
+ "resolved": "https://registry.npmjs.org/turbo-linux-arm64/-/turbo-linux-arm64-1.5.5.tgz",
+ "integrity": "sha512-q3q33tuo74R7gicnfvFbnZZvqmlq7Vakcvx0eshifnJw4PR+oMnTCb4w8ElVFx070zsb8DVTibq99y8NJH8T1Q==",
+ "cpu": ["arm64"],
+ "dev": true,
+ "optional": true,
+ "os": ["linux"]
+ },
+ "node_modules/turbo-windows-64": {
+ "version": "1.5.5",
+ "resolved": "https://registry.npmjs.org/turbo-windows-64/-/turbo-windows-64-1.5.5.tgz",
+ "integrity": "sha512-lPp9kHonNFfqgovbaW+UAPO5cLmoAN+m3G3FzqcrRPnlzt97vXYsDhDd/4Zy3oAKoAcprtP4CGy0ddisqsKTVw==",
+ "cpu": ["x64"],
+ "dev": true,
+ "optional": true,
+ "os": ["win32"]
+ },
+ "node_modules/turbo-windows-arm64": {
+ "version": "1.5.5",
+ "resolved": "https://registry.npmjs.org/turbo-windows-arm64/-/turbo-windows-arm64-1.5.5.tgz",
+ "integrity": "sha512-3AfGULKNZiZVrEzsIE+W79ZRW1+f5r4nM4wLlJ1PTBHyRxBZdD6KTH1tijGfy/uTlcV5acYnKHEkDc6Q9PAXGQ==",
+ "cpu": ["arm64"],
+ "dev": true,
+ "optional": true,
+ "os": ["win32"]
+ },
+ "node_modules/type-check": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz",
+ "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==",
+ "dependencies": {
+ "prelude-ls": "^1.2.1"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/type-fest": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz",
+ "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/typescript": {
+ "version": "4.8.4",
+ "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.8.4.tgz",
+ "integrity": "sha512-QCh+85mCy+h0IGff8r5XWzOVSbBO+KfeYrMQh7NJ58QujwcE22u+NUSmUxqF+un70P9GXKxa2HCNiTTMJknyjQ==",
+ "bin": {
+ "tsc": "bin/tsc",
+ "tsserver": "bin/tsserver"
+ },
+ "engines": {
+ "node": ">=4.2.0"
+ }
+ },
+ "node_modules/ui": {
+ "resolved": "packages/ui",
+ "link": true
+ },
+ "node_modules/unbox-primitive": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.0.2.tgz",
+ "integrity": "sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw==",
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "has-bigints": "^1.0.2",
+ "has-symbols": "^1.0.3",
+ "which-boxed-primitive": "^1.0.2"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/update-browserslist-db": {
+ "version": "1.0.9",
+ "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.9.tgz",
+ "integrity": "sha512-/xsqn21EGVdXI3EXSum1Yckj3ZVZugqyOZQ/CxYPBD/R+ko9NSUScf8tFF4dOKY+2pvSSJA/S+5B8s4Zr4kyvg==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/browserslist"
+ }
+ ],
+ "dependencies": {
+ "escalade": "^3.1.1",
+ "picocolors": "^1.0.0"
+ },
+ "bin": {
+ "browserslist-lint": "cli.js"
+ },
+ "peerDependencies": {
+ "browserslist": ">= 4.21.0"
+ }
+ },
+ "node_modules/uri-js": {
+ "version": "4.4.1",
+ "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz",
+ "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==",
+ "dependencies": {
+ "punycode": "^2.1.0"
+ }
+ },
+ "node_modules/use-sync-external-store": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.2.0.tgz",
+ "integrity": "sha512-eEgnFxGQ1Ife9bzYs6VLi8/4X6CObHMw9Qr9tPY43iKwsPw8xE8+EFsf/2cFZ5S3esXgpWgtSCtLNS41F+sKPA==",
+ "peerDependencies": {
+ "react": "^16.8.0 || ^17.0.0 || ^18.0.0"
+ }
+ },
+ "node_modules/v8-compile-cache": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.3.0.tgz",
+ "integrity": "sha512-l8lCEmLcLYZh4nbunNZvQCJc5pv7+RCwa8q/LdUx8u7lsWvPDKmpodJAJNwkAhJC//dFY48KuIEmjtd4RViDrA=="
+ },
+ "node_modules/web": {
+ "resolved": "apps/web",
+ "link": true
+ },
+ "node_modules/which": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
+ "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
+ "dependencies": {
+ "isexe": "^2.0.0"
+ },
+ "bin": {
+ "node-which": "bin/node-which"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/which-boxed-primitive": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz",
+ "integrity": "sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==",
+ "dependencies": {
+ "is-bigint": "^1.0.1",
+ "is-boolean-object": "^1.1.0",
+ "is-number-object": "^1.0.4",
+ "is-string": "^1.0.5",
+ "is-symbol": "^1.0.3"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/word-wrap": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz",
+ "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/wrappy": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
+ "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ=="
+ },
+ "node_modules/yallist": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
+ "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A=="
+ },
+ "packages/eslint-config-custom": {
+ "version": "0.0.0",
+ "license": "MIT",
+ "dependencies": {
+ "eslint": "^7.23.0",
+ "eslint-config-next": "^12.0.8",
+ "eslint-config-prettier": "^8.3.0",
+ "eslint-config-turbo": "latest",
+ "eslint-plugin-react": "7.31.8"
+ },
+ "devDependencies": {
+ "typescript": "^4.7.4"
+ }
+ },
+ "packages/tsconfig": {
+ "version": "0.0.0"
+ },
+ "packages/ui": {
+ "version": "0.0.0",
+ "license": "MIT",
+ "devDependencies": {
+ "@types/react": "^17.0.37",
+ "@types/react-dom": "^17.0.11",
+ "eslint": "^7.32.0",
+ "eslint-config-custom": "*",
+ "react": "^18.2.0",
+ "tsconfig": "*",
+ "typescript": "^4.5.2"
+ }
+ },
+ "packages/ui/node_modules/@types/react": {
+ "version": "17.0.50",
+ "resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.50.tgz",
+ "integrity": "sha512-ZCBHzpDb5skMnc1zFXAXnL3l1FAdi+xZvwxK+PkglMmBrwjpp9nKaWuEvrGnSifCJmBFGxZOOFuwC6KH/s0NuA==",
+ "dev": true,
+ "dependencies": {
+ "@types/prop-types": "*",
+ "@types/scheduler": "*",
+ "csstype": "^3.0.2"
+ }
+ }
+ },
+ "dependencies": {
+ "@ampproject/remapping": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.0.tgz",
+ "integrity": "sha512-qRmjj8nj9qmLTQXXmaR1cck3UXSRMPrbsLJAasZpF+t3riI71BXed5ebIOYwQntykeZuhjsdweEc9BxH5Jc26w==",
+ "dev": true,
+ "requires": {
+ "@jridgewell/gen-mapping": "^0.1.0",
+ "@jridgewell/trace-mapping": "^0.3.9"
+ }
+ },
+ "@babel/code-frame": {
+ "version": "7.18.6",
+ "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.18.6.tgz",
+ "integrity": "sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q==",
+ "dev": true,
+ "requires": {
+ "@babel/highlight": "^7.18.6"
+ }
+ },
+ "@babel/compat-data": {
+ "version": "7.19.3",
+ "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.19.3.tgz",
+ "integrity": "sha512-prBHMK4JYYK+wDjJF1q99KK4JLL+egWS4nmNqdlMUgCExMZ+iZW0hGhyC3VEbsPjvaN0TBhW//VIFwBrk8sEiw==",
+ "dev": true
+ },
+ "@babel/core": {
+ "version": "7.19.3",
+ "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.19.3.tgz",
+ "integrity": "sha512-WneDJxdsjEvyKtXKsaBGbDeiyOjR5vYq4HcShxnIbG0qixpoHjI3MqeZM9NDvsojNCEBItQE4juOo/bU6e72gQ==",
+ "dev": true,
+ "requires": {
+ "@ampproject/remapping": "^2.1.0",
+ "@babel/code-frame": "^7.18.6",
+ "@babel/generator": "^7.19.3",
+ "@babel/helper-compilation-targets": "^7.19.3",
+ "@babel/helper-module-transforms": "^7.19.0",
+ "@babel/helpers": "^7.19.0",
+ "@babel/parser": "^7.19.3",
+ "@babel/template": "^7.18.10",
+ "@babel/traverse": "^7.19.3",
+ "@babel/types": "^7.19.3",
+ "convert-source-map": "^1.7.0",
+ "debug": "^4.1.0",
+ "gensync": "^1.0.0-beta.2",
+ "json5": "^2.2.1",
+ "semver": "^6.3.0"
+ }
+ },
+ "@babel/generator": {
+ "version": "7.19.3",
+ "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.19.3.tgz",
+ "integrity": "sha512-fqVZnmp1ncvZU757UzDheKZpfPgatqY59XtW2/j/18H7u76akb8xqvjw82f+i2UKd/ksYsSick/BCLQUUtJ/qQ==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.19.3",
+ "@jridgewell/gen-mapping": "^0.3.2",
+ "jsesc": "^2.5.1"
+ },
+ "dependencies": {
+ "@jridgewell/gen-mapping": {
+ "version": "0.3.2",
+ "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.2.tgz",
+ "integrity": "sha512-mh65xKQAzI6iBcFzwv28KVWSmCkdRBWoOh+bYQGW3+6OZvbbN3TqMGo5hqYxQniRcH9F2VZIoJCm4pa3BPDK/A==",
+ "dev": true,
+ "requires": {
+ "@jridgewell/set-array": "^1.0.1",
+ "@jridgewell/sourcemap-codec": "^1.4.10",
+ "@jridgewell/trace-mapping": "^0.3.9"
+ }
+ }
+ }
+ },
+ "@babel/helper-compilation-targets": {
+ "version": "7.19.3",
+ "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.19.3.tgz",
+ "integrity": "sha512-65ESqLGyGmLvgR0mst5AdW1FkNlj9rQsCKduzEoEPhBCDFGXvz2jW6bXFG6i0/MrV2s7hhXjjb2yAzcPuQlLwg==",
+ "dev": true,
+ "requires": {
+ "@babel/compat-data": "^7.19.3",
+ "@babel/helper-validator-option": "^7.18.6",
+ "browserslist": "^4.21.3",
+ "semver": "^6.3.0"
+ }
+ },
+ "@babel/helper-environment-visitor": {
+ "version": "7.18.9",
+ "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.18.9.tgz",
+ "integrity": "sha512-3r/aACDJ3fhQ/EVgFy0hpj8oHyHpQc+LPtJoY9SzTThAsStm4Ptegq92vqKoE3vD706ZVFWITnMnxucw+S9Ipg==",
+ "dev": true
+ },
+ "@babel/helper-function-name": {
+ "version": "7.19.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.19.0.tgz",
+ "integrity": "sha512-WAwHBINyrpqywkUH0nTnNgI5ina5TFn85HKS0pbPDfxFfhyR/aNQEn4hGi1P1JyT//I0t4OgXUlofzWILRvS5w==",
+ "dev": true,
+ "requires": {
+ "@babel/template": "^7.18.10",
+ "@babel/types": "^7.19.0"
+ }
+ },
+ "@babel/helper-hoist-variables": {
+ "version": "7.18.6",
+ "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.18.6.tgz",
+ "integrity": "sha512-UlJQPkFqFULIcyW5sbzgbkxn2FKRgwWiRexcuaR8RNJRy8+LLveqPjwZV/bwrLZCN0eUHD/x8D0heK1ozuoo6Q==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.18.6"
+ }
+ },
+ "@babel/helper-module-imports": {
+ "version": "7.18.6",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.18.6.tgz",
+ "integrity": "sha512-0NFvs3VkuSYbFi1x2Vd6tKrywq+z/cLeYC/RJNFrIX/30Bf5aiGYbtvGXolEktzJH8o5E5KJ3tT+nkxuuZFVlA==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.18.6"
+ }
+ },
+ "@babel/helper-module-transforms": {
+ "version": "7.19.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.19.0.tgz",
+ "integrity": "sha512-3HBZ377Fe14RbLIA+ac3sY4PTgpxHVkFrESaWhoI5PuyXPBBX8+C34qblV9G89ZtycGJCmCI/Ut+VUDK4bltNQ==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-environment-visitor": "^7.18.9",
+ "@babel/helper-module-imports": "^7.18.6",
+ "@babel/helper-simple-access": "^7.18.6",
+ "@babel/helper-split-export-declaration": "^7.18.6",
+ "@babel/helper-validator-identifier": "^7.18.6",
+ "@babel/template": "^7.18.10",
+ "@babel/traverse": "^7.19.0",
+ "@babel/types": "^7.19.0"
+ }
+ },
+ "@babel/helper-simple-access": {
+ "version": "7.18.6",
+ "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.18.6.tgz",
+ "integrity": "sha512-iNpIgTgyAvDQpDj76POqg+YEt8fPxx3yaNBg3S30dxNKm2SWfYhD0TGrK/Eu9wHpUW63VQU894TsTg+GLbUa1g==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.18.6"
+ }
+ },
+ "@babel/helper-split-export-declaration": {
+ "version": "7.18.6",
+ "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.18.6.tgz",
+ "integrity": "sha512-bde1etTx6ZyTmobl9LLMMQsaizFVZrquTEHOqKeQESMKo4PlObf+8+JA25ZsIpZhT/WEd39+vOdLXAFG/nELpA==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.18.6"
+ }
+ },
+ "@babel/helper-string-parser": {
+ "version": "7.18.10",
+ "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.18.10.tgz",
+ "integrity": "sha512-XtIfWmeNY3i4t7t4D2t02q50HvqHybPqW2ki1kosnvWCwuCMeo81Jf0gwr85jy/neUdg5XDdeFE/80DXiO+njw==",
+ "dev": true
+ },
+ "@babel/helper-validator-identifier": {
+ "version": "7.19.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.19.1.tgz",
+ "integrity": "sha512-awrNfaMtnHUr653GgGEs++LlAvW6w+DcPrOliSMXWCKo597CwL5Acf/wWdNkf/tfEQE3mjkeD1YOVZOUV/od1w=="
+ },
+ "@babel/helper-validator-option": {
+ "version": "7.18.6",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.18.6.tgz",
+ "integrity": "sha512-XO7gESt5ouv/LRJdrVjkShckw6STTaB7l9BrpBaAHDeF5YZT+01PCwmR0SJHnkW6i8OwW/EVWRShfi4j2x+KQw==",
+ "dev": true
+ },
+ "@babel/helpers": {
+ "version": "7.19.0",
+ "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.19.0.tgz",
+ "integrity": "sha512-DRBCKGwIEdqY3+rPJgG/dKfQy9+08rHIAJx8q2p+HSWP87s2HCrQmaAMMyMll2kIXKCW0cO1RdQskx15Xakftg==",
+ "dev": true,
+ "requires": {
+ "@babel/template": "^7.18.10",
+ "@babel/traverse": "^7.19.0",
+ "@babel/types": "^7.19.0"
+ }
+ },
+ "@babel/highlight": {
+ "version": "7.18.6",
+ "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.18.6.tgz",
+ "integrity": "sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g==",
+ "requires": {
+ "@babel/helper-validator-identifier": "^7.18.6",
+ "chalk": "^2.0.0",
+ "js-tokens": "^4.0.0"
+ }
+ },
+ "@babel/parser": {
+ "version": "7.19.3",
+ "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.19.3.tgz",
+ "integrity": "sha512-pJ9xOlNWHiy9+FuFP09DEAFbAn4JskgRsVcc169w2xRBC3FRGuQEwjeIMMND9L2zc0iEhO/tGv4Zq+km+hxNpQ==",
+ "dev": true
+ },
+ "@babel/runtime": {
+ "version": "7.19.0",
+ "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.19.0.tgz",
+ "integrity": "sha512-eR8Lo9hnDS7tqkO7NsV+mKvCmv5boaXFSZ70DnfhcgiEne8hv9oCEd36Klw74EtizEqLsy4YnW8UWwpBVolHZA==",
+ "requires": {
+ "regenerator-runtime": "^0.13.4"
+ }
+ },
+ "@babel/runtime-corejs3": {
+ "version": "7.19.1",
+ "resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.19.1.tgz",
+ "integrity": "sha512-j2vJGnkopRzH+ykJ8h68wrHnEUmtK//E723jjixiAl/PPf6FhqY/vYRcMVlNydRKQjQsTsYEjpx+DZMIvnGk/g==",
+ "requires": {
+ "core-js-pure": "^3.25.1",
+ "regenerator-runtime": "^0.13.4"
+ }
+ },
+ "@babel/template": {
+ "version": "7.18.10",
+ "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.18.10.tgz",
+ "integrity": "sha512-TI+rCtooWHr3QJ27kJxfjutghu44DLnasDMwpDqCXVTal9RLp3RSYNh4NdBrRP2cQAoG9A8juOQl6P6oZG4JxA==",
+ "dev": true,
+ "requires": {
+ "@babel/code-frame": "^7.18.6",
+ "@babel/parser": "^7.18.10",
+ "@babel/types": "^7.18.10"
+ }
+ },
+ "@babel/traverse": {
+ "version": "7.19.3",
+ "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.19.3.tgz",
+ "integrity": "sha512-qh5yf6149zhq2sgIXmwjnsvmnNQC2iw70UFjp4olxucKrWd/dvlUsBI88VSLUsnMNF7/vnOiA+nk1+yLoCqROQ==",
+ "dev": true,
+ "requires": {
+ "@babel/code-frame": "^7.18.6",
+ "@babel/generator": "^7.19.3",
+ "@babel/helper-environment-visitor": "^7.18.9",
+ "@babel/helper-function-name": "^7.19.0",
+ "@babel/helper-hoist-variables": "^7.18.6",
+ "@babel/helper-split-export-declaration": "^7.18.6",
+ "@babel/parser": "^7.19.3",
+ "@babel/types": "^7.19.3",
+ "debug": "^4.1.0",
+ "globals": "^11.1.0"
+ }
+ },
+ "@babel/types": {
+ "version": "7.19.3",
+ "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.19.3.tgz",
+ "integrity": "sha512-hGCaQzIY22DJlDh9CH7NOxgKkFjBk0Cw9xDO1Xmh2151ti7wiGfQ3LauXzL4HP1fmFlTX6XjpRETTpUcv7wQLw==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-string-parser": "^7.18.10",
+ "@babel/helper-validator-identifier": "^7.19.1",
+ "to-fast-properties": "^2.0.0"
+ }
+ },
+ "@eslint/eslintrc": {
+ "version": "0.4.3",
+ "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-0.4.3.tgz",
+ "integrity": "sha512-J6KFFz5QCYUJq3pf0mjEcCJVERbzv71PUIDczuh9JkwGEzced6CO5ADLHB1rbf/+oPBtoPfMYNOpGDzCANlbXw==",
+ "requires": {
+ "ajv": "^6.12.4",
+ "debug": "^4.1.1",
+ "espree": "^7.3.0",
+ "globals": "^13.9.0",
+ "ignore": "^4.0.6",
+ "import-fresh": "^3.2.1",
+ "js-yaml": "^3.13.1",
+ "minimatch": "^3.0.4",
+ "strip-json-comments": "^3.1.1"
+ },
+ "dependencies": {
+ "globals": {
+ "version": "13.17.0",
+ "resolved": "https://registry.npmjs.org/globals/-/globals-13.17.0.tgz",
+ "integrity": "sha512-1C+6nQRb1GwGMKm2dH/E7enFAMxGTmGI7/dEdhy/DNelv85w9B72t3uc5frtMNXIbzrarJJ/lTCjcaZwbLJmyw==",
+ "requires": {
+ "type-fest": "^0.20.2"
+ }
+ }
+ }
+ },
+ "@humanwhocodes/config-array": {
+ "version": "0.5.0",
+ "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.5.0.tgz",
+ "integrity": "sha512-FagtKFz74XrTl7y6HCzQpwDfXP0yhxe9lHLD1UZxjvZIcbyRz8zTFF/yYNfSfzU414eDwZ1SrO0Qvtyf+wFMQg==",
+ "requires": {
+ "@humanwhocodes/object-schema": "^1.2.0",
+ "debug": "^4.1.1",
+ "minimatch": "^3.0.4"
+ }
+ },
+ "@humanwhocodes/object-schema": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-1.2.1.tgz",
+ "integrity": "sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA=="
+ },
+ "@jridgewell/gen-mapping": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.1.1.tgz",
+ "integrity": "sha512-sQXCasFk+U8lWYEe66WxRDOE9PjVz4vSM51fTu3Hw+ClTpUSQb718772vH3pyS5pShp6lvQM7SxgIDXXXmOX7w==",
+ "dev": true,
+ "requires": {
+ "@jridgewell/set-array": "^1.0.0",
+ "@jridgewell/sourcemap-codec": "^1.4.10"
+ }
+ },
+ "@jridgewell/resolve-uri": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz",
+ "integrity": "sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==",
+ "dev": true
+ },
+ "@jridgewell/set-array": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz",
+ "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==",
+ "dev": true
+ },
+ "@jridgewell/sourcemap-codec": {
+ "version": "1.4.14",
+ "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz",
+ "integrity": "sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==",
+ "dev": true
+ },
+ "@jridgewell/trace-mapping": {
+ "version": "0.3.15",
+ "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.15.tgz",
+ "integrity": "sha512-oWZNOULl+UbhsgB51uuZzglikfIKSUBO/M9W2OfEjn7cmqoAiCgmv9lyACTUacZwBz0ITnJ2NqjU8Tx0DHL88g==",
+ "dev": true,
+ "requires": {
+ "@jridgewell/resolve-uri": "^3.0.3",
+ "@jridgewell/sourcemap-codec": "^1.4.10"
+ }
+ },
+ "@next/env": {
+ "version": "12.3.0",
+ "resolved": "https://registry.npmjs.org/@next/env/-/env-12.3.0.tgz",
+ "integrity": "sha512-PTJpjAFVbzBQ9xXpzMTroShvD5YDIIy46jQ7d4LrWpY+/5a8H90Tm8hE3Hvkc5RBRspVo7kvEOnqQms0A+2Q6w=="
+ },
+ "@next/eslint-plugin-next": {
+ "version": "12.3.1",
+ "resolved": "https://registry.npmjs.org/@next/eslint-plugin-next/-/eslint-plugin-next-12.3.1.tgz",
+ "integrity": "sha512-sw+lTf6r6P0j+g/n9y4qdWWI2syPqZx+uc0+B/fRENqfR3KpSid6MIKqc9gNwGhJASazEQ5b3w8h4cAET213jw==",
+ "requires": {
+ "glob": "7.1.7"
+ }
+ },
+ "@next/swc-android-arm-eabi": {
+ "version": "12.3.0",
+ "resolved": "https://registry.npmjs.org/@next/swc-android-arm-eabi/-/swc-android-arm-eabi-12.3.0.tgz",
+ "integrity": "sha512-/PuirPnAKsYBw93w/7Q9hqy+KGOU9mjYprZ/faxMUJh/dc6v3rYLxkZKNG9nFPIW4QKNTCnhP40xF9hLnxO+xg==",
+ "optional": true
+ },
+ "@next/swc-android-arm64": {
+ "version": "12.3.0",
+ "resolved": "https://registry.npmjs.org/@next/swc-android-arm64/-/swc-android-arm64-12.3.0.tgz",
+ "integrity": "sha512-OaI+FhAM6P9B6Ybwbn0Zl8YwWido0lLwhDBi9WiYCh4RQmIXAyVIoIJPHo4fP05+mXaJ/k1trvDvuURvHOq2qw==",
+ "optional": true
+ },
+ "@next/swc-darwin-arm64": {
+ "version": "12.3.0",
+ "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-12.3.0.tgz",
+ "integrity": "sha512-9s4d3Mhii+WFce8o8Jok7WC3Bawkr9wEUU++SJRptjU1L5tsfYJMrSYCACHLhZujziNDLyExe4Hwwsccps1sfg==",
+ "optional": true
+ },
+ "@next/swc-darwin-x64": {
+ "version": "12.3.0",
+ "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-12.3.0.tgz",
+ "integrity": "sha512-2scC4MqUTwGwok+wpVxP+zWp7WcCAVOtutki2E1n99rBOTnUOX6qXkgxSy083yBN6GqwuC/dzHeN7hIKjavfRA==",
+ "optional": true
+ },
+ "@next/swc-freebsd-x64": {
+ "version": "12.3.0",
+ "resolved": "https://registry.npmjs.org/@next/swc-freebsd-x64/-/swc-freebsd-x64-12.3.0.tgz",
+ "integrity": "sha512-xAlruUREij/bFa+qsE1tmsP28t7vz02N4ZDHt2lh3uJUniE0Ne9idyIDLc1Ed0IF2RjfgOp4ZVunuS3OM0sngw==",
+ "optional": true
+ },
+ "@next/swc-linux-arm-gnueabihf": {
+ "version": "12.3.0",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-arm-gnueabihf/-/swc-linux-arm-gnueabihf-12.3.0.tgz",
+ "integrity": "sha512-jin2S4VT/cugc2dSZEUIabhYDJNgrUh7fufbdsaAezgcQzqfdfJqfxl4E9GuafzB4cbRPTaqA0V5uqbp0IyGkQ==",
+ "optional": true
+ },
+ "@next/swc-linux-arm64-gnu": {
+ "version": "12.3.0",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-12.3.0.tgz",
+ "integrity": "sha512-RqJHDKe0WImeUrdR0kayTkRWgp4vD/MS7g0r6Xuf8+ellOFH7JAAJffDW3ayuVZeMYOa7RvgNFcOoWnrTUl9Nw==",
+ "optional": true
+ },
+ "@next/swc-linux-arm64-musl": {
+ "version": "12.3.0",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-12.3.0.tgz",
+ "integrity": "sha512-nvNWoUieMjvDjpYJ/4SQe9lQs2xMj6ZRs8N+bmTrVu9leY2Fg3WD6W9p/1uU9hGO8u+OdF13wc4iRShu/WYIHg==",
+ "optional": true
+ },
+ "@next/swc-linux-x64-gnu": {
+ "version": "12.3.0",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-12.3.0.tgz",
+ "integrity": "sha512-4ajhIuVU9PeQCMMhdDgZTLrHmjbOUFuIyg6J19hZqwEwDTSqQyrSLkbJs2Nd7IRiM6Ul/XyrtEFCpk4k+xD2+w==",
+ "optional": true
+ },
+ "@next/swc-linux-x64-musl": {
+ "version": "12.3.0",
+ "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-12.3.0.tgz",
+ "integrity": "sha512-U092RBYbaGxoMAwpauePJEu2PuZSEoUCGJBvsptQr2/2XIMwAJDYM4c/M5NfYEsBr+yjvsYNsOpYfeQ88D82Yg==",
+ "optional": true
+ },
+ "@next/swc-win32-arm64-msvc": {
+ "version": "12.3.0",
+ "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-12.3.0.tgz",
+ "integrity": "sha512-pzSzaxjDEJe67bUok9Nxf9rykbJfHXW0owICFsPBsqHyc+cr8vpF7g9e2APTCddtVhvjkga9ILoZJ9NxWS7Yiw==",
+ "optional": true
+ },
+ "@next/swc-win32-ia32-msvc": {
+ "version": "12.3.0",
+ "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-12.3.0.tgz",
+ "integrity": "sha512-MQGUpMbYhQmTZ06a9e0hPQJnxFMwETo2WtyAotY3GEzbNCQVbCGhsvqEKcl+ZEHgShlHXUWvSffq1ZscY6gK7A==",
+ "optional": true
+ },
+ "@next/swc-win32-x64-msvc": {
+ "version": "12.3.0",
+ "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-12.3.0.tgz",
+ "integrity": "sha512-C/nw6OgQpEULWqs+wgMHXGvlJLguPRFFGqR2TAqWBerQ8J+Sg3z1ZTqwelkSi4FoqStGuZ2UdFHIDN1ySmR1xA==",
+ "optional": true
+ },
+ "@nodelib/fs.scandir": {
+ "version": "2.1.5",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
+ "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==",
+ "requires": {
+ "@nodelib/fs.stat": "2.0.5",
+ "run-parallel": "^1.1.9"
+ }
+ },
+ "@nodelib/fs.stat": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz",
+ "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A=="
+ },
+ "@nodelib/fs.walk": {
+ "version": "1.2.8",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz",
+ "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==",
+ "requires": {
+ "@nodelib/fs.scandir": "2.1.5",
+ "fastq": "^1.6.0"
+ }
+ },
+ "@rushstack/eslint-patch": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/@rushstack/eslint-patch/-/eslint-patch-1.2.0.tgz",
+ "integrity": "sha512-sXo/qW2/pAcmT43VoRKOJbDOfV3cYpq3szSVfIThQXNt+E4DfKj361vaAt3c88U5tPUxzEswam7GW48PJqtKAg=="
+ },
+ "@swc/helpers": {
+ "version": "0.4.11",
+ "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.4.11.tgz",
+ "integrity": "sha512-rEUrBSGIoSFuYxwBYtlUFMlE2CwGhmW+w9355/5oduSw8e5h2+Tj4UrAGNNgP9915++wj5vkQo0UuOBqOAq4nw==",
+ "requires": {
+ "tslib": "^2.4.0"
+ }
+ },
+ "@types/json5": {
+ "version": "0.0.29",
+ "resolved": "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz",
+ "integrity": "sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ=="
+ },
+ "@types/node": {
+ "version": "17.0.45",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.45.tgz",
+ "integrity": "sha512-w+tIMs3rq2afQdsPJlODhoUEKzFP1ayaoyl1CcnwtIlsVe7K7bA1NGm4s3PraqTLlXnbIN84zuBlxBWo1u9BLw==",
+ "dev": true
+ },
+ "@types/prop-types": {
+ "version": "15.7.5",
+ "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.5.tgz",
+ "integrity": "sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w==",
+ "dev": true
+ },
+ "@types/react": {
+ "version": "18.0.17",
+ "resolved": "https://registry.npmjs.org/@types/react/-/react-18.0.17.tgz",
+ "integrity": "sha512-38ETy4tL+rn4uQQi7mB81G7V1g0u2ryquNmsVIOKUAEIDK+3CUjZ6rSRpdvS99dNBnkLFL83qfmtLacGOTIhwQ==",
+ "dev": true,
+ "requires": {
+ "@types/prop-types": "*",
+ "@types/scheduler": "*",
+ "csstype": "^3.0.2"
+ }
+ },
+ "@types/react-dom": {
+ "version": "17.0.17",
+ "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-17.0.17.tgz",
+ "integrity": "sha512-VjnqEmqGnasQKV0CWLevqMTXBYG9GbwuE6x3VetERLh0cq2LTptFE73MrQi2S7GkKXCf2GgwItB/melLnxfnsg==",
+ "dev": true,
+ "requires": {
+ "@types/react": "^17"
+ },
+ "dependencies": {
+ "@types/react": {
+ "version": "17.0.50",
+ "resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.50.tgz",
+ "integrity": "sha512-ZCBHzpDb5skMnc1zFXAXnL3l1FAdi+xZvwxK+PkglMmBrwjpp9nKaWuEvrGnSifCJmBFGxZOOFuwC6KH/s0NuA==",
+ "dev": true,
+ "requires": {
+ "@types/prop-types": "*",
+ "@types/scheduler": "*",
+ "csstype": "^3.0.2"
+ }
+ }
+ }
+ },
+ "@types/scheduler": {
+ "version": "0.16.2",
+ "resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.2.tgz",
+ "integrity": "sha512-hppQEBDmlwhFAXKJX2KnWLYu5yMfi91yazPb2l+lbJiwW+wdo1gNeRA+3RgNSO39WYX2euey41KEwnqesU2Jew==",
+ "dev": true
+ },
+ "@typescript-eslint/parser": {
+ "version": "5.39.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.39.0.tgz",
+ "integrity": "sha512-PhxLjrZnHShe431sBAGHaNe6BDdxAASDySgsBCGxcBecVCi8NQWxQZMcizNA4g0pN51bBAn/FUfkWG3SDVcGlA==",
+ "requires": {
+ "@typescript-eslint/scope-manager": "5.39.0",
+ "@typescript-eslint/types": "5.39.0",
+ "@typescript-eslint/typescript-estree": "5.39.0",
+ "debug": "^4.3.4"
+ }
+ },
+ "@typescript-eslint/scope-manager": {
+ "version": "5.39.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.39.0.tgz",
+ "integrity": "sha512-/I13vAqmG3dyqMVSZPjsbuNQlYS082Y7OMkwhCfLXYsmlI0ca4nkL7wJ/4gjX70LD4P8Hnw1JywUVVAwepURBw==",
+ "requires": {
+ "@typescript-eslint/types": "5.39.0",
+ "@typescript-eslint/visitor-keys": "5.39.0"
+ }
+ },
+ "@typescript-eslint/types": {
+ "version": "5.39.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.39.0.tgz",
+ "integrity": "sha512-gQMZrnfEBFXK38hYqt8Lkwt8f4U6yq+2H5VDSgP/qiTzC8Nw8JO3OuSUOQ2qW37S/dlwdkHDntkZM6SQhKyPhw=="
+ },
+ "@typescript-eslint/typescript-estree": {
+ "version": "5.39.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.39.0.tgz",
+ "integrity": "sha512-qLFQP0f398sdnogJoLtd43pUgB18Q50QSA+BTE5h3sUxySzbWDpTSdgt4UyxNSozY/oDK2ta6HVAzvGgq8JYnA==",
+ "requires": {
+ "@typescript-eslint/types": "5.39.0",
+ "@typescript-eslint/visitor-keys": "5.39.0",
+ "debug": "^4.3.4",
+ "globby": "^11.1.0",
+ "is-glob": "^4.0.3",
+ "semver": "^7.3.7",
+ "tsutils": "^3.21.0"
+ },
+ "dependencies": {
+ "semver": {
+ "version": "7.3.7",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.7.tgz",
+ "integrity": "sha512-QlYTucUYOews+WeEujDoEGziz4K6c47V/Bd+LjSSYcA94p+DmINdf7ncaUinThfvZyu13lN9OY1XDxt8C0Tw0g==",
+ "requires": {
+ "lru-cache": "^6.0.0"
+ }
+ }
+ }
+ },
+ "@typescript-eslint/visitor-keys": {
+ "version": "5.39.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.39.0.tgz",
+ "integrity": "sha512-yyE3RPwOG+XJBLrhvsxAidUgybJVQ/hG8BhiJo0k8JSAYfk/CshVcxf0HwP4Jt7WZZ6vLmxdo1p6EyN3tzFTkg==",
+ "requires": {
+ "@typescript-eslint/types": "5.39.0",
+ "eslint-visitor-keys": "^3.3.0"
+ },
+ "dependencies": {
+ "eslint-visitor-keys": {
+ "version": "3.3.0",
+ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.3.0.tgz",
+ "integrity": "sha512-mQ+suqKJVyeuwGYHAdjMFqjCyfl8+Ldnxuyp3ldiMBFKkvytrXUZWaiPCEav8qDHKty44bD+qV1IP4T+w+xXRA=="
+ }
+ }
+ },
+ "acorn": {
+ "version": "7.4.1",
+ "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.4.1.tgz",
+ "integrity": "sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A=="
+ },
+ "acorn-jsx": {
+ "version": "5.3.2",
+ "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz",
+ "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==",
+ "requires": {}
+ },
+ "ajv": {
+ "version": "6.12.6",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
+ "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
+ "requires": {
+ "fast-deep-equal": "^3.1.1",
+ "fast-json-stable-stringify": "^2.0.0",
+ "json-schema-traverse": "^0.4.1",
+ "uri-js": "^4.2.2"
+ }
+ },
+ "ansi-colors": {
+ "version": "4.1.3",
+ "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.3.tgz",
+ "integrity": "sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw=="
+ },
+ "ansi-regex": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
+ "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="
+ },
+ "ansi-styles": {
+ "version": "3.2.1",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
+ "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
+ "requires": {
+ "color-convert": "^1.9.0"
+ }
+ },
+ "argparse": {
+ "version": "1.0.10",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz",
+ "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==",
+ "requires": {
+ "sprintf-js": "~1.0.2"
+ }
+ },
+ "aria-query": {
+ "version": "4.2.2",
+ "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-4.2.2.tgz",
+ "integrity": "sha512-o/HelwhuKpTj/frsOsbNLNgnNGVIFsVP/SW2BSF14gVl7kAfMOJ6/8wUAUvG1R1NHKrfG+2sHZTu0yauT1qBrA==",
+ "requires": {
+ "@babel/runtime": "^7.10.2",
+ "@babel/runtime-corejs3": "^7.10.2"
+ }
+ },
+ "array-includes": {
+ "version": "3.1.5",
+ "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.5.tgz",
+ "integrity": "sha512-iSDYZMMyTPkiFasVqfuAQnWAYcvO/SeBSCGKePoEthjp4LEMTe4uLc7b025o4jAZpHhihh8xPo99TNWUWWkGDQ==",
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.4",
+ "es-abstract": "^1.19.5",
+ "get-intrinsic": "^1.1.1",
+ "is-string": "^1.0.7"
+ }
+ },
+ "array-union": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz",
+ "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw=="
+ },
+ "array.prototype.flat": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.3.0.tgz",
+ "integrity": "sha512-12IUEkHsAhA4DY5s0FPgNXIdc8VRSqD9Zp78a5au9abH/SOBrsp082JOWFNTjkMozh8mqcdiKuaLGhPeYztxSw==",
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.3",
+ "es-abstract": "^1.19.2",
+ "es-shim-unscopables": "^1.0.0"
+ }
+ },
+ "array.prototype.flatmap": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.3.0.tgz",
+ "integrity": "sha512-PZC9/8TKAIxcWKdyeb77EzULHPrIX/tIZebLJUQOMR1OwYosT8yggdfWScfTBCDj5utONvOuPQQumYsU2ULbkg==",
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.3",
+ "es-abstract": "^1.19.2",
+ "es-shim-unscopables": "^1.0.0"
+ }
+ },
+ "ast-types-flow": {
+ "version": "0.0.7",
+ "resolved": "https://registry.npmjs.org/ast-types-flow/-/ast-types-flow-0.0.7.tgz",
+ "integrity": "sha512-eBvWn1lvIApYMhzQMsu9ciLfkBY499mFZlNqG+/9WR7PVlroQw0vG30cOQQbaKz3sCEc44TAOu2ykzqXSNnwag=="
+ },
+ "astral-regex": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-2.0.0.tgz",
+ "integrity": "sha512-Z7tMw1ytTXt5jqMcOP+OQteU1VuNK9Y02uuJtKQ1Sv69jXQKKg5cibLwGJow8yzZP+eAc18EmLGPal0bp36rvQ=="
+ },
+ "axe-core": {
+ "version": "4.4.3",
+ "resolved": "https://registry.npmjs.org/axe-core/-/axe-core-4.4.3.tgz",
+ "integrity": "sha512-32+ub6kkdhhWick/UjvEwRchgoetXqTK14INLqbGm5U2TzBkBNF3nQtLYm8ovxSkQWArjEQvftCKryjZaATu3w=="
+ },
+ "axobject-query": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/axobject-query/-/axobject-query-2.2.0.tgz",
+ "integrity": "sha512-Td525n+iPOOyUQIeBfcASuG6uJsDOITl7Mds5gFyerkWiX7qhUTdYUBlSgNMyVqtSJqwpt1kXGLdUt6SykLMRA=="
+ },
+ "balanced-match": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
+ "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="
+ },
+ "brace-expansion": {
+ "version": "1.1.11",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
+ "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
+ "requires": {
+ "balanced-match": "^1.0.0",
+ "concat-map": "0.0.1"
+ }
+ },
+ "braces": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz",
+ "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==",
+ "requires": {
+ "fill-range": "^7.0.1"
+ }
+ },
+ "browserslist": {
+ "version": "4.21.4",
+ "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.21.4.tgz",
+ "integrity": "sha512-CBHJJdDmgjl3daYjN5Cp5kbTf1mUhZoS+beLklHIvkOWscs83YAhLlF3Wsh/lciQYAcbBJgTOD44VtG31ZM4Hw==",
+ "dev": true,
+ "requires": {
+ "caniuse-lite": "^1.0.30001400",
+ "electron-to-chromium": "^1.4.251",
+ "node-releases": "^2.0.6",
+ "update-browserslist-db": "^1.0.9"
+ }
+ },
+ "call-bind": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz",
+ "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==",
+ "requires": {
+ "function-bind": "^1.1.1",
+ "get-intrinsic": "^1.0.2"
+ }
+ },
+ "callsites": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz",
+ "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ=="
+ },
+ "caniuse-lite": {
+ "version": "1.0.30001414",
+ "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001414.tgz",
+ "integrity": "sha512-t55jfSaWjCdocnFdKQoO+d2ct9C59UZg4dY3OnUlSZ447r8pUtIKdp0hpAzrGFultmTC+Us+KpKi4GZl/LXlFg=="
+ },
+ "chalk": {
+ "version": "2.4.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
+ "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
+ "requires": {
+ "ansi-styles": "^3.2.1",
+ "escape-string-regexp": "^1.0.5",
+ "supports-color": "^5.3.0"
+ }
+ },
+ "color-convert": {
+ "version": "1.9.3",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
+ "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
+ "requires": {
+ "color-name": "1.1.3"
+ }
+ },
+ "color-name": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
+ "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw=="
+ },
+ "concat-map": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
+ "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg=="
+ },
+ "convert-source-map": {
+ "version": "1.8.0",
+ "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.8.0.tgz",
+ "integrity": "sha512-+OQdjP49zViI/6i7nIJpA8rAl4sV/JdPfU9nZs3VqOwGIgizICvuN2ru6fMd+4llL0tar18UYJXfZ/TWtmhUjA==",
+ "dev": true,
+ "requires": {
+ "safe-buffer": "~5.1.1"
+ }
+ },
+ "core-js-pure": {
+ "version": "3.25.5",
+ "resolved": "https://registry.npmjs.org/core-js-pure/-/core-js-pure-3.25.5.tgz",
+ "integrity": "sha512-oml3M22pHM+igfWHDfdLVq2ShWmjM2V4L+dQEBs0DWVIqEm9WHCwGAlZ6BmyBQGy5sFrJmcx+856D9lVKyGWYg=="
+ },
+ "cross-spawn": {
+ "version": "7.0.3",
+ "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz",
+ "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==",
+ "requires": {
+ "path-key": "^3.1.0",
+ "shebang-command": "^2.0.0",
+ "which": "^2.0.1"
+ }
+ },
+ "csstype": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.1.tgz",
+ "integrity": "sha512-DJR/VvkAvSZW9bTouZue2sSxDwdTN92uHjqeKVm+0dAqdfNykRzQ95tay8aXMBAAPpUiq4Qcug2L7neoRh2Egw==",
+ "dev": true
+ },
+ "damerau-levenshtein": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/damerau-levenshtein/-/damerau-levenshtein-1.0.8.tgz",
+ "integrity": "sha512-sdQSFB7+llfUcQHUQO3+B8ERRj0Oa4w9POWMI/puGtuf7gFywGmkaLCElnudfTiKZV+NvHqL0ifzdrI8Ro7ESA=="
+ },
+ "debug": {
+ "version": "4.3.4",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz",
+ "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==",
+ "requires": {
+ "ms": "2.1.2"
+ }
+ },
+ "deep-is": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz",
+ "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ=="
+ },
+ "define-properties": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.4.tgz",
+ "integrity": "sha512-uckOqKcfaVvtBdsVkdPv3XjveQJsNQqmhXgRi8uhvWWuPYZCNlzT8qAyblUgNoXdHdjMTzAqeGjAoli8f+bzPA==",
+ "requires": {
+ "has-property-descriptors": "^1.0.0",
+ "object-keys": "^1.1.1"
+ }
+ },
+ "dir-glob": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz",
+ "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==",
+ "requires": {
+ "path-type": "^4.0.0"
+ }
+ },
+ "docs": {
+ "version": "file:apps/docs",
+ "requires": {
+ "@babel/core": "^7.0.0",
+ "@types/node": "^17.0.12",
+ "@types/react": "18.0.17",
+ "eslint": "7.32.0",
+ "eslint-config-custom": "*",
+ "lodash": "^3.0.0",
+ "next": "12.3.0",
+ "next-transpile-modules": "9.0.0",
+ "react": "18.2.0",
+ "react-dom": "18.2.0",
+ "tsconfig": "*",
+ "typescript": "^4.5.3",
+ "ui": "*"
+ }
+ },
+ "doctrine": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz",
+ "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==",
+ "requires": {
+ "esutils": "^2.0.2"
+ }
+ },
+ "electron-to-chromium": {
+ "version": "1.4.270",
+ "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.270.tgz",
+ "integrity": "sha512-KNhIzgLiJmDDC444dj9vEOpZEgsV96ult9Iff98Vanumn+ShJHd5se8aX6KeVxdc0YQeqdrezBZv89rleDbvSg==",
+ "dev": true
+ },
+ "emoji-regex": {
+ "version": "9.2.2",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz",
+ "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg=="
+ },
+ "enhanced-resolve": {
+ "version": "5.10.0",
+ "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.10.0.tgz",
+ "integrity": "sha512-T0yTFjdpldGY8PmuXXR0PyQ1ufZpEGiHVrp7zHKB7jdR4qlmZHhONVM5AQOAWXuF/w3dnHbEQVrNptJgt7F+cQ==",
+ "dev": true,
+ "requires": {
+ "graceful-fs": "^4.2.4",
+ "tapable": "^2.2.0"
+ }
+ },
+ "enquirer": {
+ "version": "2.3.6",
+ "resolved": "https://registry.npmjs.org/enquirer/-/enquirer-2.3.6.tgz",
+ "integrity": "sha512-yjNnPr315/FjS4zIsUxYguYUPP2e1NK4d7E7ZOLiyYCcbFBiTMyID+2wvm2w6+pZ/odMA7cRkjhsPbltwBOrLg==",
+ "requires": {
+ "ansi-colors": "^4.1.1"
+ }
+ },
+ "es-abstract": {
+ "version": "1.20.3",
+ "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.20.3.tgz",
+ "integrity": "sha512-AyrnaKVpMzljIdwjzrj+LxGmj8ik2LckwXacHqrJJ/jxz6dDDBcZ7I7nlHM0FvEW8MfbWJwOd+yT2XzYW49Frw==",
+ "requires": {
+ "call-bind": "^1.0.2",
+ "es-to-primitive": "^1.2.1",
+ "function-bind": "^1.1.1",
+ "function.prototype.name": "^1.1.5",
+ "get-intrinsic": "^1.1.3",
+ "get-symbol-description": "^1.0.0",
+ "has": "^1.0.3",
+ "has-property-descriptors": "^1.0.0",
+ "has-symbols": "^1.0.3",
+ "internal-slot": "^1.0.3",
+ "is-callable": "^1.2.6",
+ "is-negative-zero": "^2.0.2",
+ "is-regex": "^1.1.4",
+ "is-shared-array-buffer": "^1.0.2",
+ "is-string": "^1.0.7",
+ "is-weakref": "^1.0.2",
+ "object-inspect": "^1.12.2",
+ "object-keys": "^1.1.1",
+ "object.assign": "^4.1.4",
+ "regexp.prototype.flags": "^1.4.3",
+ "safe-regex-test": "^1.0.0",
+ "string.prototype.trimend": "^1.0.5",
+ "string.prototype.trimstart": "^1.0.5",
+ "unbox-primitive": "^1.0.2"
+ }
+ },
+ "es-shim-unscopables": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.0.0.tgz",
+ "integrity": "sha512-Jm6GPcCdC30eMLbZ2x8z2WuRwAws3zTBBKuusffYVUrNj/GVSUAZ+xKMaUpfNDR5IbyNA5LJbaecoUVbmUcB1w==",
+ "requires": {
+ "has": "^1.0.3"
+ }
+ },
+ "es-to-primitive": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz",
+ "integrity": "sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==",
+ "requires": {
+ "is-callable": "^1.1.4",
+ "is-date-object": "^1.0.1",
+ "is-symbol": "^1.0.2"
+ }
+ },
+ "escalade": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz",
+ "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==",
+ "dev": true
+ },
+ "escape-string-regexp": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
+ "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg=="
+ },
+ "eslint": {
+ "version": "7.32.0",
+ "resolved": "https://registry.npmjs.org/eslint/-/eslint-7.32.0.tgz",
+ "integrity": "sha512-VHZ8gX+EDfz+97jGcgyGCyRia/dPOd6Xh9yPv8Bl1+SoaIwD+a/vlrOmGRUyOYu7MwUhc7CxqeaDZU13S4+EpA==",
+ "requires": {
+ "@babel/code-frame": "7.12.11",
+ "@eslint/eslintrc": "^0.4.3",
+ "@humanwhocodes/config-array": "^0.5.0",
+ "ajv": "^6.10.0",
+ "chalk": "^4.0.0",
+ "cross-spawn": "^7.0.2",
+ "debug": "^4.0.1",
+ "doctrine": "^3.0.0",
+ "enquirer": "^2.3.5",
+ "escape-string-regexp": "^4.0.0",
+ "eslint-scope": "^5.1.1",
+ "eslint-utils": "^2.1.0",
+ "eslint-visitor-keys": "^2.0.0",
+ "espree": "^7.3.1",
+ "esquery": "^1.4.0",
+ "esutils": "^2.0.2",
+ "fast-deep-equal": "^3.1.3",
+ "file-entry-cache": "^6.0.1",
+ "functional-red-black-tree": "^1.0.1",
+ "glob-parent": "^5.1.2",
+ "globals": "^13.6.0",
+ "ignore": "^4.0.6",
+ "import-fresh": "^3.0.0",
+ "imurmurhash": "^0.1.4",
+ "is-glob": "^4.0.0",
+ "js-yaml": "^3.13.1",
+ "json-stable-stringify-without-jsonify": "^1.0.1",
+ "levn": "^0.4.1",
+ "lodash.merge": "^4.6.2",
+ "minimatch": "^3.0.4",
+ "natural-compare": "^1.4.0",
+ "optionator": "^0.9.1",
+ "progress": "^2.0.0",
+ "regexpp": "^3.1.0",
+ "semver": "^7.2.1",
+ "strip-ansi": "^6.0.0",
+ "strip-json-comments": "^3.1.0",
+ "table": "^6.0.9",
+ "text-table": "^0.2.0",
+ "v8-compile-cache": "^2.0.3"
+ },
+ "dependencies": {
+ "@babel/code-frame": {
+ "version": "7.12.11",
+ "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.12.11.tgz",
+ "integrity": "sha512-Zt1yodBx1UcyiePMSkWnU4hPqhwq7hGi2nFL1LeA3EUl+q2LQx16MISgJ0+z7dnmgvP9QtIleuETGOiOH1RcIw==",
+ "requires": {
+ "@babel/highlight": "^7.10.4"
+ }
+ },
+ "ansi-styles": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+ "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "requires": {
+ "color-convert": "^2.0.1"
+ }
+ },
+ "chalk": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
+ "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
+ "requires": {
+ "ansi-styles": "^4.1.0",
+ "supports-color": "^7.1.0"
+ }
+ },
+ "color-convert": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+ "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "requires": {
+ "color-name": "~1.1.4"
+ }
+ },
+ "color-name": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="
+ },
+ "escape-string-regexp": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz",
+ "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA=="
+ },
+ "globals": {
+ "version": "13.17.0",
+ "resolved": "https://registry.npmjs.org/globals/-/globals-13.17.0.tgz",
+ "integrity": "sha512-1C+6nQRb1GwGMKm2dH/E7enFAMxGTmGI7/dEdhy/DNelv85w9B72t3uc5frtMNXIbzrarJJ/lTCjcaZwbLJmyw==",
+ "requires": {
+ "type-fest": "^0.20.2"
+ }
+ },
+ "has-flag": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
+ "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ=="
+ },
+ "semver": {
+ "version": "7.3.7",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.7.tgz",
+ "integrity": "sha512-QlYTucUYOews+WeEujDoEGziz4K6c47V/Bd+LjSSYcA94p+DmINdf7ncaUinThfvZyu13lN9OY1XDxt8C0Tw0g==",
+ "requires": {
+ "lru-cache": "^6.0.0"
+ }
+ },
+ "supports-color": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
+ "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
+ "requires": {
+ "has-flag": "^4.0.0"
+ }
+ }
+ }
+ },
+ "eslint-config-custom": {
+ "version": "file:packages/eslint-config-custom",
+ "requires": {
+ "eslint": "^7.23.0",
+ "eslint-config-next": "^12.0.8",
+ "eslint-config-prettier": "^8.3.0",
+ "eslint-config-turbo": "latest",
+ "eslint-plugin-react": "7.31.8",
+ "typescript": "^4.7.4"
+ }
+ },
+ "eslint-config-next": {
+ "version": "12.3.1",
+ "resolved": "https://registry.npmjs.org/eslint-config-next/-/eslint-config-next-12.3.1.tgz",
+ "integrity": "sha512-EN/xwKPU6jz1G0Qi6Bd/BqMnHLyRAL0VsaQaWA7F3KkjAgZHi4f1uL1JKGWNxdQpHTW/sdGONBd0bzxUka/DJg==",
+ "requires": {
+ "@next/eslint-plugin-next": "12.3.1",
+ "@rushstack/eslint-patch": "^1.1.3",
+ "@typescript-eslint/parser": "^5.21.0",
+ "eslint-import-resolver-node": "^0.3.6",
+ "eslint-import-resolver-typescript": "^2.7.1",
+ "eslint-plugin-import": "^2.26.0",
+ "eslint-plugin-jsx-a11y": "^6.5.1",
+ "eslint-plugin-react": "^7.31.7",
+ "eslint-plugin-react-hooks": "^4.5.0"
+ }
+ },
+ "eslint-config-prettier": {
+ "version": "8.5.0",
+ "resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-8.5.0.tgz",
+ "integrity": "sha512-obmWKLUNCnhtQRKc+tmnYuQl0pFU1ibYJQ5BGhTVB08bHe9wC8qUeG7c08dj9XX+AuPj1YSGSQIHl1pnDHZR0Q==",
+ "requires": {}
+ },
+ "eslint-config-turbo": {
+ "version": "0.0.4",
+ "resolved": "https://registry.npmjs.org/eslint-config-turbo/-/eslint-config-turbo-0.0.4.tgz",
+ "integrity": "sha512-HErPS/wfWkSdV9Yd2dDkhZt3W2B78Ih/aWPFfaHmCMjzPalh+5KxRRGTf8MOBQLCebcWJX0lP1Zvc1rZIHlXGg==",
+ "requires": {
+ "eslint-plugin-turbo": "0.0.4"
+ }
+ },
+ "eslint-import-resolver-node": {
+ "version": "0.3.6",
+ "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.6.tgz",
+ "integrity": "sha512-0En0w03NRVMn9Uiyn8YRPDKvWjxCWkslUEhGNTdGx15RvPJYQ+lbOlqrlNI2vEAs4pDYK4f/HN2TbDmk5TP0iw==",
+ "requires": {
+ "debug": "^3.2.7",
+ "resolve": "^1.20.0"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "3.2.7",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz",
+ "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==",
+ "requires": {
+ "ms": "^2.1.1"
+ }
+ }
+ }
+ },
+ "eslint-import-resolver-typescript": {
+ "version": "2.7.1",
+ "resolved": "https://registry.npmjs.org/eslint-import-resolver-typescript/-/eslint-import-resolver-typescript-2.7.1.tgz",
+ "integrity": "sha512-00UbgGwV8bSgUv34igBDbTOtKhqoRMy9bFjNehT40bXg6585PNIct8HhXZ0SybqB9rWtXj9crcku8ndDn/gIqQ==",
+ "requires": {
+ "debug": "^4.3.4",
+ "glob": "^7.2.0",
+ "is-glob": "^4.0.3",
+ "resolve": "^1.22.0",
+ "tsconfig-paths": "^3.14.1"
+ },
+ "dependencies": {
+ "glob": {
+ "version": "7.2.3",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
+ "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
+ "requires": {
+ "fs.realpath": "^1.0.0",
+ "inflight": "^1.0.4",
+ "inherits": "2",
+ "minimatch": "^3.1.1",
+ "once": "^1.3.0",
+ "path-is-absolute": "^1.0.0"
+ }
+ }
+ }
+ },
+ "eslint-module-utils": {
+ "version": "2.7.4",
+ "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.7.4.tgz",
+ "integrity": "sha512-j4GT+rqzCoRKHwURX7pddtIPGySnX9Si/cgMI5ztrcqOPtk5dDEeZ34CQVPphnqkJytlc97Vuk05Um2mJ3gEQA==",
+ "requires": {
+ "debug": "^3.2.7"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "3.2.7",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz",
+ "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==",
+ "requires": {
+ "ms": "^2.1.1"
+ }
+ }
+ }
+ },
+ "eslint-plugin-import": {
+ "version": "2.26.0",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.26.0.tgz",
+ "integrity": "sha512-hYfi3FXaM8WPLf4S1cikh/r4IxnO6zrhZbEGz2b660EJRbuxgpDS5gkCuYgGWg2xxh2rBuIr4Pvhve/7c31koA==",
+ "requires": {
+ "array-includes": "^3.1.4",
+ "array.prototype.flat": "^1.2.5",
+ "debug": "^2.6.9",
+ "doctrine": "^2.1.0",
+ "eslint-import-resolver-node": "^0.3.6",
+ "eslint-module-utils": "^2.7.3",
+ "has": "^1.0.3",
+ "is-core-module": "^2.8.1",
+ "is-glob": "^4.0.3",
+ "minimatch": "^3.1.2",
+ "object.values": "^1.1.5",
+ "resolve": "^1.22.0",
+ "tsconfig-paths": "^3.14.1"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "requires": {
+ "ms": "2.0.0"
+ }
+ },
+ "doctrine": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz",
+ "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==",
+ "requires": {
+ "esutils": "^2.0.2"
+ }
+ },
+ "ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
+ }
+ }
+ },
+ "eslint-plugin-jsx-a11y": {
+ "version": "6.6.1",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-jsx-a11y/-/eslint-plugin-jsx-a11y-6.6.1.tgz",
+ "integrity": "sha512-sXgFVNHiWffBq23uiS/JaP6eVR622DqwB4yTzKvGZGcPq6/yZ3WmOZfuBks/vHWo9GaFOqC2ZK4i6+C35knx7Q==",
+ "requires": {
+ "@babel/runtime": "^7.18.9",
+ "aria-query": "^4.2.2",
+ "array-includes": "^3.1.5",
+ "ast-types-flow": "^0.0.7",
+ "axe-core": "^4.4.3",
+ "axobject-query": "^2.2.0",
+ "damerau-levenshtein": "^1.0.8",
+ "emoji-regex": "^9.2.2",
+ "has": "^1.0.3",
+ "jsx-ast-utils": "^3.3.2",
+ "language-tags": "^1.0.5",
+ "minimatch": "^3.1.2",
+ "semver": "^6.3.0"
+ }
+ },
+ "eslint-plugin-react": {
+ "version": "7.31.8",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.31.8.tgz",
+ "integrity": "sha512-5lBTZmgQmARLLSYiwI71tiGVTLUuqXantZM6vlSY39OaDSV0M7+32K5DnLkmFrwTe+Ksz0ffuLUC91RUviVZfw==",
+ "requires": {
+ "array-includes": "^3.1.5",
+ "array.prototype.flatmap": "^1.3.0",
+ "doctrine": "^2.1.0",
+ "estraverse": "^5.3.0",
+ "jsx-ast-utils": "^2.4.1 || ^3.0.0",
+ "minimatch": "^3.1.2",
+ "object.entries": "^1.1.5",
+ "object.fromentries": "^2.0.5",
+ "object.hasown": "^1.1.1",
+ "object.values": "^1.1.5",
+ "prop-types": "^15.8.1",
+ "resolve": "^2.0.0-next.3",
+ "semver": "^6.3.0",
+ "string.prototype.matchall": "^4.0.7"
+ },
+ "dependencies": {
+ "doctrine": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz",
+ "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==",
+ "requires": {
+ "esutils": "^2.0.2"
+ }
+ },
+ "resolve": {
+ "version": "2.0.0-next.4",
+ "resolved": "https://registry.npmjs.org/resolve/-/resolve-2.0.0-next.4.tgz",
+ "integrity": "sha512-iMDbmAWtfU+MHpxt/I5iWI7cY6YVEZUQ3MBgPQ++XD1PELuJHIl82xBmObyP2KyQmkNB2dsqF7seoQQiAn5yDQ==",
+ "requires": {
+ "is-core-module": "^2.9.0",
+ "path-parse": "^1.0.7",
+ "supports-preserve-symlinks-flag": "^1.0.0"
+ }
+ }
+ }
+ },
+ "eslint-plugin-react-hooks": {
+ "version": "4.6.0",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-4.6.0.tgz",
+ "integrity": "sha512-oFc7Itz9Qxh2x4gNHStv3BqJq54ExXmfC+a1NjAta66IAN87Wu0R/QArgIS9qKzX3dXKPI9H5crl9QchNMY9+g==",
+ "requires": {}
+ },
+ "eslint-plugin-turbo": {
+ "version": "0.0.4",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-turbo/-/eslint-plugin-turbo-0.0.4.tgz",
+ "integrity": "sha512-dfmYE/iPvoJInQq+5E/0mj140y/rYwKtzZkn3uVK8+nvwC5zmWKQ6ehMWrL4bYBkGzSgpOndZM+jOXhPQ2m8Cg==",
+ "requires": {}
+ },
+ "eslint-scope": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz",
+ "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==",
+ "requires": {
+ "esrecurse": "^4.3.0",
+ "estraverse": "^4.1.1"
+ },
+ "dependencies": {
+ "estraverse": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz",
+ "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw=="
+ }
+ }
+ },
+ "eslint-utils": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-2.1.0.tgz",
+ "integrity": "sha512-w94dQYoauyvlDc43XnGB8lU3Zt713vNChgt4EWwhXAP2XkBvndfxF0AgIqKOOasjPIPzj9JqgwkwbCYD0/V3Zg==",
+ "requires": {
+ "eslint-visitor-keys": "^1.1.0"
+ },
+ "dependencies": {
+ "eslint-visitor-keys": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz",
+ "integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ=="
+ }
+ }
+ },
+ "eslint-visitor-keys": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-2.1.0.tgz",
+ "integrity": "sha512-0rSmRBzXgDzIsD6mGdJgevzgezI534Cer5L/vyMX0kHzT/jiB43jRhd9YUlMGYLQy2zprNmoT8qasCGtY+QaKw=="
+ },
+ "espree": {
+ "version": "7.3.1",
+ "resolved": "https://registry.npmjs.org/espree/-/espree-7.3.1.tgz",
+ "integrity": "sha512-v3JCNCE64umkFpmkFGqzVKsOT0tN1Zr+ueqLZfpV1Ob8e+CEgPWa+OxCoGH3tnhimMKIaBm4m/vaRpJ/krRz2g==",
+ "requires": {
+ "acorn": "^7.4.0",
+ "acorn-jsx": "^5.3.1",
+ "eslint-visitor-keys": "^1.3.0"
+ },
+ "dependencies": {
+ "eslint-visitor-keys": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz",
+ "integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ=="
+ }
+ }
+ },
+ "esprima": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz",
+ "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A=="
+ },
+ "esquery": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.4.0.tgz",
+ "integrity": "sha512-cCDispWt5vHHtwMY2YrAQ4ibFkAL8RbH5YGBnZBc90MolvvfkkQcJro/aZiAQUlQ3qgrYS6D6v8Gc5G5CQsc9w==",
+ "requires": {
+ "estraverse": "^5.1.0"
+ }
+ },
+ "esrecurse": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz",
+ "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==",
+ "requires": {
+ "estraverse": "^5.2.0"
+ }
+ },
+ "estraverse": {
+ "version": "5.3.0",
+ "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz",
+ "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA=="
+ },
+ "esutils": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz",
+ "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g=="
+ },
+ "fast-deep-equal": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
+ "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q=="
+ },
+ "fast-glob": {
+ "version": "3.2.12",
+ "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.12.tgz",
+ "integrity": "sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==",
+ "requires": {
+ "@nodelib/fs.stat": "^2.0.2",
+ "@nodelib/fs.walk": "^1.2.3",
+ "glob-parent": "^5.1.2",
+ "merge2": "^1.3.0",
+ "micromatch": "^4.0.4"
+ }
+ },
+ "fast-json-stable-stringify": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz",
+ "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw=="
+ },
+ "fast-levenshtein": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz",
+ "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw=="
+ },
+ "fastq": {
+ "version": "1.13.0",
+ "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.13.0.tgz",
+ "integrity": "sha512-YpkpUnK8od0o1hmeSc7UUs/eB/vIPWJYjKck2QKIzAf71Vm1AAQ3EbuZB3g2JIy+pg+ERD0vqI79KyZiB2e2Nw==",
+ "requires": {
+ "reusify": "^1.0.4"
+ }
+ },
+ "file-entry-cache": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz",
+ "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==",
+ "requires": {
+ "flat-cache": "^3.0.4"
+ }
+ },
+ "fill-range": {
+ "version": "7.0.1",
+ "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz",
+ "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==",
+ "requires": {
+ "to-regex-range": "^5.0.1"
+ }
+ },
+ "flat-cache": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.0.4.tgz",
+ "integrity": "sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==",
+ "requires": {
+ "flatted": "^3.1.0",
+ "rimraf": "^3.0.2"
+ }
+ },
+ "flatted": {
+ "version": "3.2.7",
+ "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.2.7.tgz",
+ "integrity": "sha512-5nqDSxl8nn5BSNxyR3n4I6eDmbolI6WT+QqR547RwxQapgjQBmtktdP+HTBb/a/zLsbzERTONyUB5pefh5TtjQ=="
+ },
+ "fs.realpath": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
+ "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw=="
+ },
+ "function-bind": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz",
+ "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A=="
+ },
+ "function.prototype.name": {
+ "version": "1.1.5",
+ "resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.5.tgz",
+ "integrity": "sha512-uN7m/BzVKQnCUF/iW8jYea67v++2u7m5UgENbHRtdDVclOUP+FMPlCNdmk0h/ysGyo2tavMJEDqJAkJdRa1vMA==",
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.3",
+ "es-abstract": "^1.19.0",
+ "functions-have-names": "^1.2.2"
+ }
+ },
+ "functional-red-black-tree": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz",
+ "integrity": "sha512-dsKNQNdj6xA3T+QlADDA7mOSlX0qiMINjn0cgr+eGHGsbSHzTabcIogz2+p/iqP1Xs6EP/sS2SbqH+brGTbq0g=="
+ },
+ "functions-have-names": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/functions-have-names/-/functions-have-names-1.2.3.tgz",
+ "integrity": "sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ=="
+ },
+ "gensync": {
+ "version": "1.0.0-beta.2",
+ "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz",
+ "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==",
+ "dev": true
+ },
+ "get-intrinsic": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.3.tgz",
+ "integrity": "sha512-QJVz1Tj7MS099PevUG5jvnt9tSkXN8K14dxQlikJuPt4uD9hHAHjLyLBiLR5zELelBdD9QNRAXZzsJx0WaDL9A==",
+ "requires": {
+ "function-bind": "^1.1.1",
+ "has": "^1.0.3",
+ "has-symbols": "^1.0.3"
+ }
+ },
+ "get-symbol-description": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.0.0.tgz",
+ "integrity": "sha512-2EmdH1YvIQiZpltCNgkuiUnyukzxM/R6NDJX31Ke3BG1Nq5b0S2PhX59UKi9vZpPDQVdqn+1IcaAwnzTT5vCjw==",
+ "requires": {
+ "call-bind": "^1.0.2",
+ "get-intrinsic": "^1.1.1"
+ }
+ },
+ "glob": {
+ "version": "7.1.7",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.7.tgz",
+ "integrity": "sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==",
+ "requires": {
+ "fs.realpath": "^1.0.0",
+ "inflight": "^1.0.4",
+ "inherits": "2",
+ "minimatch": "^3.0.4",
+ "once": "^1.3.0",
+ "path-is-absolute": "^1.0.0"
+ }
+ },
+ "glob-parent": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
+ "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
+ "requires": {
+ "is-glob": "^4.0.1"
+ }
+ },
+ "globals": {
+ "version": "11.12.0",
+ "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz",
+ "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==",
+ "dev": true
+ },
+ "globby": {
+ "version": "11.1.0",
+ "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz",
+ "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==",
+ "requires": {
+ "array-union": "^2.1.0",
+ "dir-glob": "^3.0.1",
+ "fast-glob": "^3.2.9",
+ "ignore": "^5.2.0",
+ "merge2": "^1.4.1",
+ "slash": "^3.0.0"
+ },
+ "dependencies": {
+ "ignore": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.0.tgz",
+ "integrity": "sha512-CmxgYGiEPCLhfLnpPp1MoRmifwEIOgjcHXxOBjv7mY96c+eWScsOP9c112ZyLdWHi0FxHjI+4uVhKYp/gcdRmQ=="
+ }
+ }
+ },
+ "graceful-fs": {
+ "version": "4.2.10",
+ "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.10.tgz",
+ "integrity": "sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==",
+ "dev": true
+ },
+ "has": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz",
+ "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==",
+ "requires": {
+ "function-bind": "^1.1.1"
+ }
+ },
+ "has-bigints": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.2.tgz",
+ "integrity": "sha512-tSvCKtBr9lkF0Ex0aQiP9N+OpV4zi2r/Nee5VkRDbaqv35RLYMzbwQfFSZZH0kR+Rd6302UJZ2p/bJCEoR3VoQ=="
+ },
+ "has-flag": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
+ "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw=="
+ },
+ "has-property-descriptors": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.0.tgz",
+ "integrity": "sha512-62DVLZGoiEBDHQyqG4w9xCuZ7eJEwNmJRWw2VY84Oedb7WFcA27fiEVe8oUQx9hAUJ4ekurquucTGwsyO1XGdQ==",
+ "requires": {
+ "get-intrinsic": "^1.1.1"
+ }
+ },
+ "has-symbols": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz",
+ "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A=="
+ },
+ "has-tostringtag": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.0.tgz",
+ "integrity": "sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==",
+ "requires": {
+ "has-symbols": "^1.0.2"
+ }
+ },
+ "ignore": {
+ "version": "4.0.6",
+ "resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz",
+ "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg=="
+ },
+ "import-fresh": {
+ "version": "3.3.0",
+ "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz",
+ "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==",
+ "requires": {
+ "parent-module": "^1.0.0",
+ "resolve-from": "^4.0.0"
+ }
+ },
+ "imurmurhash": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz",
+ "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA=="
+ },
+ "inflight": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
+ "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==",
+ "requires": {
+ "once": "^1.3.0",
+ "wrappy": "1"
+ }
+ },
+ "inherits": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
+ "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
+ },
+ "internal-slot": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.0.3.tgz",
+ "integrity": "sha512-O0DB1JC/sPyZl7cIo78n5dR7eUSwwpYPiXRhTzNxZVAMUuB8vlnRFyLxdrVToks6XPLVnFfbzaVd5WLjhgg+vA==",
+ "requires": {
+ "get-intrinsic": "^1.1.0",
+ "has": "^1.0.3",
+ "side-channel": "^1.0.4"
+ }
+ },
+ "is-bigint": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.4.tgz",
+ "integrity": "sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg==",
+ "requires": {
+ "has-bigints": "^1.0.1"
+ }
+ },
+ "is-boolean-object": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.2.tgz",
+ "integrity": "sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==",
+ "requires": {
+ "call-bind": "^1.0.2",
+ "has-tostringtag": "^1.0.0"
+ }
+ },
+ "is-callable": {
+ "version": "1.2.7",
+ "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz",
+ "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA=="
+ },
+ "is-core-module": {
+ "version": "2.10.0",
+ "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.10.0.tgz",
+ "integrity": "sha512-Erxj2n/LDAZ7H8WNJXd9tw38GYM3dv8rk8Zcs+jJuxYTW7sozH+SS8NtrSjVL1/vpLvWi1hxy96IzjJ3EHTJJg==",
+ "requires": {
+ "has": "^1.0.3"
+ }
+ },
+ "is-date-object": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.5.tgz",
+ "integrity": "sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==",
+ "requires": {
+ "has-tostringtag": "^1.0.0"
+ }
+ },
+ "is-extglob": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
+ "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ=="
+ },
+ "is-fullwidth-code-point": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
+ "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="
+ },
+ "is-glob": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz",
+ "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==",
+ "requires": {
+ "is-extglob": "^2.1.1"
+ }
+ },
+ "is-negative-zero": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.2.tgz",
+ "integrity": "sha512-dqJvarLawXsFbNDeJW7zAz8ItJ9cd28YufuuFzh0G8pNHjJMnY08Dv7sYX2uF5UpQOwieAeOExEYAWWfu7ZZUA=="
+ },
+ "is-number": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
+ "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng=="
+ },
+ "is-number-object": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.0.7.tgz",
+ "integrity": "sha512-k1U0IRzLMo7ZlYIfzRu23Oh6MiIFasgpb9X76eqfFZAqwH44UI4KTBvBYIZ1dSL9ZzChTB9ShHfLkR4pdW5krQ==",
+ "requires": {
+ "has-tostringtag": "^1.0.0"
+ }
+ },
+ "is-regex": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.4.tgz",
+ "integrity": "sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==",
+ "requires": {
+ "call-bind": "^1.0.2",
+ "has-tostringtag": "^1.0.0"
+ }
+ },
+ "is-shared-array-buffer": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.2.tgz",
+ "integrity": "sha512-sqN2UDu1/0y6uvXyStCOzyhAjCSlHceFoMKJW8W9EU9cvic/QdsZ0kEU93HEy3IUEFZIiH/3w+AH/UQbPHNdhA==",
+ "requires": {
+ "call-bind": "^1.0.2"
+ }
+ },
+ "is-string": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.7.tgz",
+ "integrity": "sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==",
+ "requires": {
+ "has-tostringtag": "^1.0.0"
+ }
+ },
+ "is-symbol": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.4.tgz",
+ "integrity": "sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==",
+ "requires": {
+ "has-symbols": "^1.0.2"
+ }
+ },
+ "is-weakref": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-weakref/-/is-weakref-1.0.2.tgz",
+ "integrity": "sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==",
+ "requires": {
+ "call-bind": "^1.0.2"
+ }
+ },
+ "isexe": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
+ "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw=="
+ },
+ "js-tokens": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
+ "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ=="
+ },
+ "js-yaml": {
+ "version": "3.14.1",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz",
+ "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==",
+ "requires": {
+ "argparse": "^1.0.7",
+ "esprima": "^4.0.0"
+ }
+ },
+ "jsesc": {
+ "version": "2.5.2",
+ "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz",
+ "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==",
+ "dev": true
+ },
+ "json-schema-traverse": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz",
+ "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg=="
+ },
+ "json-stable-stringify-without-jsonify": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz",
+ "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw=="
+ },
+ "json5": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.1.tgz",
+ "integrity": "sha512-1hqLFMSrGHRHxav9q9gNjJ5EXznIxGVO09xQRrwplcS8qs28pZ8s8hupZAmqDwZUmVZ2Qb2jnyPOWcDH8m8dlA==",
+ "dev": true
+ },
+ "jsx-ast-utils": {
+ "version": "3.3.3",
+ "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.3.3.tgz",
+ "integrity": "sha512-fYQHZTZ8jSfmWZ0iyzfwiU4WDX4HpHbMCZ3gPlWYiCl3BoeOTsqKBqnTVfH2rYT7eP5c3sVbeSPHnnJOaTrWiw==",
+ "requires": {
+ "array-includes": "^3.1.5",
+ "object.assign": "^4.1.3"
+ }
+ },
+ "language-subtag-registry": {
+ "version": "0.3.22",
+ "resolved": "https://registry.npmjs.org/language-subtag-registry/-/language-subtag-registry-0.3.22.tgz",
+ "integrity": "sha512-tN0MCzyWnoz/4nHS6uxdlFWoUZT7ABptwKPQ52Ea7URk6vll88bWBVhodtnlfEuCcKWNGoc+uGbw1cwa9IKh/w=="
+ },
+ "language-tags": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/language-tags/-/language-tags-1.0.5.tgz",
+ "integrity": "sha512-qJhlO9cGXi6hBGKoxEG/sKZDAHD5Hnu9Hs4WbOY3pCWXDhw0N8x1NenNzm2EnNLkLkk7J2SdxAkDSbb6ftT+UQ==",
+ "requires": {
+ "language-subtag-registry": "~0.3.2"
+ }
+ },
+ "levn": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz",
+ "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==",
+ "requires": {
+ "prelude-ls": "^1.2.1",
+ "type-check": "~0.4.0"
+ }
+ },
+ "lodash": {
+ "version": "3.10.1",
+ "resolved": "https://registry.npmjs.org/lodash/-/lodash-3.10.1.tgz",
+ "integrity": "sha512-9mDDwqVIma6OZX79ZlDACZl8sBm0TEnkf99zV3iMA4GzkIT/9hiqP5mY0HoT1iNLCrKc/R1HByV+yJfRWVJryQ=="
+ },
+ "lodash.merge": {
+ "version": "4.6.2",
+ "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz",
+ "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ=="
+ },
+ "lodash.truncate": {
+ "version": "4.4.2",
+ "resolved": "https://registry.npmjs.org/lodash.truncate/-/lodash.truncate-4.4.2.tgz",
+ "integrity": "sha512-jttmRe7bRse52OsWIMDLaXxWqRAmtIUccAQ3garviCqJjafXOfNMO0yMfNpdD6zbGaTU0P5Nz7e7gAT6cKmJRw=="
+ },
+ "loose-envify": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz",
+ "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==",
+ "requires": {
+ "js-tokens": "^3.0.0 || ^4.0.0"
+ }
+ },
+ "lru-cache": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz",
+ "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==",
+ "requires": {
+ "yallist": "^4.0.0"
+ }
+ },
+ "merge2": {
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz",
+ "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg=="
+ },
+ "micromatch": {
+ "version": "4.0.5",
+ "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz",
+ "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==",
+ "requires": {
+ "braces": "^3.0.2",
+ "picomatch": "^2.3.1"
+ }
+ },
+ "minimatch": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
+ "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
+ "requires": {
+ "brace-expansion": "^1.1.7"
+ }
+ },
+ "minimist": {
+ "version": "1.2.6",
+ "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz",
+ "integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q=="
+ },
+ "ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
+ },
+ "nanoid": {
+ "version": "3.3.4",
+ "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.4.tgz",
+ "integrity": "sha512-MqBkQh/OHTS2egovRtLk45wEyNXwF+cokD+1YPf9u5VfJiRdAiRwB2froX5Co9Rh20xs4siNPm8naNotSD6RBw=="
+ },
+ "natural-compare": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz",
+ "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw=="
+ },
+ "next": {
+ "version": "12.3.0",
+ "resolved": "https://registry.npmjs.org/next/-/next-12.3.0.tgz",
+ "integrity": "sha512-GpzI6me9V1+XYtfK0Ae9WD0mKqHyzQlGq1xH1rzNIYMASo4Tkl4rTe9jSqtBpXFhOS33KohXs9ZY38Akkhdciw==",
+ "requires": {
+ "@next/env": "12.3.0",
+ "@next/swc-android-arm-eabi": "12.3.0",
+ "@next/swc-android-arm64": "12.3.0",
+ "@next/swc-darwin-arm64": "12.3.0",
+ "@next/swc-darwin-x64": "12.3.0",
+ "@next/swc-freebsd-x64": "12.3.0",
+ "@next/swc-linux-arm-gnueabihf": "12.3.0",
+ "@next/swc-linux-arm64-gnu": "12.3.0",
+ "@next/swc-linux-arm64-musl": "12.3.0",
+ "@next/swc-linux-x64-gnu": "12.3.0",
+ "@next/swc-linux-x64-musl": "12.3.0",
+ "@next/swc-win32-arm64-msvc": "12.3.0",
+ "@next/swc-win32-ia32-msvc": "12.3.0",
+ "@next/swc-win32-x64-msvc": "12.3.0",
+ "@swc/helpers": "0.4.11",
+ "caniuse-lite": "^1.0.30001332",
+ "postcss": "8.4.14",
+ "styled-jsx": "5.0.6",
+ "use-sync-external-store": "1.2.0"
+ }
+ },
+ "next-transpile-modules": {
+ "version": "9.0.0",
+ "resolved": "https://registry.npmjs.org/next-transpile-modules/-/next-transpile-modules-9.0.0.tgz",
+ "integrity": "sha512-VCNFOazIAnXn1hvgYYSTYMnoWgKgwlYh4lm1pKbSfiB3kj5ZYLcKVhfh3jkPOg1cnd9DP+pte9yCUocdPEUBTQ==",
+ "dev": true,
+ "requires": {
+ "enhanced-resolve": "^5.7.0",
+ "escalade": "^3.1.1"
+ }
+ },
+ "node-releases": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.6.tgz",
+ "integrity": "sha512-PiVXnNuFm5+iYkLBNeq5211hvO38y63T0i2KKh2KnUs3RpzJ+JtODFjkD8yjLwnDkTYF1eKXheUwdssR+NRZdg==",
+ "dev": true
+ },
+ "object-assign": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
+ "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg=="
+ },
+ "object-inspect": {
+ "version": "1.12.2",
+ "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.2.tgz",
+ "integrity": "sha512-z+cPxW0QGUp0mcqcsgQyLVRDoXFQbXOwBaqyF7VIgI4TWNQsDHrBpUQslRmIfAoYWdYzs6UlKJtB2XJpTaNSpQ=="
+ },
+ "object-keys": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz",
+ "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA=="
+ },
+ "object.assign": {
+ "version": "4.1.4",
+ "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.4.tgz",
+ "integrity": "sha512-1mxKf0e58bvyjSCtKYY4sRe9itRk3PJpquJOjeIkz885CczcI4IvJJDLPS72oowuSh+pBxUFROpX+TU++hxhZQ==",
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.4",
+ "has-symbols": "^1.0.3",
+ "object-keys": "^1.1.1"
+ }
+ },
+ "object.entries": {
+ "version": "1.1.5",
+ "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.5.tgz",
+ "integrity": "sha512-TyxmjUoZggd4OrrU1W66FMDG6CuqJxsFvymeyXI51+vQLN67zYfZseptRge703kKQdo4uccgAKebXFcRCzk4+g==",
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.3",
+ "es-abstract": "^1.19.1"
+ }
+ },
+ "object.fromentries": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.5.tgz",
+ "integrity": "sha512-CAyG5mWQRRiBU57Re4FKoTBjXfDoNwdFVH2Y1tS9PqCsfUTymAohOkEMSG3aRNKmv4lV3O7p1et7c187q6bynw==",
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.3",
+ "es-abstract": "^1.19.1"
+ }
+ },
+ "object.hasown": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/object.hasown/-/object.hasown-1.1.1.tgz",
+ "integrity": "sha512-LYLe4tivNQzq4JdaWW6WO3HMZZJWzkkH8fnI6EebWl0VZth2wL2Lovm74ep2/gZzlaTdV62JZHEqHQ2yVn8Q/A==",
+ "requires": {
+ "define-properties": "^1.1.4",
+ "es-abstract": "^1.19.5"
+ }
+ },
+ "object.values": {
+ "version": "1.1.5",
+ "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.5.tgz",
+ "integrity": "sha512-QUZRW0ilQ3PnPpbNtgdNV1PDbEqLIiSFB3l+EnGtBQ/8SUTLj1PZwtQHABZtLgwpJZTSZhuGLOGk57Drx2IvYg==",
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.3",
+ "es-abstract": "^1.19.1"
+ }
+ },
+ "once": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
+ "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
+ "requires": {
+ "wrappy": "1"
+ }
+ },
+ "optionator": {
+ "version": "0.9.1",
+ "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.1.tgz",
+ "integrity": "sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw==",
+ "requires": {
+ "deep-is": "^0.1.3",
+ "fast-levenshtein": "^2.0.6",
+ "levn": "^0.4.1",
+ "prelude-ls": "^1.2.1",
+ "type-check": "^0.4.0",
+ "word-wrap": "^1.2.3"
+ }
+ },
+ "parent-module": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz",
+ "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==",
+ "requires": {
+ "callsites": "^3.0.0"
+ }
+ },
+ "path-is-absolute": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
+ "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg=="
+ },
+ "path-key": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
+ "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q=="
+ },
+ "path-parse": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
+ "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw=="
+ },
+ "path-type": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz",
+ "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw=="
+ },
+ "picocolors": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz",
+ "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ=="
+ },
+ "picomatch": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
+ "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA=="
+ },
+ "postcss": {
+ "version": "8.4.14",
+ "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.14.tgz",
+ "integrity": "sha512-E398TUmfAYFPBSdzgeieK2Y1+1cpdxJx8yXbK/m57nRhKSmk1GB2tO4lbLBtlkfPQTDKfe4Xqv1ASWPpayPEig==",
+ "requires": {
+ "nanoid": "^3.3.4",
+ "picocolors": "^1.0.0",
+ "source-map-js": "^1.0.2"
+ }
+ },
+ "prelude-ls": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz",
+ "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g=="
+ },
+ "prettier": {
+ "version": "2.7.1",
+ "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.7.1.tgz",
+ "integrity": "sha512-ujppO+MkdPqoVINuDFDRLClm7D78qbDt0/NR+wp5FqEZOoTNAjPHWj17QRhu7geIHJfcNhRk1XVQmF8Bp3ye+g==",
+ "dev": true
+ },
+ "progress": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz",
+ "integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA=="
+ },
+ "prop-types": {
+ "version": "15.8.1",
+ "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz",
+ "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==",
+ "requires": {
+ "loose-envify": "^1.4.0",
+ "object-assign": "^4.1.1",
+ "react-is": "^16.13.1"
+ }
+ },
+ "punycode": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz",
+ "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A=="
+ },
+ "queue-microtask": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz",
+ "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A=="
+ },
+ "react": {
+ "version": "18.2.0",
+ "resolved": "https://registry.npmjs.org/react/-/react-18.2.0.tgz",
+ "integrity": "sha512-/3IjMdb2L9QbBdWiW5e3P2/npwMBaU9mHCSCUzNln0ZCYbcfTsGbTJrU/kGemdH2IWmB2ioZ+zkxtmq6g09fGQ==",
+ "requires": {
+ "loose-envify": "^1.1.0"
+ }
+ },
+ "react-dom": {
+ "version": "18.2.0",
+ "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.2.0.tgz",
+ "integrity": "sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g==",
+ "requires": {
+ "loose-envify": "^1.1.0",
+ "scheduler": "^0.23.0"
+ }
+ },
+ "react-is": {
+ "version": "16.13.1",
+ "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz",
+ "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ=="
+ },
+ "regenerator-runtime": {
+ "version": "0.13.9",
+ "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.9.tgz",
+ "integrity": "sha512-p3VT+cOEgxFsRRA9X4lkI1E+k2/CtnKtU4gcxyaCUreilL/vqI6CdZ3wxVUx3UOUg+gnUOQQcRI7BmSI656MYA=="
+ },
+ "regexp.prototype.flags": {
+ "version": "1.4.3",
+ "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.4.3.tgz",
+ "integrity": "sha512-fjggEOO3slI6Wvgjwflkc4NFRCTZAu5CnNfBd5qOMYhWdn67nJBBu34/TkD++eeFmd8C9r9jfXJ27+nSiRkSUA==",
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.3",
+ "functions-have-names": "^1.2.2"
+ }
+ },
+ "regexpp": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-3.2.0.tgz",
+ "integrity": "sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg=="
+ },
+ "require-from-string": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz",
+ "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw=="
+ },
+ "resolve": {
+ "version": "1.22.1",
+ "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.1.tgz",
+ "integrity": "sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==",
+ "requires": {
+ "is-core-module": "^2.9.0",
+ "path-parse": "^1.0.7",
+ "supports-preserve-symlinks-flag": "^1.0.0"
+ }
+ },
+ "resolve-from": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz",
+ "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g=="
+ },
+ "reusify": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz",
+ "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw=="
+ },
+ "rimraf": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz",
+ "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==",
+ "requires": {
+ "glob": "^7.1.3"
+ }
+ },
+ "run-parallel": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz",
+ "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==",
+ "requires": {
+ "queue-microtask": "^1.2.2"
+ }
+ },
+ "safe-buffer": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
+ "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==",
+ "dev": true
+ },
+ "safe-regex-test": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.0.0.tgz",
+ "integrity": "sha512-JBUUzyOgEwXQY1NuPtvcj/qcBDbDmEvWufhlnXZIm75DEHp+afM1r1ujJpJsV/gSM4t59tpDyPi1sd6ZaPFfsA==",
+ "requires": {
+ "call-bind": "^1.0.2",
+ "get-intrinsic": "^1.1.3",
+ "is-regex": "^1.1.4"
+ }
+ },
+ "scheduler": {
+ "version": "0.23.0",
+ "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.0.tgz",
+ "integrity": "sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw==",
+ "requires": {
+ "loose-envify": "^1.1.0"
+ }
+ },
+ "semver": {
+ "version": "6.3.0",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz",
+ "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw=="
+ },
+ "shebang-command": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
+ "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
+ "requires": {
+ "shebang-regex": "^3.0.0"
+ }
+ },
+ "shebang-regex": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
+ "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A=="
+ },
+ "side-channel": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz",
+ "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==",
+ "requires": {
+ "call-bind": "^1.0.0",
+ "get-intrinsic": "^1.0.2",
+ "object-inspect": "^1.9.0"
+ }
+ },
+ "slash": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz",
+ "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q=="
+ },
+ "slice-ansi": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-4.0.0.tgz",
+ "integrity": "sha512-qMCMfhY040cVHT43K9BFygqYbUPFZKHOg7K73mtTWJRb8pyP3fzf4Ixd5SzdEJQ6MRUg/WBnOLxghZtKKurENQ==",
+ "requires": {
+ "ansi-styles": "^4.0.0",
+ "astral-regex": "^2.0.0",
+ "is-fullwidth-code-point": "^3.0.0"
+ },
+ "dependencies": {
+ "ansi-styles": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+ "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "requires": {
+ "color-convert": "^2.0.1"
+ }
+ },
+ "color-convert": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+ "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "requires": {
+ "color-name": "~1.1.4"
+ }
+ },
+ "color-name": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="
+ }
+ }
+ },
+ "source-map-js": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz",
+ "integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw=="
+ },
+ "sprintf-js": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz",
+ "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g=="
+ },
+ "string-width": {
+ "version": "4.2.3",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
+ "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
+ "requires": {
+ "emoji-regex": "^8.0.0",
+ "is-fullwidth-code-point": "^3.0.0",
+ "strip-ansi": "^6.0.1"
+ },
+ "dependencies": {
+ "emoji-regex": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
+ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="
+ }
+ }
+ },
+ "string.prototype.matchall": {
+ "version": "4.0.7",
+ "resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.7.tgz",
+ "integrity": "sha512-f48okCX7JiwVi1NXCVWcFnZgADDC/n2vePlQ/KUCNqCikLLilQvwjMO8+BHVKvgzH0JB0J9LEPgxOGT02RoETg==",
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.3",
+ "es-abstract": "^1.19.1",
+ "get-intrinsic": "^1.1.1",
+ "has-symbols": "^1.0.3",
+ "internal-slot": "^1.0.3",
+ "regexp.prototype.flags": "^1.4.1",
+ "side-channel": "^1.0.4"
+ }
+ },
+ "string.prototype.trimend": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.5.tgz",
+ "integrity": "sha512-I7RGvmjV4pJ7O3kdf+LXFpVfdNOxtCW/2C8f6jNiW4+PQchwxkCDzlk1/7p+Wl4bqFIZeF47qAHXLuHHWKAxog==",
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.4",
+ "es-abstract": "^1.19.5"
+ }
+ },
+ "string.prototype.trimstart": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.5.tgz",
+ "integrity": "sha512-THx16TJCGlsN0o6dl2o6ncWUsdgnLRSA23rRE5pyGBw/mLr3Ej/R2LaqCtgP8VNMGZsvMWnf9ooZPyY2bHvUFg==",
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.4",
+ "es-abstract": "^1.19.5"
+ }
+ },
+ "strip-ansi": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
+ "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
+ "requires": {
+ "ansi-regex": "^5.0.1"
+ }
+ },
+ "strip-bom": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz",
+ "integrity": "sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA=="
+ },
+ "strip-json-comments": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz",
+ "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig=="
+ },
+ "styled-jsx": {
+ "version": "5.0.6",
+ "resolved": "https://registry.npmjs.org/styled-jsx/-/styled-jsx-5.0.6.tgz",
+ "integrity": "sha512-xOeROtkK5MGMDimBQ3J6iPId8q0t/BDoG5XN6oKkZClVz9ISF/hihN8OCn2LggMU6N32aXnrXBdn3auSqNS9fA==",
+ "requires": {}
+ },
+ "supports-color": {
+ "version": "5.5.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
+ "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
+ "requires": {
+ "has-flag": "^3.0.0"
+ }
+ },
+ "supports-preserve-symlinks-flag": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz",
+ "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w=="
+ },
+ "table": {
+ "version": "6.8.0",
+ "resolved": "https://registry.npmjs.org/table/-/table-6.8.0.tgz",
+ "integrity": "sha512-s/fitrbVeEyHKFa7mFdkuQMWlH1Wgw/yEXMt5xACT4ZpzWFluehAxRtUUQKPuWhaLAWhFcVx6w3oC8VKaUfPGA==",
+ "requires": {
+ "ajv": "^8.0.1",
+ "lodash.truncate": "^4.4.2",
+ "slice-ansi": "^4.0.0",
+ "string-width": "^4.2.3",
+ "strip-ansi": "^6.0.1"
+ },
+ "dependencies": {
+ "ajv": {
+ "version": "8.11.0",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.11.0.tgz",
+ "integrity": "sha512-wGgprdCvMalC0BztXvitD2hC04YffAvtsUn93JbGXYLAtCUO4xd17mCCZQxUOItiBwZvJScWo8NIvQMQ71rdpg==",
+ "requires": {
+ "fast-deep-equal": "^3.1.1",
+ "json-schema-traverse": "^1.0.0",
+ "require-from-string": "^2.0.2",
+ "uri-js": "^4.2.2"
+ }
+ },
+ "json-schema-traverse": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
+ "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="
+ }
+ }
+ },
+ "tapable": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz",
+ "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==",
+ "dev": true
+ },
+ "text-table": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz",
+ "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw=="
+ },
+ "to-fast-properties": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz",
+ "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==",
+ "dev": true
+ },
+ "to-regex-range": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
+ "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
+ "requires": {
+ "is-number": "^7.0.0"
+ }
+ },
+ "tsconfig": {
+ "version": "file:packages/tsconfig"
+ },
+ "tsconfig-paths": {
+ "version": "3.14.1",
+ "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.14.1.tgz",
+ "integrity": "sha512-fxDhWnFSLt3VuTwtvJt5fpwxBHg5AdKWMsgcPOOIilyjymcYVZoCQF8fvFRezCNfblEXmi+PcM1eYHeOAgXCOQ==",
+ "requires": {
+ "@types/json5": "^0.0.29",
+ "json5": "^1.0.1",
+ "minimist": "^1.2.6",
+ "strip-bom": "^3.0.0"
+ },
+ "dependencies": {
+ "json5": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz",
+ "integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==",
+ "requires": {
+ "minimist": "^1.2.0"
+ }
+ }
+ }
+ },
+ "tslib": {
+ "version": "2.4.0",
+ "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.4.0.tgz",
+ "integrity": "sha512-d6xOpEDfsi2CZVlPQzGeux8XMwLT9hssAsaPYExaQMuYskwb+x1x7J371tWlbBdWHroy99KnVB6qIkUbs5X3UQ=="
+ },
+ "tsutils": {
+ "version": "3.21.0",
+ "resolved": "https://registry.npmjs.org/tsutils/-/tsutils-3.21.0.tgz",
+ "integrity": "sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==",
+ "requires": {
+ "tslib": "^1.8.1"
+ },
+ "dependencies": {
+ "tslib": {
+ "version": "1.14.1",
+ "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz",
+ "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg=="
+ }
+ }
+ },
+ "turbo": {
+ "version": "1.5.5",
+ "resolved": "https://registry.npmjs.org/turbo/-/turbo-1.5.5.tgz",
+ "integrity": "sha512-PVQSDl0STC9WXIyHcYUWs9gXsf8JjQig/FuHfuB8N6+XlgCGB3mPbfMEE6zrChGz2hufH4/guKRX1XJuNL6XTA==",
+ "dev": true,
+ "requires": {
+ "turbo-darwin-64": "1.5.5",
+ "turbo-darwin-arm64": "1.5.5",
+ "turbo-linux-64": "1.5.5",
+ "turbo-linux-arm64": "1.5.5",
+ "turbo-windows-64": "1.5.5",
+ "turbo-windows-arm64": "1.5.5"
+ }
+ },
+ "turbo-darwin-64": {
+ "version": "1.5.5",
+ "resolved": "https://registry.npmjs.org/turbo-darwin-64/-/turbo-darwin-64-1.5.5.tgz",
+ "integrity": "sha512-HvEn6P2B+NXDekq9LRpRgUjcT9/oygLTcK47U0qsAJZXRBSq/2hvD7lx4nAwgY/4W3rhYJeWtHTzbhoN6BXqGQ==",
+ "dev": true,
+ "optional": true
+ },
+ "turbo-darwin-arm64": {
+ "version": "1.5.5",
+ "resolved": "https://registry.npmjs.org/turbo-darwin-arm64/-/turbo-darwin-arm64-1.5.5.tgz",
+ "integrity": "sha512-Dmxr09IUy6M0nc7/xWod9galIO2DD500B75sJSkHeT+CCdJOWnlinux0ZPF8CSygNqymwYO8AO2l15/6yxcycg==",
+ "dev": true,
+ "optional": true
+ },
+ "turbo-linux-64": {
+ "version": "1.5.5",
+ "resolved": "https://registry.npmjs.org/turbo-linux-64/-/turbo-linux-64-1.5.5.tgz",
+ "integrity": "sha512-wd07TZ4zXXWjzZE00FcFMLmkybQQK/NV9ff66vvAV0vdiuacSMBCNLrD6Mm4ncfrUPW/rwFW5kU/7hyuEqqtDw==",
+ "dev": true,
+ "optional": true
+ },
+ "turbo-linux-arm64": {
+ "version": "1.5.5",
+ "resolved": "https://registry.npmjs.org/turbo-linux-arm64/-/turbo-linux-arm64-1.5.5.tgz",
+ "integrity": "sha512-q3q33tuo74R7gicnfvFbnZZvqmlq7Vakcvx0eshifnJw4PR+oMnTCb4w8ElVFx070zsb8DVTibq99y8NJH8T1Q==",
+ "dev": true,
+ "optional": true
+ },
+ "turbo-windows-64": {
+ "version": "1.5.5",
+ "resolved": "https://registry.npmjs.org/turbo-windows-64/-/turbo-windows-64-1.5.5.tgz",
+ "integrity": "sha512-lPp9kHonNFfqgovbaW+UAPO5cLmoAN+m3G3FzqcrRPnlzt97vXYsDhDd/4Zy3oAKoAcprtP4CGy0ddisqsKTVw==",
+ "dev": true,
+ "optional": true
+ },
+ "turbo-windows-arm64": {
+ "version": "1.5.5",
+ "resolved": "https://registry.npmjs.org/turbo-windows-arm64/-/turbo-windows-arm64-1.5.5.tgz",
+ "integrity": "sha512-3AfGULKNZiZVrEzsIE+W79ZRW1+f5r4nM4wLlJ1PTBHyRxBZdD6KTH1tijGfy/uTlcV5acYnKHEkDc6Q9PAXGQ==",
+ "dev": true,
+ "optional": true
+ },
+ "type-check": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz",
+ "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==",
+ "requires": {
+ "prelude-ls": "^1.2.1"
+ }
+ },
+ "type-fest": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz",
+ "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ=="
+ },
+ "typescript": {
+ "version": "4.8.4",
+ "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.8.4.tgz",
+ "integrity": "sha512-QCh+85mCy+h0IGff8r5XWzOVSbBO+KfeYrMQh7NJ58QujwcE22u+NUSmUxqF+un70P9GXKxa2HCNiTTMJknyjQ=="
+ },
+ "ui": {
+ "version": "file:packages/ui",
+ "requires": {
+ "@types/react": "^17.0.37",
+ "@types/react-dom": "^17.0.11",
+ "eslint": "^7.32.0",
+ "eslint-config-custom": "*",
+ "react": "^18.2.0",
+ "tsconfig": "*",
+ "typescript": "^4.5.2"
+ },
+ "dependencies": {
+ "@types/react": {
+ "version": "17.0.50",
+ "resolved": "https://registry.npmjs.org/@types/react/-/react-17.0.50.tgz",
+ "integrity": "sha512-ZCBHzpDb5skMnc1zFXAXnL3l1FAdi+xZvwxK+PkglMmBrwjpp9nKaWuEvrGnSifCJmBFGxZOOFuwC6KH/s0NuA==",
+ "dev": true,
+ "requires": {
+ "@types/prop-types": "*",
+ "@types/scheduler": "*",
+ "csstype": "^3.0.2"
+ }
+ }
+ }
+ },
+ "unbox-primitive": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.0.2.tgz",
+ "integrity": "sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw==",
+ "requires": {
+ "call-bind": "^1.0.2",
+ "has-bigints": "^1.0.2",
+ "has-symbols": "^1.0.3",
+ "which-boxed-primitive": "^1.0.2"
+ }
+ },
+ "update-browserslist-db": {
+ "version": "1.0.9",
+ "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.9.tgz",
+ "integrity": "sha512-/xsqn21EGVdXI3EXSum1Yckj3ZVZugqyOZQ/CxYPBD/R+ko9NSUScf8tFF4dOKY+2pvSSJA/S+5B8s4Zr4kyvg==",
+ "dev": true,
+ "requires": {
+ "escalade": "^3.1.1",
+ "picocolors": "^1.0.0"
+ }
+ },
+ "uri-js": {
+ "version": "4.4.1",
+ "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz",
+ "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==",
+ "requires": {
+ "punycode": "^2.1.0"
+ }
+ },
+ "use-sync-external-store": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.2.0.tgz",
+ "integrity": "sha512-eEgnFxGQ1Ife9bzYs6VLi8/4X6CObHMw9Qr9tPY43iKwsPw8xE8+EFsf/2cFZ5S3esXgpWgtSCtLNS41F+sKPA==",
+ "requires": {}
+ },
+ "v8-compile-cache": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.3.0.tgz",
+ "integrity": "sha512-l8lCEmLcLYZh4nbunNZvQCJc5pv7+RCwa8q/LdUx8u7lsWvPDKmpodJAJNwkAhJC//dFY48KuIEmjtd4RViDrA=="
+ },
+ "web": {
+ "version": "file:apps/web",
+ "requires": {
+ "@babel/core": "^7.0.0",
+ "@types/node": "^17.0.12",
+ "@types/react": "18.0.17",
+ "eslint": "7.32.0",
+ "eslint-config-custom": "*",
+ "lodash": "^4.17.21",
+ "next": "12.3.0",
+ "next-transpile-modules": "9.0.0",
+ "react": "18.2.0",
+ "react-dom": "18.2.0",
+ "tsconfig": "*",
+ "typescript": "^4.5.3",
+ "ui": "*"
+ },
+ "dependencies": {
+ "lodash": {
+ "version": "4.17.21",
+ "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
+ "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg=="
+ }
+ }
+ },
+ "which": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
+ "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
+ "requires": {
+ "isexe": "^2.0.0"
+ }
+ },
+ "which-boxed-primitive": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz",
+ "integrity": "sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==",
+ "requires": {
+ "is-bigint": "^1.0.1",
+ "is-boolean-object": "^1.1.0",
+ "is-number-object": "^1.0.4",
+ "is-string": "^1.0.5",
+ "is-symbol": "^1.0.3"
+ }
+ },
+ "word-wrap": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz",
+ "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ=="
+ },
+ "wrappy": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
+ "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ=="
+ },
+ "yallist": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
+ "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A=="
+ }
+ }
+}
diff --git a/cli/internal/lockfile/testdata/pnpm-absolute-v6.yaml b/cli/internal/lockfile/testdata/pnpm-absolute-v6.yaml
new file mode 100644
index 0000000..dc5d0e6
--- /dev/null
+++ b/cli/internal/lockfile/testdata/pnpm-absolute-v6.yaml
@@ -0,0 +1,18 @@
+lockfileVersion: "6.0"
+importers:
+ packages/a:
+ dependencies:
+ "@scope/parent":
+ specifier: ^1.0.0
+ version: 1.0.0
+
+packages:
+ /@scope/parent@1.0.0:
+ resolution: { integrity: junk }
+ dependencies:
+ child: /@scope/child@1.0.0
+ dev: false
+
+ /@scope/child@1.0.0:
+ resolution: { integrity: junk }
+ dev: false
diff --git a/cli/internal/lockfile/testdata/pnpm-absolute.yaml b/cli/internal/lockfile/testdata/pnpm-absolute.yaml
new file mode 100644
index 0000000..d39f802
--- /dev/null
+++ b/cli/internal/lockfile/testdata/pnpm-absolute.yaml
@@ -0,0 +1,38 @@
+lockfileVersion: 5.4
+importers:
+ packages/a:
+ specifiers:
+ another: ^1.0.0
+ "@scope/parent": ^1.0.0
+ special: npm:Special@1.2.3
+ dependencies:
+ another: 1.0.0
+ "@scope/parent": 1.0.0
+ special: /Special/1.2.3
+
+packages:
+ /@scope/parent/1.0.0:
+ resolution: { integrity: junk }
+ dependencies:
+ child: /@scope/child/1.0.0
+ dev: false
+
+ /@scope/child/1.0.0:
+ resolution: { integrity: junk }
+ dev: false
+
+ /another/1.0.0:
+ resolution: { integrity: junk }
+ dev: false
+ dependencies:
+ foo: 1.0.0
+
+ /foo/1.0.0:
+ resolution: { integrity: junk }
+ dev: false
+ dependencies:
+ Special: 1.2.3
+
+ /Special/1.2.3:
+ resolution: { integrity: junk }
+ dev: false
diff --git a/cli/internal/lockfile/testdata/pnpm-patch-v6.yaml b/cli/internal/lockfile/testdata/pnpm-patch-v6.yaml
new file mode 100644
index 0000000..b620472
--- /dev/null
+++ b/cli/internal/lockfile/testdata/pnpm-patch-v6.yaml
@@ -0,0 +1,40 @@
+lockfileVersion: "6.0"
+
+patchedDependencies:
+ lodash@4.17.21:
+ hash: lgum37zgng4nfkynzh3cs7wdeq
+ path: patches/lodash@4.17.21.patch
+ "@babel/helper-string-parser@7.19.4":
+ hash: wjhgmpzh47qmycrzgpeyoyh3ce
+ path: patches/@babel__helper-string-parser@7.19.4.patch
+
+importers:
+ .: {}
+
+ packages/a:
+ dependencies:
+ lodash:
+ specifier: ^4.17.21
+ version: 4.17.21(patch_hash=lgum37zgng4nfkynzh3cs7wdeq)
+
+ packages/b:
+ dependencies:
+ "@babel/helper-string-parser":
+ specifier: ^7.19.4
+ version: 7.19.4(patch_hash=wjhgmpzh47qmycrzgpeyoyh3ce)(@babel/core@7.21.0)
+
+packages:
+ /@babel/helper-string-parser@7.19.4(patch_hash=wjhgmpzh47qmycrzgpeyoyh3ce)(@babel/core@7.21.0):
+ resolution:
+ {
+ integrity: sha512-nHtDoQcuqFmwYNYPz3Rah5ph2p8PFeFCsZk9A/48dPc/rGocJ5J3hAAZ7pb76VWX3fZKu+uEr/FhH5jLx7umrw==,
+ }
+ engines: { node: ">=6.9.0" }
+ dev: false
+
+ /lodash@4.17.21(patch_hash=lgum37zgng4nfkynzh3cs7wdeq):
+ resolution:
+ {
+ integrity: sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==,
+ }
+ dev: false
diff --git a/cli/internal/lockfile/testdata/pnpm-patch.yaml b/cli/internal/lockfile/testdata/pnpm-patch.yaml
new file mode 100644
index 0000000..ea84d72
--- /dev/null
+++ b/cli/internal/lockfile/testdata/pnpm-patch.yaml
@@ -0,0 +1,63 @@
+lockfileVersion: 5.4
+
+patchedDependencies:
+ is-odd@3.0.1:
+ hash: nrrwwz7lemethtlvvm75r5bmhq
+ path: patches/is-odd@3.0.1.patch
+ "@babel/core@7.20.12":
+ hash: 3hyn7hbvzkemudbydlwjmrb65y
+ path: patches/@babel__core@7.20.12.patch
+ moleculer@0.14.28:
+ hash: 5pk7ojv7qbqha75ozglk4y4f74
+ path: patches/moleculer@0.14.28.patch
+
+importers:
+ .:
+ specifiers: {}
+
+ packages/dependency:
+ specifiers:
+ is-odd: ^3.0.1
+ "@babel/core": ^7.20.12
+ dependencies:
+ is-odd: 3.0.1_nrrwwz7lemethtlvvm75r5bmhq
+ "@babel/core": 7.20.12_3hyn7hbvzkemudbydlwjmrb65y
+
+packages:
+ /@babel/core/7.20.12_3hyn7hbvzkemudbydlwjmrb65y:
+ resolution:
+ {
+ integrity: sha512-XsMfHovsUYHFMdrIHkZphTN/2Hzzi78R08NuHfDBehym2VsPDL6Zn/JAD/JQdnRvbSsbQc4mVaU1m6JgtTEElg==,
+ }
+ engines: { node: ">=6.9.0" }
+ dev: false
+
+ /is-number/6.0.0:
+ resolution:
+ {
+ integrity: sha512-Wu1VHeILBK8KAWJUAiSZQX94GmOE45Rg6/538fKwiloUu21KncEkYGPqob2oSZ5mUT73vLGrHQjKw3KMPwfDzg==,
+ }
+ engines: { node: ">=0.10.0" }
+ dev: false
+
+ /is-odd/3.0.1_nrrwwz7lemethtlvvm75r5bmhq:
+ resolution:
+ {
+ integrity: sha512-CQpnWPrDwmP1+SMHXZhtLtJv90yiyVfluGsX5iNCVkrhQtU3TQHsUWPG9wkdk9Lgd5yNpAg9jQEo90CBaXgWMA==,
+ }
+ engines: { node: ">=4" }
+ dependencies:
+ is-number: 6.0.0
+ dev: false
+ patched: true
+
+ /moleculer/0.14.28_5pk7ojv7qbqha75ozglk4y4f74_kumip57h7zlinbhp4gz3jrbqry:
+ resolution:
+ {
+ integrity: sha512-CQpnWPrDwmP1+SMHXZhtLtJv90yiyVfluGsX5iNCVkrhQtU3TQHsUWPG9wkdk9Lgd5yNpAg9jQEo90CBaXgWMA==,
+ }
+ engines: { node: ">=4" }
+ dependencies:
+ is-number: 6.0.0
+ dev: false
+ patched: true
diff --git a/cli/internal/lockfile/testdata/pnpm-peer-v6.yaml b/cli/internal/lockfile/testdata/pnpm-peer-v6.yaml
new file mode 100644
index 0000000..feddd07
--- /dev/null
+++ b/cli/internal/lockfile/testdata/pnpm-peer-v6.yaml
@@ -0,0 +1,67 @@
+lockfileVersion: "6.0"
+
+importers:
+ .: {}
+
+ apps/web:
+ dependencies:
+ next:
+ specifier: 13.0.4
+ version: 13.0.4(react-dom@18.2.0)(react@18.2.0)
+ react:
+ specifier: 18.2.0
+ version: 18.2.0
+ react-dom:
+ specifier: 18.2.0
+ version: 18.2.0(react@18.2.0)
+
+ packages/next-config: {}
+
+ packages/package-for-ci: {}
+
+ packages/tsconfig: {}
+
+packages:
+ /next@13.0.4:
+ resolution:
+ {
+ integrity: sha512-4P0MvbjPCI1E/UPL1GrTXtYlgFnbBbY3JQ+AMY8jYE2SwyvCWctEJySoRjveznAHjrl6TIjuAJeB8u1c2StYUQ==,
+ }
+ engines: { node: ">=14.6.0" }
+ hasBin: true
+ peerDependencies:
+ fibers: ">= 3.1.0"
+ node-sass: ^6.0.0 || ^7.0.0
+ react: ^18.2.0
+ react-dom: ^18.2.0
+ sass: ^1.3.0
+ peerDependenciesMeta:
+ fibers:
+ optional: true
+ node-sass:
+ optional: true
+ sass:
+ optional: true
+ dev: true
+
+ /next@13.0.4(react-dom@18.2.0)(react@18.2.0):
+ resolution:
+ {
+ integrity: sha512-4P0MvbjPCI1E/UPL1GrTXtYlgFnbBbY3JQ+AMY8jYE2SwyvCWctEJySoRjveznAHjrl6TIjuAJeB8u1c2StYUQ==,
+ }
+ engines: { node: ">=14.6.0" }
+ hasBin: true
+ peerDependencies:
+ fibers: ">= 3.1.0"
+ node-sass: ^6.0.0 || ^7.0.0
+ react: ^18.2.0
+ react-dom: ^18.2.0
+ sass: ^1.3.0
+ peerDependenciesMeta:
+ fibers:
+ optional: true
+ node-sass:
+ optional: true
+ sass:
+ optional: true
+ dev: false
diff --git a/cli/internal/lockfile/testdata/pnpm-top-level-dupe.yaml b/cli/internal/lockfile/testdata/pnpm-top-level-dupe.yaml
new file mode 100644
index 0000000..6837f22
--- /dev/null
+++ b/cli/internal/lockfile/testdata/pnpm-top-level-dupe.yaml
@@ -0,0 +1,36 @@
+lockfileVersion: 5.4
+
+importers:
+ packages/a:
+ specifiers:
+ ci-info: ^2.0.0
+ is-ci: ^3.0.1
+ dependencies:
+ ci-info: 2.0.0
+ is-ci: 3.0.1
+
+packages:
+ /ci-info/2.0.0:
+ resolution:
+ {
+ integrity: sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ==,
+ }
+ dev: false
+
+ /ci-info/3.7.1:
+ resolution:
+ {
+ integrity: sha512-4jYS4MOAaCIStSRwiuxc4B8MYhIe676yO1sYGzARnjXkWpmzZMMYxY6zu8WYWDhSuth5zhrQ1rhNSibyyvv4/w==,
+ }
+ engines: { node: ">=8" }
+ dev: false
+
+ /is-ci/3.0.1:
+ resolution:
+ {
+ integrity: sha512-ZYvCgrefwqoQ6yTyYUbQu64HsITZ3NfKX1lzaEYdkTDcfKzzCI/wthRRYKkdjHKFVgNiXKAKm65Zo1pk2as/QQ==,
+ }
+ hasBin: true
+ dependencies:
+ ci-info: 3.7.1
+ dev: false
diff --git a/cli/internal/lockfile/testdata/pnpm6-workspace.yaml b/cli/internal/lockfile/testdata/pnpm6-workspace.yaml
new file mode 100644
index 0000000..daf92b7
--- /dev/null
+++ b/cli/internal/lockfile/testdata/pnpm6-workspace.yaml
@@ -0,0 +1,1704 @@
+lockfileVersion: 5.3
+
+importers:
+ .:
+ specifiers:
+ "@pnpm/make-dedicated-lockfile": ^0.3.19
+ devDependencies:
+ "@pnpm/make-dedicated-lockfile": 0.3.19
+
+ packages/a:
+ specifiers:
+ b: workspace:*
+ express: ^4.18.1
+ dependencies:
+ b: link:../b
+ express: 4.18.1
+
+ packages/b:
+ specifiers:
+ c: workspace:*
+ lodash: ^4.17.21
+ dependencies:
+ c: link:../c
+ lodash: 4.17.21
+
+ packages/c:
+ specifiers:
+ chalk: ^5.0.1
+ dependencies:
+ chalk: 5.0.1
+
+packages:
+ /@babel/code-frame/7.18.6:
+ resolution:
+ {
+ integrity: sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q==,
+ }
+ engines: { node: ">=6.9.0" }
+ dependencies:
+ "@babel/highlight": 7.18.6
+ dev: true
+
+ /@babel/helper-validator-identifier/7.18.6:
+ resolution:
+ {
+ integrity: sha512-MmetCkz9ej86nJQV+sFCxoGGrUbU3q02kgLciwkrt9QqEB7cP39oKEY0PakknEO0Gu20SskMRi+AYZ3b1TpN9g==,
+ }
+ engines: { node: ">=6.9.0" }
+ dev: true
+
+ /@babel/highlight/7.18.6:
+ resolution:
+ {
+ integrity: sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g==,
+ }
+ engines: { node: ">=6.9.0" }
+ dependencies:
+ "@babel/helper-validator-identifier": 7.18.6
+ chalk: 2.4.2
+ js-tokens: 4.0.0
+ dev: true
+
+ /@pnpm/constants/6.1.0:
+ resolution:
+ {
+ integrity: sha512-L6AiU3OXv9kjKGTJN9j8n1TeJGDcLX9atQlZvAkthlvbXjvKc5SKNWESc/eXhr5nEfuMWhQhiKHDJCpYejmeCQ==,
+ }
+ engines: { node: ">=14.19" }
+ dev: true
+
+ /@pnpm/crypto.base32-hash/1.0.1:
+ resolution:
+ {
+ integrity: sha512-pzAXNn6KxTA3kbcI3iEnYs4vtH51XEVqmK/1EiD18MaPKylhqy8UvMJK3zKG+jeP82cqQbozcTGm4yOQ8i3vNw==,
+ }
+ engines: { node: ">=14.6" }
+ dependencies:
+ rfc4648: 1.5.2
+ dev: true
+
+ /@pnpm/error/3.0.1:
+ resolution:
+ {
+ integrity: sha512-hMlbWbFcfcfolNfSjKjpeaZFow71kNg438LZ8rAd01swiVIYRUf/sRv8gGySru6AijYfz5UqslpIJRDbYBkgQA==,
+ }
+ engines: { node: ">=14.19" }
+ dependencies:
+ "@pnpm/constants": 6.1.0
+ dev: true
+
+ /@pnpm/exec/2.0.0:
+ resolution:
+ {
+ integrity: sha512-b5ALfWEOFQprWKntN7MF8XWCyslBk2c8u20GEDcDDQOs6c0HyHlWxX5lig8riQKdS000U6YyS4L4b32NOleXAQ==,
+ }
+ engines: { node: ">=10" }
+ dependencies:
+ "@pnpm/self-installer": 2.2.1
+ command-exists: 1.2.9
+ cross-spawn: 7.0.3
+ dev: true
+
+ /@pnpm/exportable-manifest/3.1.2:
+ resolution:
+ {
+ integrity: sha512-IvTBwt3n73pXsU6iS1Y4OipBg3GBN37I/mUR8t3q5N0c5TkVxj9xAsra5/m7mX4dsYCv9BPL6Rw+MuKSV5P1hA==,
+ }
+ engines: { node: ">=14.6" }
+ dependencies:
+ "@pnpm/error": 3.0.1
+ "@pnpm/read-project-manifest": 3.0.9
+ "@pnpm/types": 8.5.0
+ ramda: /@pnpm/ramda/0.28.1
+ dev: true
+
+ /@pnpm/find-workspace-dir/4.0.2:
+ resolution:
+ {
+ integrity: sha512-gU7ycFSWuEGJh7RE/STa33Ch27geODTXIfc+ntiE1BietxfpJIAk34zz51kTUuCFthBkpHlO6yV7jgHD2Tuc3g==,
+ }
+ engines: { node: ">=14.6" }
+ dependencies:
+ "@pnpm/error": 3.0.1
+ find-up: 5.0.0
+ dev: true
+
+ /@pnpm/git-utils/0.1.0:
+ resolution:
+ {
+ integrity: sha512-W3zsG9585cKL+FqgcT+IfTgZX5C+CbNkFjOnJN+qbysT1N30+BbvEByCcDMsTy7QDrAk6oS7WU1Rym3U2xlh2Q==,
+ }
+ engines: { node: ">=14.6" }
+ dependencies:
+ execa: /safe-execa/0.1.2
+ dev: true
+
+ /@pnpm/graceful-fs/2.0.0:
+ resolution:
+ {
+ integrity: sha512-ogUZCGf0/UILZt6d8PsO4gA4pXh7f0BumXeFkcCe4AQ65PXPKfAkHC0C30Lheh2EgFOpLZm3twDP1Eiww18gew==,
+ }
+ engines: { node: ">=14.19" }
+ dependencies:
+ graceful-fs: 4.2.10
+ dev: true
+
+ /@pnpm/lockfile-file/5.3.3_@pnpm+logger@4.0.0:
+ resolution:
+ {
+ integrity: sha512-IOvjeMRX+++osG9VsfSd7+hVa/sIzhqdrm/nFcL7AexFhC7wjXbWW3YMlN5Cw4v0fwm93fgRZlikIKJ7BmkBBA==,
+ }
+ engines: { node: ">=14.6" }
+ peerDependencies:
+ "@pnpm/logger": ^4.0.0
+ dependencies:
+ "@pnpm/constants": 6.1.0
+ "@pnpm/error": 3.0.1
+ "@pnpm/git-utils": 0.1.0
+ "@pnpm/lockfile-types": 4.3.1
+ "@pnpm/logger": 4.0.0
+ "@pnpm/merge-lockfile-changes": 3.0.9
+ "@pnpm/types": 8.5.0
+ "@zkochan/rimraf": 2.1.2
+ comver-to-semver: 1.0.0
+ js-yaml: /@zkochan/js-yaml/0.0.6
+ normalize-path: 3.0.0
+ ramda: /@pnpm/ramda/0.28.1
+ semver: 7.3.7
+ sort-keys: 4.2.0
+ strip-bom: 4.0.0
+ write-file-atomic: 3.0.3
+ dev: true
+
+ /@pnpm/lockfile-types/4.3.1:
+ resolution:
+ {
+ integrity: sha512-xoorF+CuuUvpjfi8Uw/xkf8LI9VDzs9W1gjSxkKS8UwK60zU5fu4agILJfVVGlHO1tnjJeGRuspBjp7UZ8ufMA==,
+ }
+ engines: { node: ">=14.6" }
+ dependencies:
+ "@pnpm/types": 8.5.0
+ dev: true
+
+ /@pnpm/logger/4.0.0:
+ resolution:
+ {
+ integrity: sha512-SIShw+k556e7S7tLZFVSIHjCdiVog1qWzcKW2RbLEHPItdisAFVNIe34kYd9fMSswTlSRLS/qRjw3ZblzWmJ9Q==,
+ }
+ engines: { node: ">=12.17" }
+ dependencies:
+ bole: 4.0.1
+ ndjson: 2.0.0
+ dev: true
+
+ /@pnpm/make-dedicated-lockfile/0.3.19:
+ resolution:
+ {
+ integrity: sha512-VHllqMh5zviSHds2kOlWSiwmxos3LLGWCVIHpo+HX45D3TXx+oMOgE8k6WB0dSOTVIuGKduoCNTGeSW4p2bD2w==,
+ }
+ engines: { node: ">=14.6" }
+ hasBin: true
+ dependencies:
+ "@pnpm/error": 3.0.1
+ "@pnpm/exec": 2.0.0
+ "@pnpm/exportable-manifest": 3.1.2
+ "@pnpm/find-workspace-dir": 4.0.2
+ "@pnpm/lockfile-file": 5.3.3_@pnpm+logger@4.0.0
+ "@pnpm/logger": 4.0.0
+ "@pnpm/prune-lockfile": 4.0.14
+ "@pnpm/read-project-manifest": 3.0.9
+ "@pnpm/types": 8.5.0
+ ramda: /@pnpm/ramda/0.28.1
+ rename-overwrite: 4.0.2
+ dev: true
+
+ /@pnpm/merge-lockfile-changes/3.0.9:
+ resolution:
+ {
+ integrity: sha512-UOl3AYsi13R8bvQNJPNUml8sZYKBRns0xjAcPQomoX3WTU0dv+KzVyv86Iv86YlApP0aJj9MS8Vq++JOC10RKg==,
+ }
+ engines: { node: ">=14.6" }
+ dependencies:
+ "@pnpm/lockfile-types": 4.3.1
+ comver-to-semver: 1.0.0
+ ramda: /@pnpm/ramda/0.28.1
+ semver: 7.3.7
+ dev: true
+
+ /@pnpm/prune-lockfile/4.0.14:
+ resolution:
+ {
+ integrity: sha512-lICCgm9j3e2Bu75zK4PA1FKjpu9pCcagRbZWruONBf44byyEkHcnTf8b8a9M1MvtoiArhmKOmyOVJ2OFyBBRyA==,
+ }
+ engines: { node: ">=14.6" }
+ dependencies:
+ "@pnpm/constants": 6.1.0
+ "@pnpm/lockfile-types": 4.3.1
+ "@pnpm/types": 8.5.0
+ dependency-path: 9.2.4
+ ramda: /@pnpm/ramda/0.28.1
+ dev: true
+
+ /@pnpm/ramda/0.28.1:
+ resolution:
+ {
+ integrity: sha512-zcAG+lvU0fMziNeGXpPyCyCJYp5ZVrPElEE4t14jAmViaihohocZ+dDkcRIyAomox8pQsuZnv1EyHR+pOhmUWw==,
+ }
+ dev: true
+
+ /@pnpm/read-project-manifest/3.0.9:
+ resolution:
+ {
+ integrity: sha512-27j40C48hA/tqsCiqk9ApJxp2g6WGrrj2RSs0NKhsSHynxAuA1tIvwatNISQbAiMjZiu1lfhzhq8m1QdblyNmA==,
+ }
+ engines: { node: ">=14.6" }
+ dependencies:
+ "@pnpm/error": 3.0.1
+ "@pnpm/graceful-fs": 2.0.0
+ "@pnpm/types": 8.5.0
+ "@pnpm/write-project-manifest": 3.0.7
+ detect-indent: 6.1.0
+ fast-deep-equal: 3.1.3
+ is-windows: 1.0.2
+ json5: 2.2.1
+ parse-json: 5.2.0
+ read-yaml-file: 2.1.0
+ sort-keys: 4.2.0
+ strip-bom: 4.0.0
+ dev: true
+
+ /@pnpm/self-installer/2.2.1:
+ resolution:
+ {
+ integrity: sha512-aefLe96wAWghkx6q1PwbVS1Iz1iGE+HKwkTmtzWLFXeGhbknaIdG2voMwaBGIYGCSxm8sDKR1uLO4aRRAYuc+Q==,
+ }
+ engines: { node: ">=4" }
+ hasBin: true
+ dev: true
+
+ /@pnpm/types/8.5.0:
+ resolution:
+ {
+ integrity: sha512-PSKnhkwgiZtp9dcWZR9mPz2W9UopmADr9o8FTqazo5kjUSh2xQmDUSJOJ/ZWcfNziO64Ix/VbcxKIZeplhog1Q==,
+ }
+ engines: { node: ">=14.6" }
+ dev: true
+
+ /@pnpm/write-project-manifest/3.0.7:
+ resolution:
+ {
+ integrity: sha512-rMgIWR52asESg1D7Cp/vBi3dBsv18iUWPvvtYNynrcOjRdE3NsH5CAdfZP/XN6HJF6CSY8rS9W4YC5Q3JGtxiw==,
+ }
+ engines: { node: ">=14.6" }
+ dependencies:
+ "@pnpm/types": 8.5.0
+ json5: 2.2.1
+ write-file-atomic: 3.0.3
+ write-yaml-file: 4.2.0
+ dev: true
+
+ /@zkochan/js-yaml/0.0.6:
+ resolution:
+ {
+ integrity: sha512-nzvgl3VfhcELQ8LyVrYOru+UtAy1nrygk2+AGbTm8a5YcO6o8lSjAT+pfg3vJWxIoZKOUhrK6UU7xW/+00kQrg==,
+ }
+ hasBin: true
+ dependencies:
+ argparse: 2.0.1
+ dev: true
+
+ /@zkochan/rimraf/2.1.2:
+ resolution:
+ {
+ integrity: sha512-Lc2oK51J6aQWcLWTloobJun5ZF41BbTDdLvE+aMcexoVWFoFqvZmnZoyXR2IZk6NJEVoZW8tjgtvQLfTsmRs2Q==,
+ }
+ engines: { node: ">=12.10" }
+ dependencies:
+ rimraf: 3.0.2
+ dev: true
+
+ /@zkochan/which/2.0.3:
+ resolution:
+ {
+ integrity: sha512-C1ReN7vt2/2O0fyTsx5xnbQuxBrmG5NMSbcIkPKCCfCTJgpZBsuRYzFXHj3nVq8vTfK7vxHUmzfCpSHgO7j4rg==,
+ }
+ engines: { node: ">= 8" }
+ hasBin: true
+ dependencies:
+ isexe: 2.0.0
+ dev: true
+
+ /accepts/1.3.8:
+ resolution:
+ {
+ integrity: sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==,
+ }
+ engines: { node: ">= 0.6" }
+ dependencies:
+ mime-types: 2.1.35
+ negotiator: 0.6.3
+ dev: false
+
+ /ansi-styles/3.2.1:
+ resolution:
+ {
+ integrity: sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==,
+ }
+ engines: { node: ">=4" }
+ dependencies:
+ color-convert: 1.9.3
+ dev: true
+
+ /argparse/2.0.1:
+ resolution:
+ {
+ integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==,
+ }
+ dev: true
+
+ /array-flatten/1.1.1:
+ resolution:
+ {
+ integrity: sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==,
+ }
+ dev: false
+
+ /balanced-match/1.0.2:
+ resolution:
+ {
+ integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==,
+ }
+ dev: true
+
+ /body-parser/1.20.0:
+ resolution:
+ {
+ integrity: sha512-DfJ+q6EPcGKZD1QWUjSpqp+Q7bDQTsQIF4zfUAtZ6qk+H/3/QRhg9CEp39ss+/T2vw0+HaidC0ecJj/DRLIaKg==,
+ }
+ engines: { node: ">= 0.8", npm: 1.2.8000 || >= 1.4.16 }
+ dependencies:
+ bytes: 3.1.2
+ content-type: 1.0.4
+ debug: 2.6.9
+ depd: 2.0.0
+ destroy: 1.2.0
+ http-errors: 2.0.0
+ iconv-lite: 0.4.24
+ on-finished: 2.4.1
+ qs: 6.10.3
+ raw-body: 2.5.1
+ type-is: 1.6.18
+ unpipe: 1.0.0
+ dev: false
+
+ /bole/4.0.1:
+ resolution:
+ {
+ integrity: sha512-42r0aSOJFJti2l6LasBHq2BuWJzohGs349olQnH/ETlJo87XnoWw7UT8pGE6UstjxzOKkwz7tjoFcmSr6L16vg==,
+ }
+ dependencies:
+ fast-safe-stringify: 2.1.1
+ individual: 3.0.0
+ dev: true
+
+ /brace-expansion/1.1.11:
+ resolution:
+ {
+ integrity: sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==,
+ }
+ dependencies:
+ balanced-match: 1.0.2
+ concat-map: 0.0.1
+ dev: true
+
+ /bytes/3.1.2:
+ resolution:
+ {
+ integrity: sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==,
+ }
+ engines: { node: ">= 0.8" }
+ dev: false
+
+ /call-bind/1.0.2:
+ resolution:
+ {
+ integrity: sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==,
+ }
+ dependencies:
+ function-bind: 1.1.1
+ get-intrinsic: 1.1.2
+ dev: false
+
+ /chalk/2.4.2:
+ resolution:
+ {
+ integrity: sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==,
+ }
+ engines: { node: ">=4" }
+ dependencies:
+ ansi-styles: 3.2.1
+ escape-string-regexp: 1.0.5
+ supports-color: 5.5.0
+ dev: true
+
+ /chalk/5.0.1:
+ resolution:
+ {
+ integrity: sha512-Fo07WOYGqMfCWHOzSXOt2CxDbC6skS/jO9ynEcmpANMoPrD+W1r1K6Vx7iNm+AQmETU1Xr2t+n8nzkV9t6xh3w==,
+ }
+ engines: { node: ^12.17.0 || ^14.13 || >=16.0.0 }
+ dev: false
+
+ /color-convert/1.9.3:
+ resolution:
+ {
+ integrity: sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==,
+ }
+ dependencies:
+ color-name: 1.1.3
+ dev: true
+
+ /color-name/1.1.3:
+ resolution:
+ {
+ integrity: sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==,
+ }
+ dev: true
+
+ /command-exists/1.2.9:
+ resolution:
+ {
+ integrity: sha512-LTQ/SGc+s0Xc0Fu5WaKnR0YiygZkm9eKFvyS+fRsU7/ZWFF8ykFM6Pc9aCVf1+xasOOZpO3BAVgVrKvsqKHV7w==,
+ }
+ dev: true
+
+ /comver-to-semver/1.0.0:
+ resolution:
+ {
+ integrity: sha512-gcGtbRxjwROQOdXLUWH1fQAXqThUVRZ219aAwgtX3KfYw429/Zv6EIJRf5TBSzWdAGwePmqH7w70WTaX4MDqag==,
+ }
+ engines: { node: ">=12.17" }
+ dev: true
+
+ /concat-map/0.0.1:
+ resolution:
+ {
+ integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==,
+ }
+ dev: true
+
+ /content-disposition/0.5.4:
+ resolution:
+ {
+ integrity: sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==,
+ }
+ engines: { node: ">= 0.6" }
+ dependencies:
+ safe-buffer: 5.2.1
+ dev: false
+
+ /content-type/1.0.4:
+ resolution:
+ {
+ integrity: sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==,
+ }
+ engines: { node: ">= 0.6" }
+ dev: false
+
+ /cookie-signature/1.0.6:
+ resolution:
+ {
+ integrity: sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==,
+ }
+ dev: false
+
+ /cookie/0.5.0:
+ resolution:
+ {
+ integrity: sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==,
+ }
+ engines: { node: ">= 0.6" }
+ dev: false
+
+ /cross-spawn/7.0.3:
+ resolution:
+ {
+ integrity: sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==,
+ }
+ engines: { node: ">= 8" }
+ dependencies:
+ path-key: 3.1.1
+ shebang-command: 2.0.0
+ which: 2.0.2
+ dev: true
+
+ /debug/2.6.9:
+ resolution:
+ {
+ integrity: sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==,
+ }
+ dependencies:
+ ms: 2.0.0
+ dev: false
+
+ /depd/2.0.0:
+ resolution:
+ {
+ integrity: sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==,
+ }
+ engines: { node: ">= 0.8" }
+ dev: false
+
+ /dependency-path/9.2.4:
+ resolution:
+ {
+ integrity: sha512-bH29ZcKyo/i5nr4SgnVZGksuoZzroOWpHtKbq8fKdKgJDr0SdUIPu2EwjJkjzbw9SqRzWd912e0opHYJTkFf6w==,
+ }
+ engines: { node: ">=14.6" }
+ dependencies:
+ "@pnpm/crypto.base32-hash": 1.0.1
+ "@pnpm/types": 8.5.0
+ encode-registry: 3.0.0
+ semver: 7.3.7
+ dev: true
+
+ /destroy/1.2.0:
+ resolution:
+ {
+ integrity: sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==,
+ }
+ engines: { node: ">= 0.8", npm: 1.2.8000 || >= 1.4.16 }
+ dev: false
+
+ /detect-indent/6.1.0:
+ resolution:
+ {
+ integrity: sha512-reYkTUJAZb9gUuZ2RvVCNhVHdg62RHnJ7WJl8ftMi4diZ6NWlciOzQN88pUhSELEwflJht4oQDv0F0BMlwaYtA==,
+ }
+ engines: { node: ">=8" }
+ dev: true
+
+ /ee-first/1.1.1:
+ resolution:
+ {
+ integrity: sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==,
+ }
+ dev: false
+
+ /encode-registry/3.0.0:
+ resolution:
+ {
+ integrity: sha512-2fRYji8K6FwYuQ6EPBKR/J9mcqb7kIoNqt1vGvJr3NrvKfncRiNm00Oxo6gi/YJF8R5Sp2bNFSFdGKTG0rje1Q==,
+ }
+ engines: { node: ">=10" }
+ dependencies:
+ mem: 8.1.1
+ dev: true
+
+ /encodeurl/1.0.2:
+ resolution:
+ {
+ integrity: sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==,
+ }
+ engines: { node: ">= 0.8" }
+ dev: false
+
+ /error-ex/1.3.2:
+ resolution:
+ {
+ integrity: sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==,
+ }
+ dependencies:
+ is-arrayish: 0.2.1
+ dev: true
+
+ /escape-html/1.0.3:
+ resolution:
+ {
+ integrity: sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==,
+ }
+ dev: false
+
+ /escape-string-regexp/1.0.5:
+ resolution:
+ {
+ integrity: sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==,
+ }
+ engines: { node: ">=0.8.0" }
+ dev: true
+
+ /etag/1.8.1:
+ resolution:
+ {
+ integrity: sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==,
+ }
+ engines: { node: ">= 0.6" }
+ dev: false
+
+ /execa/5.1.1:
+ resolution:
+ {
+ integrity: sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==,
+ }
+ engines: { node: ">=10" }
+ dependencies:
+ cross-spawn: 7.0.3
+ get-stream: 6.0.1
+ human-signals: 2.1.0
+ is-stream: 2.0.1
+ merge-stream: 2.0.0
+ npm-run-path: 4.0.1
+ onetime: 5.1.2
+ signal-exit: 3.0.7
+ strip-final-newline: 2.0.0
+ dev: true
+
+ /express/4.18.1:
+ resolution:
+ {
+ integrity: sha512-zZBcOX9TfehHQhtupq57OF8lFZ3UZi08Y97dwFCkD8p9d/d2Y3M+ykKcwaMDEL+4qyUolgBDX6AblpR3fL212Q==,
+ }
+ engines: { node: ">= 0.10.0" }
+ dependencies:
+ accepts: 1.3.8
+ array-flatten: 1.1.1
+ body-parser: 1.20.0
+ content-disposition: 0.5.4
+ content-type: 1.0.4
+ cookie: 0.5.0
+ cookie-signature: 1.0.6
+ debug: 2.6.9
+ depd: 2.0.0
+ encodeurl: 1.0.2
+ escape-html: 1.0.3
+ etag: 1.8.1
+ finalhandler: 1.2.0
+ fresh: 0.5.2
+ http-errors: 2.0.0
+ merge-descriptors: 1.0.1
+ methods: 1.1.2
+ on-finished: 2.4.1
+ parseurl: 1.3.3
+ path-to-regexp: 0.1.7
+ proxy-addr: 2.0.7
+ qs: 6.10.3
+ range-parser: 1.2.1
+ safe-buffer: 5.2.1
+ send: 0.18.0
+ serve-static: 1.15.0
+ setprototypeof: 1.2.0
+ statuses: 2.0.1
+ type-is: 1.6.18
+ utils-merge: 1.0.1
+ vary: 1.1.2
+ dev: false
+
+ /fast-deep-equal/3.1.3:
+ resolution:
+ {
+ integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==,
+ }
+ dev: true
+
+ /fast-safe-stringify/2.1.1:
+ resolution:
+ {
+ integrity: sha512-W+KJc2dmILlPplD/H4K9l9LcAHAfPtP6BY84uVLXQ6Evcz9Lcg33Y2z1IVblT6xdY54PXYVHEv+0Wpq8Io6zkA==,
+ }
+ dev: true
+
+ /finalhandler/1.2.0:
+ resolution:
+ {
+ integrity: sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==,
+ }
+ engines: { node: ">= 0.8" }
+ dependencies:
+ debug: 2.6.9
+ encodeurl: 1.0.2
+ escape-html: 1.0.3
+ on-finished: 2.4.1
+ parseurl: 1.3.3
+ statuses: 2.0.1
+ unpipe: 1.0.0
+ dev: false
+
+ /find-up/5.0.0:
+ resolution:
+ {
+ integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==,
+ }
+ engines: { node: ">=10" }
+ dependencies:
+ locate-path: 6.0.0
+ path-exists: 4.0.0
+ dev: true
+
+ /forwarded/0.2.0:
+ resolution:
+ {
+ integrity: sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==,
+ }
+ engines: { node: ">= 0.6" }
+ dev: false
+
+ /fresh/0.5.2:
+ resolution:
+ {
+ integrity: sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==,
+ }
+ engines: { node: ">= 0.6" }
+ dev: false
+
+ /fs.realpath/1.0.0:
+ resolution:
+ {
+ integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==,
+ }
+ dev: true
+
+ /function-bind/1.1.1:
+ resolution:
+ {
+ integrity: sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==,
+ }
+ dev: false
+
+ /get-intrinsic/1.1.2:
+ resolution:
+ {
+ integrity: sha512-Jfm3OyCxHh9DJyc28qGk+JmfkpO41A4XkneDSujN9MDXrm4oDKdHvndhZ2dN94+ERNfkYJWDclW6k2L/ZGHjXA==,
+ }
+ dependencies:
+ function-bind: 1.1.1
+ has: 1.0.3
+ has-symbols: 1.0.3
+ dev: false
+
+ /get-stream/6.0.1:
+ resolution:
+ {
+ integrity: sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==,
+ }
+ engines: { node: ">=10" }
+ dev: true
+
+ /glob/7.2.3:
+ resolution:
+ {
+ integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==,
+ }
+ dependencies:
+ fs.realpath: 1.0.0
+ inflight: 1.0.6
+ inherits: 2.0.4
+ minimatch: 3.1.2
+ once: 1.4.0
+ path-is-absolute: 1.0.1
+ dev: true
+
+ /graceful-fs/4.2.10:
+ resolution:
+ {
+ integrity: sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==,
+ }
+ dev: true
+
+ /has-flag/3.0.0:
+ resolution:
+ {
+ integrity: sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==,
+ }
+ engines: { node: ">=4" }
+ dev: true
+
+ /has-symbols/1.0.3:
+ resolution:
+ {
+ integrity: sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==,
+ }
+ engines: { node: ">= 0.4" }
+ dev: false
+
+ /has/1.0.3:
+ resolution:
+ {
+ integrity: sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==,
+ }
+ engines: { node: ">= 0.4.0" }
+ dependencies:
+ function-bind: 1.1.1
+ dev: false
+
+ /http-errors/2.0.0:
+ resolution:
+ {
+ integrity: sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==,
+ }
+ engines: { node: ">= 0.8" }
+ dependencies:
+ depd: 2.0.0
+ inherits: 2.0.4
+ setprototypeof: 1.2.0
+ statuses: 2.0.1
+ toidentifier: 1.0.1
+ dev: false
+
+ /human-signals/2.1.0:
+ resolution:
+ {
+ integrity: sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==,
+ }
+ engines: { node: ">=10.17.0" }
+ dev: true
+
+ /iconv-lite/0.4.24:
+ resolution:
+ {
+ integrity: sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==,
+ }
+ engines: { node: ">=0.10.0" }
+ dependencies:
+ safer-buffer: 2.1.2
+ dev: false
+
+ /imurmurhash/0.1.4:
+ resolution:
+ {
+ integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==,
+ }
+ engines: { node: ">=0.8.19" }
+ dev: true
+
+ /individual/3.0.0:
+ resolution:
+ {
+ integrity: sha512-rUY5vtT748NMRbEMrTNiFfy29BgGZwGXUi2NFUVMWQrogSLzlJvQV9eeMWi+g1aVaQ53tpyLAQtd5x/JH0Nh1g==,
+ }
+ dev: true
+
+ /inflight/1.0.6:
+ resolution:
+ {
+ integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==,
+ }
+ dependencies:
+ once: 1.4.0
+ wrappy: 1.0.2
+ dev: true
+
+ /inherits/2.0.4:
+ resolution:
+ {
+ integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==,
+ }
+ dev: false
+
+ /ipaddr.js/1.9.1:
+ resolution:
+ {
+ integrity: sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==,
+ }
+ engines: { node: ">= 0.10" }
+ dev: false
+
+ /is-arrayish/0.2.1:
+ resolution:
+ {
+ integrity: sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==,
+ }
+ dev: true
+
+ /is-plain-obj/2.1.0:
+ resolution:
+ {
+ integrity: sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==,
+ }
+ engines: { node: ">=8" }
+ dev: true
+
+ /is-stream/2.0.1:
+ resolution:
+ {
+ integrity: sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==,
+ }
+ engines: { node: ">=8" }
+ dev: true
+
+ /is-typedarray/1.0.0:
+ resolution:
+ {
+ integrity: sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==,
+ }
+ dev: true
+
+ /is-windows/1.0.2:
+ resolution:
+ {
+ integrity: sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==,
+ }
+ engines: { node: ">=0.10.0" }
+ dev: true
+
+ /isexe/2.0.0:
+ resolution:
+ {
+ integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==,
+ }
+ dev: true
+
+ /js-tokens/4.0.0:
+ resolution:
+ {
+ integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==,
+ }
+ dev: true
+
+ /js-yaml/4.1.0:
+ resolution:
+ {
+ integrity: sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==,
+ }
+ hasBin: true
+ dependencies:
+ argparse: 2.0.1
+ dev: true
+
+ /json-parse-even-better-errors/2.3.1:
+ resolution:
+ {
+ integrity: sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==,
+ }
+ dev: true
+
+ /json-stringify-safe/5.0.1:
+ resolution:
+ {
+ integrity: sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA==,
+ }
+ dev: true
+
+ /json5/2.2.1:
+ resolution:
+ {
+ integrity: sha512-1hqLFMSrGHRHxav9q9gNjJ5EXznIxGVO09xQRrwplcS8qs28pZ8s8hupZAmqDwZUmVZ2Qb2jnyPOWcDH8m8dlA==,
+ }
+ engines: { node: ">=6" }
+ hasBin: true
+ dev: true
+
+ /lines-and-columns/1.2.4:
+ resolution:
+ {
+ integrity: sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==,
+ }
+ dev: true
+
+ /locate-path/6.0.0:
+ resolution:
+ {
+ integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==,
+ }
+ engines: { node: ">=10" }
+ dependencies:
+ p-locate: 5.0.0
+ dev: true
+
+ /lodash/4.17.21:
+ resolution:
+ {
+ integrity: sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==,
+ }
+ dev: false
+
+ /lru-cache/6.0.0:
+ resolution:
+ {
+ integrity: sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==,
+ }
+ engines: { node: ">=10" }
+ dependencies:
+ yallist: 4.0.0
+ dev: true
+
+ /map-age-cleaner/0.1.3:
+ resolution:
+ {
+ integrity: sha512-bJzx6nMoP6PDLPBFmg7+xRKeFZvFboMrGlxmNj9ClvX53KrmvM5bXFXEWjbz4cz1AFn+jWJ9z/DJSz7hrs0w3w==,
+ }
+ engines: { node: ">=6" }
+ dependencies:
+ p-defer: 1.0.0
+ dev: true
+
+ /media-typer/0.3.0:
+ resolution:
+ {
+ integrity: sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==,
+ }
+ engines: { node: ">= 0.6" }
+ dev: false
+
+ /mem/8.1.1:
+ resolution:
+ {
+ integrity: sha512-qFCFUDs7U3b8mBDPyz5EToEKoAkgCzqquIgi9nkkR9bixxOVOre+09lbuH7+9Kn2NFpm56M3GUWVbU2hQgdACA==,
+ }
+ engines: { node: ">=10" }
+ dependencies:
+ map-age-cleaner: 0.1.3
+ mimic-fn: 3.1.0
+ dev: true
+
+ /merge-descriptors/1.0.1:
+ resolution:
+ {
+ integrity: sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==,
+ }
+ dev: false
+
+ /merge-stream/2.0.0:
+ resolution:
+ {
+ integrity: sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==,
+ }
+ dev: true
+
+ /methods/1.1.2:
+ resolution:
+ {
+ integrity: sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==,
+ }
+ engines: { node: ">= 0.6" }
+ dev: false
+
+ /mime-db/1.52.0:
+ resolution:
+ {
+ integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==,
+ }
+ engines: { node: ">= 0.6" }
+ dev: false
+
+ /mime-types/2.1.35:
+ resolution:
+ {
+ integrity: sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==,
+ }
+ engines: { node: ">= 0.6" }
+ dependencies:
+ mime-db: 1.52.0
+ dev: false
+
+ /mime/1.6.0:
+ resolution:
+ {
+ integrity: sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==,
+ }
+ engines: { node: ">=4" }
+ hasBin: true
+ dev: false
+
+ /mimic-fn/2.1.0:
+ resolution:
+ {
+ integrity: sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==,
+ }
+ engines: { node: ">=6" }
+ dev: true
+
+ /mimic-fn/3.1.0:
+ resolution:
+ {
+ integrity: sha512-Ysbi9uYW9hFyfrThdDEQuykN4Ey6BuwPD2kpI5ES/nFTDn/98yxYNLZJcgUAKPT/mcrLLKaGzJR9YVxJrIdASQ==,
+ }
+ engines: { node: ">=8" }
+ dev: true
+
+ /minimatch/3.1.2:
+ resolution:
+ {
+ integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==,
+ }
+ dependencies:
+ brace-expansion: 1.1.11
+ dev: true
+
+ /minimist/1.2.6:
+ resolution:
+ {
+ integrity: sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==,
+ }
+ dev: true
+
+ /ms/2.0.0:
+ resolution:
+ {
+ integrity: sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==,
+ }
+ dev: false
+
+ /ms/2.1.3:
+ resolution:
+ {
+ integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==,
+ }
+ dev: false
+
+ /ndjson/2.0.0:
+ resolution:
+ {
+ integrity: sha512-nGl7LRGrzugTtaFcJMhLbpzJM6XdivmbkdlaGcrk/LXg2KL/YBC6z1g70xh0/al+oFuVFP8N8kiWRucmeEH/qQ==,
+ }
+ engines: { node: ">=10" }
+ hasBin: true
+ dependencies:
+ json-stringify-safe: 5.0.1
+ minimist: 1.2.6
+ readable-stream: 3.6.0
+ split2: 3.2.2
+ through2: 4.0.2
+ dev: true
+
+ /negotiator/0.6.3:
+ resolution:
+ {
+ integrity: sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==,
+ }
+ engines: { node: ">= 0.6" }
+ dev: false
+
+ /normalize-path/3.0.0:
+ resolution:
+ {
+ integrity: sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==,
+ }
+ engines: { node: ">=0.10.0" }
+ dev: true
+
+ /npm-run-path/4.0.1:
+ resolution:
+ {
+ integrity: sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==,
+ }
+ engines: { node: ">=8" }
+ dependencies:
+ path-key: 3.1.1
+ dev: true
+
+ /object-inspect/1.12.2:
+ resolution:
+ {
+ integrity: sha512-z+cPxW0QGUp0mcqcsgQyLVRDoXFQbXOwBaqyF7VIgI4TWNQsDHrBpUQslRmIfAoYWdYzs6UlKJtB2XJpTaNSpQ==,
+ }
+ dev: false
+
+ /on-finished/2.4.1:
+ resolution:
+ {
+ integrity: sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==,
+ }
+ engines: { node: ">= 0.8" }
+ dependencies:
+ ee-first: 1.1.1
+ dev: false
+
+ /once/1.4.0:
+ resolution:
+ {
+ integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==,
+ }
+ dependencies:
+ wrappy: 1.0.2
+ dev: true
+
+ /onetime/5.1.2:
+ resolution:
+ {
+ integrity: sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==,
+ }
+ engines: { node: ">=6" }
+ dependencies:
+ mimic-fn: 2.1.0
+ dev: true
+
+ /p-defer/1.0.0:
+ resolution:
+ {
+ integrity: sha512-wB3wfAxZpk2AzOfUMJNL+d36xothRSyj8EXOa4f6GMqYDN9BJaaSISbsk+wS9abmnebVw95C2Kb5t85UmpCxuw==,
+ }
+ engines: { node: ">=4" }
+ dev: true
+
+ /p-limit/3.1.0:
+ resolution:
+ {
+ integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==,
+ }
+ engines: { node: ">=10" }
+ dependencies:
+ yocto-queue: 0.1.0
+ dev: true
+
+ /p-locate/5.0.0:
+ resolution:
+ {
+ integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==,
+ }
+ engines: { node: ">=10" }
+ dependencies:
+ p-limit: 3.1.0
+ dev: true
+
+ /parse-json/5.2.0:
+ resolution:
+ {
+ integrity: sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==,
+ }
+ engines: { node: ">=8" }
+ dependencies:
+ "@babel/code-frame": 7.18.6
+ error-ex: 1.3.2
+ json-parse-even-better-errors: 2.3.1
+ lines-and-columns: 1.2.4
+ dev: true
+
+ /parseurl/1.3.3:
+ resolution:
+ {
+ integrity: sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==,
+ }
+ engines: { node: ">= 0.8" }
+ dev: false
+
+ /path-exists/4.0.0:
+ resolution:
+ {
+ integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==,
+ }
+ engines: { node: ">=8" }
+ dev: true
+
+ /path-is-absolute/1.0.1:
+ resolution:
+ {
+ integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==,
+ }
+ engines: { node: ">=0.10.0" }
+ dev: true
+
+ /path-key/3.1.1:
+ resolution:
+ {
+ integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==,
+ }
+ engines: { node: ">=8" }
+ dev: true
+
+ /path-name/1.0.0:
+ resolution:
+ {
+ integrity: sha512-/dcAb5vMXH0f51yvMuSUqFpxUcA8JelbRmE5mW/p4CUJxrNgK24IkstnV7ENtg2IDGBOu6izKTG6eilbnbNKWQ==,
+ }
+ dev: true
+
+ /path-to-regexp/0.1.7:
+ resolution:
+ {
+ integrity: sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==,
+ }
+ dev: false
+
+ /proxy-addr/2.0.7:
+ resolution:
+ {
+ integrity: sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==,
+ }
+ engines: { node: ">= 0.10" }
+ dependencies:
+ forwarded: 0.2.0
+ ipaddr.js: 1.9.1
+ dev: false
+
+ /qs/6.10.3:
+ resolution:
+ {
+ integrity: sha512-wr7M2E0OFRfIfJZjKGieI8lBKb7fRCH4Fv5KNPEs7gJ8jadvotdsS08PzOKR7opXhZ/Xkjtt3WF9g38drmyRqQ==,
+ }
+ engines: { node: ">=0.6" }
+ dependencies:
+ side-channel: 1.0.4
+ dev: false
+
+ /range-parser/1.2.1:
+ resolution:
+ {
+ integrity: sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==,
+ }
+ engines: { node: ">= 0.6" }
+ dev: false
+
+ /raw-body/2.5.1:
+ resolution:
+ {
+ integrity: sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig==,
+ }
+ engines: { node: ">= 0.8" }
+ dependencies:
+ bytes: 3.1.2
+ http-errors: 2.0.0
+ iconv-lite: 0.4.24
+ unpipe: 1.0.0
+ dev: false
+
+ /read-yaml-file/2.1.0:
+ resolution:
+ {
+ integrity: sha512-UkRNRIwnhG+y7hpqnycCL/xbTk7+ia9VuVTC0S+zVbwd65DI9eUpRMfsWIGrCWxTU/mi+JW8cHQCrv+zfCbEPQ==,
+ }
+ engines: { node: ">=10.13" }
+ dependencies:
+ js-yaml: 4.1.0
+ strip-bom: 4.0.0
+ dev: true
+
+ /readable-stream/3.6.0:
+ resolution:
+ {
+ integrity: sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==,
+ }
+ engines: { node: ">= 6" }
+ dependencies:
+ inherits: 2.0.4
+ string_decoder: 1.3.0
+ util-deprecate: 1.0.2
+ dev: true
+
+ /rename-overwrite/4.0.2:
+ resolution:
+ {
+ integrity: sha512-L1sgBgagVgOgb1Z6QZr1yJgSMHI4SXQqAH0l/UbeyHnLKxECvKIlyVEmBo4BqsCAZGg0SBSyjCh68lis5PgC7g==,
+ }
+ engines: { node: ">=12.10" }
+ dependencies:
+ "@zkochan/rimraf": 2.1.2
+ dev: true
+
+ /rfc4648/1.5.2:
+ resolution:
+ {
+ integrity: sha512-tLOizhR6YGovrEBLatX1sdcuhoSCXddw3mqNVAcKxGJ+J0hFeJ+SjeWCv5UPA/WU3YzWPPuCVYgXBKZUPGpKtg==,
+ }
+ dev: true
+
+ /rimraf/3.0.2:
+ resolution:
+ {
+ integrity: sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==,
+ }
+ hasBin: true
+ dependencies:
+ glob: 7.2.3
+ dev: true
+
+ /safe-buffer/5.2.1:
+ resolution:
+ {
+ integrity: sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==,
+ }
+ dev: false
+
+ /safe-execa/0.1.2:
+ resolution:
+ {
+ integrity: sha512-vdTshSQ2JsRCgT8eKZWNJIL26C6bVqy1SOmuCMlKHegVeo8KYRobRrefOdUq9OozSPUUiSxrylteeRmLOMFfWg==,
+ }
+ engines: { node: ">=12" }
+ dependencies:
+ "@zkochan/which": 2.0.3
+ execa: 5.1.1
+ path-name: 1.0.0
+ dev: true
+
+ /safer-buffer/2.1.2:
+ resolution:
+ {
+ integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==,
+ }
+ dev: false
+
+ /semver/7.3.7:
+ resolution:
+ {
+ integrity: sha512-QlYTucUYOews+WeEujDoEGziz4K6c47V/Bd+LjSSYcA94p+DmINdf7ncaUinThfvZyu13lN9OY1XDxt8C0Tw0g==,
+ }
+ engines: { node: ">=10" }
+ hasBin: true
+ dependencies:
+ lru-cache: 6.0.0
+ dev: true
+
+ /send/0.18.0:
+ resolution:
+ {
+ integrity: sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==,
+ }
+ engines: { node: ">= 0.8.0" }
+ dependencies:
+ debug: 2.6.9
+ depd: 2.0.0
+ destroy: 1.2.0
+ encodeurl: 1.0.2
+ escape-html: 1.0.3
+ etag: 1.8.1
+ fresh: 0.5.2
+ http-errors: 2.0.0
+ mime: 1.6.0
+ ms: 2.1.3
+ on-finished: 2.4.1
+ range-parser: 1.2.1
+ statuses: 2.0.1
+ dev: false
+
+ /serve-static/1.15.0:
+ resolution:
+ {
+ integrity: sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==,
+ }
+ engines: { node: ">= 0.8.0" }
+ dependencies:
+ encodeurl: 1.0.2
+ escape-html: 1.0.3
+ parseurl: 1.3.3
+ send: 0.18.0
+ dev: false
+
+ /setprototypeof/1.2.0:
+ resolution:
+ {
+ integrity: sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==,
+ }
+ dev: false
+
+ /shebang-command/2.0.0:
+ resolution:
+ {
+ integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==,
+ }
+ engines: { node: ">=8" }
+ dependencies:
+ shebang-regex: 3.0.0
+ dev: true
+
+ /shebang-regex/3.0.0:
+ resolution:
+ {
+ integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==,
+ }
+ engines: { node: ">=8" }
+ dev: true
+
+ /side-channel/1.0.4:
+ resolution:
+ {
+ integrity: sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==,
+ }
+ dependencies:
+ call-bind: 1.0.2
+ get-intrinsic: 1.1.2
+ object-inspect: 1.12.2
+ dev: false
+
+ /signal-exit/3.0.7:
+ resolution:
+ {
+ integrity: sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==,
+ }
+ dev: true
+
+ /sort-keys/4.2.0:
+ resolution:
+ {
+ integrity: sha512-aUYIEU/UviqPgc8mHR6IW1EGxkAXpeRETYcrzg8cLAvUPZcpAlleSXHV2mY7G12GphSH6Gzv+4MMVSSkbdteHg==,
+ }
+ engines: { node: ">=8" }
+ dependencies:
+ is-plain-obj: 2.1.0
+ dev: true
+
+ /split2/3.2.2:
+ resolution:
+ {
+ integrity: sha512-9NThjpgZnifTkJpzTZ7Eue85S49QwpNhZTq6GRJwObb6jnLFNGB7Qm73V5HewTROPyxD0C29xqmaI68bQtV+hg==,
+ }
+ dependencies:
+ readable-stream: 3.6.0
+ dev: true
+
+ /statuses/2.0.1:
+ resolution:
+ {
+ integrity: sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==,
+ }
+ engines: { node: ">= 0.8" }
+ dev: false
+
+ /string_decoder/1.3.0:
+ resolution:
+ {
+ integrity: sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==,
+ }
+ dependencies:
+ safe-buffer: 5.2.1
+ dev: true
+
+ /strip-bom/4.0.0:
+ resolution:
+ {
+ integrity: sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==,
+ }
+ engines: { node: ">=8" }
+ dev: true
+
+ /strip-final-newline/2.0.0:
+ resolution:
+ {
+ integrity: sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==,
+ }
+ engines: { node: ">=6" }
+ dev: true
+
+ /supports-color/5.5.0:
+ resolution:
+ {
+ integrity: sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==,
+ }
+ engines: { node: ">=4" }
+ dependencies:
+ has-flag: 3.0.0
+ dev: true
+
+ /through2/4.0.2:
+ resolution:
+ {
+ integrity: sha512-iOqSav00cVxEEICeD7TjLB1sueEL+81Wpzp2bY17uZjZN0pWZPuo4suZ/61VujxmqSGFfgOcNuTZ85QJwNZQpw==,
+ }
+ dependencies:
+ readable-stream: 3.6.0
+ dev: true
+
+ /toidentifier/1.0.1:
+ resolution:
+ {
+ integrity: sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==,
+ }
+ engines: { node: ">=0.6" }
+ dev: false
+
+ /type-is/1.6.18:
+ resolution:
+ {
+ integrity: sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==,
+ }
+ engines: { node: ">= 0.6" }
+ dependencies:
+ media-typer: 0.3.0
+ mime-types: 2.1.35
+ dev: false
+
+ /typedarray-to-buffer/3.1.5:
+ resolution:
+ {
+ integrity: sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==,
+ }
+ dependencies:
+ is-typedarray: 1.0.0
+ dev: true
+
+ /unpipe/1.0.0:
+ resolution:
+ {
+ integrity: sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==,
+ }
+ engines: { node: ">= 0.8" }
+ dev: false
+
+ /util-deprecate/1.0.2:
+ resolution:
+ {
+ integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==,
+ }
+ dev: true
+
+ /utils-merge/1.0.1:
+ resolution:
+ {
+ integrity: sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==,
+ }
+ engines: { node: ">= 0.4.0" }
+ dev: false
+
+ /vary/1.1.2:
+ resolution:
+ {
+ integrity: sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==,
+ }
+ engines: { node: ">= 0.8" }
+ dev: false
+
+ /which/2.0.2:
+ resolution:
+ {
+ integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==,
+ }
+ engines: { node: ">= 8" }
+ hasBin: true
+ dependencies:
+ isexe: 2.0.0
+ dev: true
+
+ /wrappy/1.0.2:
+ resolution:
+ {
+ integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==,
+ }
+ dev: true
+
+ /write-file-atomic/3.0.3:
+ resolution:
+ {
+ integrity: sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==,
+ }
+ dependencies:
+ imurmurhash: 0.1.4
+ is-typedarray: 1.0.0
+ signal-exit: 3.0.7
+ typedarray-to-buffer: 3.1.5
+ dev: true
+
+ /write-yaml-file/4.2.0:
+ resolution:
+ {
+ integrity: sha512-LwyucHy0uhWqbrOkh9cBluZBeNVxzHjDaE9mwepZG3n3ZlbM4v3ndrFw51zW/NXYFFqP+QWZ72ihtLWTh05e4Q==,
+ }
+ engines: { node: ">=10.13" }
+ dependencies:
+ js-yaml: 4.1.0
+ write-file-atomic: 3.0.3
+ dev: true
+
+ /yallist/4.0.0:
+ resolution:
+ {
+ integrity: sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==,
+ }
+ dev: true
+
+ /yocto-queue/0.1.0:
+ resolution:
+ {
+ integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==,
+ }
+ engines: { node: ">=10" }
+ dev: true
diff --git a/cli/internal/lockfile/testdata/pnpm7-workspace.yaml b/cli/internal/lockfile/testdata/pnpm7-workspace.yaml
new file mode 100644
index 0000000..2f7b663
--- /dev/null
+++ b/cli/internal/lockfile/testdata/pnpm7-workspace.yaml
@@ -0,0 +1,3445 @@
+lockfileVersion: 5.4
+
+patchedDependencies:
+ lodash@4.17.21:
+ hash: ehchni3mpmovsvjxesffg2i5a4
+ path: patches/lodash@4.17.21.patch
+ underscore@1.13.4:
+ hash: 3pbfs36izefyn2uycmknwkvuuy
+ path: patches/underscore@1.13.4.patch
+
+importers:
+ .:
+ specifiers:
+ eslint-config-custom: workspace:*
+ prettier: latest
+ turbo: latest
+ devDependencies:
+ eslint-config-custom: link:packages/eslint-config-custom
+ prettier: 2.7.1
+ turbo: 1.4.6
+
+ apps/docs:
+ specifiers:
+ "@babel/core": ^7.0.0
+ "@types/node": ^17.0.12
+ "@types/react": 18.0.17
+ dashboard-icons: github:peerigon/dashboard-icons
+ eslint: 7.32.0
+ eslint-config-custom: workspace:*
+ next: 12.2.5
+ next-transpile-modules: 9.0.0
+ react: 18.2.0
+ react-dom: 18.2.0
+ tsconfig: workspace:*
+ typescript: ^4.5.3
+ ui: workspace:*
+ underscore: ^1.13.4
+ dependencies:
+ dashboard-icons: github.com/peerigon/dashboard-icons/ce27ef933144e09cef3911025f3649040a8571b6
+ next: 12.2.5_ir3quccc6i62x6qn6jjhyjjiey
+ react: 18.2.0
+ react-dom: 18.2.0_react@18.2.0
+ ui: file:packages/ui
+ underscore: 1.13.4_3pbfs36izefyn2uycmknwkvuuy
+ devDependencies:
+ "@babel/core": 7.19.1
+ "@types/node": 17.0.45
+ "@types/react": 18.0.17
+ eslint: 7.32.0
+ eslint-config-custom: link:../../packages/eslint-config-custom
+ next-transpile-modules: 9.0.0
+ tsconfig: link:../../packages/tsconfig
+ typescript: 4.8.3
+ dependenciesMeta:
+ ui:
+ injected: true
+
+ apps/web:
+ specifiers:
+ "@babel/core": ^7.0.0
+ "@types/node": ^17.0.12
+ "@types/react": 18.0.17
+ eslint: 7.32.0
+ eslint-config-custom: workspace:*
+ lodash: ^4.17.21
+ next: 12.2.5
+ next-transpile-modules: 9.0.0
+ react: 18.2.0
+ react-dom: 18.2.0
+ tsconfig: workspace:*
+ typescript: ^4.5.3
+ ui: workspace:*
+ dependencies:
+ lodash: 4.17.21_ehchni3mpmovsvjxesffg2i5a4
+ next: 12.2.5_ir3quccc6i62x6qn6jjhyjjiey
+ react: 18.2.0
+ react-dom: 18.2.0_react@18.2.0
+ ui: link:../../packages/ui
+ devDependencies:
+ "@babel/core": 7.19.1
+ "@types/node": 17.0.45
+ "@types/react": 18.0.17
+ eslint: 7.32.0
+ eslint-config-custom: link:../../packages/eslint-config-custom
+ next-transpile-modules: 9.0.0
+ tsconfig: link:../../packages/tsconfig
+ typescript: 4.8.3
+
+ packages/eslint-config-custom:
+ specifiers:
+ eslint: ^7.23.0
+ eslint-config-next: ^12.0.8
+ eslint-config-prettier: ^8.3.0
+ eslint-config-turbo: latest
+ eslint-plugin-react: 7.31.7
+ typescript: ^4.7.4
+ dependencies:
+ eslint: 7.32.0
+ eslint-config-next: 12.3.0_dyxdave6dwjbccc5dgiifcmuza
+ eslint-config-prettier: 8.5.0_eslint@7.32.0
+ eslint-config-turbo: 0.0.3_eslint@7.32.0
+ eslint-plugin-react: 7.31.7_eslint@7.32.0
+ devDependencies:
+ typescript: 4.8.3
+
+ packages/tsconfig:
+ specifiers: {}
+
+ packages/ui:
+ specifiers:
+ "@types/react": ^18.0.17
+ "@types/react-dom": ^18.0.6
+ eslint: ^7.32.0
+ eslint-config-custom: workspace:*
+ react: ^18.2.0
+ tsconfig: workspace:*
+ typescript: ^4.5.2
+ devDependencies:
+ "@types/react": 18.0.20
+ "@types/react-dom": 18.0.6
+ eslint: 7.32.0
+ eslint-config-custom: link:../eslint-config-custom
+ react: 18.2.0
+ tsconfig: link:../tsconfig
+ typescript: 4.8.3
+
+packages:
+ /@ampproject/remapping/2.2.0:
+ resolution:
+ {
+ integrity: sha512-qRmjj8nj9qmLTQXXmaR1cck3UXSRMPrbsLJAasZpF+t3riI71BXed5ebIOYwQntykeZuhjsdweEc9BxH5Jc26w==,
+ }
+ engines: { node: ">=6.0.0" }
+ dependencies:
+ "@jridgewell/gen-mapping": 0.1.1
+ "@jridgewell/trace-mapping": 0.3.15
+
+ /@babel/code-frame/7.12.11:
+ resolution:
+ {
+ integrity: sha512-Zt1yodBx1UcyiePMSkWnU4hPqhwq7hGi2nFL1LeA3EUl+q2LQx16MISgJ0+z7dnmgvP9QtIleuETGOiOH1RcIw==,
+ }
+ dependencies:
+ "@babel/highlight": 7.18.6
+
+ /@babel/code-frame/7.18.6:
+ resolution:
+ {
+ integrity: sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q==,
+ }
+ engines: { node: ">=6.9.0" }
+ dependencies:
+ "@babel/highlight": 7.18.6
+
+ /@babel/compat-data/7.19.1:
+ resolution:
+ {
+ integrity: sha512-72a9ghR0gnESIa7jBN53U32FOVCEoztyIlKaNoU05zRhEecduGK9L9c3ww7Mp06JiR+0ls0GBPFJQwwtjn9ksg==,
+ }
+ engines: { node: ">=6.9.0" }
+
+ /@babel/core/7.19.1:
+ resolution:
+ {
+ integrity: sha512-1H8VgqXme4UXCRv7/Wa1bq7RVymKOzC7znjyFM8KiEzwFqcKUKYNoQef4GhdklgNvoBXyW4gYhuBNCM5o1zImw==,
+ }
+ engines: { node: ">=6.9.0" }
+ dependencies:
+ "@ampproject/remapping": 2.2.0
+ "@babel/code-frame": 7.18.6
+ "@babel/generator": 7.19.0
+ "@babel/helper-compilation-targets": 7.19.1_@babel+core@7.19.1
+ "@babel/helper-module-transforms": 7.19.0
+ "@babel/helpers": 7.19.0
+ "@babel/parser": 7.19.1
+ "@babel/template": 7.18.10
+ "@babel/traverse": 7.19.1
+ "@babel/types": 7.19.0
+ convert-source-map: 1.8.0
+ debug: 4.3.4
+ gensync: 1.0.0-beta.2
+ json5: 2.2.1
+ semver: 6.3.0
+ transitivePeerDependencies:
+ - supports-color
+
+ /@babel/generator/7.19.0:
+ resolution:
+ {
+ integrity: sha512-S1ahxf1gZ2dpoiFgA+ohK9DIpz50bJ0CWs7Zlzb54Z4sG8qmdIrGrVqmy1sAtTVRb+9CU6U8VqT9L0Zj7hxHVg==,
+ }
+ engines: { node: ">=6.9.0" }
+ dependencies:
+ "@babel/types": 7.19.0
+ "@jridgewell/gen-mapping": 0.3.2
+ jsesc: 2.5.2
+
+ /@babel/helper-compilation-targets/7.19.1_@babel+core@7.19.1:
+ resolution:
+ {
+ integrity: sha512-LlLkkqhCMyz2lkQPvJNdIYU7O5YjWRgC2R4omjCTpZd8u8KMQzZvX4qce+/BluN1rcQiV7BoGUpmQ0LeHerbhg==,
+ }
+ engines: { node: ">=6.9.0" }
+ peerDependencies:
+ "@babel/core": ^7.0.0
+ dependencies:
+ "@babel/compat-data": 7.19.1
+ "@babel/core": 7.19.1
+ "@babel/helper-validator-option": 7.18.6
+ browserslist: 4.21.3
+ semver: 6.3.0
+
+ /@babel/helper-environment-visitor/7.18.9:
+ resolution:
+ {
+ integrity: sha512-3r/aACDJ3fhQ/EVgFy0hpj8oHyHpQc+LPtJoY9SzTThAsStm4Ptegq92vqKoE3vD706ZVFWITnMnxucw+S9Ipg==,
+ }
+ engines: { node: ">=6.9.0" }
+
+ /@babel/helper-function-name/7.19.0:
+ resolution:
+ {
+ integrity: sha512-WAwHBINyrpqywkUH0nTnNgI5ina5TFn85HKS0pbPDfxFfhyR/aNQEn4hGi1P1JyT//I0t4OgXUlofzWILRvS5w==,
+ }
+ engines: { node: ">=6.9.0" }
+ dependencies:
+ "@babel/template": 7.18.10
+ "@babel/types": 7.19.0
+
+ /@babel/helper-hoist-variables/7.18.6:
+ resolution:
+ {
+ integrity: sha512-UlJQPkFqFULIcyW5sbzgbkxn2FKRgwWiRexcuaR8RNJRy8+LLveqPjwZV/bwrLZCN0eUHD/x8D0heK1ozuoo6Q==,
+ }
+ engines: { node: ">=6.9.0" }
+ dependencies:
+ "@babel/types": 7.19.0
+
+ /@babel/helper-module-imports/7.18.6:
+ resolution:
+ {
+ integrity: sha512-0NFvs3VkuSYbFi1x2Vd6tKrywq+z/cLeYC/RJNFrIX/30Bf5aiGYbtvGXolEktzJH8o5E5KJ3tT+nkxuuZFVlA==,
+ }
+ engines: { node: ">=6.9.0" }
+ dependencies:
+ "@babel/types": 7.19.0
+
+ /@babel/helper-module-transforms/7.19.0:
+ resolution:
+ {
+ integrity: sha512-3HBZ377Fe14RbLIA+ac3sY4PTgpxHVkFrESaWhoI5PuyXPBBX8+C34qblV9G89ZtycGJCmCI/Ut+VUDK4bltNQ==,
+ }
+ engines: { node: ">=6.9.0" }
+ dependencies:
+ "@babel/helper-environment-visitor": 7.18.9
+ "@babel/helper-module-imports": 7.18.6
+ "@babel/helper-simple-access": 7.18.6
+ "@babel/helper-split-export-declaration": 7.18.6
+ "@babel/helper-validator-identifier": 7.19.1
+ "@babel/template": 7.18.10
+ "@babel/traverse": 7.19.1
+ "@babel/types": 7.19.0
+ transitivePeerDependencies:
+ - supports-color
+
+ /@babel/helper-simple-access/7.18.6:
+ resolution:
+ {
+ integrity: sha512-iNpIgTgyAvDQpDj76POqg+YEt8fPxx3yaNBg3S30dxNKm2SWfYhD0TGrK/Eu9wHpUW63VQU894TsTg+GLbUa1g==,
+ }
+ engines: { node: ">=6.9.0" }
+ dependencies:
+ "@babel/types": 7.19.0
+
+ /@babel/helper-split-export-declaration/7.18.6:
+ resolution:
+ {
+ integrity: sha512-bde1etTx6ZyTmobl9LLMMQsaizFVZrquTEHOqKeQESMKo4PlObf+8+JA25ZsIpZhT/WEd39+vOdLXAFG/nELpA==,
+ }
+ engines: { node: ">=6.9.0" }
+ dependencies:
+ "@babel/types": 7.19.0
+
+ /@babel/helper-string-parser/7.18.10:
+ resolution:
+ {
+ integrity: sha512-XtIfWmeNY3i4t7t4D2t02q50HvqHybPqW2ki1kosnvWCwuCMeo81Jf0gwr85jy/neUdg5XDdeFE/80DXiO+njw==,
+ }
+ engines: { node: ">=6.9.0" }
+
+ /@babel/helper-validator-identifier/7.19.1:
+ resolution:
+ {
+ integrity: sha512-awrNfaMtnHUr653GgGEs++LlAvW6w+DcPrOliSMXWCKo597CwL5Acf/wWdNkf/tfEQE3mjkeD1YOVZOUV/od1w==,
+ }
+ engines: { node: ">=6.9.0" }
+
+ /@babel/helper-validator-option/7.18.6:
+ resolution:
+ {
+ integrity: sha512-XO7gESt5ouv/LRJdrVjkShckw6STTaB7l9BrpBaAHDeF5YZT+01PCwmR0SJHnkW6i8OwW/EVWRShfi4j2x+KQw==,
+ }
+ engines: { node: ">=6.9.0" }
+
+ /@babel/helpers/7.19.0:
+ resolution:
+ {
+ integrity: sha512-DRBCKGwIEdqY3+rPJgG/dKfQy9+08rHIAJx8q2p+HSWP87s2HCrQmaAMMyMll2kIXKCW0cO1RdQskx15Xakftg==,
+ }
+ engines: { node: ">=6.9.0" }
+ dependencies:
+ "@babel/template": 7.18.10
+ "@babel/traverse": 7.19.1
+ "@babel/types": 7.19.0
+ transitivePeerDependencies:
+ - supports-color
+
+ /@babel/highlight/7.18.6:
+ resolution:
+ {
+ integrity: sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g==,
+ }
+ engines: { node: ">=6.9.0" }
+ dependencies:
+ "@babel/helper-validator-identifier": 7.19.1
+ chalk: 2.4.2
+ js-tokens: 4.0.0
+
+ /@babel/parser/7.19.1:
+ resolution:
+ {
+ integrity: sha512-h7RCSorm1DdTVGJf3P2Mhj3kdnkmF/EiysUkzS2TdgAYqyjFdMQJbVuXOBej2SBJaXan/lIVtT6KkGbyyq753A==,
+ }
+ engines: { node: ">=6.0.0" }
+ hasBin: true
+ dependencies:
+ "@babel/types": 7.19.0
+
+ /@babel/runtime-corejs3/7.19.1:
+ resolution:
+ {
+ integrity: sha512-j2vJGnkopRzH+ykJ8h68wrHnEUmtK//E723jjixiAl/PPf6FhqY/vYRcMVlNydRKQjQsTsYEjpx+DZMIvnGk/g==,
+ }
+ engines: { node: ">=6.9.0" }
+ dependencies:
+ core-js-pure: 3.25.1
+ regenerator-runtime: 0.13.9
+ dev: false
+
+ /@babel/runtime/7.19.0:
+ resolution:
+ {
+ integrity: sha512-eR8Lo9hnDS7tqkO7NsV+mKvCmv5boaXFSZ70DnfhcgiEne8hv9oCEd36Klw74EtizEqLsy4YnW8UWwpBVolHZA==,
+ }
+ engines: { node: ">=6.9.0" }
+ dependencies:
+ regenerator-runtime: 0.13.9
+ dev: false
+
+ /@babel/template/7.18.10:
+ resolution:
+ {
+ integrity: sha512-TI+rCtooWHr3QJ27kJxfjutghu44DLnasDMwpDqCXVTal9RLp3RSYNh4NdBrRP2cQAoG9A8juOQl6P6oZG4JxA==,
+ }
+ engines: { node: ">=6.9.0" }
+ dependencies:
+ "@babel/code-frame": 7.18.6
+ "@babel/parser": 7.19.1
+ "@babel/types": 7.19.0
+
+ /@babel/traverse/7.19.1:
+ resolution:
+ {
+ integrity: sha512-0j/ZfZMxKukDaag2PtOPDbwuELqIar6lLskVPPJDjXMXjfLb1Obo/1yjxIGqqAJrmfaTIY3z2wFLAQ7qSkLsuA==,
+ }
+ engines: { node: ">=6.9.0" }
+ dependencies:
+ "@babel/code-frame": 7.18.6
+ "@babel/generator": 7.19.0
+ "@babel/helper-environment-visitor": 7.18.9
+ "@babel/helper-function-name": 7.19.0
+ "@babel/helper-hoist-variables": 7.18.6
+ "@babel/helper-split-export-declaration": 7.18.6
+ "@babel/parser": 7.19.1
+ "@babel/types": 7.19.0
+ debug: 4.3.4
+ globals: 11.12.0
+ transitivePeerDependencies:
+ - supports-color
+
+ /@babel/types/7.19.0:
+ resolution:
+ {
+ integrity: sha512-YuGopBq3ke25BVSiS6fgF49Ul9gH1x70Bcr6bqRLjWCkcX8Hre1/5+z+IiWOIerRMSSEfGZVB9z9kyq7wVs9YA==,
+ }
+ engines: { node: ">=6.9.0" }
+ dependencies:
+ "@babel/helper-string-parser": 7.18.10
+ "@babel/helper-validator-identifier": 7.19.1
+ to-fast-properties: 2.0.0
+
+ /@eslint/eslintrc/0.4.3:
+ resolution:
+ {
+ integrity: sha512-J6KFFz5QCYUJq3pf0mjEcCJVERbzv71PUIDczuh9JkwGEzced6CO5ADLHB1rbf/+oPBtoPfMYNOpGDzCANlbXw==,
+ }
+ engines: { node: ^10.12.0 || >=12.0.0 }
+ dependencies:
+ ajv: 6.12.6
+ debug: 4.3.4
+ espree: 7.3.1
+ globals: 13.17.0
+ ignore: 4.0.6
+ import-fresh: 3.3.0
+ js-yaml: 3.14.1
+ minimatch: 3.1.2
+ strip-json-comments: 3.1.1
+ transitivePeerDependencies:
+ - supports-color
+
+ /@humanwhocodes/config-array/0.5.0:
+ resolution:
+ {
+ integrity: sha512-FagtKFz74XrTl7y6HCzQpwDfXP0yhxe9lHLD1UZxjvZIcbyRz8zTFF/yYNfSfzU414eDwZ1SrO0Qvtyf+wFMQg==,
+ }
+ engines: { node: ">=10.10.0" }
+ dependencies:
+ "@humanwhocodes/object-schema": 1.2.1
+ debug: 4.3.4
+ minimatch: 3.1.2
+ transitivePeerDependencies:
+ - supports-color
+
+ /@humanwhocodes/object-schema/1.2.1:
+ resolution:
+ {
+ integrity: sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA==,
+ }
+
+ /@jridgewell/gen-mapping/0.1.1:
+ resolution:
+ {
+ integrity: sha512-sQXCasFk+U8lWYEe66WxRDOE9PjVz4vSM51fTu3Hw+ClTpUSQb718772vH3pyS5pShp6lvQM7SxgIDXXXmOX7w==,
+ }
+ engines: { node: ">=6.0.0" }
+ dependencies:
+ "@jridgewell/set-array": 1.1.2
+ "@jridgewell/sourcemap-codec": 1.4.14
+
+ /@jridgewell/gen-mapping/0.3.2:
+ resolution:
+ {
+ integrity: sha512-mh65xKQAzI6iBcFzwv28KVWSmCkdRBWoOh+bYQGW3+6OZvbbN3TqMGo5hqYxQniRcH9F2VZIoJCm4pa3BPDK/A==,
+ }
+ engines: { node: ">=6.0.0" }
+ dependencies:
+ "@jridgewell/set-array": 1.1.2
+ "@jridgewell/sourcemap-codec": 1.4.14
+ "@jridgewell/trace-mapping": 0.3.15
+
+ /@jridgewell/resolve-uri/3.1.0:
+ resolution:
+ {
+ integrity: sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==,
+ }
+ engines: { node: ">=6.0.0" }
+
+ /@jridgewell/set-array/1.1.2:
+ resolution:
+ {
+ integrity: sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==,
+ }
+ engines: { node: ">=6.0.0" }
+
+ /@jridgewell/sourcemap-codec/1.4.14:
+ resolution:
+ {
+ integrity: sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==,
+ }
+
+ /@jridgewell/trace-mapping/0.3.15:
+ resolution:
+ {
+ integrity: sha512-oWZNOULl+UbhsgB51uuZzglikfIKSUBO/M9W2OfEjn7cmqoAiCgmv9lyACTUacZwBz0ITnJ2NqjU8Tx0DHL88g==,
+ }
+ dependencies:
+ "@jridgewell/resolve-uri": 3.1.0
+ "@jridgewell/sourcemap-codec": 1.4.14
+
+ /@next/env/12.2.5:
+ resolution:
+ {
+ integrity: sha512-vLPLV3cpPGjUPT3PjgRj7e3nio9t6USkuew3JE/jMeon/9Mvp1WyR18v3iwnCuX7eUAm1HmAbJHHLAbcu/EJcw==,
+ }
+ dev: false
+
+ /@next/eslint-plugin-next/12.3.0:
+ resolution:
+ {
+ integrity: sha512-jVdq1qYTNDjUtulnE8/hkPv0pHILV4jMg5La99iaY/FFm20WxVnsAZtbNnMvlPbf8dc010oO304SX9yXbg5PAw==,
+ }
+ dependencies:
+ glob: 7.1.7
+ dev: false
+
+ /@next/swc-android-arm-eabi/12.2.5:
+ resolution:
+ {
+ integrity: sha512-cPWClKxGhgn2dLWnspW+7psl3MoLQUcNqJqOHk2BhNcou9ARDtC0IjQkKe5qcn9qg7I7U83Gp1yh2aesZfZJMA==,
+ }
+ engines: { node: ">= 10" }
+ cpu: [arm]
+ os: [android]
+ requiresBuild: true
+ dev: false
+ optional: true
+
+ /@next/swc-android-arm64/12.2.5:
+ resolution:
+ {
+ integrity: sha512-vMj0efliXmC5b7p+wfcQCX0AfU8IypjkzT64GiKJD9PgiA3IILNiGJr1fw2lyUDHkjeWx/5HMlMEpLnTsQslwg==,
+ }
+ engines: { node: ">= 10" }
+ cpu: [arm64]
+ os: [android]
+ requiresBuild: true
+ dev: false
+ optional: true
+
+ /@next/swc-darwin-arm64/12.2.5:
+ resolution:
+ {
+ integrity: sha512-VOPWbO5EFr6snla/WcxUKtvzGVShfs302TEMOtzYyWni6f9zuOetijJvVh9CCTzInnXAZMtHyNhefijA4HMYLg==,
+ }
+ engines: { node: ">= 10" }
+ cpu: [arm64]
+ os: [darwin]
+ requiresBuild: true
+ dev: false
+ optional: true
+
+ /@next/swc-darwin-x64/12.2.5:
+ resolution:
+ {
+ integrity: sha512-5o8bTCgAmtYOgauO/Xd27vW52G2/m3i5PX7MUYePquxXAnX73AAtqA3WgPXBRitEB60plSKZgOTkcpqrsh546A==,
+ }
+ engines: { node: ">= 10" }
+ cpu: [x64]
+ os: [darwin]
+ requiresBuild: true
+ dev: false
+ optional: true
+
+ /@next/swc-freebsd-x64/12.2.5:
+ resolution:
+ {
+ integrity: sha512-yYUbyup1JnznMtEBRkK4LT56N0lfK5qNTzr6/DEyDw5TbFVwnuy2hhLBzwCBkScFVjpFdfiC6SQAX3FrAZzuuw==,
+ }
+ engines: { node: ">= 10" }
+ cpu: [x64]
+ os: [freebsd]
+ requiresBuild: true
+ dev: false
+ optional: true
+
+ /@next/swc-linux-arm-gnueabihf/12.2.5:
+ resolution:
+ {
+ integrity: sha512-2ZE2/G921Acks7UopJZVMgKLdm4vN4U0yuzvAMJ6KBavPzqESA2yHJlm85TV/K9gIjKhSk5BVtauIUntFRP8cg==,
+ }
+ engines: { node: ">= 10" }
+ cpu: [arm]
+ os: [linux]
+ requiresBuild: true
+ dev: false
+ optional: true
+
+ /@next/swc-linux-arm64-gnu/12.2.5:
+ resolution:
+ {
+ integrity: sha512-/I6+PWVlz2wkTdWqhlSYYJ1pWWgUVva6SgX353oqTh8njNQp1SdFQuWDqk8LnM6ulheVfSsgkDzxrDaAQZnzjQ==,
+ }
+ engines: { node: ">= 10" }
+ cpu: [arm64]
+ os: [linux]
+ requiresBuild: true
+ dev: false
+ optional: true
+
+ /@next/swc-linux-arm64-musl/12.2.5:
+ resolution:
+ {
+ integrity: sha512-LPQRelfX6asXyVr59p5sTpx5l+0yh2Vjp/R8Wi4X9pnqcayqT4CUJLiHqCvZuLin3IsFdisJL0rKHMoaZLRfmg==,
+ }
+ engines: { node: ">= 10" }
+ cpu: [arm64]
+ os: [linux]
+ requiresBuild: true
+ dev: false
+ optional: true
+
+ /@next/swc-linux-x64-gnu/12.2.5:
+ resolution:
+ {
+ integrity: sha512-0szyAo8jMCClkjNK0hknjhmAngUppoRekW6OAezbEYwHXN/VNtsXbfzgYOqjKWxEx3OoAzrT3jLwAF0HdX2MEw==,
+ }
+ engines: { node: ">= 10" }
+ cpu: [x64]
+ os: [linux]
+ requiresBuild: true
+ dev: false
+ optional: true
+
+ /@next/swc-linux-x64-musl/12.2.5:
+ resolution:
+ {
+ integrity: sha512-zg/Y6oBar1yVnW6Il1I/08/2ukWtOG6s3acdJdEyIdsCzyQi4RLxbbhkD/EGQyhqBvd3QrC6ZXQEXighQUAZ0g==,
+ }
+ engines: { node: ">= 10" }
+ cpu: [x64]
+ os: [linux]
+ requiresBuild: true
+ dev: false
+ optional: true
+
+ /@next/swc-win32-arm64-msvc/12.2.5:
+ resolution:
+ {
+ integrity: sha512-3/90DRNSqeeSRMMEhj4gHHQlLhhKg5SCCoYfE3kBjGpE63EfnblYUqsszGGZ9ekpKL/R4/SGB40iCQr8tR5Jiw==,
+ }
+ engines: { node: ">= 10" }
+ cpu: [arm64]
+ os: [win32]
+ requiresBuild: true
+ dev: false
+ optional: true
+
+ /@next/swc-win32-ia32-msvc/12.2.5:
+ resolution:
+ {
+ integrity: sha512-hGLc0ZRAwnaPL4ulwpp4D2RxmkHQLuI8CFOEEHdzZpS63/hMVzv81g8jzYA0UXbb9pus/iTc3VRbVbAM03SRrw==,
+ }
+ engines: { node: ">= 10" }
+ cpu: [ia32]
+ os: [win32]
+ requiresBuild: true
+ dev: false
+ optional: true
+
+ /@next/swc-win32-x64-msvc/12.2.5:
+ resolution:
+ {
+ integrity: sha512-7h5/ahY7NeaO2xygqVrSG/Y8Vs4cdjxIjowTZ5W6CKoTKn7tmnuxlUc2h74x06FKmbhAd9agOjr/AOKyxYYm9Q==,
+ }
+ engines: { node: ">= 10" }
+ cpu: [x64]
+ os: [win32]
+ requiresBuild: true
+ dev: false
+ optional: true
+
+ /@nodelib/fs.scandir/2.1.5:
+ resolution:
+ {
+ integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==,
+ }
+ engines: { node: ">= 8" }
+ dependencies:
+ "@nodelib/fs.stat": 2.0.5
+ run-parallel: 1.2.0
+ dev: false
+
+ /@nodelib/fs.stat/2.0.5:
+ resolution:
+ {
+ integrity: sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==,
+ }
+ engines: { node: ">= 8" }
+ dev: false
+
+ /@nodelib/fs.walk/1.2.8:
+ resolution:
+ {
+ integrity: sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==,
+ }
+ engines: { node: ">= 8" }
+ dependencies:
+ "@nodelib/fs.scandir": 2.1.5
+ fastq: 1.13.0
+ dev: false
+
+ /@rushstack/eslint-patch/1.1.4:
+ resolution:
+ {
+ integrity: sha512-LwzQKA4vzIct1zNZzBmRKI9QuNpLgTQMEjsQLf3BXuGYb3QPTP4Yjf6mkdX+X1mYttZ808QpOwAzZjv28kq7DA==,
+ }
+ dev: false
+
+ /@swc/helpers/0.4.3:
+ resolution:
+ {
+ integrity: sha512-6JrF+fdUK2zbGpJIlN7G3v966PQjyx/dPt1T9km2wj+EUBqgrxCk3uX4Kct16MIm9gGxfKRcfax2hVf5jvlTzA==,
+ }
+ dependencies:
+ tslib: 2.4.0
+ dev: false
+
+ /@types/json5/0.0.29:
+ resolution:
+ {
+ integrity: sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ==,
+ }
+ dev: false
+
+ /@types/node/17.0.45:
+ resolution:
+ {
+ integrity: sha512-w+tIMs3rq2afQdsPJlODhoUEKzFP1ayaoyl1CcnwtIlsVe7K7bA1NGm4s3PraqTLlXnbIN84zuBlxBWo1u9BLw==,
+ }
+ dev: true
+
+ /@types/prop-types/15.7.5:
+ resolution:
+ {
+ integrity: sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w==,
+ }
+ dev: true
+
+ /@types/react-dom/18.0.6:
+ resolution:
+ {
+ integrity: sha512-/5OFZgfIPSwy+YuIBP/FgJnQnsxhZhjjrnxudMddeblOouIodEQ75X14Rr4wGSG/bknL+Omy9iWlLo1u/9GzAA==,
+ }
+ dependencies:
+ "@types/react": 18.0.20
+ dev: true
+
+ /@types/react/18.0.17:
+ resolution:
+ {
+ integrity: sha512-38ETy4tL+rn4uQQi7mB81G7V1g0u2ryquNmsVIOKUAEIDK+3CUjZ6rSRpdvS99dNBnkLFL83qfmtLacGOTIhwQ==,
+ }
+ dependencies:
+ "@types/prop-types": 15.7.5
+ "@types/scheduler": 0.16.2
+ csstype: 3.1.1
+ dev: true
+
+ /@types/react/18.0.20:
+ resolution:
+ {
+ integrity: sha512-MWul1teSPxujEHVwZl4a5HxQ9vVNsjTchVA+xRqv/VYGCuKGAU6UhfrTdF5aBefwD1BHUD8i/zq+O/vyCm/FrA==,
+ }
+ dependencies:
+ "@types/prop-types": 15.7.5
+ "@types/scheduler": 0.16.2
+ csstype: 3.1.1
+ dev: true
+
+ /@types/scheduler/0.16.2:
+ resolution:
+ {
+ integrity: sha512-hppQEBDmlwhFAXKJX2KnWLYu5yMfi91yazPb2l+lbJiwW+wdo1gNeRA+3RgNSO39WYX2euey41KEwnqesU2Jew==,
+ }
+ dev: true
+
+ /@typescript-eslint/parser/5.37.0_dyxdave6dwjbccc5dgiifcmuza:
+ resolution:
+ {
+ integrity: sha512-01VzI/ipYKuaG5PkE5+qyJ6m02fVALmMPY3Qq5BHflDx3y4VobbLdHQkSMg9VPRS4KdNt4oYTMaomFoHonBGAw==,
+ }
+ engines: { node: ^12.22.0 || ^14.17.0 || >=16.0.0 }
+ peerDependencies:
+ eslint: ^6.0.0 || ^7.0.0 || ^8.0.0
+ typescript: "*"
+ peerDependenciesMeta:
+ typescript:
+ optional: true
+ dependencies:
+ "@typescript-eslint/scope-manager": 5.37.0
+ "@typescript-eslint/types": 5.37.0
+ "@typescript-eslint/typescript-estree": 5.37.0_typescript@4.8.3
+ debug: 4.3.4
+ eslint: 7.32.0
+ typescript: 4.8.3
+ transitivePeerDependencies:
+ - supports-color
+ dev: false
+
+ /@typescript-eslint/scope-manager/5.37.0:
+ resolution:
+ {
+ integrity: sha512-F67MqrmSXGd/eZnujjtkPgBQzgespu/iCZ+54Ok9X5tALb9L2v3G+QBSoWkXG0p3lcTJsL+iXz5eLUEdSiJU9Q==,
+ }
+ engines: { node: ^12.22.0 || ^14.17.0 || >=16.0.0 }
+ dependencies:
+ "@typescript-eslint/types": 5.37.0
+ "@typescript-eslint/visitor-keys": 5.37.0
+ dev: false
+
+ /@typescript-eslint/types/5.37.0:
+ resolution:
+ {
+ integrity: sha512-3frIJiTa5+tCb2iqR/bf7XwU20lnU05r/sgPJnRpwvfZaqCJBrl8Q/mw9vr3NrNdB/XtVyMA0eppRMMBqdJ1bA==,
+ }
+ engines: { node: ^12.22.0 || ^14.17.0 || >=16.0.0 }
+ dev: false
+
+ /@typescript-eslint/typescript-estree/5.37.0_typescript@4.8.3:
+ resolution:
+ {
+ integrity: sha512-JkFoFIt/cx59iqEDSgIGnQpCTRv96MQnXCYvJi7QhBC24uyuzbD8wVbajMB1b9x4I0octYFJ3OwjAwNqk1AjDA==,
+ }
+ engines: { node: ^12.22.0 || ^14.17.0 || >=16.0.0 }
+ peerDependencies:
+ typescript: "*"
+ peerDependenciesMeta:
+ typescript:
+ optional: true
+ dependencies:
+ "@typescript-eslint/types": 5.37.0
+ "@typescript-eslint/visitor-keys": 5.37.0
+ debug: 4.3.4
+ globby: 11.1.0
+ is-glob: 4.0.3
+ semver: 7.3.7
+ tsutils: 3.21.0_typescript@4.8.3
+ typescript: 4.8.3
+ transitivePeerDependencies:
+ - supports-color
+ dev: false
+
+ /@typescript-eslint/visitor-keys/5.37.0:
+ resolution:
+ {
+ integrity: sha512-Hp7rT4cENBPIzMwrlehLW/28EVCOcE9U1Z1BQTc8EA8v5qpr7GRGuG+U58V5tTY48zvUOA3KHvw3rA8tY9fbdA==,
+ }
+ engines: { node: ^12.22.0 || ^14.17.0 || >=16.0.0 }
+ dependencies:
+ "@typescript-eslint/types": 5.37.0
+ eslint-visitor-keys: 3.3.0
+ dev: false
+
+ /acorn-jsx/5.3.2_acorn@7.4.1:
+ resolution:
+ {
+ integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==,
+ }
+ peerDependencies:
+ acorn: ^6.0.0 || ^7.0.0 || ^8.0.0
+ dependencies:
+ acorn: 7.4.1
+
+ /acorn/7.4.1:
+ resolution:
+ {
+ integrity: sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==,
+ }
+ engines: { node: ">=0.4.0" }
+ hasBin: true
+
+ /ajv/6.12.6:
+ resolution:
+ {
+ integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==,
+ }
+ dependencies:
+ fast-deep-equal: 3.1.3
+ fast-json-stable-stringify: 2.1.0
+ json-schema-traverse: 0.4.1
+ uri-js: 4.4.1
+
+ /ajv/8.11.0:
+ resolution:
+ {
+ integrity: sha512-wGgprdCvMalC0BztXvitD2hC04YffAvtsUn93JbGXYLAtCUO4xd17mCCZQxUOItiBwZvJScWo8NIvQMQ71rdpg==,
+ }
+ dependencies:
+ fast-deep-equal: 3.1.3
+ json-schema-traverse: 1.0.0
+ require-from-string: 2.0.2
+ uri-js: 4.4.1
+
+ /ansi-colors/4.1.3:
+ resolution:
+ {
+ integrity: sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==,
+ }
+ engines: { node: ">=6" }
+
+ /ansi-regex/5.0.1:
+ resolution:
+ {
+ integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==,
+ }
+ engines: { node: ">=8" }
+
+ /ansi-styles/3.2.1:
+ resolution:
+ {
+ integrity: sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==,
+ }
+ engines: { node: ">=4" }
+ dependencies:
+ color-convert: 1.9.3
+
+ /ansi-styles/4.3.0:
+ resolution:
+ {
+ integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==,
+ }
+ engines: { node: ">=8" }
+ dependencies:
+ color-convert: 2.0.1
+
+ /argparse/1.0.10:
+ resolution:
+ {
+ integrity: sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==,
+ }
+ dependencies:
+ sprintf-js: 1.0.3
+
+ /aria-query/4.2.2:
+ resolution:
+ {
+ integrity: sha512-o/HelwhuKpTj/frsOsbNLNgnNGVIFsVP/SW2BSF14gVl7kAfMOJ6/8wUAUvG1R1NHKrfG+2sHZTu0yauT1qBrA==,
+ }
+ engines: { node: ">=6.0" }
+ dependencies:
+ "@babel/runtime": 7.19.0
+ "@babel/runtime-corejs3": 7.19.1
+ dev: false
+
+ /array-includes/3.1.5:
+ resolution:
+ {
+ integrity: sha512-iSDYZMMyTPkiFasVqfuAQnWAYcvO/SeBSCGKePoEthjp4LEMTe4uLc7b025o4jAZpHhihh8xPo99TNWUWWkGDQ==,
+ }
+ engines: { node: ">= 0.4" }
+ dependencies:
+ call-bind: 1.0.2
+ define-properties: 1.1.4
+ es-abstract: 1.20.2
+ get-intrinsic: 1.1.3
+ is-string: 1.0.7
+ dev: false
+
+ /array-union/2.1.0:
+ resolution:
+ {
+ integrity: sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==,
+ }
+ engines: { node: ">=8" }
+ dev: false
+
+ /array.prototype.flat/1.3.0:
+ resolution:
+ {
+ integrity: sha512-12IUEkHsAhA4DY5s0FPgNXIdc8VRSqD9Zp78a5au9abH/SOBrsp082JOWFNTjkMozh8mqcdiKuaLGhPeYztxSw==,
+ }
+ engines: { node: ">= 0.4" }
+ dependencies:
+ call-bind: 1.0.2
+ define-properties: 1.1.4
+ es-abstract: 1.20.2
+ es-shim-unscopables: 1.0.0
+ dev: false
+
+ /array.prototype.flatmap/1.3.0:
+ resolution:
+ {
+ integrity: sha512-PZC9/8TKAIxcWKdyeb77EzULHPrIX/tIZebLJUQOMR1OwYosT8yggdfWScfTBCDj5utONvOuPQQumYsU2ULbkg==,
+ }
+ engines: { node: ">= 0.4" }
+ dependencies:
+ call-bind: 1.0.2
+ define-properties: 1.1.4
+ es-abstract: 1.20.2
+ es-shim-unscopables: 1.0.0
+ dev: false
+
+ /ast-types-flow/0.0.7:
+ resolution:
+ {
+ integrity: sha512-eBvWn1lvIApYMhzQMsu9ciLfkBY499mFZlNqG+/9WR7PVlroQw0vG30cOQQbaKz3sCEc44TAOu2ykzqXSNnwag==,
+ }
+ dev: false
+
+ /astral-regex/2.0.0:
+ resolution:
+ {
+ integrity: sha512-Z7tMw1ytTXt5jqMcOP+OQteU1VuNK9Y02uuJtKQ1Sv69jXQKKg5cibLwGJow8yzZP+eAc18EmLGPal0bp36rvQ==,
+ }
+ engines: { node: ">=8" }
+
+ /axe-core/4.4.3:
+ resolution:
+ {
+ integrity: sha512-32+ub6kkdhhWick/UjvEwRchgoetXqTK14INLqbGm5U2TzBkBNF3nQtLYm8ovxSkQWArjEQvftCKryjZaATu3w==,
+ }
+ engines: { node: ">=4" }
+ dev: false
+
+ /axobject-query/2.2.0:
+ resolution:
+ {
+ integrity: sha512-Td525n+iPOOyUQIeBfcASuG6uJsDOITl7Mds5gFyerkWiX7qhUTdYUBlSgNMyVqtSJqwpt1kXGLdUt6SykLMRA==,
+ }
+ dev: false
+
+ /balanced-match/1.0.2:
+ resolution:
+ {
+ integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==,
+ }
+
+ /brace-expansion/1.1.11:
+ resolution:
+ {
+ integrity: sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==,
+ }
+ dependencies:
+ balanced-match: 1.0.2
+ concat-map: 0.0.1
+
+ /braces/3.0.2:
+ resolution:
+ {
+ integrity: sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==,
+ }
+ engines: { node: ">=8" }
+ dependencies:
+ fill-range: 7.0.1
+ dev: false
+
+ /browserslist/4.21.3:
+ resolution:
+ {
+ integrity: sha512-898rgRXLAyRkM1GryrrBHGkqA5hlpkV5MhtZwg9QXeiyLUYs2k00Un05aX5l2/yJIOObYKOpS2JNo8nJDE7fWQ==,
+ }
+ engines: { node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7 }
+ hasBin: true
+ dependencies:
+ caniuse-lite: 1.0.30001399
+ electron-to-chromium: 1.4.249
+ node-releases: 2.0.6
+ update-browserslist-db: 1.0.9_browserslist@4.21.3
+
+ /call-bind/1.0.2:
+ resolution:
+ {
+ integrity: sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==,
+ }
+ dependencies:
+ function-bind: 1.1.1
+ get-intrinsic: 1.1.3
+ dev: false
+
+ /callsites/3.1.0:
+ resolution:
+ {
+ integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==,
+ }
+ engines: { node: ">=6" }
+
+ /caniuse-lite/1.0.30001399:
+ resolution:
+ {
+ integrity: sha512-4vQ90tMKS+FkvuVWS5/QY1+d805ODxZiKFzsU8o/RsVJz49ZSRR8EjykLJbqhzdPgadbX6wB538wOzle3JniRA==,
+ }
+
+ /chalk/2.4.2:
+ resolution:
+ {
+ integrity: sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==,
+ }
+ engines: { node: ">=4" }
+ dependencies:
+ ansi-styles: 3.2.1
+ escape-string-regexp: 1.0.5
+ supports-color: 5.5.0
+
+ /chalk/4.1.2:
+ resolution:
+ {
+ integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==,
+ }
+ engines: { node: ">=10" }
+ dependencies:
+ ansi-styles: 4.3.0
+ supports-color: 7.2.0
+
+ /color-convert/1.9.3:
+ resolution:
+ {
+ integrity: sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==,
+ }
+ dependencies:
+ color-name: 1.1.3
+
+ /color-convert/2.0.1:
+ resolution:
+ {
+ integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==,
+ }
+ engines: { node: ">=7.0.0" }
+ dependencies:
+ color-name: 1.1.4
+
+ /color-name/1.1.3:
+ resolution:
+ {
+ integrity: sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==,
+ }
+
+ /color-name/1.1.4:
+ resolution:
+ {
+ integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==,
+ }
+
+ /concat-map/0.0.1:
+ resolution:
+ {
+ integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==,
+ }
+
+ /convert-source-map/1.8.0:
+ resolution:
+ {
+ integrity: sha512-+OQdjP49zViI/6i7nIJpA8rAl4sV/JdPfU9nZs3VqOwGIgizICvuN2ru6fMd+4llL0tar18UYJXfZ/TWtmhUjA==,
+ }
+ dependencies:
+ safe-buffer: 5.1.2
+
+ /core-js-pure/3.25.1:
+ resolution:
+ {
+ integrity: sha512-7Fr74bliUDdeJCBMxkkIuQ4xfxn/SwrVg+HkJUAoNEXVqYLv55l6Af0dJ5Lq2YBUW9yKqSkLXaS5SYPK6MGa/A==,
+ }
+ requiresBuild: true
+ dev: false
+
+ /cross-spawn/7.0.3:
+ resolution:
+ {
+ integrity: sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==,
+ }
+ engines: { node: ">= 8" }
+ dependencies:
+ path-key: 3.1.1
+ shebang-command: 2.0.0
+ which: 2.0.2
+
+ /csstype/3.1.1:
+ resolution:
+ {
+ integrity: sha512-DJR/VvkAvSZW9bTouZue2sSxDwdTN92uHjqeKVm+0dAqdfNykRzQ95tay8aXMBAAPpUiq4Qcug2L7neoRh2Egw==,
+ }
+ dev: true
+
+ /damerau-levenshtein/1.0.8:
+ resolution:
+ {
+ integrity: sha512-sdQSFB7+llfUcQHUQO3+B8ERRj0Oa4w9POWMI/puGtuf7gFywGmkaLCElnudfTiKZV+NvHqL0ifzdrI8Ro7ESA==,
+ }
+ dev: false
+
+ /debug/2.6.9:
+ resolution:
+ {
+ integrity: sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==,
+ }
+ peerDependencies:
+ supports-color: "*"
+ peerDependenciesMeta:
+ supports-color:
+ optional: true
+ dependencies:
+ ms: 2.0.0
+ dev: false
+
+ /debug/3.2.7:
+ resolution:
+ {
+ integrity: sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==,
+ }
+ peerDependencies:
+ supports-color: "*"
+ peerDependenciesMeta:
+ supports-color:
+ optional: true
+ dependencies:
+ ms: 2.1.3
+ dev: false
+
+ /debug/4.3.4:
+ resolution:
+ {
+ integrity: sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==,
+ }
+ engines: { node: ">=6.0" }
+ peerDependencies:
+ supports-color: "*"
+ peerDependenciesMeta:
+ supports-color:
+ optional: true
+ dependencies:
+ ms: 2.1.2
+
+ /deep-is/0.1.4:
+ resolution:
+ {
+ integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==,
+ }
+
+ /define-properties/1.1.4:
+ resolution:
+ {
+ integrity: sha512-uckOqKcfaVvtBdsVkdPv3XjveQJsNQqmhXgRi8uhvWWuPYZCNlzT8qAyblUgNoXdHdjMTzAqeGjAoli8f+bzPA==,
+ }
+ engines: { node: ">= 0.4" }
+ dependencies:
+ has-property-descriptors: 1.0.0
+ object-keys: 1.1.1
+ dev: false
+
+ /dir-glob/3.0.1:
+ resolution:
+ {
+ integrity: sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==,
+ }
+ engines: { node: ">=8" }
+ dependencies:
+ path-type: 4.0.0
+ dev: false
+
+ /doctrine/2.1.0:
+ resolution:
+ {
+ integrity: sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==,
+ }
+ engines: { node: ">=0.10.0" }
+ dependencies:
+ esutils: 2.0.3
+ dev: false
+
+ /doctrine/3.0.0:
+ resolution:
+ {
+ integrity: sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==,
+ }
+ engines: { node: ">=6.0.0" }
+ dependencies:
+ esutils: 2.0.3
+
+ /electron-to-chromium/1.4.249:
+ resolution:
+ {
+ integrity: sha512-GMCxR3p2HQvIw47A599crTKYZprqihoBL4lDSAUmr7IYekXFK5t/WgEBrGJDCa2HWIZFQEkGuMqPCi05ceYqPQ==,
+ }
+
+ /emoji-regex/8.0.0:
+ resolution:
+ {
+ integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==,
+ }
+
+ /emoji-regex/9.2.2:
+ resolution:
+ {
+ integrity: sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==,
+ }
+ dev: false
+
+ /enhanced-resolve/5.10.0:
+ resolution:
+ {
+ integrity: sha512-T0yTFjdpldGY8PmuXXR0PyQ1ufZpEGiHVrp7zHKB7jdR4qlmZHhONVM5AQOAWXuF/w3dnHbEQVrNptJgt7F+cQ==,
+ }
+ engines: { node: ">=10.13.0" }
+ dependencies:
+ graceful-fs: 4.2.10
+ tapable: 2.2.1
+ dev: true
+
+ /enquirer/2.3.6:
+ resolution:
+ {
+ integrity: sha512-yjNnPr315/FjS4zIsUxYguYUPP2e1NK4d7E7ZOLiyYCcbFBiTMyID+2wvm2w6+pZ/odMA7cRkjhsPbltwBOrLg==,
+ }
+ engines: { node: ">=8.6" }
+ dependencies:
+ ansi-colors: 4.1.3
+
+ /es-abstract/1.20.2:
+ resolution:
+ {
+ integrity: sha512-XxXQuVNrySBNlEkTYJoDNFe5+s2yIOpzq80sUHEdPdQr0S5nTLz4ZPPPswNIpKseDDUS5yghX1gfLIHQZ1iNuQ==,
+ }
+ engines: { node: ">= 0.4" }
+ dependencies:
+ call-bind: 1.0.2
+ es-to-primitive: 1.2.1
+ function-bind: 1.1.1
+ function.prototype.name: 1.1.5
+ get-intrinsic: 1.1.3
+ get-symbol-description: 1.0.0
+ has: 1.0.3
+ has-property-descriptors: 1.0.0
+ has-symbols: 1.0.3
+ internal-slot: 1.0.3
+ is-callable: 1.2.5
+ is-negative-zero: 2.0.2
+ is-regex: 1.1.4
+ is-shared-array-buffer: 1.0.2
+ is-string: 1.0.7
+ is-weakref: 1.0.2
+ object-inspect: 1.12.2
+ object-keys: 1.1.1
+ object.assign: 4.1.4
+ regexp.prototype.flags: 1.4.3
+ string.prototype.trimend: 1.0.5
+ string.prototype.trimstart: 1.0.5
+ unbox-primitive: 1.0.2
+ dev: false
+
+ /es-shim-unscopables/1.0.0:
+ resolution:
+ {
+ integrity: sha512-Jm6GPcCdC30eMLbZ2x8z2WuRwAws3zTBBKuusffYVUrNj/GVSUAZ+xKMaUpfNDR5IbyNA5LJbaecoUVbmUcB1w==,
+ }
+ dependencies:
+ has: 1.0.3
+ dev: false
+
+ /es-to-primitive/1.2.1:
+ resolution:
+ {
+ integrity: sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==,
+ }
+ engines: { node: ">= 0.4" }
+ dependencies:
+ is-callable: 1.2.5
+ is-date-object: 1.0.5
+ is-symbol: 1.0.4
+ dev: false
+
+ /escalade/3.1.1:
+ resolution:
+ {
+ integrity: sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==,
+ }
+ engines: { node: ">=6" }
+
+ /escape-string-regexp/1.0.5:
+ resolution:
+ {
+ integrity: sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==,
+ }
+ engines: { node: ">=0.8.0" }
+
+ /escape-string-regexp/4.0.0:
+ resolution:
+ {
+ integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==,
+ }
+ engines: { node: ">=10" }
+
+ /eslint-config-next/12.3.0_dyxdave6dwjbccc5dgiifcmuza:
+ resolution:
+ {
+ integrity: sha512-guHSkNyKnTBB8HU35COgAMeMV0E026BiYRYvyEVVaTOeFcnU3i1EI8/Da0Rl7H3Sgua5FEvoA0vYd2s8kdIUXg==,
+ }
+ peerDependencies:
+ eslint: ^7.23.0 || ^8.0.0
+ typescript: ">=3.3.1"
+ peerDependenciesMeta:
+ typescript:
+ optional: true
+ dependencies:
+ "@next/eslint-plugin-next": 12.3.0
+ "@rushstack/eslint-patch": 1.1.4
+ "@typescript-eslint/parser": 5.37.0_dyxdave6dwjbccc5dgiifcmuza
+ eslint: 7.32.0
+ eslint-import-resolver-node: 0.3.6
+ eslint-import-resolver-typescript: 2.7.1_hpmu7kn6tcn2vnxpfzvv33bxmy
+ eslint-plugin-import: 2.26.0_xag76ci373f5hzfwsxolrbhy4a
+ eslint-plugin-jsx-a11y: 6.6.1_eslint@7.32.0
+ eslint-plugin-react: 7.31.7_eslint@7.32.0
+ eslint-plugin-react-hooks: 4.6.0_eslint@7.32.0
+ typescript: 4.8.3
+ transitivePeerDependencies:
+ - eslint-import-resolver-webpack
+ - supports-color
+ dev: false
+
+ /eslint-config-prettier/8.5.0_eslint@7.32.0:
+ resolution:
+ {
+ integrity: sha512-obmWKLUNCnhtQRKc+tmnYuQl0pFU1ibYJQ5BGhTVB08bHe9wC8qUeG7c08dj9XX+AuPj1YSGSQIHl1pnDHZR0Q==,
+ }
+ hasBin: true
+ peerDependencies:
+ eslint: ">=7.0.0"
+ dependencies:
+ eslint: 7.32.0
+ dev: false
+
+ /eslint-config-turbo/0.0.3_eslint@7.32.0:
+ resolution:
+ {
+ integrity: sha512-hK5MlxDugUWZV9ZKcyfNwLXrlMuM2wPgAUk51cUFBC3nXRCVmCA9uSRFBZsyAIurN1wH7mS7G1NBo5F8VkF7lQ==,
+ }
+ peerDependencies:
+ eslint: ^7.23.0 || ^8.0.0
+ dependencies:
+ eslint: 7.32.0
+ eslint-plugin-turbo: 0.0.3_eslint@7.32.0
+ dev: false
+
+ /eslint-import-resolver-node/0.3.6:
+ resolution:
+ {
+ integrity: sha512-0En0w03NRVMn9Uiyn8YRPDKvWjxCWkslUEhGNTdGx15RvPJYQ+lbOlqrlNI2vEAs4pDYK4f/HN2TbDmk5TP0iw==,
+ }
+ dependencies:
+ debug: 3.2.7
+ resolve: 1.22.1
+ transitivePeerDependencies:
+ - supports-color
+ dev: false
+
+ /eslint-import-resolver-typescript/2.7.1_hpmu7kn6tcn2vnxpfzvv33bxmy:
+ resolution:
+ {
+ integrity: sha512-00UbgGwV8bSgUv34igBDbTOtKhqoRMy9bFjNehT40bXg6585PNIct8HhXZ0SybqB9rWtXj9crcku8ndDn/gIqQ==,
+ }
+ engines: { node: ">=4" }
+ peerDependencies:
+ eslint: "*"
+ eslint-plugin-import: "*"
+ dependencies:
+ debug: 4.3.4
+ eslint: 7.32.0
+ eslint-plugin-import: 2.26.0_xag76ci373f5hzfwsxolrbhy4a
+ glob: 7.2.3
+ is-glob: 4.0.3
+ resolve: 1.22.1
+ tsconfig-paths: 3.14.1
+ transitivePeerDependencies:
+ - supports-color
+ dev: false
+
+ /eslint-module-utils/2.7.4_qk4u2ghovatg5ueomqmuln4u2e:
+ resolution:
+ {
+ integrity: sha512-j4GT+rqzCoRKHwURX7pddtIPGySnX9Si/cgMI5ztrcqOPtk5dDEeZ34CQVPphnqkJytlc97Vuk05Um2mJ3gEQA==,
+ }
+ engines: { node: ">=4" }
+ peerDependencies:
+ "@typescript-eslint/parser": "*"
+ eslint: "*"
+ eslint-import-resolver-node: "*"
+ eslint-import-resolver-typescript: "*"
+ eslint-import-resolver-webpack: "*"
+ peerDependenciesMeta:
+ "@typescript-eslint/parser":
+ optional: true
+ eslint:
+ optional: true
+ eslint-import-resolver-node:
+ optional: true
+ eslint-import-resolver-typescript:
+ optional: true
+ eslint-import-resolver-webpack:
+ optional: true
+ dependencies:
+ "@typescript-eslint/parser": 5.37.0_dyxdave6dwjbccc5dgiifcmuza
+ debug: 3.2.7
+ eslint: 7.32.0
+ eslint-import-resolver-node: 0.3.6
+ eslint-import-resolver-typescript: 2.7.1_hpmu7kn6tcn2vnxpfzvv33bxmy
+ transitivePeerDependencies:
+ - supports-color
+ dev: false
+
+ /eslint-plugin-import/2.26.0_xag76ci373f5hzfwsxolrbhy4a:
+ resolution:
+ {
+ integrity: sha512-hYfi3FXaM8WPLf4S1cikh/r4IxnO6zrhZbEGz2b660EJRbuxgpDS5gkCuYgGWg2xxh2rBuIr4Pvhve/7c31koA==,
+ }
+ engines: { node: ">=4" }
+ peerDependencies:
+ "@typescript-eslint/parser": "*"
+ eslint: ^2 || ^3 || ^4 || ^5 || ^6 || ^7.2.0 || ^8
+ peerDependenciesMeta:
+ "@typescript-eslint/parser":
+ optional: true
+ dependencies:
+ "@typescript-eslint/parser": 5.37.0_dyxdave6dwjbccc5dgiifcmuza
+ array-includes: 3.1.5
+ array.prototype.flat: 1.3.0
+ debug: 2.6.9
+ doctrine: 2.1.0
+ eslint: 7.32.0
+ eslint-import-resolver-node: 0.3.6
+ eslint-module-utils: 2.7.4_qk4u2ghovatg5ueomqmuln4u2e
+ has: 1.0.3
+ is-core-module: 2.10.0
+ is-glob: 4.0.3
+ minimatch: 3.1.2
+ object.values: 1.1.5
+ resolve: 1.22.1
+ tsconfig-paths: 3.14.1
+ transitivePeerDependencies:
+ - eslint-import-resolver-typescript
+ - eslint-import-resolver-webpack
+ - supports-color
+ dev: false
+
+ /eslint-plugin-jsx-a11y/6.6.1_eslint@7.32.0:
+ resolution:
+ {
+ integrity: sha512-sXgFVNHiWffBq23uiS/JaP6eVR622DqwB4yTzKvGZGcPq6/yZ3WmOZfuBks/vHWo9GaFOqC2ZK4i6+C35knx7Q==,
+ }
+ engines: { node: ">=4.0" }
+ peerDependencies:
+ eslint: ^3 || ^4 || ^5 || ^6 || ^7 || ^8
+ dependencies:
+ "@babel/runtime": 7.19.0
+ aria-query: 4.2.2
+ array-includes: 3.1.5
+ ast-types-flow: 0.0.7
+ axe-core: 4.4.3
+ axobject-query: 2.2.0
+ damerau-levenshtein: 1.0.8
+ emoji-regex: 9.2.2
+ eslint: 7.32.0
+ has: 1.0.3
+ jsx-ast-utils: 3.3.3
+ language-tags: 1.0.5
+ minimatch: 3.1.2
+ semver: 6.3.0
+ dev: false
+
+ /eslint-plugin-react-hooks/4.6.0_eslint@7.32.0:
+ resolution:
+ {
+ integrity: sha512-oFc7Itz9Qxh2x4gNHStv3BqJq54ExXmfC+a1NjAta66IAN87Wu0R/QArgIS9qKzX3dXKPI9H5crl9QchNMY9+g==,
+ }
+ engines: { node: ">=10" }
+ peerDependencies:
+ eslint: ^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0
+ dependencies:
+ eslint: 7.32.0
+ dev: false
+
+ /eslint-plugin-react/7.31.7_eslint@7.32.0:
+ resolution:
+ {
+ integrity: sha512-8NldBTeYp/kQoTV1uT0XF6HcmDqbgZ0lNPkN0wlRw8DJKXEnaWu+oh/6gt3xIhzvQ35wB2Y545fJhIbJSZ2NNw==,
+ }
+ engines: { node: ">=4" }
+ peerDependencies:
+ eslint: ^3 || ^4 || ^5 || ^6 || ^7 || ^8
+ dependencies:
+ array-includes: 3.1.5
+ array.prototype.flatmap: 1.3.0
+ doctrine: 2.1.0
+ eslint: 7.32.0
+ estraverse: 5.3.0
+ jsx-ast-utils: 3.3.3
+ minimatch: 3.1.2
+ object.entries: 1.1.5
+ object.fromentries: 2.0.5
+ object.hasown: 1.1.1
+ object.values: 1.1.5
+ prop-types: 15.8.1
+ resolve: 2.0.0-next.4
+ semver: 6.3.0
+ string.prototype.matchall: 4.0.7
+ dev: false
+
+ /eslint-plugin-turbo/0.0.3_eslint@7.32.0:
+ resolution:
+ {
+ integrity: sha512-QjidATGxWtaB9QUrD3NocUySmsgWKZlBMFlw4kX2IIjRLAxMPwukk90h3ZTaNXyRHuaQsrEgh7hhlCZoxP0TTw==,
+ }
+ peerDependencies:
+ eslint: ^7.23.0 || ^8.0.0
+ dependencies:
+ eslint: 7.32.0
+ dev: false
+
+ /eslint-scope/5.1.1:
+ resolution:
+ {
+ integrity: sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==,
+ }
+ engines: { node: ">=8.0.0" }
+ dependencies:
+ esrecurse: 4.3.0
+ estraverse: 4.3.0
+
+ /eslint-utils/2.1.0:
+ resolution:
+ {
+ integrity: sha512-w94dQYoauyvlDc43XnGB8lU3Zt713vNChgt4EWwhXAP2XkBvndfxF0AgIqKOOasjPIPzj9JqgwkwbCYD0/V3Zg==,
+ }
+ engines: { node: ">=6" }
+ dependencies:
+ eslint-visitor-keys: 1.3.0
+
+ /eslint-visitor-keys/1.3.0:
+ resolution:
+ {
+ integrity: sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==,
+ }
+ engines: { node: ">=4" }
+
+ /eslint-visitor-keys/2.1.0:
+ resolution:
+ {
+ integrity: sha512-0rSmRBzXgDzIsD6mGdJgevzgezI534Cer5L/vyMX0kHzT/jiB43jRhd9YUlMGYLQy2zprNmoT8qasCGtY+QaKw==,
+ }
+ engines: { node: ">=10" }
+
+ /eslint-visitor-keys/3.3.0:
+ resolution:
+ {
+ integrity: sha512-mQ+suqKJVyeuwGYHAdjMFqjCyfl8+Ldnxuyp3ldiMBFKkvytrXUZWaiPCEav8qDHKty44bD+qV1IP4T+w+xXRA==,
+ }
+ engines: { node: ^12.22.0 || ^14.17.0 || >=16.0.0 }
+ dev: false
+
+ /eslint/7.32.0:
+ resolution:
+ {
+ integrity: sha512-VHZ8gX+EDfz+97jGcgyGCyRia/dPOd6Xh9yPv8Bl1+SoaIwD+a/vlrOmGRUyOYu7MwUhc7CxqeaDZU13S4+EpA==,
+ }
+ engines: { node: ^10.12.0 || >=12.0.0 }
+ hasBin: true
+ dependencies:
+ "@babel/code-frame": 7.12.11
+ "@eslint/eslintrc": 0.4.3
+ "@humanwhocodes/config-array": 0.5.0
+ ajv: 6.12.6
+ chalk: 4.1.2
+ cross-spawn: 7.0.3
+ debug: 4.3.4
+ doctrine: 3.0.0
+ enquirer: 2.3.6
+ escape-string-regexp: 4.0.0
+ eslint-scope: 5.1.1
+ eslint-utils: 2.1.0
+ eslint-visitor-keys: 2.1.0
+ espree: 7.3.1
+ esquery: 1.4.0
+ esutils: 2.0.3
+ fast-deep-equal: 3.1.3
+ file-entry-cache: 6.0.1
+ functional-red-black-tree: 1.0.1
+ glob-parent: 5.1.2
+ globals: 13.17.0
+ ignore: 4.0.6
+ import-fresh: 3.3.0
+ imurmurhash: 0.1.4
+ is-glob: 4.0.3
+ js-yaml: 3.14.1
+ json-stable-stringify-without-jsonify: 1.0.1
+ levn: 0.4.1
+ lodash.merge: 4.6.2
+ minimatch: 3.1.2
+ natural-compare: 1.4.0
+ optionator: 0.9.1
+ progress: 2.0.3
+ regexpp: 3.2.0
+ semver: 7.3.7
+ strip-ansi: 6.0.1
+ strip-json-comments: 3.1.1
+ table: 6.8.0
+ text-table: 0.2.0
+ v8-compile-cache: 2.3.0
+ transitivePeerDependencies:
+ - supports-color
+
+ /espree/7.3.1:
+ resolution:
+ {
+ integrity: sha512-v3JCNCE64umkFpmkFGqzVKsOT0tN1Zr+ueqLZfpV1Ob8e+CEgPWa+OxCoGH3tnhimMKIaBm4m/vaRpJ/krRz2g==,
+ }
+ engines: { node: ^10.12.0 || >=12.0.0 }
+ dependencies:
+ acorn: 7.4.1
+ acorn-jsx: 5.3.2_acorn@7.4.1
+ eslint-visitor-keys: 1.3.0
+
+ /esprima/4.0.1:
+ resolution:
+ {
+ integrity: sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==,
+ }
+ engines: { node: ">=4" }
+ hasBin: true
+
+ /esquery/1.4.0:
+ resolution:
+ {
+ integrity: sha512-cCDispWt5vHHtwMY2YrAQ4ibFkAL8RbH5YGBnZBc90MolvvfkkQcJro/aZiAQUlQ3qgrYS6D6v8Gc5G5CQsc9w==,
+ }
+ engines: { node: ">=0.10" }
+ dependencies:
+ estraverse: 5.3.0
+
+ /esrecurse/4.3.0:
+ resolution:
+ {
+ integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==,
+ }
+ engines: { node: ">=4.0" }
+ dependencies:
+ estraverse: 5.3.0
+
+ /estraverse/4.3.0:
+ resolution:
+ {
+ integrity: sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==,
+ }
+ engines: { node: ">=4.0" }
+
+ /estraverse/5.3.0:
+ resolution:
+ {
+ integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==,
+ }
+ engines: { node: ">=4.0" }
+
+ /esutils/2.0.3:
+ resolution:
+ {
+ integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==,
+ }
+ engines: { node: ">=0.10.0" }
+
+ /fast-deep-equal/3.1.3:
+ resolution:
+ {
+ integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==,
+ }
+
+ /fast-glob/3.2.12:
+ resolution:
+ {
+ integrity: sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==,
+ }
+ engines: { node: ">=8.6.0" }
+ dependencies:
+ "@nodelib/fs.stat": 2.0.5
+ "@nodelib/fs.walk": 1.2.8
+ glob-parent: 5.1.2
+ merge2: 1.4.1
+ micromatch: 4.0.5
+ dev: false
+
+ /fast-json-stable-stringify/2.1.0:
+ resolution:
+ {
+ integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==,
+ }
+
+ /fast-levenshtein/2.0.6:
+ resolution:
+ {
+ integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==,
+ }
+
+ /fastq/1.13.0:
+ resolution:
+ {
+ integrity: sha512-YpkpUnK8od0o1hmeSc7UUs/eB/vIPWJYjKck2QKIzAf71Vm1AAQ3EbuZB3g2JIy+pg+ERD0vqI79KyZiB2e2Nw==,
+ }
+ dependencies:
+ reusify: 1.0.4
+ dev: false
+
+ /file-entry-cache/6.0.1:
+ resolution:
+ {
+ integrity: sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==,
+ }
+ engines: { node: ^10.12.0 || >=12.0.0 }
+ dependencies:
+ flat-cache: 3.0.4
+
+ /fill-range/7.0.1:
+ resolution:
+ {
+ integrity: sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==,
+ }
+ engines: { node: ">=8" }
+ dependencies:
+ to-regex-range: 5.0.1
+ dev: false
+
+ /flat-cache/3.0.4:
+ resolution:
+ {
+ integrity: sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==,
+ }
+ engines: { node: ^10.12.0 || >=12.0.0 }
+ dependencies:
+ flatted: 3.2.7
+ rimraf: 3.0.2
+
+ /flatted/3.2.7:
+ resolution:
+ {
+ integrity: sha512-5nqDSxl8nn5BSNxyR3n4I6eDmbolI6WT+QqR547RwxQapgjQBmtktdP+HTBb/a/zLsbzERTONyUB5pefh5TtjQ==,
+ }
+
+ /fs.realpath/1.0.0:
+ resolution:
+ {
+ integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==,
+ }
+
+ /function-bind/1.1.1:
+ resolution:
+ {
+ integrity: sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==,
+ }
+ dev: false
+
+ /function.prototype.name/1.1.5:
+ resolution:
+ {
+ integrity: sha512-uN7m/BzVKQnCUF/iW8jYea67v++2u7m5UgENbHRtdDVclOUP+FMPlCNdmk0h/ysGyo2tavMJEDqJAkJdRa1vMA==,
+ }
+ engines: { node: ">= 0.4" }
+ dependencies:
+ call-bind: 1.0.2
+ define-properties: 1.1.4
+ es-abstract: 1.20.2
+ functions-have-names: 1.2.3
+ dev: false
+
+ /functional-red-black-tree/1.0.1:
+ resolution:
+ {
+ integrity: sha512-dsKNQNdj6xA3T+QlADDA7mOSlX0qiMINjn0cgr+eGHGsbSHzTabcIogz2+p/iqP1Xs6EP/sS2SbqH+brGTbq0g==,
+ }
+
+ /functions-have-names/1.2.3:
+ resolution:
+ {
+ integrity: sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==,
+ }
+ dev: false
+
+ /gensync/1.0.0-beta.2:
+ resolution:
+ {
+ integrity: sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==,
+ }
+ engines: { node: ">=6.9.0" }
+
+ /get-intrinsic/1.1.3:
+ resolution:
+ {
+ integrity: sha512-QJVz1Tj7MS099PevUG5jvnt9tSkXN8K14dxQlikJuPt4uD9hHAHjLyLBiLR5zELelBdD9QNRAXZzsJx0WaDL9A==,
+ }
+ dependencies:
+ function-bind: 1.1.1
+ has: 1.0.3
+ has-symbols: 1.0.3
+ dev: false
+
+ /get-symbol-description/1.0.0:
+ resolution:
+ {
+ integrity: sha512-2EmdH1YvIQiZpltCNgkuiUnyukzxM/R6NDJX31Ke3BG1Nq5b0S2PhX59UKi9vZpPDQVdqn+1IcaAwnzTT5vCjw==,
+ }
+ engines: { node: ">= 0.4" }
+ dependencies:
+ call-bind: 1.0.2
+ get-intrinsic: 1.1.3
+ dev: false
+
+ /glob-parent/5.1.2:
+ resolution:
+ {
+ integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==,
+ }
+ engines: { node: ">= 6" }
+ dependencies:
+ is-glob: 4.0.3
+
+ /glob/7.1.7:
+ resolution:
+ {
+ integrity: sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==,
+ }
+ dependencies:
+ fs.realpath: 1.0.0
+ inflight: 1.0.6
+ inherits: 2.0.4
+ minimatch: 3.1.2
+ once: 1.4.0
+ path-is-absolute: 1.0.1
+ dev: false
+
+ /glob/7.2.3:
+ resolution:
+ {
+ integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==,
+ }
+ dependencies:
+ fs.realpath: 1.0.0
+ inflight: 1.0.6
+ inherits: 2.0.4
+ minimatch: 3.1.2
+ once: 1.4.0
+ path-is-absolute: 1.0.1
+
+ /globals/11.12.0:
+ resolution:
+ {
+ integrity: sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==,
+ }
+ engines: { node: ">=4" }
+
+ /globals/13.17.0:
+ resolution:
+ {
+ integrity: sha512-1C+6nQRb1GwGMKm2dH/E7enFAMxGTmGI7/dEdhy/DNelv85w9B72t3uc5frtMNXIbzrarJJ/lTCjcaZwbLJmyw==,
+ }
+ engines: { node: ">=8" }
+ dependencies:
+ type-fest: 0.20.2
+
+ /globby/11.1.0:
+ resolution:
+ {
+ integrity: sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==,
+ }
+ engines: { node: ">=10" }
+ dependencies:
+ array-union: 2.1.0
+ dir-glob: 3.0.1
+ fast-glob: 3.2.12
+ ignore: 5.2.0
+ merge2: 1.4.1
+ slash: 3.0.0
+ dev: false
+
+ /graceful-fs/4.2.10:
+ resolution:
+ {
+ integrity: sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==,
+ }
+ dev: true
+
+ /has-bigints/1.0.2:
+ resolution:
+ {
+ integrity: sha512-tSvCKtBr9lkF0Ex0aQiP9N+OpV4zi2r/Nee5VkRDbaqv35RLYMzbwQfFSZZH0kR+Rd6302UJZ2p/bJCEoR3VoQ==,
+ }
+ dev: false
+
+ /has-flag/3.0.0:
+ resolution:
+ {
+ integrity: sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==,
+ }
+ engines: { node: ">=4" }
+
+ /has-flag/4.0.0:
+ resolution:
+ {
+ integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==,
+ }
+ engines: { node: ">=8" }
+
+ /has-property-descriptors/1.0.0:
+ resolution:
+ {
+ integrity: sha512-62DVLZGoiEBDHQyqG4w9xCuZ7eJEwNmJRWw2VY84Oedb7WFcA27fiEVe8oUQx9hAUJ4ekurquucTGwsyO1XGdQ==,
+ }
+ dependencies:
+ get-intrinsic: 1.1.3
+ dev: false
+
+ /has-symbols/1.0.3:
+ resolution:
+ {
+ integrity: sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==,
+ }
+ engines: { node: ">= 0.4" }
+ dev: false
+
+ /has-tostringtag/1.0.0:
+ resolution:
+ {
+ integrity: sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==,
+ }
+ engines: { node: ">= 0.4" }
+ dependencies:
+ has-symbols: 1.0.3
+ dev: false
+
+ /has/1.0.3:
+ resolution:
+ {
+ integrity: sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==,
+ }
+ engines: { node: ">= 0.4.0" }
+ dependencies:
+ function-bind: 1.1.1
+ dev: false
+
+ /ignore/4.0.6:
+ resolution:
+ {
+ integrity: sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==,
+ }
+ engines: { node: ">= 4" }
+
+ /ignore/5.2.0:
+ resolution:
+ {
+ integrity: sha512-CmxgYGiEPCLhfLnpPp1MoRmifwEIOgjcHXxOBjv7mY96c+eWScsOP9c112ZyLdWHi0FxHjI+4uVhKYp/gcdRmQ==,
+ }
+ engines: { node: ">= 4" }
+ dev: false
+
+ /import-fresh/3.3.0:
+ resolution:
+ {
+ integrity: sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==,
+ }
+ engines: { node: ">=6" }
+ dependencies:
+ parent-module: 1.0.1
+ resolve-from: 4.0.0
+
+ /imurmurhash/0.1.4:
+ resolution:
+ {
+ integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==,
+ }
+ engines: { node: ">=0.8.19" }
+
+ /inflight/1.0.6:
+ resolution:
+ {
+ integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==,
+ }
+ dependencies:
+ once: 1.4.0
+ wrappy: 1.0.2
+
+ /inherits/2.0.4:
+ resolution:
+ {
+ integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==,
+ }
+
+ /internal-slot/1.0.3:
+ resolution:
+ {
+ integrity: sha512-O0DB1JC/sPyZl7cIo78n5dR7eUSwwpYPiXRhTzNxZVAMUuB8vlnRFyLxdrVToks6XPLVnFfbzaVd5WLjhgg+vA==,
+ }
+ engines: { node: ">= 0.4" }
+ dependencies:
+ get-intrinsic: 1.1.3
+ has: 1.0.3
+ side-channel: 1.0.4
+ dev: false
+
+ /is-bigint/1.0.4:
+ resolution:
+ {
+ integrity: sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg==,
+ }
+ dependencies:
+ has-bigints: 1.0.2
+ dev: false
+
+ /is-boolean-object/1.1.2:
+ resolution:
+ {
+ integrity: sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==,
+ }
+ engines: { node: ">= 0.4" }
+ dependencies:
+ call-bind: 1.0.2
+ has-tostringtag: 1.0.0
+ dev: false
+
+ /is-callable/1.2.5:
+ resolution:
+ {
+ integrity: sha512-ZIWRujF6MvYGkEuHMYtFRkL2wAtFw89EHfKlXrkPkjQZZRWeh9L1q3SV13NIfHnqxugjLvAOkEHx9mb1zcMnEw==,
+ }
+ engines: { node: ">= 0.4" }
+ dev: false
+
+ /is-core-module/2.10.0:
+ resolution:
+ {
+ integrity: sha512-Erxj2n/LDAZ7H8WNJXd9tw38GYM3dv8rk8Zcs+jJuxYTW7sozH+SS8NtrSjVL1/vpLvWi1hxy96IzjJ3EHTJJg==,
+ }
+ dependencies:
+ has: 1.0.3
+ dev: false
+
+ /is-date-object/1.0.5:
+ resolution:
+ {
+ integrity: sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==,
+ }
+ engines: { node: ">= 0.4" }
+ dependencies:
+ has-tostringtag: 1.0.0
+ dev: false
+
+ /is-extglob/2.1.1:
+ resolution:
+ {
+ integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==,
+ }
+ engines: { node: ">=0.10.0" }
+
+ /is-fullwidth-code-point/3.0.0:
+ resolution:
+ {
+ integrity: sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==,
+ }
+ engines: { node: ">=8" }
+
+ /is-glob/4.0.3:
+ resolution:
+ {
+ integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==,
+ }
+ engines: { node: ">=0.10.0" }
+ dependencies:
+ is-extglob: 2.1.1
+
+ /is-negative-zero/2.0.2:
+ resolution:
+ {
+ integrity: sha512-dqJvarLawXsFbNDeJW7zAz8ItJ9cd28YufuuFzh0G8pNHjJMnY08Dv7sYX2uF5UpQOwieAeOExEYAWWfu7ZZUA==,
+ }
+ engines: { node: ">= 0.4" }
+ dev: false
+
+ /is-number-object/1.0.7:
+ resolution:
+ {
+ integrity: sha512-k1U0IRzLMo7ZlYIfzRu23Oh6MiIFasgpb9X76eqfFZAqwH44UI4KTBvBYIZ1dSL9ZzChTB9ShHfLkR4pdW5krQ==,
+ }
+ engines: { node: ">= 0.4" }
+ dependencies:
+ has-tostringtag: 1.0.0
+ dev: false
+
+ /is-number/7.0.0:
+ resolution:
+ {
+ integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==,
+ }
+ engines: { node: ">=0.12.0" }
+ dev: false
+
+ /is-regex/1.1.4:
+ resolution:
+ {
+ integrity: sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==,
+ }
+ engines: { node: ">= 0.4" }
+ dependencies:
+ call-bind: 1.0.2
+ has-tostringtag: 1.0.0
+ dev: false
+
+ /is-shared-array-buffer/1.0.2:
+ resolution:
+ {
+ integrity: sha512-sqN2UDu1/0y6uvXyStCOzyhAjCSlHceFoMKJW8W9EU9cvic/QdsZ0kEU93HEy3IUEFZIiH/3w+AH/UQbPHNdhA==,
+ }
+ dependencies:
+ call-bind: 1.0.2
+ dev: false
+
+ /is-string/1.0.7:
+ resolution:
+ {
+ integrity: sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==,
+ }
+ engines: { node: ">= 0.4" }
+ dependencies:
+ has-tostringtag: 1.0.0
+ dev: false
+
+ /is-symbol/1.0.4:
+ resolution:
+ {
+ integrity: sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==,
+ }
+ engines: { node: ">= 0.4" }
+ dependencies:
+ has-symbols: 1.0.3
+ dev: false
+
+ /is-weakref/1.0.2:
+ resolution:
+ {
+ integrity: sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==,
+ }
+ dependencies:
+ call-bind: 1.0.2
+ dev: false
+
+ /isexe/2.0.0:
+ resolution:
+ {
+ integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==,
+ }
+
+ /js-tokens/4.0.0:
+ resolution:
+ {
+ integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==,
+ }
+
+ /js-yaml/3.14.1:
+ resolution:
+ {
+ integrity: sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==,
+ }
+ hasBin: true
+ dependencies:
+ argparse: 1.0.10
+ esprima: 4.0.1
+
+ /jsesc/2.5.2:
+ resolution:
+ {
+ integrity: sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==,
+ }
+ engines: { node: ">=4" }
+ hasBin: true
+
+ /json-schema-traverse/0.4.1:
+ resolution:
+ {
+ integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==,
+ }
+
+ /json-schema-traverse/1.0.0:
+ resolution:
+ {
+ integrity: sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==,
+ }
+
+ /json-stable-stringify-without-jsonify/1.0.1:
+ resolution:
+ {
+ integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==,
+ }
+
+ /json5/1.0.1:
+ resolution:
+ {
+ integrity: sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==,
+ }
+ hasBin: true
+ dependencies:
+ minimist: 1.2.6
+ dev: false
+
+ /json5/2.2.1:
+ resolution:
+ {
+ integrity: sha512-1hqLFMSrGHRHxav9q9gNjJ5EXznIxGVO09xQRrwplcS8qs28pZ8s8hupZAmqDwZUmVZ2Qb2jnyPOWcDH8m8dlA==,
+ }
+ engines: { node: ">=6" }
+ hasBin: true
+
+ /jsx-ast-utils/3.3.3:
+ resolution:
+ {
+ integrity: sha512-fYQHZTZ8jSfmWZ0iyzfwiU4WDX4HpHbMCZ3gPlWYiCl3BoeOTsqKBqnTVfH2rYT7eP5c3sVbeSPHnnJOaTrWiw==,
+ }
+ engines: { node: ">=4.0" }
+ dependencies:
+ array-includes: 3.1.5
+ object.assign: 4.1.4
+ dev: false
+
+ /language-subtag-registry/0.3.22:
+ resolution:
+ {
+ integrity: sha512-tN0MCzyWnoz/4nHS6uxdlFWoUZT7ABptwKPQ52Ea7URk6vll88bWBVhodtnlfEuCcKWNGoc+uGbw1cwa9IKh/w==,
+ }
+ dev: false
+
+ /language-tags/1.0.5:
+ resolution:
+ {
+ integrity: sha512-qJhlO9cGXi6hBGKoxEG/sKZDAHD5Hnu9Hs4WbOY3pCWXDhw0N8x1NenNzm2EnNLkLkk7J2SdxAkDSbb6ftT+UQ==,
+ }
+ dependencies:
+ language-subtag-registry: 0.3.22
+ dev: false
+
+ /levn/0.4.1:
+ resolution:
+ {
+ integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==,
+ }
+ engines: { node: ">= 0.8.0" }
+ dependencies:
+ prelude-ls: 1.2.1
+ type-check: 0.4.0
+
+ /lodash.merge/4.6.2:
+ resolution:
+ {
+ integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==,
+ }
+
+ /lodash.truncate/4.4.2:
+ resolution:
+ {
+ integrity: sha512-jttmRe7bRse52OsWIMDLaXxWqRAmtIUccAQ3garviCqJjafXOfNMO0yMfNpdD6zbGaTU0P5Nz7e7gAT6cKmJRw==,
+ }
+
+ /lodash/4.17.21_ehchni3mpmovsvjxesffg2i5a4:
+ resolution:
+ {
+ integrity: sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==,
+ }
+ dev: false
+ patched: true
+
+ /loose-envify/1.4.0:
+ resolution:
+ {
+ integrity: sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==,
+ }
+ hasBin: true
+ dependencies:
+ js-tokens: 4.0.0
+
+ /lru-cache/6.0.0:
+ resolution:
+ {
+ integrity: sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==,
+ }
+ engines: { node: ">=10" }
+ dependencies:
+ yallist: 4.0.0
+
+ /merge2/1.4.1:
+ resolution:
+ {
+ integrity: sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==,
+ }
+ engines: { node: ">= 8" }
+ dev: false
+
+ /micromatch/4.0.5:
+ resolution:
+ {
+ integrity: sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==,
+ }
+ engines: { node: ">=8.6" }
+ dependencies:
+ braces: 3.0.2
+ picomatch: 2.3.1
+ dev: false
+
+ /minimatch/3.1.2:
+ resolution:
+ {
+ integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==,
+ }
+ dependencies:
+ brace-expansion: 1.1.11
+
+ /minimist/1.2.6:
+ resolution:
+ {
+ integrity: sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==,
+ }
+ dev: false
+
+ /ms/2.0.0:
+ resolution:
+ {
+ integrity: sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==,
+ }
+ dev: false
+
+ /ms/2.1.2:
+ resolution:
+ {
+ integrity: sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==,
+ }
+
+ /ms/2.1.3:
+ resolution:
+ {
+ integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==,
+ }
+ dev: false
+
+ /nanoid/3.3.4:
+ resolution:
+ {
+ integrity: sha512-MqBkQh/OHTS2egovRtLk45wEyNXwF+cokD+1YPf9u5VfJiRdAiRwB2froX5Co9Rh20xs4siNPm8naNotSD6RBw==,
+ }
+ engines: { node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1 }
+ hasBin: true
+ dev: false
+
+ /natural-compare/1.4.0:
+ resolution:
+ {
+ integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==,
+ }
+
+ /next-transpile-modules/9.0.0:
+ resolution:
+ {
+ integrity: sha512-VCNFOazIAnXn1hvgYYSTYMnoWgKgwlYh4lm1pKbSfiB3kj5ZYLcKVhfh3jkPOg1cnd9DP+pte9yCUocdPEUBTQ==,
+ }
+ dependencies:
+ enhanced-resolve: 5.10.0
+ escalade: 3.1.1
+ dev: true
+
+ /next/12.2.5_ir3quccc6i62x6qn6jjhyjjiey:
+ resolution:
+ {
+ integrity: sha512-tBdjqX5XC/oFs/6gxrZhjmiq90YWizUYU6qOWAfat7zJwrwapJ+BYgX2PmiacunXMaRpeVT4vz5MSPSLgNkrpA==,
+ }
+ engines: { node: ">=12.22.0" }
+ hasBin: true
+ peerDependencies:
+ fibers: ">= 3.1.0"
+ node-sass: ^6.0.0 || ^7.0.0
+ react: ^17.0.2 || ^18.0.0-0
+ react-dom: ^17.0.2 || ^18.0.0-0
+ sass: ^1.3.0
+ peerDependenciesMeta:
+ fibers:
+ optional: true
+ node-sass:
+ optional: true
+ sass:
+ optional: true
+ dependencies:
+ "@next/env": 12.2.5
+ "@swc/helpers": 0.4.3
+ caniuse-lite: 1.0.30001399
+ postcss: 8.4.14
+ react: 18.2.0
+ react-dom: 18.2.0_react@18.2.0
+ styled-jsx: 5.0.4_3toe27fv7etiytxb5kxc7fxaw4
+ use-sync-external-store: 1.2.0_react@18.2.0
+ optionalDependencies:
+ "@next/swc-android-arm-eabi": 12.2.5
+ "@next/swc-android-arm64": 12.2.5
+ "@next/swc-darwin-arm64": 12.2.5
+ "@next/swc-darwin-x64": 12.2.5
+ "@next/swc-freebsd-x64": 12.2.5
+ "@next/swc-linux-arm-gnueabihf": 12.2.5
+ "@next/swc-linux-arm64-gnu": 12.2.5
+ "@next/swc-linux-arm64-musl": 12.2.5
+ "@next/swc-linux-x64-gnu": 12.2.5
+ "@next/swc-linux-x64-musl": 12.2.5
+ "@next/swc-win32-arm64-msvc": 12.2.5
+ "@next/swc-win32-ia32-msvc": 12.2.5
+ "@next/swc-win32-x64-msvc": 12.2.5
+ transitivePeerDependencies:
+ - "@babel/core"
+ - babel-plugin-macros
+ dev: false
+
+ /node-releases/2.0.6:
+ resolution:
+ {
+ integrity: sha512-PiVXnNuFm5+iYkLBNeq5211hvO38y63T0i2KKh2KnUs3RpzJ+JtODFjkD8yjLwnDkTYF1eKXheUwdssR+NRZdg==,
+ }
+
+ /object-assign/4.1.1:
+ resolution:
+ {
+ integrity: sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==,
+ }
+ engines: { node: ">=0.10.0" }
+ dev: false
+
+ /object-inspect/1.12.2:
+ resolution:
+ {
+ integrity: sha512-z+cPxW0QGUp0mcqcsgQyLVRDoXFQbXOwBaqyF7VIgI4TWNQsDHrBpUQslRmIfAoYWdYzs6UlKJtB2XJpTaNSpQ==,
+ }
+ dev: false
+
+ /object-keys/1.1.1:
+ resolution:
+ {
+ integrity: sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==,
+ }
+ engines: { node: ">= 0.4" }
+ dev: false
+
+ /object.assign/4.1.4:
+ resolution:
+ {
+ integrity: sha512-1mxKf0e58bvyjSCtKYY4sRe9itRk3PJpquJOjeIkz885CczcI4IvJJDLPS72oowuSh+pBxUFROpX+TU++hxhZQ==,
+ }
+ engines: { node: ">= 0.4" }
+ dependencies:
+ call-bind: 1.0.2
+ define-properties: 1.1.4
+ has-symbols: 1.0.3
+ object-keys: 1.1.1
+ dev: false
+
+ /object.entries/1.1.5:
+ resolution:
+ {
+ integrity: sha512-TyxmjUoZggd4OrrU1W66FMDG6CuqJxsFvymeyXI51+vQLN67zYfZseptRge703kKQdo4uccgAKebXFcRCzk4+g==,
+ }
+ engines: { node: ">= 0.4" }
+ dependencies:
+ call-bind: 1.0.2
+ define-properties: 1.1.4
+ es-abstract: 1.20.2
+ dev: false
+
+ /object.fromentries/2.0.5:
+ resolution:
+ {
+ integrity: sha512-CAyG5mWQRRiBU57Re4FKoTBjXfDoNwdFVH2Y1tS9PqCsfUTymAohOkEMSG3aRNKmv4lV3O7p1et7c187q6bynw==,
+ }
+ engines: { node: ">= 0.4" }
+ dependencies:
+ call-bind: 1.0.2
+ define-properties: 1.1.4
+ es-abstract: 1.20.2
+ dev: false
+
+ /object.hasown/1.1.1:
+ resolution:
+ {
+ integrity: sha512-LYLe4tivNQzq4JdaWW6WO3HMZZJWzkkH8fnI6EebWl0VZth2wL2Lovm74ep2/gZzlaTdV62JZHEqHQ2yVn8Q/A==,
+ }
+ dependencies:
+ define-properties: 1.1.4
+ es-abstract: 1.20.2
+ dev: false
+
+ /object.values/1.1.5:
+ resolution:
+ {
+ integrity: sha512-QUZRW0ilQ3PnPpbNtgdNV1PDbEqLIiSFB3l+EnGtBQ/8SUTLj1PZwtQHABZtLgwpJZTSZhuGLOGk57Drx2IvYg==,
+ }
+ engines: { node: ">= 0.4" }
+ dependencies:
+ call-bind: 1.0.2
+ define-properties: 1.1.4
+ es-abstract: 1.20.2
+ dev: false
+
+ /once/1.4.0:
+ resolution:
+ {
+ integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==,
+ }
+ dependencies:
+ wrappy: 1.0.2
+
+ /optionator/0.9.1:
+ resolution:
+ {
+ integrity: sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw==,
+ }
+ engines: { node: ">= 0.8.0" }
+ dependencies:
+ deep-is: 0.1.4
+ fast-levenshtein: 2.0.6
+ levn: 0.4.1
+ prelude-ls: 1.2.1
+ type-check: 0.4.0
+ word-wrap: 1.2.3
+
+ /parent-module/1.0.1:
+ resolution:
+ {
+ integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==,
+ }
+ engines: { node: ">=6" }
+ dependencies:
+ callsites: 3.1.0
+
+ /path-is-absolute/1.0.1:
+ resolution:
+ {
+ integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==,
+ }
+ engines: { node: ">=0.10.0" }
+
+ /path-key/3.1.1:
+ resolution:
+ {
+ integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==,
+ }
+ engines: { node: ">=8" }
+
+ /path-parse/1.0.7:
+ resolution:
+ {
+ integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==,
+ }
+ dev: false
+
+ /path-type/4.0.0:
+ resolution:
+ {
+ integrity: sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==,
+ }
+ engines: { node: ">=8" }
+ dev: false
+
+ /picocolors/1.0.0:
+ resolution:
+ {
+ integrity: sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==,
+ }
+
+ /picomatch/2.3.1:
+ resolution:
+ {
+ integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==,
+ }
+ engines: { node: ">=8.6" }
+ dev: false
+
+ /postcss/8.4.14:
+ resolution:
+ {
+ integrity: sha512-E398TUmfAYFPBSdzgeieK2Y1+1cpdxJx8yXbK/m57nRhKSmk1GB2tO4lbLBtlkfPQTDKfe4Xqv1ASWPpayPEig==,
+ }
+ engines: { node: ^10 || ^12 || >=14 }
+ dependencies:
+ nanoid: 3.3.4
+ picocolors: 1.0.0
+ source-map-js: 1.0.2
+ dev: false
+
+ /prelude-ls/1.2.1:
+ resolution:
+ {
+ integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==,
+ }
+ engines: { node: ">= 0.8.0" }
+
+ /prettier/2.7.1:
+ resolution:
+ {
+ integrity: sha512-ujppO+MkdPqoVINuDFDRLClm7D78qbDt0/NR+wp5FqEZOoTNAjPHWj17QRhu7geIHJfcNhRk1XVQmF8Bp3ye+g==,
+ }
+ engines: { node: ">=10.13.0" }
+ hasBin: true
+ dev: true
+
+ /progress/2.0.3:
+ resolution:
+ {
+ integrity: sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==,
+ }
+ engines: { node: ">=0.4.0" }
+
+ /prop-types/15.8.1:
+ resolution:
+ {
+ integrity: sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==,
+ }
+ dependencies:
+ loose-envify: 1.4.0
+ object-assign: 4.1.1
+ react-is: 16.13.1
+ dev: false
+
+ /punycode/2.1.1:
+ resolution:
+ {
+ integrity: sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==,
+ }
+ engines: { node: ">=6" }
+
+ /queue-microtask/1.2.3:
+ resolution:
+ {
+ integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==,
+ }
+ dev: false
+
+ /react-dom/18.2.0_react@18.2.0:
+ resolution:
+ {
+ integrity: sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g==,
+ }
+ peerDependencies:
+ react: ^18.2.0
+ dependencies:
+ loose-envify: 1.4.0
+ react: 18.2.0
+ scheduler: 0.23.0
+ dev: false
+
+ /react-is/16.13.1:
+ resolution:
+ {
+ integrity: sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==,
+ }
+ dev: false
+
+ /react/18.2.0:
+ resolution:
+ {
+ integrity: sha512-/3IjMdb2L9QbBdWiW5e3P2/npwMBaU9mHCSCUzNln0ZCYbcfTsGbTJrU/kGemdH2IWmB2ioZ+zkxtmq6g09fGQ==,
+ }
+ engines: { node: ">=0.10.0" }
+ dependencies:
+ loose-envify: 1.4.0
+
+ /regenerator-runtime/0.13.9:
+ resolution:
+ {
+ integrity: sha512-p3VT+cOEgxFsRRA9X4lkI1E+k2/CtnKtU4gcxyaCUreilL/vqI6CdZ3wxVUx3UOUg+gnUOQQcRI7BmSI656MYA==,
+ }
+ dev: false
+
+ /regexp.prototype.flags/1.4.3:
+ resolution:
+ {
+ integrity: sha512-fjggEOO3slI6Wvgjwflkc4NFRCTZAu5CnNfBd5qOMYhWdn67nJBBu34/TkD++eeFmd8C9r9jfXJ27+nSiRkSUA==,
+ }
+ engines: { node: ">= 0.4" }
+ dependencies:
+ call-bind: 1.0.2
+ define-properties: 1.1.4
+ functions-have-names: 1.2.3
+ dev: false
+
+ /regexpp/3.2.0:
+ resolution:
+ {
+ integrity: sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg==,
+ }
+ engines: { node: ">=8" }
+
+ /require-from-string/2.0.2:
+ resolution:
+ {
+ integrity: sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==,
+ }
+ engines: { node: ">=0.10.0" }
+
+ /resolve-from/4.0.0:
+ resolution:
+ {
+ integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==,
+ }
+ engines: { node: ">=4" }
+
+ /resolve/1.22.1:
+ resolution:
+ {
+ integrity: sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==,
+ }
+ hasBin: true
+ dependencies:
+ is-core-module: 2.10.0
+ path-parse: 1.0.7
+ supports-preserve-symlinks-flag: 1.0.0
+ dev: false
+
+ /resolve/2.0.0-next.4:
+ resolution:
+ {
+ integrity: sha512-iMDbmAWtfU+MHpxt/I5iWI7cY6YVEZUQ3MBgPQ++XD1PELuJHIl82xBmObyP2KyQmkNB2dsqF7seoQQiAn5yDQ==,
+ }
+ hasBin: true
+ dependencies:
+ is-core-module: 2.10.0
+ path-parse: 1.0.7
+ supports-preserve-symlinks-flag: 1.0.0
+ dev: false
+
+ /reusify/1.0.4:
+ resolution:
+ {
+ integrity: sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==,
+ }
+ engines: { iojs: ">=1.0.0", node: ">=0.10.0" }
+ dev: false
+
+ /rimraf/3.0.2:
+ resolution:
+ {
+ integrity: sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==,
+ }
+ hasBin: true
+ dependencies:
+ glob: 7.2.3
+
+ /run-parallel/1.2.0:
+ resolution:
+ {
+ integrity: sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==,
+ }
+ dependencies:
+ queue-microtask: 1.2.3
+ dev: false
+
+ /safe-buffer/5.1.2:
+ resolution:
+ {
+ integrity: sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==,
+ }
+
+ /scheduler/0.23.0:
+ resolution:
+ {
+ integrity: sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw==,
+ }
+ dependencies:
+ loose-envify: 1.4.0
+ dev: false
+
+ /semver/6.3.0:
+ resolution:
+ {
+ integrity: sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==,
+ }
+ hasBin: true
+
+ /semver/7.3.7:
+ resolution:
+ {
+ integrity: sha512-QlYTucUYOews+WeEujDoEGziz4K6c47V/Bd+LjSSYcA94p+DmINdf7ncaUinThfvZyu13lN9OY1XDxt8C0Tw0g==,
+ }
+ engines: { node: ">=10" }
+ hasBin: true
+ dependencies:
+ lru-cache: 6.0.0
+
+ /shebang-command/2.0.0:
+ resolution:
+ {
+ integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==,
+ }
+ engines: { node: ">=8" }
+ dependencies:
+ shebang-regex: 3.0.0
+
+ /shebang-regex/3.0.0:
+ resolution:
+ {
+ integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==,
+ }
+ engines: { node: ">=8" }
+
+ /side-channel/1.0.4:
+ resolution:
+ {
+ integrity: sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==,
+ }
+ dependencies:
+ call-bind: 1.0.2
+ get-intrinsic: 1.1.3
+ object-inspect: 1.12.2
+ dev: false
+
+ /slash/3.0.0:
+ resolution:
+ {
+ integrity: sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==,
+ }
+ engines: { node: ">=8" }
+ dev: false
+
+ /slice-ansi/4.0.0:
+ resolution:
+ {
+ integrity: sha512-qMCMfhY040cVHT43K9BFygqYbUPFZKHOg7K73mtTWJRb8pyP3fzf4Ixd5SzdEJQ6MRUg/WBnOLxghZtKKurENQ==,
+ }
+ engines: { node: ">=10" }
+ dependencies:
+ ansi-styles: 4.3.0
+ astral-regex: 2.0.0
+ is-fullwidth-code-point: 3.0.0
+
+ /source-map-js/1.0.2:
+ resolution:
+ {
+ integrity: sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==,
+ }
+ engines: { node: ">=0.10.0" }
+ dev: false
+
+ /sprintf-js/1.0.3:
+ resolution:
+ {
+ integrity: sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==,
+ }
+
+ /string-width/4.2.3:
+ resolution:
+ {
+ integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==,
+ }
+ engines: { node: ">=8" }
+ dependencies:
+ emoji-regex: 8.0.0
+ is-fullwidth-code-point: 3.0.0
+ strip-ansi: 6.0.1
+
+ /string.prototype.matchall/4.0.7:
+ resolution:
+ {
+ integrity: sha512-f48okCX7JiwVi1NXCVWcFnZgADDC/n2vePlQ/KUCNqCikLLilQvwjMO8+BHVKvgzH0JB0J9LEPgxOGT02RoETg==,
+ }
+ dependencies:
+ call-bind: 1.0.2
+ define-properties: 1.1.4
+ es-abstract: 1.20.2
+ get-intrinsic: 1.1.3
+ has-symbols: 1.0.3
+ internal-slot: 1.0.3
+ regexp.prototype.flags: 1.4.3
+ side-channel: 1.0.4
+ dev: false
+
+ /string.prototype.trimend/1.0.5:
+ resolution:
+ {
+ integrity: sha512-I7RGvmjV4pJ7O3kdf+LXFpVfdNOxtCW/2C8f6jNiW4+PQchwxkCDzlk1/7p+Wl4bqFIZeF47qAHXLuHHWKAxog==,
+ }
+ dependencies:
+ call-bind: 1.0.2
+ define-properties: 1.1.4
+ es-abstract: 1.20.2
+ dev: false
+
+ /string.prototype.trimstart/1.0.5:
+ resolution:
+ {
+ integrity: sha512-THx16TJCGlsN0o6dl2o6ncWUsdgnLRSA23rRE5pyGBw/mLr3Ej/R2LaqCtgP8VNMGZsvMWnf9ooZPyY2bHvUFg==,
+ }
+ dependencies:
+ call-bind: 1.0.2
+ define-properties: 1.1.4
+ es-abstract: 1.20.2
+ dev: false
+
+ /strip-ansi/6.0.1:
+ resolution:
+ {
+ integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==,
+ }
+ engines: { node: ">=8" }
+ dependencies:
+ ansi-regex: 5.0.1
+
+ /strip-bom/3.0.0:
+ resolution:
+ {
+ integrity: sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==,
+ }
+ engines: { node: ">=4" }
+ dev: false
+
+ /strip-json-comments/3.1.1:
+ resolution:
+ {
+ integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==,
+ }
+ engines: { node: ">=8" }
+
+ /styled-jsx/5.0.4_3toe27fv7etiytxb5kxc7fxaw4:
+ resolution:
+ {
+ integrity: sha512-sDFWLbg4zR+UkNzfk5lPilyIgtpddfxXEULxhujorr5jtePTUqiPDc5BC0v1NRqTr/WaFBGQQUoYToGlF4B2KQ==,
+ }
+ engines: { node: ">= 12.0.0" }
+ peerDependencies:
+ "@babel/core": "*"
+ babel-plugin-macros: "*"
+ react: ">= 16.8.0 || 17.x.x || ^18.0.0-0"
+ peerDependenciesMeta:
+ "@babel/core":
+ optional: true
+ babel-plugin-macros:
+ optional: true
+ dependencies:
+ "@babel/core": 7.19.1
+ react: 18.2.0
+ dev: false
+
+ /supports-color/5.5.0:
+ resolution:
+ {
+ integrity: sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==,
+ }
+ engines: { node: ">=4" }
+ dependencies:
+ has-flag: 3.0.0
+
+ /supports-color/7.2.0:
+ resolution:
+ {
+ integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==,
+ }
+ engines: { node: ">=8" }
+ dependencies:
+ has-flag: 4.0.0
+
+ /supports-preserve-symlinks-flag/1.0.0:
+ resolution:
+ {
+ integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==,
+ }
+ engines: { node: ">= 0.4" }
+ dev: false
+
+ /table/6.8.0:
+ resolution:
+ {
+ integrity: sha512-s/fitrbVeEyHKFa7mFdkuQMWlH1Wgw/yEXMt5xACT4ZpzWFluehAxRtUUQKPuWhaLAWhFcVx6w3oC8VKaUfPGA==,
+ }
+ engines: { node: ">=10.0.0" }
+ dependencies:
+ ajv: 8.11.0
+ lodash.truncate: 4.4.2
+ slice-ansi: 4.0.0
+ string-width: 4.2.3
+ strip-ansi: 6.0.1
+
+ /tapable/2.2.1:
+ resolution:
+ {
+ integrity: sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==,
+ }
+ engines: { node: ">=6" }
+ dev: true
+
+ /text-table/0.2.0:
+ resolution:
+ {
+ integrity: sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==,
+ }
+
+ /to-fast-properties/2.0.0:
+ resolution:
+ {
+ integrity: sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==,
+ }
+ engines: { node: ">=4" }
+
+ /to-regex-range/5.0.1:
+ resolution:
+ {
+ integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==,
+ }
+ engines: { node: ">=8.0" }
+ dependencies:
+ is-number: 7.0.0
+ dev: false
+
+ /tsconfig-paths/3.14.1:
+ resolution:
+ {
+ integrity: sha512-fxDhWnFSLt3VuTwtvJt5fpwxBHg5AdKWMsgcPOOIilyjymcYVZoCQF8fvFRezCNfblEXmi+PcM1eYHeOAgXCOQ==,
+ }
+ dependencies:
+ "@types/json5": 0.0.29
+ json5: 1.0.1
+ minimist: 1.2.6
+ strip-bom: 3.0.0
+ dev: false
+
+ /tslib/1.14.1:
+ resolution:
+ {
+ integrity: sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==,
+ }
+ dev: false
+
+ /tslib/2.4.0:
+ resolution:
+ {
+ integrity: sha512-d6xOpEDfsi2CZVlPQzGeux8XMwLT9hssAsaPYExaQMuYskwb+x1x7J371tWlbBdWHroy99KnVB6qIkUbs5X3UQ==,
+ }
+ dev: false
+
+ /tsutils/3.21.0_typescript@4.8.3:
+ resolution:
+ {
+ integrity: sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==,
+ }
+ engines: { node: ">= 6" }
+ peerDependencies:
+ typescript: ">=2.8.0 || >= 3.2.0-dev || >= 3.3.0-dev || >= 3.4.0-dev || >= 3.5.0-dev || >= 3.6.0-dev || >= 3.6.0-beta || >= 3.7.0-dev || >= 3.7.0-beta"
+ dependencies:
+ tslib: 1.14.1
+ typescript: 4.8.3
+ dev: false
+
+ /turbo-android-arm64/1.4.6:
+ resolution:
+ {
+ integrity: sha512-YxSlHc64CF5J7yNUMiLBHkeLyzrpe75Oy7tivWb3z7ySG44BXPikk4HDJZPh0T1ELvukDwuPKkvDukJ2oCLJpA==,
+ }
+ cpu: [arm64]
+ os: [android]
+ requiresBuild: true
+ dev: true
+ optional: true
+
+ /turbo-darwin-64/1.4.6:
+ resolution:
+ {
+ integrity: sha512-f6uto7LLpjwZ6iZSF+8uaDpuiTji6xmnWDxNuW23DBE8iv5mxehHd+6Ys851uKDRrPb3QdCu9ctyigKTAla5Vg==,
+ }
+ cpu: [x64]
+ os: [darwin]
+ requiresBuild: true
+ dev: true
+ optional: true
+
+ /turbo-darwin-arm64/1.4.6:
+ resolution:
+ {
+ integrity: sha512-o9C6e5XyuMHQwE0fEhUxfpXxvNr2QXXWX8nxIjygxeF19AqKbk/s08vZBOEmXV6/gx/pRhZ1S2nf0PIUjKBD/Q==,
+ }
+ cpu: [arm64]
+ os: [darwin]
+ requiresBuild: true
+ dev: true
+ optional: true
+
+ /turbo-freebsd-64/1.4.6:
+ resolution:
+ {
+ integrity: sha512-Gg9VOUo6McXYKGevcYjGUSmMryZyZggvpdPh7Dw3QTcT8Tsy6OBtq6WnJ2O4kFDsMigyKtEOJPceD9vDMZt3yQ==,
+ }
+ cpu: [x64]
+ os: [freebsd]
+ requiresBuild: true
+ dev: true
+ optional: true
+
+ /turbo-freebsd-arm64/1.4.6:
+ resolution:
+ {
+ integrity: sha512-W7VrcneWFN1QENKt5cpAPSsf9ArYBBAm3VtPBZEO5tX8kuahGlah1SKdKJXrRxYOY82wyNxDagS/rHpBlrAAzw==,
+ }
+ cpu: [arm64]
+ os: [freebsd]
+ requiresBuild: true
+ dev: true
+ optional: true
+
+ /turbo-linux-32/1.4.6:
+ resolution:
+ {
+ integrity: sha512-76j/zsui6mWPX8pZVMGgF8eiKHPmKuGa2lo0A/Ja0HUvdYCOGUfHsWJGVVIeYbuEp3jsKyVt7OnMDeH9CqO6bg==,
+ }
+ cpu: [ia32]
+ os: [linux]
+ requiresBuild: true
+ dev: true
+ optional: true
+
+ /turbo-linux-64/1.4.6:
+ resolution:
+ {
+ integrity: sha512-z4A37Xm7lZyO9ddtGnvQHWMrsAKX6vFBxdbtb9MY76VRblo7lWSuk4LwCeM+T+ZDJ9LBFiF7aD/diRShlLx9jA==,
+ }
+ cpu: [x64]
+ os: [linux]
+ requiresBuild: true
+ dev: true
+ optional: true
+
+ /turbo-linux-arm/1.4.6:
+ resolution:
+ {
+ integrity: sha512-Uh/V3oaAdhyZW6FKPpKihAxQo3EbvLaVNnzzkBmBnvHRkqoDJHhpuG72V7nn8pzxVbJ1++NEVjvbc2kmKFvGjg==,
+ }
+ cpu: [arm]
+ os: [linux]
+ requiresBuild: true
+ dev: true
+ optional: true
+
+ /turbo-linux-arm64/1.4.6:
+ resolution:
+ {
+ integrity: sha512-FW1jmOpZfOoVVvml338N0MPnYjiMyYWTaMb4T+IosgGYymcUE3xJjfXJcqfU/9/uKTyY8zG0qr9/5rw2kpMS2Q==,
+ }
+ cpu: [arm64]
+ os: [linux]
+ requiresBuild: true
+ dev: true
+ optional: true
+
+ /turbo-linux-mips64le/1.4.6:
+ resolution:
+ {
+ integrity: sha512-iWaL3Pwj52BH3T2M8nXScmbSnq4+x47MYK7lJMG7FsZGAIoT5ToO1Wt1iX3GRHTcnIZYm/kCfJ1ptK/NCossLA==,
+ }
+ cpu: [mipsel]
+ os: [linux]
+ requiresBuild: true
+ dev: true
+ optional: true
+
+ /turbo-linux-ppc64le/1.4.6:
+ resolution:
+ {
+ integrity: sha512-Af/KlUmpiORDyELxT7byXNWl3fefErGQMJfeqXEtAdhs8OCKQWuU+lchcZbiBZYNpL+lZoa3PAmP9Fpx7R4plA==,
+ }
+ cpu: [ppc64]
+ os: [linux]
+ requiresBuild: true
+ dev: true
+ optional: true
+
+ /turbo-windows-32/1.4.6:
+ resolution:
+ {
+ integrity: sha512-NBd+XPlRSaR//lVN13Q9DOqK3CbowSvafIyGsO4jfvMsGTdyNDL6AYtFsvTKW91/G7ZhATmSEkPn2pZRuhP/DA==,
+ }
+ cpu: [ia32]
+ os: [win32]
+ requiresBuild: true
+ dev: true
+ optional: true
+
+ /turbo-windows-64/1.4.6:
+ resolution:
+ {
+ integrity: sha512-86AbmG+CjzVTpn4RGtwU2CYy4zSyAc9bIQ4pDGLIpCJg6JlD11duaiMJh0SCU/HCqWLJjWDI4qD+f9WNbgPsyQ==,
+ }
+ cpu: [x64]
+ os: [win32]
+ requiresBuild: true
+ dev: true
+ optional: true
+
+ /turbo-windows-arm64/1.4.6:
+ resolution:
+ {
+ integrity: sha512-V+pWcqhTtmQQ3ew8qEjYtUwzyW6tO1RgvP+6OKzItYzTnMTr1Fe42Q21V+tqRNxuNfFDKsgVJdk2p5wB87bvyQ==,
+ }
+ cpu: [arm64]
+ os: [win32]
+ requiresBuild: true
+ dev: true
+ optional: true
+
+ /turbo/1.4.6:
+ resolution:
+ {
+ integrity: sha512-FKtBXlOJ7YjSK22yj4sJLCtDcHFElypt7xw9cZN7Wyv9x4XBrTmh5KP6RmcGnRR1/GJlTNwD2AY2T9QTPnHh+g==,
+ }
+ hasBin: true
+ requiresBuild: true
+ optionalDependencies:
+ turbo-android-arm64: 1.4.6
+ turbo-darwin-64: 1.4.6
+ turbo-darwin-arm64: 1.4.6
+ turbo-freebsd-64: 1.4.6
+ turbo-freebsd-arm64: 1.4.6
+ turbo-linux-32: 1.4.6
+ turbo-linux-64: 1.4.6
+ turbo-linux-arm: 1.4.6
+ turbo-linux-arm64: 1.4.6
+ turbo-linux-mips64le: 1.4.6
+ turbo-linux-ppc64le: 1.4.6
+ turbo-windows-32: 1.4.6
+ turbo-windows-64: 1.4.6
+ turbo-windows-arm64: 1.4.6
+ dev: true
+
+ /type-check/0.4.0:
+ resolution:
+ {
+ integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==,
+ }
+ engines: { node: ">= 0.8.0" }
+ dependencies:
+ prelude-ls: 1.2.1
+
+ /type-fest/0.20.2:
+ resolution:
+ {
+ integrity: sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==,
+ }
+ engines: { node: ">=10" }
+
+ /typescript/4.8.3:
+ resolution:
+ {
+ integrity: sha512-goMHfm00nWPa8UvR/CPSvykqf6dVV8x/dp0c5mFTMTIu0u0FlGWRioyy7Nn0PGAdHxpJZnuO/ut+PpQ8UiHAig==,
+ }
+ engines: { node: ">=4.2.0" }
+ hasBin: true
+
+ /unbox-primitive/1.0.2:
+ resolution:
+ {
+ integrity: sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw==,
+ }
+ dependencies:
+ call-bind: 1.0.2
+ has-bigints: 1.0.2
+ has-symbols: 1.0.3
+ which-boxed-primitive: 1.0.2
+ dev: false
+
+ /underscore/1.13.4_3pbfs36izefyn2uycmknwkvuuy:
+ resolution:
+ {
+ integrity: sha512-BQFnUDuAQ4Yf/cYY5LNrK9NCJFKriaRbD9uR1fTeXnBeoa97W0i41qkZfGO9pSo8I5KzjAcSY2XYtdf0oKd7KQ==,
+ }
+ dev: false
+ patched: true
+
+ /update-browserslist-db/1.0.9_browserslist@4.21.3:
+ resolution:
+ {
+ integrity: sha512-/xsqn21EGVdXI3EXSum1Yckj3ZVZugqyOZQ/CxYPBD/R+ko9NSUScf8tFF4dOKY+2pvSSJA/S+5B8s4Zr4kyvg==,
+ }
+ hasBin: true
+ peerDependencies:
+ browserslist: ">= 4.21.0"
+ dependencies:
+ browserslist: 4.21.3
+ escalade: 3.1.1
+ picocolors: 1.0.0
+
+ /uri-js/4.4.1:
+ resolution:
+ {
+ integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==,
+ }
+ dependencies:
+ punycode: 2.1.1
+
+ /use-sync-external-store/1.2.0_react@18.2.0:
+ resolution:
+ {
+ integrity: sha512-eEgnFxGQ1Ife9bzYs6VLi8/4X6CObHMw9Qr9tPY43iKwsPw8xE8+EFsf/2cFZ5S3esXgpWgtSCtLNS41F+sKPA==,
+ }
+ peerDependencies:
+ react: ^16.8.0 || ^17.0.0 || ^18.0.0
+ dependencies:
+ react: 18.2.0
+ dev: false
+
+ /v8-compile-cache/2.3.0:
+ resolution:
+ {
+ integrity: sha512-l8lCEmLcLYZh4nbunNZvQCJc5pv7+RCwa8q/LdUx8u7lsWvPDKmpodJAJNwkAhJC//dFY48KuIEmjtd4RViDrA==,
+ }
+
+ /which-boxed-primitive/1.0.2:
+ resolution:
+ {
+ integrity: sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==,
+ }
+ dependencies:
+ is-bigint: 1.0.4
+ is-boolean-object: 1.1.2
+ is-number-object: 1.0.7
+ is-string: 1.0.7
+ is-symbol: 1.0.4
+ dev: false
+
+ /which/2.0.2:
+ resolution:
+ {
+ integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==,
+ }
+ engines: { node: ">= 8" }
+ hasBin: true
+ dependencies:
+ isexe: 2.0.0
+
+ /word-wrap/1.2.3:
+ resolution:
+ {
+ integrity: sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==,
+ }
+ engines: { node: ">=0.10.0" }
+
+ /wrappy/1.0.2:
+ resolution:
+ {
+ integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==,
+ }
+
+ /yallist/4.0.0:
+ resolution:
+ {
+ integrity: sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==,
+ }
+
+ file:packages/ui:
+ resolution: { directory: packages/ui, type: directory }
+ name: ui
+ version: 0.0.0
+ dev: false
+
+ github.com/peerigon/dashboard-icons/ce27ef933144e09cef3911025f3649040a8571b6:
+ resolution:
+ {
+        tarball: https://codeload.github.com/peerigon/dashboard-icons/tar.gz/ce27ef933144e09cef3911025f3649040a8571b6,
+ }
+ name: dashboard-icons
+ version: 1.0.0
+ dev: false
diff --git a/cli/internal/lockfile/testdata/pnpm8.yaml b/cli/internal/lockfile/testdata/pnpm8.yaml
new file mode 100644
index 0000000..d7d9e27
--- /dev/null
+++ b/cli/internal/lockfile/testdata/pnpm8.yaml
@@ -0,0 +1,107 @@
+lockfileVersion: "6.0"
+
+patchedDependencies:
+ is-even@1.0.0:
+ hash: trwuddosrpxsvtoqztvint6pca
+ path: patches/is-even@1.0.0.patch
+
+importers:
+ .: {}
+
+ packages/a:
+ dependencies:
+ c:
+ specifier: workspace:*
+ version: link:../c
+ is-odd:
+ specifier: ^3.0.1
+ version: 3.0.1
+
+ packages/b:
+ dependencies:
+ c:
+ specifier: workspace:*
+ version: link:../c
+ is-even:
+ specifier: ^1.0.0
+ version: 1.0.0_trwuddosrpxsvtoqztvint6pca
+
+ packages/c:
+ dependencies:
+ lodash:
+ specifier: ^4.17.21
+ version: 4.17.21
+
+packages:
+ /is-buffer@1.1.6:
+ resolution:
+ {
+ integrity: sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==,
+ }
+ dev: false
+
+ /is-even@1.0.0_trwuddosrpxsvtoqztvint6pca:
+ resolution:
+ {
+ integrity: sha512-LEhnkAdJqic4Dbqn58A0y52IXoHWlsueqQkKfMfdEnIYG8A1sm/GHidKkS6yvXlMoRrkM34csHnXQtOqcb+Jzg==,
+ }
+ engines: { node: ">=0.10.0" }
+ dependencies:
+ is-odd: 0.1.2
+ dev: false
+ patched: true
+
+ /is-number@3.0.0:
+ resolution:
+ {
+ integrity: sha512-4cboCqIpliH+mAvFNegjZQ4kgKc3ZUhQVr3HvWbSh5q3WH2v82ct+T2Y1hdU5Gdtorx/cLifQjqCbL7bpznLTg==,
+ }
+ engines: { node: ">=0.10.0" }
+ dependencies:
+ kind-of: 3.2.2
+ dev: false
+
+ /is-number@6.0.0:
+ resolution:
+ {
+ integrity: sha512-Wu1VHeILBK8KAWJUAiSZQX94GmOE45Rg6/538fKwiloUu21KncEkYGPqob2oSZ5mUT73vLGrHQjKw3KMPwfDzg==,
+ }
+ engines: { node: ">=0.10.0" }
+ dev: false
+
+ /is-odd@0.1.2:
+ resolution:
+ {
+ integrity: sha512-Ri7C2K7o5IrUU9UEI8losXJCCD/UtsaIrkR5sxIcFg4xQ9cRJXlWA5DQvTE0yDc0krvSNLsRGXN11UPS6KyfBw==,
+ }
+ engines: { node: ">=0.10.0" }
+ dependencies:
+ is-number: 3.0.0
+ dev: false
+
+ /is-odd@3.0.1:
+ resolution:
+ {
+ integrity: sha512-CQpnWPrDwmP1+SMHXZhtLtJv90yiyVfluGsX5iNCVkrhQtU3TQHsUWPG9wkdk9Lgd5yNpAg9jQEo90CBaXgWMA==,
+ }
+ engines: { node: ">=4" }
+ dependencies:
+ is-number: 6.0.0
+ dev: false
+
+ /kind-of@3.2.2:
+ resolution:
+ {
+ integrity: sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==,
+ }
+ engines: { node: ">=0.10.0" }
+ dependencies:
+ is-buffer: 1.1.6
+ dev: false
+
+ /lodash@4.17.21:
+ resolution:
+ {
+ integrity: sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==,
+ }
+ dev: false
diff --git a/cli/internal/lockfile/testdata/pnpm_override.yaml b/cli/internal/lockfile/testdata/pnpm_override.yaml
new file mode 100644
index 0000000..2102192
--- /dev/null
+++ b/cli/internal/lockfile/testdata/pnpm_override.yaml
@@ -0,0 +1,24 @@
+lockfileVersion: 5.4
+
+overrides:
+ "@nomiclabs/hardhat-ethers": npm:hardhat-deploy-ethers@^0.3.0-beta.13
+
+importers:
+ config/hardhat:
+ specifiers:
+ "@nomiclabs/hardhat-ethers": npm:hardhat-deploy-ethers@^0.3.0-beta.13
+ dependencies:
+ "@nomiclabs/hardhat-ethers": /hardhat-deploy-ethers/0.3.0-beta.13_yab2ug5tvye2kp6e24l5x3z7uy
+
+packages:
+ /hardhat-deploy-ethers/0.3.0-beta.13_yab2ug5tvye2kp6e24l5x3z7uy:
+ resolution:
+ {
+ integrity: sha512-PdWVcKB9coqWV1L7JTpfXRCI91Cgwsm7KLmBcwZ8f0COSm1xtABHZTyz3fvF6p42cTnz1VM0QnfDvMFlIRkSNw==,
+ }
+ peerDependencies:
+ ethers: ^5.0.0
+ hardhat: ^2.0.0
+ dependencies:
+ ethers: 5.7.2
+ hardhat: 2.12.4_typescript@4.9.4
diff --git a/cli/internal/lockfile/testdata/yarn.lock b/cli/internal/lockfile/testdata/yarn.lock
new file mode 100644
index 0000000..f4272d1
--- /dev/null
+++ b/cli/internal/lockfile/testdata/yarn.lock
@@ -0,0 +1,2304 @@
+# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
+# yarn lockfile v1
+
+
+"@ampproject/remapping@^2.1.0":
+ version "2.2.0"
+ resolved "https://registry.yarnpkg.com/@ampproject/remapping/-/remapping-2.2.0.tgz#56c133824780de3174aed5ab6834f3026790154d"
+ integrity sha512-qRmjj8nj9qmLTQXXmaR1cck3UXSRMPrbsLJAasZpF+t3riI71BXed5ebIOYwQntykeZuhjsdweEc9BxH5Jc26w==
+ dependencies:
+ "@jridgewell/gen-mapping" "^0.1.0"
+ "@jridgewell/trace-mapping" "^0.3.9"
+
+"@babel/code-frame@7.12.11":
+ version "7.12.11"
+ resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.12.11.tgz#f4ad435aa263db935b8f10f2c552d23fb716a63f"
+ integrity sha512-Zt1yodBx1UcyiePMSkWnU4hPqhwq7hGi2nFL1LeA3EUl+q2LQx16MISgJ0+z7dnmgvP9QtIleuETGOiOH1RcIw==
+ dependencies:
+ "@babel/highlight" "^7.10.4"
+
+"@babel/code-frame@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.18.6.tgz#3b25d38c89600baa2dcc219edfa88a74eb2c427a"
+ integrity sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q==
+ dependencies:
+ "@babel/highlight" "^7.18.6"
+
+"@babel/compat-data@^7.19.0":
+ version "7.19.0"
+ resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.19.0.tgz#2a592fd89bacb1fcde68de31bee4f2f2dacb0e86"
+ integrity sha512-y5rqgTTPTmaF5e2nVhOxw+Ur9HDJLsWb6U/KpgUzRZEdPfE6VOubXBKLdbcUTijzRptednSBDQbYZBOSqJxpJw==
+
+"@babel/core@^7.0.0":
+ version "7.19.0"
+ resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.19.0.tgz#d2f5f4f2033c00de8096be3c9f45772563e150c3"
+ integrity sha512-reM4+U7B9ss148rh2n1Qs9ASS+w94irYXga7c2jaQv9RVzpS7Mv1a9rnYYwuDa45G+DkORt9g6An2k/V4d9LbQ==
+ dependencies:
+ "@ampproject/remapping" "^2.1.0"
+ "@babel/code-frame" "^7.18.6"
+ "@babel/generator" "^7.19.0"
+ "@babel/helper-compilation-targets" "^7.19.0"
+ "@babel/helper-module-transforms" "^7.19.0"
+ "@babel/helpers" "^7.19.0"
+ "@babel/parser" "^7.19.0"
+ "@babel/template" "^7.18.10"
+ "@babel/traverse" "^7.19.0"
+ "@babel/types" "^7.19.0"
+ convert-source-map "^1.7.0"
+ debug "^4.1.0"
+ gensync "^1.0.0-beta.2"
+ json5 "^2.2.1"
+ semver "^6.3.0"
+
+"@babel/generator@^7.19.0":
+ version "7.19.0"
+ resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.19.0.tgz#785596c06425e59334df2ccee63ab166b738419a"
+ integrity sha512-S1ahxf1gZ2dpoiFgA+ohK9DIpz50bJ0CWs7Zlzb54Z4sG8qmdIrGrVqmy1sAtTVRb+9CU6U8VqT9L0Zj7hxHVg==
+ dependencies:
+ "@babel/types" "^7.19.0"
+ "@jridgewell/gen-mapping" "^0.3.2"
+ jsesc "^2.5.1"
+
+"@babel/helper-compilation-targets@^7.19.0":
+ version "7.19.0"
+ resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.19.0.tgz#537ec8339d53e806ed422f1e06c8f17d55b96bb0"
+ integrity sha512-Ai5bNWXIvwDvWM7njqsG3feMlL9hCVQsPYXodsZyLwshYkZVJt59Gftau4VrE8S9IT9asd2uSP1hG6wCNw+sXA==
+ dependencies:
+ "@babel/compat-data" "^7.19.0"
+ "@babel/helper-validator-option" "^7.18.6"
+ browserslist "^4.20.2"
+ semver "^6.3.0"
+
+"@babel/helper-environment-visitor@^7.18.9":
+ version "7.18.9"
+ resolved "https://registry.yarnpkg.com/@babel/helper-environment-visitor/-/helper-environment-visitor-7.18.9.tgz#0c0cee9b35d2ca190478756865bb3528422f51be"
+ integrity sha512-3r/aACDJ3fhQ/EVgFy0hpj8oHyHpQc+LPtJoY9SzTThAsStm4Ptegq92vqKoE3vD706ZVFWITnMnxucw+S9Ipg==
+
+"@babel/helper-function-name@^7.19.0":
+ version "7.19.0"
+ resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.19.0.tgz#941574ed5390682e872e52d3f38ce9d1bef4648c"
+ integrity sha512-WAwHBINyrpqywkUH0nTnNgI5ina5TFn85HKS0pbPDfxFfhyR/aNQEn4hGi1P1JyT//I0t4OgXUlofzWILRvS5w==
+ dependencies:
+ "@babel/template" "^7.18.10"
+ "@babel/types" "^7.19.0"
+
+"@babel/helper-hoist-variables@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.yarnpkg.com/@babel/helper-hoist-variables/-/helper-hoist-variables-7.18.6.tgz#d4d2c8fb4baeaa5c68b99cc8245c56554f926678"
+ integrity sha512-UlJQPkFqFULIcyW5sbzgbkxn2FKRgwWiRexcuaR8RNJRy8+LLveqPjwZV/bwrLZCN0eUHD/x8D0heK1ozuoo6Q==
+ dependencies:
+ "@babel/types" "^7.18.6"
+
+"@babel/helper-module-imports@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.18.6.tgz#1e3ebdbbd08aad1437b428c50204db13c5a3ca6e"
+ integrity sha512-0NFvs3VkuSYbFi1x2Vd6tKrywq+z/cLeYC/RJNFrIX/30Bf5aiGYbtvGXolEktzJH8o5E5KJ3tT+nkxuuZFVlA==
+ dependencies:
+ "@babel/types" "^7.18.6"
+
+"@babel/helper-module-transforms@^7.19.0":
+ version "7.19.0"
+ resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.19.0.tgz#309b230f04e22c58c6a2c0c0c7e50b216d350c30"
+ integrity sha512-3HBZ377Fe14RbLIA+ac3sY4PTgpxHVkFrESaWhoI5PuyXPBBX8+C34qblV9G89ZtycGJCmCI/Ut+VUDK4bltNQ==
+ dependencies:
+ "@babel/helper-environment-visitor" "^7.18.9"
+ "@babel/helper-module-imports" "^7.18.6"
+ "@babel/helper-simple-access" "^7.18.6"
+ "@babel/helper-split-export-declaration" "^7.18.6"
+ "@babel/helper-validator-identifier" "^7.18.6"
+ "@babel/template" "^7.18.10"
+ "@babel/traverse" "^7.19.0"
+ "@babel/types" "^7.19.0"
+
+"@babel/helper-simple-access@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.18.6.tgz#d6d8f51f4ac2978068df934b569f08f29788c7ea"
+ integrity sha512-iNpIgTgyAvDQpDj76POqg+YEt8fPxx3yaNBg3S30dxNKm2SWfYhD0TGrK/Eu9wHpUW63VQU894TsTg+GLbUa1g==
+ dependencies:
+ "@babel/types" "^7.18.6"
+
+"@babel/helper-split-export-declaration@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.18.6.tgz#7367949bc75b20c6d5a5d4a97bba2824ae8ef075"
+ integrity sha512-bde1etTx6ZyTmobl9LLMMQsaizFVZrquTEHOqKeQESMKo4PlObf+8+JA25ZsIpZhT/WEd39+vOdLXAFG/nELpA==
+ dependencies:
+ "@babel/types" "^7.18.6"
+
+"@babel/helper-string-parser@^7.18.10":
+ version "7.18.10"
+ resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.18.10.tgz#181f22d28ebe1b3857fa575f5c290b1aaf659b56"
+ integrity sha512-XtIfWmeNY3i4t7t4D2t02q50HvqHybPqW2ki1kosnvWCwuCMeo81Jf0gwr85jy/neUdg5XDdeFE/80DXiO+njw==
+
+"@babel/helper-validator-identifier@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.18.6.tgz#9c97e30d31b2b8c72a1d08984f2ca9b574d7a076"
+ integrity sha512-MmetCkz9ej86nJQV+sFCxoGGrUbU3q02kgLciwkrt9QqEB7cP39oKEY0PakknEO0Gu20SskMRi+AYZ3b1TpN9g==
+
+"@babel/helper-validator-option@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.18.6.tgz#bf0d2b5a509b1f336099e4ff36e1a63aa5db4db8"
+ integrity sha512-XO7gESt5ouv/LRJdrVjkShckw6STTaB7l9BrpBaAHDeF5YZT+01PCwmR0SJHnkW6i8OwW/EVWRShfi4j2x+KQw==
+
+"@babel/helpers@^7.19.0":
+ version "7.19.0"
+ resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.19.0.tgz#f30534657faf246ae96551d88dd31e9d1fa1fc18"
+ integrity sha512-DRBCKGwIEdqY3+rPJgG/dKfQy9+08rHIAJx8q2p+HSWP87s2HCrQmaAMMyMll2kIXKCW0cO1RdQskx15Xakftg==
+ dependencies:
+ "@babel/template" "^7.18.10"
+ "@babel/traverse" "^7.19.0"
+ "@babel/types" "^7.19.0"
+
+"@babel/highlight@^7.10.4", "@babel/highlight@^7.18.6":
+ version "7.18.6"
+ resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.18.6.tgz#81158601e93e2563795adcbfbdf5d64be3f2ecdf"
+ integrity sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g==
+ dependencies:
+ "@babel/helper-validator-identifier" "^7.18.6"
+ chalk "^2.0.0"
+ js-tokens "^4.0.0"
+
+"@babel/parser@^7.18.10", "@babel/parser@^7.19.0":
+ version "7.19.0"
+ resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.19.0.tgz#497fcafb1d5b61376959c1c338745ef0577aa02c"
+ integrity sha512-74bEXKX2h+8rrfQUfsBfuZZHzsEs6Eql4pqy/T4Nn6Y9wNPggQOqD6z6pn5Bl8ZfysKouFZT/UXEH94ummEeQw==
+
+"@babel/runtime-corejs3@^7.10.2":
+ version "7.19.0"
+ resolved "https://registry.yarnpkg.com/@babel/runtime-corejs3/-/runtime-corejs3-7.19.0.tgz#0df75cb8e5ecba3ca9e658898694e5326d52397f"
+ integrity sha512-JyXXoCu1N8GLuKc2ii8y5RGma5FMpFeO2nAQIe0Yzrbq+rQnN+sFj47auLblR5ka6aHNGPDgv8G/iI2Grb0ldQ==
+ dependencies:
+ core-js-pure "^3.20.2"
+ regenerator-runtime "^0.13.4"
+
+"@babel/runtime@^7.10.2", "@babel/runtime@^7.18.9":
+ version "7.19.0"
+ resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.19.0.tgz#22b11c037b094d27a8a2504ea4dcff00f50e2259"
+ integrity sha512-eR8Lo9hnDS7tqkO7NsV+mKvCmv5boaXFSZ70DnfhcgiEne8hv9oCEd36Klw74EtizEqLsy4YnW8UWwpBVolHZA==
+ dependencies:
+ regenerator-runtime "^0.13.4"
+
+"@babel/template@^7.18.10":
+ version "7.18.10"
+ resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.18.10.tgz#6f9134835970d1dbf0835c0d100c9f38de0c5e71"
+ integrity sha512-TI+rCtooWHr3QJ27kJxfjutghu44DLnasDMwpDqCXVTal9RLp3RSYNh4NdBrRP2cQAoG9A8juOQl6P6oZG4JxA==
+ dependencies:
+ "@babel/code-frame" "^7.18.6"
+ "@babel/parser" "^7.18.10"
+ "@babel/types" "^7.18.10"
+
+"@babel/traverse@^7.19.0":
+ version "7.19.0"
+ resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.19.0.tgz#eb9c561c7360005c592cc645abafe0c3c4548eed"
+ integrity sha512-4pKpFRDh+utd2mbRC8JLnlsMUii3PMHjpL6a0SZ4NMZy7YFP9aXORxEhdMVOc9CpWtDF09IkciQLEhK7Ml7gRA==
+ dependencies:
+ "@babel/code-frame" "^7.18.6"
+ "@babel/generator" "^7.19.0"
+ "@babel/helper-environment-visitor" "^7.18.9"
+ "@babel/helper-function-name" "^7.19.0"
+ "@babel/helper-hoist-variables" "^7.18.6"
+ "@babel/helper-split-export-declaration" "^7.18.6"
+ "@babel/parser" "^7.19.0"
+ "@babel/types" "^7.19.0"
+ debug "^4.1.0"
+ globals "^11.1.0"
+
+"@babel/types@^7.18.10", "@babel/types@^7.18.6", "@babel/types@^7.19.0":
+ version "7.19.0"
+ resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.19.0.tgz#75f21d73d73dc0351f3368d28db73465f4814600"
+ integrity sha512-YuGopBq3ke25BVSiS6fgF49Ul9gH1x70Bcr6bqRLjWCkcX8Hre1/5+z+IiWOIerRMSSEfGZVB9z9kyq7wVs9YA==
+ dependencies:
+ "@babel/helper-string-parser" "^7.18.10"
+ "@babel/helper-validator-identifier" "^7.18.6"
+ to-fast-properties "^2.0.0"
+
+"@eslint/eslintrc@^0.4.3":
+ version "0.4.3"
+ resolved "https://registry.yarnpkg.com/@eslint/eslintrc/-/eslintrc-0.4.3.tgz#9e42981ef035beb3dd49add17acb96e8ff6f394c"
+ integrity sha512-J6KFFz5QCYUJq3pf0mjEcCJVERbzv71PUIDczuh9JkwGEzced6CO5ADLHB1rbf/+oPBtoPfMYNOpGDzCANlbXw==
+ dependencies:
+ ajv "^6.12.4"
+ debug "^4.1.1"
+ espree "^7.3.0"
+ globals "^13.9.0"
+ ignore "^4.0.6"
+ import-fresh "^3.2.1"
+ js-yaml "^3.13.1"
+ minimatch "^3.0.4"
+ strip-json-comments "^3.1.1"
+
+"@humanwhocodes/config-array@^0.5.0":
+ version "0.5.0"
+ resolved "https://registry.yarnpkg.com/@humanwhocodes/config-array/-/config-array-0.5.0.tgz#1407967d4c6eecd7388f83acf1eaf4d0c6e58ef9"
+ integrity sha512-FagtKFz74XrTl7y6HCzQpwDfXP0yhxe9lHLD1UZxjvZIcbyRz8zTFF/yYNfSfzU414eDwZ1SrO0Qvtyf+wFMQg==
+ dependencies:
+ "@humanwhocodes/object-schema" "^1.2.0"
+ debug "^4.1.1"
+ minimatch "^3.0.4"
+
+"@humanwhocodes/object-schema@^1.2.0":
+ version "1.2.1"
+ resolved "https://registry.yarnpkg.com/@humanwhocodes/object-schema/-/object-schema-1.2.1.tgz#b520529ec21d8e5945a1851dfd1c32e94e39ff45"
+ integrity sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA==
+
+"@jridgewell/gen-mapping@^0.1.0":
+ version "0.1.1"
+ resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.1.1.tgz#e5d2e450306a9491e3bd77e323e38d7aff315996"
+ integrity sha512-sQXCasFk+U8lWYEe66WxRDOE9PjVz4vSM51fTu3Hw+ClTpUSQb718772vH3pyS5pShp6lvQM7SxgIDXXXmOX7w==
+ dependencies:
+ "@jridgewell/set-array" "^1.0.0"
+ "@jridgewell/sourcemap-codec" "^1.4.10"
+
+"@jridgewell/gen-mapping@^0.3.2":
+ version "0.3.2"
+ resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.2.tgz#c1aedc61e853f2bb9f5dfe6d4442d3b565b253b9"
+ integrity sha512-mh65xKQAzI6iBcFzwv28KVWSmCkdRBWoOh+bYQGW3+6OZvbbN3TqMGo5hqYxQniRcH9F2VZIoJCm4pa3BPDK/A==
+ dependencies:
+ "@jridgewell/set-array" "^1.0.1"
+ "@jridgewell/sourcemap-codec" "^1.4.10"
+ "@jridgewell/trace-mapping" "^0.3.9"
+
+"@jridgewell/resolve-uri@^3.0.3":
+ version "3.1.0"
+ resolved "https://registry.yarnpkg.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz#2203b118c157721addfe69d47b70465463066d78"
+ integrity sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==
+
+"@jridgewell/set-array@^1.0.0", "@jridgewell/set-array@^1.0.1":
+ version "1.1.2"
+ resolved "https://registry.yarnpkg.com/@jridgewell/set-array/-/set-array-1.1.2.tgz#7c6cf998d6d20b914c0a55a91ae928ff25965e72"
+ integrity sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==
+
+"@jridgewell/sourcemap-codec@^1.4.10":
+ version "1.4.14"
+ resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz#add4c98d341472a289190b424efbdb096991bb24"
+ integrity sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==
+
+"@jridgewell/trace-mapping@^0.3.9":
+ version "0.3.15"
+ resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.15.tgz#aba35c48a38d3fd84b37e66c9c0423f9744f9774"
+ integrity sha512-oWZNOULl+UbhsgB51uuZzglikfIKSUBO/M9W2OfEjn7cmqoAiCgmv9lyACTUacZwBz0ITnJ2NqjU8Tx0DHL88g==
+ dependencies:
+ "@jridgewell/resolve-uri" "^3.0.3"
+ "@jridgewell/sourcemap-codec" "^1.4.10"
+
+"@next/env@12.2.5":
+ version "12.2.5"
+ resolved "https://registry.yarnpkg.com/@next/env/-/env-12.2.5.tgz#d908c57b35262b94db3e431e869b72ac3e1ad3e3"
+ integrity sha512-vLPLV3cpPGjUPT3PjgRj7e3nio9t6USkuew3JE/jMeon/9Mvp1WyR18v3iwnCuX7eUAm1HmAbJHHLAbcu/EJcw==
+
+"@next/eslint-plugin-next@12.3.0":
+ version "12.3.0"
+ resolved "https://registry.yarnpkg.com/@next/eslint-plugin-next/-/eslint-plugin-next-12.3.0.tgz#302c1f03618d5001ce92ea6826c329268759128e"
+ integrity sha512-jVdq1qYTNDjUtulnE8/hkPv0pHILV4jMg5La99iaY/FFm20WxVnsAZtbNnMvlPbf8dc010oO304SX9yXbg5PAw==
+ dependencies:
+ glob "7.1.7"
+
+"@next/swc-android-arm-eabi@12.2.5":
+ version "12.2.5"
+ resolved "https://registry.yarnpkg.com/@next/swc-android-arm-eabi/-/swc-android-arm-eabi-12.2.5.tgz#903a5479ab4c2705d9c08d080907475f7bacf94d"
+ integrity sha512-cPWClKxGhgn2dLWnspW+7psl3MoLQUcNqJqOHk2BhNcou9ARDtC0IjQkKe5qcn9qg7I7U83Gp1yh2aesZfZJMA==
+
+"@next/swc-android-arm64@12.2.5":
+ version "12.2.5"
+ resolved "https://registry.yarnpkg.com/@next/swc-android-arm64/-/swc-android-arm64-12.2.5.tgz#2f9a98ec4166c7860510963b31bda1f57a77c792"
+ integrity sha512-vMj0efliXmC5b7p+wfcQCX0AfU8IypjkzT64GiKJD9PgiA3IILNiGJr1fw2lyUDHkjeWx/5HMlMEpLnTsQslwg==
+
+"@next/swc-darwin-arm64@12.2.5":
+ version "12.2.5"
+ resolved "https://registry.yarnpkg.com/@next/swc-darwin-arm64/-/swc-darwin-arm64-12.2.5.tgz#31b1c3c659d54be546120c488a1e1bad21c24a1d"
+ integrity sha512-VOPWbO5EFr6snla/WcxUKtvzGVShfs302TEMOtzYyWni6f9zuOetijJvVh9CCTzInnXAZMtHyNhefijA4HMYLg==
+
+"@next/swc-darwin-x64@12.2.5":
+ version "12.2.5"
+ resolved "https://registry.yarnpkg.com/@next/swc-darwin-x64/-/swc-darwin-x64-12.2.5.tgz#2e44dd82b2b7fef88238d1bc4d3bead5884cedfd"
+ integrity sha512-5o8bTCgAmtYOgauO/Xd27vW52G2/m3i5PX7MUYePquxXAnX73AAtqA3WgPXBRitEB60plSKZgOTkcpqrsh546A==
+
+"@next/swc-freebsd-x64@12.2.5":
+ version "12.2.5"
+ resolved "https://registry.yarnpkg.com/@next/swc-freebsd-x64/-/swc-freebsd-x64-12.2.5.tgz#e24e75d8c2581bfebc75e4f08f6ddbd116ce9dbd"
+ integrity sha512-yYUbyup1JnznMtEBRkK4LT56N0lfK5qNTzr6/DEyDw5TbFVwnuy2hhLBzwCBkScFVjpFdfiC6SQAX3FrAZzuuw==
+
+"@next/swc-linux-arm-gnueabihf@12.2.5":
+ version "12.2.5"
+ resolved "https://registry.yarnpkg.com/@next/swc-linux-arm-gnueabihf/-/swc-linux-arm-gnueabihf-12.2.5.tgz#46d8c514d834d2b5f67086013f0bd5e3081e10b9"
+ integrity sha512-2ZE2/G921Acks7UopJZVMgKLdm4vN4U0yuzvAMJ6KBavPzqESA2yHJlm85TV/K9gIjKhSk5BVtauIUntFRP8cg==
+
+"@next/swc-linux-arm64-gnu@12.2.5":
+ version "12.2.5"
+ resolved "https://registry.yarnpkg.com/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-12.2.5.tgz#91f725ac217d3a1f4f9f53b553615ba582fd3d9f"
+ integrity sha512-/I6+PWVlz2wkTdWqhlSYYJ1pWWgUVva6SgX353oqTh8njNQp1SdFQuWDqk8LnM6ulheVfSsgkDzxrDaAQZnzjQ==
+
+"@next/swc-linux-arm64-musl@12.2.5":
+ version "12.2.5"
+ resolved "https://registry.yarnpkg.com/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-12.2.5.tgz#e627e8c867920995810250303cd9b8e963598383"
+ integrity sha512-LPQRelfX6asXyVr59p5sTpx5l+0yh2Vjp/R8Wi4X9pnqcayqT4CUJLiHqCvZuLin3IsFdisJL0rKHMoaZLRfmg==
+
+"@next/swc-linux-x64-gnu@12.2.5":
+ version "12.2.5"
+ resolved "https://registry.yarnpkg.com/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-12.2.5.tgz#83a5e224fbc4d119ef2e0f29d0d79c40cc43887e"
+ integrity sha512-0szyAo8jMCClkjNK0hknjhmAngUppoRekW6OAezbEYwHXN/VNtsXbfzgYOqjKWxEx3OoAzrT3jLwAF0HdX2MEw==
+
+"@next/swc-linux-x64-musl@12.2.5":
+ version "12.2.5"
+ resolved "https://registry.yarnpkg.com/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-12.2.5.tgz#be700d48471baac1ec2e9539396625584a317e95"
+ integrity sha512-zg/Y6oBar1yVnW6Il1I/08/2ukWtOG6s3acdJdEyIdsCzyQi4RLxbbhkD/EGQyhqBvd3QrC6ZXQEXighQUAZ0g==
+
+"@next/swc-win32-arm64-msvc@12.2.5":
+ version "12.2.5"
+ resolved "https://registry.yarnpkg.com/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-12.2.5.tgz#a93e958133ad3310373fda33a79aa10af2a0aa97"
+ integrity sha512-3/90DRNSqeeSRMMEhj4gHHQlLhhKg5SCCoYfE3kBjGpE63EfnblYUqsszGGZ9ekpKL/R4/SGB40iCQr8tR5Jiw==
+
+"@next/swc-win32-ia32-msvc@12.2.5":
+ version "12.2.5"
+ resolved "https://registry.yarnpkg.com/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-12.2.5.tgz#4f5f7ba0a98ff89a883625d4af0125baed8b2e19"
+ integrity sha512-hGLc0ZRAwnaPL4ulwpp4D2RxmkHQLuI8CFOEEHdzZpS63/hMVzv81g8jzYA0UXbb9pus/iTc3VRbVbAM03SRrw==
+
+"@next/swc-win32-x64-msvc@12.2.5":
+ version "12.2.5"
+ resolved "https://registry.yarnpkg.com/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-12.2.5.tgz#20fed129b04a0d3f632c6d0de135345bb623b1e4"
+ integrity sha512-7h5/ahY7NeaO2xygqVrSG/Y8Vs4cdjxIjowTZ5W6CKoTKn7tmnuxlUc2h74x06FKmbhAd9agOjr/AOKyxYYm9Q==
+
+"@nodelib/fs.scandir@2.1.5":
+ version "2.1.5"
+ resolved "https://registry.yarnpkg.com/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz#7619c2eb21b25483f6d167548b4cfd5a7488c3d5"
+ integrity sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==
+ dependencies:
+ "@nodelib/fs.stat" "2.0.5"
+ run-parallel "^1.1.9"
+
+"@nodelib/fs.stat@2.0.5", "@nodelib/fs.stat@^2.0.2":
+ version "2.0.5"
+ resolved "https://registry.yarnpkg.com/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz#5bd262af94e9d25bd1e71b05deed44876a222e8b"
+ integrity sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==
+
+"@nodelib/fs.walk@^1.2.3":
+ version "1.2.8"
+ resolved "https://registry.yarnpkg.com/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz#e95737e8bb6746ddedf69c556953494f196fe69a"
+ integrity sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==
+ dependencies:
+ "@nodelib/fs.scandir" "2.1.5"
+ fastq "^1.6.0"
+
+"@rushstack/eslint-patch@^1.1.3":
+ version "1.1.4"
+ resolved "https://registry.yarnpkg.com/@rushstack/eslint-patch/-/eslint-patch-1.1.4.tgz#0c8b74c50f29ee44f423f7416829c0bf8bb5eb27"
+ integrity sha512-LwzQKA4vzIct1zNZzBmRKI9QuNpLgTQMEjsQLf3BXuGYb3QPTP4Yjf6mkdX+X1mYttZ808QpOwAzZjv28kq7DA==
+
+"@swc/helpers@0.4.3":
+ version "0.4.3"
+ resolved "https://registry.yarnpkg.com/@swc/helpers/-/helpers-0.4.3.tgz#16593dfc248c53b699d4b5026040f88ddb497012"
+ integrity sha512-6JrF+fdUK2zbGpJIlN7G3v966PQjyx/dPt1T9km2wj+EUBqgrxCk3uX4Kct16MIm9gGxfKRcfax2hVf5jvlTzA==
+ dependencies:
+ tslib "^2.4.0"
+
+"@types/json5@^0.0.29":
+ version "0.0.29"
+ resolved "https://registry.yarnpkg.com/@types/json5/-/json5-0.0.29.tgz#ee28707ae94e11d2b827bcbe5270bcea7f3e71ee"
+ integrity sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ==
+
+"@types/node@^17.0.12":
+ version "17.0.45"
+ resolved "https://registry.yarnpkg.com/@types/node/-/node-17.0.45.tgz#2c0fafd78705e7a18b7906b5201a522719dc5190"
+ integrity sha512-w+tIMs3rq2afQdsPJlODhoUEKzFP1ayaoyl1CcnwtIlsVe7K7bA1NGm4s3PraqTLlXnbIN84zuBlxBWo1u9BLw==
+
+"@types/prop-types@*":
+ version "15.7.5"
+ resolved "https://registry.yarnpkg.com/@types/prop-types/-/prop-types-15.7.5.tgz#5f19d2b85a98e9558036f6a3cacc8819420f05cf"
+ integrity sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w==
+
+"@types/react-dom@^17.0.11":
+ version "17.0.17"
+ resolved "https://registry.yarnpkg.com/@types/react-dom/-/react-dom-17.0.17.tgz#2e3743277a793a96a99f1bf87614598289da68a1"
+ integrity sha512-VjnqEmqGnasQKV0CWLevqMTXBYG9GbwuE6x3VetERLh0cq2LTptFE73MrQi2S7GkKXCf2GgwItB/melLnxfnsg==
+ dependencies:
+ "@types/react" "^17"
+
+"@types/react@18.0.17":
+ version "18.0.17"
+ resolved "https://registry.yarnpkg.com/@types/react/-/react-18.0.17.tgz#4583d9c322d67efe4b39a935d223edcc7050ccf4"
+ integrity sha512-38ETy4tL+rn4uQQi7mB81G7V1g0u2ryquNmsVIOKUAEIDK+3CUjZ6rSRpdvS99dNBnkLFL83qfmtLacGOTIhwQ==
+ dependencies:
+ "@types/prop-types" "*"
+ "@types/scheduler" "*"
+ csstype "^3.0.2"
+
+"@types/react@^17", "@types/react@^17.0.37":
+ version "17.0.49"
+ resolved "https://registry.yarnpkg.com/@types/react/-/react-17.0.49.tgz#df87ba4ca8b7942209c3dc655846724539dc1049"
+ integrity sha512-CCBPMZaPhcKkYUTqFs/hOWqKjPxhTEmnZWjlHHgIMop67DsXywf9B5Os9Hz8KSacjNOgIdnZVJamwl232uxoPg==
+ dependencies:
+ "@types/prop-types" "*"
+ "@types/scheduler" "*"
+ csstype "^3.0.2"
+
+"@types/scheduler@*":
+ version "0.16.2"
+ resolved "https://registry.yarnpkg.com/@types/scheduler/-/scheduler-0.16.2.tgz#1a62f89525723dde24ba1b01b092bf5df8ad4d39"
+ integrity sha512-hppQEBDmlwhFAXKJX2KnWLYu5yMfi91yazPb2l+lbJiwW+wdo1gNeRA+3RgNSO39WYX2euey41KEwnqesU2Jew==
+
+"@typescript-eslint/parser@^5.21.0":
+ version "5.36.2"
+ resolved "https://registry.yarnpkg.com/@typescript-eslint/parser/-/parser-5.36.2.tgz#3ddf323d3ac85a25295a55fcb9c7a49ab4680ddd"
+ integrity sha512-qS/Kb0yzy8sR0idFspI9Z6+t7mqk/oRjnAYfewG+VN73opAUvmYL3oPIMmgOX6CnQS6gmVIXGshlb5RY/R22pA==
+ dependencies:
+ "@typescript-eslint/scope-manager" "5.36.2"
+ "@typescript-eslint/types" "5.36.2"
+ "@typescript-eslint/typescript-estree" "5.36.2"
+ debug "^4.3.4"
+
+"@typescript-eslint/scope-manager@5.36.2":
+ version "5.36.2"
+ resolved "https://registry.yarnpkg.com/@typescript-eslint/scope-manager/-/scope-manager-5.36.2.tgz#a75eb588a3879ae659514780831370642505d1cd"
+ integrity sha512-cNNP51L8SkIFSfce8B1NSUBTJTu2Ts4nWeWbFrdaqjmn9yKrAaJUBHkyTZc0cL06OFHpb+JZq5AUHROS398Orw==
+ dependencies:
+ "@typescript-eslint/types" "5.36.2"
+ "@typescript-eslint/visitor-keys" "5.36.2"
+
+"@typescript-eslint/types@5.36.2":
+ version "5.36.2"
+ resolved "https://registry.yarnpkg.com/@typescript-eslint/types/-/types-5.36.2.tgz#a5066e500ebcfcee36694186ccc57b955c05faf9"
+ integrity sha512-9OJSvvwuF1L5eS2EQgFUbECb99F0mwq501w0H0EkYULkhFa19Qq7WFbycdw1PexAc929asupbZcgjVIe6OK/XQ==
+
+"@typescript-eslint/typescript-estree@5.36.2":
+ version "5.36.2"
+ resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-5.36.2.tgz#0c93418b36c53ba0bc34c61fe9405c4d1d8fe560"
+ integrity sha512-8fyH+RfbKc0mTspfuEjlfqA4YywcwQK2Amcf6TDOwaRLg7Vwdu4bZzyvBZp4bjt1RRjQ5MDnOZahxMrt2l5v9w==
+ dependencies:
+ "@typescript-eslint/types" "5.36.2"
+ "@typescript-eslint/visitor-keys" "5.36.2"
+ debug "^4.3.4"
+ globby "^11.1.0"
+ is-glob "^4.0.3"
+ semver "^7.3.7"
+ tsutils "^3.21.0"
+
+"@typescript-eslint/visitor-keys@5.36.2":
+ version "5.36.2"
+ resolved "https://registry.yarnpkg.com/@typescript-eslint/visitor-keys/-/visitor-keys-5.36.2.tgz#2f8f78da0a3bad3320d2ac24965791ac39dace5a"
+ integrity sha512-BtRvSR6dEdrNt7Net2/XDjbYKU5Ml6GqJgVfXT0CxTCJlnIqK7rAGreuWKMT2t8cFUT2Msv5oxw0GMRD7T5J7A==
+ dependencies:
+ "@typescript-eslint/types" "5.36.2"
+ eslint-visitor-keys "^3.3.0"
+
+acorn-jsx@^5.3.1:
+ version "5.3.2"
+ resolved "https://registry.yarnpkg.com/acorn-jsx/-/acorn-jsx-5.3.2.tgz#7ed5bb55908b3b2f1bc55c6af1653bada7f07937"
+ integrity sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==
+
+acorn@^7.4.0:
+ version "7.4.1"
+ resolved "https://registry.yarnpkg.com/acorn/-/acorn-7.4.1.tgz#feaed255973d2e77555b83dbc08851a6c63520fa"
+ integrity sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==
+
+ajv@^6.10.0, ajv@^6.12.4:
+ version "6.12.6"
+ resolved "https://registry.yarnpkg.com/ajv/-/ajv-6.12.6.tgz#baf5a62e802b07d977034586f8c3baf5adf26df4"
+ integrity sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==
+ dependencies:
+ fast-deep-equal "^3.1.1"
+ fast-json-stable-stringify "^2.0.0"
+ json-schema-traverse "^0.4.1"
+ uri-js "^4.2.2"
+
+ajv@^8.0.1:
+ version "8.11.0"
+ resolved "https://registry.yarnpkg.com/ajv/-/ajv-8.11.0.tgz#977e91dd96ca669f54a11e23e378e33b884a565f"
+ integrity sha512-wGgprdCvMalC0BztXvitD2hC04YffAvtsUn93JbGXYLAtCUO4xd17mCCZQxUOItiBwZvJScWo8NIvQMQ71rdpg==
+ dependencies:
+ fast-deep-equal "^3.1.1"
+ json-schema-traverse "^1.0.0"
+ require-from-string "^2.0.2"
+ uri-js "^4.2.2"
+
+ansi-colors@^4.1.1:
+ version "4.1.3"
+ resolved "https://registry.yarnpkg.com/ansi-colors/-/ansi-colors-4.1.3.tgz#37611340eb2243e70cc604cad35d63270d48781b"
+ integrity sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==
+
+ansi-regex@^5.0.1:
+ version "5.0.1"
+ resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.1.tgz#082cb2c89c9fe8659a311a53bd6a4dc5301db304"
+ integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==
+
+ansi-styles@^3.2.1:
+ version "3.2.1"
+ resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d"
+ integrity sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==
+ dependencies:
+ color-convert "^1.9.0"
+
+ansi-styles@^4.0.0, ansi-styles@^4.1.0:
+ version "4.3.0"
+ resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-4.3.0.tgz#edd803628ae71c04c85ae7a0906edad34b648937"
+ integrity sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==
+ dependencies:
+ color-convert "^2.0.1"
+
+argparse@^1.0.7:
+ version "1.0.10"
+ resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.10.tgz#bcd6791ea5ae09725e17e5ad988134cd40b3d911"
+ integrity sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==
+ dependencies:
+ sprintf-js "~1.0.2"
+
+aria-query@^4.2.2:
+ version "4.2.2"
+ resolved "https://registry.yarnpkg.com/aria-query/-/aria-query-4.2.2.tgz#0d2ca6c9aceb56b8977e9fed6aed7e15bbd2f83b"
+ integrity sha512-o/HelwhuKpTj/frsOsbNLNgnNGVIFsVP/SW2BSF14gVl7kAfMOJ6/8wUAUvG1R1NHKrfG+2sHZTu0yauT1qBrA==
+ dependencies:
+ "@babel/runtime" "^7.10.2"
+ "@babel/runtime-corejs3" "^7.10.2"
+
+array-includes@^3.1.4, array-includes@^3.1.5:
+ version "3.1.5"
+ resolved "https://registry.yarnpkg.com/array-includes/-/array-includes-3.1.5.tgz#2c320010db8d31031fd2a5f6b3bbd4b1aad31bdb"
+ integrity sha512-iSDYZMMyTPkiFasVqfuAQnWAYcvO/SeBSCGKePoEthjp4LEMTe4uLc7b025o4jAZpHhihh8xPo99TNWUWWkGDQ==
+ dependencies:
+ call-bind "^1.0.2"
+ define-properties "^1.1.4"
+ es-abstract "^1.19.5"
+ get-intrinsic "^1.1.1"
+ is-string "^1.0.7"
+
+array-union@^2.1.0:
+ version "2.1.0"
+ resolved "https://registry.yarnpkg.com/array-union/-/array-union-2.1.0.tgz#b798420adbeb1de828d84acd8a2e23d3efe85e8d"
+ integrity sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==
+
+array.prototype.flat@^1.2.5:
+ version "1.3.0"
+ resolved "https://registry.yarnpkg.com/array.prototype.flat/-/array.prototype.flat-1.3.0.tgz#0b0c1567bf57b38b56b4c97b8aa72ab45e4adc7b"
+ integrity sha512-12IUEkHsAhA4DY5s0FPgNXIdc8VRSqD9Zp78a5au9abH/SOBrsp082JOWFNTjkMozh8mqcdiKuaLGhPeYztxSw==
+ dependencies:
+ call-bind "^1.0.2"
+ define-properties "^1.1.3"
+ es-abstract "^1.19.2"
+ es-shim-unscopables "^1.0.0"
+
+array.prototype.flatmap@^1.3.0:
+ version "1.3.0"
+ resolved "https://registry.yarnpkg.com/array.prototype.flatmap/-/array.prototype.flatmap-1.3.0.tgz#a7e8ed4225f4788a70cd910abcf0791e76a5534f"
+ integrity sha512-PZC9/8TKAIxcWKdyeb77EzULHPrIX/tIZebLJUQOMR1OwYosT8yggdfWScfTBCDj5utONvOuPQQumYsU2ULbkg==
+ dependencies:
+ call-bind "^1.0.2"
+ define-properties "^1.1.3"
+ es-abstract "^1.19.2"
+ es-shim-unscopables "^1.0.0"
+
+ast-types-flow@^0.0.7:
+ version "0.0.7"
+ resolved "https://registry.yarnpkg.com/ast-types-flow/-/ast-types-flow-0.0.7.tgz#f70b735c6bca1a5c9c22d982c3e39e7feba3bdad"
+ integrity sha512-eBvWn1lvIApYMhzQMsu9ciLfkBY499mFZlNqG+/9WR7PVlroQw0vG30cOQQbaKz3sCEc44TAOu2ykzqXSNnwag==
+
+astral-regex@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.yarnpkg.com/astral-regex/-/astral-regex-2.0.0.tgz#483143c567aeed4785759c0865786dc77d7d2e31"
+ integrity sha512-Z7tMw1ytTXt5jqMcOP+OQteU1VuNK9Y02uuJtKQ1Sv69jXQKKg5cibLwGJow8yzZP+eAc18EmLGPal0bp36rvQ==
+
+axe-core@^4.4.3:
+ version "4.4.3"
+ resolved "https://registry.yarnpkg.com/axe-core/-/axe-core-4.4.3.tgz#11c74d23d5013c0fa5d183796729bc3482bd2f6f"
+ integrity sha512-32+ub6kkdhhWick/UjvEwRchgoetXqTK14INLqbGm5U2TzBkBNF3nQtLYm8ovxSkQWArjEQvftCKryjZaATu3w==
+
+axobject-query@^2.2.0:
+ version "2.2.0"
+ resolved "https://registry.yarnpkg.com/axobject-query/-/axobject-query-2.2.0.tgz#943d47e10c0b704aa42275e20edf3722648989be"
+ integrity sha512-Td525n+iPOOyUQIeBfcASuG6uJsDOITl7Mds5gFyerkWiX7qhUTdYUBlSgNMyVqtSJqwpt1kXGLdUt6SykLMRA==
+
+balanced-match@^1.0.0:
+ version "1.0.2"
+ resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee"
+ integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==
+
+brace-expansion@^1.1.7:
+ version "1.1.11"
+ resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd"
+ integrity sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==
+ dependencies:
+ balanced-match "^1.0.0"
+ concat-map "0.0.1"
+
+braces@^3.0.2:
+ version "3.0.2"
+ resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.2.tgz#3454e1a462ee8d599e236df336cd9ea4f8afe107"
+ integrity sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==
+ dependencies:
+ fill-range "^7.0.1"
+
+browserslist@^4.20.2:
+ version "4.21.3"
+ resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.21.3.tgz#5df277694eb3c48bc5c4b05af3e8b7e09c5a6d1a"
+ integrity sha512-898rgRXLAyRkM1GryrrBHGkqA5hlpkV5MhtZwg9QXeiyLUYs2k00Un05aX5l2/yJIOObYKOpS2JNo8nJDE7fWQ==
+ dependencies:
+ caniuse-lite "^1.0.30001370"
+ electron-to-chromium "^1.4.202"
+ node-releases "^2.0.6"
+ update-browserslist-db "^1.0.5"
+
+call-bind@^1.0.0, call-bind@^1.0.2:
+ version "1.0.2"
+ resolved "https://registry.yarnpkg.com/call-bind/-/call-bind-1.0.2.tgz#b1d4e89e688119c3c9a903ad30abb2f6a919be3c"
+ integrity sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==
+ dependencies:
+ function-bind "^1.1.1"
+ get-intrinsic "^1.0.2"
+
+callsites@^3.0.0:
+ version "3.1.0"
+ resolved "https://registry.yarnpkg.com/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73"
+ integrity sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==
+
+caniuse-lite@^1.0.30001332, caniuse-lite@^1.0.30001370:
+ version "1.0.30001393"
+ resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001393.tgz#1aa161e24fe6af2e2ccda000fc2b94be0b0db356"
+ integrity sha512-N/od11RX+Gsk+1qY/jbPa0R6zJupEa0lxeBG598EbrtblxVCTJsQwbRBm6+V+rxpc5lHKdsXb9RY83cZIPLseA==
+
+chalk@^2.0.0:
+ version "2.4.2"
+ resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424"
+ integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==
+ dependencies:
+ ansi-styles "^3.2.1"
+ escape-string-regexp "^1.0.5"
+ supports-color "^5.3.0"
+
+chalk@^4.0.0:
+ version "4.1.2"
+ resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.2.tgz#aac4e2b7734a740867aeb16bf02aad556a1e7a01"
+ integrity sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==
+ dependencies:
+ ansi-styles "^4.1.0"
+ supports-color "^7.1.0"
+
+color-convert@^1.9.0:
+ version "1.9.3"
+ resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.3.tgz#bb71850690e1f136567de629d2d5471deda4c1e8"
+ integrity sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==
+ dependencies:
+ color-name "1.1.3"
+
+color-convert@^2.0.1:
+ version "2.0.1"
+ resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-2.0.1.tgz#72d3a68d598c9bdb3af2ad1e84f21d896abd4de3"
+ integrity sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==
+ dependencies:
+ color-name "~1.1.4"
+
+color-name@1.1.3:
+ version "1.1.3"
+ resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.3.tgz#a7d0558bd89c42f795dd42328f740831ca53bc25"
+ integrity sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==
+
+color-name@~1.1.4:
+ version "1.1.4"
+ resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2"
+ integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==
+
+concat-map@0.0.1:
+ version "0.0.1"
+ resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b"
+ integrity sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==
+
+convert-source-map@^1.7.0:
+ version "1.8.0"
+ resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.8.0.tgz#f3373c32d21b4d780dd8004514684fb791ca4369"
+ integrity sha512-+OQdjP49zViI/6i7nIJpA8rAl4sV/JdPfU9nZs3VqOwGIgizICvuN2ru6fMd+4llL0tar18UYJXfZ/TWtmhUjA==
+ dependencies:
+ safe-buffer "~5.1.1"
+
+core-js-pure@^3.20.2:
+ version "3.25.1"
+ resolved "https://registry.yarnpkg.com/core-js-pure/-/core-js-pure-3.25.1.tgz#79546518ae87cc362c991d9c2d211f45107991ee"
+ integrity sha512-7Fr74bliUDdeJCBMxkkIuQ4xfxn/SwrVg+HkJUAoNEXVqYLv55l6Af0dJ5Lq2YBUW9yKqSkLXaS5SYPK6MGa/A==
+
+cross-spawn@^7.0.2:
+ version "7.0.3"
+ resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6"
+ integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==
+ dependencies:
+ path-key "^3.1.0"
+ shebang-command "^2.0.0"
+ which "^2.0.1"
+
+csstype@^3.0.2:
+ version "3.1.0"
+ resolved "https://registry.yarnpkg.com/csstype/-/csstype-3.1.0.tgz#4ddcac3718d787cf9df0d1b7d15033925c8f29f2"
+ integrity sha512-uX1KG+x9h5hIJsaKR9xHUeUraxf8IODOwq9JLNPq6BwB04a/xgpq3rcx47l5BZu5zBPlgD342tdke3Hom/nJRA==
+
+damerau-levenshtein@^1.0.8:
+ version "1.0.8"
+ resolved "https://registry.yarnpkg.com/damerau-levenshtein/-/damerau-levenshtein-1.0.8.tgz#b43d286ccbd36bc5b2f7ed41caf2d0aba1f8a6e7"
+ integrity sha512-sdQSFB7+llfUcQHUQO3+B8ERRj0Oa4w9POWMI/puGtuf7gFywGmkaLCElnudfTiKZV+NvHqL0ifzdrI8Ro7ESA==
+
+debug@^2.6.9:
+ version "2.6.9"
+ resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f"
+ integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==
+ dependencies:
+ ms "2.0.0"
+
+debug@^3.2.7:
+ version "3.2.7"
+ resolved "https://registry.yarnpkg.com/debug/-/debug-3.2.7.tgz#72580b7e9145fb39b6676f9c5e5fb100b934179a"
+ integrity sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==
+ dependencies:
+ ms "^2.1.1"
+
+debug@^4.0.1, debug@^4.1.0, debug@^4.1.1, debug@^4.3.4:
+ version "4.3.4"
+ resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.4.tgz#1319f6579357f2338d3337d2cdd4914bb5dcc865"
+ integrity sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==
+ dependencies:
+ ms "2.1.2"
+
+deep-is@^0.1.3:
+ version "0.1.4"
+ resolved "https://registry.yarnpkg.com/deep-is/-/deep-is-0.1.4.tgz#a6f2dce612fadd2ef1f519b73551f17e85199831"
+ integrity sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==
+
+define-properties@^1.1.3, define-properties@^1.1.4:
+ version "1.1.4"
+ resolved "https://registry.yarnpkg.com/define-properties/-/define-properties-1.1.4.tgz#0b14d7bd7fbeb2f3572c3a7eda80ea5d57fb05b1"
+ integrity sha512-uckOqKcfaVvtBdsVkdPv3XjveQJsNQqmhXgRi8uhvWWuPYZCNlzT8qAyblUgNoXdHdjMTzAqeGjAoli8f+bzPA==
+ dependencies:
+ has-property-descriptors "^1.0.0"
+ object-keys "^1.1.1"
+
+dir-glob@^3.0.1:
+ version "3.0.1"
+ resolved "https://registry.yarnpkg.com/dir-glob/-/dir-glob-3.0.1.tgz#56dbf73d992a4a93ba1584f4534063fd2e41717f"
+ integrity sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==
+ dependencies:
+ path-type "^4.0.0"
+
+doctrine@^2.1.0:
+ version "2.1.0"
+ resolved "https://registry.yarnpkg.com/doctrine/-/doctrine-2.1.0.tgz#5cd01fc101621b42c4cd7f5d1a66243716d3f39d"
+ integrity sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==
+ dependencies:
+ esutils "^2.0.2"
+
+doctrine@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.yarnpkg.com/doctrine/-/doctrine-3.0.0.tgz#addebead72a6574db783639dc87a121773973961"
+ integrity sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==
+ dependencies:
+ esutils "^2.0.2"
+
+electron-to-chromium@^1.4.202:
+ version "1.4.244"
+ resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.244.tgz#ae9b56ed4ae2107e3a860dad80ed662c936e369e"
+ integrity sha512-E21saXLt2eTDaTxgUtiJtBUqanF9A32wZasAwDZ8gvrqXoxrBrbwtDCx7c/PQTLp81wj4X0OLDeoGQg7eMo3+w==
+
+emoji-regex@^8.0.0:
+ version "8.0.0"
+ resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-8.0.0.tgz#e818fd69ce5ccfcb404594f842963bf53164cc37"
+ integrity sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==
+
+emoji-regex@^9.2.2:
+ version "9.2.2"
+ resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-9.2.2.tgz#840c8803b0d8047f4ff0cf963176b32d4ef3ed72"
+ integrity sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==
+
+enhanced-resolve@^5.7.0:
+ version "5.10.0"
+ resolved "https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-5.10.0.tgz#0dc579c3bb2a1032e357ac45b8f3a6f3ad4fb1e6"
+ integrity sha512-T0yTFjdpldGY8PmuXXR0PyQ1ufZpEGiHVrp7zHKB7jdR4qlmZHhONVM5AQOAWXuF/w3dnHbEQVrNptJgt7F+cQ==
+ dependencies:
+ graceful-fs "^4.2.4"
+ tapable "^2.2.0"
+
+enquirer@^2.3.5:
+ version "2.3.6"
+ resolved "https://registry.yarnpkg.com/enquirer/-/enquirer-2.3.6.tgz#2a7fe5dd634a1e4125a975ec994ff5456dc3734d"
+ integrity sha512-yjNnPr315/FjS4zIsUxYguYUPP2e1NK4d7E7ZOLiyYCcbFBiTMyID+2wvm2w6+pZ/odMA7cRkjhsPbltwBOrLg==
+ dependencies:
+ ansi-colors "^4.1.1"
+
+es-abstract@^1.19.0, es-abstract@^1.19.1, es-abstract@^1.19.2, es-abstract@^1.19.5:
+ version "1.20.2"
+ resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.20.2.tgz#8495a07bc56d342a3b8ea3ab01bd986700c2ccb3"
+ integrity sha512-XxXQuVNrySBNlEkTYJoDNFe5+s2yIOpzq80sUHEdPdQr0S5nTLz4ZPPPswNIpKseDDUS5yghX1gfLIHQZ1iNuQ==
+ dependencies:
+ call-bind "^1.0.2"
+ es-to-primitive "^1.2.1"
+ function-bind "^1.1.1"
+ function.prototype.name "^1.1.5"
+ get-intrinsic "^1.1.2"
+ get-symbol-description "^1.0.0"
+ has "^1.0.3"
+ has-property-descriptors "^1.0.0"
+ has-symbols "^1.0.3"
+ internal-slot "^1.0.3"
+ is-callable "^1.2.4"
+ is-negative-zero "^2.0.2"
+ is-regex "^1.1.4"
+ is-shared-array-buffer "^1.0.2"
+ is-string "^1.0.7"
+ is-weakref "^1.0.2"
+ object-inspect "^1.12.2"
+ object-keys "^1.1.1"
+ object.assign "^4.1.4"
+ regexp.prototype.flags "^1.4.3"
+ string.prototype.trimend "^1.0.5"
+ string.prototype.trimstart "^1.0.5"
+ unbox-primitive "^1.0.2"
+
+es-shim-unscopables@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.yarnpkg.com/es-shim-unscopables/-/es-shim-unscopables-1.0.0.tgz#702e632193201e3edf8713635d083d378e510241"
+ integrity sha512-Jm6GPcCdC30eMLbZ2x8z2WuRwAws3zTBBKuusffYVUrNj/GVSUAZ+xKMaUpfNDR5IbyNA5LJbaecoUVbmUcB1w==
+ dependencies:
+ has "^1.0.3"
+
+es-to-primitive@^1.2.1:
+ version "1.2.1"
+ resolved "https://registry.yarnpkg.com/es-to-primitive/-/es-to-primitive-1.2.1.tgz#e55cd4c9cdc188bcefb03b366c736323fc5c898a"
+ integrity sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==
+ dependencies:
+ is-callable "^1.1.4"
+ is-date-object "^1.0.1"
+ is-symbol "^1.0.2"
+
+escalade@^3.1.1:
+ version "3.1.1"
+ resolved "https://registry.yarnpkg.com/escalade/-/escalade-3.1.1.tgz#d8cfdc7000965c5a0174b4a82eaa5c0552742e40"
+ integrity sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==
+
+escape-string-regexp@^1.0.5:
+ version "1.0.5"
+ resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4"
+ integrity sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==
+
+escape-string-regexp@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz#14ba83a5d373e3d311e5afca29cf5bfad965bf34"
+ integrity sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==
+
+eslint-config-next@^12.0.8:
+ version "12.3.0"
+ resolved "https://registry.yarnpkg.com/eslint-config-next/-/eslint-config-next-12.3.0.tgz#d887ab2d143fe1a2b308e9321e932a613e610800"
+ integrity sha512-guHSkNyKnTBB8HU35COgAMeMV0E026BiYRYvyEVVaTOeFcnU3i1EI8/Da0Rl7H3Sgua5FEvoA0vYd2s8kdIUXg==
+ dependencies:
+ "@next/eslint-plugin-next" "12.3.0"
+ "@rushstack/eslint-patch" "^1.1.3"
+ "@typescript-eslint/parser" "^5.21.0"
+ eslint-import-resolver-node "^0.3.6"
+ eslint-import-resolver-typescript "^2.7.1"
+ eslint-plugin-import "^2.26.0"
+ eslint-plugin-jsx-a11y "^6.5.1"
+ eslint-plugin-react "^7.29.4"
+ eslint-plugin-react-hooks "^4.5.0"
+
+eslint-config-prettier@^8.3.0:
+ version "8.5.0"
+ resolved "https://registry.yarnpkg.com/eslint-config-prettier/-/eslint-config-prettier-8.5.0.tgz#5a81680ec934beca02c7b1a61cf8ca34b66feab1"
+ integrity sha512-obmWKLUNCnhtQRKc+tmnYuQl0pFU1ibYJQ5BGhTVB08bHe9wC8qUeG7c08dj9XX+AuPj1YSGSQIHl1pnDHZR0Q==
+
+eslint-config-turbo@latest:
+ version "0.0.3"
+ resolved "https://registry.yarnpkg.com/eslint-config-turbo/-/eslint-config-turbo-0.0.3.tgz#61a3b6fdc4186bb6832ab4b48bb6ed2d3bad57a8"
+ integrity sha512-hK5MlxDugUWZV9ZKcyfNwLXrlMuM2wPgAUk51cUFBC3nXRCVmCA9uSRFBZsyAIurN1wH7mS7G1NBo5F8VkF7lQ==
+ dependencies:
+ eslint-plugin-turbo "0.0.3"
+
+eslint-import-resolver-node@^0.3.6:
+ version "0.3.6"
+ resolved "https://registry.yarnpkg.com/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.6.tgz#4048b958395da89668252001dbd9eca6b83bacbd"
+ integrity sha512-0En0w03NRVMn9Uiyn8YRPDKvWjxCWkslUEhGNTdGx15RvPJYQ+lbOlqrlNI2vEAs4pDYK4f/HN2TbDmk5TP0iw==
+ dependencies:
+ debug "^3.2.7"
+ resolve "^1.20.0"
+
+eslint-import-resolver-typescript@^2.7.1:
+ version "2.7.1"
+ resolved "https://registry.yarnpkg.com/eslint-import-resolver-typescript/-/eslint-import-resolver-typescript-2.7.1.tgz#a90a4a1c80da8d632df25994c4c5fdcdd02b8751"
+ integrity sha512-00UbgGwV8bSgUv34igBDbTOtKhqoRMy9bFjNehT40bXg6585PNIct8HhXZ0SybqB9rWtXj9crcku8ndDn/gIqQ==
+ dependencies:
+ debug "^4.3.4"
+ glob "^7.2.0"
+ is-glob "^4.0.3"
+ resolve "^1.22.0"
+ tsconfig-paths "^3.14.1"
+
+eslint-module-utils@^2.7.3:
+ version "2.7.4"
+ resolved "https://registry.yarnpkg.com/eslint-module-utils/-/eslint-module-utils-2.7.4.tgz#4f3e41116aaf13a20792261e61d3a2e7e0583974"
+ integrity sha512-j4GT+rqzCoRKHwURX7pddtIPGySnX9Si/cgMI5ztrcqOPtk5dDEeZ34CQVPphnqkJytlc97Vuk05Um2mJ3gEQA==
+ dependencies:
+ debug "^3.2.7"
+
+eslint-plugin-import@^2.26.0:
+ version "2.26.0"
+ resolved "https://registry.yarnpkg.com/eslint-plugin-import/-/eslint-plugin-import-2.26.0.tgz#f812dc47be4f2b72b478a021605a59fc6fe8b88b"
+ integrity sha512-hYfi3FXaM8WPLf4S1cikh/r4IxnO6zrhZbEGz2b660EJRbuxgpDS5gkCuYgGWg2xxh2rBuIr4Pvhve/7c31koA==
+ dependencies:
+ array-includes "^3.1.4"
+ array.prototype.flat "^1.2.5"
+ debug "^2.6.9"
+ doctrine "^2.1.0"
+ eslint-import-resolver-node "^0.3.6"
+ eslint-module-utils "^2.7.3"
+ has "^1.0.3"
+ is-core-module "^2.8.1"
+ is-glob "^4.0.3"
+ minimatch "^3.1.2"
+ object.values "^1.1.5"
+ resolve "^1.22.0"
+ tsconfig-paths "^3.14.1"
+
+eslint-plugin-jsx-a11y@^6.5.1:
+ version "6.6.1"
+ resolved "https://registry.yarnpkg.com/eslint-plugin-jsx-a11y/-/eslint-plugin-jsx-a11y-6.6.1.tgz#93736fc91b83fdc38cc8d115deedfc3091aef1ff"
+ integrity sha512-sXgFVNHiWffBq23uiS/JaP6eVR622DqwB4yTzKvGZGcPq6/yZ3WmOZfuBks/vHWo9GaFOqC2ZK4i6+C35knx7Q==
+ dependencies:
+ "@babel/runtime" "^7.18.9"
+ aria-query "^4.2.2"
+ array-includes "^3.1.5"
+ ast-types-flow "^0.0.7"
+ axe-core "^4.4.3"
+ axobject-query "^2.2.0"
+ damerau-levenshtein "^1.0.8"
+ emoji-regex "^9.2.2"
+ has "^1.0.3"
+ jsx-ast-utils "^3.3.2"
+ language-tags "^1.0.5"
+ minimatch "^3.1.2"
+ semver "^6.3.0"
+
+eslint-plugin-react-hooks@^4.5.0:
+ version "4.6.0"
+ resolved "https://registry.yarnpkg.com/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-4.6.0.tgz#4c3e697ad95b77e93f8646aaa1630c1ba607edd3"
+ integrity sha512-oFc7Itz9Qxh2x4gNHStv3BqJq54ExXmfC+a1NjAta66IAN87Wu0R/QArgIS9qKzX3dXKPI9H5crl9QchNMY9+g==
+
+eslint-plugin-react@7.31.7, eslint-plugin-react@^7.29.4:
+ version "7.31.7"
+ resolved "https://registry.yarnpkg.com/eslint-plugin-react/-/eslint-plugin-react-7.31.7.tgz#36fb1c611a7db5f757fce09cbbcc01682f8b0fbb"
+ integrity sha512-8NldBTeYp/kQoTV1uT0XF6HcmDqbgZ0lNPkN0wlRw8DJKXEnaWu+oh/6gt3xIhzvQ35wB2Y545fJhIbJSZ2NNw==
+ dependencies:
+ array-includes "^3.1.5"
+ array.prototype.flatmap "^1.3.0"
+ doctrine "^2.1.0"
+ estraverse "^5.3.0"
+ jsx-ast-utils "^2.4.1 || ^3.0.0"
+ minimatch "^3.1.2"
+ object.entries "^1.1.5"
+ object.fromentries "^2.0.5"
+ object.hasown "^1.1.1"
+ object.values "^1.1.5"
+ prop-types "^15.8.1"
+ resolve "^2.0.0-next.3"
+ semver "^6.3.0"
+ string.prototype.matchall "^4.0.7"
+
+eslint-plugin-turbo@0.0.3:
+ version "0.0.3"
+ resolved "https://registry.yarnpkg.com/eslint-plugin-turbo/-/eslint-plugin-turbo-0.0.3.tgz#9d86895732f95b0c236d6363177a52368fffdc71"
+ integrity sha512-QjidATGxWtaB9QUrD3NocUySmsgWKZlBMFlw4kX2IIjRLAxMPwukk90h3ZTaNXyRHuaQsrEgh7hhlCZoxP0TTw==
+
+eslint-scope@^5.1.1:
+ version "5.1.1"
+ resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-5.1.1.tgz#e786e59a66cb92b3f6c1fb0d508aab174848f48c"
+ integrity sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==
+ dependencies:
+ esrecurse "^4.3.0"
+ estraverse "^4.1.1"
+
+eslint-utils@^2.1.0:
+ version "2.1.0"
+ resolved "https://registry.yarnpkg.com/eslint-utils/-/eslint-utils-2.1.0.tgz#d2de5e03424e707dc10c74068ddedae708741b27"
+ integrity sha512-w94dQYoauyvlDc43XnGB8lU3Zt713vNChgt4EWwhXAP2XkBvndfxF0AgIqKOOasjPIPzj9JqgwkwbCYD0/V3Zg==
+ dependencies:
+ eslint-visitor-keys "^1.1.0"
+
+eslint-visitor-keys@^1.1.0, eslint-visitor-keys@^1.3.0:
+ version "1.3.0"
+ resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz#30ebd1ef7c2fdff01c3a4f151044af25fab0523e"
+ integrity sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==
+
+eslint-visitor-keys@^2.0.0:
+ version "2.1.0"
+ resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-2.1.0.tgz#f65328259305927392c938ed44eb0a5c9b2bd303"
+ integrity sha512-0rSmRBzXgDzIsD6mGdJgevzgezI534Cer5L/vyMX0kHzT/jiB43jRhd9YUlMGYLQy2zprNmoT8qasCGtY+QaKw==
+
+eslint-visitor-keys@^3.3.0:
+ version "3.3.0"
+ resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-3.3.0.tgz#f6480fa6b1f30efe2d1968aa8ac745b862469826"
+ integrity sha512-mQ+suqKJVyeuwGYHAdjMFqjCyfl8+Ldnxuyp3ldiMBFKkvytrXUZWaiPCEav8qDHKty44bD+qV1IP4T+w+xXRA==
+
+eslint@7.32.0, eslint@^7.23.0, eslint@^7.32.0:
+ version "7.32.0"
+ resolved "https://registry.yarnpkg.com/eslint/-/eslint-7.32.0.tgz#c6d328a14be3fb08c8d1d21e12c02fdb7a2a812d"
+ integrity sha512-VHZ8gX+EDfz+97jGcgyGCyRia/dPOd6Xh9yPv8Bl1+SoaIwD+a/vlrOmGRUyOYu7MwUhc7CxqeaDZU13S4+EpA==
+ dependencies:
+ "@babel/code-frame" "7.12.11"
+ "@eslint/eslintrc" "^0.4.3"
+ "@humanwhocodes/config-array" "^0.5.0"
+ ajv "^6.10.0"
+ chalk "^4.0.0"
+ cross-spawn "^7.0.2"
+ debug "^4.0.1"
+ doctrine "^3.0.0"
+ enquirer "^2.3.5"
+ escape-string-regexp "^4.0.0"
+ eslint-scope "^5.1.1"
+ eslint-utils "^2.1.0"
+ eslint-visitor-keys "^2.0.0"
+ espree "^7.3.1"
+ esquery "^1.4.0"
+ esutils "^2.0.2"
+ fast-deep-equal "^3.1.3"
+ file-entry-cache "^6.0.1"
+ functional-red-black-tree "^1.0.1"
+ glob-parent "^5.1.2"
+ globals "^13.6.0"
+ ignore "^4.0.6"
+ import-fresh "^3.0.0"
+ imurmurhash "^0.1.4"
+ is-glob "^4.0.0"
+ js-yaml "^3.13.1"
+ json-stable-stringify-without-jsonify "^1.0.1"
+ levn "^0.4.1"
+ lodash.merge "^4.6.2"
+ minimatch "^3.0.4"
+ natural-compare "^1.4.0"
+ optionator "^0.9.1"
+ progress "^2.0.0"
+ regexpp "^3.1.0"
+ semver "^7.2.1"
+ strip-ansi "^6.0.0"
+ strip-json-comments "^3.1.0"
+ table "^6.0.9"
+ text-table "^0.2.0"
+ v8-compile-cache "^2.0.3"
+
+espree@^7.3.0, espree@^7.3.1:
+ version "7.3.1"
+ resolved "https://registry.yarnpkg.com/espree/-/espree-7.3.1.tgz#f2df330b752c6f55019f8bd89b7660039c1bbbb6"
+ integrity sha512-v3JCNCE64umkFpmkFGqzVKsOT0tN1Zr+ueqLZfpV1Ob8e+CEgPWa+OxCoGH3tnhimMKIaBm4m/vaRpJ/krRz2g==
+ dependencies:
+ acorn "^7.4.0"
+ acorn-jsx "^5.3.1"
+ eslint-visitor-keys "^1.3.0"
+
+esprima@^4.0.0:
+ version "4.0.1"
+ resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71"
+ integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==
+
+esquery@^1.4.0:
+ version "1.4.0"
+ resolved "https://registry.yarnpkg.com/esquery/-/esquery-1.4.0.tgz#2148ffc38b82e8c7057dfed48425b3e61f0f24a5"
+ integrity sha512-cCDispWt5vHHtwMY2YrAQ4ibFkAL8RbH5YGBnZBc90MolvvfkkQcJro/aZiAQUlQ3qgrYS6D6v8Gc5G5CQsc9w==
+ dependencies:
+ estraverse "^5.1.0"
+
+esrecurse@^4.3.0:
+ version "4.3.0"
+ resolved "https://registry.yarnpkg.com/esrecurse/-/esrecurse-4.3.0.tgz#7ad7964d679abb28bee72cec63758b1c5d2c9921"
+ integrity sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==
+ dependencies:
+ estraverse "^5.2.0"
+
+estraverse@^4.1.1:
+ version "4.3.0"
+ resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-4.3.0.tgz#398ad3f3c5a24948be7725e83d11a7de28cdbd1d"
+ integrity sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==
+
+estraverse@^5.1.0, estraverse@^5.2.0, estraverse@^5.3.0:
+ version "5.3.0"
+ resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-5.3.0.tgz#2eea5290702f26ab8fe5370370ff86c965d21123"
+ integrity sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==
+
+esutils@^2.0.2:
+ version "2.0.3"
+ resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.3.tgz#74d2eb4de0b8da1293711910d50775b9b710ef64"
+ integrity sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==
+
+fast-deep-equal@^3.1.1, fast-deep-equal@^3.1.3:
+ version "3.1.3"
+ resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz#3a7d56b559d6cbc3eb512325244e619a65c6c525"
+ integrity sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==
+
+fast-glob@^3.2.9:
+ version "3.2.11"
+ resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.2.11.tgz#a1172ad95ceb8a16e20caa5c5e56480e5129c1d9"
+ integrity sha512-xrO3+1bxSo3ZVHAnqzyuewYT6aMFHRAd4Kcs92MAonjwQZLsK9d0SF1IyQ3k5PoirxTW0Oe/RqFgMQ6TcNE5Ew==
+ dependencies:
+ "@nodelib/fs.stat" "^2.0.2"
+ "@nodelib/fs.walk" "^1.2.3"
+ glob-parent "^5.1.2"
+ merge2 "^1.3.0"
+ micromatch "^4.0.4"
+
+fast-json-stable-stringify@^2.0.0:
+ version "2.1.0"
+ resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz#874bf69c6f404c2b5d99c481341399fd55892633"
+ integrity sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==
+
+fast-levenshtein@^2.0.6:
+ version "2.0.6"
+ resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917"
+ integrity sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==
+
+fastq@^1.6.0:
+ version "1.13.0"
+ resolved "https://registry.yarnpkg.com/fastq/-/fastq-1.13.0.tgz#616760f88a7526bdfc596b7cab8c18938c36b98c"
+ integrity sha512-YpkpUnK8od0o1hmeSc7UUs/eB/vIPWJYjKck2QKIzAf71Vm1AAQ3EbuZB3g2JIy+pg+ERD0vqI79KyZiB2e2Nw==
+ dependencies:
+ reusify "^1.0.4"
+
+file-entry-cache@^6.0.1:
+ version "6.0.1"
+ resolved "https://registry.yarnpkg.com/file-entry-cache/-/file-entry-cache-6.0.1.tgz#211b2dd9659cb0394b073e7323ac3c933d522027"
+ integrity sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==
+ dependencies:
+ flat-cache "^3.0.4"
+
+fill-range@^7.0.1:
+ version "7.0.1"
+ resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.0.1.tgz#1919a6a7c75fe38b2c7c77e5198535da9acdda40"
+ integrity sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==
+ dependencies:
+ to-regex-range "^5.0.1"
+
+flat-cache@^3.0.4:
+ version "3.0.4"
+ resolved "https://registry.yarnpkg.com/flat-cache/-/flat-cache-3.0.4.tgz#61b0338302b2fe9f957dcc32fc2a87f1c3048b11"
+ integrity sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==
+ dependencies:
+ flatted "^3.1.0"
+ rimraf "^3.0.2"
+
+flatted@^3.1.0:
+ version "3.2.7"
+ resolved "https://registry.yarnpkg.com/flatted/-/flatted-3.2.7.tgz#609f39207cb614b89d0765b477cb2d437fbf9787"
+ integrity sha512-5nqDSxl8nn5BSNxyR3n4I6eDmbolI6WT+QqR547RwxQapgjQBmtktdP+HTBb/a/zLsbzERTONyUB5pefh5TtjQ==
+
+fs.realpath@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f"
+ integrity sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==
+
+function-bind@^1.1.1:
+ version "1.1.1"
+ resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d"
+ integrity sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==
+
+function.prototype.name@^1.1.5:
+ version "1.1.5"
+ resolved "https://registry.yarnpkg.com/function.prototype.name/-/function.prototype.name-1.1.5.tgz#cce0505fe1ffb80503e6f9e46cc64e46a12a9621"
+ integrity sha512-uN7m/BzVKQnCUF/iW8jYea67v++2u7m5UgENbHRtdDVclOUP+FMPlCNdmk0h/ysGyo2tavMJEDqJAkJdRa1vMA==
+ dependencies:
+ call-bind "^1.0.2"
+ define-properties "^1.1.3"
+ es-abstract "^1.19.0"
+ functions-have-names "^1.2.2"
+
+functional-red-black-tree@^1.0.1:
+ version "1.0.1"
+ resolved "https://registry.yarnpkg.com/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz#1b0ab3bd553b2a0d6399d29c0e3ea0b252078327"
+ integrity sha512-dsKNQNdj6xA3T+QlADDA7mOSlX0qiMINjn0cgr+eGHGsbSHzTabcIogz2+p/iqP1Xs6EP/sS2SbqH+brGTbq0g==
+
+functions-have-names@^1.2.2:
+ version "1.2.3"
+ resolved "https://registry.yarnpkg.com/functions-have-names/-/functions-have-names-1.2.3.tgz#0404fe4ee2ba2f607f0e0ec3c80bae994133b834"
+ integrity sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==
+
+gensync@^1.0.0-beta.2:
+ version "1.0.0-beta.2"
+ resolved "https://registry.yarnpkg.com/gensync/-/gensync-1.0.0-beta.2.tgz#32a6ee76c3d7f52d46b2b1ae5d93fea8580a25e0"
+ integrity sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==
+
+get-intrinsic@^1.0.2, get-intrinsic@^1.1.0, get-intrinsic@^1.1.1, get-intrinsic@^1.1.2:
+ version "1.1.2"
+ resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.1.2.tgz#336975123e05ad0b7ba41f152ee4aadbea6cf598"
+ integrity sha512-Jfm3OyCxHh9DJyc28qGk+JmfkpO41A4XkneDSujN9MDXrm4oDKdHvndhZ2dN94+ERNfkYJWDclW6k2L/ZGHjXA==
+ dependencies:
+ function-bind "^1.1.1"
+ has "^1.0.3"
+ has-symbols "^1.0.3"
+
+get-symbol-description@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.yarnpkg.com/get-symbol-description/-/get-symbol-description-1.0.0.tgz#7fdb81c900101fbd564dd5f1a30af5aadc1e58d6"
+ integrity sha512-2EmdH1YvIQiZpltCNgkuiUnyukzxM/R6NDJX31Ke3BG1Nq5b0S2PhX59UKi9vZpPDQVdqn+1IcaAwnzTT5vCjw==
+ dependencies:
+ call-bind "^1.0.2"
+ get-intrinsic "^1.1.1"
+
+glob-parent@^5.1.2:
+ version "5.1.2"
+ resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-5.1.2.tgz#869832c58034fe68a4093c17dc15e8340d8401c4"
+ integrity sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==
+ dependencies:
+ is-glob "^4.0.1"
+
+glob@7.1.7:
+ version "7.1.7"
+ resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.7.tgz#3b193e9233f01d42d0b3f78294bbeeb418f94a90"
+ integrity sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==
+ dependencies:
+ fs.realpath "^1.0.0"
+ inflight "^1.0.4"
+ inherits "2"
+ minimatch "^3.0.4"
+ once "^1.3.0"
+ path-is-absolute "^1.0.0"
+
+glob@^7.1.3, glob@^7.2.0:
+ version "7.2.3"
+ resolved "https://registry.yarnpkg.com/glob/-/glob-7.2.3.tgz#b8df0fb802bbfa8e89bd1d938b4e16578ed44f2b"
+ integrity sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==
+ dependencies:
+ fs.realpath "^1.0.0"
+ inflight "^1.0.4"
+ inherits "2"
+ minimatch "^3.1.1"
+ once "^1.3.0"
+ path-is-absolute "^1.0.0"
+
+globals@^11.1.0:
+ version "11.12.0"
+ resolved "https://registry.yarnpkg.com/globals/-/globals-11.12.0.tgz#ab8795338868a0babd8525758018c2a7eb95c42e"
+ integrity sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==
+
+globals@^13.6.0, globals@^13.9.0:
+ version "13.17.0"
+ resolved "https://registry.yarnpkg.com/globals/-/globals-13.17.0.tgz#902eb1e680a41da93945adbdcb5a9f361ba69bd4"
+ integrity sha512-1C+6nQRb1GwGMKm2dH/E7enFAMxGTmGI7/dEdhy/DNelv85w9B72t3uc5frtMNXIbzrarJJ/lTCjcaZwbLJmyw==
+ dependencies:
+ type-fest "^0.20.2"
+
+globby@^11.1.0:
+ version "11.1.0"
+ resolved "https://registry.yarnpkg.com/globby/-/globby-11.1.0.tgz#bd4be98bb042f83d796f7e3811991fbe82a0d34b"
+ integrity sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==
+ dependencies:
+ array-union "^2.1.0"
+ dir-glob "^3.0.1"
+ fast-glob "^3.2.9"
+ ignore "^5.2.0"
+ merge2 "^1.4.1"
+ slash "^3.0.0"
+
+graceful-fs@^4.2.4:
+ version "4.2.10"
+ resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.10.tgz#147d3a006da4ca3ce14728c7aefc287c367d7a6c"
+ integrity sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==
+
+has-bigints@^1.0.1, has-bigints@^1.0.2:
+ version "1.0.2"
+ resolved "https://registry.yarnpkg.com/has-bigints/-/has-bigints-1.0.2.tgz#0871bd3e3d51626f6ca0966668ba35d5602d6eaa"
+ integrity sha512-tSvCKtBr9lkF0Ex0aQiP9N+OpV4zi2r/Nee5VkRDbaqv35RLYMzbwQfFSZZH0kR+Rd6302UJZ2p/bJCEoR3VoQ==
+
+has-flag@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd"
+ integrity sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==
+
+has-flag@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b"
+ integrity sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==
+
+has-property-descriptors@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.yarnpkg.com/has-property-descriptors/-/has-property-descriptors-1.0.0.tgz#610708600606d36961ed04c196193b6a607fa861"
+ integrity sha512-62DVLZGoiEBDHQyqG4w9xCuZ7eJEwNmJRWw2VY84Oedb7WFcA27fiEVe8oUQx9hAUJ4ekurquucTGwsyO1XGdQ==
+ dependencies:
+ get-intrinsic "^1.1.1"
+
+has-symbols@^1.0.2, has-symbols@^1.0.3:
+ version "1.0.3"
+ resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.3.tgz#bb7b2c4349251dce87b125f7bdf874aa7c8b39f8"
+ integrity sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==
+
+has-tostringtag@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.yarnpkg.com/has-tostringtag/-/has-tostringtag-1.0.0.tgz#7e133818a7d394734f941e73c3d3f9291e658b25"
+ integrity sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==
+ dependencies:
+ has-symbols "^1.0.2"
+
+has@^1.0.3:
+ version "1.0.3"
+ resolved "https://registry.yarnpkg.com/has/-/has-1.0.3.tgz#722d7cbfc1f6aa8241f16dd814e011e1f41e8796"
+ integrity sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==
+ dependencies:
+ function-bind "^1.1.1"
+
+ignore@^4.0.6:
+ version "4.0.6"
+ resolved "https://registry.yarnpkg.com/ignore/-/ignore-4.0.6.tgz#750e3db5862087b4737ebac8207ffd1ef27b25fc"
+ integrity sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==
+
+ignore@^5.2.0:
+ version "5.2.0"
+ resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.2.0.tgz#6d3bac8fa7fe0d45d9f9be7bac2fc279577e345a"
+ integrity sha512-CmxgYGiEPCLhfLnpPp1MoRmifwEIOgjcHXxOBjv7mY96c+eWScsOP9c112ZyLdWHi0FxHjI+4uVhKYp/gcdRmQ==
+
+import-fresh@^3.0.0, import-fresh@^3.2.1:
+ version "3.3.0"
+ resolved "https://registry.yarnpkg.com/import-fresh/-/import-fresh-3.3.0.tgz#37162c25fcb9ebaa2e6e53d5b4d88ce17d9e0c2b"
+ integrity sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==
+ dependencies:
+ parent-module "^1.0.0"
+ resolve-from "^4.0.0"
+
+imurmurhash@^0.1.4:
+ version "0.1.4"
+ resolved "https://registry.yarnpkg.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea"
+ integrity sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==
+
+inflight@^1.0.4:
+ version "1.0.6"
+ resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9"
+ integrity sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==
+ dependencies:
+ once "^1.3.0"
+ wrappy "1"
+
+inherits@2:
+ version "2.0.4"
+ resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c"
+ integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==
+
+internal-slot@^1.0.3:
+ version "1.0.3"
+ resolved "https://registry.yarnpkg.com/internal-slot/-/internal-slot-1.0.3.tgz#7347e307deeea2faac2ac6205d4bc7d34967f59c"
+ integrity sha512-O0DB1JC/sPyZl7cIo78n5dR7eUSwwpYPiXRhTzNxZVAMUuB8vlnRFyLxdrVToks6XPLVnFfbzaVd5WLjhgg+vA==
+ dependencies:
+ get-intrinsic "^1.1.0"
+ has "^1.0.3"
+ side-channel "^1.0.4"
+
+is-bigint@^1.0.1:
+ version "1.0.4"
+ resolved "https://registry.yarnpkg.com/is-bigint/-/is-bigint-1.0.4.tgz#08147a1875bc2b32005d41ccd8291dffc6691df3"
+ integrity sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg==
+ dependencies:
+ has-bigints "^1.0.1"
+
+is-boolean-object@^1.1.0:
+ version "1.1.2"
+ resolved "https://registry.yarnpkg.com/is-boolean-object/-/is-boolean-object-1.1.2.tgz#5c6dc200246dd9321ae4b885a114bb1f75f63719"
+ integrity sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==
+ dependencies:
+ call-bind "^1.0.2"
+ has-tostringtag "^1.0.0"
+
+is-callable@^1.1.4, is-callable@^1.2.4:
+ version "1.2.4"
+ resolved "https://registry.yarnpkg.com/is-callable/-/is-callable-1.2.4.tgz#47301d58dd0259407865547853df6d61fe471945"
+ integrity sha512-nsuwtxZfMX67Oryl9LCQ+upnC0Z0BgpwntpS89m1H/TLF0zNfzfLMV/9Wa/6MZsj0acpEjAO0KF1xT6ZdLl95w==
+
+is-core-module@^2.8.1, is-core-module@^2.9.0:
+ version "2.10.0"
+ resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.10.0.tgz#9012ede0a91c69587e647514e1d5277019e728ed"
+ integrity sha512-Erxj2n/LDAZ7H8WNJXd9tw38GYM3dv8rk8Zcs+jJuxYTW7sozH+SS8NtrSjVL1/vpLvWi1hxy96IzjJ3EHTJJg==
+ dependencies:
+ has "^1.0.3"
+
+is-date-object@^1.0.1:
+ version "1.0.5"
+ resolved "https://registry.yarnpkg.com/is-date-object/-/is-date-object-1.0.5.tgz#0841d5536e724c25597bf6ea62e1bd38298df31f"
+ integrity sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==
+ dependencies:
+ has-tostringtag "^1.0.0"
+
+is-extglob@^2.1.1:
+ version "2.1.1"
+ resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2"
+ integrity sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==
+
+is-fullwidth-code-point@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz#f116f8064fe90b3f7844a38997c0b75051269f1d"
+ integrity sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==
+
+is-glob@^4.0.0, is-glob@^4.0.1, is-glob@^4.0.3:
+ version "4.0.3"
+ resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-4.0.3.tgz#64f61e42cbbb2eec2071a9dac0b28ba1e65d5084"
+ integrity sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==
+ dependencies:
+ is-extglob "^2.1.1"
+
+is-negative-zero@^2.0.2:
+ version "2.0.2"
+ resolved "https://registry.yarnpkg.com/is-negative-zero/-/is-negative-zero-2.0.2.tgz#7bf6f03a28003b8b3965de3ac26f664d765f3150"
+ integrity sha512-dqJvarLawXsFbNDeJW7zAz8ItJ9cd28YufuuFzh0G8pNHjJMnY08Dv7sYX2uF5UpQOwieAeOExEYAWWfu7ZZUA==
+
+is-number-object@^1.0.4:
+ version "1.0.7"
+ resolved "https://registry.yarnpkg.com/is-number-object/-/is-number-object-1.0.7.tgz#59d50ada4c45251784e9904f5246c742f07a42fc"
+ integrity sha512-k1U0IRzLMo7ZlYIfzRu23Oh6MiIFasgpb9X76eqfFZAqwH44UI4KTBvBYIZ1dSL9ZzChTB9ShHfLkR4pdW5krQ==
+ dependencies:
+ has-tostringtag "^1.0.0"
+
+is-number@^7.0.0:
+ version "7.0.0"
+ resolved "https://registry.yarnpkg.com/is-number/-/is-number-7.0.0.tgz#7535345b896734d5f80c4d06c50955527a14f12b"
+ integrity sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==
+
+is-regex@^1.1.4:
+ version "1.1.4"
+ resolved "https://registry.yarnpkg.com/is-regex/-/is-regex-1.1.4.tgz#eef5663cd59fa4c0ae339505323df6854bb15958"
+ integrity sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==
+ dependencies:
+ call-bind "^1.0.2"
+ has-tostringtag "^1.0.0"
+
+is-shared-array-buffer@^1.0.2:
+ version "1.0.2"
+ resolved "https://registry.yarnpkg.com/is-shared-array-buffer/-/is-shared-array-buffer-1.0.2.tgz#8f259c573b60b6a32d4058a1a07430c0a7344c79"
+ integrity sha512-sqN2UDu1/0y6uvXyStCOzyhAjCSlHceFoMKJW8W9EU9cvic/QdsZ0kEU93HEy3IUEFZIiH/3w+AH/UQbPHNdhA==
+ dependencies:
+ call-bind "^1.0.2"
+
+is-string@^1.0.5, is-string@^1.0.7:
+ version "1.0.7"
+ resolved "https://registry.yarnpkg.com/is-string/-/is-string-1.0.7.tgz#0dd12bf2006f255bb58f695110eff7491eebc0fd"
+ integrity sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==
+ dependencies:
+ has-tostringtag "^1.0.0"
+
+is-symbol@^1.0.2, is-symbol@^1.0.3:
+ version "1.0.4"
+ resolved "https://registry.yarnpkg.com/is-symbol/-/is-symbol-1.0.4.tgz#a6dac93b635b063ca6872236de88910a57af139c"
+ integrity sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==
+ dependencies:
+ has-symbols "^1.0.2"
+
+is-weakref@^1.0.2:
+ version "1.0.2"
+ resolved "https://registry.yarnpkg.com/is-weakref/-/is-weakref-1.0.2.tgz#9529f383a9338205e89765e0392efc2f100f06f2"
+ integrity sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==
+ dependencies:
+ call-bind "^1.0.2"
+
+isexe@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10"
+ integrity sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==
+
+"js-tokens@^3.0.0 || ^4.0.0", js-tokens@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499"
+ integrity sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==
+
+js-yaml@^3.13.1:
+ version "3.14.1"
+ resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.14.1.tgz#dae812fdb3825fa306609a8717383c50c36a0537"
+ integrity sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==
+ dependencies:
+ argparse "^1.0.7"
+ esprima "^4.0.0"
+
+jsesc@^2.5.1:
+ version "2.5.2"
+ resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-2.5.2.tgz#80564d2e483dacf6e8ef209650a67df3f0c283a4"
+ integrity sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==
+
+json-schema-traverse@^0.4.1:
+ version "0.4.1"
+ resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz#69f6a87d9513ab8bb8fe63bdb0979c448e684660"
+ integrity sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==
+
+json-schema-traverse@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz#ae7bcb3656ab77a73ba5c49bf654f38e6b6860e2"
+ integrity sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==
+
+json-stable-stringify-without-jsonify@^1.0.1:
+ version "1.0.1"
+ resolved "https://registry.yarnpkg.com/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz#9db7b59496ad3f3cfef30a75142d2d930ad72651"
+ integrity sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==
+
+json5@^1.0.1:
+ version "1.0.1"
+ resolved "https://registry.yarnpkg.com/json5/-/json5-1.0.1.tgz#779fb0018604fa854eacbf6252180d83543e3dbe"
+ integrity sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==
+ dependencies:
+ minimist "^1.2.0"
+
+json5@^2.2.1:
+ version "2.2.1"
+ resolved "https://registry.yarnpkg.com/json5/-/json5-2.2.1.tgz#655d50ed1e6f95ad1a3caababd2b0efda10b395c"
+ integrity sha512-1hqLFMSrGHRHxav9q9gNjJ5EXznIxGVO09xQRrwplcS8qs28pZ8s8hupZAmqDwZUmVZ2Qb2jnyPOWcDH8m8dlA==
+
+"jsx-ast-utils@^2.4.1 || ^3.0.0", jsx-ast-utils@^3.3.2:
+ version "3.3.3"
+ resolved "https://registry.yarnpkg.com/jsx-ast-utils/-/jsx-ast-utils-3.3.3.tgz#76b3e6e6cece5c69d49a5792c3d01bd1a0cdc7ea"
+ integrity sha512-fYQHZTZ8jSfmWZ0iyzfwiU4WDX4HpHbMCZ3gPlWYiCl3BoeOTsqKBqnTVfH2rYT7eP5c3sVbeSPHnnJOaTrWiw==
+ dependencies:
+ array-includes "^3.1.5"
+ object.assign "^4.1.3"
+
+language-subtag-registry@~0.3.2:
+ version "0.3.22"
+ resolved "https://registry.yarnpkg.com/language-subtag-registry/-/language-subtag-registry-0.3.22.tgz#2e1500861b2e457eba7e7ae86877cbd08fa1fd1d"
+ integrity sha512-tN0MCzyWnoz/4nHS6uxdlFWoUZT7ABptwKPQ52Ea7URk6vll88bWBVhodtnlfEuCcKWNGoc+uGbw1cwa9IKh/w==
+
+language-tags@^1.0.5:
+ version "1.0.5"
+ resolved "https://registry.yarnpkg.com/language-tags/-/language-tags-1.0.5.tgz#d321dbc4da30ba8bf3024e040fa5c14661f9193a"
+ integrity sha512-qJhlO9cGXi6hBGKoxEG/sKZDAHD5Hnu9Hs4WbOY3pCWXDhw0N8x1NenNzm2EnNLkLkk7J2SdxAkDSbb6ftT+UQ==
+ dependencies:
+ language-subtag-registry "~0.3.2"
+
+levn@^0.4.1:
+ version "0.4.1"
+ resolved "https://registry.yarnpkg.com/levn/-/levn-0.4.1.tgz#ae4562c007473b932a6200d403268dd2fffc6ade"
+ integrity sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==
+ dependencies:
+ prelude-ls "^1.2.1"
+ type-check "~0.4.0"
+
+lodash.merge@^4.6.2:
+ version "4.6.2"
+ resolved "https://registry.yarnpkg.com/lodash.merge/-/lodash.merge-4.6.2.tgz#558aa53b43b661e1925a0afdfa36a9a1085fe57a"
+ integrity sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==
+
+lodash.truncate@^4.4.2:
+ version "4.4.2"
+ resolved "https://registry.yarnpkg.com/lodash.truncate/-/lodash.truncate-4.4.2.tgz#5a350da0b1113b837ecfffd5812cbe58d6eae193"
+ integrity sha512-jttmRe7bRse52OsWIMDLaXxWqRAmtIUccAQ3garviCqJjafXOfNMO0yMfNpdD6zbGaTU0P5Nz7e7gAT6cKmJRw==
+
+lodash@^4.17.21:
+ version "4.17.21"
+ resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c"
+ integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==
+
+loose-envify@^1.1.0, loose-envify@^1.4.0:
+ version "1.4.0"
+ resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.4.0.tgz#71ee51fa7be4caec1a63839f7e682d8132d30caf"
+ integrity sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==
+ dependencies:
+ js-tokens "^3.0.0 || ^4.0.0"
+
+lru-cache@^6.0.0:
+ version "6.0.0"
+ resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-6.0.0.tgz#6d6fe6570ebd96aaf90fcad1dafa3b2566db3a94"
+ integrity sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==
+ dependencies:
+ yallist "^4.0.0"
+
+merge2@^1.3.0, merge2@^1.4.1:
+ version "1.4.1"
+ resolved "https://registry.yarnpkg.com/merge2/-/merge2-1.4.1.tgz#4368892f885e907455a6fd7dc55c0c9d404990ae"
+ integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==
+
+micromatch@^4.0.4:
+ version "4.0.5"
+ resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.5.tgz#bc8999a7cbbf77cdc89f132f6e467051b49090c6"
+ integrity sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==
+ dependencies:
+ braces "^3.0.2"
+ picomatch "^2.3.1"
+
+minimatch@^3.0.4, minimatch@^3.1.1, minimatch@^3.1.2:
+ version "3.1.2"
+ resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.1.2.tgz#19cd194bfd3e428f049a70817c038d89ab4be35b"
+ integrity sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==
+ dependencies:
+ brace-expansion "^1.1.7"
+
+minimist@^1.2.0, minimist@^1.2.6:
+ version "1.2.6"
+ resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.6.tgz#8637a5b759ea0d6e98702cfb3a9283323c93af44"
+ integrity sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==
+
+ms@2.0.0:
+ version "2.0.0"
+ resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8"
+ integrity sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==
+
+ms@2.1.2:
+ version "2.1.2"
+ resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009"
+ integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==
+
+ms@^2.1.1:
+ version "2.1.3"
+ resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2"
+ integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==
+
+nanoid@^3.3.4:
+ version "3.3.4"
+ resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.4.tgz#730b67e3cd09e2deacf03c027c81c9d9dbc5e8ab"
+ integrity sha512-MqBkQh/OHTS2egovRtLk45wEyNXwF+cokD+1YPf9u5VfJiRdAiRwB2froX5Co9Rh20xs4siNPm8naNotSD6RBw==
+
+natural-compare@^1.4.0:
+ version "1.4.0"
+ resolved "https://registry.yarnpkg.com/natural-compare/-/natural-compare-1.4.0.tgz#4abebfeed7541f2c27acfb29bdbbd15c8d5ba4f7"
+ integrity sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==
+
+next-transpile-modules@9.0.0:
+ version "9.0.0"
+ resolved "https://registry.yarnpkg.com/next-transpile-modules/-/next-transpile-modules-9.0.0.tgz#133b1742af082e61cc76b02a0f12ffd40ce2bf90"
+ integrity sha512-VCNFOazIAnXn1hvgYYSTYMnoWgKgwlYh4lm1pKbSfiB3kj5ZYLcKVhfh3jkPOg1cnd9DP+pte9yCUocdPEUBTQ==
+ dependencies:
+ enhanced-resolve "^5.7.0"
+ escalade "^3.1.1"
+
+next@12.2.5:
+ version "12.2.5"
+ resolved "https://registry.yarnpkg.com/next/-/next-12.2.5.tgz#14fb5975e8841fad09553b8ef41fe1393602b717"
+ integrity sha512-tBdjqX5XC/oFs/6gxrZhjmiq90YWizUYU6qOWAfat7zJwrwapJ+BYgX2PmiacunXMaRpeVT4vz5MSPSLgNkrpA==
+ dependencies:
+ "@next/env" "12.2.5"
+ "@swc/helpers" "0.4.3"
+ caniuse-lite "^1.0.30001332"
+ postcss "8.4.14"
+ styled-jsx "5.0.4"
+ use-sync-external-store "1.2.0"
+ optionalDependencies:
+ "@next/swc-android-arm-eabi" "12.2.5"
+ "@next/swc-android-arm64" "12.2.5"
+ "@next/swc-darwin-arm64" "12.2.5"
+ "@next/swc-darwin-x64" "12.2.5"
+ "@next/swc-freebsd-x64" "12.2.5"
+ "@next/swc-linux-arm-gnueabihf" "12.2.5"
+ "@next/swc-linux-arm64-gnu" "12.2.5"
+ "@next/swc-linux-arm64-musl" "12.2.5"
+ "@next/swc-linux-x64-gnu" "12.2.5"
+ "@next/swc-linux-x64-musl" "12.2.5"
+ "@next/swc-win32-arm64-msvc" "12.2.5"
+ "@next/swc-win32-ia32-msvc" "12.2.5"
+ "@next/swc-win32-x64-msvc" "12.2.5"
+
+node-releases@^2.0.6:
+ version "2.0.6"
+ resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.6.tgz#8a7088c63a55e493845683ebf3c828d8c51c5503"
+ integrity sha512-PiVXnNuFm5+iYkLBNeq5211hvO38y63T0i2KKh2KnUs3RpzJ+JtODFjkD8yjLwnDkTYF1eKXheUwdssR+NRZdg==
+
+object-assign@^4.1.1:
+ version "4.1.1"
+ resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863"
+ integrity sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==
+
+object-inspect@^1.12.2, object-inspect@^1.9.0:
+ version "1.12.2"
+ resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.12.2.tgz#c0641f26394532f28ab8d796ab954e43c009a8ea"
+ integrity sha512-z+cPxW0QGUp0mcqcsgQyLVRDoXFQbXOwBaqyF7VIgI4TWNQsDHrBpUQslRmIfAoYWdYzs6UlKJtB2XJpTaNSpQ==
+
+object-keys@^1.1.1:
+ version "1.1.1"
+ resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-1.1.1.tgz#1c47f272df277f3b1daf061677d9c82e2322c60e"
+ integrity sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==
+
+object.assign@^4.1.3, object.assign@^4.1.4:
+ version "4.1.4"
+ resolved "https://registry.yarnpkg.com/object.assign/-/object.assign-4.1.4.tgz#9673c7c7c351ab8c4d0b516f4343ebf4dfb7799f"
+ integrity sha512-1mxKf0e58bvyjSCtKYY4sRe9itRk3PJpquJOjeIkz885CczcI4IvJJDLPS72oowuSh+pBxUFROpX+TU++hxhZQ==
+ dependencies:
+ call-bind "^1.0.2"
+ define-properties "^1.1.4"
+ has-symbols "^1.0.3"
+ object-keys "^1.1.1"
+
+object.entries@^1.1.5:
+ version "1.1.5"
+ resolved "https://registry.yarnpkg.com/object.entries/-/object.entries-1.1.5.tgz#e1acdd17c4de2cd96d5a08487cfb9db84d881861"
+ integrity sha512-TyxmjUoZggd4OrrU1W66FMDG6CuqJxsFvymeyXI51+vQLN67zYfZseptRge703kKQdo4uccgAKebXFcRCzk4+g==
+ dependencies:
+ call-bind "^1.0.2"
+ define-properties "^1.1.3"
+ es-abstract "^1.19.1"
+
+object.fromentries@^2.0.5:
+ version "2.0.5"
+ resolved "https://registry.yarnpkg.com/object.fromentries/-/object.fromentries-2.0.5.tgz#7b37b205109c21e741e605727fe8b0ad5fa08251"
+ integrity sha512-CAyG5mWQRRiBU57Re4FKoTBjXfDoNwdFVH2Y1tS9PqCsfUTymAohOkEMSG3aRNKmv4lV3O7p1et7c187q6bynw==
+ dependencies:
+ call-bind "^1.0.2"
+ define-properties "^1.1.3"
+ es-abstract "^1.19.1"
+
+object.hasown@^1.1.1:
+ version "1.1.1"
+ resolved "https://registry.yarnpkg.com/object.hasown/-/object.hasown-1.1.1.tgz#ad1eecc60d03f49460600430d97f23882cf592a3"
+ integrity sha512-LYLe4tivNQzq4JdaWW6WO3HMZZJWzkkH8fnI6EebWl0VZth2wL2Lovm74ep2/gZzlaTdV62JZHEqHQ2yVn8Q/A==
+ dependencies:
+ define-properties "^1.1.4"
+ es-abstract "^1.19.5"
+
+object.values@^1.1.5:
+ version "1.1.5"
+ resolved "https://registry.yarnpkg.com/object.values/-/object.values-1.1.5.tgz#959f63e3ce9ef108720333082131e4a459b716ac"
+ integrity sha512-QUZRW0ilQ3PnPpbNtgdNV1PDbEqLIiSFB3l+EnGtBQ/8SUTLj1PZwtQHABZtLgwpJZTSZhuGLOGk57Drx2IvYg==
+ dependencies:
+ call-bind "^1.0.2"
+ define-properties "^1.1.3"
+ es-abstract "^1.19.1"
+
+once@^1.3.0:
+ version "1.4.0"
+ resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1"
+ integrity sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==
+ dependencies:
+ wrappy "1"
+
+optionator@^0.9.1:
+ version "0.9.1"
+ resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.9.1.tgz#4f236a6373dae0566a6d43e1326674f50c291499"
+ integrity sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw==
+ dependencies:
+ deep-is "^0.1.3"
+ fast-levenshtein "^2.0.6"
+ levn "^0.4.1"
+ prelude-ls "^1.2.1"
+ type-check "^0.4.0"
+ word-wrap "^1.2.3"
+
+parent-module@^1.0.0:
+ version "1.0.1"
+ resolved "https://registry.yarnpkg.com/parent-module/-/parent-module-1.0.1.tgz#691d2709e78c79fae3a156622452d00762caaaa2"
+ integrity sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==
+ dependencies:
+ callsites "^3.0.0"
+
+path-is-absolute@^1.0.0:
+ version "1.0.1"
+ resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f"
+ integrity sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==
+
+path-key@^3.1.0:
+ version "3.1.1"
+ resolved "https://registry.yarnpkg.com/path-key/-/path-key-3.1.1.tgz#581f6ade658cbba65a0d3380de7753295054f375"
+ integrity sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==
+
+path-parse@^1.0.7:
+ version "1.0.7"
+ resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735"
+ integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==
+
+path-type@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.yarnpkg.com/path-type/-/path-type-4.0.0.tgz#84ed01c0a7ba380afe09d90a8c180dcd9d03043b"
+ integrity sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==
+
+picocolors@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.0.0.tgz#cb5bdc74ff3f51892236eaf79d68bc44564ab81c"
+ integrity sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==
+
+picomatch@^2.3.1:
+ version "2.3.1"
+ resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.3.1.tgz#3ba3833733646d9d3e4995946c1365a67fb07a42"
+ integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==
+
+postcss@8.4.14:
+ version "8.4.14"
+ resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.4.14.tgz#ee9274d5622b4858c1007a74d76e42e56fd21caf"
+ integrity sha512-E398TUmfAYFPBSdzgeieK2Y1+1cpdxJx8yXbK/m57nRhKSmk1GB2tO4lbLBtlkfPQTDKfe4Xqv1ASWPpayPEig==
+ dependencies:
+ nanoid "^3.3.4"
+ picocolors "^1.0.0"
+ source-map-js "^1.0.2"
+
+prelude-ls@^1.2.1:
+ version "1.2.1"
+ resolved "https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.2.1.tgz#debc6489d7a6e6b0e7611888cec880337d316396"
+ integrity sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==
+
+prettier@latest:
+ version "2.7.1"
+ resolved "https://registry.yarnpkg.com/prettier/-/prettier-2.7.1.tgz#e235806850d057f97bb08368a4f7d899f7760c64"
+ integrity sha512-ujppO+MkdPqoVINuDFDRLClm7D78qbDt0/NR+wp5FqEZOoTNAjPHWj17QRhu7geIHJfcNhRk1XVQmF8Bp3ye+g==
+
+progress@^2.0.0:
+ version "2.0.3"
+ resolved "https://registry.yarnpkg.com/progress/-/progress-2.0.3.tgz#7e8cf8d8f5b8f239c1bc68beb4eb78567d572ef8"
+ integrity sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==
+
+prop-types@^15.8.1:
+ version "15.8.1"
+ resolved "https://registry.yarnpkg.com/prop-types/-/prop-types-15.8.1.tgz#67d87bf1a694f48435cf332c24af10214a3140b5"
+ integrity sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==
+ dependencies:
+ loose-envify "^1.4.0"
+ object-assign "^4.1.1"
+ react-is "^16.13.1"
+
+punycode@^2.1.0:
+ version "2.1.1"
+ resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.1.1.tgz#b58b010ac40c22c5657616c8d2c2c02c7bf479ec"
+ integrity sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==
+
+queue-microtask@^1.2.2:
+ version "1.2.3"
+ resolved "https://registry.yarnpkg.com/queue-microtask/-/queue-microtask-1.2.3.tgz#4929228bbc724dfac43e0efb058caf7b6cfb6243"
+ integrity sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==
+
+react-dom@18.2.0:
+ version "18.2.0"
+ resolved "https://registry.yarnpkg.com/react-dom/-/react-dom-18.2.0.tgz#22aaf38708db2674ed9ada224ca4aa708d821e3d"
+ integrity sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g==
+ dependencies:
+ loose-envify "^1.1.0"
+ scheduler "^0.23.0"
+
+react-is@^16.13.1:
+ version "16.13.1"
+ resolved "https://registry.yarnpkg.com/react-is/-/react-is-16.13.1.tgz#789729a4dc36de2999dc156dd6c1d9c18cea56a4"
+ integrity sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==
+
+react@18.2.0, react@^18.2.0:
+ version "18.2.0"
+ resolved "https://registry.yarnpkg.com/react/-/react-18.2.0.tgz#555bd98592883255fa00de14f1151a917b5d77d5"
+ integrity sha512-/3IjMdb2L9QbBdWiW5e3P2/npwMBaU9mHCSCUzNln0ZCYbcfTsGbTJrU/kGemdH2IWmB2ioZ+zkxtmq6g09fGQ==
+ dependencies:
+ loose-envify "^1.1.0"
+
+regenerator-runtime@^0.13.4:
+ version "0.13.9"
+ resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.13.9.tgz#8925742a98ffd90814988d7566ad30ca3b263b52"
+ integrity sha512-p3VT+cOEgxFsRRA9X4lkI1E+k2/CtnKtU4gcxyaCUreilL/vqI6CdZ3wxVUx3UOUg+gnUOQQcRI7BmSI656MYA==
+
+regexp.prototype.flags@^1.4.1, regexp.prototype.flags@^1.4.3:
+ version "1.4.3"
+ resolved "https://registry.yarnpkg.com/regexp.prototype.flags/-/regexp.prototype.flags-1.4.3.tgz#87cab30f80f66660181a3bb7bf5981a872b367ac"
+ integrity sha512-fjggEOO3slI6Wvgjwflkc4NFRCTZAu5CnNfBd5qOMYhWdn67nJBBu34/TkD++eeFmd8C9r9jfXJ27+nSiRkSUA==
+ dependencies:
+ call-bind "^1.0.2"
+ define-properties "^1.1.3"
+ functions-have-names "^1.2.2"
+
+regexpp@^3.1.0:
+ version "3.2.0"
+ resolved "https://registry.yarnpkg.com/regexpp/-/regexpp-3.2.0.tgz#0425a2768d8f23bad70ca4b90461fa2f1213e1b2"
+ integrity sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg==
+
+require-from-string@^2.0.2:
+ version "2.0.2"
+ resolved "https://registry.yarnpkg.com/require-from-string/-/require-from-string-2.0.2.tgz#89a7fdd938261267318eafe14f9c32e598c36909"
+ integrity sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==
+
+resolve-from@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-4.0.0.tgz#4abcd852ad32dd7baabfe9b40e00a36db5f392e6"
+ integrity sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==
+
+resolve@^1.20.0, resolve@^1.22.0:
+ version "1.22.1"
+ resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.22.1.tgz#27cb2ebb53f91abb49470a928bba7558066ac177"
+ integrity sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==
+ dependencies:
+ is-core-module "^2.9.0"
+ path-parse "^1.0.7"
+ supports-preserve-symlinks-flag "^1.0.0"
+
+resolve@^2.0.0-next.3:
+ version "2.0.0-next.4"
+ resolved "https://registry.yarnpkg.com/resolve/-/resolve-2.0.0-next.4.tgz#3d37a113d6429f496ec4752d2a2e58efb1fd4660"
+ integrity sha512-iMDbmAWtfU+MHpxt/I5iWI7cY6YVEZUQ3MBgPQ++XD1PELuJHIl82xBmObyP2KyQmkNB2dsqF7seoQQiAn5yDQ==
+ dependencies:
+ is-core-module "^2.9.0"
+ path-parse "^1.0.7"
+ supports-preserve-symlinks-flag "^1.0.0"
+
+reusify@^1.0.4:
+ version "1.0.4"
+ resolved "https://registry.yarnpkg.com/reusify/-/reusify-1.0.4.tgz#90da382b1e126efc02146e90845a88db12925d76"
+ integrity sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==
+
+rimraf@^3.0.2:
+ version "3.0.2"
+ resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-3.0.2.tgz#f1a5402ba6220ad52cc1282bac1ae3aa49fd061a"
+ integrity sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==
+ dependencies:
+ glob "^7.1.3"
+
+run-parallel@^1.1.9:
+ version "1.2.0"
+ resolved "https://registry.yarnpkg.com/run-parallel/-/run-parallel-1.2.0.tgz#66d1368da7bdf921eb9d95bd1a9229e7f21a43ee"
+ integrity sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==
+ dependencies:
+ queue-microtask "^1.2.2"
+
+safe-buffer@~5.1.1:
+ version "5.1.2"
+ resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d"
+ integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==
+
+scheduler@^0.23.0:
+ version "0.23.0"
+ resolved "https://registry.yarnpkg.com/scheduler/-/scheduler-0.23.0.tgz#ba8041afc3d30eb206a487b6b384002e4e61fdfe"
+ integrity sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw==
+ dependencies:
+ loose-envify "^1.1.0"
+
+semver@^6.3.0:
+ version "6.3.0"
+ resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.0.tgz#ee0a64c8af5e8ceea67687b133761e1becbd1d3d"
+ integrity sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==
+
+semver@^7.2.1, semver@^7.3.7:
+ version "7.3.7"
+ resolved "https://registry.yarnpkg.com/semver/-/semver-7.3.7.tgz#12c5b649afdbf9049707796e22a4028814ce523f"
+ integrity sha512-QlYTucUYOews+WeEujDoEGziz4K6c47V/Bd+LjSSYcA94p+DmINdf7ncaUinThfvZyu13lN9OY1XDxt8C0Tw0g==
+ dependencies:
+ lru-cache "^6.0.0"
+
+shebang-command@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-2.0.0.tgz#ccd0af4f8835fbdc265b82461aaf0c36663f34ea"
+ integrity sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==
+ dependencies:
+ shebang-regex "^3.0.0"
+
+shebang-regex@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-3.0.0.tgz#ae16f1644d873ecad843b0307b143362d4c42172"
+ integrity sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==
+
+side-channel@^1.0.4:
+ version "1.0.4"
+ resolved "https://registry.yarnpkg.com/side-channel/-/side-channel-1.0.4.tgz#efce5c8fdc104ee751b25c58d4290011fa5ea2cf"
+ integrity sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==
+ dependencies:
+ call-bind "^1.0.0"
+ get-intrinsic "^1.0.2"
+ object-inspect "^1.9.0"
+
+slash@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.yarnpkg.com/slash/-/slash-3.0.0.tgz#6539be870c165adbd5240220dbe361f1bc4d4634"
+ integrity sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==
+
+slice-ansi@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.yarnpkg.com/slice-ansi/-/slice-ansi-4.0.0.tgz#500e8dd0fd55b05815086255b3195adf2a45fe6b"
+ integrity sha512-qMCMfhY040cVHT43K9BFygqYbUPFZKHOg7K73mtTWJRb8pyP3fzf4Ixd5SzdEJQ6MRUg/WBnOLxghZtKKurENQ==
+ dependencies:
+ ansi-styles "^4.0.0"
+ astral-regex "^2.0.0"
+ is-fullwidth-code-point "^3.0.0"
+
+source-map-js@^1.0.2:
+ version "1.0.2"
+ resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.0.2.tgz#adbc361d9c62df380125e7f161f71c826f1e490c"
+ integrity sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==
+
+sprintf-js@~1.0.2:
+ version "1.0.3"
+ resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c"
+ integrity sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==
+
+string-width@^4.2.3:
+ version "4.2.3"
+ resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010"
+ integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==
+ dependencies:
+ emoji-regex "^8.0.0"
+ is-fullwidth-code-point "^3.0.0"
+ strip-ansi "^6.0.1"
+
+string.prototype.matchall@^4.0.7:
+ version "4.0.7"
+ resolved "https://registry.yarnpkg.com/string.prototype.matchall/-/string.prototype.matchall-4.0.7.tgz#8e6ecb0d8a1fb1fda470d81acecb2dba057a481d"
+ integrity sha512-f48okCX7JiwVi1NXCVWcFnZgADDC/n2vePlQ/KUCNqCikLLilQvwjMO8+BHVKvgzH0JB0J9LEPgxOGT02RoETg==
+ dependencies:
+ call-bind "^1.0.2"
+ define-properties "^1.1.3"
+ es-abstract "^1.19.1"
+ get-intrinsic "^1.1.1"
+ has-symbols "^1.0.3"
+ internal-slot "^1.0.3"
+ regexp.prototype.flags "^1.4.1"
+ side-channel "^1.0.4"
+
+string.prototype.trimend@^1.0.5:
+ version "1.0.5"
+ resolved "https://registry.yarnpkg.com/string.prototype.trimend/-/string.prototype.trimend-1.0.5.tgz#914a65baaab25fbdd4ee291ca7dde57e869cb8d0"
+ integrity sha512-I7RGvmjV4pJ7O3kdf+LXFpVfdNOxtCW/2C8f6jNiW4+PQchwxkCDzlk1/7p+Wl4bqFIZeF47qAHXLuHHWKAxog==
+ dependencies:
+ call-bind "^1.0.2"
+ define-properties "^1.1.4"
+ es-abstract "^1.19.5"
+
+string.prototype.trimstart@^1.0.5:
+ version "1.0.5"
+ resolved "https://registry.yarnpkg.com/string.prototype.trimstart/-/string.prototype.trimstart-1.0.5.tgz#5466d93ba58cfa2134839f81d7f42437e8c01fef"
+ integrity sha512-THx16TJCGlsN0o6dl2o6ncWUsdgnLRSA23rRE5pyGBw/mLr3Ej/R2LaqCtgP8VNMGZsvMWnf9ooZPyY2bHvUFg==
+ dependencies:
+ call-bind "^1.0.2"
+ define-properties "^1.1.4"
+ es-abstract "^1.19.5"
+
+strip-ansi@^6.0.0, strip-ansi@^6.0.1:
+ version "6.0.1"
+ resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9"
+ integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==
+ dependencies:
+ ansi-regex "^5.0.1"
+
+strip-bom@^3.0.0:
+ version "3.0.0"
+ resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-3.0.0.tgz#2334c18e9c759f7bdd56fdef7e9ae3d588e68ed3"
+ integrity sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==
+
+strip-json-comments@^3.1.0, strip-json-comments@^3.1.1:
+ version "3.1.1"
+ resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-3.1.1.tgz#31f1281b3832630434831c310c01cccda8cbe006"
+ integrity sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==
+
+styled-jsx@5.0.4:
+ version "5.0.4"
+ resolved "https://registry.yarnpkg.com/styled-jsx/-/styled-jsx-5.0.4.tgz#5b1bd0b9ab44caae3dd1361295559706e044aa53"
+ integrity sha512-sDFWLbg4zR+UkNzfk5lPilyIgtpddfxXEULxhujorr5jtePTUqiPDc5BC0v1NRqTr/WaFBGQQUoYToGlF4B2KQ==
+
+supports-color@^5.3.0:
+ version "5.5.0"
+ resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.5.0.tgz#e2e69a44ac8772f78a1ec0b35b689df6530efc8f"
+ integrity sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==
+ dependencies:
+ has-flag "^3.0.0"
+
+supports-color@^7.1.0:
+ version "7.2.0"
+ resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-7.2.0.tgz#1b7dcdcb32b8138801b3e478ba6a51caa89648da"
+ integrity sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==
+ dependencies:
+ has-flag "^4.0.0"
+
+supports-preserve-symlinks-flag@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.yarnpkg.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz#6eda4bd344a3c94aea376d4cc31bc77311039e09"
+ integrity sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==
+
+table@^6.0.9:
+ version "6.8.0"
+ resolved "https://registry.yarnpkg.com/table/-/table-6.8.0.tgz#87e28f14fa4321c3377ba286f07b79b281a3b3ca"
+ integrity sha512-s/fitrbVeEyHKFa7mFdkuQMWlH1Wgw/yEXMt5xACT4ZpzWFluehAxRtUUQKPuWhaLAWhFcVx6w3oC8VKaUfPGA==
+ dependencies:
+ ajv "^8.0.1"
+ lodash.truncate "^4.4.2"
+ slice-ansi "^4.0.0"
+ string-width "^4.2.3"
+ strip-ansi "^6.0.1"
+
+tapable@^2.2.0:
+ version "2.2.1"
+ resolved "https://registry.yarnpkg.com/tapable/-/tapable-2.2.1.tgz#1967a73ef4060a82f12ab96af86d52fdb76eeca0"
+ integrity sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==
+
+text-table@^0.2.0:
+ version "0.2.0"
+ resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4"
+ integrity sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==
+
+to-fast-properties@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-2.0.0.tgz#dc5e698cbd079265bc73e0377681a4e4e83f616e"
+ integrity sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==
+
+to-regex-range@^5.0.1:
+ version "5.0.1"
+ resolved "https://registry.yarnpkg.com/to-regex-range/-/to-regex-range-5.0.1.tgz#1648c44aae7c8d988a326018ed72f5b4dd0392e4"
+ integrity sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==
+ dependencies:
+ is-number "^7.0.0"
+
+tsconfig-paths@^3.14.1:
+ version "3.14.1"
+ resolved "https://registry.yarnpkg.com/tsconfig-paths/-/tsconfig-paths-3.14.1.tgz#ba0734599e8ea36c862798e920bcf163277b137a"
+ integrity sha512-fxDhWnFSLt3VuTwtvJt5fpwxBHg5AdKWMsgcPOOIilyjymcYVZoCQF8fvFRezCNfblEXmi+PcM1eYHeOAgXCOQ==
+ dependencies:
+ "@types/json5" "^0.0.29"
+ json5 "^1.0.1"
+ minimist "^1.2.6"
+ strip-bom "^3.0.0"
+
+tslib@^1.8.1:
+ version "1.14.1"
+ resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.14.1.tgz#cf2d38bdc34a134bcaf1091c41f6619e2f672d00"
+ integrity sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==
+
+tslib@^2.4.0:
+ version "2.4.0"
+ resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.4.0.tgz#7cecaa7f073ce680a05847aa77be941098f36dc3"
+ integrity sha512-d6xOpEDfsi2CZVlPQzGeux8XMwLT9hssAsaPYExaQMuYskwb+x1x7J371tWlbBdWHroy99KnVB6qIkUbs5X3UQ==
+
+tsutils@^3.21.0:
+ version "3.21.0"
+ resolved "https://registry.yarnpkg.com/tsutils/-/tsutils-3.21.0.tgz#b48717d394cea6c1e096983eed58e9d61715b623"
+ integrity sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==
+ dependencies:
+ tslib "^1.8.1"
+
+turbo-android-arm64@1.4.6:
+ version "1.4.6"
+ resolved "https://registry.yarnpkg.com/turbo-android-arm64/-/turbo-android-arm64-1.4.6.tgz#2127110335a86a50856852c2728eb75f7f61b77b"
+ integrity sha512-YxSlHc64CF5J7yNUMiLBHkeLyzrpe75Oy7tivWb3z7ySG44BXPikk4HDJZPh0T1ELvukDwuPKkvDukJ2oCLJpA==
+
+turbo-darwin-64@1.4.6:
+ version "1.4.6"
+ resolved "https://registry.yarnpkg.com/turbo-darwin-64/-/turbo-darwin-64-1.4.6.tgz#8b3d930ed0d0b8c358d87ed2347381496f4283dd"
+ integrity sha512-f6uto7LLpjwZ6iZSF+8uaDpuiTji6xmnWDxNuW23DBE8iv5mxehHd+6Ys851uKDRrPb3QdCu9ctyigKTAla5Vg==
+
+turbo-darwin-arm64@1.4.6:
+ version "1.4.6"
+ resolved "https://registry.yarnpkg.com/turbo-darwin-arm64/-/turbo-darwin-arm64-1.4.6.tgz#7f045cbfbb1d6ac18ea28122b9a6a5fdc629488a"
+ integrity sha512-o9C6e5XyuMHQwE0fEhUxfpXxvNr2QXXWX8nxIjygxeF19AqKbk/s08vZBOEmXV6/gx/pRhZ1S2nf0PIUjKBD/Q==
+
+turbo-freebsd-64@1.4.6:
+ version "1.4.6"
+ resolved "https://registry.yarnpkg.com/turbo-freebsd-64/-/turbo-freebsd-64-1.4.6.tgz#b16c5617f2e818a709627351f1e14d1fd8dcf0e7"
+ integrity sha512-Gg9VOUo6McXYKGevcYjGUSmMryZyZggvpdPh7Dw3QTcT8Tsy6OBtq6WnJ2O4kFDsMigyKtEOJPceD9vDMZt3yQ==
+
+turbo-freebsd-arm64@1.4.6:
+ version "1.4.6"
+ resolved "https://registry.yarnpkg.com/turbo-freebsd-arm64/-/turbo-freebsd-arm64-1.4.6.tgz#462b8ba68cccac93d169c80cf458d221c662a770"
+ integrity sha512-W7VrcneWFN1QENKt5cpAPSsf9ArYBBAm3VtPBZEO5tX8kuahGlah1SKdKJXrRxYOY82wyNxDagS/rHpBlrAAzw==
+
+turbo-linux-32@1.4.6:
+ version "1.4.6"
+ resolved "https://registry.yarnpkg.com/turbo-linux-32/-/turbo-linux-32-1.4.6.tgz#0a0859be0941e4bcdc4bff81b97ee36f02cc1ffd"
+ integrity sha512-76j/zsui6mWPX8pZVMGgF8eiKHPmKuGa2lo0A/Ja0HUvdYCOGUfHsWJGVVIeYbuEp3jsKyVt7OnMDeH9CqO6bg==
+
+turbo-linux-64@1.4.6:
+ version "1.4.6"
+ resolved "https://registry.yarnpkg.com/turbo-linux-64/-/turbo-linux-64-1.4.6.tgz#0a7d76fab78d7850c26d9d6b372c40ffca9835f8"
+ integrity sha512-z4A37Xm7lZyO9ddtGnvQHWMrsAKX6vFBxdbtb9MY76VRblo7lWSuk4LwCeM+T+ZDJ9LBFiF7aD/diRShlLx9jA==
+
+turbo-linux-arm64@1.4.6:
+ version "1.4.6"
+ resolved "https://registry.yarnpkg.com/turbo-linux-arm64/-/turbo-linux-arm64-1.4.6.tgz#c66d3c6917ccbdb34cd7ce37c900613f4d690ebc"
+ integrity sha512-FW1jmOpZfOoVVvml338N0MPnYjiMyYWTaMb4T+IosgGYymcUE3xJjfXJcqfU/9/uKTyY8zG0qr9/5rw2kpMS2Q==
+
+turbo-linux-arm@1.4.6:
+ version "1.4.6"
+ resolved "https://registry.yarnpkg.com/turbo-linux-arm/-/turbo-linux-arm-1.4.6.tgz#9a9d73a722bdd6acb40276d0616c155168a32172"
+ integrity sha512-Uh/V3oaAdhyZW6FKPpKihAxQo3EbvLaVNnzzkBmBnvHRkqoDJHhpuG72V7nn8pzxVbJ1++NEVjvbc2kmKFvGjg==
+
+turbo-linux-mips64le@1.4.6:
+ version "1.4.6"
+ resolved "https://registry.yarnpkg.com/turbo-linux-mips64le/-/turbo-linux-mips64le-1.4.6.tgz#eb74c333c16ef03aa30dcb83fcc29d58218656e4"
+ integrity sha512-iWaL3Pwj52BH3T2M8nXScmbSnq4+x47MYK7lJMG7FsZGAIoT5ToO1Wt1iX3GRHTcnIZYm/kCfJ1ptK/NCossLA==
+
+turbo-linux-ppc64le@1.4.6:
+ version "1.4.6"
+ resolved "https://registry.yarnpkg.com/turbo-linux-ppc64le/-/turbo-linux-ppc64le-1.4.6.tgz#74597f4c30fe73c92ef8912e4bf25ccbe7c7ec7e"
+ integrity sha512-Af/KlUmpiORDyELxT7byXNWl3fefErGQMJfeqXEtAdhs8OCKQWuU+lchcZbiBZYNpL+lZoa3PAmP9Fpx7R4plA==
+
+turbo-windows-32@1.4.6:
+ version "1.4.6"
+ resolved "https://registry.yarnpkg.com/turbo-windows-32/-/turbo-windows-32-1.4.6.tgz#df1f3c25fea0bbccf7c5b44111ddbcd57f6fe547"
+ integrity sha512-NBd+XPlRSaR//lVN13Q9DOqK3CbowSvafIyGsO4jfvMsGTdyNDL6AYtFsvTKW91/G7ZhATmSEkPn2pZRuhP/DA==
+
+turbo-windows-64@1.4.6:
+ version "1.4.6"
+ resolved "https://registry.yarnpkg.com/turbo-windows-64/-/turbo-windows-64-1.4.6.tgz#6a7d8897bb60234b6bc4b5d013adb00fac6f2beb"
+ integrity sha512-86AbmG+CjzVTpn4RGtwU2CYy4zSyAc9bIQ4pDGLIpCJg6JlD11duaiMJh0SCU/HCqWLJjWDI4qD+f9WNbgPsyQ==
+
+turbo-windows-arm64@1.4.6:
+ version "1.4.6"
+ resolved "https://registry.yarnpkg.com/turbo-windows-arm64/-/turbo-windows-arm64-1.4.6.tgz#4c80528c6670ef50129053ad8279c832190b7234"
+ integrity sha512-V+pWcqhTtmQQ3ew8qEjYtUwzyW6tO1RgvP+6OKzItYzTnMTr1Fe42Q21V+tqRNxuNfFDKsgVJdk2p5wB87bvyQ==
+
+turbo@latest:
+ version "1.4.6"
+ resolved "https://registry.yarnpkg.com/turbo/-/turbo-1.4.6.tgz#c97c23cf898380bedabd04c5a91ab4eb9829bcdc"
+ integrity sha512-FKtBXlOJ7YjSK22yj4sJLCtDcHFElypt7xw9cZN7Wyv9x4XBrTmh5KP6RmcGnRR1/GJlTNwD2AY2T9QTPnHh+g==
+ optionalDependencies:
+ turbo-android-arm64 "1.4.6"
+ turbo-darwin-64 "1.4.6"
+ turbo-darwin-arm64 "1.4.6"
+ turbo-freebsd-64 "1.4.6"
+ turbo-freebsd-arm64 "1.4.6"
+ turbo-linux-32 "1.4.6"
+ turbo-linux-64 "1.4.6"
+ turbo-linux-arm "1.4.6"
+ turbo-linux-arm64 "1.4.6"
+ turbo-linux-mips64le "1.4.6"
+ turbo-linux-ppc64le "1.4.6"
+ turbo-windows-32 "1.4.6"
+ turbo-windows-64 "1.4.6"
+ turbo-windows-arm64 "1.4.6"
+
+type-check@^0.4.0, type-check@~0.4.0:
+ version "0.4.0"
+ resolved "https://registry.yarnpkg.com/type-check/-/type-check-0.4.0.tgz#07b8203bfa7056c0657050e3ccd2c37730bab8f1"
+ integrity sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==
+ dependencies:
+ prelude-ls "^1.2.1"
+
+type-fest@^0.20.2:
+ version "0.20.2"
+ resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.20.2.tgz#1bf207f4b28f91583666cb5fbd327887301cd5f4"
+ integrity sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==
+
+typescript@^4.5.2, typescript@^4.5.3, typescript@^4.7.4:
+ version "4.8.3"
+ resolved "https://registry.yarnpkg.com/typescript/-/typescript-4.8.3.tgz#d59344522c4bc464a65a730ac695007fdb66dd88"
+ integrity sha512-goMHfm00nWPa8UvR/CPSvykqf6dVV8x/dp0c5mFTMTIu0u0FlGWRioyy7Nn0PGAdHxpJZnuO/ut+PpQ8UiHAig==
+
+unbox-primitive@^1.0.2:
+ version "1.0.2"
+ resolved "https://registry.yarnpkg.com/unbox-primitive/-/unbox-primitive-1.0.2.tgz#29032021057d5e6cdbd08c5129c226dff8ed6f9e"
+ integrity sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw==
+ dependencies:
+ call-bind "^1.0.2"
+ has-bigints "^1.0.2"
+ has-symbols "^1.0.3"
+ which-boxed-primitive "^1.0.2"
+
+update-browserslist-db@^1.0.5:
+ version "1.0.7"
+ resolved "https://registry.yarnpkg.com/update-browserslist-db/-/update-browserslist-db-1.0.7.tgz#16279639cff1d0f800b14792de43d97df2d11b7d"
+ integrity sha512-iN/XYesmZ2RmmWAiI4Z5rq0YqSiv0brj9Ce9CfhNE4xIW2h+MFxcgkxIzZ+ShkFPUkjU3gQ+3oypadD3RAMtrg==
+ dependencies:
+ escalade "^3.1.1"
+ picocolors "^1.0.0"
+
+uri-js@^4.2.2:
+ version "4.4.1"
+ resolved "https://registry.yarnpkg.com/uri-js/-/uri-js-4.4.1.tgz#9b1a52595225859e55f669d928f88c6c57f2a77e"
+ integrity sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==
+ dependencies:
+ punycode "^2.1.0"
+
+use-sync-external-store@1.2.0:
+ version "1.2.0"
+ resolved "https://registry.yarnpkg.com/use-sync-external-store/-/use-sync-external-store-1.2.0.tgz#7dbefd6ef3fe4e767a0cf5d7287aacfb5846928a"
+ integrity sha512-eEgnFxGQ1Ife9bzYs6VLi8/4X6CObHMw9Qr9tPY43iKwsPw8xE8+EFsf/2cFZ5S3esXgpWgtSCtLNS41F+sKPA==
+
+v8-compile-cache@^2.0.3:
+ version "2.3.0"
+ resolved "https://registry.yarnpkg.com/v8-compile-cache/-/v8-compile-cache-2.3.0.tgz#2de19618c66dc247dcfb6f99338035d8245a2cee"
+ integrity sha512-l8lCEmLcLYZh4nbunNZvQCJc5pv7+RCwa8q/LdUx8u7lsWvPDKmpodJAJNwkAhJC//dFY48KuIEmjtd4RViDrA==
+
+which-boxed-primitive@^1.0.2:
+ version "1.0.2"
+ resolved "https://registry.yarnpkg.com/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz#13757bc89b209b049fe5d86430e21cf40a89a8e6"
+ integrity sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==
+ dependencies:
+ is-bigint "^1.0.1"
+ is-boolean-object "^1.1.0"
+ is-number-object "^1.0.4"
+ is-string "^1.0.5"
+ is-symbol "^1.0.3"
+
+which@^2.0.1:
+ version "2.0.2"
+ resolved "https://registry.yarnpkg.com/which/-/which-2.0.2.tgz#7c6a8dd0a636a0327e10b59c9286eee93f3f51b1"
+ integrity sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==
+ dependencies:
+ isexe "^2.0.0"
+
+word-wrap@^1.2.3:
+ version "1.2.3"
+ resolved "https://registry.yarnpkg.com/word-wrap/-/word-wrap-1.2.3.tgz#610636f6b1f703891bd34771ccb17fb93b47079c"
+ integrity sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==
+
+wrappy@1:
+ version "1.0.2"
+ resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f"
+ integrity sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==
+
+yallist@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.yarnpkg.com/yallist/-/yallist-4.0.0.tgz#9bb92790d9c0effec63be73519e11a35019a3a72"
+ integrity sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==
diff --git a/cli/internal/lockfile/yarn_lockfile.go b/cli/internal/lockfile/yarn_lockfile.go
new file mode 100644
index 0000000..99d7764
--- /dev/null
+++ b/cli/internal/lockfile/yarn_lockfile.go
@@ -0,0 +1,124 @@
+package lockfile
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+
+ "github.com/andybalholm/crlf"
+ "github.com/iseki0/go-yarnlock"
+ "github.com/pkg/errors"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+var _crlfLiteral = []byte("\r\n")
+
+// YarnLockfile is a representation of a yarn lockfile
+type YarnLockfile struct {
+ inner yarnlock.LockFile
+ hasCRLF bool
+}
+
+var _ Lockfile = (*YarnLockfile)(nil)
+
+// ResolvePackage returns the lockfile key, resolved version, and whether an entry was found for the given package name and version
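+// For example, resolving name "lodash" at version "^4.17.21" tries the
+// candidate keys produced by yarnPossibleKeys ("lodash@^4.17.21",
+// "lodash@npm:^4.17.21", and so on) and returns the first entry present.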
+func (l *YarnLockfile) ResolvePackage(_workspacePath turbopath.AnchoredUnixPath, name string, version string) (Package, error) {
+ for _, key := range yarnPossibleKeys(name, version) {
+ if entry, ok := (l.inner)[key]; ok {
+ return Package{
+ Found: true,
+ Key: key,
+ Version: entry.Version,
+ }, nil
+ }
+ }
+
+ return Package{}, nil
+}
+
+// AllDependencies returns all dependencies (regular and optional) recorded for the given lockfile key
+func (l *YarnLockfile) AllDependencies(key string) (map[string]string, bool) {
+ deps := map[string]string{}
+ entry, ok := (l.inner)[key]
+ if !ok {
+ return deps, false
+ }
+
+ for name, version := range entry.Dependencies {
+ deps[name] = version
+ }
+ for name, version := range entry.OptionalDependencies {
+ deps[name] = version
+ }
+
+ return deps, true
+}
+
+// Subgraph returns a Lockfile derived from the original that contains only the given packages
+func (l *YarnLockfile) Subgraph(_ []turbopath.AnchoredSystemPath, packages []string) (Lockfile, error) {
+ lockfile := make(map[string]yarnlock.LockFileEntry, len(packages))
+ for _, key := range packages {
+ entry, ok := (l.inner)[key]
+ if ok {
+ lockfile[key] = entry
+ }
+ }
+
+ return &YarnLockfile{lockfile, l.hasCRLF}, nil
+}
+
+// Encode encodes the lockfile representation and writes it to the given writer
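+// If the decoded lockfile used CRLF line endings, the writer is wrapped so
+// that the output round-trips with the same line endings.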
+func (l *YarnLockfile) Encode(w io.Writer) error {
+ writer := w
+ if l.hasCRLF {
+ writer = crlf.NewWriter(w)
+ }
+ if err := l.inner.Encode(writer); err != nil {
+ return errors.Wrap(err, "Unable to encode yarn.lock")
+ }
+ return nil
+}
+
+// Patches returns a list of patches used in the lockfile
+func (l *YarnLockfile) Patches() []turbopath.AnchoredUnixPath {
+ return nil
+}
+
+// DecodeYarnLockfile parses the contents of a yarn lockfile into a struct representation
+func DecodeYarnLockfile(contents []byte) (*YarnLockfile, error) {
+ lockfile, err := yarnlock.ParseLockFileData(contents)
+ if err != nil {
+ return nil, errors.Wrap(err, "Unable to decode yarn.lock")
+ }
+
+ hasCRLF := bytes.HasSuffix(contents, _crlfLiteral)
+ newline := []byte("\n")
+
+ // If the file has no trailing newline, inspect the byte before the first
+ // line break to determine the newline style.
+ if !hasCRLF && !bytes.HasSuffix(contents, newline) {
+ firstNewline := bytes.IndexByte(contents, newline[0])
+ if firstNewline > 0 {
+ hasCRLF = contents[firstNewline-1] == '\r'
+ }
+ }
+
+ return &YarnLockfile{lockfile, hasCRLF}, nil
+}
+
+// GlobalChange checks if there are any differences between lockfiles that would completely invalidate
+// the cache.
+func (l *YarnLockfile) GlobalChange(other Lockfile) bool {
+ _, ok := other.(*YarnLockfile)
+ return !ok
+}
+
+func yarnPossibleKeys(name string, version string) []string {
+ return []string{
+ fmt.Sprintf("%v@%v", name, version),
+ fmt.Sprintf("%v@npm:%v", name, version),
+ fmt.Sprintf("%v@file:%v", name, version),
+ fmt.Sprintf("%v@workspace:%v", name, version),
+ fmt.Sprintf("%v@yarn:%v", name, version),
+ }
+}
diff --git a/cli/internal/lockfile/yarn_lockfile_test.go b/cli/internal/lockfile/yarn_lockfile_test.go
new file mode 100644
index 0000000..ef4fcb0
--- /dev/null
+++ b/cli/internal/lockfile/yarn_lockfile_test.go
@@ -0,0 +1,51 @@
+package lockfile
+
+import (
+ "bytes"
+ "testing"
+
+ "gotest.tools/v3/assert"
+)
+
+func TestRoundtrip(t *testing.T) {
+ content, err := getFixture(t, "yarn.lock")
+ if err != nil {
+ t.Error(err)
+ }
+
+ lockfile, err := DecodeYarnLockfile(content)
+ if err != nil {
+ t.Error(err)
+ }
+
+ var b bytes.Buffer
+ if err := lockfile.Encode(&b); err != nil {
+ t.Error(err)
+ }
+
+ assert.DeepEqual(t, string(content), b.String())
+}
+
+func TestKeySplitting(t *testing.T) {
+ content, err := getFixture(t, "yarn.lock")
+ if err != nil {
+ t.Error(err)
+ }
+
+ lockfile, err := DecodeYarnLockfile(content)
+ if err != nil {
+ t.Error(err)
+ }
+
+ // @babel/types has multiple entries; all of them should appear in the parsed lockfile struct
+ keys := []string{
+ "@babel/types@^7.18.10",
+ "@babel/types@^7.18.6",
+ "@babel/types@^7.19.0",
+ }
+
+ for _, key := range keys {
+ _, ok := lockfile.inner[key]
+ assert.Assert(t, ok, "Unable to find entry for %s in parsed lockfile", key)
+ }
+}
diff --git a/cli/internal/logstreamer/logstreamer.go b/cli/internal/logstreamer/logstreamer.go
new file mode 100644
index 0000000..4379c25
--- /dev/null
+++ b/cli/internal/logstreamer/logstreamer.go
@@ -0,0 +1,159 @@
+// Copyright (c) 2013 Kevin van Zonneveld <kevin@vanzonneveld.net>. All rights reserved.
+// Source: https://github.com/kvz/logstreamer
+// SPDX-License-Identifier: MIT
+package logstreamer
+
+import (
+ "bytes"
+ "io"
+ "log"
+ "os"
+ "strings"
+)
+
+type Logstreamer struct {
+ Logger *log.Logger
+ buf *bytes.Buffer
+ // If prefix == stdout, colors green
+ // If prefix == stderr, colors red
+ // Else, prefix is taken as-is, and prepended to anything
+ // you throw at Write()
+ prefix string
+ // if true, saves output in memory
+ record bool
+ persist string
+
+ // Adds color to stdout & stderr if terminal supports it
+ colorOkay string
+ colorFail string
+ colorReset string
+}
+
+func NewLogstreamer(logger *log.Logger, prefix string, record bool) *Logstreamer {
+ streamer := &Logstreamer{
+ Logger: logger,
+ buf: bytes.NewBuffer([]byte("")),
+ prefix: prefix,
+ record: record,
+ persist: "",
+ colorOkay: "",
+ colorFail: "",
+ colorReset: "",
+ }
+
+ if strings.HasPrefix(os.Getenv("TERM"), "xterm") {
+ streamer.colorOkay = "\x1b[32m"
+ streamer.colorFail = "\x1b[31m"
+ streamer.colorReset = "\x1b[0m"
+ }
+
+ return streamer
+}
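+
+// A typical use wires a Logstreamer to an exec.Cmd, for example:
+//
+// logger := log.New(os.Stdout, "--> ", log.Ldate|log.Ltime)
+// outStreamer := NewLogstreamer(logger, "stdout", false)
+// defer outStreamer.Close()
+// cmd.Stdout = outStreamer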
+
+func (l *Logstreamer) Write(p []byte) (n int, err error) {
+ if n, err = l.buf.Write(p); err != nil {
+ return
+ }
+
+ err = l.OutputLines()
+ return
+}
+
+func (l *Logstreamer) Close() error {
+ if err := l.Flush(); err != nil {
+ return err
+ }
+ l.buf = bytes.NewBuffer([]byte(""))
+ return nil
+}
+
+func (l *Logstreamer) Flush() error {
+ p := make([]byte, l.buf.Len())
+ if _, err := l.buf.Read(p); err != nil {
+ return err
+ }
+
+ l.out(string(p))
+ return nil
+}
+
+func (l *Logstreamer) OutputLines() error {
+ for {
+ line, err := l.buf.ReadString('\n')
+
+ if len(line) > 0 {
+ if strings.HasSuffix(line, "\n") {
+ l.out(line)
+ } else {
+ // put back into buffer, it's not a complete line yet
+ // Close() or Flush() have to be used to flush out
+ // the last remaining line if it does not end with a newline
+ if _, err := l.buf.WriteString(line); err != nil {
+ return err
+ }
+ }
+ }
+
+ if err == io.EOF {
+ break
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (l *Logstreamer) FlushRecord() string {
+ buffer := l.persist
+ l.persist = ""
+ return buffer
+}
+
+func (l *Logstreamer) out(str string) {
+ if len(str) < 1 {
+ return
+ }
+
+ if l.record {
+ l.persist = l.persist + str
+ }
+
+ if l.prefix == "stdout" {
+ str = l.colorOkay + l.prefix + l.colorReset + " " + str
+ } else if l.prefix == "stderr" {
+ str = l.colorFail + l.prefix + l.colorReset + " " + str
+ }
+
+ l.Logger.Print(str)
+}
+
+// PrettyStdoutWriter wraps an io.Writer so it can add string
+// prefixes to every message it writes to stdout.
+type PrettyStdoutWriter struct {
+ w io.Writer
+ Prefix string
+}
+
+var _ io.Writer = (*PrettyStdoutWriter)(nil)
+
+// NewPrettyStdoutWriter returns an instance of PrettyStdoutWriter
+func NewPrettyStdoutWriter(prefix string) *PrettyStdoutWriter {
+ return &PrettyStdoutWriter{
+ w: os.Stdout,
+ Prefix: prefix,
+ }
+}
+
+func (psw *PrettyStdoutWriter) Write(p []byte) (int, error) {
+ str := psw.Prefix + string(p)
+ n, err := psw.w.Write([]byte(str))
+
+ if err != nil {
+ return n, err
+ }
+
+ return len(p), nil
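+ // Report len(p) rather than n: n counts the prefixed string that was
+ // actually written, while io.Writer callers expect the number of bytes
+ // consumed from p.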
+}
diff --git a/cli/internal/logstreamer/logstreamer_test.go b/cli/internal/logstreamer/logstreamer_test.go
new file mode 100644
index 0000000..94d8a82
--- /dev/null
+++ b/cli/internal/logstreamer/logstreamer_test.go
@@ -0,0 +1,114 @@
+// Copyright (c) 2013 Kevin van Zonneveld <kevin@vanzonneveld.net>. All rights reserved.
+// Source: https://github.com/kvz/logstreamer
+// SPDX-License-Identifier: MIT
+package logstreamer
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "log"
+ "os"
+ "os/exec"
+ "strings"
+ "testing"
+)
+
+func TestLogstreamerOk(t *testing.T) {
+ // Create a logger (your app probably already has one)
+ logger := log.New(os.Stdout, "--> ", log.Ldate|log.Ltime)
+
+ // Set up a streamer that we'll pipe cmd.Stdout to
+ logStreamerOut := NewLogstreamer(logger, "stdout", false)
+ defer logStreamerOut.Close()
+ // Set up a streamer that we'll pipe cmd.Stderr to.
+ // We want to record/buffer anything that's written to this (3rd argument true)
+ logStreamerErr := NewLogstreamer(logger, "stderr", true)
+ defer logStreamerErr.Close()
+
+ // Execute something that succeeds
+ cmd := exec.Command(
+ "ls",
+ "-al",
+ )
+ cmd.Stderr = logStreamerErr
+ cmd.Stdout = logStreamerOut
+
+ // Reset any error we recorded
+ logStreamerErr.FlushRecord()
+
+ // Execute command
+ err := cmd.Start()
+
+ // Failed to spawn?
+ if err != nil {
+ t.Fatal("ERROR could not spawn command.", err.Error())
+ }
+
+ // Failed to execute?
+ err = cmd.Wait()
+ if err != nil {
+ t.Fatal("ERROR command finished with error. ", err.Error(), logStreamerErr.FlushRecord())
+ }
+}
+
+func TestLogstreamerErr(t *testing.T) {
+ // Create a logger (your app probably already has one)
+ logger := log.New(os.Stdout, "--> ", log.Ldate|log.Ltime)
+
+ // Set up a streamer that we'll pipe cmd.Stdout to
+ logStreamerOut := NewLogstreamer(logger, "stdout", false)
+ defer logStreamerOut.Close()
+ // Set up a streamer that we'll pipe cmd.Stderr to.
+ // We want to record/buffer anything that's written to this (3rd argument true)
+ logStreamerErr := NewLogstreamer(logger, "stderr", true)
+ defer logStreamerErr.Close()
+
+ // Execute something that fails
+ cmd := exec.Command(
+ "ls",
+ "nonexisting",
+ )
+ cmd.Stderr = logStreamerErr
+ cmd.Stdout = logStreamerOut
+
+ // Reset any error we recorded
+ logStreamerErr.FlushRecord()
+
+ // Execute command
+ err := cmd.Start()
+
+ // Failed to spawn?
+ if err != nil {
+ logger.Print("ERROR could not spawn command. ")
+ }
+
+ // Failed to execute?
+ err = cmd.Wait()
+ if err != nil {
+ fmt.Printf("Good. command finished with %s. %s. \n", err.Error(), logStreamerErr.FlushRecord())
+ } else {
+ t.Fatal("This command should have failed")
+ }
+}
+
+func TestLogstreamerFlush(t *testing.T) {
+ const text = "Text without newline"
+
+ var buffer bytes.Buffer
+ byteWriter := bufio.NewWriter(&buffer)
+
+ logger := log.New(byteWriter, "", 0)
+ logStreamerOut := NewLogstreamer(logger, "", false)
+ defer logStreamerOut.Close()
+
+ logStreamerOut.Write([]byte(text))
+ logStreamerOut.Flush()
+ byteWriter.Flush()
+
+ s := strings.TrimSpace(buffer.String())
+
+ if s != text {
+ t.Fatalf("Expected '%s', got '%s'.", text, s)
+ }
+}
diff --git a/cli/internal/nodes/packagetask.go b/cli/internal/nodes/packagetask.go
new file mode 100644
index 0000000..e2dcb27
--- /dev/null
+++ b/cli/internal/nodes/packagetask.go
@@ -0,0 +1,45 @@
+// Package nodes defines the nodes that are present in the execution graph used by turbo.
+package nodes
+
+import (
+ "fmt"
+
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/util"
+)
+
+// PackageTask represents running a particular task in a particular package
+type PackageTask struct {
+ TaskID string
+ Task string
+ PackageName string
+ Pkg *fs.PackageJSON
+ EnvMode util.EnvMode
+ TaskDefinition *fs.TaskDefinition
+ Dir string
+ Command string
+ Outputs []string
+ ExcludedOutputs []string
+ LogFile string
+ Hash string
+}
+
+// OutputPrefix returns the prefix to be used for logging and ui for this task
+func (pt *PackageTask) OutputPrefix(isSinglePackage bool) string {
+ if isSinglePackage {
+ return pt.Task
+ }
+ return fmt.Sprintf("%v:%v", pt.PackageName, pt.Task)
+}
+
+// HashableOutputs returns the package-relative globs for files to be considered outputs
+// of this task
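+// For example, task "build" always contributes ".turbo/turbo-build.log" in
+// addition to the inclusion globs declared in the task definition.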
+func (pt *PackageTask) HashableOutputs() fs.TaskOutputs {
+ inclusionOutputs := []string{fmt.Sprintf(".turbo/turbo-%v.log", pt.Task)}
+ inclusionOutputs = append(inclusionOutputs, pt.TaskDefinition.Outputs.Inclusions...)
+
+ return fs.TaskOutputs{
+ Inclusions: inclusionOutputs,
+ Exclusions: pt.TaskDefinition.Outputs.Exclusions,
+ }
+}
diff --git a/cli/internal/packagemanager/berry.go b/cli/internal/packagemanager/berry.go
new file mode 100644
index 0000000..d6264b1
--- /dev/null
+++ b/cli/internal/packagemanager/berry.go
@@ -0,0 +1,156 @@
+package packagemanager
+
+import (
+ "fmt"
+ "os/exec"
+ "strings"
+
+ "github.com/Masterminds/semver"
+ "github.com/pkg/errors"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/lockfile"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "github.com/vercel/turbo/cli/internal/util"
+)
+
+var nodejsBerry = PackageManager{
+ Name: "nodejs-berry",
+ Slug: "yarn",
+ Command: "yarn",
+ Specfile: "package.json",
+ Lockfile: "yarn.lock",
+ PackageDir: "node_modules",
+
+ getWorkspaceGlobs: func(rootpath turbopath.AbsoluteSystemPath) ([]string, error) {
+ pkg, err := fs.ReadPackageJSON(rootpath.UntypedJoin("package.json"))
+ if err != nil {
+ return nil, fmt.Errorf("package.json: %w", err)
+ }
+ if len(pkg.Workspaces) == 0 {
+ return nil, fmt.Errorf("package.json: no workspaces found. Turborepo requires Yarn workspaces to be defined in the root package.json")
+ }
+ return pkg.Workspaces, nil
+ },
+
+ getWorkspaceIgnores: func(pm PackageManager, rootpath turbopath.AbsoluteSystemPath) ([]string, error) {
+ // Matches upstream values:
+ // Key code: https://github.com/yarnpkg/berry/blob/8e0c4b897b0881878a1f901230ea49b7c8113fbe/packages/yarnpkg-core/sources/Workspace.ts#L64-L70
+ return []string{
+ "**/node_modules",
+ "**/.git",
+ "**/.yarn",
+ }, nil
+ },
+
+ canPrune: func(cwd turbopath.AbsoluteSystemPath) (bool, error) {
+ if isNMLinker, err := util.IsNMLinker(cwd.ToStringDuringMigration()); err != nil {
+ return false, errors.Wrap(err, "could not determine if yarn is using `nodeLinker: node-modules`")
+ } else if !isNMLinker {
+ return false, errors.New("only yarn v2/v3 with `nodeLinker: node-modules` is supported at this time")
+ }
+ return true, nil
+ },
+
+ // Versions 2.0 and newer are berry; anything earlier we simply call yarn.
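+ // For example, Matches("yarn", "3.2.1") and Matches("yarn", "2.0.0-rc.1")
+ // are true, while Matches("yarn", "1.22.19") and Matches("npm", "8.19.2")
+ // are false.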
+ Matches: func(manager string, version string) (bool, error) {
+ if manager != "yarn" {
+ return false, nil
+ }
+
+ v, err := semver.NewVersion(version)
+ if err != nil {
+ return false, fmt.Errorf("could not parse yarn version: %w", err)
+ }
+ // The -0 suffix allows pre-release versions to be considered valid
+ c, err := semver.NewConstraint(">=2.0.0-0")
+ if err != nil {
+ return false, fmt.Errorf("could not create constraint: %w", err)
+ }
+
+ return c.Check(v), nil
+ },
+
+ // Detect for berry needs to identify which version of yarn is running on the system.
+ // Further, berry can be configured in an incompatible way, so we check for compatibility here as well.
+ detect: func(projectDirectory turbopath.AbsoluteSystemPath, packageManager *PackageManager) (bool, error) {
+ specfileExists := projectDirectory.UntypedJoin(packageManager.Specfile).FileExists()
+ lockfileExists := projectDirectory.UntypedJoin(packageManager.Lockfile).FileExists()
+
+ // Short-circuit, definitely not Yarn.
+ if !specfileExists || !lockfileExists {
+ return false, nil
+ }
+
+ cmd := exec.Command("yarn", "--version")
+ cmd.Dir = projectDirectory.ToString()
+ out, err := cmd.Output()
+ if err != nil {
+ return false, fmt.Errorf("could not detect yarn version: %w", err)
+ }
+
+ // Check whether the reported version satisfies the berry constraint.
+ matches, _ := packageManager.Matches(packageManager.Slug, string(out))
+
+ // Short-circuit, definitely not Berry because version number says we're Yarn.
+ if !matches {
+ return false, nil
+ }
+
+ // We're Berry!
+
+ // Check for supported configuration.
+ isNMLinker, err := util.IsNMLinker(projectDirectory.ToStringDuringMigration())
+
+ if err != nil {
+ // Failed to read the linker state, so we treat an unknown configuration as a failure.
+ return false, fmt.Errorf("could not check if yarn is using nm-linker: %w", err)
+ } else if !isNMLinker {
+ // Not using nm-linker, so unsupported configuration.
+ return false, fmt.Errorf("only yarn nm-linker is supported")
+ }
+
+ // Berry, supported configuration.
+ return true, nil
+ },
+
+ UnmarshalLockfile: func(_rootPackageJSON *fs.PackageJSON, contents []byte) (lockfile.Lockfile, error) {
+ return lockfile.DecodeBerryLockfile(contents)
+ },
+
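+ // prunePatches removes entries in the package.json "resolutions" field
+ // that point at .patch files no longer used by the pruned workspace,
+ // since Berry errors when a declared patch goes unused.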
+ prunePatches: func(pkgJSON *fs.PackageJSON, patches []turbopath.AnchoredUnixPath) error {
+ pkgJSON.Mu.Lock()
+ defer pkgJSON.Mu.Unlock()
+
+ keysToDelete := []string{}
+ resolutions, ok := pkgJSON.RawJSON["resolutions"].(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("Invalid structure for resolutions field in package.json")
+ }
+
+ for dependency, untypedPatch := range resolutions {
+ inPatches := false
+ patch, ok := untypedPatch.(string)
+ if !ok {
+ return fmt.Errorf("Expected value of %s in package.json to be a string, got %v", dependency, untypedPatch)
+ }
+
+ for _, wantedPatch := range patches {
+ if strings.HasSuffix(patch, wantedPatch.ToString()) {
+ inPatches = true
+ break
+ }
+ }
+
+ // Only delete unused patches, since they are the ones that make Berry throw
+ if !inPatches && strings.HasSuffix(patch, ".patch") {
+ keysToDelete = append(keysToDelete, dependency)
+ }
+ }
+
+ for _, key := range keysToDelete {
+ delete(resolutions, key)
+ }
+
+ return nil
+ },
+}
diff --git a/cli/internal/packagemanager/fixtures/package.json b/cli/internal/packagemanager/fixtures/package.json
new file mode 100644
index 0000000..6b27f7c
--- /dev/null
+++ b/cli/internal/packagemanager/fixtures/package.json
@@ -0,0 +1,7 @@
+{
+ "name": "fixture",
+ "workspaces": [
+ "apps/*",
+ "packages/*"
+ ]
+}
diff --git a/cli/internal/packagemanager/fixtures/pnpm-patches.json b/cli/internal/packagemanager/fixtures/pnpm-patches.json
new file mode 100644
index 0000000..f772bc3
--- /dev/null
+++ b/cli/internal/packagemanager/fixtures/pnpm-patches.json
@@ -0,0 +1,11 @@
+{
+ "name": "turborepo-prune-removes-patched",
+ "version": "1.0.0",
+ "packageManager": "pnpm@7.15.0",
+ "workspaces": ["packages/*"],
+ "pnpm": {
+ "patchedDependencies": {
+ "is-odd@3.0.1": "patches/is-odd@3.0.1.patch"
+ }
+ }
+}
diff --git a/cli/internal/packagemanager/fixtures/pnpm-workspace.yaml b/cli/internal/packagemanager/fixtures/pnpm-workspace.yaml
new file mode 100644
index 0000000..7fbb770
--- /dev/null
+++ b/cli/internal/packagemanager/fixtures/pnpm-workspace.yaml
@@ -0,0 +1,3 @@
+packages:
+ - "packages/*"
+ - "!packages/skip"
diff --git a/cli/internal/packagemanager/infer_root.go b/cli/internal/packagemanager/infer_root.go
new file mode 100644
index 0000000..7920f12
--- /dev/null
+++ b/cli/internal/packagemanager/infer_root.go
@@ -0,0 +1,146 @@
+package packagemanager
+
+import (
+ "path/filepath"
+
+ "github.com/vercel/turbo/cli/internal/doublestar"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+// PackageType represents the mode in which turbo is running.
+type PackageType string
+
+const (
+ // Single is for single-package mode.
+ Single PackageType = "single"
+ // Multi is for monorepo mode.
+ Multi PackageType = "multi"
+)
+
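+// candidateDirectoryWorkspaceGlobs returns the workspace globs declared in
+// the given directory, checking the npm (package.json "workspaces") format
+// and then the pnpm (pnpm-workspace.yaml) format. It returns nil if neither
+// is present.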
+func candidateDirectoryWorkspaceGlobs(directory turbopath.AbsoluteSystemPath) []string {
+ packageManagers := []PackageManager{
+ nodejsNpm,
+ nodejsPnpm,
+ }
+
+ for _, pm := range packageManagers {
+ globs, err := pm.getWorkspaceGlobs(directory)
+ if err != nil {
+ // Try the other package manager workspace formats.
+ continue
+ }
+
+ return globs
+ }
+
+ return nil
+}
+
+func isOneOfTheWorkspaces(globs []string, nearestPackageJSONDir turbopath.AbsoluteSystemPath, currentPackageJSONDir turbopath.AbsoluteSystemPath) bool {
+ for _, glob := range globs {
+ globpattern := currentPackageJSONDir.UntypedJoin(filepath.FromSlash(glob)).ToString()
+ match, _ := doublestar.PathMatch(globpattern, nearestPackageJSONDir.ToString())
+ if match {
+ return true
+ }
+ }
+
+ return false
+}
+
+// InferRoot identifies which directory we should treat as the root, and which mode
+// turbo should be in when operating at that directory.
+func InferRoot(directory turbopath.AbsoluteSystemPath) (turbopath.AbsoluteSystemPath, PackageType) {
+ // Go doesn't have iterators, so this is not very elegant.
+
+ // Scenarios:
+ // 0. Has a turbo.json but doesn't have a peer package.json. directory + multi
+ // 1. Nearest turbo.json, check peer package.json/pnpm-workspace.yaml.
+ // A. Has workspaces, multi package mode.
+ // B. No workspaces, single package mode.
+ // 2. If no turbo.json find the closest package.json parent.
+ // A. No parent package.json, default to current behavior.
+ // B. Nearest package.json defines workspaces. Can't be in single-package mode, so we bail. (This could be changed in the future.)
+ // 3. Closest package.json does not define workspaces. Traverse toward the root looking for package.jsons.
+ // A. No parent package.json with workspaces. nearestPackageJson + single
+ // B. Stop at the first one that has workspaces.
+ // i. If we are one of the workspaces, directory + multi. (This could be changed in the future.)
+ // ii. If we're not one of the workspaces, nearestPackageJson + single.
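+ //
+ // For example, running from apps/web in a repo whose root has a turbo.json
+ // and a package.json with a "workspaces" field resolves to the repo root in
+ // multi-package mode (Scenario 1A).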
+
+ nearestTurboJSON, findTurboJSONErr := directory.Findup("turbo.json")
+ if nearestTurboJSON == "" || findTurboJSONErr != nil {
+ // We didn't find a turbo.json. We're in situation 2 or 3.
+
+ // Unroll the first loop for Scenario 2
+ nearestPackageJSON, nearestPackageJSONErr := directory.Findup("package.json")
+
+ // If we fail to find any package.json files we aren't in single package mode.
+ // We let things go through our existing failure paths.
+ // Scenario 2A.
+ if nearestPackageJSON == "" || nearestPackageJSONErr != nil {
+ return directory, Multi
+ }
+
+ // If we find a package.json which has workspaces we aren't in single package mode.
+ // We let things go through our existing failure paths.
+ // Scenario 2B.
+ if candidateDirectoryWorkspaceGlobs(nearestPackageJSON.Dir()) != nil {
+ // In a future world we could maybe change this behavior.
+ // return nearestPackageJson.Dir(), Multi
+ return directory, Multi
+ }
+
+ // Scenario 3.
+ // Find the nearest package.json that has workspaces.
+ // If found _and_ the nearestPackageJson is one of the workspaces, thatPackageJson + multi.
+ // Else, nearestPackageJson + single
+ cursor := nearestPackageJSON.Dir().UntypedJoin("..")
+ for {
+ nextPackageJSON, nextPackageJSONErr := cursor.Findup("package.json")
+ if nextPackageJSON == "" || nextPackageJSONErr != nil {
+ // We haven't found a parent defining workspaces.
+ // So we're single package mode at nearestPackageJson.
+ // Scenario 3A.
+ return nearestPackageJSON.Dir(), Single
+ }
+
+ // Found a package.json file, see if it has workspaces.
+ // Workspaces are not allowed to be recursive, so we know what to
+ // return the moment we find something with workspaces.
+ globs := candidateDirectoryWorkspaceGlobs(nextPackageJSON.Dir())
+ if globs != nil {
+ if isOneOfTheWorkspaces(globs, nearestPackageJSON.Dir(), nextPackageJSON.Dir()) {
+ // If it has workspaces, and nearestPackageJson is one of them, we're multi.
+ // We don't infer in this scenario.
+ // Scenario 3BI.
+ // TODO: return nextPackageJson.Dir(), Multi
+ return directory, Multi
+ }
+
+ // We found a parent with workspaces, but we're not one of them.
+ // We choose to operate in single package mode.
+ // Scenario 3BII
+ return nearestPackageJSON.Dir(), Single
+ }
+
+ // Loop around and see if we have another parent.
+ cursor = nextPackageJSON.Dir().UntypedJoin("..")
+ }
+ } else {
+ // If there is no sibling package.json we do no inference.
+ siblingPackageJSONPath := nearestTurboJSON.Dir().UntypedJoin("package.json")
+ if !siblingPackageJSONPath.Exists() {
+ // We do no inference.
+ // Scenario 0
+ return directory, Multi
+ }
+
+ if candidateDirectoryWorkspaceGlobs(nearestTurboJSON.Dir()) != nil {
+ // Scenario 1A.
+ return nearestTurboJSON.Dir(), Multi
+ }
+
+ // Scenario 1B.
+ return nearestTurboJSON.Dir(), Single
+ }
+}
diff --git a/cli/internal/packagemanager/infer_root_test.go b/cli/internal/packagemanager/infer_root_test.go
new file mode 100644
index 0000000..2e37a80
--- /dev/null
+++ b/cli/internal/packagemanager/infer_root_test.go
@@ -0,0 +1,347 @@
+package packagemanager
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "gotest.tools/v3/assert"
+)
+
+func TestInferRoot(t *testing.T) {
+ type file struct {
+ path turbopath.AnchoredSystemPath
+ content []byte
+ }
+
+ tests := []struct {
+ name string
+ fs []file
+ executionDirectory turbopath.AnchoredSystemPath
+ rootPath turbopath.AnchoredSystemPath
+ packageMode PackageType
+ }{
+ // Scenario 0
+ {
+ name: "turbo.json at current dir, no package.json",
+ fs: []file{
+ {path: turbopath.AnchoredUnixPath("turbo.json").ToSystemPath()},
+ },
+ executionDirectory: turbopath.AnchoredUnixPath("").ToSystemPath(),
+ rootPath: turbopath.AnchoredUnixPath("").ToSystemPath(),
+ packageMode: Multi,
+ },
+ {
+ name: "turbo.json at parent dir, no package.json",
+ fs: []file{
+ {path: turbopath.AnchoredUnixPath("execution/path/subdir/.file").ToSystemPath()},
+ {path: turbopath.AnchoredUnixPath("turbo.json").ToSystemPath()},
+ },
+ executionDirectory: turbopath.AnchoredUnixPath("execution/path/subdir").ToSystemPath(),
+ // This is "no inference"
+ rootPath: turbopath.AnchoredUnixPath("execution/path/subdir").ToSystemPath(),
+ packageMode: Multi,
+ },
+ // Scenario 1A
+ {
+ name: "turbo.json at current dir, has package.json, has workspaces key",
+ fs: []file{
+ {path: turbopath.AnchoredUnixPath("turbo.json").ToSystemPath()},
+ {
+ path: turbopath.AnchoredUnixPath("package.json").ToSystemPath(),
+ content: []byte("{ \"workspaces\": [ \"exists\" ] }"),
+ },
+ },
+ executionDirectory: turbopath.AnchoredUnixPath("").ToSystemPath(),
+ rootPath: turbopath.AnchoredUnixPath("").ToSystemPath(),
+ packageMode: Multi,
+ },
+ {
+ name: "turbo.json at parent dir, has package.json, has workspaces key",
+ fs: []file{
+ {path: turbopath.AnchoredUnixPath("execution/path/subdir/.file").ToSystemPath()},
+ {path: turbopath.AnchoredUnixPath("turbo.json").ToSystemPath()},
+ {
+ path: turbopath.AnchoredUnixPath("package.json").ToSystemPath(),
+ content: []byte("{ \"workspaces\": [ \"exists\" ] }"),
+ },
+ },
+ executionDirectory: turbopath.AnchoredUnixPath("execution/path/subdir").ToSystemPath(),
+ rootPath: turbopath.AnchoredUnixPath("").ToSystemPath(),
+ packageMode: Multi,
+ },
+ {
+ name: "turbo.json at parent dir, has package.json, has pnpm workspaces",
+ fs: []file{
+ {path: turbopath.AnchoredUnixPath("execution/path/subdir/.file").ToSystemPath()},
+ {path: turbopath.AnchoredUnixPath("turbo.json").ToSystemPath()},
+ {
+ path: turbopath.AnchoredUnixPath("package.json").ToSystemPath(),
+ content: []byte("{}"),
+ },
+ {
+ path: turbopath.AnchoredUnixPath("pnpm-workspace.yaml").ToSystemPath(),
+ content: []byte("packages:\n - docs"),
+ },
+ },
+ executionDirectory: turbopath.AnchoredUnixPath("execution/path/subdir").ToSystemPath(),
+ rootPath: turbopath.AnchoredUnixPath("").ToSystemPath(),
+ packageMode: Multi,
+ },
+ // Scenario 1A, aware of the weird thing we do for the packages key.
+ {
+ name: "turbo.json at current dir, has package.json, has packages key",
+ fs: []file{
+ {path: turbopath.AnchoredUnixPath("turbo.json").ToSystemPath()},
+ {
+ path: turbopath.AnchoredUnixPath("package.json").ToSystemPath(),
+ content: []byte("{ \"packages\": [ \"exists\" ] }"),
+ },
+ },
+ executionDirectory: turbopath.AnchoredUnixPath("").ToSystemPath(),
+ rootPath: turbopath.AnchoredUnixPath("").ToSystemPath(),
+ packageMode: Single,
+ },
+ {
+ name: "turbo.json at parent dir, has package.json, has packages key",
+ fs: []file{
+ {path: turbopath.AnchoredUnixPath("execution/path/subdir/.file").ToSystemPath()},
+ {path: turbopath.AnchoredUnixPath("turbo.json").ToSystemPath()},
+ {
+ path: turbopath.AnchoredUnixPath("package.json").ToSystemPath(),
+ content: []byte("{ \"packages\": [ \"exists\" ] }"),
+ },
+ },
+ executionDirectory: turbopath.AnchoredUnixPath("execution/path/subdir").ToSystemPath(),
+ rootPath: turbopath.AnchoredUnixPath("").ToSystemPath(),
+ packageMode: Single,
+ },
+ // Scenario 1A, aware of the weird thing we do for packages when both methods of specification exist.
+ {
+ name: "turbo.json at current dir, has package.json, has workspace and packages key",
+ fs: []file{
+ {path: turbopath.AnchoredUnixPath("turbo.json").ToSystemPath()},
+ {
+ path: turbopath.AnchoredUnixPath("package.json").ToSystemPath(),
+ content: []byte("{ \"workspaces\": [ \"clobbered\" ], \"packages\": [ \"exists\" ] }"),
+ },
+ },
+ executionDirectory: turbopath.AnchoredUnixPath("").ToSystemPath(),
+ rootPath: turbopath.AnchoredUnixPath("").ToSystemPath(),
+ packageMode: Multi,
+ },
+ {
+ name: "turbo.json at parent dir, has package.json, has workspace and packages key",
+ fs: []file{
+ {path: turbopath.AnchoredUnixPath("execution/path/subdir/.file").ToSystemPath()},
+ {path: turbopath.AnchoredUnixPath("turbo.json").ToSystemPath()},
+ {
+ path: turbopath.AnchoredUnixPath("package.json").ToSystemPath(),
+ content: []byte("{ \"workspaces\": [ \"clobbered\" ], \"packages\": [ \"exists\" ] }"),
+ },
+ },
+ executionDirectory: turbopath.AnchoredUnixPath("execution/path/subdir").ToSystemPath(),
+ rootPath: turbopath.AnchoredUnixPath("").ToSystemPath(),
+ packageMode: Multi,
+ },
+ // Scenario 1B
+ {
+ name: "turbo.json at current dir, has package.json, no workspaces",
+ fs: []file{
+ {path: turbopath.AnchoredUnixPath("turbo.json").ToSystemPath()},
+ {
+ path: turbopath.AnchoredUnixPath("package.json").ToSystemPath(),
+ content: []byte("{}"),
+ },
+ },
+ executionDirectory: turbopath.AnchoredUnixPath("").ToSystemPath(),
+ rootPath: turbopath.AnchoredUnixPath("").ToSystemPath(),
+ packageMode: Single,
+ },
+ {
+ name: "turbo.json at parent dir, has package.json, no workspaces",
+ fs: []file{
+ {path: turbopath.AnchoredUnixPath("execution/path/subdir/.file").ToSystemPath()},
+ {path: turbopath.AnchoredUnixPath("turbo.json").ToSystemPath()},
+ {
+ path: turbopath.AnchoredUnixPath("package.json").ToSystemPath(),
+ content: []byte("{}"),
+ },
+ },
+ executionDirectory: turbopath.AnchoredUnixPath("execution/path/subdir").ToSystemPath(),
+ rootPath: turbopath.AnchoredUnixPath("").ToSystemPath(),
+ packageMode: Single,
+ },
+ {
+ name: "turbo.json at parent dir, has package.json, no workspaces, includes pnpm",
+ fs: []file{
+ {path: turbopath.AnchoredUnixPath("execution/path/subdir/.file").ToSystemPath()},
+ {path: turbopath.AnchoredUnixPath("turbo.json").ToSystemPath()},
+ {
+ path: turbopath.AnchoredUnixPath("package.json").ToSystemPath(),
+ content: []byte("{}"),
+ },
+ {
+ path: turbopath.AnchoredUnixPath("pnpm-workspace.yaml").ToSystemPath(),
+ content: []byte(""),
+ },
+ },
+ executionDirectory: turbopath.AnchoredUnixPath("execution/path/subdir").ToSystemPath(),
+ rootPath: turbopath.AnchoredUnixPath("").ToSystemPath(),
+ packageMode: Single,
+ },
+ // Scenario 2A
+ {
+ name: "no turbo.json, no package.json at current",
+ fs: []file{},
+ executionDirectory: turbopath.AnchoredUnixPath("").ToSystemPath(),
+ rootPath: turbopath.AnchoredUnixPath("").ToSystemPath(),
+ packageMode: Multi,
+ },
+ {
+ name: "no turbo.json, no package.json at parent",
+ fs: []file{
+ {path: turbopath.AnchoredUnixPath("execution/path/subdir/.file").ToSystemPath()},
+ },
+ executionDirectory: turbopath.AnchoredUnixPath("execution/path/subdir").ToSystemPath(),
+ rootPath: turbopath.AnchoredUnixPath("execution/path/subdir").ToSystemPath(),
+ packageMode: Multi,
+ },
+ // Scenario 2B
+ {
+ name: "no turbo.json, has package.json with workspaces at current",
+ fs: []file{
+ {
+ path: turbopath.AnchoredUnixPath("package.json").ToSystemPath(),
+ content: []byte("{ \"workspaces\": [ \"exists\" ] }"),
+ },
+ },
+ executionDirectory: turbopath.AnchoredUnixPath("").ToSystemPath(),
+ rootPath: turbopath.AnchoredUnixPath("").ToSystemPath(),
+ packageMode: Multi,
+ },
+ {
+ name: "no turbo.json, has package.json with workspaces at parent",
+ fs: []file{
+ {path: turbopath.AnchoredUnixPath("execution/path/subdir/.file").ToSystemPath()},
+ {
+ path: turbopath.AnchoredUnixPath("package.json").ToSystemPath(),
+ content: []byte("{ \"workspaces\": [ \"exists\" ] }"),
+ },
+ },
+ executionDirectory: turbopath.AnchoredUnixPath("execution/path/subdir").ToSystemPath(),
+ rootPath: turbopath.AnchoredUnixPath("execution/path/subdir").ToSystemPath(),
+ packageMode: Multi,
+ },
+ {
+ name: "no turbo.json, has package.json with pnpm workspaces at parent",
+ fs: []file{
+ {path: turbopath.AnchoredUnixPath("execution/path/subdir/.file").ToSystemPath()},
+ {
+ path: turbopath.AnchoredUnixPath("package.json").ToSystemPath(),
+ content: []byte("{ \"workspaces\": [ \"exists\" ] }"),
+ },
+ {
+ path: turbopath.AnchoredUnixPath("pnpm-workspace.yaml").ToSystemPath(),
+ content: []byte("packages:\n - docs"),
+ },
+ },
+ executionDirectory: turbopath.AnchoredUnixPath("execution/path/subdir").ToSystemPath(),
+ rootPath: turbopath.AnchoredUnixPath("execution/path/subdir").ToSystemPath(),
+ packageMode: Multi,
+ },
+ // Scenario 3A
+ {
+ name: "no turbo.json, lots of package.json files but no workspaces",
+ fs: []file{
+ {
+ path: turbopath.AnchoredUnixPath("package.json").ToSystemPath(),
+ content: []byte("{}"),
+ },
+ {
+ path: turbopath.AnchoredUnixPath("one/package.json").ToSystemPath(),
+ content: []byte("{}"),
+ },
+ {
+ path: turbopath.AnchoredUnixPath("one/two/package.json").ToSystemPath(),
+ content: []byte("{}"),
+ },
+ {
+ path: turbopath.AnchoredUnixPath("one/two/three/package.json").ToSystemPath(),
+ content: []byte("{}"),
+ },
+ },
+ executionDirectory: turbopath.AnchoredUnixPath("one/two/three").ToSystemPath(),
+ rootPath: turbopath.AnchoredUnixPath("one/two/three").ToSystemPath(),
+ packageMode: Single,
+ },
+ // Scenario 3BI
+ {
+ name: "no turbo.json, lots of package.json files, and a workspace at the root that matches execution directory",
+ fs: []file{
+ {
+ path: turbopath.AnchoredUnixPath("package.json").ToSystemPath(),
+ content: []byte("{ \"workspaces\": [ \"one/two/three\" ] }"),
+ },
+ {
+ path: turbopath.AnchoredUnixPath("one/package.json").ToSystemPath(),
+ content: []byte("{}"),
+ },
+ {
+ path: turbopath.AnchoredUnixPath("one/two/package.json").ToSystemPath(),
+ content: []byte("{}"),
+ },
+ {
+ path: turbopath.AnchoredUnixPath("one/two/three/package.json").ToSystemPath(),
+ content: []byte("{}"),
+ },
+ },
+ executionDirectory: turbopath.AnchoredUnixPath("one/two/three").ToSystemPath(),
+ rootPath: turbopath.AnchoredUnixPath("one/two/three").ToSystemPath(),
+ packageMode: Multi,
+ },
+ // Scenario 3BII
+ {
+ name: "no turbo.json, lots of package.json files, and a workspace at the root that matches execution directory",
+ fs: []file{
+ {
+ path: turbopath.AnchoredUnixPath("package.json").ToSystemPath(),
+ content: []byte("{ \"workspaces\": [ \"does-not-exist\" ] }"),
+ },
+ {
+ path: turbopath.AnchoredUnixPath("one/package.json").ToSystemPath(),
+ content: []byte("{}"),
+ },
+ {
+ path: turbopath.AnchoredUnixPath("one/two/package.json").ToSystemPath(),
+ content: []byte("{}"),
+ },
+ {
+ path: turbopath.AnchoredUnixPath("one/two/three/package.json").ToSystemPath(),
+ content: []byte("{}"),
+ },
+ },
+ executionDirectory: turbopath.AnchoredUnixPath("one/two/three").ToSystemPath(),
+ rootPath: turbopath.AnchoredUnixPath("one/two/three").ToSystemPath(),
+ packageMode: Single,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ fsRoot := turbopath.AbsoluteSystemPath(t.TempDir())
+ for _, file := range tt.fs {
+ path := file.path.RestoreAnchor(fsRoot)
+ assert.NilError(t, path.Dir().MkdirAll(0777))
+ assert.NilError(t, path.WriteFile(file.content, 0777))
+ }
+
+ turboRoot, packageMode := InferRoot(tt.executionDirectory.RestoreAnchor(fsRoot))
+ if !reflect.DeepEqual(turboRoot, tt.rootPath.RestoreAnchor(fsRoot)) {
+ t.Errorf("InferRoot() turboRoot = %v, want %v", turboRoot, tt.rootPath.RestoreAnchor(fsRoot))
+ }
+ if packageMode != tt.packageMode {
+ t.Errorf("InferRoot() packageMode = %v, want %v", packageMode, tt.packageMode)
+ }
+ })
+ }
+}
diff --git a/cli/internal/packagemanager/npm.go b/cli/internal/packagemanager/npm.go
new file mode 100644
index 0000000..ce2eb8c
--- /dev/null
+++ b/cli/internal/packagemanager/npm.go
@@ -0,0 +1,59 @@
+package packagemanager
+
+import (
+ "fmt"
+
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/lockfile"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+var nodejsNpm = PackageManager{
+ Name: "nodejs-npm",
+ Slug: "npm",
+ Command: "npm",
+ Specfile: "package.json",
+ Lockfile: "package-lock.json",
+ PackageDir: "node_modules",
+ ArgSeparator: []string{"--"},
+
+ getWorkspaceGlobs: func(rootpath turbopath.AbsoluteSystemPath) ([]string, error) {
+ pkg, err := fs.ReadPackageJSON(rootpath.UntypedJoin("package.json"))
+ if err != nil {
+ return nil, fmt.Errorf("package.json: %w", err)
+ }
+ if len(pkg.Workspaces) == 0 {
+ return nil, fmt.Errorf("package.json: no workspaces found. Turborepo requires npm workspaces to be defined in the root package.json")
+ }
+ return pkg.Workspaces, nil
+ },
+
+ getWorkspaceIgnores: func(pm PackageManager, rootpath turbopath.AbsoluteSystemPath) ([]string, error) {
+ // Matches upstream values:
+ // function: https://github.com/npm/map-workspaces/blob/a46503543982cb35f51cc2d6253d4dcc6bca9b32/lib/index.js#L73
+ // key code: https://github.com/npm/map-workspaces/blob/a46503543982cb35f51cc2d6253d4dcc6bca9b32/lib/index.js#L90-L96
+ // call site: https://github.com/npm/cli/blob/7a858277171813b37d46a032e49db44c8624f78f/lib/workspaces/get-workspaces.js#L14
+ return []string{
+ "**/node_modules/**",
+ }, nil
+ },
+
+ Matches: func(manager string, version string) (bool, error) {
+ return manager == "npm", nil
+ },
+
+ detect: func(projectDirectory turbopath.AbsoluteSystemPath, packageManager *PackageManager) (bool, error) {
+ specfileExists := projectDirectory.UntypedJoin(packageManager.Specfile).FileExists()
+ lockfileExists := projectDirectory.UntypedJoin(packageManager.Lockfile).FileExists()
+
+ return (specfileExists && lockfileExists), nil
+ },
+
+ canPrune: func(cwd turbopath.AbsoluteSystemPath) (bool, error) {
+ return true, nil
+ },
+
+ UnmarshalLockfile: func(_rootPackageJSON *fs.PackageJSON, contents []byte) (lockfile.Lockfile, error) {
+ return lockfile.DecodeNpmLockfile(contents)
+ },
+}
diff --git a/cli/internal/packagemanager/packagemanager.go b/cli/internal/packagemanager/packagemanager.go
new file mode 100644
index 0000000..dc5b966
--- /dev/null
+++ b/cli/internal/packagemanager/packagemanager.go
@@ -0,0 +1,197 @@
+// Adapted from https://github.com/replit/upm
+// Copyright (c) 2019 Neoreason d/b/a Repl.it. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package packagemanager
+
+import (
+ "fmt"
+ "path/filepath"
+ "regexp"
+ "strings"
+
+ "github.com/pkg/errors"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/globby"
+ "github.com/vercel/turbo/cli/internal/lockfile"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "github.com/vercel/turbo/cli/internal/util"
+)
+
+// PackageManager is an abstraction across package managers
+type PackageManager struct {
+ // The descriptive name of the Package Manager.
+ Name string
+
+ // The unique identifier of the Package Manager.
+ Slug string
+
+ // The command used to invoke the Package Manager.
+ Command string
+
+ // The location of the package spec file used by the Package Manager.
+ Specfile string
+
+ // The location of the package lock file used by the Package Manager.
+ Lockfile string
+
+ // The directory in which package assets are stored by the Package Manager.
+ PackageDir string
+
+ // The location of the file that defines the workspace. Empty if workspaces are defined in package.json.
+ WorkspaceConfigurationPath string
+
+ // The separator that the Package Manager uses to identify arguments that
+ // should be passed through to the underlying script.
+ ArgSeparator []string
+
+ // Returns the list of workspace globs
+ getWorkspaceGlobs func(rootpath turbopath.AbsoluteSystemPath) ([]string, error)
+
+ // Returns the list of workspace ignore globs
+ getWorkspaceIgnores func(pm PackageManager, rootpath turbopath.AbsoluteSystemPath) ([]string, error)
+
+ // Detect if Turbo knows how to produce a pruned workspace for the project
+ canPrune func(cwd turbopath.AbsoluteSystemPath) (bool, error)
+
+ // Test a manager and version tuple to see if it is the Package Manager.
+ Matches func(manager string, version string) (bool, error)
+
+ // Detect if the project is using the Package Manager by inspecting the system.
+ detect func(projectDirectory turbopath.AbsoluteSystemPath, packageManager *PackageManager) (bool, error)
+
+ // Read a lockfile for a given package manager
+ UnmarshalLockfile func(rootPackageJSON *fs.PackageJSON, contents []byte) (lockfile.Lockfile, error)
+
+ // Prune the given pkgJSON to only include references to the given patches
+ prunePatches func(pkgJSON *fs.PackageJSON, patches []turbopath.AnchoredUnixPath) error
+}
+
+var packageManagers = []PackageManager{
+ nodejsYarn,
+ nodejsBerry,
+ nodejsNpm,
+ nodejsPnpm,
+ nodejsPnpm6,
+}
+
+var (
+ packageManagerPattern = `(npm|pnpm|yarn)@(\d+)\.\d+\.\d+(-.+)?`
+ packageManagerRegex = regexp.MustCompile(packageManagerPattern)
+)
+
+// ParsePackageManagerString takes a package manager version string and parses it into its constituent components
+func ParsePackageManagerString(packageManager string) (manager string, version string, err error) {
+ match := packageManagerRegex.FindString(packageManager)
+ if len(match) == 0 {
+ return "", "", fmt.Errorf("We could not parse packageManager field in package.json, expected: %s, received: %s", packageManagerPattern, packageManager)
+ }
+
+ return strings.Split(match, "@")[0], strings.Split(match, "@")[1], nil
+}
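+
+// For illustration, a caller holding a raw packageManager field might use it
+// like this (the field values here are hypothetical):
+//
+//	manager, version, err := ParsePackageManagerString("pnpm@7.14.2")
+//	// manager == "pnpm", version == "7.14.2", err == nil
+//
+//	_, _, err = ParsePackageManagerString("pnpm@7")
+//	// err != nil: the pattern requires a fully-qualified major.minor.patch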
+
+// GetPackageManager attempts all methods for identifying the package manager in use.
+func GetPackageManager(projectDirectory turbopath.AbsoluteSystemPath, pkg *fs.PackageJSON) (packageManager *PackageManager, err error) {
+ result, _ := readPackageManager(pkg)
+ if result != nil {
+ return result, nil
+ }
+
+ return detectPackageManager(projectDirectory)
+}
+
+// readPackageManager attempts to read the package manager from the package.json.
+func readPackageManager(pkg *fs.PackageJSON) (packageManager *PackageManager, err error) {
+ if pkg.PackageManager != "" {
+ manager, version, err := ParsePackageManagerString(pkg.PackageManager)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, packageManager := range packageManagers {
+ isResponsible, err := packageManager.Matches(manager, version)
+ if isResponsible && (err == nil) {
+ return &packageManager, nil
+ }
+ }
+ }
+
+ return nil, errors.New(util.Sprintf("We did not find a package manager specified in your root package.json. Please set the \"packageManager\" property in your root package.json (${UNDERLINE}https://nodejs.org/api/packages.html#packagemanager)${RESET} or run `npx @turbo/codemod add-package-manager` in the root of your monorepo."))
+}
+
+// detectPackageManager attempts to detect the package manager by inspecting the project directory state.
+func detectPackageManager(projectDirectory turbopath.AbsoluteSystemPath) (packageManager *PackageManager, err error) {
+ for _, packageManager := range packageManagers {
+ isResponsible, err := packageManager.detect(projectDirectory, &packageManager)
+ if err != nil {
+ return nil, err
+ }
+ if isResponsible {
+ return &packageManager, nil
+ }
+ }
+
+ return nil, errors.New(util.Sprintf("We did not detect an in-use package manager for your project. Please set the \"packageManager\" property in your root package.json (${UNDERLINE}https://nodejs.org/api/packages.html#packagemanager)${RESET} or run `npx @turbo/codemod add-package-manager` in the root of your monorepo."))
+}
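+
+// Taken together, resolution happens in two steps (names illustrative):
+//
+//	pm, err := GetPackageManager(repoRoot, rootPackageJSON)
+//	// 1. an explicit, matching "packageManager" field wins;
+//	// 2. otherwise each manager's detect() probes the project directory in turn.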
+
+// GetWorkspaces returns the list of package.json files for the current repository.
+func (pm PackageManager) GetWorkspaces(rootpath turbopath.AbsoluteSystemPath) ([]string, error) {
+ globs, err := pm.getWorkspaceGlobs(rootpath)
+ if err != nil {
+ return nil, err
+ }
+
+ justJsons := make([]string, len(globs))
+ for i, space := range globs {
+ justJsons[i] = filepath.Join(space, "package.json")
+ }
+
+ ignores, err := pm.getWorkspaceIgnores(pm, rootpath)
+ if err != nil {
+ return nil, err
+ }
+
+ f, err := globby.GlobFiles(rootpath.ToStringDuringMigration(), justJsons, ignores)
+ if err != nil {
+ return nil, err
+ }
+
+ return f, nil
+}
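+
+// For example, with a root package.json declaring "workspaces": ["apps/*"],
+// a hypothetical call globs apps/*/package.json beneath rootpath while
+// skipping anything matched by the manager's ignore globs:
+//
+//	workspaceFiles, err := pm.GetWorkspaces(repoRoot)
+//	// e.g. [".../apps/web/package.json", ".../apps/docs/package.json"]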
+
+// GetWorkspaceIgnores returns an array of globs not to search for workspaces.
+func (pm PackageManager) GetWorkspaceIgnores(rootpath turbopath.AbsoluteSystemPath) ([]string, error) {
+ return pm.getWorkspaceIgnores(pm, rootpath)
+}
+
+// CanPrune returns whether turbo can produce a pruned workspace. It may return an error if filesystem issues occur.
+func (pm PackageManager) CanPrune(projectDirectory turbopath.AbsoluteSystemPath) (bool, error) {
+ if pm.canPrune != nil {
+ return pm.canPrune(projectDirectory)
+ }
+ return false, nil
+}
+
+// ReadLockfile will read the applicable lockfile into memory
+func (pm PackageManager) ReadLockfile(projectDirectory turbopath.AbsoluteSystemPath, rootPackageJSON *fs.PackageJSON) (lockfile.Lockfile, error) {
+ if pm.UnmarshalLockfile == nil {
+ return nil, nil
+ }
+ contents, err := projectDirectory.UntypedJoin(pm.Lockfile).ReadFile()
+ if err != nil {
+ return nil, fmt.Errorf("reading %s: %w", pm.Lockfile, err)
+ }
+ lf, err := pm.UnmarshalLockfile(rootPackageJSON, contents)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error in %v", pm.Lockfile)
+ }
+ return lf, nil
+}
+
+// PrunePatchedPackages will alter the provided pkgJSON to only reference the provided patches
+func (pm PackageManager) PrunePatchedPackages(pkgJSON *fs.PackageJSON, patches []turbopath.AnchoredUnixPath) error {
+ if pm.prunePatches != nil {
+ return pm.prunePatches(pkgJSON, patches)
+ }
+ return nil
+}
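+
+// A usage sketch tying the lockfile pieces together (names illustrative):
+//
+//	lf, err := pm.ReadLockfile(repoRoot, rootPackageJSON)
+//	if err != nil {
+//		// the lockfile was missing or could not be decoded
+//	} else if lf == nil {
+//		// this package manager has no lockfile support wired up
+//	}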
diff --git a/cli/internal/packagemanager/packagemanager_test.go b/cli/internal/packagemanager/packagemanager_test.go
new file mode 100644
index 0000000..a5dc472
--- /dev/null
+++ b/cli/internal/packagemanager/packagemanager_test.go
@@ -0,0 +1,411 @@
+package packagemanager
+
+import (
+ "os"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "testing"
+
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "gotest.tools/v3/assert"
+)
+
+func TestParsePackageManagerString(t *testing.T) {
+ tests := []struct {
+ name string
+ packageManager string
+ wantManager string
+ wantVersion string
+ wantErr bool
+ }{
+ {
+ name: "errors with a tag version",
+ packageManager: "npm@latest",
+ wantManager: "",
+ wantVersion: "",
+ wantErr: true,
+ },
+ {
+ name: "errors with no version",
+ packageManager: "npm",
+ wantManager: "",
+ wantVersion: "",
+ wantErr: true,
+ },
+ {
+ name: "requires fully-qualified semver versions (one digit)",
+ packageManager: "npm@1",
+ wantManager: "",
+ wantVersion: "",
+ wantErr: true,
+ },
+ {
+ name: "requires fully-qualified semver versions (two digits)",
+ packageManager: "npm@1.2",
+ wantManager: "",
+ wantVersion: "",
+ wantErr: true,
+ },
+ {
+ name: "supports custom labels",
+ packageManager: "npm@1.2.3-alpha.1",
+ wantManager: "npm",
+ wantVersion: "1.2.3-alpha.1",
+ wantErr: false,
+ },
+ {
+ name: "only supports specified package managers",
+ packageManager: "pip@1.2.3",
+ wantManager: "",
+ wantVersion: "",
+ wantErr: true,
+ },
+ {
+ name: "supports npm",
+ packageManager: "npm@0.0.1",
+ wantManager: "npm",
+ wantVersion: "0.0.1",
+ wantErr: false,
+ },
+ {
+ name: "supports pnpm",
+ packageManager: "pnpm@0.0.1",
+ wantManager: "pnpm",
+ wantVersion: "0.0.1",
+ wantErr: false,
+ },
+ {
+ name: "supports yarn",
+ packageManager: "yarn@111.0.1",
+ wantManager: "yarn",
+ wantVersion: "111.0.1",
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ gotManager, gotVersion, err := ParsePackageManagerString(tt.packageManager)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("ParsePackageManagerString() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if gotManager != tt.wantManager {
+ t.Errorf("ParsePackageManagerString() got manager = %v, want manager %v", gotManager, tt.wantManager)
+ }
+ if gotVersion != tt.wantVersion {
+ t.Errorf("ParsePackageManagerString() got version = %v, want version %v", gotVersion, tt.wantVersion)
+ }
+ })
+ }
+}
+
+func TestGetPackageManager(t *testing.T) {
+ cwdRaw, err := os.Getwd()
+ assert.NilError(t, err, "os.Getwd")
+ cwd, err := fs.GetCwd(cwdRaw)
+ assert.NilError(t, err, "GetCwd")
+ tests := []struct {
+ name string
+ projectDirectory turbopath.AbsoluteSystemPath
+ pkg *fs.PackageJSON
+ want string
+ wantErr bool
+ }{
+ {
+ name: "finds npm from a package manager string",
+ projectDirectory: cwd,
+ pkg: &fs.PackageJSON{PackageManager: "npm@1.2.3"},
+ want: "nodejs-npm",
+ wantErr: false,
+ },
+ {
+ name: "finds pnpm6 from a package manager string",
+ projectDirectory: cwd,
+ pkg: &fs.PackageJSON{PackageManager: "pnpm@1.2.3"},
+ want: "nodejs-pnpm6",
+ wantErr: false,
+ },
+ {
+ name: "finds pnpm from a package manager string",
+ projectDirectory: cwd,
+ pkg: &fs.PackageJSON{PackageManager: "pnpm@7.8.9"},
+ want: "nodejs-pnpm",
+ wantErr: false,
+ },
+ {
+ name: "finds yarn from a package manager string",
+ projectDirectory: cwd,
+ pkg: &fs.PackageJSON{PackageManager: "yarn@1.2.3"},
+ want: "nodejs-yarn",
+ wantErr: false,
+ },
+ {
+ name: "finds berry from a package manager string",
+ projectDirectory: cwd,
+ pkg: &fs.PackageJSON{PackageManager: "yarn@2.3.4"},
+ want: "nodejs-berry",
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ gotPackageManager, err := GetPackageManager(tt.projectDirectory, tt.pkg)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("GetPackageManager() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if gotPackageManager.Name != tt.want {
+ t.Errorf("GetPackageManager() = %v, want %v", gotPackageManager.Name, tt.want)
+ }
+ })
+ }
+}
+
+func Test_readPackageManager(t *testing.T) {
+ tests := []struct {
+ name string
+ pkg *fs.PackageJSON
+ want string
+ wantErr bool
+ }{
+ {
+ name: "finds npm from a package manager string",
+ pkg: &fs.PackageJSON{PackageManager: "npm@1.2.3"},
+ want: "nodejs-npm",
+ wantErr: false,
+ },
+ {
+ name: "finds pnpm6 from a package manager string",
+ pkg: &fs.PackageJSON{PackageManager: "pnpm@1.2.3"},
+ want: "nodejs-pnpm6",
+ wantErr: false,
+ },
+ {
+ name: "finds pnpm from a package manager string",
+ pkg: &fs.PackageJSON{PackageManager: "pnpm@7.8.9"},
+ want: "nodejs-pnpm",
+ wantErr: false,
+ },
+ {
+ name: "finds yarn from a package manager string",
+ pkg: &fs.PackageJSON{PackageManager: "yarn@1.2.3"},
+ want: "nodejs-yarn",
+ wantErr: false,
+ },
+ {
+ name: "finds berry from a package manager string",
+ pkg: &fs.PackageJSON{PackageManager: "yarn@2.3.4"},
+ want: "nodejs-berry",
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ gotPackageManager, err := readPackageManager(tt.pkg)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("readPackageManager() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if gotPackageManager.Name != tt.want {
+ t.Errorf("readPackageManager() = %v, want %v", gotPackageManager.Name, tt.want)
+ }
+ })
+ }
+}
+
+func Test_GetWorkspaces(t *testing.T) {
+ type test struct {
+ name string
+ pm PackageManager
+ rootPath turbopath.AbsoluteSystemPath
+ want []string
+ wantErr bool
+ }
+
+ cwd, _ := os.Getwd()
+
+ repoRoot, err := fs.GetCwd(cwd)
+ assert.NilError(t, err, "GetCwd")
+ rootPath := map[string]turbopath.AbsoluteSystemPath{
+ "nodejs-npm": repoRoot.UntypedJoin("../../../examples/with-yarn"),
+ "nodejs-berry": repoRoot.UntypedJoin("../../../examples/with-yarn"),
+ "nodejs-yarn": repoRoot.UntypedJoin("../../../examples/with-yarn"),
+ "nodejs-pnpm": repoRoot.UntypedJoin("../../../examples/basic"),
+ "nodejs-pnpm6": repoRoot.UntypedJoin("../../../examples/basic"),
+ }
+
+ want := map[string][]string{
+ "nodejs-npm": {
+ filepath.ToSlash(filepath.Join(cwd, "../../../examples/with-yarn/apps/docs/package.json")),
+ filepath.ToSlash(filepath.Join(cwd, "../../../examples/with-yarn/apps/web/package.json")),
+ filepath.ToSlash(filepath.Join(cwd, "../../../examples/with-yarn/packages/eslint-config-custom/package.json")),
+ filepath.ToSlash(filepath.Join(cwd, "../../../examples/with-yarn/packages/tsconfig/package.json")),
+ filepath.ToSlash(filepath.Join(cwd, "../../../examples/with-yarn/packages/ui/package.json")),
+ },
+ "nodejs-berry": {
+ filepath.ToSlash(filepath.Join(cwd, "../../../examples/with-yarn/apps/docs/package.json")),
+ filepath.ToSlash(filepath.Join(cwd, "../../../examples/with-yarn/apps/web/package.json")),
+ filepath.ToSlash(filepath.Join(cwd, "../../../examples/with-yarn/packages/eslint-config-custom/package.json")),
+ filepath.ToSlash(filepath.Join(cwd, "../../../examples/with-yarn/packages/tsconfig/package.json")),
+ filepath.ToSlash(filepath.Join(cwd, "../../../examples/with-yarn/packages/ui/package.json")),
+ },
+ "nodejs-yarn": {
+ filepath.ToSlash(filepath.Join(cwd, "../../../examples/with-yarn/apps/docs/package.json")),
+ filepath.ToSlash(filepath.Join(cwd, "../../../examples/with-yarn/apps/web/package.json")),
+ filepath.ToSlash(filepath.Join(cwd, "../../../examples/with-yarn/packages/eslint-config-custom/package.json")),
+ filepath.ToSlash(filepath.Join(cwd, "../../../examples/with-yarn/packages/tsconfig/package.json")),
+ filepath.ToSlash(filepath.Join(cwd, "../../../examples/with-yarn/packages/ui/package.json")),
+ },
+ "nodejs-pnpm": {
+ filepath.ToSlash(filepath.Join(cwd, "../../../examples/basic/apps/docs/package.json")),
+ filepath.ToSlash(filepath.Join(cwd, "../../../examples/basic/apps/web/package.json")),
+ filepath.ToSlash(filepath.Join(cwd, "../../../examples/basic/packages/eslint-config-custom/package.json")),
+ filepath.ToSlash(filepath.Join(cwd, "../../../examples/basic/packages/tsconfig/package.json")),
+ filepath.ToSlash(filepath.Join(cwd, "../../../examples/basic/packages/ui/package.json")),
+ },
+ "nodejs-pnpm6": {
+ filepath.ToSlash(filepath.Join(cwd, "../../../examples/basic/apps/docs/package.json")),
+ filepath.ToSlash(filepath.Join(cwd, "../../../examples/basic/apps/web/package.json")),
+ filepath.ToSlash(filepath.Join(cwd, "../../../examples/basic/packages/eslint-config-custom/package.json")),
+ filepath.ToSlash(filepath.Join(cwd, "../../../examples/basic/packages/tsconfig/package.json")),
+ filepath.ToSlash(filepath.Join(cwd, "../../../examples/basic/packages/ui/package.json")),
+ },
+ }
+
+ tests := make([]test, len(packageManagers))
+ for i, packageManager := range packageManagers {
+ tests[i] = test{
+ name: packageManager.Name,
+ pm: packageManager,
+ rootPath: rootPath[packageManager.Name],
+ want: want[packageManager.Name],
+ wantErr: false,
+ }
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ gotWorkspaces, err := tt.pm.GetWorkspaces(tt.rootPath)
+
+ gotToSlash := make([]string, len(gotWorkspaces))
+ for index, workspace := range gotWorkspaces {
+ gotToSlash[index] = filepath.ToSlash(workspace)
+ }
+
+ if (err != nil) != tt.wantErr {
+ t.Errorf("GetWorkspaces() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ sort.Strings(gotToSlash)
+ if !reflect.DeepEqual(gotToSlash, tt.want) {
+ t.Errorf("GetWorkspaces() = %v, want %v", gotToSlash, tt.want)
+ }
+ })
+ }
+}
+
+func Test_GetWorkspaceIgnores(t *testing.T) {
+ type test struct {
+ name string
+ pm PackageManager
+ rootPath turbopath.AbsoluteSystemPath
+ want []string
+ wantErr bool
+ }
+
+ cwdRaw, err := os.Getwd()
+ assert.NilError(t, err, "os.Getwd")
+ cwd, err := fs.GetCwd(cwdRaw)
+ assert.NilError(t, err, "GetCwd")
+ want := map[string][]string{
+ "nodejs-npm": {"**/node_modules/**"},
+ "nodejs-berry": {"**/node_modules", "**/.git", "**/.yarn"},
+ "nodejs-yarn": {"apps/*/node_modules/**", "packages/*/node_modules/**"},
+ "nodejs-pnpm": {"**/node_modules/**", "**/bower_components/**", "packages/skip"},
+ "nodejs-pnpm6": {"**/node_modules/**", "**/bower_components/**", "packages/skip"},
+ }
+
+ tests := make([]test, len(packageManagers))
+ for i, packageManager := range packageManagers {
+ tests[i] = test{
+ name: packageManager.Name,
+ pm: packageManager,
+ rootPath: cwd.UntypedJoin("fixtures"),
+ want: want[packageManager.Name],
+ wantErr: false,
+ }
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ gotWorkspaceIgnores, err := tt.pm.GetWorkspaceIgnores(tt.rootPath)
+
+ gotToSlash := make([]string, len(gotWorkspaceIgnores))
+ for index, ignore := range gotWorkspaceIgnores {
+ gotToSlash[index] = filepath.ToSlash(ignore)
+ }
+
+ if (err != nil) != tt.wantErr {
+ t.Errorf("GetWorkspaceIgnores() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(gotToSlash, tt.want) {
+ t.Errorf("GetWorkspaceIgnores() = %v, want %v", gotToSlash, tt.want)
+ }
+ })
+ }
+}
+
+func Test_CanPrune(t *testing.T) {
+ type test struct {
+ name string
+ pm PackageManager
+ rootPath turbopath.AbsoluteSystemPath
+ want bool
+ wantErr bool
+ }
+
+ type want struct {
+ want bool
+ wantErr bool
+ }
+
+ cwdRaw, err := os.Getwd()
+ assert.NilError(t, err, "os.Getwd")
+ cwd, err := fs.GetCwd(cwdRaw)
+ assert.NilError(t, err, "GetCwd")
+ wants := map[string]want{
+ "nodejs-npm": {true, false},
+ "nodejs-berry": {false, true},
+ "nodejs-yarn": {true, false},
+ "nodejs-pnpm": {true, false},
+ "nodejs-pnpm6": {true, false},
+ }
+
+ tests := make([]test, len(packageManagers))
+ for i, packageManager := range packageManagers {
+ tests[i] = test{
+ name: packageManager.Name,
+ pm: packageManager,
+ rootPath: cwd.UntypedJoin("../../../examples/with-yarn"),
+ want: wants[packageManager.Name].want,
+ wantErr: wants[packageManager.Name].wantErr,
+ }
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ canPrune, err := tt.pm.CanPrune(tt.rootPath)
+
+ if (err != nil) != tt.wantErr {
+ t.Errorf("CanPrune() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if canPrune != tt.want {
+ t.Errorf("CanPrune() = %v, want %v", canPrune, tt.want)
+ }
+ })
+ }
+}
diff --git a/cli/internal/packagemanager/pnpm.go b/cli/internal/packagemanager/pnpm.go
new file mode 100644
index 0000000..e65a4dc
--- /dev/null
+++ b/cli/internal/packagemanager/pnpm.go
@@ -0,0 +1,168 @@
+package packagemanager
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/Masterminds/semver"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/lockfile"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "github.com/vercel/turbo/cli/internal/yaml"
+)
+
+// PnpmWorkspaces is a representation of workspace package globs found
+// in pnpm-workspace.yaml
+type PnpmWorkspaces struct {
+ Packages []string `yaml:"packages,omitempty"`
+}
+
+func readPnpmWorkspacePackages(workspaceFile turbopath.AbsoluteSystemPath) ([]string, error) {
+ bytes, err := workspaceFile.ReadFile()
+ if err != nil {
+ return nil, fmt.Errorf("%v: %w", workspaceFile, err)
+ }
+ var pnpmWorkspaces PnpmWorkspaces
+ if err := yaml.Unmarshal(bytes, &pnpmWorkspaces); err != nil {
+ return nil, fmt.Errorf("%v: %w", workspaceFile, err)
+ }
+ return pnpmWorkspaces.Packages, nil
+}
+
+func getPnpmWorkspaceGlobs(rootpath turbopath.AbsoluteSystemPath) ([]string, error) {
+ pkgGlobs, err := readPnpmWorkspacePackages(rootpath.UntypedJoin("pnpm-workspace.yaml"))
+ if err != nil {
+ return nil, err
+ }
+
+ if len(pkgGlobs) == 0 {
+ return nil, fmt.Errorf("pnpm-workspace.yaml: no packages found. Turborepo requires pnpm workspaces and thus packages to be defined in the root pnpm-workspace.yaml")
+ }
+
+ filteredPkgGlobs := []string{}
+ for _, pkgGlob := range pkgGlobs {
+ if !strings.HasPrefix(pkgGlob, "!") {
+ filteredPkgGlobs = append(filteredPkgGlobs, pkgGlob)
+ }
+ }
+ return filteredPkgGlobs, nil
+}
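+
+// Example: given a pnpm-workspace.yaml containing (contents illustrative)
+//
+//	packages:
+//	  - "apps/*"
+//	  - "packages/*"
+//	  - "!packages/skip"
+//
+// getPnpmWorkspaceGlobs returns ["apps/*", "packages/*"]; the negated entry
+// is surfaced by getPnpmWorkspaceIgnores below instead.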
+
+func getPnpmWorkspaceIgnores(pm PackageManager, rootpath turbopath.AbsoluteSystemPath) ([]string, error) {
+ // Matches upstream values:
+ // function: https://github.com/pnpm/pnpm/blob/d99daa902442e0c8ab945143ebaf5cdc691a91eb/packages/find-packages/src/index.ts#L27
+ // key code: https://github.com/pnpm/pnpm/blob/d99daa902442e0c8ab945143ebaf5cdc691a91eb/packages/find-packages/src/index.ts#L30
+ // call site: https://github.com/pnpm/pnpm/blob/d99daa902442e0c8ab945143ebaf5cdc691a91eb/packages/find-workspace-packages/src/index.ts#L32-L39
+ ignores := []string{
+ "**/node_modules/**",
+ "**/bower_components/**",
+ }
+ pkgGlobs, err := readPnpmWorkspacePackages(rootpath.UntypedJoin("pnpm-workspace.yaml"))
+ if err != nil {
+ return nil, err
+ }
+ for _, pkgGlob := range pkgGlobs {
+ if strings.HasPrefix(pkgGlob, "!") {
+ ignores = append(ignores, pkgGlob[1:])
+ }
+ }
+ return ignores, nil
+}
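+
+// Continuing the example above, getPnpmWorkspaceIgnores would return
+// ["**/node_modules/**", "**/bower_components/**", "packages/skip"]: the two
+// fixed globs plus each "!"-prefixed entry with the prefix stripped.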
+
+var nodejsPnpm = PackageManager{
+ Name: "nodejs-pnpm",
+ Slug: "pnpm",
+ Command: "pnpm",
+ Specfile: "package.json",
+ Lockfile: "pnpm-lock.yaml",
+ PackageDir: "node_modules",
+ // pnpm v7+ changed its handling of '--'. We no longer need to pass it to forward
+ // args to the script being run; in fact, doing so causes the '--' to be passed
+ // through verbatim, potentially breaking scripts that aren't expecting it.
+ // nil is fine here: ArgSeparator already has a type, so this is a typed nil.
+ // It could just as easily be []string{}, but the style guide says to prefer
+ // nil for empty slices.
+ ArgSeparator: nil,
+ WorkspaceConfigurationPath: "pnpm-workspace.yaml",
+
+ getWorkspaceGlobs: getPnpmWorkspaceGlobs,
+
+ getWorkspaceIgnores: getPnpmWorkspaceIgnores,
+
+ Matches: func(manager string, version string) (bool, error) {
+ if manager != "pnpm" {
+ return false, nil
+ }
+
+ v, err := semver.NewVersion(version)
+ if err != nil {
+ return false, fmt.Errorf("could not parse pnpm version: %w", err)
+ }
+ c, err := semver.NewConstraint(">=7.0.0")
+ if err != nil {
+ return false, fmt.Errorf("could not create constraint: %w", err)
+ }
+
+ return c.Check(v), nil
+ },
+
+ detect: func(projectDirectory turbopath.AbsoluteSystemPath, packageManager *PackageManager) (bool, error) {
+ specfileExists := projectDirectory.UntypedJoin(packageManager.Specfile).FileExists()
+ lockfileExists := projectDirectory.UntypedJoin(packageManager.Lockfile).FileExists()
+
+ return (specfileExists && lockfileExists), nil
+ },
+
+ canPrune: func(cwd turbopath.AbsoluteSystemPath) (bool, error) {
+ return true, nil
+ },
+
+ UnmarshalLockfile: func(_rootPackageJSON *fs.PackageJSON, contents []byte) (lockfile.Lockfile, error) {
+ return lockfile.DecodePnpmLockfile(contents)
+ },
+
+ prunePatches: func(pkgJSON *fs.PackageJSON, patches []turbopath.AnchoredUnixPath) error {
+ return pnpmPrunePatches(pkgJSON, patches)
+ },
+}
+
+func pnpmPrunePatches(pkgJSON *fs.PackageJSON, patches []turbopath.AnchoredUnixPath) error {
+ pkgJSON.Mu.Lock()
+ defer pkgJSON.Mu.Unlock()
+
+ keysToDelete := []string{}
+ pnpmConfig, ok := pkgJSON.RawJSON["pnpm"].(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("Invalid structure for pnpm field in package.json")
+ }
+ patchedDependencies, ok := pnpmConfig["patchedDependencies"].(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("Invalid structure for patchedDependencies field in package.json")
+ }
+
+ for dependency, untypedPatch := range patchedDependencies {
+ patch, ok := untypedPatch.(string)
+ if !ok {
+ return fmt.Errorf("Expected only strings in patchedDependencies. Got %v", untypedPatch)
+ }
+
+ inPatches := false
+
+ for _, wantedPatch := range patches {
+ if wantedPatch.ToString() == patch {
+ inPatches = true
+ break
+ }
+ }
+
+ if !inPatches {
+ keysToDelete = append(keysToDelete, dependency)
+ }
+ }
+
+ for _, key := range keysToDelete {
+ delete(patchedDependencies, key)
+ }
+
+ return nil
+}
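+
+// For example, given a package.json whose pnpm section reads (illustrative)
+//
+//	"pnpm": {
+//		"patchedDependencies": {
+//			"is-odd@3.0.1": "patches/is-odd@3.0.1.patch",
+//			"is-even@1.0.0": "patches/is-even@1.0.0.patch"
+//		}
+//	}
+//
+// calling pnpmPrunePatches with only "patches/is-odd@3.0.1.patch" keeps the
+// is-odd entry and deletes the is-even one.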
diff --git a/cli/internal/packagemanager/pnpm6.go b/cli/internal/packagemanager/pnpm6.go
new file mode 100644
index 0000000..6039966
--- /dev/null
+++ b/cli/internal/packagemanager/pnpm6.go
@@ -0,0 +1,63 @@
+package packagemanager
+
+import (
+ "fmt"
+
+ "github.com/Masterminds/semver"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/lockfile"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+// Pnpm6Workspaces is a representation of workspace package globs found
+// in pnpm-workspace.yaml
+type Pnpm6Workspaces struct {
+ Packages []string `yaml:"packages,omitempty"`
+}
+
+var nodejsPnpm6 = PackageManager{
+ Name: "nodejs-pnpm6",
+ Slug: "pnpm",
+ Command: "pnpm",
+ Specfile: "package.json",
+ Lockfile: "pnpm-lock.yaml",
+ PackageDir: "node_modules",
+ ArgSeparator: []string{"--"},
+ WorkspaceConfigurationPath: "pnpm-workspace.yaml",
+
+ getWorkspaceGlobs: getPnpmWorkspaceGlobs,
+
+ getWorkspaceIgnores: getPnpmWorkspaceIgnores,
+
+ Matches: func(manager string, version string) (bool, error) {
+ if manager != "pnpm" {
+ return false, nil
+ }
+
+ v, err := semver.NewVersion(version)
+ if err != nil {
+ return false, fmt.Errorf("could not parse pnpm version: %w", err)
+ }
+ c, err := semver.NewConstraint("<7.0.0")
+ if err != nil {
+ return false, fmt.Errorf("could not create constraint: %w", err)
+ }
+
+ return c.Check(v), nil
+ },
+
+ detect: func(projectDirectory turbopath.AbsoluteSystemPath, packageManager *PackageManager) (bool, error) {
+ specfileExists := projectDirectory.UntypedJoin(packageManager.Specfile).FileExists()
+ lockfileExists := projectDirectory.UntypedJoin(packageManager.Lockfile).FileExists()
+
+ return (specfileExists && lockfileExists), nil
+ },
+
+ canPrune: func(cwd turbopath.AbsoluteSystemPath) (bool, error) {
+ return true, nil
+ },
+
+ UnmarshalLockfile: func(_rootPackageJSON *fs.PackageJSON, contents []byte) (lockfile.Lockfile, error) {
+ return lockfile.DecodePnpmLockfile(contents)
+ },
+}
diff --git a/cli/internal/packagemanager/pnpm_test.go b/cli/internal/packagemanager/pnpm_test.go
new file mode 100644
index 0000000..c05bc43
--- /dev/null
+++ b/cli/internal/packagemanager/pnpm_test.go
@@ -0,0 +1,57 @@
+package packagemanager
+
+import (
+ "os"
+ "testing"
+
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "gotest.tools/v3/assert"
+)
+
+func pnpmPatchesSection(t *testing.T, pkgJSON *fs.PackageJSON) map[string]interface{} {
+ t.Helper()
+ pnpmSection, ok := pkgJSON.RawJSON["pnpm"].(map[string]interface{})
+ assert.Assert(t, ok)
+ patchesSection, ok := pnpmSection["patchedDependencies"].(map[string]interface{})
+ assert.Assert(t, ok)
+ return patchesSection
+}
+
+func getPnpmPackageJSON(t *testing.T) *fs.PackageJSON {
+ t.Helper()
+ rawCwd, err := os.Getwd()
+ assert.NilError(t, err)
+ cwd, err := fs.CheckedToAbsoluteSystemPath(rawCwd)
+ assert.NilError(t, err)
+ pkgJSONPath := cwd.Join("fixtures", "pnpm-patches.json")
+ pkgJSON, err := fs.ReadPackageJSON(pkgJSONPath)
+ assert.NilError(t, err)
+ return pkgJSON
+}
+
+func Test_PnpmPrunePatches_KeepsNecessary(t *testing.T) {
+ pkgJSON := getPnpmPackageJSON(t)
+ initialPatches := pnpmPatchesSection(t, pkgJSON)
+
+ assert.DeepEqual(t, initialPatches, map[string]interface{}{"is-odd@3.0.1": "patches/is-odd@3.0.1.patch"})
+
+ err := pnpmPrunePatches(pkgJSON, []turbopath.AnchoredUnixPath{turbopath.AnchoredUnixPath("patches/is-odd@3.0.1.patch")})
+ assert.NilError(t, err)
+
+ newPatches := pnpmPatchesSection(t, pkgJSON)
+ assert.DeepEqual(t, newPatches, map[string]interface{}{"is-odd@3.0.1": "patches/is-odd@3.0.1.patch"})
+}
+
+func Test_PnpmPrunePatches_RemovesExtra(t *testing.T) {
+ pkgJSON := getPnpmPackageJSON(t)
+ initialPatches := pnpmPatchesSection(t, pkgJSON)
+
+ assert.DeepEqual(t, initialPatches, map[string]interface{}{"is-odd@3.0.1": "patches/is-odd@3.0.1.patch"})
+
+ err := pnpmPrunePatches(pkgJSON, nil)
+ assert.NilError(t, err)
+
+ newPatches := pnpmPatchesSection(t, pkgJSON)
+ assert.DeepEqual(t, newPatches, map[string]interface{}{})
+}
diff --git a/cli/internal/packagemanager/yarn.go b/cli/internal/packagemanager/yarn.go
new file mode 100644
index 0000000..8779c5f
--- /dev/null
+++ b/cli/internal/packagemanager/yarn.go
@@ -0,0 +1,116 @@
+package packagemanager
+
+import (
+ "errors"
+ "fmt"
+ "os/exec"
+ "path/filepath"
+ "strings"
+
+ "github.com/Masterminds/semver"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/lockfile"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+// NoWorkspacesFoundError is a custom error type so that callers can detect it (e.g. with errors.As) and branch on it
+type NoWorkspacesFoundError struct{}
+
+func (e *NoWorkspacesFoundError) Error() string {
+ return "package.json: no workspaces found. Turborepo requires Yarn workspaces to be defined in the root package.json"
+}
+
+var nodejsYarn = PackageManager{
+ Name: "nodejs-yarn",
+ Slug: "yarn",
+ Command: "yarn",
+ Specfile: "package.json",
+ Lockfile: "yarn.lock",
+ PackageDir: "node_modules",
+ ArgSeparator: []string{"--"},
+
+ getWorkspaceGlobs: func(rootpath turbopath.AbsoluteSystemPath) ([]string, error) {
+ pkg, err := fs.ReadPackageJSON(rootpath.UntypedJoin("package.json"))
+ if err != nil {
+ return nil, fmt.Errorf("package.json: %w", err)
+ }
+ if len(pkg.Workspaces) == 0 {
+ return nil, &NoWorkspacesFoundError{}
+ }
+ return pkg.Workspaces, nil
+ },
+
+ getWorkspaceIgnores: func(pm PackageManager, rootpath turbopath.AbsoluteSystemPath) ([]string, error) {
+ // function: https://github.com/yarnpkg/yarn/blob/3119382885ea373d3c13d6a846de743eca8c914b/src/config.js#L799
+
+ // Yarn is unique in its handling of ignore patterns.
+ // The only time it does globbing is for package.json or yarn.json, and it scopes the search to each workspace.
+ // For example: `apps/*/node_modules/**/+(package.json|yarn.json)`
+ // The `extglob` `+(package.json|yarn.json)` (from micromatch) after node_modules/** is redundant.
+
+ globs, err := pm.getWorkspaceGlobs(rootpath)
+ if err != nil {
+ // In case of a non-monorepo, the workspaces field is empty and only node_modules in the root should be ignored
+ var e *NoWorkspacesFoundError
+ if errors.As(err, &e) {
+ return []string{"node_modules/**"}, nil
+ }
+
+ return nil, err
+ }
+
+ ignores := make([]string, len(globs))
+
+ for i, glob := range globs {
+ ignores[i] = filepath.Join(glob, "/node_modules/**")
+ }
+
+ return ignores, nil
+ },
+
+ canPrune: func(cwd turbopath.AbsoluteSystemPath) (bool, error) {
+ return true, nil
+ },
+
+ // Versions older than 2.0 are yarn; from 2.0 onward they are berry
+ Matches: func(manager string, version string) (bool, error) {
+ if manager != "yarn" {
+ return false, nil
+ }
+
+ v, err := semver.NewVersion(version)
+ if err != nil {
+ return false, fmt.Errorf("could not parse yarn version: %w", err)
+ }
+ c, err := semver.NewConstraint("<2.0.0-0")
+ if err != nil {
+ return false, fmt.Errorf("could not create constraint: %w", err)
+ }
+
+ return c.Check(v), nil
+ },
+
+ // Detection for yarn needs to identify which version of yarn is running on the system.
+ detect: func(projectDirectory turbopath.AbsoluteSystemPath, packageManager *PackageManager) (bool, error) {
+ specfileExists := projectDirectory.UntypedJoin(packageManager.Specfile).FileExists()
+ lockfileExists := projectDirectory.UntypedJoin(packageManager.Lockfile).FileExists()
+
+ // Short-circuit, definitely not Yarn.
+ if !specfileExists || !lockfileExists {
+ return false, nil
+ }
+
+ cmd := exec.Command("yarn", "--version")
+ cmd.Dir = projectDirectory.ToString()
+ out, err := cmd.Output()
+ if err != nil {
+ return false, fmt.Errorf("could not detect yarn version: %w", err)
+ }
+
+ return packageManager.Matches(packageManager.Slug, strings.TrimSpace(string(out)))
+ },
+
+ UnmarshalLockfile: func(_rootPackageJSON *fs.PackageJSON, contents []byte) (lockfile.Lockfile, error) {
+ return lockfile.DecodeYarnLockfile(contents)
+ },
+}
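+
+// Version-based dispatch example (versions illustrative): "yarn@1.22.19"
+// matches nodejsYarn via the "<2.0.0-0" constraint, while "yarn@3.2.1" falls
+// through to nodejsBerry's Matches instead.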
diff --git a/cli/internal/process/child.go b/cli/internal/process/child.go
new file mode 100644
index 0000000..1c3e6e7
--- /dev/null
+++ b/cli/internal/process/child.go
@@ -0,0 +1,406 @@
+package process
+
+/**
+ * Code in this file is based on the source code at
+ * https://github.com/hashicorp/consul-template/tree/3ea7d99ad8eff17897e0d63dac86d74770170bb8/child/child.go
+ *
+ * Major changes include removing the ability to restart a child process,
+ * requiring a fully-formed exec.Cmd to be passed in, and including cmd.Dir
+ * in the description of a child process.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "math/rand"
+ "os"
+ "os/exec"
+ "strings"
+ "sync"
+ "syscall"
+ "time"
+
+ "github.com/hashicorp/go-hclog"
+)
+
+func init() {
+ // Seed the default rand Source with current time to produce better random
+ // numbers used with splay
+ rand.Seed(time.Now().UnixNano())
+}
+
+var (
+ // ErrMissingCommand is the error returned when no command is specified
+ // to run.
+ ErrMissingCommand = errors.New("missing command")
+
+ // ExitCodeOK is the default OK exit code.
+ ExitCodeOK = 0
+
+ // ExitCodeError is the default error code returned when the child exits with
+ // an error without a more specific code.
+ ExitCodeError = 127
+)
+
+// Child is a wrapper around a child process which can be used to send signals
+// and manage the process's lifecycle.
+type Child struct {
+ sync.RWMutex
+
+ timeout time.Duration
+
+ killSignal os.Signal
+ killTimeout time.Duration
+
+ splay time.Duration
+
+ // cmd is the actual child process under management.
+ cmd *exec.Cmd
+
+ // exitCh is the channel where the process's exit code will be returned.
+ exitCh chan int
+
+ // stopLock is the mutex to lock when stopping. stopCh is the circuit breaker
+ // to force-terminate any waiting splays to kill the process now. stopped is
+ // a boolean that tells us if we have previously been stopped.
+ stopLock sync.RWMutex
+ stopCh chan struct{}
+ stopped bool
+
+ // whether to set process group id or not (default on)
+ setpgid bool
+
+ Label string
+
+ logger hclog.Logger
+}
+
+// NewInput is input to the NewChild function.
+type NewInput struct {
+ // Cmd is the unstarted, preconfigured command to run
+ Cmd *exec.Cmd
+
+ // Timeout is the maximum amount of time to allow the command to execute. If
+ // set to 0, the command is permitted to run infinitely.
+ Timeout time.Duration
+
+ // KillSignal is the signal to send to gracefully kill this process. This
+ // value may be nil.
+ KillSignal os.Signal
+
+ // KillTimeout is the amount of time to wait for the process to gracefully
+ // terminate before force-killing.
+ KillTimeout time.Duration
+
+ // Splay is the maximum random amount of time to wait before sending signals.
+ // This option helps reduce the thundering herd problem by effectively
+ // sleeping for a random amount of time before sending the signal. This
+ // prevents multiple processes from all signaling at the same time. This value
+ // may be zero (which disables the splay entirely).
+ Splay time.Duration
+
+ // Logger receives debug log lines about the process state and transitions
+ Logger hclog.Logger
+}
+
+// newChild creates a new child process for management, with high-level APIs
+// for sending signals to the child process and gracefully terminating it.
+func newChild(i NewInput) (*Child, error) {
+ // exec.Command stores the command itself as Args[0], so joining Cmd.Args
+ // yields the full command line, command included.
+ label := fmt.Sprintf("(%v) %v", i.Cmd.Dir, strings.Join(i.Cmd.Args, " "))
+ child := &Child{
+ cmd: i.Cmd,
+ timeout: i.Timeout,
+ killSignal: i.KillSignal,
+ killTimeout: i.KillTimeout,
+ splay: i.Splay,
+ stopCh: make(chan struct{}, 1),
+ setpgid: true,
+ Label: label,
+ logger: i.Logger.Named(label),
+ }
+
+ return child, nil
+}
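+
+// A minimal usage sketch (the command and settings are illustrative):
+//
+//	cmd := exec.Command("sleep", "1")
+//	child, err := newChild(NewInput{
+//		Cmd:         cmd,
+//		KillSignal:  os.Interrupt,
+//		KillTimeout: 5 * time.Second,
+//		Logger:      hclog.Default(),
+//	})
+//	if err == nil && child.Start() == nil {
+//		exitCode := <-child.ExitCh() // blocks until the process exits
+//		_ = exitCode
+//	}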
+
+// ExitCh returns the current exit channel for this child process. The channel
+// is replaced when the process is started, so callers must not cache it.
+func (c *Child) ExitCh() <-chan int {
+ c.RLock()
+ defer c.RUnlock()
+ return c.exitCh
+}
+
+// Pid returns the pid of the child process. If no child process exists, 0 is
+// returned.
+func (c *Child) Pid() int {
+ c.RLock()
+ defer c.RUnlock()
+ return c.pid()
+}
+
+// Command returns the human-formatted command with arguments.
+func (c *Child) Command() string {
+ return c.Label
+}
+
+// Start begins execution of the child process. Errors that occur before the
+// command starts are returned directly; any failure after execution begins is
+// reported as a non-zero exit code on the channel returned by ExitCh.
+func (c *Child) Start() error {
+ // log.Printf("[INFO] (child) spawning: %s", c.Command())
+ c.Lock()
+ defer c.Unlock()
+ return c.start()
+}
+
+// Signal sends the signal to the child process, returning any errors that
+// occur.
+func (c *Child) Signal(s os.Signal) error {
+ c.logger.Debug("receiving signal %q", s.String())
+ c.RLock()
+ defer c.RUnlock()
+ return c.signal(s)
+}
+
+// Kill sends the kill signal to the child process and waits for successful
+// termination. If no kill signal is defined, the process is killed with the
+// most aggressive kill signal. If the process does not gracefully stop within
+// the provided KillTimeout, the process is force-killed. If a splay was
+// provided, this function will sleep for a random period of time between 0 and
+// the provided splay value to reduce the thundering herd problem. This function
+// does not return any errors because it guarantees the process will be dead by
+// the return of the function call.
+func (c *Child) Kill() {
+ c.logger.Debug("killing process")
+ c.Lock()
+ defer c.Unlock()
+ c.kill(false)
+}
+
+// Stop behaves almost identically to Kill except that it suppresses future processes
+// from being started by this child, and it prevents the killing of the child
+// process from sending its value back up the exit channel. This is useful
+// when doing a graceful shutdown of an application.
+func (c *Child) Stop() {
+ c.internalStop(false)
+}
+
+// StopImmediately behaves almost identically to Stop except it does not wait
+// for any configured splay. This is used to perform a fast shutdown of the
+// process manager and its children when a kill signal is received.
+func (c *Child) StopImmediately() {
+ c.internalStop(true)
+}
+
+func (c *Child) internalStop(immediately bool) {
+ c.Lock()
+ defer c.Unlock()
+
+ c.stopLock.Lock()
+ defer c.stopLock.Unlock()
+ if c.stopped {
+ return
+ }
+ c.kill(immediately)
+ close(c.stopCh)
+ c.stopped = true
+}
+
+func (c *Child) start() error {
+ setSetpgid(c.cmd, c.setpgid)
+ if err := c.cmd.Start(); err != nil {
+ return err
+ }
+
+ // Create a new exitCh so that previously invoked commands (if any) don't
+ // cause us to exit, and start a goroutine to wait for that process to end.
+ exitCh := make(chan int, 1)
+ go func() {
+ var code int
+ // It's possible that kill is called before we even
+ // manage to get here. Make sure we still have a valid
+ // cmd before waiting on it.
+ c.RLock()
+ var cmd = c.cmd
+ c.RUnlock()
+ var err error
+ if cmd != nil {
+ err = cmd.Wait()
+ }
+ if err == nil {
+ code = ExitCodeOK
+ } else {
+ code = ExitCodeError
+ if exiterr, ok := err.(*exec.ExitError); ok {
+ if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
+ code = status.ExitStatus()
+ }
+ }
+ }
+
+ // If the child is in the process of killing, do not send a response back
+ // down the exit channel.
+ c.stopLock.RLock()
+ defer c.stopLock.RUnlock()
+ if !c.stopped {
+ select {
+ case <-c.stopCh:
+ case exitCh <- code:
+ }
+ }
+
+ close(exitCh)
+ }()
+
+ c.exitCh = exitCh
+
+ // If a timeout was given, start the timer to wait for the child to exit
+ if c.timeout != 0 {
+ select {
+ case code := <-exitCh:
+ if code != 0 {
+ return fmt.Errorf(
+ "command exited with a non-zero exit status:\n"+
+ "\n"+
+ " %s\n"+
+ "\n"+
+ "This is assumed to be a failure. Please ensure the command\n"+
+ "exits with a zero exit status.",
+ c.Command(),
+ )
+ }
+ case <-time.After(c.timeout):
+ // Force-kill the process
+ c.stopLock.Lock()
+ defer c.stopLock.Unlock()
+ if c.cmd != nil && c.cmd.Process != nil {
+ c.cmd.Process.Kill()
+ }
+
+ return fmt.Errorf(
+ "command did not exit within %q:\n"+
+ "\n"+
+ " %s\n"+
+ "\n"+
+ "Commands must exit in a timely manner in order for processing to\n"+
+ "continue. Consider using a process supervisor or utilizing the\n"+
+ "built-in exec mode instead.",
+ c.timeout,
+ c.Command(),
+ )
+ }
+ }
+
+ return nil
+}
+
+func (c *Child) pid() int {
+ if !c.running() {
+ return 0
+ }
+ return c.cmd.Process.Pid
+}
+
+func (c *Child) signal(s os.Signal) error {
+ if !c.running() {
+ return nil
+ }
+
+ sig, ok := s.(syscall.Signal)
+ if !ok {
+ return fmt.Errorf("bad signal: %s", s)
+ }
+ pid := c.cmd.Process.Pid
+ if c.setpgid {
+ // a negative pid tells kill(2) to signal the whole process group (pgid)
+ pid = -(pid)
+ }
+ // cross platform way to signal process/process group
+ p, err := os.FindProcess(pid)
+ if err != nil {
+ return err
+ }
+ return p.Signal(sig)
+}
+
+// kill sends the signal to kill the process using the configured signal
+// if set, else the default system signal
+func (c *Child) kill(immediately bool) {
+
+ if !c.running() {
+ c.logger.Debug("Kill() called but process dead; not waiting for splay.")
+ return
+ } else if immediately {
+ c.logger.Debug("Kill() called but performing immediate shutdown; not waiting for splay.")
+ } else {
+ c.logger.Debug("Kill(%v) called", immediately)
+ select {
+ case <-c.stopCh:
+ case <-c.randomSplay():
+ }
+ }
+
+ var exited bool
+ defer func() {
+ if !exited {
+ c.logger.Debug("PKill")
+ c.cmd.Process.Kill()
+ }
+ c.cmd = nil
+ }()
+
+ if c.killSignal == nil {
+ return
+ }
+
+ if err := c.signal(c.killSignal); err != nil {
+ c.logger.Debug("Kill failed: %s", err)
+ if processNotFoundErr(err) {
+ exited = true // checked in defer
+ }
+ return
+ }
+
+ killCh := make(chan struct{}, 1)
+ go func() {
+ defer close(killCh)
+ c.cmd.Process.Wait()
+ }()
+
+ select {
+ case <-c.stopCh:
+ case <-killCh:
+ exited = true
+ case <-time.After(c.killTimeout):
+ c.logger.Debug("timeout")
+ }
+}
+
+func (c *Child) running() bool {
+ select {
+ case <-c.exitCh:
+ return false
+ default:
+ }
+ return c.cmd != nil && c.cmd.Process != nil
+}
+
+func (c *Child) randomSplay() <-chan time.Time {
+ if c.splay == 0 {
+ return time.After(0)
+ }
+
+ ns := c.splay.Nanoseconds()
+ offset := rand.Int63n(ns)
+ t := time.Duration(offset)
+
+ c.logger.Debug("waiting %.2fs for random splay", t.Seconds())
+
+ return time.After(t)
+}
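+
+// Example: with splay = 2 * time.Second, randomSplay returns a timer that
+// fires after a uniformly random duration in [0s, 2s), so a fleet of children
+// being killed together do not all receive their signals at the same instant.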
diff --git a/cli/internal/process/child_nix_test.go b/cli/internal/process/child_nix_test.go
new file mode 100644
index 0000000..7311d18
--- /dev/null
+++ b/cli/internal/process/child_nix_test.go
@@ -0,0 +1,190 @@
+//go:build !windows
+// +build !windows
+
+package process
+
+/**
+ * Code in this file is based on the source code at
+ * https://github.com/hashicorp/consul-template/tree/3ea7d99ad8eff17897e0d63dac86d74770170bb8/child/child_test.go
+ *
+ * Tests in this file use signals or pgid features not available on windows
+ */
+
+import (
+ "os/exec"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/go-gatedio"
+)
+
+func TestSignal(t *testing.T) {
+
+ c := testChild(t)
+ cmd := exec.Command("sh", "-c", "trap 'echo one; exit' USR1; while true; do sleep 0.2; done")
+ c.cmd = cmd
+
+ out := gatedio.NewByteBuffer()
+ c.cmd.Stdout = out
+
+ if err := c.Start(); err != nil {
+ t.Fatal(err)
+ }
+ defer c.Stop()
+
+ // For some reason the shell doesn't start immediately
+ time.Sleep(fileWaitSleepDelay)
+
+ if err := c.Signal(syscall.SIGUSR1); err != nil {
+ t.Fatal(err)
+ }
+
+ // Give the output buffer time to flush
+ time.Sleep(fileWaitSleepDelay)
+
+ expected := "one\n"
+ if out.String() != expected {
+ t.Errorf("expected %q to be %q", out.String(), expected)
+ }
+}
+
+func TestStop_childAlreadyDead(t *testing.T) {
+ c := testChild(t)
+ c.cmd = exec.Command("sh", "-c", "exit 1")
+ c.splay = 100 * time.Second
+ c.killSignal = syscall.SIGTERM
+
+ if err := c.Start(); err != nil {
+ t.Fatal(err)
+ }
+
+ // For some reason the shell doesn't start immediately
+ time.Sleep(fileWaitSleepDelay)
+
+ killStartTime := time.Now()
+ c.Stop()
+ killEndTime := time.Now()
+
+ if killEndTime.Sub(killStartTime) > fileWaitSleepDelay {
+ t.Error("expected not to wait for splay")
+ }
+}
+
+func TestSignal_noProcess(t *testing.T) {
+
+ c := testChild(t)
+ if err := c.Signal(syscall.SIGUSR1); err != nil {
+ // Just assert there is no error
+ t.Fatal(err)
+ }
+}
+
+func TestKill_signal(t *testing.T) {
+
+ c := testChild(t)
+ cmd := exec.Command("sh", "-c", "trap 'echo one; exit' USR1; while true; do sleep 0.2; done")
+ c.killSignal = syscall.SIGUSR1
+
+ out := gatedio.NewByteBuffer()
+ cmd.Stdout = out
+ c.cmd = cmd
+
+ if err := c.Start(); err != nil {
+ t.Fatal(err)
+ }
+ defer c.Stop()
+
+ // For some reason the shell doesn't start immediately
+ time.Sleep(fileWaitSleepDelay)
+
+ c.Kill()
+
+ // Give the output buffer time to flush
+ time.Sleep(fileWaitSleepDelay)
+
+ expected := "one\n"
+ if out.String() != expected {
+ t.Errorf("expected %q to be %q", out.String(), expected)
+ }
+}
+
+func TestKill_noProcess(t *testing.T) {
+ c := testChild(t)
+ c.killSignal = syscall.SIGUSR1
+ c.Kill()
+}
+
+func TestStop_noWaitForSplay(t *testing.T) {
+ c := testChild(t)
+ c.cmd = exec.Command("sh", "-c", "trap 'echo one; exit' USR1; while true; do sleep 0.2; done")
+ c.splay = 100 * time.Second
+ c.killSignal = syscall.SIGUSR1
+
+ out := gatedio.NewByteBuffer()
+ c.cmd.Stdout = out
+
+ if err := c.Start(); err != nil {
+ t.Fatal(err)
+ }
+
+ // For some reason bash doesn't start immediately
+ time.Sleep(fileWaitSleepDelay)
+
+ killStartTime := time.Now()
+ c.StopImmediately()
+ killEndTime := time.Now()
+
+ expected := "one\n"
+ if out.String() != expected {
+ t.Errorf("expected %q to be %q", out.String(), expected)
+ }
+
+ if killEndTime.Sub(killStartTime) > fileWaitSleepDelay {
+ t.Error("expected not to wait for splay")
+ }
+}
+
+func TestSetpgid(t *testing.T) {
+ t.Run("true", func(t *testing.T) {
+ c := testChild(t)
+ c.cmd = exec.Command("sh", "-c", "while true; do sleep 0.2; done")
+ // default, but to be explicit for the test
+ c.setpgid = true
+
+ if err := c.Start(); err != nil {
+ t.Fatal(err)
+ }
+ defer c.Stop()
+
+ // when setpgid is true, the pid and pgid should be the same
+ gpid, err := syscall.Getpgid(c.Pid())
+ if err != nil {
+ t.Fatal("Getpgid error:", err)
+ }
+
+ if c.Pid() != gpid {
+ t.Fatal("pid and gpid should match")
+ }
+ })
+ t.Run("false", func(t *testing.T) {
+ c := testChild(t)
+ c.cmd = exec.Command("sh", "-c", "while true; do sleep 0.2; done")
+ c.setpgid = false
+
+ if err := c.Start(); err != nil {
+ t.Fatal(err)
+ }
+ defer c.Stop()
+
+ // when setpgid is false, the pid and pgid should NOT match
+ gpid, err := syscall.Getpgid(c.Pid())
+ if err != nil {
+ t.Fatal("Getpgid error:", err)
+ }
+
+ if c.Pid() == gpid {
+ t.Fatal("pid and gpid should NOT match")
+ }
+ })
+}
diff --git a/cli/internal/process/child_test.go b/cli/internal/process/child_test.go
new file mode 100644
index 0000000..63dee22
--- /dev/null
+++ b/cli/internal/process/child_test.go
@@ -0,0 +1,193 @@
+package process
+
+/**
+ * Code in this file is based on the source code at
+ * https://github.com/hashicorp/consul-template/tree/3ea7d99ad8eff17897e0d63dac86d74770170bb8/child/child_test.go
+ *
+ * Major changes include supporting api changes in child.go and removing
+ * tests for reloading, which was removed in child.go
+ */
+
+import (
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/go-gatedio"
+ "github.com/hashicorp/go-hclog"
+)
+
+const fileWaitSleepDelay = 150 * time.Millisecond
+
+func testChild(t *testing.T) *Child {
+ cmd := exec.Command("echo", "hello", "world")
+ cmd.Stdout = ioutil.Discard
+ cmd.Stderr = ioutil.Discard
+ c, err := newChild(NewInput{
+ Cmd: cmd,
+ KillSignal: os.Kill,
+ KillTimeout: 2 * time.Second,
+ Splay: 0 * time.Second,
+ Logger: hclog.Default(),
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ return c
+}
+
+func TestNew(t *testing.T) {
+
+ stdin := gatedio.NewByteBuffer()
+ stdout := gatedio.NewByteBuffer()
+ stderr := gatedio.NewByteBuffer()
+ command := "echo"
+ args := []string{"hello", "world"}
+ env := []string{"a=b", "c=d"}
+ killSignal := os.Kill
+ killTimeout := fileWaitSleepDelay
+ splay := fileWaitSleepDelay
+
+ cmd := exec.Command(command, args...)
+ cmd.Stdin = stdin
+ cmd.Stderr = stderr
+ cmd.Stdout = stdout
+ cmd.Env = env
+ c, err := newChild(NewInput{
+ Cmd: cmd,
+ KillSignal: killSignal,
+ KillTimeout: killTimeout,
+ Splay: splay,
+ Logger: hclog.Default(),
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if c.killSignal != killSignal {
+ t.Errorf("expected %q to be %q", c.killSignal, killSignal)
+ }
+
+ if c.killTimeout != killTimeout {
+ t.Errorf("expected %q to be %q", c.killTimeout, killTimeout)
+ }
+
+ if c.splay != splay {
+ t.Errorf("expected %q to be %q", c.splay, splay)
+ }
+
+ if c.stopCh == nil {
+ t.Errorf("expected %#v to be", c.stopCh)
+ }
+}
+
+func TestExitCh_noProcess(t *testing.T) {
+
+ c := testChild(t)
+ ch := c.ExitCh()
+ if ch != nil {
+ t.Errorf("expected %#v to be nil", ch)
+ }
+}
+
+func TestExitCh(t *testing.T) {
+
+ c := testChild(t)
+ if err := c.Start(); err != nil {
+ t.Fatal(err)
+ }
+ println("Started")
+ defer c.Stop()
+
+ ch := c.ExitCh()
+ if ch == nil {
+ t.Error("expected ch to exist")
+ }
+}
+
+func TestPid_noProcess(t *testing.T) {
+
+ c := testChild(t)
+ pid := c.Pid()
+ if pid != 0 {
+ t.Errorf("expected %q to be 0", pid)
+ }
+}
+
+func TestPid(t *testing.T) {
+
+ c := testChild(t)
+ if err := c.Start(); err != nil {
+ t.Fatal(err)
+ }
+ defer c.Stop()
+
+ pid := c.Pid()
+ if pid == 0 {
+ t.Error("expected pid to not be 0")
+ }
+}
+
+func TestStart(t *testing.T) {
+
+ c := testChild(t)
+
+ // Set our own reader and writer so we can verify they are wired to the child.
+ stdin := gatedio.NewByteBuffer()
+ stdout := gatedio.NewByteBuffer()
+ stderr := gatedio.NewByteBuffer()
+ // Custom env and command
+ env := []string{"a=b", "c=d"}
+ cmd := exec.Command("env")
+ cmd.Stdin = stdin
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
+ cmd.Env = env
+ c.cmd = cmd
+
+ if err := c.Start(); err != nil {
+ t.Fatal(err)
+ }
+ defer c.Stop()
+
+ select {
+ case <-c.ExitCh():
+ case <-time.After(fileWaitSleepDelay):
+ t.Fatal("process should have exited")
+ }
+
+ output := stdout.String()
+ for _, envVar := range env {
+ if !strings.Contains(output, envVar) {
+ t.Errorf("expected to find %q in %q", envVar, output)
+ }
+ }
+}
+
+func TestKill_noSignal(t *testing.T) {
+
+ c := testChild(t)
+ c.cmd = exec.Command("sh", "-c", "while true; do sleep 0.2; done")
+ c.killTimeout = 20 * time.Millisecond
+ c.killSignal = nil
+
+ if err := c.Start(); err != nil {
+ t.Fatal(err)
+ }
+ defer c.Stop()
+
+ // For some reason the shell doesn't start immediately
+ time.Sleep(fileWaitSleepDelay)
+
+ c.Kill()
+
+ // Give the output buffer time to flush
+ time.Sleep(fileWaitSleepDelay)
+
+ if c.cmd != nil {
+ t.Errorf("expected cmd to be nil")
+ }
+}
diff --git a/cli/internal/process/manager.go b/cli/internal/process/manager.go
new file mode 100644
index 0000000..0488a29
--- /dev/null
+++ b/cli/internal/process/manager.go
@@ -0,0 +1,120 @@
+package process
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "os/exec"
+ "sync"
+ "time"
+
+ "github.com/hashicorp/go-hclog"
+)
+
+// ErrClosing is returned when the process manager is in the process of closing,
+// meaning that no more child processes can be Exec'd, and existing, non-failed
+// child processes will be stopped with this error.
+var ErrClosing = errors.New("process manager is already closing")
+
+// ChildExit is returned when a child process exits with a non-zero exit code
+type ChildExit struct {
+ ExitCode int
+ Command string
+}
+
+func (ce *ChildExit) Error() string {
+ return fmt.Sprintf("command %s exited (%d)", ce.Command, ce.ExitCode)
+}
+
+// Manager tracks all of the child processes that have been spawned
+type Manager struct {
+ done bool
+ children map[*Child]struct{}
+ mu sync.Mutex
+ doneCh chan struct{}
+ logger hclog.Logger
+}
+
+// NewManager creates a new properly-initialized Manager instance
+func NewManager(logger hclog.Logger) *Manager {
+ return &Manager{
+ children: make(map[*Child]struct{}),
+ doneCh: make(chan struct{}),
+ logger: logger,
+ }
+}
+
+// Exec spawns a child process to run the given command, then blocks
+// until it completes. Returns a nil error if the child process finished
+// successfully, ErrClosing if the manager closed during execution, and
+// a ChildExit error if the child process exited with a non-zero exit code.
+func (m *Manager) Exec(cmd *exec.Cmd) error {
+ m.mu.Lock()
+ if m.done {
+ m.mu.Unlock()
+ return ErrClosing
+ }
+
+ child, err := newChild(NewInput{
+ Cmd: cmd,
+ // Run forever by default
+ Timeout: 0,
+ // When it's time to exit, give a 10 second timeout
+ KillTimeout: 10 * time.Second,
+ // Send SIGINT to stop children
+ KillSignal: os.Interrupt,
+ Logger: m.logger,
+ })
+ if err != nil {
+ return err
+ }
+
+ m.children[child] = struct{}{}
+ m.mu.Unlock()
+ err = child.Start()
+ if err != nil {
+ m.mu.Lock()
+ delete(m.children, child)
+ m.mu.Unlock()
+ return err
+ }
+ err = nil
+ exitCode, ok := <-child.ExitCh()
+ if !ok {
+ err = ErrClosing
+ } else if exitCode != ExitCodeOK {
+ err = &ChildExit{
+ ExitCode: exitCode,
+ Command: child.Command(),
+ }
+ }
+
+ m.mu.Lock()
+ delete(m.children, child)
+ m.mu.Unlock()
+ return err
+}
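+
+// For illustration, a caller might drive the manager like this (the command
+// is arbitrary):
+//
+//	mgr := NewManager(hclog.Default())
+//	if err := mgr.Exec(exec.Command("go", "build", "./...")); err != nil {
+//		var exitErr *ChildExit
+//		if errors.As(err, &exitErr) {
+//			// the child ran but exited non-zero
+//		} else if errors.Is(err, ErrClosing) {
+//			// the manager shut down mid-run
+//		}
+//	}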
+
+// Close sends SIGINT to all child processes if it hasn't been done yet,
+// and in either case blocks until they all exit or timeout
+func (m *Manager) Close() {
+ m.mu.Lock()
+ if m.done {
+ m.mu.Unlock()
+ <-m.doneCh
+ return
+ }
+ wg := sync.WaitGroup{}
+ m.done = true
+ for child := range m.children {
+ child := child
+ wg.Add(1)
+ go func() {
+ child.Stop()
+ wg.Done()
+ }()
+ }
+ m.mu.Unlock()
+ wg.Wait()
+ close(m.doneCh)
+}
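+
+// Close pairs with Exec: the first caller marks the manager done, signals
+// every child once, and closes doneCh; any concurrent or later callers simply
+// block on doneCh until all children have exited or timed out.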
diff --git a/cli/internal/process/manager_test.go b/cli/internal/process/manager_test.go
new file mode 100644
index 0000000..fb40ffa
--- /dev/null
+++ b/cli/internal/process/manager_test.go
@@ -0,0 +1,94 @@
+package process
+
+import (
+ "errors"
+ "os/exec"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/go-gatedio"
+ "github.com/hashicorp/go-hclog"
+)
+
+func newManager() *Manager {
+ return NewManager(hclog.Default())
+}
+
+func TestExec_simple(t *testing.T) {
+ mgr := newManager()
+
+ out := gatedio.NewByteBuffer()
+ cmd := exec.Command("env")
+ cmd.Stdout = out
+
+ err := mgr.Exec(cmd)
+ if err != nil {
+ t.Errorf("expected %q to be nil", err)
+ }
+
+ output := out.String()
+ if output == "" {
+ t.Error("expected output from running 'env', got empty string")
+ }
+}
+
+func TestClose(t *testing.T) {
+ mgr := newManager()
+
+ wg := sync.WaitGroup{}
+ tasks := 4
+ errors := make([]error, tasks)
+ start := time.Now()
+ for i := 0; i < tasks; i++ {
+ wg.Add(1)
+ go func(index int) {
+ cmd := exec.Command("sleep", "0.5")
+ err := mgr.Exec(cmd)
+ if err != nil {
+ errors[index] = err
+ }
+ wg.Done()
+ }(i)
+ }
+ // let processes kick off
+ time.Sleep(50 * time.Millisecond)
+ mgr.Close()
+ end := time.Now()
+ wg.Wait()
+ duration := end.Sub(start)
+ if duration >= 500*time.Millisecond {
+ t.Errorf("expected to close, total time was %q", duration)
+ }
+ for _, err := range errors {
+ if err != ErrClosing {
+ t.Errorf("expected manager closing error, found %q", err)
+ }
+ }
+}
+
+func TestClose_alreadyClosed(t *testing.T) {
+ mgr := newManager()
+ mgr.Close()
+
+ // repeated closing does not error
+ mgr.Close()
+
+ err := mgr.Exec(exec.Command("sleep", "1"))
+ if err != ErrClosing {
+ t.Errorf("expected manager closing error, found %q", err)
+ }
+}
+
+func TestExitCode(t *testing.T) {
+ mgr := newManager()
+
+ err := mgr.Exec(exec.Command("ls", "doesnotexist"))
+ exitErr := &ChildExit{}
+ if !errors.As(err, &exitErr) {
+ t.Errorf("expected a ChildExit err, got %q", err)
+ }
+ if exitErr.ExitCode == 0 {
+ t.Error("expected non-zero exit code , got 0")
+ }
+}
diff --git a/cli/internal/process/sys_nix.go b/cli/internal/process/sys_nix.go
new file mode 100644
index 0000000..0e6c003
--- /dev/null
+++ b/cli/internal/process/sys_nix.go
@@ -0,0 +1,23 @@
+//go:build !windows
+// +build !windows
+
+package process
+
+/**
+ * Code in this file is based on the source code at
+ * https://github.com/hashicorp/consul-template/tree/3ea7d99ad8eff17897e0d63dac86d74770170bb8/child/sys_nix.go
+ */
+
+import (
+ "os/exec"
+ "syscall"
+)
+
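+// setSetpgid controls whether the child is placed in its own process group.
+// A separate group keeps terminal-generated signals (e.g. Ctrl-C, which goes
+// to the parent's group) from reaching children directly; the Manager delivers
+// its own KillSignal instead.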
+func setSetpgid(cmd *exec.Cmd, value bool) {
+ cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: value}
+}
+
+func processNotFoundErr(err error) bool {
+	// ESRCH == no such process, i.e. already exited
+ return err == syscall.ESRCH
+}
diff --git a/cli/internal/process/sys_windows.go b/cli/internal/process/sys_windows.go
new file mode 100644
index 0000000..c626c22
--- /dev/null
+++ b/cli/internal/process/sys_windows.go
@@ -0,0 +1,17 @@
+//go:build windows
+// +build windows
+
+package process
+
+/**
+ * Code in this file is based on the source code at
+ * https://github.com/hashicorp/consul-template/tree/3ea7d99ad8eff17897e0d63dac86d74770170bb8/child/sys_windows.go
+ */
+
+import "os/exec"
+
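+// setSetpgid is a no-op on Windows, which has no POSIX process groups.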
+func setSetpgid(cmd *exec.Cmd, value bool) {}
+
+func processNotFoundErr(err error) bool {
+ return false
+}
diff --git a/cli/internal/prune/prune.go b/cli/internal/prune/prune.go
new file mode 100644
index 0000000..a82023f
--- /dev/null
+++ b/cli/internal/prune/prune.go
@@ -0,0 +1,314 @@
+package prune
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/vercel/turbo/cli/internal/cmdutil"
+ "github.com/vercel/turbo/cli/internal/context"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/lockfile"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "github.com/vercel/turbo/cli/internal/turbostate"
+ "github.com/vercel/turbo/cli/internal/ui"
+ "github.com/vercel/turbo/cli/internal/util"
+
+ "github.com/fatih/color"
+ "github.com/hashicorp/go-hclog"
+ "github.com/mitchellh/cli"
+ "github.com/pkg/errors"
+)
+
+type opts struct {
+ scope []string
+ docker bool
+ outputDir string
+}
+
+// ExecutePrune executes the `prune` command.
+func ExecutePrune(helper *cmdutil.Helper, args *turbostate.ParsedArgsFromRust) error {
+ base, err := helper.GetCmdBase(args)
+ if err != nil {
+ return err
+ }
+ if len(args.Command.Prune.Scope) == 0 {
+ err := errors.New("at least one target must be specified")
+ base.LogError(err.Error())
+ return err
+ }
+ p := &prune{
+ base,
+ }
+ if err := p.prune(args.Command.Prune); err != nil {
+ logError(p.base.Logger, p.base.UI, err)
+ return err
+ }
+ return nil
+}
+
+func logError(logger hclog.Logger, ui cli.Ui, err error) {
+ logger.Error(fmt.Sprintf("error: %v", err))
+ pref := color.New(color.Bold, color.FgRed, color.ReverseVideo).Sprint(" ERROR ")
+ ui.Error(fmt.Sprintf("%s%s", pref, color.RedString(" %v", err)))
+}
+
+type prune struct {
+ base *cmdutil.CmdBase
+}
+
+// prune creates a smaller monorepo with only the required workspaces
+func (p *prune) prune(opts *turbostate.PrunePayload) error {
+ rootPackageJSONPath := p.base.RepoRoot.UntypedJoin("package.json")
+ rootPackageJSON, err := fs.ReadPackageJSON(rootPackageJSONPath)
+ if err != nil {
+ return fmt.Errorf("failed to read package.json: %w", err)
+ }
+ ctx, err := context.BuildPackageGraph(p.base.RepoRoot, rootPackageJSON)
+ if err != nil {
+ return errors.Wrap(err, "could not construct graph")
+ }
+ outDir := p.base.RepoRoot.UntypedJoin(opts.OutputDir)
+ fullDir := outDir
+ if opts.Docker {
+ fullDir = fullDir.UntypedJoin("full")
+ }
+
+ p.base.Logger.Trace("scope", "value", strings.Join(opts.Scope, ", "))
+ p.base.Logger.Trace("docker", "value", opts.Docker)
+ p.base.Logger.Trace("out dir", "value", outDir.ToString())
+
+ for _, scope := range opts.Scope {
+ p.base.Logger.Trace("scope", "value", scope)
+ target, scopeIsValid := ctx.WorkspaceInfos.PackageJSONs[scope]
+ if !scopeIsValid {
+ return errors.Errorf("invalid scope: package %v not found", scope)
+ }
+ p.base.Logger.Trace("target", "value", target.Name)
+ p.base.Logger.Trace("directory", "value", target.Dir)
+ p.base.Logger.Trace("external deps", "value", target.UnresolvedExternalDeps)
+ p.base.Logger.Trace("internal deps", "value", target.InternalDeps)
+ }
+
+ canPrune, err := ctx.PackageManager.CanPrune(p.base.RepoRoot)
+ if err != nil {
+ return err
+ }
+ if !canPrune {
+ return errors.Errorf("this command is not yet implemented for %s", ctx.PackageManager.Name)
+ }
+ if lockfile.IsNil(ctx.Lockfile) {
+ return errors.New("Cannot prune without parsed lockfile")
+ }
+
+ p.base.UI.Output(fmt.Sprintf("Generating pruned monorepo for %v in %v", ui.Bold(strings.Join(opts.Scope, ", ")), ui.Bold(outDir.ToString())))
+
+ packageJSONPath := outDir.UntypedJoin("package.json")
+ if err := packageJSONPath.EnsureDir(); err != nil {
+ return errors.Wrap(err, "could not create output directory")
+ }
+ if workspacePath := ctx.PackageManager.WorkspaceConfigurationPath; workspacePath != "" && p.base.RepoRoot.UntypedJoin(workspacePath).FileExists() {
+ workspaceFile := fs.LstatCachedFile{Path: p.base.RepoRoot.UntypedJoin(workspacePath)}
+ if err := fs.CopyFile(&workspaceFile, outDir.UntypedJoin(ctx.PackageManager.WorkspaceConfigurationPath).ToStringDuringMigration()); err != nil {
+ return errors.Wrapf(err, "could not copy %s", ctx.PackageManager.WorkspaceConfigurationPath)
+ }
+ if err := fs.CopyFile(&workspaceFile, fullDir.UntypedJoin(ctx.PackageManager.WorkspaceConfigurationPath).ToStringDuringMigration()); err != nil {
+ return errors.Wrapf(err, "could not copy %s", ctx.PackageManager.WorkspaceConfigurationPath)
+ }
+ if opts.Docker {
+ if err := fs.CopyFile(&workspaceFile, outDir.UntypedJoin("json", ctx.PackageManager.WorkspaceConfigurationPath).ToStringDuringMigration()); err != nil {
+ return errors.Wrapf(err, "could not copy %s", ctx.PackageManager.WorkspaceConfigurationPath)
+ }
+ }
+ }
+ workspaces := []turbopath.AnchoredSystemPath{}
+ targets, err := ctx.InternalDependencies(append(opts.Scope, util.RootPkgName))
+ if err != nil {
+ return errors.Wrap(err, "could not traverse the dependency graph to find topological dependencies")
+ }
+ p.base.Logger.Trace("targets", "value", targets)
+
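+	// Seed the pruned lockfile's key set with the root package.json's external
+	// dependencies; each retained workspace's transitive deps are appended in
+	// the loop below.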
+ lockfileKeys := make([]string, 0, len(rootPackageJSON.TransitiveDeps))
+ for _, pkg := range rootPackageJSON.TransitiveDeps {
+ lockfileKeys = append(lockfileKeys, pkg.Key)
+ }
+
+ for _, internalDep := range targets {
+ // We skip over the pseudo root node and the root package
+ if internalDep == ctx.RootNode || internalDep == util.RootPkgName {
+ continue
+ }
+
+ workspaces = append(workspaces, ctx.WorkspaceInfos.PackageJSONs[internalDep].Dir)
+ originalDir := ctx.WorkspaceInfos.PackageJSONs[internalDep].Dir.RestoreAnchor(p.base.RepoRoot)
+ info, err := originalDir.Lstat()
+ if err != nil {
+ return errors.Wrapf(err, "failed to lstat %s", originalDir)
+ }
+ targetDir := ctx.WorkspaceInfos.PackageJSONs[internalDep].Dir.RestoreAnchor(fullDir)
+ if err := targetDir.MkdirAllMode(info.Mode()); err != nil {
+ return errors.Wrapf(err, "failed to create folder %s for %v", targetDir, internalDep)
+ }
+
+ if err := fs.RecursiveCopy(ctx.WorkspaceInfos.PackageJSONs[internalDep].Dir.ToStringDuringMigration(), targetDir.ToStringDuringMigration()); err != nil {
+ return errors.Wrapf(err, "failed to copy %v into %v", internalDep, targetDir)
+ }
+ if opts.Docker {
+ jsonDir := outDir.UntypedJoin("json", ctx.WorkspaceInfos.PackageJSONs[internalDep].PackageJSONPath.ToStringDuringMigration())
+ if err := jsonDir.EnsureDir(); err != nil {
+ return errors.Wrapf(err, "failed to create folder %v for %v", jsonDir, internalDep)
+ }
+ if err := fs.RecursiveCopy(ctx.WorkspaceInfos.PackageJSONs[internalDep].PackageJSONPath.ToStringDuringMigration(), jsonDir.ToStringDuringMigration()); err != nil {
+ return errors.Wrapf(err, "failed to copy %v into %v", internalDep, jsonDir)
+ }
+ }
+
+ for _, pkg := range ctx.WorkspaceInfos.PackageJSONs[internalDep].TransitiveDeps {
+ lockfileKeys = append(lockfileKeys, pkg.Key)
+ }
+
+ p.base.UI.Output(fmt.Sprintf(" - Added %v", ctx.WorkspaceInfos.PackageJSONs[internalDep].Name))
+ }
+ p.base.Logger.Trace("new workspaces", "value", workspaces)
+
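+	// Ask the lockfile implementation for a subgraph covering only the kept
+	// workspaces and the collected lockfile keys; this becomes the pruned
+	// repo's lockfile.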
+ lockfile, err := ctx.Lockfile.Subgraph(workspaces, lockfileKeys)
+ if err != nil {
+ return errors.Wrap(err, "Failed creating pruned lockfile")
+ }
+
+ lockfilePath := outDir.UntypedJoin(ctx.PackageManager.Lockfile)
+ lockfileFile, err := lockfilePath.Create()
+ if err != nil {
+ return errors.Wrap(err, "Failed to create lockfile")
+ }
+
+ lockfileWriter := bufio.NewWriter(lockfileFile)
+ if err := lockfile.Encode(lockfileWriter); err != nil {
+ return errors.Wrap(err, "Failed to encode pruned lockfile")
+ }
+
+ if err := lockfileWriter.Flush(); err != nil {
+ return errors.Wrap(err, "Failed to flush pruned lockfile")
+ }
+
+ if fs.FileExists(".gitignore") {
+ if err := fs.CopyFile(&fs.LstatCachedFile{Path: p.base.RepoRoot.UntypedJoin(".gitignore")}, fullDir.UntypedJoin(".gitignore").ToStringDuringMigration()); err != nil {
+ return errors.Wrap(err, "failed to copy root .gitignore")
+ }
+ }
+
+ if fs.FileExists(".npmrc") {
+ if err := fs.CopyFile(&fs.LstatCachedFile{Path: p.base.RepoRoot.UntypedJoin(".npmrc")}, fullDir.UntypedJoin(".npmrc").ToStringDuringMigration()); err != nil {
+ return errors.Wrap(err, "failed to copy root .npmrc")
+ }
+ if opts.Docker {
+ if err := fs.CopyFile(&fs.LstatCachedFile{Path: p.base.RepoRoot.UntypedJoin(".npmrc")}, outDir.UntypedJoin("json/.npmrc").ToStringDuringMigration()); err != nil {
+ return errors.Wrap(err, "failed to copy root .npmrc")
+ }
+ }
+ }
+
+ turboJSON, err := fs.LoadTurboConfig(p.base.RepoRoot, rootPackageJSON, false)
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
+ return errors.Wrap(err, "failed to read turbo.json")
+ }
+ if turboJSON != nil {
+		// When executing a prune, it is not enough to simply copy the file, as
+		// tasks may refer to scopes that no longer exist. To remedy this, we need
+		// to remove from the Pipeline the TaskDefinitions that no longer apply.
+ for pipelineTask := range turboJSON.Pipeline {
+ includeTask := false
+ for _, includedPackage := range targets {
+ if util.IsTaskInPackage(pipelineTask, includedPackage) {
+ includeTask = true
+ break
+ }
+ }
+
+ if !includeTask {
+ delete(turboJSON.Pipeline, pipelineTask)
+ }
+ }
+
+ bytes, err := turboJSON.MarshalJSON()
+
+ if err != nil {
+		return errors.Wrap(err, "failed to marshal turbo.json")
+ }
+
+ if err := fullDir.UntypedJoin("turbo.json").WriteFile(bytes, 0644); err != nil {
+ return errors.Wrap(err, "failed to prune workspace tasks from turbo.json")
+ }
+ }
+
+ originalPackageJSON := fs.LstatCachedFile{Path: p.base.RepoRoot.UntypedJoin("package.json")}
+ newPackageJSONPath := fullDir.UntypedJoin("package.json")
+ // If the original lockfile uses any patches we rewrite the package.json to make sure it doesn't
+ // include any patches that might have been pruned.
+ if originalPatches := ctx.Lockfile.Patches(); originalPatches != nil {
+ patches := lockfile.Patches()
+ if err := ctx.PackageManager.PrunePatchedPackages(rootPackageJSON, patches); err != nil {
+ return errors.Wrapf(err, "Unable to prune patches section of %s", rootPackageJSONPath)
+ }
+ packageJSONContent, err := fs.MarshalPackageJSON(rootPackageJSON)
+ if err != nil {
+ return err
+ }
+
+ info, err := originalPackageJSON.GetInfo()
+ if err != nil {
+ return err
+ }
+ newPackageJSON, err := newPackageJSONPath.Create()
+ if err != nil {
+ return err
+ }
+ if _, err := newPackageJSON.Write(packageJSONContent); err != nil {
+ return err
+ }
+ if err := newPackageJSON.Chmod(info.Mode()); err != nil {
+ return err
+ }
+ if err := newPackageJSON.Close(); err != nil {
+ return err
+ }
+
+ for _, patch := range patches {
+ if err := fs.CopyFile(
+ &fs.LstatCachedFile{Path: p.base.RepoRoot.UntypedJoin(patch.ToString())},
+ fullDir.UntypedJoin(patch.ToString()).ToStringDuringMigration(),
+ ); err != nil {
+ return errors.Wrap(err, "Failed copying patch file")
+ }
+ if opts.Docker {
+ jsonDir := outDir.Join(turbopath.RelativeSystemPath("json"))
+ if err := fs.CopyFile(
+ &fs.LstatCachedFile{Path: p.base.RepoRoot.UntypedJoin(patch.ToString())},
+ patch.ToSystemPath().RestoreAnchor(jsonDir).ToStringDuringMigration(),
+ ); err != nil {
+ return errors.Wrap(err, "Failed copying patch file")
+ }
+ }
+ }
+ } else {
+ if err := fs.CopyFile(
+ &originalPackageJSON,
+ fullDir.UntypedJoin("package.json").ToStringDuringMigration(),
+ ); err != nil {
+ return errors.Wrap(err, "failed to copy root package.json")
+ }
+ }
+
+ if opts.Docker {
+ // Copy from the package.json in the full directory so we get the pruned version if needed
+ if err := fs.CopyFile(
+ &fs.LstatCachedFile{Path: newPackageJSONPath},
+ outDir.Join(turbopath.RelativeUnixPath("json/package.json").ToSystemPath()).ToString(),
+ ); err != nil {
+ return errors.Wrap(err, "failed to copy root package.json")
+ }
+ }
+
+ return nil
+}
diff --git a/cli/internal/run/dry_run.go b/cli/internal/run/dry_run.go
new file mode 100644
index 0000000..eeee431
--- /dev/null
+++ b/cli/internal/run/dry_run.go
@@ -0,0 +1,122 @@
+// Package run implements `turbo run`
+// This file implements the logic for `turbo run --dry`
+package run
+
+import (
+ gocontext "context"
+ "sync"
+
+ "github.com/pkg/errors"
+ "github.com/vercel/turbo/cli/internal/cache"
+ "github.com/vercel/turbo/cli/internal/cmdutil"
+ "github.com/vercel/turbo/cli/internal/core"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/graph"
+ "github.com/vercel/turbo/cli/internal/nodes"
+ "github.com/vercel/turbo/cli/internal/runsummary"
+ "github.com/vercel/turbo/cli/internal/taskhash"
+ "github.com/vercel/turbo/cli/internal/util"
+)
+
+// DryRun gets all the info needed from tasks and prints out a summary, but doesn't actually
+// execute the tasks.
+func DryRun(
+ ctx gocontext.Context,
+ g *graph.CompleteGraph,
+ rs *runSpec,
+ engine *core.Engine,
+ _ *taskhash.Tracker, // unused, but keep here for parity with RealRun method signature
+ turboCache cache.Cache,
+ _ *fs.TurboJSON, // unused, but keep here for parity with RealRun method signature
+ globalEnvMode util.EnvMode,
+ base *cmdutil.CmdBase,
+ summary runsummary.Meta,
+) error {
+ defer turboCache.Shutdown()
+
+ taskSummaries := []*runsummary.TaskSummary{}
+
+ mu := sync.Mutex{}
+ execFunc := func(ctx gocontext.Context, packageTask *nodes.PackageTask, taskSummary *runsummary.TaskSummary) error {
+ // Assign some fallbacks if they were missing
+ if taskSummary.Command == "" {
+ taskSummary.Command = runsummary.MissingTaskLabel
+ }
+
+ if taskSummary.Framework == "" {
+ taskSummary.Framework = runsummary.MissingFrameworkLabel
+ }
+
+		// This mutex is not _really_ required, since we are using Concurrency: 1 as an execution
+		// option, but we add it here to match the shape of RealRun's execFunc.
+ mu.Lock()
+ defer mu.Unlock()
+ taskSummaries = append(taskSummaries, taskSummary)
+ return nil
+ }
+
+ // This setup mirrors a real run. We call engine.execute() with
+ // a visitor function and some hardcoded execOpts.
+ // Note: we do not currently attempt to parallelize the graph walking
+ // (as we do in real execution)
+ getArgs := func(taskID string) []string {
+ return rs.ArgsForTask(taskID)
+ }
+
+ visitorFn := g.GetPackageTaskVisitor(ctx, engine.TaskGraph, globalEnvMode, getArgs, base.Logger, execFunc)
+ execOpts := core.EngineExecutionOptions{
+ Concurrency: 1,
+ Parallel: false,
+ }
+
+ if errs := engine.Execute(visitorFn, execOpts); len(errs) > 0 {
+ for _, err := range errs {
+ base.UI.Error(err.Error())
+ }
+ return errors.New("errors occurred during dry-run graph traversal")
+ }
+
+	// We walk the graph with no concurrency, but populating the cache state
+	// is parallelizable, so we do it _after_ walking the graph.
+ populateCacheState(turboCache, taskSummaries)
+
+ // Assign the Task Summaries to the main summary
+ summary.RunSummary.Tasks = taskSummaries
+
+	// The exitCode isn't really used by the Run Summary Close() method for dry
+	// runs, but we pass in a successful value to match real runs.
+ return summary.Close(ctx, 0, g.WorkspaceInfos)
+}
+
+func populateCacheState(turboCache cache.Cache, taskSummaries []*runsummary.TaskSummary) {
+ // We make at most 8 requests at a time for cache state.
+ maxParallelRequests := 8
+ taskCount := len(taskSummaries)
+
+ parallelRequestCount := maxParallelRequests
+ if taskCount < maxParallelRequests {
+ parallelRequestCount = taskCount
+ }
+
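+	// Bounded worker pool: every task index goes onto a buffered channel, and
+	// parallelRequestCount goroutines drain it, querying cache existence.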
+ queue := make(chan int, taskCount)
+
+ wg := &sync.WaitGroup{}
+ for i := 0; i < parallelRequestCount; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for index := range queue {
+ task := taskSummaries[index]
+ itemStatus := turboCache.Exists(task.Hash)
+ task.CacheSummary = runsummary.NewTaskCacheSummary(itemStatus, nil)
+ }
+ }()
+ }
+
+ for index := range taskSummaries {
+ queue <- index
+ }
+ close(queue)
+ wg.Wait()
+}
diff --git a/cli/internal/run/global_hash.go b/cli/internal/run/global_hash.go
new file mode 100644
index 0000000..2ebf642
--- /dev/null
+++ b/cli/internal/run/global_hash.go
@@ -0,0 +1,164 @@
+package run
+
+import (
+ "fmt"
+ "path/filepath"
+ "strings"
+
+ "github.com/hashicorp/go-hclog"
+ "github.com/mitchellh/cli"
+ "github.com/vercel/turbo/cli/internal/env"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/globby"
+ "github.com/vercel/turbo/cli/internal/hashing"
+ "github.com/vercel/turbo/cli/internal/lockfile"
+ "github.com/vercel/turbo/cli/internal/packagemanager"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "github.com/vercel/turbo/cli/internal/util"
+)
+
+const _globalCacheKey = "Buffalo buffalo Buffalo buffalo buffalo buffalo Buffalo buffalo"
+
+// Variables that we always include
+var _defaultEnvVars = []string{
+ "VERCEL_ANALYTICS_ID",
+}
+
+// GlobalHashable represents all the things that we use to create the global hash
+type GlobalHashable struct {
+ globalFileHashMap map[turbopath.AnchoredUnixPath]string
+ rootExternalDepsHash string
+ envVars env.DetailedMap
+ globalCacheKey string
+ pipeline fs.PristinePipeline
+ envVarPassthroughs []string
+ envMode util.EnvMode
+}
+
+// This exists because the global hash used to have different fields. Changing
+// to a new struct layout changes the global hash. We can remove this converter
+// the next time we have to update the global hash for some other reason.
+type oldGlobalHashable struct {
+ globalFileHashMap map[turbopath.AnchoredUnixPath]string
+ rootExternalDepsHash string
+ envVars env.EnvironmentVariablePairs
+ globalCacheKey string
+ pipeline fs.PristinePipeline
+}
+
+// calculateGlobalHashFromHashable returns a hash string from the globalHashable
+func calculateGlobalHashFromHashable(full GlobalHashable) (string, error) {
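+	// Each mode hashes a deliberately different shape so that users who never
+	// opted into strict env handling keep their existing (old-layout) hashes.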
+ switch full.envMode {
+ case util.Infer:
+ if full.envVarPassthroughs != nil {
+			// In infer mode, if there is any passthrough config (even if it is an
+			// empty array) we'll hash the whole object, so we can detect changes to
+			// that config. Further, we resolve the envMode to the concrete value.
+ full.envMode = util.Strict
+ return fs.HashObject(full)
+ }
+
+		// If we're in infer mode and there is no global passthrough config,
+		// we use the old struct layout. This will be true for everyone not using
+		// the strict env feature, and we don't want to break their cache.
+ return fs.HashObject(oldGlobalHashable{
+ globalFileHashMap: full.globalFileHashMap,
+ rootExternalDepsHash: full.rootExternalDepsHash,
+ envVars: full.envVars.All.ToHashable(),
+ globalCacheKey: full.globalCacheKey,
+ pipeline: full.pipeline,
+ })
+ case util.Loose:
+ // Remove the passthroughs from hash consideration if we're explicitly loose.
+ full.envVarPassthroughs = nil
+ return fs.HashObject(full)
+ case util.Strict:
+ // Collapse `nil` and `[]` in strict mode.
+ if full.envVarPassthroughs == nil {
+ full.envVarPassthroughs = make([]string, 0)
+ }
+ return fs.HashObject(full)
+ default:
+ panic("unimplemented environment mode")
+ }
+}
+
+func calculateGlobalHash(
+ rootpath turbopath.AbsoluteSystemPath,
+ rootPackageJSON *fs.PackageJSON,
+ pipeline fs.Pipeline,
+ envVarDependencies []string,
+ globalFileDependencies []string,
+ packageManager *packagemanager.PackageManager,
+ lockFile lockfile.Lockfile,
+ envVarPassthroughs []string,
+ envMode util.EnvMode,
+ logger hclog.Logger,
+ ui cli.Ui,
+ isStructuredOutput bool,
+) (GlobalHashable, error) {
+ // Calculate env var dependencies
+ envVars := []string{}
+ envVars = append(envVars, envVarDependencies...)
+ envVars = append(envVars, _defaultEnvVars...)
+ globalHashableEnvVars, err := env.GetHashableEnvVars(envVars, []string{".*THASH.*"}, "")
+ if err != nil {
+ return GlobalHashable{}, err
+ }
+
+ // The only way we can add env vars into the hash via matching is via THASH,
+ // so we only do a simple check here for entries in `BySource.Matching`.
+ // If we enable globalEnv to accept wildcard characters, we'll need to update this
+ // check.
+ if !isStructuredOutput && len(globalHashableEnvVars.BySource.Matching) > 0 {
+ ui.Warn(fmt.Sprintf("[DEPRECATED] Using .*THASH.* to specify an environment variable for inclusion into the hash is deprecated. You specified: %s.", strings.Join(globalHashableEnvVars.BySource.Matching.Names(), ", ")))
+ }
+
+ logger.Debug("global hash env vars", "vars", globalHashableEnvVars.All.Names())
+
+ // Calculate global file dependencies
+ globalDeps := make(util.Set)
+ if len(globalFileDependencies) > 0 {
+ ignores, err := packageManager.GetWorkspaceIgnores(rootpath)
+ if err != nil {
+ return GlobalHashable{}, err
+ }
+
+ f, err := globby.GlobFiles(rootpath.ToStringDuringMigration(), globalFileDependencies, ignores)
+ if err != nil {
+ return GlobalHashable{}, err
+ }
+
+ for _, val := range f {
+ globalDeps.Add(val)
+ }
+ }
+
+ if lockFile == nil {
+ // If we don't have lockfile information available, add the specfile and lockfile to global deps
+ globalDeps.Add(filepath.Join(rootpath.ToStringDuringMigration(), packageManager.Specfile))
+ globalDeps.Add(filepath.Join(rootpath.ToStringDuringMigration(), packageManager.Lockfile))
+ }
+
+ // No prefix, global deps already have full paths
+ globalDepsArray := globalDeps.UnsafeListOfStrings()
+ globalDepsPaths := make([]turbopath.AbsoluteSystemPath, len(globalDepsArray))
+ for i, path := range globalDepsArray {
+ globalDepsPaths[i] = turbopath.AbsoluteSystemPathFromUpstream(path)
+ }
+
+ globalFileHashMap, err := hashing.GetHashableDeps(rootpath, globalDepsPaths)
+ if err != nil {
+ return GlobalHashable{}, fmt.Errorf("error hashing files: %w", err)
+ }
+
+ return GlobalHashable{
+ globalFileHashMap: globalFileHashMap,
+ rootExternalDepsHash: rootPackageJSON.ExternalDepsHash,
+ envVars: globalHashableEnvVars,
+ globalCacheKey: _globalCacheKey,
+ pipeline: pipeline.Pristine(),
+ envVarPassthroughs: envVarPassthroughs,
+ envMode: envMode,
+ }, nil
+}
diff --git a/cli/internal/run/graph_run.go b/cli/internal/run/graph_run.go
new file mode 100644
index 0000000..8531718
--- /dev/null
+++ b/cli/internal/run/graph_run.go
@@ -0,0 +1,46 @@
+package run
+
+import (
+ gocontext "context"
+
+ "github.com/pyr-sh/dag"
+ "github.com/vercel/turbo/cli/internal/cmdutil"
+ "github.com/vercel/turbo/cli/internal/core"
+ "github.com/vercel/turbo/cli/internal/graphvisualizer"
+ "github.com/vercel/turbo/cli/internal/util"
+)
+
+// GraphRun generates a visualization of the task graph rather than executing it.
+func GraphRun(ctx gocontext.Context, rs *runSpec, engine *core.Engine, base *cmdutil.CmdBase) error {
+ graph := engine.TaskGraph
+ if rs.Opts.runOpts.SinglePackage {
+ graph = filterSinglePackageGraphForDisplay(engine.TaskGraph)
+ }
+ visualizer := graphvisualizer.New(base.RepoRoot, base.UI, graph)
+
+ if rs.Opts.runOpts.GraphDot {
+ visualizer.RenderDotGraph()
+ } else {
+ err := visualizer.GenerateGraphFile(rs.Opts.runOpts.GraphFile)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// filterSinglePackageGraphForDisplay builds an equivalent graph with package names stripped from tasks.
+// Given that this should only be used in a single-package context, all of the package names are expected
+// to be //. Also, all nodes are always connected to the root node, so we are not concerned with leaving
+// behind any unconnected nodes.
+func filterSinglePackageGraphForDisplay(originalGraph *dag.AcyclicGraph) *dag.AcyclicGraph {
+ graph := &dag.AcyclicGraph{}
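+	// Rebuild each edge with the package prefix stripped, e.g. a task ID like
+	// "//#build" is displayed as just "build".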
+ for _, edge := range originalGraph.Edges() {
+ src := util.StripPackageName(edge.Source().(string))
+ tgt := util.StripPackageName(edge.Target().(string))
+ graph.Add(src)
+ graph.Add(tgt)
+ graph.Connect(dag.BasicEdge(src, tgt))
+ }
+ return graph
+}
diff --git a/cli/internal/run/log_tag_go.go b/cli/internal/run/log_tag_go.go
new file mode 100644
index 0000000..a3e825f
--- /dev/null
+++ b/cli/internal/run/log_tag_go.go
@@ -0,0 +1,11 @@
+//go:build go || !rust
+// +build go !rust
+
+package run
+
+import "github.com/hashicorp/go-hclog"
+
+// LogTag logs out the build tag (in this case "go") for the current build.
+func LogTag(logger hclog.Logger) {
+ logger.Debug("build tag: go")
+}
diff --git a/cli/internal/run/log_tag_rust.go b/cli/internal/run/log_tag_rust.go
new file mode 100644
index 0000000..065f438
--- /dev/null
+++ b/cli/internal/run/log_tag_rust.go
@@ -0,0 +1,11 @@
+//go:build rust
+// +build rust
+
+package run
+
+import "github.com/hashicorp/go-hclog"
+
+// LogTag logs out the build tag (in this case "rust") for the current build.
+func LogTag(logger hclog.Logger) {
+ logger.Debug("build tag: rust")
+}
diff --git a/cli/internal/run/real_run.go b/cli/internal/run/real_run.go
new file mode 100644
index 0000000..32c7965
--- /dev/null
+++ b/cli/internal/run/real_run.go
@@ -0,0 +1,420 @@
+package run
+
+import (
+ gocontext "context"
+ "fmt"
+ "log"
+ "os/exec"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/fatih/color"
+ "github.com/hashicorp/go-hclog"
+ "github.com/mitchellh/cli"
+ "github.com/pkg/errors"
+ "github.com/vercel/turbo/cli/internal/cache"
+ "github.com/vercel/turbo/cli/internal/cmdutil"
+ "github.com/vercel/turbo/cli/internal/colorcache"
+ "github.com/vercel/turbo/cli/internal/core"
+ "github.com/vercel/turbo/cli/internal/env"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/graph"
+ "github.com/vercel/turbo/cli/internal/logstreamer"
+ "github.com/vercel/turbo/cli/internal/nodes"
+ "github.com/vercel/turbo/cli/internal/packagemanager"
+ "github.com/vercel/turbo/cli/internal/process"
+ "github.com/vercel/turbo/cli/internal/runcache"
+ "github.com/vercel/turbo/cli/internal/runsummary"
+ "github.com/vercel/turbo/cli/internal/spinner"
+ "github.com/vercel/turbo/cli/internal/taskhash"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "github.com/vercel/turbo/cli/internal/ui"
+ "github.com/vercel/turbo/cli/internal/util"
+)
+
+// RealRun executes a set of tasks
+func RealRun(
+ ctx gocontext.Context,
+ g *graph.CompleteGraph,
+ rs *runSpec,
+ engine *core.Engine,
+ taskHashTracker *taskhash.Tracker,
+ turboCache cache.Cache,
+ turboJSON *fs.TurboJSON,
+ globalEnvMode util.EnvMode,
+ packagesInScope []string,
+ base *cmdutil.CmdBase,
+ runSummary runsummary.Meta,
+ packageManager *packagemanager.PackageManager,
+ processes *process.Manager,
+) error {
+ singlePackage := rs.Opts.runOpts.SinglePackage
+
+ if singlePackage {
+ base.UI.Output(fmt.Sprintf("%s %s", ui.Dim("• Running"), ui.Dim(ui.Bold(strings.Join(rs.Targets, ", ")))))
+ } else {
+ base.UI.Output(fmt.Sprintf(ui.Dim("• Packages in scope: %v"), strings.Join(packagesInScope, ", ")))
+ base.UI.Output(fmt.Sprintf("%s %s %s", ui.Dim("• Running"), ui.Dim(ui.Bold(strings.Join(rs.Targets, ", "))), ui.Dim(fmt.Sprintf("in %v packages", rs.FilteredPkgs.Len()))))
+ }
+
+ // Log whether remote cache is enabled
+ useHTTPCache := !rs.Opts.cacheOpts.SkipRemote
+ if useHTTPCache {
+ base.UI.Info(ui.Dim("• Remote caching enabled"))
+ } else {
+ base.UI.Info(ui.Dim("• Remote caching disabled"))
+ }
+
+ defer func() {
+ _ = spinner.WaitFor(ctx, turboCache.Shutdown, base.UI, "...writing to cache...", 1500*time.Millisecond)
+ }()
+ colorCache := colorcache.New()
+
+ runCache := runcache.New(turboCache, base.RepoRoot, rs.Opts.runcacheOpts, colorCache)
+
+ ec := &execContext{
+ colorCache: colorCache,
+ runSummary: runSummary,
+ rs: rs,
+ ui: &cli.ConcurrentUi{Ui: base.UI},
+ runCache: runCache,
+ env: turboJSON.GlobalEnv,
+ passthroughEnv: turboJSON.GlobalPassthroughEnv,
+ logger: base.Logger,
+ packageManager: packageManager,
+ processes: processes,
+ taskHashTracker: taskHashTracker,
+ repoRoot: base.RepoRoot,
+ isSinglePackage: singlePackage,
+ }
+
+ // run the thing
+ execOpts := core.EngineExecutionOptions{
+ Parallel: rs.Opts.runOpts.Parallel,
+ Concurrency: rs.Opts.runOpts.Concurrency,
+ }
+
+ mu := sync.Mutex{}
+ taskSummaries := []*runsummary.TaskSummary{}
+ execFunc := func(ctx gocontext.Context, packageTask *nodes.PackageTask, taskSummary *runsummary.TaskSummary) error {
+ taskExecutionSummary, err := ec.exec(ctx, packageTask)
+
+ // taskExecutionSummary will be nil if the task never executed
+ // (i.e. if the workspace didn't implement the script corresponding to the task)
+ // We don't need to collect any of the outputs or execution if the task didn't execute.
+ if taskExecutionSummary != nil {
+ taskSummary.ExpandedOutputs = taskHashTracker.GetExpandedOutputs(taskSummary.TaskID)
+ taskSummary.Execution = taskExecutionSummary
+ taskSummary.CacheSummary = taskHashTracker.GetCacheStatus(taskSummary.TaskID)
+
+			// lock since multiple goroutines may be appending to this array at the same time
+ mu.Lock()
+ taskSummaries = append(taskSummaries, taskSummary)
+ // not using defer, just release the lock
+ mu.Unlock()
+ }
+
+ // Return the error when there is one
+ if err != nil {
+ return err
+ }
+
+ return nil
+ }
+
+ getArgs := func(taskID string) []string {
+ return rs.ArgsForTask(taskID)
+ }
+
+ visitorFn := g.GetPackageTaskVisitor(ctx, engine.TaskGraph, globalEnvMode, getArgs, base.Logger, execFunc)
+ errs := engine.Execute(visitorFn, execOpts)
+
+ // Track if we saw any child with a non-zero exit code
+ exitCode := 0
+ exitCodeErr := &process.ChildExit{}
+
+ // Assign tasks after execution
+ runSummary.RunSummary.Tasks = taskSummaries
+
+ for _, err := range errs {
+ if errors.As(err, &exitCodeErr) {
+			// If a process gets killed via a signal, Go reports its exit code as -1.
+ // We take the absolute value of the exit code so we don't select '0' as
+ // the greatest exit code.
+ childExit := exitCodeErr.ExitCode
+ if childExit < 0 {
+ childExit = -childExit
+ }
+ if childExit > exitCode {
+ exitCode = childExit
+ }
+ } else if exitCode == 0 {
+			// We hit some error; it shouldn't be exit code 0
+ exitCode = 1
+ }
+ base.UI.Error(err.Error())
+ }
+
+	// When continue-on-error is enabled, failed tasks are not registered as
+	// errors, so we must inspect the task summaries instead.
+ if ec.rs.Opts.runOpts.ContinueOnError {
+ for _, summary := range runSummary.RunSummary.Tasks {
+ if childExit := summary.Execution.ExitCode(); childExit != nil {
+ childExit := *childExit
+ if childExit < 0 {
+ childExit = -childExit
+ }
+ if childExit > exitCode {
+ exitCode = childExit
+ }
+ }
+ }
+ }
+
+ if err := runSummary.Close(ctx, exitCode, g.WorkspaceInfos); err != nil {
+ // We don't need to throw an error, but we can warn on this.
+ // Note: this method doesn't actually return an error for Real Runs at the time of writing.
+ base.UI.Info(fmt.Sprintf("Failed to close Run Summary %v", err))
+ }
+
+ if exitCode != 0 {
+ return &process.ChildExit{
+ ExitCode: exitCode,
+ }
+ }
+ return nil
+}
+
+type execContext struct {
+ colorCache *colorcache.ColorCache
+ runSummary runsummary.Meta
+ rs *runSpec
+ ui cli.Ui
+ runCache *runcache.RunCache
+ env []string
+ passthroughEnv []string
+ logger hclog.Logger
+ packageManager *packagemanager.PackageManager
+ processes *process.Manager
+ taskHashTracker *taskhash.Tracker
+ repoRoot turbopath.AbsoluteSystemPath
+ isSinglePackage bool
+}
+
+func (ec *execContext) logError(prefix string, err error) {
+ ec.logger.Error(prefix, "error", err)
+
+ if prefix != "" {
+ prefix += ": "
+ }
+
+ ec.ui.Error(fmt.Sprintf("%s%s%s", ui.ERROR_PREFIX, prefix, color.RedString(" %v", err)))
+}
+
+func (ec *execContext) exec(ctx gocontext.Context, packageTask *nodes.PackageTask) (*runsummary.TaskExecutionSummary, error) {
+ // Setup tracer. Every time tracer() is called the taskExecutionSummary's duration is updated
+ // So make sure to call it before returning.
+ tracer, taskExecutionSummary := ec.runSummary.RunSummary.TrackTask(packageTask.TaskID)
+
+ progressLogger := ec.logger.Named("")
+ progressLogger.Debug("start")
+
+ passThroughArgs := ec.rs.ArgsForTask(packageTask.Task)
+ hash := packageTask.Hash
+ ec.logger.Debug("task hash", "value", hash)
+ // TODO(gsoltis): if/when we fix https://github.com/vercel/turbo/issues/937
+ // the following block should never get hit. In the meantime, keep it after hashing
+ // so that downstream tasks can count on the hash existing
+ //
+ // bail if the script doesn't exist
+ if packageTask.Command == "" {
+ progressLogger.Debug("no task in package, skipping")
+ progressLogger.Debug("done", "status", "skipped", "duration", taskExecutionSummary.Duration)
+ // Return nil here because there was no execution, so there is no task execution summary
+ return nil, nil
+ }
+
+ // Set building status now that we know it's going to run.
+ tracer(runsummary.TargetBuilding, nil, &successCode)
+
+ var prefix string
+ var prettyPrefix string
+ if ec.rs.Opts.runOpts.LogPrefix == "none" {
+ prefix = ""
+ } else {
+ prefix = packageTask.OutputPrefix(ec.isSinglePackage)
+ }
+
+ prettyPrefix = ec.colorCache.PrefixWithColor(packageTask.PackageName, prefix)
+
+ // Cache ---------------------------------------------
+ taskCache := ec.runCache.TaskCache(packageTask, hash)
+ // Create a logger for replaying
+ prefixedUI := &cli.PrefixedUi{
+ Ui: ec.ui,
+ OutputPrefix: prettyPrefix,
+ InfoPrefix: prettyPrefix,
+ ErrorPrefix: prettyPrefix,
+ WarnPrefix: prettyPrefix,
+ }
+
+ cacheStatus, timeSaved, err := taskCache.RestoreOutputs(ctx, prefixedUI, progressLogger)
+
+ // It's safe to set the CacheStatus even if there's an error, because if there's
+ // an error, the 0 values are actually what we want. We save cacheStatus and timeSaved
+ // for the task, so that even if there's an error, we have those values for the taskSummary.
+ ec.taskHashTracker.SetCacheStatus(
+ packageTask.TaskID,
+ runsummary.NewTaskCacheSummary(cacheStatus, &timeSaved),
+ )
+
+ if err != nil {
+ prefixedUI.Error(fmt.Sprintf("error fetching from cache: %s", err))
+ } else if cacheStatus.Local || cacheStatus.Remote { // If there was a cache hit
+ ec.taskHashTracker.SetExpandedOutputs(packageTask.TaskID, taskCache.ExpandedOutputs)
+ // We only cache successful executions, so we can assume this is a successCode exit.
+ tracer(runsummary.TargetCached, nil, &successCode)
+ return taskExecutionSummary, nil
+ }
+
+ // Setup command execution
+ argsactual := append([]string{"run"}, packageTask.Task)
+ if len(passThroughArgs) > 0 {
+ // This will be either '--' or a typed nil
+ argsactual = append(argsactual, ec.packageManager.ArgSeparator...)
+ argsactual = append(argsactual, passThroughArgs...)
+ }
+
+ cmd := exec.Command(ec.packageManager.Command, argsactual...)
+ cmd.Dir = packageTask.Pkg.Dir.ToSystemPath().RestoreAnchor(ec.repoRoot).ToString()
+
+ currentState := env.GetEnvMap()
+ passthroughEnv := env.EnvironmentVariableMap{}
+
+ if packageTask.EnvMode == util.Strict {
+ defaultPassthrough := []string{
+ "PATH",
+ "SHELL",
+ "SYSTEMROOT", // Go will always include this on Windows, but we're being explicit here
+ }
+
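+		// Merge order matters here: later sources can override earlier ones,
+		// building up the allowlisted environment from broadest to most specific.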
+ passthroughEnv.Merge(env.FromKeys(currentState, defaultPassthrough))
+ passthroughEnv.Merge(env.FromKeys(currentState, ec.env))
+ passthroughEnv.Merge(env.FromKeys(currentState, ec.passthroughEnv))
+ passthroughEnv.Merge(env.FromKeys(currentState, packageTask.TaskDefinition.EnvVarDependencies))
+ passthroughEnv.Merge(env.FromKeys(currentState, packageTask.TaskDefinition.PassthroughEnv))
+ } else {
+ passthroughEnv.Merge(currentState)
+ }
+
+ // Always last to make sure it clobbers.
+ passthroughEnv.Add("TURBO_HASH", hash)
+
+ cmd.Env = passthroughEnv.ToHashable()
+
+ // Setup stdout/stderr
+	// If we are not caching anything, then we don't need to write logs to disk.
+	// Be careful about this conditional, given the default of cache = true.
+ writer, err := taskCache.OutputWriter(prettyPrefix)
+ if err != nil {
+ tracer(runsummary.TargetBuildFailed, err, nil)
+
+ ec.logError(prettyPrefix, err)
+ if !ec.rs.Opts.runOpts.ContinueOnError {
+ return nil, errors.Wrapf(err, "failed to capture outputs for \"%v\"", packageTask.TaskID)
+ }
+ }
+
+ // Create a logger
+ logger := log.New(writer, "", 0)
+ // Setup a streamer that we'll pipe cmd.Stdout to
+ logStreamerOut := logstreamer.NewLogstreamer(logger, prettyPrefix, false)
+ // Setup a streamer that we'll pipe cmd.Stderr to.
+ logStreamerErr := logstreamer.NewLogstreamer(logger, prettyPrefix, false)
+ cmd.Stderr = logStreamerErr
+ cmd.Stdout = logStreamerOut
+ // Flush/Reset any error we recorded
+ logStreamerErr.FlushRecord()
+ logStreamerOut.FlushRecord()
+
+ closeOutputs := func() error {
+ var closeErrors []error
+
+ if err := logStreamerOut.Close(); err != nil {
+ closeErrors = append(closeErrors, errors.Wrap(err, "log stdout"))
+ }
+ if err := logStreamerErr.Close(); err != nil {
+ closeErrors = append(closeErrors, errors.Wrap(err, "log stderr"))
+ }
+
+ if err := writer.Close(); err != nil {
+ closeErrors = append(closeErrors, errors.Wrap(err, "log file"))
+ }
+ if len(closeErrors) > 0 {
+ msgs := make([]string, len(closeErrors))
+ for i, err := range closeErrors {
+ msgs[i] = err.Error()
+ }
+ return fmt.Errorf("could not flush log output: %v", strings.Join(msgs, ", "))
+ }
+ return nil
+ }
+
+ // Run the command
+ if err := ec.processes.Exec(cmd); err != nil {
+ // close off our outputs. We errored, so we mostly don't care if we fail to close
+ _ = closeOutputs()
+ // if we already know we're in the process of exiting,
+ // we don't need to record an error to that effect.
+ if errors.Is(err, process.ErrClosing) {
+ return taskExecutionSummary, nil
+ }
+
+ // If the error we got is a ChildExit, it will have an ExitCode field
+ // Pass that along into the tracer.
+ var e *process.ChildExit
+ if errors.As(err, &e) {
+ tracer(runsummary.TargetBuildFailed, err, &e.ExitCode)
+ } else {
+ // If it wasn't a ChildExit, and something else went wrong, we don't have an exitCode
+ tracer(runsummary.TargetBuildFailed, err, nil)
+ }
+
+ progressLogger.Error(fmt.Sprintf("Error: command finished with error: %v", err))
+ if !ec.rs.Opts.runOpts.ContinueOnError {
+ prefixedUI.Error(fmt.Sprintf("ERROR: command finished with error: %s", err))
+ ec.processes.Close()
+ } else {
+ prefixedUI.Warn("command finished with error, but continuing...")
+ // Set to nil so we don't short-circuit any other execution
+ err = nil
+ }
+
+ // If there was an error, flush the buffered output
+ taskCache.OnError(prefixedUI, progressLogger)
+
+ return taskExecutionSummary, err
+ }
+
+ // Add another timestamp into the tracer, so we have an accurate timestamp for how long the task took.
+ tracer(runsummary.TargetExecuted, nil, nil)
+
+ // Close off our outputs and cache them
+ if err := closeOutputs(); err != nil {
+ ec.logError("", err)
+ } else {
+ if err = taskCache.SaveOutputs(ctx, progressLogger, prefixedUI, int(taskExecutionSummary.Duration.Milliseconds())); err != nil {
+ ec.logError("", fmt.Errorf("error caching output: %w", err))
+ } else {
+ ec.taskHashTracker.SetExpandedOutputs(packageTask.TaskID, taskCache.ExpandedOutputs)
+ }
+ }
+
+ // Clean up tracing
+ tracer(runsummary.TargetBuilt, nil, &successCode)
+ progressLogger.Debug("done", "status", "complete", "duration", taskExecutionSummary.Duration)
+ return taskExecutionSummary, nil
+}
+
+var successCode = 0
diff --git a/cli/internal/run/run.go b/cli/internal/run/run.go
new file mode 100644
index 0000000..2ac1141
--- /dev/null
+++ b/cli/internal/run/run.go
@@ -0,0 +1,487 @@
+package run
+
+import (
+ gocontext "context"
+ "fmt"
+ "os"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/vercel/turbo/cli/internal/analytics"
+ "github.com/vercel/turbo/cli/internal/cache"
+ "github.com/vercel/turbo/cli/internal/cmdutil"
+ "github.com/vercel/turbo/cli/internal/context"
+ "github.com/vercel/turbo/cli/internal/core"
+ "github.com/vercel/turbo/cli/internal/daemon"
+ "github.com/vercel/turbo/cli/internal/daemonclient"
+ "github.com/vercel/turbo/cli/internal/env"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/graph"
+ "github.com/vercel/turbo/cli/internal/process"
+ "github.com/vercel/turbo/cli/internal/runsummary"
+ "github.com/vercel/turbo/cli/internal/scm"
+ "github.com/vercel/turbo/cli/internal/scope"
+ "github.com/vercel/turbo/cli/internal/signals"
+ "github.com/vercel/turbo/cli/internal/taskhash"
+ "github.com/vercel/turbo/cli/internal/turbostate"
+ "github.com/vercel/turbo/cli/internal/ui"
+ "github.com/vercel/turbo/cli/internal/util"
+
+ "github.com/pkg/errors"
+)
+
+// ExecuteRun executes the run command
+func ExecuteRun(ctx gocontext.Context, helper *cmdutil.Helper, signalWatcher *signals.Watcher, args *turbostate.ParsedArgsFromRust) error {
+	base, err := helper.GetCmdBase(args)
+	if err != nil {
+		return err
+	}
+	// Log the build tag only after the error check; calling base.Logger before
+	// it would risk a nil pointer dereference when GetCmdBase fails.
+	LogTag(base.Logger)
+ tasks := args.Command.Run.Tasks
+ passThroughArgs := args.Command.Run.PassThroughArgs
+ if len(tasks) == 0 {
+ return errors.New("at least one task must be specified")
+ }
+ opts, err := optsFromArgs(args)
+ if err != nil {
+ return err
+ }
+
+ opts.runOpts.PassThroughArgs = passThroughArgs
+ run := configureRun(base, opts, signalWatcher)
+ if err := run.run(ctx, tasks); err != nil {
+ base.LogError("run failed: %v", err)
+ return err
+ }
+ return nil
+}
+
+func optsFromArgs(args *turbostate.ParsedArgsFromRust) (*Opts, error) {
+ runPayload := args.Command.Run
+
+ opts := getDefaultOptions()
+ // aliases := make(map[string]string)
+ if err := scope.OptsFromArgs(&opts.scopeOpts, args); err != nil {
+ return nil, err
+ }
+
+ // Cache flags
+ opts.clientOpts.Timeout = args.RemoteCacheTimeout
+ opts.cacheOpts.SkipFilesystem = runPayload.RemoteOnly
+ opts.cacheOpts.OverrideDir = runPayload.CacheDir
+ opts.cacheOpts.Workers = runPayload.CacheWorkers
+
+ // Run flags
+ opts.runOpts.LogPrefix = runPayload.LogPrefix
+ opts.runOpts.Summarize = runPayload.Summarize
+ opts.runOpts.ExperimentalSpaceID = runPayload.ExperimentalSpaceID
+ opts.runOpts.EnvMode = runPayload.EnvMode
+
+ // Runcache flags
+ opts.runcacheOpts.SkipReads = runPayload.Force
+ opts.runcacheOpts.SkipWrites = runPayload.NoCache
+
+ if runPayload.OutputLogs != "" {
+ err := opts.runcacheOpts.SetTaskOutputMode(runPayload.OutputLogs)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Run flags
+ if runPayload.Concurrency != "" {
+ concurrency, err := util.ParseConcurrency(runPayload.Concurrency)
+ if err != nil {
+ return nil, err
+ }
+ opts.runOpts.Concurrency = concurrency
+ }
+ opts.runOpts.Parallel = runPayload.Parallel
+ opts.runOpts.Profile = runPayload.Profile
+ opts.runOpts.ContinueOnError = runPayload.ContinueExecution
+ opts.runOpts.Only = runPayload.Only
+ opts.runOpts.NoDaemon = runPayload.NoDaemon
+ opts.runOpts.SinglePackage = args.Command.Run.SinglePackage
+
+	// See comment on Graph in turbostate.go for an explanation of Graph's representation.
+ // If flag is passed...
+ if runPayload.Graph != nil {
+ // If no value is attached, we print to stdout
+ if *runPayload.Graph == "" {
+ opts.runOpts.GraphDot = true
+ } else {
+ // Otherwise, we emit to the file name attached as value
+ opts.runOpts.GraphDot = false
+ opts.runOpts.GraphFile = *runPayload.Graph
+ }
+ }
+
+ if runPayload.DryRun != "" {
+ opts.runOpts.DryRunJSON = runPayload.DryRun == _dryRunJSONValue
+
+ if runPayload.DryRun == _dryRunTextValue || runPayload.DryRun == _dryRunJSONValue {
+ opts.runOpts.DryRun = true
+ } else {
+ return nil, fmt.Errorf("invalid dry-run mode: %v", runPayload.DryRun)
+ }
+ }
+
+ return opts, nil
+}
+
+func configureRun(base *cmdutil.CmdBase, opts *Opts, signalWatcher *signals.Watcher) *run {
+ if os.Getenv("TURBO_FORCE") == "true" {
+ opts.runcacheOpts.SkipReads = true
+ }
+
+ if os.Getenv("TURBO_REMOTE_ONLY") == "true" {
+ opts.cacheOpts.SkipFilesystem = true
+ }
+
+ processes := process.NewManager(base.Logger.Named("processes"))
+ signalWatcher.AddOnClose(processes.Close)
+ return &run{
+ base: base,
+ opts: opts,
+ processes: processes,
+ }
+}
+
+type run struct {
+ base *cmdutil.CmdBase
+ opts *Opts
+ processes *process.Manager
+}
+
+func (r *run) run(ctx gocontext.Context, targets []string) error {
+ startAt := time.Now()
+ packageJSONPath := r.base.RepoRoot.UntypedJoin("package.json")
+ rootPackageJSON, err := fs.ReadPackageJSON(packageJSONPath)
+ if err != nil {
+ return fmt.Errorf("failed to read package.json: %w", err)
+ }
+
+ isStructuredOutput := r.opts.runOpts.GraphDot || r.opts.runOpts.DryRunJSON
+
+ var pkgDepGraph *context.Context
+ if r.opts.runOpts.SinglePackage {
+ pkgDepGraph, err = context.SinglePackageGraph(r.base.RepoRoot, rootPackageJSON)
+ } else {
+ pkgDepGraph, err = context.BuildPackageGraph(r.base.RepoRoot, rootPackageJSON)
+ }
+ if err != nil {
+ var warnings *context.Warnings
+ if errors.As(err, &warnings) {
+ r.base.LogWarning("Issues occurred when constructing package graph. Turbo will function, but some features may not be available", err)
+ } else {
+ return err
+ }
+ }
+
+ if ui.IsCI && !r.opts.runOpts.NoDaemon {
+ r.base.Logger.Info("skipping turbod since we appear to be in a non-interactive context")
+ } else if !r.opts.runOpts.NoDaemon {
+ turbodClient, err := daemon.GetClient(ctx, r.base.RepoRoot, r.base.Logger, r.base.TurboVersion, daemon.ClientOpts{})
+ if err != nil {
+ r.base.LogWarning("", errors.Wrap(err, "failed to contact turbod. Continuing in standalone mode"))
+ } else {
+ defer func() { _ = turbodClient.Close() }()
+ r.base.Logger.Debug("running in daemon mode")
+ daemonClient := daemonclient.New(turbodClient)
+ r.opts.runcacheOpts.OutputWatcher = daemonClient
+ }
+ }
+
+ if err := util.ValidateGraph(&pkgDepGraph.WorkspaceGraph); err != nil {
+ return errors.Wrap(err, "Invalid package dependency graph")
+ }
+
+ // TODO: consolidate some of these arguments
+ // Note: not all properties are set here. GlobalHash and Pipeline keys are set later
+ g := &graph.CompleteGraph{
+ WorkspaceGraph: pkgDepGraph.WorkspaceGraph,
+ WorkspaceInfos: pkgDepGraph.WorkspaceInfos,
+ RootNode: pkgDepGraph.RootNode,
+ TaskDefinitions: map[string]*fs.TaskDefinition{},
+ RepoRoot: r.base.RepoRoot,
+ }
+
+ turboJSON, err := g.GetTurboConfigFromWorkspace(util.RootPkgName, r.opts.runOpts.SinglePackage)
+ if err != nil {
+ return err
+ }
+
+ // TODO: these values come from a config file, hopefully viper can help us merge these
+ r.opts.cacheOpts.RemoteCacheOpts = turboJSON.RemoteCacheOptions
+
+ pipeline := turboJSON.Pipeline
+ g.Pipeline = pipeline
+ scmInstance, err := scm.FromInRepo(r.base.RepoRoot)
+ if err != nil {
+ if errors.Is(err, scm.ErrFallback) {
+ r.base.Logger.Debug("", err)
+ } else {
+ return errors.Wrap(err, "failed to create SCM")
+ }
+ }
+ filteredPkgs, isAllPackages, err := scope.ResolvePackages(&r.opts.scopeOpts, r.base.RepoRoot, scmInstance, pkgDepGraph, r.base.UI, r.base.Logger)
+ if err != nil {
+ return errors.Wrap(err, "failed to resolve packages to run")
+ }
+ if isAllPackages {
+ // if there is a root task for any of our targets, we need to add it
+ for _, target := range targets {
+ key := util.RootTaskID(target)
+ if _, ok := pipeline[key]; ok {
+ filteredPkgs.Add(util.RootPkgName)
+ // we only need to know we're running a root task once to add it for consideration
+ break
+ }
+ }
+ }
+
+ globalHashable, err := calculateGlobalHash(
+ r.base.RepoRoot,
+ rootPackageJSON,
+ pipeline,
+ turboJSON.GlobalEnv,
+ turboJSON.GlobalDeps,
+ pkgDepGraph.PackageManager,
+ pkgDepGraph.Lockfile,
+ turboJSON.GlobalPassthroughEnv,
+ r.opts.runOpts.EnvMode,
+ r.base.Logger,
+ r.base.UI,
+ isStructuredOutput,
+ )
+
+ if err != nil {
+ return fmt.Errorf("failed to collect global hash inputs: %v", err)
+ }
+
+ if globalHash, err := calculateGlobalHashFromHashable(globalHashable); err == nil {
+ r.base.Logger.Debug("global hash", "value", globalHash)
+ g.GlobalHash = globalHash
+ } else {
+ return fmt.Errorf("failed to calculate global hash: %v", err)
+ }
+
+ r.base.Logger.Debug("local cache folder", "path", r.opts.cacheOpts.OverrideDir)
+
+ rs := &runSpec{
+ Targets: targets,
+ FilteredPkgs: filteredPkgs,
+ Opts: r.opts,
+ }
+ packageManager := pkgDepGraph.PackageManager
+
+ engine, err := buildTaskGraphEngine(
+ g,
+ rs,
+ r.opts.runOpts.SinglePackage,
+ )
+
+ if err != nil {
+ return errors.Wrap(err, "error preparing engine")
+ }
+
+ taskHashTracker := taskhash.NewTracker(
+ g.RootNode,
+ g.GlobalHash,
+		// TODO(mehulkar): remove g.Pipeline, because we need to get task definitions from CompleteGraph instead
+ g.Pipeline,
+ )
+
+ g.TaskHashTracker = taskHashTracker
+
+ // CalculateFileHashes assigns PackageInputsExpandedHashes as a side-effect
+ err = taskHashTracker.CalculateFileHashes(
+ engine.TaskGraph.Vertices(),
+ rs.Opts.runOpts.Concurrency,
+ g.WorkspaceInfos,
+ g.TaskDefinitions,
+ r.base.RepoRoot,
+ )
+
+ if err != nil {
+ return errors.Wrap(err, "error hashing package files")
+ }
+
+ // If we are running in parallel, then we remove all the edges in the graph
+ // except for the root. Rebuild the task graph for backwards compatibility.
+ // We still use dependencies specified by the pipeline configuration.
+ if rs.Opts.runOpts.Parallel {
+ for _, edge := range g.WorkspaceGraph.Edges() {
+ if edge.Target() != g.RootNode {
+ g.WorkspaceGraph.RemoveEdge(edge)
+ }
+ }
+ engine, err = buildTaskGraphEngine(
+ g,
+ rs,
+ r.opts.runOpts.SinglePackage,
+ )
+ if err != nil {
+ return errors.Wrap(err, "error preparing engine")
+ }
+ }
+
+ // Graph Run
+ if rs.Opts.runOpts.GraphFile != "" || rs.Opts.runOpts.GraphDot {
+ return GraphRun(ctx, rs, engine, r.base)
+ }
+
+ packagesInScope := rs.FilteredPkgs.UnsafeListOfStrings()
+ sort.Strings(packagesInScope)
+ // Initiate analytics and cache
+ analyticsClient := r.initAnalyticsClient(ctx)
+ defer analyticsClient.CloseWithTimeout(50 * time.Millisecond)
+ turboCache, err := r.initCache(ctx, rs, analyticsClient)
+
+ if err != nil {
+ if errors.Is(err, cache.ErrNoCachesEnabled) {
+ r.base.UI.Warn("No caches are enabled. You can try \"turbo login\", \"turbo link\", or ensuring you are not passing --remote-only to enable caching")
+ } else {
+ return errors.Wrap(err, "failed to set up caching")
+ }
+ }
+
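+	// Resolve the concrete values of any global passthrough variables so the
+	// run summary can report them alongside the hashed env vars.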
+ var envVarPassthroughMap env.EnvironmentVariableMap
+ if globalHashable.envVarPassthroughs != nil {
+ if envVarPassthroughDetailedMap, err := env.GetHashableEnvVars(globalHashable.envVarPassthroughs, nil, ""); err == nil {
+ envVarPassthroughMap = envVarPassthroughDetailedMap.BySource.Explicit
+ }
+ }
+
+ globalEnvMode := rs.Opts.runOpts.EnvMode
+ if globalEnvMode == util.Infer && turboJSON.GlobalPassthroughEnv != nil {
+ globalEnvMode = util.Strict
+ }
+
+ // RunSummary contains information that is statically analyzable about
+ // the tasks that we expect to run based on the user command.
+ summary := runsummary.NewRunSummary(
+ startAt,
+ r.base.UI,
+ r.base.RepoRoot,
+ rs.Opts.scopeOpts.PackageInferenceRoot,
+ r.base.TurboVersion,
+ r.base.APIClient,
+ rs.Opts.runOpts,
+ packagesInScope,
+ globalEnvMode,
+ runsummary.NewGlobalHashSummary(
+ globalHashable.globalFileHashMap,
+ globalHashable.rootExternalDepsHash,
+ globalHashable.envVars,
+ envVarPassthroughMap,
+ globalHashable.globalCacheKey,
+ globalHashable.pipeline,
+ ),
+ rs.Opts.SynthesizeCommand(rs.Targets),
+ )
+
+ // Dry Run
+ if rs.Opts.runOpts.DryRun {
+ return DryRun(
+ ctx,
+ g,
+ rs,
+ engine,
+ taskHashTracker,
+ turboCache,
+ turboJSON,
+ globalEnvMode,
+ r.base,
+ summary,
+ )
+ }
+
+ // Regular run
+ return RealRun(
+ ctx,
+ g,
+ rs,
+ engine,
+ taskHashTracker,
+ turboCache,
+ turboJSON,
+ globalEnvMode,
+ packagesInScope,
+ r.base,
+ summary,
+		// Extra args only for regular runs; dry runs don't get these
+ packageManager,
+ r.processes,
+ )
+}
+
+func (r *run) initAnalyticsClient(ctx gocontext.Context) analytics.Client {
+ apiClient := r.base.APIClient
+ var analyticsSink analytics.Sink
+ if apiClient.IsLinked() {
+ analyticsSink = apiClient
+ } else {
+ r.opts.cacheOpts.SkipRemote = true
+ analyticsSink = analytics.NullSink
+ }
+ analyticsClient := analytics.NewClient(ctx, analyticsSink, r.base.Logger.Named("analytics"))
+ return analyticsClient
+}
+
+func (r *run) initCache(ctx gocontext.Context, rs *runSpec, analyticsClient analytics.Client) (cache.Cache, error) {
+ apiClient := r.base.APIClient
+ // Theoretically this is overkill, but bias towards not spamming the console
+ once := &sync.Once{}
+
+ return cache.New(rs.Opts.cacheOpts, r.base.RepoRoot, apiClient, analyticsClient, func(_cache cache.Cache, err error) {
+ // Currently the HTTP Cache is the only one that can be disabled.
+ // With a cache system refactor, we might consider giving names to the caches so
+ // we can accurately report them here.
+ once.Do(func() {
+ r.base.LogWarning("Remote Caching is unavailable", err)
+ })
+ })
+}
+
+func buildTaskGraphEngine(
+ g *graph.CompleteGraph,
+ rs *runSpec,
+ isSinglePackage bool,
+) (*core.Engine, error) {
+ engine := core.NewEngine(g, isSinglePackage)
+
+ // Note: g.Pipeline is a map, but this for loop only cares about the keys
+ for taskName := range g.Pipeline {
+ engine.AddTask(taskName)
+ }
+
+ if err := engine.Prepare(&core.EngineBuildingOptions{
+ Packages: rs.FilteredPkgs.UnsafeListOfStrings(),
+ TaskNames: rs.Targets,
+ TasksOnly: rs.Opts.runOpts.Only,
+ }); err != nil {
+ return nil, err
+ }
+
+ // Check for cycles in the DAG.
+ if err := util.ValidateGraph(engine.TaskGraph); err != nil {
+ return nil, fmt.Errorf("Invalid task dependency graph:\n%v", err)
+ }
+
+ // Check that no tasks would be blocked by a persistent task
+ if err := engine.ValidatePersistentDependencies(g, rs.Opts.runOpts.Concurrency); err != nil {
+ return nil, fmt.Errorf("Invalid persistent task configuration:\n%v", err)
+ }
+
+ return engine, nil
+}
+
+// dry run custom flag
+// NOTE: These *must* be kept in sync with the corresponding Rust
+// enum definitions in shim/src/commands/mod.rs
+const (
+ _dryRunJSONValue = "Json"
+ _dryRunTextValue = "Text"
+)
diff --git a/cli/internal/run/run_spec.go b/cli/internal/run/run_spec.go
new file mode 100644
index 0000000..14402d3
--- /dev/null
+++ b/cli/internal/run/run_spec.go
@@ -0,0 +1,90 @@
+// Package run implements `turbo run`
+// This file implements some structs for options
+package run
+
+import (
+ "strings"
+
+ "github.com/vercel/turbo/cli/internal/cache"
+ "github.com/vercel/turbo/cli/internal/client"
+ "github.com/vercel/turbo/cli/internal/runcache"
+ "github.com/vercel/turbo/cli/internal/scope"
+ "github.com/vercel/turbo/cli/internal/util"
+)
+
+// runSpec contains the run-specific configuration elements that come from a particular
+// invocation of turbo.
+type runSpec struct {
+	// Targets is the list of tasks that are going to run this time
+	// E.g. in `turbo run build lint` Targets will be ["build", "lint"]
+ Targets []string
+
+ // FilteredPkgs is the list of packages that are relevant for this run.
+ FilteredPkgs util.Set
+
+ // Opts contains various opts, gathered from CLI flags,
+ // but bucketed in smaller structs based on what they mean.
+ Opts *Opts
+}
+
+// ArgsForTask returns the set of args that need to be passed through to the task
+func (rs *runSpec) ArgsForTask(task string) []string {
+ passThroughArgs := make([]string, 0, len(rs.Opts.runOpts.PassThroughArgs))
+ for _, target := range rs.Targets {
+ if target == task {
+ passThroughArgs = append(passThroughArgs, rs.Opts.runOpts.PassThroughArgs...)
+ }
+ }
+ return passThroughArgs
+}
+
+// Opts holds the current run operations configuration
+type Opts struct {
+ runOpts util.RunOpts
+ cacheOpts cache.Opts
+ clientOpts client.Opts
+ runcacheOpts runcache.Opts
+ scopeOpts scope.Opts
+}
+
+// SynthesizeCommand produces a command line that selects an equivalent set of
+// packages, tasks, and task arguments to the one the current set of opts selects.
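+// For example, a run with filter pattern "my-app", task "build", and
+// pass-through args ["-v"] synthesizes "turbo run build --filter=my-app -- -v".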
+func (o *Opts) SynthesizeCommand(tasks []string) string {
+ cmd := "turbo run"
+ cmd += " " + strings.Join(tasks, " ")
+ for _, filterPattern := range o.scopeOpts.FilterPatterns {
+ cmd += " --filter=" + filterPattern
+ }
+ for _, filterPattern := range o.scopeOpts.LegacyFilter.AsFilterPatterns() {
+ cmd += " --filter=" + filterPattern
+ }
+ if o.runOpts.Parallel {
+ cmd += " --parallel"
+ }
+ if o.runOpts.ContinueOnError {
+ cmd += " --continue"
+ }
+ if o.runOpts.DryRun {
+ if o.runOpts.DryRunJSON {
+ cmd += " --dry=json"
+ } else {
+ cmd += " --dry"
+ }
+ }
+ if len(o.runOpts.PassThroughArgs) > 0 {
+ cmd += " -- " + strings.Join(o.runOpts.PassThroughArgs, " ")
+ }
+ return cmd
+}
+
+// getDefaultOptions returns the default set of Opts for every run
+func getDefaultOptions() *Opts {
+ return &Opts{
+ runOpts: util.RunOpts{
+ Concurrency: 10,
+ },
+ clientOpts: client.Opts{
+ Timeout: client.ClientTimeout,
+ },
+ }
+}
diff --git a/cli/internal/run/run_spec_test.go b/cli/internal/run/run_spec_test.go
new file mode 100644
index 0000000..2bcfe2b
--- /dev/null
+++ b/cli/internal/run/run_spec_test.go
@@ -0,0 +1,107 @@
+package run
+
+import (
+ "testing"
+
+ "github.com/vercel/turbo/cli/internal/scope"
+ "github.com/vercel/turbo/cli/internal/util"
+)
+
+func TestSynthesizeCommand(t *testing.T) {
+ testCases := []struct {
+ filterPatterns []string
+ legacyFilter scope.LegacyFilter
+ passThroughArgs []string
+ parallel bool
+ continueOnError bool
+ dryRun bool
+ dryRunJSON bool
+ tasks []string
+ expected string
+ }{
+ {
+ filterPatterns: []string{"my-app"},
+ tasks: []string{"build"},
+ expected: "turbo run build --filter=my-app",
+ },
+ {
+ filterPatterns: []string{"my-app"},
+ tasks: []string{"build"},
+ passThroughArgs: []string{"-v", "--foo=bar"},
+ expected: "turbo run build --filter=my-app -- -v --foo=bar",
+ },
+ {
+ legacyFilter: scope.LegacyFilter{
+ Entrypoints: []string{"my-app"},
+ SkipDependents: true,
+ },
+ tasks: []string{"build"},
+ passThroughArgs: []string{"-v", "--foo=bar"},
+ expected: "turbo run build --filter=my-app -- -v --foo=bar",
+ },
+ {
+ legacyFilter: scope.LegacyFilter{
+ Entrypoints: []string{"my-app"},
+ SkipDependents: true,
+ },
+ filterPatterns: []string{"other-app"},
+ tasks: []string{"build"},
+ passThroughArgs: []string{"-v", "--foo=bar"},
+ expected: "turbo run build --filter=other-app --filter=my-app -- -v --foo=bar",
+ },
+ {
+ legacyFilter: scope.LegacyFilter{
+ Entrypoints: []string{"my-app"},
+ IncludeDependencies: true,
+ Since: "some-ref",
+ },
+ filterPatterns: []string{"other-app"},
+ tasks: []string{"build"},
+ expected: "turbo run build --filter=other-app --filter=...my-app...[some-ref]...",
+ },
+ {
+ filterPatterns: []string{"my-app"},
+ tasks: []string{"build"},
+ parallel: true,
+ continueOnError: true,
+ expected: "turbo run build --filter=my-app --parallel --continue",
+ },
+ {
+ filterPatterns: []string{"my-app"},
+ tasks: []string{"build"},
+ dryRun: true,
+ expected: "turbo run build --filter=my-app --dry",
+ },
+ {
+ filterPatterns: []string{"my-app"},
+ tasks: []string{"build"},
+ dryRun: true,
+ dryRunJSON: true,
+ expected: "turbo run build --filter=my-app --dry=json",
+ },
+ }
+
+ for _, testCase := range testCases {
+ testCase := testCase
+ t.Run(testCase.expected, func(t *testing.T) {
+ o := Opts{
+ scopeOpts: scope.Opts{
+ FilterPatterns: testCase.filterPatterns,
+ LegacyFilter: testCase.legacyFilter,
+ },
+ runOpts: util.RunOpts{
+ PassThroughArgs: testCase.passThroughArgs,
+ Parallel: testCase.parallel,
+ ContinueOnError: testCase.continueOnError,
+ DryRun: testCase.dryRun,
+ DryRunJSON: testCase.dryRunJSON,
+ },
+ }
+ cmd := o.SynthesizeCommand(testCase.tasks)
+ if cmd != testCase.expected {
+ t.Errorf("SynthesizeCommand() got %v, want %v", cmd, testCase.expected)
+ }
+ })
+ }
+}
diff --git a/cli/internal/runcache/output_watcher.go b/cli/internal/runcache/output_watcher.go
new file mode 100644
index 0000000..5f90f0e
--- /dev/null
+++ b/cli/internal/runcache/output_watcher.go
@@ -0,0 +1,32 @@
+package runcache
+
+import (
+ "context"
+
+ "github.com/vercel/turbo/cli/internal/fs"
+)
+
+// OutputWatcher instances are responsible for tracking changes to task outputs
+type OutputWatcher interface {
+ // GetChangedOutputs returns which of the given globs have changed since the specified hash was last run
+ GetChangedOutputs(ctx context.Context, hash string, repoRelativeOutputGlobs []string) ([]string, error)
+ // NotifyOutputsWritten tells the watcher that the given globs have been cached with the specified hash
+ NotifyOutputsWritten(ctx context.Context, hash string, repoRelativeOutputGlobs fs.TaskOutputs) error
+}
+
+// NoOpOutputWatcher implements OutputWatcher, but always considers every glob to have changed
+type NoOpOutputWatcher struct{}
+
+var _ OutputWatcher = (*NoOpOutputWatcher)(nil)
+
+// GetChangedOutputs implements OutputWatcher.GetChangedOutputs.
+// Since this is a no-op watcher, no tracking is done.
+func (NoOpOutputWatcher) GetChangedOutputs(ctx context.Context, hash string, repoRelativeOutputGlobs []string) ([]string, error) {
+ return repoRelativeOutputGlobs, nil
+}
+
+// NotifyOutputsWritten implements OutputWatcher.NotifyOutputsWritten.
+// Since this is a no-op watcher, nothing is recorded.
+func (NoOpOutputWatcher) NotifyOutputsWritten(ctx context.Context, hash string, repoRelativeOutputGlobs fs.TaskOutputs) error {
+ return nil
+}
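+
+// exampleOutputWatcherUsage is an editor-added sketch (not part of the
+// original change) of the intended call pattern: ask which output globs have
+// changed since a hash last ran, restore only those, then record what was
+// written. The hash and glob values are illustrative.
+func exampleOutputWatcherUsage(ctx context.Context) error {
+	var watcher OutputWatcher = NoOpOutputWatcher{}
+	globs := []string{"apps/web/dist/**"}
+	changed, err := watcher.GetChangedOutputs(ctx, "some-task-hash", globs)
+	if err != nil {
+		return err
+	}
+	_ = changed // with NoOpOutputWatcher this is always the full glob list
+	return watcher.NotifyOutputsWritten(ctx, "some-task-hash", fs.TaskOutputs{Inclusions: globs})
+}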
diff --git a/cli/internal/runcache/runcache.go b/cli/internal/runcache/runcache.go
new file mode 100644
index 0000000..ba6145b
--- /dev/null
+++ b/cli/internal/runcache/runcache.go
@@ -0,0 +1,354 @@
+package runcache
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/fatih/color"
+ "github.com/hashicorp/go-hclog"
+ "github.com/mitchellh/cli"
+ "github.com/vercel/turbo/cli/internal/cache"
+ "github.com/vercel/turbo/cli/internal/colorcache"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/globby"
+ "github.com/vercel/turbo/cli/internal/logstreamer"
+ "github.com/vercel/turbo/cli/internal/nodes"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "github.com/vercel/turbo/cli/internal/ui"
+ "github.com/vercel/turbo/cli/internal/util"
+)
+
+// LogReplayer is a function that is responsible for replaying the contents of a given log file
+type LogReplayer = func(logger hclog.Logger, output *cli.PrefixedUi, logFile turbopath.AbsoluteSystemPath)
+
+// Opts holds the configurable options for a RunCache instance
+type Opts struct {
+ SkipReads bool
+ SkipWrites bool
+ TaskOutputModeOverride *util.TaskOutputMode
+ LogReplayer LogReplayer
+ OutputWatcher OutputWatcher
+}
+
+// SetTaskOutputMode parses the task output mode from string and then sets it in opts
+func (opts *Opts) SetTaskOutputMode(value string) error {
+ outputMode, err := util.FromTaskOutputModeString(value)
+ if err != nil {
+ return fmt.Errorf("must be one of \"%v\"", TaskOutputModes())
+ }
+ opts.TaskOutputModeOverride = &outputMode
+ return nil
+}
+
+// TaskOutputModes creates the description string for task outputs
+func TaskOutputModes() string {
+ var builder strings.Builder
+
+ first := true
+ for _, mode := range util.TaskOutputModeStrings {
+ if !first {
+ builder.WriteString("|")
+ }
+ first = false
+ builder.WriteString(string(mode))
+ }
+ return builder.String()
+}
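+
+// exampleTaskOutputModesUsage is an editor-added sketch (not part of the
+// original change): the joined string is meant for flag help/error text,
+// rendering the known modes pipe-separated (e.g. something like "full|none|...").
+func exampleTaskOutputModesUsage() string {
+	return fmt.Sprintf("--output-logs must be one of %q", TaskOutputModes())
+}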
+
+// RunCache represents the interface to the cache for a single `turbo run`
+type RunCache struct {
+ taskOutputModeOverride *util.TaskOutputMode
+ cache cache.Cache
+ readsDisabled bool
+ writesDisabled bool
+ repoRoot turbopath.AbsoluteSystemPath
+ logReplayer LogReplayer
+ outputWatcher OutputWatcher
+ colorCache *colorcache.ColorCache
+}
+
+// New returns a new instance of RunCache, wrapping the given cache
+func New(cache cache.Cache, repoRoot turbopath.AbsoluteSystemPath, opts Opts, colorCache *colorcache.ColorCache) *RunCache {
+ rc := &RunCache{
+ taskOutputModeOverride: opts.TaskOutputModeOverride,
+ cache: cache,
+ readsDisabled: opts.SkipReads,
+ writesDisabled: opts.SkipWrites,
+ repoRoot: repoRoot,
+ logReplayer: opts.LogReplayer,
+ outputWatcher: opts.OutputWatcher,
+ colorCache: colorCache,
+ }
+
+ if rc.logReplayer == nil {
+ rc.logReplayer = defaultLogReplayer
+ }
+ if rc.outputWatcher == nil {
+ rc.outputWatcher = &NoOpOutputWatcher{}
+ }
+ return rc
+}
+
+// TaskCache represents a single package-task's interface to the RunCache
+// and controls access to the task's outputs
+type TaskCache struct {
+ ExpandedOutputs []turbopath.AnchoredSystemPath
+ rc *RunCache
+ repoRelativeGlobs fs.TaskOutputs
+ hash string
+ pt *nodes.PackageTask
+ taskOutputMode util.TaskOutputMode
+ cachingDisabled bool
+ LogFileName turbopath.AbsoluteSystemPath
+}
+
+// RestoreOutputs attempts to restore output for the corresponding task from the cache.
+// Returns the cacheStatus, the timeSaved, and error values, so the consumer
+// can understand what happened.
+func (tc *TaskCache) RestoreOutputs(ctx context.Context, prefixedUI *cli.PrefixedUi, progressLogger hclog.Logger) (cache.ItemStatus, int, error) {
+ if tc.cachingDisabled || tc.rc.readsDisabled {
+ if tc.taskOutputMode != util.NoTaskOutput && tc.taskOutputMode != util.ErrorTaskOutput {
+ prefixedUI.Output(fmt.Sprintf("cache bypass, force executing %s", ui.Dim(tc.hash)))
+ }
+ return cache.ItemStatus{Local: false, Remote: false}, 0, nil
+ }
+
+ changedOutputGlobs, err := tc.rc.outputWatcher.GetChangedOutputs(ctx, tc.hash, tc.repoRelativeGlobs.Inclusions)
+ if err != nil {
+ progressLogger.Warn(fmt.Sprintf("Failed to check if we can skip restoring outputs for %v: %v. Proceeding to check cache", tc.pt.TaskID, err))
+ prefixedUI.Warn(ui.Dim(fmt.Sprintf("Failed to check if we can skip restoring outputs for %v: %v. Proceeding to check cache", tc.pt.TaskID, err)))
+ changedOutputGlobs = tc.repoRelativeGlobs.Inclusions
+ }
+
+ hasChangedOutputs := len(changedOutputGlobs) > 0
+ var cacheStatus cache.ItemStatus
+ var timeSaved int
+ if hasChangedOutputs {
+ // Note that we currently don't use the output globs when restoring, but we could in the
+ // future to avoid doing unnecessary file I/O. We also need to pass along the exclusion
+ // globs as well.
+ itemStatus, restoredFiles, duration, err := tc.rc.cache.Fetch(tc.rc.repoRoot, tc.hash, nil)
+ hit := itemStatus.Local || itemStatus.Remote
+ timeSaved = duration
+ tc.ExpandedOutputs = restoredFiles
+		// Assign to the variable declared above so we can return it at the end of the function
+ cacheStatus = itemStatus
+ if err != nil {
+ // If there was an error fetching from cache, we'll say there was no cache hit
+ return cache.ItemStatus{Local: false, Remote: false}, 0, err
+ } else if !hit {
+ if tc.taskOutputMode != util.NoTaskOutput && tc.taskOutputMode != util.ErrorTaskOutput {
+ prefixedUI.Output(fmt.Sprintf("cache miss, executing %s", ui.Dim(tc.hash)))
+ }
+			// No hit anywhere: report a miss with zero time saved
+ return cache.ItemStatus{Local: false, Remote: false}, 0, nil
+ }
+
+ if err := tc.rc.outputWatcher.NotifyOutputsWritten(ctx, tc.hash, tc.repoRelativeGlobs); err != nil {
+ // Don't fail the whole operation just because we failed to watch the outputs
+ prefixedUI.Warn(ui.Dim(fmt.Sprintf("Failed to mark outputs as cached for %v: %v", tc.pt.TaskID, err)))
+ }
+ } else {
+ // If no outputs have changed, that means we have a local cache hit.
+ cacheStatus.Local = true
+ prefixedUI.Warn(fmt.Sprintf("Skipping cache check for %v, outputs have not changed since previous run.", tc.pt.TaskID))
+ }
+
+ switch tc.taskOutputMode {
+ // When only showing new task output, cached output should only show the computed hash
+ case util.NewTaskOutput:
+ fallthrough
+ case util.HashTaskOutput:
+ prefixedUI.Info(fmt.Sprintf("cache hit, suppressing output %s", ui.Dim(tc.hash)))
+ case util.FullTaskOutput:
+ progressLogger.Debug("log file", "path", tc.LogFileName)
+ prefixedUI.Info(fmt.Sprintf("cache hit, replaying output %s", ui.Dim(tc.hash)))
+ tc.ReplayLogFile(prefixedUI, progressLogger)
+ case util.ErrorTaskOutput:
+ // The task succeeded, so we don't output anything in this case
+ default:
+	// NoTaskOutput: do not output anything
+ }
+ // TODO: timeSaved could be part of cacheStatus, so we don't have to make a new struct
+ // downstream, but this would be a more invasive change right now.
+ return cacheStatus, timeSaved, nil
+}
+
+// ReplayLogFile writes out the stored logfile to the terminal
+func (tc TaskCache) ReplayLogFile(prefixedUI *cli.PrefixedUi, progressLogger hclog.Logger) {
+ if tc.LogFileName.FileExists() {
+ tc.rc.logReplayer(progressLogger, prefixedUI, tc.LogFileName)
+ }
+}
+
+// OnError replays the logfile if --output-mode=errors-only.
+// This is called if the task exited with a non-zero exit code.
+func (tc TaskCache) OnError(terminal *cli.PrefixedUi, logger hclog.Logger) {
+ if tc.taskOutputMode == util.ErrorTaskOutput {
+ tc.ReplayLogFile(terminal, logger)
+ }
+}
+
+// nopWriteCloser is modeled after io.NopCloser, which is for Readers
+type nopWriteCloser struct {
+ io.Writer
+}
+
+func (nopWriteCloser) Close() error { return nil }
+
+type fileWriterCloser struct {
+ io.Writer
+ file *os.File
+ bufio *bufio.Writer
+}
+
+func (fwc *fileWriterCloser) Close() error {
+ if err := fwc.bufio.Flush(); err != nil {
+ return err
+ }
+ return fwc.file.Close()
+}
+
+// OutputWriter creates a sink suitable for handling the output of the command associated
+// with this task.
+func (tc TaskCache) OutputWriter(prefix string) (io.WriteCloser, error) {
+ // an os.Stdout wrapper that will add prefixes before printing to stdout
+ stdoutWriter := logstreamer.NewPrettyStdoutWriter(prefix)
+
+ if tc.cachingDisabled || tc.rc.writesDisabled {
+ return nopWriteCloser{stdoutWriter}, nil
+ }
+ // Setup log file
+ if err := tc.LogFileName.EnsureDir(); err != nil {
+ return nil, err
+ }
+
+ output, err := tc.LogFileName.Create()
+ if err != nil {
+ return nil, err
+ }
+
+ bufWriter := bufio.NewWriter(output)
+ fwc := &fileWriterCloser{
+ file: output,
+ bufio: bufWriter,
+ }
+ if tc.taskOutputMode == util.NoTaskOutput || tc.taskOutputMode == util.HashTaskOutput || tc.taskOutputMode == util.ErrorTaskOutput {
+ // only write to log file, not to stdout
+ fwc.Writer = bufWriter
+ } else {
+ fwc.Writer = io.MultiWriter(stdoutWriter, bufWriter)
+ }
+
+ return fwc, nil
+}
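+
+// exampleOutputWriterUsage is an editor-added sketch (not part of the
+// original change): the returned WriteCloser tees a task's output into its
+// log file (and stdout, depending on output mode) and must be closed to
+// flush the underlying bufio.Writer. The prefix string is illustrative.
+func exampleOutputWriterUsage(tc TaskCache) error {
+	writer, err := tc.OutputWriter("web:build: ")
+	if err != nil {
+		return err
+	}
+	if _, err := io.WriteString(writer, "compiled successfully\n"); err != nil {
+		_ = writer.Close()
+		return err
+	}
+	return writer.Close() // flushes buffered log data and closes the file
+}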
+
+var _emptyIgnore []string
+
+// SaveOutputs is responsible for saving the outputs of a task to the cache, after the task has completed
+func (tc *TaskCache) SaveOutputs(ctx context.Context, logger hclog.Logger, terminal cli.Ui, duration int) error {
+ if tc.cachingDisabled || tc.rc.writesDisabled {
+ return nil
+ }
+
+ logger.Debug("caching output", "outputs", tc.repoRelativeGlobs)
+
+ filesToBeCached, err := globby.GlobAll(tc.rc.repoRoot.ToStringDuringMigration(), tc.repoRelativeGlobs.Inclusions, tc.repoRelativeGlobs.Exclusions)
+ if err != nil {
+ return err
+ }
+
+ relativePaths := make([]turbopath.AnchoredSystemPath, len(filesToBeCached))
+
+ for index, value := range filesToBeCached {
+ relativePath, err := tc.rc.repoRoot.RelativePathString(value)
+ if err != nil {
+ logger.Error(fmt.Sprintf("error: %v", err))
+ terminal.Error(fmt.Sprintf("%s%s", ui.ERROR_PREFIX, color.RedString(" %v", fmt.Errorf("File path cannot be made relative: %w", err))))
+ continue
+ }
+ relativePaths[index] = fs.UnsafeToAnchoredSystemPath(relativePath)
+ }
+
+ if err = tc.rc.cache.Put(tc.rc.repoRoot, tc.hash, duration, relativePaths); err != nil {
+ return err
+ }
+ err = tc.rc.outputWatcher.NotifyOutputsWritten(ctx, tc.hash, tc.repoRelativeGlobs)
+ if err != nil {
+ // Don't fail the cache write because we also failed to record it, we will just do
+ // extra I/O in the future restoring files that haven't changed from cache
+ logger.Warn(fmt.Sprintf("Failed to mark outputs as cached for %v: %v", tc.pt.TaskID, err))
+ terminal.Warn(ui.Dim(fmt.Sprintf("Failed to mark outputs as cached for %v: %v", tc.pt.TaskID, err)))
+ }
+
+ tc.ExpandedOutputs = relativePaths
+
+ return nil
+}
+
+// TaskCache returns a TaskCache instance, providing an interface to the underlying cache specific
+// to this run and the given PackageTask
+func (rc *RunCache) TaskCache(pt *nodes.PackageTask, hash string) TaskCache {
+ logFileName := rc.repoRoot.UntypedJoin(pt.LogFile)
+ hashableOutputs := pt.HashableOutputs()
+ repoRelativeGlobs := fs.TaskOutputs{
+ Inclusions: make([]string, len(hashableOutputs.Inclusions)),
+ Exclusions: make([]string, len(hashableOutputs.Exclusions)),
+ }
+
+ for index, output := range hashableOutputs.Inclusions {
+ repoRelativeGlobs.Inclusions[index] = filepath.Join(pt.Pkg.Dir.ToStringDuringMigration(), output)
+ }
+ for index, output := range hashableOutputs.Exclusions {
+ repoRelativeGlobs.Exclusions[index] = filepath.Join(pt.Pkg.Dir.ToStringDuringMigration(), output)
+ }
+
+ taskOutputMode := pt.TaskDefinition.OutputMode
+ if rc.taskOutputModeOverride != nil {
+ taskOutputMode = *rc.taskOutputModeOverride
+ }
+
+ return TaskCache{
+ ExpandedOutputs: []turbopath.AnchoredSystemPath{},
+ rc: rc,
+ repoRelativeGlobs: repoRelativeGlobs,
+ hash: hash,
+ pt: pt,
+ taskOutputMode: taskOutputMode,
+ cachingDisabled: !pt.TaskDefinition.ShouldCache,
+ LogFileName: logFileName,
+ }
+}
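+
+// exampleTaskCacheLifecycle is an editor-added sketch (not part of the
+// original change) of the per-task flow: probe the cache, run the task on a
+// miss, then persist its outputs with the measured duration (0 here as a
+// placeholder).
+func exampleTaskCacheLifecycle(ctx context.Context, rc *RunCache, pt *nodes.PackageTask, hash string, prefixedUI *cli.PrefixedUi, logger hclog.Logger, terminal cli.Ui) error {
+	tc := rc.TaskCache(pt, hash)
+	status, _, err := tc.RestoreOutputs(ctx, prefixedUI, logger)
+	if err != nil {
+		return err
+	}
+	if !status.Local && !status.Remote {
+		// cache miss: execute the task here, writing through tc.OutputWriter ...
+		return tc.SaveOutputs(ctx, logger, terminal, 0)
+	}
+	return nil
+}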
+
+// defaultLogReplayer will try to replay logs back to the given Ui instance
+func defaultLogReplayer(logger hclog.Logger, output *cli.PrefixedUi, logFileName turbopath.AbsoluteSystemPath) {
+ logger.Debug("start replaying logs")
+ f, err := logFileName.Open()
+	if err != nil {
+		output.Warn(fmt.Sprintf("error reading logs: %v", err))
+		logger.Error(fmt.Sprintf("error reading logs: %v", err.Error()))
+		// Bail out: continuing with a nil file would panic in the scanner below
+		return
+	}
+ defer func() { _ = f.Close() }()
+ scan := bufio.NewScanner(f)
+ for scan.Scan() {
+ str := string(scan.Bytes())
+ // cli.PrefixedUi won't prefix empty strings (it'll just print them as empty strings).
+ // So if we have a blank string, we'll just output the string here, instead of passing
+ // it onto the PrefixedUi.
+ if str == "" {
+ // Just output the prefix if the current line is a blank string
+ // Note: output.OutputPrefix is also a colored prefix already
+ output.Ui.Output(output.OutputPrefix)
+ } else {
+ // Writing to Stdout
+ output.Output(str)
+ }
+	}
+ logger.Debug("finish replaying logs")
+}
diff --git a/cli/internal/runsummary/execution_summary.go b/cli/internal/runsummary/execution_summary.go
new file mode 100644
index 0000000..fabb690
--- /dev/null
+++ b/cli/internal/runsummary/execution_summary.go
@@ -0,0 +1,282 @@
+package runsummary
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "sync"
+ "time"
+
+ "github.com/vercel/turbo/cli/internal/chrometracing"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+
+ "github.com/mitchellh/cli"
+)
+
+// executionEvent represents a single event in the build process, i.e. a target starting or finishing
+// building, or reaching some milestone within those steps.
+type executionEvent struct {
+ // Timestamp of this event
+ Time time.Time
+ // Duration of this event
+ Duration time.Duration
+ // Target which has just changed
+ Label string
+ // Its current status
+ Status executionEventName
+ // Error, only populated for failure statuses
+ Err string
+
+ exitCode *int
+}
+
+// executionEventName represents the status of a target when we log a build result.
+type executionEventName int
+
+// The collection of expected build result statuses.
+const (
+ targetInitialized executionEventName = iota
+ TargetBuilding
+ TargetBuildStopped
+ TargetExecuted
+ TargetBuilt
+ TargetCached
+ TargetBuildFailed
+)
+
+func (en executionEventName) toString() string {
+ switch en {
+ case targetInitialized:
+ return "initialized"
+ case TargetBuilding:
+ return "building"
+ case TargetBuildStopped:
+ return "buildStopped"
+ case TargetExecuted:
+ return "executed"
+ case TargetBuilt:
+ return "built"
+ case TargetCached:
+ return "cached"
+ case TargetBuildFailed:
+ return "buildFailed"
+ }
+
+ return ""
+}
+
+// TaskExecutionSummary contains data about the state of a single task in a turbo run.
+// Some fields are updated over time as the task prepares to execute and finishes execution.
+type TaskExecutionSummary struct {
+ startAt time.Time // set once
+ status executionEventName // current status, updated during execution
+ err string // only populated for failure statuses
+ Duration time.Duration // updated during the task execution
+ exitCode *int // pointer so we can distinguish between 0 and unknown.
+}
+
+func (ts *TaskExecutionSummary) endTime() time.Time {
+ return ts.startAt.Add(ts.Duration)
+}
+
+// MarshalJSON munges the TaskExecutionSummary into a format we want
+// We'll use an anonymous, private struct for this, so it's not confusingly duplicated
+func (ts *TaskExecutionSummary) MarshalJSON() ([]byte, error) {
+ serializable := struct {
+ Start int64 `json:"startTime"`
+ End int64 `json:"endTime"`
+ Err string `json:"error,omitempty"`
+ ExitCode *int `json:"exitCode"`
+ }{
+ Start: ts.startAt.UnixMilli(),
+ End: ts.endTime().UnixMilli(),
+ Err: ts.err,
+ ExitCode: ts.exitCode,
+ }
+
+ return json.Marshal(&serializable)
+}
+
+// ExitCode returns the exit code; nil means no exit code was received
+func (ts *TaskExecutionSummary) ExitCode() *int {
+ var exitCode int
+ if ts.exitCode == nil {
+ return nil
+ }
+ exitCode = *ts.exitCode
+ return &exitCode
+}
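+
+// Editor's note (not part of the original change): ExitCode returns a copy
+// so callers cannot mutate the summary's internal pointer, e.g.:
+//
+//	if code := ts.ExitCode(); code != nil && *code != 0 {
+//		// handle failure; writing to *code does not affect ts
+//	}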
+
+// executionSummary is the state of the entire `turbo run`. Individual task state is in the `tasks` field
+type executionSummary struct {
+	// mu guards reads/writes to the `tasks` field
+ mu sync.Mutex
+ tasks map[string]*TaskExecutionSummary // key is a taskID
+ profileFilename string
+
+ // These get serialized to JSON
+ command string // a synthesized turbo command to produce this invocation
+ repoPath turbopath.RelativeSystemPath // the (possibly empty) path from the turborepo root to where the command was run
+ success int // number of tasks that exited successfully (does not include cache hits)
+ failure int // number of tasks that exited with failure
+ cached int // number of tasks that had a cache hit
+ attempted int // number of tasks that started
+ startedAt time.Time
+ endedAt time.Time
+ exitCode int
+}
+
+// MarshalJSON munges the executionSummary into a format we want
+// We'll use an anonymous, private struct for this, so it's not confusingly duplicated.
+func (es *executionSummary) MarshalJSON() ([]byte, error) {
+ serializable := struct {
+ Command string `json:"command"`
+ RepoPath string `json:"repoPath"`
+ Success int `json:"success"`
+ Failure int `json:"failed"`
+ Cached int `json:"cached"`
+ Attempted int `json:"attempted"`
+ StartTime int64 `json:"startTime"`
+ EndTime int64 `json:"endTime"`
+ ExitCode int `json:"exitCode"`
+ }{
+ Command: es.command,
+ RepoPath: es.repoPath.ToString(),
+ StartTime: es.startedAt.UnixMilli(),
+ EndTime: es.endedAt.UnixMilli(),
+ Success: es.success,
+ Failure: es.failure,
+ Cached: es.cached,
+ Attempted: es.attempted,
+ ExitCode: es.exitCode,
+ }
+
+ return json.Marshal(&serializable)
+}
+
+// newExecutionSummary creates an executionSummary instance to track events in a `turbo run`.
+func newExecutionSummary(command string, repoPath turbopath.RelativeSystemPath, start time.Time, tracingProfile string) *executionSummary {
+ if tracingProfile != "" {
+ chrometracing.EnableTracing()
+ }
+
+ return &executionSummary{
+ command: command,
+ repoPath: repoPath,
+ success: 0,
+ failure: 0,
+ cached: 0,
+ attempted: 0,
+ tasks: make(map[string]*TaskExecutionSummary),
+ startedAt: start,
+ profileFilename: tracingProfile,
+ }
+}
+
+// run starts the execution of a single task. It returns a function that can
+// be used to update the state of a given taskID with the executionEventName enum.
+func (es *executionSummary) run(taskID string) (func(outcome executionEventName, err error, exitCode *int), *TaskExecutionSummary) {
+ start := time.Now()
+ taskExecutionSummary := es.add(&executionEvent{
+ Time: start,
+ Label: taskID,
+ Status: targetInitialized,
+ })
+
+ tracer := chrometracing.Event(taskID)
+
+ // This function can be called with an enum and an optional error to update
+ // the state of a given taskID.
+ tracerFn := func(outcome executionEventName, err error, exitCode *int) {
+ defer tracer.Done()
+ now := time.Now()
+ result := &executionEvent{
+ Time: now,
+ Duration: now.Sub(start),
+ Label: taskID,
+ Status: outcome,
+ // We'll assign this here regardless of whether it is nil, but we'll check for nil
+ // when we assign it to the taskExecutionSummary.
+ exitCode: exitCode,
+ }
+
+ if err != nil {
+ result.Err = err.Error()
+ }
+
+ // Ignore the return value here
+ es.add(result)
+ }
+
+ return tracerFn, taskExecutionSummary
+}
+
+func (es *executionSummary) add(event *executionEvent) *TaskExecutionSummary {
+ es.mu.Lock()
+ defer es.mu.Unlock()
+
+ var taskExecSummary *TaskExecutionSummary
+ if ts, ok := es.tasks[event.Label]; ok {
+ // If we already know about this task, we'll update it with the new event
+ taskExecSummary = ts
+ } else {
+ // If we don't know about it yet, init and add it into the parent struct
+		// (event.Status should always be `targetInitialized` here.)
+ taskExecSummary = &TaskExecutionSummary{startAt: event.Time}
+ es.tasks[event.Label] = taskExecSummary
+ }
+
+ // Update the Status, Duration, and Err fields
+ taskExecSummary.status = event.Status
+ taskExecSummary.err = event.Err
+ taskExecSummary.Duration = event.Duration
+
+ if event.exitCode != nil {
+ taskExecSummary.exitCode = event.exitCode
+ }
+
+ switch {
+ case event.Status == TargetBuilding:
+ es.attempted++
+ case event.Status == TargetBuildFailed:
+ es.failure++
+ case event.Status == TargetCached:
+ es.cached++
+ case event.Status == TargetBuilt:
+ es.success++
+ }
+
+ return es.tasks[event.Label]
+}
+
+// writeChrometracing writes tracing data to a profile file if the `--profile` flag was passed to turbo run
+func writeChrometracing(filename string, terminal cli.Ui) error {
+ outputPath := chrometracing.Path()
+ if outputPath == "" {
+ // tracing wasn't enabled
+ return nil
+ }
+
+ name := fmt.Sprintf("turbo-%s.trace", time.Now().Format(time.RFC3339))
+ if filename != "" {
+ name = filename
+ }
+ if err := chrometracing.Close(); err != nil {
+ terminal.Warn(fmt.Sprintf("Failed to flush tracing data: %v", err))
+ }
+ cwdRaw, err := os.Getwd()
+ if err != nil {
+ return err
+ }
+ root, err := fs.GetCwd(cwdRaw)
+ if err != nil {
+ return err
+ }
+	// chrometracing.Path() is absolute by default, but can still be relative if overridden via $CHROMETRACING_DIR
+ // so we have to account for that before converting to turbopath.AbsoluteSystemPath
+ if err := fs.CopyFile(&fs.LstatCachedFile{Path: fs.ResolveUnknownPath(root, outputPath)}, name); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/cli/internal/runsummary/format_execution_summary.go b/cli/internal/runsummary/format_execution_summary.go
new file mode 100644
index 0000000..37092be
--- /dev/null
+++ b/cli/internal/runsummary/format_execution_summary.go
@@ -0,0 +1,70 @@
+package runsummary
+
+import (
+ "os"
+ "time"
+
+ "github.com/fatih/color"
+ internalUI "github.com/vercel/turbo/cli/internal/ui"
+ "github.com/vercel/turbo/cli/internal/util"
+)
+
+func (rsm *Meta) printExecutionSummary() {
+ maybeFullTurbo := ""
+ summary := rsm.RunSummary
+ ui := rsm.ui
+
+ attempted := summary.ExecutionSummary.attempted
+ successful := summary.ExecutionSummary.cached + summary.ExecutionSummary.success
+ cached := summary.ExecutionSummary.cached
+ // TODO: can we use a method on ExecutionSummary here?
+ duration := time.Since(summary.ExecutionSummary.startedAt).Truncate(time.Millisecond)
+
+ if cached == attempted && attempted > 0 {
+ terminalProgram := os.Getenv("TERM_PROGRAM")
+		// On the macOS Terminal, the rainbow colors show up as a magenta background
+		// with gray text on a single letter. Instead, we print in bold magenta
+ if terminalProgram == "Apple_Terminal" {
+ fallbackTurboColor := color.New(color.FgHiMagenta, color.Bold).SprintFunc()
+ maybeFullTurbo = fallbackTurboColor(">>> FULL TURBO")
+ } else {
+ maybeFullTurbo = internalUI.Rainbow(">>> FULL TURBO")
+ }
+ }
+
+ if attempted == 0 {
+		ui.Output("") // Print a blank line
+ ui.Warn("No tasks were executed as part of this run.")
+ }
+
+	ui.Output("") // Print a blank line
+	spacer := "    " // 4 chars
+
+ var lines []string
+
+ // The only difference between these two branches is that when there is a run summary
+ // we print the path to that file and we adjust the whitespace in the printed text so it aligns.
+ // We could just always align to account for the summary line, but that would require a whole
+ // bunch of test output assertions to change.
+ if rsm.getPath().FileExists() {
+ lines = []string{
+ util.Sprintf("${BOLD} Tasks:${BOLD_GREEN}%s%v successful${RESET}${GRAY}, %v total${RESET}", spacer, successful, attempted),
+ util.Sprintf("${BOLD} Cached:%s%v cached${RESET}${GRAY}, %v total${RESET}", spacer, cached, attempted),
+ util.Sprintf("${BOLD} Time:%s%v${RESET} %v${RESET}", spacer, duration, maybeFullTurbo),
+ util.Sprintf("${BOLD}Summary:%s%s${RESET}", spacer, rsm.getPath()),
+ }
+ } else {
+ lines = []string{
+ util.Sprintf("${BOLD} Tasks:${BOLD_GREEN}%s%v successful${RESET}${GRAY}, %v total${RESET}", spacer, successful, attempted),
+ util.Sprintf("${BOLD}Cached:%s%v cached${RESET}${GRAY}, %v total${RESET}", spacer, cached, attempted),
+ util.Sprintf("${BOLD} Time:%s%v${RESET} %v${RESET}", spacer, duration, maybeFullTurbo),
+ }
+ }
+
+ // Print the real thing
+ for _, line := range lines {
+ ui.Output(line)
+ }
+
+ ui.Output("")
+}
diff --git a/cli/internal/runsummary/format_json.go b/cli/internal/runsummary/format_json.go
new file mode 100644
index 0000000..76a0a40
--- /dev/null
+++ b/cli/internal/runsummary/format_json.go
@@ -0,0 +1,66 @@
+package runsummary
+
+import (
+ "encoding/json"
+
+ "github.com/pkg/errors"
+ "github.com/segmentio/ksuid"
+ "github.com/vercel/turbo/cli/internal/util"
+)
+
+// FormatJSON returns a JSON representation of a RunSummary
+func (rsm *Meta) FormatJSON() ([]byte, error) {
+ rsm.normalize() // normalize data
+
+ var bytes []byte
+ var err error
+
+ if rsm.singlePackage {
+ bytes, err = json.MarshalIndent(nonMonorepoRunSummary(*rsm.RunSummary), "", " ")
+ } else {
+ bytes, err = json.MarshalIndent(rsm.RunSummary, "", " ")
+ }
+
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to render JSON")
+ }
+ return bytes, nil
+}
+
+func (rsm *Meta) normalize() {
+ for _, t := range rsm.RunSummary.Tasks {
+ t.EnvVars.Global = rsm.RunSummary.GlobalHashSummary.envVars
+ t.EnvVars.GlobalPassthrough = rsm.RunSummary.GlobalHashSummary.passthroughEnvVars
+ }
+
+ // Remove execution summary for dry runs
+ if rsm.runType == runTypeDryJSON {
+ rsm.RunSummary.ExecutionSummary = nil
+ }
+
+ // For single packages, we don't need the Packages
+ // and each task summary needs some cleaning.
+ if rsm.singlePackage {
+ rsm.RunSummary.Packages = []string{}
+
+ for _, task := range rsm.RunSummary.Tasks {
+ task.cleanForSinglePackage()
+ }
+ }
+}
+
+// nonMonorepoRunSummary is an exact copy of RunSummary, but the JSON tags are structured
+// for rendering a single-package run of turbo. Notably, we want to always omit packages
+// since there is no concept of packages in a single-workspace repo.
+// This struct exists solely for the purpose of serializing to JSON and should not be
+// used anywhere else.
+type nonMonorepoRunSummary struct {
+ ID ksuid.KSUID `json:"id"`
+ Version string `json:"version"`
+ TurboVersion string `json:"turboVersion"`
+ GlobalHashSummary *GlobalHashSummary `json:"globalCacheInputs"`
+ Packages []string `json:"-"`
+ EnvMode util.EnvMode `json:"envMode"`
+ ExecutionSummary *executionSummary `json:"execution,omitempty"`
+ Tasks []*TaskSummary `json:"tasks"`
+}
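+
+// Editor's note (not part of the original change): the direct conversion
+// nonMonorepoRunSummary(*rsm.RunSummary) above compiles because Go allows
+// converting between struct types whose fields match in name and type when
+// only the struct tags differ; any drift between the two types becomes a
+// compile error rather than a silent serialization bug.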
diff --git a/cli/internal/runsummary/format_text.go b/cli/internal/runsummary/format_text.go
new file mode 100644
index 0000000..28b1638
--- /dev/null
+++ b/cli/internal/runsummary/format_text.go
@@ -0,0 +1,100 @@
+package runsummary
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "text/tabwriter"
+
+ "github.com/vercel/turbo/cli/internal/util"
+ "github.com/vercel/turbo/cli/internal/workspace"
+)
+
+// FormatAndPrintText prints a Run Summary to the Terminal UI
+func (rsm Meta) FormatAndPrintText(workspaceInfos workspace.Catalog) error {
+ ui := rsm.ui
+ summary := rsm.RunSummary
+
+ rsm.normalize() // normalize data
+
+ if !rsm.singlePackage {
+ ui.Output("")
+ ui.Info(util.Sprintf("${CYAN}${BOLD}Packages in Scope${RESET}"))
+ p := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
+ fmt.Fprintln(p, "Name\tPath\t")
+ for _, pkg := range summary.Packages {
+ fmt.Fprintf(p, "%s\t%s\t\n", pkg, workspaceInfos.PackageJSONs[pkg].Dir)
+ }
+ if err := p.Flush(); err != nil {
+ return err
+ }
+ }
+
+	fileCount := len(summary.GlobalHashSummary.GlobalFileHashMap)
+ w1 := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
+ ui.Output("")
+ ui.Info(util.Sprintf("${CYAN}${BOLD}Global Hash Inputs${RESET}"))
+ fmt.Fprintln(w1, util.Sprintf(" ${GREY}Global Files\t=\t%d${RESET}", fileCount))
+ fmt.Fprintln(w1, util.Sprintf(" ${GREY}External Dependencies Hash\t=\t%s${RESET}", summary.GlobalHashSummary.RootExternalDepsHash))
+ fmt.Fprintln(w1, util.Sprintf(" ${GREY}Global Cache Key\t=\t%s${RESET}", summary.GlobalHashSummary.GlobalCacheKey))
+ if bytes, err := json.Marshal(summary.GlobalHashSummary.Pipeline); err == nil {
+ fmt.Fprintln(w1, util.Sprintf(" ${GREY}Root pipeline\t=\t%s${RESET}", bytes))
+ }
+ if err := w1.Flush(); err != nil {
+ return err
+ }
+
+ ui.Output("")
+ ui.Info(util.Sprintf("${CYAN}${BOLD}Tasks to Run${RESET}"))
+
+ for _, task := range summary.Tasks {
+ taskName := task.TaskID
+
+ if rsm.singlePackage {
+ taskName = task.Task
+ }
+
+ ui.Info(util.Sprintf("${BOLD}%s${RESET}", taskName))
+ w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
+ fmt.Fprintln(w, util.Sprintf(" ${GREY}Task\t=\t%s\t${RESET}", task.Task))
+
+ if !rsm.singlePackage {
+ fmt.Fprintln(w, util.Sprintf(" ${GREY}Package\t=\t%s\t${RESET}", task.Package))
+ }
+ fmt.Fprintln(w, util.Sprintf(" ${GREY}Hash\t=\t%s\t${RESET}", task.Hash))
+ fmt.Fprintln(w, util.Sprintf(" ${GREY}Cached (Local)\t=\t%s\t${RESET}", strconv.FormatBool(task.CacheSummary.Local)))
+ fmt.Fprintln(w, util.Sprintf(" ${GREY}Cached (Remote)\t=\t%s\t${RESET}", strconv.FormatBool(task.CacheSummary.Remote)))
+
+ if !rsm.singlePackage {
+ fmt.Fprintln(w, util.Sprintf(" ${GREY}Directory\t=\t%s\t${RESET}", task.Dir))
+ }
+
+ fmt.Fprintln(w, util.Sprintf(" ${GREY}Command\t=\t%s\t${RESET}", task.Command))
+ fmt.Fprintln(w, util.Sprintf(" ${GREY}Outputs\t=\t%s\t${RESET}", strings.Join(task.Outputs, ", ")))
+ fmt.Fprintln(w, util.Sprintf(" ${GREY}Log File\t=\t%s\t${RESET}", task.LogFile))
+ fmt.Fprintln(w, util.Sprintf(" ${GREY}Dependencies\t=\t%s\t${RESET}", strings.Join(task.Dependencies, ", ")))
+		fmt.Fprintln(w, util.Sprintf("  ${GREY}Dependents\t=\t%s\t${RESET}", strings.Join(task.Dependents, ", ")))
+		fmt.Fprintln(w, util.Sprintf("  ${GREY}Input Files Considered\t=\t%d\t${RESET}", len(task.ExpandedInputs)))
+
+ fmt.Fprintln(w, util.Sprintf(" ${GREY}Configured Environment Variables\t=\t%s\t${RESET}", strings.Join(task.EnvVars.Configured, ", ")))
+ fmt.Fprintln(w, util.Sprintf(" ${GREY}Inferred Environment Variables\t=\t%s\t${RESET}", strings.Join(task.EnvVars.Inferred, ", ")))
+ fmt.Fprintln(w, util.Sprintf(" ${GREY}Global Environment Variables\t=\t%s\t${RESET}", strings.Join(task.EnvVars.Global, ", ")))
+
+ bytes, err := json.Marshal(task.ResolvedTaskDefinition)
+ // If there's an error, we can silently ignore it, we don't need to block the entire print.
+ if err == nil {
+ fmt.Fprintln(w, util.Sprintf(" ${GREY}ResolvedTaskDefinition\t=\t%s\t${RESET}", string(bytes)))
+ }
+
+ fmt.Fprintln(w, util.Sprintf(" ${GREY}Framework\t=\t%s\t${RESET}", task.Framework))
+ if err := w.Flush(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/cli/internal/runsummary/globalhash_summary.go b/cli/internal/runsummary/globalhash_summary.go
new file mode 100644
index 0000000..e24976d5
--- /dev/null
+++ b/cli/internal/runsummary/globalhash_summary.go
@@ -0,0 +1,38 @@
+package runsummary
+
+import (
+ "github.com/vercel/turbo/cli/internal/env"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+// GlobalHashSummary contains the pieces of data that impacted the global hash (which in turn impacted each task hash)
+type GlobalHashSummary struct {
+ GlobalCacheKey string `json:"rootKey"`
+ GlobalFileHashMap map[turbopath.AnchoredUnixPath]string `json:"files"`
+ RootExternalDepsHash string `json:"hashOfExternalDependencies"`
+ Pipeline fs.PristinePipeline `json:"rootPipeline"`
+
+	// These are private fields, not serialized to JSON, because we add them to each task
+ envVars env.EnvironmentVariablePairs
+ passthroughEnvVars env.EnvironmentVariablePairs
+}
+
+// NewGlobalHashSummary creates a GlobalHashSummary struct from a set of fields.
+func NewGlobalHashSummary(
+ fileHashMap map[turbopath.AnchoredUnixPath]string,
+ rootExternalDepsHash string,
+ envVars env.DetailedMap,
+ passthroughEnvVars env.EnvironmentVariableMap,
+ globalCacheKey string,
+ pipeline fs.PristinePipeline,
+) *GlobalHashSummary {
+ return &GlobalHashSummary{
+ envVars: envVars.All.ToSecretHashable(),
+ passthroughEnvVars: passthroughEnvVars.ToSecretHashable(),
+ GlobalFileHashMap: fileHashMap,
+ RootExternalDepsHash: rootExternalDepsHash,
+ GlobalCacheKey: globalCacheKey,
+ Pipeline: pipeline,
+ }
+}
diff --git a/cli/internal/runsummary/run_summary.go b/cli/internal/runsummary/run_summary.go
new file mode 100644
index 0000000..a297114
--- /dev/null
+++ b/cli/internal/runsummary/run_summary.go
@@ -0,0 +1,320 @@
+// Package runsummary implements structs that report on a `turbo run` and `turbo run --dry`
+package runsummary
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "path/filepath"
+ "sync"
+ "time"
+
+ "github.com/mitchellh/cli"
+ "github.com/segmentio/ksuid"
+ "github.com/vercel/turbo/cli/internal/client"
+ "github.com/vercel/turbo/cli/internal/spinner"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "github.com/vercel/turbo/cli/internal/util"
+ "github.com/vercel/turbo/cli/internal/workspace"
+)
+
+// MissingTaskLabel is printed when a package is missing a definition for a task that is supposed to run
+// E.g. if `turbo run build --dry` is run, and package-a doesn't define a `build` script in package.json,
+// the RunSummary will print this, instead of the script (e.g. `next build`).
+const MissingTaskLabel = "<NONEXISTENT>"
+
+// MissingFrameworkLabel is a string to identify when a workspace doesn't detect a framework
+const MissingFrameworkLabel = "<NO FRAMEWORK DETECTED>"
+
+const runSummarySchemaVersion = "0"
+const runsEndpoint = "/v0/spaces/%s/runs"
+const runsPatchEndpoint = "/v0/spaces/%s/runs/%s"
+const tasksEndpoint = "/v0/spaces/%s/runs/%s/tasks"
+
+type runType int
+
+const (
+ runTypeReal runType = iota
+ runTypeDryText
+ runTypeDryJSON
+)
+
+// Meta is a wrapper around the serializable RunSummary, with some extra information
+// about the Run and references to other things that we need.
+type Meta struct {
+ RunSummary *RunSummary
+ ui cli.Ui
+ repoRoot turbopath.AbsoluteSystemPath // used to write run summary
+ repoPath turbopath.RelativeSystemPath
+ singlePackage bool
+ shouldSave bool
+ apiClient *client.APIClient
+ spaceID string
+ runType runType
+ synthesizedCommand string
+}
+
+// RunSummary contains a summary of what happens in the `turbo run` command and why.
+type RunSummary struct {
+ ID ksuid.KSUID `json:"id"`
+ Version string `json:"version"`
+ TurboVersion string `json:"turboVersion"`
+ GlobalHashSummary *GlobalHashSummary `json:"globalCacheInputs"`
+ Packages []string `json:"packages"`
+ EnvMode util.EnvMode `json:"envMode"`
+ ExecutionSummary *executionSummary `json:"execution,omitempty"`
+ Tasks []*TaskSummary `json:"tasks"`
+}
+
+// NewRunSummary returns a RunSummary instance
+func NewRunSummary(
+ startAt time.Time,
+ ui cli.Ui,
+ repoRoot turbopath.AbsoluteSystemPath,
+ repoPath turbopath.RelativeSystemPath,
+ turboVersion string,
+ apiClient *client.APIClient,
+ runOpts util.RunOpts,
+ packages []string,
+ globalEnvMode util.EnvMode,
+ globalHashSummary *GlobalHashSummary,
+ synthesizedCommand string,
+) Meta {
+ singlePackage := runOpts.SinglePackage
+ profile := runOpts.Profile
+ shouldSave := runOpts.Summarize
+ spaceID := runOpts.ExperimentalSpaceID
+
+ runType := runTypeReal
+ if runOpts.DryRun {
+ runType = runTypeDryText
+ if runOpts.DryRunJSON {
+ runType = runTypeDryJSON
+ }
+ }
+
+ executionSummary := newExecutionSummary(synthesizedCommand, repoPath, startAt, profile)
+
+ return Meta{
+ RunSummary: &RunSummary{
+ ID: ksuid.New(),
+ Version: runSummarySchemaVersion,
+ ExecutionSummary: executionSummary,
+ TurboVersion: turboVersion,
+ Packages: packages,
+ EnvMode: globalEnvMode,
+ Tasks: []*TaskSummary{},
+ GlobalHashSummary: globalHashSummary,
+ },
+ ui: ui,
+ runType: runType,
+ repoRoot: repoRoot,
+ singlePackage: singlePackage,
+ shouldSave: shouldSave,
+ apiClient: apiClient,
+ spaceID: spaceID,
+ synthesizedCommand: synthesizedCommand,
+ }
+}
+
+// getPath returns the path where the runSummary is written.
+// The returned path is always anchored at rsm.repoRoot.
+// We don't do a lot of validation, so `../../` paths are allowed.
+func (rsm *Meta) getPath() turbopath.AbsoluteSystemPath {
+ filename := fmt.Sprintf("%s.json", rsm.RunSummary.ID)
+ return rsm.repoRoot.UntypedJoin(filepath.Join(".turbo", "runs"), filename)
+}
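+
+// Editor's note (not part of the original change): given the join above,
+// getPath resolves to <repoRoot>/.turbo/runs/<run-ksuid>.json.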
+
+// Close wraps up the RunSummary at the end of a `turbo run`.
+func (rsm *Meta) Close(ctx context.Context, exitCode int, workspaceInfos workspace.Catalog) error {
+ if rsm.runType == runTypeDryJSON || rsm.runType == runTypeDryText {
+ return rsm.closeDryRun(workspaceInfos)
+ }
+
+ rsm.RunSummary.ExecutionSummary.exitCode = exitCode
+ rsm.RunSummary.ExecutionSummary.endedAt = time.Now()
+
+ summary := rsm.RunSummary
+ if err := writeChrometracing(summary.ExecutionSummary.profileFilename, rsm.ui); err != nil {
+ rsm.ui.Error(fmt.Sprintf("Error writing tracing data: %v", err))
+ }
+
+	// TODO: printing the summary to the terminal, writing it to disk, and sending it to the API
+	// are all the same kind of thing; we should use a strategy similar to cache save/upload to
+	// do this in parallel.
+
+ // Otherwise, attempt to save the summary
+ // Warn on the error, but we don't need to throw an error
+ if rsm.shouldSave {
+ if err := rsm.save(); err != nil {
+ rsm.ui.Warn(fmt.Sprintf("Error writing run summary: %v", err))
+ }
+ }
+
+ rsm.printExecutionSummary()
+
+ // If we're not supposed to save or if there's no spaceID
+ if !rsm.shouldSave || rsm.spaceID == "" {
+ return nil
+ }
+
+ if !rsm.apiClient.IsLinked() {
+ rsm.ui.Warn("Failed to post to space because repo is not linked to a Space. Run `turbo link` first.")
+ return nil
+ }
+
+ // Wrap the record function so we can hoist out url/errors but keep
+ // the function signature/type the spinner.WaitFor expects.
+ var url string
+ var errs []error
+ record := func() {
+ url, errs = rsm.record()
+ }
+
+	_ = spinner.WaitFor(ctx, record, rsm.ui, "...sending run summary...", 1000*time.Millisecond)
+
+ // After the spinner is done, print any errors and the url
+ if len(errs) > 0 {
+ rsm.ui.Warn("Errors recording run to Spaces")
+ for _, err := range errs {
+ rsm.ui.Warn(fmt.Sprintf("%v", err))
+ }
+ }
+
+ if url != "" {
+ rsm.ui.Output(fmt.Sprintf("Run: %s", url))
+ rsm.ui.Output("")
+ }
+
+ return nil
+}
+
+// closeDryRun wraps up the Run Summary at the end of `turbo run --dry`.
+// Ideally this should be inlined into Close(), but RunSummary doesn't currently
+// have context about whether a run was real or dry.
+func (rsm *Meta) closeDryRun(workspaceInfos workspace.Catalog) error {
+ // Render the dry run as json
+ if rsm.runType == runTypeDryJSON {
+ rendered, err := rsm.FormatJSON()
+ if err != nil {
+ return err
+ }
+
+ rsm.ui.Output(string(rendered))
+ return nil
+ }
+
+ return rsm.FormatAndPrintText(workspaceInfos)
+}
+
+// TrackTask makes it possible for the consumer to send information about the execution of a task.
+func (summary *RunSummary) TrackTask(taskID string) (func(outcome executionEventName, err error, exitCode *int), *TaskExecutionSummary) {
+ return summary.ExecutionSummary.run(taskID)
+}
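+
+// exampleTrackTask is an editor-added sketch (not part of the original
+// change) of the tracking flow around a single task execution; runTask is a
+// hypothetical stand-in for actually executing the task.
+func exampleTrackTask(summary *RunSummary, taskID string, runTask func() (int, error)) {
+	trace, _ := summary.TrackTask(taskID)
+	exitCode, err := runTask()
+	if err != nil {
+		trace(TargetBuildFailed, err, &exitCode)
+		return
+	}
+	trace(TargetBuilt, nil, &exitCode)
+}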
+
+// Save saves the run summary to a file
+func (rsm *Meta) save() error {
+ json, err := rsm.FormatJSON()
+ if err != nil {
+ return err
+ }
+
+	// summaryPath is always anchored at rsm.repoRoot.
+ // We don't do a lot of validation, so `../../` paths are allowed
+ summaryPath := rsm.getPath()
+
+ if err := summaryPath.EnsureDir(); err != nil {
+ return err
+ }
+
+ return summaryPath.WriteFile(json, 0644)
+}
+
+// record sends the summary to the API
+func (rsm *Meta) record() (string, []error) {
+ errs := []error{}
+
+ // Right now we'll send the POST to create the Run and the subsequent task payloads
+ // after all execution is done, but in the future, this first POST request
+ // can happen when the Run actually starts, so we can send updates to the associated Space
+ // as tasks complete.
+ createRunEndpoint := fmt.Sprintf(runsEndpoint, rsm.spaceID)
+ response := &spacesRunResponse{}
+
+ payload := rsm.newSpacesRunCreatePayload()
+ if startPayload, err := json.Marshal(payload); err == nil {
+ if resp, err := rsm.apiClient.JSONPost(createRunEndpoint, startPayload); err != nil {
+ errs = append(errs, fmt.Errorf("POST %s: %w", createRunEndpoint, err))
+ } else {
+ if err := json.Unmarshal(resp, response); err != nil {
+ errs = append(errs, fmt.Errorf("Error unmarshaling response: %w", err))
+ }
+ }
+ }
+
+ if response.ID != "" {
+ if taskErrs := rsm.postTaskSummaries(response.ID); len(taskErrs) > 0 {
+ errs = append(errs, taskErrs...)
+ }
+
+ if donePayload, err := json.Marshal(newSpacesDonePayload(rsm.RunSummary)); err == nil {
+ patchURL := fmt.Sprintf(runsPatchEndpoint, rsm.spaceID, response.ID)
+ if _, err := rsm.apiClient.JSONPatch(patchURL, donePayload); err != nil {
+ errs = append(errs, fmt.Errorf("PATCH %s: %w", patchURL, err))
+ }
+ }
+ }
+
+ if len(errs) > 0 {
+ return response.URL, errs
+ }
+
+ return response.URL, nil
+}
+
+func (rsm *Meta) postTaskSummaries(runID string) []error {
+	// errs is appended to from multiple goroutines below; guard it with a mutex.
+	var errMu sync.Mutex
+	errs := []error{}
+ // We make at most 8 requests at a time.
+ maxParallelRequests := 8
+ taskSummaries := rsm.RunSummary.Tasks
+ taskCount := len(taskSummaries)
+ taskURL := fmt.Sprintf(tasksEndpoint, rsm.spaceID, runID)
+
+ parallelRequestCount := maxParallelRequests
+ if taskCount < maxParallelRequests {
+ parallelRequestCount = taskCount
+ }
+
+ queue := make(chan int, taskCount)
+
+ wg := &sync.WaitGroup{}
+ for i := 0; i < parallelRequestCount; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for index := range queue {
+ task := taskSummaries[index]
+ payload := newSpacesTaskPayload(task)
+ if taskPayload, err := json.Marshal(payload); err == nil {
+				if _, err := rsm.apiClient.JSONPost(taskURL, taskPayload); err != nil {
+					errMu.Lock()
+					errs = append(errs, fmt.Errorf("Error sending %s summary to space: %w", task.TaskID, err))
+					errMu.Unlock()
+				}
+ }
+ }
+ }()
+ }
+
+ for index := range taskSummaries {
+ queue <- index
+ }
+ close(queue)
+ wg.Wait()
+
+ if len(errs) > 0 {
+ return errs
+ }
+
+ return nil
+}
diff --git a/cli/internal/runsummary/spaces.go b/cli/internal/runsummary/spaces.go
new file mode 100644
index 0000000..bf19941
--- /dev/null
+++ b/cli/internal/runsummary/spaces.go
@@ -0,0 +1,96 @@
+package runsummary
+
+import (
+ "github.com/vercel/turbo/cli/internal/ci"
+)
+
+// spacesRunResponse deserializes the response from the POST run endpoint
+type spacesRunResponse struct {
+ ID string
+ URL string
+}
+
+type spacesRunPayload struct {
+ StartTime int64 `json:"startTime,omitempty"` // when the run was started
+ EndTime int64 `json:"endTime,omitempty"` // when the run ended. we should never submit start and end at the same time.
+ Status string `json:"status,omitempty"` // Status is "running" or "completed"
+ Type string `json:"type,omitempty"` // hardcoded to "TURBO"
+ ExitCode int `json:"exitCode,omitempty"` // exit code for the full run
+ Command string `json:"command,omitempty"` // the thing that kicked off the turbo run
+ RepositoryPath string `json:"repositoryPath,omitempty"` // where the command was invoked from
+	Context        string `json:"context,omitempty"`        // the host on which this Run was executed (e.g. GitHub Actions, Vercel, etc.)
+
+ // TODO: we need to add these in
+ // originationUser string
+ // gitBranch string
+ // gitSha string
+}
+
+// spacesCacheStatus mirrors TaskCacheSummary so we can convert with
+// spacesCacheStatus(cacheSummary); only the JSON tags differ, omitting the local and remote fields
+type spacesCacheStatus struct {
+ // omitted fields, but here so we can convert from TaskCacheSummary easily
+ Local bool `json:"-"`
+ Remote bool `json:"-"`
+ Status string `json:"status"` // should always be there
+ Source string `json:"source,omitempty"`
+ TimeSaved int `json:"timeSaved"`
+}
+
+type spacesTask struct {
+ Key string `json:"key,omitempty"`
+ Name string `json:"name,omitempty"`
+ Workspace string `json:"workspace,omitempty"`
+ Hash string `json:"hash,omitempty"`
+ StartTime int64 `json:"startTime,omitempty"`
+ EndTime int64 `json:"endTime,omitempty"`
+ Cache spacesCacheStatus `json:"cache,omitempty"`
+ ExitCode int `json:"exitCode,omitempty"`
+ Dependencies []string `json:"dependencies,omitempty"`
+ Dependents []string `json:"dependents,omitempty"`
+ Logs string `json:"log"`
+}
+
+func (rsm *Meta) newSpacesRunCreatePayload() *spacesRunPayload {
+ startTime := rsm.RunSummary.ExecutionSummary.startedAt.UnixMilli()
+ context := "LOCAL"
+ if name := ci.Constant(); name != "" {
+ context = name
+ }
+ return &spacesRunPayload{
+ StartTime: startTime,
+ Status: "running",
+ Command: rsm.synthesizedCommand,
+ RepositoryPath: rsm.repoPath.ToString(),
+ Type: "TURBO",
+ Context: context,
+ }
+}
+
+func newSpacesDonePayload(runsummary *RunSummary) *spacesRunPayload {
+ endTime := runsummary.ExecutionSummary.endedAt.UnixMilli()
+ return &spacesRunPayload{
+ Status: "completed",
+ EndTime: endTime,
+ ExitCode: runsummary.ExecutionSummary.exitCode,
+ }
+}
+
+func newSpacesTaskPayload(taskSummary *TaskSummary) *spacesTask {
+	startTime := taskSummary.Execution.startAt.UnixMilli()
+	endTime := taskSummary.Execution.endTime().UnixMilli()
+
+	// Guard against a task that never reported an exit code.
+	exitCode := 0
+	if code := taskSummary.Execution.ExitCode(); code != nil {
+		exitCode = *code
+	}
+
+ return &spacesTask{
+ Key: taskSummary.TaskID,
+ Name: taskSummary.Task,
+ Workspace: taskSummary.Package,
+ Hash: taskSummary.Hash,
+ StartTime: startTime,
+ EndTime: endTime,
+ Cache: spacesCacheStatus(taskSummary.CacheSummary), // wrapped so we can remove fields
+		ExitCode:     exitCode,
+ Dependencies: taskSummary.Dependencies,
+ Dependents: taskSummary.Dependents,
+ Logs: string(taskSummary.GetLogs()),
+ }
+}
diff --git a/cli/internal/runsummary/task_summary.go b/cli/internal/runsummary/task_summary.go
new file mode 100644
index 0000000..fb0cb30
--- /dev/null
+++ b/cli/internal/runsummary/task_summary.go
@@ -0,0 +1,117 @@
+package runsummary
+
+import (
+ "os"
+
+ "github.com/vercel/turbo/cli/internal/cache"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "github.com/vercel/turbo/cli/internal/util"
+)
+
+// TaskCacheSummary is an extended version of cache.ItemStatus
+// that includes TimeSaved and derived status/source fields.
+type TaskCacheSummary struct {
+ Local bool `json:"local"` // Deprecated, but keeping around for --dry=json
+ Remote bool `json:"remote"` // Deprecated, but keeping around for --dry=json
+ Status string `json:"status"` // should always be there
+ Source string `json:"source,omitempty"` // can be empty on status:miss
+ TimeSaved int `json:"timeSaved"` // always include, but can be 0
+}
+
+// NewTaskCacheSummary decorates a cache.ItemStatus into a TaskCacheSummary
+// Importantly, it adds the derived keys of `source` and `status` based on
+// the local/remote booleans. It would be nice if these were just included
+// from upstream, but that is a more invasive change.
+func NewTaskCacheSummary(itemStatus cache.ItemStatus, timeSaved *int) TaskCacheSummary {
+ status := cache.CacheEventMiss
+ if itemStatus.Local || itemStatus.Remote {
+ status = cache.CacheEventHit
+ }
+
+ var source string
+ if itemStatus.Local {
+ source = cache.CacheSourceFS
+ } else if itemStatus.Remote {
+ source = cache.CacheSourceRemote
+ }
+
+ cs := TaskCacheSummary{
+ // copy these over
+ Local: itemStatus.Local,
+ Remote: itemStatus.Remote,
+ Status: status,
+ Source: source,
+ }
+	// add in a dereferenced timeSaved; default to 0 if nil
+ if timeSaved != nil {
+ cs.TimeSaved = *timeSaved
+ }
+ return cs
+}
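+
+// Editor's note (not part of the original change), the derivation above in
+// words, as a table:
+//
+//	Local  Remote | Status | Source
+//	false  false  | miss   | ""
+//	true   (any)  | hit    | local filesystem
+//	false  true   | hit    | remote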
+
+// TaskSummary contains information about a task that was scheduled to run
+// TODO(mehulkar): `Outputs` and `ExcludedOutputs` are slightly redundant
+// as the information is also available in ResolvedTaskDefinition. We could remove them
+// and favor a version of Outputs that is the fully expanded list of files.
+type TaskSummary struct {
+ TaskID string `json:"taskId,omitempty"`
+ Task string `json:"task"`
+ Package string `json:"package,omitempty"`
+ Hash string `json:"hash"`
+ ExpandedInputs map[turbopath.AnchoredUnixPath]string `json:"inputs"`
+ ExternalDepsHash string `json:"hashOfExternalDependencies"`
+ CacheSummary TaskCacheSummary `json:"cache"`
+ Command string `json:"command"`
+ CommandArguments []string `json:"cliArguments"`
+ Outputs []string `json:"outputs"`
+ ExcludedOutputs []string `json:"excludedOutputs"`
+ LogFile string `json:"logFile"`
+ Dir string `json:"directory,omitempty"`
+ Dependencies []string `json:"dependencies"`
+ Dependents []string `json:"dependents"`
+ ResolvedTaskDefinition *fs.TaskDefinition `json:"resolvedTaskDefinition"`
+ ExpandedOutputs []turbopath.AnchoredSystemPath `json:"expandedOutputs"`
+ Framework string `json:"framework"`
+ EnvMode util.EnvMode `json:"envMode"`
+ EnvVars TaskEnvVarSummary `json:"environmentVariables"`
+ Execution *TaskExecutionSummary `json:"execution,omitempty"` // omit when it's not set
+}
+
+// GetLogs reads the log file and returns its contents
+func (ts *TaskSummary) GetLogs() []byte {
+ bytes, err := os.ReadFile(ts.LogFile)
+ if err != nil {
+ return []byte{}
+ }
+ return bytes
+}
+
+// TaskEnvVarSummary contains the environment variables that impacted a task's hash
+type TaskEnvVarSummary struct {
+ Configured []string `json:"configured"`
+ Inferred []string `json:"inferred"`
+ Global []string `json:"global"`
+ Passthrough []string `json:"passthrough"`
+ GlobalPassthrough []string `json:"globalPassthrough"`
+}
+
+// cleanForSinglePackage converts a TaskSummary to remove references to workspaces
+func (ts *TaskSummary) cleanForSinglePackage() {
+ dependencies := make([]string, len(ts.Dependencies))
+ for i, dependency := range ts.Dependencies {
+ dependencies[i] = util.StripPackageName(dependency)
+ }
+ dependents := make([]string, len(ts.Dependents))
+ for i, dependent := range ts.Dependents {
+ dependents[i] = util.StripPackageName(dependent)
+ }
+ task := util.StripPackageName(ts.TaskID)
+
+ ts.TaskID = task
+ ts.Task = task
+ ts.Dependencies = dependencies
+ ts.Dependents = dependents
+ ts.Dir = ""
+ ts.Package = ""
+}
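+
+// Editor's note (not part of the original change): assuming
+// util.StripPackageName maps a "package#task" ID to its task part, a summary
+// with TaskID "my-app#build" and Dependencies ["my-app#codegen"] becomes
+// TaskID "build" with Dependencies ["codegen"]; Dir and Package are blanked
+// because a single-package repo has no workspace directories.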
diff --git a/cli/internal/scm/git_go.go b/cli/internal/scm/git_go.go
new file mode 100644
index 0000000..0dac2bf
--- /dev/null
+++ b/cli/internal/scm/git_go.go
@@ -0,0 +1,111 @@
+//go:build go || !rust
+// +build go !rust
+
+// Package scm abstracts operations on various tools like git
+// Currently, only git is supported.
+//
+// Adapted from https://github.com/thought-machine/please/tree/master/src/scm
+// Copyright Thought Machine, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+package scm
+
+import (
+	"fmt"
+	"os/exec"
+	"path/filepath"
+	"strings"
+
+	"github.com/pkg/errors"
+	"github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+// git implements operations on a git repository.
+type git struct {
+ repoRoot turbopath.AbsoluteSystemPath
+}
+
+// ChangedFiles returns a list of modified files since the given commit, including untracked files.
+func (g *git) ChangedFiles(fromCommit string, toCommit string, relativeTo string) ([]string, error) {
+ if relativeTo == "" {
+ relativeTo = g.repoRoot.ToString()
+ }
+ relSuffix := []string{"--", relativeTo}
+ command := []string{"diff", "--name-only", toCommit}
+
+ out, err := exec.Command("git", append(command, relSuffix...)...).CombinedOutput()
+ if err != nil {
+ return nil, errors.Wrapf(err, "finding changes relative to %v", relativeTo)
+ }
+ files := strings.Split(string(out), "\n")
+
+ if fromCommit != "" {
+ // Grab the diff from the merge-base to HEAD using ... syntax. This ensures we have just
+ // the changes that have occurred on the current branch.
+ command = []string{"diff", "--name-only", fromCommit + "..." + toCommit}
+ out, err = exec.Command("git", append(command, relSuffix...)...).CombinedOutput()
+ if err != nil {
+ // Check if we can provide a better error message for non-existent commits.
+ // If we error on the check or can't find it, fall back to whatever error git
+ // reported.
+ if exists, err := commitExists(fromCommit); err == nil && !exists {
+ return nil, fmt.Errorf("commit %v does not exist", fromCommit)
+ }
+ return nil, errors.Wrapf(err, "git comparing with %v", fromCommit)
+ }
+ committedChanges := strings.Split(string(out), "\n")
+ files = append(files, committedChanges...)
+ }
+ command = []string{"ls-files", "--other", "--exclude-standard"}
+ out, err = exec.Command("git", append(command, relSuffix...)...).CombinedOutput()
+ if err != nil {
+ return nil, errors.Wrap(err, "finding untracked files")
+ }
+ untracked := strings.Split(string(out), "\n")
+ files = append(files, untracked...)
+ // git will report changed files relative to the worktree: re-relativize to relativeTo
+ normalized := make([]string, 0)
+ for _, f := range files {
+ if f == "" {
+ continue
+ }
+ normalizedFile, err := g.fixGitRelativePath(strings.TrimSpace(f), relativeTo)
+ if err != nil {
+ return nil, err
+ }
+ normalized = append(normalized, normalizedFile)
+ }
+ return normalized, nil
+}
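+
+// For reference, the method above shells out to roughly these git invocations
+// (a sketch of the observable behavior, not additional functionality):
+//
+//     git diff --name-only <toCommit> -- <relativeTo>
+//     git diff --name-only <fromCommit>...<toCommit> -- <relativeTo>   # when fromCommit is set
+//     git ls-files --other --exclude-standard -- <relativeTo>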
+
+func (g *git) PreviousContent(fromCommit string, filePath string) ([]byte, error) {
+ if fromCommit == "" {
+ return nil, fmt.Errorf("Need commit sha to inspect file contents")
+ }
+
+ out, err := exec.Command("git", "show", fmt.Sprintf("%s:%s", fromCommit, filePath)).CombinedOutput()
+ if err != nil {
+ return nil, errors.Wrapf(err, "unable to get contents of %s", filePath)
+ }
+
+ return out, nil
+}
+
+func commitExists(commit string) (bool, error) {
+ err := exec.Command("git", "cat-file", "-t", commit).Run()
+ if err != nil {
+ exitErr := &exec.ExitError{}
+ if errors.As(err, &exitErr) && exitErr.ExitCode() == 128 {
+ return false, nil
+ }
+ return false, err
+ }
+ return true, nil
+}
+
+func (g *git) fixGitRelativePath(worktreePath, relativeTo string) (string, error) {
+ p, err := filepath.Rel(relativeTo, filepath.Join(g.repoRoot.ToString(), worktreePath))
+ if err != nil {
+ return "", errors.Wrapf(err, "unable to determine relative path for %s and %s", g.repoRoot, relativeTo)
+ }
+ return p, nil
+}
diff --git a/cli/internal/scm/git_rust.go b/cli/internal/scm/git_rust.go
new file mode 100644
index 0000000..4b4cd2d
--- /dev/null
+++ b/cli/internal/scm/git_rust.go
@@ -0,0 +1,34 @@
+//go:build rust
+// +build rust
+
+// Package scm abstracts operations on various tools like git.
+// Currently, only git is supported.
+//
+// Adapted from https://github.com/thought-machine/please/tree/master/src/scm
+// Copyright Thought Machine, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+package scm
+
+import (
+ "fmt"
+
+ "github.com/vercel/turbo/cli/internal/ffi"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+// git implements operations on a git repository.
+type git struct {
+ repoRoot turbopath.AbsoluteSystemPath
+}
+
+// ChangedFiles returns a list of modified files since the given commit, including untracked files.
+func (g *git) ChangedFiles(fromCommit string, toCommit string, monorepoRoot string) ([]string, error) {
+ return ffi.ChangedFiles(g.repoRoot.ToString(), monorepoRoot, fromCommit, toCommit)
+}
+
+func (g *git) PreviousContent(fromCommit string, filePath string) ([]byte, error) {
+ if fromCommit == "" {
+ return nil, fmt.Errorf("Need commit sha to inspect file contents")
+ }
+
+ return ffi.PreviousContent(g.repoRoot.ToString(), fromCommit, filePath)
+}
diff --git a/cli/internal/scm/scm.go b/cli/internal/scm/scm.go
new file mode 100644
index 0000000..e7f17c8
--- /dev/null
+++ b/cli/internal/scm/scm.go
@@ -0,0 +1,53 @@
+// Package scm abstracts operations on various tools like git.
+// Currently, only git is supported.
+//
+// Adapted from https://github.com/thought-machine/please/tree/master/src/scm
+// Copyright Thought Machine, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+package scm
+
+import (
+ "github.com/pkg/errors"
+
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+var ErrFallback = errors.New("cannot find a .git folder. Falling back to manual file hashing (which may be slower). If you are running this build in a pruned directory, you can ignore this message. Otherwise, please initialize a git repository in the root of your monorepo")
+
+// An SCM represents an SCM implementation that we can ask for various things.
+type SCM interface {
+ // ChangedFiles returns a list of modified files since the given commit, including untracked files
+ ChangedFiles(fromCommit string, toCommit string, relativeTo string) ([]string, error)
+ // PreviousContent returns the content of filePath at fromCommit
+ PreviousContent(fromCommit string, filePath string) ([]byte, error)
+}
+
+// newGitSCM returns a new SCM instance for this repo root.
+// It returns nil if there is no known implementation there.
+func newGitSCM(repoRoot turbopath.AbsoluteSystemPath) SCM {
+ if repoRoot.UntypedJoin(".git").Exists() {
+ return &git{repoRoot: repoRoot}
+ }
+ return nil
+}
+
+// newFallback returns a new SCM instance for this repo root.
+// If there is no known implementation it returns a stub.
+func newFallback(repoRoot turbopath.AbsoluteSystemPath) (SCM, error) {
+ if scm := newGitSCM(repoRoot); scm != nil {
+ return scm, nil
+ }
+
+ return &stub{}, ErrFallback
+}
+
+// FromInRepo produces an SCM instance, given a path within a
+// repository. It walks up from the given path looking for a .git
+// directory and treats that directory's parent as the repository root.
+func FromInRepo(repoRoot turbopath.AbsoluteSystemPath) (SCM, error) {
+ dotGitDir, err := repoRoot.Findup(".git")
+ if err != nil {
+ return nil, err
+ }
+ return newFallback(dotGitDir.Dir())
+}
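+
+// Illustrative usage (a sketch with error handling shortened). newFallback
+// returns a usable stub together with ErrFallback, so callers can treat that
+// error as a warning rather than a failure:
+//
+//     scmInstance, err := scm.FromInRepo(repoRoot)
+//     if errors.Is(err, scm.ErrFallback) {
+//         // warn and continue with the stub implementation
+//     } else if err != nil {
+//         return err
+//     }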
diff --git a/cli/internal/scm/stub.go b/cli/internal/scm/stub.go
new file mode 100644
index 0000000..2e356c5
--- /dev/null
+++ b/cli/internal/scm/stub.go
@@ -0,0 +1,14 @@
+// Adapted from https://github.com/thought-machine/please/tree/master/src/scm
+// Copyright Thought Machine, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+package scm
+
+type stub struct{}
+
+func (s *stub) ChangedFiles(fromCommit string, toCommit string, relativeTo string) ([]string, error) {
+ return nil, nil
+}
+
+func (s *stub) PreviousContent(fromCommit string, filePath string) ([]byte, error) {
+ return nil, nil
+}
diff --git a/cli/internal/scope/filter/filter.go b/cli/internal/scope/filter/filter.go
new file mode 100644
index 0000000..60aaf1d
--- /dev/null
+++ b/cli/internal/scope/filter/filter.go
@@ -0,0 +1,421 @@
+package filter
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/pkg/errors"
+ "github.com/pyr-sh/dag"
+ "github.com/vercel/turbo/cli/internal/doublestar"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "github.com/vercel/turbo/cli/internal/util"
+ "github.com/vercel/turbo/cli/internal/workspace"
+)
+
+type SelectedPackages struct {
+ pkgs util.Set
+ unusedFilters []*TargetSelector
+}
+
+// PackagesChangedInRange is the signature of a function to provide the set of
+// packages that have changed in a particular range of git refs.
+type PackagesChangedInRange = func(fromRef string, toRef string) (util.Set, error)
+
+// PackageInference holds the information we have inferred from the working-directory
+// (really --infer-filter-root flag) about which packages are of interest.
+type PackageInference struct {
+ // PackageName, if set, means that we have determined that filters without a package-specifier
+ // should get this package name
+ PackageName string
+ // DirectoryRoot is used to infer a "parentDir" for the filter in the event that we haven't
+ // identified a specific package. If the filter already contains a parentDir, this acts as
+ // a prefix. If the filter does not contain a parentDir, we consider this to be a glob for
+ // all subdirectories
+ DirectoryRoot turbopath.RelativeSystemPath
+}
+
+type Resolver struct {
+ Graph *dag.AcyclicGraph
+ WorkspaceInfos workspace.Catalog
+ Cwd turbopath.AbsoluteSystemPath
+ Inference *PackageInference
+ PackagesChangedInRange PackagesChangedInRange
+}
+
+// GetPackagesFromPatterns compiles filter patterns and applies them, returning
+// the selected packages
+func (r *Resolver) GetPackagesFromPatterns(patterns []string) (util.Set, error) {
+ selectors := []*TargetSelector{}
+ for _, pattern := range patterns {
+ selector, err := ParseTargetSelector(pattern)
+ if err != nil {
+ return nil, err
+ }
+ selectors = append(selectors, selector)
+ }
+ selected, err := r.getFilteredPackages(selectors)
+ if err != nil {
+ return nil, err
+ }
+ return selected.pkgs, nil
+}
+
+func (pi *PackageInference) apply(selector *TargetSelector) error {
+ if selector.namePattern != "" {
+ // The selector references a package name, don't apply inference
+ return nil
+ }
+ if pi.PackageName != "" {
+ selector.namePattern = pi.PackageName
+ }
+ if selector.parentDir != "" {
+ parentDir := pi.DirectoryRoot.Join(selector.parentDir)
+ selector.parentDir = parentDir
+ } else if pi.PackageName == "" {
+ // The user didn't set a parent directory and we didn't find a single package,
+ // so use the directory we inferred and select all subdirectories
+ selector.parentDir = pi.DirectoryRoot.Join("**")
+ }
+ return nil
+}
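+
+// For example (sketching the rules above with hypothetical names): with
+// DirectoryRoot "packages/ui" and no inferred PackageName, an empty selector
+// becomes parentDir "packages/ui/**", selecting every package under that
+// directory; with PackageName "ui" it becomes namePattern "ui" instead. A
+// selector that already names a package is left untouched.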
+
+func (r *Resolver) applyInference(selectors []*TargetSelector) ([]*TargetSelector, error) {
+ if r.Inference == nil {
+ return selectors, nil
+ }
+ // If there are existing patterns, use inference on those. If there are no
+ // patterns, but there is a directory supplied, synthesize a selector
+ if len(selectors) == 0 {
+ selectors = append(selectors, &TargetSelector{})
+ }
+ for _, selector := range selectors {
+ if err := r.Inference.apply(selector); err != nil {
+ return nil, err
+ }
+ }
+ return selectors, nil
+}
+
+func (r *Resolver) getFilteredPackages(selectors []*TargetSelector) (*SelectedPackages, error) {
+ selectors, err := r.applyInference(selectors)
+ if err != nil {
+ return nil, err
+ }
+ prodPackageSelectors := []*TargetSelector{}
+ allPackageSelectors := []*TargetSelector{}
+ for _, selector := range selectors {
+ if selector.followProdDepsOnly {
+ prodPackageSelectors = append(prodPackageSelectors, selector)
+ } else {
+ allPackageSelectors = append(allPackageSelectors, selector)
+ }
+ }
+ if len(allPackageSelectors) > 0 || len(prodPackageSelectors) > 0 {
+ if len(allPackageSelectors) > 0 {
+ selected, err := r.filterGraph(allPackageSelectors)
+ if err != nil {
+ return nil, err
+ }
+ return selected, nil
+ }
+ }
+ return &SelectedPackages{
+ pkgs: make(util.Set),
+ }, nil
+}
+
+func (r *Resolver) filterGraph(selectors []*TargetSelector) (*SelectedPackages, error) {
+ includeSelectors := []*TargetSelector{}
+ excludeSelectors := []*TargetSelector{}
+ for _, selector := range selectors {
+ if selector.exclude {
+ excludeSelectors = append(excludeSelectors, selector)
+ } else {
+ includeSelectors = append(includeSelectors, selector)
+ }
+ }
+ var include *SelectedPackages
+ if len(includeSelectors) > 0 {
+ found, err := r.filterGraphWithSelectors(includeSelectors)
+ if err != nil {
+ return nil, err
+ }
+ include = found
+ } else {
+ vertexSet := make(util.Set)
+ for _, v := range r.Graph.Vertices() {
+ vertexSet.Add(v)
+ }
+ include = &SelectedPackages{
+ pkgs: vertexSet,
+ }
+ }
+ exclude, err := r.filterGraphWithSelectors(excludeSelectors)
+ if err != nil {
+ return nil, err
+ }
+ return &SelectedPackages{
+ pkgs: include.pkgs.Difference(exclude.pkgs),
+ unusedFilters: append(include.unusedFilters, exclude.unusedFilters...),
+ }, nil
+}
+
+func (r *Resolver) filterGraphWithSelectors(selectors []*TargetSelector) (*SelectedPackages, error) {
+ unmatchedSelectors := []*TargetSelector{}
+
+ cherryPickedPackages := make(dag.Set)
+ walkedDependencies := make(dag.Set)
+ walkedDependents := make(dag.Set)
+ walkedDependentsDependencies := make(dag.Set)
+
+ for _, selector := range selectors {
+ // TODO(gsoltis): this should be a list?
+ entryPackages, err := r.filterGraphWithSelector(selector)
+ if err != nil {
+ return nil, err
+ }
+ if entryPackages.Len() == 0 {
+ unmatchedSelectors = append(unmatchedSelectors, selector)
+ }
+ for _, pkg := range entryPackages {
+ if selector.includeDependencies {
+ dependencies, err := r.Graph.Ancestors(pkg)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to get dependencies of package %v", pkg)
+ }
+ for dep := range dependencies {
+ walkedDependencies.Add(dep)
+ }
+ if !selector.excludeSelf {
+ walkedDependencies.Add(pkg)
+ }
+ }
+ if selector.includeDependents {
+ dependents, err := r.Graph.Descendents(pkg)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to get dependents of package %v", pkg)
+ }
+ for dep := range dependents {
+ walkedDependents.Add(dep)
+ if selector.includeDependencies {
+ dependentDeps, err := r.Graph.Ancestors(dep)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to get dependencies of dependent %v", dep)
+ }
+ for dependentDep := range dependentDeps {
+ walkedDependentsDependencies.Add(dependentDep)
+ }
+ }
+ }
+ if !selector.excludeSelf {
+ walkedDependents.Add(pkg)
+ }
+ }
+ if !selector.includeDependencies && !selector.includeDependents {
+ cherryPickedPackages.Add(pkg)
+ }
+ }
+ }
+ allPkgs := make(util.Set)
+ for pkg := range cherryPickedPackages {
+ allPkgs.Add(pkg)
+ }
+ for pkg := range walkedDependencies {
+ allPkgs.Add(pkg)
+ }
+ for pkg := range walkedDependents {
+ allPkgs.Add(pkg)
+ }
+ for pkg := range walkedDependentsDependencies {
+ allPkgs.Add(pkg)
+ }
+ return &SelectedPackages{
+ pkgs: allPkgs,
+ unusedFilters: unmatchedSelectors,
+ }, nil
+}
+
+func (r *Resolver) filterGraphWithSelector(selector *TargetSelector) (util.Set, error) {
+ if selector.matchDependencies {
+ return r.filterSubtreesWithSelector(selector)
+ }
+ return r.filterNodesWithSelector(selector)
+}
+
+// filterNodesWithSelector returns the set of nodes that match a given selector
+func (r *Resolver) filterNodesWithSelector(selector *TargetSelector) (util.Set, error) {
+ entryPackages := make(util.Set)
+ selectorWasUsed := false
+ if selector.fromRef != "" {
+ // get changed packages
+ selectorWasUsed = true
+ changedPkgs, err := r.PackagesChangedInRange(selector.fromRef, selector.getToRef())
+ if err != nil {
+ return nil, err
+ }
+ parentDir := selector.parentDir
+ for pkgName := range changedPkgs {
+ if parentDir != "" {
+ // Type assert/coerce to string here because we want to use
+ // this value in a map that has string keys.
+ // TODO(mehulkar): `changedPkgs` is a util.Set; we could make a `util.PackageNamesSet`
+ // or something similar that is all strings.
+ pkgNameStr := pkgName.(string)
+ if pkgName == util.RootPkgName {
+ // The root package changed, only add it if
+ // the parentDir is equivalent to the root
+ if matches, err := doublestar.PathMatch(r.Cwd.Join(parentDir).ToString(), r.Cwd.ToString()); err != nil {
+ return nil, fmt.Errorf("failed to resolve directory relationship %v contains %v: %v", parentDir, r.Cwd, err)
+ } else if matches {
+ entryPackages.Add(pkgName)
+ }
+ } else if pkg, ok := r.WorkspaceInfos.PackageJSONs[pkgNameStr]; !ok {
+ return nil, fmt.Errorf("missing info for package %v", pkgName)
+ } else if matches, err := doublestar.PathMatch(r.Cwd.Join(parentDir).ToString(), pkg.Dir.RestoreAnchor(r.Cwd).ToString()); err != nil {
+ return nil, fmt.Errorf("failed to resolve directory relationship %v contains %v: %v", selector.parentDir, pkg.Dir, err)
+ } else if matches {
+ entryPackages.Add(pkgName)
+ }
+ } else {
+ entryPackages.Add(pkgName)
+ }
+ }
+ } else if selector.parentDir != "" {
+ // get packages by path
+ selectorWasUsed = true
+ parentDir := selector.parentDir
+ if parentDir == "." {
+ entryPackages.Add(util.RootPkgName)
+ } else {
+ for name, pkg := range r.WorkspaceInfos.PackageJSONs {
+ if matches, err := doublestar.PathMatch(r.Cwd.Join(parentDir).ToString(), pkg.Dir.RestoreAnchor(r.Cwd).ToString()); err != nil {
+ return nil, fmt.Errorf("failed to resolve directory relationship %v contains %v: %v", selector.parentDir, pkg.Dir, err)
+ } else if matches {
+ entryPackages.Add(name)
+ }
+ }
+ }
+ }
+ if selector.namePattern != "" {
+ // find packages that match name
+ if !selectorWasUsed {
+ matched, err := matchPackageNamesToVertices(selector.namePattern, r.Graph.Vertices())
+ if err != nil {
+ return nil, err
+ }
+ entryPackages = matched
+ selectorWasUsed = true
+ } else {
+ matched, err := matchPackageNames(selector.namePattern, entryPackages)
+ if err != nil {
+ return nil, err
+ }
+ entryPackages = matched
+ }
+ }
+ // TODO(gsoltis): we can do this earlier
+ // Check if the selector specified anything
+ if !selectorWasUsed {
+ return nil, fmt.Errorf("invalid selector: %v", selector.raw)
+ }
+ return entryPackages, nil
+}
+
+// filterSubtreesWithSelector returns the set of nodes where the node or any of its dependencies
+// match a selector
+func (r *Resolver) filterSubtreesWithSelector(selector *TargetSelector) (util.Set, error) {
+ // foreach package that matches parentDir && namePattern, check if any dependency is in changed packages
+ changedPkgs, err := r.PackagesChangedInRange(selector.fromRef, selector.getToRef())
+ if err != nil {
+ return nil, err
+ }
+
+ parentDir := selector.parentDir
+ entryPackages := make(util.Set)
+ for name, pkg := range r.WorkspaceInfos.PackageJSONs {
+ if parentDir == "" {
+ entryPackages.Add(name)
+ } else if matches, err := doublestar.PathMatch(parentDir.ToString(), pkg.Dir.RestoreAnchor(r.Cwd).ToString()); err != nil {
+ return nil, fmt.Errorf("failed to resolve directory relationship %v contains %v: %v", selector.parentDir, pkg.Dir, err)
+ } else if matches {
+ entryPackages.Add(name)
+ }
+ }
+ if selector.namePattern != "" {
+ matched, err := matchPackageNames(selector.namePattern, entryPackages)
+ if err != nil {
+ return nil, err
+ }
+ entryPackages = matched
+ }
+ roots := make(util.Set)
+ matched := make(util.Set)
+ for pkg := range entryPackages {
+ if matched.Includes(pkg) {
+ roots.Add(pkg)
+ continue
+ }
+ deps, err := r.Graph.Ancestors(pkg)
+ if err != nil {
+ return nil, err
+ }
+ for changedPkg := range changedPkgs {
+ if !selector.excludeSelf && pkg == changedPkg {
+ roots.Add(pkg)
+ break
+ }
+ if deps.Include(changedPkg) {
+ roots.Add(pkg)
+ matched.Add(changedPkg)
+ break
+ }
+ }
+ }
+ return roots, nil
+}
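+
+// In other words, for a selector like "foo...[HEAD~1]" the function above
+// keeps a package matching "foo" only if the package itself changed (unless
+// excludeSelf is set) or some dependency in its subtree changed in the given
+// range; packages with an untouched dependency subtree are dropped.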
+
+func matchPackageNamesToVertices(pattern string, vertices []dag.Vertex) (util.Set, error) {
+ packages := make(util.Set)
+ for _, v := range vertices {
+ packages.Add(v)
+ }
+ packages.Add(util.RootPkgName)
+ return matchPackageNames(pattern, packages)
+}
+
+func matchPackageNames(pattern string, packages util.Set) (util.Set, error) {
+ matcher, err := matcherFromPattern(pattern)
+ if err != nil {
+ return nil, err
+ }
+ matched := make(util.Set)
+ for _, pkg := range packages {
+ pkg := pkg.(string)
+ if matcher(pkg) {
+ matched.Add(pkg)
+ }
+ }
+ if matched.Len() == 0 && !strings.HasPrefix(pattern, "@") && !strings.Contains(pattern, "/") {
+ // we got no matches and the pattern isn't a scoped package.
+ // Check if we have exactly one scoped package that does match
+ scopedPattern := fmt.Sprintf("@*/%v", pattern)
+ matcher, err = matcherFromPattern(scopedPattern)
+ if err != nil {
+ return nil, err
+ }
+ foundScopedPkg := false
+ for _, pkg := range packages {
+ pkg := pkg.(string)
+ if matcher(pkg) {
+ if foundScopedPkg {
+ // we found a second scoped package. Return the empty set, we can't
+ // disambiguate
+ return make(util.Set), nil
+ }
+ foundScopedPkg = true
+ matched.Add(pkg)
+ }
+ }
+ }
+ return matched, nil
+}
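+
+// For example (tracing the fallback above): matching the bare pattern "bar"
+// against {"@foo/bar"} retries as "@*/bar" and selects "@foo/bar", while
+// matching it against {"@foo/bar", "@types/bar"} returns the empty set
+// because the scoped retry is ambiguous. This mirrors Test_matchScopedPackage
+// and Test_matchMultipleScopedPackages in filter_test.go.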
diff --git a/cli/internal/scope/filter/filter_test.go b/cli/internal/scope/filter/filter_test.go
new file mode 100644
index 0000000..a23ae1d
--- /dev/null
+++ b/cli/internal/scope/filter/filter_test.go
@@ -0,0 +1,614 @@
+package filter
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/pyr-sh/dag"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "github.com/vercel/turbo/cli/internal/util"
+ "github.com/vercel/turbo/cli/internal/workspace"
+)
+
+func setMatches(t *testing.T, name string, s util.Set, expected []string) {
+ expectedSet := make(util.Set)
+ for _, item := range expected {
+ expectedSet.Add(item)
+ }
+ extra := s.Difference(expectedSet)
+ if extra.Len() > 0 {
+ t.Errorf("%v set has extra elements: %v", name, strings.Join(extra.UnsafeListOfStrings(), ", "))
+ }
+ missing := expectedSet.Difference(s)
+ if missing.Len() > 0 {
+ t.Errorf("%v set missing elements: %v", name, strings.Join(missing.UnsafeListOfStrings(), ", "))
+ }
+
+func Test_filter(t *testing.T) {
+ rawCwd, err := os.Getwd()
+ if err != nil {
+ t.Fatalf("failed to get working directory: %v", err)
+ }
+ root, err := fs.GetCwd(rawCwd)
+ if err != nil {
+ t.Fatalf("failed to get working directory: %v", err)
+ }
+ workspaceInfos := workspace.Catalog{
+ PackageJSONs: make(map[string]*fs.PackageJSON),
+ }
+ packageJSONs := workspaceInfos.PackageJSONs
+ graph := &dag.AcyclicGraph{}
+ graph.Add("project-0")
+ packageJSONs["project-0"] = &fs.PackageJSON{
+ Name: "project-0",
+ Dir: turbopath.AnchoredUnixPath("packages/project-0").ToSystemPath(),
+ }
+ graph.Add("project-1")
+ packageJSONs["project-1"] = &fs.PackageJSON{
+ Name: "project-1",
+ Dir: turbopath.AnchoredUnixPath("packages/project-1").ToSystemPath(),
+ }
+ graph.Add("project-2")
+ packageJSONs["project-2"] = &fs.PackageJSON{
+ Name: "project-2",
+ Dir: "project-2",
+ }
+ graph.Add("project-3")
+ packageJSONs["project-3"] = &fs.PackageJSON{
+ Name: "project-3",
+ Dir: "project-3",
+ }
+ graph.Add("project-4")
+ packageJSONs["project-4"] = &fs.PackageJSON{
+ Name: "project-4",
+ Dir: "project-4",
+ }
+ graph.Add("project-5")
+ packageJSONs["project-5"] = &fs.PackageJSON{
+ Name: "project-5",
+ Dir: "project-5",
+ }
+ // Note: inside project-5
+ graph.Add("project-6")
+ packageJSONs["project-6"] = &fs.PackageJSON{
+ Name: "project-6",
+ Dir: turbopath.AnchoredUnixPath("project-5/packages/project-6").ToSystemPath(),
+ }
+ // Add dependencies
+ graph.Connect(dag.BasicEdge("project-0", "project-1"))
+ graph.Connect(dag.BasicEdge("project-0", "project-5"))
+ graph.Connect(dag.BasicEdge("project-1", "project-2"))
+ graph.Connect(dag.BasicEdge("project-1", "project-4"))
+
+ testCases := []struct {
+ Name string
+ Selectors []*TargetSelector
+ PackageInference *PackageInference
+ Expected []string
+ }{
+ {
+ "select root package",
+ []*TargetSelector{
+ {
+ namePattern: util.RootPkgName,
+ },
+ },
+ nil,
+ []string{util.RootPkgName},
+ },
+ {
+ "select only package dependencies (excluding the package itself)",
+ []*TargetSelector{
+ {
+ excludeSelf: true,
+ includeDependencies: true,
+ namePattern: "project-1",
+ },
+ },
+ nil,
+ []string{"project-2", "project-4"},
+ },
+ {
+ "select package with dependencies",
+ []*TargetSelector{
+ {
+ excludeSelf: false,
+ includeDependencies: true,
+ namePattern: "project-1",
+ },
+ },
+ nil,
+ []string{"project-1", "project-2", "project-4"},
+ },
+ {
+ "select package with dependencies and dependents, including dependent dependencies",
+ []*TargetSelector{
+ {
+ excludeSelf: true,
+ includeDependencies: true,
+ includeDependents: true,
+ namePattern: "project-1",
+ },
+ },
+ nil,
+ []string{"project-0", "project-1", "project-2", "project-4", "project-5"},
+ },
+ {
+ "select package with dependents",
+ []*TargetSelector{
+ {
+ includeDependents: true,
+ namePattern: "project-2",
+ },
+ },
+ nil,
+ []string{"project-1", "project-2", "project-0"},
+ },
+ {
+ "select dependents excluding package itself",
+ []*TargetSelector{
+ {
+ excludeSelf: true,
+ includeDependents: true,
+ namePattern: "project-2",
+ },
+ },
+ nil,
+ []string{"project-0", "project-1"},
+ },
+ {
+ "filter using two selectors: one selects dependencies another selects dependents",
+ []*TargetSelector{
+ {
+ excludeSelf: true,
+ includeDependents: true,
+ namePattern: "project-2",
+ },
+ {
+ excludeSelf: true,
+ includeDependencies: true,
+ namePattern: "project-1",
+ },
+ },
+ nil,
+ []string{"project-0", "project-1", "project-2", "project-4"},
+ },
+ {
+ "select just a package by name",
+ []*TargetSelector{
+ {
+ namePattern: "project-2",
+ },
+ },
+ nil,
+ []string{"project-2"},
+ },
+ // Note: we don't support the option to switch path prefix mode
+ // {
+ // "select by parentDir",
+ // []*TargetSelector{
+ // {
+ // parentDir: "/packages",
+ // },
+ // },
+ // []string{"project-0", "project-1"},
+ // },
+ {
+ "select by parentDir using glob",
+ []*TargetSelector{
+ {
+ parentDir: turbopath.MakeRelativeSystemPath("packages", "*"),
+ },
+ },
+ nil,
+ []string{"project-0", "project-1"},
+ },
+ {
+ "select by parentDir using globstar",
+ []*TargetSelector{
+ {
+ parentDir: turbopath.MakeRelativeSystemPath("project-5", "**"),
+ },
+ },
+ nil,
+ []string{"project-5", "project-6"},
+ },
+ {
+ "select by parentDir with no glob",
+ []*TargetSelector{
+ {
+ parentDir: turbopath.MakeRelativeSystemPath("project-5"),
+ },
+ },
+ nil,
+ []string{"project-5"},
+ },
+ {
+ "select all packages except one",
+ []*TargetSelector{
+ {
+ exclude: true,
+ namePattern: "project-1",
+ },
+ },
+ nil,
+ []string{"project-0", "project-2", "project-3", "project-4", "project-5", "project-6"},
+ },
+ {
+ "select by parentDir and exclude one package by pattern",
+ []*TargetSelector{
+ {
+ parentDir: turbopath.MakeRelativeSystemPath("packages", "*"),
+ },
+ {
+ exclude: true,
+ namePattern: "*-1",
+ },
+ },
+ nil,
+ []string{"project-0"},
+ },
+ {
+ "select root package by directory",
+ []*TargetSelector{
+ {
+ parentDir: turbopath.MakeRelativeSystemPath("."), // input . gets cleaned to ""
+ },
+ },
+ nil,
+ []string{util.RootPkgName},
+ },
+ {
+ "select packages directory",
+ []*TargetSelector{},
+ &PackageInference{
+ DirectoryRoot: turbopath.MakeRelativeSystemPath("packages"),
+ },
+ []string{"project-0", "project-1"},
+ },
+ {
+ "infer single package",
+ []*TargetSelector{},
+ &PackageInference{
+ DirectoryRoot: turbopath.MakeRelativeSystemPath("packages", "project-0"),
+ PackageName: "project-0",
+ },
+ []string{"project-0"},
+ },
+ {
+ "infer single package from subdirectory",
+ []*TargetSelector{},
+ &PackageInference{
+ DirectoryRoot: turbopath.MakeRelativeSystemPath("packages", "project-0", "src"),
+ PackageName: "project-0",
+ },
+ []string{"project-0"},
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.Name, func(t *testing.T) {
+ r := &Resolver{
+ Graph: graph,
+ WorkspaceInfos: workspaceInfos,
+ Cwd: root,
+ Inference: tc.PackageInference,
+ }
+ pkgs, err := r.getFilteredPackages(tc.Selectors)
+ if err != nil {
+ t.Fatalf("%v failed to filter packages: %v", tc.Name, err)
+ }
+ setMatches(t, tc.Name, pkgs.pkgs, tc.Expected)
+ })
+ }
+
+ t.Run("report unmatched filters", func(t *testing.T) {
+ r := &Resolver{
+ Graph: graph,
+ WorkspaceInfos: workspaceInfos,
+ Cwd: root,
+ }
+ pkgs, err := r.getFilteredPackages([]*TargetSelector{
+ {
+ excludeSelf: true,
+ includeDependencies: true,
+ namePattern: "project-7",
+ },
+ })
+ if err != nil {
+ t.Fatalf("unmatched filter failed to filter packages: %v", err)
+ }
+ if pkgs.pkgs.Len() != 0 {
+ t.Errorf("unmatched filter expected no packages, got %v", strings.Join(pkgs.pkgs.UnsafeListOfStrings(), ", "))
+ }
+ if len(pkgs.unusedFilters) != 1 {
+ t.Errorf("unmatched filter expected to report one unused filter, got %v", len(pkgs.unusedFilters))
+ }
+ })
+}
+
+func Test_matchScopedPackage(t *testing.T) {
+ rawCwd, err := os.Getwd()
+ if err != nil {
+ t.Fatalf("failed to get working directory: %v", err)
+ }
+ root, err := fs.GetCwd(rawCwd)
+ if err != nil {
+ t.Fatalf("failed to get working directory: %v", err)
+ }
+
+ workspaceInfos := workspace.Catalog{
+ PackageJSONs: make(map[string]*fs.PackageJSON),
+ }
+ packageJSONs := workspaceInfos.PackageJSONs
+ graph := &dag.AcyclicGraph{}
+ graph.Add("@foo/bar")
+ packageJSONs["@foo/bar"] = &fs.PackageJSON{
+ Name: "@foo/bar",
+ Dir: turbopath.AnchoredUnixPath("packages/bar").ToSystemPath(),
+ }
+ r := &Resolver{
+ Graph: graph,
+ WorkspaceInfos: workspaceInfos,
+ Cwd: root,
+ }
+ pkgs, err := r.getFilteredPackages([]*TargetSelector{
+ {
+ namePattern: "bar",
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to filter packages: %v", err)
+ }
+ setMatches(t, "match scoped package", pkgs.pkgs, []string{"@foo/bar"})
+}
+
+func Test_matchExactPackages(t *testing.T) {
+ rawCwd, err := os.Getwd()
+ if err != nil {
+ t.Fatalf("failed to get working directory: %v", err)
+ }
+ root, err := fs.GetCwd(rawCwd)
+ if err != nil {
+ t.Fatalf("failed to get working directory: %v", err)
+ }
+
+ workspaceInfos := workspace.Catalog{
+ PackageJSONs: make(map[string]*fs.PackageJSON),
+ }
+ packageJSONs := workspaceInfos.PackageJSONs
+ graph := &dag.AcyclicGraph{}
+ graph.Add("@foo/bar")
+ packageJSONs["@foo/bar"] = &fs.PackageJSON{
+ Name: "@foo/bar",
+ Dir: turbopath.AnchoredUnixPath("packages/@foo/bar").ToSystemPath(),
+ }
+ graph.Add("bar")
+ packageJSONs["bar"] = &fs.PackageJSON{
+ Name: "bar",
+ Dir: turbopath.AnchoredUnixPath("packages/bar").ToSystemPath(),
+ }
+ r := &Resolver{
+ Graph: graph,
+ WorkspaceInfos: workspaceInfos,
+ Cwd: root,
+ }
+ pkgs, err := r.getFilteredPackages([]*TargetSelector{
+ {
+ namePattern: "bar",
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to filter packages: %v", err)
+ }
+ setMatches(t, "match exact package", pkgs.pkgs, []string{"bar"})
+}
+
+func Test_matchMultipleScopedPackages(t *testing.T) {
+ rawCwd, err := os.Getwd()
+ if err != nil {
+ t.Fatalf("failed to get working directory: %v", err)
+ }
+ root, err := fs.GetCwd(rawCwd)
+ if err != nil {
+ t.Fatalf("failed to get working directory: %v", err)
+ }
+
+ workspaceInfos := workspace.Catalog{
+ PackageJSONs: make(map[string]*fs.PackageJSON),
+ }
+ packageJSONs := workspaceInfos.PackageJSONs
+ graph := &dag.AcyclicGraph{}
+ graph.Add("@foo/bar")
+ packageJSONs["@foo/bar"] = &fs.PackageJSON{
+ Name: "@foo/bar",
+ Dir: turbopath.AnchoredUnixPath("packages/@foo/bar").ToSystemPath(),
+ }
+ graph.Add("@types/bar")
+ packageJSONs["@types/bar"] = &fs.PackageJSON{
+ Name: "@types/bar",
+ Dir: turbopath.AnchoredUnixPath("packages/@types/bar").ToSystemPath(),
+ }
+ r := &Resolver{
+ Graph: graph,
+ WorkspaceInfos: workspaceInfos,
+ Cwd: root,
+ }
+ pkgs, err := r.getFilteredPackages([]*TargetSelector{
+ {
+ namePattern: "bar",
+ },
+ })
+ if err != nil {
+ t.Fatalf("failed to filter packages: %v", err)
+ }
+ setMatches(t, "match nothing with multiple scoped packages", pkgs.pkgs, []string{})
+}
+
+func Test_SCM(t *testing.T) {
+ rawCwd, err := os.Getwd()
+ if err != nil {
+ t.Fatalf("failed to get working directory: %v", err)
+ }
+ root, err := fs.GetCwd(rawCwd)
+ if err != nil {
+ t.Fatalf("failed to get working directory: %v", err)
+ }
+ head1Changed := make(util.Set)
+ head1Changed.Add("package-1")
+ head1Changed.Add("package-2")
+ head1Changed.Add(util.RootPkgName)
+ head2Changed := make(util.Set)
+ head2Changed.Add("package-3")
+ workspaceInfos := workspace.Catalog{
+ PackageJSONs: make(map[string]*fs.PackageJSON),
+ }
+ packageJSONs := workspaceInfos.PackageJSONs
+ graph := &dag.AcyclicGraph{}
+ graph.Add("package-1")
+ packageJSONs["package-1"] = &fs.PackageJSON{
+ Name: "package-1",
+ Dir: "package-1",
+ }
+ graph.Add("package-2")
+ packageJSONs["package-2"] = &fs.PackageJSON{
+ Name: "package-2",
+ Dir: "package-2",
+ }
+ graph.Add("package-3")
+ packageJSONs["package-3"] = &fs.PackageJSON{
+ Name: "package-3",
+ Dir: "package-3",
+ }
+ graph.Add("package-20")
+ packageJSONs["package-20"] = &fs.PackageJSON{
+ Name: "package-20",
+ Dir: "package-20",
+ }
+
+ graph.Connect(dag.BasicEdge("package-3", "package-20"))
+
+ r := &Resolver{
+ Graph: graph,
+ WorkspaceInfos: workspaceInfos,
+ Cwd: root,
+ PackagesChangedInRange: func(fromRef string, toRef string) (util.Set, error) {
+ if fromRef == "HEAD~1" && toRef == "HEAD" {
+ return head1Changed, nil
+ } else if fromRef == "HEAD~2" && toRef == "HEAD" {
+ union := head1Changed.Copy()
+ for val := range head2Changed {
+ union.Add(val)
+ }
+ return union, nil
+ } else if fromRef == "HEAD~2" && toRef == "HEAD~1" {
+ return head2Changed, nil
+ }
+ panic(fmt.Sprintf("unsupported commit range %v...%v", fromRef, toRef))
+ },
+ }
+
+ testCases := []struct {
+ Name string
+ Selectors []*TargetSelector
+ Expected []string
+ }{
+ {
+ "all changed packages",
+ []*TargetSelector{
+ {
+ fromRef: "HEAD~1",
+ },
+ },
+ []string{"package-1", "package-2", util.RootPkgName},
+ },
+ {
+ "all changed packages with parent dir exact match",
+ []*TargetSelector{
+ {
+ fromRef: "HEAD~1",
+ parentDir: ".",
+ },
+ },
+ []string{util.RootPkgName},
+ },
+ {
+ "changed packages in directory",
+ []*TargetSelector{
+ {
+ fromRef: "HEAD~1",
+ parentDir: "package-2",
+ },
+ },
+ []string{"package-2"},
+ },
+ {
+ "changed packages matching pattern",
+ []*TargetSelector{
+ {
+ fromRef: "HEAD~1",
+ namePattern: "package-2*",
+ },
+ },
+ []string{"package-2"},
+ },
+ // Note: missing test here that takes advantage of automatically exempting
+ // test-only changes from pulling in dependents
+ //
+ // turbo-specific tests below here
+ {
+ "changed package was requested scope, and we're matching dependencies",
+ []*TargetSelector{
+ {
+ fromRef: "HEAD~1",
+ namePattern: "package-1",
+ matchDependencies: true,
+ },
+ },
+ []string{"package-1"},
+ },
+ {
+ "older commit",
+ []*TargetSelector{
+ {
+ fromRef: "HEAD~2",
+ },
+ },
+ []string{"package-1", "package-2", "package-3", util.RootPkgName},
+ },
+ {
+ "commit range",
+ []*TargetSelector{
+ {
+ fromRef: "HEAD~2",
+ toRefOverride: "HEAD~1",
+ },
+ },
+ []string{"package-3"},
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.Name, func(t *testing.T) {
+ pkgs, err := r.getFilteredPackages(tc.Selectors)
+ if err != nil {
+ t.Fatalf("%v failed to filter packages: %v", tc.Name, err)
+ }
+ setMatches(t, tc.Name, pkgs.pkgs, tc.Expected)
+ })
+ }
+}
diff --git a/cli/internal/scope/filter/matcher.go b/cli/internal/scope/filter/matcher.go
new file mode 100644
index 0000000..2460326
--- /dev/null
+++ b/cli/internal/scope/filter/matcher.go
@@ -0,0 +1,32 @@
+package filter
+
+import (
+ "regexp"
+ "strings"
+
+ "github.com/pkg/errors"
+)
+
+type Matcher = func(pkgName string) bool
+
+func matchAll(pkgName string) bool {
+ return true
+}
+
+func matcherFromPattern(pattern string) (Matcher, error) {
+ if pattern == "*" {
+ return matchAll, nil
+ }
+
+ escaped := regexp.QuoteMeta(pattern)
+ // replace escaped '*' with regex '.*'
+ normalized := strings.ReplaceAll(escaped, "\\*", ".*")
+ if normalized == pattern {
+ return func(pkgName string) bool { return pkgName == pattern }, nil
+ }
+ regex, err := regexp.Compile("^" + normalized + "$")
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to compile filter pattern to regex: %v", pattern)
+ }
+ return func(pkgName string) bool { return regex.MatchString(pkgName) }, nil
+}
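+
+// For example, "eslint-*" compiles to the anchored regex ^eslint-.*$ and so
+// matches "eslint-plugin-foo" but not "express". A pattern containing no "*"
+// falls through to plain string equality, and "*" short-circuits to matchAll
+// without compiling a regex at all.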
diff --git a/cli/internal/scope/filter/matcher_test.go b/cli/internal/scope/filter/matcher_test.go
new file mode 100644
index 0000000..966be2b
--- /dev/null
+++ b/cli/internal/scope/filter/matcher_test.go
@@ -0,0 +1,65 @@
+package filter
+
+import "testing"
+
+func TestMatcher(t *testing.T) {
+ testCases := map[string][]struct {
+ test string
+ want bool
+ }{
+ "*": {
+ {
+ test: "@eslint/plugin-foo",
+ want: true,
+ },
+ {
+ test: "express",
+ want: true,
+ },
+ },
+ "eslint-*": {
+ {
+ test: "eslint-plugin-foo",
+ want: true,
+ },
+ {
+ test: "express",
+ want: false,
+ },
+ },
+ "*plugin*": {
+ {
+ test: "@eslint/plugin-foo",
+ want: true,
+ },
+ {
+ test: "express",
+ want: false,
+ },
+ },
+ "a*c": {
+ {
+ test: "abc",
+ want: true,
+ },
+ },
+ "*-positive": {
+ {
+ test: "is-positive",
+ want: true,
+ },
+ },
+ }
+ for pattern, tests := range testCases {
+ matcher, err := matcherFromPattern(pattern)
+ if err != nil {
+ t.Fatalf("failed to compile match pattern %v, %v", pattern, err)
+ }
+ for _, testCase := range tests {
+ got := matcher(testCase.test)
+ if got != testCase.want {
+ t.Errorf("%v.match(%v) got %v, want %v", pattern, testCase.test, got, testCase.want)
+ }
+ }
+ }
+}
diff --git a/cli/internal/scope/filter/parse_target_selector.go b/cli/internal/scope/filter/parse_target_selector.go
new file mode 100644
index 0000000..4f5c90f
--- /dev/null
+++ b/cli/internal/scope/filter/parse_target_selector.go
@@ -0,0 +1,165 @@
+package filter
+
+import (
+ "regexp"
+ "strings"
+
+ "github.com/pkg/errors"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+type TargetSelector struct {
+ includeDependencies bool
+ matchDependencies bool
+ includeDependents bool
+ exclude bool
+ excludeSelf bool
+ followProdDepsOnly bool
+ parentDir turbopath.RelativeSystemPath
+ namePattern string
+ fromRef string
+ toRefOverride string
+ raw string
+}
+
+func (ts *TargetSelector) IsValid() bool {
+ return ts.fromRef != "" || ts.parentDir != "" || ts.namePattern != ""
+}
+
+// getToRef returns the git ref to use for upper bound of the comparison when finding changed
+// packages.
+func (ts *TargetSelector) getToRef() string {
+ if ts.toRefOverride == "" {
+ return "HEAD"
+ }
+ return ts.toRefOverride
+}
+
+var errCantMatchDependencies = errors.New("cannot use match dependencies without specifying either a directory or package")
+
+var targetSelectorRegex = regexp.MustCompile(`^(?P<name>[^.](?:[^{}[\]]*[^{}[\].])?)?(?P<directory>\{[^}]*\})?(?P<commits>(?:\.{3})?\[[^\]]+\])?$`)
+
+// ParseTargetSelector parses a pnpm-compatible --filter selector string into a TargetSelector
+func ParseTargetSelector(rawSelector string) (*TargetSelector, error) {
+ exclude := false
+ firstChar := rawSelector[0]
+ selector := rawSelector
+ if firstChar == '!' {
+ selector = selector[1:]
+ exclude = true
+ }
+ excludeSelf := false
+ includeDependencies := strings.HasSuffix(selector, "...")
+ if includeDependencies {
+ selector = selector[:len(selector)-3]
+ if strings.HasSuffix(selector, "^") {
+ excludeSelf = true
+ selector = selector[:len(selector)-1]
+ }
+ }
+ includeDependents := strings.HasPrefix(selector, "...")
+ if includeDependents {
+ selector = selector[3:]
+ if strings.HasPrefix(selector, "^") {
+ excludeSelf = true
+ selector = selector[1:]
+ }
+ }
+
+ matches := targetSelectorRegex.FindAllStringSubmatch(selector, -1)
+
+ if len(matches) == 0 {
+ if relativePath, ok := isSelectorByLocation(selector); ok {
+ return &TargetSelector{
+ exclude: exclude,
+ includeDependencies: includeDependencies,
+ includeDependents: includeDependents,
+ parentDir: relativePath,
+ raw: rawSelector,
+ }, nil
+ }
+ return &TargetSelector{
+ exclude: exclude,
+ excludeSelf: excludeSelf,
+ includeDependencies: includeDependencies,
+ includeDependents: includeDependents,
+ namePattern: selector,
+ raw: rawSelector,
+ }, nil
+ }
+
+ fromRef := ""
+ toRefOverride := ""
+ var parentDir turbopath.RelativeSystemPath
+ namePattern := ""
+ preAddDependencies := false
+ if len(matches) > 0 && len(matches[0]) > 0 {
+ match := matches[0]
+ namePattern = match[targetSelectorRegex.SubexpIndex("name")]
+ rawParentDir := match[targetSelectorRegex.SubexpIndex("directory")]
+ if len(rawParentDir) > 0 {
+ // trim {}
+ rawParentDir = rawParentDir[1 : len(rawParentDir)-1]
+ if rawParentDir == "" {
+ return nil, errors.New("empty path specification")
+ } else if relPath, err := turbopath.CheckedToRelativeSystemPath(rawParentDir); err == nil {
+ parentDir = relPath
+ } else {
+ return nil, errors.Wrapf(err, "invalid path specification: %v", rawParentDir)
+ }
+ }
+ rawCommits := match[targetSelectorRegex.SubexpIndex("commits")]
+ if len(rawCommits) > 0 {
+ fromRef = rawCommits
+ if strings.HasPrefix(fromRef, "...") {
+ if parentDir == "" && namePattern == "" {
+ return &TargetSelector{}, errCantMatchDependencies
+ }
+ preAddDependencies = true
+ fromRef = fromRef[3:]
+ }
+ // strip []
+ fromRef = fromRef[1 : len(fromRef)-1]
+ refs := strings.Split(fromRef, "...")
+ if len(refs) == 2 {
+ fromRef = refs[0]
+ toRefOverride = refs[1]
+ }
+ }
+ }
+
+ return &TargetSelector{
+ fromRef: fromRef,
+ toRefOverride: toRefOverride,
+ exclude: exclude,
+ excludeSelf: excludeSelf,
+ includeDependencies: includeDependencies,
+ matchDependencies: preAddDependencies,
+ includeDependents: includeDependents,
+ namePattern: namePattern,
+ parentDir: parentDir,
+ raw: rawSelector,
+ }, nil
+}
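+
+// Illustrative examples of the selector grammar handled above (these mirror
+// cases in parse_target_selector_test.go):
+//
+//     "foo..."          -> namePattern "foo", includeDependencies
+//     "...foo"          -> namePattern "foo", includeDependents
+//     "foo^..."         -> namePattern "foo", includeDependencies, excludeSelf
+//     "{./foo}"         -> parentDir "foo"
+//     "[from...to]"     -> fromRef "from", toRefOverride "to"
+//     "foo...[master]"  -> namePattern "foo", fromRef "master", matchDependencies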
+
+// isSelectorByLocation checks whether the selector is a filesystem location (".", "./x", "..", "../x"), returning the parsed relative path and true if so
+func isSelectorByLocation(rawSelector string) (turbopath.RelativeSystemPath, bool) {
+ if rawSelector[0:1] != "." {
+ return "", false
+ }
+
+ // . or ./ or .\
+ if len(rawSelector) == 1 || rawSelector[1:2] == "/" || rawSelector[1:2] == "\\" {
+ return turbopath.MakeRelativeSystemPath(rawSelector), true
+ }
+
+ if rawSelector[1:2] != "." {
+ return "", false
+ }
+
+ // .. or ../ or ..\
+ if len(rawSelector) == 2 || rawSelector[2:3] == "/" || rawSelector[2:3] == "\\" {
+ return turbopath.MakeRelativeSystemPath(rawSelector), true
+ }
+ return "", false
+}
diff --git a/cli/internal/scope/filter/parse_target_selector_test.go b/cli/internal/scope/filter/parse_target_selector_test.go
new file mode 100644
index 0000000..2973a61
--- /dev/null
+++ b/cli/internal/scope/filter/parse_target_selector_test.go
@@ -0,0 +1,311 @@
+package filter
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+func TestParseTargetSelector(t *testing.T) {
+ tests := []struct {
+ rawSelector string
+ want *TargetSelector
+ wantErr bool
+ }{
+ {
+ "{}",
+ &TargetSelector{},
+ true,
+ },
+ {
+ "foo",
+ &TargetSelector{
+ fromRef: "",
+ exclude: false,
+ excludeSelf: false,
+ includeDependencies: false,
+ includeDependents: false,
+ namePattern: "foo",
+ parentDir: "",
+ },
+ false,
+ },
+ {
+ "foo...",
+ &TargetSelector{
+ fromRef: "",
+ exclude: false,
+ excludeSelf: false,
+ includeDependencies: true,
+ includeDependents: false,
+ namePattern: "foo",
+ parentDir: "",
+ },
+ false,
+ },
+ {
+ "...foo",
+ &TargetSelector{
+ fromRef: "",
+ exclude: false,
+ excludeSelf: false,
+ includeDependencies: false,
+ includeDependents: true,
+ namePattern: "foo",
+ parentDir: "",
+ },
+ false,
+ },
+ {
+ "...foo...",
+ &TargetSelector{
+ fromRef: "",
+ exclude: false,
+ excludeSelf: false,
+ includeDependencies: true,
+ includeDependents: true,
+ namePattern: "foo",
+ parentDir: "",
+ },
+ false,
+ },
+ {
+ "foo^...",
+ &TargetSelector{
+ fromRef: "",
+ exclude: false,
+ excludeSelf: true,
+ includeDependencies: true,
+ includeDependents: false,
+ namePattern: "foo",
+ parentDir: "",
+ },
+ false,
+ },
+ {
+ "...^foo",
+ &TargetSelector{
+ fromRef: "",
+ exclude: false,
+ excludeSelf: true,
+ includeDependencies: false,
+ includeDependents: true,
+ namePattern: "foo",
+ parentDir: "",
+ },
+ false,
+ },
+ {
+ "./foo",
+ &TargetSelector{
+ fromRef: "",
+ exclude: false,
+ excludeSelf: false,
+ includeDependencies: false,
+ includeDependents: false,
+ namePattern: "",
+ parentDir: "foo",
+ },
+ false,
+ },
+ {
+ "../foo",
+ &TargetSelector{
+ fromRef: "",
+ exclude: false,
+ excludeSelf: false,
+ includeDependencies: false,
+ includeDependents: false,
+ namePattern: "",
+ parentDir: turbopath.MakeRelativeSystemPath("..", "foo"),
+ },
+ false,
+ },
+ {
+ "...{./foo}",
+ &TargetSelector{
+ fromRef: "",
+ exclude: false,
+ excludeSelf: false,
+ includeDependencies: false,
+ includeDependents: true,
+ namePattern: "",
+ parentDir: "foo",
+ },
+ false,
+ },
+ {
+ ".",
+ &TargetSelector{
+ fromRef: "",
+ exclude: false,
+ excludeSelf: false,
+ includeDependencies: false,
+ includeDependents: false,
+ namePattern: "",
+ parentDir: ".",
+ },
+ false,
+ },
+ {
+ "..",
+ &TargetSelector{
+ fromRef: "",
+ exclude: false,
+ excludeSelf: false,
+ includeDependencies: false,
+ includeDependents: false,
+ namePattern: "",
+ parentDir: "..",
+ },
+ false,
+ },
+ {
+ "[master]",
+ &TargetSelector{
+ fromRef: "master",
+ exclude: false,
+ excludeSelf: false,
+ includeDependencies: false,
+ includeDependents: false,
+ namePattern: "",
+ parentDir: "",
+ },
+ false,
+ },
+ {
+ "[from...to]",
+ &TargetSelector{
+ fromRef: "from",
+ toRefOverride: "to",
+ },
+ false,
+ },
+ {
+ "{foo}[master]",
+ &TargetSelector{
+ fromRef: "master",
+ exclude: false,
+ excludeSelf: false,
+ includeDependencies: false,
+ includeDependents: false,
+ namePattern: "",
+ parentDir: "foo",
+ },
+ false,
+ },
+ {
+ "pattern{foo}[master]",
+ &TargetSelector{
+ fromRef: "master",
+ exclude: false,
+ excludeSelf: false,
+ includeDependencies: false,
+ includeDependents: false,
+ namePattern: "pattern",
+ parentDir: "foo",
+ },
+ false,
+ },
+ {
+ "[master]...",
+ &TargetSelector{
+ fromRef: "master",
+ exclude: false,
+ excludeSelf: false,
+ includeDependencies: true,
+ includeDependents: false,
+ namePattern: "",
+ parentDir: "",
+ },
+ false,
+ },
+ {
+ "...[master]",
+ &TargetSelector{
+ fromRef: "master",
+ exclude: false,
+ excludeSelf: false,
+ includeDependencies: false,
+ includeDependents: true,
+ namePattern: "",
+ parentDir: "",
+ },
+ false,
+ },
+ {
+ "...[master]...",
+ &TargetSelector{
+ fromRef: "master",
+ exclude: false,
+ excludeSelf: false,
+ includeDependencies: true,
+ includeDependents: true,
+ namePattern: "",
+ parentDir: "",
+ },
+ false,
+ },
+ {
+ "...[from...to]...",
+ &TargetSelector{
+ fromRef: "from",
+ toRefOverride: "to",
+ includeDependencies: true,
+ includeDependents: true,
+ },
+ false,
+ },
+ {
+ "foo...[master]",
+ &TargetSelector{
+ fromRef: "master",
+ namePattern: "foo",
+ matchDependencies: true,
+ },
+ false,
+ },
+ {
+ "foo...[master]...",
+ &TargetSelector{
+ fromRef: "master",
+ namePattern: "foo",
+ matchDependencies: true,
+ includeDependencies: true,
+ },
+ false,
+ },
+ {
+ "{foo}...[master]",
+ &TargetSelector{
+ fromRef: "master",
+ parentDir: "foo",
+ matchDependencies: true,
+ },
+ false,
+ },
+ {
+ "......[master]",
+ &TargetSelector{},
+ true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.rawSelector, func(t *testing.T) {
+ got, err := ParseTargetSelector(tt.rawSelector)
+ if tt.wantErr {
+ if err == nil {
+ t.Errorf("ParseTargetSelector() error = %#v, wantErr %#v", err, tt.wantErr)
+ }
+ } else {
+ // copy the raw selector from the args into what we want. This value is used
+ // for reporting errors in the case of a malformed selector
+ tt.want.raw = tt.rawSelector
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("ParseTargetSelector() = %#v, want %#v", got, tt.want)
+ }
+ }
+ })
+ }
+}
diff --git a/cli/internal/scope/scope.go b/cli/internal/scope/scope.go
new file mode 100644
index 0000000..b5ed4e7
--- /dev/null
+++ b/cli/internal/scope/scope.go
@@ -0,0 +1,380 @@
+package scope
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "github.com/hashicorp/go-hclog"
+ "github.com/mitchellh/cli"
+ "github.com/pkg/errors"
+ "github.com/vercel/turbo/cli/internal/context"
+ "github.com/vercel/turbo/cli/internal/lockfile"
+ "github.com/vercel/turbo/cli/internal/scm"
+ scope_filter "github.com/vercel/turbo/cli/internal/scope/filter"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "github.com/vercel/turbo/cli/internal/turbostate"
+ "github.com/vercel/turbo/cli/internal/util"
+ "github.com/vercel/turbo/cli/internal/util/filter"
+ "github.com/vercel/turbo/cli/internal/workspace"
+)
+
+// LegacyFilter holds the options that predate the --filter syntax. They have
+// their own rules for how they are compiled into filter expressions.
+type LegacyFilter struct {
+ // IncludeDependencies is whether to include pkg.dependencies in execution (defaults to false)
+ IncludeDependencies bool
+ // SkipDependents is whether to skip running impacted dependent consumers in execution (defaults to false)
+ SkipDependents bool
+ // Entrypoints is a list of package entrypoints
+ Entrypoints []string
+ // Since is the git ref used to calculate changed packages
+ Since string
+}
+
+var _sinceHelp = `Limit/Set scope to changed packages since a
+mergebase. This uses the git diff ${target_branch}...
+mechanism to identify which packages have changed.`
+
+func addLegacyFlagsFromArgs(opts *LegacyFilter, args *turbostate.ParsedArgsFromRust) {
+ opts.IncludeDependencies = args.Command.Run.IncludeDependencies
+ opts.SkipDependents = args.Command.Run.NoDeps
+ opts.Entrypoints = args.Command.Run.Scope
+ opts.Since = args.Command.Run.Since
+}
+
+// Opts holds the options for how to select the entrypoint packages for a turbo run
+type Opts struct {
+ LegacyFilter LegacyFilter
+ // IgnorePatterns is the list of globs of file paths to ignore from execution scope calculation
+ IgnorePatterns []string
+ // GlobalDepPatterns is a list of globs to global files whose contents will be included in the global hash calculation
+ GlobalDepPatterns []string
+ // Patterns are the filter patterns supplied to --filter on the commandline
+ FilterPatterns []string
+
+ PackageInferenceRoot turbopath.RelativeSystemPath
+}
+
+var (
+ _filterHelp = `Use the given selector to specify package(s) to act as
+entry points. The syntax mirrors pnpm's syntax, and
+additional documentation and examples can be found in
+turbo's documentation https://turbo.build/repo/docs/reference/command-line-reference#--filter
+--filter can be specified multiple times. Packages that
+match any filter will be included.`
+ _ignoreHelp = `Files to ignore when calculating changed files (i.e. --since). Supports globs.`
+ _globalDepHelp = `Specify glob of global filesystem dependencies to be hashed. Useful for .env and files
+in the root directory. Includes turbo.json, root package.json, and the root lockfile by default.`
+)
+
+// resolvePackageInferencePath normalizes the package inference path. We compare
+// against "" in several places, so maintain that behavior. In a post-rust-port
+// world, this should more properly be an Option.
+ pkgInferenceRoot, err := turbopath.CheckedToRelativeSystemPath(raw)
+ if err != nil {
+ return "", errors.Wrapf(err, "invalid package inference root %v", raw)
+ }
+ if pkgInferenceRoot == "." {
+ return "", nil
+ }
+ return pkgInferenceRoot, nil
+}
+
+// OptsFromArgs adds the settings relevant to this package to the given Opts
+func OptsFromArgs(opts *Opts, args *turbostate.ParsedArgsFromRust) error {
+ opts.FilterPatterns = args.Command.Run.Filter
+ opts.IgnorePatterns = args.Command.Run.Ignore
+ opts.GlobalDepPatterns = args.Command.Run.GlobalDeps
+ pkgInferenceRoot, err := resolvePackageInferencePath(args.Command.Run.PkgInferenceRoot)
+ if err != nil {
+ return err
+ }
+ opts.PackageInferenceRoot = pkgInferenceRoot
+ addLegacyFlagsFromArgs(&opts.LegacyFilter, args)
+ return nil
+}
+
+// AsFilterPatterns normalizes legacy selectors to filter syntax
+func (l *LegacyFilter) AsFilterPatterns() []string {
+ var patterns []string
+ prefix := ""
+ if !l.SkipDependents {
+ prefix = "..."
+ }
+ suffix := ""
+ if l.IncludeDependencies {
+ suffix = "..."
+ }
+ since := ""
+ if l.Since != "" {
+ since = fmt.Sprintf("[%v]", l.Since)
+ }
+ if len(l.Entrypoints) > 0 {
+ // --scope implies our tweaked syntax to see if any dependency matches
+ if since != "" {
+ since = "..." + since
+ }
+ for _, pattern := range l.Entrypoints {
+ if strings.HasPrefix(pattern, "!") {
+ patterns = append(patterns, pattern)
+ } else {
+ filterPattern := fmt.Sprintf("%v%v%v%v", prefix, pattern, since, suffix)
+ patterns = append(patterns, filterPattern)
+ }
+ }
+ } else if since != "" {
+ // no scopes specified, but --since was provided
+ filterPattern := fmt.Sprintf("%v%v%v", prefix, since, suffix)
+ patterns = append(patterns, filterPattern)
+ }
+ return patterns
+}
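+
+// For example (tracing the assembly above): LegacyFilter{Entrypoints:
+// []string{"foo"}, Since: "main", IncludeDependencies: true} compiles to the
+// single pattern "...foo...[main]...", while LegacyFilter{Since: "main"} by
+// itself compiles to "...[main]".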
+
+// ResolvePackages translates the specified flags to a set of entry point packages for
+// the selected tasks. It returns the selected packages and whether or not they
+// represent the default "all packages" set.
+func ResolvePackages(opts *Opts, repoRoot turbopath.AbsoluteSystemPath, scm scm.SCM, ctx *context.Context, tui cli.Ui, logger hclog.Logger) (util.Set, bool, error) {
+ inferenceBase, err := calculateInference(repoRoot, opts.PackageInferenceRoot, ctx.WorkspaceInfos, logger)
+ if err != nil {
+ return nil, false, err
+ }
+ filterResolver := &scope_filter.Resolver{
+ Graph: &ctx.WorkspaceGraph,
+ WorkspaceInfos: ctx.WorkspaceInfos,
+ Cwd: repoRoot,
+ Inference: inferenceBase,
+ PackagesChangedInRange: opts.getPackageChangeFunc(scm, repoRoot, ctx),
+ }
+ filterPatterns := opts.FilterPatterns
+ legacyFilterPatterns := opts.LegacyFilter.AsFilterPatterns()
+ filterPatterns = append(filterPatterns, legacyFilterPatterns...)
+ isAllPackages := len(filterPatterns) == 0 && opts.PackageInferenceRoot == ""
+ filteredPkgs, err := filterResolver.GetPackagesFromPatterns(filterPatterns)
+ if err != nil {
+ return nil, false, err
+ }
+
+ if isAllPackages {
+ // no filters specified, run every package
+ for _, f := range ctx.WorkspaceNames {
+ filteredPkgs.Add(f)
+ }
+ }
+ filteredPkgs.Delete(ctx.RootNode)
+ return filteredPkgs, isAllPackages, nil
+}
+
+func calculateInference(repoRoot turbopath.AbsoluteSystemPath, pkgInferencePath turbopath.RelativeSystemPath, packageInfos workspace.Catalog, logger hclog.Logger) (*scope_filter.PackageInference, error) {
+ if pkgInferencePath == "" {
+ // No inference specified, no need to calculate anything
+ return nil, nil
+ }
+ logger.Debug(fmt.Sprintf("Using %v as a basis for selecting packages", pkgInferencePath))
+ fullInferencePath := repoRoot.Join(pkgInferencePath)
+ for _, pkgInfo := range packageInfos.PackageJSONs {
+ pkgPath := pkgInfo.Dir.RestoreAnchor(repoRoot)
+ inferredPathIsBelow, err := pkgPath.ContainsPath(fullInferencePath)
+ if err != nil {
+ return nil, err
+ }
+ // We skip over the root package as the inferred path will always be below it
+ if inferredPathIsBelow && pkgPath != repoRoot {
+ // set both. The user might have set a parent directory filter,
+ // in which case we *should* fail to find any packages, but we should
+ // do so in a consistent manner
+ return &scope_filter.PackageInference{
+ PackageName: pkgInfo.Name,
+ DirectoryRoot: pkgInferencePath,
+ }, nil
+ }
+ inferredPathIsBetweenRootAndPkg, err := fullInferencePath.ContainsPath(pkgPath)
+ if err != nil {
+ return nil, err
+ }
+ if inferredPathIsBetweenRootAndPkg {
+ // we've found *some* package below our inference directory. We can stop now and conclude
+ // that we're looking for all packages in a subdirectory
+ break
+ }
+ }
+ return &scope_filter.PackageInference{
+ DirectoryRoot: pkgInferencePath,
+ }, nil
+}
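+
+// For example (a sketch using hypothetical workspace names): an inference
+// root of packages/ui, where a package named "ui" lives, yields
+// PackageInference{PackageName: "ui", DirectoryRoot: "packages/ui"}; an
+// inference root of packages/, which merely contains packages, yields
+// PackageInference{DirectoryRoot: "packages"} so that every workspace
+// underneath is considered.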
+
+func (o *Opts) getPackageChangeFunc(scm scm.SCM, cwd turbopath.AbsoluteSystemPath, ctx *context.Context) scope_filter.PackagesChangedInRange {
+ return func(fromRef string, toRef string) (util.Set, error) {
+ // We could filter changed files at the git level, since it's possible
+ // that the changes we're interested in are scoped, but we need to handle
+ // global dependencies changing as well. A future optimization might be to
+ // scope changed files more deeply if we know there are no global dependencies.
+ var changedFiles []string
+ if fromRef != "" {
+ scmChangedFiles, err := scm.ChangedFiles(fromRef, toRef, cwd.ToStringDuringMigration())
+ if err != nil {
+ return nil, err
+ }
+ sort.Strings(scmChangedFiles)
+ changedFiles = scmChangedFiles
+ }
+ makeAllPkgs := func() util.Set {
+ allPkgs := make(util.Set)
+ for pkg := range ctx.WorkspaceInfos.PackageJSONs {
+ allPkgs.Add(pkg)
+ }
+ return allPkgs
+ }
+ if hasRepoGlobalFileChanged, err := repoGlobalFileHasChanged(o, getDefaultGlobalDeps(), changedFiles); err != nil {
+ return nil, err
+ } else if hasRepoGlobalFileChanged {
+ return makeAllPkgs(), nil
+ }
+
+ filteredChangedFiles, err := filterIgnoredFiles(o, changedFiles)
+ if err != nil {
+ return nil, err
+ }
+ changedPkgs := getChangedPackages(filteredChangedFiles, ctx.WorkspaceInfos)
+
+ if lockfileChanges, fullChanges := getChangesFromLockfile(scm, ctx, changedFiles, fromRef); !fullChanges {
+ for _, pkg := range lockfileChanges {
+ changedPkgs.Add(pkg)
+ }
+ } else {
+ return makeAllPkgs(), nil
+ }
+
+ return changedPkgs, nil
+ }
+}
+
+func getChangesFromLockfile(scm scm.SCM, ctx *context.Context, changedFiles []string, fromRef string) ([]string, bool) {
+ lockfileFilter, err := filter.Compile([]string{ctx.PackageManager.Lockfile})
+ if err != nil {
+ panic(fmt.Sprintf("Lockfile is invalid glob: %v", err))
+ }
+ match := false
+ for _, file := range changedFiles {
+ if lockfileFilter.Match(file) {
+ match = true
+ break
+ }
+ }
+ if !match {
+ return nil, false
+ }
+
+ if lockfile.IsNil(ctx.Lockfile) {
+ return nil, true
+ }
+
+ prevContents, err := scm.PreviousContent(fromRef, ctx.PackageManager.Lockfile)
+ if err != nil {
+ // unable to reconstruct old lockfile, assume everything changed
+ return nil, true
+ }
+ prevLockfile, err := ctx.PackageManager.UnmarshalLockfile(ctx.WorkspaceInfos.PackageJSONs[util.RootPkgName], prevContents)
+ if err != nil {
+ // unable to parse old lockfile, assume everything changed
+ return nil, true
+ }
+ additionalPkgs, err := ctx.ChangedPackages(prevLockfile)
+ if err != nil {
+ // missing at least one lockfile, assume everything changed
+ return nil, true
+ }
+
+ return additionalPkgs, false
+}
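+
+// To summarize the fallback ladder above: if no changed file matches the
+// lockfile glob, no extra packages are reported; if the lockfile changed but
+// its previous contents cannot be recovered or parsed, or the dependency
+// diff fails, the caller treats every package as changed; otherwise only
+// packages whose external dependencies actually changed are added.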
+
+func getDefaultGlobalDeps() []string {
+ // include turbo.json and root package.json as implicit global dependencies
+ defaultGlobalDeps := []string{
+ "turbo.json",
+ "package.json",
+ }
+ return defaultGlobalDeps
+}
+
+func repoGlobalFileHasChanged(opts *Opts, defaultGlobalDeps []string, changedFiles []string) (bool, error) {
+ globalDepsGlob, err := filter.Compile(append(opts.GlobalDepPatterns, defaultGlobalDeps...))
+ if err != nil {
+ return false, errors.Wrap(err, "invalid global deps glob")
+ }
+
+ if globalDepsGlob != nil {
+ for _, file := range changedFiles {
+ if globalDepsGlob.Match(filepath.ToSlash(file)) {
+ return true, nil
+ }
+ }
+ }
+ return false, nil
+}
+
+func filterIgnoredFiles(opts *Opts, changedFiles []string) ([]string, error) {
+ // changedFiles is an array of repo-relative system paths.
+ // opts.IgnorePatterns is an array of unix-separator glob paths.
+ ignoreGlob, err := filter.Compile(opts.IgnorePatterns)
+ if err != nil {
+ return nil, errors.Wrap(err, "invalid ignore globs")
+ }
+ filteredChanges := []string{}
+ for _, file := range changedFiles {
+ // If we don't have anything to ignore, or if this file doesn't match the ignore pattern,
+ // keep it as a changed file.
+ if ignoreGlob == nil || !ignoreGlob.Match(filepath.ToSlash(file)) {
+ filteredChanges = append(filteredChanges, file)
+ }
+ }
+ return filteredChanges, nil
+}
+
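+// fileInPackage reports whether changedFile is packagePath itself or a path
+// inside it. Examples (illustrative; real inputs use system separators):
+//
+//	fileInPackage("libs/libB/src/index.ts", "libs/libB") // true
+//	fileInPackage("libs/libB", "libs/libB")              // true
+//	fileInPackage("libs/libB2/index.ts", "libs/libB")    // false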
+func fileInPackage(changedFile string, packagePath string) bool {
+ // This whole method is equivalent to matching changedFile against the
+ // regex /^<packagePath>(<sep>.*)?$/. The regex is more expensive, so we
+ // do the prefix and boundary checks by hand.
+
+ // If it has the prefix, it might be in the package.
+ if strings.HasPrefix(changedFile, packagePath) {
+ // Now we need to see if the prefix stopped at a reasonable boundary.
+ prefixLen := len(packagePath)
+ changedFileLen := len(changedFile)
+
+ // Same path.
+ if prefixLen == changedFileLen {
+ return true
+ }
+
+ // We know changedFile is longer than packagePath.
+ // We can safely directly index into it.
+ // Look ahead one byte and see if it's the separator.
+ if changedFile[prefixLen] == os.PathSeparator {
+ return true
+ }
+ }
+
+ // If it does not have the prefix, it's definitely not in the package.
+ return false
+}
+
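+// getChangedPackages maps changed files to the workspace packages that contain
+// them; any file that falls outside every package is attributed to the root
+// package.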
+func getChangedPackages(changedFiles []string, packageInfos workspace.Catalog) util.Set {
+ changedPackages := make(util.Set)
+ for _, changedFile := range changedFiles {
+ found := false
+ for pkgName, pkgInfo := range packageInfos.PackageJSONs {
+ if pkgName != util.RootPkgName && fileInPackage(changedFile, pkgInfo.Dir.ToStringDuringMigration()) {
+ changedPackages.Add(pkgName)
+ found = true
+ break
+ }
+ }
+ if !found {
+ // Consider the root package to have changed
+ changedPackages.Add(util.RootPkgName)
+ }
+ }
+ return changedPackages
+}
diff --git a/cli/internal/scope/scope_test.go b/cli/internal/scope/scope_test.go
new file mode 100644
index 0000000..216984d
--- /dev/null
+++ b/cli/internal/scope/scope_test.go
@@ -0,0 +1,550 @@
+package scope
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "reflect"
+ "testing"
+
+ "github.com/hashicorp/go-hclog"
+ "github.com/pyr-sh/dag"
+ "github.com/vercel/turbo/cli/internal/context"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/lockfile"
+ "github.com/vercel/turbo/cli/internal/packagemanager"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "github.com/vercel/turbo/cli/internal/ui"
+ "github.com/vercel/turbo/cli/internal/util"
+ "github.com/vercel/turbo/cli/internal/workspace"
+)
+
+type mockSCM struct {
+ changed []string
+ contents map[string][]byte
+}
+
+func (m *mockSCM) ChangedFiles(_fromCommit string, _toCommit string, _relativeTo string) ([]string, error) {
+ return m.changed, nil
+}
+
+func (m *mockSCM) PreviousContent(fromCommit string, filePath string) ([]byte, error) {
+ contents, ok := m.contents[filePath]
+ if !ok {
+ return nil, fmt.Errorf("No contents found")
+ }
+ return contents, nil
+}
+
+type mockLockfile struct {
+ globalChange bool
+ versions map[string]string
+ allDeps map[string]map[string]string
+}
+
+func (m *mockLockfile) ResolvePackage(workspacePath turbopath.AnchoredUnixPath, name string, version string) (lockfile.Package, error) {
+ resolvedVersion, ok := m.versions[name]
+ if ok {
+ key := fmt.Sprintf("%s%s", name, version)
+ return lockfile.Package{Key: key, Version: resolvedVersion, Found: true}, nil
+ }
+ return lockfile.Package{Found: false}, nil
+}
+
+func (m *mockLockfile) AllDependencies(key string) (map[string]string, bool) {
+ deps, ok := m.allDeps[key]
+ return deps, ok
+}
+
+func (m *mockLockfile) Encode(w io.Writer) error {
+ return nil
+}
+
+func (m *mockLockfile) GlobalChange(other lockfile.Lockfile) bool {
+ return m.globalChange || (other != nil && other.(*mockLockfile).globalChange)
+}
+
+func (m *mockLockfile) Patches() []turbopath.AnchoredUnixPath {
+ return nil
+}
+
+func (m *mockLockfile) Subgraph(workspaces []turbopath.AnchoredSystemPath, packages []string) (lockfile.Lockfile, error) {
+ return nil, nil
+}
+
+var _ (lockfile.Lockfile) = (*mockLockfile)(nil)
+
+func TestResolvePackages(t *testing.T) {
+ cwd, err := os.Getwd()
+ if err != nil {
+ t.Fatalf("cwd: %v", err)
+ }
+ root, err := fs.GetCwd(cwd)
+ if err != nil {
+ t.Fatalf("cwd: %v", err)
+ }
+ tui := ui.Default()
+ logger := hclog.Default()
+ // Dependency graph:
+ //
+ // app0 -
+ // \
+ // app1 -> libA
+ // \
+ // > libB -> libD
+ // /
+ // app2 <
+ // \
+ // > libC
+ // /
+ // app2-a <
+ //
+ // Filesystem layout:
+ //
+ // app/
+ // app0
+ // app1
+ // app2
+ // app2-a
+ // libs/
+ // libA
+ // libB
+ // libC
+ // libD
+ graph := dag.AcyclicGraph{}
+ graph.Add("app0")
+ graph.Add("app1")
+ graph.Add("app2")
+ graph.Add("app2-a")
+ graph.Add("libA")
+ graph.Add("libB")
+ graph.Add("libC")
+ graph.Add("libD")
+ graph.Connect(dag.BasicEdge("libA", "libB"))
+ graph.Connect(dag.BasicEdge("libB", "libD"))
+ graph.Connect(dag.BasicEdge("app0", "libA"))
+ graph.Connect(dag.BasicEdge("app1", "libA"))
+ graph.Connect(dag.BasicEdge("app2", "libB"))
+ graph.Connect(dag.BasicEdge("app2", "libC"))
+ graph.Connect(dag.BasicEdge("app2-a", "libC"))
+ workspaceInfos := workspace.Catalog{
+ PackageJSONs: map[string]*fs.PackageJSON{
+ "//": {
+ Dir: turbopath.AnchoredSystemPath("").ToSystemPath(),
+ UnresolvedExternalDeps: map[string]string{"global": "2"},
+ TransitiveDeps: []lockfile.Package{{Key: "global2", Version: "2", Found: true}},
+ },
+ "app0": {
+ Dir: turbopath.AnchoredUnixPath("app/app0").ToSystemPath(),
+ Name: "app0",
+ UnresolvedExternalDeps: map[string]string{"app0-dep": "2"},
+ TransitiveDeps: []lockfile.Package{
+ {Key: "app0-dep2", Version: "2", Found: true},
+ {Key: "app0-util2", Version: "2", Found: true},
+ },
+ },
+ "app1": {
+ Dir: turbopath.AnchoredUnixPath("app/app1").ToSystemPath(),
+ Name: "app1",
+ },
+ "app2": {
+ Dir: turbopath.AnchoredUnixPath("app/app2").ToSystemPath(),
+ Name: "app2",
+ },
+ "app2-a": {
+ Dir: turbopath.AnchoredUnixPath("app/app2-a").ToSystemPath(),
+ Name: "app2-a",
+ },
+ "libA": {
+ Dir: turbopath.AnchoredUnixPath("libs/libA").ToSystemPath(),
+ Name: "libA",
+ },
+ "libB": {
+ Dir: turbopath.AnchoredUnixPath("libs/libB").ToSystemPath(),
+ Name: "libB",
+ UnresolvedExternalDeps: map[string]string{"external": "1"},
+ TransitiveDeps: []lockfile.Package{
+ {Key: "external-dep-a1", Version: "1", Found: true},
+ {Key: "external-dep-b1", Version: "1", Found: true},
+ {Key: "external1", Version: "1", Found: true},
+ },
+ },
+ "libC": {
+ Dir: turbopath.AnchoredUnixPath("libs/libC").ToSystemPath(),
+ Name: "libC",
+ },
+ "libD": {
+ Dir: turbopath.AnchoredUnixPath("libs/libD").ToSystemPath(),
+ Name: "libD",
+ },
+ },
+ }
+ packageNames := []string{}
+ for name := range workspaceInfos.PackageJSONs {
+ packageNames = append(packageNames, name)
+ }
+
+ // global -> globalDep
+ // app0-dep -> app0-dep :)
+
+ makeLockfile := func(f func(*mockLockfile)) *mockLockfile {
+ l := mockLockfile{
+ globalChange: false,
+ versions: map[string]string{
+ "global": "2",
+ "app0-dep": "2",
+ "app0-util": "2",
+ "external": "1",
+ "external-dep-a": "1",
+ "external-dep-b": "1",
+ },
+ allDeps: map[string]map[string]string{
+ "global2": map[string]string{},
+ "app0-dep2": map[string]string{
+ "app0-util": "2",
+ },
+ "app0-util2": map[string]string{},
+ "external1": map[string]string{
+ "external-dep-a": "1",
+ "external-dep-b": "1",
+ },
+ "external-dep-a1": map[string]string{},
+ "external-dep-b1": map[string]string{},
+ },
+ }
+ if f != nil {
+ f(&l)
+ }
+ return &l
+ }
+
+ testCases := []struct {
+ name string
+ changed []string
+ expected []string
+ expectAllPackages bool
+ scope []string
+ since string
+ ignore string
+ globalDeps []string
+ includeDependencies bool
+ includeDependents bool
+ lockfile string
+ currLockfile *mockLockfile
+ prevLockfile *mockLockfile
+ inferPkgPath string
+ }{
+ {
+ name: "Just scope and dependencies",
+ changed: []string{},
+ includeDependencies: true,
+ scope: []string{"app2"},
+ expected: []string{"app2", "libB", "libC", "libD"},
+ },
+ {
+ name: "Only turbo.json changed",
+ changed: []string{"turbo.json"},
+ expected: []string{"//", "app0", "app1", "app2", "app2-a", "libA", "libB", "libC", "libD"},
+ since: "dummy",
+ includeDependencies: true,
+ },
+ {
+ name: "Only root package.json changed",
+ changed: []string{"package.json"},
+ expected: []string{"//", "app0", "app1", "app2", "app2-a", "libA", "libB", "libC", "libD"},
+ since: "dummy",
+ includeDependencies: true,
+ },
+ {
+ name: "Only package-lock.json changed",
+ changed: []string{"package-lock.json"},
+ expected: []string{"//", "app0", "app1", "app2", "app2-a", "libA", "libB", "libC", "libD"},
+ since: "dummy",
+ includeDependencies: true,
+ lockfile: "package-lock.json",
+ },
+ {
+ name: "Only yarn.lock changed",
+ changed: []string{"yarn.lock"},
+ expected: []string{"//", "app0", "app1", "app2", "app2-a", "libA", "libB", "libC", "libD"},
+ since: "dummy",
+ includeDependencies: true,
+ lockfile: "yarn.lock",
+ },
+ {
+ name: "Only pnpm-lock.yaml changed",
+ changed: []string{"pnpm-lock.yaml"},
+ expected: []string{"//", "app0", "app1", "app2", "app2-a", "libA", "libB", "libC", "libD"},
+ since: "dummy",
+ includeDependencies: true,
+ lockfile: "pnpm-lock.yaml",
+ },
+ {
+ name: "One package changed",
+ changed: []string{"libs/libB/src/index.ts"},
+ expected: []string{"libB"},
+ since: "dummy",
+ },
+ {
+ name: "One package manifest changed",
+ changed: []string{"libs/libB/package.json"},
+ expected: []string{"libB"},
+ since: "dummy",
+ },
+ {
+ name: "An ignored package changed",
+ changed: []string{"libs/libB/src/index.ts"},
+ expected: []string{},
+ since: "dummy",
+ ignore: "libs/libB/**/*.ts",
+ },
+ {
+ // nothing in scope depends on the change
+ name: "unrelated library changed",
+ changed: []string{"libs/libC/src/index.ts"},
+ expected: []string{},
+ since: "dummy",
+ scope: []string{"app1"},
+ includeDependencies: true, // scope implies include-dependencies
+ },
+ {
+ // a dependent lib changed, scope implies include-dependencies,
+ // so all deps of app1 get built
+ name: "dependency of scope changed",
+ changed: []string{"libs/libA/src/index.ts"},
+ expected: []string{"libA", "libB", "libD", "app1"},
+ since: "dummy",
+ scope: []string{"app1"},
+ includeDependencies: true, // scope implies include-dependencies
+ },
+ {
+ // a dependent lib changed, user explicitly asked to not build dependencies.
+ // Since the package matching the scope had a changed dependency, we run it.
+ // We don't include its dependencies because the user asked for no dependencies.
+ // note: this is not yet supported by the CLI, as you cannot specify --include-dependencies=false
+ name: "dependency of scope changed, user asked to not include depedencies",
+ changed: []string{"libs/libA/src/index.ts"},
+ expected: []string{"app1"},
+ since: "dummy",
+ scope: []string{"app1"},
+ includeDependencies: false,
+ },
+ {
+ // a nested dependent lib changed, user explicitly asked to not build dependencies
+ // note: this is not yet supported by the CLI, as you cannot specify --include-dependencies=false
+ name: "nested dependency of scope changed, user asked to not include dependencies",
+ changed: []string{"libs/libB/src/index.ts"},
+ expected: []string{"app1"},
+ since: "dummy",
+ scope: []string{"app1"},
+ includeDependencies: false,
+ },
+ {
+ name: "global dependency changed, even though it was ignored, forcing a build of everything",
+ changed: []string{"libs/libB/src/index.ts"},
+ expected: []string{"//", "app0", "app1", "app2", "app2-a", "libA", "libB", "libC", "libD"},
+ since: "dummy",
+ ignore: "libs/libB/**/*.ts",
+ globalDeps: []string{"libs/**/*.ts"},
+ },
+ {
+ name: "an app changed, user asked for dependencies to build",
+ changed: []string{"app/app2/src/index.ts"},
+ since: "dummy",
+ includeDependencies: true,
+ expected: []string{"app2", "libB", "libC", "libD"},
+ },
+ {
+ name: "a library changed, user asked for dependents to be built",
+ changed: []string{"libs/libB"},
+ since: "dummy",
+ includeDependents: true,
+ expected: []string{"app0", "app1", "app2", "libA", "libB"},
+ },
+ {
+ // no changes, no base to compare against, defaults to everything
+ name: "no changes or scope specified, build everything",
+ since: "",
+ expected: []string{"//", "app0", "app1", "app2", "app2-a", "libA", "libB", "libC", "libD"},
+ expectAllPackages: true,
+ },
+ {
+ // a dependent library changed, no deps beyond the scope are built
+ // "libB" is still built because it is a dependent within the scope, but libB's dependents
+ // are skipped
+ name: "a dependent library changed, build up to scope",
+ changed: []string{"libs/libD/src/index.ts"},
+ since: "dummy",
+ scope: []string{"libB"},
+ expected: []string{"libB", "libD"},
+ includeDependencies: true, // scope implies include-dependencies
+ },
+ {
+ name: "library change, no scope",
+ changed: []string{"libs/libA/src/index.ts"},
+ expected: []string{"libA", "app0", "app1"},
+ includeDependents: true,
+ since: "dummy",
+ },
+ {
+ // make sure multiple apps with the same prefix are handled separately.
+ // prevents this issue: https://github.com/vercel/turbo/issues/1528
+ name: "Two apps with an overlapping prefix changed",
+ changed: []string{"app/app2/src/index.js", "app/app2-a/src/index.js"},
+ expected: []string{"app2", "app2-a"},
+ since: "dummy",
+ },
+ {
+ name: "Global lockfile change invalidates all packages",
+ changed: []string{"dummy.lock"},
+ expected: []string{"//", "app0", "app1", "app2", "app2-a", "libA", "libB", "libC", "libD"},
+ lockfile: "dummy.lock",
+ currLockfile: makeLockfile(nil),
+ prevLockfile: makeLockfile(func(ml *mockLockfile) {
+ ml.globalChange = true
+ }),
+ since: "dummy",
+ },
+ {
+ name: "Dependency of workspace root change invalidates all packages",
+ changed: []string{"dummy.lock"},
+ expected: []string{"//", "app0", "app1", "app2", "app2-a", "libA", "libB", "libC", "libD"},
+ lockfile: "dummy.lock",
+ currLockfile: makeLockfile(nil),
+ prevLockfile: makeLockfile(func(ml *mockLockfile) {
+ ml.versions["global"] = "3"
+ ml.allDeps["global3"] = map[string]string{}
+ }),
+ since: "dummy",
+ },
+ {
+ name: "Version change invalidates package",
+ changed: []string{"dummy.lock"},
+ expected: []string{"//", "app0"},
+ lockfile: "dummy.lock",
+ currLockfile: makeLockfile(nil),
+ prevLockfile: makeLockfile(func(ml *mockLockfile) {
+ ml.versions["app0-util"] = "3"
+ ml.allDeps["app0-dep2"] = map[string]string{"app0-util": "3"}
+ ml.allDeps["app0-util3"] = map[string]string{}
+ }),
+ since: "dummy",
+ },
+ {
+ name: "Transitive dep invalidates package",
+ changed: []string{"dummy.lock"},
+ expected: []string{"//", "libB"},
+ lockfile: "dummy.lock",
+ currLockfile: makeLockfile(nil),
+ prevLockfile: makeLockfile(func(ml *mockLockfile) {
+ ml.versions["external-dep-a"] = "2"
+ ml.allDeps["external1"] = map[string]string{"external-dep-a": "2", "external-dep-b": "1"}
+ ml.allDeps["external-dep-a2"] = map[string]string{}
+ }),
+ since: "dummy",
+ },
+ {
+ name: "Transitive dep invalidates package and dependents",
+ changed: []string{"dummy.lock"},
+ expected: []string{"//", "app0", "app1", "app2", "libA", "libB"},
+ lockfile: "dummy.lock",
+ includeDependents: true,
+ currLockfile: makeLockfile(nil),
+ prevLockfile: makeLockfile(func(ml *mockLockfile) {
+ ml.versions["external-dep-a"] = "2"
+ ml.allDeps["external1"] = map[string]string{"external-dep-a": "2", "external-dep-b": "1"}
+ ml.allDeps["external-dep-a2"] = map[string]string{}
+ }),
+ since: "dummy",
+ },
+ {
+ name: "Infer app2 from directory",
+ inferPkgPath: "app/app2",
+ expected: []string{"app2"},
+ },
+ {
+ name: "Infer app2 from a subdirectory",
+ inferPkgPath: "app/app2/src",
+ expected: []string{"app2"},
+ },
+ {
+ name: "Infer from a directory with no packages",
+ inferPkgPath: "wrong",
+ expected: []string{},
+ },
+ {
+ name: "Infer from a parent directory",
+ inferPkgPath: "app",
+ expected: []string{"app0", "app1", "app2", "app2-a"},
+ },
+ {
+ name: "library change, no scope, inferred libs",
+ changed: []string{"libs/libA/src/index.ts"},
+ expected: []string{"libA"},
+ since: "dummy",
+ inferPkgPath: "libs",
+ },
+ {
+ name: "library change, no scope, inferred app",
+ changed: []string{"libs/libA/src/index.ts"},
+ expected: []string{},
+ since: "dummy",
+ inferPkgPath: "app",
+ },
+ }
+ for i, tc := range testCases {
+ t.Run(fmt.Sprintf("test #%v %v", i, tc.name), func(t *testing.T) {
+ // Convert test data to system separators.
+ systemSeparatorChanged := make([]string, len(tc.changed))
+ for index, path := range tc.changed {
+ systemSeparatorChanged[index] = filepath.FromSlash(path)
+ }
+ scm := &mockSCM{
+ changed: systemSeparatorChanged,
+ contents: make(map[string][]byte, len(systemSeparatorChanged)),
+ }
+ for _, path := range systemSeparatorChanged {
+ scm.contents[path] = nil
+ }
+ readLockfile := func(_rootPackageJSON *fs.PackageJSON, content []byte) (lockfile.Lockfile, error) {
+ return tc.prevLockfile, nil
+ }
+ pkgInferenceRoot, err := resolvePackageInferencePath(tc.inferPkgPath)
+ if err != nil {
+ t.Errorf("bad inference path (%v): %v", tc.inferPkgPath, err)
+ }
+ pkgs, isAllPackages, err := ResolvePackages(&Opts{
+ LegacyFilter: LegacyFilter{
+ Entrypoints: tc.scope,
+ Since: tc.since,
+ IncludeDependencies: tc.includeDependencies,
+ SkipDependents: !tc.includeDependents,
+ },
+ IgnorePatterns: []string{tc.ignore},
+ GlobalDepPatterns: tc.globalDeps,
+ PackageInferenceRoot: pkgInferenceRoot,
+ }, root, scm, &context.Context{
+ WorkspaceInfos: workspaceInfos,
+ WorkspaceNames: packageNames,
+ PackageManager: &packagemanager.PackageManager{Lockfile: tc.lockfile, UnmarshalLockfile: readLockfile},
+ WorkspaceGraph: graph,
+ RootNode: "root",
+ Lockfile: tc.currLockfile,
+ }, tui, logger)
+ if err != nil {
+ t.Errorf("expected no error, got %v", err)
+ }
+ expected := make(util.Set)
+ for _, pkg := range tc.expected {
+ expected.Add(pkg)
+ }
+ if !reflect.DeepEqual(pkgs, expected) {
+ t.Errorf("ResolvePackages got %v, want %v", pkgs, expected)
+ }
+ if isAllPackages != tc.expectAllPackages {
+ t.Errorf("isAllPackages got %v, want %v", isAllPackages, tc.expectAllPackages)
+ }
+ })
+ }
+}
diff --git a/cli/internal/server/server.go b/cli/internal/server/server.go
new file mode 100644
index 0000000..5e738cc
--- /dev/null
+++ b/cli/internal/server/server.go
@@ -0,0 +1,192 @@
+package server
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "github.com/hashicorp/go-hclog"
+ "github.com/pkg/errors"
+ "github.com/vercel/turbo/cli/internal/filewatcher"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/globwatcher"
+ "github.com/vercel/turbo/cli/internal/turbodprotocol"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+)
+
+// Server implements the GRPC serverside of TurbodServer
+// Note for the future: we don't yet make use of turbo.json
+// or the package graph in the server. Once we do, we may need a
+// layer of indirection between "the thing that responds to grpc requests"
+// and "the thing that holds our persistent data structures" to handle
+// changes in the underlying configuration.
+type Server struct {
+ turbodprotocol.UnimplementedTurbodServer
+ watcher *filewatcher.FileWatcher
+ globWatcher *globwatcher.GlobWatcher
+ turboVersion string
+ started time.Time
+ logFilePath turbopath.AbsoluteSystemPath
+ repoRoot turbopath.AbsoluteSystemPath
+ closerMu sync.Mutex
+ closer *closer
+}
+
+// GRPCServer is the interface the turbo server needs from the underlying
+// GRPC server. It lets the turbo server register itself and provides
+// a hook for shutting down the server.
+type GRPCServer interface {
+ grpc.ServiceRegistrar
+ GracefulStop()
+}
+
+type closer struct {
+ grpcServer GRPCServer
+ once sync.Once
+}
+
+func (c *closer) close() {
+ // This can get triggered from a request handler (Shutdown). Since
+ // calling GracefulStop blocks until all request handlers complete,
+ // we need to run it in a goroutine to let the Shutdown handler complete
+ // and avoid deadlocking.
+ c.once.Do(func() {
+ go func() {
+ c.grpcServer.GracefulStop()
+ }()
+ })
+}
+
+var _defaultCookieTimeout = 500 * time.Millisecond
+
+// New returns a new instance of Server
+func New(serverName string, logger hclog.Logger, repoRoot turbopath.AbsoluteSystemPath, turboVersion string, logFilePath turbopath.AbsoluteSystemPath) (*Server, error) {
+ cookieDir := fs.GetTurboDataDir().UntypedJoin("cookies", serverName)
+ cookieJar, err := filewatcher.NewCookieJar(cookieDir, _defaultCookieTimeout)
+ if err != nil {
+ return nil, err
+ }
+ watcher, err := filewatcher.GetPlatformSpecificBackend(logger)
+ if err != nil {
+ return nil, err
+ }
+ fileWatcher := filewatcher.New(logger.Named("FileWatcher"), repoRoot, watcher)
+ globWatcher := globwatcher.New(logger.Named("GlobWatcher"), repoRoot, cookieJar)
+ server := &Server{
+ watcher: fileWatcher,
+ globWatcher: globWatcher,
+ turboVersion: turboVersion,
+ started: time.Now(),
+ logFilePath: logFilePath,
+ repoRoot: repoRoot,
+ }
+ server.watcher.AddClient(cookieJar)
+ server.watcher.AddClient(globWatcher)
+ server.watcher.AddClient(server)
+ if err := server.watcher.Start(); err != nil {
+ return nil, errors.Wrapf(err, "watching %v", repoRoot)
+ }
+ if err := server.watcher.AddRoot(cookieDir); err != nil {
+ _ = server.watcher.Close()
+ return nil, errors.Wrapf(err, "failed to watch cookie directory: %v", cookieDir)
+ }
+ return server, nil
+}
+
+func (s *Server) tryClose() bool {
+ s.closerMu.Lock()
+ defer s.closerMu.Unlock()
+ if s.closer != nil {
+ s.closer.close()
+ return true
+ }
+ return false
+}
+
+// OnFileWatchEvent implements filewatcher.FileWatchClient.OnFileWatchEvent
+// In the event that the root of the monorepo is deleted, shut down the server.
+func (s *Server) OnFileWatchEvent(ev filewatcher.Event) {
+ if ev.EventType == filewatcher.FileDeleted && ev.Path == s.repoRoot {
+ _ = s.tryClose()
+ }
+}
+
+// OnFileWatchError implements filewatcher.FileWatchClient.OnFileWatchError
+func (s *Server) OnFileWatchError(err error) {}
+
+// OnFileWatchClosed implements filewatcher.FileWatchClient.OnFileWatchClosed
+func (s *Server) OnFileWatchClosed() {}
+
+// Close is used for shutting down this copy of the server
+func (s *Server) Close() error {
+ return s.watcher.Close()
+}
+
+// Register registers this server to respond to GRPC requests
+func (s *Server) Register(grpcServer GRPCServer) {
+ s.closerMu.Lock()
+ s.closer = &closer{
+ grpcServer: grpcServer,
+ }
+ s.closerMu.Unlock()
+ turbodprotocol.RegisterTurbodServer(grpcServer, s)
+}
+
+// NotifyOutputsWritten implements the NotifyOutputsWritten rpc from turbo.proto
+func (s *Server) NotifyOutputsWritten(ctx context.Context, req *turbodprotocol.NotifyOutputsWrittenRequest) (*turbodprotocol.NotifyOutputsWrittenResponse, error) {
+ outputs := fs.TaskOutputs{
+ Inclusions: req.OutputGlobs,
+ Exclusions: req.OutputExclusionGlobs,
+ }
+
+ err := s.globWatcher.WatchGlobs(req.Hash, outputs)
+ if err != nil {
+ return nil, err
+ }
+ return &turbodprotocol.NotifyOutputsWrittenResponse{}, nil
+}
+
+// GetChangedOutputs implements the GetChangedOutputs rpc from turbo.proto
+func (s *Server) GetChangedOutputs(ctx context.Context, req *turbodprotocol.GetChangedOutputsRequest) (*turbodprotocol.GetChangedOutputsResponse, error) {
+
+ changedGlobs, err := s.globWatcher.GetChangedGlobs(req.Hash, req.OutputGlobs)
+ if err != nil {
+ return nil, err
+ }
+ return &turbodprotocol.GetChangedOutputsResponse{
+ ChangedOutputGlobs: changedGlobs,
+ }, nil
+}
+
+// Hello implements the Hello rpc from turbo.proto
+func (s *Server) Hello(ctx context.Context, req *turbodprotocol.HelloRequest) (*turbodprotocol.HelloResponse, error) {
+ clientVersion := req.Version
+ if clientVersion != s.turboVersion {
+ err := status.Errorf(codes.FailedPrecondition, "version mismatch. Client %v Server %v", clientVersion, s.turboVersion)
+ return nil, err
+ }
+ return &turbodprotocol.HelloResponse{}, nil
+}
+
+// Shutdown implements the Shutdown rpc from turbo.proto
+func (s *Server) Shutdown(ctx context.Context, req *turbodprotocol.ShutdownRequest) (*turbodprotocol.ShutdownResponse, error) {
+ if s.tryClose() {
+ return &turbodprotocol.ShutdownResponse{}, nil
+ }
+ err := status.Error(codes.NotFound, "shutdown mechanism not found")
+ return nil, err
+}
+
+// Status implements the Status rpc from turbo.proto
+func (s *Server) Status(ctx context.Context, req *turbodprotocol.StatusRequest) (*turbodprotocol.StatusResponse, error) {
+ uptime := uint64(time.Since(s.started).Milliseconds())
+ return &turbodprotocol.StatusResponse{
+ DaemonStatus: &turbodprotocol.DaemonStatus{
+ LogFile: s.logFilePath.ToString(),
+ UptimeMsec: uptime,
+ },
+ }, nil
+}
diff --git a/cli/internal/server/server_test.go b/cli/internal/server/server_test.go
new file mode 100644
index 0000000..b7dcf3a
--- /dev/null
+++ b/cli/internal/server/server_test.go
@@ -0,0 +1,73 @@
+package server
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/go-hclog"
+ "google.golang.org/grpc"
+ "gotest.tools/v3/assert"
+
+ turbofs "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/turbodprotocol"
+)
+
+type mockGrpc struct {
+ stopped chan struct{}
+}
+
+func (m *mockGrpc) GracefulStop() {
+ close(m.stopped)
+}
+
+func (m *mockGrpc) RegisterService(desc *grpc.ServiceDesc, impl interface{}) {}
+
+func TestDeleteRepoRoot(t *testing.T) {
+ logger := hclog.Default()
+ logger.SetLevel(hclog.Debug)
+ repoRootRaw := t.TempDir()
+ repoRoot := turbofs.AbsoluteSystemPathFromUpstream(repoRootRaw)
+
+ grpcServer := &mockGrpc{
+ stopped: make(chan struct{}),
+ }
+
+ s, err := New("testServer", logger, repoRoot, "some-version", "/log/file/path")
+ assert.NilError(t, err, "New")
+ s.Register(grpcServer)
+
+ // Delete the repo root, ensure that GracefulStop got called
+ err = repoRoot.Remove()
+ assert.NilError(t, err, "Remove")
+
+ select {
+ case <-grpcServer.stopped:
+ case <-time.After(2 * time.Second):
+ t.Error("timed out waiting for graceful stop to be called")
+ }
+}
+
+func TestShutdown(t *testing.T) {
+ logger := hclog.Default()
+ repoRootRaw := t.TempDir()
+ repoRoot := turbofs.AbsoluteSystemPathFromUpstream(repoRootRaw)
+
+ grpcServer := &mockGrpc{
+ stopped: make(chan struct{}),
+ }
+
+ s, err := New("testServer", logger, repoRoot, "some-version", "/log/file/path")
+ assert.NilError(t, err, "New")
+ s.Register(grpcServer)
+
+ ctx := context.Background()
+ _, err = s.Shutdown(ctx, &turbodprotocol.ShutdownRequest{})
+ assert.NilError(t, err, "Shutdown")
+ // Ensure that graceful stop gets called
+ select {
+ case <-grpcServer.stopped:
+ case <-time.After(2 * time.Second):
+ t.Error("timed out waiting for graceful stop to be called")
+ }
+}
diff --git a/cli/internal/signals/signals.go b/cli/internal/signals/signals.go
new file mode 100644
index 0000000..8634144
--- /dev/null
+++ b/cli/internal/signals/signals.go
@@ -0,0 +1,60 @@
+package signals
+
+import (
+ "os"
+ "os/signal"
+ "sync"
+ "syscall"
+)
+
+// Watcher watches for signals delivered to this process and provides
+// the opportunity for turbo to run cleanup
+type Watcher struct {
+ doneCh chan struct{}
+ closed bool
+ mu sync.Mutex
+ closers []func()
+}
+
+// AddOnClose registers a cleanup handler to run when a signal is received
+func (w *Watcher) AddOnClose(closer func()) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ w.closers = append(w.closers, closer)
+}
+
+// Close runs the cleanup handlers registered with this watcher
+func (w *Watcher) Close() {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ if w.closed {
+ return
+ }
+ w.closed = true
+ for _, closer := range w.closers {
+ closer()
+ }
+ w.closers = nil
+ close(w.doneCh)
+}
+
+// Done returns a channel that will be closed after all of the cleanup
+// handlers have been run.
+func (w *Watcher) Done() <-chan struct{} {
+ return w.doneCh
+}
+
+// NewWatcher returns a new Watcher instance for watching signals.
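+//
+// Typical wiring (illustrative; cleanup is any func()):
+//
+//	w := NewWatcher()
+//	w.AddOnClose(cleanup)
+//	<-w.Done()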
+func NewWatcher() *Watcher {
+ // TODO: platform specific signals to watch for?
+ signalCh := make(chan os.Signal, 1)
+ signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)
+ w := &Watcher{
+ doneCh: make(chan struct{}),
+ }
+ go func() {
+ <-signalCh
+ w.Close()
+ }()
+ return w
+}
diff --git a/cli/internal/spinner/spinner.go b/cli/internal/spinner/spinner.go
new file mode 100644
index 0000000..8ce6b4a
--- /dev/null
+++ b/cli/internal/spinner/spinner.go
@@ -0,0 +1,89 @@
+package spinner
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/mitchellh/cli"
+ progressbar "github.com/schollz/progressbar/v3"
+ "github.com/vercel/turbo/cli/internal/ui"
+)
+
+// getWriterAndColor unwraps cli.Ui instances until it gets to a BasicUi.
+// If it happens to spot a ColoredUi along the way, it marks that color is
+// enabled.
+func getWriterAndColor(terminal cli.Ui, useColor bool) (io.Writer, bool) {
+ switch terminal := terminal.(type) {
+ case *cli.BasicUi:
+ return terminal.Writer, useColor
+ case *cli.ColoredUi:
+ return getWriterAndColor(terminal.Ui, true)
+ case *cli.ConcurrentUi:
+ return getWriterAndColor(terminal.Ui, useColor)
+ case *cli.PrefixedUi:
+ return getWriterAndColor(terminal.Ui, useColor)
+ case *cli.MockUi:
+ return terminal.OutputWriter, false
+ default:
+ panic(fmt.Sprintf("unknown Ui: %v", terminal))
+ }
+}
+
+// WaitFor runs fn, and prints msg to the terminal if it takes longer
+// than initialDelay to complete. Depending on the terminal configuration, it may
+// display a single instance of msg, or an infinite spinner, updated every 250ms.
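+//
+// Example (illustrative; fn is any func()):
+//
+//	err := WaitFor(ctx, fn, terminal, "connecting to daemon...", 100*time.Millisecond)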
+func WaitFor(ctx context.Context, fn func(), terminal cli.Ui, msg string, initialDelay time.Duration) error {
+ doneCh := make(chan struct{})
+ go func() {
+ fn()
+ close(doneCh)
+ }()
+ if ui.IsTTY {
+ select {
+ case <-ctx.Done():
+ return nil
+ case <-time.After(initialDelay):
+ writer, useColor := getWriterAndColor(terminal, false)
+ bar := progressbar.NewOptions(
+ -1,
+ progressbar.OptionEnableColorCodes(useColor),
+ progressbar.OptionSetDescription(fmt.Sprintf("[yellow]%v[reset]", msg)),
+ progressbar.OptionSpinnerType(14),
+ progressbar.OptionSetWriter(writer),
+ )
+ for {
+ select {
+ case <-doneCh:
+ err := bar.Finish()
+ terminal.Output("")
+ return err
+ case <-time.After(250 * time.Millisecond):
+ if err := bar.Add(1); err != nil {
+ return err
+ }
+ case <-ctx.Done():
+ return nil
+ }
+ }
+ case <-doneCh:
+ return nil
+ }
+ } else {
+ // wait for the timeout before displaying a message, even with no tty
+ select {
+ case <-ctx.Done():
+ return nil
+ case <-doneCh:
+ return nil
+ case <-time.After(initialDelay):
+ terminal.Output(msg)
+ }
+ select {
+ case <-ctx.Done():
+ case <-doneCh:
+ }
+ return nil
+ }
+}
diff --git a/cli/internal/tarpatch/tar.go b/cli/internal/tarpatch/tar.go
new file mode 100644
index 0000000..a4dab23
--- /dev/null
+++ b/cli/internal/tarpatch/tar.go
@@ -0,0 +1,92 @@
+// Adapted from https://github.com/moby/moby/blob/924edb948c2731df3b77697a8fcc85da3f6eef57/pkg/archive/archive.go
+// Copyright Docker, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+// Package tarpatch builds tar headers without the OS user/group lookups that
+// can make the standard library's archive/tar error out in some environments.
+package tarpatch
+
+import (
+ "archive/tar"
+ "io/fs"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+// nosysFileInfo hides the system-dependent info of the wrapped FileInfo to
+// prevent tar.FileInfoHeader from introspecting it and potentially calling into
+// glibc.
+type nosysFileInfo struct {
+ os.FileInfo
+}
+
+func (fi nosysFileInfo) Sys() interface{} {
+ // A Sys value of type *tar.Header is safe as it is system-independent.
+ // The tar.FileInfoHeader function copies the fields into the returned
+ // header without performing any OS lookups.
+ if sys, ok := fi.FileInfo.Sys().(*tar.Header); ok {
+ return sys
+ }
+ return nil
+}
+
+// FileInfoHeaderNoLookups creates a partially-populated tar.Header from fi.
+//
+// Compared to the archive/tar.FileInfoHeader function, this function is safe to
+// call from a chrooted process as it does not populate fields which would
+// require operating system lookups. It behaves identically to
+// tar.FileInfoHeader when fi is a FileInfo value returned from
+// tar.Header.FileInfo().
+//
+// When fi is a FileInfo for a native file, such as returned from os.Stat() and
+// os.Lstat(), the returned Header value differs from one returned from
+// tar.FileInfoHeader in the following ways. The Uname and Gname fields are not
+// set as OS lookups would be required to populate them. The AccessTime and
+// ChangeTime fields are not currently set (not yet implemented) although that
+// is subject to change. Callers which require the AccessTime or ChangeTime
+// fields to be zeroed should explicitly zero them out in the returned Header
+// value to avoid any compatibility issues in the future.
+func FileInfoHeaderNoLookups(fi fs.FileInfo, link string) (*tar.Header, error) {
+ hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, link)
+ if err != nil {
+ return nil, err
+ }
+ return hdr, sysStat(fi, hdr)
+}
+
+// FileInfoHeader creates a populated Header from fi.
+//
+// Compared to the archive/tar package, this function fills in less information
+// but is safe to call from a chrooted process. The AccessTime and ChangeTime
+// fields are not set in the returned header, ModTime is truncated to one-second
+// precision, and the Uname and Gname fields are only set when fi is a FileInfo
+// value returned from tar.Header.FileInfo().
+func FileInfoHeader(fullPath turbopath.AnchoredUnixPath, fileInfo fs.FileInfo, link string) (*tar.Header, error) {
+ hdr, err := FileInfoHeaderNoLookups(fileInfo, link)
+ if err != nil {
+ return nil, err
+ }
+ hdr.Format = tar.FormatPAX
+ hdr.ModTime = hdr.ModTime.Truncate(time.Second)
+ hdr.AccessTime = time.Time{}
+ hdr.ChangeTime = time.Time{}
+ hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+ hdr.Name = canonicalTarName(fullPath, fileInfo.IsDir())
+ return hdr, nil
+}
+
+// canonicalTarName provides a consistent posix-style path for files and
+// directories to be archived, regardless of the platform.
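+// For example, a directory "child-dir/libA" becomes "child-dir/libA/".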
+func canonicalTarName(fullPath turbopath.AnchoredUnixPath, isDir bool) string {
+ nameString := fullPath.ToString()
+ if isDir {
+ // Append '/' if not already present.
+ if !strings.HasSuffix(nameString, "/") {
+ nameString += "/"
+ }
+ }
+
+ return nameString
+}
diff --git a/cli/internal/tarpatch/tar_unix.go b/cli/internal/tarpatch/tar_unix.go
new file mode 100644
index 0000000..3020c0e
--- /dev/null
+++ b/cli/internal/tarpatch/tar_unix.go
@@ -0,0 +1,42 @@
+//go:build !windows
+// +build !windows
+
+// Adapted from https://github.com/moby/moby/blob/924edb948c2731df3b77697a8fcc85da3f6eef57/pkg/archive/archive_unix.go
+// Copyright Docker, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+package tarpatch
+
+import (
+ "archive/tar"
+ "os"
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+// chmodTarEntry is used to adjust the file permissions used in tar header based
+// on the platform the archival is done.
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+ return perm // noop for unix as golang APIs provide perm bits correctly
+}
+
+// sysStat populates hdr from system-dependent fields of fi without performing
+// any OS lookups.
+func sysStat(fi os.FileInfo, hdr *tar.Header) error {
+ s, ok := fi.Sys().(*syscall.Stat_t)
+ if !ok {
+ return nil
+ }
+
+ hdr.Uid = int(s.Uid)
+ hdr.Gid = int(s.Gid)
+
+ if s.Mode&unix.S_IFBLK != 0 ||
+ s.Mode&unix.S_IFCHR != 0 {
+ hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert
+ hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert
+ }
+
+ return nil
+}
diff --git a/cli/internal/tarpatch/tar_windows.go b/cli/internal/tarpatch/tar_windows.go
new file mode 100644
index 0000000..486e6fd
--- /dev/null
+++ b/cli/internal/tarpatch/tar_windows.go
@@ -0,0 +1,27 @@
+//go:build windows
+// +build windows
+
+// Adapted from https://github.com/moby/moby/blob/924edb948c2731df3b77697a8fcc85da3f6eef57/pkg/archive/archive_windows.go
+// Copyright Docker, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+package tarpatch
+
+import (
+ "archive/tar"
+ "os"
+)
+
+// chmodTarEntry is used to adjust the file permissions used in tar header based
+// on the platform the archival is done.
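+// For example, 0o644 becomes 0o755 and 0o600 becomes 0o711.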
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+ // Remove group- and world-writable bits.
+ perm &= 0o755
+
+ // Add the x bit: make everything +x on Windows
+ return perm | 0o111
+}
+
+func sysStat(fi os.FileInfo, hdr *tar.Header) error {
+ return nil
+}
diff --git a/cli/internal/taskhash/taskhash.go b/cli/internal/taskhash/taskhash.go
new file mode 100644
index 0000000..a912ad9
--- /dev/null
+++ b/cli/internal/taskhash/taskhash.go
@@ -0,0 +1,497 @@
+// Package taskhash handles calculating dependency hashes for nodes in the task execution graph.
+package taskhash
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+ "sync"
+
+ "github.com/hashicorp/go-hclog"
+ "github.com/pyr-sh/dag"
+ gitignore "github.com/sabhiram/go-gitignore"
+ "github.com/vercel/turbo/cli/internal/doublestar"
+ "github.com/vercel/turbo/cli/internal/env"
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/hashing"
+ "github.com/vercel/turbo/cli/internal/inference"
+ "github.com/vercel/turbo/cli/internal/nodes"
+ "github.com/vercel/turbo/cli/internal/runsummary"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+ "github.com/vercel/turbo/cli/internal/util"
+ "github.com/vercel/turbo/cli/internal/workspace"
+ "golang.org/x/sync/errgroup"
+)
+
+// Tracker caches package-inputs hashes, as well as package-task hashes.
+// Package-inputs hashes must be calculated before package-task hashes,
+// and package-task hashes must be calculated in topological order.
+// Package-task hashing is threadsafe, provided topological order is
+// respected.
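+//
+// Expected call order (illustrative):
+//
+//	tracker := NewTracker(rootNode, globalHash, pipeline)
+//	err := tracker.CalculateFileHashes(allTasks, workerCount, workspaceInfos, taskDefinitions, repoRoot)
+//	// ...then, visiting the task graph in topological order:
+//	hash, err := tracker.CalculateTaskHash(packageTask, deps, logger, args, useOldTaskHashable)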
+type Tracker struct {
+ rootNode string
+ globalHash string
+ pipeline fs.Pipeline
+
+ packageInputsHashes packageFileHashes
+
+ // packageInputsExpandedHashes is a map of a hashkey to a list of files that are inputs to the task.
+ // Writes to this map happen during CalculateFileHashes(). Since this happens synchronously
+ // before walking the task graph, it does not need to be protected by a mutex.
+ packageInputsExpandedHashes map[packageFileHashKey]map[turbopath.AnchoredUnixPath]string
+
+ // mu is a mutex that we can lock/unlock to read/write from maps
+ // the fields below should be protected by the mutex.
+ mu sync.RWMutex
+ packageTaskEnvVars map[string]env.DetailedMap // taskId -> envvar pairs that affect the hash.
+ packageTaskHashes map[string]string // taskID -> hash
+ packageTaskFramework map[string]string // taskID -> inferred framework for package
+ packageTaskOutputs map[string][]turbopath.AnchoredSystemPath
+ packageTaskCacheStatus map[string]runsummary.TaskCacheSummary
+}
+
+// NewTracker creates a tracker for package-inputs combinations and package-task combinations.
+func NewTracker(rootNode string, globalHash string, pipeline fs.Pipeline) *Tracker {
+ return &Tracker{
+ rootNode: rootNode,
+ globalHash: globalHash,
+ pipeline: pipeline,
+ packageTaskHashes: make(map[string]string),
+ packageTaskFramework: make(map[string]string),
+ packageTaskEnvVars: make(map[string]env.DetailedMap),
+ packageTaskOutputs: make(map[string][]turbopath.AnchoredSystemPath),
+ packageTaskCacheStatus: make(map[string]runsummary.TaskCacheSummary),
+ }
+}
+
+// packageFileSpec defines a combination of a package and optional set of input globs
+type packageFileSpec struct {
+ pkg string
+ inputs []string
+}
+
+func specFromPackageTask(packageTask *nodes.PackageTask) packageFileSpec {
+ return packageFileSpec{
+ pkg: packageTask.PackageName,
+ inputs: packageTask.TaskDefinition.Inputs,
+ }
+}
+
+// packageFileHashKey is a hashable representation of a packageFileSpec.
+type packageFileHashKey string
+
+// ToKey returns a stable key for a package-inputs combination, suitable for
+// use as a map key.
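+// Inputs are sorted first, so ordering does not affect the key. For example
+// (illustrative): pkg "app1" with inputs ["src/**"] yields "app1#src/**";
+// multiple inputs are joined with "!".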
+func (pfs packageFileSpec) ToKey() packageFileHashKey {
+ sort.Strings(pfs.inputs)
+ return packageFileHashKey(fmt.Sprintf("%v#%v", pfs.pkg, strings.Join(pfs.inputs, "!")))
+}
+
+func safeCompileIgnoreFile(filepath string) (*gitignore.GitIgnore, error) {
+ if fs.FileExists(filepath) {
+ return gitignore.CompileIgnoreFile(filepath)
+ }
+ // no op
+ return gitignore.CompileIgnoreLines([]string{}...), nil
+}
+
+func (pfs *packageFileSpec) getHashObject(pkg *fs.PackageJSON, repoRoot turbopath.AbsoluteSystemPath) map[turbopath.AnchoredUnixPath]string {
+ hashObject, pkgDepsErr := hashing.GetPackageDeps(repoRoot, &hashing.PackageDepsOptions{
+ PackagePath: pkg.Dir,
+ InputPatterns: pfs.inputs,
+ })
+ if pkgDepsErr != nil {
+ manualHashObject, err := manuallyHashPackage(pkg, pfs.inputs, repoRoot)
+ if err != nil {
+ return make(map[turbopath.AnchoredUnixPath]string)
+ }
+ hashObject = manualHashObject
+ }
+
+ return hashObject
+}
+
+func (pfs *packageFileSpec) hash(hashObject map[turbopath.AnchoredUnixPath]string) (string, error) {
+ hashOfFiles, otherErr := fs.HashObject(hashObject)
+ if otherErr != nil {
+ return "", otherErr
+ }
+ return hashOfFiles, nil
+}
+
+func manuallyHashPackage(pkg *fs.PackageJSON, inputs []string, rootPath turbopath.AbsoluteSystemPath) (map[turbopath.AnchoredUnixPath]string, error) {
+ hashObject := make(map[turbopath.AnchoredUnixPath]string)
+ // Instead of implementing all gitignore properly, we hack it. We only respect .gitignore in the root and in
+ // the directory of a package.
+ ignore, err := safeCompileIgnoreFile(rootPath.UntypedJoin(".gitignore").ToString())
+ if err != nil {
+ return nil, err
+ }
+
+ ignorePkg, err := safeCompileIgnoreFile(rootPath.UntypedJoin(pkg.Dir.ToStringDuringMigration(), ".gitignore").ToString())
+ if err != nil {
+ return nil, err
+ }
+
+ pathPrefix := rootPath.UntypedJoin(pkg.Dir.ToStringDuringMigration())
+ includePattern := ""
+ excludePattern := ""
+ if len(inputs) > 0 {
+ var includePatterns []string
+ var excludePatterns []string
+ for _, pattern := range inputs {
+ if len(pattern) > 0 && pattern[0] == '!' {
+ excludePatterns = append(excludePatterns, pathPrefix.UntypedJoin(pattern[1:]).ToString())
+ } else {
+ includePatterns = append(includePatterns, pathPrefix.UntypedJoin(pattern).ToString())
+ }
+ }
+ if len(includePatterns) > 0 {
+ includePattern = "{" + strings.Join(includePatterns, ",") + "}"
+ }
+ if len(excludePatterns) > 0 {
+ excludePattern = "{" + strings.Join(excludePatterns, ",") + "}"
+ }
+ }
+
+ err = fs.Walk(pathPrefix.ToStringDuringMigration(), func(name string, isDir bool) error {
+ convertedName := turbopath.AbsoluteSystemPathFromUpstream(name)
+ rootMatch := ignore.MatchesPath(convertedName.ToString())
+ otherMatch := ignorePkg.MatchesPath(convertedName.ToString())
+ if !rootMatch && !otherMatch {
+ if !isDir {
+ if includePattern != "" {
+ val, err := doublestar.PathMatch(includePattern, convertedName.ToString())
+ if err != nil {
+ return err
+ }
+ if !val {
+ return nil
+ }
+ }
+ if excludePattern != "" {
+ val, err := doublestar.PathMatch(excludePattern, convertedName.ToString())
+ if err != nil {
+ return err
+ }
+ if val {
+ return nil
+ }
+ }
+ hash, err := fs.GitLikeHashFile(convertedName.ToString())
+ if err != nil {
+ return fmt.Errorf("could not hash file %v. \n%w", convertedName.ToString(), err)
+ }
+
+ relativePath, err := convertedName.RelativeTo(pathPrefix)
+ if err != nil {
+ return fmt.Errorf("File path cannot be made relative: %w", err)
+ }
+ hashObject[relativePath.ToUnixPath()] = hash
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return hashObject, nil
+}
+
+// packageFileHashes is a map from a package and optional input globs to the hash of
+// the matched files in the package.
+type packageFileHashes map[packageFileHashKey]string
+
+// CalculateFileHashes hashes each unique package-inputs combination that is present
+// in the task graph. Must be called before calculating task hashes.
+func (th *Tracker) CalculateFileHashes(
+ allTasks []dag.Vertex,
+ workerCount int,
+ workspaceInfos workspace.Catalog,
+ taskDefinitions map[string]*fs.TaskDefinition,
+ repoRoot turbopath.AbsoluteSystemPath,
+) error {
+ hashTasks := make(util.Set)
+
+ for _, v := range allTasks {
+ taskID, ok := v.(string)
+ if !ok {
+ return fmt.Errorf("unknown task %v", taskID)
+ }
+ if taskID == th.rootNode {
+ continue
+ }
+ pkgName, _ := util.GetPackageTaskFromId(taskID)
+ if pkgName == th.rootNode {
+ continue
+ }
+
+ taskDefinition, ok := taskDefinitions[taskID]
+ if !ok {
+ return fmt.Errorf("missing pipeline entry %v", taskID)
+ }
+
+ pfs := &packageFileSpec{
+ pkg: pkgName,
+ inputs: taskDefinition.Inputs,
+ }
+
+ hashTasks.Add(pfs)
+ }
+
+ hashes := make(map[packageFileHashKey]string, len(hashTasks))
+ hashObjects := make(map[packageFileHashKey]map[turbopath.AnchoredUnixPath]string, len(hashTasks))
+ hashQueue := make(chan *packageFileSpec, workerCount)
+ hashErrs := &errgroup.Group{}
+
+ for i := 0; i < workerCount; i++ {
+ hashErrs.Go(func() error {
+ for packageFileSpec := range hashQueue {
+ pkg, ok := workspaceInfos.PackageJSONs[packageFileSpec.pkg]
+ if !ok {
+ return fmt.Errorf("cannot find package %v", packageFileSpec.pkg)
+ }
+ hashObject := packageFileSpec.getHashObject(pkg, repoRoot)
+ hash, err := packageFileSpec.hash(hashObject)
+ if err != nil {
+ return err
+ }
+ th.mu.Lock()
+ pfsKey := packageFileSpec.ToKey()
+ hashes[pfsKey] = hash
+ hashObjects[pfsKey] = hashObject
+ th.mu.Unlock()
+ }
+ return nil
+ })
+ }
+ for ht := range hashTasks {
+ hashQueue <- ht.(*packageFileSpec)
+ }
+ close(hashQueue)
+ err := hashErrs.Wait()
+ if err != nil {
+ return err
+ }
+ th.packageInputsHashes = hashes
+ th.packageInputsExpandedHashes = hashObjects
+ return nil
+}
+
+type taskHashable struct {
+ packageDir turbopath.AnchoredUnixPath
+ hashOfFiles string
+ externalDepsHash string
+ task string
+ outputs fs.TaskOutputs
+ passThruArgs []string
+ envMode util.EnvMode
+ passthroughEnv []string
+ hashableEnvPairs []string
+ globalHash string
+ taskDependencyHashes []string
+}
+
+type oldTaskHashable struct {
+ packageDir turbopath.AnchoredUnixPath
+ hashOfFiles string
+ externalDepsHash string
+ task string
+ outputs fs.TaskOutputs
+ passThruArgs []string
+ hashableEnvPairs []string
+ globalHash string
+ taskDependencyHashes []string
+}
+
+// calculateTaskHashFromHashable returns a hash string from the taskHashable
+func calculateTaskHashFromHashable(full *taskHashable, useOldTaskHashable bool) (string, error) {
+ // The user is not using the strict environment variables feature.
+ if useOldTaskHashable {
+ return fs.HashObject(&oldTaskHashable{
+ packageDir: full.packageDir,
+ hashOfFiles: full.hashOfFiles,
+ externalDepsHash: full.externalDepsHash,
+ task: full.task,
+ outputs: full.outputs,
+ passThruArgs: full.passThruArgs,
+ hashableEnvPairs: full.hashableEnvPairs,
+ globalHash: full.globalHash,
+ taskDependencyHashes: full.taskDependencyHashes,
+ })
+ }
+
+ switch full.envMode {
+ case util.Loose:
+ // Remove the passthroughs from hash consideration if we're explicitly loose.
+ full.passthroughEnv = nil
+ return fs.HashObject(full)
+ case util.Strict:
+ // Collapse `nil` and `[]` in strict mode.
+ if full.passthroughEnv == nil {
+ full.passthroughEnv = make([]string, 0)
+ }
+ return fs.HashObject(full)
+ case util.Infer:
+ panic("task inferred status should have already been resolved")
+ default:
+ panic("unimplemented environment mode")
+ }
+}
+
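+// calculateDependencyHashes returns the sorted, de-duplicated hashes of the
+// given dependency tasks, skipping the root node and tasks in the root package.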
+func (th *Tracker) calculateDependencyHashes(dependencySet dag.Set) ([]string, error) {
+ dependencyHashSet := make(util.Set)
+
+ rootPrefix := th.rootNode + util.TaskDelimiter
+ th.mu.RLock()
+ defer th.mu.RUnlock()
+ for _, dependency := range dependencySet {
+ if dependency == th.rootNode {
+ continue
+ }
+ dependencyTask, ok := dependency.(string)
+ if !ok {
+ return nil, fmt.Errorf("unknown task: %v", dependency)
+ }
+ if strings.HasPrefix(dependencyTask, rootPrefix) {
+ continue
+ }
+ dependencyHash, ok := th.packageTaskHashes[dependencyTask]
+ if !ok {
+ return nil, fmt.Errorf("missing hash for dependent task: %v", dependencyTask)
+ }
+ dependencyHashSet.Add(dependencyHash)
+ }
+ dependenciesHashList := dependencyHashSet.UnsafeListOfStrings()
+ sort.Strings(dependenciesHashList)
+ return dependenciesHashList, nil
+}
+
+// CalculateTaskHash calculates the hash for package-task combination. It is threadsafe, provided
+// that it has previously been called on its task-graph dependencies. File hashes must be calculated
+// first.
+func (th *Tracker) CalculateTaskHash(packageTask *nodes.PackageTask, dependencySet dag.Set, logger hclog.Logger, args []string, useOldTaskHashable bool) (string, error) {
+ pfs := specFromPackageTask(packageTask)
+ pkgFileHashKey := pfs.ToKey()
+
+ hashOfFiles, ok := th.packageInputsHashes[pkgFileHashKey]
+ if !ok {
+ return "", fmt.Errorf("cannot find package-file hash for %v", pkgFileHashKey)
+ }
+
+ var keyMatchers []string
+ framework := inference.InferFramework(packageTask.Pkg)
+ if framework != nil && framework.EnvMatcher != "" {
+ // log auto detected framework and env prefix
+ logger.Debug(fmt.Sprintf("auto detected framework for %s", packageTask.PackageName), "framework", framework.Slug, "env_prefix", framework.EnvMatcher)
+ keyMatchers = append(keyMatchers, framework.EnvMatcher)
+ }
+
+ envVars, err := env.GetHashableEnvVars(
+ packageTask.TaskDefinition.EnvVarDependencies,
+ keyMatchers,
+ "TURBO_CI_VENDOR_ENV_KEY",
+ )
+ if err != nil {
+ return "", err
+ }
+ hashableEnvPairs := envVars.All.ToHashable()
+ outputs := packageTask.HashableOutputs()
+ taskDependencyHashes, err := th.calculateDependencyHashes(dependencySet)
+ if err != nil {
+ return "", err
+ }
+ // log any auto detected env vars
+ logger.Debug(fmt.Sprintf("task hash env vars for %s:%s", packageTask.PackageName, packageTask.Task), "vars", hashableEnvPairs)
+
+ hash, err := calculateTaskHashFromHashable(&taskHashable{
+ packageDir: packageTask.Pkg.Dir.ToUnixPath(),
+ hashOfFiles: hashOfFiles,
+ externalDepsHash: packageTask.Pkg.ExternalDepsHash,
+ task: packageTask.Task,
+ outputs: outputs.Sort(),
+ passThruArgs: args,
+ envMode: packageTask.EnvMode,
+ passthroughEnv: packageTask.TaskDefinition.PassthroughEnv,
+ hashableEnvPairs: hashableEnvPairs,
+ globalHash: th.globalHash,
+ taskDependencyHashes: taskDependencyHashes,
+ }, useOldTaskHashable)
+ if err != nil {
+ return "", fmt.Errorf("failed to hash task %v: %v", packageTask.TaskID, hash)
+ }
+ th.mu.Lock()
+ th.packageTaskEnvVars[packageTask.TaskID] = envVars
+ th.packageTaskHashes[packageTask.TaskID] = hash
+ if framework != nil {
+ th.packageTaskFramework[packageTask.TaskID] = framework.Slug
+ }
+ th.mu.Unlock()
+ return hash, nil
+}
+
+// GetExpandedInputs gets the expanded set of inputs for a given PackageTask
+func (th *Tracker) GetExpandedInputs(packageTask *nodes.PackageTask) map[turbopath.AnchoredUnixPath]string {
+ pfs := specFromPackageTask(packageTask)
+ expandedInputs := th.packageInputsExpandedHashes[pfs.ToKey()]
+ inputsCopy := make(map[turbopath.AnchoredUnixPath]string, len(expandedInputs))
+
+ for path, hash := range expandedInputs {
+ inputsCopy[path] = hash
+ }
+
+ return inputsCopy
+}
+
+// GetEnvVars returns the hashed env vars for a given taskID
+func (th *Tracker) GetEnvVars(taskID string) env.DetailedMap {
+ th.mu.RLock()
+ defer th.mu.RUnlock()
+ return th.packageTaskEnvVars[taskID]
+}
+
+// GetFramework returns the inferred framework for a given taskID
+func (th *Tracker) GetFramework(taskID string) string {
+ th.mu.RLock()
+ defer th.mu.RUnlock()
+ return th.packageTaskFramework[taskID]
+}
+
+// GetExpandedOutputs returns a list of outputs for a given taskID
+func (th *Tracker) GetExpandedOutputs(taskID string) []turbopath.AnchoredSystemPath {
+ th.mu.RLock()
+ defer th.mu.RUnlock()
+ outputs, ok := th.packageTaskOutputs[taskID]
+
+ if !ok {
+ return []turbopath.AnchoredSystemPath{}
+ }
+
+ return outputs
+}
+
+// SetExpandedOutputs a list of outputs for a given taskID so it can be read later
+func (th *Tracker) SetExpandedOutputs(taskID string, outputs []turbopath.AnchoredSystemPath) {
+ th.mu.Lock()
+ defer th.mu.Unlock()
+ th.packageTaskOutputs[taskID] = outputs
+}
+
+// SetCacheStatus records the task status for the given taskID
+func (th *Tracker) SetCacheStatus(taskID string, cacheSummary runsummary.TaskCacheSummary) {
+ th.mu.Lock()
+ defer th.mu.Unlock()
+ th.packageTaskCacheStatus[taskID] = cacheSummary
+}
+
+// GetCacheStatus returns the task cache status for the given taskID
+func (th *Tracker) GetCacheStatus(taskID string) runsummary.TaskCacheSummary {
+ th.mu.RLock()
+ defer th.mu.RUnlock()
+
+ if status, ok := th.packageTaskCacheStatus[taskID]; ok {
+ return status
+ }
+
+ // Return an empty one, all the fields will be false and 0
+ return runsummary.TaskCacheSummary{}
+}
diff --git a/cli/internal/taskhash/taskhash_test.go b/cli/internal/taskhash/taskhash_test.go
new file mode 100644
index 0000000..dea0010
--- /dev/null
+++ b/cli/internal/taskhash/taskhash_test.go
@@ -0,0 +1,138 @@
+package taskhash
+
+import (
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "github.com/vercel/turbo/cli/internal/fs"
+ "github.com/vercel/turbo/cli/internal/turbopath"
+)
+
+func Test_manuallyHashPackage(t *testing.T) {
+ rootIgnore := strings.Join([]string{
+ "ignoreme",
+ "ignorethisdir/",
+ }, "\n")
+ pkgIgnore := strings.Join([]string{
+ "pkgignoreme",
+ "pkgignorethisdir/",
+ }, "\n")
+ root := t.TempDir()
+ repoRoot := turbopath.AbsoluteSystemPathFromUpstream(root)
+ pkgName := turbopath.AnchoredUnixPath("child-dir/libA").ToSystemPath()
+ type fileHash struct {
+ contents string
+ hash string
+ }
+ files := map[turbopath.AnchoredUnixPath]fileHash{
+ "top-level-file": {"top-level-file-contents", ""},
+ "other-dir/other-dir-file": {"other-dir-file-contents", ""},
+ "ignoreme": {"anything", ""},
+ "child-dir/libA/some-file": {"some-file-contents", "7e59c6a6ea9098c6d3beb00e753e2c54ea502311"},
+ "child-dir/libA/some-dir/other-file": {"some-file-contents", "7e59c6a6ea9098c6d3beb00e753e2c54ea502311"},
+ "child-dir/libA/some-dir/another-one": {"some-file-contents", "7e59c6a6ea9098c6d3beb00e753e2c54ea502311"},
+ "child-dir/libA/some-dir/excluded-file": {"some-file-contents", "7e59c6a6ea9098c6d3beb00e753e2c54ea502311"},
+ "child-dir/libA/ignoreme": {"anything", ""},
+ "child-dir/libA/ignorethisdir/anything": {"anything", ""},
+ "child-dir/libA/pkgignoreme": {"anything", ""},
+ "child-dir/libA/pkgignorethisdir/file": {"anything", ""},
+ }
+
+ rootIgnoreFile, err := repoRoot.Join(".gitignore").Create()
+ if err != nil {
+ t.Fatalf("failed to create .gitignore: %v", err)
+ }
+ _, err = rootIgnoreFile.WriteString(rootIgnore)
+ if err != nil {
+ t.Fatalf("failed to write contents to .gitignore: %v", err)
+ }
+ rootIgnoreFile.Close()
+ pkgIgnoreFilename := pkgName.RestoreAnchor(repoRoot).Join(".gitignore")
+ err = pkgIgnoreFilename.EnsureDir()
+ if err != nil {
+ t.Fatalf("failed to ensure directories for %v: %v", pkgIgnoreFilename, err)
+ }
+ pkgIgnoreFile, err := pkgIgnoreFilename.Create()
+ if err != nil {
+ t.Fatalf("failed to create libA/.gitignore: %v", err)
+ }
+ _, err = pkgIgnoreFile.WriteString(pkgIgnore)
+ if err != nil {
+ t.Fatalf("failed to write contents to libA/.gitignore: %v", err)
+ }
+ pkgIgnoreFile.Close()
+ for path, spec := range files {
+ filename := path.ToSystemPath().RestoreAnchor(repoRoot)
+ err = filename.EnsureDir()
+ if err != nil {
+ t.Fatalf("failed to ensure directories for %v: %v", filename, err)
+ }
+ f, err := filename.Create()
+ if err != nil {
+ t.Fatalf("failed to create file: %v: %v", filename, err)
+ }
+ _, err = f.WriteString(spec.contents)
+ if err != nil {
+ t.Fatalf("failed to write contents to %v: %v", filename, err)
+ }
+ f.Close()
+ }
+ // now that we've created the repo, expect our .gitignore file too
+ files[turbopath.AnchoredUnixPath("child-dir/libA/.gitignore")] = fileHash{contents: "", hash: "3237694bc3312ded18386964a855074af7b066af"}
+
+ pkg := &fs.PackageJSON{
+ Dir: pkgName,
+ }
+ hashes, err := manuallyHashPackage(pkg, []string{}, repoRoot)
+ if err != nil {
+ t.Fatalf("failed to calculate manual hashes: %v", err)
+ }
+
+ count := 0
+ for path, spec := range files {
+ systemPath := path.ToSystemPath()
+ if systemPath.HasPrefix(pkgName) {
+ relPath := systemPath[len(pkgName)+1:]
+ got, ok := hashes[relPath.ToUnixPath()]
+ if !ok {
+ if spec.hash != "" {
+ t.Errorf("did not find hash for %v, but wanted one", path)
+ }
+ } else if got != spec.hash {
+ t.Errorf("hash of %v, got %v want %v", path, got, spec.hash)
+ } else {
+ count++
+ }
+ }
+ }
+ if count != len(hashes) {
+ t.Errorf("found extra hashes in %v", hashes)
+ }
+
+ count = 0
+ justFileHashes, err := manuallyHashPackage(pkg, []string{filepath.FromSlash("**/*file"), "!" + filepath.FromSlash("some-dir/excluded-file")}, repoRoot)
+ if err != nil {
+ t.Fatalf("failed to calculate manual hashes: %v", err)
+ }
+ for path, spec := range files {
+ systemPath := path.ToSystemPath()
+ if systemPath.HasPrefix(pkgName) {
+ shouldInclude := strings.HasSuffix(systemPath.ToString(), "file") && !strings.HasSuffix(systemPath.ToString(), "excluded-file")
+ relPath := systemPath[len(pkgName)+1:]
+ got, ok := justFileHashes[relPath.ToUnixPath()]
+ if !ok && shouldInclude {
+ if spec.hash != "" {
+ t.Errorf("did not find hash for %v, but wanted one", path)
+ }
+ } else if shouldInclude && got != spec.hash {
+ t.Errorf("hash of %v, got %v want %v", path, got, spec.hash)
+ } else if shouldInclude {
+ count++
+ }
+ }
+ }
+ if count != len(justFileHashes) {
+ t.Errorf("found extra hashes in %v", hashes)
+ }
+}
diff --git a/cli/internal/turbodprotocol/turbod.proto b/cli/internal/turbodprotocol/turbod.proto
new file mode 100644
index 0000000..cf7c554
--- /dev/null
+++ b/cli/internal/turbodprotocol/turbod.proto
@@ -0,0 +1,53 @@
+syntax = "proto3";
+
+option go_package = "github.com/vercel/turbo/cli/internal/turbodprotocol";
+
+package turbodprotocol;
+
+service Turbod {
+ rpc Hello (HelloRequest) returns (HelloResponse);
+ rpc Shutdown (ShutdownRequest) returns (ShutdownResponse);
+ rpc Status (StatusRequest) returns (StatusResponse);
+ // RPCs that implement cache watching
+ rpc NotifyOutputsWritten (NotifyOutputsWrittenRequest) returns (NotifyOutputsWrittenResponse);
+ rpc GetChangedOutputs (GetChangedOutputsRequest) returns (GetChangedOutputsResponse);
+}
+
+message HelloRequest {
+ string version = 1;
+ string session_id = 2;
+}
+
+message HelloResponse {}
+
+message ShutdownRequest {}
+
+message ShutdownResponse {}
+
+message StatusRequest {}
+
+message StatusResponse {
+ DaemonStatus daemon_status = 1;
+}
+
+message NotifyOutputsWrittenRequest {
+ repeated string output_globs = 1;
+ string hash = 2;
+ repeated string output_exclusion_globs = 3;
+}
+
+message NotifyOutputsWrittenResponse {}
+
+message GetChangedOutputsRequest {
+ repeated string output_globs = 1;
+ string hash = 2;
+}
+
+message GetChangedOutputsResponse {
+ repeated string changed_output_globs = 1;
+}
+
+message DaemonStatus {
+ string log_file = 1;
+ uint64 uptime_msec = 2;
+}
diff --git a/cli/internal/turbopath/absolute_system_path.go b/cli/internal/turbopath/absolute_system_path.go
new file mode 100644
index 0000000..df65827
--- /dev/null
+++ b/cli/internal/turbopath/absolute_system_path.go
@@ -0,0 +1,258 @@
+package turbopath
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+// AbsoluteSystemPath is an absolute path using system separators.
+type AbsoluteSystemPath string
+
+// _dirPermissions are the default permission bits we apply to directories.
+const _dirPermissions = os.ModeDir | 0775
+
+// _nonRelativeSentinel is the leading sentinel that indicates traversal.
+const _nonRelativeSentinel = ".."
+
+// ToString returns a string representation of this Path.
+// Used for interfacing with APIs that require a string.
+func (p AbsoluteSystemPath) ToString() string {
+ return string(p)
+}
+
+// RelativeTo calculates the relative path between two `AbsoluteSystemPath`s.
+func (p AbsoluteSystemPath) RelativeTo(basePath AbsoluteSystemPath) (AnchoredSystemPath, error) {
+ processed, err := filepath.Rel(basePath.ToString(), p.ToString())
+ return AnchoredSystemPath(processed), err
+}
+
+// Join appends relative path segments to this AbsoluteSystemPath.
+func (p AbsoluteSystemPath) Join(additional ...RelativeSystemPath) AbsoluteSystemPath {
+ cast := RelativeSystemPathArray(additional)
+ return AbsoluteSystemPath(filepath.Join(p.ToString(), filepath.Join(cast.ToStringArray()...)))
+}
+
+// ToStringDuringMigration returns a string representation of this path.
+// These instances should eventually be removed.
+func (p AbsoluteSystemPath) ToStringDuringMigration() string {
+ return p.ToString()
+}
+
+// UntypedJoin is a Join that does not constrain the type of the arguments.
+// This enables you to pass in strings, but does not protect you from garbage in.
+func (p AbsoluteSystemPath) UntypedJoin(args ...string) AbsoluteSystemPath {
+ return AbsoluteSystemPath(filepath.Join(p.ToString(), filepath.Join(args...)))
+}
+
+// Dir implements filepath.Dir() for an AbsoluteSystemPath
+func (p AbsoluteSystemPath) Dir() AbsoluteSystemPath {
+ return AbsoluteSystemPath(filepath.Dir(p.ToString()))
+}
+
+// Mkdir implements os.Mkdir(p, perm)
+func (p AbsoluteSystemPath) Mkdir(perm os.FileMode) error {
+ return os.Mkdir(p.ToString(), perm)
+}
+
+// MkdirAll implements os.MkdirAll(p, perm)
+func (p AbsoluteSystemPath) MkdirAll(perm os.FileMode) error {
+ return os.MkdirAll(p.ToString(), perm)
+}
+
+// Open implements os.Open(p) for an AbsoluteSystemPath
+func (p AbsoluteSystemPath) Open() (*os.File, error) {
+ return os.Open(p.ToString())
+}
+
+// OpenFile implements os.OpenFile for an absolute path
+func (p AbsoluteSystemPath) OpenFile(flags int, mode os.FileMode) (*os.File, error) {
+ return os.OpenFile(p.ToString(), flags, mode)
+}
+
+// Lstat implements os.Lstat for absolute path
+func (p AbsoluteSystemPath) Lstat() (os.FileInfo, error) {
+ return os.Lstat(p.ToString())
+}
+
+// Stat implements os.Stat for absolute path
+func (p AbsoluteSystemPath) Stat() (os.FileInfo, error) {
+ return os.Stat(p.ToString())
+}
+
+// Findup checks all parent directories for a file.
+func (p AbsoluteSystemPath) Findup(name RelativeSystemPath) (AbsoluteSystemPath, error) {
+ path, err := FindupFrom(name.ToString(), p.ToString())
+ return AbsoluteSystemPath(path), err
+}
+
+// Exists returns true if the given path exists.
+func (p AbsoluteSystemPath) Exists() bool {
+ _, err := p.Lstat()
+ return err == nil
+}
+
+// DirExists returns true if the given path exists and is a directory.
+func (p AbsoluteSystemPath) DirExists() bool {
+ info, err := p.Lstat()
+ return err == nil && info.IsDir()
+}
+
+// FileExists returns true if the given path exists and is a file.
+func (p AbsoluteSystemPath) FileExists() bool {
+ info, err := p.Lstat()
+ return err == nil && !info.IsDir()
+}
+
+// ContainsPath returns true if this absolute path is a parent of the
+// argument.
+func (p AbsoluteSystemPath) ContainsPath(other AbsoluteSystemPath) (bool, error) {
+ // In Go, filepath.Rel can return a path that starts with "../" or equivalent.
+ // Checking filesystem-level contains can get extremely complicated
+ // (see https://github.com/golang/dep/blob/f13583b555deaa6742f141a9c1185af947720d60/internal/fs/fs.go#L33)
+ // As a compromise, rely on the stdlib to generate a relative path and then check
+ // if the first step is "../".
+ rel, err := filepath.Rel(p.ToString(), other.ToString())
+ if err != nil {
+ return false, err
+ }
+ return !strings.HasPrefix(rel, _nonRelativeSentinel), nil
+}
+
+// ReadFile reads the contents of the specified file
+func (p AbsoluteSystemPath) ReadFile() ([]byte, error) {
+ return ioutil.ReadFile(p.ToString())
+}
+
+// VolumeName returns the volume of the specified path
+func (p AbsoluteSystemPath) VolumeName() string {
+ return filepath.VolumeName(p.ToString())
+}
+
+// WriteFile writes the contents of the specified file
+func (p AbsoluteSystemPath) WriteFile(contents []byte, mode os.FileMode) error {
+ return ioutil.WriteFile(p.ToString(), contents, mode)
+}
+
+// EnsureDir ensures that the directory containing this file exists
+func (p AbsoluteSystemPath) EnsureDir() error {
+ dir := p.Dir()
+ err := os.MkdirAll(dir.ToString(), _dirPermissions)
+ if err != nil && dir.FileExists() {
+ // It looks like this is a file and not a directory. Attempt to remove it; this can
+ // happen in some cases if you change a rule from outputting a file to a directory.
+ if err2 := dir.Remove(); err2 == nil {
+ err = os.MkdirAll(dir.ToString(), _dirPermissions)
+ } else {
+ return err
+ }
+ }
+ return err
+}
+
+// MkdirAllMode creates the directory at p and all necessary parents, ensuring that p has the requested mode set
+func (p AbsoluteSystemPath) MkdirAllMode(mode os.FileMode) error {
+ info, err := p.Lstat()
+ if err == nil {
+ if info.IsDir() && info.Mode() == mode {
+ // Dir exists with the correct mode
+ return nil
+ } else if info.IsDir() {
+ // Dir exists with incorrect mode
+ return os.Chmod(p.ToString(), mode)
+ } else {
+ // Path exists as file, remove it
+ if err := p.Remove(); err != nil {
+ return err
+ }
+ }
+ }
+ if err := os.MkdirAll(p.ToString(), mode); err != nil {
+ return err
+ }
+ // This is necessary only when umask results in creating a directory with permissions different than the one passed by the user
+ return os.Chmod(p.ToString(), mode)
+}
+
+// Create is the AbsoluteSystemPath wrapper for os.Create
+func (p AbsoluteSystemPath) Create() (*os.File, error) {
+ return os.Create(p.ToString())
+}
+
+// Ext implements filepath.Ext(p) for an absolute path
+func (p AbsoluteSystemPath) Ext() string {
+ return filepath.Ext(p.ToString())
+}
+
+// RelativePathString returns the relative path from this AbsoluteSystemPath to another absolute path, as a string
+func (p AbsoluteSystemPath) RelativePathString(path string) (string, error) {
+ return filepath.Rel(p.ToString(), path)
+}
+
+// PathTo returns the relative path between two absolute paths
+// This should likely eventually return an AnchoredSystemPath
+func (p AbsoluteSystemPath) PathTo(other AbsoluteSystemPath) (string, error) {
+ return p.RelativePathString(other.ToString())
+}
+
+// Symlink implements os.Symlink(target, p) for absolute path
+func (p AbsoluteSystemPath) Symlink(target string) error {
+ return os.Symlink(target, p.ToString())
+}
+
+// Readlink implements os.Readlink(p) for an absolute path
+func (p AbsoluteSystemPath) Readlink() (string, error) {
+ return os.Readlink(p.ToString())
+}
+
+// Remove removes the file or (empty) directory at the given path
+func (p AbsoluteSystemPath) Remove() error {
+ return os.Remove(p.ToString())
+}
+
+// RemoveAll implements os.RemoveAll for absolute paths.
+func (p AbsoluteSystemPath) RemoveAll() error {
+ return os.RemoveAll(p.ToString())
+}
+
+// Base implements filepath.Base for an absolute path
+func (p AbsoluteSystemPath) Base() string {
+ return filepath.Base(p.ToString())
+}
+
+// Rename implements os.Rename(p, dest) for absolute paths
+func (p AbsoluteSystemPath) Rename(dest AbsoluteSystemPath) error {
+ return os.Rename(p.ToString(), dest.ToString())
+}
+
+// EvalSymlinks implements filepath.EvalSymlinks for absolute path
+func (p AbsoluteSystemPath) EvalSymlinks() (AbsoluteSystemPath, error) {
+ result, err := filepath.EvalSymlinks(p.ToString())
+ if err != nil {
+ return "", err
+ }
+ return AbsoluteSystemPath(result), nil
+}
+
+// HasPrefix is strings.HasPrefix for paths, ensuring that it matches on separator boundaries.
+// This does NOT perform Clean in advance.
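+// For example, "/foo/bar" has the prefix "/foo", but "/foobar" does not,
+// because a prefix must end on a path separator boundary.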
+func (p AbsoluteSystemPath) HasPrefix(prefix AbsoluteSystemPath) bool {
+ prefixLen := len(prefix)
+ pathLen := len(p)
+
+ if prefixLen > pathLen {
+ // Can't be a prefix if longer.
+ return false
+ } else if prefixLen == pathLen {
+ // Can be a prefix if they're equal, but otherwise no.
+ return p == prefix
+ }
+
+ // prefix is definitely shorter than p.
+ // We need to confirm that p[len(prefix)] is a system separator.
+
+ return strings.HasPrefix(p.ToString(), prefix.ToString()) && os.IsPathSeparator(p[prefixLen])
+}
diff --git a/cli/internal/turbopath/absolute_system_path_darwin.go b/cli/internal/turbopath/absolute_system_path_darwin.go
new file mode 100644
index 0000000..e2c3bff
--- /dev/null
+++ b/cli/internal/turbopath/absolute_system_path_darwin.go
@@ -0,0 +1,23 @@
+//go:build darwin
+// +build darwin
+
+// Adapted from https://github.com/containerd/continuity/blob/b4ca35286886296377de39e6eafd1affae019fc3/driver/lchmod_unix.go
+// Copyright The containerd Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package turbopath
+
+import (
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+// Lchmod changes the mode of a file not following symlinks.
+func (p AbsoluteSystemPath) Lchmod(mode os.FileMode) error {
+ err := unix.Fchmodat(unix.AT_FDCWD, p.ToString(), uint32(mode), unix.AT_SYMLINK_NOFOLLOW)
+ if err != nil {
+ err = &os.PathError{Op: "lchmod", Path: p.ToString(), Err: err}
+ }
+ return err
+}
diff --git a/cli/internal/turbopath/absolute_system_path_notdarwin.go b/cli/internal/turbopath/absolute_system_path_notdarwin.go
new file mode 100644
index 0000000..1195888
--- /dev/null
+++ b/cli/internal/turbopath/absolute_system_path_notdarwin.go
@@ -0,0 +1,13 @@
+//go:build !darwin
+// +build !darwin
+
+package turbopath
+
+import (
+ "os"
+)
+
+// Lchmod changes the mode of a file not following symlinks.
+// It is a no-op on platforms other than darwin.
+func (p AbsoluteSystemPath) Lchmod(mode os.FileMode) error {
+ return nil
+}
diff --git a/cli/internal/turbopath/absolute_system_path_test.go b/cli/internal/turbopath/absolute_system_path_test.go
new file mode 100644
index 0000000..4ca36f9
--- /dev/null
+++ b/cli/internal/turbopath/absolute_system_path_test.go
@@ -0,0 +1,174 @@
+package turbopath
+
+import (
+ "os"
+ "runtime"
+ "testing"
+
+ "gotest.tools/v3/assert"
+ "gotest.tools/v3/fs"
+)
+
+func Test_Mkdir(t *testing.T) {
+ type Case struct {
+ name string
+ isDir bool
+ exists bool
+ mode os.FileMode
+ expectedMode os.FileMode
+ }
+
+ cases := []Case{
+ {
+ name: "dir doesn't exist",
+ exists: false,
+ expectedMode: os.ModeDir | 0777,
+ },
+ {
+ name: "path exists as file",
+ exists: true,
+ isDir: false,
+ mode: 0666,
+ expectedMode: os.ModeDir | 0755,
+ },
+ {
+ name: "dir exists with incorrect mode",
+ exists: true,
+ isDir: true,
+ mode: os.ModeDir | 0755,
+ expectedMode: os.ModeDir | 0655,
+ },
+ {
+ name: "dir exists with correct mode",
+ exists: true,
+ isDir: true,
+ mode: os.ModeDir | 0755,
+ expectedMode: os.ModeDir | 0755,
+ },
+ }
+
+ for _, testCase := range cases {
+ testDir := fs.NewDir(t, "system-path-mkdir-test")
+ testName := testCase.name
+ path := testDir.Join("foo")
+ if testCase.isDir {
+ err := os.Mkdir(path, testCase.mode)
+ assert.NilError(t, err, "%s: Mkdir", testName)
+ } else if testCase.exists {
+ file, err := os.Create(path)
+ assert.NilError(t, err, "%s: Create", testName)
+ err = file.Chmod(testCase.mode)
+ assert.NilError(t, err, "%s: Chmod", testName)
+ err = file.Close()
+ assert.NilError(t, err, "%s: Close", testName)
+ }
+
+ testPath := AbsoluteSystemPath(path)
+ err := testPath.MkdirAllMode(testCase.expectedMode)
+ assert.NilError(t, err, "%s: Mkdir", testName)
+
+ stat, err := testPath.Lstat()
+ assert.NilError(t, err, "%s: Lstat", testName)
+ assert.Assert(t, stat.IsDir(), testName)
+
+ if runtime.GOOS == "windows" {
+ // On Windows, os.Chmod will only change the writable bit, so that's all we check
+ assert.Equal(t, stat.Mode().Perm()&0200, testCase.expectedMode.Perm()&0200, testName)
+ } else {
+ assert.Equal(t, stat.Mode(), testCase.expectedMode, testName)
+ }
+
+ }
+}
+
+func TestAbsoluteSystemPath_Findup(t *testing.T) {
+ tests := []struct {
+ name string
+ fs []AnchoredSystemPath
+ executionDirectory AnchoredSystemPath
+ fileName RelativeSystemPath
+ want AnchoredSystemPath
+ wantErr bool
+ }{
+ {
+ name: "hello world",
+ fs: []AnchoredSystemPath{
+ AnchoredUnixPath("one/two/three/four/.file").ToSystemPath(),
+ AnchoredUnixPath("one/two/three/four/.target").ToSystemPath(),
+ },
+ executionDirectory: AnchoredUnixPath("one/two/three/four").ToSystemPath(),
+ fileName: RelativeUnixPath(".target").ToSystemPath(),
+ want: AnchoredUnixPath("one/two/three/four/.target").ToSystemPath(),
+ },
+ {
+ name: "parent",
+ fs: []AnchoredSystemPath{
+ AnchoredUnixPath("one/two/three/four/.file").ToSystemPath(),
+ AnchoredUnixPath("one/two/three/.target").ToSystemPath(),
+ },
+ executionDirectory: AnchoredUnixPath("one/two/three/four").ToSystemPath(),
+ fileName: RelativeUnixPath(".target").ToSystemPath(),
+ want: AnchoredUnixPath("one/two/three/.target").ToSystemPath(),
+ },
+ {
+ name: "gets the closest",
+ fs: []AnchoredSystemPath{
+ AnchoredUnixPath("one/two/three/four/.file").ToSystemPath(),
+ AnchoredUnixPath("one/two/three/.target").ToSystemPath(),
+ AnchoredUnixPath("one/two/.target").ToSystemPath(),
+ },
+ executionDirectory: AnchoredUnixPath("one/two/three/four").ToSystemPath(),
+ fileName: RelativeUnixPath(".target").ToSystemPath(),
+ want: AnchoredUnixPath("one/two/three/.target").ToSystemPath(),
+ },
+ {
+ name: "nonexistent",
+ fs: []AnchoredSystemPath{
+ AnchoredUnixPath("one/two/three/four/.file").ToSystemPath(),
+ },
+ executionDirectory: AnchoredUnixPath("one/two/three/four").ToSystemPath(),
+ fileName: RelativeUnixPath(".nonexistent").ToSystemPath(),
+ want: "",
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ fsRoot := AbsoluteSystemPath(t.TempDir())
+ for _, file := range tt.fs {
+ path := file.RestoreAnchor(fsRoot)
+ assert.NilError(t, path.Dir().MkdirAll(0777))
+ assert.NilError(t, path.WriteFile(nil, 0777))
+ }
+
+ got, err := tt.executionDirectory.RestoreAnchor(fsRoot).Findup(tt.fileName)
+ if tt.wantErr {
+ assert.ErrorIs(t, err, os.ErrNotExist)
+ return
+ }
+ if got != "" && got != tt.want.RestoreAnchor(fsRoot) {
+ t.Errorf("AbsoluteSystemPath.Findup() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func TestJoin(t *testing.T) {
+ rawRoot, err := os.Getwd()
+ if err != nil {
+ t.Fatalf("cwd %v", err)
+ }
+ root := AbsoluteSystemPathFromUpstream(rawRoot)
+ testRoot := root.Join("a", "b", "c")
+ dot := testRoot.Join(".")
+ if dot != testRoot {
+ t.Errorf(". path got %v, want %v", dot, testRoot)
+ }
+
+ doubleDot := testRoot.Join("..")
+ expectedDoubleDot := root.Join("a", "b")
+ if doubleDot != expectedDoubleDot {
+ t.Errorf(".. path got %v, want %v", doubleDot, expectedDoubleDot)
+ }
+}
diff --git a/cli/internal/turbopath/anchored_system_path.go b/cli/internal/turbopath/anchored_system_path.go
new file mode 100644
index 0000000..0957ead
--- /dev/null
+++ b/cli/internal/turbopath/anchored_system_path.go
@@ -0,0 +1,75 @@
+package turbopath
+
+import (
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+// AnchoredSystemPath is a path stemming from a specified root using system separators.
+type AnchoredSystemPath string
+
+// ToString returns a string representation of this Path.
+// Used for interfacing with APIs that require a string.
+func (p AnchoredSystemPath) ToString() string {
+ return string(p)
+}
+
+// ToStringDuringMigration returns the string representation of this path, and is for
+// use in situations where we expect a future path migration to remove the need for the
+// string representation
+func (p AnchoredSystemPath) ToStringDuringMigration() string {
+ return string(p)
+}
+
+// ToSystemPath returns itself.
+func (p AnchoredSystemPath) ToSystemPath() AnchoredSystemPath {
+ return p
+}
+
+// ToUnixPath converts an AnchoredSystemPath to an AnchoredUnixPath.
+func (p AnchoredSystemPath) ToUnixPath() AnchoredUnixPath {
+ return AnchoredUnixPath(filepath.ToSlash(p.ToString()))
+}
+
+// RelativeTo calculates the relative path between two `AnchoredSystemPath`s.
+func (p AnchoredSystemPath) RelativeTo(basePath AnchoredSystemPath) (AnchoredSystemPath, error) {
+ processed, err := filepath.Rel(basePath.ToString(), p.ToString())
+ return AnchoredSystemPath(processed), err
+}
+
+// RestoreAnchor prefixes the AnchoredSystemPath with its anchor to return an AbsoluteSystemPath.
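+// For example, restoring "foo/bar" against the anchor "/repo" yields "/repo/foo/bar" on Unix systems.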
+func (p AnchoredSystemPath) RestoreAnchor(anchor AbsoluteSystemPath) AbsoluteSystemPath {
+ return AbsoluteSystemPath(filepath.Join(anchor.ToString(), p.ToString()))
+}
+
+// Dir returns filepath.Dir for the path.
+func (p AnchoredSystemPath) Dir() AnchoredSystemPath {
+ return AnchoredSystemPath(filepath.Dir(p.ToString()))
+}
+
+// Join appends relative path segments to this AnchoredSystemPath.
+func (p AnchoredSystemPath) Join(additional ...RelativeSystemPath) AnchoredSystemPath {
+ cast := RelativeSystemPathArray(additional)
+ return AnchoredSystemPath(filepath.Join(p.ToString(), filepath.Join(cast.ToStringArray()...)))
+}
+
+// HasPrefix is strings.HasPrefix for paths, ensuring that it matches on separator boundaries.
+// This does NOT perform Clean in advance.
+func (p AnchoredSystemPath) HasPrefix(prefix AnchoredSystemPath) bool {
+ prefixLen := len(prefix)
+ pathLen := len(p)
+
+ if prefixLen > pathLen {
+ // Can't be a prefix if longer.
+ return false
+ } else if prefixLen == pathLen {
+ // Can be a prefix if they're equal, but otherwise no.
+ return p == prefix
+ }
+
+ // prefix is definitely shorter than p.
+ // We need to confirm that p[len(prefix)] is a system separator.
+
+ return strings.HasPrefix(p.ToString(), prefix.ToString()) && os.IsPathSeparator(p[prefixLen])
+}
diff --git a/cli/internal/turbopath/anchored_unix_path.go b/cli/internal/turbopath/anchored_unix_path.go
new file mode 100644
index 0000000..23e371a
--- /dev/null
+++ b/cli/internal/turbopath/anchored_unix_path.go
@@ -0,0 +1,31 @@
+package turbopath
+
+import (
+ "path"
+ "path/filepath"
+)
+
+// AnchoredUnixPath is a path stemming from a specified root using Unix `/` separators.
+type AnchoredUnixPath string
+
+// ToString returns a string representation of this Path.
+// Used for interfacing with APIs that require a string.
+func (p AnchoredUnixPath) ToString() string {
+ return string(p)
+}
+
+// ToSystemPath converts an AnchoredUnixPath to an AnchoredSystemPath.
+func (p AnchoredUnixPath) ToSystemPath() AnchoredSystemPath {
+ return AnchoredSystemPath(filepath.FromSlash(p.ToString()))
+}
+
+// ToUnixPath returns itself.
+func (p AnchoredUnixPath) ToUnixPath() AnchoredUnixPath {
+ return p
+}
+
+// Join appends relative path segments to this AnchoredUnixPath.
+func (p AnchoredUnixPath) Join(additional ...RelativeUnixPath) AnchoredUnixPath {
+ cast := RelativeUnixPathArray(additional)
+ return AnchoredUnixPath(path.Join(p.ToString(), path.Join(cast.ToStringArray()...)))
+}
diff --git a/cli/internal/turbopath/find_up.go b/cli/internal/turbopath/find_up.go
new file mode 100644
index 0000000..bf7c39c
--- /dev/null
+++ b/cli/internal/turbopath/find_up.go
@@ -0,0 +1,50 @@
+package turbopath
+
+import (
+ "os"
+ "path/filepath"
+)
+
+func hasFile(name, dir string) (bool, error) {
+ files, err := os.ReadDir(dir)
+
+ if err != nil {
+ return false, err
+ }
+
+ for _, f := range files {
+ if name == f.Name() {
+ return true, nil
+ }
+ }
+
+ return false, nil
+}
+
+func findupFrom(name, dir string) (string, error) {
+ for {
+ found, err := hasFile(name, dir)
+
+ if err != nil {
+ return "", err
+ }
+
+ if found {
+ return filepath.Join(dir, name), nil
+ }
+
+ parent := filepath.Dir(dir)
+
+ if parent == dir {
+ return "", nil
+ }
+
+ dir = parent
+ }
+}
+
+// FindupFrom Recursively finds a file by walking up parents in the file tree
+// starting from a specific directory.
+func FindupFrom(name, dir string) (string, error) {
+ return findupFrom(name, dir)
+}
diff --git a/cli/internal/turbopath/relative_system_path.go b/cli/internal/turbopath/relative_system_path.go
new file mode 100644
index 0000000..d6115db
--- /dev/null
+++ b/cli/internal/turbopath/relative_system_path.go
@@ -0,0 +1,44 @@
+package turbopath
+
+import (
+ "fmt"
+ "path/filepath"
+)
+
+// RelativeSystemPath is a relative path using system separators.
+type RelativeSystemPath string
+
+// CheckedToRelativeSystemPath inspects a string and determines if it is a relative path.
+func CheckedToRelativeSystemPath(s string) (RelativeSystemPath, error) {
+ if filepath.IsAbs(s) {
+ return "", fmt.Errorf("%v is not a relative path", s)
+ }
+ return RelativeSystemPath(filepath.Clean(s)), nil
+}
+
+// MakeRelativeSystemPath joins the given segments in a system-appropriate way
+func MakeRelativeSystemPath(segments ...string) RelativeSystemPath {
+ return RelativeSystemPath(filepath.Join(segments...))
+}
+
+// ToString returns a string representation of this Path.
+// Used for interfacing with APIs that require a string.
+func (p RelativeSystemPath) ToString() string {
+ return string(p)
+}
+
+// ToSystemPath returns itself.
+func (p RelativeSystemPath) ToSystemPath() RelativeSystemPath {
+ return p
+}
+
+// ToUnixPath converts from RelativeSystemPath to RelativeUnixPath.
+func (p RelativeSystemPath) ToUnixPath() RelativeUnixPath {
+ return RelativeUnixPath(filepath.ToSlash(p.ToString()))
+}
+
+// Join appends relative path segments to this RelativeSystemPath.
+func (p RelativeSystemPath) Join(additional ...RelativeSystemPath) RelativeSystemPath {
+ cast := RelativeSystemPathArray(additional)
+ return RelativeSystemPath(filepath.Join(p.ToString(), filepath.Join(cast.ToStringArray()...)))
+}
diff --git a/cli/internal/turbopath/relative_unix_path.go b/cli/internal/turbopath/relative_unix_path.go
new file mode 100644
index 0000000..05829e2
--- /dev/null
+++ b/cli/internal/turbopath/relative_unix_path.go
@@ -0,0 +1,31 @@
+package turbopath
+
+import (
+ "path"
+ "path/filepath"
+)
+
+// RelativeUnixPath is a relative path using Unix `/` separators.
+type RelativeUnixPath string
+
+// ToString returns a string representation of this Path.
+// Used for interfacing with APIs that require a string.
+func (p RelativeUnixPath) ToString() string {
+ return string(p)
+}
+
+// ToSystemPath converts a RelativeUnixPath to a RelativeSystemPath.
+func (p RelativeUnixPath) ToSystemPath() RelativeSystemPath {
+ return RelativeSystemPath(filepath.FromSlash(p.ToString()))
+}
+
+// ToUnixPath returns itself.
+func (p RelativeUnixPath) ToUnixPath() RelativeUnixPath {
+ return p
+}
+
+// Join appends relative path segments to this RelativeUnixPath.
+func (p RelativeUnixPath) Join(additional ...RelativeUnixPath) RelativeUnixPath {
+ cast := RelativeUnixPathArray(additional)
+ return RelativeUnixPath(path.Join(p.ToString(), path.Join(cast.ToStringArray()...)))
+}
diff --git a/cli/internal/turbopath/turbopath.go b/cli/internal/turbopath/turbopath.go
new file mode 100644
index 0000000..f50b75f
--- /dev/null
+++ b/cli/internal/turbopath/turbopath.go
@@ -0,0 +1,112 @@
+// Package turbopath teaches the Go type system about six
+// different types of paths:
+// - AbsoluteSystemPath
+// - RelativeSystemPath
+// - AnchoredSystemPath
+// - AbsoluteUnixPath
+// - RelativeUnixPath
+// - AnchoredUnixPath
+//
+// Between these two dimensions (anchoring and separator style) it is
+// assumed that we will be able to reasonably describe file paths being
+// used within the system and
+// have the type system enforce correctness instead of relying upon
+// runtime code to accomplish the task.
+//
+// Absolute paths are, "absolute, including volume root." They are not
+// portable between System and Unix.
+//
+// Relative paths are simply arbitrary path segments using a particular
+// path delimiter. They are portable between System and Unix.
+//
+// Anchored paths are, "absolute, starting at a particular root."
+// They are not aware of *what* their anchor is. It could be a repository,
+// an `os.dirFS`, a package, `cwd`, or more. They are stored *without*
+// a preceding delimiter for compatibility with `io/fs`. They are portable
+// between System and Unix.
+//
+// In some future world everything in here can be optimized out at
+// compile time, since everything is either `string` or `[]string`.
+//
+// Much of this is dreadfully repetitive because of intentional
+// limitations in the Go type system.
+package turbopath
+
+// AnchoredUnixPathArray is a type used to enable transform operations on arrays of paths.
+type AnchoredUnixPathArray []AnchoredUnixPath
+
+// RelativeSystemPathArray is a type used to enable transform operations on arrays of paths.
+type RelativeSystemPathArray []RelativeSystemPath
+
+// RelativeUnixPathArray is a type used to enable transform operations on arrays of paths.
+type RelativeUnixPathArray []RelativeUnixPath
+
+// ToStringArray enables ergonomic operations on arrays of RelativeSystemPath
+func (source RelativeSystemPathArray) ToStringArray() []string {
+ output := make([]string, len(source))
+ for index, path := range source {
+ output[index] = path.ToString()
+ }
+ return output
+}
+
+// ToStringArray enables ergonomic operations on arrays of RelativeUnixPath
+func (source RelativeUnixPathArray) ToStringArray() []string {
+ output := make([]string, len(source))
+ for index, path := range source {
+ output[index] = path.ToString()
+ }
+ return output
+}
+
+// ToSystemPathArray enables ergonomic operations on arrays of AnchoredUnixPath
+func (source AnchoredUnixPathArray) ToSystemPathArray() []AnchoredSystemPath {
+ output := make([]AnchoredSystemPath, len(source))
+ for index, path := range source {
+ output[index] = path.ToSystemPath()
+ }
+ return output
+}
+
+// The following functions exist to import a path string and cast it to the appropriate
+// type. They exist to communicate intent and make it explicit that this is an
+// intentional action, not a "helpful" insertion by the IDE.
+//
+// This is intended to map closely to the `unsafe` keyword, without the denotative
+// meaning of `unsafe` in English. These are "trust me, I've checked it" places, and
+// intend to mark the places where we smuggle paths from outside the world of safe
+// path handling into the world where we carefully consider the path to ensure safety.
+
+// AbsoluteSystemPathFromUpstream takes a path string and casts it to an
+// AbsoluteSystemPath without checking. If the input to this function is
+// not an AbsoluteSystemPath it will result in downstream errors.
+func AbsoluteSystemPathFromUpstream(path string) AbsoluteSystemPath {
+ return AbsoluteSystemPath(path)
+}
+
+// AnchoredSystemPathFromUpstream takes a path string and casts it to an
+// AnchoredSystemPath without checking. If the input to this function is
+// not an AnchoredSystemPath it will result in downstream errors.
+func AnchoredSystemPathFromUpstream(path string) AnchoredSystemPath {
+ return AnchoredSystemPath(path)
+}
+
+// AnchoredUnixPathFromUpstream takes a path string and casts it to an
+// AnchoredUnixPath without checking. If the input to this function is
+// not an AnchoredUnixPath it will result in downstream errors.
+func AnchoredUnixPathFromUpstream(path string) AnchoredUnixPath {
+ return AnchoredUnixPath(path)
+}
+
+// RelativeSystemPathFromUpstream takes a path string and casts it to a
+// RelativeSystemPath without checking. If the input to this function is
+// not a RelativeSystemPath it will result in downstream errors.
+func RelativeSystemPathFromUpstream(path string) RelativeSystemPath {
+ return RelativeSystemPath(path)
+}
+
+// RelativeUnixPathFromUpstream takes a path string and casts it to a
+// RelativeUnixPath without checking. If the input to this function is
+// not a RelativeUnixPath it will result in downstream errors.
+func RelativeUnixPathFromUpstream(path string) RelativeUnixPath {
+ return RelativeUnixPath(path)
+}
diff --git a/cli/internal/turbostate/turbostate.go b/cli/internal/turbostate/turbostate.go
new file mode 100644
index 0000000..dad5b47
--- /dev/null
+++ b/cli/internal/turbostate/turbostate.go
@@ -0,0 +1,141 @@
+// Package turbostate holds all of the state given from the Rust CLI
+// that is necessary to execute turbo. We transfer this state from Rust
+// to Go via a JSON payload.
+package turbostate
+
+import (
+ "fmt"
+
+ "github.com/vercel/turbo/cli/internal/util"
+)
+
+// RepoState is the state of the repository: the repo root
+// along with the mode (single package or multi package)
+type RepoState struct {
+ Root string `json:"root"`
+ Mode string `json:"mode"`
+}
+
+// DaemonPayload holds the extra flags
+// passed for the `daemon` subcommand
+type DaemonPayload struct {
+ IdleTimeout string `json:"idle_time"`
+ JSON bool `json:"json"`
+}
+
+// PrunePayload is the extra flags passed for the `prune` subcommand
+type PrunePayload struct {
+ Scope []string `json:"scope"`
+ Docker bool `json:"docker"`
+ OutputDir string `json:"output_dir"`
+}
+
+// RunPayload is the extra flags passed for the `run` subcommand
+type RunPayload struct {
+ CacheDir string `json:"cache_dir"`
+ CacheWorkers int `json:"cache_workers"`
+ Concurrency string `json:"concurrency"`
+ ContinueExecution bool `json:"continue_execution"`
+ DryRun string `json:"dry_run"`
+ Filter []string `json:"filter"`
+ Force bool `json:"force"`
+ GlobalDeps []string `json:"global_deps"`
+ EnvMode util.EnvMode `json:"env_mode"`
+ // NOTE: Graph has three effective states that are modeled using a *string:
+ // nil -> no flag passed
+ // "" -> flag passed but no file name attached: print to stdout
+ // "foo" -> flag passed and file name attached: emit to file
+ // The mirror for this in Rust is `Option<String>` with the default value
+ // for the flag being `Some("")`.
+ Graph *string `json:"graph"`
+ Ignore []string `json:"ignore"`
+ IncludeDependencies bool `json:"include_dependencies"`
+ NoCache bool `json:"no_cache"`
+ NoDaemon bool `json:"no_daemon"`
+ NoDeps bool `json:"no_deps"`
+ Only bool `json:"only"`
+ OutputLogs string `json:"output_logs"`
+ PassThroughArgs []string `json:"pass_through_args"`
+ Parallel bool `json:"parallel"`
+ Profile string `json:"profile"`
+ RemoteOnly bool `json:"remote_only"`
+ Scope []string `json:"scope"`
+ Since string `json:"since"`
+ SinglePackage bool `json:"single_package"`
+ Summarize bool `json:"summarize"`
+ Tasks []string `json:"tasks"`
+ PkgInferenceRoot string `json:"pkg_inference_root"`
+ LogPrefix string `json:"log_prefix"`
+ ExperimentalSpaceID string `json:"experimental_space_id"`
+}
+
+// Command consists of the data necessary to run a command.
+// Only one of these fields should be initialized at a time.
+type Command struct {
+ Daemon *DaemonPayload `json:"daemon"`
+ Prune *PrunePayload `json:"prune"`
+ Run *RunPayload `json:"run"`
+}
+
+// ParsedArgsFromRust are the parsed command line arguments passed
+// from the Rust shim
+type ParsedArgsFromRust struct {
+ API string `json:"api"`
+ Color bool `json:"color"`
+ CPUProfile string `json:"cpu_profile"`
+ CWD string `json:"cwd"`
+ Heap string `json:"heap"`
+ Login string `json:"login"`
+ NoColor bool `json:"no_color"`
+ Preflight bool `json:"preflight"`
+ RemoteCacheTimeout uint64 `json:"remote_cache_timeout"`
+ Team string `json:"team"`
+ Token string `json:"token"`
+ Trace string `json:"trace"`
+ Verbosity int `json:"verbosity"`
+ TestRun bool `json:"test_run"`
+ Command Command `json:"command"`
+}
+
+// GetColor returns the value of the `color` flag.
+func (a ParsedArgsFromRust) GetColor() bool {
+ return a.Color
+}
+
+// GetNoColor returns the value of the `no-color` flag.
+func (a ParsedArgsFromRust) GetNoColor() bool {
+ return a.NoColor
+}
+
+// GetLogin returns the value of the `login` flag.
+func (a ParsedArgsFromRust) GetLogin() (string, error) {
+ return a.Login, nil
+}
+
+// GetAPI returns the value of the `api` flag.
+func (a ParsedArgsFromRust) GetAPI() (string, error) {
+ return a.API, nil
+}
+
+// GetTeam returns the value of the `team` flag.
+func (a ParsedArgsFromRust) GetTeam() (string, error) {
+ return a.Team, nil
+}
+
+// GetToken returns the value of the `token` flag.
+func (a ParsedArgsFromRust) GetToken() (string, error) {
+ return a.Token, nil
+}
+
+// GetCwd returns the value of the `cwd` flag.
+func (a ParsedArgsFromRust) GetCwd() (string, error) {
+ return a.CWD, nil
+}
+
+// GetRemoteCacheTimeout returns the value of the `remote-cache-timeout` flag.
+func (a ParsedArgsFromRust) GetRemoteCacheTimeout() (uint64, error) {
+ if a.RemoteCacheTimeout != 0 {
+ return a.RemoteCacheTimeout, nil
+ }
+ return 0, fmt.Errorf("no remote cache timeout provided")
+}
diff --git a/cli/internal/ui/charset.go b/cli/internal/ui/charset.go
new file mode 100644
index 0000000..0207c10
--- /dev/null
+++ b/cli/internal/ui/charset.go
@@ -0,0 +1,3 @@
+package ui
+
+var charset = []string{" ", "> ", ">> ", ">>>"}
diff --git a/cli/internal/ui/colors.go b/cli/internal/ui/colors.go
new file mode 100644
index 0000000..4b2eccd
--- /dev/null
+++ b/cli/internal/ui/colors.go
@@ -0,0 +1,54 @@
+package ui
+
+import (
+ "os"
+
+ "github.com/fatih/color"
+)
+
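+// ColorMode enumerates how color output should be handled: left to
+// default TTY detection, forced off, or forced on.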
+type ColorMode int
+
+const (
+ ColorModeUndefined ColorMode = iota + 1
+ ColorModeSuppressed
+ ColorModeForced
+)
+
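+// GetColorModeFromEnv determines the ColorMode from the FORCE_COLOR environment variable.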
+func GetColorModeFromEnv() ColorMode {
+ // The FORCE_COLOR behavior and accepted values are taken from the supports-color NodeJS package:
+ // The accepted values as documented are "0" to disable, and "1", "2", or "3" to force-enable color
+ // at the specified support level (1 = 16 colors, 2 = 256 colors, 3 = 16M colors).
+ // We don't currently use the level for anything specific, and just treat things as on and off.
+ //
+ // Note: while "false" and "true" aren't documented, the library coerces these values to 0 and 1
+ // respectively, so that behavior is reproduced here as well.
+ // https://www.npmjs.com/package/supports-color
+
+ switch forceColor := os.Getenv("FORCE_COLOR"); {
+ case forceColor == "false" || forceColor == "0":
+ return ColorModeSuppressed
+ case forceColor == "true" || forceColor == "1" || forceColor == "2" || forceColor == "3":
+ return ColorModeForced
+ default:
+ return ColorModeUndefined
+ }
+}
+
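+// applyColorMode applies the requested ColorMode to the global color
+// settings and returns the mode that is actually in effect.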
+func applyColorMode(colorMode ColorMode) ColorMode {
+ switch colorMode {
+ case ColorModeForced:
+ color.NoColor = false
+ case ColorModeSuppressed:
+ color.NoColor = true
+ case ColorModeUndefined:
+ default:
+ // color.NoColor already gets its default value based on
+ // isTTY and/or the presence of the NO_COLOR env variable.
+ }
+
+ if color.NoColor {
+ return ColorModeSuppressed
+ }
+ return ColorModeForced
+}
diff --git a/cli/internal/ui/spinner.go b/cli/internal/ui/spinner.go
new file mode 100644
index 0000000..6e47d2d
--- /dev/null
+++ b/cli/internal/ui/spinner.go
@@ -0,0 +1,80 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+package ui
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "time"
+
+ "github.com/briandowns/spinner"
+)
+
+// startStopper is the interface to interact with the spinner.
+type startStopper interface {
+ Start()
+ Stop()
+}
+
+// Spinner represents an indicator that an asynchronous operation is taking place.
+//
+// For short operations, less than 4 seconds, display only the spinner with the Start and Stop methods.
+type Spinner struct {
+ spin startStopper
+}
+
+// NewSpinner returns a spinner that outputs to w.
+func NewSpinner(w io.Writer) *Spinner {
+ interval := 125 * time.Millisecond
+ if os.Getenv("CI") == "true" {
+ interval = 30 * time.Second
+ }
+ s := spinner.New(charset, interval, spinner.WithHiddenCursor(true))
+ s.Writer = w
+ s.Color("faint")
+ return &Spinner{
+ spin: s,
+ }
+}
+
+// Start starts the spinner suffixed with a label.
+func (s *Spinner) Start(label string) {
+ s.suffix(fmt.Sprintf(" %s", label))
+ s.spin.Start()
+}
+
+// Stop stops the spinner and replaces it with a label.
+func (s *Spinner) Stop(label string) {
+ s.finalMSG(label)
+ s.spin.Stop()
+}
+
+func (s *Spinner) lock() {
+ if spinner, ok := s.spin.(*spinner.Spinner); ok {
+ spinner.Lock()
+ }
+}
+
+func (s *Spinner) unlock() {
+ if spinner, ok := s.spin.(*spinner.Spinner); ok {
+ spinner.Unlock()
+ }
+}
+
+func (s *Spinner) suffix(label string) {
+ s.lock()
+ defer s.unlock()
+ if spinner, ok := s.spin.(*spinner.Spinner); ok {
+ spinner.Suffix = label
+ }
+}
+
+func (s *Spinner) finalMSG(label string) {
+ s.lock()
+ defer s.unlock()
+ if spinner, ok := s.spin.(*spinner.Spinner); ok {
+ spinner.FinalMSG = label
+ }
+}
diff --git a/cli/internal/ui/term/cursor.go b/cli/internal/ui/term/cursor.go
new file mode 100644
index 0000000..253f043
--- /dev/null
+++ b/cli/internal/ui/term/cursor.go
@@ -0,0 +1,73 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+// Package cursor provides functionality to interact with the terminal cursor.
+package cursor
+
+import (
+ "io"
+ "os"
+
+ "github.com/AlecAivazis/survey/v2/terminal"
+)
+
+type cursor interface {
+ Up(n int) error
+ Down(n int) error
+ Hide() error
+ Show() error
+}
+
+// fakeFileWriter is a terminal.FileWriter.
+// If the underlying writer w does not implement Fd() then a dummy value is returned.
+type fakeFileWriter struct {
+ w io.Writer
+}
+
+// Write delegates to the internal writer.
+func (w *fakeFileWriter) Write(p []byte) (int, error) {
+ return w.w.Write(p)
+}
+
+// Fd is required to be implemented to satisfy the terminal.FileWriter interface.
+// If the underlying writer is a file, like os.Stdout, then invoke it. Otherwise, this method allows us to create
+// a Cursor that can write to any io.Writer like a bytes.Buffer by returning a dummy value.
+func (w *fakeFileWriter) Fd() uintptr {
+ if v, ok := w.w.(terminal.FileWriter); ok {
+ return v.Fd()
+ }
+ return 0
+}
+
+// Cursor represents the terminal's cursor.
+type Cursor struct {
+ c cursor
+}
+
+// New creates a new cursor that writes to stderr.
+func New() *Cursor {
+ return &Cursor{
+ c: &terminal.Cursor{
+ Out: os.Stderr,
+ },
+ }
+}
+
+// EraseLine erases a line from a FileWriter.
+func EraseLine(fw terminal.FileWriter) {
+ terminal.EraseLine(fw, terminal.ERASE_LINE_ALL)
+}
+
+// EraseLinesAbove erases the current line and the n lines above it, leaving the cursor on the topmost erased line.
+func EraseLinesAbove(fw terminal.FileWriter, n int) {
+ c := Cursor{
+ c: &terminal.Cursor{
+ Out: fw,
+ },
+ }
+ for i := 0; i < n; i += 1 {
+ EraseLine(fw)
+ c.c.Up(1)
+ }
+ EraseLine(fw) // Erase the nth line as well.
+}
diff --git a/cli/internal/ui/term/cursor_test.go b/cli/internal/ui/term/cursor_test.go
new file mode 100644
index 0000000..270ebe8
--- /dev/null
+++ b/cli/internal/ui/term/cursor_test.go
@@ -0,0 +1,43 @@
+//go:build !windows
+// +build !windows
+
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+package cursor
+
+import (
+ "io"
+ "strings"
+ "testing"
+
+ "github.com/AlecAivazis/survey/v2/terminal"
+ "github.com/stretchr/testify/require"
+)
+
+func TestEraseLine(t *testing.T) {
+ testCases := map[string]struct {
+ inWriter func(writer io.Writer) terminal.FileWriter
+ shouldErase bool
+ }{
+ "should erase a line if the writer is a file": {
+ inWriter: func(writer io.Writer) terminal.FileWriter {
+ return &fakeFileWriter{w: writer}
+ },
+ shouldErase: true,
+ },
+ }
+
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+ // GIVEN
+ buf := new(strings.Builder)
+
+ // WHEN
+ EraseLine(tc.inWriter(buf))
+
+ // THEN
+ isErased := buf.String() != ""
+ require.Equal(t, tc.shouldErase, isErased)
+ })
+ }
+}
diff --git a/cli/internal/ui/ui.go b/cli/internal/ui/ui.go
new file mode 100644
index 0000000..9084c76
--- /dev/null
+++ b/cli/internal/ui/ui.go
@@ -0,0 +1,121 @@
+package ui
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "regexp"
+ "strings"
+
+ "github.com/fatih/color"
+ "github.com/mattn/go-isatty"
+ "github.com/mitchellh/cli"
+ "github.com/vercel/turbo/cli/internal/ci"
+)
+
+const ansiEscapeStr = "[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))"
+
+// IsTTY is true when stdout appears to be a tty
+var IsTTY = isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd())
+
+// IsCI is true when we appear to be running in a non-interactive context.
+var IsCI = !IsTTY || ci.IsCi()
+var gray = color.New(color.Faint)
+var bold = color.New(color.Bold)
+var ERROR_PREFIX = color.New(color.Bold, color.FgRed, color.ReverseVideo).Sprint(" ERROR ")
+var WARNING_PREFIX = color.New(color.Bold, color.FgYellow, color.ReverseVideo).Sprint(" WARNING ")
+
+// InfoPrefix is a colored string for info level log messages
+var InfoPrefix = color.New(color.Bold, color.FgWhite, color.ReverseVideo).Sprint(" INFO ")
+
+var ansiRegex = regexp.MustCompile(ansiEscapeStr)
+
+// Dim returns the text styled as dim/faint
+func Dim(str string) string {
+ return gray.Sprint(str)
+}
+
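+// Bold returns the text styled as bold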
+func Bold(str string) string {
+ return bold.Sprint(str)
+}
+
+// Adapted from go-rainbow
+// Copyright (c) 2017 Raphael Amorim
+// Source: https://github.com/raphamorim/go-rainbow
+// SPDX-License-Identifier: MIT
+func rgb(i int) (int, int, int) {
+ var f = 0.275
+
+ return int(math.Sin(f*float64(i)+4*math.Pi/3)*127 + 128),
+ // the green channel is pinned to 45 rather than the original sine wave
+ 45,
+ int(math.Sin(f*float64(i)+0)*127 + 128)
+}
+
+// Rainbow returns a formatted, colorized string ready to print to the shell/terminal
+//
+// Adapted from go-rainbow
+// Copyright (c) 2017 Raphael Amorim
+// Source: https://github.com/raphamorim/go-rainbow
+// SPDX-License-Identifier: MIT
+func Rainbow(text string) string {
+ var rainbowStr []string
+ for index, value := range text {
+ r, g, b := rgb(index)
+ str := fmt.Sprintf("\033[1m\033[38;2;%d;%d;%dm%c\033[0m\033[0;1m", r, g, b, value)
+ rainbowStr = append(rainbowStr, str)
+ }
+
+ return strings.Join(rainbowStr, "")
+}
+
+type stripAnsiWriter struct {
+ wrappedWriter io.Writer
+}
+
+func (into *stripAnsiWriter) Write(p []byte) (int, error) {
+ n, err := into.wrappedWriter.Write(ansiRegex.ReplaceAll(p, []byte{}))
+ if err != nil {
+ // The number of bytes returned here isn't directly related to the input bytes
+ // if ansi color codes were being stripped out, but we are counting on Stdout.Write
+ // not failing under typical operation as well.
+ return n, err
+ }
+
+ // Write must return a non-nil error if it returns n < len(p). Consequently, if the
+ // wrappedWrite.Write call succeeded we will return len(p) as the number of bytes
+ // written.
+ return len(p), nil
+}
+
+// Default returns the default colored UI
+func Default() *cli.ColoredUi {
+ return BuildColoredUi(ColorModeUndefined)
+}
+
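+// BuildColoredUi constructs a cli.ColoredUi for the given color mode,
+// stripping ANSI escapes from output when color is suppressed.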
+func BuildColoredUi(colorMode ColorMode) *cli.ColoredUi {
+ colorMode = applyColorMode(colorMode)
+
+ var outWriter, errWriter io.Writer
+
+ if colorMode == ColorModeSuppressed {
+ outWriter = &stripAnsiWriter{wrappedWriter: os.Stdout}
+ errWriter = &stripAnsiWriter{wrappedWriter: os.Stderr}
+ } else {
+ outWriter = os.Stdout
+ errWriter = os.Stderr
+ }
+
+ return &cli.ColoredUi{
+ Ui: &cli.BasicUi{
+ Reader: os.Stdin,
+ Writer: outWriter,
+ ErrorWriter: errWriter,
+ },
+ OutputColor: cli.UiColorNone,
+ InfoColor: cli.UiColorNone,
+ WarnColor: cli.UiColor{Code: int(color.FgYellow), Bold: false},
+ ErrorColor: cli.UiColorRed,
+ }
+}
diff --git a/cli/internal/util/backends.go b/cli/internal/util/backends.go
new file mode 100644
index 0000000..66941ad
--- /dev/null
+++ b/cli/internal/util/backends.go
@@ -0,0 +1,30 @@
+package util
+
+import (
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+
+ "github.com/vercel/turbo/cli/internal/yaml"
+)
+
+// YarnRC represents the contents of .yarnrc.yml
+type YarnRC struct {
+ NodeLinker string `yaml:"nodeLinker"`
+}
+
+// IsNMLinker checks whether Yarn is set to use the node-modules linker style
+func IsNMLinker(cwd string) (bool, error) {
+ yarnRC := &YarnRC{}
+
+ bytes, err := ioutil.ReadFile(filepath.Join(cwd, ".yarnrc.yml"))
+ if err != nil {
+ return false, fmt.Errorf(".yarnrc.yml: %w", err)
+ }
+
+ if err := yaml.Unmarshal(bytes, yarnRC); err != nil {
+ return false, fmt.Errorf(".yarnrc.yml: %w", err)
+ }
+
+ return yarnRC.NodeLinker == "node-modules", nil
+}
diff --git a/cli/internal/util/browser/open.go b/cli/internal/util/browser/open.go
new file mode 100644
index 0000000..a6171e9
--- /dev/null
+++ b/cli/internal/util/browser/open.go
@@ -0,0 +1,37 @@
+package browser
+
+import (
+ "fmt"
+ "os/exec"
+ "runtime"
+)
+
+// OpenBrowser attempts to interactively open a browser window at the given URL
+func OpenBrowser(url string) error {
+ var err error
+
+ switch runtime.GOOS {
+ case "linux":
+ if posixBinExists("wslview") {
+ err = exec.Command("wslview", url).Start()
+ } else {
+ err = exec.Command("xdg-open", url).Start()
+ }
+ case "windows":
+ err = exec.Command("rundll32", "url.dll,FileProtocolHandler", url).Start()
+ case "darwin":
+ err = exec.Command("open", url).Start()
+ default:
+ err = fmt.Errorf("unsupported platform")
+ }
+ return err
+}
+
+func posixBinExists(bin string) bool {
+ err := exec.Command("which", bin).Run()
+ // we mostly don't care what the error is, it suggests the binary is not usable
+ return err == nil
+}
diff --git a/cli/internal/util/closer.go b/cli/internal/util/closer.go
new file mode 100644
index 0000000..996760b
--- /dev/null
+++ b/cli/internal/util/closer.go
@@ -0,0 +1,15 @@
+package util
+
+// CloseAndIgnoreError is a utility to tell our linter that we explicitly deem it okay
+// to not check a particular error on closing of a resource.
+//
+// We use `errcheck` as a linter, which is super-opinionated about checking errors,
+// even in places where we don't necessarily care to check the error.
+//
+// `golangci-lint` has a default ignore list for this lint problem (EXC0001) which
+// can be used to sidestep this problem but it's possibly a little too-heavy-handed
+// in exclusion. At the expense of discoverability, this utility function forces
+// opt-in to ignoring errors on closing of things that can be `Close`d.
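+//
+// Typical usage: defer CloseAndIgnoreError(file)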
+func CloseAndIgnoreError(closer interface{ Close() error }) {
+ _ = closer.Close()
+}
diff --git a/cli/internal/util/cmd.go b/cli/internal/util/cmd.go
new file mode 100644
index 0000000..ae79aa0
--- /dev/null
+++ b/cli/internal/util/cmd.go
@@ -0,0 +1,24 @@
+package util
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/spf13/cobra"
+)
+
+// ExitCodeError is a specific error that is returned by the command to specify the exit code
+type ExitCodeError struct {
+ ExitCode int
+}
+
+func (e *ExitCodeError) Error() string { return fmt.Sprintf("exit code %d", e.ExitCode) }
+
+// HelpForCobraCmd returns the help string for a given command
+// Note that this overwrites the output for the command
+func HelpForCobraCmd(cmd *cobra.Command) string {
+ f := cmd.HelpFunc()
+ buf := bytes.NewBufferString("")
+ cmd.SetOut(buf)
+ f(cmd, []string{})
+ return buf.String()
+}
diff --git a/cli/internal/util/filter/filter.go b/cli/internal/util/filter/filter.go
new file mode 100644
index 0000000..fbc475d
--- /dev/null
+++ b/cli/internal/util/filter/filter.go
@@ -0,0 +1,133 @@
+// Copyright (c) 2015-2020 InfluxData Inc. MIT License (MIT)
+// https://github.com/influxdata/telegraf
+package filter
+
+import (
+ "strings"
+
+ "github.com/gobwas/glob"
+)
+
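+// Filter matches a string against a compiled filter list.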
+type Filter interface {
+ Match(string) bool
+}
+
+// Compile takes a list of string filters and returns a Filter interface
+// for matching a given string against the filter list. The filter list
+// supports glob matching too, e.g.:
+//
+// f, _ := Compile([]string{"cpu", "mem", "net*"})
+// f.Match("cpu") // true
+// f.Match("network") // true
+// f.Match("memory") // false
+func Compile(filters []string) (Filter, error) {
+ // return if there is nothing to compile
+ if len(filters) == 0 {
+ return nil, nil
+ }
+
+ // check if we can compile a non-glob filter
+ noGlob := true
+ for _, filter := range filters {
+ if hasMeta(filter) {
+ noGlob = false
+ break
+ }
+ }
+
+ switch {
+ case noGlob:
+ // return non-globbing filter if not needed.
+ return compileFilterNoGlob(filters), nil
+ case len(filters) == 1:
+ return glob.Compile(filters[0])
+ default:
+ return glob.Compile("{" + strings.Join(filters, ",") + "}")
+ }
+}
+
+// hasMeta reports whether path contains any magic glob characters.
+func hasMeta(s string) bool {
+ return strings.ContainsAny(s, "*?[")
+}
+
+type filter struct {
+ m map[string]struct{}
+}
+
+func (f *filter) Match(s string) bool {
+ _, ok := f.m[s]
+ return ok
+}
+
+type filtersingle struct {
+ s string
+}
+
+func (f *filtersingle) Match(s string) bool {
+ return f.s == s
+}
+
+func compileFilterNoGlob(filters []string) Filter {
+ if len(filters) == 1 {
+ return &filtersingle{s: filters[0]}
+ }
+ out := filter{m: make(map[string]struct{})}
+ for _, filter := range filters {
+ out.m[filter] = struct{}{}
+ }
+ return &out
+}
+
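+// IncludeExcludeFilter composes an include filter and an exclude filter
+// with configurable defaults for when either list is empty.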
+type IncludeExcludeFilter struct {
+ include Filter
+ exclude Filter
+ includeDefault bool
+ excludeDefault bool
+}
+
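+// NewIncludeExcludeFilter compiles include and exclude filter lists,
+// including everything and excluding nothing by default.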
+func NewIncludeExcludeFilter(
+ include []string,
+ exclude []string,
+) (Filter, error) {
+ return NewIncludeExcludeFilterDefaults(include, exclude, true, false)
+}
+
+func NewIncludeExcludeFilterDefaults(
+ include []string,
+ exclude []string,
+ includeDefault bool,
+ excludeDefault bool,
+) (Filter, error) {
+ in, err := Compile(include)
+ if err != nil {
+ return nil, err
+ }
+
+ ex, err := Compile(exclude)
+ if err != nil {
+ return nil, err
+ }
+
+ return &IncludeExcludeFilter{in, ex, includeDefault, excludeDefault}, nil
+}
+
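+// Match reports whether s passes the include filter (or the include
+// default when none is set) and is not rejected by the exclude filter.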
+func (f *IncludeExcludeFilter) Match(s string) bool {
+ if f.include != nil {
+ if !f.include.Match(s) {
+ return false
+ }
+ } else if !f.includeDefault {
+ return false
+ }
+
+ if f.exclude != nil {
+ if f.exclude.Match(s) {
+ return false
+ }
+ } else if f.excludeDefault {
+ return false
+ }
+
+ return true
+}
diff --git a/cli/internal/util/filter/filter_test.go b/cli/internal/util/filter/filter_test.go
new file mode 100644
index 0000000..727a4b6
--- /dev/null
+++ b/cli/internal/util/filter/filter_test.go
@@ -0,0 +1,116 @@
+// Copyright (c) 2015-2020 InfluxData Inc. MIT License (MIT)
+// https://github.com/influxdata/telegraf
+package filter
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestCompile(t *testing.T) {
+ f, err := Compile([]string{})
+ assert.NoError(t, err)
+ assert.Nil(t, f)
+
+ f, err = Compile([]string{"cpu"})
+ assert.NoError(t, err)
+ assert.True(t, f.Match("cpu"))
+ assert.False(t, f.Match("cpu0"))
+ assert.False(t, f.Match("mem"))
+
+ f, err = Compile([]string{"cpu*"})
+ assert.NoError(t, err)
+ assert.True(t, f.Match("cpu"))
+ assert.True(t, f.Match("cpu0"))
+ assert.False(t, f.Match("mem"))
+
+ f, err = Compile([]string{"cpu", "mem"})
+ assert.NoError(t, err)
+ assert.True(t, f.Match("cpu"))
+ assert.False(t, f.Match("cpu0"))
+ assert.True(t, f.Match("mem"))
+
+ f, err = Compile([]string{"cpu", "mem", "net*"})
+ assert.NoError(t, err)
+ assert.True(t, f.Match("cpu"))
+ assert.False(t, f.Match("cpu0"))
+ assert.True(t, f.Match("mem"))
+ assert.True(t, f.Match("network"))
+}
+
+func TestIncludeExclude(t *testing.T) {
+ tags := []string{}
+ labels := []string{"best", "com_influxdata", "timeseries", "com_influxdata_telegraf", "ever"}
+
+ filter, err := NewIncludeExcludeFilter([]string{}, []string{"com_influx*"})
+ if err != nil {
+ t.Fatalf("Failed to create include/exclude filter - %v", err)
+ }
+
+ for i := range labels {
+ if filter.Match(labels[i]) {
+ tags = append(tags, labels[i])
+ }
+ }
+
+ assert.Equal(t, []string{"best", "timeseries", "ever"}, tags)
+}
+
+var benchbool bool
+
+func BenchmarkFilterSingleNoGlobFalse(b *testing.B) {
+ f, _ := Compile([]string{"cpu"})
+ var tmp bool
+ for n := 0; n < b.N; n++ {
+ tmp = f.Match("network")
+ }
+ benchbool = tmp
+}
+
+func BenchmarkFilterSingleNoGlobTrue(b *testing.B) {
+ f, _ := Compile([]string{"cpu"})
+ var tmp bool
+ for n := 0; n < b.N; n++ {
+ tmp = f.Match("cpu")
+ }
+ benchbool = tmp
+}
+
+func BenchmarkFilter(b *testing.B) {
+ f, _ := Compile([]string{"cpu", "mem", "net*"})
+ var tmp bool
+ for n := 0; n < b.N; n++ {
+ tmp = f.Match("network")
+ }
+ benchbool = tmp
+}
+
+func BenchmarkFilterNoGlob(b *testing.B) {
+ f, _ := Compile([]string{"cpu", "mem", "net"})
+ var tmp bool
+ for n := 0; n < b.N; n++ {
+ tmp = f.Match("net")
+ }
+ benchbool = tmp
+}
+
+func BenchmarkFilter2(b *testing.B) {
+ f, _ := Compile([]string{"aa", "bb", "c", "ad", "ar", "at", "aq",
+ "aw", "az", "axxx", "ab", "cpu", "mem", "net*"})
+ var tmp bool
+ for n := 0; n < b.N; n++ {
+ tmp = f.Match("network")
+ }
+ benchbool = tmp
+}
+
+func BenchmarkFilter2NoGlob(b *testing.B) {
+ f, _ := Compile([]string{"aa", "bb", "c", "ad", "ar", "at", "aq",
+ "aw", "az", "axxx", "ab", "cpu", "mem", "net"})
+ var tmp bool
+ for n := 0; n < b.N; n++ {
+ tmp = f.Match("net")
+ }
+ benchbool = tmp
+}
diff --git a/cli/internal/util/graph.go b/cli/internal/util/graph.go
new file mode 100644
index 0000000..89de18c
--- /dev/null
+++ b/cli/internal/util/graph.go
@@ -0,0 +1,35 @@
+package util
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/pyr-sh/dag"
+)
+
+// ValidateGraph checks that a given DAG has no cycles and no self-referential edges.
+// We differ from the underlying DAG Validate method in that we allow multiple roots.
+func ValidateGraph(graph *dag.AcyclicGraph) error {
+ // We use Cycles instead of Validate because
+ // our DAG has multiple roots (entrypoints).
+ // Validate mandates that there is only a single root node.
+ cycles := graph.Cycles()
+ if len(cycles) > 0 {
+ cycleLines := make([]string, len(cycles))
+ for i, cycle := range cycles {
+ vertices := make([]string, len(cycle))
+ for j, vertex := range cycle {
+ vertices[j] = vertex.(string)
+ }
+ cycleLines[i] = "\t" + strings.Join(vertices, ",")
+ }
+ return fmt.Errorf("cyclic dependency detected:\n%s", strings.Join(cycleLines, "\n"))
+ }
+
+ for _, e := range graph.Edges() {
+ if e.Source() == e.Target() {
+ return fmt.Errorf("%s depends on itself", e.Source())
+ }
+ }
+ return nil
+}
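+
+// Usage sketch (illustrative; assumes the hashicorp-style API that
+// github.com/pyr-sh/dag exposes):
+//
+//	var g dag.AcyclicGraph
+//	g.Add("a")
+//	g.Add("b")
+//	g.Add("c")
+//	g.Connect(dag.BasicEdge("a", "c"))
+//	g.Connect(dag.BasicEdge("b", "c")) // two roots ("a", "b") are fine
+//	err := ValidateGraph(&g)           // nil: no cycles, no self-edges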
diff --git a/cli/internal/util/modulo.go b/cli/internal/util/modulo.go
new file mode 100644
index 0000000..ec2957a
--- /dev/null
+++ b/cli/internal/util/modulo.go
@@ -0,0 +1,13 @@
+package util
+
+// PositiveMod returns x modulo d, normalized so the result is non-negative (Go's built-in % keeps the sign of x).
+func PositiveMod(x, d int) int {
+ x = x % d
+ if x >= 0 {
+ return x
+ }
+ if d < 0 {
+ return x - d
+ }
+ return x + d
+}
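+
+// For example (illustrative): Go's built-in % keeps the sign of x, so
+// -1 % 3 == -1, whereas PositiveMod(-1, 3) == 2, the wrap-around value
+// expected when indexing into a ring buffer.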
diff --git a/cli/internal/util/parse_concurrency.go b/cli/internal/util/parse_concurrency.go
new file mode 100644
index 0000000..6917600
--- /dev/null
+++ b/cli/internal/util/parse_concurrency.go
@@ -0,0 +1,39 @@
+package util
+
+import (
+ "fmt"
+ "math"
+ "runtime"
+ "strconv"
+ "strings"
+)
+
+var (
+ // alias so we can mock in tests
+ runtimeNumCPU = runtime.NumCPU
+	// sign argument for math.IsInf: a positive value checks for +Inf
+ _positiveInfinity = 1
+)
+
+// ParseConcurrency parses a concurrency value, which can be a number (e.g. 2) or a percentage (e.g. 50%).
+func ParseConcurrency(concurrencyRaw string) (int, error) {
+ if strings.HasSuffix(concurrencyRaw, "%") {
+ if percent, err := strconv.ParseFloat(concurrencyRaw[:len(concurrencyRaw)-1], 64); err != nil {
+ return 0, fmt.Errorf("invalid value for --concurrency CLI flag. This should be a number --concurrency=4 or percentage of CPU cores --concurrency=50%% : %w", err)
+ } else {
+ if percent > 0 && !math.IsInf(percent, _positiveInfinity) {
+ return int(math.Max(1, float64(runtimeNumCPU())*percent/100)), nil
+ } else {
+				return 0, fmt.Errorf("invalid percentage value %q for --concurrency CLI flag. This should be a percentage of CPU cores, between 1%% and 100%%", concurrencyRaw)
+ }
+ }
+ } else if i, err := strconv.Atoi(concurrencyRaw); err != nil {
+ return 0, fmt.Errorf("invalid value for --concurrency CLI flag. This should be a positive integer greater than or equal to 1: %w", err)
+ } else {
+ if i >= 1 {
+ return i, nil
+ } else {
+ return 0, fmt.Errorf("invalid value %v for --concurrency CLI flag. This should be a positive integer greater than or equal to 1", i)
+ }
+ }
+}
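+
+// Worked examples (illustrative): on an 8-CPU machine,
+//
+//	ParseConcurrency("4")    // 4, nil
+//	ParseConcurrency("50%")  // 4, nil (8 * 0.50)
+//	ParseConcurrency("1%")   // 1, nil (clamped up to 1 by math.Max)
+//	ParseConcurrency("0")    // 0, error: must be >= 1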
diff --git a/cli/internal/util/parse_concurrency_test.go b/cli/internal/util/parse_concurrency_test.go
new file mode 100644
index 0000000..b732724
--- /dev/null
+++ b/cli/internal/util/parse_concurrency_test.go
@@ -0,0 +1,79 @@
+package util
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestParseConcurrency(t *testing.T) {
+ cases := []struct {
+ Input string
+ Expected int
+ }{
+ {
+ "12",
+ 12,
+ },
+ {
+ "200%",
+ 20,
+ },
+ {
+ "100%",
+ 10,
+ },
+ {
+ "50%",
+ 5,
+ },
+ {
+ "25%",
+ 2,
+ },
+ {
+ "1%",
+ 1,
+ },
+ {
+ "0644", // we parse in base 10
+ 644,
+ },
+ }
+
+ // mock runtime.NumCPU() to 10
+ runtimeNumCPU = func() int {
+ return 10
+ }
+
+ for i, tc := range cases {
+		t.Run(fmt.Sprintf("%d) '%s' should be parsed as '%d'", i, tc.Input, tc.Expected), func(t *testing.T) {
+ if result, err := ParseConcurrency(tc.Input); err != nil {
+ t.Fatalf("invalid parse: %#v", err)
+ } else {
+ assert.EqualValues(t, tc.Expected, result)
+ }
+ })
+ }
+}
+
+func TestInvalidPercents(t *testing.T) {
+ inputs := []string{
+ "asdf",
+ "-1",
+ "-l%",
+ "infinity%",
+ "-infinity%",
+ "nan%",
+ "0b01",
+ "0o644",
+ "0xFF",
+ }
+ for _, tc := range inputs {
+ t.Run(tc, func(t *testing.T) {
+ val, err := ParseConcurrency(tc)
+ assert.Error(t, err, "input %v got %v", tc, val)
+ })
+ }
+}
diff --git a/cli/internal/util/printf.go b/cli/internal/util/printf.go
new file mode 100644
index 0000000..9cd6dce
--- /dev/null
+++ b/cli/internal/util/printf.go
@@ -0,0 +1,63 @@
+// Copyright Thought Machine, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+package util
+
+import (
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/vercel/turbo/cli/internal/ui"
+)
+
+// InitPrintf sets up the replacements used by the printf helpers below.
+// When stderr is not a TTY, it disables the ANSI escape replacements.
+func InitPrintf() {
+ if !ui.IsTTY {
+ replacements = map[string]string{}
+ }
+}
+
+// Sprintf formats a string, expanding pseudo-shell variables (e.g. ${BOLD})
+// into ANSI formatting codes.
+func Sprintf(format string, args ...interface{}) string {
+ return os.Expand(fmt.Sprintf(format, args...), replace)
+}
+
+// Printf formats and prints to stderr with the same ${VAR} expansion.
+func Printf(format string, args ...interface{}) {
+ fmt.Fprint(os.Stderr, os.Expand(fmt.Sprintf(format, args...), replace))
+}
+
+// Fprintf formats and prints to the given writer with the same ${VAR} expansion.
+func Fprintf(writer io.Writer, format string, args ...interface{}) {
+ fmt.Fprint(writer, os.Expand(fmt.Sprintf(format, args...), replace))
+}
+
+func replace(s string) string {
+ return replacements[s]
+}
+
+// These are the standard set of replacements we use.
+var replacements = map[string]string{
+ "BOLD": "\x1b[1m",
+ "BOLD_GREY": "\x1b[30;1m",
+ "BOLD_RED": "\x1b[31;1m",
+ "BOLD_GREEN": "\x1b[32;1m",
+ "BOLD_YELLOW": "\x1b[33;1m",
+ "BOLD_BLUE": "\x1b[34;1m",
+ "BOLD_MAGENTA": "\x1b[35;1m",
+ "BOLD_CYAN": "\x1b[36;1m",
+ "BOLD_WHITE": "\x1b[37;1m",
+ "UNDERLINE": "\x1b[4m",
+ "GREY": "\x1b[2m",
+ "RED": "\x1b[31m",
+ "GREEN": "\x1b[32m",
+ "YELLOW": "\x1b[33m",
+ "BLUE": "\x1b[34m",
+ "MAGENTA": "\x1b[35m",
+ "CYAN": "\x1b[36m",
+ "WHITE": "\x1b[37m",
+ "WHITE_ON_RED": "\x1b[37;41;1m",
+ "RED_NO_BG": "\x1b[31;49;1m",
+ "RESET": "\x1b[0m",
+ "ERASE_AFTER": "\x1b[K",
+ "CLEAR_END": "\x1b[0J",
+}
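+
+// For example (illustrative): on a TTY, Sprintf("${BOLD}done${RESET}")
+// yields "\x1b[1mdone\x1b[0m"; after InitPrintf runs on a non-TTY it
+// yields plain "done", since the replacement map is emptied.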
diff --git a/cli/internal/util/run_opts.go b/cli/internal/util/run_opts.go
new file mode 100644
index 0000000..08676a0
--- /dev/null
+++ b/cli/internal/util/run_opts.go
@@ -0,0 +1,53 @@
+package util
+
+import "strings"
+
+// EnvMode specifies if we will be using strict env vars
+type EnvMode string
+
+const (
+ // Infer - infer environment variable constraints from turbo.json
+ Infer EnvMode = "Infer"
+ // Loose - environment variables are unconstrained
+ Loose EnvMode = "Loose"
+ // Strict - environment variables are limited
+ Strict EnvMode = "Strict"
+)
+
+// MarshalText implements TextMarshaler for the struct.
+func (s EnvMode) MarshalText() (text []byte, err error) {
+ return []byte(strings.ToLower(string(s))), nil
+}
+
+// RunOpts holds the options that control the execution of a turbo run
+type RunOpts struct {
+	// Concurrency caps the number of tasks that run simultaneously (1 forces serial execution)
+ Concurrency int
+ // Whether to execute in parallel (defaults to false)
+ Parallel bool
+
+ EnvMode EnvMode
+ // The filename to write a perf profile.
+ Profile string
+ // If true, continue task executions even if a task fails.
+ ContinueOnError bool
+ PassThroughArgs []string
+ // Restrict execution to only the listed task names. Default false
+ Only bool
+ // Dry run flags
+ DryRun bool
+ DryRunJSON bool
+ // Graph flags
+ GraphDot bool
+ GraphFile string
+ NoDaemon bool
+ SinglePackage bool
+
+	// LogPrefix controls the prefix (if any) prepended to task log lines
+ LogPrefix string
+
+ // Whether turbo should create a run summary
+ Summarize bool
+
+ ExperimentalSpaceID string
+}
diff --git a/cli/internal/util/semaphore.go b/cli/internal/util/semaphore.go
new file mode 100644
index 0000000..ef29df0
--- /dev/null
+++ b/cli/internal/util/semaphore.go
@@ -0,0 +1,43 @@
+package util
+
+// Semaphore is a wrapper around a channel to provide
+// utility methods to clarify that we are treating the
+// channel as a semaphore
+type Semaphore chan struct{}
+
+// NewSemaphore creates a semaphore that allows up
+// to a given limit of simultaneous acquisitions
+func NewSemaphore(n int) Semaphore {
+ if n <= 0 {
+ panic("semaphore with limit <=0")
+ }
+ ch := make(chan struct{}, n)
+ return Semaphore(ch)
+}
+
+// Acquire is used to acquire an available slot.
+// Blocks until available.
+func (s Semaphore) Acquire() {
+ s <- struct{}{}
+}
+
+// TryAcquire is used to do a non-blocking acquire.
+// Returns a bool indicating success
+func (s Semaphore) TryAcquire() bool {
+ select {
+ case s <- struct{}{}:
+ return true
+ default:
+ return false
+ }
+}
+
+// Release is used to return a slot. Acquire must
+// be called as a pre-condition.
+func (s Semaphore) Release() {
+ select {
+ case <-s:
+ default:
+ panic("release without an acquire")
+ }
+}
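+
+// Usage sketch (illustrative; the jobs slice and run func are hypothetical):
+//
+//	sem := NewSemaphore(4) // at most 4 jobs in flight
+//	for _, job := range jobs {
+//		sem.Acquire()
+//		go func(j string) {
+//			defer sem.Release()
+//			run(j)
+//		}(job)
+//	}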
diff --git a/cli/internal/util/set.go b/cli/internal/util/set.go
new file mode 100644
index 0000000..b6c5f86
--- /dev/null
+++ b/cli/internal/util/set.go
@@ -0,0 +1,147 @@
+package util
+
+// Set is a set data structure.
+type Set map[interface{}]interface{}
+
+// SetFromStrings creates a Set containing the strings from the given slice
+func SetFromStrings(sl []string) Set {
+ set := make(Set, len(sl))
+ for _, item := range sl {
+ set.Add(item)
+ }
+ return set
+}
+
+// Hashable is the interface used by set to get the hash code of a value.
+// If this isn't given, then the value of the item being added to the set
+// itself is used as the comparison value.
+type Hashable interface {
+ Hashcode() interface{}
+}
+
+// hashcode returns the hashcode used for set elements.
+func hashcode(v interface{}) interface{} {
+ if h, ok := v.(Hashable); ok {
+ return h.Hashcode()
+ }
+
+ return v
+}
+
+// Add adds an item to the set
+func (s Set) Add(v interface{}) {
+ s[hashcode(v)] = v
+}
+
+// Delete removes an item from the set.
+func (s Set) Delete(v interface{}) {
+ delete(s, hashcode(v))
+}
+
+// Includes returns true/false of whether a value is in the set.
+func (s Set) Includes(v interface{}) bool {
+ _, ok := s[hashcode(v)]
+ return ok
+}
+
+// Intersection computes the set intersection with other.
+func (s Set) Intersection(other Set) Set {
+ result := make(Set)
+ if s == nil || other == nil {
+ return result
+ }
+ // Iteration over a smaller set has better performance.
+ if other.Len() < s.Len() {
+ s, other = other, s
+ }
+ for _, v := range s {
+ if other.Includes(v) {
+ result.Add(v)
+ }
+ }
+ return result
+}
+
+// Difference returns a set with the elements that s has but
+// other doesn't.
+func (s Set) Difference(other Set) Set {
+ result := make(Set)
+ for k, v := range s {
+ var ok bool
+ if other != nil {
+ _, ok = other[k]
+ }
+ if !ok {
+ result.Add(v)
+ }
+ }
+
+ return result
+}
+
+// Some tests whether at least one element in the array passes the test implemented by the provided function.
+// It returns a Boolean value.
+func (s Set) Some(cb func(interface{}) bool) bool {
+ for _, v := range s {
+ if cb(v) {
+ return true
+ }
+ }
+ return false
+}
+
+// Filter returns a set that contains the elements from the receiver
+// where the given callback returns true.
+func (s Set) Filter(cb func(interface{}) bool) Set {
+ result := make(Set)
+
+ for _, v := range s {
+ if cb(v) {
+ result.Add(v)
+ }
+ }
+
+ return result
+}
+
+// Len is the number of items in the set.
+func (s Set) Len() int {
+ return len(s)
+}
+
+// List returns the list of set elements.
+func (s Set) List() []interface{} {
+ if s == nil {
+ return nil
+ }
+
+ r := make([]interface{}, 0, len(s))
+ for _, v := range s {
+ r = append(r, v)
+ }
+
+ return r
+}
+
+// UnsafeListOfStrings casts each element to a string, panicking if any element is not a string
+func (s Set) UnsafeListOfStrings() []string {
+ if s == nil {
+ return nil
+ }
+
+ r := make([]string, 0, len(s))
+ for _, v := range s {
+ r = append(r, v.(string))
+ }
+
+ return r
+}
+
+// Copy returns a shallow copy of the set.
+func (s Set) Copy() Set {
+ c := make(Set)
+ for k, v := range s {
+ c[k] = v
+ }
+ return c
+}
diff --git a/cli/internal/util/set_test.go b/cli/internal/util/set_test.go
new file mode 100644
index 0000000..52736b4
--- /dev/null
+++ b/cli/internal/util/set_test.go
@@ -0,0 +1,149 @@
+package util
+
+import (
+ "fmt"
+ "testing"
+)
+
+func TestSetDifference(t *testing.T) {
+ cases := []struct {
+ Name string
+ A, B []interface{}
+ Expected []interface{}
+ }{
+ {
+ "same",
+ []interface{}{1, 2, 3},
+ []interface{}{3, 1, 2},
+ []interface{}{},
+ },
+
+ {
+ "A has extra elements",
+ []interface{}{1, 2, 3},
+ []interface{}{3, 2},
+ []interface{}{1},
+ },
+
+ {
+ "B has extra elements",
+ []interface{}{1, 2, 3},
+ []interface{}{3, 2, 1, 4},
+ []interface{}{},
+ },
+ }
+
+ for i, tc := range cases {
+ t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) {
+ one := make(Set)
+ two := make(Set)
+ expected := make(Set)
+ for _, v := range tc.A {
+ one.Add(v)
+ }
+ for _, v := range tc.B {
+ two.Add(v)
+ }
+ for _, v := range tc.Expected {
+ expected.Add(v)
+ }
+
+ actual := one.Difference(two)
+ match := actual.Intersection(expected)
+ if match.Len() != expected.Len() {
+ t.Fatalf("bad: %#v", actual.List())
+ }
+ })
+ }
+}
+
+func TestSetFilter(t *testing.T) {
+ cases := []struct {
+ Input []interface{}
+ Expected []interface{}
+ }{
+ {
+ []interface{}{1, 2, 3},
+ []interface{}{1, 2, 3},
+ },
+
+ {
+ []interface{}{4, 5, 6},
+ []interface{}{4},
+ },
+
+ {
+ []interface{}{7, 8, 9},
+ []interface{}{},
+ },
+ }
+
+ for i, tc := range cases {
+ t.Run(fmt.Sprintf("%d-%#v", i, tc.Input), func(t *testing.T) {
+ input := make(Set)
+ expected := make(Set)
+ for _, v := range tc.Input {
+ input.Add(v)
+ }
+ for _, v := range tc.Expected {
+ expected.Add(v)
+ }
+
+ actual := input.Filter(func(v interface{}) bool {
+ return v.(int) < 5
+ })
+ match := actual.Intersection(expected)
+ if match.Len() != expected.Len() {
+ t.Fatalf("bad: %#v", actual.List())
+ }
+ })
+ }
+}
+
+func TestSetCopy(t *testing.T) {
+ a := make(Set)
+ a.Add(1)
+ a.Add(2)
+
+ b := a.Copy()
+ b.Add(3)
+
+ diff := b.Difference(a)
+
+ if diff.Len() != 1 {
+ t.Fatalf("expected single diff value, got %#v", diff)
+ }
+
+ if !diff.Includes(3) {
+ t.Fatalf("diff does not contain 3, got %#v", diff)
+ }
+}
+
+func makeSet(n int) Set {
+ ret := make(Set, n)
+ for i := 0; i < n; i++ {
+ ret.Add(i)
+ }
+ return ret
+}
+
+func BenchmarkSetIntersection_100_100000(b *testing.B) {
+ small := makeSet(100)
+ large := makeSet(100000)
+
+ b.ResetTimer()
+ for n := 0; n < b.N; n++ {
+ small.Intersection(large)
+ }
+}
+
+func BenchmarkSetIntersection_100000_100(b *testing.B) {
+ small := makeSet(100)
+ large := makeSet(100000)
+
+ b.ResetTimer()
+ for n := 0; n < b.N; n++ {
+ large.Intersection(small)
+ }
+}
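+
+// Sketch: Intersection swaps its operands to iterate the smaller set,
+// so the result is the same regardless of receiver size.
+func TestSetIntersectionSymmetry(t *testing.T) {
+	small := makeSet(10)
+	large := makeSet(1000)
+	if small.Intersection(large).Len() != large.Intersection(small).Len() {
+		t.Fatalf("expected symmetric intersection sizes")
+	}
+}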
diff --git a/cli/internal/util/status.go b/cli/internal/util/status.go
new file mode 100644
index 0000000..23ae165
--- /dev/null
+++ b/cli/internal/util/status.go
@@ -0,0 +1,47 @@
+package util
+
+import "fmt"
+
+// CachingStatus represents the api server's perspective
+// on whether remote caching should be allowed
+type CachingStatus int
+
+const (
+ // CachingStatusDisabled indicates that the server will not accept or serve artifacts
+ CachingStatusDisabled CachingStatus = iota
+ // CachingStatusEnabled indicates that the server will accept and serve artifacts
+ CachingStatusEnabled
+ // CachingStatusOverLimit indicates that a usage limit has been hit and the
+ // server will temporarily not accept or serve artifacts
+ CachingStatusOverLimit
+ // CachingStatusPaused indicates that a customer's spending has been paused and the
+ // server will temporarily not accept or serve artifacts
+ CachingStatusPaused
+)
+
+// CachingStatusFromString parses a raw string to a caching status enum value
+func CachingStatusFromString(raw string) (CachingStatus, error) {
+ switch raw {
+ case "disabled":
+ return CachingStatusDisabled, nil
+ case "enabled":
+ return CachingStatusEnabled, nil
+ case "over_limit":
+ return CachingStatusOverLimit, nil
+ case "paused":
+ return CachingStatusPaused, nil
+ default:
+ return CachingStatusDisabled, fmt.Errorf("unknown caching status: %v", raw)
+ }
+}
+
+// CacheDisabledError is an error used to indicate that remote caching
+// is not available.
+type CacheDisabledError struct {
+ Status CachingStatus
+ Message string
+}
+
+func (cd *CacheDisabledError) Error() string {
+ return cd.Message
+}
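+
+// Callers can branch on the status with errors.As (illustrative):
+//
+//	var cd *CacheDisabledError
+//	if errors.As(err, &cd) && cd.Status == CachingStatusOverLimit {
+//		// warn, then continue the run without remote caching
+//	}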
diff --git a/cli/internal/util/task_id.go b/cli/internal/util/task_id.go
new file mode 100644
index 0000000..e4415b6
--- /dev/null
+++ b/cli/internal/util/task_id.go
@@ -0,0 +1,66 @@
+package util
+
+import (
+ "fmt"
+ "strings"
+)
+
+const (
+ // TaskDelimiter separates a package name from a task name in a task id
+ TaskDelimiter = "#"
+ // RootPkgName is the reserved name that specifies the root package
+ RootPkgName = "//"
+)
+
+// GetTaskId returns a package-task identifier (e.g. @feed/thing#build).
+func GetTaskId(pkgName interface{}, target string) string {
+ if IsPackageTask(target) {
+ return target
+ }
+ return fmt.Sprintf("%v%v%v", pkgName, TaskDelimiter, target)
+}
+
+// RootTaskID returns the task id for running the given task in the root package
+func RootTaskID(target string) string {
+ return GetTaskId(RootPkgName, target)
+}
+
+// GetPackageTaskFromId returns a tuple of the package name and target task
+func GetPackageTaskFromId(taskId string) (packageName string, task string) {
+ arr := strings.Split(taskId, TaskDelimiter)
+ return arr[0], arr[1]
+}
+
+// RootTaskTaskName returns the task portion of a root task taskID
+func RootTaskTaskName(taskID string) string {
+ return strings.TrimPrefix(taskID, RootPkgName+TaskDelimiter)
+}
+
+// IsPackageTask returns true if the input is a package-scoped task ID,
+// i.e. it contains the task delimiter preceded by a non-empty package name.
+//
+// Accepted: myapp#build
+// Rejected: #build, build
+func IsPackageTask(task string) bool {
+ return strings.Index(task, TaskDelimiter) > 0
+}
+
+// IsTaskInPackage returns true if the task does not belong to a different package
+// note that this means unscoped tasks will always return true
+func IsTaskInPackage(task string, packageName string) bool {
+ if !IsPackageTask(task) {
+ return true
+ }
+ packageNameExpected, _ := GetPackageTaskFromId(task)
+ return packageNameExpected == packageName
+}
+
+// StripPackageName removes the package portion of a taskID if it
+// is a package task. Non-package tasks are returned unmodified
+func StripPackageName(taskID string) string {
+ if IsPackageTask(taskID) {
+ _, task := GetPackageTaskFromId(taskID)
+ return task
+ }
+ return taskID
+}
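+
+// Worked examples (illustrative):
+//
+//	GetTaskId("web", "build")      // "web#build"
+//	GetTaskId("web", "docs#lint")  // "docs#lint" (already package-scoped)
+//	RootTaskID("lint")             // "//#lint"
+//	StripPackageName("web#build")  // "build"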
diff --git a/cli/internal/util/task_output_mode.go b/cli/internal/util/task_output_mode.go
new file mode 100644
index 0000000..eee42e0
--- /dev/null
+++ b/cli/internal/util/task_output_mode.go
@@ -0,0 +1,100 @@
+package util
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+// TaskOutputMode defines the ways turbo can display task output during a run
+type TaskOutputMode int
+
+const (
+ // FullTaskOutput will show all task output
+ FullTaskOutput TaskOutputMode = iota
+ // NoTaskOutput will hide all task output
+ NoTaskOutput
+ // HashTaskOutput will display turbo-computed task hashes
+ HashTaskOutput
+ // NewTaskOutput will show all new task output and turbo-computed task hashes for cached output
+ NewTaskOutput
+ // ErrorTaskOutput will show task output for failures only; no cache miss/hit messages are emitted
+ ErrorTaskOutput
+)
+
+const (
+ fullTaskOutputString = "full"
+ noTaskOutputString = "none"
+ hashTaskOutputString = "hash-only"
+ newTaskOutputString = "new-only"
+ errorTaskOutputString = "errors-only"
+)
+
+// TaskOutputModeStrings is an array containing the string representations for task output modes
+var TaskOutputModeStrings = []string{
+ fullTaskOutputString,
+ noTaskOutputString,
+ hashTaskOutputString,
+ newTaskOutputString,
+ errorTaskOutputString,
+}
+
+// FromTaskOutputModeString converts a task output mode's string representation into the enum value
+func FromTaskOutputModeString(value string) (TaskOutputMode, error) {
+ switch value {
+ case fullTaskOutputString:
+ return FullTaskOutput, nil
+ case noTaskOutputString:
+ return NoTaskOutput, nil
+ case hashTaskOutputString:
+ return HashTaskOutput, nil
+ case newTaskOutputString:
+ return NewTaskOutput, nil
+ case errorTaskOutputString:
+ return ErrorTaskOutput, nil
+ }
+
+ return FullTaskOutput, fmt.Errorf("invalid task output mode: %v", value)
+}
+
+// ToTaskOutputModeString converts a task output mode enum value into the string representation
+func ToTaskOutputModeString(value TaskOutputMode) (string, error) {
+ switch value {
+ case FullTaskOutput:
+ return fullTaskOutputString, nil
+ case NoTaskOutput:
+ return noTaskOutputString, nil
+ case HashTaskOutput:
+ return hashTaskOutputString, nil
+ case NewTaskOutput:
+ return newTaskOutputString, nil
+ case ErrorTaskOutput:
+ return errorTaskOutputString, nil
+ }
+
+ return "", fmt.Errorf("invalid task output mode: %v", value)
+}
+
+// UnmarshalJSON converts a task output mode string representation into an enum
+func (c *TaskOutputMode) UnmarshalJSON(data []byte) error {
+ var rawTaskOutputMode string
+ if err := json.Unmarshal(data, &rawTaskOutputMode); err != nil {
+ return err
+ }
+
+ taskOutputMode, err := FromTaskOutputModeString(rawTaskOutputMode)
+ if err != nil {
+ return err
+ }
+
+ *c = taskOutputMode
+ return nil
+}
+
+// MarshalJSON converts a task output mode to its string representation
+func (c TaskOutputMode) MarshalJSON() ([]byte, error) {
+ outputModeString, err := ToTaskOutputModeString(c)
+ if err != nil {
+ return nil, err
+ }
+ return json.Marshal(outputModeString)
+}
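+
+// Round-trip sketch (illustrative):
+//
+//	var m TaskOutputMode
+//	_ = json.Unmarshal([]byte(`"hash-only"`), &m) // m == HashTaskOutput
+//	out, _ := json.Marshal(m)                     // []byte(`"hash-only"`)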
diff --git a/cli/internal/workspace/workspace.go b/cli/internal/workspace/workspace.go
new file mode 100644
index 0000000..fcd1eb8
--- /dev/null
+++ b/cli/internal/workspace/workspace.go
@@ -0,0 +1,10 @@
+// Package workspace contains some utilities around managing workspaces
+package workspace
+
+import "github.com/vercel/turbo/cli/internal/fs"
+
+// Catalog holds information about each workspace in the monorepo.
+type Catalog struct {
+ PackageJSONs map[string]*fs.PackageJSON
+ TurboConfigs map[string]*fs.TurboJSON
+}
diff --git a/cli/internal/xxhash/xxhash.go b/cli/internal/xxhash/xxhash.go
new file mode 100644
index 0000000..642ac73
--- /dev/null
+++ b/cli/internal/xxhash/xxhash.go
@@ -0,0 +1,202 @@
+// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
+// at http://cyan4973.github.io/xxHash/.
+//
+// Adapted from https://cs.github.com/evanw/esbuild/blob/0c9ced59c8b3ea3bd8dd5feebafed1f47ed279dd/internal/xxhash
+// Copyright (c) 2016 Caleb Spare. All rights reserved.
+// SPDX-License-Identifier: MIT
+package xxhash
+
+import (
+ "encoding/binary"
+ "math/bits"
+)
+
+const (
+ prime1 uint64 = 11400714785074694791
+ prime2 uint64 = 14029467366897019727
+ prime3 uint64 = 1609587929392839161
+ prime4 uint64 = 9650029242287828579
+ prime5 uint64 = 2870177450012600261
+)
+
+// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
+// possible in the Go code is worth a small (but measurable) performance boost
+// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
+// convenience in the Go code in a few places where we need to intentionally
+// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
+// result overflows a uint64).
+var prime1v = prime1
+
+// Digest implements hash.Hash64.
+type Digest struct {
+ v1 uint64
+ v2 uint64
+ v3 uint64
+ v4 uint64
+ total uint64
+ mem [32]byte
+ n int // how much of mem is used
+}
+
+// New creates a new Digest that computes the 64-bit xxHash algorithm.
+func New() *Digest {
+ var d Digest
+ d.Reset()
+ return &d
+}
+
+// Reset clears the Digest's state so that it can be reused.
+func (d *Digest) Reset() {
+ d.v1 = prime1v + prime2
+ d.v2 = prime2
+ d.v3 = 0
+ d.v4 = -prime1v
+ d.total = 0
+ d.n = 0
+}
+
+// Size always returns 8 bytes.
+func (d *Digest) Size() int { return 8 }
+
+// BlockSize always returns 32 bytes.
+func (d *Digest) BlockSize() int { return 32 }
+
+// Write adds more data to d. It always returns len(b), nil.
+func (d *Digest) Write(b []byte) (n int, err error) {
+ n = len(b)
+ d.total += uint64(n)
+
+ if d.n+n < 32 {
+ // This new data doesn't even fill the current block.
+ copy(d.mem[d.n:], b)
+ d.n += n
+ return
+ }
+
+ if d.n > 0 {
+ // Finish off the partial block.
+ copy(d.mem[d.n:], b)
+ d.v1 = round(d.v1, u64(d.mem[0:8]))
+ d.v2 = round(d.v2, u64(d.mem[8:16]))
+ d.v3 = round(d.v3, u64(d.mem[16:24]))
+ d.v4 = round(d.v4, u64(d.mem[24:32]))
+ b = b[32-d.n:]
+ d.n = 0
+ }
+
+ if len(b) >= 32 {
+ // One or more full blocks left.
+ nw := writeBlocks(d, b)
+ b = b[nw:]
+ }
+
+ // Store any remaining partial block.
+ copy(d.mem[:], b)
+ d.n = len(b)
+
+ return
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+func (d *Digest) Sum(b []byte) []byte {
+ s := d.Sum64()
+ return append(
+ b,
+ byte(s>>56),
+ byte(s>>48),
+ byte(s>>40),
+ byte(s>>32),
+ byte(s>>24),
+ byte(s>>16),
+ byte(s>>8),
+ byte(s),
+ )
+}
+
+// Sum64 returns the current hash.
+func (d *Digest) Sum64() uint64 {
+ var h uint64
+
+ if d.total >= 32 {
+ v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+ h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+ h = mergeRound(h, v1)
+ h = mergeRound(h, v2)
+ h = mergeRound(h, v3)
+ h = mergeRound(h, v4)
+ } else {
+ h = d.v3 + prime5
+ }
+
+ h += d.total
+
+ i, end := 0, d.n
+ for ; i+8 <= end; i += 8 {
+ k1 := round(0, u64(d.mem[i:i+8]))
+ h ^= k1
+ h = rol27(h)*prime1 + prime4
+ }
+ if i+4 <= end {
+ h ^= uint64(u32(d.mem[i:i+4])) * prime1
+ h = rol23(h)*prime2 + prime3
+ i += 4
+ }
+ for i < end {
+ h ^= uint64(d.mem[i]) * prime5
+ h = rol11(h) * prime1
+ i++
+ }
+
+ h ^= h >> 33
+ h *= prime2
+ h ^= h >> 29
+ h *= prime3
+ h ^= h >> 32
+
+ return h
+}
+
+const (
+ magic = "xxh\x06"
+ marshaledSize = len(magic) + 8*5 + 32
+)
+
+func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
+func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
+
+func round(acc, input uint64) uint64 {
+ acc += input * prime2
+ acc = rol31(acc)
+ acc *= prime1
+ return acc
+}
+
+func mergeRound(acc, val uint64) uint64 {
+ val = round(0, val)
+ acc ^= val
+ acc = acc*prime1 + prime4
+ return acc
+}
+
+func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
+func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
+func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
+func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
+func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
+func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
+func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
+func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
+
+func writeBlocks(d *Digest, b []byte) int {
+ v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+ n := len(b)
+ for len(b) >= 32 {
+ v1 = round(v1, u64(b[0:8:len(b)]))
+ v2 = round(v2, u64(b[8:16:len(b)]))
+ v3 = round(v3, u64(b[16:24:len(b)]))
+ v4 = round(v4, u64(b[24:32:len(b)]))
+ b = b[32:len(b):len(b)]
+ }
+ d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
+ return n - len(b)
+}
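+
+// Usage sketch (illustrative): writes may arrive in chunks of any size; the
+// Digest buffers partial 32-byte blocks, so streaming and one-shot hashing
+// agree.
+//
+//	d := New()
+//	_, _ = d.Write([]byte("hello "))
+//	_, _ = d.Write([]byte("world"))
+//	sum := d.Sum64() // equals hashing "hello world" in a single Write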
diff --git a/cli/internal/yaml/apic.go b/cli/internal/yaml/apic.go
new file mode 100644
index 0000000..05fd305
--- /dev/null
+++ b/cli/internal/yaml/apic.go
@@ -0,0 +1,747 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "io"
+)
+
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+ //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+ // Check if we can move the queue at the beginning of the buffer.
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+ if parser.tokens_head != len(parser.tokens) {
+ copy(parser.tokens, parser.tokens[parser.tokens_head:])
+ }
+ parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+ parser.tokens_head = 0
+ }
+ parser.tokens = append(parser.tokens, *token)
+ if pos < 0 {
+ return
+ }
+ copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+ parser.tokens[parser.tokens_head+pos] = *token
+}
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+ *parser = yaml_parser_t{
+ raw_buffer: make([]byte, 0, input_raw_buffer_size),
+ buffer: make([]byte, 0, input_buffer_size),
+ }
+ return true
+}
+
+// Destroy a parser object.
+func yaml_parser_delete(parser *yaml_parser_t) {
+ *parser = yaml_parser_t{}
+}
+
+// String read handler.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ if parser.input_pos == len(parser.input) {
+ return 0, io.EOF
+ }
+ n = copy(buffer, parser.input[parser.input_pos:])
+ parser.input_pos += n
+ return n, nil
+}
+
+// Reader read handler.
+func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ return parser.input_reader.Read(buffer)
+}
+
+// Set a string input.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_string_read_handler
+ parser.input = input
+ parser.input_pos = 0
+}
+
+// Set a file input.
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_reader_read_handler
+ parser.input_reader = r
+}
+
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+ if parser.encoding != yaml_ANY_ENCODING {
+ panic("must set the encoding only once")
+ }
+ parser.encoding = encoding
+}
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{
+ buffer: make([]byte, output_buffer_size),
+ raw_buffer: make([]byte, 0, output_raw_buffer_size),
+ states: make([]yaml_emitter_state_t, 0, initial_stack_size),
+ events: make([]yaml_event_t, 0, initial_queue_size),
+ best_width: -1,
+ }
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+ return nil
+}
+
+// yaml_writer_write_handler uses emitter.output_writer to write the
+// emitted text.
+func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ _, err := emitter.output_writer.Write(buffer)
+ return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_string_write_handler
+ emitter.output_buffer = output_buffer
+}
+
+// Set a file output.
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_writer_write_handler
+ emitter.output_writer = w
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+ if emitter.encoding != yaml_ANY_ENCODING {
+ panic("must set the output encoding only once")
+ }
+ emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+ emitter.canonical = canonical
+}
+
+// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+ if indent < 2 || indent > 9 {
+ indent = 2
+ }
+ emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+ if width < 0 {
+ width = -1
+ }
+ emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+ emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+ emitter.line_break = line_break
+}
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+// assert(token); // Non-NULL token object expected.
+//
+// switch (token.type)
+// {
+// case YAML_TAG_DIRECTIVE_TOKEN:
+// yaml_free(token.data.tag_directive.handle);
+// yaml_free(token.data.tag_directive.prefix);
+// break;
+//
+// case YAML_ALIAS_TOKEN:
+// yaml_free(token.data.alias.value);
+// break;
+//
+// case YAML_ANCHOR_TOKEN:
+// yaml_free(token.data.anchor.value);
+// break;
+//
+// case YAML_TAG_TOKEN:
+// yaml_free(token.data.tag.handle);
+// yaml_free(token.data.tag.suffix);
+// break;
+//
+// case YAML_SCALAR_TOKEN:
+// yaml_free(token.data.scalar.value);
+// break;
+//
+// default:
+// break;
+// }
+//
+// memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+// yaml_char_t *end = start+length;
+// yaml_char_t *pointer = start;
+//
+// while (pointer < end) {
+// unsigned char octet;
+// unsigned int width;
+// unsigned int value;
+// size_t k;
+//
+// octet = pointer[0];
+// width = (octet & 0x80) == 0x00 ? 1 :
+// (octet & 0xE0) == 0xC0 ? 2 :
+// (octet & 0xF0) == 0xE0 ? 3 :
+// (octet & 0xF8) == 0xF0 ? 4 : 0;
+// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+// if (!width) return 0;
+// if (pointer+width > end) return 0;
+// for (k = 1; k < width; k ++) {
+// octet = pointer[k];
+// if ((octet & 0xC0) != 0x80) return 0;
+// value = (value << 6) + (octet & 0x3F);
+// }
+// if (!((width == 1) ||
+// (width == 2 && value >= 0x80) ||
+// (width == 3 && value >= 0x800) ||
+// (width == 4 && value >= 0x10000))) return 0;
+//
+// pointer += width;
+// }
+//
+// return 1;
+//}
+//
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ encoding: encoding,
+ }
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ }
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(
+ event *yaml_event_t,
+ version_directive *yaml_version_directive_t,
+ tag_directives []yaml_tag_directive_t,
+ implicit bool,
+) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: implicit,
+ }
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ implicit: implicit,
+ }
+}
+
+// Create ALIAS.
+func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool {
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ anchor: anchor,
+ }
+ return true
+}
+
+// Create SCALAR.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ anchor: anchor,
+ tag: tag,
+ value: value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-START.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-END.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ }
+ return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ }
+}
+
+// Destroy an event object.
+func yaml_event_delete(event *yaml_event_t) {
+ *event = yaml_event_t{}
+}
+
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+// version_directive *yaml_version_directive_t,
+// tag_directives_start *yaml_tag_directive_t,
+// tag_directives_end *yaml_tag_directive_t,
+// start_implicit int, end_implicit int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// struct {
+// start *yaml_node_t
+// end *yaml_node_t
+// top *yaml_node_t
+// } nodes = { NULL, NULL, NULL }
+// version_directive_copy *yaml_version_directive_t = NULL
+// struct {
+// start *yaml_tag_directive_t
+// end *yaml_tag_directive_t
+// top *yaml_tag_directive_t
+// } tag_directives_copy = { NULL, NULL, NULL }
+// value yaml_tag_directive_t = { NULL, NULL }
+// mark yaml_mark_t = { 0, 0, 0 }
+//
+// assert(document) // Non-NULL document object is expected.
+// assert((tag_directives_start && tag_directives_end) ||
+// (tag_directives_start == tag_directives_end))
+// // Valid tag directives are expected.
+//
+// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+// if (version_directive) {
+// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+// if (!version_directive_copy) goto error
+// version_directive_copy.major = version_directive.major
+// version_directive_copy.minor = version_directive.minor
+// }
+//
+// if (tag_directives_start != tag_directives_end) {
+// tag_directive *yaml_tag_directive_t
+// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+// goto error
+// for (tag_directive = tag_directives_start
+// tag_directive != tag_directives_end; tag_directive ++) {
+// assert(tag_directive.handle)
+// assert(tag_directive.prefix)
+// if (!yaml_check_utf8(tag_directive.handle,
+// strlen((char *)tag_directive.handle)))
+// goto error
+// if (!yaml_check_utf8(tag_directive.prefix,
+// strlen((char *)tag_directive.prefix)))
+// goto error
+// value.handle = yaml_strdup(tag_directive.handle)
+// value.prefix = yaml_strdup(tag_directive.prefix)
+// if (!value.handle || !value.prefix) goto error
+// if (!PUSH(&context, tag_directives_copy, value))
+// goto error
+// value.handle = NULL
+// value.prefix = NULL
+// }
+// }
+//
+// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+// tag_directives_copy.start, tag_directives_copy.top,
+// start_implicit, end_implicit, mark, mark)
+//
+// return 1
+//
+//error:
+// STACK_DEL(&context, nodes)
+// yaml_free(version_directive_copy)
+// while (!STACK_EMPTY(&context, tag_directives_copy)) {
+// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+// }
+// STACK_DEL(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+//
+// return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// tag_directive *yaml_tag_directive_t
+//
+// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// while (!STACK_EMPTY(&context, document.nodes)) {
+// node yaml_node_t = POP(&context, document.nodes)
+// yaml_free(node.tag)
+// switch (node.type) {
+// case YAML_SCALAR_NODE:
+// yaml_free(node.data.scalar.value)
+// break
+// case YAML_SEQUENCE_NODE:
+// STACK_DEL(&context, node.data.sequence.items)
+// break
+// case YAML_MAPPING_NODE:
+// STACK_DEL(&context, node.data.mapping.pairs)
+// break
+// default:
+// assert(0) // Should not happen.
+// }
+// }
+// STACK_DEL(&context, document.nodes)
+//
+// yaml_free(document.version_directive)
+// for (tag_directive = document.tag_directives.start
+// tag_directive != document.tag_directives.end
+// tag_directive++) {
+// yaml_free(tag_directive.handle)
+// yaml_free(tag_directive.prefix)
+// }
+// yaml_free(document.tag_directives.start)
+//
+// memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+// return document.nodes.start + index - 1
+// }
+// return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (document.nodes.top != document.nodes.start) {
+// return document.nodes.start
+// }
+// return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+// tag *yaml_char_t, value *yaml_char_t, length int,
+// style yaml_scalar_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// value_copy *yaml_char_t = NULL
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+// assert(value) // Non-NULL value is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (length < 0) {
+// length = strlen((char *)value)
+// }
+//
+// if (!yaml_check_utf8(value, length)) goto error
+// value_copy = yaml_malloc(length+1)
+// if (!value_copy) goto error
+// memcpy(value_copy, value, length)
+// value_copy[length] = '\0'
+//
+// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// yaml_free(tag_copy)
+// yaml_free(value_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_item_t
+// end *yaml_node_item_t
+// top *yaml_node_item_t
+// } items = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, items)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_pair_t
+// end *yaml_node_pair_t
+// top *yaml_node_pair_t
+// } pairs = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, pairs)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+// sequence int, item int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// assert(document) // Non-NULL document is required.
+// assert(sequence > 0
+// && document.nodes.start + sequence <= document.nodes.top)
+// // Valid sequence id is required.
+// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+// // A sequence node is required.
+// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+// // Valid item id is required.
+//
+// if (!PUSH(&context,
+// document.nodes.start[sequence-1].data.sequence.items, item))
+// return 0
+//
+// return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+// mapping int, key int, value int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// pair yaml_node_pair_t
+//
+// assert(document) // Non-NULL document is required.
+// assert(mapping > 0
+// && document.nodes.start + mapping <= document.nodes.top)
+// // Valid mapping id is required.
+// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+// // A mapping node is required.
+// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+// // Valid key id is required.
+// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+// // Valid value id is required.
+//
+// pair.key = key
+// pair.value = value
+//
+// if (!PUSH(&context,
+// document.nodes.start[mapping-1].data.mapping.pairs, pair))
+// return 0
+//
+// return 1
+//}
+//
+//
diff --git a/cli/internal/yaml/decode.go b/cli/internal/yaml/decode.go
new file mode 100644
index 0000000..0173b69
--- /dev/null
+++ b/cli/internal/yaml/decode.go
@@ -0,0 +1,1000 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+ parser yaml_parser_t
+ event yaml_event_t
+ doc *Node
+ anchors map[string]*Node
+ doneInit bool
+ textless bool
+}
+
+func newParser(b []byte) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+ }
+ if len(b) == 0 {
+ b = []byte{'\n'}
+ }
+ yaml_parser_set_input_string(&p.parser, b)
+ return &p
+}
+
+func newParserFromReader(r io.Reader) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+ }
+ yaml_parser_set_input_reader(&p.parser, r)
+ return &p
+}
+
+func (p *parser) init() {
+ if p.doneInit {
+ return
+ }
+ p.anchors = make(map[string]*Node)
+ p.expect(yaml_STREAM_START_EVENT)
+ p.doneInit = true
+}
+
+func (p *parser) destroy() {
+ if p.event.typ != yaml_NO_EVENT {
+ yaml_event_delete(&p.event)
+ }
+ yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+ if p.event.typ == yaml_NO_EVENT {
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+ }
+ if p.event.typ == yaml_STREAM_END_EVENT {
+ failf("attempted to go past the end of stream; corrupted value?")
+ }
+ if p.event.typ != e {
+ p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+ p.fail()
+ }
+ yaml_event_delete(&p.event)
+ p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+ if p.event.typ != yaml_NO_EVENT {
+ return p.event.typ
+ }
+	// It's a curious choice for the underlying API to generally return a
+	// positive result on success, yet in this case to return true in an
+	// error scenario. This was the source of bugs in the past (issue #666).
+ if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR {
+ p.fail()
+ }
+ return p.event.typ
+}
+
+func (p *parser) fail() {
+ var where string
+ var line int
+ if p.parser.context_mark.line != 0 {
+ line = p.parser.context_mark.line
+ // Scanner errors don't iterate line before returning error
+ if p.parser.error == yaml_SCANNER_ERROR {
+ line++
+ }
+ } else if p.parser.problem_mark.line != 0 {
+ line = p.parser.problem_mark.line
+ // Scanner errors don't iterate line before returning error
+ if p.parser.error == yaml_SCANNER_ERROR {
+ line++
+ }
+ }
+ if line != 0 {
+ where = "line " + strconv.Itoa(line) + ": "
+ }
+ var msg string
+ if len(p.parser.problem) > 0 {
+ msg = p.parser.problem
+ } else {
+ msg = "unknown problem parsing YAML content"
+ }
+ failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *Node, anchor []byte) {
+ if anchor != nil {
+ n.Anchor = string(anchor)
+ p.anchors[n.Anchor] = n
+ }
+}
+
+func (p *parser) parse() *Node {
+ p.init()
+ switch p.peek() {
+ case yaml_SCALAR_EVENT:
+ return p.scalar()
+ case yaml_ALIAS_EVENT:
+ return p.alias()
+ case yaml_MAPPING_START_EVENT:
+ return p.mapping()
+ case yaml_SEQUENCE_START_EVENT:
+ return p.sequence()
+ case yaml_DOCUMENT_START_EVENT:
+ return p.document()
+ case yaml_STREAM_END_EVENT:
+ // Happens when attempting to decode an empty buffer.
+ return nil
+ case yaml_TAIL_COMMENT_EVENT:
+ panic("internal error: unexpected tail comment event (please report)")
+ default:
+ panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String())
+ }
+}
+
+func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node {
+ var style Style
+ if tag != "" && tag != "!" {
+ tag = shortTag(tag)
+ style = TaggedStyle
+ } else if defaultTag != "" {
+ tag = defaultTag
+ } else if kind == ScalarNode {
+ tag, _ = resolve("", value)
+ }
+ n := &Node{
+ Kind: kind,
+ Tag: tag,
+ Value: value,
+ Style: style,
+ }
+ if !p.textless {
+ n.Line = p.event.start_mark.line + 1
+ n.Column = p.event.start_mark.column + 1
+ n.HeadComment = string(p.event.head_comment)
+ n.LineComment = string(p.event.line_comment)
+ n.FootComment = string(p.event.foot_comment)
+ }
+ return n
+}
+
+func (p *parser) parseChild(parent *Node) *Node {
+ child := p.parse()
+ parent.Content = append(parent.Content, child)
+ return child
+}
+
+func (p *parser) document() *Node {
+ n := p.node(DocumentNode, "", "", "")
+ p.doc = n
+ p.expect(yaml_DOCUMENT_START_EVENT)
+ p.parseChild(n)
+ if p.peek() == yaml_DOCUMENT_END_EVENT {
+ n.FootComment = string(p.event.foot_comment)
+ }
+ p.expect(yaml_DOCUMENT_END_EVENT)
+ return n
+}
+
+func (p *parser) alias() *Node {
+ n := p.node(AliasNode, "", "", string(p.event.anchor))
+ n.Alias = p.anchors[n.Value]
+ if n.Alias == nil {
+ failf("unknown anchor '%s' referenced", n.Value)
+ }
+ p.expect(yaml_ALIAS_EVENT)
+ return n
+}
+
+func (p *parser) scalar() *Node {
+ var parsedStyle = p.event.scalar_style()
+ var nodeStyle Style
+ switch {
+ case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0:
+ nodeStyle = DoubleQuotedStyle
+ case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0:
+ nodeStyle = SingleQuotedStyle
+ case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0:
+ nodeStyle = LiteralStyle
+ case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0:
+ nodeStyle = FoldedStyle
+ }
+ var nodeValue = string(p.event.value)
+ var nodeTag = string(p.event.tag)
+ var defaultTag string
+ if nodeStyle == 0 {
+ if nodeValue == "<<" {
+ defaultTag = mergeTag
+ }
+ } else {
+ defaultTag = strTag
+ }
+ n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue)
+ n.Style |= nodeStyle
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SCALAR_EVENT)
+ return n
+}
+
+func (p *parser) sequence() *Node {
+ n := p.node(SequenceNode, seqTag, string(p.event.tag), "")
+ if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 {
+ n.Style |= FlowStyle
+ }
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SEQUENCE_START_EVENT)
+ for p.peek() != yaml_SEQUENCE_END_EVENT {
+ p.parseChild(n)
+ }
+ n.LineComment = string(p.event.line_comment)
+ n.FootComment = string(p.event.foot_comment)
+ p.expect(yaml_SEQUENCE_END_EVENT)
+ return n
+}
+
+func (p *parser) mapping() *Node {
+ n := p.node(MappingNode, mapTag, string(p.event.tag), "")
+ block := true
+ if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 {
+ block = false
+ n.Style |= FlowStyle
+ }
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_MAPPING_START_EVENT)
+ for p.peek() != yaml_MAPPING_END_EVENT {
+ k := p.parseChild(n)
+ if block && k.FootComment != "" {
+ // Must be a foot comment for the prior value when being dedented.
+ if len(n.Content) > 2 {
+ n.Content[len(n.Content)-3].FootComment = k.FootComment
+ k.FootComment = ""
+ }
+ }
+ v := p.parseChild(n)
+ if k.FootComment == "" && v.FootComment != "" {
+ k.FootComment = v.FootComment
+ v.FootComment = ""
+ }
+ if p.peek() == yaml_TAIL_COMMENT_EVENT {
+ if k.FootComment == "" {
+ k.FootComment = string(p.event.foot_comment)
+ }
+ p.expect(yaml_TAIL_COMMENT_EVENT)
+ }
+ }
+ n.LineComment = string(p.event.line_comment)
+ n.FootComment = string(p.event.foot_comment)
+ if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 {
+ n.Content[len(n.Content)-2].FootComment = n.FootComment
+ n.FootComment = ""
+ }
+ p.expect(yaml_MAPPING_END_EVENT)
+ return n
+}
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+
+type decoder struct {
+ doc *Node
+ aliases map[*Node]bool
+ terrors []string
+
+ stringMapType reflect.Type
+ generalMapType reflect.Type
+
+ knownFields bool
+ uniqueKeys bool
+ decodeCount int
+ aliasCount int
+ aliasDepth int
+
+ mergedFields map[interface{}]bool
+}
+
+var (
+ nodeType = reflect.TypeOf(Node{})
+ durationType = reflect.TypeOf(time.Duration(0))
+ stringMapType = reflect.TypeOf(map[string]interface{}{})
+ generalMapType = reflect.TypeOf(map[interface{}]interface{}{})
+ ifaceType = generalMapType.Elem()
+ timeType = reflect.TypeOf(time.Time{})
+ ptrTimeType = reflect.TypeOf(&time.Time{})
+)
+
+func newDecoder() *decoder {
+ d := &decoder{
+ stringMapType: stringMapType,
+ generalMapType: generalMapType,
+ uniqueKeys: true,
+ }
+ d.aliases = make(map[*Node]bool)
+ return d
+}
+
+func (d *decoder) terror(n *Node, tag string, out reflect.Value) {
+ if n.Tag != "" {
+ tag = n.Tag
+ }
+ value := n.Value
+ if tag != seqTag && tag != mapTag {
+ if len(value) > 10 {
+ value = " `" + value[:7] + "...`"
+ } else {
+ value = " `" + value + "`"
+ }
+ }
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type()))
+}
+
+func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) {
+ err := u.UnmarshalYAML(n)
+ if e, ok := err.(*TypeError); ok {
+ d.terrors = append(d.terrors, e.Errors...)
+ return false
+ }
+ if err != nil {
+ fail(err)
+ }
+ return true
+}
+
+func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) {
+ terrlen := len(d.terrors)
+ err := u.UnmarshalYAML(func(v interface{}) (err error) {
+ defer handleErr(&err)
+ d.unmarshal(n, reflect.ValueOf(v))
+ if len(d.terrors) > terrlen {
+ issues := d.terrors[terrlen:]
+ d.terrors = d.terrors[:terrlen]
+ return &TypeError{issues}
+ }
+ return nil
+ })
+ if e, ok := err.(*TypeError); ok {
+ d.terrors = append(d.terrors, e.Errors...)
+ return false
+ }
+ if err != nil {
+ fail(err)
+ }
+ return true
+}
+
+// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
+// if a value is found to implement it.
+// It returns the initialized and dereferenced out value, whether
+// unmarshalling was already done by UnmarshalYAML, and if so whether
+// it succeeded.
+//
+// If n holds a null value, prepare returns before doing anything.
+func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
+ if n.ShortTag() == nullTag {
+ return out, false, false
+ }
+ again := true
+ for again {
+ again = false
+ if out.Kind() == reflect.Ptr {
+ if out.IsNil() {
+ out.Set(reflect.New(out.Type().Elem()))
+ }
+ out = out.Elem()
+ again = true
+ }
+ if out.CanAddr() {
+ outi := out.Addr().Interface()
+ if u, ok := outi.(Unmarshaler); ok {
+ good = d.callUnmarshaler(n, u)
+ return out, true, good
+ }
+ if u, ok := outi.(obsoleteUnmarshaler); ok {
+ good = d.callObsoleteUnmarshaler(n, u)
+ return out, true, good
+ }
+ }
+ }
+ return out, false, false
+}
+
+func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) {
+ if n.ShortTag() == nullTag {
+ return reflect.Value{}
+ }
+ for _, num := range index {
+ for {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ continue
+ }
+ break
+ }
+ v = v.Field(num)
+ }
+ return v
+}
+
+const (
+ // 400,000 decode operations is ~500kb of dense object declarations, or
+ // ~5kb of dense object declarations with 10000% alias expansion
+ alias_ratio_range_low = 400000
+
+ // 4,000,000 decode operations is ~5MB of dense object declarations, or
+ // ~4.5MB of dense object declarations with 10% alias expansion
+ alias_ratio_range_high = 4000000
+
+ // alias_ratio_range is the range over which we scale allowed alias ratios
+ alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low)
+)
+
+func allowedAliasRatio(decodeCount int) float64 {
+ switch {
+ case decodeCount <= alias_ratio_range_low:
+ // allow 99% to come from alias expansion for small-to-medium documents
+ return 0.99
+ case decodeCount >= alias_ratio_range_high:
+ // allow 10% to come from alias expansion for very large documents
+ return 0.10
+ default:
+ // scale smoothly from 99% down to 10% over the range.
+ // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range.
+ // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps).
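+ // For example, at the midpoint of the range (decodeCount == 2,200,000)
+ // this yields 0.99 - 0.89*0.5 = 0.545, i.e. roughly 55% of decode
+ // operations may still come from alias expansion.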
+ return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range)
+ }
+}
+
+func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) {
+ d.decodeCount++
+ if d.aliasDepth > 0 {
+ d.aliasCount++
+ }
+ if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) {
+ failf("document contains excessive aliasing")
+ }
+ if out.Type() == nodeType {
+ out.Set(reflect.ValueOf(n).Elem())
+ return true
+ }
+ switch n.Kind {
+ case DocumentNode:
+ return d.document(n, out)
+ case AliasNode:
+ return d.alias(n, out)
+ }
+ out, unmarshaled, good := d.prepare(n, out)
+ if unmarshaled {
+ return good
+ }
+ switch n.Kind {
+ case ScalarNode:
+ good = d.scalar(n, out)
+ case MappingNode:
+ good = d.mapping(n, out)
+ case SequenceNode:
+ good = d.sequence(n, out)
+ case 0:
+ if n.IsZero() {
+ return d.null(out)
+ }
+ fallthrough
+ default:
+ failf("cannot decode node with unknown kind %d", n.Kind)
+ }
+ return good
+}
+
+func (d *decoder) document(n *Node, out reflect.Value) (good bool) {
+ if len(n.Content) == 1 {
+ d.doc = n
+ d.unmarshal(n.Content[0], out)
+ return true
+ }
+ return false
+}
+
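+// alias resolves an alias node against its previously decoded anchor value,
+// guarding against self-referential cycles such as `a: &x {b: *x}`, where
+// the anchored value contains the alias that refers back to it.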
+func (d *decoder) alias(n *Node, out reflect.Value) (good bool) {
+ if d.aliases[n] {
+ // TODO this could actually be allowed in some circumstances.
+ failf("anchor '%s' value contains itself", n.Value)
+ }
+ d.aliases[n] = true
+ d.aliasDepth++
+ good = d.unmarshal(n.Alias, out)
+ d.aliasDepth--
+ delete(d.aliases, n)
+ return good
+}
+
+var zeroValue reflect.Value
+
+func resetMap(out reflect.Value) {
+ for _, k := range out.MapKeys() {
+ out.SetMapIndex(k, zeroValue)
+ }
+}
+
+func (d *decoder) null(out reflect.Value) bool {
+ if out.CanAddr() {
+ switch out.Kind() {
+ case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+ out.Set(reflect.Zero(out.Type()))
+ return true
+ }
+ }
+ return false
+}
+
+func (d *decoder) scalar(n *Node, out reflect.Value) bool {
+ var tag string
+ var resolved interface{}
+ if n.indicatedString() {
+ tag = strTag
+ resolved = n.Value
+ } else {
+ tag, resolved = resolve(n.Tag, n.Value)
+ if tag == binaryTag {
+ data, err := base64.StdEncoding.DecodeString(resolved.(string))
+ if err != nil {
+ failf("!!binary value contains invalid base64 data")
+ }
+ resolved = string(data)
+ }
+ }
+ if resolved == nil {
+ return d.null(out)
+ }
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ // We've resolved to exactly the type we want, so use that.
+ out.Set(resolvedv)
+ return true
+ }
+ // Perhaps we can use the value as a TextUnmarshaler to
+ // set its value.
+ if out.CanAddr() {
+ u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
+ if ok {
+ var text []byte
+ if tag == binaryTag {
+ text = []byte(resolved.(string))
+ } else {
+ // We let any value be unmarshaled into TextUnmarshaler.
+ // That might be more lax than we'd like, but the
+ // TextUnmarshaler itself should bowl out any dubious values.
+ text = []byte(n.Value)
+ }
+ err := u.UnmarshalText(text)
+ if err != nil {
+ fail(err)
+ }
+ return true
+ }
+ }
+ switch out.Kind() {
+ case reflect.String:
+ if tag == binaryTag {
+ out.SetString(resolved.(string))
+ return true
+ }
+ out.SetString(n.Value)
+ return true
+ case reflect.Interface:
+ out.Set(reflect.ValueOf(resolved))
+ return true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ // This used to work in v2, but it's very unfriendly.
+ isDuration := out.Type() == durationType
+
+ switch resolved := resolved.(type) {
+ case int:
+ if !isDuration && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case int64:
+ if !isDuration && !out.OverflowInt(resolved) {
+ out.SetInt(resolved)
+ return true
+ }
+ case uint64:
+ if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case float64:
+ if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case string:
+ if out.Type() == durationType {
+ d, err := time.ParseDuration(resolved)
+ if err == nil {
+ out.SetInt(int64(d))
+ return true
+ }
+ }
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ switch resolved := resolved.(type) {
+ case int:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case int64:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case uint64:
+ if !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case float64:
+ if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ }
+ case reflect.Bool:
+ switch resolved := resolved.(type) {
+ case bool:
+ out.SetBool(resolved)
+ return true
+ case string:
+ // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html).
+ // It only works if explicitly attempting to unmarshal into a typed bool value.
+ switch resolved {
+ case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON":
+ out.SetBool(true)
+ return true
+ case "n", "N", "no", "No", "NO", "off", "Off", "OFF":
+ out.SetBool(false)
+ return true
+ }
+ }
+ case reflect.Float32, reflect.Float64:
+ switch resolved := resolved.(type) {
+ case int:
+ out.SetFloat(float64(resolved))
+ return true
+ case int64:
+ out.SetFloat(float64(resolved))
+ return true
+ case uint64:
+ out.SetFloat(float64(resolved))
+ return true
+ case float64:
+ out.SetFloat(resolved)
+ return true
+ }
+ case reflect.Struct:
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ out.Set(resolvedv)
+ return true
+ }
+ case reflect.Ptr:
+ panic("yaml internal error: please report the issue")
+ }
+ d.terror(n, tag, out)
+ return false
+}
+
+func settableValueOf(i interface{}) reflect.Value {
+ v := reflect.ValueOf(i)
+ sv := reflect.New(v.Type()).Elem()
+ sv.Set(v)
+ return sv
+}
+
+func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) {
+ l := len(n.Content)
+
+ var iface reflect.Value
+ switch out.Kind() {
+ case reflect.Slice:
+ out.Set(reflect.MakeSlice(out.Type(), l, l))
+ case reflect.Array:
+ if l != out.Len() {
+ failf("invalid array: want %d elements but got %d", out.Len(), l)
+ }
+ case reflect.Interface:
+ // No type hints. Will have to use a generic sequence.
+ iface = out
+ out = settableValueOf(make([]interface{}, l))
+ default:
+ d.terror(n, seqTag, out)
+ return false
+ }
+ et := out.Type().Elem()
+
+ j := 0
+ for i := 0; i < l; i++ {
+ e := reflect.New(et).Elem()
+ if ok := d.unmarshal(n.Content[i], e); ok {
+ out.Index(j).Set(e)
+ j++
+ }
+ }
+ if out.Kind() != reflect.Array {
+ out.Set(out.Slice(0, j))
+ }
+ if iface.IsValid() {
+ iface.Set(out)
+ }
+ return true
+}
+
+func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) {
+ l := len(n.Content)
+ if d.uniqueKeys {
+ nerrs := len(d.terrors)
+ for i := 0; i < l; i += 2 {
+ ni := n.Content[i]
+ for j := i + 2; j < l; j += 2 {
+ nj := n.Content[j]
+ if ni.Kind == nj.Kind && ni.Value == nj.Value {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line))
+ }
+ }
+ }
+ if len(d.terrors) > nerrs {
+ return false
+ }
+ }
+ switch out.Kind() {
+ case reflect.Struct:
+ return d.mappingStruct(n, out)
+ case reflect.Map:
+ // okay
+ case reflect.Interface:
+ iface := out
+ if isStringMap(n) {
+ out = reflect.MakeMap(d.stringMapType)
+ } else {
+ out = reflect.MakeMap(d.generalMapType)
+ }
+ iface.Set(out)
+ default:
+ d.terror(n, mapTag, out)
+ return false
+ }
+
+ outt := out.Type()
+ kt := outt.Key()
+ et := outt.Elem()
+
+ stringMapType := d.stringMapType
+ generalMapType := d.generalMapType
+ if outt.Elem() == ifaceType {
+ if outt.Key().Kind() == reflect.String {
+ d.stringMapType = outt
+ } else if outt.Key() == ifaceType {
+ d.generalMapType = outt
+ }
+ }
+
+ mergedFields := d.mergedFields
+ d.mergedFields = nil
+
+ var mergeNode *Node
+
+ mapIsNew := false
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(outt))
+ mapIsNew = true
+ }
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.Content[i]) {
+ mergeNode = n.Content[i+1]
+ continue
+ }
+ k := reflect.New(kt).Elem()
+ if d.unmarshal(n.Content[i], k) {
+ if mergedFields != nil {
+ ki := k.Interface()
+ if mergedFields[ki] {
+ continue
+ }
+ mergedFields[ki] = true
+ }
+ kkind := k.Kind()
+ if kkind == reflect.Interface {
+ kkind = k.Elem().Kind()
+ }
+ if kkind == reflect.Map || kkind == reflect.Slice {
+ failf("invalid map key: %#v", k.Interface())
+ }
+ e := reflect.New(et).Elem()
+ if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) {
+ out.SetMapIndex(k, e)
+ }
+ }
+ }
+
+ d.mergedFields = mergedFields
+ if mergeNode != nil {
+ d.merge(n, mergeNode, out)
+ }
+
+ d.stringMapType = stringMapType
+ d.generalMapType = generalMapType
+ return true
+}
+
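+// isStringMap reports whether every key of the mapping is a string scalar
+// (or a merge key), in which case a decode into interface{} can use the
+// friendlier map[string]interface{} rather than map[interface{}]interface{}.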
+func isStringMap(n *Node) bool {
+ if n.Kind != MappingNode {
+ return false
+ }
+ l := len(n.Content)
+ for i := 0; i < l; i += 2 {
+ shortTag := n.Content[i].ShortTag()
+ if shortTag != strTag && shortTag != mergeTag {
+ return false
+ }
+ }
+ return true
+}
+
+func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) {
+ sinfo, err := getStructInfo(out.Type())
+ if err != nil {
+ panic(err)
+ }
+
+ var inlineMap reflect.Value
+ var elemType reflect.Type
+ if sinfo.InlineMap != -1 {
+ inlineMap = out.Field(sinfo.InlineMap)
+ elemType = inlineMap.Type().Elem()
+ }
+
+ for _, index := range sinfo.InlineUnmarshalers {
+ field := d.fieldByIndex(n, out, index)
+ d.prepare(n, field)
+ }
+
+ mergedFields := d.mergedFields
+ d.mergedFields = nil
+ var mergeNode *Node
+ var doneFields []bool
+ if d.uniqueKeys {
+ doneFields = make([]bool, len(sinfo.FieldsList))
+ }
+ name := settableValueOf("")
+ l := len(n.Content)
+ for i := 0; i < l; i += 2 {
+ ni := n.Content[i]
+ if isMerge(ni) {
+ mergeNode = n.Content[i+1]
+ continue
+ }
+ if !d.unmarshal(ni, name) {
+ continue
+ }
+ sname := name.String()
+ if mergedFields != nil {
+ if mergedFields[sname] {
+ continue
+ }
+ mergedFields[sname] = true
+ }
+ if info, ok := sinfo.FieldsMap[sname]; ok {
+ if d.uniqueKeys {
+ if doneFields[info.Id] {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type()))
+ continue
+ }
+ doneFields[info.Id] = true
+ }
+ var field reflect.Value
+ if info.Inline == nil {
+ field = out.Field(info.Num)
+ } else {
+ field = d.fieldByIndex(n, out, info.Inline)
+ }
+ d.unmarshal(n.Content[i+1], field)
+ } else if sinfo.InlineMap != -1 {
+ if inlineMap.IsNil() {
+ inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+ }
+ value := reflect.New(elemType).Elem()
+ d.unmarshal(n.Content[i+1], value)
+ inlineMap.SetMapIndex(name, value)
+ } else if d.knownFields {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type()))
+ }
+ }
+
+ d.mergedFields = mergedFields
+ if mergeNode != nil {
+ d.merge(n, mergeNode, out)
+ }
+ return true
+}
+
+func failWantMap() {
+ failf("map merge requires map or sequence of maps as the value")
+}
+
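+// merge applies a "<<" merge value to out. Per the YAML merge key convention
+// the value may be a mapping, an alias to a mapping, or a sequence of either,
+// e.g. `<<: [*defaults, *overrides]`. Keys already present in the parent
+// mapping take precedence over merged ones, which is what the mergedFields
+// bookkeeping below enforces.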
+func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) {
+ mergedFields := d.mergedFields
+ if mergedFields == nil {
+ d.mergedFields = make(map[interface{}]bool)
+ for i := 0; i < len(parent.Content); i += 2 {
+ k := reflect.New(ifaceType).Elem()
+ if d.unmarshal(parent.Content[i], k) {
+ d.mergedFields[k.Interface()] = true
+ }
+ }
+ }
+
+ switch merge.Kind {
+ case MappingNode:
+ d.unmarshal(merge, out)
+ case AliasNode:
+ if merge.Alias != nil && merge.Alias.Kind != MappingNode {
+ failWantMap()
+ }
+ d.unmarshal(merge, out)
+ case SequenceNode:
+ for i := 0; i < len(merge.Content); i++ {
+ ni := merge.Content[i]
+ if ni.Kind == AliasNode {
+ if ni.Alias != nil && ni.Alias.Kind != MappingNode {
+ failWantMap()
+ }
+ } else if ni.Kind != MappingNode {
+ failWantMap()
+ }
+ d.unmarshal(ni, out)
+ }
+ default:
+ failWantMap()
+ }
+
+ d.mergedFields = mergedFields
+}
+
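+// isMerge reports whether n is the "<<" merge key itself, carrying either no
+// tag, the non-specific "!" tag, or the explicit !!merge tag.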
+func isMerge(n *Node) bool {
+ return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag)
+}
diff --git a/cli/internal/yaml/emitterc.go b/cli/internal/yaml/emitterc.go
new file mode 100644
index 0000000..dde20e5
--- /dev/null
+++ b/cli/internal/yaml/emitterc.go
@@ -0,0 +1,2019 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Flush the buffer if needed.
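+// The 5-byte headroom in the buffer checks below comfortably covers the
+// widest single write the emitter performs: a 4-byte UTF-8 sequence or a
+// two-byte CR LF break.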
+func flush(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) {
+ return yaml_emitter_flush(emitter)
+ }
+ return true
+}
+
+// Put a character to the output buffer.
+func put(emitter *yaml_emitter_t, value byte) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.buffer[emitter.buffer_pos] = value
+ emitter.buffer_pos++
+ emitter.column++
+ return true
+}
+
+// Put a line break to the output buffer.
+func put_break(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ switch emitter.line_break {
+ case yaml_CR_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\r'
+ emitter.buffer_pos += 1
+ case yaml_LN_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\n'
+ emitter.buffer_pos += 1
+ case yaml_CRLN_BREAK:
+ emitter.buffer[emitter.buffer_pos+0] = '\r'
+ emitter.buffer[emitter.buffer_pos+1] = '\n'
+ emitter.buffer_pos += 2
+ default:
+ panic("unknown line break setting")
+ }
+ if emitter.column == 0 {
+ emitter.space_above = true
+ }
+ emitter.column = 0
+ emitter.line++
+ // [Go] Do this here and below and drop from everywhere else (see commented lines).
+ emitter.indention = true
+ return true
+}
+
+// Copy a character from a string into the buffer.
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ p := emitter.buffer_pos
+ w := width(s[*i])
+ switch w {
+ case 4:
+ emitter.buffer[p+3] = s[*i+3]
+ fallthrough
+ case 3:
+ emitter.buffer[p+2] = s[*i+2]
+ fallthrough
+ case 2:
+ emitter.buffer[p+1] = s[*i+1]
+ fallthrough
+ case 1:
+ emitter.buffer[p+0] = s[*i+0]
+ default:
+ panic("unknown character width")
+ }
+ emitter.column++
+ emitter.buffer_pos += w
+ *i += w
+ return true
+}
+
+// Write a whole string into the buffer.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+ for i := 0; i < len(s); {
+ if !write(emitter, s, &i) {
+ return false
+ }
+ }
+ return true
+}
+
+// Copy a line break character from a string into the buffer.
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if s[*i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ *i++
+ } else {
+ if !write(emitter, s, i) {
+ return false
+ }
+ if emitter.column == 0 {
+ emitter.space_above = true
+ }
+ emitter.column = 0
+ emitter.line++
+ // [Go] Do this here and above and drop from everywhere else (see commented lines).
+ emitter.indention = true
+ }
+ return true
+}
+
+// Set an emitter error and return false.
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_EMITTER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Emit an event.
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.events = append(emitter.events, *event)
+ for !yaml_emitter_need_more_events(emitter) {
+ event := &emitter.events[emitter.events_head]
+ if !yaml_emitter_analyze_event(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_state_machine(emitter, event) {
+ return false
+ }
+ yaml_event_delete(event)
+ emitter.events_head++
+ }
+ return true
+}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate extra
+// - 1 event for DOCUMENT-START
+// - 2 events for SEQUENCE-START
+// - 3 events for MAPPING-START
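+//
+// This lookahead is what allows checks such as yaml_emitter_check_empty_sequence,
+// yaml_emitter_check_empty_mapping, and yaml_emitter_check_simple_key to inspect
+// upcoming events before an output style is committed.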
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+ if emitter.events_head == len(emitter.events) {
+ return true
+ }
+ var accumulate int
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_DOCUMENT_START_EVENT:
+ accumulate = 1
+ case yaml_SEQUENCE_START_EVENT:
+ accumulate = 2
+ case yaml_MAPPING_START_EVENT:
+ accumulate = 3
+ default:
+ return false
+ }
+ if len(emitter.events)-emitter.events_head > accumulate {
+ return false
+ }
+ var level int
+ for i := emitter.events_head; i < len(emitter.events); i++ {
+ switch emitter.events[i].typ {
+ case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+ level++
+ case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+ level--
+ }
+ if level == 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// Append a directive to the directives stack.
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+ }
+ }
+
+ // [Go] Do we actually need to copy this given garbage collection
+ // and the lack of deallocating destructors?
+ tag_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(tag_copy.handle, value.handle)
+ copy(tag_copy.prefix, value.prefix)
+ emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+ return true
+}
+
+// Increase the indentation level.
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
+ emitter.indents = append(emitter.indents, emitter.indent)
+ if emitter.indent < 0 {
+ if flow {
+ emitter.indent = emitter.best_indent
+ } else {
+ emitter.indent = 0
+ }
+ } else if !indentless {
+ // [Go] This was changed so that indentations are more regular.
+ if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE {
+ // The first indent inside a sequence will just skip the "- " indicator.
+ emitter.indent += 2
+ } else {
+ // Everything else aligns to the chosen indentation.
+ emitter.indent = emitter.best_indent * ((emitter.indent + emitter.best_indent) / emitter.best_indent)
+ }
+ }
+ return true
+}
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ switch emitter.state {
+ default:
+ case yaml_EMIT_STREAM_START_STATE:
+ return yaml_emitter_emit_stream_start(emitter, event)
+
+ case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, true)
+
+ case yaml_EMIT_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, false)
+
+ case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+ return yaml_emitter_emit_document_content(emitter, event)
+
+ case yaml_EMIT_DOCUMENT_END_STATE:
+ return yaml_emitter_emit_document_end(emitter, event)
+
+ case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false)
+
+ case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true)
+
+ case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false)
+
+ case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false)
+
+ case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true)
+
+ case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false)
+
+ case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_END_STATE:
+ return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
+ }
+ panic("invalid emitter state")
+}
+
+// Expect STREAM-START.
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_STREAM_START_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
+ }
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = event.encoding
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = yaml_UTF8_ENCODING
+ }
+ }
+ if emitter.best_indent < 2 || emitter.best_indent > 9 {
+ emitter.best_indent = 2
+ }
+ if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+ emitter.best_width = 80
+ }
+ if emitter.best_width < 0 {
+ emitter.best_width = 1<<31 - 1
+ }
+ if emitter.line_break == yaml_ANY_BREAK {
+ emitter.line_break = yaml_LN_BREAK
+ }
+
+ emitter.indent = -1
+ emitter.line = 0
+ emitter.column = 0
+ emitter.whitespace = true
+ emitter.indention = true
+ emitter.space_above = true
+ emitter.foot_indent = -1
+
+ if emitter.encoding != yaml_UTF8_ENCODING {
+ if !yaml_emitter_write_bom(emitter) {
+ return false
+ }
+ }
+ emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+ return true
+}
+
+// Expect DOCUMENT-START or STREAM-END.
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+ if event.typ == yaml_DOCUMENT_START_EVENT {
+
+ if event.version_directive != nil {
+ if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+ return false
+ }
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(default_tag_directives); i++ {
+ tag_directive := &default_tag_directives[i]
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+ return false
+ }
+ }
+
+ implicit := event.implicit
+ if !first || emitter.canonical {
+ implicit = false
+ }
+
+ if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if event.version_directive != nil {
+ implicit = false
+ if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if len(event.tag_directives) > 0 {
+ implicit = false
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ if yaml_emitter_check_empty_document(emitter) {
+ implicit = false
+ }
+ if !implicit {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+ return false
+ }
+ if emitter.canonical || true {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ if len(emitter.head_comment) > 0 {
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if !put_break(emitter) {
+ return false
+ }
+ }
+
+ emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+ return true
+ }
+
+ if event.typ == yaml_STREAM_END_EVENT {
+ if emitter.open_ended {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_END_STATE
+ return true
+ }
+
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
+
+// Expect the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_emit_node(emitter, event, true, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect DOCUMENT-END.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_DOCUMENT_END_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+ }
+ // [Go] Force document foot separation.
+ emitter.foot_indent = 0
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ emitter.foot_indent = -1
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !event.implicit {
+ // [Go] Allocate the slice elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+ emitter.tag_directives = emitter.tag_directives[:0]
+ return true
+}
+
+// Expect a flow item node.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ if emitter.canonical && !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.column == 0 || emitter.canonical && !first {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+
+ return true
+ }
+
+ if !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if emitter.column == 0 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE)
+ } else {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+ }
+ if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+ return false
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect a flow key node.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_MAPPING_END_EVENT {
+ if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+
+ if !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+
+ if emitter.column == 0 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node.
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+ return false
+ }
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE)
+ } else {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+ }
+ if !yaml_emitter_emit_node(emitter, event, false, false, true, false) {
+ return false
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect a block item node.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, false) {
+ return false
+ }
+ }
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+ if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if len(emitter.line_comment) > 0 {
+ // [Go] A line comment was provided for the key. That's unusual as the
+ // scanner associates line comments with the value. Either way,
+ // save the line comment and render it appropriately later.
+ emitter.key_line_comment = emitter.line_comment
+ emitter.line_comment = nil
+ }
+ if yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+ return false
+ }
+ }
+ if len(emitter.key_line_comment) > 0 {
+ // [Go] Line comments are generally associated with the value, but when there's
+ // no value on the same line as a mapping key they end up attached to the
+ // key itself.
+ if event.typ == yaml_SCALAR_EVENT {
+ if len(emitter.line_comment) == 0 {
+ // A scalar is coming and it has no line comments by itself yet,
+ // so just let it handle the line comment as usual. If it has a
+ // line comment, we can't have both so the one from the key is lost.
+ emitter.line_comment = emitter.key_line_comment
+ emitter.key_line_comment = nil
+ }
+ } else if event.sequence_style() != yaml_FLOW_SEQUENCE_STYLE && (event.typ == yaml_MAPPING_START_EVENT || event.typ == yaml_SEQUENCE_START_EVENT) {
+ // An indented block follows, so write the comment right now.
+ emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+ if !yaml_emitter_emit_node(emitter, event, false, false, true, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0
+}
+
+// Expect a node.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+ root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+ emitter.root_context = root
+ emitter.sequence_context = sequence
+ emitter.mapping_context = mapping
+ emitter.simple_key_context = simple_key
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ return yaml_emitter_emit_alias(emitter, event)
+ case yaml_SCALAR_EVENT:
+ return yaml_emitter_emit_scalar(emitter, event)
+ case yaml_SEQUENCE_START_EVENT:
+ return yaml_emitter_emit_sequence_start(emitter, event)
+ case yaml_MAPPING_START_EVENT:
+ return yaml_emitter_emit_mapping_start(emitter, event)
+ default:
+ return yaml_emitter_set_emitter_error(emitter,
+ fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
+ }
+}
+
+// Expect ALIAS.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SCALAR.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_select_scalar_style(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ if !yaml_emitter_process_scalar(emitter) {
+ return false
+ }
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SEQUENCE-START.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+ yaml_emitter_check_empty_sequence(emitter) {
+ emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+ }
+ return true
+}
+
+// Expect MAPPING-START.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+ yaml_emitter_check_empty_mapping(emitter) {
+ emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+ }
+ return true
+}
+
+// Check if the document content is an empty scalar.
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+ return false // [Go] Huh?
+}
+
+// Check if the next events represent an empty sequence.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+ length := 0
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_ALIAS_EVENT:
+ length += len(emitter.anchor_data.anchor)
+ case yaml_SCALAR_EVENT:
+ if emitter.scalar_data.multiline {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix) +
+ len(emitter.scalar_data.value)
+ case yaml_SEQUENCE_START_EVENT:
+ if !yaml_emitter_check_empty_sequence(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ case yaml_MAPPING_START_EVENT:
+ if !yaml_emitter_check_empty_mapping(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ default:
+ return false
+ }
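+ // Keep simple keys short and readable: the YAML spec allows simple keys of
+ // up to 1024 characters, but the emitter caps them at 128 bytes here.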
+ return length <= 128
+}
+
+// Determine an acceptable scalar style.
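+//
+// Styles are progressively downgraded: plain falls back to single-quoted,
+// single-quoted to double-quoted, and the block styles to double-quoted,
+// whenever the analyzed content or the current context disallows them.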
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+ if no_tag && !event.implicit && !event.quoted_implicit {
+ return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+ }
+
+ style := event.scalar_style()
+ if style == yaml_ANY_SCALAR_STYLE {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ if emitter.canonical {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ if emitter.simple_key_context && emitter.scalar_data.multiline {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ if style == yaml_PLAIN_SCALAR_STYLE {
+ if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+ emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if no_tag && !event.implicit {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+ if !emitter.scalar_data.single_quoted_allowed {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+ if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+
+ if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+ emitter.tag_data.handle = []byte{'!'}
+ }
+ emitter.scalar_data.style = style
+ return true
+}
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+ if emitter.anchor_data.anchor == nil {
+ return true
+ }
+ c := []byte{'&'}
+ if emitter.anchor_data.alias {
+ c[0] = '*'
+ }
+ if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+ return false
+ }
+ return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+ if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+ return true
+ }
+ if len(emitter.tag_data.handle) > 0 {
+ if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+ return false
+ }
+ if len(emitter.tag_data.suffix) > 0 {
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ }
+ } else {
+ // [Go] Allocate these slices elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+ return false
+ }
+ }
+ return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+ switch emitter.scalar_data.style {
+ case yaml_PLAIN_SCALAR_STYLE:
+ return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_LITERAL_SCALAR_STYLE:
+ return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+ case yaml_FOLDED_SCALAR_STYLE:
+ return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+ }
+ panic("unknown scalar style")
+}
+
+// Write a head comment.
+func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool {
+ if len(emitter.tail_comment) > 0 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.tail_comment) {
+ return false
+ }
+ emitter.tail_comment = emitter.tail_comment[:0]
+ emitter.foot_indent = emitter.indent
+ if emitter.foot_indent < 0 {
+ emitter.foot_indent = 0
+ }
+ }
+
+ if len(emitter.head_comment) == 0 {
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.head_comment) {
+ return false
+ }
+ emitter.head_comment = emitter.head_comment[:0]
+ return true
+}
+
+// Write a line comment.
+func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool {
+ if len(emitter.line_comment) == 0 {
+ return true
+ }
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.line_comment) {
+ return false
+ }
+ emitter.line_comment = emitter.line_comment[:0]
+ return true
+}
+
+// Write a foot comment.
+func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool {
+ if len(emitter.foot_comment) == 0 {
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.foot_comment) {
+ return false
+ }
+ emitter.foot_comment = emitter.foot_comment[:0]
+ emitter.foot_indent = emitter.indent
+ if emitter.foot_indent < 0 {
+ emitter.foot_indent = 0
+ }
+ return true
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+ if version_directive.major != 1 || version_directive.minor != 1 {
+ return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+ }
+ return true
+}
+
+// Check if a %TAG directive is valid.
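+// A well-formed directive looks like "%TAG !e! tag:example.com,2000:app/":
+// the handle must be delimited by '!' characters and the prefix non-empty.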
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+ handle := tag_directive.handle
+ prefix := tag_directive.prefix
+ if len(handle) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+ }
+ if handle[0] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+ }
+ if handle[len(handle)-1] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+ }
+ for i := 1; i < len(handle)-1; i += width(handle[i]) {
+ if !is_alpha(handle, i) {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
+ }
+ }
+ if len(prefix) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+ }
+ return true
+}
+
+// Check if an anchor is valid.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+ if len(anchor) == 0 {
+ problem := "anchor value must not be empty"
+ if alias {
+ problem = "alias value must not be empty"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ for i := 0; i < len(anchor); i += width(anchor[i]) {
+ if !is_alpha(anchor, i) {
+ problem := "anchor value must contain alphanumerical characters only"
+ if alias {
+ problem = "alias value must contain alphanumerical characters only"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ }
+ emitter.anchor_data.anchor = anchor
+ emitter.anchor_data.alias = alias
+ return true
+}
+
+// Check if a tag is valid.
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+ if len(tag) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+ }
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ tag_directive := &emitter.tag_directives[i]
+ if bytes.HasPrefix(tag, tag_directive.prefix) {
+ emitter.tag_data.handle = tag_directive.handle
+ emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+ return true
+ }
+ }
+ emitter.tag_data.suffix = tag
+ return true
+}
+
+// Check if a scalar is valid.
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ var (
+ block_indicators = false
+ flow_indicators = false
+ line_breaks = false
+ special_characters = false
+ tab_characters = false
+
+ leading_space = false
+ leading_break = false
+ trailing_space = false
+ trailing_break = false
+ break_space = false
+ space_break = false
+
+ preceded_by_whitespace = false
+ followed_by_whitespace = false
+ previous_space = false
+ previous_break = false
+ )
+
+ emitter.scalar_data.value = value
+
+ if len(value) == 0 {
+ emitter.scalar_data.multiline = false
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = false
+ return true
+ }
+
+ if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+ block_indicators = true
+ flow_indicators = true
+ }
+
+ preceded_by_whitespace = true
+ for i, w := 0, 0; i < len(value); i += w {
+ w = width(value[i])
+ followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+ if i == 0 {
+ switch value[i] {
+ case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+ flow_indicators = true
+ block_indicators = true
+ case '?', ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '-':
+ if followed_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ } else {
+ switch value[i] {
+ case ',', '?', '[', ']', '{', '}':
+ flow_indicators = true
+ case ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '#':
+ if preceded_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ }
+
+ if value[i] == '\t' {
+ tab_characters = true
+ } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
+ special_characters = true
+ }
+ if is_space(value, i) {
+ if i == 0 {
+ leading_space = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_space = true
+ }
+ if previous_break {
+ break_space = true
+ }
+ previous_space = true
+ previous_break = false
+ } else if is_break(value, i) {
+ line_breaks = true
+ if i == 0 {
+ leading_break = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_break = true
+ }
+ if previous_space {
+ space_break = true
+ }
+ previous_space = false
+ previous_break = true
+ } else {
+ previous_space = false
+ previous_break = false
+ }
+
+ // [Go]: Why the 'z' variant? The index can't be the end of the string here, as that's the loop condition.
+ preceded_by_whitespace = is_blankz(value, i)
+ }
+
+ emitter.scalar_data.multiline = line_breaks
+ emitter.scalar_data.flow_plain_allowed = true
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = true
+
+ if leading_space || leading_break || trailing_space || trailing_break {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if trailing_space {
+ emitter.scalar_data.block_allowed = false
+ }
+ if break_space {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ }
+ if space_break || tab_characters || special_characters {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ }
+ if space_break || special_characters {
+ emitter.scalar_data.block_allowed = false
+ }
+ if line_breaks {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if flow_indicators {
+ emitter.scalar_data.flow_plain_allowed = false
+ }
+ if block_indicators {
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ return true
+}
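+
+// A rough summary of how the analysis above constrains styles (illustrative):
+//
+//	"plain"     -> every style remains allowed
+//	"one\ntwo"  -> multiline: plain styles are ruled out
+//	" padded "  -> leading/trailing spaces rule out plain styles; the
+//	               trailing space also rules out block styles
+//	"a, b"      -> ',' is a flow indicator: flow plain is ruled out while
+//	               block plain remains allowed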
+
+// Check if the event data is valid.
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ emitter.anchor_data.anchor = nil
+ emitter.tag_data.handle = nil
+ emitter.tag_data.suffix = nil
+ emitter.scalar_data.value = nil
+
+ if len(event.head_comment) > 0 {
+ emitter.head_comment = event.head_comment
+ }
+ if len(event.line_comment) > 0 {
+ emitter.line_comment = event.line_comment
+ }
+ if len(event.foot_comment) > 0 {
+ emitter.foot_comment = event.foot_comment
+ }
+ if len(event.tail_comment) > 0 {
+ emitter.tail_comment = event.tail_comment
+ }
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
+ return false
+ }
+
+ case yaml_SCALAR_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ if !yaml_emitter_analyze_scalar(emitter, event.value) {
+ return false
+ }
+
+ case yaml_SEQUENCE_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+
+ case yaml_MAPPING_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Write the BOM character.
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+ if !flush(emitter) {
+ return false
+ }
+ pos := emitter.buffer_pos
+ emitter.buffer[pos+0] = '\xEF'
+ emitter.buffer[pos+1] = '\xBB'
+ emitter.buffer[pos+2] = '\xBF'
+ emitter.buffer_pos += 3
+ return true
+}
+
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
+ indent := emitter.indent
+ if indent < 0 {
+ indent = 0
+ }
+ if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if emitter.foot_indent == indent {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ for emitter.column < indent {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ emitter.whitespace = true
+ //emitter.indention = true
+ emitter.space_above = false
+ emitter.foot_indent = -1
+ return true
+}
+
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, indicator) {
+ return false
+ }
+ emitter.whitespace = is_whitespace
+ emitter.indention = (emitter.indention && is_indention)
+ emitter.open_ended = false
+ return true
+}
+
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ for i := 0; i < len(value); {
+ var must_write bool
+ switch value[i] {
+ case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
+ must_write = true
+ default:
+ must_write = is_alpha(value, i)
+ }
+ if must_write {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ } else {
+ w := width(value[i])
+ for k := 0; k < w; k++ {
+ octet := value[i]
+ i++
+ if !put(emitter, '%') {
+ return false
+ }
+
+ c := octet >> 4
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+
+ c = octet & 0x0f
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+ }
+ }
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
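+
+// For illustration: the loop above percent-encodes every octet outside the
+// alphanumeric set and the listed punctuation, so a space in a tag suffix is
+// written as "%20", and a multi-byte UTF-8 rune is escaped octet by octet
+// (e.g. U+00E9 becomes "%C3%A9").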
+
+func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ if len(value) > 0 && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+
+ if len(value) > 0 {
+ emitter.whitespace = false
+ }
+ emitter.indention = false
+ if emitter.root_context {
+ emitter.open_ended = true
+ }
+
+ return true
+}
+
+func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
+ return false
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if value[i] == '\'' {
+ if !put(emitter, '\'') {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ spaces := false
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
+ return false
+ }
+
+ for i := 0; i < len(value); {
+ if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
+ is_bom(value, i) || is_break(value, i) ||
+ value[i] == '"' || value[i] == '\\' {
+
+ octet := value[i]
+
+ var w int
+ var v rune
+ switch {
+ case octet&0x80 == 0x00:
+ w, v = 1, rune(octet&0x7F)
+ case octet&0xE0 == 0xC0:
+ w, v = 2, rune(octet&0x1F)
+ case octet&0xF0 == 0xE0:
+ w, v = 3, rune(octet&0x0F)
+ case octet&0xF8 == 0xF0:
+ w, v = 4, rune(octet&0x07)
+ }
+ for k := 1; k < w; k++ {
+ octet = value[i+k]
+ v = (v << 6) + (rune(octet) & 0x3F)
+ }
+ i += w
+
+ if !put(emitter, '\\') {
+ return false
+ }
+
+ var ok bool
+ switch v {
+ case 0x00:
+ ok = put(emitter, '0')
+ case 0x07:
+ ok = put(emitter, 'a')
+ case 0x08:
+ ok = put(emitter, 'b')
+ case 0x09:
+ ok = put(emitter, 't')
+ case 0x0A:
+ ok = put(emitter, 'n')
+ case 0x0b:
+ ok = put(emitter, 'v')
+ case 0x0c:
+ ok = put(emitter, 'f')
+ case 0x0d:
+ ok = put(emitter, 'r')
+ case 0x1b:
+ ok = put(emitter, 'e')
+ case 0x22:
+ ok = put(emitter, '"')
+ case 0x5c:
+ ok = put(emitter, '\\')
+ case 0x85:
+ ok = put(emitter, 'N')
+ case 0xA0:
+ ok = put(emitter, '_')
+ case 0x2028:
+ ok = put(emitter, 'L')
+ case 0x2029:
+ ok = put(emitter, 'P')
+ default:
+ if v <= 0xFF {
+ ok = put(emitter, 'x')
+ w = 2
+ } else if v <= 0xFFFF {
+ ok = put(emitter, 'u')
+ w = 4
+ } else {
+ ok = put(emitter, 'U')
+ w = 8
+ }
+ for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
+ digit := byte((v >> uint(k)) & 0x0F)
+ if digit < 10 {
+ ok = put(emitter, digit+'0')
+ } else {
+ ok = put(emitter, digit+'A'-10)
+ }
+ }
+ }
+ if !ok {
+ return false
+ }
+ spaces = false
+ } else if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if is_space(value, i+1) {
+ if !put(emitter, '\\') {
+ return false
+ }
+ }
+ i += width(value[i])
+ } else if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = true
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
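+
+// Illustrative escapes produced by the writer above: '\t' -> "\t",
+// '\n' -> "\n", U+0085 -> "\N", and any other unprintable code point falls
+// back to "\xXX", "\uXXXX" or "\UXXXXXXXX" depending on its magnitude.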
+
+func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
+ if is_space(value, 0) || is_break(value, 0) {
+ indent_hint := []byte{'0' + byte(emitter.best_indent)}
+ if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
+ return false
+ }
+ }
+
+ emitter.open_ended = false
+
+ var chomp_hint [1]byte
+ if len(value) == 0 {
+ chomp_hint[0] = '-'
+ } else {
+ i := len(value) - 1
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if !is_break(value, i) {
+ chomp_hint[0] = '-'
+ } else if i == 0 {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ } else {
+ i--
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if is_break(value, i) {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ }
+ }
+ }
+ if chomp_hint[0] != 0 {
+ if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
+ return false
+ }
+ }
+ return true
+}
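+
+// Illustrative chomping examples for the hints above: "text\n" needs no hint
+// (clip is the default), "text" is written with "-" (strip: no final break to
+// preserve), and "text\n\n" is written with "+" (keep: the trailing empty
+// line survives a round trip). A value starting with a space or break
+// additionally gets an explicit indentation indicator.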
+
+func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ //emitter.indention = true
+ emitter.whitespace = true
+ breaks := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+
+ return true
+}
+
+func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+
+ //emitter.indention = true
+ emitter.whitespace = true
+
+ breaks := true
+ leading_spaces := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !breaks && !leading_spaces && value[i] == '\n' {
+ k := 0
+ for is_break(value, k) {
+ k += width(value[k])
+ }
+ if !is_blankz(value, k) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ leading_spaces = is_blank(value, i)
+ }
+ if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+ return true
+}
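+
+// For illustration: in folded style ('>'), a single line break is folded back
+// into a space on load, so the writer above emits an extra break before a hard
+// '\n' between non-blank lines to keep it a real line break on a round trip,
+// and it may wrap long lines at a space once the column passes best_width.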
+
+func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool {
+ breaks := false
+ pound := false
+ for i := 0; i < len(comment); {
+ if is_break(comment, i) {
+ if !write_break(emitter, comment, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ pound = false
+ } else {
+ if breaks && !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !pound {
+ if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) {
+ return false
+ }
+ pound = true
+ }
+ if !write(emitter, comment, &i) {
+ return false
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+ if !breaks && !put_break(emitter) {
+ return false
+ }
+
+ emitter.whitespace = true
+ //emitter.indention = true
+ return true
+}
diff --git a/cli/internal/yaml/encode.go b/cli/internal/yaml/encode.go
new file mode 100644
index 0000000..de9e72a
--- /dev/null
+++ b/cli/internal/yaml/encode.go
@@ -0,0 +1,577 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "encoding"
+ "fmt"
+ "io"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+type encoder struct {
+ emitter yaml_emitter_t
+ event yaml_event_t
+ out []byte
+ flow bool
+ indent int
+ doneInit bool
+}
+
+func newEncoder() *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
+ yaml_emitter_set_output_string(&e.emitter, &e.out)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ return e
+}
+
+func newEncoderWithWriter(w io.Writer) *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
+ yaml_emitter_set_output_writer(&e.emitter, w)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ return e
+}
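+
+// These constructors are driven by the package-level Marshal and Encoder APIs
+// defined elsewhere in this package; a minimal sketch of that call sequence
+// (illustrative, not the verbatim implementation) is:
+//
+//	e := newEncoder()
+//	defer e.destroy()
+//	e.marshalDoc("", reflect.ValueOf(v))
+//	e.finish()
+//	out := e.out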
+
+func (e *encoder) init() {
+ if e.doneInit {
+ return
+ }
+ if e.indent == 0 {
+ e.indent = 4
+ }
+ e.emitter.best_indent = e.indent
+ yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
+ e.emit()
+ e.doneInit = true
+}
+
+func (e *encoder) finish() {
+ e.emitter.open_ended = false
+ yaml_stream_end_event_initialize(&e.event)
+ e.emit()
+}
+
+func (e *encoder) destroy() {
+ yaml_emitter_delete(&e.emitter)
+}
+
+func (e *encoder) emit() {
+ // This will internally delete the e.event value.
+ e.must(yaml_emitter_emit(&e.emitter, &e.event))
+}
+
+func (e *encoder) must(ok bool) {
+ if !ok {
+ msg := e.emitter.problem
+ if msg == "" {
+ msg = "unknown problem generating YAML content"
+ }
+ failf("%s", msg)
+ }
+}
+
+func (e *encoder) marshalDoc(tag string, in reflect.Value) {
+ e.init()
+ var node *Node
+ if in.IsValid() {
+ node, _ = in.Interface().(*Node)
+ }
+ if node != nil && node.Kind == DocumentNode {
+ e.nodev(in)
+ } else {
+ yaml_document_start_event_initialize(&e.event, nil, nil, true)
+ e.emit()
+ e.marshal(tag, in)
+ yaml_document_end_event_initialize(&e.event, true)
+ e.emit()
+ }
+}
+
+func (e *encoder) marshal(tag string, in reflect.Value) {
+ tag = shortTag(tag)
+ if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
+ e.nilv()
+ return
+ }
+ iface := in.Interface()
+ switch value := iface.(type) {
+ case *Node:
+ e.nodev(in)
+ return
+ case Node:
+ if !in.CanAddr() {
+ var n = reflect.New(in.Type()).Elem()
+ n.Set(in)
+ in = n
+ }
+ e.nodev(in.Addr())
+ return
+ case time.Time:
+ e.timev(tag, in)
+ return
+ case *time.Time:
+ e.timev(tag, in.Elem())
+ return
+ case time.Duration:
+ e.stringv(tag, reflect.ValueOf(value.String()))
+ return
+ case Marshaler:
+ v, err := value.MarshalYAML()
+ if err != nil {
+ fail(err)
+ }
+ if v == nil {
+ e.nilv()
+ return
+ }
+ e.marshal(tag, reflect.ValueOf(v))
+ return
+ case encoding.TextMarshaler:
+ text, err := value.MarshalText()
+ if err != nil {
+ fail(err)
+ }
+ in = reflect.ValueOf(string(text))
+ case nil:
+ e.nilv()
+ return
+ }
+ switch in.Kind() {
+ case reflect.Interface:
+ e.marshal(tag, in.Elem())
+ case reflect.Map:
+ e.mapv(tag, in)
+ case reflect.Ptr:
+ e.marshal(tag, in.Elem())
+ case reflect.Struct:
+ e.structv(tag, in)
+ case reflect.Slice, reflect.Array:
+ e.slicev(tag, in)
+ case reflect.String:
+ e.stringv(tag, in)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ e.intv(tag, in)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ e.uintv(tag, in)
+ case reflect.Float32, reflect.Float64:
+ e.floatv(tag, in)
+ case reflect.Bool:
+ e.boolv(tag, in)
+ default:
+ panic("cannot marshal type: " + in.Type().String())
+ }
+}
+
+func (e *encoder) mapv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ keys := keyList(in.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ e.marshal("", k)
+ e.marshal("", in.MapIndex(k))
+ }
+ })
+}
+
+func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) {
+ for _, num := range index {
+ for {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ return reflect.Value{}
+ }
+ v = v.Elem()
+ continue
+ }
+ break
+ }
+ v = v.Field(num)
+ }
+ return v
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+ sinfo, err := getStructInfo(in.Type())
+ if err != nil {
+ panic(err)
+ }
+ e.mappingv(tag, func() {
+ for _, info := range sinfo.FieldsList {
+ var value reflect.Value
+ if info.Inline == nil {
+ value = in.Field(info.Num)
+ } else {
+ value = e.fieldByIndex(in, info.Inline)
+ if !value.IsValid() {
+ continue
+ }
+ }
+ if info.OmitEmpty && isZero(value) {
+ continue
+ }
+ e.marshal("", reflect.ValueOf(info.Key))
+ e.flow = info.Flow
+ e.marshal("", value)
+ }
+ if sinfo.InlineMap >= 0 {
+ m := in.Field(sinfo.InlineMap)
+ if m.Len() > 0 {
+ e.flow = false
+ keys := keyList(m.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ if _, found := sinfo.FieldsMap[k.String()]; found {
+ panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String()))
+ }
+ e.marshal("", k)
+ e.flow = false
+ e.marshal("", m.MapIndex(k))
+ }
+ }
+ }
+ })
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+ implicit := tag == ""
+ style := yaml_BLOCK_MAPPING_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
+ e.emit()
+ f()
+ yaml_mapping_end_event_initialize(&e.event)
+ e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+ implicit := tag == ""
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ n := in.Len()
+ for i := 0; i < n; i++ {
+ e.marshal("", in.Index(i))
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.emit()
+}
+
+// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but such strings should still be marshalled
+// quoted, for the time being, for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+ // Fast path.
+ if s == "" {
+ return false
+ }
+ c := s[0]
+ if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+ return false
+ }
+ // Do the full match.
+ return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
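+
+// Illustrative matches for the pattern above: "1:20" and "-1:20:30.5" are in
+// base 60 notation and will be quoted, while "1.5" and "a:b" are not.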
+
+// isOldBool returns whether s is bool notation as defined in YAML 1.1.
+//
+// We continue to force strings that YAML 1.1 would interpret as booleans to be
+// rendered as quoted strings so that the marshalled output remains valid for
+// YAML 1.1 parsers.
+func isOldBool(s string) (result bool) {
+ switch s {
+ case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON",
+ "n", "N", "no", "No", "NO", "off", "Off", "OFF":
+ return true
+ default:
+ return false
+ }
+}
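+
+// For illustration: Marshal("yes") therefore emits the double-quoted scalar
+// "yes" rather than plain yes, which a YAML 1.1 parser would read as a boolean.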
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+ var style yaml_scalar_style_t
+ s := in.String()
+ canUsePlain := true
+ switch {
+ case !utf8.ValidString(s):
+ if tag == binaryTag {
+ failf("explicitly tagged !!binary data must be base64-encoded")
+ }
+ if tag != "" {
+ failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+ }
+ // It can't be encoded directly as YAML so use a binary tag
+ // and encode it as base64.
+ tag = binaryTag
+ s = encodeBase64(s)
+ case tag == "":
+ // Check whether the string would resolve to a specific
+ // (non-string) tag when encoded unquoted. If it wouldn't,
+ // there's no need to quote it.
+ rtag, _ := resolve("", s)
+ canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s))
+ }
+ // Note: it's possible for user code to emit invalid YAML
+ // if they explicitly specify a tag and a string containing
+ // text that's incompatible with that tag.
+ switch {
+ case strings.Contains(s, "\n"):
+ if e.flow {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ } else {
+ style = yaml_LITERAL_SCALAR_STYLE
+ }
+ case canUsePlain:
+ style = yaml_PLAIN_SCALAR_STYLE
+ default:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ e.emitScalar(s, "", tag, style, nil, nil, nil, nil)
+}
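+
+// A rough summary of the style choice above (illustrative):
+//
+//	"hello"        -> plain scalar
+//	"123"          -> double-quoted, since unquoted it would resolve as an int
+//	"yes"          -> double-quoted, for YAML 1.1 bool compatibility
+//	"a\nb"         -> literal block scalar (double-quoted in flow context)
+//	invalid UTF-8  -> !!binary tag with base64-encoded content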
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+ var s string
+ if in.Bool() {
+ s = "true"
+ } else {
+ s = "false"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+ s := strconv.FormatInt(in.Int(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+ s := strconv.FormatUint(in.Uint(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) timev(tag string, in reflect.Value) {
+ t := in.Interface().(time.Time)
+ s := t.Format(time.RFC3339Nano)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+ // Issue #352: When formatting, use the precision of the underlying value
+ precision := 64
+ if in.Kind() == reflect.Float32 {
+ precision = 32
+ }
+
+ s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
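+
+// For illustration, the special cases above map Go's non-finite spellings onto
+// YAML's: math.Inf(1) -> .inf, math.Inf(-1) -> -.inf, math.NaN() -> .nan.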
+
+func (e *encoder) nilv() {
+ e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) {
+ // TODO Kill this function. Replace all initialize calls with their underlying Go literals.
+ implicit := tag == ""
+ if !implicit {
+ tag = longTag(tag)
+ }
+ e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+ e.event.head_comment = head
+ e.event.line_comment = line
+ e.event.foot_comment = foot
+ e.event.tail_comment = tail
+ e.emit()
+}
+
+func (e *encoder) nodev(in reflect.Value) {
+ e.node(in.Interface().(*Node), "")
+}
+
+func (e *encoder) node(node *Node, tail string) {
+ // Zero nodes behave as nil.
+ if node.Kind == 0 && node.IsZero() {
+ e.nilv()
+ return
+ }
+
+ // If the tag was not explicitly requested, and dropping it won't change the
+ // implicit tag of the value, don't include it in the presentation.
+ var tag = node.Tag
+ var stag = shortTag(tag)
+ var forceQuoting bool
+ if tag != "" && node.Style&TaggedStyle == 0 {
+ if node.Kind == ScalarNode {
+ if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 {
+ tag = ""
+ } else {
+ rtag, _ := resolve("", node.Value)
+ if rtag == stag {
+ tag = ""
+ } else if stag == strTag {
+ tag = ""
+ forceQuoting = true
+ }
+ }
+ } else {
+ var rtag string
+ switch node.Kind {
+ case MappingNode:
+ rtag = mapTag
+ case SequenceNode:
+ rtag = seqTag
+ }
+ if rtag == stag {
+ tag = ""
+ }
+ }
+ }
+
+ switch node.Kind {
+ case DocumentNode:
+ yaml_document_start_event_initialize(&e.event, nil, nil, true)
+ e.event.head_comment = []byte(node.HeadComment)
+ e.emit()
+ for _, node := range node.Content {
+ e.node(node, "")
+ }
+ yaml_document_end_event_initialize(&e.event, true)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case SequenceNode:
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if node.Style&FlowStyle != 0 {
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style))
+ e.event.head_comment = []byte(node.HeadComment)
+ e.emit()
+ for _, node := range node.Content {
+ e.node(node, "")
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.event.line_comment = []byte(node.LineComment)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case MappingNode:
+ style := yaml_BLOCK_MAPPING_STYLE
+ if node.Style&FlowStyle != 0 {
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)
+ e.event.tail_comment = []byte(tail)
+ e.event.head_comment = []byte(node.HeadComment)
+ e.emit()
+
+ // The tail logic below moves the foot comment of prior keys to the following key,
+ // since the value for each key may be a nested structure and the foot needs to be
+ // processed only after the entirety of the value has been streamed. The last tail
+ // is processed with the mapping end event.
+ var tail string
+ for i := 0; i+1 < len(node.Content); i += 2 {
+ k := node.Content[i]
+ foot := k.FootComment
+ if foot != "" {
+ kopy := *k
+ kopy.FootComment = ""
+ k = &kopy
+ }
+ e.node(k, tail)
+ tail = foot
+
+ v := node.Content[i+1]
+ e.node(v, "")
+ }
+
+ yaml_mapping_end_event_initialize(&e.event)
+ e.event.tail_comment = []byte(tail)
+ e.event.line_comment = []byte(node.LineComment)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case AliasNode:
+ yaml_alias_event_initialize(&e.event, []byte(node.Value))
+ e.event.head_comment = []byte(node.HeadComment)
+ e.event.line_comment = []byte(node.LineComment)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case ScalarNode:
+ value := node.Value
+ if !utf8.ValidString(value) {
+ if stag == binaryTag {
+ failf("explicitly tagged !!binary data must be base64-encoded")
+ }
+ if stag != "" {
+ failf("cannot marshal invalid UTF-8 data as %s", stag)
+ }
+ // It can't be encoded directly as YAML so use a binary tag
+ // and encode it as base64.
+ tag = binaryTag
+ value = encodeBase64(value)
+ }
+
+ style := yaml_PLAIN_SCALAR_STYLE
+ switch {
+ case node.Style&DoubleQuotedStyle != 0:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ case node.Style&SingleQuotedStyle != 0:
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ case node.Style&LiteralStyle != 0:
+ style = yaml_LITERAL_SCALAR_STYLE
+ case node.Style&FoldedStyle != 0:
+ style = yaml_FOLDED_SCALAR_STYLE
+ case strings.Contains(value, "\n"):
+ style = yaml_LITERAL_SCALAR_STYLE
+ case forceQuoting:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail))
+ default:
+ failf("cannot encode node with unknown kind %d", node.Kind)
+ }
+}
diff --git a/cli/internal/yaml/parserc.go b/cli/internal/yaml/parserc.go
new file mode 100644
index 0000000..25fe823
--- /dev/null
+++ b/cli/internal/yaml/parserc.go
@@ -0,0 +1,1274 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document ::= block_node DOCUMENT-END*
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// | properties (block_content | indentless_block_sequence)?
+// | block_content
+// | indentless_block_sequence
+// block_node ::= ALIAS
+// | properties block_content?
+// | block_content
+// flow_node ::= ALIAS
+// | properties flow_content?
+// | flow_content
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content ::= block_collection | flow_collection | SCALAR
+// flow_content ::= flow_collection | SCALAR
+// block_collection ::= block_sequence | block_mapping
+// flow_collection ::= flow_sequence | flow_mapping
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// block_mapping ::= BLOCK-MAPPING_START
+// ((KEY block_node_or_indentless_sequence?)?
+// (VALUE block_node_or_indentless_sequence?)?)*
+// BLOCK-END
+// flow_sequence ::= FLOW-SEQUENCE-START
+// (flow_sequence_entry FLOW-ENTRY)*
+// flow_sequence_entry?
+// FLOW-SEQUENCE-END
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping ::= FLOW-MAPPING-START
+// (flow_mapping_entry FLOW-ENTRY)*
+// flow_mapping_entry?
+// FLOW-MAPPING-END
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
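+
+// As an illustrative example of the grammar above, the document
+//
+//	key:
+//	  - 1
+//	  - 2
+//
+// is parsed into the event sequence STREAM-START, DOCUMENT-START,
+// MAPPING-START, SCALAR("key"), SEQUENCE-START, SCALAR("1"), SCALAR("2"),
+// SEQUENCE-END, MAPPING-END, DOCUMENT-END, STREAM-END.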
+
+// Peek the next token in the token queue.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+ if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+ token := &parser.tokens[parser.tokens_head]
+ yaml_parser_unfold_comments(parser, token)
+ return token
+ }
+ return nil
+}
+
+// yaml_parser_unfold_comments walks through the comments queue and joins all
+// comments recorded at or before the position of the provided token into the
+// respective top-level comment slices in the parser.
+func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) {
+ for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index {
+ comment := &parser.comments[parser.comments_head]
+ if len(comment.head) > 0 {
+ if token.typ == yaml_BLOCK_END_TOKEN {
+ // No heads on ends, so keep comment.head for a follow-up token.
+ break
+ }
+ if len(parser.head_comment) > 0 {
+ parser.head_comment = append(parser.head_comment, '\n')
+ }
+ parser.head_comment = append(parser.head_comment, comment.head...)
+ }
+ if len(comment.foot) > 0 {
+ if len(parser.foot_comment) > 0 {
+ parser.foot_comment = append(parser.foot_comment, '\n')
+ }
+ parser.foot_comment = append(parser.foot_comment, comment.foot...)
+ }
+ if len(comment.line) > 0 {
+ if len(parser.line_comment) > 0 {
+ parser.line_comment = append(parser.line_comment, '\n')
+ }
+ parser.line_comment = append(parser.line_comment, comment.line...)
+ }
+ *comment = yaml_comment_t{}
+ parser.comments_head++
+ }
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+func skip_token(parser *yaml_parser_t) {
+ parser.token_available = false
+ parser.tokens_parsed++
+ parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+ parser.tokens_head++
+}
+
+// Get the next event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+ // Erase the event object.
+ *event = yaml_event_t{}
+
+ // No events after the end of the stream or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+ return true
+ }
+
+ // Generate the next event.
+ return yaml_parser_state_machine(parser, event)
+}
+
+// Set parser error.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+ //trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+ switch parser.state {
+ case yaml_PARSE_STREAM_START_STATE:
+ return yaml_parser_parse_stream_start(parser, event)
+
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, true)
+
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, false)
+
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return yaml_parser_parse_document_content(parser, event)
+
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return yaml_parser_parse_document_end(parser, event)
+
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, true, false)
+
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return yaml_parser_parse_node(parser, event, true, true)
+
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, false, false)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_block_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+ default:
+ panic("invalid parser state")
+ }
+}
+
+// Parse the production:
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+//
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_STREAM_START_TOKEN {
+ return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+ }
+ parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ encoding: token.encoding,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+//
+// *
+//
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // Parse extra document end indicators.
+ if !implicit {
+ for token.typ == yaml_DOCUMENT_END_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+ token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+ token.typ != yaml_DOCUMENT_START_TOKEN &&
+ token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an implicit document.
+ if !yaml_parser_process_directives(parser, nil, nil) {
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+ var head_comment []byte
+ if len(parser.head_comment) > 0 {
+ // [Go] Scan the header comment backwards, and if an empty line is found, break
+ // the header so that the part before the last empty line goes into the document
+ // header, while the rest goes into a follow-up event.
+ for i := len(parser.head_comment) - 1; i > 0; i-- {
+ if parser.head_comment[i] == '\n' {
+ if i == len(parser.head_comment)-1 {
+ head_comment = parser.head_comment[:i]
+ parser.head_comment = parser.head_comment[i+1:]
+ break
+ } else if parser.head_comment[i-1] == '\n' {
+ head_comment = parser.head_comment[:i-1]
+ parser.head_comment = parser.head_comment[i+1:]
+ break
+ }
+ }
+ }
+ }
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+
+ head_comment: head_comment,
+ }
+
+ } else if token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an explicit document.
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+ start_mark := token.start_mark
+ if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+ return false
+ }
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_DOCUMENT_START_TOKEN {
+ yaml_parser_set_parser_error(parser,
+ "did not find expected <document start>", token.start_mark)
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+ end_mark := token.end_mark
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: false,
+ }
+ skip_token(parser)
+
+ } else {
+ // Parse the stream end.
+ parser.state = yaml_PARSE_END_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ }
+
+ return true
+}
+
+// Parse the productions:
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+// ***********
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+ token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+ token.typ == yaml_DOCUMENT_START_TOKEN ||
+ token.typ == yaml_DOCUMENT_END_TOKEN ||
+ token.typ == yaml_STREAM_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ return yaml_parser_process_empty_scalar(parser, event,
+ token.start_mark)
+ }
+ return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+//
+// *************
+//
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ implicit := true
+ if token.typ == yaml_DOCUMENT_END_TOKEN {
+ end_mark = token.end_mark
+ skip_token(parser)
+ implicit = false
+ }
+
+ parser.tag_directives = parser.tag_directives[:0]
+
+ parser.state = yaml_PARSE_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ implicit: implicit,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ if len(event.head_comment) > 0 && len(event.foot_comment) == 0 {
+ event.foot_comment = event.head_comment
+ event.head_comment = nil
+ }
+ return true
+}
+
+func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) {
+ event.head_comment = parser.head_comment
+ event.line_comment = parser.line_comment
+ event.foot_comment = parser.foot_comment
+ parser.head_comment = nil
+ parser.line_comment = nil
+ parser.foot_comment = nil
+ parser.tail_comment = nil
+ parser.stem_comment = nil
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence ::=
+//
+// ALIAS
+// *****
+// | properties (block_content | indentless_block_sequence)?
+// ********** *
+// | block_content | indentless_block_sequence
+// *
+//
+// block_node ::= ALIAS
+//
+// *****
+// | properties block_content?
+// ********** *
+// | block_content
+// *
+//
+// flow_node ::= ALIAS
+//
+// *****
+// | properties flow_content?
+// ********** *
+// | flow_content
+// *
+//
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+//
+// *************************
+//
+// block_content ::= block_collection | flow_collection | SCALAR
+//
+// ******
+//
+// flow_content ::= flow_collection | SCALAR
+//
+// ******
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+ //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_ALIAS_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ anchor: token.value,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ var tag_token bool
+ var tag_handle, tag_suffix, anchor []byte
+ var tag_mark yaml_mark_t
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ } else if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ start_mark = token.start_mark
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ var tag []byte
+ if tag_token {
+ if len(tag_handle) == 0 {
+ tag = tag_suffix
+ tag_suffix = nil
+ } else {
+ for i := range parser.tag_directives {
+ if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+ tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+ tag = append(tag, tag_suffix...)
+ break
+ }
+ }
+ if len(tag) == 0 {
+ yaml_parser_set_parser_error_context(parser,
+ "while parsing a node", start_mark,
+ "found undefined tag handle", tag_mark)
+ return false
+ }
+ }
+ }
+
+ implicit := len(tag) == 0
+ if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_SCALAR_TOKEN {
+ var plain_implicit, quoted_implicit bool
+ end_mark = token.end_mark
+ if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+ plain_implicit = true
+ } else if len(tag) == 0 {
+ quoted_implicit = true
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ value: token.value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(token.style),
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+ }
+ if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+ // [Go] Some of the events below can be merged as they differ only on style.
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+ }
+ yaml_parser_set_event_comments(parser, event)
+ return true
+ }
+ if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ yaml_parser_set_event_comments(parser, event)
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ if parser.stem_comment != nil {
+ event.head_comment = parser.stem_comment
+ parser.stem_comment = nil
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+ }
+ if parser.stem_comment != nil {
+ event.head_comment = parser.stem_comment
+ parser.stem_comment = nil
+ }
+ return true
+ }
+ if len(anchor) > 0 || len(tag) > 0 {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ quoted_implicit: false,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+ }
+
+ context := "while parsing a flow node"
+ if block {
+ context = "while parsing a block node"
+ }
+ yaml_parser_set_parser_error_context(parser, context, start_mark,
+ "did not find expected node content", token.start_mark)
+ return false
+}
+
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+//
+// ******************** *********** * *********
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ prior_head_len := len(parser.head_comment)
+ skip_token(parser)
+ yaml_parser_split_stem_comment(parser, prior_head_len)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ }
+ if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block collection", context_mark,
+ "did not find expected '-' indicator", token.start_mark)
+}
+
+// Parse the productions:
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+//
+// *********** *
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ prior_head_len := len(parser.head_comment)
+ skip_token(parser)
+ yaml_parser_split_stem_comment(parser, prior_head_len)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+ token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ }
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark?
+ }
+ return true
+}
+
+// Split stem comment from head comment.
+//
+// When a sequence or map is found under a sequence entry, the former head comment
+// is assigned to the underlying sequence or map as a whole, not to the individual
+// sequence or map entry as would otherwise be expected. To handle this case, the
+// previous head comment is moved aside as the stem comment.
+func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) {
+ if stem_len == 0 {
+ return
+ }
+
+ token := peek_token(parser)
+ if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN {
+ return
+ }
+
+ parser.stem_comment = parser.head_comment[:stem_len]
+ if len(parser.head_comment) == stem_len {
+ parser.head_comment = nil
+ } else {
+ // Copy suffix to prevent very strange bugs if someone ever appends
+ // further bytes to the prefix in the stem_comment slice above.
+ parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...)
+ }
+}
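+
+// For illustration (matching the description above): a comment line written
+// directly above a '-' entry whose value is itself a sequence or mapping ends
+// up attached, via stem_comment, to that nested collection as a whole rather
+// than to its first element.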
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+//
+// *******************
+// ((KEY block_node_or_indentless_sequence?)?
+// *** *
+// (VALUE block_node_or_indentless_sequence?)?)*
+//
+// BLOCK-END
+// *********
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // [Go] A tail comment was left over from the mapping value just processed. Emit it
+ // as its own event, since it must be processed with that value and not the following key.
+ if len(parser.tail_comment) > 0 {
+ *event = yaml_event_t{
+ typ: yaml_TAIL_COMMENT_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ foot_comment: parser.tail_comment,
+ }
+ parser.tail_comment = nil
+ return true
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ } else if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block mapping", context_mark,
+ "did not find expected key", token.start_mark)
+}
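+
+// Illustrative sketch (editor's addition, not from the original sources):
+// for the document "a: 1" the scanner yields BLOCK-MAPPING-START, KEY,
+// SCALAR("a"), VALUE, SCALAR("1"), BLOCK-END. This function consumes the KEY
+// token and delegates the key scalar to yaml_parser_parse_node; on BLOCK-END
+// it emits a MAPPING-END event and pops the previous parser state.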
+
+// Parse the productions:
+//
+//	block_mapping        ::= BLOCK-MAPPING-START
+//	                         ((KEY block_node_or_indentless_sequence?)?
+//	                         (VALUE block_node_or_indentless_sequence?)?)*
+//	                          ***** *
+//	                         BLOCK-END
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+//
+//	flow_sequence        ::= FLOW-SEQUENCE-START
+//	                         *******************
+//	                         (flow_sequence_entry FLOW-ENTRY)*
+//	                          *                   **********
+//	                         flow_sequence_entry?
+//	                         *
+//	                         FLOW-SEQUENCE-END
+//	                         *****************
+//
+//	flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//	                         *
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow sequence", context_mark,
+ "did not find expected ',' or ']'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ implicit: true,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ skip_token(parser)
+ return true
+ } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ yaml_parser_set_event_comments(parser, event)
+
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+//
+//	flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//	                                     *** *
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ mark := token.end_mark
+ skip_token(parser)
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+//
+//	flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//	                                                     ***** *
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+//
+//	flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//	                                                                      *
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be end_mark?
+ }
+ return true
+}
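+
+// Illustrative sketch (editor's addition, not from the original sources):
+// a KEY/VALUE pair inside a flow sequence, as in "[ a: 1 ]", becomes an
+// implicit single-pair mapping, producing SEQUENCE-START, MAPPING-START,
+// SCALAR("a"), SCALAR("1"), MAPPING-END, SEQUENCE-END.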
+
+// Parse the productions:
+//
+//	flow_mapping         ::= FLOW-MAPPING-START
+//	                         ******************
+//	                         (flow_mapping_entry FLOW-ENTRY)*
+//	                          *                  **********
+//	                         flow_mapping_entry?
+//	                         ******************
+//	                         FLOW-MAPPING-END
+//	                         ****************
+//
+//	flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//	                         *           *** *
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow mapping", context_mark,
+ "did not find expected ',' or '}'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ } else {
+ parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+//
+//	flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//	                         *                           ***** *
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if empty {
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Generate an empty scalar event.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: mark,
+ end_mark: mark,
+ value: nil, // Empty
+ implicit: true,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+}
+
+var default_tag_directives = []yaml_tag_directive_t{
+ {[]byte("!"), []byte("!")},
+ {[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
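+
+// Illustrative note (editor's addition, not from the original sources): with
+// the defaults above, "!!str" resolves against the "!!" handle to
+// "tag:yaml.org,2002:str", while "!local" resolves against the "!" handle
+// to "!local".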
+
+// Parse directives.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+ version_directive_ref **yaml_version_directive_t,
+ tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+ if version_directive != nil {
+ yaml_parser_set_parser_error(parser,
+ "found duplicate %YAML directive", token.start_mark)
+ return false
+ }
+ if token.major != 1 || token.minor != 1 {
+ yaml_parser_set_parser_error(parser,
+ "found incompatible YAML document", token.start_mark)
+ return false
+ }
+ version_directive = &yaml_version_directive_t{
+ major: token.major,
+ minor: token.minor,
+ }
+ } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ value := yaml_tag_directive_t{
+ handle: token.value,
+ prefix: token.prefix,
+ }
+ if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+ return false
+ }
+ tag_directives = append(tag_directives, value)
+ }
+
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+
+ for i := range default_tag_directives {
+ if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+ return false
+ }
+ }
+
+ if version_directive_ref != nil {
+ *version_directive_ref = version_directive
+ }
+ if tag_directives_ref != nil {
+ *tag_directives_ref = tag_directives
+ }
+ return true
+}
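+
+// Illustrative note (editor's addition, not from the original sources): per the
+// checks above, a document starting with "%YAML 1.2" is rejected with "found
+// incompatible YAML document" (only version 1.1 is accepted), and a second
+// "%YAML 1.1" line triggers the duplicate-directive error.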
+
+// Append a tag directive to the directives stack.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+ for i := range parser.tag_directives {
+ if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+ }
+ }
+
+ // [Go] I suspect the copy is unnecessary. This was likely done
+ // because there was no way to track ownership of the data.
+ value_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(value_copy.handle, value.handle)
+ copy(value_copy.prefix, value.prefix)
+ parser.tag_directives = append(parser.tag_directives, value_copy)
+ return true
+}
diff --git a/cli/internal/yaml/readerc.go b/cli/internal/yaml/readerc.go
new file mode 100644
index 0000000..56af245
--- /dev/null
+++ b/cli/internal/yaml/readerc.go
@@ -0,0 +1,434 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "io"
+)
+
+// Set the reader error and return 0.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+ parser.error = yaml_READER_ERROR
+ parser.problem = problem
+ parser.problem_offset = offset
+ parser.problem_value = value
+ return false
+}
+
+// Byte order marks.
+const (
+ bom_UTF8 = "\xef\xbb\xbf"
+ bom_UTF16LE = "\xff\xfe"
+ bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM (byte order mark).
+// If no BOM is found, the UTF-8 encoding is assumed. Return true on success,
+// false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+	// Ensure that we have enough bytes in the raw buffer.
+ for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+ if !yaml_parser_update_raw_buffer(parser) {
+ return false
+ }
+ }
+
+ // Determine the encoding.
+ buf := parser.raw_buffer
+ pos := parser.raw_buffer_pos
+ avail := len(buf) - pos
+ if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+ parser.encoding = yaml_UTF16LE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+ parser.encoding = yaml_UTF16BE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+ parser.encoding = yaml_UTF8_ENCODING
+ parser.raw_buffer_pos += 3
+ parser.offset += 3
+ } else {
+ parser.encoding = yaml_UTF8_ENCODING
+ }
+ return true
+}
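+
+// Illustrative note (editor's addition, not from the original sources): an
+// input beginning with the bytes 0xFF 0xFE is detected as UTF-16LE and the two
+// BOM bytes are skipped; with no recognizable BOM, UTF-8 is assumed and no
+// bytes are consumed.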
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+ size_read := 0
+
+ // Return if the raw buffer is full.
+ if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+ return true
+ }
+
+ // Return on EOF.
+ if parser.eof {
+ return true
+ }
+
+ // Move the remaining bytes in the raw buffer to the beginning.
+ if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+ copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+ }
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+ parser.raw_buffer_pos = 0
+
+ // Call the read handler to fill the buffer.
+ size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+ if err == io.EOF {
+ parser.eof = true
+ } else if err != nil {
+ return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+ }
+ return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+ if parser.read_handler == nil {
+ panic("read handler must be set")
+ }
+
+	// [Go] This function was changed to guarantee the requested length at EOF.
+	// The fact that we need to do this is pretty awful, but the description above
+	// implies that should be the case, and there are tests that depend on it.
+
+ // If the EOF flag is set and the raw buffer is empty, do nothing.
+ if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+ // [Go] ACTUALLY! Read the documentation of this function above.
+ // This is just broken. To return true, we need to have the
+ // given length in the buffer. Not doing that means every single
+		// check that calls this function to make sure the buffer has a
+		// given length would end up panicking (in Go) or accessing invalid memory (in C).
+ //return true
+ }
+
+ // Return if the buffer contains enough characters.
+ if parser.unread >= length {
+ return true
+ }
+
+ // Determine the input encoding if it is not known yet.
+ if parser.encoding == yaml_ANY_ENCODING {
+ if !yaml_parser_determine_encoding(parser) {
+ return false
+ }
+ }
+
+ // Move the unread characters to the beginning of the buffer.
+ buffer_len := len(parser.buffer)
+ if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+ copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+ buffer_len -= parser.buffer_pos
+ parser.buffer_pos = 0
+ } else if parser.buffer_pos == buffer_len {
+ buffer_len = 0
+ parser.buffer_pos = 0
+ }
+
+ // Open the whole buffer for writing, and cut it before returning.
+ parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+ // Fill the buffer until it has enough characters.
+ first := true
+ for parser.unread < length {
+
+ // Fill the raw buffer if necessary.
+ if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+ if !yaml_parser_update_raw_buffer(parser) {
+ parser.buffer = parser.buffer[:buffer_len]
+ return false
+ }
+ }
+ first = false
+
+ // Decode the raw buffer.
+ inner:
+ for parser.raw_buffer_pos != len(parser.raw_buffer) {
+ var value rune
+ var width int
+
+ raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+ // Decode the next character.
+ switch parser.encoding {
+ case yaml_UTF8_ENCODING:
+ // Decode a UTF-8 character. Check RFC 3629
+ // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+ //
+ // The following table (taken from the RFC) is used for
+ // decoding.
+ //
+ // Char. number range | UTF-8 octet sequence
+ // (hexadecimal) | (binary)
+ // --------------------+------------------------------------
+ // 0000 0000-0000 007F | 0xxxxxxx
+ // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+ // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+ // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ //
+ // Additionally, the characters in the range 0xD800-0xDFFF
+ // are prohibited as they are reserved for use with UTF-16
+ // surrogate pairs.
+
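+			// Worked example (editor's addition, not in the original sources):
+			// U+00E9 ('é') is encoded as 0xC3 0xA9. The leading octet 0xC3
+			// matches 110xxxxx, so width = 2 and value = 0xC3&0x1F = 0x03;
+			// the trailing octet contributes 0xA9&0x3F = 0x29, giving
+			// (0x03<<6)+0x29 = 0xE9.
+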
+ // Determine the length of the UTF-8 sequence.
+ octet := parser.raw_buffer[parser.raw_buffer_pos]
+ switch {
+ case octet&0x80 == 0x00:
+ width = 1
+ case octet&0xE0 == 0xC0:
+ width = 2
+ case octet&0xF0 == 0xE0:
+ width = 3
+ case octet&0xF8 == 0xF0:
+ width = 4
+ default:
+ // The leading octet is invalid.
+ return yaml_parser_set_reader_error(parser,
+ "invalid leading UTF-8 octet",
+ parser.offset, int(octet))
+ }
+
+ // Check if the raw buffer contains an incomplete character.
+ if width > raw_unread {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-8 octet sequence",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Decode the leading octet.
+ switch {
+ case octet&0x80 == 0x00:
+ value = rune(octet & 0x7F)
+ case octet&0xE0 == 0xC0:
+ value = rune(octet & 0x1F)
+ case octet&0xF0 == 0xE0:
+ value = rune(octet & 0x0F)
+ case octet&0xF8 == 0xF0:
+ value = rune(octet & 0x07)
+ default:
+ value = 0
+ }
+
+ // Check and decode the trailing octets.
+ for k := 1; k < width; k++ {
+ octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+ // Check if the octet is valid.
+ if (octet & 0xC0) != 0x80 {
+ return yaml_parser_set_reader_error(parser,
+ "invalid trailing UTF-8 octet",
+ parser.offset+k, int(octet))
+ }
+
+ // Decode the octet.
+ value = (value << 6) + rune(octet&0x3F)
+ }
+
+ // Check the length of the sequence against the value.
+ switch {
+ case width == 1:
+ case width == 2 && value >= 0x80:
+ case width == 3 && value >= 0x800:
+ case width == 4 && value >= 0x10000:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "invalid length of a UTF-8 sequence",
+ parser.offset, -1)
+ }
+
+ // Check the range of the value.
+ if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+ return yaml_parser_set_reader_error(parser,
+ "invalid Unicode character",
+ parser.offset, int(value))
+ }
+
+ case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+ var low, high int
+ if parser.encoding == yaml_UTF16LE_ENCODING {
+ low, high = 0, 1
+ } else {
+ low, high = 1, 0
+ }
+
+ // The UTF-16 encoding is not as simple as one might
+ // naively think. Check RFC 2781
+ // (http://www.ietf.org/rfc/rfc2781.txt).
+ //
+			// Normally, two consecutive bytes describe a Unicode
+			// character. However, a special technique (called a
+ // surrogate pair) is used for specifying character
+ // values larger than 0xFFFF.
+ //
+ // A surrogate pair consists of two pseudo-characters:
+ // high surrogate area (0xD800-0xDBFF)
+ // low surrogate area (0xDC00-0xDFFF)
+ //
+ // The following formulas are used for decoding
+ // and encoding characters using surrogate pairs:
+ //
+ // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
+ // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
+ // W1 = 110110yyyyyyyyyy
+ // W2 = 110111xxxxxxxxxx
+ //
+ // where U is the character value, W1 is the high surrogate
+ // area, W2 is the low surrogate area.
+
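+			// Worked example (editor's addition, not in the original sources):
+			// U+1F600 is encoded as the surrogate pair W1=0xD83D, W2=0xDE00;
+			// decoding yields 0x10000 + ((0xD83D&0x3FF)<<10) + (0xDE00&0x3FF)
+			// = 0x10000 + 0xF400 + 0x200 = 0x1F600.
+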
+ // Check for incomplete UTF-16 character.
+ if raw_unread < 2 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 character",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the character.
+ value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+ // Check for unexpected low surrogate area.
+ if value&0xFC00 == 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "unexpected low surrogate area",
+ parser.offset, int(value))
+ }
+
+ // Check for a high surrogate area.
+ if value&0xFC00 == 0xD800 {
+ width = 4
+
+ // Check for incomplete surrogate pair.
+ if raw_unread < 4 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 surrogate pair",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the next character.
+ value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+ // Check for a low surrogate area.
+ if value2&0xFC00 != 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "expected low surrogate area",
+ parser.offset+2, int(value2))
+ }
+
+ // Generate the value of the surrogate pair.
+ value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+ } else {
+ width = 2
+ }
+
+ default:
+ panic("impossible")
+ }
+
+ // Check if the character is in the allowed range:
+ // #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
+ // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
+ // | [#x10000-#x10FFFF] (32 bit)
+ switch {
+ case value == 0x09:
+ case value == 0x0A:
+ case value == 0x0D:
+ case value >= 0x20 && value <= 0x7E:
+ case value == 0x85:
+ case value >= 0xA0 && value <= 0xD7FF:
+ case value >= 0xE000 && value <= 0xFFFD:
+ case value >= 0x10000 && value <= 0x10FFFF:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "control characters are not allowed",
+ parser.offset, int(value))
+ }
+
+ // Move the raw pointers.
+ parser.raw_buffer_pos += width
+ parser.offset += width
+
+ // Finally put the character into the buffer.
+ if value <= 0x7F {
+ // 0000 0000-0000 007F . 0xxxxxxx
+ parser.buffer[buffer_len+0] = byte(value)
+ buffer_len += 1
+ } else if value <= 0x7FF {
+ // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+ parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+ buffer_len += 2
+ } else if value <= 0xFFFF {
+ // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+ buffer_len += 3
+ } else {
+ // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+ buffer_len += 4
+ }
+
+ parser.unread++
+ }
+
+ // On EOF, put NUL into the buffer and return.
+ if parser.eof {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ parser.unread++
+ break
+ }
+ }
+ // [Go] Read the documentation of this function above. To return true,
+ // we need to have the given length in the buffer. Not doing that means
+ // every single check that calls this function to make sure the buffer
+	// has a given length would end up panicking (in Go) or accessing invalid memory (in C).
+ // This happens here due to the EOF above breaking early.
+ for buffer_len < length {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ }
+ parser.buffer = parser.buffer[:buffer_len]
+ return true
+}
diff --git a/cli/internal/yaml/resolve.go b/cli/internal/yaml/resolve.go
new file mode 100644
index 0000000..64ae888
--- /dev/null
+++ b/cli/internal/yaml/resolve.go
@@ -0,0 +1,326 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "encoding/base64"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type resolveMapItem struct {
+ value interface{}
+ tag string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+ t := resolveTable
+ t[int('+')] = 'S' // Sign
+ t[int('-')] = 'S'
+ for _, c := range "0123456789" {
+ t[int(c)] = 'D' // Digit
+ }
+ for _, c := range "yYnNtTfFoO~" {
+ t[int(c)] = 'M' // In map
+ }
+ t[int('.')] = '.' // Float (potentially in map)
+
+ var resolveMapList = []struct {
+ v interface{}
+ tag string
+ l []string
+ }{
+ {true, boolTag, []string{"true", "True", "TRUE"}},
+ {false, boolTag, []string{"false", "False", "FALSE"}},
+ {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}},
+ {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}},
+ {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}},
+ {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}},
+ {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}},
+ {"<<", mergeTag, []string{"<<"}},
+ }
+
+ m := resolveMap
+ for _, item := range resolveMapList {
+ for _, s := range item.l {
+ m[s] = resolveMapItem{item.v, item.tag}
+ }
+ }
+}
+
+const (
+ nullTag = "!!null"
+ boolTag = "!!bool"
+ strTag = "!!str"
+ intTag = "!!int"
+ floatTag = "!!float"
+ timestampTag = "!!timestamp"
+ seqTag = "!!seq"
+ mapTag = "!!map"
+ binaryTag = "!!binary"
+ mergeTag = "!!merge"
+)
+
+var longTags = make(map[string]string)
+var shortTags = make(map[string]string)
+
+func init() {
+ for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} {
+ ltag := longTag(stag)
+ longTags[stag] = ltag
+ shortTags[ltag] = stag
+ }
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+ if strings.HasPrefix(tag, longTagPrefix) {
+ if stag, ok := shortTags[tag]; ok {
+ return stag
+ }
+ return "!!" + tag[len(longTagPrefix):]
+ }
+ return tag
+}
+
+func longTag(tag string) string {
+ if strings.HasPrefix(tag, "!!") {
+ if ltag, ok := longTags[tag]; ok {
+ return ltag
+ }
+ return longTagPrefix + tag[2:]
+ }
+ return tag
+}
+
+func resolvableTag(tag string) bool {
+ switch tag {
+ case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag:
+ return true
+ }
+ return false
+}
+
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
+
+func resolve(tag string, in string) (rtag string, out interface{}) {
+ tag = shortTag(tag)
+ if !resolvableTag(tag) {
+ return tag, in
+ }
+
+ defer func() {
+ switch tag {
+ case "", rtag, strTag, binaryTag:
+ return
+ case floatTag:
+ if rtag == intTag {
+ switch v := out.(type) {
+ case int64:
+ rtag = floatTag
+ out = float64(v)
+ return
+ case int:
+ rtag = floatTag
+ out = float64(v)
+ return
+ }
+ }
+ }
+ failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+ }()
+
+ // Any data is accepted as a !!str or !!binary.
+ // Otherwise, the prefix is enough of a hint about what it might be.
+ hint := byte('N')
+ if in != "" {
+ hint = resolveTable[in[0]]
+ }
+ if hint != 0 && tag != strTag && tag != binaryTag {
+ // Handle things we can lookup in a map.
+ if item, ok := resolveMap[in]; ok {
+ return item.tag, item.value
+ }
+
+ // Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+ // are purposefully unsupported here. They're still quoted on
+		// the way out for compatibility with other parsers, though.
+
+ switch hint {
+ case 'M':
+ // We've already checked the map above.
+
+ case '.':
+ // Not in the map, so maybe a normal float.
+ floatv, err := strconv.ParseFloat(in, 64)
+ if err == nil {
+ return floatTag, floatv
+ }
+
+ case 'D', 'S':
+ // Int, float, or timestamp.
+ // Only try values as a timestamp if the value is unquoted or there's an explicit
+ // !!timestamp tag.
+ if tag == "" || tag == timestampTag {
+ t, ok := parseTimestamp(in)
+ if ok {
+ return timestampTag, t
+ }
+ }
+
+ plain := strings.Replace(in, "_", "", -1)
+ intv, err := strconv.ParseInt(plain, 0, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain, 0, 64)
+ if err == nil {
+ return intTag, uintv
+ }
+ if yamlStyleFloat.MatchString(plain) {
+ floatv, err := strconv.ParseFloat(plain, 64)
+ if err == nil {
+ return floatTag, floatv
+ }
+ }
+ if strings.HasPrefix(plain, "0b") {
+ intv, err := strconv.ParseInt(plain[2:], 2, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+ if err == nil {
+ return intTag, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0b") {
+ intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
+ if err == nil {
+ if true || intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ }
+ // Octals as introduced in version 1.2 of the spec.
+ // Octals from the 1.1 spec, spelled as 0777, are still
+ // decoded by default in v3 as well for compatibility.
+ // May be dropped in v4 depending on how usage evolves.
+ if strings.HasPrefix(plain, "0o") {
+ intv, err := strconv.ParseInt(plain[2:], 8, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 8, 64)
+ if err == nil {
+ return intTag, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0o") {
+ intv, err := strconv.ParseInt("-"+plain[3:], 8, 64)
+ if err == nil {
+ if true || intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ }
+ default:
+ panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")")
+ }
+ }
+ return strTag, in
+}
+
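+// Illustrative sketch (editor's addition, not from the original sources),
+// following the hint table and the parsing order above:
+//
+//	resolve("", "true")   // -> boolTag,  true    (resolveMap lookup)
+//	resolve("", "0b1010") // -> intTag,   10      (binary integer)
+//	resolve("", "3.14")   // -> floatTag, 3.14    (yamlStyleFloat match)
+//	resolve("", "hello")  // -> strTag,   "hello" (no hint matches)
+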
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+ const lineLen = 70
+ encLen := base64.StdEncoding.EncodedLen(len(s))
+ lines := encLen/lineLen + 1
+ buf := make([]byte, encLen*2+lines)
+ in := buf[0:encLen]
+ out := buf[encLen:]
+ base64.StdEncoding.Encode(in, []byte(s))
+ k := 0
+ for i := 0; i < len(in); i += lineLen {
+ j := i + lineLen
+ if j > len(in) {
+ j = len(in)
+ }
+ k += copy(out[k:], in[i:j])
+ if lines > 1 {
+ out[k] = '\n'
+ k++
+ }
+ }
+ return string(out[:k])
+}
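+
+// Illustrative note (editor's addition, not from the original sources): a
+// 100-byte input encodes to 136 base64 characters, which the loop above emits
+// as a 70-character line followed by a 66-character line.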
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+	"2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields.
+ "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+ "2006-1-2 15:4:5.999999999", // space separated with no time zone
+ "2006-1-2", // date only
+ // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+ // from the set of examples.
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+ // TODO write code to check all the formats supported by
+ // http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+ // Quick check: all date formats start with YYYY-.
+ i := 0
+ for ; i < len(s); i++ {
+ if c := s[i]; c < '0' || c > '9' {
+ break
+ }
+ }
+ if i != 4 || i == len(s) || s[i] != '-' {
+ return time.Time{}, false
+ }
+ for _, format := range allowedTimestampFormats {
+ if t, err := time.Parse(format, s); err == nil {
+ return t, true
+ }
+ }
+ return time.Time{}, false
+}
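+
+// Illustrative sketch (editor's addition, not from the original sources):
+// parseTimestamp("2023-04-28") and parseTimestamp("2023-4-28T01:36:44Z") both
+// succeed, while parseTimestamp("01:36:44") fails the quick check because the
+// string does not begin with a four-digit year followed by '-'.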
diff --git a/cli/internal/yaml/scannerc.go b/cli/internal/yaml/scannerc.go
new file mode 100644
index 0000000..87e46ef
--- /dev/null
+++ b/cli/internal/yaml/scannerc.go
@@ -0,0 +1,3040 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or
+// LL(1) parser, as it is usually called).
+//
+// Actually, there are two aspects of scanning that might be called "clever"; the
+// rest is quite straightforward. They are "block collection start" and
+// "simple keys". Both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+// FLOW-MAPPING-START # '{'
+// FLOW-MAPPING-END # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tag, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrate the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3. Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar
+// --- 'a single-quoted scalar'
+// --- "a double-quoted scalar"
+// --- |-
+// a literal scalar
+// --- >-
+// a folded
+// scalar
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// DOCUMENT-START
+// SCALAR("a plain scalar",plain)
+// DOCUMENT-START
+// SCALAR("a single-quoted scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("a double-quoted scalar",double-quoted)
+// DOCUMENT-START
+// SCALAR("a literal scalar",literal)
+// DOCUMENT-START
+// SCALAR("a folded scalar",folded)
+// STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+// FLOW-SEQUENCE-START
+// FLOW-SEQUENCE-END
+// FLOW-MAPPING-START
+// FLOW-MAPPING-END
+// FLOW-ENTRY
+// KEY
+// VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// respectively. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+// [item 1, item 2, item 3]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-SEQUENCE-START
+// SCALAR("item 1",plain)
+// FLOW-ENTRY
+// SCALAR("item 2",plain)
+// FLOW-ENTRY
+// SCALAR("item 3",plain)
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A flow mapping:
+//
+// {
+// a simple key: a value, # Note that the KEY token is produced.
+// ? a complex key: another value,
+// }
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// FLOW-ENTRY
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// FLOW-ENTRY
+// FLOW-MAPPING-END
+// STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote the indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes the indentation decrease that ends a block collection
+// (cf. the DEDENT token in Python). However, YAML has some syntax peculiarities
+// that make detecting these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' respectively.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+// - item 1
+// - item 2
+// -
+// - item 3.1
+// - item 3.2
+// -
+// key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 3.1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 3.2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Block mappings:
+//
+// a simple key: a value # The KEY token is produced here.
+// ? a complex key
+// : another value
+// a mapping:
+// key 1: value 1
+// key 2: value 2
+// a sequence:
+// - item 1
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// KEY
+// SCALAR("a mapping",plain)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+// - - item 1
+// - item 2
+// - key 1: value 1
+// key 2: value 2
+// - ? complex key
+// : complex value
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("complex key")
+// VALUE
+// SCALAR("complex value")
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Collections in a mapping:
+//
+// ? a sequence
+// : - item 1
+// - item 2
+// ? a mapping
+// : key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a mapping",plain)
+// VALUE
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+// key:
+// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key",plain)
+// VALUE
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// STREAM-END
+//
+
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+ // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+ return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
+
+// Advance the buffer pointer.
+func skip(parser *yaml_parser_t) {
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ parser.newlines = 0
+ }
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+ if is_crlf(parser.buffer, parser.buffer_pos) {
+ parser.mark.index += 2
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread -= 2
+ parser.buffer_pos += 2
+ parser.newlines++
+ } else if is_break(parser.buffer, parser.buffer_pos) {
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+ parser.newlines++
+ }
+}
+
+// Copy a character to a string buffer and advance pointers.
+func read(parser *yaml_parser_t, s []byte) []byte {
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ parser.newlines = 0
+ }
+ w := width(parser.buffer[parser.buffer_pos])
+ if w == 0 {
+ panic("invalid character sequence")
+ }
+ if len(s) == 0 {
+ s = make([]byte, 0, 32)
+ }
+ if w == 1 && len(s)+w <= cap(s) {
+ s = s[:len(s)+1]
+ s[len(s)-1] = parser.buffer[parser.buffer_pos]
+ parser.buffer_pos++
+ } else {
+ s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+ parser.buffer_pos += w
+ }
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+ buf := parser.buffer
+ pos := parser.buffer_pos
+ switch {
+ case buf[pos] == '\r' && buf[pos+1] == '\n':
+ // CR LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ parser.mark.index++
+ parser.unread--
+ case buf[pos] == '\r' || buf[pos] == '\n':
+ // CR|LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 1
+ case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+ // NEL . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+ // LS|PS . LS|PS
+ s = append(s, buf[parser.buffer_pos:pos+3]...)
+ parser.buffer_pos += 3
+ default:
+ return s
+ }
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ parser.newlines++
+ return s
+}
+
+// Get the next token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Erase the token object.
+ *token = yaml_token_t{} // [Go] Is this necessary?
+
+ // No tokens after STREAM-END or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+ return true
+ }
+
+ // Ensure that the tokens queue contains enough tokens.
+ if !parser.token_available {
+ if !yaml_parser_fetch_more_tokens(parser) {
+ return false
+ }
+ }
+
+ // Fetch the next token from the queue.
+ *token = parser.tokens[parser.tokens_head]
+ parser.tokens_head++
+ parser.tokens_parsed++
+ parser.token_available = false
+
+ if token.typ == yaml_STREAM_END_TOKEN {
+ parser.stream_end_produced = true
+ }
+ return true
+}
+
+// Set the scanner error and return false.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+ parser.error = yaml_SCANNER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = parser.mark
+ return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+ context := "while parsing a tag"
+ if directive {
+ context = "while parsing a %TAG directive"
+ }
+ return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
+func trace(args ...interface{}) func() {
+ pargs := append([]interface{}{"+++"}, args...)
+ fmt.Println(pargs...)
+ pargs = append([]interface{}{"---"}, args...)
+ return func() { fmt.Println(pargs...) }
+}
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+ // While we need more tokens to fetch, do it.
+ for {
+ // [Go] The comment parsing logic requires a lookahead of two tokens
+		// so that foot comments may be parsed in time to be associated
+		// with the tokens parsed before them, and also for line
+ // comments to be transformed into head comments in some edge cases.
+ if parser.tokens_head < len(parser.tokens)-2 {
+ // If a potential simple key is at the head position, we need to fetch
+ // the next token to disambiguate it.
+ head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed]
+ if !ok {
+ break
+ } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok {
+ return false
+ } else if !valid {
+ break
+ }
+ }
+ // Fetch the next token.
+ if !yaml_parser_fetch_next_token(parser) {
+ return false
+ }
+ }
+
+ parser.token_available = true
+ return true
+}
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) {
+ // Ensure that the buffer is initialized.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we just started scanning. Fetch STREAM-START then.
+ if !parser.stream_start_produced {
+ return yaml_parser_fetch_stream_start(parser)
+ }
+
+ scan_mark := parser.mark
+
+ // Eat whitespaces and comments until we reach the next token.
+ if !yaml_parser_scan_to_next_token(parser) {
+ return false
+ }
+
+ // [Go] While unrolling indents, transform the head comments of prior
+ // indentation levels observed after scan_start into foot comments at
+ // the respective indexes.
+
+ // Check the indentation level against the current column.
+ if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) {
+ return false
+ }
+
+ // Ensure that the buffer contains at least 4 characters. 4 is the length
+ // of the longest indicators ('--- ' and '... ').
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ // Is it the end of the stream?
+ if is_z(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_fetch_stream_end(parser)
+ }
+
+ // Is it a directive?
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+ return yaml_parser_fetch_directive(parser)
+ }
+
+ buf := parser.buffer
+ pos := parser.buffer_pos
+
+ // Is it the document start indicator?
+ if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+ }
+
+ // Is it the document end indicator?
+ if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+ }
+
+ comment_mark := parser.mark
+ if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') {
+ // Associate any following comments with the prior token.
+ comment_mark = parser.tokens[len(parser.tokens)-1].start_mark
+ }
+ defer func() {
+ if !ok {
+ return
+ }
+ if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN {
+			// Sequence indicators alone have no line comments. Any such comment
+			// becomes a head comment for whatever follows.
+ return
+ }
+ if !yaml_parser_scan_line_comment(parser, comment_mark) {
+ ok = false
+ return
+ }
+ }()
+
+ // Is it the flow sequence start indicator?
+ if buf[pos] == '[' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+ }
+
+ // Is it the flow mapping start indicator?
+ if buf[pos] == '{' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+ }
+
+ // Is it the flow sequence end indicator?
+ if buf[pos] == ']' {
+ return yaml_parser_fetch_flow_collection_end(parser, yaml_FLOW_SEQUENCE_END_TOKEN)
+ }
+
+ // Is it the flow mapping end indicator?
+ if buf[pos] == '}' {
+ return yaml_parser_fetch_flow_collection_end(parser, yaml_FLOW_MAPPING_END_TOKEN)
+ }
+
+ // Is it the flow entry indicator?
+ if buf[pos] == ',' {
+ return yaml_parser_fetch_flow_entry(parser)
+ }
+
+ // Is it the block entry indicator?
+ if buf[pos] == '-' && is_blankz(buf, pos+1) {
+ return yaml_parser_fetch_block_entry(parser)
+ }
+
+ // Is it the key indicator?
+ if buf[pos] == '?' && (parser.flow_level > 0 || is_blankz(buf, pos+1)) {
+ return yaml_parser_fetch_key(parser)
+ }
+
+ // Is it the value indicator?
+ if buf[pos] == ':' && (parser.flow_level > 0 || is_blankz(buf, pos+1)) {
+ return yaml_parser_fetch_value(parser)
+ }
+
+ // Is it an alias?
+ if buf[pos] == '*' {
+ return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+ }
+
+ // Is it an anchor?
+ if buf[pos] == '&' {
+ return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+ }
+
+ // Is it a tag?
+ if buf[pos] == '!' {
+ return yaml_parser_fetch_tag(parser)
+ }
+
+ // Is it a literal scalar?
+ if buf[pos] == '|' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, true)
+ }
+
+ // Is it a folded scalar?
+ if buf[pos] == '>' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, false)
+ }
+
+ // Is it a single-quoted scalar?
+ if buf[pos] == '\'' {
+ return yaml_parser_fetch_flow_scalar(parser, true)
+ }
+
+ // Is it a double-quoted scalar?
+ if buf[pos] == '"' {
+ return yaml_parser_fetch_flow_scalar(parser, false)
+ }
+
+ // Is it a plain scalar?
+ //
+ // A plain scalar may start with any non-blank character except
+ //
+ // '-', '?', ':', ',', '[', ']', '{', '}',
+ // '#', '&', '*', '!', '|', '>', '\'', '\"',
+ // '%', '@', '`'.
+ //
+ // In the block context (and, for the '-' indicator, in the flow context
+ // too), it may also start with the characters
+ //
+ // '-', '?', ':'
+ //
+ // if it is followed by a non-space character.
+ //
+ // The last rule is more restrictive than the specification requires.
+ // [Go] TODO Make this logic more reasonable.
+ //switch buf[pos] {
+ //case '-', '?', ':', ',', '[', ']', '{', '}', '#', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+ //}
+ if !(is_blankz(buf, pos) || buf[pos] == '-' || buf[pos] == '?' || buf[pos] == ':' ||
+ buf[pos] == ',' || buf[pos] == '[' || buf[pos] == ']' || buf[pos] == '{' ||
+ buf[pos] == '}' || buf[pos] == '#' || buf[pos] == '&' || buf[pos] == '*' ||
+ buf[pos] == '!' || buf[pos] == '|' || buf[pos] == '>' || buf[pos] == '\'' ||
+ buf[pos] == '"' || buf[pos] == '%' || buf[pos] == '@' || buf[pos] == '`') ||
+ (buf[pos] == '-' && !is_blank(buf, pos+1)) ||
+ (parser.flow_level == 0 &&
+ (buf[pos] == '?' || buf[pos] == ':') && !is_blankz(buf, pos+1)) {
+ return yaml_parser_fetch_plain_scalar(parser)
+ }
+
+ // If we haven't determined the token type by now, it is an error.
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning for the next token", parser.mark,
+ "found character that cannot start any token")
+}
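+
+// For illustration (a rough sketch, not part of the scanner itself): given
+// the document
+//
+//	- foo: bar
+//
+// the dispatch above produces approximately this token stream (marks and
+// comment bookkeeping omitted):
+//
+//	STREAM-START, BLOCK-SEQUENCE-START, BLOCK-ENTRY,
+//	BLOCK-MAPPING-START, KEY, SCALAR("foo"), VALUE, SCALAR("bar"),
+//	BLOCK-END, BLOCK-END, STREAM-END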
+
+func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) {
+ if !simple_key.possible {
+ return false, true
+ }
+
+ // The 1.2 specification says:
+ //
+ // "If the ? indicator is omitted, parsing needs to see past the
+ // implicit key to recognize it as such. To limit the amount of
+ // lookahead required, the “:” indicator must appear at most 1024
+ // Unicode characters beyond the start of the key. In addition, the key
+ // is restricted to a single line."
+ //
+ if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index {
+ // Check if the potential simple key to be removed is required.
+ if simple_key.required {
+ return false, yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", simple_key.mark,
+ "could not find expected ':'")
+ }
+ simple_key.possible = false
+ return false, true
+ }
+ return true, true
+}
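+
+// For illustration: with an implicit key such as
+//
+//	aaa...(more than 1024 characters)...aaa: value
+//
+// the ':' falls beyond the 1024-character window, so the saved simple key is
+// invalidated; if it was required, the scanner reports "could not find
+// expected ':'". The same happens when the ':' lands on a later line.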
+
+// Check if a simple key may start at the current position and add it if
+// needed.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+ // A simple key is required at the current position if the scanner is in
+ // the block context and the current column coincides with the indentation
+ // level.
+
+ required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+ // If the current position may start a simple key, save it.
+ if parser.simple_key_allowed {
+ simple_key := yaml_simple_key_t{
+ possible: true,
+ required: required,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ mark: parser.mark,
+ }
+
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+ parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+ parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
+ }
+ return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+ i := len(parser.simple_keys) - 1
+ if parser.simple_keys[i].possible {
+ // If the key is required, it is an error.
+ if parser.simple_keys[i].required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", parser.simple_keys[i].mark,
+ "could not find expected ':'")
+ }
+ // Remove the key from the stack.
+ parser.simple_keys[i].possible = false
+ delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
+ }
+ return true
+}
+
+// max_flow_level limits the depth of nested flow collections.
+const max_flow_level = 10000
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+ // Reset the simple key on the next level.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+ possible: false,
+ required: false,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ mark: parser.mark,
+ })
+
+ // Increase the flow level.
+ parser.flow_level++
+ if parser.flow_level > max_flow_level {
+ return yaml_parser_set_scanner_error(parser,
+ "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+ fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+ }
+ return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+ if parser.flow_level > 0 {
+ parser.flow_level--
+ last := len(parser.simple_keys) - 1
+ delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+ parser.simple_keys = parser.simple_keys[:last]
+ }
+ return true
+}
+
+// max_indents limits the indents stack size
+const max_indents = 10000
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ if parser.indent < column {
+ // Push the current indentation level to the stack and set the new
+ // indentation level.
+ parser.indents = append(parser.indents, parser.indent)
+ parser.indent = column
+ if len(parser.indents) > max_indents {
+ return yaml_parser_set_scanner_error(parser,
+ "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+ fmt.Sprintf("exceeded max depth of %d", max_indents))
+ }
+
+ // Create a token and insert it into the queue.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: mark,
+ end_mark: mark,
+ }
+ if number > -1 {
+ number -= parser.tokens_parsed
+ }
+ yaml_insert_token(parser, number, &token)
+ }
+ return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less than or equal to the column. For each popped level, append
+// a BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ block_mark := scan_mark
+ block_mark.index--
+
+ // Loop through the indentation levels in the stack.
+ for parser.indent > column {
+
+ // [Go] Reposition the end token before potential following
+ // foot comments of parent blocks. For that, search
+ // backwards for recent comments that were at the same
+ // indent as the block that is ending now.
+ stop_index := block_mark.index
+ for i := len(parser.comments) - 1; i >= 0; i-- {
+ comment := &parser.comments[i]
+
+ if comment.end_mark.index < stop_index {
+ // Don't go back beyond the start of the comment/whitespace scan, unless column < 0.
+ // If requested indent column is < 0, then the document is over and everything else
+ // is a foot anyway.
+ break
+ }
+ if comment.start_mark.column == parser.indent+1 {
+ // This is a good match. But maybe there's a former comment
+ // at that same indent level, so keep searching.
+ block_mark = comment.start_mark
+ }
+
+ // While the end of the former comment matches with
+ // the start of the following one, we know there's
+ // nothing in between and scanning is still safe.
+ stop_index = comment.scan_mark.index
+ }
+
+ // Create a token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_END_TOKEN,
+ start_mark: block_mark,
+ end_mark: block_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+
+ // Pop the indentation level.
+ parser.indent = parser.indents[len(parser.indents)-1]
+ parser.indents = parser.indents[:len(parser.indents)-1]
+ }
+ return true
+}
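+
+// Rough indentation example for roll/unroll: scanning
+//
+//	a:
+//	  b: 1
+//	c: 2
+//
+// rolls the indent from -1 to 0 (emitting BLOCK-MAPPING-START) and then to 2
+// for the nested mapping; reaching "c" at column 0 unrolls back to 0,
+// emitting one BLOCK-END for the nested level.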
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+ // Set the initial indentation.
+ parser.indent = -1
+
+ // Initialize the simple key stack.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ parser.simple_keys_by_tok = make(map[int]int)
+
+ // A simple key is allowed at the beginning of the stream.
+ parser.simple_key_allowed = true
+
+ // We have started.
+ parser.stream_start_produced = true
+
+ // Create the STREAM-START token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_START_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ encoding: parser.encoding,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+ // Force new line.
+ if parser.mark.column != 0 {
+ parser.mark.column = 0
+ parser.mark.line++
+ }
+
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the STREAM-END token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+ token := yaml_token_t{}
+ if !yaml_parser_scan_directive(parser, &token) {
+ return false
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+ start_mark := parser.mark
+
+ skip(parser)
+ skip(parser)
+ skip(parser)
+
+ end_mark := parser.mark
+
+ // Create the DOCUMENT-START or DOCUMENT-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+
+ // The indicators '[' and '{' may start a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // Increase the flow level.
+ if !yaml_parser_increase_flow_level(parser) {
+ return false
+ }
+
+ // A simple key may follow the indicators '[' and '{'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset any potential simple key on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Decrease the flow level.
+ if !yaml_parser_decrease_flow_level(parser) {
+ return false
+ }
+
+ // No simple keys after the indicators ']' and '}'.
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after ','.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_FLOW_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+ // Check if the scanner is in the block context.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new entry.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "block sequence entries are not allowed in this context")
+ }
+ // Add the BLOCK-SEQUENCE-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+ return false
+ }
+ } else {
+ // It is an error for the '-' indicator to occur in the flow context,
+ // but we let the Parser detect and report about it because the Parser
+ // is able to point to the context.
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '-'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the BLOCK-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+ // In the block context, additional checks are required.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new key (not necessarily a simple one).
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping keys are not allowed in this context")
+ }
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '?' in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the KEY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+ simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+ // Have we found a simple key?
+ if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
+ return false
+
+ } else if valid {
+
+ // Create the KEY token and insert it into the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: simple_key.mark,
+ end_mark: simple_key.mark,
+ }
+ yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+ // In the block context, we may need to add the BLOCK-MAPPING-START token.
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+ simple_key.token_number,
+ yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+ return false
+ }
+
+ // Remove the simple key.
+ simple_key.possible = false
+ delete(parser.simple_keys_by_tok, simple_key.token_number)
+
+ // A simple key cannot follow another simple key.
+ parser.simple_key_allowed = false
+
+ } else {
+ // The ':' indicator follows a complex key.
+
+ // In the block context, extra checks are required.
+ if parser.flow_level == 0 {
+
+ // Check if we are allowed to start a complex value.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping values are not allowed in this context")
+ }
+
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Simple keys after ':' are allowed in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+ }
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the VALUE token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_VALUE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
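+
+// For example, while scanning "foo: bar" the scanner first saves a potential
+// simple key at "foo". When ':' arrives, the valid branch above inserts a
+// KEY token (and, in block context, a BLOCK-MAPPING-START) back at the saved
+// position, then appends the VALUE token, so once "bar" is scanned the queue
+// reads roughly:
+//
+//	BLOCK-MAPPING-START, KEY, SCALAR("foo"), VALUE, SCALAR("bar")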
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // An anchor or an alias could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow an anchor or an alias.
+ parser.simple_key_allowed = false
+
+ // Create the ALIAS or ANCHOR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_anchor(parser, &token, typ) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+ // A tag could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a tag.
+ parser.simple_key_allowed = false
+
+ // Create the TAG token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_tag(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+ // Remove any potential simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // A simple key may follow a block scalar.
+ parser.simple_key_allowed = true
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a flow scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a flow scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_plain_scalar(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+ scan_mark := parser.mark
+
+ // Loop until the next token is found.
+ for {
+ // Allow the BOM mark to start a line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ }
+
+ // Eat whitespaces.
+ // Tabs are allowed:
+ // - in the flow context
+ // - in the block context, but not at the beginning of the line or
+ // after '-', '?', or ':' (complex value).
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if we just had a line comment under a sequence entry that
+ // looks more like a header to the following content. Similar to this:
+ //
+ // - # The comment
+ // - Some data
+ //
+ // If so, transform the line comment to a head comment and reposition.
+ if len(parser.comments) > 0 && len(parser.tokens) > 1 {
+ tokenA := parser.tokens[len(parser.tokens)-2]
+ tokenB := parser.tokens[len(parser.tokens)-1]
+ comment := &parser.comments[len(parser.comments)-1]
+ if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) {
+ // If it was in the prior line, reposition so it becomes a
+ // header of the follow-up token. Otherwise, keep it in place
+ // so it becomes a header of the former.
+ comment.head = comment.line
+ comment.line = nil
+ if comment.start_mark.line == parser.mark.line-1 {
+ comment.token_mark = parser.mark
+ }
+ }
+ }
+
+ // Eat a comment until a line break.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ if !yaml_parser_scan_comments(parser, scan_mark) {
+ return false
+ }
+ }
+
+ // If it is a line break, eat it.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+
+ // In the block context, a new line may start a simple key.
+ if parser.flow_level == 0 {
+ parser.simple_key_allowed = true
+ }
+ } else {
+ break // We have found a token.
+ }
+ }
+
+ return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+//
+// %YAML 1.1 # a comment \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Eat '%'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the directive name.
+ var name []byte
+ if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+ return false
+ }
+
+ // Is it a YAML directive?
+ if bytes.Equal(name, []byte("YAML")) {
+ // Scan the VERSION directive value.
+ var major, minor int8
+ if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a VERSION-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_VERSION_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ major: major,
+ minor: minor,
+ }
+
+ // Is it a TAG directive?
+ } else if bytes.Equal(name, []byte("TAG")) {
+ // Scan the TAG directive value.
+ var handle, prefix []byte
+ if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a TAG-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ prefix: prefix,
+ }
+
+ // Unknown directive.
+ } else {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unknown directive name")
+ return false
+ }
+
+ // Eat the rest of the line including any comments.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ if parser.buffer[parser.buffer_pos] == '#' {
+ // [Go] Discard this inline comment for the time being.
+ //if !yaml_parser_scan_line_comment(parser, start_mark) {
+ // return false
+ //}
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+//
+// %YAML 1.1 # a comment \n
+//  ^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+//  ^^^
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+ // Consume the directive name.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ var s []byte
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the name is empty.
+ if len(s) == 0 {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "could not find expected directive name")
+ return false
+ }
+
+ // Check for a blank character after the name.
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unexpected non-alphabetical character")
+ return false
+ }
+ *name = s
+ return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+//
+// %YAML 1.1 # a comment \n
+//      ^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the major version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+ return false
+ }
+
+ // Eat '.'.
+ if parser.buffer[parser.buffer_pos] != '.' {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected digit or '.' character")
+ }
+
+ skip(parser)
+
+ // Consume the minor version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+ return false
+ }
+ return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+//
+// %YAML 1.1 # a comment \n
+//       ^
+// %YAML 1.1 # a comment \n
+//         ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+ // Repeat while the next character is a digit.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var value, length int8
+ for is_digit(parser.buffer, parser.buffer_pos) {
+ // Check if the number is too long.
+ length++
+ if length > max_number_length {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "found extremely long version number")
+ }
+ value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the number was present.
+ if length == 0 {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected version number")
+ }
+ *number = value
+ return true
+}
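+
+// With max_number_length == 2, "%YAML 1.2" scans as major == 1, minor == 2,
+// while a component of three or more digits (say "%YAML 123.4") fails with
+// "found extremely long version number".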
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+//
+// %TAG !yaml! tag:yaml.org,2002: \n
+//      ^^^^^^^^^^^^^^^^^^^^^^^^^^
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+ var handle_value, prefix_value []byte
+
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+ return false
+ }
+
+ // Expect a whitespace.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace")
+ return false
+ }
+
+ // Eat whitespaces.
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a prefix.
+ if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+ return false
+ }
+
+ // Expect a whitespace or line break.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ *handle = handle_value
+ *prefix = prefix_value
+ return true
+}
+
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
+ var s []byte
+
+ // Eat the indicator character.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the value.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ end_mark := parser.mark
+
+ // Check if the length of the anchor is greater than 0 and that it is
+ // followed by a whitespace character or one of the indicators:
+ //
+ // '?', ':', ',', ']', '}', '%', '@', '`'.
+
+ if len(s) == 0 ||
+ !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+ parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '`') {
+ context := "while scanning an alias"
+ if typ == yaml_ANCHOR_TOKEN {
+ context = "while scanning an anchor"
+ }
+ yaml_parser_set_scanner_error(parser, context, start_mark,
+ "did not find expected alphabetic or numeric character")
+ return false
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ }
+
+ return true
+}
+
+// Scan a TAG token.
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+ var handle, suffix []byte
+
+ start_mark := parser.mark
+
+ // Check if the tag is in the canonical form.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ if parser.buffer[parser.buffer_pos+1] == '<' {
+ // Keep the handle as ''
+
+ // Eat '!<'
+ skip(parser)
+ skip(parser)
+
+ // Consume the tag value.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+
+ // Check for '>' and eat it.
+ if parser.buffer[parser.buffer_pos] != '>' {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find the expected '>'")
+ return false
+ }
+
+ skip(parser)
+ } else {
+ // The tag has either the '!suffix' or the '!handle!suffix' form.
+
+ // First, try to scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+ return false
+ }
+
+ // Check if it is, indeed, a handle.
+ if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+ // Scan the suffix now.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+ } else {
+ // It wasn't a handle after all. Scan the rest of the tag.
+ if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+ return false
+ }
+
+ // Set the handle to '!'.
+ handle = []byte{'!'}
+
+ // A special case: the '!' tag. Set the handle to '' and the
+ // suffix to '!'.
+ if len(suffix) == 0 {
+ handle, suffix = suffix, handle
+ }
+ }
+ }
+
+ // Check the character which ends the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ suffix: suffix,
+ }
+ return true
+}
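+
+// Tag forms handled above, for illustration:
+//
+//	!<tag:yaml.org,2002:str>   verbatim: handle "", suffix as written
+//	!!str                      handle "!!", suffix "str"
+//	!local                     handle "!", suffix "local"
+//	!                          handle "", suffix "!" (the special case)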
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+ // Check the initial '!' character.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] != '!' {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+
+ var s []byte
+
+ // Copy the '!' character.
+ s = read(parser, s)
+
+ // Copy all subsequent alphabetical and numerical characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the trailing character is '!' and copy it.
+ if parser.buffer[parser.buffer_pos] == '!' {
+ s = read(parser, s)
+ } else {
+ // It's either the '!' tag or not really a tag handle. If it's a %TAG
+ // directive, it's an error. If it's a tag token, it must be part of a URI.
+ if directive && string(s) != "!" {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+ }
+
+ *handle = s
+ return true
+}
+
+// Scan a tag.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+ var s []byte
+ hasTag := len(head) > 0
+
+ // Copy the head if needed.
+ //
+ // Note that we don't copy the leading '!' character.
+ if len(head) > 1 {
+ s = append(s, head[1:]...)
+ }
+
+ // Scan the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // The set of characters that may appear in URI is as follows:
+ //
+ // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+ // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+ // '%'.
+ // [Go] TODO Convert this into more reasonable logic.
+ for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+ parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+ parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+ parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+ parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+ parser.buffer[parser.buffer_pos] == '%' {
+ // Check if it is a URI-escape sequence.
+ if parser.buffer[parser.buffer_pos] == '%' {
+ if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+ return false
+ }
+ } else {
+ s = read(parser, s)
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ hasTag = true
+ }
+
+ if !hasTag {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected tag URI")
+ return false
+ }
+ *uri = s
+ return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+ // Decode the required number of characters. The value 1024 is a sentinel
+ // meaning the leading octet has not been seen yet; once it arrives, w is
+ // reset to the width of the UTF-8 sequence and counts the octets left.
+ w := 1024
+ for w > 0 {
+ // Check for a URI-escaped octet.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+
+ if !(parser.buffer[parser.buffer_pos] == '%' &&
+ is_hex(parser.buffer, parser.buffer_pos+1) &&
+ is_hex(parser.buffer, parser.buffer_pos+2)) {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find URI escaped octet")
+ }
+
+ // Get the octet.
+ octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+ // If it is the leading octet, determine the length of the UTF-8 sequence.
+ if w == 1024 {
+ w = width(octet)
+ if w == 0 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect leading UTF-8 octet")
+ }
+ } else {
+ // Check if the trailing octet is correct.
+ if octet&0xC0 != 0x80 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect trailing UTF-8 octet")
+ }
+ }
+
+ // Copy the octet and move the pointers.
+ *s = append(*s, octet)
+ skip(parser)
+ skip(parser)
+ skip(parser)
+ w--
+ }
+ return true
+}
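+
+// Worked example: "%C3%A9" decodes as follows. The first octet 0xC3 has
+// width 2, so one more escaped octet is expected; 0xA9 passes the 10xxxxxx
+// trailing-octet check, and the two bytes together append the UTF-8
+// encoding of 'é' to the tag.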
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+ // Eat the indicator '|' or '>'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the additional block scalar indicators.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check for a chomping indicator.
+ var chomping, increment int
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ // Set the chomping method and eat the indicator.
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+
+ // Check for an indentation indicator.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_digit(parser.buffer, parser.buffer_pos) {
+ // Check that the indentation is greater than 0.
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+
+ // Get the indentation level and eat the indicator.
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+ }
+
+ } else if is_digit(parser.buffer, parser.buffer_pos) {
+ // Do the same as above, but in the opposite order.
+
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+ }
+ }
+
+ // Eat whitespaces and comments to the end of the line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.buffer[parser.buffer_pos] == '#' {
+ if !yaml_parser_scan_line_comment(parser, start_mark) {
+ return false
+ }
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ end_mark := parser.mark
+
+ // Set the indentation level if it was specified.
+ var indent int
+ if increment > 0 {
+ if parser.indent >= 0 {
+ indent = parser.indent + increment
+ } else {
+ indent = increment
+ }
+ }
+
+ // Scan the leading line breaks and determine the indentation level if needed.
+ var s, leading_break, trailing_breaks []byte
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+
+ // Scan the block scalar content.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var leading_blank, trailing_blank bool
+ for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+ // We are at the beginning of a non-empty line.
+
+ // Is it a trailing whitespace?
+ trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Check if we need to fold the leading line break.
+ if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+ // Do we need to join the lines by space?
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ }
+ } else {
+ s = append(s, leading_break...)
+ }
+ leading_break = leading_break[:0]
+
+ // Append the remaining line breaks.
+ s = append(s, trailing_breaks...)
+ trailing_breaks = trailing_breaks[:0]
+
+ // Is it a leading whitespace?
+ leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Consume the current line.
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ leading_break = read_line(parser, leading_break)
+
+ // Eat the following indentation spaces and line breaks.
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+ }
+
+ // Chomp the tail.
+ if chomping != -1 {
+ s = append(s, leading_break...)
+ }
+ if chomping == 1 {
+ s = append(s, trailing_breaks...)
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_LITERAL_SCALAR_STYLE,
+ }
+ if !literal {
+ token.style = yaml_FOLDED_SCALAR_STYLE
+ }
+ return true
+}
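+
+// Block scalar headers accepted above, for illustration:
+//
+//	|     literal, clip chomping (keep a single trailing line break)
+//	>-    folded, strip chomping (chomping == -1, drop trailing breaks)
+//	|+2   literal, keep chomping, explicit indentation indicator of 2
+//
+// The reversed order "|2+" is accepted as well, via the digit-first branch.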
+
+// Scan indentation spaces and line breaks for a block scalar. Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+ *end_mark = parser.mark
+
+ // Eat the indentation spaces and line breaks.
+ max_indent := 0
+ for {
+ // Eat the indentation spaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.mark.column > max_indent {
+ max_indent = parser.mark.column
+ }
+
+ // Check for a tab character messing up the indentation.
+ if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found a tab character where an indentation space is expected")
+ }
+
+ // Have we found a non-empty line?
+ if !is_break(parser.buffer, parser.buffer_pos) {
+ break
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ // [Go] Should really be returning breaks instead.
+ *breaks = read_line(parser, *breaks)
+ *end_mark = parser.mark
+ }
+
+ // Determine the indentation level if needed.
+ if *indent == 0 {
+ *indent = max_indent
+ if *indent < parser.indent+1 {
+ *indent = parser.indent + 1
+ }
+ if *indent < 1 {
+ *indent = 1
+ }
+ }
+ return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+ // Eat the left quote.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the content of the quoted scalar.
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ for {
+ // Check that there are no document indicators at the beginning of the line.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected document indicator")
+ return false
+ }
+
+ // Check for EOF.
+ if is_z(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected end of stream")
+ return false
+ }
+
+ // Consume non-blank characters.
+ leading_blanks := false
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+ if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+ // It is an escaped single quote.
+ s = append(s, '\'')
+ skip(parser)
+ skip(parser)
+
+ } else if single && parser.buffer[parser.buffer_pos] == '\'' {
+ // It is a right single quote.
+ break
+ } else if !single && parser.buffer[parser.buffer_pos] == '"' {
+ // It is a right double quote.
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+ // It is an escaped line break.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+ skip(parser)
+ skip_line(parser)
+ leading_blanks = true
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+ // It is an escape sequence.
+ code_length := 0
+
+ // Check the escape character.
+ switch parser.buffer[parser.buffer_pos+1] {
+ case '0':
+ s = append(s, 0)
+ case 'a':
+ s = append(s, '\x07')
+ case 'b':
+ s = append(s, '\x08')
+ case 't', '\t':
+ s = append(s, '\x09')
+ case 'n':
+ s = append(s, '\x0A')
+ case 'v':
+ s = append(s, '\x0B')
+ case 'f':
+ s = append(s, '\x0C')
+ case 'r':
+ s = append(s, '\x0D')
+ case 'e':
+ s = append(s, '\x1B')
+ case ' ':
+ s = append(s, '\x20')
+ case '"':
+ s = append(s, '"')
+ case '\'':
+ s = append(s, '\'')
+ case '\\':
+ s = append(s, '\\')
+ case 'N': // NEL (#x85)
+ s = append(s, '\xC2')
+ s = append(s, '\x85')
+ case '_': // #xA0
+ s = append(s, '\xC2')
+ s = append(s, '\xA0')
+ case 'L': // LS (#x2028)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA8')
+ case 'P': // PS (#x2029)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA9')
+ case 'x':
+ code_length = 2
+ case 'u':
+ code_length = 4
+ case 'U':
+ code_length = 8
+ default:
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found unknown escape character")
+ return false
+ }
+
+ skip(parser)
+ skip(parser)
+
+ // Consume an arbitrary escape code.
+ if code_length > 0 {
+ var value int
+
+ // Scan the character value.
+ if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+ return false
+ }
+ for k := 0; k < code_length; k++ {
+ if !is_hex(parser.buffer, parser.buffer_pos+k) {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "did not find expected hexdecimal number")
+ return false
+ }
+ value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+ }
+
+ // Check the value and write the character.
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found invalid Unicode character escape code")
+ return false
+ }
+ if value <= 0x7F {
+ s = append(s, byte(value))
+ } else if value <= 0x7FF {
+ s = append(s, byte(0xC0+(value>>6)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else if value <= 0xFFFF {
+ s = append(s, byte(0xE0+(value>>12)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else {
+ s = append(s, byte(0xF0+(value>>18)))
+ s = append(s, byte(0x80+((value>>12)&0x3F)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ }
+
+ // Advance the pointer.
+ for k := 0; k < code_length; k++ {
+ skip(parser)
+ }
+ }
+ } else {
+ // It is a non-escaped non-blank character.
+ s = read(parser, s)
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we are at the end of the scalar.
+ if single {
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ break
+ }
+ } else {
+ if parser.buffer[parser.buffer_pos] == '"' {
+ break
+ }
+ }
+
+ // Consume blank characters.
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Join the whitespaces or fold line breaks.
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if len(leading_break) > 0 && leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Eat the right quote.
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_SINGLE_QUOTED_SCALAR_STYLE,
+ }
+ if !single {
+ token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ return true
+}
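+
+// Escape handling examples from the loop above: in a double-quoted scalar
+// "\u00E9" scans four hex digits into 0xE9 and appends its two-byte UTF-8
+// form, while in a single-quoted scalar the only escape is the doubled
+// quote, so 'it''s' yields the value "it's".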
+
+// Scan a plain scalar.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ var leading_blanks bool
+ var indent = parser.indent + 1
+
+ start_mark := parser.mark
+ end_mark := parser.mark
+
+ // Consume the content of the plain scalar.
+ for {
+ // Check for a document indicator.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ break
+ }
+
+ // Check for a comment.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ break
+ }
+
+ // Consume non-blank characters.
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+ // Check for indicators that may end a plain scalar.
+ if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level > 0 &&
+ (parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}')) {
+ break
+ }
+
+ // Check if we need to join whitespaces and breaks.
+ if leading_blanks || len(whitespaces) > 0 {
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ leading_blanks = false
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Copy the character.
+ s = read(parser, s)
+
+ end_mark = parser.mark
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Is it the end?
+ if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+ break
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+
+ // Check for tab characters that violate indentation.
+ if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+ start_mark, "found a tab character that violates indentation")
+ return false
+ }
+
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check indentation level.
+ if parser.flow_level == 0 && parser.mark.column < indent {
+ break
+ }
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_PLAIN_SCALAR_STYLE,
+ }
+
+ // Note that we change the 'simple_key_allowed' flag.
+ if leading_blanks {
+ parser.simple_key_allowed = true
+ }
+ return true
+}
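+
+// Editor's note (illustrative, not part of the upstream scanner): the folding
+// logic above joins the continuation lines of a plain scalar with single
+// spaces, so the input
+//
+//	key: one
+//	  two
+//
+// scans as a single SCALAR token with the value "one two".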
+
+func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool {
+ if parser.newlines > 0 {
+ return true
+ }
+
+ var start_mark yaml_mark_t
+ var text []byte
+
+ for peek := 0; peek < 512; peek++ {
+ if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) {
+ break
+ }
+ if is_blank(parser.buffer, parser.buffer_pos+peek) {
+ continue
+ }
+ if parser.buffer[parser.buffer_pos+peek] == '#' {
+ seen := parser.mark.index + peek
+ for {
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_breakz(parser.buffer, parser.buffer_pos) {
+ if parser.mark.index >= seen {
+ break
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ } else if parser.mark.index >= seen {
+ if len(text) == 0 {
+ start_mark = parser.mark
+ }
+ text = read(parser, text)
+ } else {
+ skip(parser)
+ }
+ }
+ }
+ break
+ }
+ if len(text) > 0 {
+ parser.comments = append(parser.comments, yaml_comment_t{
+ token_mark: token_mark,
+ start_mark: start_mark,
+ line: text,
+ })
+ }
+ return true
+}
+
+func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool {
+ token := parser.tokens[len(parser.tokens)-1]
+
+ if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 {
+ token = parser.tokens[len(parser.tokens)-2]
+ }
+
+ var token_mark = token.start_mark
+ var start_mark yaml_mark_t
+ var next_indent = parser.indent
+ if next_indent < 0 {
+ next_indent = 0
+ }
+
+ var recent_empty = false
+ var first_empty = parser.newlines <= 1
+
+ var line = parser.mark.line
+ var column = parser.mark.column
+
+ var text []byte
+
+ // The foot line is the place where a comment must start to
+ // still be considered a foot of the prior content.
+ // If there's some content in the currently parsed line, then
+ // the foot is the line below it.
+ var foot_line = -1
+ if scan_mark.line > 0 {
+ foot_line = parser.mark.line - parser.newlines + 1
+ if parser.newlines == 0 && parser.mark.column > 1 {
+ foot_line++
+ }
+ }
+
+ var peek = 0
+ for ; peek < 512; peek++ {
+ if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) {
+ break
+ }
+ column++
+ if is_blank(parser.buffer, parser.buffer_pos+peek) {
+ continue
+ }
+ c := parser.buffer[parser.buffer_pos+peek]
+ var close_flow = parser.flow_level > 0 && (c == ']' || c == '}')
+ if close_flow || is_breakz(parser.buffer, parser.buffer_pos+peek) {
+ // Got line break or terminator.
+ if close_flow || !recent_empty {
+ if close_flow || first_empty && (start_mark.line == foot_line && token.typ != yaml_VALUE_TOKEN || start_mark.column-1 < next_indent) {
+ // This is the first empty line and there were no empty lines before,
+ // so this initial part of the comment is a foot of the prior token
+ // instead of being a head for the following one. Split it up.
+ // Alternatively, this might also be the last comment inside a flow
+ // scope, so it must be a footer.
+ if len(text) > 0 {
+ if start_mark.column-1 < next_indent {
+ // If dedented it's unrelated to the prior token.
+ token_mark = start_mark
+ }
+ parser.comments = append(parser.comments, yaml_comment_t{
+ scan_mark: scan_mark,
+ token_mark: token_mark,
+ start_mark: start_mark,
+ end_mark: yaml_mark_t{parser.mark.index + peek, line, column},
+ foot: text,
+ })
+ scan_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+ token_mark = scan_mark
+ text = nil
+ }
+ } else {
+ if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 {
+ text = append(text, '\n')
+ }
+ }
+ }
+ if !is_break(parser.buffer, parser.buffer_pos+peek) {
+ break
+ }
+ first_empty = false
+ recent_empty = true
+ column = 0
+ line++
+ continue
+ }
+
+ if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.column) {
+ // The comment at the different indentation is a foot of the
+ // preceding data rather than a head of the upcoming one.
+ parser.comments = append(parser.comments, yaml_comment_t{
+ scan_mark: scan_mark,
+ token_mark: token_mark,
+ start_mark: start_mark,
+ end_mark: yaml_mark_t{parser.mark.index + peek, line, column},
+ foot: text,
+ })
+ scan_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+ token_mark = scan_mark
+ text = nil
+ }
+
+ if parser.buffer[parser.buffer_pos+peek] != '#' {
+ break
+ }
+
+ if len(text) == 0 {
+ start_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+ } else {
+ text = append(text, '\n')
+ }
+
+ recent_empty = false
+
+ // Consume until after the consumed comment line.
+ seen := parser.mark.index + peek
+ for {
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_breakz(parser.buffer, parser.buffer_pos) {
+ if parser.mark.index >= seen {
+ break
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ } else if parser.mark.index >= seen {
+ text = read(parser, text)
+ } else {
+ skip(parser)
+ }
+ }
+
+ peek = 0
+ column = 0
+ line = parser.mark.line
+ next_indent = parser.indent
+ if next_indent < 0 {
+ next_indent = 0
+ }
+ }
+
+ if len(text) > 0 {
+ parser.comments = append(parser.comments, yaml_comment_t{
+ scan_mark: scan_mark,
+ token_mark: start_mark,
+ start_mark: start_mark,
+ end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column},
+ head: text,
+ })
+ }
+ return true
+}
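+
+// Editor's note (illustrative): the foot/head classification above means that
+// in
+//
+//	a: 1
+//	# trailing comment, no blank line yet
+//
+//	# comment after the blank line
+//	b: 2
+//
+// the first comment is recorded as a foot of "a" and the second as a head of
+// "b", surfacing as FootComment and HeadComment on the decoded Nodes.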
diff --git a/cli/internal/yaml/sorter.go b/cli/internal/yaml/sorter.go
new file mode 100644
index 0000000..9210ece
--- /dev/null
+++ b/cli/internal/yaml/sorter.go
@@ -0,0 +1,134 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "reflect"
+ "unicode"
+)
+
+type keyList []reflect.Value
+
+func (l keyList) Len() int { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l keyList) Less(i, j int) bool {
+ a := l[i]
+ b := l[j]
+ ak := a.Kind()
+ bk := b.Kind()
+ for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+ a = a.Elem()
+ ak = a.Kind()
+ }
+ for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+ b = b.Elem()
+ bk = b.Kind()
+ }
+ af, aok := keyFloat(a)
+ bf, bok := keyFloat(b)
+ if aok && bok {
+ if af != bf {
+ return af < bf
+ }
+ if ak != bk {
+ return ak < bk
+ }
+ return numLess(a, b)
+ }
+ if ak != reflect.String || bk != reflect.String {
+ return ak < bk
+ }
+ ar, br := []rune(a.String()), []rune(b.String())
+ digits := false
+ for i := 0; i < len(ar) && i < len(br); i++ {
+ if ar[i] == br[i] {
+ digits = unicode.IsDigit(ar[i])
+ continue
+ }
+ al := unicode.IsLetter(ar[i])
+ bl := unicode.IsLetter(br[i])
+ if al && bl {
+ return ar[i] < br[i]
+ }
+ if al || bl {
+ if digits {
+ return al
+ } else {
+ return bl
+ }
+ }
+ var ai, bi int
+ var an, bn int64
+ if ar[i] == '0' || br[i] == '0' {
+ for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
+ if ar[j] != '0' {
+ an = 1
+ bn = 1
+ break
+ }
+ }
+ }
+ for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+ an = an*10 + int64(ar[ai]-'0')
+ }
+ for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+ bn = bn*10 + int64(br[bi]-'0')
+ }
+ if an != bn {
+ return an < bn
+ }
+ if ai != bi {
+ return ai < bi
+ }
+ return ar[i] < br[i]
+ }
+ return len(ar) < len(br)
+}
+
+// keyFloat returns a float value for v if it is a number or a bool,
+// and reports whether it was one.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return float64(v.Int()), true
+ case reflect.Float32, reflect.Float64:
+ return v.Float(), true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return float64(v.Uint()), true
+ case reflect.Bool:
+ if v.Bool() {
+ return 1, true
+ }
+ return 0, true
+ }
+ return 0, false
+}
+
+// numLess returns whether a < b.
+// a and b must have the same kind.
+func numLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return a.Int() < b.Int()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ }
+ panic("not a number")
+}
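+
+// Editor's note (illustrative sketch, written from the caller's perspective
+// using this package's exported Marshal): keyList gives map keys a natural,
+// number-aware ordering when documents are encoded, e.g.
+//
+//	out, _ := yaml.Marshal(map[string]int{"a10": 1, "a2": 2})
+//	// out is "a2: 2\na10: 1\n": "a2" sorts before "a10" because the
+//	// embedded numbers are compared numerically, not lexically.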
diff --git a/cli/internal/yaml/writerc.go b/cli/internal/yaml/writerc.go
new file mode 100644
index 0000000..266d0b0
--- /dev/null
+++ b/cli/internal/yaml/writerc.go
@@ -0,0 +1,48 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_WRITER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Flush the output buffer.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+ if emitter.write_handler == nil {
+ panic("write handler not set")
+ }
+
+ // Check if the buffer is empty.
+ if emitter.buffer_pos == 0 {
+ return true
+ }
+
+ if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ return true
+}
diff --git a/cli/internal/yaml/yaml.go b/cli/internal/yaml/yaml.go
new file mode 100644
index 0000000..f0bedf3
--- /dev/null
+++ b/cli/internal/yaml/yaml.go
@@ -0,0 +1,693 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+// https://github.com/go-yaml/yaml
+package yaml
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "sync"
+ "unicode/utf8"
+)
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document.
+type Unmarshaler interface {
+ UnmarshalYAML(value *Node) error
+}
+
+type obsoleteUnmarshaler interface {
+ UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+ MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to a type
+// mismatch, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// var t T
+// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+func Unmarshal(in []byte, out interface{}) (err error) {
+ return unmarshal(in, out, false)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+ parser *parser
+ knownFields bool
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{
+ parser: newParserFromReader(r),
+ }
+}
+
+// KnownFields ensures that the keys in decoded mappings
+// exist as fields in the struct being decoded into.
+func (dec *Decoder) KnownFields(enable bool) {
+ dec.knownFields = enable
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+ d := newDecoder()
+ d.knownFields = dec.knownFields
+ defer handleErr(&err)
+ node := dec.parser.parse()
+ if node == nil {
+ return io.EOF
+ }
+ out := reflect.ValueOf(v)
+ if out.Kind() == reflect.Ptr && !out.IsNil() {
+ out = out.Elem()
+ }
+ d.unmarshal(node, out)
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
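+
+// Illustrative sketch (editor's addition, caller-side): draining a
+// multi-document stream. As the code above shows, Decode returns io.EOF once
+// the input is exhausted.
+//
+//	dec := yaml.NewDecoder(strings.NewReader("a: 1\n---\na: 2\n"))
+//	for {
+//		var doc map[string]int
+//		if err := dec.Decode(&doc); err == io.EOF {
+//			break
+//		} else if err != nil {
+//			log.Fatal(err)
+//		}
+//		fmt.Println(doc["a"]) // prints 1, then 2
+//	}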
+
+// Decode decodes the node and stores its data into the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (n *Node) Decode(v interface{}) (err error) {
+ d := newDecoder()
+ defer handleErr(&err)
+ out := reflect.ValueOf(v)
+ if out.Kind() == reflect.Ptr && !out.IsNil() {
+ out = out.Elem()
+ }
+ d.unmarshal(n, out)
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+ defer handleErr(&err)
+ d := newDecoder()
+ p := newParser(in)
+ defer p.destroy()
+ node := p.parse()
+ if node != nil {
+ v := reflect.ValueOf(out)
+ if v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ d.unmarshal(node, v)
+ }
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+// omitempty Only include the field if it's not set to the zero
+// value for the type or to empty slices or maps.
+// Zero valued structs will be omitted if all their public
+// fields are zero, unless they implement an IsZero
+// method (see the IsZeroer interface type), in which
+// case the field will be excluded if IsZero returns true.
+//
+// flow Marshal using a flow style (useful for structs,
+// sequences and maps).
+//
+// inline Inline the field, which must be a struct or a map,
+// causing all of its fields or keys to be processed as if
+// they were part of the outer struct. For maps, keys must
+// not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+func Marshal(in interface{}) (out []byte, err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshalDoc("", reflect.ValueOf(in))
+ e.finish()
+ out = e.out
+ return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+ encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ encoder: newEncoderWithWriter(w),
+ }
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent document will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+ defer handleErr(&err)
+ e.encoder.marshalDoc("", reflect.ValueOf(v))
+ return nil
+}
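+
+// Illustrative sketch (editor's addition): encoding two documents to one
+// stream; per the doc comment above, the second is preceded by a "---"
+// separator.
+//
+//	var buf bytes.Buffer
+//	enc := yaml.NewEncoder(&buf)
+//	_ = enc.Encode(map[string]int{"a": 1})
+//	_ = enc.Encode(map[string]int{"b": 2})
+//	_ = enc.Close()
+//	// buf.String() is "a: 1\n---\nb: 2\n"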
+
+// Encode encodes value v and stores its representation in n.
+//
+// See the documentation for Marshal for details about the
+// conversion of Go values into YAML.
+func (n *Node) Encode(v interface{}) (err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshalDoc("", reflect.ValueOf(v))
+ e.finish()
+ p := newParser(e.out)
+ p.textless = true
+ defer p.destroy()
+ doc := p.parse()
+ *n = *doc.Content[0]
+ return nil
+}
+
+// SetIndent changes the indentation used when encoding.
+func (e *Encoder) SetIndent(spaces int) {
+ if spaces < 0 {
+ panic("yaml: cannot indent to a negative number of spaces")
+ }
+ e.encoder.indent = spaces
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+ defer handleErr(&err)
+ e.encoder.finish()
+ return nil
+}
+
+func handleErr(err *error) {
+ if v := recover(); v != nil {
+ if e, ok := v.(yamlError); ok {
+ *err = e.err
+ } else {
+ panic(v)
+ }
+ }
+}
+
+type yamlError struct {
+ err error
+}
+
+func fail(err error) {
+ panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+ panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+ Errors []string
+}
+
+func (e *TypeError) Error() string {
+ return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
+}
+
+type Kind uint32
+
+const (
+ DocumentNode Kind = 1 << iota
+ SequenceNode
+ MappingNode
+ ScalarNode
+ AliasNode
+)
+
+type Style uint32
+
+const (
+ TaggedStyle Style = 1 << iota
+ DoubleQuotedStyle
+ SingleQuotedStyle
+ LiteralStyle
+ FoldedStyle
+ FlowStyle
+)
+
+// Node represents an element in the YAML document hierarchy. While documents
+// are typically encoded and decoded into higher level types, such as structs
+// and maps, Node is an intermediate representation that allows detailed
+// control over the content being decoded or encoded.
+//
+// It's worth noting that although Node offers access into details such as
+// line numbers, columns, and comments, the content when re-encoded will not
+// have its original textual representation preserved. An effort is made to
+// render the data pleasantly, and to preserve comments near the data they
+// describe, though.
+//
+// Values that make use of the Node type interact with the yaml package in the
+// same way any other type would do, by encoding and decoding yaml data
+// directly or indirectly into them.
+//
+// For example:
+//
+// var person struct {
+// Name string
+// Address yaml.Node
+// }
+// err := yaml.Unmarshal(data, &person)
+//
+// Or by itself:
+//
+// var person Node
+// err := yaml.Unmarshal(data, &person)
+type Node struct {
+ // Kind defines whether the node is a document, a mapping, a sequence,
+ // a scalar value, or an alias to another node. The specific data type of
+ // scalar nodes may be obtained via the ShortTag and LongTag methods.
+ Kind Kind
+
+ // Style allows customizing the appearance of the node in the tree.
+ Style Style
+
+ // Tag holds the YAML tag defining the data type for the value.
+ // When decoding, this field will always be set to the resolved tag,
+ // even when it wasn't explicitly provided in the YAML content.
+ // When encoding, if this field is unset the value type will be
+ // implied from the node properties, and if it is set, it will only
+ // be serialized into the representation if TaggedStyle is used or
+ // the implicit tag diverges from the provided one.
+ Tag string
+
+ // Value holds the unescaped and unquoted representation of the value.
+ Value string
+
+ // Anchor holds the anchor name for this node, which allows aliases to point to it.
+ Anchor string
+
+ // Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
+ Alias *Node
+
+ // Content holds contained nodes for documents, mappings, and sequences.
+ Content []*Node
+
+ // HeadComment holds any comments in the lines preceding the node and
+ // not separated by an empty line.
+ HeadComment string
+
+ // LineComment holds any comments at the end of the line where the node is in.
+ LineComment string
+
+ // FootComment holds any comments following the node and before empty lines.
+ FootComment string
+
+ // Line and Column hold the node position in the decoded YAML text.
+ // These fields are not respected when encoding the node.
+ Line int
+ Column int
+}
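+
+// Illustrative sketch (editor's addition, assuming the upstream go-yaml v3
+// behavior where a top-level decode into a Node yields the DocumentNode):
+// a mapping node's Content alternates key and value nodes.
+//
+//	var n yaml.Node
+//	_ = yaml.Unmarshal([]byte("items:\n  - 1\n  - 2\n"), &n)
+//	root := n.Content[0]   // the mapping under the DocumentNode
+//	seq := root.Content[1] // value node for the "items" key
+//	fmt.Println(seq.Kind == yaml.SequenceNode, len(seq.Content)) // true 2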
+
+// IsZero returns whether the node has all of its fields unset.
+func (n *Node) IsZero() bool {
+ return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil &&
+ n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0
+}
+
+// LongTag returns the long form of the tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) LongTag() string {
+ return longTag(n.ShortTag())
+}
+
+// ShortTag returns the short form of the YAML tag that indicates data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) ShortTag() string {
+ if n.indicatedString() {
+ return strTag
+ }
+ if n.Tag == "" || n.Tag == "!" {
+ switch n.Kind {
+ case MappingNode:
+ return mapTag
+ case SequenceNode:
+ return seqTag
+ case AliasNode:
+ if n.Alias != nil {
+ return n.Alias.ShortTag()
+ }
+ case ScalarNode:
+ tag, _ := resolve("", n.Value)
+ return tag
+ case 0:
+ // Special case to make the zero value convenient.
+ if n.IsZero() {
+ return nullTag
+ }
+ }
+ return ""
+ }
+ return shortTag(n.Tag)
+}
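+
+// For example (editor's note): a plain ScalarNode with Value "123" and an
+// empty Tag resolves via ShortTag to "!!int", which LongTag expands to
+// "tag:yaml.org,2002:int".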
+
+func (n *Node) indicatedString() bool {
+ return n.Kind == ScalarNode &&
+ (shortTag(n.Tag) == strTag ||
+ (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0)
+}
+
+// SetString is a convenience function that sets the node to a string value
+// and defines its style in a pleasant way depending on its content.
+func (n *Node) SetString(s string) {
+ n.Kind = ScalarNode
+ if utf8.ValidString(s) {
+ n.Value = s
+ n.Tag = strTag
+ } else {
+ n.Value = encodeBase64(s)
+ n.Tag = binaryTag
+ }
+ if strings.Contains(n.Value, "\n") {
+ n.Style = LiteralStyle
+ }
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+ FieldsMap map[string]fieldInfo
+ FieldsList []fieldInfo
+
+ // InlineMap is the number of the field in the struct that
+ // contains an ,inline map, or -1 if there's none.
+ InlineMap int
+
+ // InlineUnmarshalers holds indexes to inlined fields that
+ // contain unmarshaler values.
+ InlineUnmarshalers [][]int
+}
+
+type fieldInfo struct {
+ Key string
+ Num int
+ OmitEmpty bool
+ Flow bool
+ // Id holds the unique field identifier, so we can cheaply
+ // check for field duplicates without maintaining an extra map.
+ Id int
+
+ // Inline holds the field index if the field is part of an inlined struct.
+ Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+var unmarshalerType reflect.Type
+
+func init() {
+ var v Unmarshaler
+ unmarshalerType = reflect.ValueOf(&v).Elem().Type()
+}
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+ fieldMapMutex.RLock()
+ sinfo, found := structMap[st]
+ fieldMapMutex.RUnlock()
+ if found {
+ return sinfo, nil
+ }
+
+ n := st.NumField()
+ fieldsMap := make(map[string]fieldInfo)
+ fieldsList := make([]fieldInfo, 0, n)
+ inlineMap := -1
+ inlineUnmarshalers := [][]int(nil)
+ for i := 0; i != n; i++ {
+ field := st.Field(i)
+ if field.PkgPath != "" && !field.Anonymous {
+ continue // Private field
+ }
+
+ info := fieldInfo{Num: i}
+
+ tag := field.Tag.Get("yaml")
+ if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+ tag = string(field.Tag)
+ }
+ if tag == "-" {
+ continue
+ }
+
+ inline := false
+ fields := strings.Split(tag, ",")
+ if len(fields) > 1 {
+ for _, flag := range fields[1:] {
+ switch flag {
+ case "omitempty":
+ info.OmitEmpty = true
+ case "flow":
+ info.Flow = true
+ case "inline":
+ inline = true
+ default:
+ return nil, fmt.Errorf("unsupported flag %q in tag %q of type %s", flag, tag, st)
+ }
+ }
+ tag = fields[0]
+ }
+
+ if inline {
+ switch field.Type.Kind() {
+ case reflect.Map:
+ if inlineMap >= 0 {
+ return nil, errors.New("multiple ,inline maps in struct " + st.String())
+ }
+ if field.Type.Key() != reflect.TypeOf("") {
+ return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String())
+ }
+ inlineMap = info.Num
+ case reflect.Struct, reflect.Ptr:
+ ftype := field.Type
+ for ftype.Kind() == reflect.Ptr {
+ ftype = ftype.Elem()
+ }
+ if ftype.Kind() != reflect.Struct {
+ return nil, errors.New("option ,inline may only be used on a struct or map field")
+ }
+ if reflect.PtrTo(ftype).Implements(unmarshalerType) {
+ inlineUnmarshalers = append(inlineUnmarshalers, []int{i})
+ } else {
+ sinfo, err := getStructInfo(ftype)
+ if err != nil {
+ return nil, err
+ }
+ for _, index := range sinfo.InlineUnmarshalers {
+ inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...))
+ }
+ for _, finfo := range sinfo.FieldsList {
+ if _, found := fieldsMap[finfo.Key]; found {
+ msg := "duplicated key '" + finfo.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+ if finfo.Inline == nil {
+ finfo.Inline = []int{i, finfo.Num}
+ } else {
+ finfo.Inline = append([]int{i}, finfo.Inline...)
+ }
+ finfo.Id = len(fieldsList)
+ fieldsMap[finfo.Key] = finfo
+ fieldsList = append(fieldsList, finfo)
+ }
+ }
+ default:
+ return nil, errors.New("option ,inline may only be used on a struct or map field")
+ }
+ continue
+ }
+
+ if tag != "" {
+ info.Key = tag
+ } else {
+ info.Key = strings.ToLower(field.Name)
+ }
+
+ if _, found = fieldsMap[info.Key]; found {
+ msg := "duplicated key '" + info.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+
+ info.Id = len(fieldsList)
+ fieldsList = append(fieldsList, info)
+ fieldsMap[info.Key] = info
+ }
+
+ sinfo = &structInfo{
+ FieldsMap: fieldsMap,
+ FieldsList: fieldsList,
+ InlineMap: inlineMap,
+ InlineUnmarshalers: inlineUnmarshalers,
+ }
+
+ fieldMapMutex.Lock()
+ structMap[st] = sinfo
+ fieldMapMutex.Unlock()
+ return sinfo, nil
+}
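+
+// Illustrative sketch (editor's addition): for the tag handling above,
+//
+//	type S struct {
+//		A int            `yaml:"a,omitempty"`
+//		B string
+//		M map[string]int `yaml:",inline"`
+//	}
+//
+// getStructInfo records Key "a" with OmitEmpty set, Key "b" (the lowercased
+// field name), and InlineMap = 2 for the inlined map field.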
+
+// IsZeroer is used to check whether an object is zero to
+// determine whether it should be omitted when marshaling
+// with the omitempty flag. One notable implementation
+// is time.Time.
+type IsZeroer interface {
+ IsZero() bool
+}
+
+func isZero(v reflect.Value) bool {
+ kind := v.Kind()
+ if z, ok := v.Interface().(IsZeroer); ok {
+ if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+ return true
+ }
+ return z.IsZero()
+ }
+ switch kind {
+ case reflect.String:
+ return len(v.String()) == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ case reflect.Slice:
+ return v.Len() == 0
+ case reflect.Map:
+ return v.Len() == 0
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Struct:
+ vt := v.Type()
+ for i := v.NumField() - 1; i >= 0; i-- {
+ if vt.Field(i).PkgPath != "" {
+ continue // Private field
+ }
+ if !isZero(v.Field(i)) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
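+
+// For example (editor's note): time.Time implements IsZero, so a time.Time
+// field tagged with omitempty is omitted from the output whenever the time
+// is its zero value, via the IsZeroer branch above.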
diff --git a/cli/internal/yaml/yamlh.go b/cli/internal/yaml/yamlh.go
new file mode 100644
index 0000000..ddcd551
--- /dev/null
+++ b/cli/internal/yaml/yamlh.go
@@ -0,0 +1,809 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "fmt"
+ "io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+ major int8 // The major version number.
+ minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+ handle []byte // The tag handle.
+ prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+ // Let the parser choose the encoding.
+ yaml_ANY_ENCODING yaml_encoding_t = iota
+
+ yaml_UTF8_ENCODING // The default UTF-8 encoding.
+ yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+ yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+ // Let the parser choose the break type.
+ yaml_ANY_BREAK yaml_break_t = iota
+
+ yaml_CR_BREAK // Use CR for line breaks (Mac style).
+ yaml_LN_BREAK // Use LN for line breaks (Unix style).
+ yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+ // No error is produced.
+ yaml_NO_ERROR yaml_error_type_t = iota
+
+ yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
+ yaml_READER_ERROR // Cannot read or decode the input stream.
+ yaml_SCANNER_ERROR // Cannot scan the input stream.
+ yaml_PARSER_ERROR // Cannot parse the input stream.
+ yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+ yaml_WRITER_ERROR // Cannot write to the output stream.
+ yaml_EMITTER_ERROR // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+ index int // The position index.
+ line int // The position line.
+ column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0
+
+ yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style.
+ yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+ yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+ yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
+ yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+ yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+ yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+ yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+ yaml_FLOW_MAPPING_STYLE // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+ // An empty token.
+ yaml_NO_TOKEN yaml_token_type_t = iota
+
+ yaml_STREAM_START_TOKEN // A STREAM-START token.
+ yaml_STREAM_END_TOKEN // A STREAM-END token.
+
+ yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+ yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
+ yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
+ yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
+
+ yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+ yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
+ yaml_BLOCK_END_TOKEN // A BLOCK-END token.
+
+ yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+ yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
+ yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
+ yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
+
+ yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+ yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
+ yaml_KEY_TOKEN // A KEY token.
+ yaml_VALUE_TOKEN // A VALUE token.
+
+ yaml_ALIAS_TOKEN // An ALIAS token.
+ yaml_ANCHOR_TOKEN // An ANCHOR token.
+ yaml_TAG_TOKEN // A TAG token.
+ yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+ switch tt {
+ case yaml_NO_TOKEN:
+ return "yaml_NO_TOKEN"
+ case yaml_STREAM_START_TOKEN:
+ return "yaml_STREAM_START_TOKEN"
+ case yaml_STREAM_END_TOKEN:
+ return "yaml_STREAM_END_TOKEN"
+ case yaml_VERSION_DIRECTIVE_TOKEN:
+ return "yaml_VERSION_DIRECTIVE_TOKEN"
+ case yaml_TAG_DIRECTIVE_TOKEN:
+ return "yaml_TAG_DIRECTIVE_TOKEN"
+ case yaml_DOCUMENT_START_TOKEN:
+ return "yaml_DOCUMENT_START_TOKEN"
+ case yaml_DOCUMENT_END_TOKEN:
+ return "yaml_DOCUMENT_END_TOKEN"
+ case yaml_BLOCK_SEQUENCE_START_TOKEN:
+ return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+ case yaml_BLOCK_MAPPING_START_TOKEN:
+ return "yaml_BLOCK_MAPPING_START_TOKEN"
+ case yaml_BLOCK_END_TOKEN:
+ return "yaml_BLOCK_END_TOKEN"
+ case yaml_FLOW_SEQUENCE_START_TOKEN:
+ return "yaml_FLOW_SEQUENCE_START_TOKEN"
+ case yaml_FLOW_SEQUENCE_END_TOKEN:
+ return "yaml_FLOW_SEQUENCE_END_TOKEN"
+ case yaml_FLOW_MAPPING_START_TOKEN:
+ return "yaml_FLOW_MAPPING_START_TOKEN"
+ case yaml_FLOW_MAPPING_END_TOKEN:
+ return "yaml_FLOW_MAPPING_END_TOKEN"
+ case yaml_BLOCK_ENTRY_TOKEN:
+ return "yaml_BLOCK_ENTRY_TOKEN"
+ case yaml_FLOW_ENTRY_TOKEN:
+ return "yaml_FLOW_ENTRY_TOKEN"
+ case yaml_KEY_TOKEN:
+ return "yaml_KEY_TOKEN"
+ case yaml_VALUE_TOKEN:
+ return "yaml_VALUE_TOKEN"
+ case yaml_ALIAS_TOKEN:
+ return "yaml_ALIAS_TOKEN"
+ case yaml_ANCHOR_TOKEN:
+ return "yaml_ANCHOR_TOKEN"
+ case yaml_TAG_TOKEN:
+ return "yaml_TAG_TOKEN"
+ case yaml_SCALAR_TOKEN:
+ return "yaml_SCALAR_TOKEN"
+ }
+ return "<unknown token>"
+}
+
+// The token structure.
+type yaml_token_t struct {
+ // The token type.
+ typ yaml_token_type_t
+
+ // The start/end of the token.
+ start_mark, end_mark yaml_mark_t
+
+ // The stream encoding (for yaml_STREAM_START_TOKEN).
+ encoding yaml_encoding_t
+
+ // The alias/anchor/scalar value or tag/tag directive handle
+ // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+ value []byte
+
+ // The tag suffix (for yaml_TAG_TOKEN).
+ suffix []byte
+
+ // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+ prefix []byte
+
+ // The scalar style (for yaml_SCALAR_TOKEN).
+ style yaml_scalar_style_t
+
+ // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+ major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+ // An empty event.
+ yaml_NO_EVENT yaml_event_type_t = iota
+
+ yaml_STREAM_START_EVENT // A STREAM-START event.
+ yaml_STREAM_END_EVENT // A STREAM-END event.
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+ yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
+ yaml_ALIAS_EVENT // An ALIAS event.
+ yaml_SCALAR_EVENT // A SCALAR event.
+ yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+ yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
+ yaml_MAPPING_START_EVENT // A MAPPING-START event.
+ yaml_MAPPING_END_EVENT // A MAPPING-END event.
+ yaml_TAIL_COMMENT_EVENT
+)
+
+var eventStrings = []string{
+ yaml_NO_EVENT: "none",
+ yaml_STREAM_START_EVENT: "stream start",
+ yaml_STREAM_END_EVENT: "stream end",
+ yaml_DOCUMENT_START_EVENT: "document start",
+ yaml_DOCUMENT_END_EVENT: "document end",
+ yaml_ALIAS_EVENT: "alias",
+ yaml_SCALAR_EVENT: "scalar",
+ yaml_SEQUENCE_START_EVENT: "sequence start",
+ yaml_SEQUENCE_END_EVENT: "sequence end",
+ yaml_MAPPING_START_EVENT: "mapping start",
+ yaml_MAPPING_END_EVENT: "mapping end",
+ yaml_TAIL_COMMENT_EVENT: "tail comment",
+}
+
+func (e yaml_event_type_t) String() string {
+ if e < 0 || int(e) >= len(eventStrings) {
+ return fmt.Sprintf("unknown event %d", e)
+ }
+ return eventStrings[e]
+}
+
+// The event structure.
+type yaml_event_t struct {
+
+ // The event type.
+ typ yaml_event_type_t
+
+ // The start and end of the event.
+ start_mark, end_mark yaml_mark_t
+
+ // The document encoding (for yaml_STREAM_START_EVENT).
+ encoding yaml_encoding_t
+
+ // The version directive (for yaml_DOCUMENT_START_EVENT).
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+ tag_directives []yaml_tag_directive_t
+
+ // The comments
+ head_comment []byte
+ line_comment []byte
+ foot_comment []byte
+ tail_comment []byte
+
+ // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+ anchor []byte
+
+ // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ tag []byte
+
+ // The scalar value (for yaml_SCALAR_EVENT).
+ value []byte
+
+ // Is the document start/end indicator implicit, or the tag optional?
+ // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+ implicit bool
+
+ // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+ quoted_implicit bool
+
+ // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+ yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
+ yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
+ yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
+ yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
+ yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
+ yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+ yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+ yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
+
+ // Not in original libyaml.
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+ yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
+
+ yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
+ yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+ yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+ // An empty node.
+ yaml_NO_NODE yaml_node_type_t = iota
+
+ yaml_SCALAR_NODE // A scalar node.
+ yaml_SEQUENCE_NODE // A sequence node.
+ yaml_MAPPING_NODE // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+ key int // The key of the element.
+ value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+ typ yaml_node_type_t // The node type.
+ tag []byte // The node tag.
+
+ // The node data.
+
+ // The scalar parameters (for yaml_SCALAR_NODE).
+ scalar struct {
+ value []byte // The scalar value.
+ length int // The length of the scalar value.
+ style yaml_scalar_style_t // The scalar style.
+ }
+
+ // The sequence parameters (for YAML_SEQUENCE_NODE).
+ sequence struct {
+ items_data []yaml_node_item_t // The stack of sequence items.
+ style yaml_sequence_style_t // The sequence style.
+ }
+
+ // The mapping parameters (for yaml_MAPPING_NODE).
+ mapping struct {
+ pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
+ pairs_start *yaml_node_pair_t // The beginning of the stack.
+ pairs_end *yaml_node_pair_t // The end of the stack.
+ pairs_top *yaml_node_pair_t // The top of the stack.
+ style yaml_mapping_style_t // The mapping style.
+ }
+
+ start_mark yaml_mark_t // The beginning of the node.
+ end_mark yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+ // The document nodes.
+ nodes []yaml_node_t
+
+ // The version directive.
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives.
+ tag_directives_data []yaml_tag_directive_t
+ tag_directives_start int // The beginning of the tag directives list.
+ tag_directives_end int // The end of the tag directives list.
+
+ start_implicit int // Is the document start indicator implicit?
+ end_implicit int // Is the document end indicator implicit?
+
+ // The start/end of the document.
+ start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from
+// the input source. It should fill buffer with up to len(buffer) bytes and
+// return the number of bytes written.
+//
+// On success the handler returns the byte count and a nil error. On failure
+// it returns a non-nil error, and at the end of the input it returns io.EOF
+// (optionally together with the final bytes read).
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
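+
+// Editor's note (illustrative sketch mirroring the string-input handler used
+// elsewhere in this package): a minimal read handler backed by parser.input.
+//
+//	parser.read_handler = func(p *yaml_parser_t, buffer []byte) (int, error) {
+//		n := copy(buffer, p.input[p.input_pos:])
+//		p.input_pos += n
+//		if n == 0 {
+//			return 0, io.EOF
+//		}
+//		return n, nil
+//	}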
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+ possible bool // Is a simple key possible?
+ required bool // Is a simple key required?
+ token_number int // The number of the token.
+ mark yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+ yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+ yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
+ yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
+ yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
+ yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+ yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+ yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+ yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+ yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+ yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
+ yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+ yaml_PARSE_END_STATE // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+ switch ps {
+ case yaml_PARSE_STREAM_START_STATE:
+ return "yaml_PARSE_STREAM_START_STATE"
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return "yaml_PARSE_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return "yaml_PARSE_DOCUMENT_END_STATE"
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_STATE"
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return "yaml_PARSE_FLOW_NODE_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+ case yaml_PARSE_END_STATE:
+ return "yaml_PARSE_END_STATE"
+ }
+ return "<unknown parser state>"
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+ anchor []byte // The anchor.
+ index int // The node id.
+ mark yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+
+ problem string // Error description.
+
+ // The byte about which the problem occurred.
+ problem_offset int
+ problem_value int
+ problem_mark yaml_mark_t
+
+ // The error context.
+ context string
+ context_mark yaml_mark_t
+
+ // Reader stuff
+
+ read_handler yaml_read_handler_t // Read handler.
+
+ input_reader io.Reader // File input data.
+ input []byte // String input data.
+ input_pos int
+
+ eof bool // EOF flag
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ unread int // The number of unread characters in the buffer.
+
+ newlines int // The number of line breaks since last non-break/non-blank character
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The input encoding.
+
+ offset int // The offset of the current position (in bytes).
+ mark yaml_mark_t // The mark of the current position.
+
+ // Comments
+
+ head_comment []byte // The current head comments
+ line_comment []byte // The current line comments
+ foot_comment []byte // The current foot comments
+ tail_comment []byte // Foot comment that happens at the end of a block.
+ stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc)
+
+ comments []yaml_comment_t // The folded comments for all parsed tokens
+ comments_head int
+
+ // Scanner stuff
+
+ stream_start_produced bool // Have we started to scan the input stream?
+ stream_end_produced bool // Have we reached the end of the input stream?
+
+ flow_level int // The number of unclosed '[' and '{' indicators.
+
+ tokens []yaml_token_t // The tokens queue.
+ tokens_head int // The head of the tokens queue.
+ tokens_parsed int // The number of tokens fetched from the queue.
+ token_available bool // Does the tokens queue contain a token ready for dequeueing.
+
+ indent int // The current indentation level.
+ indents []int // The indentation levels stack.
+
+ simple_key_allowed bool // May a simple key occur at the current position?
+ simple_keys []yaml_simple_key_t // The stack of simple keys.
+ simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number
+
+ // Parser stuff
+
+ state yaml_parser_state_t // The current parser state.
+ states []yaml_parser_state_t // The parser states stack.
+ marks []yaml_mark_t // The stack of marks.
+ tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+ // Dumper stuff
+
+ aliases []yaml_alias_data_t // The alias data.
+
+ document *yaml_document_t // The currently parsed document.
+}
+
+type yaml_comment_t struct {
+ scan_mark yaml_mark_t // Position where scanning for comments started
+ token_mark yaml_mark_t // Position after which tokens will be associated with this comment
+ start_mark yaml_mark_t // Position of '#' comment mark
+ end_mark yaml_mark_t // Position where comment terminated
+
+ head []byte
+ line []byte
+ foot []byte
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. It should write the whole buffer to the output
+// and return nil on success, or a non-nil error on failure.
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
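+
+// Editor's note (illustrative sketch): a minimal write handler that appends
+// to the emitter's output buffer, matching the yaml_write_handler_t shape.
+//
+//	emitter.write_handler = func(e *yaml_emitter_t, buffer []byte) error {
+//		*e.output_buffer = append(*e.output_buffer, buffer...)
+//		return nil
+//	}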
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+ // Expect STREAM-START.
+ yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+ yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
+ yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out
+ yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
+ yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out
+ yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
+ yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
+ yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
+ yaml_EMIT_END_STATE // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the @c yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+ problem string // Error description.
+
+ // Writer stuff
+
+ write_handler yaml_write_handler_t // Write handler.
+
+ output_buffer *[]byte // String output data.
+ output_writer io.Writer // File output data.
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ raw_buffer []byte // The raw buffer.
+	raw_buffer_pos int // The current position of the raw buffer.
+
+ encoding yaml_encoding_t // The stream encoding.
+
+ // Emitter stuff
+
+	canonical   bool         // Is the output in the canonical style?
+	best_indent int          // The number of indentation spaces.
+	best_width  int          // The preferred width of the output lines.
+	unicode     bool         // Allow unescaped non-ASCII characters?
+	line_break  yaml_break_t // The preferred line break.
+
+ state yaml_emitter_state_t // The current emitter state.
+ states []yaml_emitter_state_t // The stack of states.
+
+ events []yaml_event_t // The event queue.
+ events_head int // The head of the event queue.
+
+ indents []int // The stack of indentation levels.
+
+ tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+ indent int // The current indentation level.
+
+ flow_level int // The current flow level.
+
+ root_context bool // Is it the document root context?
+ sequence_context bool // Is it a sequence context?
+ mapping_context bool // Is it a mapping context?
+ simple_key_context bool // Is it a simple mapping key context?
+
+ line int // The current line.
+ column int // The current column.
+	whitespace bool // Was the last character a whitespace?
+	indention  bool // Was the last character an indentation character (' ', '-', '?', ':')?
+	open_ended bool // Is an explicit document end required?
+
+	space_above bool // Is there an empty line above?
+ foot_indent int // The indent used to write the foot comment above, or -1 if none.
+
+ // Anchor analysis.
+ anchor_data struct {
+ anchor []byte // The anchor value.
+ alias bool // Is it an alias?
+ }
+
+ // Tag analysis.
+ tag_data struct {
+ handle []byte // The tag handle.
+ suffix []byte // The tag suffix.
+ }
+
+ // Scalar analysis.
+ scalar_data struct {
+ value []byte // The scalar value.
+ multiline bool // Does the scalar contain line breaks?
+		flow_plain_allowed    bool // Can the scalar be expressed in the flow plain style?
+ block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+ single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+ block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+ style yaml_scalar_style_t // The output style.
+ }
+
+ // Comments
+ head_comment []byte
+ line_comment []byte
+ foot_comment []byte
+ tail_comment []byte
+
+ key_line_comment []byte
+
+ // Dumper stuff
+
+	opened bool // Was the stream already opened?
+	closed bool // Was the stream already closed?
+
+ // The information associated with the document nodes.
+ anchors *struct {
+ references int // The number of references.
+ anchor int // The anchor id.
+		serialized bool // Has the node been emitted?
+ }
+
+ last_anchor_id int // The last assigned anchor id.
+
+ document *yaml_document_t // The currently emitted document.
+}
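+
+// For orientation only (not upstream code): a hypothetical sketch wiring an
+// emitter for io.Writer output by hand, reusing the writer_write_handler
+// sketch above. Buffer capacities come from yamlprivateh.go; in practice the
+// yaml_emitter_ family of functions performs this initialization.
+func new_writer_emitter(w io.Writer) *yaml_emitter_t {
+	return &yaml_emitter_t{
+		output_writer: w,
+		write_handler: writer_write_handler(w),
+		buffer:        make([]byte, output_buffer_size),
+		raw_buffer:    make([]byte, 0, output_raw_buffer_size),
+		states:        make([]yaml_emitter_state_t, 0, initial_stack_size),
+		events:        make([]yaml_event_t, 0, initial_queue_size),
+		best_width:    -1, // assumption: mirrors the initializer's default
+	}
+}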
diff --git a/cli/internal/yaml/yamlprivateh.go b/cli/internal/yaml/yamlprivateh.go
new file mode 100644
index 0000000..dea1ba9
--- /dev/null
+++ b/cli/internal/yaml/yamlprivateh.go
@@ -0,0 +1,198 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+const (
+ // The size of the input raw buffer.
+ input_raw_buffer_size = 512
+
+ // The size of the input buffer.
+ // It should be possible to decode the whole raw buffer.
+ input_buffer_size = input_raw_buffer_size * 3
+
+ // The size of the output buffer.
+ output_buffer_size = 128
+
+ // The size of the output raw buffer.
+ // It should be possible to encode the whole output buffer.
+ output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+ // The size of other stacks and queues.
+ initial_stack_size = 16
+ initial_queue_size = 16
+ initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+ return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+ bi := b[i]
+ if bi >= 'A' && bi <= 'F' {
+ return int(bi) - 'A' + 10
+ }
+ if bi >= 'a' && bi <= 'f' {
+ return int(bi) - 'a' + 10
+ }
+ return int(bi) - '0'
+}
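+
+// For orientation only (not upstream code): a typical use of is_hex/as_hex is
+// decoding a two-digit hex escape such as "\x41" once the scanner sits on its
+// first digit. decode_hex_byte is a hypothetical helper.
+func decode_hex_byte(b []byte, i int) (byte, bool) {
+	if i+1 >= len(b) || !is_hex(b, i) || !is_hex(b, i+1) {
+		return 0, false
+	}
+	return byte(as_hex(b, i)<<4 | as_hex(b, i+1)), true
+}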
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+ return b[i] <= 0x7F
+}
+
+// Check if the character at the specified position can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+ return ((b[i] == 0x0A) || // . == #x0A
+ (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
+ (b[i] > 0xC2 && b[i] < 0xED) ||
+ (b[i] == 0xED && b[i+1] < 0xA0) ||
+ (b[i] == 0xEE) ||
+ (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+ !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+ !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+ return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM. Note that the position
+// argument is ignored: the check is always against the start of the buffer.
+func is_bom(b []byte, i int) bool {
+ return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+ return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+ return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+ //return is_space(b, i) || is_tab(b, i)
+ return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+ return (b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+ return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+ //return is_break(b, i) || is_z(b, i)
+ return (
+ // is_break:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ // is_z:
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+ //return is_space(b, i) || is_breakz(b, i)
+ return (
+ // is_space:
+ b[i] == ' ' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+ //return is_blank(b, i) || is_breakz(b, i)
+ return (
+ // is_blank:
+ b[i] == ' ' || b[i] == '\t' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Determine the width in bytes of a UTF-8 character from its leading byte.
+func width(b byte) int {
+ // Don't replace these by a switch without first
+ // confirming that it is being inlined.
+ if b&0x80 == 0x00 {
+ return 1
+ }
+ if b&0xE0 == 0xC0 {
+ return 2
+ }
+ if b&0xF0 == 0xE0 {
+ return 3
+ }
+ if b&0xF8 == 0xF0 {
+ return 4
+ }
+ return 0
+}
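+
+// For orientation only (not upstream code): width lets a scanner step through
+// a UTF-8 buffer one character at a time. each_char_start is a hypothetical
+// helper; it advances a single byte when width reports 0 so malformed input
+// cannot stall the loop.
+func each_char_start(b []byte, f func(i int)) {
+	for i := 0; i < len(b); {
+		f(i)
+		w := width(b[i])
+		if w == 0 {
+			w = 1 // invalid leading byte: skip it and resynchronize
+		}
+		i += w
+	}
+}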