diff options
Diffstat (limited to 'cli/internal/context')
8 files changed, 688 insertions, 0 deletions
diff --git a/cli/internal/context/context.go b/cli/internal/context/context.go new file mode 100644 index 0000000..2376d2d --- /dev/null +++ b/cli/internal/context/context.go @@ -0,0 +1,480 @@ +package context + +import ( + "fmt" + "path/filepath" + "sort" + "strings" + "sync" + + "github.com/hashicorp/go-multierror" + "github.com/vercel/turbo/cli/internal/core" + "github.com/vercel/turbo/cli/internal/fs" + "github.com/vercel/turbo/cli/internal/lockfile" + "github.com/vercel/turbo/cli/internal/packagemanager" + "github.com/vercel/turbo/cli/internal/turbopath" + "github.com/vercel/turbo/cli/internal/util" + "github.com/vercel/turbo/cli/internal/workspace" + + "github.com/Masterminds/semver" + mapset "github.com/deckarep/golang-set" + "github.com/pyr-sh/dag" + "golang.org/x/sync/errgroup" +) + +// Warnings Error type for errors that don't prevent the creation of a functional Context +type Warnings struct { + warns *multierror.Error + mu sync.Mutex +} + +var _ error = (*Warnings)(nil) + +func (w *Warnings) Error() string { + return w.warns.Error() +} + +func (w *Warnings) errorOrNil() error { + if w.warns != nil { + return w + } + return nil +} + +func (w *Warnings) append(err error) { + w.mu.Lock() + defer w.mu.Unlock() + w.warns = multierror.Append(w.warns, err) +} + +// Context of the CLI +type Context struct { + // WorkspaceInfos contains the contents of package.json for every workspace + // TODO(gsoltis): should the RootPackageJSON be included in WorkspaceInfos? 
+ WorkspaceInfos workspace.Catalog + + // WorkspaceNames is all the names of the workspaces + WorkspaceNames []string + + // WorkspaceGraph is a graph of workspace dependencies + // (based on package.json dependencies and devDependencies) + WorkspaceGraph dag.AcyclicGraph + + // RootNode is a sigil identifying the root workspace + RootNode string + + // Lockfile is a struct to read the lockfile based on the package manager + Lockfile lockfile.Lockfile + + // PackageManager is an abstraction for all the info a package manager + // can give us about the repo. + PackageManager *packagemanager.PackageManager + + // Used to arbitrate access to the graph. We parallelise most build operations + // and Go maps aren't natively threadsafe so this is needed. + mutex sync.Mutex +} + +// Splits "npm:^1.2.3" and "github:foo/bar.git" into a protocol part and a version part. +func parseDependencyProtocol(version string) (string, string) { + parts := strings.Split(version, ":") + if len(parts) == 1 { + return "", parts[0] + } + + return parts[0], strings.Join(parts[1:], ":") +} + +func isProtocolExternal(protocol string) bool { + // The npm protocol for yarn by default still uses the workspace package if the workspace + // version is in a compatible semver range. See https://github.com/yarnpkg/berry/discussions/4015 + // For now, we will just assume if the npm protocol is being used and the version matches + // its an internal dependency which matches the existing behavior before this additional + // logic was added. 
+ + // TODO: extend this to support the `enableTransparentWorkspaces` yarn option + return protocol != "" && protocol != "npm" +} + +func isWorkspaceReference(packageVersion string, dependencyVersion string, cwd string, rootpath string) bool { + protocol, dependencyVersion := parseDependencyProtocol(dependencyVersion) + + if protocol == "workspace" { + // TODO: Since support at the moment is non-existent for workspaces that contain multiple + // versions of the same package name, just assume it's a match and don't check the range + // for an exact match. + return true + } else if protocol == "file" || protocol == "link" { + abs, err := filepath.Abs(filepath.Join(cwd, dependencyVersion)) + if err != nil { + // Default to internal if we have the package but somehow cannot get the path + // TODO(gsoltis): log this? + return true + } + isWithinRepo, err := fs.DirContainsPath(rootpath, filepath.FromSlash(abs)) + if err != nil { + // Default to internal if we have the package but somehow cannot get the path + // TODO(gsoltis): log this? + return true + } + return isWithinRepo + } else if isProtocolExternal(protocol) { + // Other protocols are assumed to be external references ("github:", etc) + return false + } else if dependencyVersion == "*" { + return true + } + + // If we got this far, then we need to check the workspace package version to see it satisfies + // the dependencies range to determine whether or not it's an internal or external dependency. + + constraint, constraintErr := semver.NewConstraint(dependencyVersion) + pkgVersion, packageVersionErr := semver.NewVersion(packageVersion) + if constraintErr != nil || packageVersionErr != nil { + // For backwards compatibility with existing behavior, if we can't parse the version then we + // treat the dependency as an internal package reference and swallow the error. + + // TODO: some package managers also support tags like "latest". 
Does extra handling need to be + // added for this corner-case + return true + } + + return constraint.Check(pkgVersion) +} + +// SinglePackageGraph constructs a Context instance from a single package. +func SinglePackageGraph(repoRoot turbopath.AbsoluteSystemPath, rootPackageJSON *fs.PackageJSON) (*Context, error) { + workspaceInfos := workspace.Catalog{ + PackageJSONs: map[string]*fs.PackageJSON{util.RootPkgName: rootPackageJSON}, + TurboConfigs: map[string]*fs.TurboJSON{}, + } + c := &Context{ + WorkspaceInfos: workspaceInfos, + RootNode: core.ROOT_NODE_NAME, + } + c.WorkspaceGraph.Connect(dag.BasicEdge(util.RootPkgName, core.ROOT_NODE_NAME)) + packageManager, err := packagemanager.GetPackageManager(repoRoot, rootPackageJSON) + if err != nil { + return nil, err + } + c.PackageManager = packageManager + return c, nil +} + +// BuildPackageGraph constructs a Context instance with information about the package dependency graph +func BuildPackageGraph(repoRoot turbopath.AbsoluteSystemPath, rootPackageJSON *fs.PackageJSON) (*Context, error) { + c := &Context{} + rootpath := repoRoot.ToStringDuringMigration() + c.WorkspaceInfos = workspace.Catalog{ + PackageJSONs: map[string]*fs.PackageJSON{}, + TurboConfigs: map[string]*fs.TurboJSON{}, + } + c.RootNode = core.ROOT_NODE_NAME + + var warnings Warnings + + packageManager, err := packagemanager.GetPackageManager(repoRoot, rootPackageJSON) + if err != nil { + return nil, err + } + c.PackageManager = packageManager + + if lockfile, err := c.PackageManager.ReadLockfile(repoRoot, rootPackageJSON); err != nil { + warnings.append(err) + } else { + c.Lockfile = lockfile + } + + if err := c.resolveWorkspaceRootDeps(rootPackageJSON, &warnings); err != nil { + // TODO(Gaspar) was this the intended return error? + return nil, fmt.Errorf("could not resolve workspaces: %w", err) + } + + // Get the workspaces from the package manager. 
+ // workspaces are absolute paths + workspaces, err := c.PackageManager.GetWorkspaces(repoRoot) + + if err != nil { + return nil, fmt.Errorf("workspace configuration error: %w", err) + } + + // We will parse all package.json's simultaneously. We use a + // wait group because we cannot fully populate the graph (the next step) + // until all parsing is complete + parseJSONWaitGroup := &errgroup.Group{} + for _, workspace := range workspaces { + pkgJSONPath := fs.UnsafeToAbsoluteSystemPath(workspace) + parseJSONWaitGroup.Go(func() error { + return c.parsePackageJSON(repoRoot, pkgJSONPath) + }) + } + + if err := parseJSONWaitGroup.Wait(); err != nil { + return nil, err + } + populateGraphWaitGroup := &errgroup.Group{} + for _, pkg := range c.WorkspaceInfos.PackageJSONs { + pkg := pkg + populateGraphWaitGroup.Go(func() error { + return c.populateWorkspaceGraphForPackageJSON(pkg, rootpath, pkg.Name, &warnings) + }) + } + + if err := populateGraphWaitGroup.Wait(); err != nil { + return nil, err + } + // Resolve dependencies for the root package. We override the vertexName in the graph + // for the root package, since it can have an arbitrary name. We need it to have our + // RootPkgName so that we can identify it as the root later on. 
+ err = c.populateWorkspaceGraphForPackageJSON(rootPackageJSON, rootpath, util.RootPkgName, &warnings) + if err != nil { + return nil, fmt.Errorf("failed to resolve dependencies for root package: %v", err) + } + c.WorkspaceInfos.PackageJSONs[util.RootPkgName] = rootPackageJSON + + return c, warnings.errorOrNil() +} + +func (c *Context) resolveWorkspaceRootDeps(rootPackageJSON *fs.PackageJSON, warnings *Warnings) error { + pkg := rootPackageJSON + pkg.UnresolvedExternalDeps = make(map[string]string) + for dep, version := range pkg.DevDependencies { + pkg.UnresolvedExternalDeps[dep] = version + } + for dep, version := range pkg.OptionalDependencies { + pkg.UnresolvedExternalDeps[dep] = version + } + for dep, version := range pkg.Dependencies { + pkg.UnresolvedExternalDeps[dep] = version + } + if c.Lockfile != nil { + depSet, err := lockfile.TransitiveClosure( + pkg.Dir.ToUnixPath(), + pkg.UnresolvedExternalDeps, + c.Lockfile, + ) + if err != nil { + warnings.append(err) + // Return early to skip using results of incomplete dep graph resolution + return nil + } + pkg.TransitiveDeps = make([]lockfile.Package, 0, depSet.Cardinality()) + for _, v := range depSet.ToSlice() { + dep := v.(lockfile.Package) + pkg.TransitiveDeps = append(pkg.TransitiveDeps, dep) + } + sort.Sort(lockfile.ByKey(pkg.TransitiveDeps)) + hashOfExternalDeps, err := fs.HashObject(pkg.TransitiveDeps) + if err != nil { + return err + } + pkg.ExternalDepsHash = hashOfExternalDeps + } else { + pkg.TransitiveDeps = []lockfile.Package{} + pkg.ExternalDepsHash = "" + } + + return nil +} + +// populateWorkspaceGraphForPackageJSON fills in the edges for the dependencies of the given package +// that are within the monorepo, as well as collecting and hashing the dependencies of the package +// that are not within the monorepo. The vertexName is used to override the package name in the graph. +// This can happen when adding the root package, which can have an arbitrary name. 
+func (c *Context) populateWorkspaceGraphForPackageJSON(pkg *fs.PackageJSON, rootpath string, vertexName string, warnings *Warnings) error { + c.mutex.Lock() + defer c.mutex.Unlock() + depMap := make(map[string]string) + internalDepsSet := make(dag.Set) + externalUnresolvedDepsSet := make(dag.Set) + pkg.UnresolvedExternalDeps = make(map[string]string) + + for dep, version := range pkg.DevDependencies { + depMap[dep] = version + } + + for dep, version := range pkg.OptionalDependencies { + depMap[dep] = version + } + + for dep, version := range pkg.Dependencies { + depMap[dep] = version + } + + // split out internal vs. external deps + for depName, depVersion := range depMap { + if item, ok := c.WorkspaceInfos.PackageJSONs[depName]; ok && isWorkspaceReference(item.Version, depVersion, pkg.Dir.ToStringDuringMigration(), rootpath) { + internalDepsSet.Add(depName) + c.WorkspaceGraph.Connect(dag.BasicEdge(vertexName, depName)) + } else { + externalUnresolvedDepsSet.Add(depName) + } + } + + for _, name := range externalUnresolvedDepsSet.List() { + name := name.(string) + if item, ok := pkg.DevDependencies[name]; ok { + pkg.UnresolvedExternalDeps[name] = item + } + + if item, ok := pkg.OptionalDependencies[name]; ok { + pkg.UnresolvedExternalDeps[name] = item + } + + if item, ok := pkg.Dependencies[name]; ok { + pkg.UnresolvedExternalDeps[name] = item + } + } + + externalDeps, err := lockfile.TransitiveClosure( + pkg.Dir.ToUnixPath(), + pkg.UnresolvedExternalDeps, + c.Lockfile, + ) + if err != nil { + warnings.append(err) + // reset external deps to original state + externalDeps = mapset.NewSet() + } + + // when there are no internal dependencies, we need to still add these leafs to the graph + if internalDepsSet.Len() == 0 { + c.WorkspaceGraph.Connect(dag.BasicEdge(pkg.Name, core.ROOT_NODE_NAME)) + } + pkg.TransitiveDeps = make([]lockfile.Package, 0, externalDeps.Cardinality()) + for _, dependency := range externalDeps.ToSlice() { + dependency := 
dependency.(lockfile.Package) + pkg.TransitiveDeps = append(pkg.TransitiveDeps, dependency) + } + pkg.InternalDeps = make([]string, 0, internalDepsSet.Len()) + for _, v := range internalDepsSet.List() { + pkg.InternalDeps = append(pkg.InternalDeps, fmt.Sprintf("%v", v)) + } + sort.Strings(pkg.InternalDeps) + sort.Sort(lockfile.ByKey(pkg.TransitiveDeps)) + hashOfExternalDeps, err := fs.HashObject(pkg.TransitiveDeps) + if err != nil { + return err + } + pkg.ExternalDepsHash = hashOfExternalDeps + return nil +} + +func (c *Context) parsePackageJSON(repoRoot turbopath.AbsoluteSystemPath, pkgJSONPath turbopath.AbsoluteSystemPath) error { + c.mutex.Lock() + defer c.mutex.Unlock() + + if pkgJSONPath.FileExists() { + pkg, err := fs.ReadPackageJSON(pkgJSONPath) + if err != nil { + return fmt.Errorf("parsing %s: %w", pkgJSONPath, err) + } + + relativePkgJSONPath, err := repoRoot.PathTo(pkgJSONPath) + if err != nil { + return err + } + c.WorkspaceGraph.Add(pkg.Name) + pkg.PackageJSONPath = turbopath.AnchoredSystemPathFromUpstream(relativePkgJSONPath) + pkg.Dir = turbopath.AnchoredSystemPathFromUpstream(filepath.Dir(relativePkgJSONPath)) + if c.WorkspaceInfos.PackageJSONs[pkg.Name] != nil { + existing := c.WorkspaceInfos.PackageJSONs[pkg.Name] + return fmt.Errorf("Failed to add workspace \"%s\" from %s, it already exists at %s", pkg.Name, pkg.Dir, existing.Dir) + } + c.WorkspaceInfos.PackageJSONs[pkg.Name] = pkg + c.WorkspaceNames = append(c.WorkspaceNames, pkg.Name) + } + return nil +} + +// InternalDependencies finds all dependencies required by the slice of starting +// packages, as well as the starting packages themselves. 
+func (c *Context) InternalDependencies(start []string) ([]string, error) { + vertices := make(dag.Set) + for _, v := range start { + vertices.Add(v) + } + s := make(dag.Set) + memoFunc := func(v dag.Vertex, d int) error { + s.Add(v) + return nil + } + + if err := c.WorkspaceGraph.DepthFirstWalk(vertices, memoFunc); err != nil { + return nil, err + } + + // Use for loop so we can coerce to string + // .List() returns a list of interface{} types, but + // we know they are strings. + targets := make([]string, 0, s.Len()) + for _, dep := range s.List() { + targets = append(targets, dep.(string)) + } + sort.Strings(targets) + + return targets, nil +} + +// ChangedPackages returns a list of changed packages based on the contents of a previous lockfile +// This assumes that none of the package.json in the workspace change, it is +// the responsibility of the caller to verify this. +func (c *Context) ChangedPackages(previousLockfile lockfile.Lockfile) ([]string, error) { + if lockfile.IsNil(previousLockfile) || lockfile.IsNil(c.Lockfile) { + return nil, fmt.Errorf("Cannot detect changed packages without previous and current lockfile") + } + + didPackageChange := func(pkgName string, pkg *fs.PackageJSON) bool { + previousDeps, err := lockfile.TransitiveClosure( + pkg.Dir.ToUnixPath(), + pkg.UnresolvedExternalDeps, + previousLockfile, + ) + if err != nil || previousDeps.Cardinality() != len(pkg.TransitiveDeps) { + return true + } + + prevExternalDeps := make([]lockfile.Package, 0, previousDeps.Cardinality()) + for _, d := range previousDeps.ToSlice() { + prevExternalDeps = append(prevExternalDeps, d.(lockfile.Package)) + } + sort.Sort(lockfile.ByKey(prevExternalDeps)) + + for i := range prevExternalDeps { + if prevExternalDeps[i] != pkg.TransitiveDeps[i] { + return true + } + } + return false + } + + changedPkgs := make([]string, 0, len(c.WorkspaceInfos.PackageJSONs)) + + // check if prev and current have "global" changes e.g. 
lockfile bump + globalChange := c.Lockfile.GlobalChange(previousLockfile) + + for pkgName, pkg := range c.WorkspaceInfos.PackageJSONs { + if globalChange { + break + } + if didPackageChange(pkgName, pkg) { + if pkgName == util.RootPkgName { + globalChange = true + } else { + changedPkgs = append(changedPkgs, pkgName) + } + } + } + + if globalChange { + changedPkgs = make([]string, 0, len(c.WorkspaceInfos.PackageJSONs)) + for pkgName := range c.WorkspaceInfos.PackageJSONs { + changedPkgs = append(changedPkgs, pkgName) + } + sort.Strings(changedPkgs) + return changedPkgs, nil + } + + sort.Strings(changedPkgs) + return changedPkgs, nil +} diff --git a/cli/internal/context/context_test.go b/cli/internal/context/context_test.go new file mode 100644 index 0000000..692c0a8 --- /dev/null +++ b/cli/internal/context/context_test.go @@ -0,0 +1,162 @@ +package context + +import ( + "os" + "path/filepath" + "regexp" + "testing" + + testifyAssert "github.com/stretchr/testify/assert" + "github.com/vercel/turbo/cli/internal/fs" + "github.com/vercel/turbo/cli/internal/turbopath" +) + +func Test_isWorkspaceReference(t *testing.T) { + rootpath, err := filepath.Abs(filepath.FromSlash("/some/repo")) + if err != nil { + t.Fatalf("failed to create absolute root path %v", err) + } + pkgDir, err := filepath.Abs(filepath.FromSlash("/some/repo/packages/libA")) + if err != nil { + t.Fatalf("failed to create absolute pkgDir %v", err) + } + tests := []struct { + name string + packageVersion string + dependencyVersion string + want bool + }{ + { + name: "handles exact match", + packageVersion: "1.2.3", + dependencyVersion: "1.2.3", + want: true, + }, + { + name: "handles semver range satisfied", + packageVersion: "1.2.3", + dependencyVersion: "^1.0.0", + want: true, + }, + { + name: "handles semver range not-satisfied", + packageVersion: "2.3.4", + dependencyVersion: "^1.0.0", + want: false, + }, + { + name: "handles workspace protocol with version", + packageVersion: "1.2.3", + 
dependencyVersion: "workspace:1.2.3", + want: true, + }, + { + name: "handles workspace protocol with relative path", + packageVersion: "1.2.3", + dependencyVersion: "workspace:../other-package/", + want: true, + }, + { + name: "handles npm protocol with satisfied semver range", + packageVersion: "1.2.3", + dependencyVersion: "npm:^1.2.3", + want: true, // default in yarn is to use the workspace version unless `enableTransparentWorkspaces: true`. This isn't currently being checked. + }, + { + name: "handles npm protocol with non-satisfied semver range", + packageVersion: "2.3.4", + dependencyVersion: "npm:^1.2.3", + want: false, + }, + { + name: "handles pre-release versions", + packageVersion: "1.2.3", + dependencyVersion: "1.2.2-alpha-1234abcd.0", + want: false, + }, + { + name: "handles non-semver package version", + packageVersion: "sometag", + dependencyVersion: "1.2.3", + want: true, // for backwards compatibility with the code before versions were verified + }, + { + name: "handles non-semver package version", + packageVersion: "1.2.3", + dependencyVersion: "sometag", + want: true, // for backwards compatibility with the code before versions were verified + }, + { + name: "handles file:... inside repo", + packageVersion: "1.2.3", + dependencyVersion: "file:../libB", + want: true, // this is a sibling package + }, + { + name: "handles file:... outside repo", + packageVersion: "1.2.3", + dependencyVersion: "file:../../../otherproject", + want: false, // this is not within the repo root + }, + { + name: "handles link:... inside repo", + packageVersion: "1.2.3", + dependencyVersion: "link:../libB", + want: true, // this is a sibling package + }, + { + name: "handles link:... 
outside repo", + packageVersion: "1.2.3", + dependencyVersion: "link:../../../otherproject", + want: false, // this is not within the repo root + }, + { + name: "handles development versions", + packageVersion: "0.0.0-development", + dependencyVersion: "*", + want: true, // "*" should always match + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := isWorkspaceReference(tt.packageVersion, tt.dependencyVersion, pkgDir, rootpath) + if got != tt.want { + t.Errorf("isWorkspaceReference(%v, %v, %v, %v) got = %v, want %v", tt.packageVersion, tt.dependencyVersion, pkgDir, rootpath, got, tt.want) + } + }) + } +} + +func TestBuildPackageGraph_DuplicateNames(t *testing.T) { + path := getTestDir(t, "dupe-workspace-names") + pkgJSON := &fs.PackageJSON{ + Name: "dupe-workspace-names", + PackageManager: "pnpm@7.15.0", + } + + _, actualErr := BuildPackageGraph(path, pkgJSON) + + // Not asserting the full error message, because it includes a path with slashes and backslashes + // getting the regex incantation to check that is not worth it. + // We have to use regex because the actual error may be different depending on which workspace was + // added first and which one was second, causing the error. + testifyAssert.Regexp(t, regexp.MustCompile("^Failed to add workspace \"same-name\".+$"), actualErr) +} + +// This is duplicated from fs.turbo_json_test.go. +// I wasn't able to pull it into a helper file/package because +// it requires the `fs` package and it would cause cyclical dependencies +// when used in turbo_json_test.go and would require more changes to fix that. 
+func getTestDir(t *testing.T, testName string) turbopath.AbsoluteSystemPath { + defaultCwd, err := os.Getwd() + if err != nil { + t.Errorf("failed to get cwd: %v", err) + } + cwd, err := fs.CheckedToAbsoluteSystemPath(defaultCwd) + if err != nil { + t.Fatalf("cwd is not an absolute directory %v: %v", defaultCwd, err) + } + + return cwd.UntypedJoin("testdata", testName) +} diff --git a/cli/internal/context/testdata/dupe-workspace-names/apps/a/package.json b/cli/internal/context/testdata/dupe-workspace-names/apps/a/package.json new file mode 100644 index 0000000..94301a3 --- /dev/null +++ b/cli/internal/context/testdata/dupe-workspace-names/apps/a/package.json @@ -0,0 +1,6 @@ +{ + "name": "same-name", + "dependencies": { + "ui": "workspace:*" + } +} diff --git a/cli/internal/context/testdata/dupe-workspace-names/apps/b/package.json b/cli/internal/context/testdata/dupe-workspace-names/apps/b/package.json new file mode 100644 index 0000000..94301a3 --- /dev/null +++ b/cli/internal/context/testdata/dupe-workspace-names/apps/b/package.json @@ -0,0 +1,6 @@ +{ + "name": "same-name", + "dependencies": { + "ui": "workspace:*" + } +} diff --git a/cli/internal/context/testdata/dupe-workspace-names/package.json b/cli/internal/context/testdata/dupe-workspace-names/package.json new file mode 100644 index 0000000..3bf7403 --- /dev/null +++ b/cli/internal/context/testdata/dupe-workspace-names/package.json @@ -0,0 +1,7 @@ +{ + "name": "dupe-workspace-names", + "workspaces": [ + "apps/*" + ], + "packageManager": "pnpm@7.15.0" +} diff --git a/cli/internal/context/testdata/dupe-workspace-names/packages/ui/package.json b/cli/internal/context/testdata/dupe-workspace-names/packages/ui/package.json new file mode 100644 index 0000000..1cd75b5 --- /dev/null +++ b/cli/internal/context/testdata/dupe-workspace-names/packages/ui/package.json @@ -0,0 +1,3 @@ +{ + "name": "ui" +} diff --git a/cli/internal/context/testdata/dupe-workspace-names/pnpm-lock.yaml 
b/cli/internal/context/testdata/dupe-workspace-names/pnpm-lock.yaml new file mode 100644 index 0000000..0909cde --- /dev/null +++ b/cli/internal/context/testdata/dupe-workspace-names/pnpm-lock.yaml @@ -0,0 +1,21 @@ +lockfileVersion: 5.4 + +importers: + + .: + specifiers: {} + + apps/a: + specifiers: + ui: workspace:* + dependencies: + ui: link:../../packages/ui + + apps/b: + specifiers: + ui: workspace:* + dependencies: + ui: link:../../packages/ui + + packages/ui: + specifiers: {} diff --git a/cli/internal/context/testdata/dupe-workspace-names/pnpm-workspace.yaml b/cli/internal/context/testdata/dupe-workspace-names/pnpm-workspace.yaml new file mode 100644 index 0000000..3ff5faa --- /dev/null +++ b/cli/internal/context/testdata/dupe-workspace-names/pnpm-workspace.yaml @@ -0,0 +1,3 @@ +packages: + - "apps/*" + - "packages/*" |
