| Field | Value |
|---|---|
| author | 2023-04-28 01:36:44 +0800 |
| committer | 2023-04-28 01:36:44 +0800 |
| commit | dd84b9d64fb98746a230cd24233ff50a562c39c9 (patch) |
| tree | b583261ef00b3afe72ec4d6dacb31e57779a6faf /cli/internal/runsummary |
| parent | 0b46fcd72ac34382387b2bcf9095233efbcc52f4 (diff) |
| download | HydroRoll-dd84b9d64fb98746a230cd24233ff50a562c39c9.tar.gz HydroRoll-dd84b9d64fb98746a230cd24233ff50a562c39c9.zip |
Diffstat (limited to 'cli/internal/runsummary')

| Mode | File | Lines added |
|---|---|---|
| -rw-r--r-- | cli/internal/runsummary/execution_summary.go | 282 |
| -rw-r--r-- | cli/internal/runsummary/format_execution_summary.go | 70 |
| -rw-r--r-- | cli/internal/runsummary/format_json.go | 66 |
| -rw-r--r-- | cli/internal/runsummary/format_text.go | 100 |
| -rw-r--r-- | cli/internal/runsummary/globalhash_summary.go | 38 |
| -rw-r--r-- | cli/internal/runsummary/run_summary.go | 320 |
| -rw-r--r-- | cli/internal/runsummary/spaces.go | 96 |
| -rw-r--r-- | cli/internal/runsummary/task_summary.go | 117 |

8 files changed, 1089 insertions, 0 deletions
diff --git a/cli/internal/runsummary/execution_summary.go b/cli/internal/runsummary/execution_summary.go new file mode 100644 index 0000000..fabb690 --- /dev/null +++ b/cli/internal/runsummary/execution_summary.go @@ -0,0 +1,282 @@ +package runsummary + +import ( + "encoding/json" + "fmt" + "os" + "sync" + "time" + + "github.com/vercel/turbo/cli/internal/chrometracing" + "github.com/vercel/turbo/cli/internal/fs" + "github.com/vercel/turbo/cli/internal/turbopath" + + "github.com/mitchellh/cli" +) + +// executionEvent represents a single event in the build process, i.e. a target starting or finishing +// building, or reaching some milestone within those steps. +type executionEvent struct { + // Timestamp of this event + Time time.Time + // Duration of this event + Duration time.Duration + // Target which has just changed + Label string + // Its current status + Status executionEventName + // Error, only populated for failure statuses + Err string + + exitCode *int +} + +// executionEventName represents the status of a target when we log a build result. +type executionEventName int + +// The collection of expected build result statuses. +const ( + targetInitialized executionEventName = iota + TargetBuilding + TargetBuildStopped + TargetExecuted + TargetBuilt + TargetCached + TargetBuildFailed +) + +func (en executionEventName) toString() string { + switch en { + case targetInitialized: + return "initialized" + case TargetBuilding: + return "building" + case TargetBuildStopped: + return "buildStopped" + case TargetExecuted: + return "executed" + case TargetBuilt: + return "built" + case TargetCached: + return "cached" + case TargetBuildFailed: + return "buildFailed" + } + + return "" +} + +// TaskExecutionSummary contains data about the state of a single task in a turbo run. +// Some fields are updated over time as the task prepares to execute and finishes execution. +type TaskExecutionSummary struct { + startAt time.Time // set once + status executionEventName // current status, updated during execution + err string // only populated for failure statuses + Duration time.Duration // updated during the task execution + exitCode *int // pointer so we can distinguish between 0 and unknown. +} + +func (ts *TaskExecutionSummary) endTime() time.Time { + return ts.startAt.Add(ts.Duration) +} + +// MarshalJSON munges the TaskExecutionSummary into a format we want +// We'll use an anonmyous, private struct for this, so it's not confusingly duplicated +func (ts *TaskExecutionSummary) MarshalJSON() ([]byte, error) { + serializable := struct { + Start int64 `json:"startTime"` + End int64 `json:"endTime"` + Err string `json:"error,omitempty"` + ExitCode *int `json:"exitCode"` + }{ + Start: ts.startAt.UnixMilli(), + End: ts.endTime().UnixMilli(), + Err: ts.err, + ExitCode: ts.exitCode, + } + + return json.Marshal(&serializable) +} + +// ExitCode access exit code nil means no exit code was received +func (ts *TaskExecutionSummary) ExitCode() *int { + var exitCode int + if ts.exitCode == nil { + return nil + } + exitCode = *ts.exitCode + return &exitCode +} + +// executionSummary is the state of the entire `turbo run`. 
Individual task state in `Tasks` field +type executionSummary struct { + // mu guards reads/writes to the `state` field + mu sync.Mutex + tasks map[string]*TaskExecutionSummary // key is a taskID + profileFilename string + + // These get serialized to JSON + command string // a synthesized turbo command to produce this invocation + repoPath turbopath.RelativeSystemPath // the (possibly empty) path from the turborepo root to where the command was run + success int // number of tasks that exited successfully (does not include cache hits) + failure int // number of tasks that exited with failure + cached int // number of tasks that had a cache hit + attempted int // number of tasks that started + startedAt time.Time + endedAt time.Time + exitCode int +} + +// MarshalJSON munges the executionSummary into a format we want +// We'll use an anonmyous, private struct for this, so it's not confusingly duplicated. +func (es *executionSummary) MarshalJSON() ([]byte, error) { + serializable := struct { + Command string `json:"command"` + RepoPath string `json:"repoPath"` + Success int `json:"success"` + Failure int `json:"failed"` + Cached int `json:"cached"` + Attempted int `json:"attempted"` + StartTime int64 `json:"startTime"` + EndTime int64 `json:"endTime"` + ExitCode int `json:"exitCode"` + }{ + Command: es.command, + RepoPath: es.repoPath.ToString(), + StartTime: es.startedAt.UnixMilli(), + EndTime: es.endedAt.UnixMilli(), + Success: es.success, + Failure: es.failure, + Cached: es.cached, + Attempted: es.attempted, + ExitCode: es.exitCode, + } + + return json.Marshal(&serializable) +} + +// newExecutionSummary creates a executionSummary instance to track events in a `turbo run`.` +func newExecutionSummary(command string, repoPath turbopath.RelativeSystemPath, start time.Time, tracingProfile string) *executionSummary { + if tracingProfile != "" { + chrometracing.EnableTracing() + } + + return &executionSummary{ + command: command, + repoPath: repoPath, + success: 0, + failure: 0, + cached: 0, + attempted: 0, + tasks: make(map[string]*TaskExecutionSummary), + startedAt: start, + profileFilename: tracingProfile, + } +} + +// Run starts the Execution of a single task. It returns a function that can +// be used to update the state of a given taskID with the executionEventName enum +func (es *executionSummary) run(taskID string) (func(outcome executionEventName, err error, exitCode *int), *TaskExecutionSummary) { + start := time.Now() + taskExecutionSummary := es.add(&executionEvent{ + Time: start, + Label: taskID, + Status: targetInitialized, + }) + + tracer := chrometracing.Event(taskID) + + // This function can be called with an enum and an optional error to update + // the state of a given taskID. + tracerFn := func(outcome executionEventName, err error, exitCode *int) { + defer tracer.Done() + now := time.Now() + result := &executionEvent{ + Time: now, + Duration: now.Sub(start), + Label: taskID, + Status: outcome, + // We'll assign this here regardless of whether it is nil, but we'll check for nil + // when we assign it to the taskExecutionSummary. 
+ exitCode: exitCode, + } + + if err != nil { + result.Err = err.Error() + } + + // Ignore the return value here + es.add(result) + } + + return tracerFn, taskExecutionSummary +} + +func (es *executionSummary) add(event *executionEvent) *TaskExecutionSummary { + es.mu.Lock() + defer es.mu.Unlock() + + var taskExecSummary *TaskExecutionSummary + if ts, ok := es.tasks[event.Label]; ok { + // If we already know about this task, we'll update it with the new event + taskExecSummary = ts + } else { + // If we don't know about it yet, init and add it into the parent struct + // (event.Status should always be `targetBuilding` here.) + taskExecSummary = &TaskExecutionSummary{startAt: event.Time} + es.tasks[event.Label] = taskExecSummary + } + + // Update the Status, Duration, and Err fields + taskExecSummary.status = event.Status + taskExecSummary.err = event.Err + taskExecSummary.Duration = event.Duration + + if event.exitCode != nil { + taskExecSummary.exitCode = event.exitCode + } + + switch { + case event.Status == TargetBuilding: + es.attempted++ + case event.Status == TargetBuildFailed: + es.failure++ + case event.Status == TargetCached: + es.cached++ + case event.Status == TargetBuilt: + es.success++ + } + + return es.tasks[event.Label] +} + +// writeChromeTracing writes to a profile name if the `--profile` flag was passed to turbo run +func writeChrometracing(filename string, terminal cli.Ui) error { + outputPath := chrometracing.Path() + if outputPath == "" { + // tracing wasn't enabled + return nil + } + + name := fmt.Sprintf("turbo-%s.trace", time.Now().Format(time.RFC3339)) + if filename != "" { + name = filename + } + if err := chrometracing.Close(); err != nil { + terminal.Warn(fmt.Sprintf("Failed to flush tracing data: %v", err)) + } + cwdRaw, err := os.Getwd() + if err != nil { + return err + } + root, err := fs.GetCwd(cwdRaw) + if err != nil { + return err + } + // chrometracing.Path() is absolute by default, but can still be relative if overriden via $CHROMETRACING_DIR + // so we have to account for that before converting to turbopath.AbsoluteSystemPath + if err := fs.CopyFile(&fs.LstatCachedFile{Path: fs.ResolveUnknownPath(root, outputPath)}, name); err != nil { + return err + } + return nil +} diff --git a/cli/internal/runsummary/format_execution_summary.go b/cli/internal/runsummary/format_execution_summary.go new file mode 100644 index 0000000..37092be --- /dev/null +++ b/cli/internal/runsummary/format_execution_summary.go @@ -0,0 +1,70 @@ +package runsummary + +import ( + "os" + "time" + + "github.com/fatih/color" + internalUI "github.com/vercel/turbo/cli/internal/ui" + "github.com/vercel/turbo/cli/internal/util" +) + +func (rsm *Meta) printExecutionSummary() { + maybeFullTurbo := "" + summary := rsm.RunSummary + ui := rsm.ui + + attempted := summary.ExecutionSummary.attempted + successful := summary.ExecutionSummary.cached + summary.ExecutionSummary.success + cached := summary.ExecutionSummary.cached + // TODO: can we use a method on ExecutionSummary here? + duration := time.Since(summary.ExecutionSummary.startedAt).Truncate(time.Millisecond) + + if cached == attempted && attempted > 0 { + terminalProgram := os.Getenv("TERM_PROGRAM") + // On the macOS Terminal, the rainbow colors show up as a magenta background + // with a gray background on a single letter. 
Instead, we print in bold magenta + if terminalProgram == "Apple_Terminal" { + fallbackTurboColor := color.New(color.FgHiMagenta, color.Bold).SprintFunc() + maybeFullTurbo = fallbackTurboColor(">>> FULL TURBO") + } else { + maybeFullTurbo = internalUI.Rainbow(">>> FULL TURBO") + } + } + + if attempted == 0 { + ui.Output("") // Clear the line + ui.Warn("No tasks were executed as part of this run.") + } + + ui.Output("") // Clear the line + spacer := " " // 4 chars + + var lines []string + + // The only difference between these two branches is that when there is a run summary + // we print the path to that file and we adjust the whitespace in the printed text so it aligns. + // We could just always align to account for the summary line, but that would require a whole + // bunch of test output assertions to change. + if rsm.getPath().FileExists() { + lines = []string{ + util.Sprintf("${BOLD} Tasks:${BOLD_GREEN}%s%v successful${RESET}${GRAY}, %v total${RESET}", spacer, successful, attempted), + util.Sprintf("${BOLD} Cached:%s%v cached${RESET}${GRAY}, %v total${RESET}", spacer, cached, attempted), + util.Sprintf("${BOLD} Time:%s%v${RESET} %v${RESET}", spacer, duration, maybeFullTurbo), + util.Sprintf("${BOLD}Summary:%s%s${RESET}", spacer, rsm.getPath()), + } + } else { + lines = []string{ + util.Sprintf("${BOLD} Tasks:${BOLD_GREEN}%s%v successful${RESET}${GRAY}, %v total${RESET}", spacer, successful, attempted), + util.Sprintf("${BOLD}Cached:%s%v cached${RESET}${GRAY}, %v total${RESET}", spacer, cached, attempted), + util.Sprintf("${BOLD} Time:%s%v${RESET} %v${RESET}", spacer, duration, maybeFullTurbo), + } + } + + // Print the real thing + for _, line := range lines { + ui.Output(line) + } + + ui.Output("") +} diff --git a/cli/internal/runsummary/format_json.go b/cli/internal/runsummary/format_json.go new file mode 100644 index 0000000..76a0a40 --- /dev/null +++ b/cli/internal/runsummary/format_json.go @@ -0,0 +1,66 @@ +package runsummary + +import ( + "encoding/json" + + "github.com/pkg/errors" + "github.com/segmentio/ksuid" + "github.com/vercel/turbo/cli/internal/util" +) + +// FormatJSON returns a json string representing a RunSummary +func (rsm *Meta) FormatJSON() ([]byte, error) { + rsm.normalize() // normalize data + + var bytes []byte + var err error + + if rsm.singlePackage { + bytes, err = json.MarshalIndent(nonMonorepoRunSummary(*rsm.RunSummary), "", " ") + } else { + bytes, err = json.MarshalIndent(rsm.RunSummary, "", " ") + } + + if err != nil { + return nil, errors.Wrap(err, "failed to render JSON") + } + return bytes, nil +} + +func (rsm *Meta) normalize() { + for _, t := range rsm.RunSummary.Tasks { + t.EnvVars.Global = rsm.RunSummary.GlobalHashSummary.envVars + t.EnvVars.GlobalPassthrough = rsm.RunSummary.GlobalHashSummary.passthroughEnvVars + } + + // Remove execution summary for dry runs + if rsm.runType == runTypeDryJSON { + rsm.RunSummary.ExecutionSummary = nil + } + + // For single packages, we don't need the Packages + // and each task summary needs some cleaning. + if rsm.singlePackage { + rsm.RunSummary.Packages = []string{} + + for _, task := range rsm.RunSummary.Tasks { + task.cleanForSinglePackage() + } + } +} + +// nonMonorepoRunSummary is an exact copy of RunSummary, but the JSON tags are structured +// for rendering a single-package run of turbo. Notably, we want to always omit packages +// since there is no concept of packages in a single-workspace repo. +// This struct exists solely for the purpose of serializing to JSON and should not be +// used anywhere else. 
+type nonMonorepoRunSummary struct { + ID ksuid.KSUID `json:"id"` + Version string `json:"version"` + TurboVersion string `json:"turboVersion"` + GlobalHashSummary *GlobalHashSummary `json:"globalCacheInputs"` + Packages []string `json:"-"` + EnvMode util.EnvMode `json:"envMode"` + ExecutionSummary *executionSummary `json:"execution,omitempty"` + Tasks []*TaskSummary `json:"tasks"` +} diff --git a/cli/internal/runsummary/format_text.go b/cli/internal/runsummary/format_text.go new file mode 100644 index 0000000..28b1638 --- /dev/null +++ b/cli/internal/runsummary/format_text.go @@ -0,0 +1,100 @@ +package runsummary + +import ( + "encoding/json" + "fmt" + "os" + "strconv" + "strings" + "text/tabwriter" + + "github.com/vercel/turbo/cli/internal/util" + "github.com/vercel/turbo/cli/internal/workspace" +) + +// FormatAndPrintText prints a Run Summary to the Terminal UI +func (rsm Meta) FormatAndPrintText(workspaceInfos workspace.Catalog) error { + ui := rsm.ui + summary := rsm.RunSummary + + rsm.normalize() // normalize data + + if !rsm.singlePackage { + ui.Output("") + ui.Info(util.Sprintf("${CYAN}${BOLD}Packages in Scope${RESET}")) + p := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0) + fmt.Fprintln(p, "Name\tPath\t") + for _, pkg := range summary.Packages { + fmt.Fprintf(p, "%s\t%s\t\n", pkg, workspaceInfos.PackageJSONs[pkg].Dir) + } + if err := p.Flush(); err != nil { + return err + } + } + + fileCount := 0 + for range summary.GlobalHashSummary.GlobalFileHashMap { + fileCount = fileCount + 1 + } + w1 := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0) + ui.Output("") + ui.Info(util.Sprintf("${CYAN}${BOLD}Global Hash Inputs${RESET}")) + fmt.Fprintln(w1, util.Sprintf(" ${GREY}Global Files\t=\t%d${RESET}", fileCount)) + fmt.Fprintln(w1, util.Sprintf(" ${GREY}External Dependencies Hash\t=\t%s${RESET}", summary.GlobalHashSummary.RootExternalDepsHash)) + fmt.Fprintln(w1, util.Sprintf(" ${GREY}Global Cache Key\t=\t%s${RESET}", summary.GlobalHashSummary.GlobalCacheKey)) + if bytes, err := json.Marshal(summary.GlobalHashSummary.Pipeline); err == nil { + fmt.Fprintln(w1, util.Sprintf(" ${GREY}Root pipeline\t=\t%s${RESET}", bytes)) + } + if err := w1.Flush(); err != nil { + return err + } + + ui.Output("") + ui.Info(util.Sprintf("${CYAN}${BOLD}Tasks to Run${RESET}")) + + for _, task := range summary.Tasks { + taskName := task.TaskID + + if rsm.singlePackage { + taskName = task.Task + } + + ui.Info(util.Sprintf("${BOLD}%s${RESET}", taskName)) + w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0) + fmt.Fprintln(w, util.Sprintf(" ${GREY}Task\t=\t%s\t${RESET}", task.Task)) + + if !rsm.singlePackage { + fmt.Fprintln(w, util.Sprintf(" ${GREY}Package\t=\t%s\t${RESET}", task.Package)) + } + fmt.Fprintln(w, util.Sprintf(" ${GREY}Hash\t=\t%s\t${RESET}", task.Hash)) + fmt.Fprintln(w, util.Sprintf(" ${GREY}Cached (Local)\t=\t%s\t${RESET}", strconv.FormatBool(task.CacheSummary.Local))) + fmt.Fprintln(w, util.Sprintf(" ${GREY}Cached (Remote)\t=\t%s\t${RESET}", strconv.FormatBool(task.CacheSummary.Remote))) + + if !rsm.singlePackage { + fmt.Fprintln(w, util.Sprintf(" ${GREY}Directory\t=\t%s\t${RESET}", task.Dir)) + } + + fmt.Fprintln(w, util.Sprintf(" ${GREY}Command\t=\t%s\t${RESET}", task.Command)) + fmt.Fprintln(w, util.Sprintf(" ${GREY}Outputs\t=\t%s\t${RESET}", strings.Join(task.Outputs, ", "))) + fmt.Fprintln(w, util.Sprintf(" ${GREY}Log File\t=\t%s\t${RESET}", task.LogFile)) + fmt.Fprintln(w, util.Sprintf(" ${GREY}Dependencies\t=\t%s\t${RESET}", strings.Join(task.Dependencies, ", "))) + fmt.Fprintln(w, 
util.Sprintf(" ${GREY}Dependendents\t=\t%s\t${RESET}", strings.Join(task.Dependents, ", "))) + fmt.Fprintln(w, util.Sprintf(" ${GREY}Inputs Files Considered\t=\t%d\t${RESET}", len(task.ExpandedInputs))) + + fmt.Fprintln(w, util.Sprintf(" ${GREY}Configured Environment Variables\t=\t%s\t${RESET}", strings.Join(task.EnvVars.Configured, ", "))) + fmt.Fprintln(w, util.Sprintf(" ${GREY}Inferred Environment Variables\t=\t%s\t${RESET}", strings.Join(task.EnvVars.Inferred, ", "))) + fmt.Fprintln(w, util.Sprintf(" ${GREY}Global Environment Variables\t=\t%s\t${RESET}", strings.Join(task.EnvVars.Global, ", "))) + + bytes, err := json.Marshal(task.ResolvedTaskDefinition) + // If there's an error, we can silently ignore it, we don't need to block the entire print. + if err == nil { + fmt.Fprintln(w, util.Sprintf(" ${GREY}ResolvedTaskDefinition\t=\t%s\t${RESET}", string(bytes))) + } + + fmt.Fprintln(w, util.Sprintf(" ${GREY}Framework\t=\t%s\t${RESET}", task.Framework)) + if err := w.Flush(); err != nil { + return err + } + } + return nil +} diff --git a/cli/internal/runsummary/globalhash_summary.go b/cli/internal/runsummary/globalhash_summary.go new file mode 100644 index 0000000..e24976d5 --- /dev/null +++ b/cli/internal/runsummary/globalhash_summary.go @@ -0,0 +1,38 @@ +package runsummary + +import ( + "github.com/vercel/turbo/cli/internal/env" + "github.com/vercel/turbo/cli/internal/fs" + "github.com/vercel/turbo/cli/internal/turbopath" +) + +// GlobalHashSummary contains the pieces of data that impacted the global hash (then then impacted the task hash) +type GlobalHashSummary struct { + GlobalCacheKey string `json:"rootKey"` + GlobalFileHashMap map[turbopath.AnchoredUnixPath]string `json:"files"` + RootExternalDepsHash string `json:"hashOfExternalDependencies"` + Pipeline fs.PristinePipeline `json:"rootPipeline"` + + // This is a private field because and not in JSON, because we'll add it to each task + envVars env.EnvironmentVariablePairs + passthroughEnvVars env.EnvironmentVariablePairs +} + +// NewGlobalHashSummary creates a GlobalHashSummary struct from a set of fields. +func NewGlobalHashSummary( + fileHashMap map[turbopath.AnchoredUnixPath]string, + rootExternalDepsHash string, + envVars env.DetailedMap, + passthroughEnvVars env.EnvironmentVariableMap, + globalCacheKey string, + pipeline fs.PristinePipeline, +) *GlobalHashSummary { + return &GlobalHashSummary{ + envVars: envVars.All.ToSecretHashable(), + passthroughEnvVars: passthroughEnvVars.ToSecretHashable(), + GlobalFileHashMap: fileHashMap, + RootExternalDepsHash: rootExternalDepsHash, + GlobalCacheKey: globalCacheKey, + Pipeline: pipeline, + } +} diff --git a/cli/internal/runsummary/run_summary.go b/cli/internal/runsummary/run_summary.go new file mode 100644 index 0000000..a297114 --- /dev/null +++ b/cli/internal/runsummary/run_summary.go @@ -0,0 +1,320 @@ +// Package runsummary implements structs that report on a `turbo run` and `turbo run --dry` +package runsummary + +import ( + "context" + "encoding/json" + "fmt" + "path/filepath" + "sync" + "time" + + "github.com/mitchellh/cli" + "github.com/segmentio/ksuid" + "github.com/vercel/turbo/cli/internal/client" + "github.com/vercel/turbo/cli/internal/spinner" + "github.com/vercel/turbo/cli/internal/turbopath" + "github.com/vercel/turbo/cli/internal/util" + "github.com/vercel/turbo/cli/internal/workspace" +) + +// MissingTaskLabel is printed when a package is missing a definition for a task that is supposed to run +// E.g. 
if `turbo run build --dry` is run, and package-a doesn't define a `build` script in package.json, +// the RunSummary will print this, instead of the script (e.g. `next build`). +const MissingTaskLabel = "<NONEXISTENT>" + +// MissingFrameworkLabel is a string to identify when a workspace doesn't detect a framework +const MissingFrameworkLabel = "<NO FRAMEWORK DETECTED>" + +const runSummarySchemaVersion = "0" +const runsEndpoint = "/v0/spaces/%s/runs" +const runsPatchEndpoint = "/v0/spaces/%s/runs/%s" +const tasksEndpoint = "/v0/spaces/%s/runs/%s/tasks" + +type runType int + +const ( + runTypeReal runType = iota + runTypeDryText + runTypeDryJSON +) + +// Meta is a wrapper around the serializable RunSummary, with some extra information +// about the Run and references to other things that we need. +type Meta struct { + RunSummary *RunSummary + ui cli.Ui + repoRoot turbopath.AbsoluteSystemPath // used to write run summary + repoPath turbopath.RelativeSystemPath + singlePackage bool + shouldSave bool + apiClient *client.APIClient + spaceID string + runType runType + synthesizedCommand string +} + +// RunSummary contains a summary of what happens in the `turbo run` command and why. +type RunSummary struct { + ID ksuid.KSUID `json:"id"` + Version string `json:"version"` + TurboVersion string `json:"turboVersion"` + GlobalHashSummary *GlobalHashSummary `json:"globalCacheInputs"` + Packages []string `json:"packages"` + EnvMode util.EnvMode `json:"envMode"` + ExecutionSummary *executionSummary `json:"execution,omitempty"` + Tasks []*TaskSummary `json:"tasks"` +} + +// NewRunSummary returns a RunSummary instance +func NewRunSummary( + startAt time.Time, + ui cli.Ui, + repoRoot turbopath.AbsoluteSystemPath, + repoPath turbopath.RelativeSystemPath, + turboVersion string, + apiClient *client.APIClient, + runOpts util.RunOpts, + packages []string, + globalEnvMode util.EnvMode, + globalHashSummary *GlobalHashSummary, + synthesizedCommand string, +) Meta { + singlePackage := runOpts.SinglePackage + profile := runOpts.Profile + shouldSave := runOpts.Summarize + spaceID := runOpts.ExperimentalSpaceID + + runType := runTypeReal + if runOpts.DryRun { + runType = runTypeDryText + if runOpts.DryRunJSON { + runType = runTypeDryJSON + } + } + + executionSummary := newExecutionSummary(synthesizedCommand, repoPath, startAt, profile) + + return Meta{ + RunSummary: &RunSummary{ + ID: ksuid.New(), + Version: runSummarySchemaVersion, + ExecutionSummary: executionSummary, + TurboVersion: turboVersion, + Packages: packages, + EnvMode: globalEnvMode, + Tasks: []*TaskSummary{}, + GlobalHashSummary: globalHashSummary, + }, + ui: ui, + runType: runType, + repoRoot: repoRoot, + singlePackage: singlePackage, + shouldSave: shouldSave, + apiClient: apiClient, + spaceID: spaceID, + synthesizedCommand: synthesizedCommand, + } +} + +// getPath returns a path to where the runSummary is written. +// The returned path will always be relative to the dir passsed in. +// We don't do a lot of validation, so `../../` paths are allowed. +func (rsm *Meta) getPath() turbopath.AbsoluteSystemPath { + filename := fmt.Sprintf("%s.json", rsm.RunSummary.ID) + return rsm.repoRoot.UntypedJoin(filepath.Join(".turbo", "runs"), filename) +} + +// Close wraps up the RunSummary at the end of a `turbo run`. 
+func (rsm *Meta) Close(ctx context.Context, exitCode int, workspaceInfos workspace.Catalog) error { + if rsm.runType == runTypeDryJSON || rsm.runType == runTypeDryText { + return rsm.closeDryRun(workspaceInfos) + } + + rsm.RunSummary.ExecutionSummary.exitCode = exitCode + rsm.RunSummary.ExecutionSummary.endedAt = time.Now() + + summary := rsm.RunSummary + if err := writeChrometracing(summary.ExecutionSummary.profileFilename, rsm.ui); err != nil { + rsm.ui.Error(fmt.Sprintf("Error writing tracing data: %v", err)) + } + + // TODO: printing summary to local, writing to disk, and sending to API + // are all the same thng, we should use a strategy similar to cache save/upload to + // do this in parallel. + + // Otherwise, attempt to save the summary + // Warn on the error, but we don't need to throw an error + if rsm.shouldSave { + if err := rsm.save(); err != nil { + rsm.ui.Warn(fmt.Sprintf("Error writing run summary: %v", err)) + } + } + + rsm.printExecutionSummary() + + // If we're not supposed to save or if there's no spaceID + if !rsm.shouldSave || rsm.spaceID == "" { + return nil + } + + if !rsm.apiClient.IsLinked() { + rsm.ui.Warn("Failed to post to space because repo is not linked to a Space. Run `turbo link` first.") + return nil + } + + // Wrap the record function so we can hoist out url/errors but keep + // the function signature/type the spinner.WaitFor expects. + var url string + var errs []error + record := func() { + url, errs = rsm.record() + } + + func() { + _ = spinner.WaitFor(ctx, record, rsm.ui, "...sending run summary...", 1000*time.Millisecond) + }() + + // After the spinner is done, print any errors and the url + if len(errs) > 0 { + rsm.ui.Warn("Errors recording run to Spaces") + for _, err := range errs { + rsm.ui.Warn(fmt.Sprintf("%v", err)) + } + } + + if url != "" { + rsm.ui.Output(fmt.Sprintf("Run: %s", url)) + rsm.ui.Output("") + } + + return nil +} + +// closeDryRun wraps up the Run Summary at the end of `turbo run --dry`. +// Ideally this should be inlined into Close(), but RunSummary doesn't currently +// have context about whether a run was real or dry. +func (rsm *Meta) closeDryRun(workspaceInfos workspace.Catalog) error { + // Render the dry run as json + if rsm.runType == runTypeDryJSON { + rendered, err := rsm.FormatJSON() + if err != nil { + return err + } + + rsm.ui.Output(string(rendered)) + return nil + } + + return rsm.FormatAndPrintText(workspaceInfos) +} + +// TrackTask makes it possible for the consumer to send information about the execution of a task. +func (summary *RunSummary) TrackTask(taskID string) (func(outcome executionEventName, err error, exitCode *int), *TaskExecutionSummary) { + return summary.ExecutionSummary.run(taskID) +} + +// Save saves the run summary to a file +func (rsm *Meta) save() error { + json, err := rsm.FormatJSON() + if err != nil { + return err + } + + // summaryPath will always be relative to the dir passsed in. + // We don't do a lot of validation, so `../../` paths are allowed + summaryPath := rsm.getPath() + + if err := summaryPath.EnsureDir(); err != nil { + return err + } + + return summaryPath.WriteFile(json, 0644) +} + +// record sends the summary to the API +func (rsm *Meta) record() (string, []error) { + errs := []error{} + + // Right now we'll send the POST to create the Run and the subsequent task payloads + // after all execution is done, but in the future, this first POST request + // can happen when the Run actually starts, so we can send updates to the associated Space + // as tasks complete. 
+ createRunEndpoint := fmt.Sprintf(runsEndpoint, rsm.spaceID) + response := &spacesRunResponse{} + + payload := rsm.newSpacesRunCreatePayload() + if startPayload, err := json.Marshal(payload); err == nil { + if resp, err := rsm.apiClient.JSONPost(createRunEndpoint, startPayload); err != nil { + errs = append(errs, fmt.Errorf("POST %s: %w", createRunEndpoint, err)) + } else { + if err := json.Unmarshal(resp, response); err != nil { + errs = append(errs, fmt.Errorf("Error unmarshaling response: %w", err)) + } + } + } + + if response.ID != "" { + if taskErrs := rsm.postTaskSummaries(response.ID); len(taskErrs) > 0 { + errs = append(errs, taskErrs...) + } + + if donePayload, err := json.Marshal(newSpacesDonePayload(rsm.RunSummary)); err == nil { + patchURL := fmt.Sprintf(runsPatchEndpoint, rsm.spaceID, response.ID) + if _, err := rsm.apiClient.JSONPatch(patchURL, donePayload); err != nil { + errs = append(errs, fmt.Errorf("PATCH %s: %w", patchURL, err)) + } + } + } + + if len(errs) > 0 { + return response.URL, errs + } + + return response.URL, nil +} + +func (rsm *Meta) postTaskSummaries(runID string) []error { + errs := []error{} + // We make at most 8 requests at a time. + maxParallelRequests := 8 + taskSummaries := rsm.RunSummary.Tasks + taskCount := len(taskSummaries) + taskURL := fmt.Sprintf(tasksEndpoint, rsm.spaceID, runID) + + parallelRequestCount := maxParallelRequests + if taskCount < maxParallelRequests { + parallelRequestCount = taskCount + } + + queue := make(chan int, taskCount) + + wg := &sync.WaitGroup{} + for i := 0; i < parallelRequestCount; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for index := range queue { + task := taskSummaries[index] + payload := newSpacesTaskPayload(task) + if taskPayload, err := json.Marshal(payload); err == nil { + if _, err := rsm.apiClient.JSONPost(taskURL, taskPayload); err != nil { + errs = append(errs, fmt.Errorf("Error sending %s summary to space: %w", task.TaskID, err)) + } + } + } + }() + } + + for index := range taskSummaries { + queue <- index + } + close(queue) + wg.Wait() + + if len(errs) > 0 { + return errs + } + + return nil +} diff --git a/cli/internal/runsummary/spaces.go b/cli/internal/runsummary/spaces.go new file mode 100644 index 0000000..bf19941 --- /dev/null +++ b/cli/internal/runsummary/spaces.go @@ -0,0 +1,96 @@ +package runsummary + +import ( + "github.com/vercel/turbo/cli/internal/ci" +) + +// spacesRunResponse deserialized the response from POST Run endpoint +type spacesRunResponse struct { + ID string + URL string +} + +type spacesRunPayload struct { + StartTime int64 `json:"startTime,omitempty"` // when the run was started + EndTime int64 `json:"endTime,omitempty"` // when the run ended. we should never submit start and end at the same time. + Status string `json:"status,omitempty"` // Status is "running" or "completed" + Type string `json:"type,omitempty"` // hardcoded to "TURBO" + ExitCode int `json:"exitCode,omitempty"` // exit code for the full run + Command string `json:"command,omitempty"` // the thing that kicked off the turbo run + RepositoryPath string `json:"repositoryPath,omitempty"` // where the command was invoked from + Context string `json:"context,omitempty"` // the host on which this Run was executed (e.g. 
Github Action, Vercel, etc) + + // TODO: we need to add these in + // originationUser string + // gitBranch string + // gitSha string +} + +// spacesCacheStatus is the same as TaskCacheSummary so we can convert +// spacesCacheStatus(cacheSummary), but change the json tags, to omit local and remote fields +type spacesCacheStatus struct { + // omitted fields, but here so we can convert from TaskCacheSummary easily + Local bool `json:"-"` + Remote bool `json:"-"` + Status string `json:"status"` // should always be there + Source string `json:"source,omitempty"` + TimeSaved int `json:"timeSaved"` +} + +type spacesTask struct { + Key string `json:"key,omitempty"` + Name string `json:"name,omitempty"` + Workspace string `json:"workspace,omitempty"` + Hash string `json:"hash,omitempty"` + StartTime int64 `json:"startTime,omitempty"` + EndTime int64 `json:"endTime,omitempty"` + Cache spacesCacheStatus `json:"cache,omitempty"` + ExitCode int `json:"exitCode,omitempty"` + Dependencies []string `json:"dependencies,omitempty"` + Dependents []string `json:"dependents,omitempty"` + Logs string `json:"log"` +} + +func (rsm *Meta) newSpacesRunCreatePayload() *spacesRunPayload { + startTime := rsm.RunSummary.ExecutionSummary.startedAt.UnixMilli() + context := "LOCAL" + if name := ci.Constant(); name != "" { + context = name + } + return &spacesRunPayload{ + StartTime: startTime, + Status: "running", + Command: rsm.synthesizedCommand, + RepositoryPath: rsm.repoPath.ToString(), + Type: "TURBO", + Context: context, + } +} + +func newSpacesDonePayload(runsummary *RunSummary) *spacesRunPayload { + endTime := runsummary.ExecutionSummary.endedAt.UnixMilli() + return &spacesRunPayload{ + Status: "completed", + EndTime: endTime, + ExitCode: runsummary.ExecutionSummary.exitCode, + } +} + +func newSpacesTaskPayload(taskSummary *TaskSummary) *spacesTask { + startTime := taskSummary.Execution.startAt.UnixMilli() + endTime := taskSummary.Execution.endTime().UnixMilli() + + return &spacesTask{ + Key: taskSummary.TaskID, + Name: taskSummary.Task, + Workspace: taskSummary.Package, + Hash: taskSummary.Hash, + StartTime: startTime, + EndTime: endTime, + Cache: spacesCacheStatus(taskSummary.CacheSummary), // wrapped so we can remove fields + ExitCode: *taskSummary.Execution.exitCode, + Dependencies: taskSummary.Dependencies, + Dependents: taskSummary.Dependents, + Logs: string(taskSummary.GetLogs()), + } +} diff --git a/cli/internal/runsummary/task_summary.go b/cli/internal/runsummary/task_summary.go new file mode 100644 index 0000000..fb0cb30 --- /dev/null +++ b/cli/internal/runsummary/task_summary.go @@ -0,0 +1,117 @@ +package runsummary + +import ( + "os" + + "github.com/vercel/turbo/cli/internal/cache" + "github.com/vercel/turbo/cli/internal/fs" + "github.com/vercel/turbo/cli/internal/turbopath" + "github.com/vercel/turbo/cli/internal/util" +) + +// TaskCacheSummary is an extended version of cache.ItemStatus +// that includes TimeSaved and some better data. +type TaskCacheSummary struct { + Local bool `json:"local"` // Deprecated, but keeping around for --dry=json + Remote bool `json:"remote"` // Deprecated, but keeping around for --dry=json + Status string `json:"status"` // should always be there + Source string `json:"source,omitempty"` // can be empty on status:miss + TimeSaved int `json:"timeSaved"` // always include, but can be 0 +} + +// NewTaskCacheSummary decorates a cache.ItemStatus into a TaskCacheSummary +// Importantly, it adds the derived keys of `source` and `status` based on +// the local/remote booleans. 
It would be nice if these were just included +// from upstream, but that is a more invasive change. +func NewTaskCacheSummary(itemStatus cache.ItemStatus, timeSaved *int) TaskCacheSummary { + status := cache.CacheEventMiss + if itemStatus.Local || itemStatus.Remote { + status = cache.CacheEventHit + } + + var source string + if itemStatus.Local { + source = cache.CacheSourceFS + } else if itemStatus.Remote { + source = cache.CacheSourceRemote + } + + cs := TaskCacheSummary{ + // copy these over + Local: itemStatus.Local, + Remote: itemStatus.Remote, + Status: status, + Source: source, + } + // add in a dereferences timeSaved, should be 0 if nil + if timeSaved != nil { + cs.TimeSaved = *timeSaved + } + return cs +} + +// TaskSummary contains information about the task that was about to run +// TODO(mehulkar): `Outputs` and `ExcludedOutputs` are slightly redundant +// as the information is also available in ResolvedTaskDefinition. We could remove them +// and favor a version of Outputs that is the fully expanded list of files. +type TaskSummary struct { + TaskID string `json:"taskId,omitempty"` + Task string `json:"task"` + Package string `json:"package,omitempty"` + Hash string `json:"hash"` + ExpandedInputs map[turbopath.AnchoredUnixPath]string `json:"inputs"` + ExternalDepsHash string `json:"hashOfExternalDependencies"` + CacheSummary TaskCacheSummary `json:"cache"` + Command string `json:"command"` + CommandArguments []string `json:"cliArguments"` + Outputs []string `json:"outputs"` + ExcludedOutputs []string `json:"excludedOutputs"` + LogFile string `json:"logFile"` + Dir string `json:"directory,omitempty"` + Dependencies []string `json:"dependencies"` + Dependents []string `json:"dependents"` + ResolvedTaskDefinition *fs.TaskDefinition `json:"resolvedTaskDefinition"` + ExpandedOutputs []turbopath.AnchoredSystemPath `json:"expandedOutputs"` + Framework string `json:"framework"` + EnvMode util.EnvMode `json:"envMode"` + EnvVars TaskEnvVarSummary `json:"environmentVariables"` + Execution *TaskExecutionSummary `json:"execution,omitempty"` // omit when it's not set +} + +// GetLogs reads the Logfile and returns the data +func (ts *TaskSummary) GetLogs() []byte { + bytes, err := os.ReadFile(ts.LogFile) + if err != nil { + return []byte{} + } + return bytes +} + +// TaskEnvVarSummary contains the environment variables that impacted a task's hash +type TaskEnvVarSummary struct { + Configured []string `json:"configured"` + Inferred []string `json:"inferred"` + Global []string `json:"global"` + Passthrough []string `json:"passthrough"` + GlobalPassthrough []string `json:"globalPassthrough"` +} + +// cleanForSinglePackage converts a TaskSummary to remove references to workspaces +func (ts *TaskSummary) cleanForSinglePackage() { + dependencies := make([]string, len(ts.Dependencies)) + for i, dependency := range ts.Dependencies { + dependencies[i] = util.StripPackageName(dependency) + } + dependents := make([]string, len(ts.Dependents)) + for i, dependent := range ts.Dependents { + dependents[i] = util.StripPackageName(dependent) + } + task := util.StripPackageName(ts.TaskID) + + ts.TaskID = task + ts.Task = task + ts.Dependencies = dependencies + ts.Dependents = dependents + ts.Dir = "" + ts.Package = "" +} |
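
Since the whole package lands in this single commit, a brief orientation may help. The sketch below is not part of the diff; it is a minimal, illustrative example of how a caller inside the turbo CLI would be expected to wire these pieces together, using only the exported surface visible above (`NewRunSummary`, `RunSummary.TrackTask`, `Meta.Close`, and the `Target*` event constants). The wrapper function, task ID, version string, and synthesized command are placeholders, not values taken from the commit.

```go
// Illustrative only — not part of this commit. Sketches the intended call
// order of the new runsummary package: build the Meta wrapper at the start
// of a run, report task outcomes through the tracer returned by TrackTask,
// then Close at the end of the run.
package example

import (
	"context"
	"time"

	"github.com/mitchellh/cli"
	"github.com/vercel/turbo/cli/internal/client"
	"github.com/vercel/turbo/cli/internal/runsummary"
	"github.com/vercel/turbo/cli/internal/turbopath"
	"github.com/vercel/turbo/cli/internal/util"
	"github.com/vercel/turbo/cli/internal/workspace"
)

func runWithSummary(
	ctx context.Context,
	ui cli.Ui,
	repoRoot turbopath.AbsoluteSystemPath,
	repoPath turbopath.RelativeSystemPath,
	apiClient *client.APIClient,
	runOpts util.RunOpts,
	packages []string,
	globalEnvMode util.EnvMode,
	globalHashSummary *runsummary.GlobalHashSummary,
	workspaceInfos workspace.Catalog,
) error {
	// 1. Create the summary wrapper at the start of `turbo run`.
	meta := runsummary.NewRunSummary(
		time.Now(), ui, repoRoot, repoPath,
		"1.9.0", // turboVersion — placeholder
		apiClient, runOpts, packages, globalEnvMode, globalHashSummary,
		"turbo run build", // synthesizedCommand — placeholder
	)

	// 2. For each task, obtain a tracer and report its lifecycle.
	tracer, _ := meta.RunSummary.TrackTask("web#build") // taskID — placeholder
	tracer(runsummary.TargetBuilding, nil, nil)         // task started
	exitCode := 0
	tracer(runsummary.TargetBuilt, nil, &exitCode) // task finished successfully

	// 3. Close prints the execution summary and, when summarizing is enabled
	//    and a space ID is configured, writes .turbo/runs/<id>.json and posts
	//    the run to the Spaces API.
	return meta.Close(ctx, 0, workspaceInfos)
}
```

For a dry run (`--dry` / `--dry=json`), `Close` instead routes to `closeDryRun`, which renders the summary via `FormatAndPrintText` or `FormatJSON` rather than recording an execution.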
