package runtime

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"sync"
	"time"

	"gitea.com/gitea/act_runner/client"

	runnerv1 "gitea.com/gitea/proto-go/runner/v1"

	"github.com/nektos/act/pkg/artifacts"
	"github.com/nektos/act/pkg/common"
	"github.com/nektos/act/pkg/model"
	"github.com/nektos/act/pkg/runner"
	log "github.com/sirupsen/logrus"
)

// TaskInput contains the configuration used to run a single task.
type TaskInput struct {
	repoDirectory string
	actor         string
	// workdir string
	// workflowsPath string
	// autodetectEvent bool
	// eventPath string
	reuseContainers bool
	bindWorkdir     bool
	// secrets []string
	// envs []string
	// platforms []string
	// dryrun bool
	forcePull    bool
	forceRebuild bool
	// noOutput bool
	// envfile string
	// secretfile string
	insecureSecrets bool
	// defaultBranch string
	privileged            bool
	usernsMode            string
	containerArchitecture string
	containerDaemonSocket string
	// noWorkflowRecurse bool
	useGitIgnore       bool
	containerCapAdd    []string
	containerCapDrop   []string
	autoRemove         bool
	artifactServerPath string
	artifactServerPort string
	jsonLogger         bool
	noSkipCheckout     bool
	// remoteName string

	ForgeInstance string
	EnvFile       string
}

// taskLogHook is a logrus hook that buffers raw job output entries so they
// can be drained and reported back periodically.
type taskLogHook struct {
	entries []*log.Entry
	lock    sync.Mutex
}

func (h *taskLogHook) Levels() []log.Level {
	return log.AllLevels
}

func (h *taskLogHook) Fire(entry *log.Entry) error {
	if flag, ok := entry.Data["raw_output"]; ok {
		h.lock.Lock()
		if flagVal, ok := flag.(bool); ok && flagVal {
			log.Infof("task log: %s", entry.Message)
			h.entries = append(h.entries, entry)
		}
		h.lock.Unlock()
	}
	return nil
}

// swapLogs returns the buffered log entries and resets the buffer.
func (h *taskLogHook) swapLogs() []*log.Entry {
	// take the lock before inspecting entries so we do not race with Fire
	h.lock.Lock()
	defer h.lock.Unlock()
	if len(h.entries) == 0 {
		return nil
	}
	entries := h.entries
	h.entries = nil
	return entries
}
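
// The hook is attached to act's logger via common.WithLoggerHook (see Run
// below); anything logged with the "raw_output" field set to true is buffered
// here and later drained by reportStep. A minimal sketch of that flow, using a
// hypothetical log line purely for illustration:
//
//	hook := &taskLogHook{}
//	logger := log.New()
//	logger.AddHook(hook)
//	logger.WithField("raw_output", true).Info("line from a job step")
//	for _, e := range hook.swapLogs() {
//		fmt.Println(e.Message) // "line from a job step"
//	}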

// TaskState represents the state of a task.
type TaskState int

const (
	// TaskStateUnknown is the default state.
	TaskStateUnknown TaskState = iota
	// TaskStatePending means the task has been received and the actions are being parsed in preparation to run.
	TaskStatePending
	// TaskStateRunning means the task is currently running.
	TaskStateRunning
	// TaskStateSuccess means the task finished successfully without any error.
	TaskStateSuccess
	// TaskStateFailure means the task failed with an error.
	TaskStateFailure
)
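
// A String method is not part of the original type; a minimal sketch like the
// following (an assumption, not existing API) would make the state readable in
// log output:
//
//	func (s TaskState) String() string {
//		switch s {
//		case TaskStatePending:
//			return "pending"
//		case TaskStateRunning:
//			return "running"
//		case TaskStateSuccess:
//			return "success"
//		case TaskStateFailure:
//			return "failure"
//		default:
//			return "unknown"
//		}
//	}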

// Task represents a single build task and the state needed to run and report it.
type Task struct {
	BuildID int64
	Input   *TaskInput

	logHook *taskLogHook
	state   TaskState
	client  client.Client
	log     *log.Entry
}

// NewTask creates a new task for the given build ID and client.
func NewTask(buildID int64, client client.Client) *Task {
	task := &Task{
		Input: &TaskInput{
			reuseContainers: true,
			ForgeInstance:   "gitea",
		},
		BuildID: buildID,

		state:   TaskStatePending,
		client:  client,
		log:     log.WithField("buildID", buildID),
		logHook: &taskLogHook{},
	}
	task.Input.repoDirectory, _ = os.Getwd()
	return task
}
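
// A minimal usage sketch, assuming a client.Client named cli has already been
// constructed elsewhere (the build ID 42 is purely illustrative):
//
//	task := NewTask(42, cli)
//	task.Run(context.Background())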

// getWorkflowsPath returns the workflows directory. It tries .gitea/workflows
// first and falls back to .github/workflows if that directory does not exist.
func getWorkflowsPath(dir string) (string, error) {
	p := filepath.Join(dir, ".gitea/workflows")
	_, err := os.Stat(p)
	if err != nil {
		if !os.IsNotExist(err) {
			return "", err
		}
		return filepath.Join(dir, ".github/workflows"), nil
	}
	return p, nil
}
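
// A small illustration of the fallback (the path below is hypothetical):
//
//	path, _ := getWorkflowsPath("/tmp/repo")
//	// path is "/tmp/repo/.gitea/workflows" when that directory exists,
//	// otherwise "/tmp/repo/.github/workflows".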

// demoPlatforms maps common runs-on labels to the container images used to run them.
func demoPlatforms() map[string]string {
	return map[string]string{
		"ubuntu-latest": "node:16-buster-slim",
		"ubuntu-20.04":  "node:16-buster-slim",
		"ubuntu-18.04":  "node:16-buster-slim",
	}
}

// reportFailure reports the failure of the task.
func (t *Task) reportFailure(ctx context.Context, err error) {
	t.state = TaskStateFailure
	finishTask(t.BuildID)

	t.log.Errorf("task failed: %v", err)

	// only send the update when a client is available, mirroring reportSuccess
	if t.client == nil {
		return
	}

	// TODO: fill the step request
	stepRequest := &runnerv1.UpdateStepRequest{}
	_ = t.client.UpdateStep(ctx, stepRequest)
}

// startReporting periodically reports buffered logs every interval seconds
// until the task reaches a final state.
func (t *Task) startReporting(ctx context.Context, interval int64) {
	for {
		time.Sleep(time.Duration(interval) * time.Second)
		if t.state == TaskStateSuccess || t.state == TaskStateFailure {
			t.log.Debugf("task reporting stopped")
			break
		}
		t.reportStep(ctx)
	}
}
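
// The loop above does not watch ctx, so it keeps sleeping if the run is
// cancelled. A context-aware variant (a sketch, not the original
// implementation) could use a ticker instead:
//
//	ticker := time.NewTicker(time.Duration(interval) * time.Second)
//	defer ticker.Stop()
//	for {
//		select {
//		case <-ctx.Done():
//			return
//		case <-ticker.C:
//			if t.state == TaskStateSuccess || t.state == TaskStateFailure {
//				return
//			}
//			t.reportStep(ctx)
//		}
//	}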

// reportStep reports the current step of the task, sending any buffered logs.
func (t *Task) reportStep(ctx context.Context) {
	if t.client == nil {
		return
	}
	logValues := t.logHook.swapLogs()
	if len(logValues) == 0 {
		t.log.Debugf("no log to report")
		return
	}
	t.log.Infof("reporting %d logs", len(logValues))

	// TODO: fill the step request
	stepRequest := &runnerv1.UpdateStepRequest{}
	_ = t.client.UpdateStep(ctx, stepRequest)
}

// reportSuccess reports the success of the task.
func (t *Task) reportSuccess(ctx context.Context) {
	t.state = TaskStateSuccess
	finishTask(t.BuildID)

	t.log.Infof("task success")

	if t.client == nil {
		return
	}

	// TODO: fill the step request
	stepRequest := &runnerv1.UpdateStepRequest{}
	_ = t.client.UpdateStep(ctx, stepRequest)
}

// Run plans the workflow for this task and executes it, reporting progress and the final result.
func (t *Task) Run(ctx context.Context) {
	workflowsPath, err := getWorkflowsPath(t.Input.repoDirectory)
	if err != nil {
		t.reportFailure(ctx, err)
		return
	}
	t.log.Debugf("workflows path: %s", workflowsPath)

	planner, err := model.NewWorkflowPlanner(workflowsPath, false)
	if err != nil {
		t.reportFailure(ctx, err)
		return
	}

	var eventName string
	events := planner.GetEvents()
	if len(events) > 0 {
		// default to the first detected event so the user doesn't have to specify one
		t.log.Debugf("Using detected workflow event: %s", events[0])
		eventName = events[0]
	} else {
		if plan := planner.PlanEvent("push"); plan != nil {
			eventName = "push"
		}
	}

	// build the plan for this run
	var plan *model.Plan
	jobID := ""
	if t.BuildID > 0 {
		jobID = fmt.Sprintf("%d", t.BuildID)
	}
	if jobID != "" {
		t.log.Infof("Planning job: %s", jobID)
		plan = planner.PlanJob(jobID)
	} else {
		t.log.Infof("Planning event: %s", eventName)
		plan = planner.PlanEvent(eventName)
	}

	curDir, err := os.Getwd()
	if err != nil {
		t.reportFailure(ctx, err)
		return
	}

	// run the plan
	input := t.Input
	config := &runner.Config{
		Actor:           input.actor,
		EventName:       eventName,
		EventPath:       "",
		DefaultBranch:   "",
		ForcePull:       input.forcePull,
		ForceRebuild:    input.forceRebuild,
		ReuseContainers: input.reuseContainers,
		Workdir:         curDir,
		BindWorkdir:     input.bindWorkdir,
		LogOutput:       true,
		JSONLogger:      input.jsonLogger,
		// Env:     envs,
		// Secrets: secrets,
		InsecureSecrets:       input.insecureSecrets,
		Platforms:             demoPlatforms(),
		Privileged:            input.privileged,
		UsernsMode:            input.usernsMode,
		ContainerArchitecture: input.containerArchitecture,
		ContainerDaemonSocket: input.containerDaemonSocket,
		UseGitIgnore:          input.useGitIgnore,
		GitHubInstance:        input.ForgeInstance,
		ContainerCapAdd:       input.containerCapAdd,
		ContainerCapDrop:      input.containerCapDrop,
		AutoRemove:            input.autoRemove,
		ArtifactServerPath:    input.artifactServerPath,
		ArtifactServerPort:    input.artifactServerPort,
		NoSkipCheckout:        input.noSkipCheckout,
		// RemoteName: input.remoteName,
	}
	r, err := runner.New(config)
	if err != nil {
		t.reportFailure(ctx, err)
		return
	}

	cancel := artifacts.Serve(ctx, input.artifactServerPath, input.artifactServerPort)
	t.log.Debugf("artifacts server started at %s:%s", input.artifactServerPath, input.artifactServerPort)

	executor := r.NewPlanExecutor(plan).Finally(func(ctx context.Context) error {
		cancel()
		return nil
	})

	t.log.Infof("workflow prepared")

	// register the log hook so job output is captured for reporting
	ctx = common.WithLoggerHook(ctx, t.logHook)

	go t.startReporting(ctx, 1)

	if err := executor(ctx); err != nil {
		t.reportFailure(ctx, err)
		return
	}

	t.reportSuccess(ctx)
}