Commit fd37ed4
Sync the pipelinerun status from the informers
When we reconcile a pipelinerun, we should ensure that the pipelinerun status is always in sync with the actual list of taskruns, which the taskrun informer can provide. The only way to filter taskruns is by the tekton.dev/pipelinerun label. If an orphaned taskrun is found, we can use its other labels to reconstruct the missing entry in the pipelinerun status.
1 parent 1fbac2a commit fd37ed4
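
For context on the approach: the sync reads from the taskrun informer's lister rather than querying the API server, selecting every TaskRun labelled with the PipelineRun's name and then inspecting the remaining Tekton labels to rebuild any missing status entries. Below is a minimal sketch of that label-based lookup, not the committed code: it assumes the generated TaskRunLister from Tekton's client listers package and a PipelineRunLabelKey constant; the actual change builds the selector via getTaskrunLabels, as shown in the diff further down.

package statussync

import (
	"k8s.io/apimachinery/pkg/labels"

	"github.com/tektoncd/pipeline/pkg/apis/pipeline"
	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
	listers "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1"
)

// listTaskRunsForPipelineRun sketches the lister query used for the sync: it
// returns every TaskRun in the PipelineRun's namespace that carries the
// PipelineRun label, served from the informer cache. PipelineRunLabelKey is an
// assumption here; the committed code derives the selector from getTaskrunLabels.
func listTaskRunsForPipelineRun(lister listers.TaskRunLister, pr *v1beta1.PipelineRun) ([]*v1beta1.TaskRun, error) {
	selector := labels.SelectorFromSet(labels.Set{
		pipeline.GroupName + pipeline.PipelineRunLabelKey: pr.Name,
	})
	return lister.TaskRuns(pr.Namespace).List(selector)
}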

File tree

4 files changed: +382 −9 lines changed

pkg/reconciler/pipelinerun/pipelinerun.go

Lines changed: 89 additions & 0 deletions
@@ -21,6 +21,7 @@ import (
 	"fmt"
 	"path/filepath"
 	"reflect"
+	"strconv"
 	"strings"
 	"time"
 
@@ -45,6 +46,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/equality"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/client-go/tools/cache"
 	"knative.dev/pkg/apis"
 	"knative.dev/pkg/configmap"
@@ -204,6 +206,14 @@ func (c *Reconciler) Reconcile(ctx context.Context, key string) error {
 		return err
 	}
 
+	// Make sure that the PipelineRun status is in sync with the actual TaskRuns
+	err = c.updatePipelineRunStatusFromInformer(pr)
+	if err != nil {
+		// This should not fail. Return the error so we can re-try later.
+		c.Logger.Errorf("Error while syncing the pipelinerun status: %v", err.Error())
+		return err
+	}
+
 	// Reconcile this copy of the pipelinerun and then write back any status or label
 	// updates regardless of whether the reconciliation errored out.
 	if err = c.reconcile(ctx, pr); err != nil {
@@ -935,3 +945,82 @@ func storePipelineSpec(ctx context.Context, pr *v1beta1.PipelineRun, ps *v1beta1
 	}
 	return nil
 }
+
+func (c *Reconciler) updatePipelineRunStatusFromInformer(pr *v1beta1.PipelineRun) error {
+	pipelineRunLabels := getTaskrunLabels(pr, "")
+	taskRuns, err := c.taskRunLister.TaskRuns(pr.Namespace).List(labels.SelectorFromSet(pipelineRunLabels))
+	if err != nil {
+		c.Logger.Errorf("Could not list TaskRuns %#v", err)
+		return err
+	}
+	// Store a list of Condition TaskRuns for each PipelineTask (by name)
+	conditionTaskRuns := make(map[string][]*v1beta1.TaskRun)
+	// Map PipelineTask names to TaskRun names that were already in the status
+	taskRunByPipelineTask := make(map[string]string)
+	// First loop over all the TaskRuns associated to Tasks
+	for _, taskrun := range taskRuns {
+		lbls := taskrun.GetLabels()
+		pipelineTaskName := lbls[pipeline.GroupName+pipeline.PipelineTaskLabelKey]
+		if _, ok := lbls[pipeline.GroupName+pipeline.ConditionCheckKey]; ok {
+			// Save condition for looping over them after this
+			if _, ok := conditionTaskRuns[pipelineTaskName]; !ok {
+				// If it's the first condition taskrun, initialise the slice
+				conditionTaskRuns[pipelineTaskName] = []*v1beta1.TaskRun{}
+			}
+			conditionTaskRuns[pipelineTaskName] = append(conditionTaskRuns[pipelineTaskName], taskrun)
+			continue
+		}
+		// Map pipeline task to taskrun name
+		taskRunByPipelineTask[pipelineTaskName] = taskrun.Name
+		if _, ok := pr.Status.TaskRuns[taskrun.Name]; !ok {
+			// This taskrun was missing from the status.
+			// Add it without conditions, which are handled in the next loop
+			pr.Status.TaskRuns[taskrun.Name] = &v1alpha1.PipelineRunTaskRunStatus{
+				PipelineTaskName: pipelineTaskName,
+				Status:           &taskrun.Status,
+				ConditionChecks:  nil,
+			}
+		}
+	}
+	// Then loop by pipelinetask name over all the TaskRuns associated to Conditions
+	for pipelineTaskName, actualConditionTaskRuns := range conditionTaskRuns {
+		taskRunName, ok := taskRunByPipelineTask[pipelineTaskName]
+		if !ok {
+			// The pipelineTask associated to the conditions was not found in the pipelinerun
+			// status. This means that the conditions were orphaned, and never added to the
+			// status. In this case we need to generate a new TaskRun name, that will be used
+			// to run the TaskRun if the conditions are passed.
+			taskRunName = resources.GetTaskRunName(pr.Status.TaskRuns, pipelineTaskName, pr.Name)
+			pr.Status.TaskRuns[taskRunName] = &v1alpha1.PipelineRunTaskRunStatus{
+				PipelineTaskName: pipelineTaskName,
+				Status:           nil,
+				ConditionChecks:  nil,
+			}
+		}
+		// Build the map of condition checks for the taskrun
+		// If there were no other condition, initialise the map
+		conditionChecks := pr.Status.TaskRuns[taskRunName].ConditionChecks
+		if conditionChecks == nil {
+			conditionChecks = make(map[string]*v1alpha1.PipelineRunConditionCheckStatus)
+		}
+		for i, foundTaskRun := range actualConditionTaskRuns {
+			lbls := foundTaskRun.GetLabels()
+			if _, ok := conditionChecks[foundTaskRun.Name]; !ok {
+				// The condition check was not found, so we need to add it
+				// We only add the condition name, the status can now be gathered by the
+				// normal reconcile process
+				if conditionName, ok := lbls[pipeline.GroupName+pipeline.ConditionNameKey]; ok {
+					conditionChecks[foundTaskRun.Name] = &v1alpha1.PipelineRunConditionCheckStatus{
+						ConditionName: fmt.Sprintf("%s-%s", conditionName, strconv.Itoa(i)),
+					}
+				} else {
+					// The condition name label is missing, so we cannot recover this
+					c.Logger.Warnf("found an orphaned condition taskrun %#v with missing %s label",
+						foundTaskRun, pipeline.ConditionNameKey)
+				}
+			}
+		}
+		pr.Status.TaskRuns[taskRunName].ConditionChecks = conditionChecks
+	}
+	return nil
+}
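
For reference, here is a hedged illustration of the label set the new function relies on when it encounters an orphaned condition-check TaskRun. The values are made up for the example; only the key constants already referenced in the diff (GroupName, PipelineTaskLabelKey, ConditionCheckKey, ConditionNameKey) are assumed.

package statussync

import "github.com/tektoncd/pipeline/pkg/apis/pipeline"

// exampleConditionCheckLabels shows, with illustrative values, which labels the
// recovery loops read: the conditionCheck key routes the TaskRun into
// conditionTaskRuns, pipelineTask ties it back to its PipelineTask, and
// conditionName is copied into the rebuilt ConditionChecks entry.
var exampleConditionCheckLabels = map[string]string{
	pipeline.GroupName + pipeline.PipelineTaskLabelKey: "unit-tests",       // illustrative value
	pipeline.GroupName + pipeline.ConditionCheckKey:    "unit-tests-check", // illustrative value
	pipeline.GroupName + pipeline.ConditionNameKey:     "branch-is-main",   // illustrative value
}

A TaskRun without the conditionCheck label is treated as a regular task by the first loop, which is why it can be mapped directly by its pipelineTask name.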
