
Commit 4d619b2

Fix Node custom labels for v26 nightly; add test for nightly (#168)
This refactors node_test.go to add a new "nightly" test that fetches the latest nightly build of Node, runs it in a Docker container, and then runs the existing test logic as normal. While creating this, I also discovered a real issue in our support for the v26 prerelease (slated for release in April): nodejs/node#56290 causes one of the symbols we rely on to no longer be exported, so we need to grab it a different way. This commit also fixes that issue.
1 parent 1dde38f commit 4d619b2
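
The symbol-lookup fix itself lives in the interpreter sources, which are among the five changed files but are not shown on this page. Purely as an illustration of the general "grab it a different way" idea, and not the commit's actual implementation, one could resolve a symbol's address by preferring the dynamic symbol table and falling back to the full symbol table using Go's standard debug/elf package; the binary path and symbol name below are placeholders:

```go
// Illustrative sketch only: resolve a symbol that may no longer be exported
// by falling back from .dynsym to .symtab. Not taken from this commit.
package main

import (
	"debug/elf"
	"fmt"
)

// lookupSymbol returns the value (address) of name, preferring the dynamic
// symbol table and falling back to the full symbol table when the symbol is
// no longer exported.
func lookupSymbol(path, name string) (uint64, error) {
	f, err := elf.Open(path)
	if err != nil {
		return 0, err
	}
	defer f.Close()

	// Try the dynamic symbol table first, then the full symbol table.
	for _, lookup := range []func() ([]elf.Symbol, error){f.DynamicSymbols, f.Symbols} {
		syms, err := lookup()
		if err != nil {
			continue // this table may be absent; try the next one
		}
		for _, s := range syms {
			if s.Name == name && s.Value != 0 {
				return s.Value, nil
			}
		}
	}
	return 0, fmt.Errorf("symbol %q not found in %s", name, path)
}

func main() {
	// Hypothetical usage; both arguments are placeholders.
	addr, err := lookupSymbol("/usr/bin/node", "some_v8_symbol")
	fmt.Println(addr, err)
}
```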

File tree

5 files changed: +249 -113 lines

interpreter/customlabels/integrationtests/node_test.go

Lines changed: 189 additions & 104 deletions
```diff
@@ -13,6 +13,7 @@ package customlabels_test
 
 import (
     "context"
+    "encoding/json"
     "errors"
     "fmt"
     "io"
@@ -29,6 +30,7 @@ import (
 
     "time"
 
+    "github.com/docker/go-connections/nat"
     "github.com/stretchr/testify/require"
     testcontainers "github.com/testcontainers/testcontainers-go"
     "github.com/testcontainers/testcontainers-go/wait"
@@ -50,6 +52,113 @@ var files = []string{
     "broken.md",
 }
 
+func runTest(t *testing.T, ctx context.Context, host string, port nat.Port) {
+    enabledTracers, err := tracertypes.Parse("labels,v8")
+    require.NoError(t, err)
+
+    r := &testutils.MockReporter{}
+    traceCh, trc := testutils.StartTracer(ctx, t, enabledTracers, r, false)
+
+    testHTTPEndpoint(t, host, port)
+    framesPerWorkerId := make(map[int]int)
+    framesPerFileName := make(map[string]int)
+
+    totalWorkloadFrames := 0
+    unlabeledWorkloadFrames := 0
+
+    timer := time.NewTimer(3 * time.Second)
+    defer timer.Stop()
+
+    for {
+        select {
+        case <-timer.C:
+            goto done
+        case trace := <-traceCh:
+            if trace == nil {
+                continue
+            }
+            ct, err := trc.TraceProcessor().ConvertTrace(trace)
+            require.NotNil(t, ct)
+            require.NoError(t, err)
+            workerId, okWid := trace.CustomLabels["workerId"]
+            filePath, okFname := trace.CustomLabels["filePath"]
+            var fileName string
+            if okFname {
+                fileName = path.Base(filePath)
+            }
+            knownWorkloadFrames := []string{
+                "lex",
+                "parse",
+                "blockTokens",
+                "readFile",
+                "readFileHandle",
+            }
+            hasWorkloadFrame := false
+
+            for i := range ct.Frames {
+                if ct.Frames[i].Value().Type == libpf.V8Frame {
+                    name := ct.Frames[i].Value().FunctionName.String()
+                    if slices.Contains(knownWorkloadFrames, name) {
+                        hasWorkloadFrame = true
+                    }
+                }
+            }
+
+            if hasWorkloadFrame {
+                totalWorkloadFrames++
+                if !(okWid && okFname) {
+                    unlabeledWorkloadFrames++
+                }
+            }
+
+            if okWid {
+                val, err := strconv.Atoi(workerId)
+                require.NoError(t, err)
+
+                require.GreaterOrEqual(t, val, 0)
+                require.Less(t, val, N_WORKERS)
+
+                framesPerWorkerId[val]++
+            }
+
+            if okFname {
+                require.Contains(t, files, fileName)
+                framesPerFileName[fileName]++
+            }
+        }
+    }
+done:
+    totalWidFrames := 0
+    // for 8 workers, each should have roughly 1/8
+    // of the labeled frames. There will be a bit of skew,
+    // so accept anything above 60% of that.
+    for i := 0; i < N_WORKERS; i++ {
+        totalWidFrames += framesPerWorkerId[i]
+    }
+    expectedWorkerAvg := float64(totalWidFrames) / float64(N_WORKERS)
+    for i := 0; i < N_WORKERS; i++ {
+        require.Less(t, expectedWorkerAvg*0.60, float64(framesPerWorkerId[i]))
+    }
+    // Each of the documents should account for some nontrivial amount of time,
+    // but since they aren't all the same length, we are less strict.
+    totalFnameFrames := 0
+    for _, v := range framesPerFileName {
+        totalFnameFrames += v
+    }
+    expectedFnameAvg := float64(totalFnameFrames) / float64(len(framesPerFileName))
+    for _, v := range framesPerFileName {
+        require.Less(t, expectedFnameAvg*0.2, float64(v))
+    }
+
+    // Really, there should be zero frames in the
+    // `marked` workload that aren't under labels,
+    // but accept a 5% slop because the unwinder
+    // isn't perfect (e.g. it might interrupt the
+    // process when the Node environment is in an
+    // undefined state)
+    require.Less(t, 20*unlabeledWorkloadFrames, totalWorkloadFrames)
+}
+
 func TestIntegration(t *testing.T) {
     if !testutils.IsRoot() {
         t.Skip("root privileges required")
@@ -74,112 +183,69 @@ func TestIntegration(t *testing.T) {
 
             cont := startContainer(ctx, t, nodeVersion)
 
-            enabledTracers, err := tracertypes.Parse("labels,v8")
+            host, err := cont.Host(ctx)
+            require.NoError(t, err)
+            port, err := cont.MappedPort(ctx, "80")
             require.NoError(t, err)
 
-            r := &testutils.MockReporter{}
-            traceCh, trc := testutils.StartTracer(ctx, t, enabledTracers, r, false)
-
-            testHTTPEndpoint(ctx, t, cont)
-            framesPerWorkerId := make(map[int]int)
-            framesPerFileName := make(map[string]int)
-
-            totalWorkloadFrames := 0
-            unlabeledWorkloadFrames := 0
-
-            timer := time.NewTimer(3 * time.Second)
-            defer timer.Stop()
-
-            for {
-                select {
-                case <-timer.C:
-                    goto done
-                case trace := <-traceCh:
-                    if trace == nil {
-                        continue
-                    }
-                    ct, err := trc.TraceProcessor().ConvertTrace(trace)
-                    require.NotNil(t, ct)
-                    require.NoError(t, err)
-                    workerId, okWid := trace.CustomLabels["workerId"]
-                    filePath, okFname := trace.CustomLabels["filePath"]
-                    var fileName string
-                    if okFname {
-                        fileName = path.Base(filePath)
-                    }
-                    knownWorkloadFrames := []string{
-                        "lex",
-                        "parse",
-                        "blockTokens",
-                        "readFile",
-                        "readFileHandle",
-                    }
-                    hasWorkloadFrame := false
-
-                    for i := range ct.Frames {
-                        if ct.Frames[i].Value().Type == libpf.V8Frame {
-                            name := ct.Frames[i].Value().FunctionName.String()
-                            if slices.Contains(knownWorkloadFrames, name) {
-                                hasWorkloadFrame = true
-                            }
-                        }
-                    }
+            runTest(t, ctx, host, port)
+        })
+    }
+    t.Run("node-local-nightly", func(t *testing.T) {
+        type NodejsNightly struct {
+            Version string   `json:"version"`
+            Files   []string `json:"files"`
+        }
+
+        resp, err := http.Get("https://nodejs.org/download/nightly/index.json")
+        require.NoError(t, err)
+        defer resp.Body.Close()
+
+        var nightlies []NodejsNightly
+        err = json.NewDecoder(resp.Body).Decode(&nightlies)
+        require.NoError(t, err)
+        require.NotEmpty(t, nightlies)
+
+        latest := nightlies[0]
+
+        var nodeArch string
+        switch runtime.GOARCH {
+        case "arm64":
+            nodeArch = "linux-arm64"
+        case "amd64":
+            nodeArch = "linux-x64"
+        default:
+            t.Fatalf("Unsupported GOARCH: %s", runtime.GOARCH)
+        }
+
+        tarballName := fmt.Sprintf("node-%s-%s", latest.Version, nodeArch)
+
+        var tarballURL string
+        for _, file := range latest.Files {
+            if file == nodeArch {
+                tarballURL = fmt.Sprintf(
+                    "https://nodejs.org/download/nightly/%s/%s.tar.gz",
+                    latest.Version, tarballName)
+                break
+            }
+        }
+        require.NotEmpty(t, tarballURL, "No tarball found for latest nightly")
 
-                    if hasWorkloadFrame {
-                        totalWorkloadFrames++
-                        if !(okWid && okFname) {
-                            unlabeledWorkloadFrames++
-                        }
-                    }
+        ctx, cancel := context.WithCancel(t.Context())
+        t.Cleanup(cancel)
 
-                    if okWid {
-                        val, err := strconv.Atoi(workerId)
-                        require.NoError(t, err)
+        defer cancel()
 
-                        require.GreaterOrEqual(t, val, 0)
-                        require.Less(t, val, N_WORKERS)
+        cont := startNightlyContainer(ctx, t, tarballURL)
 
-                        framesPerWorkerId[val]++
-                    }
+        host, err := cont.Host(ctx)
+        require.NoError(t, err)
+        port, err := cont.MappedPort(ctx, "80")
+        require.NoError(t, err)
 
-                    if okFname {
-                        require.Contains(t, files, fileName)
-                        framesPerFileName[fileName]++
-                    }
-                }
-            }
-        done:
-            totalWidFrames := 0
-            // for 8 workers, each should have roughly 1/8
-            // of the labeled frames. There will be a bit of skew,
-            // so accept anything above 60% of that.
-            for i := 0; i < N_WORKERS; i++ {
-                totalWidFrames += framesPerWorkerId[i]
-            }
-            expectedWorkerAvg := float64(totalWidFrames) / float64(N_WORKERS)
-            for i := 0; i < N_WORKERS; i++ {
-                require.Less(t, expectedWorkerAvg*0.60, float64(framesPerWorkerId[i]))
-            }
-            // Each of the documents should account for some nontrivial amount of time,
-            // but since they aren't all the same length, we are less strict.
-            totalFnameFrames := 0
-            for _, v := range framesPerFileName {
-                totalFnameFrames += v
-            }
-            expectedFnameAvg := float64(totalFnameFrames) / float64(len(framesPerFileName))
-            for _, v := range framesPerFileName {
-                require.Less(t, expectedFnameAvg*0.2, float64(v))
-            }
+        runTest(t, ctx, host, port)
 
-            // Really, there should be zero frames in the
-            // `marked` workload that aren't under labels,
-            // but accept a 5% slop because the unwinder
-            // isn't perfect (e.g. it might interrupt the
-            // process when the Node environment is in an
-            // undefined state)
-            require.Less(t, 20*unlabeledWorkloadFrames, totalWorkloadFrames)
-        })
-    }
+    })
 }
 
 func startContainer(ctx context.Context, t *testing.T,
@@ -204,15 +270,34 @@ func startContainer(ctx context.Context, t *testing.T,
     return cont
 }
 
-func testHTTPEndpoint(ctx context.Context, t *testing.T, cont testcontainers.Container) {
-    const numGoroutines = 10
-    const requestsPerGoroutine = 10000
+func startNightlyContainer(ctx context.Context,
+    t *testing.T,
+    nodeURL string) testcontainers.Container {
+    t.Log("starting container for node nightly at URL", nodeURL)
 
-    host, err := cont.Host(ctx)
+    //nolint:dogsled
+    _, path, _, _ := runtime.Caller(0)
+    cont, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
+        ContainerRequest: testcontainers.ContainerRequest{
+            FromDockerfile: testcontainers.FromDockerfile{
+                Dockerfile: "Dockerfile.nightly",
+                Context:    filepath.Dir(path) + "/testdata/node-md-render/",
+                BuildArgs: map[string]*string{
+                    "NODE_URL": &nodeURL,
+                },
+            },
+            ExposedPorts: []string{"80/tcp"},
+            WaitingFor:   wait.ForHTTP("/docs/AUTHORS.md"),
+        },
+        Started: true,
+    })
     require.NoError(t, err)
+    return cont
+}
 
-    port, err := cont.MappedPort(ctx, "80")
-    require.NoError(t, err)
+func testHTTPEndpoint(t *testing.T, host string, port nat.Port) {
+    const numGoroutines = 10
+    const requestsPerGoroutine = 10000
 
     baseURL := "http://" + net.JoinHostPort(host, port.Port())
 
```
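
The new subtest resolves the nightly tarball URL from nodejs.org's index before building the container. The sketch below is a standalone program (not part of this commit) that mirrors that lookup using the same index URL and JSON fields as the test, and prints the tarball URL the test would feed into the Docker build; like the test, it assumes the index is sorted newest-first:

```go
// Standalone sketch mirroring the nightly lookup in the new subtest.
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"runtime"
)

type nodejsNightly struct {
	Version string   `json:"version"`
	Files   []string `json:"files"`
}

func main() {
	resp, err := http.Get("https://nodejs.org/download/nightly/index.json")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var nightlies []nodejsNightly
	if err := json.NewDecoder(resp.Body).Decode(&nightlies); err != nil {
		log.Fatal(err)
	}
	if len(nightlies) == 0 {
		log.Fatal("empty nightly index")
	}
	latest := nightlies[0] // assumes the index is newest-first, as the test does

	// Map the Go architecture to the Node tarball naming scheme.
	nodeArch := map[string]string{"arm64": "linux-arm64", "amd64": "linux-x64"}[runtime.GOARCH]
	if nodeArch == "" {
		log.Fatalf("unsupported GOARCH %q", runtime.GOARCH)
	}

	for _, f := range latest.Files {
		if f == nodeArch {
			fmt.Printf("https://nodejs.org/download/nightly/%s/node-%s-%s.tar.gz\n",
				latest.Version, latest.Version, nodeArch)
			return
		}
	}
	log.Fatalf("no %s tarball for %s", nodeArch, latest.Version)
}
```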
interpreter/customlabels/integrationtests/testdata/node-md-render/Dockerfile.nightly

Lines changed: 31 additions & 0 deletions (new file)
```dockerfile
FROM ubuntu:24.04

RUN apt-get update && apt-get install -y git python3 make g++ libatomic1 curl


WORKDIR /app
ENV PATH /app/node/bin:$PATH

COPY package.json ./

RUN git clone https://github.com/markedjs/marked.git /tmp/marked && \
    cd /tmp/marked && \
    git checkout 0a0da515346d2b3dd1662531043fa6925cb73fe3 && \
    cp -r docs /app/docs && \
    rm -rf /tmp/marked

COPY *.js ./

ARG NODE_URL

RUN curl $NODE_URL > out.tar.gz

RUN tar zxf out.tar.gz

RUN mv node* node

RUN npm install

EXPOSE 80

CMD node index.js
```

interpreter/customlabels/integrationtests/testdata/node-md-render/package.json

Lines changed: 1 addition & 1 deletion
```diff
@@ -9,7 +9,7 @@
   "author": "The Parca Authors",
   "license": "Apache-2.0",
   "dependencies": {
-    "@polarsignals/custom-labels": "^0.3.2",
+    "@polarsignals/custom-labels": "^0.3.4",
     "marked": "^16.1.2"
   }
 }
```
