Merge pull request #40 from GoogleChromeLabs/add/benchmark-web-vitals
Add command to benchmark Web Vitals via `web-vitals` library
mukeshpanchal27 authored Mar 1, 2023
2 parents a77d21f + afa6239 commit d315f59
Showing 7 changed files with 380 additions and 70 deletions.
40 changes: 34 additions & 6 deletions cli/README.md
@@ -95,13 +95,13 @@ Get Server-Timing header medians _and_ all individual run values:
wpt-server-timing --test 221011_AiDcV7_GGM --include-runs
```

-### `benchmark-url`
+### `benchmark-server-timing`

-Sends the selected number of requests with a certain concurrency to provided URLs to find out the median response time for each URL. It can also track Server-Timing metrics and get median values for each of them.
+Sends the selected number of requests with a certain concurrency to provided URLs to find out the median response time for each URL. It also tracks medians for any Server-Timing metrics present in the response.
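For reference, the Server-Timing metrics are parsed from a response header of the following shape (metric names vary by server; `wp-total` and `wp-template` here are illustrative):

```
Server-Timing: wp-total;dur=125.3, wp-template;dur=48.7
```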

#### Arguments

-* `--url` (`-u`): An URL to benchmark.
+* `--url` (`-u`): A URL to benchmark.
* `--concurrency` (`-c`): Number of requests to make at the same time.
* `--number` (`-n`): Total number of requests to send.
* `--file` (`-f`): File with URLs (one URL per line) to run benchmark tests for.
@@ -111,15 +111,43 @@ Sends the selected number of requests with a certain concurrency to provided URLs

Send 10 requests, 2 at a time:
```
-benchmark-url --url https://example.com/ -n 10 -c 2
+benchmark-server-timing --url https://example.com/ -n 10 -c 2
```

Same as above, but results are formatted as CSV:
```
-benchmark-url --url https://example.com/ -n 10 -c 2 --output csv
+benchmark-server-timing --url https://example.com/ -n 10 -c 2 --output csv
```

To run benchmark tests for URLs from a file:
```
-benchmark-url -f path/to/urls.txt -n 5
+benchmark-server-timing -f path/to/urls.txt -n 5
```

### `benchmark-web-vitals`

Loads the provided URLs in a headless browser several times to measure median Web Vitals metrics for each URL. Currently, the results cover the load time metrics FCP, LCP, and TTFB; support for additional metrics is explored in a [follow-up pull request](https://github.com/GoogleChromeLabs/wpp-research/pull/41).

#### Arguments

* `--url` (`-u`): A URL to benchmark.
* `--number` (`-n`): Total number of requests to send.
* `--file` (`-f`): File with URLs (one URL per line) to run benchmark tests for.
* `--output` (`-o`): The output format.

#### Examples

Send 10 requests to a single URL:
```
benchmark-web-vitals --url https://example.com/ -n 10
```

Same as above, but results are formatted as CSV:
```
benchmark-web-vitals --url https://example.com/ -n 10 --output csv
```

To run benchmark tests for URLs from a file:
```
benchmark-web-vitals -f path/to/urls.txt -n 5
```
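The output contains one row per URL, with the success rate and the median value in milliseconds for each collected metric. A run might produce a table along these lines (values are purely illustrative):

```
URL                   Success Rate  FCP     LCP     TTFB
https://example.com/  100%          1032.4  1840.2  218.6
```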
@@ -19,14 +19,13 @@
/**
* External dependencies
*/
-import fs from 'fs';
-import readline from 'readline';
import autocannon from 'autocannon';
import round from 'lodash-es/round.js';

/**
* Internal dependencies
*/
+import { getURLs } from '../lib/cli/args.mjs';
import {
log,
formats,
Expand All @@ -36,15 +35,10 @@ import {
} from '../lib/cli/logger.mjs';
import { calcMedian } from '../lib/util/math.mjs';

-/**
- * Example for how to use this command in a GitHub workflow:
- * https://gist.github.com/eugene-manuilov/7a2dded1cbe5e78ac51c39140e443c9b
- */

export const options = [
{
argname: '-u, --url <url>',
-description: 'An URL to run benchmark tests for',
+description: 'A URL to run benchmark tests for',
},
{
argname: '-c, --concurrency <concurrency>',
@@ -101,39 +95,8 @@ export async function handler( opt ) {
} else {
outputResults( opt, results );
}
-};
-
-/**
- * Generates URLs to benchmark based on command arguments. If both "<url>" and "<file>" arguments
- * are passed to the command, then both will be used to generate URLs.
- *
- * @param {BenchmarkCommandOptions} opt Command options.
- */
-async function* getURLs( opt ) {
-if ( !! opt.url ) {
-yield opt.url;
-}
-
-if ( !! opt.file ) {
-const rl = readline.createInterface( {
-input: fs.createReadStream( opt.file ),
-crlfDelay: Infinity,
-} );
-
-for await ( const url of rl ) {
-if ( url.length > 0 ) {
-yield url;
-}
-}
-}
+}

-/**
- * Benchmarks an URL and returns response time and server-timing metrics for every request.
- *
- * @param {BenchmarkOptions} params Benchmark parameters.
- * @return {BenchmarkResults} Response times and metrics arrays.
- */
function benchmarkURL( params ) {
const metrics = {};
const responseTimes = [];
@@ -209,12 +172,6 @@ function getServerTimingMetricsFromHeaders( headers ) {
return {};
}

-/**
- * Outputs results of benchmarking.
- *
- * @param {BenchmarkCommandOptions} opt Command options.
- * @param {Array.<Array>} results A collection of benchmark results for each URL.
- */
function outputResults( opt, results ) {
const len = results.length;
const allMetricNames = {};
@@ -254,12 +211,5 @@ function outputResults( opt, results ) {
] );
}

-log(
-table(
-headings,
-tableData,
-opt.output,
-true
-)
-);
+log( table( headings, tableData, opt.output, true ) );
}
220 changes: 220 additions & 0 deletions cli/commands/benchmark-web-vitals.mjs
@@ -0,0 +1,220 @@
/**
* CLI command to benchmark several URLs for Core Web Vitals and other key metrics.
*
* WPP Research, Copyright 2023 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

/**
* External dependencies
*/
import puppeteer from 'puppeteer';
import round from 'lodash-es/round.js';

/**
* Internal dependencies
*/
import { getURLs } from '../lib/cli/args.mjs';
import {
log,
formats,
table,
isValidTableFormat,
OUTPUT_FORMAT_TABLE,
} from '../lib/cli/logger.mjs';
import { calcMedian } from '../lib/util/math.mjs';

export const options = [
{
argname: '-u, --url <url>',
description: 'A URL to run benchmark tests for',
},
{
argname: '-n, --number <number>',
description: 'Number of requests to perform',
defaults: 1,
},
{
argname: '-f, --file <file>',
description: 'File with URLs to run benchmark tests for',
},
{
argname: '-o, --output <output>',
description: 'Output format: csv or table',
defaults: OUTPUT_FORMAT_TABLE,
},
];

export async function handler( opt ) {
if ( ! isValidTableFormat( opt.output ) ) {
log(
formats.error(
'The output format provided via the --output (-o) argument must be either "table" or "csv".'
)
);
return;
}

const { number: amount } = opt;
const results = [];

const browser = await puppeteer.launch();

for await ( const url of getURLs( opt ) ) {
const { completeRequests, metrics } = await benchmarkURL(
browser,
{
url,
amount,
}
);

results.push( [ url, completeRequests, metrics ] );
}

await browser.close();

if ( results.length === 0 ) {
log(
formats.error(
'You need to provide a URL to benchmark via the --url (-u) argument, or a file with multiple URLs via the --file (-f) argument.'
)
);
} else {
outputResults( opt, results );
}
}

async function benchmarkURL( browser, params ) {
/*
* For now this only includes load time metrics.
* In the future, additional Web Vitals like CLS, FID, and INP should be
* added; however, they are slightly more complex to retrieve through an
* automated headless browser test.
* See https://github.com/GoogleChromeLabs/wpp-research/pull/41.
*/
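/*
* Each entry below pairs a metric with the web-vitals listener that reports
* it ('listen'), the window property that listener writes to ('global'), a
* function evaluated in the page context to read that property back ('get'),
* and an array collecting the value from every successful run ('results').
*/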
const metricsDefinition = {
FCP: {
listen: 'onFCP',
global: 'webVitalsFCP',
get: () => window.webVitalsFCP,
results: [],
},
LCP: {
listen: 'onLCP',
global: 'webVitalsLCP',
get: () => window.webVitalsLCP,
results: [],
},
TTFB: {
listen: 'onTTFB',
global: 'webVitalsTTFB',
get: () => window.webVitalsTTFB,
results: [],
},
};

let completeRequests = 0;
let requestNum = 0;

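/*
* Build a small inline ES module that imports the needed listeners from the
* web-vitals library (served via unpkg) and registers a callback for each
* metric that copies the reported delta into a dedicated global, so the
* value can later be read back out of the page via page.evaluate(). The CLS
* branch of the ternary is dormant for the metrics above; presumably it
* scales the unitless layout-shift score by 1000 to keep it legible next to
* millisecond values.
*/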
let scriptTag = `import { ${ Object.values( metricsDefinition ).map( ( value ) => value.listen ).join( ', ' ) } } from "https://unpkg.com/web-vitals@3?module";`;
Object.values( metricsDefinition ).forEach( ( value ) => {
scriptTag += `${ value.listen }( ( { name, delta } ) => { window.${ value.global } = name === 'CLS' ? delta * 1000 : delta; } );`;
} );
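// For the three metrics above, the generated module is equivalent to:
//
//   import { onFCP, onLCP, onTTFB } from "https://unpkg.com/web-vitals@3?module";
//   onFCP( ( { name, delta } ) => { window.webVitalsFCP = name === 'CLS' ? delta * 1000 : delta; } );
//   onLCP( ( { name, delta } ) => { window.webVitalsLCP = name === 'CLS' ? delta * 1000 : delta; } );
//   onTTFB( ( { name, delta } ) => { window.webVitalsTTFB = name === 'CLS' ? delta * 1000 : delta; } );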

for ( requestNum = 0; requestNum < params.amount; requestNum++ ) {
const page = await browser.newPage();

// Set viewport similar to @wordpress/e2e-test-utils 'large' configuration.
await page.setViewport( { width: 960, height: 700 } );
await page.mainFrame().waitForFunction( 'window.innerWidth === 960 && window.innerHeight === 700' );

// Load the page, with a per-request query parameter to reduce the impact of full-page caching.
const response = await page.goto( `${ params.url }?rnd=${ requestNum }`, { waitUntil: 'networkidle0' } );
await page.addScriptTag( { content: scriptTag, type: 'module' } );

if ( response.status() !== 200 ) {
continue;
}

completeRequests++;

await Promise.all(
Object.values( metricsDefinition ).map( async ( value ) => {
// Wait until global is populated.
await page.waitForFunction( `window.${ value.global } !== undefined` );

/*
* Do a random click, since only that triggers certain metrics
* like LCP, as only a user interaction stops reporting new LCP
* entries. See https://web.dev/lcp/.
*/
await page.click( 'body' );

// Get the metric value from the global.
const metric = await page.evaluate( value.get );
value.results.push( metric );
} )
).catch( () => { /* Ignore errors. */ } );
}

const metrics = {};
Object.entries( metricsDefinition ).forEach( ( [ key, value ] ) => {
if ( value.results.length ) {
metrics[ key ] = value.results;
}
} );

return { completeRequests, metrics };
}

function outputResults( opt, results ) {
const len = results.length;
const allMetricNames = {};

for ( let i = 0; i < len; i++ ) {
for ( const metric of Object.keys( results[ i ][ 2 ] ) ) {
allMetricNames[ metric ] = '';
}
}

const headings = [
'URL',
'Success Rate',
...Object.keys( allMetricNames ),
];

const tableData = [];

for ( let i = 0; i < len; i++ ) {
const [ url, completeRequests, metrics ] = results[ i ];
const completionRate = round(
( 100 * completeRequests ) / ( opt.number || 1 ),
1
);

const vals = { ...allMetricNames };
for ( const metric of Object.keys( metrics ) ) {
vals[ metric ] = `${ round( calcMedian( metrics[ metric ] ), 2 ) }`;
}

tableData.push( [
url,
`${ completionRate }%`,
...Object.values( vals ),
] );
}

log( table( headings, tableData, opt.output, true ) );
}
