core(lcp-lazy-loaded): add LCP savings estimate #15064

Merged · 5 commits · Aug 15, 2023
Changes from all commits
39 changes: 34 additions & 5 deletions core/audits/lcp-lazy-loaded.js
@@ -6,6 +6,8 @@

import {Audit} from './audit.js';
import * as i18n from '../lib/i18n/i18n.js';
import {LCPBreakdown} from '../computed/metrics/lcp-breakdown.js';
import {LargestContentfulPaint} from '../computed/metrics/largest-contentful-paint.js';

const UIStrings = {
/** Title of a Lighthouse audit that provides detail on whether the largest above-the-fold image was loaded with sufficient priority. This descriptive title is shown to users when the image was loaded properly. */
@@ -18,6 +20,8 @@

const str_ = i18n.createIcuMessageFn(import.meta.url, UIStrings);

const ESTIMATED_PERCENT_SAVINGS = 0.15;

class LargestContentfulPaintLazyLoaded extends Audit {
/**
* @return {LH.Audit.Meta}
@@ -29,7 +33,8 @@
failureTitle: str_(UIStrings.failureTitle),
description: str_(UIStrings.description),
supportedModes: ['navigation'],
-      requiredArtifacts: ['TraceElements', 'ViewportDimensions', 'ImageElements'],
+      requiredArtifacts: ['TraceElements', 'ViewportDimensions', 'ImageElements',
+        'traces', 'devtoolsLogs', 'GatherContext', 'URL'],
};
}

@@ -46,9 +51,10 @@

/**
* @param {LH.Artifacts} artifacts
-   * @return {LH.Audit.Product}
+   * @param {LH.Audit.Context} context
+   * @return {Promise<LH.Audit.Product>}
*/
-  static audit(artifacts) {
+  static async audit(artifacts, context) {
const lcpElement = artifacts.TraceElements.find(element => {
return element.traceEventType === 'largest-contentful-paint' && element.type === 'image';
});
@@ -59,7 +65,11 @@

if (!lcpElementImage ||
!this.isImageInViewport(lcpElementImage, artifacts.ViewportDimensions)) {
-      return {score: null, notApplicable: true};
+      return {
+        score: null,
+        notApplicable: true,
+        metricSavings: {LCP: 0},
+      };
}

/** @type {LH.Audit.Details.Table['headings']} */
@@ -73,8 +83,27 @@
},
]);

const wasLazyLoaded = lcpElementImage.loading === 'lazy';

const metricComputationData = Audit.makeMetricComputationDataInput(artifacts, context);
const {timing: metricLcp} =
await LargestContentfulPaint.request(metricComputationData, context);
const lcpBreakdown = await LCPBreakdown.request(metricComputationData, context);
let lcpSavings = 0;

[Codecov / codecov/patch warning: added lines #L86–L92 in core/audits/lcp-lazy-loaded.js were not covered by tests]
if (wasLazyLoaded && lcpBreakdown.loadStart !== undefined) {
// Estimate the LCP savings using a statistical percentage.
// https://web.dev/lcp-lazy-loading/#causal-performance
//
// LCP savings will be at most the LCP load delay.
const lcpLoadDelay = lcpBreakdown.loadStart - lcpBreakdown.ttfb;
lcpSavings = Math.min(metricLcp * ESTIMATED_PERCENT_SAVINGS, lcpLoadDelay);
}

[Codecov / codecov/patch warning: added lines #L94–L101 in core/audits/lcp-lazy-loaded.js were not covered by tests]
return {
-      score: lcpElementImage.loading === 'lazy' ? 0 : 1,
+      score: wasLazyLoaded ? 0 : 1,
metricSavings: {
LCP: lcpSavings,
},
details,
};
}
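For reference, the heart of this change is the savings heuristic: when the LCP image was lazy-loaded, the audit now estimates that fixing it would save roughly 15% of LCP, capped at the image's load delay (lcpBreakdown.loadStart - lcpBreakdown.ttfb). Below is a minimal standalone sketch of that arithmetic, using round numbers in the spirit of the updated test that follows; `estimateLcpSavings` and the literal ttfb/loadStart values are illustrative assumptions, not part of the PR.

```js
// Hypothetical standalone sketch of the savings estimate introduced in this PR.
const ESTIMATED_PERCENT_SAVINGS = 0.15;

/**
 * @param {number} metricLcp LCP timing in ms.
 * @param {number} ttfb Time to first byte in ms (from the LCP breakdown).
 * @param {number} loadStart LCP image load start in ms (from the LCP breakdown).
 * @return {number} Estimated LCP savings in ms.
 */
function estimateLcpSavings(metricLcp, ttfb, loadStart) {
  // Savings are estimated at 15% of LCP, but can never exceed the image's load delay.
  const lcpLoadDelay = loadStart - ttfb;
  return Math.min(metricLcp * ESTIMATED_PERCENT_SAVINGS, lcpLoadDelay);
}

// With an LCP of 1000 ms and a load delay well above 150 ms (as in the test below),
// the estimate is min(1000 * 0.15, lcpLoadDelay) = 150 ms, which is the value the
// updated test asserts via `metricSavings: {LCP: 150}`.
console.log(estimateLcpSavings(1000, 200, 800)); // 150
```
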
100 changes: 98 additions & 2 deletions core/test/audits/lcp-lazy-loaded-test.js
@@ -5,13 +5,18 @@
*/

import LargestContentfulPaintLazyLoaded from '../../audits/lcp-lazy-loaded.js';
import {defaultSettings} from '../../config/constants.js';
import {createTestTrace, rootFrame} from '../create-test-trace.js';
import {networkRecordsToDevtoolsLog} from '../network-records-to-devtools-log.js';

const SAMPLE_NODE = {
devtoolsNodePath: '1,HTML,1,BODY,3,DIV,2,IMG',
selector: 'div.l-header > div.chorus-emc__content',
nodeLabel: 'My Test Label',
snippet: '<img class="test-class">',
};
const mainDocumentUrl = 'http://www.example.com';

function generateImage(loading, clientRectTop) {
return {
src: 'test',
@@ -25,6 +30,7 @@ function generateImage(loading, clientRectTop) {
node: SAMPLE_NODE,
};
}

describe('Performance: lcp-lazy-loaded audit', () => {
it('correctly surfaces the lazy loaded LCP element', async () => {
const artifacts = {
@@ -40,10 +46,52 @@
innerHeight: 500,
innerWidth: 300,
},
traces: {
defaultPass: createTestTrace({
largestContentfulPaint: 1000,
topLevelTasks: [{ts: 10, duration: 1000}],
}),
},
devtoolsLogs: {
defaultPass: networkRecordsToDevtoolsLog([
{
url: mainDocumentUrl,
priority: 'High',
networkRequestTime: 100,
networkEndTime: 200,
timing: {sendEnd: 0},
frameId: rootFrame,
},
{
url: 'http://www.example.com/image.png',
priority: 'Low',
resourceType: 'Image',
networkRequestTime: 800,
networkEndTime: 900,
timing: {sendEnd: 0},
frameId: rootFrame,
},
]),
},
URL: {
requestedUrl: mainDocumentUrl,
mainDocumentUrl,
finalDisplayedUrl: mainDocumentUrl,
},
GatherContext: {gatherMode: 'navigation'},
};

-    const auditResult = await LargestContentfulPaintLazyLoaded.audit(artifacts);
const settings = JSON.parse(JSON.stringify(defaultSettings));
settings.throttlingMethod = 'devtools';

const context = {
computedCache: new Map(),
settings,
};

+    const auditResult = await LargestContentfulPaintLazyLoaded.audit(artifacts, context);
expect(auditResult.score).toEqual(0);
expect(auditResult.metricSavings).toEqual({LCP: 150});
expect(auditResult.details.items).toHaveLength(1);
expect(auditResult.details.items[0].node.path).toEqual('1,HTML,1,BODY,3,DIV,2,IMG');
expect(auditResult.details.items[0].node.nodeLabel).toEqual('My Test Label');
@@ -64,9 +112,52 @@
innerHeight: 500,
innerWidth: 300,
},
traces: {
defaultPass: createTestTrace({
largestContentfulPaint: 1000,
topLevelTasks: [{ts: 10, duration: 1000}],
}),
},
devtoolsLogs: {
defaultPass: networkRecordsToDevtoolsLog([
{
url: mainDocumentUrl,
priority: 'High',
networkRequestTime: 100,
networkEndTime: 200,
timing: {sendEnd: 0},
frameId: rootFrame,
},
{
url: 'http://www.example.com/image.png',
priority: 'Low',
resourceType: 'Image',
networkRequestTime: 800,
networkEndTime: 900,
timing: {sendEnd: 0},
frameId: rootFrame,
},
]),
},
URL: {
requestedUrl: mainDocumentUrl,
mainDocumentUrl,
finalDisplayedUrl: mainDocumentUrl,
},
GatherContext: {gatherMode: 'navigation'},
};
-    const auditResult = await LargestContentfulPaintLazyLoaded.audit(artifacts);

const settings = JSON.parse(JSON.stringify(defaultSettings));
settings.throttlingMethod = 'devtools';

const context = {
computedCache: new Map(),
settings,
};

+    const auditResult = await LargestContentfulPaintLazyLoaded.audit(artifacts, context);
expect(auditResult.score).toEqual(1);
expect(auditResult.metricSavings).toEqual({LCP: 0});
expect(auditResult.details.items).toHaveLength(1);
});

@@ -86,6 +177,7 @@
},
};
const auditResult = await LargestContentfulPaintLazyLoaded.audit(artifacts);
expect(auditResult.metricSavings).toEqual({LCP: 0});
expect(auditResult.notApplicable).toEqual(true);
});

@@ -97,6 +189,7 @@

const auditResult = await LargestContentfulPaintLazyLoaded.audit(artifacts);
expect(auditResult.score).toEqual(null);
expect(auditResult.metricSavings).toEqual({LCP: 0});
expect(auditResult.notApplicable).toEqual(true);
});

@@ -119,6 +212,9 @@
expect(auditResult).toEqual({
score: null,
notApplicable: true,
metricSavings: {
LCP: 0,
},
});
});
});