Add benchmark script (#7869)
The script `benchmark.js` collects page load metrics from the
extension and prints them to a file or the console. A method for
collecting metrics was added to the web driver to support this.
The script calculates the min, max, average, and standard deviation
for four metrics: 'firstPaint', 'domContentLoaded', 'load', and
'domInteractive'. The variation between samples can be high; with only
3 samples, results differed noticeably between runs. However, all tests
I've done locally with 5 samples produced results within one standard
deviation of each other. The default number of samples has been set
to 10, which should be more than enough to produce consistent results.
The benchmark can be run with the npm script `benchmark:chrome` or
`benchmark:firefox`, e.g. `yarn benchmark:chrome`.
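For reference, the optional flags parsed by the script (see `main` below) can also be passed through yarn; a hypothetical invocation might look like `yarn benchmark:chrome --pages notification --samples 5 --out benchmark-results.json`, where the output filename is only an example.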
#!/usr/bin/env node

const path = require('path')
const { promises: fs, constants: fsConstants } = require('fs')
const ttest = require('ttest')
const { By, Key } = require('selenium-webdriver')
const { withFixtures } = require('./helpers')
const { PAGES } = require('./webdriver/driver')

const DEFAULT_NUM_SAMPLES = 20
const ALL_PAGES = Object.values(PAGES)

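// Measures a single page load: starts the extension with the
// 'imported-account' fixture, unlocks the wallet, navigates to the given
// page, and returns the metrics collected by the driver.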
async function measurePage (pageName) {
  let metrics
  await withFixtures({ fixtures: 'imported-account' }, async ({ driver }) => {
    const passwordField = await driver.findElement(By.css('#password'))
    await passwordField.sendKeys('correct horse battery staple')
    await passwordField.sendKeys(Key.ENTER)
    await driver.findElement(By.css('.account-details__account-name'))
    await driver.navigate(pageName)
    await driver.delay(1000)
    metrics = await driver.collectMetrics()
  })
  return metrics
}

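// Helpers for summarizing results. `calculateResult` takes a reducer and
// returns a function that applies it to each metric's array of samples.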
function calculateResult (calc) {
  return (result) => {
    const calculatedResult = {}
    for (const key of Object.keys(result)) {
      calculatedResult[key] = calc(result[key])
    }
    return calculatedResult
  }
}
const calculateSum = (array) => array.reduce((sum, val) => sum + val)
const calculateAverage = (array) => calculateSum(array) / array.length
const minResult = calculateResult((array) => Math.min(...array))
const maxResult = calculateResult((array) => Math.max(...array))
const averageResult = calculateResult(array => calculateAverage(array))
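// Population standard deviation of each metric's samples.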
const standardDeviationResult = calculateResult((array) => {
  const average = calculateAverage(array)
  const squareDiffs = array.map(value => Math.pow(value - average, 2))
  return Math.sqrt(calculateAverage(squareDiffs))
})
// 95% margin of error calculated using Student's t-distribution
const calculateMarginOfError = (array) => ttest(array).confidence()[1] - calculateAverage(array)
const marginOfErrorResult = calculateResult((array) => calculateMarginOfError(array))

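// Collects `numSamples` load measurements for each page and summarizes
// each metric with a min, max, average, standard deviation, and 95%
// margin of error.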
async function profilePageLoad (pages, numSamples) {
  const results = {}
  for (const pageName of pages) {
    const runResults = []
    for (let i = 0; i < numSamples; i += 1) {
      runResults.push(await measurePage(pageName))
    }

    if (runResults.some(result => result.navigation.length > 1)) {
      throw new Error(`Multiple navigations not supported`)
    } else if (runResults.some(result => result.navigation[0].type !== 'navigate')) {
      throw new Error(`Navigation type ${runResults.find(result => result.navigation[0].type !== 'navigate').navigation[0].type} not supported`)
    }

    const result = {
      firstPaint: runResults.map(result => result.paint['first-paint']),
      domContentLoaded: runResults.map(result => result.navigation[0] && result.navigation[0].domContentLoaded),
      load: runResults.map(result => result.navigation[0] && result.navigation[0].load),
      domInteractive: runResults.map(result => result.navigation[0] && result.navigation[0].domInteractive),
    }

    results[pageName] = {
      min: minResult(result),
      max: maxResult(result),
      average: averageResult(result),
      standardDeviation: standardDeviationResult(result),
      marginOfError: marginOfErrorResult(result),
    }
  }
  return results
}

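// Returns whether the given directory is writable by the current process.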
async function isWritable (directory) {
  try {
    await fs.access(directory, fsConstants.W_OK)
    return true
  } catch (error) {
    if (error.code !== 'EACCES') {
      throw error
    }
    return false
  }
}

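// Returns the given directory if it exists, otherwise its closest existing
// ancestor; throws if the filesystem root is reached without finding one.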
async function getFirstParentDirectoryThatExists (directory) {
  while (true) {
    try {
      await fs.access(directory, fsConstants.F_OK)
      return directory
    } catch (error) {
      if (error.code !== 'ENOENT') {
        throw error
      } else if (directory === path.dirname(directory)) {
        throw new Error('Failed to find parent directory that exists')
      }
      directory = path.dirname(directory)
    }
  }
}

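// Parses the command-line arguments (--pages/-p, --samples/-s, --out/-o),
// runs the benchmark, and writes the results as JSON to the output file
// or to the console.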
async function main () {
  const args = process.argv.slice(2)

  let pages = ['notification']
  let numSamples = DEFAULT_NUM_SAMPLES
  let outputPath
  let outputDirectory
  let existingParentDirectory

  while (args.length) {
    if (/^(--pages|-p)$/i.test(args[0])) {
      if (args[1] === undefined) {
        throw new Error('Missing pages argument')
      }
      pages = args[1].split(',')
      for (const page of pages) {
        if (!ALL_PAGES.includes(page)) {
          throw new Error(`Invalid page: '${page}'`)
        }
      }
      args.splice(0, 2)
    } else if (/^(--samples|-s)$/i.test(args[0])) {
      if (args[1] === undefined) {
        throw new Error('Missing number of samples')
      }
      numSamples = parseInt(args[1], 10)
      if (isNaN(numSamples)) {
        throw new Error(`Invalid 'samples' argument given: '${args[1]}'`)
      }
      args.splice(0, 2)
    } else if (/^(--out|-o)$/i.test(args[0])) {
      if (args[1] === undefined) {
        throw new Error('Missing output filename')
      }
      outputPath = path.resolve(args[1])
      outputDirectory = path.dirname(outputPath)
      existingParentDirectory = await getFirstParentDirectoryThatExists(outputDirectory)
      if (!await isWritable(existingParentDirectory)) {
        throw new Error(`Specified directory is not writable: '${args[1]}'`)
      }
      args.splice(0, 2)
    } else {
      throw new Error(`Unrecognized argument: '${args[0]}'`)
    }
  }

  const results = await profilePageLoad(pages, numSamples)

  if (outputPath) {
    if (outputDirectory !== existingParentDirectory) {
      await fs.mkdir(outputDirectory, { recursive: true })
    }
    await fs.writeFile(outputPath, JSON.stringify(results, null, 2))
  } else {
    console.log(JSON.stringify(results, null, 2))
  }
}

main()
  .catch(e => {
    console.error(e)
    process.exit(1)
  })