WebKit Bugzilla
Attachment 339255 Details for Bug 184419: Write a script that detects chart changes by using v3 API.
Description: Patch
Filename: bug-184419-20180501190116.patch
MIME Type: text/plain
Creator: dewei_zhu
Created: 2018-05-01 19:01:17 PDT
Size: 36.66 KB
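Before the diff itself, a note on usage: the patch adds tools/run-analysis.js, a Node.js script meant to run in a loop, re-analyzing recent measurement sets and filing confirming analysis tasks. A minimal invocation sketch follows; the file paths are hypothetical, but the option names and the 1200-second default come from parseArguments() in the script, and the change-detection fields are the ones its unit tests exercise:

    node tools/run-analysis.js --server-config-json server-config.json --change-detection-config-json change-detection.json --seconds-to-sleep 1200

    // change-detection.json (hypothetical file name; field names taken from the script and its tests)
    {
        "maxDays": 10,
        "tTestSignificance": 0.99,
        "confirmTaskRepeatCount": 4
    }

The server config file is expected to carry a 'server' entry (handed to RemoteAPI) and 'slave.name' / 'slave.password' credentials (handed to PrivilegedAPI.configure); the patch does not spell out its full shape.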
Subversion Revision: 231187
diff --git a/Websites/perf.webkit.org/ChangeLog b/Websites/perf.webkit.org/ChangeLog
index 04442ec99c21f9391e16980a8db57c1f91a14e8b..1d5fb93dc202373fc6785482199c8cdf7900f6f8 100644
--- a/Websites/perf.webkit.org/ChangeLog
+++ b/Websites/perf.webkit.org/ChangeLog
@@ -1,3 +1,36 @@
+2018-05-01  Dewei Zhu  <dewei_zhu@apple.com>
+
+        Write a script that detects chart changes by using v3 API.
+        https://bugs.webkit.org/show_bug.cgi?id=184419
+
+        Reviewed by NOBODY (OOPS!).
+
+        Added a script that detects chart changes and schedules a confirming analysis task.
+
+        * browser-tests/index.html: Added import for 'AsyncTask' for testing.
+        * browser-tests/time-series-chart-tests.js: Added a unit test for 'AsyncTask.isAvailable' to make sure it is
+        available in the browser.
+        * public/v3/async-task.js:
+        (AsyncTask.isAvailable): Helper function to determine whether AsyncTask is available, as 'Worker' is
+        not available in Node.js.
+        (AsyncTask):
+        * public/v3/models/bug.js: Export as a module.
+        * public/v3/models/measurement-set.js:
+        (MeasurementSet.prototype._invokeSegmentationAlgorithm): Added a check to avoid using AsyncTask when running in Node.js.
+        (MeasurementSet):
+        * tools/js/v3-models.js: Added import for 'Bug' object.
+        * tools/run-analysis.js: Added this script to detect measurement set changes.
+        (main):
+        (async.analysisLoop):
+        (prototype.async.analyzeOnce):
+        (measurementSetListForAnalysis):
+        (prototype.async._analyzeMeasurementSet): Analyzes a configuration in the given date range and creates a confirming A/B
+        task when there is no existing analysis task for the same range.
+        * unit-tests/measurement-set-analyzer-tests.js: Added unit tests for MeasurementSetAnalyzer.
+        * unit-tests/resources/mock-v3-models.js: Reset 'MeasurementSet._set' every time.
+        Updated the mock platform to include lastModifiedByMetric information.
+        (MockModels.inject):
+
 2018-04-30  Ryosuke Niwa  <rniwa@webkit.org>
 
         Creating a custom analysis task after fetching all analysis tasks fail
diff --git a/Websites/perf.webkit.org/browser-tests/index.html b/Websites/perf.webkit.org/browser-tests/index.html
index 6b25815f7e52a2d3eabebca8b910a9a3d463aaa2..597235335ec2f53b271a2865ff16cec7bb26c61f 100644
--- a/Websites/perf.webkit.org/browser-tests/index.html
+++ b/Websites/perf.webkit.org/browser-tests/index.html
@@ -208,6 +208,7 @@ const ChartTest = {
     importChartScripts(context)
     {
         return context.importScripts([
+            'async-task.js',
             '../shared/statistics.js',
             'lazily-evaluated-function.js',
             'instrumentation.js',
@@ -226,7 +227,7 @@ const ChartTest = {
             'components/time-series-chart.js',
             'components/interactive-time-series-chart.js'],
             'ComponentBase', 'TimeSeriesChart', 'InteractiveTimeSeriesChart',
-            'Platform', 'Metric', 'Test', 'Repository', 'MeasurementSet', 'MockRemoteAPI').then(() => {
+            'Platform', 'Metric', 'Test', 'Repository', 'MeasurementSet', 'MockRemoteAPI', 'AsyncTask').then(() => {
             return context.symbols.TimeSeriesChart;
         })
     },
diff --git a/Websites/perf.webkit.org/browser-tests/time-series-chart-tests.js b/Websites/perf.webkit.org/browser-tests/time-series-chart-tests.js
index 2759a0c4d318745682e29ccd1df6f49d18912290..9ce9da1f27d278e6d5ce63f93a170c42d538cdf2 100644
--- a/Websites/perf.webkit.org/browser-tests/time-series-chart-tests.js
+++ b/Websites/perf.webkit.org/browser-tests/time-series-chart-tests.js
@@ -1,5 +1,10 @@
 
 describe('TimeSeriesChart', () => {
+    it('should have "AsyncTask" available for computing segmentation', async () => {
+        const context = new BrowsingContext;
+        await ChartTest.importChartScripts(context);
+        expect(context.symbols.AsyncTask.isAvailable()).to.be(true);
+    });
 
     it('should be constructible with an empty sourec list and an empty options', () => {
         return ChartTest.importChartScripts(new BrowsingContext).then((TimeSeriesChart) => {
diff --git a/Websites/perf.webkit.org/public/v3/async-task.js b/Websites/perf.webkit.org/public/v3/async-task.js
index 11b54ffe34fe590d5b03718ba4da7263b84dea75..ee81c8405b9d5c8bd24ca1d96721062e51207bf0 100644
--- a/Websites/perf.webkit.org/public/v3/async-task.js
+++ b/Websites/perf.webkit.org/public/v3/async-task.js
@@ -29,6 +29,10 @@ class AsyncTask {
         });
     }
 
+    static isAvailable()
+    {
+        return typeof Worker !== 'undefined';
+    }
 }
 
 AsyncTask._asyncMessageId = 0;
@@ -149,3 +153,6 @@ if (typeof module == 'undefined' && typeof window == 'undefined' && typeof impor
     importScripts('/shared/statistics.js');
     onmessage = AsyncTaskWorker.workerDidRecieveMessage.bind(AsyncTaskWorker);
 }
+
+if (typeof module != 'undefined')
+    module.exports.AsyncTask = AsyncTask;
\ No newline at end of file
diff --git a/Websites/perf.webkit.org/public/v3/models/bug.js b/Websites/perf.webkit.org/public/v3/models/bug.js
index 0329b7e142c69d17abe3f9e028f34a713ccdbea4..c158867192a6d0f77de93af8a78c9982e051f165 100644
--- a/Websites/perf.webkit.org/public/v3/models/bug.js
+++ b/Websites/perf.webkit.org/public/v3/models/bug.js
@@ -29,3 +29,6 @@ class Bug extends DataModelObject {
     label() { return this.bugNumber(); }
     title() { return `${this._bugTracker.label()}: ${this.bugNumber()}`; }
 }
+
+if (typeof module != 'undefined')
+    module.exports.Bug = Bug;
\ No newline at end of file
diff --git a/Websites/perf.webkit.org/public/v3/models/measurement-set.js b/Websites/perf.webkit.org/public/v3/models/measurement-set.js
index 65c95bc224f1b714dc6515ba2b6f7314c7cce0db..e4a8a076ad10081d0e1c9def7d7c506c472b36f5 100644
--- a/Websites/perf.webkit.org/public/v3/models/measurement-set.js
+++ b/Websites/perf.webkit.org/public/v3/models/measurement-set.js
@@ -290,7 +290,7 @@ class MeasurementSet {
         var args = [timeSeriesValues].concat(parameters || []);
 
         var timeSeriesIsShortEnoughForSyncComputation = timeSeriesValues.length < 100;
-        if (timeSeriesIsShortEnoughForSyncComputation) {
+        if (timeSeriesIsShortEnoughForSyncComputation || !AsyncTask.isAvailable()) {
             Instrumentation.startMeasuringTime('_invokeSegmentationAlgorithm', 'syncSegmentation');
             var segmentation = Statistics[segmentationName].apply(timeSeriesValues, args);
             Instrumentation.endMeasuringTime('_invokeSegmentationAlgorithm', 'syncSegmentation');
diff --git a/Websites/perf.webkit.org/tools/js/v3-models.js b/Websites/perf.webkit.org/tools/js/v3-models.js
index 6cfd83b66dc71482c398f1be737cf6097c4a970a..e1b249c4f174549ad0689be60e9f79b79f7ebe84 100644
--- a/Websites/perf.webkit.org/tools/js/v3-models.js
+++ b/Websites/perf.webkit.org/tools/js/v3-models.js
@@ -11,6 +11,7 @@ importFromV3('models/data-model.js', 'DataModelObject');
 importFromV3('models/data-model.js', 'LabeledObject');
 
 importFromV3('models/analysis-task.js', 'AnalysisTask');
+importFromV3('models/bug.js', 'Bug');
 importFromV3('models/bug-tracker.js', 'BugTracker');
 importFromV3('models/build-request.js', 'BuildRequest');
 importFromV3('models/builder.js', 'Build');
@@ -37,5 +38,6 @@ importFromV3('models/uploaded-file.js', 'UploadedFile');
 importFromV3('instrumentation.js', 'Instrumentation');
 importFromV3('lazily-evaluated-function.js', 'LazilyEvaluatedFunction');
 importFromV3('commit-set-range-bisector.js', 'CommitSetRangeBisector');
+importFromV3('async-task.js', 'AsyncTask');
 
 global.Statistics = require('../../public/shared/statistics.js');
\ No newline at end of file
diff --git a/Websites/perf.webkit.org/tools/run-analysis.js b/Websites/perf.webkit.org/tools/run-analysis.js
new file mode 100644
index 0000000000000000000000000000000000000000..75ce3542b01137571121aabb4229e85354c27e96
--- /dev/null
+++ b/Websites/perf.webkit.org/tools/run-analysis.js
@@ -0,0 +1,169 @@
+#!/usr/local/bin/node
+
+const fs = require('fs');
+const Statistics = require('../public/shared/statistics');
+const parseArguments = require('./js/parse-arguments.js').parseArguments;
+const RemoteAPI = require('./js/remote.js').RemoteAPI;
+require('./js/v3-models.js');
+global.PrivilegedAPI = require('./js/privileged-api.js').PrivilegedAPI;
+
+function main(argv)
+{
+    const options = parseArguments(argv, [
+        {name: '--server-config-json', required: true},
+        {name: '--change-detection-config-json', required: true},
+        {name: '--seconds-to-sleep', type: parseFloat, default: 1200},
+    ]);
+
+    if (!options)
+        return;
+
+    analysisLoop(options);
+}
+
+async function analysisLoop(options)
+{
+    let secondsToSleep;
+    let analyzer = null;
+    try {
+        const changeDetectionConfigs = JSON.parse(fs.readFileSync(options['--change-detection-config-json'], 'utf-8'));
+        const serverConfig = JSON.parse(fs.readFileSync(options['--server-config-json'], 'utf-8'));
+        secondsToSleep = options['--seconds-to-sleep'];
+        global.RemoteAPI = new RemoteAPI(serverConfig.server);
+        PrivilegedAPI.configure(serverConfig.slave.name, serverConfig.slave.password);
+
+        const manifest = await Manifest.fetch();
+        const measurementSetList = MeasurementSetAnalyzer.measurementSetListForAnalysis(manifest);
+
+        analyzer = new MeasurementSetAnalyzer(changeDetectionConfigs, measurementSetList, console);
+        await analyzer.analyzeOnce();
+    } catch(error) {
+        console.error(`Failed to analyze measurement sets due to ${error}`);
+    }
+
+    console.log(`Sleeping for ${secondsToSleep} seconds.`);
+    setTimeout(() => analysisLoop(options), secondsToSleep * 1000);
+}
+
+
+class MeasurementSetAnalyzer {
+    constructor(changeDetectionConfigs, measurementSetList, logger)
+    {
+        this._changeDetectionConfigs = changeDetectionConfigs;
+        this._measurementSetList = measurementSetList;
+        this._logger = logger;
+
+        this._endTime = Date.now();
+        this._startTime = this._endTime - this._changeDetectionConfigs.maxDays * 24 * 3600 * 1000;
+    }
+
+    async analyzeOnce()
+    {
+        this._logger.info(`Start analyzing last ${this._changeDetectionConfigs.maxDays} days measurement sets.`);
+        for (const measurementSet of this._measurementSetList)
+            await this._analyzeMeasurementSet(measurementSet);
+    }
+
+    // FIXME: This code should be shared with DashboardPage.
+    static measurementSetListForAnalysis(manifest)
+    {
+        const measurementSetList = [];
+        for (const dashboard of Object.values(manifest.dashboards)) {
+            for (const row of dashboard) {
+                for (const cell of row) {
+                    if (cell instanceof Array) {
+                        if (cell.length < 2)
+                            continue;
+                        const platformId = parseInt(cell[0]);
+                        const metricId = parseInt(cell[1]);
+                        if (isNaN(platformId) || isNaN(metricId))
+                            continue;
+                        const platform = Platform.findById(platformId);
+                        const metric = Metric.findById(metricId);
+                        console.assert(platform);
+                        console.assert(metric);
+
+                        const measurementSet = MeasurementSet.findSet(platform.id(), metric.id(), platform.lastModified(metric));
+                        console.assert(measurementSet);
+                        measurementSetList.push(measurementSet);
+                    }
+                }
+            }
+        }
+        return measurementSetList;
+    }
+
+    async _analyzeMeasurementSet(measurementSet)
+    {
+        const metric = Metric.findById(measurementSet.metricId());
+        const platform = Platform.findById(measurementSet.platformId());
+        this._logger.info(`==== "${metric.fullName()}" on "${platform.name()}" ====`);
+        await measurementSet.fetchBetween(this._startTime, this._endTime);
+        const currentTimeSeries = measurementSet.fetchedTimeSeries('current', false, false);
+        const rawValues = currentTimeSeries.values();
+        const segmentedValues = await measurementSet.fetchSegmentation('segmentTimeSeriesByMaximizingSchwarzCriterion', [], 'current', false);
+        if (!segmentedValues || !segmentedValues.length) {
+            this._logger.warn(`Failed fetching segmentations for "${metric.fullName()}" on "${platform.name()}"`);
+            return;
+        }
+
+        const progressionString = 'progression';
+        const regressionString = 'regression';
+        const ranges = Statistics.findRangesForChangeDetectionsWithWelchsTTest(rawValues, segmentedValues,
+            this._changeDetectionConfigs.tTestSignificance).map((range) => ({
+                startPoint: currentTimeSeries.findPointByIndex(range.startIndex),
+                endPoint: currentTimeSeries.findPointByIndex(range.endIndex),
+                valueChangeSummary: metric.labelForDifference(range.segmentationStartValue, range.segmentationEndValue,
+                    progressionString, regressionString)
+            }));
+
+        const analysisTasks = await AnalysisTask.fetchByPlatformAndMetric(platform.id(), metric.id());
+        const filteredRanges = ranges.filter((range) => {
+            const rangeEndsBeforeAnalysisStarts = range.endPoint.time < this._startTime;
+            if (rangeEndsBeforeAnalysisStarts)
+                return false;
+            for (const task of analysisTasks) {
+                const taskEndsBeforeRangeStart = task.endTime() < range.startPoint.time;
+                const taskStartsAfterRangeEnd = range.endPoint.time < task.startTime();
+                if (!(taskEndsBeforeRangeStart || taskStartsAfterRangeEnd))
+                    return false;
+            }
+            return true;
+        });
+
+        let rangeWithMostSignificantChange = null;
+        let largestWeightedSignificance = 0;
+        filteredRanges.forEach((range) => {
+            const relativeChangeAbsoluteValue = Math.abs(range.valueChangeSummary.relativeChange);
+
+            // Take a square root of the progression relative value so that we do not favor regressions so
+            // much that we miss some huge progressions.
+            const weightedSignificance = range.valueChangeSummary.changeType === regressionString ?
+                relativeChangeAbsoluteValue : Math.sqrt(relativeChangeAbsoluteValue);
+
+            if (weightedSignificance > largestWeightedSignificance) {
+                largestWeightedSignificance = weightedSignificance;
+                rangeWithMostSignificantChange = range;
+            }
+        });
+
+        if (!rangeWithMostSignificantChange) {
+            this._logger.info('Nothing to analyze');
+            return;
+        }
+
+        const startCommitSet = rangeWithMostSignificantChange.startPoint.commitSet();
+        const endCommitSet = rangeWithMostSignificantChange.endPoint.commitSet();
+        const summary = `Potential ${rangeWithMostSignificantChange.valueChangeSummary.changeLabel} on ${platform.name()} between ${CommitSet.diff(startCommitSet, endCommitSet)}`;
+
+        this._logger.info(`Creating analysis task and confirming: "${summary}".`);
+        await AnalysisTask.create(summary, rangeWithMostSignificantChange.startPoint, rangeWithMostSignificantChange.endPoint,
+            'Confirm', this._changeDetectionConfigs.confirmTaskRepeatCount);
+    }
+}
+
+if (require.main === module)
+    main(process.argv);
+
+if (typeof module !== 'undefined')
+    module.exports.MeasurementSetAnalyzer = MeasurementSetAnalyzer;
\ No newline at end of file
diff --git a/Websites/perf.webkit.org/unit-tests/measurement-set-analyzer-tests.js b/Websites/perf.webkit.org/unit-tests/measurement-set-analyzer-tests.js
new file mode 100644
index 0000000000000000000000000000000000000000..74b903eda9b4b329825c03b5c0e711c78e2f7754
--- /dev/null
+++ b/Websites/perf.webkit.org/unit-tests/measurement-set-analyzer-tests.js
@@ -0,0 +1,365 @@
+'use strict';
+
+const assert = require('assert');
+const MockRemoteAPI = require('./resources/mock-remote-api.js').MockRemoteAPI;
+const MockModels = require('./resources/mock-v3-models.js').MockModels;
+require('../tools/js/v3-models.js');
+const MeasurementSetAnalyzer = require('../tools/run-analysis').MeasurementSetAnalyzer;
+const NodePrivilegedAPI = require('../tools/js/privileged-api.js').PrivilegedAPI;
+
+describe('MeasurementSetAnalyzer', () => {
+    MockModels.inject();
+    const requests = MockRemoteAPI.inject('http://build.webkit.org', NodePrivilegedAPI);
+    beforeEach(() => {
+        PrivilegedAPI.configure('test', 'password');
+    });
+
+    describe('measurementSetListForAnalysis', () => {
+        it('should generate empty list if no dashboard configurations', () => {
+            const configurations = MeasurementSetAnalyzer.measurementSetListForAnalysis({dashboards: {}});
+            assert.equal(configurations.length, 0);
+        });
+
+        it('should generate a list of measurement sets', () => {
+            const configurations = MeasurementSetAnalyzer.measurementSetListForAnalysis({dashboards: {
+                "macOS": [["some metric", "plt-mean"], [['Some Platform'], [65, 2884], [65, 1158]]]
+            }});
+            assert.equal(configurations.length, 2);
+            const [measurementSet0, measurementSet1] = configurations;
+            assert.equal(measurementSet0.metricId(), MockModels.someMetric.id());
+            assert.equal(measurementSet0.platformId(), MockModels.somePlatform.id());
+            assert.equal(measurementSet1.metricId(), MockModels.pltMean.id());
+            assert.equal(measurementSet1.platformId(), MockModels.somePlatform.id());
+        });
+    });
+
+    function mockLogger()
+    {
+        const info_logs = [];
+        const error_logs = [];
+        const warn_logs = [];
+        return {
+            info: (message) => info_logs.push(message),
+            warn: (message) => warn_logs.push(message),
+            error: (message) => error_logs.push(message),
+            info_logs, error_logs, warn_logs
+        };
+    }
+
+    describe('analyzeOnce', () => {
+        const simpleSegmentableValues = [
+            1546.5603, 1548.1536, 1563.5452, 1539.7823, 1546.4184, 1548.9299, 1532.5444, 1546.2800, 1547.1760, 1551.3507,
+            1548.3277, 1544.7673, 1542.7157, 1538.1700, 1538.0948, 1543.0364, 1537.9737, 1542.2611, 1543.9685, 1546.4901,
+            1544.4080, 1540.8671, 1537.3353, 1549.4331, 1541.4436, 1544.1299, 1550.1770, 1553.1872, 1549.3417, 1542.3788,
+            1543.5094, 1541.7905, 1537.6625, 1547.3840, 1538.5185, 1549.6764, 1556.6138, 1552.0476, 1541.7629, 1544.7006,
+            /* segmentation changes here */
+            1587.1390, 1594.5451, 1586.2430, 1596.7310, 1548.1423
+        ];
+
+        const dataBeforeSmallProgression = [1587.1390, 1594.5451, 1586.2430, 1596.7310, 1548.1423];
+        const dataBeforeHugeProgression = [1700.1390, 1704.5451, 1703.2430, 1706.7310, 1689.1423];
+
+        function makeSampleRuns(values, startRunId, startTime, timeIncrement)
+        {
+            let runId = startRunId;
+            let buildId = 3400;
+            let buildNumber = 1;
+            let commit_id = 1;
+            let revision = 1;
+            const makeRun = (value, commitTime) => [runId++, value, 1, value, value, false, [[commit_id++, MockModels.webkit.id(), revision++, 0, 0]], commitTime, commitTime + 10, buildId++, buildNumber++, MockModels.builder.id()];
+            timeIncrement = Math.floor(timeIncrement);
+            return values.map((value, index) => makeRun(value, startTime + index * timeIncrement));
+        }
+
+        it('should not analyze and show a warning message if it failed to fetch segmentation', async () => {
+            const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000);
+            const changeDetectionConfigs = {tTestSignificance: 0.99, maxDays: 10, confirmTaskRepeatCount: 4};
+            const logger = mockLogger();
+            const measurementSetAnalyzer = new MeasurementSetAnalyzer(changeDetectionConfigs, [measurementSet], logger);
+            const analysisPromise = measurementSetAnalyzer.analyzeOnce(measurementSet);
+            assert.equal(requests.length, 1);
+            assert.equal(requests[0].url, `/data/measurement-set-${MockModels.somePlatform.id()}-${MockModels.someMetric.id()}.json`);
+            requests[0].resolve({
+                'clusterStart': 946684800000,
+                'clusterSize': 5184000000,
+                'formatMap': [],
+                'configurations': {current: []},
+                'startTime': 1525211754989,
+                'endTime': 1525211772000,
+                'lastModified': 1525211774989,
+                'clusterCount': 5,
+                'status': 'OK'});
+            await analysisPromise;
+            assert.deepEqual(logger.info_logs, ['Start analyzing last 10 days measurement sets.', '==== "Some test : Some metric" on "Some platform" ====']);
+            assert.deepEqual(logger.error_logs, []);
+            assert.deepEqual(logger.warn_logs, ['Failed fetching segmentations for "Some test : Some metric" on "Some platform"']);
+        });
+
+        it('should not analyze if no regression is detected', async () => {
+            const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000);
+            const changeDetectionConfigs = {tTestSignificance: 0.99, maxDays: 10, confirmTaskRepeatCount: 4};
+            const logger = mockLogger();
+            const measurementSetAnalyzer = new MeasurementSetAnalyzer(changeDetectionConfigs, [measurementSet], logger);
+            measurementSetAnalyzer._startTime = 4000;
+            measurementSetAnalyzer._endTime = 5000;
+            const analysisPromise = measurementSetAnalyzer.analyzeOnce(measurementSet);
+
+            assert.equal(requests.length, 1);
+            assert.equal(requests[0].url, `/data/measurement-set-${MockModels.somePlatform.id()}-${MockModels.someMetric.id()}.json`);
+            requests[0].resolve({
+                'clusterStart': 1000,
+                'clusterSize': 1000,
+                'formatMap': ['id', 'mean', 'iterationCount', 'sum', 'squareSum', 'markedOutlier', 'revisions', 'commitTime', 'build', 'buildTime', 'buildNumber', 'builder'],
+                'configurations': {current: makeSampleRuns(simpleSegmentableValues.slice(0, 39), 6400, 4000, 1000 / 50)},
+                'startTime': 4000,
+                'endTime': 5000,
+                'lastModified': 5000,
+                'clusterCount': 4,
+                'status': 'OK'});
+
+            await MockRemoteAPI.waitForRequest();
+            assert.equal(requests.length, 2);
+            assert.equal(requests[1].url, '/api/analysis-tasks?platform=65&metric=2884');
+            requests[1].resolve({
+                analysisTasks: [],
+                bugs: [],
+                commits: [],
+                status: 'OK'
+            });
+
+            await analysisPromise;
+            assert.deepEqual(logger.info_logs, ['Start analyzing last 10 days measurement sets.',
+                '==== "Some test : Some metric" on "Some platform" ====', 'Nothing to analyze']);
+            assert.equal(logger.warn_logs.length, 0);
+            assert.equal(logger.error_logs.length, 0);
+        });
+
+        it('should analyze if a new regression is detected', async () => {
+            const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000);
+            const changeDetectionConfigs = {tTestSignificance: 0.99, maxDays: 10, confirmTaskRepeatCount: 4};
+            const logger = mockLogger();
+            const measurementSetAnalyzer = new MeasurementSetAnalyzer(changeDetectionConfigs, [measurementSet], logger);
+            measurementSetAnalyzer._startTime = 4000;
+            measurementSetAnalyzer._endTime = 5000;
+            const analysisPromise = measurementSetAnalyzer.analyzeOnce(measurementSet);
+
+            assert.equal(requests.length, 1);
+            assert.equal(requests[0].url, `/data/measurement-set-${MockModels.somePlatform.id()}-${MockModels.someMetric.id()}.json`);
+            requests[0].resolve({
+                'clusterStart': 1000,
+                'clusterSize': 1000,
+                'formatMap': ['id', 'mean', 'iterationCount', 'sum', 'squareSum', 'markedOutlier', 'revisions', 'commitTime', 'build', 'buildTime', 'buildNumber', 'builder'],
+                'configurations': {current: makeSampleRuns(simpleSegmentableValues, 6400, 4000, 1000 / 50)},
+                'startTime': 4000,
+                'endTime': 5000,
+                'lastModified': 5000,
+                'clusterCount': 4,
+                'status': 'OK'});
+
+            await MockRemoteAPI.waitForRequest();
+            assert.equal(requests.length, 2);
+            assert.equal(requests[1].url, '/api/analysis-tasks?platform=65&metric=2884');
+            requests[1].resolve({
+                analysisTasks: [],
+                bugs: [],
+                commits: [],
+                status: 'OK'
+            });
+
+            await MockRemoteAPI.waitForRequest();
+            assert.equal(requests.length, 3);
+            assert.equal(requests[2].url, '/privileged-api/create-analysis-task');
+            assert.deepEqual(requests[2].data, {
+                slaveName: 'test',
+                slavePassword: 'password',
+                name: 'Potential 2.38% regression on Some platform between WebKit: r35-r44',
+                startRun: 6434,
+                endRun: 6443,
+                repetitionCount: 4,
+                testGroupName: 'Confirm',
+                revisionSets: [{'11': {revision: 35, ownerRevision: null, patch: null}},
+                    {'11': {revision: 44, ownerRevision: null, patch: null}}]
+            });
+            requests[2].resolve();
+
+            await analysisPromise;
+            assert.deepEqual(logger.info_logs, ['Start analyzing last 10 days measurement sets.',
+                '==== "Some test : Some metric" on "Some platform" ====',
+                'Creating analysis task and confirming: "Potential 2.38% regression on Some platform between WebKit: r35-r44".']);
+            assert.equal(logger.warn_logs.length, 0);
+            assert.equal(logger.error_logs.length, 0);
+        });
+
+        it('should not analyze if there is an overlapped existing analysis task', async () => {
+            const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000);
+            const changeDetectionConfigs = {tTestSignificance: 0.99, maxDays: 10, confirmTaskRepeatCount: 4};
+            const logger = mockLogger();
+            const measurementSetAnalyzer = new MeasurementSetAnalyzer(changeDetectionConfigs, [measurementSet], logger);
+            measurementSetAnalyzer._startTime = 4000;
+            measurementSetAnalyzer._endTime = 5000;
+            const analysisPromise = measurementSetAnalyzer.analyzeOnce(measurementSet);
+
+            assert.equal(requests.length, 1);
+            assert.equal(requests[0].url, `/data/measurement-set-${MockModels.somePlatform.id()}-${MockModels.someMetric.id()}.json`);
+            requests[0].resolve({
+                'clusterStart': 1000,
+                'clusterSize': 1000,
+                'formatMap': ['id', 'mean', 'iterationCount', 'sum', 'squareSum', 'markedOutlier', 'revisions', 'commitTime', 'build', 'buildTime', 'buildNumber', 'builder'],
+                'configurations': {current: makeSampleRuns(simpleSegmentableValues, 6400, 4000, 1000 / 50)},
+                'startTime': 4000,
+                'endTime': 5000,
+                'lastModified': 5000,
+                'clusterCount': 4,
+                'status': 'OK'});
+
+            await MockRemoteAPI.waitForRequest();
+            assert.equal(requests.length, 2);
+            assert.equal(requests[1].url, '/api/analysis-tasks?platform=65&metric=2884');
+            requests[1].resolve({
+                analysisTasks: [{
+                    author: null,
+                    bugs: [],
+                    buildRequestCount: 14,
+                    finishedBuildRequestCount: 6,
+                    category: 'identified',
+                    causes: [],
+                    createdAt: 4500,
+                    endRun: 6434,
+                    endRunTime: 5000,
+                    fixes: [],
+                    id: 1082,
+                    metric: MockModels.someMetric.id(),
+                    name: 'Potential 2.38% regression on Some platform between WebKit: r35-r44',
+                    needed: null,
+                    platform: MockModels.somePlatform.id(),
+                    result: 'regression',
+                    segmentationStrategy: 1,
+                    startRun: 6434,
+                    startRunTime: 4000,
+                    testRangeStrategy: 2
+                }],
+                bugs: [],
+                commits: [],
+                status: 'OK'
+            });
+
+            await analysisPromise;
+            assert.deepEqual(logger.info_logs, ['Start analyzing last 10 days measurement sets.',
+                '==== "Some test : Some metric" on "Some platform" ====', 'Nothing to analyze']);
+            assert.equal(logger.warn_logs.length, 0);
+            assert.equal(logger.error_logs.length, 0);
+        });
+
+        it('should favor regression if the progression is not big enough', async () => {
+            const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000);
+            const changeDetectionConfigs = {tTestSignificance: 0.99, maxDays: 10, confirmTaskRepeatCount: 4};
+            const logger = mockLogger();
+            const measurementSetAnalyzer = new MeasurementSetAnalyzer(changeDetectionConfigs, [measurementSet], logger);
+            measurementSetAnalyzer._startTime = 4000;
+            measurementSetAnalyzer._endTime = 5000;
+            const analysisPromise = measurementSetAnalyzer.analyzeOnce(measurementSet);
+
+            assert.equal(requests.length, 1);
+            assert.equal(requests[0].url, `/data/measurement-set-${MockModels.somePlatform.id()}-${MockModels.someMetric.id()}.json`);
+            requests[0].resolve({
+                'clusterStart': 1000,
+                'clusterSize': 1000,
+                'formatMap': ['id', 'mean', 'iterationCount', 'sum', 'squareSum', 'markedOutlier', 'revisions', 'commitTime', 'build', 'buildTime', 'buildNumber', 'builder'],
+                'configurations': {current: makeSampleRuns(dataBeforeSmallProgression.concat(simpleSegmentableValues), 6400, 4000, 1000 / 50)},
+                'startTime': 4000,
+                'endTime': 5000,
+                'lastModified': 5000,
+                'clusterCount': 4,
+                'status': 'OK'});
+
+            await MockRemoteAPI.waitForRequest();
+            assert.equal(requests.length, 2);
+            assert.equal(requests[1].url, '/api/analysis-tasks?platform=65&metric=2884');
+            requests[1].resolve({
+                analysisTasks: [],
+                bugs: [],
+                commits: [],
+                status: 'OK'
+            });
+
+            await MockRemoteAPI.waitForRequest();
+            assert.equal(requests.length, 3);
+            assert.equal(requests[2].url, '/privileged-api/create-analysis-task');
+            assert.deepEqual(requests[2].data, {
+                slaveName: 'test',
+                slavePassword: 'password',
+                name: 'Potential 2.38% regression on Some platform between WebKit: r40-r49',
+                startRun: 6439,
+                endRun: 6448,
+                repetitionCount: 4,
+                testGroupName: 'Confirm',
+                revisionSets: [{'11': {revision: 40, ownerRevision: null, patch: null}},
+                    {'11': {revision: 49, ownerRevision: null, patch: null}}]
+            });
+            requests[2].resolve();
+
+            await analysisPromise;
+            assert.deepEqual(logger.info_logs, ['Start analyzing last 10 days measurement sets.',
+                '==== "Some test : Some metric" on "Some platform" ====',
+                'Creating analysis task and confirming: "Potential 2.38% regression on Some platform between WebKit: r40-r49".']);
+            assert.equal(logger.warn_logs.length, 0);
+            assert.equal(logger.error_logs.length, 0);
+        });
+
+        it('should choose to analyze the progression when it is big enough', async () => {
+            const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000);
+            const changeDetectionConfigs = {tTestSignificance: 0.99, maxDays: 10, confirmTaskRepeatCount: 4};
+            const logger = mockLogger();
+            const measurementSetAnalyzer = new MeasurementSetAnalyzer(changeDetectionConfigs, [measurementSet], logger);
+            measurementSetAnalyzer._startTime = 4000;
+            measurementSetAnalyzer._endTime = 5000;
+            const analysisPromise = measurementSetAnalyzer.analyzeOnce(measurementSet);
+
+            assert.equal(requests.length, 1);
+            assert.equal(requests[0].url, `/data/measurement-set-${MockModels.somePlatform.id()}-${MockModels.someMetric.id()}.json`);
+            requests[0].resolve({
+                'clusterStart': 1000,
+                'clusterSize': 1000,
+                'formatMap': ['id', 'mean', 'iterationCount', 'sum', 'squareSum', 'markedOutlier', 'revisions', 'commitTime', 'build', 'buildTime', 'buildNumber', 'builder'],
+                'configurations': {current: makeSampleRuns(dataBeforeHugeProgression.concat(simpleSegmentableValues), 6400, 4000, 1000 / 50)},
+                'startTime': 4000,
+                'endTime': 5000,
+                'lastModified': 5000,
+                'clusterCount': 4,
+                'status': 'OK'});
+
+            await MockRemoteAPI.waitForRequest();
+            assert.equal(requests.length, 2);
+            assert.equal(requests[1].url, '/api/analysis-tasks?platform=65&metric=2884');
+            requests[1].resolve({
+                analysisTasks: [],
+                bugs: [],
+                commits: [],
+                status: 'OK'
+            });
+
+            await MockRemoteAPI.waitForRequest();
+            assert.equal(requests.length, 3);
+            assert.equal(requests[2].url, '/privileged-api/create-analysis-task');
+            assert.deepEqual(requests[2].data, {
+                slaveName: 'test',
+                slavePassword: 'password',
+                name: 'Potential 9.15% progression on Some platform between WebKit: r3-r8',
+                startRun: 6402,
+                endRun: 6407,
+                repetitionCount: 4,
+                testGroupName: 'Confirm',
+                revisionSets: [{'11': {revision: 3, ownerRevision: null, patch: null}},
+                    {'11': {revision: 8, ownerRevision: null, patch: null}}]
+            });
+            requests[2].resolve();
+
+            await analysisPromise;
+            assert.deepEqual(logger.info_logs, ['Start analyzing last 10 days measurement sets.',
+                '==== "Some test : Some metric" on "Some platform" ====',
+                'Creating analysis task and confirming: "Potential 9.15% progression on Some platform between WebKit: r3-r8".']);
+            assert.equal(logger.warn_logs.length, 0);
+            assert.equal(logger.error_logs.length, 0);
+        });
+    });
+});
\ No newline at end of file
diff --git a/Websites/perf.webkit.org/unit-tests/resources/mock-v3-models.js b/Websites/perf.webkit.org/unit-tests/resources/mock-v3-models.js
index c4277b0813b04f7fe6b24d19e43178f2abb4676a..b7c39a32073f53a6c8d6362497117c1bb2ce690e 100644
--- a/Websites/perf.webkit.org/unit-tests/resources/mock-v3-models.js
+++ b/Websites/perf.webkit.org/unit-tests/resources/mock-v3-models.js
@@ -14,6 +14,7 @@ var MockModels = {
         TestGroup.clearStaticMap();
         BuildRequest.clearStaticMap();
         Triggerable.clearStaticMap();
+        MeasurementSet._set = null;
 
         MockModels.osx = Repository.ensureSingleton(9, {name: 'OS X'});
         MockModels.ios = Repository.ensureSingleton(22, {name: 'iOS'});
@@ -27,7 +28,8 @@ var MockModels = {
 
         MockModels.someTest = Test.ensureSingleton(1, {name: 'Some test'});
         MockModels.someMetric = Metric.ensureSingleton(2884, {name: 'Some metric', test: MockModels.someTest});
-        MockModels.somePlatform = Platform.ensureSingleton(65, {name: 'Some platform', metrics: [MockModels.someMetric]});
+        MockModels.somePlatform = Platform.ensureSingleton(65, {name: 'Some platform', metrics: [MockModels.someMetric],
+            lastModifiedByMetric: {'2884': 5000, '1158': 5000}});
 
         MockModels.speedometer = Test.ensureSingleton(1928, {name: 'Speedometer'});
         MockModels.jetstream = Test.ensureSingleton(1886, {name: 'JetStream'});
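A note on the range-selection heuristic in _analyzeMeasurementSet, since it is easy to misread: among the candidate ranges that survive filtering, regressions count at face value while progressions are damped with a square root. A minimal sketch of that rule, assuming valueChangeSummary.relativeChange is expressed in percent units (as the task names in the unit tests suggest); 'weightedSignificance' is a name coined here for illustration:

    // Sketch of the weighting applied to each candidate range.
    function weightedSignificance(changeType, relativeChange)
    {
        const absoluteChange = Math.abs(relativeChange);
        // Damping progressions with a square root prefers regressions,
        // but a large enough progression can still win.
        return changeType === 'regression' ? absoluteChange : Math.sqrt(absoluteChange);
    }

    weightedSignificance('regression', 2.38);  // 2.38
    weightedSignificance('progression', 2.38); // ~1.54, loses to an equal-sized regression
    weightedSignificance('progression', 9.15); // ~3.02, large enough to beat a 2.38% regression

This matches the last two unit tests above: the small progression loses to the 2.38% regression, while the 9.15% progression wins.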
Flags: rniwa: review+