WebKit Bugzilla
Attachment 343573 Details for
Bug 187028
: MeasurementSetAnalyzer should check triggerable availability before creating confirming A/B tests.
Home
|
New
|
Browse
|
Search
|
[?]
|
Reports
|
Requests
|
Help
|
New Account
|
Log In
Remember
[x]
|
Forgot Password
Login:
[x]
[patch]
Patch
bug-187028-20180625192957.patch (text/plain), 10.42 KB, created by
dewei_zhu
on 2018-06-25 19:29:57 PDT
(
hide
)
Description:
Patch
Filename:
MIME Type:
Creator:
dewei_zhu
Created:
2018-06-25 19:29:57 PDT
Size:
10.42 KB
patch
obsolete
>Subversion Revision: 233184 >diff --git a/Websites/perf.webkit.org/ChangeLog b/Websites/perf.webkit.org/ChangeLog >index 097aa3a72576759762be0a58d678bafef2c44aba..a1c5cb71387b671d9cf9af7565d6b47acab66ecb 100644 >--- a/Websites/perf.webkit.org/ChangeLog >+++ b/Websites/perf.webkit.org/ChangeLog >@@ -1,3 +1,18 @@ >+2018-06-25 Dewei Zhu <dewei_zhu@apple.com> >+ >+ MeasurementSetAnalyzer should check triggerable availability before creating confirming A/B tests. >+ https://bugs.webkit.org/show_bug.cgi?id=187028 >+ >+ Reviewed by NOBODY (OOPS!). >+ >+ If the triggerable is not available, MeasurementSetAnalyzer should only create analysis task without >+ confirming A/B tests. >+ >+ * tools/js/measurement-set-analyzer.js: Added logic to check triggerable availability. >+ (MeasurementSetAnalyzer.prototype.async._analyzeMeasurementSet): >+ (MeasurementSetAnalyzer): >+ * unit-tests/measurement-set-analyzer-tests.js: Updated unit tests and added a new unit test for this change. >+ > 2018-06-11 Dewei Zhu <dewei_zhu@apple.com> > > Extend test group rule to support 'userInitiated' field. >diff --git a/Websites/perf.webkit.org/tools/js/measurement-set-analyzer.js b/Websites/perf.webkit.org/tools/js/measurement-set-analyzer.js >index c93b90becf8b9d670f453f9bc3e4739ff9ccc856..ae3a2c4cbd00f14bd22f2ed3ff71695fd35b235d 100644 >--- a/Websites/perf.webkit.org/tools/js/measurement-set-analyzer.js >+++ b/Websites/perf.webkit.org/tools/js/measurement-set-analyzer.js >@@ -101,10 +101,11 @@ class MeasurementSetAnalyzer { > const startCommitSet = rangeWithMostSignificantChange.startPoint.commitSet(); > const endCommitSet = rangeWithMostSignificantChange.endPoint.commitSet(); > const summary = `Potential ${rangeWithMostSignificantChange.valueChangeSummary.changeLabel} on ${platform.name()} between ${CommitSet.diff(startCommitSet, endCommitSet)}`; >+ const confirmingTaskArguments = Triggerable.findByTestConfiguration(metric.test(), platform) ? 
['Confirm', 4, true] : []; > > // FIXME: The iteration count should be smarter than hard-coding. > const analysisTask = await AnalysisTask.create(summary, rangeWithMostSignificantChange.startPoint, >- rangeWithMostSignificantChange.endPoint, 'Confirm', 4, true); >+ rangeWithMostSignificantChange.endPoint, ...confirmingTaskArguments); > > this._logger.info(`Created analysis task with id "${analysisTask.id()}" to confirm: "${summary}".`); > } >diff --git a/Websites/perf.webkit.org/unit-tests/measurement-set-analyzer-tests.js b/Websites/perf.webkit.org/unit-tests/measurement-set-analyzer-tests.js >index aa50081d0c9fc5f6edbaa0675504a38581572754..1f21491c559eea9128694be2134cb816bbba603e 100644 >--- a/Websites/perf.webkit.org/unit-tests/measurement-set-analyzer-tests.js >+++ b/Websites/perf.webkit.org/unit-tests/measurement-set-analyzer-tests.js >@@ -148,6 +148,11 @@ describe('MeasurementSetAnalyzer', () => { > > it('should not show created analysis task logging if failed to create analysis task', async () => { > PrivilegedAPI.configure('test', 'password'); >+ >+ Triggerable.ensureSingleton(4, {name: 'some-triggerable', >+ repositoryGroups: [MockModels.osRepositoryGroup, MockModels.svnRepositoryGroup, MockModels.gitRepositoryGroup, MockModels.svnRepositoryWithOwnedRepositoryGroup], >+ configurations: [{test: MockModels.someMetric.test(), platform: MockModels.somePlatform}]}); >+ > const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000); > const logger = mockLogger(); > const measurementSetAnalyzer = new MeasurementSetAnalyzer([measurementSet], 4000, 5000, logger); >@@ -202,6 +207,11 @@ describe('MeasurementSetAnalyzer', () => { > > it('should analyze if a new regression is detected', async () => { > PrivilegedAPI.configure('test', 'password'); >+ >+ Triggerable.ensureSingleton(4, {name: 'some-triggerable', >+ repositoryGroups: [MockModels.osRepositoryGroup, MockModels.svnRepositoryGroup, MockModels.gitRepositoryGroup, 
MockModels.svnRepositoryWithOwnedRepositoryGroup], >+ configurations: [{test: MockModels.someMetric.test(), platform: MockModels.somePlatform}]}); >+ > const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000); > const logger = mockLogger(); > const measurementSetAnalyzer = new MeasurementSetAnalyzer([measurementSet], 4000, 5000, logger); >@@ -287,6 +297,89 @@ describe('MeasurementSetAnalyzer', () => { > assert.deepEqual(logger.error_logs, []); > }); > >+ >+ it('should not create confirming A/B tests if a new regression is detected but no triggerable available', async () => { >+ PrivilegedAPI.configure('test', 'password'); >+ const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000); >+ const logger = mockLogger(); >+ const measurementSetAnalyzer = new MeasurementSetAnalyzer([measurementSet], 4000, 5000, logger); >+ measurementSetAnalyzer._startTime = 4000; >+ measurementSetAnalyzer._endTime = 5000; >+ const analysisPromise = measurementSetAnalyzer.analyzeOnce(measurementSet); >+ >+ assert.equal(requests.length, 1); >+ assert.equal(requests[0].url, `/data/measurement-set-${MockModels.somePlatform.id()}-${MockModels.someMetric.id()}.json`); >+ requests[0].resolve({ >+ 'clusterStart': 1000, >+ 'clusterSize': 1000, >+ 'formatMap': ['id', 'mean', 'iterationCount', 'sum', 'squareSum', 'markedOutlier', 'revisions', 'commitTime', 'build', 'buildTime', 'buildNumber', 'builder'], >+ 'configurations': {current: makeSampleRuns(simpleSegmentableValues, 6400, 4000, 1000 / 50)}, >+ 'startTime': 4000, >+ 'endTime': 5000, >+ 'lastModified': 5000, >+ 'clusterCount': 4, >+ 'status': 'OK'}); >+ >+ await MockRemoteAPI.waitForRequest(); >+ assert.equal(requests.length, 2); >+ assert.equal(requests[1].url, '/api/analysis-tasks?platform=65&metric=2884'); >+ requests[1].resolve({ >+ analysisTasks: [], >+ bugs: [], >+ commits: [], >+ status: 'OK' >+ }); >+ >+ await 
MockRemoteAPI.waitForRequest(); >+ assert.equal(requests.length, 3); >+ assert.equal(requests[2].url, '/privileged-api/create-analysis-task'); >+ assert.deepEqual(requests[2].data, { >+ slaveName: 'test', >+ slavePassword: 'password', >+ name: 'Potential 2.38% regression on Some platform between WebKit: r35-r44', >+ startRun: 6434, >+ endRun: 6443 >+ }); >+ requests[2].resolve({taskId: '5255', status: 'OK'}); >+ >+ await MockRemoteAPI.waitForRequest(); >+ assert.equal(requests.length, 4); >+ assert.equal(requests[3].url, '/api/analysis-tasks?id=5255'); >+ >+ requests[3].resolve({ >+ analysisTasks: [{ >+ author: null, >+ bugs: [], >+ buildRequestCount: 0, >+ finishedBuildRequestCount: 0, >+ category: 'identified', >+ causes: [], >+ createdAt: 4500, >+ endRun: 6448, >+ endRunTime: 5000, >+ fixes: [], >+ id: 5255, >+ metric: MockModels.someMetric.id(), >+ name: 'Potential 2.38% regression on Some platform between WebKit: r40-r49', >+ needed: null, >+ platform: MockModels.somePlatform.id(), >+ result: 'regression', >+ segmentationStrategy: 1, >+ startRun: 6439, >+ startRunTime: 4000, >+ testRangeStrategy: 2 >+ }], >+ bugs: [], >+ commits: [], >+ status: 'OK' >+ }); >+ >+ await analysisPromise; >+ assert.deepEqual(logger.info_logs, ['==== "Some test : Some metric" on "Some platform" ====', >+ 'Created analysis task with id "5255" to confirm: "Potential 2.38% regression on Some platform between WebKit: r35-r44".']); >+ assert.deepEqual(logger.error_logs, []); >+ }); >+ > it('should not analyze if there is an overlapped existing analysis task', async () => { > const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000); > const logger = mockLogger(); >@@ -347,6 +440,11 @@ describe('MeasurementSetAnalyzer', () => { > > it('should favor regression if the progression is not big enough', async () => { > PrivilegedAPI.configure('test', 'password'); >+ >+ Triggerable.ensureSingleton(4, {name: 'some-triggerable', >+ 
repositoryGroups: [MockModels.osRepositoryGroup, MockModels.svnRepositoryGroup, MockModels.gitRepositoryGroup, MockModels.svnRepositoryWithOwnedRepositoryGroup], >+ configurations: [{test: MockModels.someMetric.test(), platform: MockModels.somePlatform}]}); >+ > const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000); > const logger = mockLogger(); > const measurementSetAnalyzer = new MeasurementSetAnalyzer([measurementSet], 4000, 5000, logger); >@@ -434,6 +532,11 @@ describe('MeasurementSetAnalyzer', () => { > > it('should choose analyze progression when it is big enough', async () => { > PrivilegedAPI.configure('test', 'password'); >+ >+ Triggerable.ensureSingleton(4, {name: 'some-triggerable', >+ repositoryGroups: [MockModels.osRepositoryGroup, MockModels.svnRepositoryGroup, MockModels.gitRepositoryGroup, MockModels.svnRepositoryWithOwnedRepositoryGroup], >+ configurations: [{test: MockModels.someMetric.test(), platform: MockModels.somePlatform}]}); >+ > const measurementSet = MeasurementSet.findSet(MockModels.somePlatform.id(), MockModels.someMetric.id(), 5000); > const logger = mockLogger(); > const measurementSetAnalyzer = new MeasurementSetAnalyzer([measurementSet], 4000, 5000, logger);
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Flags:
rniwa
:
review+
Actions:
View
|
Formatted Diff
|
Diff
Attachments on
bug 187028
: 343573