Following the recent publication of the webwork.js library by @kolodny, I felt curious about the relative performance of web workers compared to regular sequential JavaScript. With this code snippet you can try it yourself – just include the webwork.js library in your web app:
// Shared benchmark state. `i` and `j` are declared once here and reused as
// loop counters by the two tests below.
var now = 0;
var testLength = 1000000;
var batches = 10;
var fooArray = [];
var i = 0;
var j = 0;
// Build a worker via webwork.js. The function body is serialized and executed
// in a separate worker context, so it cannot see this script's scope — every
// variable it uses must be declared locally.
var worker = webwork(function (data) {
  // Each worker fills one batch of testLength/batches sequential integers.
  var fooArray = [];
  var testLength = data[0];
  var batches = data[1];
  // BUG FIX: `j` must be declared here. The outer `j` does not exist inside
  // the worker context, so the original code created an implicit global
  // (and would throw a ReferenceError in strict mode).
  var j;
  for (j = 0; j < testLength / batches; j++) {
    fooArray.push(j);
  }
  return fooArray;
});
// Baseline: fill the array sequentially on the main thread and time it.
now = new Date().getTime();
i = 0;
while (i < testLength) {
  fooArray[i] = i;
  i += 1;
}
console.log('It took %@ milliseconds to run without web workers'.fmt((new Date().getTime()) - now));
// Second test: parallelized with web workers. Spawn `batches` workers, each
// producing testLength/batches items, and merge results as callbacks fire.
now = new Date().getTime();
fooArray = [];
for (i = 0; i < batches; i++) {
  worker([testLength, batches], function (err, result) {
    if (err) return console.log("goodWorker Errored with " + err.message);
    // Callback order is nondeterministic, so the merged array's element order
    // may differ from the sequential test; only the total count matters here.
    fooArray = fooArray.concat(result);
    // Strict equality (was `==`): the final callback brings the total to
    // exactly testLength, which is when we stop the clock.
    if (fooArray.length === testLength) {
      console.log('It took %@ milliseconds to run with web workers'.fmt((new Date().getTime()) - now));
    }
  });
}
The result of splitting a batch of one million array inserts across ten parallel workers is quite amazing:
It took 1039 milliseconds to run without web workers
It took 391 milliseconds to run with web workers
So in this simple example, using web workers made processing roughly three times faster. Yet another reason to consider them for heavyweight client-side processing tasks!