// Formats a byte count as a human-readable string ('B', 'KB', 'MB', 'GB').
// The unit constants are captured once in an IIFE so the returned function
// closes over them instead of recomputing them on every call.
var toSizeString = (function () {
    var BYTES_PER_KB = 1024.0;
    var BYTES_PER_MB = BYTES_PER_KB * 1024;
    var BYTES_PER_GB = BYTES_PER_MB * 1024;
    return function (size) {
        var wholeGb = size / BYTES_PER_GB;
        var remAfterGb = size % BYTES_PER_GB;
        var wholeMb = remAfterGb / BYTES_PER_MB;
        var remAfterMb = remAfterGb % BYTES_PER_MB;
        var wholeKb = remAfterMb / BYTES_PER_KB;
        // Pick the largest unit with a non-zero whole part.
        if (Math.floor(wholeGb)) {
            return wholeGb.toFixed(1) + 'GB';
        }
        if (Math.floor(wholeMb)) {
            return wholeMb.toFixed(1) + 'MB';
        }
        if (Math.floor(wholeKb)) {
            return wholeKb.toFixed(1) + 'KB';
        }
        return size + 'B';
    };
})();
And here is the faster function (note that it must recompute the same KB/MB/GB values over and over again on every call). Where does it gain its performance?
// Formats a byte count as a human-readable string ('B', 'KB', 'MB', 'GB').
// Unlike the closure version above, the unit constants are plain locals
// recomputed on every invocation.
function toSizeString(size) {
    var BYTES_PER_KB = 1024.0;
    var BYTES_PER_MB = BYTES_PER_KB * 1024;
    var BYTES_PER_GB = BYTES_PER_MB * 1024;
    var wholeGb = size / BYTES_PER_GB;
    var remAfterGb = size % BYTES_PER_GB;
    var wholeMb = remAfterGb / BYTES_PER_MB;
    var remAfterMb = remAfterGb % BYTES_PER_MB;
    var wholeKb = remAfterMb / BYTES_PER_KB;
    // Pick the largest unit with a non-zero whole part.
    if (Math.floor(wholeGb)) {
        return wholeGb.toFixed(1) + 'GB';
    }
    if (Math.floor(wholeMb)) {
        return wholeMb.toFixed(1) + 'MB';
    }
    if (Math.floor(wholeKb)) {
        return wholeKb.toFixed(1) + 'KB';
    }
    return size + 'B';
}
**Answer (score: 8)**
Modern JavaScript engines all do just-in-time compilation. You can’t make any presumptions about what it “must create over and over again.” That sort of calculation is relatively easy to optimize out, in either case.
On the other hand, closing over constant variables is not a typical case you would target JIT compilation for. You typically create a closure when you want to be able to change those variables on different invocations. You’re also creating an additional pointer dereference to access those variables, like the difference between accessing a member variable and a local int in OOP.
This sort of situation is why people throw out the “premature optimization” line. The easy optimizations are already done by the compiler.
**Answer (score: 2)**
Variables are cheap. Execution contexts and scope chains are expensive.
There are various answers that essentially boil down to “because closures”, and those are essentially true, but the problem isn’t specifically with the closure, it’s the fact that you have a function referencing variables in a different scope. You’d have the same problem if these were global variables on the window
object, as opposed to local variables inside the IIFE. Try it and see.
So in your first function, when the engine sees this statement:
var gbSize = size / GB;
It has to take the following steps:
1. Search for a variable `size` in the current scope. (Found it.)
2. Search for a variable `GB` in the current scope. (Not found.)
3. Search for a variable `GB` in the parent scope. (Found it.)
4. Do the calculation and assign to `gbSize`.
Step 3 is considerably more expensive than just allocating a variable. Moreover, you do this five times, including twice each for `GB` and `MB`. I suspect that if you aliased these at the beginning of the function (e.g. `var gb = GB`) and referenced the alias instead, it would actually produce a small speedup, although it’s also possible that some JS engines already perform this optimization. And of course, the most effective way to speed up execution is simply not to traverse the scope chain at all.
Keep in mind that JavaScript is not like a compiled, statically-typed language where the compiler resolves these variable addresses at compile time. The JS engine has to resolve them by name, and these lookups happen at runtime, every time. So you want to avoid them when possible.
Variable assignment is extremely cheap in JavaScript. It might actually be the cheapest operation, although I have nothing to back up that statement. Nonetheless, it’s safe to say that it’s almost never a good idea to try to avoid creating variables; almost any optimization you try to do in that area is actually going to end up making things worse, performance-wise.
**Answer (score: 4)**
One example involves a closure, the other does not. Implementing closures is kinda tricky, since closed over variables do not work like normal variables. This is more obvious in a low-level language like C, but I’ll use JavaScript to illustrate this.
A closure does not only consist of a function, but also of all variables it closed over. When we want to invoke that function, we also need to provide all closed over variables. We can model a closure by a function that receives an object as first argument that represents these closed over variables:
// Model a closure explicitly: `vars` is the record of "closed-over"
// variables that must be passed in alongside the real arguments.
function add(vars, y) {
    vars.x = vars.x + y;
}

function getSum(vars) {
    return vars.x;
}

function makeAdder(x) {
    // The returned object carries both the state and the functions using it.
    return { x: x, add: add, getSum: getSum };
}

var adder = makeAdder(40);
adder.add(adder, 2);
console.log(adder.getSum(adder)); //=> 42
Note the awkward calling convention `closure.apply(closure, ...realArgs)` this requires.
JavaScript’s built-in object support makes it possible to omit the explicit `vars` argument, and lets us use `this` instead:
// Same model, but the state record is passed implicitly as `this`,
// so callers no longer repeat the object on every invocation.
function add(y) {
    this.x = this.x + y;
}

function getSum() {
    return this.x;
}

function makeAdder(x) {
    // The returned object carries both the state and the functions using it.
    return { x: x, add: add, getSum: getSum };
}

var adder = makeAdder(40);
adder.add(2);
console.log(adder.getSum()); //=> 42
Those examples are equivalent to this code actually using closures:
// The real-closure version: the running total lives in the enclosing
// scope and both returned functions capture it.
function makeAdder(x) {
    var total = x;
    return {
        add: function (y) { total += y; },
        getSum: function () { return total; },
    };
}
var adder = makeAdder(40);
adder.add(2);
console.log(adder.getSum()); //=> 42
In this last example, the object is only used to group the two returned functions; the `this` binding is irrelevant. All the details of making closures possible – passing in hidden data to the actual function, changing all accesses to closure variables into lookups in that hidden data – are taken care of by the language.
But calling closures involves the overhead of passing in that extra data, and running a closure involves the overhead of lookups in that extra data – made worse by bad cache locality and usually a pointer dereference when compared with ordinary variables – so that it’s not surprising that a solution that does not rely on closures performs better. Especially since everything your closure saves you to do is a few extremely cheap arithmetic operations, which might even be constant-folded during parsing.