It's very simple, just check the assembly:
First, using direct access:
operator.cpp
|
// A 4x4 matrix stored as a flat array of 16 floats, with a global instance m1.
struct matrix
{
float elements[16];
// Returns a reference to the element, so callers can assign through it: m[i] = x.
float& operator[](int index) { return elements[index]; }
} m1;
int main()
{
// Direct member access: the compiler can resolve the address at compile time.
m1.elements[1] = 2;
return 0;
}
|
The main function looks like:
|
main:
pushq %rbp
movq %rsp, %rbp
subq $32, %rsp
call __main                 # runtime init stub -- presumably MinGW/Windows GCC, TODO confirm
movl $0x40000000, %eax      # 0x40000000 is the IEEE-754 bit pattern of 2.0f
movl %eax, 4+m1(%rip)       # store directly to m1+4 bytes, i.e. elements[1] (floats are 4 bytes)
movl $0, %eax               # return 0
leave
ret
|
Now with operator overloading:
|
// Same type as before: a flat array of 16 floats with a subscript operator.
struct matrix
{
float elements[16];
// Returns a reference to the element, so callers can assign through it: m[i] = x.
float& operator[](int index) { return elements[index]; }
} m1;
int main()
{
// Goes through operator[], which without optimization compiles to a real call.
m1[1] = 2;
return 0;
}
|
The assembly for the main function looks like:
|
main:
pushq %rbp
movq %rsp, %rbp
subq $32, %rsp
call __main
movl $1, %edx               # second argument: the index (1)
leaq m1(%rip), %rcx         # first argument: &m1 ('this') -- rcx/edx suggests Win64 calling convention
call _ZN6matrixixEi         # mangled name of matrix::operator[](int)
movl $0x40000000, %edx      # IEEE-754 bit pattern of 2.0f
movl %edx, (%rax)           # store through the returned float& (address came back in rax)
movl $0, %eax               # return 0
leave
ret
|
And here is the assembly for the overloaded operator:
|
_ZN6matrixixEi:             # matrix::operator[](int)
pushq %rbp
movq %rsp, %rbp
movq %rcx, 16(%rbp)         # spill 'this' to the home slot
movl %edx, 24(%rbp)         # spill the index argument
movl 24(%rbp), %eax         # reload the index (no optimization, so no register reuse)
cltq                        # sign-extend index to 64 bits
salq $2, %rax               # index * 4 (sizeof float)
addq 16(%rbp), %rax         # this + index*4 = &elements[index], returned in rax
leave
ret
|
The version with operator overloading requires a function call, whereas with direct access the compiler simply looks up the address of m1.elements and assigns to it directly. Clearly, direct access is faster here.
Of course, this is without compiler optimizations — with inlining enabled the two versions typically compile to identical code, and in real-world applications the difference is pretty much nothing.