cryptsetup/lib/utils_safe_memory.c
Samanta Navarro fc5f9cc46d lib: always clear size in crypt_safe_free
Writing into allocated memory right before calling free() is a dead store
that smart compilers can optimize away. To prevent this, the write must be
performed through a volatile-qualified pointer, as is already done in
crypt_safe_memzero.
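
For illustration only (this sketch is not part of the patch, and
wipe_and_free is a made-up name): the difference between a plain store,
which the compiler may drop as a dead store, and a store through a
volatile-qualified pointer, which must be emitted:

#include <stdlib.h>

/* Hypothetical example, not cryptsetup code. */
void wipe_and_free(size_t *p)
{
        *p = 0;                  /* dead store right before free(), may be removed */

        volatile size_t *vp = p;
        *vp = 0;                 /* volatile access, must be performed */

        free(p);
}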

It was difficult to provoke GCC into removing the assignment, but I was
able to find a way to demonstrate it:

* Build cryptsetup with: CFLAGS="-flto -O3 -g" ./configure --enable-static
* Create main.c:

#include <libcryptsetup.h>

int
main(void) {
        char *x = crypt_safe_alloc(64);
        crypt_safe_free(x);
        return 0;
}

* Build the program with: gcc -O3 -flto -static -o main main.c -lcryptsetup
* Disassemble: objdump -d main

My output on an amd64 system is:

0000000000401670 <main>:
  401670:       41 54                   push   %r12
  401672:       bf f0 03 00 00          mov    $0x3f0,%edi
  401677:       55                      push   %rbp
  401678:       48 83 ec 08             sub    $0x8,%rsp
  40167c:       e8 ff 4d 01 00          callq  416480 <__libc_malloc>
  401681:       48 85 c0                test   %rax,%rax
  401684:       74 2f                   je     4016b5 <main+0x45>
  401686:       48 c7 00 e8 03 00 00    movq   $0x3e8,(%rax)
  40168d:       4c 8d 60 08             lea    0x8(%rax),%r12
  401691:       48 89 c5                mov    %rax,%rbp
  401694:       be e8 03 00 00          mov    $0x3e8,%esi
  401699:       4c 89 e7                mov    %r12,%rdi
  40169c:       e8 4f 76 01 00          callq  418cf0 <explicit_bzero>
  4016a1:       48 8b 75 00             mov    0x0(%rbp),%rsi
  4016a5:       4c 89 e7                mov    %r12,%rdi
  4016a8:       e8 43 76 01 00          callq  418cf0 <explicit_bzero>
  4016ad:       48 89 ef                mov    %rbp,%rdi
  4016b0:       e8 3b 54 01 00          callq  416af0 <__free>
  4016b5:       48 83 c4 08             add    $0x8,%rsp
  4016b9:       31 c0                   xor    %eax,%eax
  4016bb:       5d                      pop    %rbp
  4016bc:       41 5c                   pop    %r12
  4016be:       c3                      retq
  4016bf:       90                      nop

You can see that the memory allocation and the explicit_bzero calls were
not optimized away, but the size assignment disappeared.

Compiling without -O3 or without -flto does not inline the calls and
keeps the assignment. The shared library shipped with my distribution
also still contains the assignment.
2020-12-02 11:57:03 +00:00


/*
 * utils_safe_memory - safe memory helpers
 *
 * Copyright (C) 2009-2020 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2009-2020 Milan Broz
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <stdlib.h>
#include <string.h>

#include "libcryptsetup.h"

struct safe_allocation {
        size_t size;
        char data[0];
};

/*
 * Replacement for memset(s, 0, n) on stack that can be optimized out
 * Also used in safe allocations for explicit memory wipe.
 */
void crypt_safe_memzero(void *data, size_t size)
{
#ifdef HAVE_EXPLICIT_BZERO
        explicit_bzero(data, size);
#else
        volatile uint8_t *p = (volatile uint8_t *)data;

        while (size--)
                *p++ = 0;
#endif
}

/* safe allocations */
void *crypt_safe_alloc(size_t size)
{
        struct safe_allocation *alloc;

        if (!size || size > (SIZE_MAX - offsetof(struct safe_allocation, data)))
                return NULL;

        alloc = malloc(size + offsetof(struct safe_allocation, data));
        if (!alloc)
                return NULL;

        alloc->size = size;
        crypt_safe_memzero(&alloc->data, size);

        /* coverity[leaked_storage] */
        return &alloc->data;
}
void crypt_safe_free(void *data)
{
        struct safe_allocation *alloc;
        volatile size_t *s;

        if (!data)
                return;

        alloc = (struct safe_allocation *)
                ((char *)data - offsetof(struct safe_allocation, data));

        crypt_safe_memzero(data, alloc->size);

        /* Clear the stored size through a volatile pointer so the
         * write cannot be optimized away as a dead store before free(). */
        s = (volatile size_t *)&alloc->size;
        *s = 0x55aa55aa;

        free(alloc);
}
void *crypt_safe_realloc(void *data, size_t size)
{
        struct safe_allocation *alloc;
        void *new_data;

        new_data = crypt_safe_alloc(size);

        if (new_data && data) {
                alloc = (struct safe_allocation *)
                        ((char *)data - offsetof(struct safe_allocation, data));

                if (size > alloc->size)
                        size = alloc->size;

                memcpy(new_data, data, size);
        }

        crypt_safe_free(data);

        return new_data;
}
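
For context, a small usage sketch of this API (illustrative only, not part
of the file above; the buffer contents and sizes are made up):

#include <string.h>
#include <libcryptsetup.h>

int main(void)
{
        /* crypt_safe_alloc returns zero-filled memory or NULL */
        char *key = crypt_safe_alloc(32);
        if (!key)
                return 1;

        memcpy(key, "example key material", 21);

        /* the old block is copied into the new one and then wiped;
           on failure the old block is still freed and NULL is returned */
        key = crypt_safe_realloc(key, 64);
        if (!key)
                return 1;

        /* wipes the data and the stored size before calling free() */
        crypt_safe_free(key);
        return 0;
}

Like the commit's reproduction example, this would be built against the
library, e.g. gcc -o example example.c -lcryptsetup.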