crypto: add aes (ecb, ctr, xts)

Author: Michael Scire
Date:   2020-04-05 23:25:28 -07:00
Commit: e04679f05a (parent 8d1ada2a1b)

19 changed files with 3191 additions and 1 deletion


@@ -0,0 +1,92 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <vapours.hpp>
namespace ams::crypto {
size_t EncryptAes128Ctr(void *dst, size_t dst_size, const void *key, size_t key_size, const void *iv, size_t iv_size, const void *src, size_t src_size) {
Aes128CtrEncryptor aes;
aes.Initialize(key, key_size, iv, iv_size);
return aes.Update(dst, dst_size, src, src_size);
}
size_t EncryptAes192Ctr(void *dst, size_t dst_size, const void *key, size_t key_size, const void *iv, size_t iv_size, const void *src, size_t src_size) {
Aes192CtrEncryptor aes;
aes.Initialize(key, key_size, iv, iv_size);
return aes.Update(dst, dst_size, src, src_size);
}
size_t EncryptAes256Ctr(void *dst, size_t dst_size, const void *key, size_t key_size, const void *iv, size_t iv_size, const void *src, size_t src_size) {
Aes256CtrEncryptor aes;
aes.Initialize(key, key_size, iv, iv_size);
return aes.Update(dst, dst_size, src, src_size);
}
size_t DecryptAes128Ctr(void *dst, size_t dst_size, const void *key, size_t key_size, const void *iv, size_t iv_size, const void *src, size_t src_size) {
Aes128CtrDecryptor aes;
aes.Initialize(key, key_size, iv, iv_size);
return aes.Update(dst, dst_size, src, src_size);
}
size_t DecryptAes192Ctr(void *dst, size_t dst_size, const void *key, size_t key_size, const void *iv, size_t iv_size, const void *src, size_t src_size) {
Aes192CtrDecryptor aes;
aes.Initialize(key, key_size, iv, iv_size);
return aes.Update(dst, dst_size, src, src_size);
}
size_t DecryptAes256Ctr(void *dst, size_t dst_size, const void *key, size_t key_size, const void *iv, size_t iv_size, const void *src, size_t src_size) {
Aes256CtrDecryptor aes;
aes.Initialize(key, key_size, iv, iv_size);
return aes.Update(dst, dst_size, src, src_size);
}
size_t EncryptAes128CtrPartial(void *dst, size_t dst_size, const void *key, size_t key_size, const void *iv, size_t iv_size, s64 offset, const void *src, size_t src_size) {
Aes128CtrEncryptor aes;
aes.Initialize(key, key_size, iv, iv_size, offset);
return aes.Update(dst, dst_size, src, src_size);
}
size_t EncryptAes192CtrPartial(void *dst, size_t dst_size, const void *key, size_t key_size, const void *iv, size_t iv_size, s64 offset, const void *src, size_t src_size) {
Aes192CtrEncryptor aes;
aes.Initialize(key, key_size, iv, iv_size, offset);
return aes.Update(dst, dst_size, src, src_size);
}
size_t EncryptAes256CtrPartial(void *dst, size_t dst_size, const void *key, size_t key_size, const void *iv, size_t iv_size, s64 offset, const void *src, size_t src_size) {
Aes256CtrEncryptor aes;
aes.Initialize(key, key_size, iv, iv_size, offset);
return aes.Update(dst, dst_size, src, src_size);
}
size_t DecryptAes128CtrPartial(void *dst, size_t dst_size, const void *key, size_t key_size, const void *iv, size_t iv_size, s64 offset, const void *src, size_t src_size) {
Aes128CtrDecryptor aes;
aes.Initialize(key, key_size, iv, iv_size, offset);
return aes.Update(dst, dst_size, src, src_size);
}
size_t DecryptAes192CtrPartial(void *dst, size_t dst_size, const void *key, size_t key_size, const void *iv, size_t iv_size, s64 offset, const void *src, size_t src_size) {
Aes192CtrDecryptor aes;
aes.Initialize(key, key_size, iv, iv_size, offset);
return aes.Update(dst, dst_size, src, src_size);
}
size_t DecryptAes256CtrPartial(void *dst, size_t dst_size, const void *key, size_t key_size, const void *iv, size_t iv_size, s64 offset, const void *src, size_t src_size) {
Aes256CtrDecryptor aes;
aes.Initialize(key, key_size, iv, iv_size, offset);
return aes.Update(dst, dst_size, src, src_size);
}
}
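For orientation, here is a minimal usage sketch of the one-shot helpers above. The key, IV, and buffer contents are hypothetical placeholders, and the sketch assumes the Partial variants' offset parameter is a byte offset into the keystream (suggested, but not proven, by the s64 Initialize overload). In CTR mode the output length always equals the input length.

#include <vapours.hpp>

void ExampleCtrUsage() {
    u8 key[16]   = {};    /* Hypothetical key material. */
    u8 iv[16]    = {};    /* Hypothetical initial counter. */
    u8 plain[64] = {};
    u8 cipher[64];
    u8 roundtrip[64];

    /* CTR is symmetric: both calls XOR the input with the same keystream. */
    const size_t enc_size = ams::crypto::EncryptAes128Ctr(cipher, sizeof(cipher), key, sizeof(key), iv, sizeof(iv), plain, sizeof(plain));
    const size_t dec_size = ams::crypto::DecryptAes128Ctr(roundtrip, sizeof(roundtrip), key, sizeof(key), iv, sizeof(iv), cipher, enc_size);

    /* The Partial variants also take an offset, so a window of a larger
       stream can be crypted without re-processing the preceding bytes. */
    const size_t mid_size = ams::crypto::EncryptAes128CtrPartial(cipher + 16, sizeof(cipher) - 16, key, sizeof(key), iv, sizeof(iv), 16, plain + 16, 32);

    (void)dec_size; (void)mid_size;
}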


@@ -0,0 +1,118 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <vapours.hpp>
namespace ams::crypto::impl {
#ifdef ATMOSPHERE_IS_STRATOSPHERE
namespace {
constexpr bool IsSupportedKeySize(size_t size) {
return size == 16 || size == 24 || size == 32;
}
}
template<size_t KeySize>
AesImpl<KeySize>::~AesImpl() {
ClearMemory(this, sizeof(*this));
}
template<size_t KeySize>
void AesImpl<KeySize>::Initialize(const void *key, size_t key_size, bool is_encrypt) {
static_assert(IsSupportedKeySize(KeySize));
AMS_ASSERT(key_size == KeySize);
if constexpr (KeySize == 16) {
/* Aes 128. */
static_assert(sizeof(this->round_keys) == sizeof(::Aes128Context));
aes128ContextCreate(reinterpret_cast<Aes128Context *>(this->round_keys), key, is_encrypt);
} else if constexpr (KeySize == 24) {
/* Aes 192. */
static_assert(sizeof(this->round_keys) == sizeof(::Aes192Context));
aes192ContextCreate(reinterpret_cast<Aes192Context *>(this->round_keys), key, is_encrypt);
} else if constexpr (KeySize == 32) {
/* Aes 256. */
static_assert(sizeof(this->round_keys) == sizeof(::Aes256Context));
aes256ContextCreate(reinterpret_cast<Aes256Context *>(this->round_keys), key, is_encrypt);
} else {
/* Invalid key size. */
static_assert(!std::is_same<AesImpl<KeySize>, AesImpl<KeySize>>::value);
}
}
template<size_t KeySize>
void AesImpl<KeySize>::EncryptBlock(void *dst, size_t dst_size, const void *src, size_t src_size) const {
static_assert(IsSupportedKeySize(KeySize));
AMS_ASSERT(src_size >= BlockSize);
AMS_ASSERT(dst_size >= BlockSize);
if constexpr (KeySize == 16) {
/* Aes 128. */
static_assert(sizeof(this->round_keys) == sizeof(::Aes128Context));
aes128EncryptBlock(reinterpret_cast<const Aes128Context *>(this->round_keys), dst, src);
} else if constexpr (KeySize == 24) {
/* Aes 192. */
static_assert(sizeof(this->round_keys) == sizeof(::Aes192Context));
aes192EncryptBlock(reinterpret_cast<const Aes192Context *>(this->round_keys), dst, src);
} else if constexpr (KeySize == 32) {
/* Aes 256. */
static_assert(sizeof(this->round_keys) == sizeof(::Aes256Context));
aes256EncryptBlock(reinterpret_cast<const Aes256Context *>(this->round_keys), dst, src);
} else {
/* Invalid key size. */
static_assert(!std::is_same<AesImpl<KeySize>, AesImpl<KeySize>>::value);
}
}
template<size_t KeySize>
void AesImpl<KeySize>::DecryptBlock(void *dst, size_t dst_size, const void *src, size_t src_size) const {
static_assert(IsSupportedKeySize(KeySize));
AMS_ASSERT(src_size >= BlockSize);
AMS_ASSERT(dst_size >= BlockSize);
if constexpr (KeySize == 16) {
/* Aes 128. */
static_assert(sizeof(this->round_keys) == sizeof(::Aes128Context));
aes128DecryptBlock(reinterpret_cast<const Aes128Context *>(this->round_keys), dst, src);
} else if constexpr (KeySize == 24) {
/* Aes 192. */
static_assert(sizeof(this->round_keys) == sizeof(::Aes192Context));
aes192DecryptBlock(reinterpret_cast<const Aes192Context *>(this->round_keys), dst, src);
} else if constexpr (KeySize == 32) {
/* Aes 256. */
static_assert(sizeof(this->round_keys) == sizeof(::Aes256Context));
aes256DecryptBlock(reinterpret_cast<const Aes256Context *>(this->round_keys), dst, src);
} else {
/* Invalid key size. */
static_assert(!std::is_same<AesImpl<KeySize>, AesImpl<KeySize>>::value);
}
}
/* Explicitly instantiate the three supported key sizes. */
template class AesImpl<16>;
template class AesImpl<24>;
template class AesImpl<32>;
#else
/* TODO: Non-EL0 implementation. */
#endif
}
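As a short sketch, the block primitive above can also be driven directly (only names visible in this diff are used; key and block contents are placeholders). Note that DecryptBlock needs a context initialized with is_encrypt = false, since the cipher direction is baked in when Initialize forwards it to the libnx context-create functions.

ams::crypto::impl::AesImpl<16> aes;

u8 key[16] = {};  /* Placeholder key. */
aes.Initialize(key, sizeof(key), true); /* true: build encryption round keys. */

u8 src[16] = {};  /* AES always operates on full 16-byte blocks. */
u8 dst[16];
aes.EncryptBlock(dst, sizeof(dst), src, sizeof(src));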


@@ -0,0 +1,588 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <vapours.hpp>
#include <arm_neon.h>
namespace ams::crypto::impl {
#ifdef ATMOSPHERE_IS_STRATOSPHERE
/* Variable management macros. */
#define DECLARE_ROUND_KEY_VAR(n) \
const uint8x16_t round_key_##n = vld1q_u8(keys + (BlockSize * n))
#define AES_ENC_DEC_OUTPUT_THREE_BLOCKS() \
[tmp0]"+w"(tmp0), [tmp1]"+w"(tmp1), [tmp2]"+w"(tmp2)
#define AES_ENC_DEC_OUTPUT_THREE_CTRS() \
[ctr0]"+w"(ctr0), [ctr1]"+w"(ctr1), [ctr2]"+w"(ctr2)
#define AES_ENC_DEC_OUTPUT_ONE_BLOCK() \
[tmp0]"+w"(tmp0)
#define AES_ENC_DEC_OUTPUT_ONE_CTR() \
[ctr0]"+w"(ctr0)
#define CTR_INCREMENT_OUTPUT_HIGH_LOW() \
[high]"=&r"(high), [low]"=&r"(low)
#define CTR_INCREMENT_OUTPUT_HIGH_LOW_TMP() \
[high_tmp]"=&r"(high_tmp), [low_tmp]"=&r"(low_tmp)
#define CTR_INCREMENT_OUTPUT_HL_SINGLE_TMP() \
[hl_tmp]"=&r"(hl_tmp)
#define AES_ENC_DEC_INPUT_ROUND_KEY(n) \
[round_key_##n]"w"(round_key_##n)
/* AES Encryption macros. */
#define AES_ENC_ROUND(n, i) \
"aese %[tmp" #i "].16b, %[round_key_" #n "].16b\n" \
"aesmc %[tmp" #i "].16b, %[tmp" #i "].16b\n"
#define AES_ENC_SECOND_LAST_ROUND(n, i) \
"aese %[tmp" #i "].16b, %[round_key_" #n "].16b\n"
#define AES_ENC_LAST_ROUND(n, i) \
"eor %[tmp" #i "].16b, %[tmp" #i "].16b, %[round_key_" #n "].16b\n"
namespace {
ALWAYS_INLINE uint8x16_t IncrementCounterOptimized(const uint8x16_t ctr) {
uint8x16_t inc;
uint64_t high, low;
/* Use inline asm. TODO: Is this better than using intrinsics? */
__asm__ __volatile__ (
"mov %[high], %[ctr].d[0]\n"
"mov %[low], %[ctr].d[1]\n"
"rev %[high], %[high]\n"
"rev %[low], %[low]\n"
"adds %[low], %[low], 1\n"
"cinc %[high], %[high], cs\n"
"rev %[high], %[high]\n"
"rev %[low], %[low]\n"
"mov %[inc].d[0], %[high]\n"
"mov %[inc].d[1], %[low]\n"
: [inc]"=w"(inc),
CTR_INCREMENT_OUTPUT_HIGH_LOW()
: [ctr]"w"(ctr)
: "cc"
);
return inc;
}
}
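For reference, here is a portable sketch of what the asm above computes; the counter is treated as a 128-bit big-endian integer, and this helper is hypothetical, not part of the commit:

ALWAYS_INLINE uint8x16_t IncrementCounterPortable(const uint8x16_t ctr) {
    u8 bytes[16];
    vst1q_u8(bytes, ctr);
    /* Propagate the +1 carry from the last (least significant) byte upward. */
    for (int i = 15; i >= 0; --i) {
        if (++bytes[i] != 0) {
            break;
        }
    }
    return vld1q_u8(bytes);
}

The asm version instead moves the two 64-bit halves into general registers, byte-reverses them so that adds/cinc can perform a native 128-bit increment, and reverses back before reinserting them into the vector register.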
template<>
void CtrModeImpl<AesEncryptor128>::ProcessBlocks(u8 *dst, const u8 *src, size_t num_blocks) {
/* Preload all round keys + iv into neon registers. */
const u8 *keys = this->block_cipher->GetRoundKey();
DECLARE_ROUND_KEY_VAR(0);
DECLARE_ROUND_KEY_VAR(1);
DECLARE_ROUND_KEY_VAR(2);
DECLARE_ROUND_KEY_VAR(3);
DECLARE_ROUND_KEY_VAR(4);
DECLARE_ROUND_KEY_VAR(5);
DECLARE_ROUND_KEY_VAR(6);
DECLARE_ROUND_KEY_VAR(7);
DECLARE_ROUND_KEY_VAR(8);
DECLARE_ROUND_KEY_VAR(9);
DECLARE_ROUND_KEY_VAR(10);
uint8x16_t ctr0 = vld1q_u8(this->counter);
uint64_t high, low;
/* Process three blocks at a time, when possible. */
if (num_blocks >= 3) {
/* Increment CTR twice. */
uint8x16_t ctr1 = IncrementCounterOptimized(ctr0);
uint8x16_t ctr2 = IncrementCounterOptimized(ctr1);
uint64_t high_tmp, low_tmp;
while (num_blocks >= 3) {
/* Read blocks in. Keep them in registers for XOR later. */
const uint8x16_t block0 = vld1q_u8(src);
src += AES_BLOCK_SIZE;
const uint8x16_t block1 = vld1q_u8(src);
src += AES_BLOCK_SIZE;
const uint8x16_t block2 = vld1q_u8(src);
src += AES_BLOCK_SIZE;
/* We'll be encrypting the three CTRs. */
uint8x16_t tmp0 = ctr0, tmp1 = ctr1, tmp2 = ctr2;
/* Actually do encryption, use optimized asm. */
/* Interleave CTR calculations with AES ones, to mask latencies. */
__asm__ __volatile__ (
AES_ENC_ROUND(0, 0) "mov %[high], %[ctr2].d[0]\n"
AES_ENC_ROUND(0, 1) "mov %[low], %[ctr2].d[1]\n"
AES_ENC_ROUND(0, 2) "rev %[high], %[high]\n"
AES_ENC_ROUND(1, 0) "rev %[low], %[low]\n"
AES_ENC_ROUND(1, 1) "adds %[low], %[low], 1\n"
AES_ENC_ROUND(1, 2) "cinc %[high], %[high], cs\n"
AES_ENC_ROUND(2, 0) "rev %[high_tmp], %[high]\n"
AES_ENC_ROUND(2, 1) "rev %[low_tmp], %[low]\n"
AES_ENC_ROUND(2, 2) "mov %[ctr0].d[0], %[high_tmp]\n"
AES_ENC_ROUND(3, 0) "mov %[ctr0].d[1], %[low_tmp]\n"
AES_ENC_ROUND(3, 1) "adds %[low], %[low], 1\n"
AES_ENC_ROUND(3, 2) "cinc %[high], %[high], cs\n"
AES_ENC_ROUND(4, 0) "rev %[high_tmp], %[high]\n"
AES_ENC_ROUND(4, 1) "rev %[low_tmp], %[low]\n"
AES_ENC_ROUND(4, 2) "mov %[ctr1].d[0], %[high_tmp]\n"
AES_ENC_ROUND(5, 0) "mov %[ctr1].d[1], %[low_tmp]\n"
AES_ENC_ROUND(5, 1) "adds %[low], %[low], 1\n"
AES_ENC_ROUND(5, 2) "cinc %[high], %[high], cs\n"
AES_ENC_ROUND(6, 0) "rev %[high_tmp], %[high]\n"
AES_ENC_ROUND(6, 1) "rev %[low_tmp], %[low]\n"
AES_ENC_ROUND(6, 2) "mov %[ctr2].d[0], %[high_tmp]\n"
AES_ENC_ROUND(7, 0) "mov %[ctr2].d[1], %[low_tmp]\n"
AES_ENC_ROUND(7, 1)
AES_ENC_ROUND(7, 2)
AES_ENC_ROUND(8, 0) AES_ENC_ROUND(8, 1) AES_ENC_ROUND(8, 2)
AES_ENC_SECOND_LAST_ROUND(9, 0) AES_ENC_SECOND_LAST_ROUND(9, 1) AES_ENC_SECOND_LAST_ROUND(9, 2)
AES_ENC_LAST_ROUND(10, 0) AES_ENC_LAST_ROUND(10, 1) AES_ENC_LAST_ROUND(10, 2)
: AES_ENC_DEC_OUTPUT_THREE_BLOCKS(),
AES_ENC_DEC_OUTPUT_THREE_CTRS(),
CTR_INCREMENT_OUTPUT_HIGH_LOW(),
CTR_INCREMENT_OUTPUT_HIGH_LOW_TMP()
: AES_ENC_DEC_INPUT_ROUND_KEY(0),
AES_ENC_DEC_INPUT_ROUND_KEY(1),
AES_ENC_DEC_INPUT_ROUND_KEY(2),
AES_ENC_DEC_INPUT_ROUND_KEY(3),
AES_ENC_DEC_INPUT_ROUND_KEY(4),
AES_ENC_DEC_INPUT_ROUND_KEY(5),
AES_ENC_DEC_INPUT_ROUND_KEY(6),
AES_ENC_DEC_INPUT_ROUND_KEY(7),
AES_ENC_DEC_INPUT_ROUND_KEY(8),
AES_ENC_DEC_INPUT_ROUND_KEY(9),
AES_ENC_DEC_INPUT_ROUND_KEY(10)
: "cc"
);
/* XOR blocks. */
tmp0 = veorq_u8(block0, tmp0);
tmp1 = veorq_u8(block1, tmp1);
tmp2 = veorq_u8(block2, tmp2);
/* Store to output. */
vst1q_u8(dst, tmp0);
dst += AES_BLOCK_SIZE;
vst1q_u8(dst, tmp1);
dst += AES_BLOCK_SIZE;
vst1q_u8(dst, tmp2);
dst += AES_BLOCK_SIZE;
num_blocks -= 3;
}
}
while (num_blocks >= 1) {
/* Read block in, keep in register for XOR. */
const uint8x16_t block0 = vld1q_u8(src);
src += AES_BLOCK_SIZE;
/* We'll be encrypting the CTR. */
uint8x16_t tmp0 = ctr0;
/* Actually do encryption, use optimized asm. */
/* Interleave CTR calculations with AES ones, to mask latencies. */
__asm__ __volatile__ (
AES_ENC_ROUND(0, 0) "mov %[high], %[ctr0].d[0]\n"
AES_ENC_ROUND(1, 0) "mov %[low], %[ctr0].d[1]\n"
AES_ENC_ROUND(2, 0) "rev %[high], %[high]\n"
AES_ENC_ROUND(3, 0) "rev %[low], %[low]\n"
AES_ENC_ROUND(4, 0) "adds %[low], %[low], 1\n"
AES_ENC_ROUND(5, 0) "cinc %[high], %[high], cs\n"
AES_ENC_ROUND(6, 0) "rev %[high], %[high]\n"
AES_ENC_ROUND(7, 0) "rev %[low], %[low]\n"
AES_ENC_ROUND(8, 0) "mov %[ctr0].d[0], %[high]\n"
AES_ENC_SECOND_LAST_ROUND(9, 0) "mov %[ctr0].d[1], %[low]\n"
AES_ENC_LAST_ROUND(10, 0)
: AES_ENC_DEC_OUTPUT_ONE_BLOCK(),
AES_ENC_DEC_OUTPUT_ONE_CTR(),
CTR_INCREMENT_OUTPUT_HIGH_LOW()
: AES_ENC_DEC_INPUT_ROUND_KEY(0),
AES_ENC_DEC_INPUT_ROUND_KEY(1),
AES_ENC_DEC_INPUT_ROUND_KEY(2),
AES_ENC_DEC_INPUT_ROUND_KEY(3),
AES_ENC_DEC_INPUT_ROUND_KEY(4),
AES_ENC_DEC_INPUT_ROUND_KEY(5),
AES_ENC_DEC_INPUT_ROUND_KEY(6),
AES_ENC_DEC_INPUT_ROUND_KEY(7),
AES_ENC_DEC_INPUT_ROUND_KEY(8),
AES_ENC_DEC_INPUT_ROUND_KEY(9),
AES_ENC_DEC_INPUT_ROUND_KEY(10)
: "cc"
);
/* XOR blocks. */
tmp0 = veorq_u8(block0, tmp0);
/* Store to output. */
vst1q_u8(dst, tmp0);
dst += AES_BLOCK_SIZE;
num_blocks--;
}
vst1q_u8(this->counter, ctr0);
}
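The loop above is the standard CTR construction: for each 16-byte block i, C_i = P_i xor E_K(ctr + i). Because the block cipher is only ever run forward over the counter, encryption and decryption are the same operation, and the 192- and 256-bit specializations that follow differ only in the number of rounds and round keys.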
template<>
void CtrModeImpl<AesEncryptor192>::ProcessBlocks(u8 *dst, const u8 *src, size_t num_blocks) {
/* Preload all round keys + iv into neon registers. */
const u8 *keys = this->block_cipher->GetRoundKey();
DECLARE_ROUND_KEY_VAR(0);
DECLARE_ROUND_KEY_VAR(1);
DECLARE_ROUND_KEY_VAR(2);
DECLARE_ROUND_KEY_VAR(3);
DECLARE_ROUND_KEY_VAR(4);
DECLARE_ROUND_KEY_VAR(5);
DECLARE_ROUND_KEY_VAR(6);
DECLARE_ROUND_KEY_VAR(7);
DECLARE_ROUND_KEY_VAR(8);
DECLARE_ROUND_KEY_VAR(9);
DECLARE_ROUND_KEY_VAR(10);
DECLARE_ROUND_KEY_VAR(11);
DECLARE_ROUND_KEY_VAR(12);
uint8x16_t ctr0 = vld1q_u8(this->counter);
uint64_t high, low;
/* Process three blocks at a time, when possible. */
if (num_blocks >= 3) {
/* Increment CTR twice. */
uint8x16_t ctr1 = IncrementCounterOptimized(ctr0);
uint8x16_t ctr2 = IncrementCounterOptimized(ctr1);
uint64_t high_tmp, low_tmp;
while (num_blocks >= 3) {
/* Read blocks in. Keep them in registers for XOR later. */
const uint8x16_t block0 = vld1q_u8(src);
src += AES_BLOCK_SIZE;
const uint8x16_t block1 = vld1q_u8(src);
src += AES_BLOCK_SIZE;
const uint8x16_t block2 = vld1q_u8(src);
src += AES_BLOCK_SIZE;
/* We'll be encrypting the three CTRs. */
uint8x16_t tmp0 = ctr0, tmp1 = ctr1, tmp2 = ctr2;
/* Actually do encryption, use optimized asm. */
/* Interleave CTR calculations with AES ones, to mask latencies. */
__asm__ __volatile__ (
AES_ENC_ROUND(0, 0) "mov %[high], %[ctr2].d[0]\n"
AES_ENC_ROUND(0, 1) "mov %[low], %[ctr2].d[1]\n"
AES_ENC_ROUND(0, 2) "rev %[high], %[high]\n"
AES_ENC_ROUND(1, 0) "rev %[low], %[low]\n"
AES_ENC_ROUND(1, 1) "adds %[low], %[low], 1\n"
AES_ENC_ROUND(1, 2) "cinc %[high], %[high], cs\n"
AES_ENC_ROUND(2, 0) "rev %[high_tmp], %[high]\n"
AES_ENC_ROUND(2, 1) "rev %[low_tmp], %[low]\n"
AES_ENC_ROUND(2, 2) "mov %[ctr0].d[0], %[high_tmp]\n"
AES_ENC_ROUND(3, 0) "mov %[ctr0].d[1], %[low_tmp]\n"
AES_ENC_ROUND(3, 1) "adds %[low], %[low], 1\n"
AES_ENC_ROUND(3, 2) "cinc %[high], %[high], cs\n"
AES_ENC_ROUND(4, 0) "rev %[high_tmp], %[high]\n"
AES_ENC_ROUND(4, 1) "rev %[low_tmp], %[low]\n"
AES_ENC_ROUND(4, 2) "mov %[ctr1].d[0], %[high_tmp]\n"
AES_ENC_ROUND(5, 0) "mov %[ctr1].d[1], %[low_tmp]\n"
AES_ENC_ROUND(5, 1) "adds %[low], %[low], 1\n"
AES_ENC_ROUND(5, 2) "cinc %[high], %[high], cs\n"
AES_ENC_ROUND(6, 0) "rev %[high_tmp], %[high]\n"
AES_ENC_ROUND(6, 1) "rev %[low_tmp], %[low]\n"
AES_ENC_ROUND(6, 2) "mov %[ctr2].d[0], %[high_tmp]\n"
AES_ENC_ROUND(7, 0) "mov %[ctr2].d[1], %[low_tmp]\n"
AES_ENC_ROUND(7, 1)
AES_ENC_ROUND(7, 2)
AES_ENC_ROUND(8, 0) AES_ENC_ROUND(8, 1) AES_ENC_ROUND(8, 2)
AES_ENC_ROUND(9, 0) AES_ENC_ROUND(9, 1) AES_ENC_ROUND(9, 2)
AES_ENC_ROUND(10, 0) AES_ENC_ROUND(10, 1) AES_ENC_ROUND(10, 2)
AES_ENC_SECOND_LAST_ROUND(11, 0) AES_ENC_SECOND_LAST_ROUND(11, 1) AES_ENC_SECOND_LAST_ROUND(11, 2)
AES_ENC_LAST_ROUND(12, 0) AES_ENC_LAST_ROUND(12, 1) AES_ENC_LAST_ROUND(12, 2)
: AES_ENC_DEC_OUTPUT_THREE_BLOCKS(),
AES_ENC_DEC_OUTPUT_THREE_CTRS(),
CTR_INCREMENT_OUTPUT_HIGH_LOW(),
CTR_INCREMENT_OUTPUT_HIGH_LOW_TMP()
: AES_ENC_DEC_INPUT_ROUND_KEY(0),
AES_ENC_DEC_INPUT_ROUND_KEY(1),
AES_ENC_DEC_INPUT_ROUND_KEY(2),
AES_ENC_DEC_INPUT_ROUND_KEY(3),
AES_ENC_DEC_INPUT_ROUND_KEY(4),
AES_ENC_DEC_INPUT_ROUND_KEY(5),
AES_ENC_DEC_INPUT_ROUND_KEY(6),
AES_ENC_DEC_INPUT_ROUND_KEY(7),
AES_ENC_DEC_INPUT_ROUND_KEY(8),
AES_ENC_DEC_INPUT_ROUND_KEY(9),
AES_ENC_DEC_INPUT_ROUND_KEY(10),
AES_ENC_DEC_INPUT_ROUND_KEY(11),
AES_ENC_DEC_INPUT_ROUND_KEY(12)
: "cc"
);
/* XOR blocks. */
tmp0 = veorq_u8(block0, tmp0);
tmp1 = veorq_u8(block1, tmp1);
tmp2 = veorq_u8(block2, tmp2);
/* Store to output. */
vst1q_u8(dst, tmp0);
dst += AES_BLOCK_SIZE;
vst1q_u8(dst, tmp1);
dst += AES_BLOCK_SIZE;
vst1q_u8(dst, tmp2);
dst += AES_BLOCK_SIZE;
num_blocks -= 3;
}
}
while (num_blocks >= 1) {
/* Read block in, keep in register for XOR. */
const uint8x16_t block0 = vld1q_u8(src);
src += AES_BLOCK_SIZE;
/* We'll be encrypting the CTR. */
uint8x16_t tmp0 = ctr0;
/* Actually do encryption, use optimized asm. */
/* Interleave CTR calculations with AES ones, to mask latencies. */
__asm__ __volatile__ (
AES_ENC_ROUND(0, 0) "mov %[high], %[ctr0].d[0]\n"
AES_ENC_ROUND(1, 0) "mov %[low], %[ctr0].d[1]\n"
AES_ENC_ROUND(2, 0) "rev %[high], %[high]\n"
AES_ENC_ROUND(3, 0) "rev %[low], %[low]\n"
AES_ENC_ROUND(4, 0) "adds %[low], %[low], 1\n"
AES_ENC_ROUND(5, 0) "cinc %[high], %[high], cs\n"
AES_ENC_ROUND(6, 0) "rev %[high], %[high]\n"
AES_ENC_ROUND(7, 0) "rev %[low], %[low]\n"
AES_ENC_ROUND(8, 0) "mov %[ctr0].d[0], %[high]\n"
AES_ENC_ROUND(9, 0) "mov %[ctr0].d[1], %[low]\n"
AES_ENC_ROUND(10, 0)
AES_ENC_SECOND_LAST_ROUND(11, 0)
AES_ENC_LAST_ROUND(12, 0)
: AES_ENC_DEC_OUTPUT_ONE_BLOCK(),
AES_ENC_DEC_OUTPUT_ONE_CTR(),
CTR_INCREMENT_OUTPUT_HIGH_LOW()
: AES_ENC_DEC_INPUT_ROUND_KEY(0),
AES_ENC_DEC_INPUT_ROUND_KEY(1),
AES_ENC_DEC_INPUT_ROUND_KEY(2),
AES_ENC_DEC_INPUT_ROUND_KEY(3),
AES_ENC_DEC_INPUT_ROUND_KEY(4),
AES_ENC_DEC_INPUT_ROUND_KEY(5),
AES_ENC_DEC_INPUT_ROUND_KEY(6),
AES_ENC_DEC_INPUT_ROUND_KEY(7),
AES_ENC_DEC_INPUT_ROUND_KEY(8),
AES_ENC_DEC_INPUT_ROUND_KEY(9),
AES_ENC_DEC_INPUT_ROUND_KEY(10),
AES_ENC_DEC_INPUT_ROUND_KEY(11),
AES_ENC_DEC_INPUT_ROUND_KEY(12)
: "cc"
);
/* XOR blocks. */
tmp0 = veorq_u8(block0, tmp0);
/* Store to output. */
vst1q_u8(dst, tmp0);
dst += AES_BLOCK_SIZE;
num_blocks--;
}
vst1q_u8(this->counter, ctr0);
}
template<>
void CtrModeImpl<AesEncryptor256>::ProcessBlocks(u8 *dst, const u8 *src, size_t num_blocks) {
/* Preload all round keys + iv into neon registers. */
const u8 *keys = this->block_cipher->GetRoundKey();
DECLARE_ROUND_KEY_VAR(0);
DECLARE_ROUND_KEY_VAR(1);
DECLARE_ROUND_KEY_VAR(2);
DECLARE_ROUND_KEY_VAR(3);
DECLARE_ROUND_KEY_VAR(4);
DECLARE_ROUND_KEY_VAR(5);
DECLARE_ROUND_KEY_VAR(6);
DECLARE_ROUND_KEY_VAR(7);
DECLARE_ROUND_KEY_VAR(8);
DECLARE_ROUND_KEY_VAR(9);
DECLARE_ROUND_KEY_VAR(10);
DECLARE_ROUND_KEY_VAR(11);
DECLARE_ROUND_KEY_VAR(12);
DECLARE_ROUND_KEY_VAR(13);
DECLARE_ROUND_KEY_VAR(14);
uint8x16_t ctr0 = vld1q_u8(this->counter);
uint64_t high, low;
/* Process three blocks at a time, when possible. */
if (num_blocks >= 3) {
/* Increment CTR twice. */
uint8x16_t ctr1 = IncrementCounterOptimized(ctr0);
uint8x16_t ctr2 = IncrementCounterOptimized(ctr1);
uint64_t hl_tmp;
while (num_blocks >= 3) {
/* Read blocks in. Keep them in registers for XOR later. */
const uint8x16_t block0 = vld1q_u8(src);
src += AES_BLOCK_SIZE;
const uint8x16_t block1 = vld1q_u8(src);
src += AES_BLOCK_SIZE;
const uint8x16_t block2 = vld1q_u8(src);
src += AES_BLOCK_SIZE;
/* We'll be encrypting the three CTRs. */
uint8x16_t tmp0 = ctr0, tmp1 = ctr1, tmp2 = ctr2;
/* Actually do encryption, use optimized asm. */
/* Interleave CTR calculations with AES ones, to mask latencies. */
/* Note: The asm here uses only one temporary u64 instead of two, due to the 30-operand limit on inline asm. */
__asm__ __volatile__ (
AES_ENC_ROUND(0, 0) "mov %[high], %[ctr2].d[0]\n"
AES_ENC_ROUND(0, 1) "mov %[low], %[ctr2].d[1]\n"
AES_ENC_ROUND(0, 2) "rev %[high], %[high]\n"
AES_ENC_ROUND(1, 0) "rev %[low], %[low]\n"
AES_ENC_ROUND(1, 1) "adds %[low], %[low], 1\n"
AES_ENC_ROUND(1, 2) "cinc %[high], %[high], cs\n"
AES_ENC_ROUND(2, 0) "rev %[hl_tmp], %[high]\n"
AES_ENC_ROUND(2, 1) "mov %[ctr0].d[0], %[hl_tmp]\n"
AES_ENC_ROUND(2, 2) "rev %[hl_tmp], %[low]\n"
AES_ENC_ROUND(3, 0) "mov %[ctr0].d[1], %[hl_tmp]\n"
AES_ENC_ROUND(3, 1) "adds %[low], %[low], 1\n"
AES_ENC_ROUND(3, 2) "cinc %[high], %[high], cs\n"
AES_ENC_ROUND(4, 0) "rev %[hl_tmp], %[high]\n"
AES_ENC_ROUND(4, 1) "mov %[ctr1].d[0], %[hl_tmp]\n"
AES_ENC_ROUND(4, 2) "rev %[hl_tmp], %[low]\n"
AES_ENC_ROUND(5, 0) "mov %[ctr1].d[1], %[hl_tmp]\n"
AES_ENC_ROUND(5, 1) "adds %[low], %[low], 1\n"
AES_ENC_ROUND(5, 2) "cinc %[high], %[high], cs\n"
AES_ENC_ROUND(6, 0) "rev %[hl_tmp], %[high]\n"
AES_ENC_ROUND(6, 1) "mov %[ctr2].d[0], %[hl_tmp]\n"
AES_ENC_ROUND(6, 2) "rev %[hl_tmp], %[low]\n"
AES_ENC_ROUND(7, 0) "mov %[ctr2].d[1], %[hl_tmp]\n"
AES_ENC_ROUND(7, 1)
AES_ENC_ROUND(7, 2)
AES_ENC_ROUND(8, 0) AES_ENC_ROUND(8, 1) AES_ENC_ROUND(8, 2)
AES_ENC_ROUND(9, 0) AES_ENC_ROUND(9, 1) AES_ENC_ROUND(9, 2)
AES_ENC_ROUND(10, 0) AES_ENC_ROUND(10, 1) AES_ENC_ROUND(10, 2)
AES_ENC_ROUND(11, 0) AES_ENC_ROUND(11, 1) AES_ENC_ROUND(11, 2)
AES_ENC_ROUND(12, 0) AES_ENC_ROUND(12, 1) AES_ENC_ROUND(12, 2)
AES_ENC_SECOND_LAST_ROUND(13, 0) AES_ENC_SECOND_LAST_ROUND(13, 1) AES_ENC_SECOND_LAST_ROUND(13, 2)
AES_ENC_LAST_ROUND(14, 0) AES_ENC_LAST_ROUND(14, 1) AES_ENC_LAST_ROUND(14, 2)
: AES_ENC_DEC_OUTPUT_THREE_BLOCKS(),
AES_ENC_DEC_OUTPUT_THREE_CTRS(),
CTR_INCREMENT_OUTPUT_HIGH_LOW(),
CTR_INCREMENT_OUTPUT_HL_SINGLE_TMP()
: AES_ENC_DEC_INPUT_ROUND_KEY(0),
AES_ENC_DEC_INPUT_ROUND_KEY(1),
AES_ENC_DEC_INPUT_ROUND_KEY(2),
AES_ENC_DEC_INPUT_ROUND_KEY(3),
AES_ENC_DEC_INPUT_ROUND_KEY(4),
AES_ENC_DEC_INPUT_ROUND_KEY(5),
AES_ENC_DEC_INPUT_ROUND_KEY(6),
AES_ENC_DEC_INPUT_ROUND_KEY(7),
AES_ENC_DEC_INPUT_ROUND_KEY(8),
AES_ENC_DEC_INPUT_ROUND_KEY(9),
AES_ENC_DEC_INPUT_ROUND_KEY(10),
AES_ENC_DEC_INPUT_ROUND_KEY(11),
AES_ENC_DEC_INPUT_ROUND_KEY(12),
AES_ENC_DEC_INPUT_ROUND_KEY(13),
AES_ENC_DEC_INPUT_ROUND_KEY(14)
: "cc"
);
/* XOR blocks. */
tmp0 = veorq_u8(block0, tmp0);
tmp1 = veorq_u8(block1, tmp1);
tmp2 = veorq_u8(block2, tmp2);
/* Store to output. */
vst1q_u8(dst, tmp0);
dst += AES_BLOCK_SIZE;
vst1q_u8(dst, tmp1);
dst += AES_BLOCK_SIZE;
vst1q_u8(dst, tmp2);
dst += AES_BLOCK_SIZE;
num_blocks -= 3;
}
}
while (num_blocks >= 1) {
/* Read block in, keep in register for XOR. */
const uint8x16_t block0 = vld1q_u8(src);
src += AES_BLOCK_SIZE;
/* We'll be encrypting the CTR. */
uint8x16_t tmp0 = ctr0;
/* Actually do encryption, use optimized asm. */
/* Interleave CTR calculations with AES ones, to mask latencies. */
__asm__ __volatile__ (
AES_ENC_ROUND(0, 0) "mov %[high], %[ctr0].d[0]\n"
AES_ENC_ROUND(1, 0) "mov %[low], %[ctr0].d[1]\n"
AES_ENC_ROUND(2, 0) "rev %[high], %[high]\n"
AES_ENC_ROUND(3, 0) "rev %[low], %[low]\n"
AES_ENC_ROUND(4, 0) "adds %[low], %[low], 1\n"
AES_ENC_ROUND(5, 0) "cinc %[high], %[high], cs\n"
AES_ENC_ROUND(6, 0) "rev %[high], %[high]\n"
AES_ENC_ROUND(7, 0) "rev %[low], %[low]\n"
AES_ENC_ROUND(8, 0) "mov %[ctr0].d[0], %[high]\n"
AES_ENC_ROUND(9, 0) "mov %[ctr0].d[1], %[low]\n"
AES_ENC_ROUND(10, 0)
AES_ENC_ROUND(11, 0)
AES_ENC_ROUND(12, 0)
AES_ENC_SECOND_LAST_ROUND(13, 0)
AES_ENC_LAST_ROUND(14, 0)
: AES_ENC_DEC_OUTPUT_ONE_BLOCK(),
AES_ENC_DEC_OUTPUT_ONE_CTR(),
CTR_INCREMENT_OUTPUT_HIGH_LOW()
: AES_ENC_DEC_INPUT_ROUND_KEY(0),
AES_ENC_DEC_INPUT_ROUND_KEY(1),
AES_ENC_DEC_INPUT_ROUND_KEY(2),
AES_ENC_DEC_INPUT_ROUND_KEY(3),
AES_ENC_DEC_INPUT_ROUND_KEY(4),
AES_ENC_DEC_INPUT_ROUND_KEY(5),
AES_ENC_DEC_INPUT_ROUND_KEY(6),
AES_ENC_DEC_INPUT_ROUND_KEY(7),
AES_ENC_DEC_INPUT_ROUND_KEY(8),
AES_ENC_DEC_INPUT_ROUND_KEY(9),
AES_ENC_DEC_INPUT_ROUND_KEY(10),
AES_ENC_DEC_INPUT_ROUND_KEY(11),
AES_ENC_DEC_INPUT_ROUND_KEY(12),
AES_ENC_DEC_INPUT_ROUND_KEY(13),
AES_ENC_DEC_INPUT_ROUND_KEY(14)
: "cc"
);
/* XOR blocks. */
tmp0 = veorq_u8(block0, tmp0);
/* Store to output. */
vst1q_u8(dst, tmp0);
dst += AES_BLOCK_SIZE;
num_blocks--;
}
vst1q_u8(this->counter, ctr0);
}
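The three specializations track the AES round schedule Nr = Nk + 6, where Nk is the key size in 32-bit words: 10 rounds for AES-128 (round keys 0-10), 12 for AES-192 (round keys 0-12), and 14 for AES-256 (round keys 0-14). Hence the differing DECLARE_ROUND_KEY_VAR counts and asm round sequences above.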
#else
/* TODO: Non-EL0 implementation. */
#endif
}


@@ -0,0 +1,95 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <vapours.hpp>
namespace ams::crypto::impl {
template<typename Cipher, typename Self>
void UpdateImpl(Self *self, const void *src, size_t src_size) {
const size_t BlockSize = self->GetBlockSize();
const u8 *src_u8 = static_cast<const u8 *>(src);
size_t remaining = src_size;
if (const size_t buffered = self->GetBufferedDataSize(); buffered > 0) {
const size_t partial = std::min(BlockSize - buffered, remaining);
self->ProcessPartialData(src_u8, partial);
src_u8 += partial;
remaining -= partial;
}
if (remaining >= BlockSize) {
const size_t num_blocks = remaining / BlockSize;
self->template ProcessBlocks<Cipher>(src_u8, num_blocks);
const size_t processed = num_blocks * BlockSize;
src_u8 += processed;
remaining -= processed;
}
if (remaining > 0) {
self->ProcessRemainingData(src_u8, remaining);
}
}
template<typename Cipher, typename Self>
size_t UpdateImpl(Self *self, void *dst, size_t dst_size, const void *src, size_t src_size) {
const size_t BlockSize = self->GetBlockSize();
const u8 *src_u8 = static_cast<const u8 *>(src);
u8 *dst_u8 = static_cast<u8 *>(dst);
size_t remaining = src_size;
size_t total_processed = 0;
if (const size_t buffered = self->GetBufferedDataSize(); buffered > 0) {
const size_t partial = std::min(BlockSize - buffered, remaining);
const size_t processed = self->ProcessPartialData(dst_u8, src_u8, partial);
dst_u8 += processed;
total_processed += processed;
src_u8 += partial;
remaining -= partial;
}
if (remaining >= BlockSize) {
const size_t num_blocks = remaining / BlockSize;
const size_t input_size = num_blocks * BlockSize;
const size_t processed = self->template ProcessBlocks<Cipher>(dst_u8, src_u8, num_blocks);
dst_u8 += processed;
total_processed += processed;
src_u8 += input_size;
remaining -= input_size;
}
if (remaining > 0) {
const size_t processed = self->ProcessRemainingData(dst_u8, src_u8, remaining);
total_processed += processed;
}
return total_processed;
}
}
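Worked example of the flow above with BlockSize = 16: an Update of 10 bytes is entirely buffered via ProcessPartialData; a subsequent Update of 30 bytes first completes the buffered block (6 bytes via ProcessPartialData), then crypts one full 16-byte block via ProcessBlocks, and finally hands the trailing 8 bytes to ProcessRemainingData. What happens to those trailing bytes is mode-specific; the XTS implementation later in this commit simply buffers them for finalization.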

(File diff suppressed because it is too large.)


@@ -0,0 +1,140 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <vapours.hpp>
namespace ams::crypto::impl {
namespace {
/* TODO: Support non-Nintendo Endianness */
void MultiplyTweakGeneric(u64 *tweak) {
const u64 carry = tweak[1] & (static_cast<u64>(1) << (BITSIZEOF(u64) - 1));
tweak[1] = ((tweak[1] << 1) | (tweak[0] >> (BITSIZEOF(u64) - 1)));
tweak[0] = (tweak[0] << 1);
if (carry) {
tweak[0] ^= static_cast<u64>(0x87);
}
}
}
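MultiplyTweakGeneric is the standard XTS tweak update: treating (tweak[1]:tweak[0]) as a 128-bit value with tweak[1] as the high word, it multiplies by x in GF(2^128) modulo the polynomial x^128 + x^7 + x^2 + x + 1, i.e. a one-bit left shift with any carry out of bit 127 folded back in as 0x87. For example, a tweak with tweak[1] = 1 << 63 and tweak[0] = 0 becomes tweak[1] = 0, tweak[0] = 0x87 after one call.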
void XtsModeImpl::ProcessBlock(u8 *dst, const u8 *src) {
u8 tmp[BlockSize];
/* Xor. */
for (size_t i = 0; i < BlockSize; i++) {
tmp[i] = this->tweak[i] ^ src[i];
}
/* Crypt. */
this->cipher_func(tmp, tmp, this->cipher_ctx);
/* Xor. */
for (size_t i = 0; i < BlockSize; i++) {
dst[i] = this->tweak[i] ^ tmp[i];
}
MultiplyTweakGeneric(reinterpret_cast<u64 *>(this->tweak));
}
size_t XtsModeImpl::FinalizeEncryption(void *dst, size_t dst_size) {
AMS_ASSERT(this->state == State_Processing);
u8 *dst_u8 = static_cast<u8 *>(dst);
size_t processed = 0;
if (this->num_buffered == 0) {
this->ProcessBlock(dst_u8, this->last_block);
processed = BlockSize;
} else {
this->ProcessBlock(this->last_block, this->last_block);
std::memcpy(this->buffer + this->num_buffered, this->last_block + this->num_buffered, BlockSize - this->num_buffered);
this->ProcessBlock(dst_u8, this->buffer);
std::memcpy(dst_u8 + BlockSize, this->last_block, this->num_buffered);
processed = BlockSize + this->num_buffered;
}
this->state = State_Done;
return processed;
}
size_t XtsModeImpl::FinalizeDecryption(void *dst, size_t dst_size) {
AMS_ASSERT(this->state == State_Processing);
u8 *dst_u8 = static_cast<u8 *>(dst);
size_t processed = 0;
if (this->num_buffered == 0) {
this->ProcessBlock(dst_u8, this->last_block);
processed = BlockSize;
} else {
u8 tmp_tweak[BlockSize];
std::memcpy(tmp_tweak, this->tweak, BlockSize);
MultiplyTweakGeneric(reinterpret_cast<u64 *>(this->tweak));
this->ProcessBlock(this->last_block, this->last_block);
std::memcpy(this->buffer + this->num_buffered, this->last_block + this->num_buffered, BlockSize - this->num_buffered);
std::memcpy(this->tweak, tmp_tweak, BlockSize);
this->ProcessBlock(dst_u8, this->buffer);
std::memcpy(dst_u8 + BlockSize, this->last_block, this->num_buffered);
processed = BlockSize + this->num_buffered;
}
this->state = State_Done;
return processed;
}
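Both finalizers implement XTS ciphertext stealing for inputs that are not a multiple of the block size. On encryption, the buffered last full block is encrypted first; its trailing BlockSize - num_buffered ciphertext bytes are grafted onto the short final fragment to pad it to a full block; that padded block is encrypted and written as the last full output block; and the leading num_buffered bytes of the earlier ciphertext are emitted, truncated, as the short tail. Decryption mirrors this, but the two tweak values must be consumed in swapped order, which is why FinalizeDecryption saves the current tweak, advances it to decrypt the last full block, then restores the saved value before decrypting the stolen block.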
size_t XtsModeImpl::ProcessPartialData(u8 *dst, const u8 *src, size_t size) {
size_t processed = 0;
std::memcpy(this->buffer + this->num_buffered, src, size);
this->num_buffered += size;
if (this->num_buffered == BlockSize) {
if (this->state == State_Processing) {
this->ProcessBlock(dst, this->last_block);
processed += BlockSize;
}
std::memcpy(this->last_block, this->buffer, BlockSize);
this->num_buffered = 0;
this->state = State_Processing;
}
return processed;
}
size_t XtsModeImpl::ProcessRemainingData(u8 *dst, const u8 *src, size_t size) {
std::memcpy(this->buffer, src, size);
this->num_buffered = size;
return 0;
}
}
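Worked trace of these primitives encrypting 36 bytes (BlockSize = 16), assuming a fresh context whose Update plumbing routes whole-block input through ProcessPartialData (the Update logic itself lives in the suppressed diff above): feeding P0 (16 bytes) fills the buffer; the state is not yet State_Processing, so nothing is emitted and P0 becomes last_block. Feeding P1 (16 bytes) emits the encryption of P0 (16 bytes) and makes P1 the new last_block. Feeding the final 4 bytes goes to ProcessRemainingData, which buffers them and returns 0. FinalizeEncryption then takes the num_buffered != 0 path and emits BlockSize + num_buffered = 20 bytes, for 36 bytes of ciphertext in total.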