/*
 * Copyright 2017 - 2021 Justas Masiulis
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef JM_XORSTR_HPP
#define JM_XORSTR_HPP

#define JM_XORSTR_DISABLE_AVX_INTRINSICS

#if defined(_M_ARM64) || defined(__aarch64__) || defined(_M_ARM) || \
    defined(__arm__)
#include <arm_neon.h>
#elif defined(_M_X64) || defined(__amd64__) || defined(_M_IX86) || \
    defined(__i386__)
#include <immintrin.h>
#else
#error Unsupported platform
#endif

#include <cstddef>
#include <cstdint>
#include <type_traits>
#include <utility>

#define xorstr(str)                                                           \
    ::jm::xor_string(                                                         \
        []() { return str; },                                                 \
        std::integral_constant<std::size_t, sizeof(str) / sizeof(*str)>{},    \
        std::make_index_sequence<::jm::detail::_buffer_size<sizeof(str)>()>{})

#define xorstr2(str)                                                          \
    ::jm::xor_string(                                                         \
        []() { return str; },                                                 \
        std::integral_constant<std::size_t, (sizeof(str) / sizeof(*str)) + 1>{}, \
        std::make_index_sequence<                                             \
            ::jm::detail::_buffer_size<sizeof(str) + 1>()>{})

#define xorstr_(str) xorstr(str).crypt_get()
#define _xor_(str) xorstr(str).crypt_get()
#define _x(str) xorstr(str).crypt_get()
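
// Notes on the interface (added for clarity; not in the original header):
//  - xorstr(str) builds a jm::xor_string from a literal. The lambda defers
//    materializing the literal, so only the encrypted words reach the binary.
//  - xorstr2(str) sizes the buffer for one extra character.
//  - xorstr_(str), _xor_(str) and _x(str) decrypt in place and return a
//    plain pointer, e.g.:
//
//      std::puts(xorstr_("hello"));        // one-shot decrypt-and-use
//      auto s = xorstr(L"wide works too"); // CharT deduced from the literal
//      s.crypt();                          // XOR in place: now plaintext
//      /* ... use s.get() ... */
//      s.crypt();                          // XOR again: re-encrypted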

#ifdef _MSC_VER
#define XORSTR_FORCEINLINE __forceinline
#else
#define XORSTR_FORCEINLINE __attribute__((always_inline)) inline
#endif

#if defined(__clang__) || defined(__GNUC__)
#define JM_XORSTR_LOAD_FROM_REG(x) ::jm::detail::load_from_reg(x)
#else
#define JM_XORSTR_LOAD_FROM_REG(x) (x)
#endif

namespace jm {

namespace detail {

// number of 64-bit words needed to store Size bytes, rounded up to whole
// 16-byte blocks
template <std::size_t Size>
XORSTR_FORCEINLINE constexpr std::size_t _buffer_size() {
    return ((Size / 16) + (Size % 16 != 0)) * 2;
}
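
// Worked example (added): sizeof("hello") == 6 bytes, so one 16-byte block
// suffices and _buffer_size<6>() == 2 words; sizeof(L"hello") == 24 where
// wchar_t is 4 bytes, needing two blocks, so _buffer_size<24>() == 4.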

template <std::uint32_t Seed>
XORSTR_FORCEINLINE constexpr std::uint32_t key4() noexcept {
    std::uint32_t value = Seed;
    for (char c : __TIME__)
        value = static_cast<std::uint32_t>((value ^ c) * 16777619ull);
    return value;
}

template <std::size_t S> XORSTR_FORCEINLINE constexpr std::uint64_t key8() {
    constexpr auto first_part  = key4<2166136261 + S>();
    constexpr auto second_part = key4<first_part>();
    return (static_cast<std::uint64_t>(first_part) << 32) | second_part;
}
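
// Note (added): 2166136261 and 16777619 are the 32-bit FNV-1a offset basis
// and prime, so key8<S>() is an FNV-1a hash of __TIME__ seeded with the chunk
// index S. Keys therefore differ between builds and between 8-byte chunks.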

// loads up to 8 characters of string into uint64 and xors it with the key
template <std::size_t N, class CharT>
XORSTR_FORCEINLINE constexpr std::uint64_t
load_xored_str8(std::uint64_t key, std::size_t idx, const CharT* str) noexcept {
    using cast_type = typename std::make_unsigned<CharT>::type;
    constexpr auto value_size = sizeof(CharT);
    constexpr auto idx_offset = 8 / value_size;

    std::uint64_t value = key;
    for (std::size_t i = 0; i < idx_offset && i + idx * idx_offset < N; ++i)
        value ^=
            (std::uint64_t{ static_cast<cast_type>(str[i + idx * idx_offset]) }
             << ((i % idx_offset) * 8 * value_size));

    return value;
}

// forces compiler to use registers instead of stuffing constants in rdata
XORSTR_FORCEINLINE std::uint64_t load_from_reg(std::uint64_t value) noexcept {
#if defined(__clang__) || defined(__GNUC__)
    asm("" : "=r"(value) : "0"(value) :);
#endif
    return value;
}
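
// Note (added): the empty asm statement with matching "=r"/"0" constraints
// acts as an optimization barrier. The compiler must materialize the value in
// a general-purpose register instead of constant-folding the key into a
// literal that would sit readable in the binary's .rdata section.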

} // namespace detail

template <class CharT, std::size_t Size, class Keys, class Indices>
class xor_string;

template <class CharT, std::size_t Size, std::uint64_t... Keys,
          std::size_t... Indices>
class xor_string<CharT, Size, std::integer_sequence<std::uint64_t, Keys...>,
                 std::index_sequence<Indices...>> {
#ifndef JM_XORSTR_DISABLE_AVX_INTRINSICS
    constexpr static inline std::uint64_t alignment = ((Size > 16) ? 32 : 16);
#else
    constexpr static inline std::uint64_t alignment = 16;
#endif

    alignas(alignment) std::uint64_t _storage[sizeof...(Keys)];

public:
    using value_type    = CharT;
    using size_type     = std::size_t;
    using pointer       = CharT*;
    using const_pointer = const CharT*;

    // the integral_constant forces load_xored_str8 to run at compile time,
    // so only the already-encrypted words appear in the emitted code
    template <class L>
    XORSTR_FORCEINLINE xor_string(L l, std::integral_constant<std::size_t, Size>,
                                  std::index_sequence<Indices...>) noexcept
        : _storage{ JM_XORSTR_LOAD_FROM_REG(
              (std::integral_constant<std::uint64_t,
                                      detail::load_xored_str8<Size>(
                                          Keys, Indices, l())>::value))... } {}

    // length in characters, excluding the null terminator
    XORSTR_FORCEINLINE constexpr size_type size() const noexcept {
        return Size - 1;
    }

    // XORs _storage with the keys in place, one 16- or 32-byte block per pack
    // element; calling it again re-encrypts, since XOR is an involution
    XORSTR_FORCEINLINE void crypt() noexcept {
        // everything is inlined by hand because a certain compiler with a
        // certain linker is _very_ slow
#if defined(__clang__)
        alignas(alignment) std::uint64_t arr[]{ JM_XORSTR_LOAD_FROM_REG(Keys)... };
        std::uint64_t* keys =
            (std::uint64_t*)JM_XORSTR_LOAD_FROM_REG((std::uint64_t)arr);
#else
        alignas(alignment) std::uint64_t keys[]{ JM_XORSTR_LOAD_FROM_REG(Keys)... };
#endif

#if defined(_M_ARM64) || defined(__aarch64__) || defined(_M_ARM) || \
    defined(__arm__)
#if defined(__clang__)
        ((Indices >= sizeof(_storage) / 16
              ? static_cast<void>(0)
              : __builtin_neon_vst1q_v(
                    reinterpret_cast<uint64_t*>(_storage) + Indices * 2,
                    veorq_u64(
                        __builtin_neon_vld1q_v(
                            reinterpret_cast<const uint64_t*>(_storage) +
                                Indices * 2,
                            51),
                        __builtin_neon_vld1q_v(
                            reinterpret_cast<const uint64_t*>(keys) + Indices * 2,
                            51)),
                    51)),
         ...);
#else // GCC, MSVC
        ((Indices >= sizeof(_storage) / 16
              ? static_cast<void>(0)
              : vst1q_u64(
                    reinterpret_cast<uint64_t*>(_storage) + Indices * 2,
                    veorq_u64(
                        vld1q_u64(reinterpret_cast<const uint64_t*>(_storage) +
                                  Indices * 2),
                        vld1q_u64(reinterpret_cast<const uint64_t*>(keys) +
                                  Indices * 2)))),
         ...);
#endif
#elif !defined(JM_XORSTR_DISABLE_AVX_INTRINSICS)
        ((Indices >= sizeof(_storage) / 32
              ? static_cast<void>(0)
              : _mm256_store_si256(
                    reinterpret_cast<__m256i*>(_storage) + Indices,
                    _mm256_xor_si256(
                        _mm256_load_si256(
                            reinterpret_cast<const __m256i*>(_storage) + Indices),
                        _mm256_load_si256(
                            reinterpret_cast<const __m256i*>(keys) + Indices)))),
         ...);

        if constexpr (sizeof(_storage) % 32 != 0)
            _mm_store_si128(
                reinterpret_cast<__m128i*>(_storage + sizeof...(Keys) - 2),
                _mm_xor_si128(_mm_load_si128(reinterpret_cast<const __m128i*>(
                                  _storage + sizeof...(Keys) - 2)),
                              _mm_load_si128(reinterpret_cast<const __m128i*>(
                                  keys + sizeof...(Keys) - 2))));
#else
        ((Indices >= sizeof(_storage) / 16
              ? static_cast<void>(0)
              : _mm_store_si128(
                    reinterpret_cast<__m128i*>(_storage) + Indices,
                    _mm_xor_si128(
                        _mm_load_si128(
                            reinterpret_cast<const __m128i*>(_storage) + Indices),
                        _mm_load_si128(
                            reinterpret_cast<const __m128i*>(keys) + Indices)))),
         ...);
#endif
    }

    XORSTR_FORCEINLINE const_pointer get() const noexcept {
        return reinterpret_cast<const_pointer>(_storage);
    }

    XORSTR_FORCEINLINE pointer get() noexcept {
        return reinterpret_cast<pointer>(_storage);
    }

    XORSTR_FORCEINLINE pointer crypt_get() noexcept {
        // crypt() is inlined by hand because a certain compiler with a
        // certain linker is _very_ slow
#if defined(__clang__)
        alignas(alignment) std::uint64_t arr[]{ JM_XORSTR_LOAD_FROM_REG(Keys)... };
        std::uint64_t* keys =
            (std::uint64_t*)JM_XORSTR_LOAD_FROM_REG((std::uint64_t)arr);
#else
        alignas(alignment) std::uint64_t keys[]{ JM_XORSTR_LOAD_FROM_REG(Keys)... };
#endif

#if defined(_M_ARM64) || defined(__aarch64__) || defined(_M_ARM) || \
    defined(__arm__)
#if defined(__clang__)
        ((Indices >= sizeof(_storage) / 16
              ? static_cast<void>(0)
              : __builtin_neon_vst1q_v(
                    reinterpret_cast<uint64_t*>(_storage) + Indices * 2,
                    veorq_u64(
                        __builtin_neon_vld1q_v(
                            reinterpret_cast<const uint64_t*>(_storage) +
                                Indices * 2,
                            51),
                        __builtin_neon_vld1q_v(
                            reinterpret_cast<const uint64_t*>(keys) + Indices * 2,
                            51)),
                    51)),
         ...);
#else // GCC, MSVC
        ((Indices >= sizeof(_storage) / 16
              ? static_cast<void>(0)
              : vst1q_u64(
                    reinterpret_cast<uint64_t*>(_storage) + Indices * 2,
                    veorq_u64(
                        vld1q_u64(reinterpret_cast<const uint64_t*>(_storage) +
                                  Indices * 2),
                        vld1q_u64(reinterpret_cast<const uint64_t*>(keys) +
                                  Indices * 2)))),
         ...);
#endif
#elif !defined(JM_XORSTR_DISABLE_AVX_INTRINSICS)
        ((Indices >= sizeof(_storage) / 32
              ? static_cast<void>(0)
              : _mm256_store_si256(
                    reinterpret_cast<__m256i*>(_storage) + Indices,
                    _mm256_xor_si256(
                        _mm256_load_si256(
                            reinterpret_cast<const __m256i*>(_storage) + Indices),
                        _mm256_load_si256(
                            reinterpret_cast<const __m256i*>(keys) + Indices)))),
         ...);

        if constexpr (sizeof(_storage) % 32 != 0)
            _mm_store_si128(
                reinterpret_cast<__m128i*>(_storage + sizeof...(Keys) - 2),
                _mm_xor_si128(_mm_load_si128(reinterpret_cast<const __m128i*>(
                                  _storage + sizeof...(Keys) - 2)),
                              _mm_load_si128(reinterpret_cast<const __m128i*>(
                                  keys + sizeof...(Keys) - 2))));
#else
        ((Indices >= sizeof(_storage) / 16
              ? static_cast<void>(0)
              : _mm_store_si128(
                    reinterpret_cast<__m128i*>(_storage) + Indices,
                    _mm_xor_si128(
                        _mm_load_si128(
                            reinterpret_cast<const __m128i*>(_storage) + Indices),
                        _mm_load_si128(
                            reinterpret_cast<const __m128i*>(keys) + Indices)))),
         ...);
#endif

        return (pointer)(_storage);
    }
};

// deduces CharT from the lambda's literal and generates one 64-bit key per
// storage word via key8
template <class L, std::size_t Size, std::size_t... Indices>
xor_string(L l, std::integral_constant<std::size_t, Size>,
           std::index_sequence<Indices...>)
    -> xor_string<
        std::remove_const_t<std::remove_reference_t<decltype(l()[0])>>, Size,
        std::integer_sequence<std::uint64_t, detail::key8<Indices>()...>,
        std::index_sequence<Indices...>>;

} // namespace jm
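
// Example consumer (added sketch; assumes this header is saved as
// "xorstr.hpp" and compiled as C++17 or later):
//
//   #include "xorstr.hpp"
//   #include <cstdio>
//
//   int main() {
//       // the literal never appears in plaintext in the binary; it is
//       // decrypted on the stack at the call site
//       std::puts(xorstr_("never stored in plaintext"));
//   }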

#endif // include guard