/*
 * Copyright 2017 - 2021 Justas Masiulis
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef JM_XORSTR_HPP
#define JM_XORSTR_HPP

#define JM_XORSTR_DISABLE_AVX_INTRINSICS

#if defined(_M_ARM64) || defined(__aarch64__) || defined(_M_ARM) ||           \
    defined(__arm__)
#include <arm_neon.h>
#elif defined(_M_X64) || defined(__amd64__) || defined(_M_IX86) ||            \
    defined(__i386__)
#include <immintrin.h>
#else
#error Unsupported platform
#endif

#include <cstddef>
#include <cstdint>
#include <type_traits>
#include <utility>

#define xorstr(str)                                                           \
  ::jm::xor_string(                                                           \
      []() { return str; },                                                   \
      std::integral_constant<std::size_t, sizeof(str) / sizeof(*str)>{},      \
      std::make_index_sequence<::jm::detail::_buffer_size<sizeof(str)>()>{})
#define xorstr2(str)                                                          \
  ::jm::xor_string(                                                           \
      []() { return str; },                                                   \
      std::integral_constant<std::size_t, sizeof(str) / sizeof(*str)>{},      \
      std::make_index_sequence<                                               \
          ::jm::detail::_buffer_size<sizeof(str)>()>{})
#define xorstr_(str) xorstr(str).crypt_get()
#define _xor_(str) xorstr(str).crypt_get()
#define _x(str) xorstr(str).crypt_get()

#ifdef _MSC_VER
#define XORSTR_FORCEINLINE __forceinline
#else
#define XORSTR_FORCEINLINE __attribute__((always_inline)) inline
#endif

#if defined(__clang__) || defined(__GNUC__)
#define JM_XORSTR_LOAD_FROM_REG(x) ::jm::detail::load_from_reg(x)
#else
#define JM_XORSTR_LOAD_FROM_REG(x) x
#endif
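// Usage sketch (illustrative only, assuming the header is saved as
// "xorstr.hpp"): `xorstr("...")` builds an encrypted jm::xor_string object,
// while `xorstr_`, `_xor_` and `_x` additionally decrypt it in place and
// return a plain pointer to the decrypted characters.
//
//   #include "xorstr.hpp"
//   #include <cstdio>
//
//   int main() {
//     // one-shot: decrypt in place and print
//     std::puts(xorstr_("stored encrypted in the binary"));
//
//     // or keep the object around and decrypt explicitly
//     auto s = xorstr("another literal");
//     s.crypt();          // XORs the storage back to plaintext
//     std::puts(s.get()); // calling crypt() again would re-encrypt it
//   }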
namespace jm {

namespace detail {

template <std::size_t Size>
XORSTR_FORCEINLINE constexpr std::size_t _buffer_size() {
  return ((Size / 16) + (Size % 16 != 0)) * 2;
}

template <std::uint32_t Seed>
XORSTR_FORCEINLINE constexpr std::uint32_t key4() noexcept {
  std::uint32_t value = Seed;
  for (char c : __TIME__)
    value = static_cast<std::uint32_t>((value ^ c) * 16777619ull);
  return value;
}

template <std::size_t S>
XORSTR_FORCEINLINE constexpr std::uint64_t key8() {
  constexpr auto first_part = key4<2166136261 + S>();
  constexpr auto second_part = key4<first_part>();
  return (static_cast<std::uint64_t>(first_part) << 32) | second_part;
}

// loads up to 8 characters of string into uint64 and xors it with the key
template <std::size_t N, class CharT>
XORSTR_FORCEINLINE constexpr std::uint64_t
load_xored_str8(std::uint64_t key, std::size_t idx,
                const CharT *str) noexcept {
  using cast_type = typename std::make_unsigned<CharT>::type;
  constexpr auto value_size = sizeof(CharT);
  constexpr auto idx_offset = 8 / value_size;

  std::uint64_t value = key;
  for (std::size_t i = 0; i < idx_offset && i + idx * idx_offset < N; ++i)
    value ^= (std::uint64_t{static_cast<cast_type>(str[i + idx * idx_offset])}
              << ((i % idx_offset) * 8 * value_size));

  return value;
}

// forces compiler to use registers instead of stuffing constants in rdata
XORSTR_FORCEINLINE std::uint64_t load_from_reg(std::uint64_t value) noexcept {
#if defined(__clang__) || defined(__GNUC__)
  asm("" : "=r"(value) : "0"(value) :);
#endif
  return value;
}

} // namespace detail
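// Worked example (illustrative): xorstr("hello world") sees a 12-byte literal
// (11 characters plus the terminator), so _buffer_size<12>() ==
// ((12 / 16) + 1) * 2 == 2 uint64 slots, i.e. one 16-byte SIMD block. Each
// slot is seeded with its own key8<I>() value, an FNV-1a-style hash of
// __TIME__, so the bytes embedded in the binary change from build to build.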
template <class CharT, std::size_t Size, class Keys, class Indices>
class xor_string;

template <class CharT, std::size_t Size, std::uint64_t... Keys,
          std::size_t... Indices>
class xor_string<CharT, Size, std::integer_sequence<std::uint64_t, Keys...>,
                 std::index_sequence<Indices...>> {
#ifndef JM_XORSTR_DISABLE_AVX_INTRINSICS
  constexpr static inline std::uint64_t alignment = ((Size > 16) ? 32 : 16);
#else
  constexpr static inline std::uint64_t alignment = 16;
#endif

  alignas(alignment) std::uint64_t _storage[sizeof...(Keys)];

public:
  using value_type = CharT;
  using size_type = std::size_t;
  using pointer = CharT *;
  using const_pointer = const CharT *;

  template <class L>
  XORSTR_FORCEINLINE xor_string(L l, std::integral_constant<std::size_t, Size>,
                                std::index_sequence<Indices...>) noexcept
      : _storage{JM_XORSTR_LOAD_FROM_REG(
            (std::integral_constant<std::uint64_t,
                                    detail::load_xored_str8<Size>(
                                        Keys, Indices, l())>::value))...} {}

  XORSTR_FORCEINLINE constexpr size_type size() const noexcept {
    return Size - 1;
  }

  XORSTR_FORCEINLINE void crypt() noexcept {
    // everything is inlined by hand because a certain compiler with a certain
    // linker is _very_ slow
#if defined(__clang__)
    alignas(alignment) std::uint64_t arr[]{JM_XORSTR_LOAD_FROM_REG(Keys)...};
    std::uint64_t *keys =
        (std::uint64_t *)JM_XORSTR_LOAD_FROM_REG((std::uint64_t)arr);
#else
    alignas(alignment) std::uint64_t keys[]{JM_XORSTR_LOAD_FROM_REG(Keys)...};
#endif

#if defined(_M_ARM64) || defined(__aarch64__) || defined(_M_ARM) ||           \
    defined(__arm__)
#if defined(__clang__)
    ((Indices >= sizeof(_storage) / 16
          ? static_cast<void>(0)
          : __builtin_neon_vst1q_v(
                reinterpret_cast<uint64_t *>(_storage) + Indices * 2,
                veorq_u64(__builtin_neon_vld1q_v(
                              reinterpret_cast<const uint64_t *>(_storage) +
                                  Indices * 2,
                              51),
                          __builtin_neon_vld1q_v(
                              reinterpret_cast<const uint64_t *>(keys) +
                                  Indices * 2,
                              51)),
                51)),
     ...);
#else // GCC, MSVC
    ((Indices >= sizeof(_storage) / 16
          ? static_cast<void>(0)
          : vst1q_u64(reinterpret_cast<uint64_t *>(_storage) + Indices * 2,
                      veorq_u64(vld1q_u64(
                                    reinterpret_cast<const uint64_t *>(
                                        _storage) +
                                    Indices * 2),
                                vld1q_u64(
                                    reinterpret_cast<const uint64_t *>(keys) +
                                    Indices * 2)))),
     ...);
#endif
#elif !defined(JM_XORSTR_DISABLE_AVX_INTRINSICS)
    ((Indices >= sizeof(_storage) / 32
          ? static_cast<void>(0)
          : _mm256_store_si256(
                reinterpret_cast<__m256i *>(_storage) + Indices,
                _mm256_xor_si256(
                    _mm256_load_si256(
                        reinterpret_cast<const __m256i *>(_storage) + Indices),
                    _mm256_load_si256(reinterpret_cast<const __m256i *>(keys) +
                                      Indices)))),
     ...);

    if constexpr (sizeof(_storage) % 32 != 0)
      _mm_store_si128(
          reinterpret_cast<__m128i *>(_storage + sizeof...(Keys) - 2),
          _mm_xor_si128(_mm_load_si128(reinterpret_cast<const __m128i *>(
                            _storage + sizeof...(Keys) - 2)),
                        _mm_load_si128(reinterpret_cast<const __m128i *>(
                            keys + sizeof...(Keys) - 2))));
#else
    ((Indices >= sizeof(_storage) / 16
          ? static_cast<void>(0)
          : _mm_store_si128(
                reinterpret_cast<__m128i *>(_storage) + Indices,
                _mm_xor_si128(
                    _mm_load_si128(
                        reinterpret_cast<const __m128i *>(_storage) + Indices),
                    _mm_load_si128(reinterpret_cast<const __m128i *>(keys) +
                                   Indices)))),
     ...);
#endif
  }

  XORSTR_FORCEINLINE const_pointer get() const noexcept {
    return reinterpret_cast<const_pointer>(_storage);
  }

  XORSTR_FORCEINLINE pointer get() noexcept {
    return reinterpret_cast<pointer>(_storage);
  }

  XORSTR_FORCEINLINE pointer crypt_get() noexcept {
    // crypt() is inlined by hand because a certain compiler with a certain
    // linker is _very_ slow
#if defined(__clang__)
    alignas(alignment) std::uint64_t arr[]{JM_XORSTR_LOAD_FROM_REG(Keys)...};
    std::uint64_t *keys =
        (std::uint64_t *)JM_XORSTR_LOAD_FROM_REG((std::uint64_t)arr);
#else
    alignas(alignment) std::uint64_t keys[]{JM_XORSTR_LOAD_FROM_REG(Keys)...};
#endif

#if defined(_M_ARM64) || defined(__aarch64__) || defined(_M_ARM) ||           \
    defined(__arm__)
#if defined(__clang__)
    ((Indices >= sizeof(_storage) / 16
          ? static_cast<void>(0)
          : __builtin_neon_vst1q_v(
                reinterpret_cast<uint64_t *>(_storage) + Indices * 2,
                veorq_u64(__builtin_neon_vld1q_v(
                              reinterpret_cast<const uint64_t *>(_storage) +
                                  Indices * 2,
                              51),
                          __builtin_neon_vld1q_v(
                              reinterpret_cast<const uint64_t *>(keys) +
                                  Indices * 2,
                              51)),
                51)),
     ...);
#else // GCC, MSVC
    ((Indices >= sizeof(_storage) / 16
          ? static_cast<void>(0)
          : vst1q_u64(reinterpret_cast<uint64_t *>(_storage) + Indices * 2,
                      veorq_u64(vld1q_u64(
                                    reinterpret_cast<const uint64_t *>(
                                        _storage) +
                                    Indices * 2),
                                vld1q_u64(
                                    reinterpret_cast<const uint64_t *>(keys) +
                                    Indices * 2)))),
     ...);
#endif
#elif !defined(JM_XORSTR_DISABLE_AVX_INTRINSICS)
    ((Indices >= sizeof(_storage) / 32
          ? static_cast<void>(0)
          : _mm256_store_si256(
                reinterpret_cast<__m256i *>(_storage) + Indices,
                _mm256_xor_si256(
                    _mm256_load_si256(
                        reinterpret_cast<const __m256i *>(_storage) + Indices),
                    _mm256_load_si256(reinterpret_cast<const __m256i *>(keys) +
                                      Indices)))),
     ...);

    if constexpr (sizeof(_storage) % 32 != 0)
      _mm_store_si128(
          reinterpret_cast<__m128i *>(_storage + sizeof...(Keys) - 2),
          _mm_xor_si128(_mm_load_si128(reinterpret_cast<const __m128i *>(
                            _storage + sizeof...(Keys) - 2)),
                        _mm_load_si128(reinterpret_cast<const __m128i *>(
                            keys + sizeof...(Keys) - 2))));
#else
    ((Indices >= sizeof(_storage) / 16
          ? static_cast<void>(0)
          : _mm_store_si128(
                reinterpret_cast<__m128i *>(_storage) + Indices,
                _mm_xor_si128(
                    _mm_load_si128(
                        reinterpret_cast<const __m128i *>(_storage) + Indices),
                    _mm_load_si128(reinterpret_cast<const __m128i *>(keys) +
                                   Indices)))),
     ...);
#endif
    return (pointer)(_storage);
  }
};

template <class L, std::size_t Size, std::size_t... Indices>
xor_string(L l, std::integral_constant<std::size_t, Size>,
           std::index_sequence<Indices...>)
    -> xor_string<
        std::remove_const_t<std::remove_reference_t<decltype(l()[0])>>, Size,
        std::integer_sequence<std::uint64_t, detail::key8<Indices>()...>,
        std::index_sequence<Indices...>>;

} // namespace jm

#endif // include guard