deps: update simdjson to 3.9.2

PR-URL: https://github.com/nodejs/node/pull/52947
Reviewed-By: Marco Ippolito <marcoippolito54@gmail.com>
Reviewed-By: Rafael Gonzaga <rafael.nunu@hotmail.com>
Reviewed-By: Luigi Pinca <luigipinca@gmail.com>
This commit is contained in:
Node.js GitHub Bot 2024-05-14 03:45:50 +03:00 committed by GitHub
parent 9987a2f359
commit e28292572e
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 256 additions and 182 deletions

View File

@ -1,4 +1,4 @@
/* auto-generated on 2024-04-05 15:17:57 -0400. Do not edit! */
/* auto-generated on 2024-05-07 18:04:59 -0400. Do not edit! */
/* including simdjson.cpp: */
/* begin file simdjson.cpp */
#define SIMDJSON_SRC_SIMDJSON_CPP
@ -105,9 +105,9 @@
#endif // __clang__
#endif // _MSC_VER
#if defined(__x86_64__) || defined(_M_AMD64)
#if (defined(__x86_64__) || defined(_M_AMD64)) && !defined(_M_ARM64EC)
#define SIMDJSON_IS_X86_64 1
#elif defined(__aarch64__) || defined(_M_ARM64)
#elif defined(__aarch64__) || defined(_M_ARM64) || defined(_M_ARM64EC)
#define SIMDJSON_IS_ARM64 1
#elif defined(__riscv) && __riscv_xlen == 64
#define SIMDJSON_IS_RISCV64 1
@ -6139,7 +6139,7 @@ public:
*
* @return the name of the implementation, e.g. "haswell", "westmere", "arm64".
*/
virtual const std::string &name() const { return _name; }
virtual std::string name() const { return std::string(_name); }
/**
* The description of this implementation.
@ -6149,7 +6149,7 @@ public:
*
* @return the description of the implementation, e.g. "Intel/AMD AVX2", "Intel/AMD SSE4.2", "ARM NEON".
*/
virtual const std::string &description() const { return _description; }
virtual std::string description() const { return std::string(_description); }
/**
* The instruction sets this implementation is compiled against
@ -6225,18 +6225,19 @@ protected:
_required_instruction_sets(required_instruction_sets)
{
}
virtual ~implementation()=default;
protected:
~implementation() = default;
private:
/**
* The name of this implementation.
*/
const std::string _name;
std::string_view _name;
/**
* The description of this implementation.
*/
const std::string _description;
std::string_view _description;
/**
* Instruction sets required for this implementation.
@ -6712,7 +6713,7 @@ static inline uint32_t detect_supported_architectures() {
return instruction_set::ALTIVEC;
}
#elif defined(__aarch64__) || defined(_M_ARM64)
#elif defined(__aarch64__) || defined(_M_ARM64) || defined(_M_ARM64EC)
static inline uint32_t detect_supported_architectures() {
return instruction_set::NEON;
@ -7346,8 +7347,8 @@ namespace internal {
*/
class detect_best_supported_implementation_on_first_use final : public implementation {
public:
const std::string &name() const noexcept final { return set_best()->name(); }
const std::string &description() const noexcept final { return set_best()->description(); }
std::string name() const noexcept final { return set_best()->name(); }
std::string description() const noexcept final { return set_best()->description(); }
uint32_t required_instruction_sets() const noexcept final { return set_best()->required_instruction_sets(); }
simdjson_warn_unused error_code create_dom_parser_implementation(
size_t capacity,
@ -7367,6 +7368,8 @@ private:
const implementation *set_best() const noexcept;
};
static_assert(std::is_trivially_destructible<detect_best_supported_implementation_on_first_use>::value, "detect_best_supported_implementation_on_first_use should be trivially destructible");
static const std::initializer_list<const implementation *>& get_available_implementation_pointers() {
static const std::initializer_list<const implementation *> available_implementation_pointers {
#if SIMDJSON_IMPLEMENTATION_ICELAKE
@ -7423,6 +7426,8 @@ public:
unsupported_implementation() : implementation("unsupported", "Unsupported CPU (no detected SIMD instructions)", 0) {}
};
static_assert(std::is_trivially_destructible<unsupported_implementation>::value, "unsupported_singleton should be trivially destructible");
const unsupported_implementation* get_unsupported_singleton() {
static const unsupported_implementation unsupported_singleton{};
return &unsupported_singleton;
@ -7739,10 +7744,10 @@ simdjson_inline uint64_t prefix_xor(uint64_t bitmask) {
#include <cstring>
#if _M_ARM64
#if SIMDJSON_REGULAR_VISUAL_STUDIO && SIMDJSON_IS_ARM64
// __umulh requires intrin.h
#include <intrin.h>
#endif // _M_ARM64
#endif // SIMDJSON_REGULAR_VISUAL_STUDIO && SIMDJSON_IS_ARM64
namespace simdjson {
namespace arm64 {
@ -7762,13 +7767,13 @@ static simdjson_inline uint32_t parse_eight_digits_unrolled(const uint8_t *chars
simdjson_inline internal::value128 full_multiplication(uint64_t value1, uint64_t value2) {
internal::value128 answer;
#if SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
#ifdef _M_ARM64
#if SIMDJSON_IS_ARM64
// ARM64 has native support for 64-bit multiplications, no need to emulate
answer.high = __umulh(value1, value2);
answer.low = value1 * value2;
#else
answer.low = _umul128(value1, value2, &answer.high); // _umul128 not available on ARM64
#endif // _M_ARM64
#endif // SIMDJSON_IS_ARM64
#else // SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
__uint128_t r = (static_cast<__uint128_t>(value1)) * value2;
answer.low = uint64_t(r);
@ -7923,7 +7928,7 @@ namespace {
tmp = vpaddq_u8(tmp, tmp);
return vgetq_lane_u16(vreinterpretq_u16_u8(tmp), 0);
}
simdjson_inline bool any() const { return vmaxvq_u8(*this) != 0; }
simdjson_inline bool any() const { return vmaxvq_u32(vreinterpretq_u32_u8(*this)) != 0; }
};
// Unsigned bytes
@ -10495,10 +10500,10 @@ simdjson_inline uint64_t prefix_xor(uint64_t bitmask) {
#include <cstring>
#if _M_ARM64
#if SIMDJSON_REGULAR_VISUAL_STUDIO && SIMDJSON_IS_ARM64
// __umulh requires intrin.h
#include <intrin.h>
#endif // _M_ARM64
#endif // SIMDJSON_REGULAR_VISUAL_STUDIO && SIMDJSON_IS_ARM64
namespace simdjson {
namespace arm64 {
@ -10518,13 +10523,13 @@ static simdjson_inline uint32_t parse_eight_digits_unrolled(const uint8_t *chars
simdjson_inline internal::value128 full_multiplication(uint64_t value1, uint64_t value2) {
internal::value128 answer;
#if SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
#ifdef _M_ARM64
#if SIMDJSON_IS_ARM64
// ARM64 has native support for 64-bit multiplications, no need to emulate
answer.high = __umulh(value1, value2);
answer.low = value1 * value2;
#else
answer.low = _umul128(value1, value2, &answer.high); // _umul128 not available on ARM64
#endif // _M_ARM64
#endif // SIMDJSON_IS_ARM64
#else // SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
__uint128_t r = (static_cast<__uint128_t>(value1)) * value2;
answer.low = uint64_t(r);
@ -10679,7 +10684,7 @@ namespace {
tmp = vpaddq_u8(tmp, tmp);
return vgetq_lane_u16(vreinterpretq_u16_u8(tmp), 0);
}
simdjson_inline bool any() const { return vmaxvq_u8(*this) != 0; }
simdjson_inline bool any() const { return vmaxvq_u32(vreinterpretq_u32_u8(*this)) != 0; }
};
// Unsigned bytes
@ -14236,13 +14241,13 @@ static simdjson_inline uint32_t parse_eight_digits_unrolled(const uint8_t *chars
simdjson_inline internal::value128 full_multiplication(uint64_t value1, uint64_t value2) {
internal::value128 answer;
#if SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
#ifdef _M_ARM64
#if SIMDJSON_IS_ARM64
// ARM64 has native support for 64-bit multiplications, no need to emulate
answer.high = __umulh(value1, value2);
answer.low = value1 * value2;
#else
answer.low = _umul128(value1, value2, &answer.high); // _umul128 not available on ARM64
#endif // _M_ARM64
#endif // SIMDJSON_IS_ARM64
#else // SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
__uint128_t r = (static_cast<__uint128_t>(value1)) * value2;
answer.low = uint64_t(r);
@ -14328,10 +14333,10 @@ namespace simd {
struct simd8<bool>: base8<bool> {
static simdjson_inline simd8<bool> splat(bool _value) { return _mm256_set1_epi8(uint8_t(-(!!_value))); }
simdjson_inline simd8<bool>() : base8() {}
simdjson_inline simd8<bool>(const __m256i _value) : base8<bool>(_value) {}
simdjson_inline simd8() : base8() {}
simdjson_inline simd8(const __m256i _value) : base8<bool>(_value) {}
// Splat constructor
simdjson_inline simd8<bool>(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline simd8(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline int to_bitmask() const { return _mm256_movemask_epi8(*this); }
simdjson_inline bool any() const { return !_mm256_testz_si256(*this, *this); }
@ -16869,13 +16874,13 @@ static simdjson_inline uint32_t parse_eight_digits_unrolled(const uint8_t *chars
simdjson_inline internal::value128 full_multiplication(uint64_t value1, uint64_t value2) {
internal::value128 answer;
#if SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
#ifdef _M_ARM64
#if SIMDJSON_IS_ARM64
// ARM64 has native support for 64-bit multiplications, no need to emulate
answer.high = __umulh(value1, value2);
answer.low = value1 * value2;
#else
answer.low = _umul128(value1, value2, &answer.high); // _umul128 not available on ARM64
#endif // _M_ARM64
#endif // SIMDJSON_IS_ARM64
#else // SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
__uint128_t r = (static_cast<__uint128_t>(value1)) * value2;
answer.low = uint64_t(r);
@ -16961,10 +16966,10 @@ namespace simd {
struct simd8<bool>: base8<bool> {
static simdjson_inline simd8<bool> splat(bool _value) { return _mm256_set1_epi8(uint8_t(-(!!_value))); }
simdjson_inline simd8<bool>() : base8() {}
simdjson_inline simd8<bool>(const __m256i _value) : base8<bool>(_value) {}
simdjson_inline simd8() : base8() {}
simdjson_inline simd8(const __m256i _value) : base8<bool>(_value) {}
// Splat constructor
simdjson_inline simd8<bool>(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline simd8(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline int to_bitmask() const { return _mm256_movemask_epi8(*this); }
simdjson_inline bool any() const { return !_mm256_testz_si256(*this, *this); }
@ -20510,10 +20515,10 @@ namespace simd {
struct simd8<bool>: base8<bool> {
static simdjson_inline simd8<bool> splat(bool _value) { return _mm512_set1_epi8(uint8_t(-(!!_value))); }
simdjson_inline simd8<bool>() : base8() {}
simdjson_inline simd8<bool>(const __m512i _value) : base8<bool>(_value) {}
simdjson_inline simd8() : base8() {}
simdjson_inline simd8(const __m512i _value) : base8<bool>(_value) {}
// Splat constructor
simdjson_inline simd8<bool>(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline simd8(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline bool any() const { return !!_mm512_test_epi8_mask (*this, *this); }
simdjson_inline simd8<bool> operator~() const { return *this ^ true; }
};
@ -20876,13 +20881,13 @@ static simdjson_inline uint32_t parse_eight_digits_unrolled(const uint8_t *chars
simdjson_inline internal::value128 full_multiplication(uint64_t value1, uint64_t value2) {
internal::value128 answer;
#if SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
#ifdef _M_ARM64
#if SIMDJSON_IS_ARM64
// ARM64 has native support for 64-bit multiplications, no need to emulate
answer.high = __umulh(value1, value2);
answer.low = value1 * value2;
#else
answer.low = _umul128(value1, value2, &answer.high); // _umul128 not available on ARM64
#endif // _M_ARM64
#endif // SIMDJSON_IS_ARM64
#else // SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
__uint128_t r = (static_cast<__uint128_t>(value1)) * value2;
answer.low = uint64_t(r);
@ -23141,10 +23146,10 @@ namespace simd {
struct simd8<bool>: base8<bool> {
static simdjson_inline simd8<bool> splat(bool _value) { return _mm512_set1_epi8(uint8_t(-(!!_value))); }
simdjson_inline simd8<bool>() : base8() {}
simdjson_inline simd8<bool>(const __m512i _value) : base8<bool>(_value) {}
simdjson_inline simd8() : base8() {}
simdjson_inline simd8(const __m512i _value) : base8<bool>(_value) {}
// Splat constructor
simdjson_inline simd8<bool>(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline simd8(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline bool any() const { return !!_mm512_test_epi8_mask (*this, *this); }
simdjson_inline simd8<bool> operator~() const { return *this ^ true; }
};
@ -23507,13 +23512,13 @@ static simdjson_inline uint32_t parse_eight_digits_unrolled(const uint8_t *chars
simdjson_inline internal::value128 full_multiplication(uint64_t value1, uint64_t value2) {
internal::value128 answer;
#if SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
#ifdef _M_ARM64
#if SIMDJSON_IS_ARM64
// ARM64 has native support for 64-bit multiplications, no need to emulate
answer.high = __umulh(value1, value2);
answer.low = value1 * value2;
#else
answer.low = _umul128(value1, value2, &answer.high); // _umul128 not available on ARM64
#endif // _M_ARM64
#endif // SIMDJSON_IS_ARM64
#else // SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
__uint128_t r = (static_cast<__uint128_t>(value1)) * value2;
answer.low = uint64_t(r);
@ -26701,13 +26706,13 @@ static simdjson_inline uint32_t parse_eight_digits_unrolled(const uint8_t *chars
simdjson_inline internal::value128 full_multiplication(uint64_t value1, uint64_t value2) {
internal::value128 answer;
#if SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
#ifdef _M_ARM64
#if SIMDJSON_IS_ARM64
// ARM64 has native support for 64-bit multiplications, no need to emulate
answer.high = __umulh(value1, value2);
answer.low = value1 * value2;
#else
answer.low = _umul128(value1, value2, &answer.high); // _umul128 not available on ARM64
#endif // _M_ARM64
#endif // SIMDJSON_IS_ARM64
#else // SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
__uint128_t r = (static_cast<__uint128_t>(value1)) * value2;
answer.low = uint64_t(r);
@ -26824,11 +26829,11 @@ template <> struct simd8<bool> : base8<bool> {
return (__m128i)vec_splats((unsigned char)(-(!!_value)));
}
simdjson_inline simd8<bool>() : base8<bool>() {}
simdjson_inline simd8<bool>(const __m128i _value)
simdjson_inline simd8() : base8<bool>() {}
simdjson_inline simd8(const __m128i _value)
: base8<bool>(_value) {}
// Splat constructor
simdjson_inline simd8<bool>(bool _value)
simdjson_inline simd8(bool _value)
: base8<bool>(splat(_value)) {}
simdjson_inline int to_bitmask() const {
@ -29445,13 +29450,13 @@ static simdjson_inline uint32_t parse_eight_digits_unrolled(const uint8_t *chars
simdjson_inline internal::value128 full_multiplication(uint64_t value1, uint64_t value2) {
internal::value128 answer;
#if SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
#ifdef _M_ARM64
#if SIMDJSON_IS_ARM64
// ARM64 has native support for 64-bit multiplications, no need to emulate
answer.high = __umulh(value1, value2);
answer.low = value1 * value2;
#else
answer.low = _umul128(value1, value2, &answer.high); // _umul128 not available on ARM64
#endif // _M_ARM64
#endif // SIMDJSON_IS_ARM64
#else // SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
__uint128_t r = (static_cast<__uint128_t>(value1)) * value2;
answer.low = uint64_t(r);
@ -29568,11 +29573,11 @@ template <> struct simd8<bool> : base8<bool> {
return (__m128i)vec_splats((unsigned char)(-(!!_value)));
}
simdjson_inline simd8<bool>() : base8<bool>() {}
simdjson_inline simd8<bool>(const __m128i _value)
simdjson_inline simd8() : base8<bool>() {}
simdjson_inline simd8(const __m128i _value)
: base8<bool>(_value) {}
// Splat constructor
simdjson_inline simd8<bool>(bool _value)
simdjson_inline simd8(bool _value)
: base8<bool>(splat(_value)) {}
simdjson_inline int to_bitmask() const {
@ -33170,13 +33175,13 @@ static simdjson_inline uint32_t parse_eight_digits_unrolled(const uint8_t *chars
simdjson_inline internal::value128 full_multiplication(uint64_t value1, uint64_t value2) {
internal::value128 answer;
#if SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
#ifdef _M_ARM64
#if SIMDJSON_IS_ARM64
// ARM64 has native support for 64-bit multiplications, no need to emulate
answer.high = __umulh(value1, value2);
answer.low = value1 * value2;
#else
answer.low = _umul128(value1, value2, &answer.high); // _umul128 not available on ARM64
#endif // _M_ARM64
#endif // SIMDJSON_IS_ARM64
#else // SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
__uint128_t r = (static_cast<__uint128_t>(value1)) * value2;
answer.low = uint64_t(r);
@ -33256,10 +33261,10 @@ namespace simd {
struct simd8<bool>: base8<bool> {
static simdjson_inline simd8<bool> splat(bool _value) { return _mm_set1_epi8(uint8_t(-(!!_value))); }
simdjson_inline simd8<bool>() : base8() {}
simdjson_inline simd8<bool>(const __m128i _value) : base8<bool>(_value) {}
simdjson_inline simd8() : base8() {}
simdjson_inline simd8(const __m128i _value) : base8<bool>(_value) {}
// Splat constructor
simdjson_inline simd8<bool>(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline simd8(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline int to_bitmask() const { return _mm_movemask_epi8(*this); }
simdjson_inline bool any() const { return !_mm_testz_si128(*this, *this); }
@ -33684,10 +33689,10 @@ namespace simd {
struct simd8<bool>: base8<bool> {
static simdjson_inline simd8<bool> splat(bool _value) { return _mm_set1_epi8(uint8_t(-(!!_value))); }
simdjson_inline simd8<bool>() : base8() {}
simdjson_inline simd8<bool>(const __m128i _value) : base8<bool>(_value) {}
simdjson_inline simd8() : base8() {}
simdjson_inline simd8(const __m128i _value) : base8<bool>(_value) {}
// Splat constructor
simdjson_inline simd8<bool>(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline simd8(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline int to_bitmask() const { return _mm_movemask_epi8(*this); }
simdjson_inline bool any() const { return !_mm_testz_si128(*this, *this); }
@ -36235,13 +36240,13 @@ static simdjson_inline uint32_t parse_eight_digits_unrolled(const uint8_t *chars
simdjson_inline internal::value128 full_multiplication(uint64_t value1, uint64_t value2) {
internal::value128 answer;
#if SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
#ifdef _M_ARM64
#if SIMDJSON_IS_ARM64
// ARM64 has native support for 64-bit multiplications, no need to emulate
answer.high = __umulh(value1, value2);
answer.low = value1 * value2;
#else
answer.low = _umul128(value1, value2, &answer.high); // _umul128 not available on ARM64
#endif // _M_ARM64
#endif // SIMDJSON_IS_ARM64
#else // SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
__uint128_t r = (static_cast<__uint128_t>(value1)) * value2;
answer.low = uint64_t(r);
@ -36321,10 +36326,10 @@ namespace simd {
struct simd8<bool>: base8<bool> {
static simdjson_inline simd8<bool> splat(bool _value) { return _mm_set1_epi8(uint8_t(-(!!_value))); }
simdjson_inline simd8<bool>() : base8() {}
simdjson_inline simd8<bool>(const __m128i _value) : base8<bool>(_value) {}
simdjson_inline simd8() : base8() {}
simdjson_inline simd8(const __m128i _value) : base8<bool>(_value) {}
// Splat constructor
simdjson_inline simd8<bool>(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline simd8(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline int to_bitmask() const { return _mm_movemask_epi8(*this); }
simdjson_inline bool any() const { return !_mm_testz_si128(*this, *this); }
@ -36749,10 +36754,10 @@ namespace simd {
struct simd8<bool>: base8<bool> {
static simdjson_inline simd8<bool> splat(bool _value) { return _mm_set1_epi8(uint8_t(-(!!_value))); }
simdjson_inline simd8<bool>() : base8() {}
simdjson_inline simd8<bool>(const __m128i _value) : base8<bool>(_value) {}
simdjson_inline simd8() : base8() {}
simdjson_inline simd8(const __m128i _value) : base8<bool>(_value) {}
// Splat constructor
simdjson_inline simd8<bool>(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline simd8(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline int to_bitmask() const { return _mm_movemask_epi8(*this); }
simdjson_inline bool any() const { return !_mm_testz_si128(*this, *this); }
@ -40219,10 +40224,10 @@ namespace simd {
return __lsx_vreplgr2vr_b(uint8_t(-(!!_value)));
}
simdjson_inline simd8<bool>() : base8() {}
simdjson_inline simd8<bool>(const __m128i _value) : base8<bool>(_value) {}
simdjson_inline simd8() : base8() {}
simdjson_inline simd8(const __m128i _value) : base8<bool>(_value) {}
// Splat constructor
simdjson_inline simd8<bool>(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline simd8(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline int to_bitmask() const { return __lsx_vpickve2gr_w(__lsx_vmskltz_b(*this), 0); }
simdjson_inline bool any() const { return 0 == __lsx_vpickve2gr_hu(__lsx_vmsknz_b(*this), 0); }
@ -42748,10 +42753,10 @@ namespace simd {
return __lsx_vreplgr2vr_b(uint8_t(-(!!_value)));
}
simdjson_inline simd8<bool>() : base8() {}
simdjson_inline simd8<bool>(const __m128i _value) : base8<bool>(_value) {}
simdjson_inline simd8() : base8() {}
simdjson_inline simd8(const __m128i _value) : base8<bool>(_value) {}
// Splat constructor
simdjson_inline simd8<bool>(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline simd8(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline int to_bitmask() const { return __lsx_vpickve2gr_w(__lsx_vmskltz_b(*this), 0); }
simdjson_inline bool any() const { return 0 == __lsx_vpickve2gr_hu(__lsx_vmsknz_b(*this), 0); }
@ -46204,10 +46209,10 @@ namespace simd {
struct simd8<bool>: base8<bool> {
static simdjson_inline simd8<bool> splat(bool _value) { return __lasx_xvreplgr2vr_b(uint8_t(-(!!_value))); }
simdjson_inline simd8<bool>() : base8() {}
simdjson_inline simd8<bool>(const __m256i _value) : base8<bool>(_value) {}
simdjson_inline simd8() : base8() {}
simdjson_inline simd8(const __m256i _value) : base8<bool>(_value) {}
// Splat constructor
simdjson_inline simd8<bool>(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline simd8(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline int to_bitmask() const {
__m256i mask = __lasx_xvmskltz_b(*this);
@ -48749,10 +48754,10 @@ namespace simd {
struct simd8<bool>: base8<bool> {
static simdjson_inline simd8<bool> splat(bool _value) { return __lasx_xvreplgr2vr_b(uint8_t(-(!!_value))); }
simdjson_inline simd8<bool>() : base8() {}
simdjson_inline simd8<bool>(const __m256i _value) : base8<bool>(_value) {}
simdjson_inline simd8() : base8() {}
simdjson_inline simd8(const __m256i _value) : base8<bool>(_value) {}
// Splat constructor
simdjson_inline simd8<bool>(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline simd8(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline int to_bitmask() const {
__m256i mask = __lasx_xvmskltz_b(*this);
@ -52139,13 +52144,13 @@ static simdjson_inline uint64_t _umul128(uint64_t ab, uint64_t cd, uint64_t *hi)
simdjson_inline internal::value128 full_multiplication(uint64_t value1, uint64_t value2) {
internal::value128 answer;
#if SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
#ifdef _M_ARM64
#if SIMDJSON_IS_ARM64
// ARM64 has native support for 64-bit multiplications, no need to emulate
answer.high = __umulh(value1, value2);
answer.low = value1 * value2;
#else
answer.low = _umul128(value1, value2, &answer.high); // _umul128 not available on ARM64
#endif // _M_ARM64
#endif // SIMDJSON_IS_ARM64
#else // SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
__uint128_t r = (static_cast<__uint128_t>(value1)) * value2;
answer.low = uint64_t(r);
@ -54273,13 +54278,13 @@ static simdjson_inline uint64_t _umul128(uint64_t ab, uint64_t cd, uint64_t *hi)
simdjson_inline internal::value128 full_multiplication(uint64_t value1, uint64_t value2) {
internal::value128 answer;
#if SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
#ifdef _M_ARM64
#if SIMDJSON_IS_ARM64
// ARM64 has native support for 64-bit multiplications, no need to emulate
answer.high = __umulh(value1, value2);
answer.low = value1 * value2;
#else
answer.low = _umul128(value1, value2, &answer.high); // _umul128 not available on ARM64
#endif // _M_ARM64
#endif // SIMDJSON_IS_ARM64
#else // SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
__uint128_t r = (static_cast<__uint128_t>(value1)) * value2;
answer.low = uint64_t(r);

View File

@ -1,4 +1,4 @@
/* auto-generated on 2024-04-05 15:17:57 -0400. Do not edit! */
/* auto-generated on 2024-05-07 18:04:59 -0400. Do not edit! */
/* including simdjson.h: */
/* begin file simdjson.h */
#ifndef SIMDJSON_H
@ -125,9 +125,9 @@
#endif // __clang__
#endif // _MSC_VER
#if defined(__x86_64__) || defined(_M_AMD64)
#if (defined(__x86_64__) || defined(_M_AMD64)) && !defined(_M_ARM64EC)
#define SIMDJSON_IS_X86_64 1
#elif defined(__aarch64__) || defined(_M_ARM64)
#elif defined(__aarch64__) || defined(_M_ARM64) || defined(_M_ARM64EC)
#define SIMDJSON_IS_ARM64 1
#elif defined(__riscv) && __riscv_xlen == 64
#define SIMDJSON_IS_RISCV64 1
@ -2346,7 +2346,7 @@ namespace std {
#define SIMDJSON_SIMDJSON_VERSION_H
/** The version of simdjson being used (major.minor.revision) */
#define SIMDJSON_VERSION "3.9.1"
#define SIMDJSON_VERSION "3.9.2"
namespace simdjson {
enum {
@ -2361,7 +2361,7 @@ enum {
/**
* The revision (major.minor.REVISION) of simdjson being used.
*/
SIMDJSON_VERSION_REVISION = 1
SIMDJSON_VERSION_REVISION = 2
};
} // namespace simdjson
@ -3284,7 +3284,7 @@ public:
*
* @return the name of the implementation, e.g. "haswell", "westmere", "arm64".
*/
virtual const std::string &name() const { return _name; }
virtual std::string name() const { return std::string(_name); }
/**
* The description of this implementation.
@ -3294,7 +3294,7 @@ public:
*
* @return the description of the implementation, e.g. "Intel/AMD AVX2", "Intel/AMD SSE4.2", "ARM NEON".
*/
virtual const std::string &description() const { return _description; }
virtual std::string description() const { return std::string(_description); }
/**
* The instruction sets this implementation is compiled against
@ -3370,18 +3370,19 @@ protected:
_required_instruction_sets(required_instruction_sets)
{
}
virtual ~implementation()=default;
protected:
~implementation() = default;
private:
/**
* The name of this implementation.
*/
const std::string _name;
std::string_view _name;
/**
* The description of this implementation.
*/
const std::string _description;
std::string_view _description;
/**
* Instruction sets required for this implementation.
@ -8015,7 +8016,11 @@ simdjson_inline std::string_view document_stream::iterator::source() const noexc
return std::string_view(start, next_doc_index - current_index() + 1);
} else {
size_t next_doc_index = stream->batch_start + stream->parser->implementation->structural_indexes[stream->parser->implementation->next_structural_index];
return std::string_view(reinterpret_cast<const char*>(stream->buf) + current_index(), next_doc_index - current_index() - 1);
size_t svlen = next_doc_index - current_index();
if(svlen > 1) {
svlen--;
}
return std::string_view(reinterpret_cast<const char*>(stream->buf) + current_index(), svlen);
}
}
@ -9916,10 +9921,10 @@ simdjson_inline uint64_t prefix_xor(uint64_t bitmask) {
#include <cstring>
#if _M_ARM64
#if SIMDJSON_REGULAR_VISUAL_STUDIO && SIMDJSON_IS_ARM64
// __umulh requires intrin.h
#include <intrin.h>
#endif // _M_ARM64
#endif // SIMDJSON_REGULAR_VISUAL_STUDIO && SIMDJSON_IS_ARM64
namespace simdjson {
namespace arm64 {
@ -9939,13 +9944,13 @@ static simdjson_inline uint32_t parse_eight_digits_unrolled(const uint8_t *chars
simdjson_inline internal::value128 full_multiplication(uint64_t value1, uint64_t value2) {
internal::value128 answer;
#if SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
#ifdef _M_ARM64
#if SIMDJSON_IS_ARM64
// ARM64 has native support for 64-bit multiplications, no need to emulate
answer.high = __umulh(value1, value2);
answer.low = value1 * value2;
#else
answer.low = _umul128(value1, value2, &answer.high); // _umul128 not available on ARM64
#endif // _M_ARM64
#endif // SIMDJSON_IS_ARM64
#else // SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
__uint128_t r = (static_cast<__uint128_t>(value1)) * value2;
answer.low = uint64_t(r);
@ -10100,7 +10105,7 @@ namespace {
tmp = vpaddq_u8(tmp, tmp);
return vgetq_lane_u16(vreinterpretq_u16_u8(tmp), 0);
}
simdjson_inline bool any() const { return vmaxvq_u8(*this) != 0; }
simdjson_inline bool any() const { return vmaxvq_u32(vreinterpretq_u32_u8(*this)) != 0; }
};
// Unsigned bytes
@ -12600,13 +12605,13 @@ static simdjson_inline uint64_t _umul128(uint64_t ab, uint64_t cd, uint64_t *hi)
simdjson_inline internal::value128 full_multiplication(uint64_t value1, uint64_t value2) {
internal::value128 answer;
#if SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
#ifdef _M_ARM64
#if SIMDJSON_IS_ARM64
// ARM64 has native support for 64-bit multiplications, no need to emulate
answer.high = __umulh(value1, value2);
answer.low = value1 * value2;
#else
answer.low = _umul128(value1, value2, &answer.high); // _umul128 not available on ARM64
#endif // _M_ARM64
#endif // SIMDJSON_IS_ARM64
#else // SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
__uint128_t r = (static_cast<__uint128_t>(value1)) * value2;
answer.low = uint64_t(r);
@ -14769,13 +14774,13 @@ static simdjson_inline uint32_t parse_eight_digits_unrolled(const uint8_t *chars
simdjson_inline internal::value128 full_multiplication(uint64_t value1, uint64_t value2) {
internal::value128 answer;
#if SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
#ifdef _M_ARM64
#if SIMDJSON_IS_ARM64
// ARM64 has native support for 64-bit multiplications, no need to emulate
answer.high = __umulh(value1, value2);
answer.low = value1 * value2;
#else
answer.low = _umul128(value1, value2, &answer.high); // _umul128 not available on ARM64
#endif // _M_ARM64
#endif // SIMDJSON_IS_ARM64
#else // SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
__uint128_t r = (static_cast<__uint128_t>(value1)) * value2;
answer.low = uint64_t(r);
@ -14861,10 +14866,10 @@ namespace simd {
struct simd8<bool>: base8<bool> {
static simdjson_inline simd8<bool> splat(bool _value) { return _mm256_set1_epi8(uint8_t(-(!!_value))); }
simdjson_inline simd8<bool>() : base8() {}
simdjson_inline simd8<bool>(const __m256i _value) : base8<bool>(_value) {}
simdjson_inline simd8() : base8() {}
simdjson_inline simd8(const __m256i _value) : base8<bool>(_value) {}
// Splat constructor
simdjson_inline simd8<bool>(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline simd8(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline int to_bitmask() const { return _mm256_movemask_epi8(*this); }
simdjson_inline bool any() const { return !_mm256_testz_si256(*this, *this); }
@ -17423,10 +17428,10 @@ namespace simd {
struct simd8<bool>: base8<bool> {
static simdjson_inline simd8<bool> splat(bool _value) { return _mm512_set1_epi8(uint8_t(-(!!_value))); }
simdjson_inline simd8<bool>() : base8() {}
simdjson_inline simd8<bool>(const __m512i _value) : base8<bool>(_value) {}
simdjson_inline simd8() : base8() {}
simdjson_inline simd8(const __m512i _value) : base8<bool>(_value) {}
// Splat constructor
simdjson_inline simd8<bool>(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline simd8(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline bool any() const { return !!_mm512_test_epi8_mask (*this, *this); }
simdjson_inline simd8<bool> operator~() const { return *this ^ true; }
};
@ -17789,13 +17794,13 @@ static simdjson_inline uint32_t parse_eight_digits_unrolled(const uint8_t *chars
simdjson_inline internal::value128 full_multiplication(uint64_t value1, uint64_t value2) {
internal::value128 answer;
#if SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
#ifdef _M_ARM64
#if SIMDJSON_IS_ARM64
// ARM64 has native support for 64-bit multiplications, no need to emulate
answer.high = __umulh(value1, value2);
answer.low = value1 * value2;
#else
answer.low = _umul128(value1, value2, &answer.high); // _umul128 not available on ARM64
#endif // _M_ARM64
#endif // SIMDJSON_IS_ARM64
#else // SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
__uint128_t r = (static_cast<__uint128_t>(value1)) * value2;
answer.low = uint64_t(r);
@ -19953,13 +19958,13 @@ static simdjson_inline uint32_t parse_eight_digits_unrolled(const uint8_t *chars
simdjson_inline internal::value128 full_multiplication(uint64_t value1, uint64_t value2) {
internal::value128 answer;
#if SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
#ifdef _M_ARM64
#if SIMDJSON_IS_ARM64
// ARM64 has native support for 64-bit multiplications, no need to emultate
answer.high = __umulh(value1, value2);
answer.low = value1 * value2;
#else
answer.low = _umul128(value1, value2, &answer.high); // _umul128 not available on ARM64
#endif // _M_ARM64
#endif // SIMDJSON_IS_ARM64
#else // SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
__uint128_t r = (static_cast<__uint128_t>(value1)) * value2;
answer.low = uint64_t(r);
@ -20076,11 +20081,11 @@ template <> struct simd8<bool> : base8<bool> {
return (__m128i)vec_splats((unsigned char)(-(!!_value)));
}
simdjson_inline simd8<bool>() : base8<bool>() {}
simdjson_inline simd8<bool>(const __m128i _value)
simdjson_inline simd8() : base8<bool>() {}
simdjson_inline simd8(const __m128i _value)
: base8<bool>(_value) {}
// Splat constructor
simdjson_inline simd8<bool>(bool _value)
simdjson_inline simd8(bool _value)
: base8<bool>(splat(_value)) {}
simdjson_inline int to_bitmask() const {
@ -22718,13 +22723,13 @@ static simdjson_inline uint32_t parse_eight_digits_unrolled(const uint8_t *chars
simdjson_inline internal::value128 full_multiplication(uint64_t value1, uint64_t value2) {
internal::value128 answer;
#if SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
#ifdef _M_ARM64
#if SIMDJSON_IS_ARM64
// ARM64 has native support for 64-bit multiplications, no need to emultate
answer.high = __umulh(value1, value2);
answer.low = value1 * value2;
#else
answer.low = _umul128(value1, value2, &answer.high); // _umul128 not available on ARM64
#endif // _M_ARM64
#endif // SIMDJSON_IS_ARM64
#else // SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
__uint128_t r = (static_cast<__uint128_t>(value1)) * value2;
answer.low = uint64_t(r);
@ -22804,10 +22809,10 @@ namespace simd {
struct simd8<bool>: base8<bool> {
static simdjson_inline simd8<bool> splat(bool _value) { return _mm_set1_epi8(uint8_t(-(!!_value))); }
simdjson_inline simd8<bool>() : base8() {}
simdjson_inline simd8<bool>(const __m128i _value) : base8<bool>(_value) {}
simdjson_inline simd8() : base8() {}
simdjson_inline simd8(const __m128i _value) : base8<bool>(_value) {}
// Splat constructor
simdjson_inline simd8<bool>(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline simd8(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline int to_bitmask() const { return _mm_movemask_epi8(*this); }
simdjson_inline bool any() const { return !_mm_testz_si128(*this, *this); }
@ -23232,10 +23237,10 @@ namespace simd {
struct simd8<bool>: base8<bool> {
static simdjson_inline simd8<bool> splat(bool _value) { return _mm_set1_epi8(uint8_t(-(!!_value))); }
simdjson_inline simd8<bool>() : base8() {}
simdjson_inline simd8<bool>(const __m128i _value) : base8<bool>(_value) {}
simdjson_inline simd8() : base8() {}
simdjson_inline simd8(const __m128i _value) : base8<bool>(_value) {}
// Splat constructor
simdjson_inline simd8<bool>(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline simd8(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline int to_bitmask() const { return _mm_movemask_epi8(*this); }
simdjson_inline bool any() const { return !_mm_testz_si128(*this, *this); }
@ -25710,10 +25715,10 @@ namespace simd {
return __lsx_vreplgr2vr_b(uint8_t(-(!!_value)));
}
simdjson_inline simd8<bool>() : base8() {}
simdjson_inline simd8<bool>(const __m128i _value) : base8<bool>(_value) {}
simdjson_inline simd8() : base8() {}
simdjson_inline simd8(const __m128i _value) : base8<bool>(_value) {}
// Splat constructor
simdjson_inline simd8<bool>(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline simd8(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline int to_bitmask() const { return __lsx_vpickve2gr_w(__lsx_vmskltz_b(*this), 0); }
simdjson_inline bool any() const { return 0 == __lsx_vpickve2gr_hu(__lsx_vmsknz_b(*this), 0); }
@ -28212,10 +28217,10 @@ namespace simd {
struct simd8<bool>: base8<bool> {
static simdjson_inline simd8<bool> splat(bool _value) { return __lasx_xvreplgr2vr_b(uint8_t(-(!!_value))); }
simdjson_inline simd8<bool>() : base8() {}
simdjson_inline simd8<bool>(const __m256i _value) : base8<bool>(_value) {}
simdjson_inline simd8() : base8() {}
simdjson_inline simd8(const __m256i _value) : base8<bool>(_value) {}
// Splat constructor
simdjson_inline simd8<bool>(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline simd8(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline int to_bitmask() const {
__m256i mask = __lasx_xvmskltz_b(*this);
@ -30730,10 +30735,10 @@ simdjson_inline uint64_t prefix_xor(uint64_t bitmask) {
#include <cstring>
#if _M_ARM64
#if SIMDJSON_REGULAR_VISUAL_STUDIO && SIMDJSON_IS_ARM64
// __umulh requires intrin.h
#include <intrin.h>
#endif // _M_ARM64
#endif // SIMDJSON_REGULAR_VISUAL_STUDIO && SIMDJSON_IS_ARM64
namespace simdjson {
namespace arm64 {
@ -30753,13 +30758,13 @@ static simdjson_inline uint32_t parse_eight_digits_unrolled(const uint8_t *chars
simdjson_inline internal::value128 full_multiplication(uint64_t value1, uint64_t value2) {
internal::value128 answer;
#if SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
#ifdef _M_ARM64
#if SIMDJSON_IS_ARM64
// ARM64 has native support for 64-bit multiplications, no need to emultate
answer.high = __umulh(value1, value2);
answer.low = value1 * value2;
#else
answer.low = _umul128(value1, value2, &answer.high); // _umul128 not available on ARM64
#endif // _M_ARM64
#endif // SIMDJSON_IS_ARM64
#else // SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
__uint128_t r = (static_cast<__uint128_t>(value1)) * value2;
answer.low = uint64_t(r);
@ -30914,7 +30919,7 @@ namespace {
tmp = vpaddq_u8(tmp, tmp);
return vgetq_lane_u16(vreinterpretq_u16_u8(tmp), 0);
}
simdjson_inline bool any() const { return vmaxvq_u8(*this) != 0; }
simdjson_inline bool any() const { return vmaxvq_u32(vreinterpretq_u32_u8(*this)) != 0; }
};
// Unsigned bytes
@ -37721,9 +37726,17 @@ simdjson_inline std::string_view document_stream::iterator::source() const noexc
depth--;
break;
default: // Scalar value document
// TODO: Remove any trailing whitespaces
// TODO: We could remove trailing whitespaces
// This returns a string spanning from start of value to the beginning of the next document (excluded)
return std::string_view(reinterpret_cast<const char*>(stream->buf) + current_index(), stream->parser->implementation->structural_indexes[++cur_struct_index] - current_index() - 1);
{
auto next_index = stream->parser->implementation->structural_indexes[++cur_struct_index];
// normally the length would be next_index - current_index() - 1, except for the last document
size_t svlen = next_index - current_index();
if(svlen > 1) {
svlen--;
}
return std::string_view(reinterpret_cast<const char*>(stream->buf) + current_index(), svlen);
}
}
cur_struct_index++;
}
@ -41640,13 +41653,13 @@ static simdjson_inline uint64_t _umul128(uint64_t ab, uint64_t cd, uint64_t *hi)
simdjson_inline internal::value128 full_multiplication(uint64_t value1, uint64_t value2) {
internal::value128 answer;
#if SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
#ifdef _M_ARM64
#if SIMDJSON_IS_ARM64
// ARM64 has native support for 64-bit multiplications, no need to emultate
answer.high = __umulh(value1, value2);
answer.low = value1 * value2;
#else
answer.low = _umul128(value1, value2, &answer.high); // _umul128 not available on ARM64
#endif // _M_ARM64
#endif // SIMDJSON_IS_ARM64
#else // SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
__uint128_t r = (static_cast<__uint128_t>(value1)) * value2;
answer.low = uint64_t(r);
@ -48050,9 +48063,17 @@ simdjson_inline std::string_view document_stream::iterator::source() const noexc
depth--;
break;
default: // Scalar value document
// TODO: Remove any trailing whitespaces
// TODO: We could remove trailing whitespaces
// This returns a string spanning from start of value to the beginning of the next document (excluded)
return std::string_view(reinterpret_cast<const char*>(stream->buf) + current_index(), stream->parser->implementation->structural_indexes[++cur_struct_index] - current_index() - 1);
{
auto next_index = stream->parser->implementation->structural_indexes[++cur_struct_index];
// normally the length would be next_index - current_index() - 1, except for the last document
size_t svlen = next_index - current_index();
if(svlen > 1) {
svlen--;
}
return std::string_view(reinterpret_cast<const char*>(stream->buf) + current_index(), svlen);
}
}
cur_struct_index++;
}
@ -52035,13 +52056,13 @@ static simdjson_inline uint32_t parse_eight_digits_unrolled(const uint8_t *chars
simdjson_inline internal::value128 full_multiplication(uint64_t value1, uint64_t value2) {
internal::value128 answer;
#if SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
#ifdef _M_ARM64
#if SIMDJSON_IS_ARM64
// ARM64 has native support for 64-bit multiplications, no need to emultate
answer.high = __umulh(value1, value2);
answer.low = value1 * value2;
#else
answer.low = _umul128(value1, value2, &answer.high); // _umul128 not available on ARM64
#endif // _M_ARM64
#endif // SIMDJSON_IS_ARM64
#else // SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
__uint128_t r = (static_cast<__uint128_t>(value1)) * value2;
answer.low = uint64_t(r);
@ -52127,10 +52148,10 @@ namespace simd {
struct simd8<bool>: base8<bool> {
static simdjson_inline simd8<bool> splat(bool _value) { return _mm256_set1_epi8(uint8_t(-(!!_value))); }
simdjson_inline simd8<bool>() : base8() {}
simdjson_inline simd8<bool>(const __m256i _value) : base8<bool>(_value) {}
simdjson_inline simd8() : base8() {}
simdjson_inline simd8(const __m256i _value) : base8<bool>(_value) {}
// Splat constructor
simdjson_inline simd8<bool>(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline simd8(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline int to_bitmask() const { return _mm256_movemask_epi8(*this); }
simdjson_inline bool any() const { return !_mm256_testz_si256(*this, *this); }
@ -58871,9 +58892,17 @@ simdjson_inline std::string_view document_stream::iterator::source() const noexc
depth--;
break;
default: // Scalar value document
// TODO: Remove any trailing whitespaces
// TODO: We could remove trailing whitespaces
// This returns a string spanning from start of value to the beginning of the next document (excluded)
return std::string_view(reinterpret_cast<const char*>(stream->buf) + current_index(), stream->parser->implementation->structural_indexes[++cur_struct_index] - current_index() - 1);
{
auto next_index = stream->parser->implementation->structural_indexes[++cur_struct_index];
// normally the length would be next_index - current_index() - 1, except for the last document
size_t svlen = next_index - current_index();
if(svlen > 1) {
svlen--;
}
return std::string_view(reinterpret_cast<const char*>(stream->buf) + current_index(), svlen);
}
}
cur_struct_index++;
}
@ -62915,10 +62944,10 @@ namespace simd {
struct simd8<bool>: base8<bool> {
static simdjson_inline simd8<bool> splat(bool _value) { return _mm512_set1_epi8(uint8_t(-(!!_value))); }
simdjson_inline simd8<bool>() : base8() {}
simdjson_inline simd8<bool>(const __m512i _value) : base8<bool>(_value) {}
simdjson_inline simd8() : base8() {}
simdjson_inline simd8(const __m512i _value) : base8<bool>(_value) {}
// Splat constructor
simdjson_inline simd8<bool>(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline simd8(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline bool any() const { return !!_mm512_test_epi8_mask (*this, *this); }
simdjson_inline simd8<bool> operator~() const { return *this ^ true; }
};
@ -63281,13 +63310,13 @@ static simdjson_inline uint32_t parse_eight_digits_unrolled(const uint8_t *chars
simdjson_inline internal::value128 full_multiplication(uint64_t value1, uint64_t value2) {
internal::value128 answer;
#if SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
#ifdef _M_ARM64
#if SIMDJSON_IS_ARM64
// ARM64 has native support for 64-bit multiplications, no need to emultate
answer.high = __umulh(value1, value2);
answer.low = value1 * value2;
#else
answer.low = _umul128(value1, value2, &answer.high); // _umul128 not available on ARM64
#endif // _M_ARM64
#endif // SIMDJSON_IS_ARM64
#else // SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
__uint128_t r = (static_cast<__uint128_t>(value1)) * value2;
answer.low = uint64_t(r);
@ -69691,9 +69720,17 @@ simdjson_inline std::string_view document_stream::iterator::source() const noexc
depth--;
break;
default: // Scalar value document
// TODO: Remove any trailing whitespaces
// TODO: We could remove trailing whitespaces
// This returns a string spanning from start of value to the beginning of the next document (excluded)
return std::string_view(reinterpret_cast<const char*>(stream->buf) + current_index(), stream->parser->implementation->structural_indexes[++cur_struct_index] - current_index() - 1);
{
auto next_index = stream->parser->implementation->structural_indexes[++cur_struct_index];
// normally the length would be next_index - current_index() - 1, except for the last document
size_t svlen = next_index - current_index();
if(svlen > 1) {
svlen--;
}
return std::string_view(reinterpret_cast<const char*>(stream->buf) + current_index(), svlen);
}
}
cur_struct_index++;
}
@ -73671,13 +73708,13 @@ static simdjson_inline uint32_t parse_eight_digits_unrolled(const uint8_t *chars
simdjson_inline internal::value128 full_multiplication(uint64_t value1, uint64_t value2) {
internal::value128 answer;
#if SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
#ifdef _M_ARM64
#if SIMDJSON_IS_ARM64
// ARM64 has native support for 64-bit multiplications, no need to emultate
answer.high = __umulh(value1, value2);
answer.low = value1 * value2;
#else
answer.low = _umul128(value1, value2, &answer.high); // _umul128 not available on ARM64
#endif // _M_ARM64
#endif // SIMDJSON_IS_ARM64
#else // SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
__uint128_t r = (static_cast<__uint128_t>(value1)) * value2;
answer.low = uint64_t(r);
@ -73794,11 +73831,11 @@ template <> struct simd8<bool> : base8<bool> {
return (__m128i)vec_splats((unsigned char)(-(!!_value)));
}
simdjson_inline simd8<bool>() : base8<bool>() {}
simdjson_inline simd8<bool>(const __m128i _value)
simdjson_inline simd8() : base8<bool>() {}
simdjson_inline simd8(const __m128i _value)
: base8<bool>(_value) {}
// Splat constructor
simdjson_inline simd8<bool>(bool _value)
simdjson_inline simd8(bool _value)
: base8<bool>(splat(_value)) {}
simdjson_inline int to_bitmask() const {
@ -80626,9 +80663,17 @@ simdjson_inline std::string_view document_stream::iterator::source() const noexc
depth--;
break;
default: // Scalar value document
// TODO: Remove any trailing whitespaces
// TODO: We could remove trailing whitespaces
// This returns a string spanning from start of value to the beginning of the next document (excluded)
return std::string_view(reinterpret_cast<const char*>(stream->buf) + current_index(), stream->parser->implementation->structural_indexes[++cur_struct_index] - current_index() - 1);
{
auto next_index = stream->parser->implementation->structural_indexes[++cur_struct_index];
// normally the length would be next_index - current_index() - 1, except for the last document
size_t svlen = next_index - current_index();
if(svlen > 1) {
svlen--;
}
return std::string_view(reinterpret_cast<const char*>(stream->buf) + current_index(), svlen);
}
}
cur_struct_index++;
}
@ -84662,13 +84707,13 @@ static simdjson_inline uint32_t parse_eight_digits_unrolled(const uint8_t *chars
simdjson_inline internal::value128 full_multiplication(uint64_t value1, uint64_t value2) {
internal::value128 answer;
#if SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
#ifdef _M_ARM64
#if SIMDJSON_IS_ARM64
// ARM64 has native support for 64-bit multiplications, no need to emultate
answer.high = __umulh(value1, value2);
answer.low = value1 * value2;
#else
answer.low = _umul128(value1, value2, &answer.high); // _umul128 not available on ARM64
#endif // _M_ARM64
#endif // SIMDJSON_IS_ARM64
#else // SIMDJSON_REGULAR_VISUAL_STUDIO || SIMDJSON_IS_32BITS
__uint128_t r = (static_cast<__uint128_t>(value1)) * value2;
answer.low = uint64_t(r);
@ -84748,10 +84793,10 @@ namespace simd {
struct simd8<bool>: base8<bool> {
static simdjson_inline simd8<bool> splat(bool _value) { return _mm_set1_epi8(uint8_t(-(!!_value))); }
simdjson_inline simd8<bool>() : base8() {}
simdjson_inline simd8<bool>(const __m128i _value) : base8<bool>(_value) {}
simdjson_inline simd8() : base8() {}
simdjson_inline simd8(const __m128i _value) : base8<bool>(_value) {}
// Splat constructor
simdjson_inline simd8<bool>(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline simd8(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline int to_bitmask() const { return _mm_movemask_epi8(*this); }
simdjson_inline bool any() const { return !_mm_testz_si128(*this, *this); }
@ -85176,10 +85221,10 @@ namespace simd {
struct simd8<bool>: base8<bool> {
static simdjson_inline simd8<bool> splat(bool _value) { return _mm_set1_epi8(uint8_t(-(!!_value))); }
simdjson_inline simd8<bool>() : base8() {}
simdjson_inline simd8<bool>(const __m128i _value) : base8<bool>(_value) {}
simdjson_inline simd8() : base8() {}
simdjson_inline simd8(const __m128i _value) : base8<bool>(_value) {}
// Splat constructor
simdjson_inline simd8<bool>(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline simd8(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline int to_bitmask() const { return _mm_movemask_epi8(*this); }
simdjson_inline bool any() const { return !_mm_testz_si128(*this, *this); }
@ -91884,9 +91929,17 @@ simdjson_inline std::string_view document_stream::iterator::source() const noexc
depth--;
break;
default: // Scalar value document
// TODO: Remove any trailing whitespaces
// TODO: We could remove trailing whitespaces
// This returns a string spanning from start of value to the beginning of the next document (excluded)
return std::string_view(reinterpret_cast<const char*>(stream->buf) + current_index(), stream->parser->implementation->structural_indexes[++cur_struct_index] - current_index() - 1);
{
auto next_index = stream->parser->implementation->structural_indexes[++cur_struct_index];
// normally the length would be next_index - current_index() - 1, except for the last document
size_t svlen = next_index - current_index();
if(svlen > 1) {
svlen--;
}
return std::string_view(reinterpret_cast<const char*>(stream->buf) + current_index(), svlen);
}
}
cur_struct_index++;
}
@ -95880,10 +95933,10 @@ namespace simd {
return __lsx_vreplgr2vr_b(uint8_t(-(!!_value)));
}
simdjson_inline simd8<bool>() : base8() {}
simdjson_inline simd8<bool>(const __m128i _value) : base8<bool>(_value) {}
simdjson_inline simd8() : base8() {}
simdjson_inline simd8(const __m128i _value) : base8<bool>(_value) {}
// Splat constructor
simdjson_inline simd8<bool>(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline simd8(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline int to_bitmask() const { return __lsx_vpickve2gr_w(__lsx_vmskltz_b(*this), 0); }
simdjson_inline bool any() const { return 0 == __lsx_vpickve2gr_hu(__lsx_vmsknz_b(*this), 0); }
@ -102613,9 +102666,17 @@ simdjson_inline std::string_view document_stream::iterator::source() const noexc
depth--;
break;
default: // Scalar value document
// TODO: Remove any trailing whitespaces
// TODO: We could remove trailing whitespaces
// This returns a string spanning from start of value to the beginning of the next document (excluded)
return std::string_view(reinterpret_cast<const char*>(stream->buf) + current_index(), stream->parser->implementation->structural_indexes[++cur_struct_index] - current_index() - 1);
{
auto next_index = stream->parser->implementation->structural_indexes[++cur_struct_index];
// normally the length would be next_index - current_index() - 1, except for the last document
size_t svlen = next_index - current_index();
if(svlen > 1) {
svlen--;
}
return std::string_view(reinterpret_cast<const char*>(stream->buf) + current_index(), svlen);
}
}
cur_struct_index++;
}
@ -106608,10 +106669,10 @@ namespace simd {
struct simd8<bool>: base8<bool> {
static simdjson_inline simd8<bool> splat(bool _value) { return __lasx_xvreplgr2vr_b(uint8_t(-(!!_value))); }
simdjson_inline simd8<bool>() : base8() {}
simdjson_inline simd8<bool>(const __m256i _value) : base8<bool>(_value) {}
simdjson_inline simd8() : base8() {}
simdjson_inline simd8(const __m256i _value) : base8<bool>(_value) {}
// Splat constructor
simdjson_inline simd8<bool>(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline simd8(bool _value) : base8<bool>(splat(_value)) {}
simdjson_inline int to_bitmask() const {
__m256i mask = __lasx_xvmskltz_b(*this);
@ -113355,9 +113416,17 @@ simdjson_inline std::string_view document_stream::iterator::source() const noexc
depth--;
break;
default: // Scalar value document
// TODO: Remove any trailing whitespaces
// TODO: We could remove trailing whitespaces
// This returns a string spanning from start of value to the beginning of the next document (excluded)
return std::string_view(reinterpret_cast<const char*>(stream->buf) + current_index(), stream->parser->implementation->structural_indexes[++cur_struct_index] - current_index() - 1);
{
auto next_index = stream->parser->implementation->structural_indexes[++cur_struct_index];
// normally the length would be next_index - current_index() - 1, except for the last document
size_t svlen = next_index - current_index();
if(svlen > 1) {
svlen--;
}
return std::string_view(reinterpret_cast<const char*>(stream->buf) + current_index(), svlen);
}
}
cur_struct_index++;
}