deps: patch V8 to support compilation with MSVC

This patches V8 v12.3 for Windows by fixing multiple compilation
errors caused by V8 being a Clang-oriented project. The fixes range
from changing `using` declarations and renaming identifiers to working
around differences in how Clang and MSVC handle templates and
metaprogramming.

The changes introduced here are meant strictly as a patch, so they
shouldn't be pushed upstream.
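
For illustration, the bulk of the renames address a pattern like the one
sketched below, where a template parameter is re-exported under a second
name and dependent code qualifies members through the alias. This is a
minimal hypothetical sketch (the names echo `TaggedArrayBase`, but it is
not code from this diff):

```cpp
// Upstream V8 declares the template parameter as ShapeT and re-exports
// it as a member alias named Shape; dependent references such as
// Array::Shape::kCapacityOffset compile with Clang but trip up MSVC's
// name lookup in some contexts. The patch swaps the two names: the
// parameter keeps the plain name Shape and the alias becomes ShapeT.
template <class Shape>
struct TaggedArrayBase {
  using ShapeT = Shape;  // alias retained for code that spells ShapeT
  static constexpr int kCapacityOffset = Shape::kCapacityOffset;
};

struct TaggedArrayShape {
  static constexpr int kCapacityOffset = 8;  // arbitrary value for the sketch
};

static_assert(TaggedArrayBase<TaggedArrayShape>::kCapacityOffset == 8);
```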

Refs: https://github.com/targos/node/pull/13
Refs: https://github.com/targos/node/pull/14
Refs: https://github.com/targos/node/pull/15
PR-URL: https://github.com/nodejs/node/pull/52293
Reviewed-By: Moshe Atlow <moshe@atlow.co.il>
Reviewed-By: Rafael Gonzaga <rafael.nunu@hotmail.com>
Reviewed-By: Richard Lau <rlau@redhat.com>
Stefan Stojanovic, 2024-04-16 14:28:37 +02:00, committed by Node.js GitHub Bot
commit c72dd1a73b, parent 16f1d65102
17 changed files with 106 additions and 121 deletions


@@ -37,7 +37,7 @@
# Reset this number to 0 on major V8 upgrades.
# Increment by one for each non-official patch applied to deps/v8.
-'v8_embedder_string': '-node.6',
+'v8_embedder_string': '-node.7',
##### V8 defaults for Node.js #####


@@ -2782,10 +2782,9 @@ TNode<Word32T> WeakCollectionsBuiltinsAssembler::ShouldShrink(
TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::ValueIndexFromKeyIndex(
TNode<IntPtrT> key_index) {
-return IntPtrAdd(
-key_index,
-IntPtrConstant(EphemeronHashTable::TodoShape::kEntryValueIndex -
-EphemeronHashTable::kEntryKeyIndex));
+return IntPtrAdd(key_index,
+IntPtrConstant(EphemeronHashTable::ShapeT::kEntryValueIndex -
+EphemeronHashTable::kEntryKeyIndex));
}
TF_BUILTIN(WeakMapConstructor, WeakCollectionsBuiltinsAssembler) {


@@ -9505,7 +9505,7 @@ void CodeStubAssembler::NameDictionaryLookup(
CAST(UnsafeLoadFixedArrayElement(dictionary, index));
GotoIf(TaggedEqual(current, undefined), if_not_found_with_insertion_index);
if (mode == kFindExisting) {
-if (Dictionary::TodoShape::kMatchNeedsHoleCheck) {
+if (Dictionary::ShapeT::kMatchNeedsHoleCheck) {
GotoIf(TaggedEqual(current, TheHoleConstant()), &next_probe);
}
current = LoadName<Dictionary>(current);


@@ -1547,7 +1547,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Smi> LoadFixedArrayBaseLength(TNode<FixedArrayBase> array);
template <typename Array>
TNode<Smi> LoadArrayCapacity(TNode<Array> array) {
-return LoadObjectField<Smi>(array, Array::Shape::kCapacityOffset);
+return LoadObjectField<Smi>(array, Array::ShapeT::kCapacityOffset);
}
// Load the length of a fixed array base instance.
TNode<IntPtrT> LoadAndUntagFixedArrayBaseLength(TNode<FixedArrayBase> array);


@@ -1121,8 +1121,7 @@ class TurboshaftAssemblerOpInterface
template <typename... Args>
explicit TurboshaftAssemblerOpInterface(Args... args)
-: GenericAssemblerOpInterface<Next>(args...),
-matcher_(Asm().output_graph()) {}
+: matcher_(Asm().output_graph()) {}
const OperationMatcher& matcher() const { return matcher_; }
@@ -2245,11 +2244,11 @@ class TurboshaftAssemblerOpInterface
// Helpers to read the most common fields.
// TODO(nicohartmann@): Strengthen this to `V<HeapObject>`.
-V<Map> LoadMapField(V<Object> object) {
-return LoadField<Map>(object, AccessBuilder::ForMap());
+V<v8::internal::Map> LoadMapField(V<Object> object) {
+return LoadField<v8::internal::Map>(object, AccessBuilder::ForMap());
}
-V<Word32> LoadInstanceTypeField(V<Map> map) {
+V<Word32> LoadInstanceTypeField(V<v8::internal::Map> map) {
return LoadField<Word32>(map, AccessBuilder::ForMapInstanceType());
}
@@ -2849,7 +2848,7 @@ class TurboshaftAssemblerOpInterface
V<Object> CallRuntime_TransitionElementsKind(Isolate* isolate,
V<Context> context,
V<HeapObject> object,
-V<Map> target_map) {
+V<v8::internal::Map> target_map) {
return CallRuntime<typename RuntimeCallDescriptor::TransitionElementsKind>(
isolate, context, {object, target_map});
}
@@ -3267,8 +3266,8 @@ class TurboshaftAssemblerOpInterface
void TransitionAndStoreArrayElement(
V<Object> array, V<WordPtr> index, OpIndex value,
-TransitionAndStoreArrayElementOp::Kind kind, MaybeHandle<Map> fast_map,
-MaybeHandle<Map> double_map) {
+TransitionAndStoreArrayElementOp::Kind kind, MaybeHandle<v8::internal::Map> fast_map,
+MaybeHandle<v8::internal::Map> double_map) {
ReduceIfReachableTransitionAndStoreArrayElement(array, index, value, kind,
fast_map, double_map);
}
@@ -3281,17 +3280,17 @@ class TurboshaftAssemblerOpInterface
}
V<Word32> CompareMaps(V<HeapObject> heap_object,
-const ZoneRefSet<Map>& maps) {
+const ZoneRefSet<v8::internal::Map>& maps) {
return ReduceIfReachableCompareMaps(heap_object, maps);
}
void CheckMaps(V<HeapObject> heap_object, OpIndex frame_state,
-const ZoneRefSet<Map>& maps, CheckMapsFlags flags,
+const ZoneRefSet<v8::internal::Map>& maps, CheckMapsFlags flags,
const FeedbackSource& feedback) {
ReduceIfReachableCheckMaps(heap_object, frame_state, maps, flags, feedback);
}
-void AssumeMap(V<HeapObject> heap_object, const ZoneRefSet<Map>& maps) {
+void AssumeMap(V<HeapObject> heap_object, const ZoneRefSet<v8::internal::Map>& maps) {
ReduceIfReachableAssumeMap(heap_object, maps);
}
@@ -3400,16 +3399,16 @@ class TurboshaftAssemblerOpInterface
return ReduceIfReachableAssertNotNull(object, type, trap_id);
}
-V<Map> RttCanon(V<FixedArray> rtts, uint32_t type_index) {
+V<v8::internal::Map> RttCanon(V<FixedArray> rtts, uint32_t type_index) {
return ReduceIfReachableRttCanon(rtts, type_index);
}
-V<Word32> WasmTypeCheck(V<Tagged> object, OptionalV<Map> rtt,
+V<Word32> WasmTypeCheck(V<Tagged> object, OptionalV<v8::internal::Map> rtt,
WasmTypeCheckConfig config) {
return ReduceIfReachableWasmTypeCheck(object, rtt, config);
}
-V<Tagged> WasmTypeCast(V<Tagged> object, OptionalV<Map> rtt,
+V<Tagged> WasmTypeCast(V<Tagged> object, OptionalV<v8::internal::Map> rtt,
WasmTypeCheckConfig config) {
return ReduceIfReachableWasmTypeCast(object, rtt, config);
}
@@ -3454,12 +3453,12 @@ class TurboshaftAssemblerOpInterface
return ReduceIfReachableArrayLength(array, null_check);
}
-V<HeapObject> WasmAllocateArray(V<Map> rtt, ConstOrV<Word32> length,
+V<HeapObject> WasmAllocateArray(V<v8::internal::Map> rtt, ConstOrV<Word32> length,
const wasm::ArrayType* array_type) {
return ReduceIfReachableWasmAllocateArray(rtt, resolve(length), array_type);
}
-V<HeapObject> WasmAllocateStruct(V<Map> rtt,
+V<HeapObject> WasmAllocateStruct(V<v8::internal::Map> rtt,
const wasm::StructType* struct_type) {
return ReduceIfReachableWasmAllocateStruct(rtt, struct_type);
}
@@ -4044,8 +4043,14 @@ class TSAssembler
: public Assembler<reducer_list<TurboshaftAssemblerOpInterface, Reducers...,
TSReducerBase>> {
public:
-using Assembler<reducer_list<TurboshaftAssemblerOpInterface, Reducers...,
-TSReducerBase>>::Assembler;
+#ifdef _WIN32
+explicit TSAssembler(Graph& input_graph, Graph& output_graph,
+Zone* phase_zone)
+: Assembler(input_graph, output_graph, phase_zone) {}
+#else
+using Assembler<reducer_list<TurboshaftAssemblerOpInterface, Reducers...,
+TSReducerBase>>::Assembler;
+#endif
};
#include "src/compiler/turboshaft/undef-assembler-macros.inc"


@@ -1349,26 +1349,11 @@ class MachineOptimizationReducer : public Next {
if (matcher.MatchConstantShiftRightArithmeticShiftOutZeros(
left, &x, rep_w, &k1) &&
matcher.MatchIntegralWordConstant(right, rep_w, &k2) &&
-CountLeadingSignBits(k2, rep_w) > k1) {
-if (matcher.Get(left).saturated_use_count.IsZero()) {
-return __ Comparison(
-x, __ WordConstant(base::bits::Unsigned(k2) << k1, rep_w), kind,
-rep_w);
-} else if constexpr (reducer_list_contains<
-ReducerList, ValueNumberingReducer>::value) {
-// If the shift has uses, we only apply the transformation if the
-// result would be GVNed away.
-OpIndex rhs =
-__ WordConstant(base::bits::Unsigned(k2) << k1, rep_w);
-static_assert(ComparisonOp::input_count == 2);
-static_assert(sizeof(ComparisonOp) == 8);
-base::SmallVector<OperationStorageSlot, 32> storage;
-ComparisonOp* cmp =
-CreateOperation<ComparisonOp>(storage, x, rhs, kind, rep_w);
-if (__ WillGVNOp(*cmp)) {
-return __ Comparison(x, rhs, kind, rep_w);
-}
-}
+CountLeadingSignBits(k2, rep_w) > k1 &&
+matcher.Get(left).saturated_use_count.IsZero()) {
+return __ Comparison(
+x, __ WordConstant(base::bits::Unsigned(k2) << k1, rep_w), kind,
+rep_w);
}
// k2 </<= (x >> k1) => (k2 << k1) </<= x if shifts reversible
// Only perform the transformation if the shift is not used yet, to
@@ -1376,26 +1361,11 @@ class MachineOptimizationReducer : public Next {
if (matcher.MatchConstantShiftRightArithmeticShiftOutZeros(
right, &x, rep_w, &k1) &&
matcher.MatchIntegralWordConstant(left, rep_w, &k2) &&
-CountLeadingSignBits(k2, rep_w) > k1) {
-if (matcher.Get(right).saturated_use_count.IsZero()) {
-return __ Comparison(
-__ WordConstant(base::bits::Unsigned(k2) << k1, rep_w), x, kind,
-rep_w);
-} else if constexpr (reducer_list_contains<
-ReducerList, ValueNumberingReducer>::value) {
-// If the shift has uses, we only apply the transformation if the
-// result would be GVNed away.
-OpIndex lhs =
-__ WordConstant(base::bits::Unsigned(k2) << k1, rep_w);
-static_assert(ComparisonOp::input_count == 2);
-static_assert(sizeof(ComparisonOp) == 8);
-base::SmallVector<OperationStorageSlot, 32> storage;
-ComparisonOp* cmp =
-CreateOperation<ComparisonOp>(storage, lhs, x, kind, rep_w);
-if (__ WillGVNOp(*cmp)) {
-return __ Comparison(lhs, x, kind, rep_w);
-}
-}
+CountLeadingSignBits(k2, rep_w) > k1 &&
+matcher.Get(right).saturated_use_count.IsZero()) {
+return __ Comparison(
+__ WordConstant(base::bits::Unsigned(k2) << k1, rep_w), x, kind,
+rep_w);
}
}
// Map 64bit to 32bit comparisons.


@@ -32,10 +32,10 @@ class SimplifiedLoweringReducer : public Next {
OpIndex ig_index, const SpeculativeNumberBinopOp& op) {
DCHECK_EQ(op.kind, SpeculativeNumberBinopOp::Kind::kSafeIntegerAdd);
-OpIndex frame_state = Map(op.frame_state());
-V<Word32> left = ProcessInput(Map(op.left()), Rep::Word32(),
+OpIndex frame_state = MapImpl(op.frame_state());
+V<Word32> left = ProcessInput(MapImpl(op.left()), Rep::Word32(),
CheckKind::kSigned32, frame_state);
-V<Word32> right = ProcessInput(Map(op.right()), Rep::Word32(),
+V<Word32> right = ProcessInput(MapImpl(op.right()), Rep::Word32(),
CheckKind::kSigned32, frame_state);
V<Word32> result = __ OverflowCheckedBinop(
@@ -43,7 +43,7 @@ class SimplifiedLoweringReducer : public Next {
WordRepresentation::Word32());
V<Word32> overflow = __ Projection(result, 1, Rep::Word32());
-__ DeoptimizeIf(overflow, Map(op.frame_state()),
+__ DeoptimizeIf(overflow, MapImpl(op.frame_state()),
DeoptimizeReason::kOverflow, FeedbackSource{});
return __ Projection(result, 0, Rep::Word32());
}
@@ -52,10 +52,10 @@ class SimplifiedLoweringReducer : public Next {
base::SmallVector<OpIndex, 8> return_values;
for (OpIndex input : ret.return_values()) {
return_values.push_back(
-ProcessInput(Map(input), Rep::Tagged(), CheckKind::kNone, {}));
+ProcessInput(MapImpl(input), Rep::Tagged(), CheckKind::kNone, {}));
}
-__ Return(Map(ret.pop_count()), base::VectorOf(return_values));
+__ Return(MapImpl(ret.pop_count()), base::VectorOf(return_values));
return OpIndex::Invalid();
}
@@ -94,7 +94,7 @@ class SimplifiedLoweringReducer : public Next {
}
}
-inline OpIndex Map(OpIndex ig_index) { return __ MapToNewGraph(ig_index); }
+inline OpIndex MapImpl(OpIndex ig_index) { return __ MapToNewGraph(ig_index); }
};
#include "src/compiler/turboshaft/undef-assembler-macros.inc"


@@ -55,9 +55,11 @@ namespace v8::internal::compiler::turboshaft {
// with constant inputs introduced by `VariableReducer` need to be eliminated.
template <class AfterNext>
class VariableReducer : public RequiredOptimizationReducer<AfterNext> {
+protected:
using Next = RequiredOptimizationReducer<AfterNext>;
using Snapshot = SnapshotTable<OpIndex, VariableData>::Snapshot;
+private:
struct GetActiveLoopVariablesIndex {
IntrusiveSetIndex& operator()(Variable var) const {
return var.data().active_loop_variables_index;


@@ -3570,7 +3570,7 @@ void Heap::RightTrimArray(Tagged<Array> object, int new_capacity,
}
const int bytes_to_trim =
-(old_capacity - new_capacity) * Array::Shape::kElementSize;
+(old_capacity - new_capacity) * Array::HotfixShape::kElementSize;
// Calculate location of new array end.
const int old_size = Array::SizeFor(old_capacity);


@@ -32,8 +32,7 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) Dictionary
using DerivedHashTable = HashTable<Derived, Shape>;
public:
-using TodoShape = Shape;
-using Key = typename TodoShape::Key;
+using Key = typename Shape::Key;
inline Tagged<Object> ValueAt(InternalIndex entry);
inline Tagged<Object> ValueAt(PtrComprCageBase cage_base,
InternalIndex entry);
@@ -126,7 +125,7 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) Dictionary
Key key, Handle<Object> value,
PropertyDetails details);
-OBJECT_CONSTRUCTORS(Dictionary, HashTable<Derived, TodoShape>);
+OBJECT_CONSTRUCTORS(Dictionary, HashTable<Derived, Shape>);
};
#define EXTERN_DECLARE_DICTIONARY(DERIVED, SHAPE) \


@@ -24,18 +24,18 @@ namespace internal {
#include "torque-generated/src/objects/fixed-array-tq.inc"
// Derived: must have a Smi slot at kCapacityOffset.
-template <class Derived, class ShapeT, class Super = HeapObject>
+template <class Derived, class Shape, class Super = HeapObject>
class TaggedArrayBase : public Super {
static_assert(std::is_base_of<HeapObject, Super>::value);
OBJECT_CONSTRUCTORS(TaggedArrayBase, Super);
-using ElementT = typename ShapeT::ElementT;
-static_assert(ShapeT::kElementSize == kTaggedSize);
+using ElementT = typename Shape::ElementT;
+static_assert(Shape::kElementSize == kTaggedSize);
static_assert(is_subtype_v<ElementT, Object> ||
is_subtype_v<ElementT, MaybeObject>);
using ElementFieldT =
-TaggedField<ElementT, 0, typename ShapeT::CompressionScheme>;
+TaggedField<ElementT, 0, typename Shape::CompressionScheme>;
static constexpr bool kSupportsSmiElements =
std::is_convertible_v<Smi, ElementT>;
@@ -56,7 +56,7 @@ class TaggedArrayBase : public Super {
std::conditional_t<kElementsAreMaybeObject, MaybeObjectSlot, ObjectSlot>;
public:
-using Shape = ShapeT;
+using ShapeT = Shape;
inline int capacity() const;
inline int capacity(AcquireLoadTag) const;
@@ -187,6 +187,7 @@ class FixedArray : public TaggedArrayBase<FixedArray, TaggedArrayShape> {
OBJECT_CONSTRUCTORS(FixedArray, Super);
public:
+using HotfixShape = TaggedArrayShape;
template <class IsolateT>
static inline Handle<FixedArray> New(
IsolateT* isolate, int capacity,
@@ -232,7 +233,7 @@ class FixedArray : public TaggedArrayBase<FixedArray, TaggedArrayShape> {
class BodyDescriptor;
-static constexpr int kLengthOffset = Shape::kCapacityOffset;
+static constexpr int kLengthOffset = ShapeT::kCapacityOffset;
static constexpr int kMaxLength = FixedArray::kMaxCapacity;
static constexpr int kMaxRegularLength = FixedArray::kMaxRegularCapacity;
@@ -283,7 +284,7 @@ class TrustedFixedArray
class BodyDescriptor;
static constexpr int kLengthOffset =
-TrustedFixedArray::Shape::kCapacityOffset;
+TrustedFixedArray::ShapeT::kCapacityOffset;
static constexpr int kMaxLength = TrustedFixedArray::kMaxCapacity;
static constexpr int kMaxRegularLength =
TrustedFixedArray::kMaxRegularCapacity;
@@ -331,7 +332,7 @@ class ProtectedFixedArray
class BodyDescriptor;
static constexpr int kLengthOffset =
-ProtectedFixedArray::Shape::kCapacityOffset;
+ProtectedFixedArray::ShapeT::kCapacityOffset;
static constexpr int kMaxLength = ProtectedFixedArray::kMaxCapacity;
static constexpr int kMaxRegularLength =
ProtectedFixedArray::kMaxRegularCapacity;
@@ -388,6 +389,7 @@ class PrimitiveArrayBase : public Super {
public:
using Shape = ShapeT;
+using HotfixShape = ShapeT;
static constexpr bool kElementsAreMaybeObject = false;
inline int length() const;
@@ -523,6 +525,8 @@ class WeakFixedArray
OBJECT_CONSTRUCTORS(WeakFixedArray, Super);
public:
+using Shape = WeakFixedArrayShape;
+using HotfixShape = WeakFixedArrayShape;
template <class IsolateT>
static inline Handle<WeakFixedArray> New(
IsolateT* isolate, int capacity,
@@ -534,7 +538,7 @@ class WeakFixedArray
class BodyDescriptor;
-static constexpr int kLengthOffset = Shape::kCapacityOffset;
+static constexpr int kLengthOffset = ShapeT::kCapacityOffset;
};
// WeakArrayList is like a WeakFixedArray with static convenience methods for
@@ -671,6 +675,7 @@ class ArrayList : public TaggedArrayBase<ArrayList, ArrayListShape> {
public:
using Shape = ArrayListShape;
+using HotfixShape = ArrayListShape;
template <class IsolateT>
static inline Handle<ArrayList> New(
@@ -742,6 +747,7 @@ class ByteArray : public PrimitiveArrayBase<ByteArray, ByteArrayShape> {
public:
using Shape = ByteArrayShape;
+using HotfixShape = ByteArrayShape;
template <class IsolateT>
static inline Handle<ByteArray> New(


@@ -170,7 +170,7 @@ template <typename Derived, typename Shape>
template <typename IsolateT>
InternalIndex HashTable<Derived, Shape>::FindEntry(IsolateT* isolate, Key key) {
ReadOnlyRoots roots(isolate);
-return FindEntry(isolate, roots, key, TodoShape::Hash(roots, key));
+return FindEntry(isolate, roots, key, Shape::Hash(roots, key));
}
// Find entry for key otherwise return kNotFound.
@@ -183,7 +183,7 @@ InternalIndex HashTable<Derived, Shape>::FindEntry(PtrComprCageBase cage_base,
uint32_t count = 1;
Tagged<Object> undefined = roots.undefined_value();
Tagged<Object> the_hole = roots.the_hole_value();
-DCHECK_EQ(TodoShape::Hash(roots, key), static_cast<uint32_t>(hash));
+DCHECK_EQ(Shape::Hash(roots, key), static_cast<uint32_t>(hash));
// EnsureCapacity will guarantee the hash table is never full.
for (InternalIndex entry = FirstProbe(hash, capacity);;
entry = NextProbe(entry, count++, capacity)) {
@@ -191,8 +191,8 @@ InternalIndex HashTable<Derived, Shape>::FindEntry(PtrComprCageBase cage_base,
// Empty entry. Uses raw unchecked accessors because it is called by the
// string table during bootstrapping.
if (element == undefined) return InternalIndex::NotFound();
-if (TodoShape::kMatchNeedsHoleCheck && element == the_hole) continue;
-if (TodoShape::IsMatch(key, element)) return entry;
+if (Shape::kMatchNeedsHoleCheck && element == the_hole) continue;
+if (Shape::IsMatch(key, element)) return entry;
}
}
@@ -216,7 +216,7 @@ bool HashTable<Derived, Shape>::ToKey(ReadOnlyRoots roots, InternalIndex entry,
Tagged<Object>* out_k) {
Tagged<Object> k = KeyAt(entry);
if (!IsKey(roots, k)) return false;
-*out_k = TodoShape::Unwrap(k);
+*out_k = Shape::Unwrap(k);
return true;
}
@@ -226,7 +226,7 @@ bool HashTable<Derived, Shape>::ToKey(PtrComprCageBase cage_base,
Tagged<Object>* out_k) {
Tagged<Object> k = KeyAt(cage_base, entry);
if (!IsKey(GetReadOnlyRoots(cage_base), k)) return false;
-*out_k = TodoShape::Unwrap(k);
+*out_k = Shape::Unwrap(k);
return true;
}


@@ -126,15 +126,15 @@ class V8_EXPORT_PRIVATE HashTableBase : public NON_EXPORTED_BASE(FixedArray) {
OBJECT_CONSTRUCTORS(HashTableBase, FixedArray);
};
-template <typename Derived, typename ShapeT>
+template <typename Derived, typename Shape>
class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) HashTable
: public HashTableBase {
public:
// TODO(jgruber): Derive from TaggedArrayBase instead of FixedArray, and
// merge with TaggedArraryBase's Shape class. Once the naming conflict is
// resolved rename all TodoShape occurrences back to Shape.
-using TodoShape = ShapeT;
-using Key = typename TodoShape::Key;
+using ShapeT = Shape;
+using Key = typename Shape::Key;
// Returns a new HashTable object.
template <typename IsolateT>
@@ -177,9 +177,8 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) HashTable
inline void SetKeyAt(InternalIndex entry, Tagged<Object> value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-static const int kElementsStartIndex =
-kPrefixStartIndex + TodoShape::kPrefixSize;
-static const int kEntrySize = TodoShape::kEntrySize;
+static const int kElementsStartIndex = kPrefixStartIndex + Shape::kPrefixSize;
+static const int kEntrySize = Shape::kEntrySize;
static_assert(kEntrySize > 0);
static const int kEntryKeyIndex = 0;
static const int kElementsStartOffset =


@@ -5137,11 +5137,11 @@ void HashTable<Derived, Shape>::Rehash(PtrComprCageBase cage_base,
uint32_t from_index = EntryToIndex(i);
Tagged<Object> k = this->get(from_index);
if (!IsKey(roots, k)) continue;
-uint32_t hash = TodoShape::HashForObject(roots, k);
+uint32_t hash = Shape::HashForObject(roots, k);
uint32_t insertion_index =
EntryToIndex(new_table->FindInsertionEntry(cage_base, roots, hash));
new_table->set_key(insertion_index, get(from_index), mode);
-for (int j = 1; j < TodoShape::kEntrySize; j++) {
+for (int j = 1; j < Shape::kEntrySize; j++) {
new_table->set(insertion_index + j, get(from_index + j), mode);
}
}
@@ -5154,7 +5154,7 @@ InternalIndex HashTable<Derived, Shape>::EntryForProbe(ReadOnlyRoots roots,
Tagged<Object> k,
int probe,
InternalIndex expected) {
-uint32_t hash = TodoShape::HashForObject(roots, k);
+uint32_t hash = Shape::HashForObject(roots, k);
uint32_t capacity = this->Capacity();
InternalIndex entry = FirstProbe(hash, capacity);
for (int i = 1; i < probe; i++) {
@@ -5169,17 +5169,17 @@ void HashTable<Derived, Shape>::Swap(InternalIndex entry1, InternalIndex entry2,
WriteBarrierMode mode) {
int index1 = EntryToIndex(entry1);
int index2 = EntryToIndex(entry2);
-Tagged<Object> temp[TodoShape::kEntrySize];
+Tagged<Object> temp[Shape::kEntrySize];
Derived* self = static_cast<Derived*>(this);
-for (int j = 0; j < TodoShape::kEntrySize; j++) {
+for (int j = 0; j < Shape::kEntrySize; j++) {
temp[j] = get(index1 + j);
}
self->set_key(index1, get(index2), mode);
-for (int j = 1; j < TodoShape::kEntrySize; j++) {
+for (int j = 1; j < Shape::kEntrySize; j++) {
set(index1 + j, get(index2 + j), mode);
}
self->set_key(index2, temp[0], mode);
-for (int j = 1; j < TodoShape::kEntrySize; j++) {
+for (int j = 1; j < Shape::kEntrySize; j++) {
set(index2 + j, temp[j], mode);
}
}
@@ -5341,7 +5341,7 @@ GlobalDictionary::TryFindPropertyCellForConcurrentLookupIterator(
DisallowGarbageCollection no_gc;
PtrComprCageBase cage_base{isolate};
ReadOnlyRoots roots(isolate);
-const int32_t hash = TodoShape::Hash(roots, name);
+const int32_t hash = ShapeT::Hash(roots, name);
const uint32_t capacity = Capacity();
uint32_t count = 1;
Tagged<Object> undefined = roots.undefined_value();
@@ -5352,8 +5352,8 @@ GlobalDictionary::TryFindPropertyCellForConcurrentLookupIterator(
Tagged<Object> element = KeyAt(cage_base, entry, kRelaxedLoad);
if (isolate->heap()->IsPendingAllocation(element)) return {};
if (element == undefined) return {};
-if (TodoShape::kMatchNeedsHoleCheck && element == the_hole) continue;
-if (!TodoShape::IsMatch(name, element)) continue;
+if (ShapeT::kMatchNeedsHoleCheck && element == the_hole) continue;
+if (!ShapeT::IsMatch(name, element)) continue;
CHECK(IsPropertyCell(element, cage_base));
return PropertyCell::cast(element);
}
@@ -5367,7 +5367,7 @@ Handle<StringSet> StringSet::Add(Isolate* isolate, Handle<StringSet> stringset,
Handle<String> name) {
if (!stringset->Has(isolate, name)) {
stringset = EnsureCapacity(isolate, stringset);
-uint32_t hash = TodoShape::Hash(ReadOnlyRoots(isolate), *name);
+uint32_t hash = ShapeT::Hash(ReadOnlyRoots(isolate), *name);
InternalIndex entry = stringset->FindInsertionEntry(isolate, hash);
stringset->set(EntryToIndex(entry), *name);
stringset->ElementAdded();
@@ -5386,7 +5386,7 @@ Handle<RegisteredSymbolTable> RegisteredSymbolTable::Add(
SLOW_DCHECK(table->FindEntry(isolate, key).is_not_found());
table = EnsureCapacity(isolate, table);
-uint32_t hash = TodoShape::Hash(ReadOnlyRoots(isolate), key);
+uint32_t hash = ShapeT::Hash(ReadOnlyRoots(isolate), key);
InternalIndex entry = table->FindInsertionEntry(isolate, hash);
table->set(EntryToIndex(entry), *key);
table->set(EntryToValueIndex(entry), *symbol);
@@ -5455,7 +5455,7 @@ int BaseNameDictionary<Derived, Shape>::NextEnumerationIndex(
template <typename Derived, typename Shape>
Handle<Derived> Dictionary<Derived, Shape>::DeleteEntry(
Isolate* isolate, Handle<Derived> dictionary, InternalIndex entry) {
-DCHECK(TodoShape::kEntrySize != 3 ||
+DCHECK(Shape::kEntrySize != 3 ||
dictionary->DetailsAt(entry).IsConfigurable());
dictionary->ClearEntry(entry);
dictionary->ElementRemoved();
@@ -5476,7 +5476,7 @@ Handle<Derived> Dictionary<Derived, Shape>::AtPut(Isolate* isolate,
// We don't need to copy over the enumeration index.
dictionary->ValueAtPut(entry, *value);
-if (TodoShape::kEntrySize == 3) dictionary->DetailsAtPut(entry, details);
+if (Shape::kEntrySize == 3) dictionary->DetailsAtPut(entry, details);
return dictionary;
}
@@ -5493,7 +5493,7 @@ void Dictionary<Derived, Shape>::UncheckedAtPut(Isolate* isolate,
} else {
// We don't need to copy over the enumeration index.
dictionary->ValueAtPut(entry, *value);
-if (TodoShape::kEntrySize == 3) dictionary->DetailsAtPut(entry, details);
+if (Shape::kEntrySize == 3) dictionary->DetailsAtPut(entry, details);
}
}
@@ -5534,19 +5534,19 @@ Handle<Derived> Dictionary<Derived, Shape>::Add(IsolateT* isolate,
PropertyDetails details,
InternalIndex* entry_out) {
ReadOnlyRoots roots(isolate);
-uint32_t hash = TodoShape::Hash(roots, key);
+uint32_t hash = Shape::Hash(roots, key);
// Validate that the key is absent.
SLOW_DCHECK(dictionary->FindEntry(isolate, key).is_not_found());
// Check whether the dictionary should be extended.
dictionary = Derived::EnsureCapacity(isolate, dictionary);
// Compute the key object.
-Handle<Object> k = TodoShape::template AsHandle<key_allocation>(isolate, key);
+Handle<Object> k = Shape::template AsHandle<key_allocation>(isolate, key);
InternalIndex entry = dictionary->FindInsertionEntry(isolate, roots, hash);
dictionary->SetEntry(entry, *k, *value, details);
DCHECK(IsNumber(dictionary->KeyAt(isolate, entry)) ||
-IsUniqueName(TodoShape::Unwrap(dictionary->KeyAt(isolate, entry))));
+IsUniqueName(Shape::Unwrap(dictionary->KeyAt(isolate, entry))));
dictionary->ElementAdded();
if (entry_out) *entry_out = entry;
return dictionary;
@@ -5559,18 +5559,18 @@ void Dictionary<Derived, Shape>::UncheckedAdd(IsolateT* isolate,
Key key, Handle<Object> value,
PropertyDetails details) {
ReadOnlyRoots roots(isolate);
-uint32_t hash = TodoShape::Hash(roots, key);
+uint32_t hash = Shape::Hash(roots, key);
// Validate that the key is absent and we capacity is sufficient.
SLOW_DCHECK(dictionary->FindEntry(isolate, key).is_not_found());
DCHECK(dictionary->HasSufficientCapacityToAdd(1));
// Compute the key object.
-Handle<Object> k = TodoShape::template AsHandle<key_allocation>(isolate, key);
+Handle<Object> k = Shape::template AsHandle<key_allocation>(isolate, key);
InternalIndex entry = dictionary->FindInsertionEntry(isolate, roots, hash);
dictionary->SetEntry(entry, *k, *value, details);
DCHECK(IsNumber(dictionary->KeyAt(isolate, entry)) ||
-IsUniqueName(TodoShape::Unwrap(dictionary->KeyAt(isolate, entry))));
+IsUniqueName(Shape::Unwrap(dictionary->KeyAt(isolate, entry))));
}
template <typename Derived, typename Shape>


@@ -100,12 +100,10 @@ static_assert(sizeof(UnalignedDoubleMember) == sizeof(double));
#define FLEXIBLE_ARRAY_MEMBER(Type, name) \
using FlexibleDataReturnType = Type[0]; \
FlexibleDataReturnType& name() { \
-static_assert(alignof(Type) <= alignof(decltype(*this))); \
using ReturnType = Type[0]; \
return reinterpret_cast<ReturnType&>(*(this + 1)); \
} \
const FlexibleDataReturnType& name() const { \
-static_assert(alignof(Type) <= alignof(decltype(*this))); \
using ReturnType = Type[0]; \
return reinterpret_cast<const ReturnType&>(*(this + 1)); \
} \


@@ -54,7 +54,7 @@ Handle<JSArray> TemplateObjectDescription::GetTemplateObject(
// Check the template weakmap to see if the template object already exists.
Handle<Script> script(Script::cast(shared_info->script(isolate)), isolate);
int32_t hash =
-EphemeronHashTable::TodoShape::Hash(ReadOnlyRoots(isolate), script);
+EphemeronHashTable::ShapeT::Hash(ReadOnlyRoots(isolate), script);
MaybeHandle<ArrayList> maybe_cached_templates;
if (!IsUndefined(native_context->template_weakmap(), isolate)) {


@@ -930,6 +930,13 @@
],
}],
],
+'msvs_settings': {
+'VCCLCompilerTool': {
+'AdditionalOptions': [
+'/bigobj'
+],
+},
+},
}, # v8_turboshaft
{
'target_name': 'v8_compiler_for_mksnapshot',