[RISC-V][LoongArch64] Handle reversed fields in lowered structs #115933

Open: wants to merge 2 commits into main

10 changes: 10 additions & 0 deletions src/coreclr/jit/codegenloongarch64.cpp
@@ -5846,6 +5846,16 @@ void CodeGen::genCallInstruction(GenTreeCall* call)
{
params.retSize = emitTypeSize(pRetTypeDesc->GetReturnRegType(0));
params.secondRetSize = emitTypeSize(pRetTypeDesc->GetReturnRegType(1));

if (pRetTypeDesc->GetABIReturnReg(1, call->GetUnmanagedCallConv()) == REG_INTRET)
{
// If the second return register is REG_INTRET, then the first return is expected to be in a floating
// register. The emitter has hardcoded belief that params.retSize corresponds to REG_INTRET and
// secondRetSize to REG_INTRET_1, so fix up the situation here.
assert(!EA_IS_GCREF_OR_BYREF(params.retSize));
params.retSize = params.secondRetSize;
params.secondRetSize = EA_UNKNOWN;
}
}
else
{
10 changes: 10 additions & 0 deletions src/coreclr/jit/codegenriscv64.cpp
@@ -6035,6 +6035,16 @@ void CodeGen::genCallInstruction(GenTreeCall* call)
{
params.retSize = emitTypeSize(pRetTypeDesc->GetReturnRegType(0));
params.secondRetSize = emitTypeSize(pRetTypeDesc->GetReturnRegType(1));

if (pRetTypeDesc->GetABIReturnReg(1, call->GetUnmanagedCallConv()) == REG_INTRET)
{
// If the second return register is REG_INTRET, then the first return is expected to be in a floating
// register. The emitter has hardcoded belief that params.retSize corresponds to REG_INTRET and
// secondRetSize to REG_INTRET_1, so fix up the situation here.
assert(!EA_IS_GCREF_OR_BYREF(params.retSize));
params.retSize = params.secondRetSize;
params.secondRetSize = EA_UNKNOWN;
}
}
else
{
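
For context, a minimal sketch (not part of the PR) of the case the codegen change above targets. When a struct's first flattened field is floating-point and its second is integer, the lowered return uses FA0 for the first field and A0 for the second, so the integer half lands in REG_INTRET even though it is the second return register; the fix rewrites params.retSize/secondRetSize to match the emitter's fixed mapping of retSize to REG_INTRET. The struct below is a hypothetical illustration of such a shape.

```csharp
using System.Runtime.InteropServices;

// Hypothetical example: one float followed by one integer in memory.
// Under the RISC-V/LoongArch64 hardware floating-point calling conventions this
// is returned as (FA0: F, A0: L), i.e. the *second* ABI return register is
// REG_INTRET, which is the condition checked by the codegen change above.
[StructLayout(LayoutKind.Sequential)]
struct FloatThenLong
{
    public float F; // returned in FA0
    public long  L; // returned in A0 (REG_INTRET)
}
```
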
Original file line number Diff line number Diff line change
@@ -78,7 +78,8 @@ private static void SetFpStructInRegistersInfoField(ref FpStructInRegistersInfo
(index == 0 ? ref info.offset1st : ref info.offset2nd) = offset;
}

private static bool HandleInlineArray(int elementTypeIndex, int nElements, ref FpStructInRegistersInfo info, ref int typeIndex)
private static bool HandleInlineArray(int elementTypeIndex, int nElements,
ref FpStructInRegistersInfo info, ref int typeIndex, ref uint occupiedBytesMap)
{
int nFlattenedFieldsPerElement = typeIndex - elementTypeIndex;
if (nFlattenedFieldsPerElement == 0)
@@ -110,6 +111,16 @@ private static bool HandleInlineArray(int elementTypeIndex, int nElements, ref F
int sizeShiftMask = (int)(info.flags & FpStruct.SizeShift1stMask) << 2;
info.flags |= (FpStruct)(floatFlag | sizeShiftMask); // merge with 1st field
info.offset2nd = info.offset1st + info.Size1st(); // bump up the field offset

Debug.Assert(info.Size1st() == info.Size2nd());
uint startOffset = info.offset2nd;
uint endOffset = startOffset + info.Size2nd();

uint fieldOccupation = (~0u << (int)startOffset) ^ (~0u << (int)endOffset);
if ((occupiedBytesMap & fieldOccupation) != 0)
return false; // duplicated array element overlaps with other fields

occupiedBytesMap |= fieldOccupation;
}
return true;
}
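
Not part of the diff: a small sketch of the byte-occupancy mask used above (and in FlattenFields below). Each bit marks one byte of the struct as occupied; (~0u << start) ^ (~0u << end) produces a mask with exactly the bits in [start, end) set, so a non-zero intersection with previously recorded bits means two fields overlap and the struct is treated as a union. A 32-bit map suffices because only structs small enough to be passed in two registers reach this code. The helper name and the offsets below are hypothetical.

```csharp
// Sketch of the overlap check with a byte-occupancy bitmask (assumes offsets < 32).
static bool TryOccupy(ref uint occupiedBytesMap, uint startOffset, uint endOffset)
{
    // Bits [startOffset, endOffset) set, everything else clear.
    uint fieldOccupation = (~0u << (int)startOffset) ^ (~0u << (int)endOffset);
    if ((occupiedBytesMap & fieldOccupation) != 0)
        return false; // overlaps a previously recorded field
    occupiedBytesMap |= fieldOccupation;
    return true;
}

// Example: a long at [0..8) and a float at [4..8) overlap.
// uint map = 0;
// TryOccupy(ref map, 0, 8); // true,  map == 0x0000_00FF
// TryOccupy(ref map, 4, 8); // false, bits 4..7 are already set
```
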
@@ -119,23 +130,30 @@ private static bool FlattenFields(TypeDesc td, uint offset, ref FpStructInRegist
IEnumerable<FieldDesc> fields = td.GetFields();
int nFields = 0;
int elementTypeIndex = typeIndex;
FieldDesc prevField = null;
FieldDesc lastField = null;
uint occupiedBytesMap = 0;
foreach (FieldDesc field in fields)
{
if (field.IsStatic)
continue;
nFields++;

if (prevField != null && prevField.Offset.AsInt + prevField.FieldType.GetElementSize().AsInt > field.Offset.AsInt)
uint startOffset = offset + (uint)field.Offset.AsInt;
uint endOffset = startOffset + (uint)field.FieldType.GetElementSize().AsInt;

uint fieldOccupation = (~0u << (int)startOffset) ^ (~0u << (int)endOffset);
if ((occupiedBytesMap & fieldOccupation) != 0)
return false; // fields overlap, treat as union

prevField = field;
occupiedBytesMap |= fieldOccupation;

lastField = field;

TypeFlags category = field.FieldType.Category;
if (category == TypeFlags.ValueType)
{
TypeDesc nested = field.FieldType;
if (!FlattenFields(nested, offset + (uint)field.Offset.AsInt, ref info, ref typeIndex))
if (!FlattenFields(nested, startOffset, ref info, ref typeIndex))
return false;
}
else if (field.FieldType.GetElementSize().AsInt <= TARGET_POINTER_SIZE)
@@ -145,7 +163,7 @@ private static bool FlattenFields(TypeDesc td, uint offset, ref FpStructInRegist

bool isFloating = category is TypeFlags.Single or TypeFlags.Double;
SetFpStructInRegistersInfoField(ref info, typeIndex++,
isFloating, (uint)field.FieldType.GetElementSize().AsInt, offset + (uint)field.Offset.AsInt);
isFloating, (uint)field.FieldType.GetElementSize().AsInt, startOffset);
}
else
{
@@ -156,7 +174,7 @@ private static bool FlattenFields(TypeDesc td, uint offset, ref FpStructInRegist
if ((td as MetadataType).HasImpliedRepeatedFields())
{
Debug.Assert(nFields == 1);
int nElements = td.GetElementSize().AsInt / prevField.FieldType.GetElementSize().AsInt;
int nElements = td.GetElementSize().AsInt / lastField.FieldType.GetElementSize().AsInt;

// Only InlineArrays can have element type of empty struct, fixed-size buffers take only primitives
if ((typeIndex - elementTypeIndex) == 0 && (td as MetadataType).IsInlineArray)
@@ -165,7 +183,7 @@ private static bool FlattenFields(TypeDesc td, uint offset, ref FpStructInRegist
return false; // struct containing an array of empty structs is passed by integer calling convention
}

if (!HandleInlineArray(elementTypeIndex, nElements, ref info, ref typeIndex))
if (!HandleInlineArray(elementTypeIndex, nElements, ref info, ref typeIndex, ref occupiedBytesMap))
return false;
}
return true;
@@ -189,6 +207,20 @@ public static FpStructInRegistersInfo GetFpStructInRegistersInfo(TypeDesc td, Ta
return new FpStructInRegistersInfo{}; // struct has no floating fields

Debug.Assert(nFields == 1 || nFields == 2);
if (nFields == 2 && info.offset1st > info.offset2nd)
{
// swap fields to match memory order
info.flags = (FpStruct)(
((uint)(info.flags & FloatInt) << (PosIntFloat - PosFloatInt)) |
((uint)(info.flags & IntFloat) >> (PosIntFloat - PosFloatInt)) |
((uint)(info.flags & SizeShift1stMask) << (PosSizeShift2nd - PosSizeShift1st)) |
((uint)(info.flags & SizeShift2ndMask) >> (PosSizeShift2nd - PosSizeShift1st))
);
(info.offset2nd, info.offset1st) = (info.offset1st, info.offset2nd);
}
Debug.Assert((info.flags & (OnlyOne | BothFloat)) == 0);
Debug.Assert((info.flags & FloatInt) == 0 || info.Size1st() == sizeof(float) || info.Size1st() == sizeof(double));
Debug.Assert((info.flags & IntFloat) == 0 || info.Size2nd() == sizeof(float) || info.Size2nd() == sizeof(double));

if ((info.flags & (FloatInt | IntFloat)) == (FloatInt | IntFloat))
{
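
A hypothetical example (not from the PR) of the "reversed fields" the new swap above handles: with explicit layout, the flattening order (declaration order) can disagree with memory order, leaving offset1st greater than offset2nd until the new code swaps the two entries, together with the FloatInt/IntFloat and SizeShift flag bits, back into memory order.

```csharp
using System.Runtime.InteropServices;

// Hypothetical: declared order is D then L, but memory order is L then D.
// Flattening visits D first, so before the swap the info records
// offset1st == 8 (double) and offset2nd == 0 (long); the swap reorders it to
// IntFloat with the long first, matching how the struct sits in memory.
[StructLayout(LayoutKind.Explicit)]
struct ReversedFields
{
    [FieldOffset(8)] public double D;
    [FieldOffset(0)] public long   L;
}
```
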
89 changes: 61 additions & 28 deletions src/coreclr/vm/methodtable.cpp
@@ -2716,8 +2716,8 @@ static void SetFpStructInRegistersInfoField(FpStructInRegistersInfo& info, int i
(index == 0 ? info.offset1st : info.offset2nd) = offset;
}

static bool HandleInlineArray(int elementTypeIndex, int nElements, FpStructInRegistersInfo& info, int& typeIndex
DEBUG_ARG(int nestingLevel))
static bool HandleInlineArray(int elementTypeIndex, int nElements,
FpStructInRegistersInfo& info, int& typeIndex, uint32_t& occupiedBytesMap DEBUG_ARG(int nestingLevel))
{
int nFlattenedFieldsPerElement = typeIndex - elementTypeIndex;
if (nFlattenedFieldsPerElement == 0)
@@ -2759,45 +2759,65 @@ static bool HandleInlineArray(int elementTypeIndex, int nElements, FpStructInReg
int sizeShiftMask = (info.flags & FpStruct::SizeShift1stMask) << 2;
info.flags = FpStruct::Flags(info.flags | floatFlag | sizeShiftMask); // merge with 1st field
info.offset2nd = info.offset1st + info.Size1st(); // bump up the field offset

assert(info.Size1st() == info.Size2nd());
uint32_t startOffset = info.offset2nd;
uint32_t endOffset = startOffset + info.Size2nd();

uint32_t fieldOccupation = (~0u << startOffset) ^ (~0u << endOffset);
if ((occupiedBytesMap & fieldOccupation) != 0)
{
LOG((LF_JIT, LL_EVERYTHING, "FpStructInRegistersInfo:%*s "
" * duplicated array element [%i..%i) overlaps with other fields (occupied bytes map: 0x%04x), treat as union\n",
nestingLevel * 4, "", startOffset, endOffset, occupiedBytesMap));
return false;
}
occupiedBytesMap |= fieldOccupation;

LOG((LF_JIT, LL_EVERYTHING, "FpStructInRegistersInfo:%*s * duplicated array element type\n",
nestingLevel * 4, ""));
}
return true;
}

static bool FlattenFields(TypeHandle th, uint32_t offset, FpStructInRegistersInfo& info, int& typeIndex
static bool FlattenFields(TypeHandle th, uint32_t structOffset, FpStructInRegistersInfo& info, int& typeIndex
DEBUG_ARG(int nestingLevel))
{
bool isManaged = !th.IsTypeDesc();
MethodTable* pMT = isManaged ? th.AsMethodTable() : th.AsNativeValueType();
int nFields = isManaged ? pMT->GetNumIntroducedInstanceFields() : pMT->GetNativeLayoutInfo()->GetNumFields();

LOG((LF_JIT, LL_EVERYTHING, "FpStructInRegistersInfo:%*s flattening %s (%s, %i fields)\n",
nestingLevel * 4, "", pMT->GetDebugClassName(), (isManaged ? "managed" : "native"), nFields));
LOG((LF_JIT, LL_EVERYTHING, "FpStructInRegistersInfo:%*s flattening %s (%s, %i fields) at offset %u\n",
nestingLevel * 4, "", pMT->GetDebugClassName(), (isManaged ? "managed" : "native"), nFields, structOffset));

// TODO: templatize isManaged and use if constexpr for differences when we migrate to C++17
// because the logic for both branches is nearly the same.

uint32_t occupiedBytesMap = 0;
if (isManaged)
{
FieldDesc* fields = pMT->GetApproxFieldDescListRaw();
int elementTypeIndex = typeIndex;
for (int i = 0; i < nFields; ++i)
{
if (i > 0 && fields[i-1].GetOffset() + fields[i-1].GetSize() > fields[i].GetOffset())
uint32_t startOffset = structOffset + fields[i].GetOffset();
uint32_t endOffset = startOffset + fields[i].GetSize();

uint32_t fieldOccupation = (~0u << startOffset) ^ (~0u << endOffset);
if ((occupiedBytesMap & fieldOccupation) != 0)
{
LOG((LF_JIT, LL_EVERYTHING, "FpStructInRegistersInfo:%*s "
" * fields %s [%i..%i) and %s [%i..%i) overlap, treat as union\n",
nestingLevel * 4, "",
fields[i-1].GetDebugName(), fields[i-1].GetOffset(), fields[i-1].GetOffset() + fields[i-1].GetSize(),
fields[i].GetDebugName(), fields[i].GetOffset(), fields[i].GetOffset() + fields[i].GetSize()));
" * field %s [%i..%i) overlaps with other fields (occupied bytes map: 0x%04x), treat as union\n",
nestingLevel * 4, "", fields[i].GetDebugName(), startOffset, endOffset, occupiedBytesMap));
return false;
}
occupiedBytesMap |= fieldOccupation;

CorElementType type = fields[i].GetFieldType();
if (type == ELEMENT_TYPE_VALUETYPE)
{
MethodTable* nested = fields[i].GetApproxFieldTypeHandleThrowing().GetMethodTable();
if (!FlattenFields(TypeHandle(nested), offset + fields[i].GetOffset(), info, typeIndex DEBUG_ARG(nestingLevel + 1)))
if (!FlattenFields(TypeHandle(nested), startOffset, info, typeIndex DEBUG_ARG(nestingLevel + 1)))
return false;
}
else if (fields[i].GetSize() <= TARGET_POINTER_SIZE)
@@ -2810,12 +2830,10 @@ static bool FlattenFields(TypeHandle th, uint32_t offset, FpStructInRegistersInf
}

bool isFloating = CorTypeInfo::IsFloat_NoThrow(type);
SetFpStructInRegistersInfoField(info, typeIndex++,
isFloating, CorTypeInfo::Size_NoThrow(type), offset + fields[i].GetOffset());
SetFpStructInRegistersInfoField(info, typeIndex++, isFloating, CorTypeInfo::Size_NoThrow(type), startOffset);

LOG((LF_JIT, LL_EVERYTHING, "FpStructInRegistersInfo:%*s * found field %s [%i..%i), type: %s\n",
nestingLevel * 4, "", fields[i].GetDebugName(),
fields[i].GetOffset(), fields[i].GetOffset() + fields[i].GetSize(), CorTypeInfo::GetName(type)));
nestingLevel * 4, "", fields[i].GetDebugName(), startOffset, endOffset, CorTypeInfo::GetName(type)));
}
else
{
@@ -2842,7 +2860,7 @@ static bool FlattenFields(TypeHandle th, uint32_t offset, FpStructInRegistersInf
return false;
}

if (!HandleInlineArray(elementTypeIndex, nElements, info, typeIndex DEBUG_ARG(nestingLevel + 1)))
if (!HandleInlineArray(elementTypeIndex, nElements, info, typeIndex, occupiedBytesMap DEBUG_ARG(nestingLevel + 1)))
return false;
}
}
@@ -2851,15 +2869,18 @@ static bool FlattenFields(TypeHandle th, uint32_t offset, FpStructInRegistersInf
const NativeFieldDescriptor* fields = pMT->GetNativeLayoutInfo()->GetNativeFieldDescriptors();
for (int i = 0; i < nFields; ++i)
{
if (i > 0 && fields[i-1].GetExternalOffset() + fields[i-1].NativeSize() > fields[i].GetExternalOffset())
uint32_t startOffset = structOffset + fields[i].GetExternalOffset();
uint32_t endOffset = startOffset + fields[i].NativeSize();

uint32_t fieldOccupation = (~0u << startOffset) ^ (~0u << endOffset);
if ((occupiedBytesMap & fieldOccupation) != 0)
{
LOG((LF_JIT, LL_EVERYTHING, "FpStructInRegistersInfo:%*s "
" * fields %s [%i..%i) and %s [%i..%i) overlap, treat as union\n",
nestingLevel * 4, "",
fields[i-1].GetFieldDesc()->GetDebugName(), fields[i-1].GetExternalOffset(), fields[i-1].GetExternalOffset() + fields[i-1].NativeSize(),
fields[i].GetFieldDesc()->GetDebugName(), fields[i].GetExternalOffset(), fields[i].GetExternalOffset() + fields[i].NativeSize()));
" * field %s [%i..%i) overlaps with other fields (occupied bytes map: 0x%04x), treat as union\n",
nestingLevel * 4, "", fields[i].GetFieldDesc()->GetDebugName(), startOffset, endOffset));
return false;
}
occupiedBytesMap |= fieldOccupation;

static const char* categoryNames[] = {"FLOAT", "NESTED", "INTEGER", "ILLEGAL"};
NativeFieldCategory category = fields[i].GetCategory();
Expand All @@ -2868,12 +2889,12 @@ static bool FlattenFields(TypeHandle th, uint32_t offset, FpStructInRegistersInf
int elementTypeIndex = typeIndex;

MethodTable* nested = fields[i].GetNestedNativeMethodTable();
if (!FlattenFields(TypeHandle(nested), offset + fields[i].GetExternalOffset(), info, typeIndex DEBUG_ARG(nestingLevel + 1)))
if (!FlattenFields(TypeHandle(nested), startOffset, info, typeIndex DEBUG_ARG(nestingLevel + 1)))
return false;

// In native layout fixed arrays are marked as NESTED just like structs
int nElements = fields[i].GetNumElements();
if (!HandleInlineArray(elementTypeIndex, nElements, info, typeIndex DEBUG_ARG(nestingLevel + 1)))
if (!HandleInlineArray(elementTypeIndex, nElements, info, typeIndex, occupiedBytesMap DEBUG_ARG(nestingLevel + 1)))
return false;
}
else if (fields[i].NativeSize() <= TARGET_POINTER_SIZE)
@@ -2886,13 +2907,10 @@ static bool FlattenFields(TypeHandle th, uint32_t offset, FpStructInRegistersInf
}

bool isFloating = (category == NativeFieldCategory::FLOAT);

SetFpStructInRegistersInfoField(info, typeIndex++,
isFloating, fields[i].NativeSize(), offset + fields[i].GetExternalOffset());
SetFpStructInRegistersInfoField(info, typeIndex++, isFloating, fields[i].NativeSize(), startOffset);

LOG((LF_JIT, LL_EVERYTHING, "FpStructInRegistersInfo:%*s * found field %s [%i..%i), type: %s\n",
nestingLevel * 4, "", fields[i].GetFieldDesc()->GetDebugName(),
fields[i].GetExternalOffset(), fields[i].GetExternalOffset() + fields[i].NativeSize(), categoryNames[(int)category]));
nestingLevel * 4, "", fields[i].GetFieldDesc()->GetDebugName(), startOffset, endOffset, categoryNames[(int)category]));
}
else
{
Expand Down Expand Up @@ -2930,6 +2948,21 @@ FpStructInRegistersInfo MethodTable::GetFpStructInRegistersInfo(TypeHandle th)
}

assert(nFields == 1 || nFields == 2);
if (nFields == 2 && info.offset1st > info.offset2nd)
{
LOG((LF_JIT, LL_EVERYTHING, "FpStructInRegistersInfo: struct %s (%u bytes): swap fields to match memory order\n",
(!th.IsTypeDesc() ? th.AsMethodTable() : th.AsNativeValueType())->GetDebugClassName(), th.GetSize()));
info.flags = FpStruct::Flags(
((info.flags & FloatInt) << (PosIntFloat - PosFloatInt)) |
((info.flags & IntFloat) >> (PosIntFloat - PosFloatInt)) |
((info.flags & SizeShift1stMask) << (PosSizeShift2nd - PosSizeShift1st)) |
((info.flags & SizeShift2ndMask) >> (PosSizeShift2nd - PosSizeShift1st))
);
std::swap(info.offset1st, info.offset2nd);
}
Comment on lines +2951 to +2962
@tomeksowi (Contributor, Author) commented on May 23, 2025:

The order matters for two floats and the calling convention wasn't clear. The only precedent I could find was GCC/gnat compiling Ada, of all places; I made a proposal following it: riscv-non-isa/riscv-elf-psabi-doc#463. LoongArch gnat also behaves this way.

For int-float structs it doesn't matter but I'm ordering the fields as they come in memory anyway for consistency.
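
As an illustration of the two-float case discussed above (hypothetical struct, assuming the memory-order assignment proposed in riscv-non-isa/riscv-elf-psabi-doc#463): the field at the lower offset goes into the first floating-point register regardless of declaration order, which is what the swap to memory order guarantees.

```csharp
using System.Runtime.InteropServices;

// Hypothetical: two floats whose declaration order is reversed relative to memory order.
// Under the memory-order rule referenced above, B (offset 0) goes to FA0 and
// A (offset 4) to FA1.
[StructLayout(LayoutKind.Explicit)]
struct TwoFloatsReversed
{
    [FieldOffset(4)] public float A;
    [FieldOffset(0)] public float B;
}
```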

assert((info.flags & (OnlyOne | BothFloat)) == 0);
assert((info.flags & FloatInt) == 0 || info.Size1st() == sizeof(float) || info.Size1st() == sizeof(double));
assert((info.flags & IntFloat) == 0 || info.Size2nd() == sizeof(float) || info.Size2nd() == sizeof(double));

if ((info.flags & (FloatInt | IntFloat)) == (FloatInt | IntFloat))
{