[llvm] Improve llvm.objectsize computation by computing GEP, alloca and malloc parameters bound (#117849)

Using a naive expression walker, it is possible to compute valuable size
bounds for allocation functions, GEP, and alloca, even in the presence of
some dynamic information.

We don't rely on computeConstantRange, to avoid taking advantage of
undefined behavior, which would be counter-productive with respect to
typical llvm.objectsize usage.

llvm.objectsize plays an important role in _FORTIFY_SOURCE definitions,
so improving its precision in turn improves the security of compiled
applications.
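
As a concrete illustration (mine, not part of the commit; the constants mirror the select_malloc_size test below), here is a source-level pattern the static evaluation can now resolve:

#include <cstddef>
#include <cstdlib>

// With this change, __builtin_object_size can fold even when the
// allocation size is only one of several known constants.
std::size_t bound(bool flag) {
  char *p = static_cast<char *>(std::malloc(flag ? 3 : 4));
  // Type 0 requests an upper bound: the walker can aggregate {3, 4}
  // through the select and fold this to 4 instead of (size_t)-1.
  std::size_t n = __builtin_object_size(p, 0);
  std::free(p);
  return n;
}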

As a side note, as a result of recent optimization improvements, clang
no longer passes
https://github.com/serge-sans-paille/builtin_object_size-test-suite.
This commit restores the situation and greatly broadens the scope of
code handled by the static version of __builtin_object_size.

This is a recommit of #115522 with the fix applied.
serge-sans-paille authored Dec 10, 2024
1 parent ef2e590 commit f8c1a22
Showing 3 changed files with 217 additions and 8 deletions.
12 changes: 8 additions & 4 deletions llvm/include/llvm/IR/Value.h
@@ -723,12 +723,16 @@ class Value {
       bool AllowInvariantGroup = false,
       function_ref<bool(Value &Value, APInt &Offset)> ExternalAnalysis =
           nullptr) const;
-  Value *stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset,
-                                           bool AllowNonInbounds,
-                                           bool AllowInvariantGroup = false) {
+
+  Value *stripAndAccumulateConstantOffsets(
+      const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
+      bool AllowInvariantGroup = false,
+      function_ref<bool(Value &Value, APInt &Offset)> ExternalAnalysis =
+          nullptr) {
     return const_cast<Value *>(
         static_cast<const Value *>(this)->stripAndAccumulateConstantOffsets(
-            DL, Offset, AllowNonInbounds, AllowInvariantGroup));
+            DL, Offset, AllowNonInbounds, AllowInvariantGroup,
+            ExternalAnalysis));
   }

/// This is a wrapper around stripAndAccumulateConstantOffsets with the
104 changes: 100 additions & 4 deletions llvm/lib/Analysis/MemoryBuiltins.cpp
Expand Up @@ -670,6 +670,65 @@ STATISTIC(ObjectVisitorArgument,
STATISTIC(ObjectVisitorLoad,
"Number of load instructions with unsolved size and offset");

static std::optional<APInt>
combinePossibleConstantValues(std::optional<APInt> LHS,
std::optional<APInt> RHS,
ObjectSizeOpts::Mode EvalMode) {
if (!LHS || !RHS)
return std::nullopt;
if (EvalMode == ObjectSizeOpts::Mode::Max)
return LHS->sge(*RHS) ? *LHS : *RHS;
else
return LHS->sle(*RHS) ? *LHS : *RHS;
}

static std::optional<APInt> aggregatePossibleConstantValuesImpl(
const Value *V, ObjectSizeOpts::Mode EvalMode, unsigned recursionDepth) {
constexpr unsigned maxRecursionDepth = 4;
if (recursionDepth == maxRecursionDepth)
return std::nullopt;

if (const auto *CI = dyn_cast<ConstantInt>(V)) {
return CI->getValue();
} else if (const auto *SI = dyn_cast<SelectInst>(V)) {
return combinePossibleConstantValues(
aggregatePossibleConstantValuesImpl(SI->getTrueValue(), EvalMode,
recursionDepth + 1),
aggregatePossibleConstantValuesImpl(SI->getFalseValue(), EvalMode,
recursionDepth + 1),
EvalMode);
} else if (const auto *PN = dyn_cast<PHINode>(V)) {
unsigned Count = PN->getNumIncomingValues();
if (Count == 0)
return std::nullopt;
auto Acc = aggregatePossibleConstantValuesImpl(
PN->getIncomingValue(0), EvalMode, recursionDepth + 1);
for (unsigned I = 1; Acc && I < Count; ++I) {
auto Tmp = aggregatePossibleConstantValuesImpl(
PN->getIncomingValue(I), EvalMode, recursionDepth + 1);
Acc = combinePossibleConstantValues(Acc, Tmp, EvalMode);
}
return Acc;
}

return std::nullopt;
}

static std::optional<APInt>
aggregatePossibleConstantValues(const Value *V, ObjectSizeOpts::Mode EvalMode) {
if (auto *CI = dyn_cast<ConstantInt>(V))
return CI->getValue();

if (EvalMode != ObjectSizeOpts::Mode::Min &&
EvalMode != ObjectSizeOpts::Mode::Max)
return std::nullopt;

// Not using computeConstantRange here because we cannot guarantee it's not
// doing optimization based on UB which we want to avoid when expanding
// __builtin_object_size.
return aggregatePossibleConstantValuesImpl(V, EvalMode, 0u);
}
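
As a rough standalone model of the two helpers above (simplified, with illustrative names; the real code works on ConstantInt, SelectInst, and PHINode using signed APInt comparisons): a value is either a known constant or a choice among alternatives, alternatives fold with max in Max mode and min in Min mode, and the walk gives up past a fixed depth or on any unknown operand.

#include <algorithm>
#include <cstdint>
#include <optional>
#include <vector>

// Illustrative model only. A Node is either a known constant or a
// choice between alternatives (standing in for select/phi operands).
struct Node {
  std::optional<int64_t> Constant;
  std::vector<const Node *> Choices;
};

enum class Mode { Min, Max };

std::optional<int64_t> aggregate(const Node &N, Mode M, unsigned Depth = 0) {
  if (Depth == 4) // mirrors maxRecursionDepth above
    return std::nullopt;
  if (N.Constant)
    return N.Constant;
  std::optional<int64_t> Acc;
  for (const Node *C : N.Choices) {
    auto V = aggregate(*C, M, Depth + 1);
    if (!V)
      return std::nullopt; // any unknown alternative poisons the result
    if (!Acc)
      Acc = V;
    else
      Acc = (M == Mode::Max) ? std::max(*Acc, *V) : std::min(*Acc, *V);
  }
  return Acc; // nullopt when there was nothing to aggregate
}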

/// Align \p Size according to \p Alignment. If \p Size is greater than
/// getSignedMaxValue(), set it as unknown as we can only represent signed value
/// in OffsetSpan.
@@ -717,11 +776,36 @@ OffsetSpan ObjectSizeOffsetVisitor::computeImpl(Value *V) {
V = V->stripAndAccumulateConstantOffsets(
DL, Offset, /* AllowNonInbounds */ true, /* AllowInvariantGroup */ true);

// Give it another try with approximated analysis. We don't start with this
// one because stripAndAccumulateConstantOffsets behaves differently wrt.
// overflows if we provide an external Analysis.
if ((Options.EvalMode == ObjectSizeOpts::Mode::Min ||
Options.EvalMode == ObjectSizeOpts::Mode::Max) &&
isa<GEPOperator>(V)) {
// External Analysis used to compute the Min/Max value of individual Offsets
// within a GEP.
ObjectSizeOpts::Mode EvalMode =
Options.EvalMode == ObjectSizeOpts::Mode::Min
? ObjectSizeOpts::Mode::Max
: ObjectSizeOpts::Mode::Min;
auto OffsetRangeAnalysis = [EvalMode](Value &VOffset, APInt &Offset) {
if (auto PossibleOffset =
aggregatePossibleConstantValues(&VOffset, EvalMode)) {
Offset = *PossibleOffset;
return true;
}
return false;
};

V = V->stripAndAccumulateConstantOffsets(
DL, Offset, /* AllowNonInbounds */ true, /* AllowInvariantGroup */ true,
/*ExternalAnalysis=*/OffsetRangeAnalysis);
}

// Later we use the index type size and zero but it will match the type of the
// value that is passed to computeImpl.
IntTyBits = DL.getIndexTypeSizeInBits(V->getType());
Zero = APInt::getZero(IntTyBits);

OffsetSpan ORT = computeValue(V);

bool IndexTypeSizeChanged = InitialIntTyBits != IntTyBits;
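
A quick sanity check of the Min/Max flip above (my arithmetic; the numbers come from the select_gep_offset test below): the space left past an inbounds GEP is the allocation size minus the accumulated offset, so a lower bound on the result needs an upper bound on the offset, and vice versa.

#include <cassert>
#include <cstdint>

// Illustrative arithmetic only: bytes reachable past an inbounds GEP.
int64_t bytesAfter(int64_t AllocSize, int64_t Offset) {
  return AllocSize - Offset;
}

int main() {
  // alloca of 10 bytes, offset = select(%cond, i64 3, i64 4):
  assert(bytesAfter(10, 3) == 7); // Max mode assumes the smallest offset
  assert(bytesAfter(10, 4) == 6); // Min mode assumes the largest offset
  return 0;
}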
@@ -813,8 +897,9 @@ OffsetSpan ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
     return OffsetSpan(Zero, align(Size, I.getAlign()));
 
   Value *ArraySize = I.getArraySize();
-  if (const ConstantInt *C = dyn_cast<ConstantInt>(ArraySize)) {
-    APInt NumElems = C->getValue();
+  if (auto PossibleSize =
+          aggregatePossibleConstantValues(ArraySize, Options.EvalMode)) {
+    APInt NumElems = *PossibleSize;
     if (!CheckedZextOrTrunc(NumElems))
       return ObjectSizeOffsetVisitor::unknown();
@@ -840,7 +925,18 @@ OffsetSpan ObjectSizeOffsetVisitor::visitArgument(Argument &A) {
 }
 
 OffsetSpan ObjectSizeOffsetVisitor::visitCallBase(CallBase &CB) {
-  if (std::optional<APInt> Size = getAllocSize(&CB, TLI)) {
+  auto Mapper = [this](const Value *V) -> const Value * {
+    if (!V->getType()->isIntegerTy())
+      return V;
+
+    if (auto PossibleBound =
+            aggregatePossibleConstantValues(V, Options.EvalMode))
+      return ConstantInt::get(V->getType(), *PossibleBound);
+
+    return V;
+  };
+
+  if (std::optional<APInt> Size = getAllocSize(&CB, TLI, Mapper)) {
     // Very large unsigned value cannot be represented as OffsetSpan.
     if (Size->isNegative())
       return ObjectSizeOffsetVisitor::unknown();
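
The Mapper above collapses each integer argument of the allocation call to its aggregated constant bound before getAllocSize runs, so in Max mode a call such as malloc(select %c, i64 3, i64 4) is sized as if it were malloc(4). A tiny model of that substitution (mine, not the LLVM API):

#include <algorithm>
#include <cstdint>

enum class Mode { Min, Max };

// Stand-in for aggregatePossibleConstantValues applied to a size
// argument that is a two-way select between constants A and B.
int64_t mappedSizeArg(int64_t A, int64_t B, Mode M) {
  return (M == Mode::Max) ? std::max(A, B) : std::min(A, B);
}

// mappedSizeArg(3, 4, Mode::Max) == 4 and mappedSizeArg(3, 4, Mode::Min) == 3,
// matching the select_malloc_size test in the new regression file below.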
@@ -0,0 +1,109 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=lower-constant-intrinsics -S < %s | FileCheck %s

target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

declare i64 @llvm.objectsize.i64.p0(ptr, i1 immarg, i1 immarg, i1 immarg)
declare noalias ptr @malloc(i64 noundef) #0

define i64 @select_alloc_size(i1 %cond) {
; CHECK-LABEL: @select_alloc_size(
; CHECK-NEXT: [[SIZE:%.*]] = select i1 [[COND:%.*]], i64 3, i64 4
; CHECK-NEXT: [[PTR:%.*]] = alloca i8, i64 [[SIZE]], align 1
; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND]], i64 4, i64 3
; CHECK-NEXT: ret i64 [[RES]]
;
%size = select i1 %cond, i64 3, i64 4
%ptr = alloca i8, i64 %size
%objsize_max = call i64 @llvm.objectsize.i64.p0(ptr %ptr, i1 false, i1 true, i1 false)
%objsize_min = call i64 @llvm.objectsize.i64.p0(ptr %ptr, i1 true, i1 true, i1 false)
%res = select i1 %cond, i64 %objsize_max, i64 %objsize_min
ret i64 %res
}

define i64 @select_malloc_size(i1 %cond) {
; CHECK-LABEL: @select_malloc_size(
; CHECK-NEXT: [[SIZE:%.*]] = select i1 [[COND:%.*]], i64 3, i64 4
; CHECK-NEXT: [[PTR:%.*]] = call noalias ptr @malloc(i64 noundef [[SIZE]])
; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND]], i64 4, i64 3
; CHECK-NEXT: ret i64 [[RES]]
;
%size = select i1 %cond, i64 3, i64 4
%ptr = call noalias ptr @malloc(i64 noundef %size)
%objsize_max = call i64 @llvm.objectsize.i64.p0(ptr %ptr, i1 false, i1 true, i1 false)
%objsize_min = call i64 @llvm.objectsize.i64.p0(ptr %ptr, i1 true, i1 true, i1 false)
%res = select i1 %cond, i64 %objsize_max, i64 %objsize_min
ret i64 %res
}

define i64 @select_gep_offset(i1 %cond) {
; CHECK-LABEL: @select_gep_offset(
; CHECK-NEXT: [[PTR:%.*]] = alloca i8, i64 10, align 1
; CHECK-NEXT: [[OFFSET:%.*]] = select i1 [[COND:%.*]], i64 3, i64 4
; CHECK-NEXT: [[PTR_SLIDE:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[OFFSET]]
; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND]], i64 7, i64 6
; CHECK-NEXT: ret i64 [[RES]]
;
%ptr = alloca i8, i64 10
%offset = select i1 %cond, i64 3, i64 4
%ptr.slide = getelementptr inbounds i8, ptr %ptr, i64 %offset
%objsize_max = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide, i1 false, i1 true, i1 false)
%objsize_min = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide, i1 true, i1 true, i1 false)
%res = select i1 %cond, i64 %objsize_max, i64 %objsize_min
ret i64 %res
}

define i64 @select_gep_neg_offset(i1 %c0, i1 %c1) {
; CHECK-LABEL: @select_gep_neg_offset(
; CHECK-NEXT: [[PTR:%.*]] = alloca i8, i64 10, align 1
; CHECK-NEXT: [[PTR_SLIDE_1:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 5
; CHECK-NEXT: [[OFFSET:%.*]] = select i1 [[COND:%.*]], i64 -3, i64 -4
; CHECK-NEXT: [[PTR_SLIDE_2:%.*]] = getelementptr inbounds i8, ptr [[PTR_SLIDE_1]], i64 [[OFFSET]]
; CHECK-NEXT: [[RES:%.*]] = select i1 [[C1:%.*]], i64 9, i64 8
; CHECK-NEXT: ret i64 [[RES]]
;
%ptr = alloca i8, i64 10
%ptr.slide.1 = getelementptr inbounds i8, ptr %ptr, i64 5
%offset = select i1 %c0, i64 -3, i64 -4
%ptr.slide.2 = getelementptr inbounds i8, ptr %ptr.slide.1, i64 %offset
%objsize_max = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide.2, i1 false, i1 true, i1 false)
%objsize_min = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide.2, i1 true, i1 true, i1 false)
%res = select i1 %c1, i64 %objsize_max, i64 %objsize_min
ret i64 %res
}

define i64 @select_neg_oob_offset(i1 %c0, i1 %c1) {
; CHECK-LABEL: @select_neg_oob_offset(
; CHECK-NEXT: [[PTR:%.*]] = alloca i8, i64 10, align 1
; CHECK-NEXT: [[OFFSET:%.*]] = select i1 [[C0:%.*]], i64 -3, i64 -4
; CHECK-NEXT: [[PTR_SLIDE:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[OFFSET]]
; CHECK-NEXT: ret i64 0
;
%ptr = alloca i8, i64 10
%offset = select i1 %c0, i64 -3, i64 -4
%ptr.slide = getelementptr inbounds i8, ptr %ptr, i64 %offset
%objsize_max = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide, i1 false, i1 true, i1 false)
%objsize_min = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide, i1 true, i1 true, i1 false)
%res = select i1 %c1, i64 %objsize_max, i64 %objsize_min
ret i64 %res
}

define i64 @select_gep_offsets(i1 %cond) {
; CHECK-LABEL: @select_gep_offsets(
; CHECK-NEXT: [[PTR:%.*]] = alloca [10 x i8], i64 2, align 1
; CHECK-NEXT: [[OFFSET:%.*]] = select i1 [[COND:%.*]], i32 0, i32 1
; CHECK-NEXT: [[PTR_SLIDE:%.*]] = getelementptr inbounds [10 x i8], ptr [[PTR]], i32 [[OFFSET]], i32 5
; CHECK-NEXT: [[RES:%.*]] = select i1 [[COND]], i64 15, i64 5
; CHECK-NEXT: ret i64 [[RES]]
;
%ptr = alloca [10 x i8], i64 2
%offset = select i1 %cond, i32 0, i32 1
%ptr.slide = getelementptr inbounds [10 x i8], ptr %ptr, i32 %offset, i32 5
%objsize_max = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide, i1 false, i1 true, i1 false)
%objsize_min = call i64 @llvm.objectsize.i64.p0(ptr %ptr.slide, i1 true, i1 true, i1 false)
%res = select i1 %cond, i64 %objsize_max, i64 %objsize_min
ret i64 %res
}

attributes #0 = { nounwind allocsize(0) }
