26
26
#include " llvm/IR/IRBuilder.h"
27
27
#include " llvm/IR/Instruction.h"
28
28
#include " llvm/IR/Instructions.h"
29
+ #include " llvm/IR/IntrinsicInst.h"
29
30
#include " llvm/IR/IntrinsicsBPF.h"
30
31
#include " llvm/IR/Module.h"
31
32
#include " llvm/IR/Type.h"
@@ -478,9 +479,74 @@ static void aspaceWrapOperand(DenseMap<Value *, Value *> &Cache, Instruction *I,
478
479
}
479
480
}
480
481
482
+ static Value *wrapPtrIfASNotZero (DenseMap<Value *, Value *> &Cache,
483
+ CallInst *CI, Value *P) {
484
+ if (auto *PTy = dyn_cast<PointerType>(P->getType ())) {
485
+ if (PTy->getAddressSpace () == 0 )
486
+ return P;
487
+ }
488
+ return aspaceWrapValue (Cache, CI->getFunction (), P);
489
+ }
490
+
491
+ static Instruction *aspaceMemSet (DenseMap<Value *, Value *> &Cache,
492
+ CallInst *CI) {
493
+ auto *MI = cast<MemIntrinsic>(CI);
494
+ IRBuilder<> B (CI);
495
+
496
+ // memset(dst, val, len, align, isvolatile, md)
497
+ Value *Dst = wrapPtrIfASNotZero (Cache, CI, CI->getArgOperand (0 ));
498
+ Value *Val = CI->getArgOperand (1 );
499
+ Value *Len = CI->getArgOperand (2 );
500
+
501
+ auto *MS = cast<MemSetInst>(CI);
502
+ MaybeAlign Align = MS->getDestAlign ();
503
+ bool IsVolatile = MS->isVolatile ();
504
+
505
+ return B.CreateMemSet (Dst, Val, Len, Align, IsVolatile, MI->getAAMetadata ());
506
+ }
507
+
508
+ static Instruction *aspaceMemCpy (DenseMap<Value *, Value *> &Cache,
509
+ CallInst *CI) {
510
+ auto *MI = cast<MemIntrinsic>(CI);
511
+ IRBuilder<> B (CI);
512
+
513
+ // memcpy(dst, dst_align, src, src_align, len, isvolatile, md)
514
+ Value *Dst = wrapPtrIfASNotZero (Cache, CI, CI->getArgOperand (0 ));
515
+ Value *Src = wrapPtrIfASNotZero (Cache, CI, CI->getArgOperand (1 ));
516
+ Value *Len = CI->getArgOperand (2 );
517
+
518
+ auto *MT = cast<MemTransferInst>(CI);
519
+ MaybeAlign DstAlign = MT->getDestAlign ();
520
+ MaybeAlign SrcAlign = MT->getSourceAlign ();
521
+ bool IsVolatile = MT->isVolatile ();
522
+
523
+ return B.CreateMemCpy (Dst, DstAlign, Src, SrcAlign, Len, IsVolatile,
524
+ MI->getAAMetadata ());
525
+ }
526
+
527
+ static Instruction *aspaceMemMove (DenseMap<Value *, Value *> &Cache,
528
+ CallInst *CI) {
529
+ auto *MI = cast<MemIntrinsic>(CI);
530
+ IRBuilder<> B (CI);
531
+
532
+ // memmove(dst, dst_align, src, src_align, len, isvolatile, md)
533
+ Value *Dst = wrapPtrIfASNotZero (Cache, CI, CI->getArgOperand (0 ));
534
+ Value *Src = wrapPtrIfASNotZero (Cache, CI, CI->getArgOperand (1 ));
535
+ Value *Len = CI->getArgOperand (2 );
536
+
537
+ auto *MT = cast<MemTransferInst>(CI);
538
+ MaybeAlign DstAlign = MT->getDestAlign ();
539
+ MaybeAlign SrcAlign = MT->getSourceAlign ();
540
+ bool IsVolatile = MT->isVolatile ();
541
+
542
+ return B.CreateMemMove (Dst, DstAlign, Src, SrcAlign, Len, IsVolatile,
543
+ MI->getAAMetadata ());
544
+ }
545
+
481
546
// Support for BPF address spaces:
482
547
// - for each function in the module M, update pointer operand of
483
548
// each memory access instruction (load/store/cmpxchg/atomicrmw)
549
+ // or intrinsic call instructions (memset/memcpy/memmove)
484
550
// by casting it from non-zero address space to zero address space, e.g:
485
551
//
486
552
// (load (ptr addrspace (N) %p) ...)
@@ -493,21 +559,69 @@ bool BPFCheckAndAdjustIR::insertASpaceCasts(Module &M) {
493
559
for (Function &F : M) {
494
560
DenseMap<Value *, Value *> CastsCache;
495
561
for (BasicBlock &BB : F) {
496
- for (Instruction &I : BB ) {
562
+ for (Instruction &I : llvm::make_early_inc_range (BB) ) {
497
563
unsigned PtrOpNum;
498
564
499
- if (auto *LD = dyn_cast<LoadInst>(&I))
565
+ if (auto *LD = dyn_cast<LoadInst>(&I)) {
500
566
PtrOpNum = LD->getPointerOperandIndex ();
501
- else if (auto *ST = dyn_cast<StoreInst>(&I))
567
+ aspaceWrapOperand (CastsCache, &I, PtrOpNum);
568
+ continue ;
569
+ }
570
+ if (auto *ST = dyn_cast<StoreInst>(&I)) {
502
571
PtrOpNum = ST->getPointerOperandIndex ();
503
- else if (auto *CmpXchg = dyn_cast<AtomicCmpXchgInst>(&I))
572
+ aspaceWrapOperand (CastsCache, &I, PtrOpNum);
573
+ continue ;
574
+ }
575
+ if (auto *CmpXchg = dyn_cast<AtomicCmpXchgInst>(&I)) {
504
576
PtrOpNum = CmpXchg->getPointerOperandIndex ();
505
- else if (auto *RMW = dyn_cast<AtomicRMWInst>(&I))
577
+ aspaceWrapOperand (CastsCache, &I, PtrOpNum);
578
+ continue ;
579
+ }
580
+ if (auto *RMW = dyn_cast<AtomicRMWInst>(&I)) {
506
581
PtrOpNum = RMW->getPointerOperandIndex ();
507
- else
582
+ aspaceWrapOperand (CastsCache, &I, PtrOpNum);
508
583
continue ;
584
+ }
585
+
586
+ auto *CI = dyn_cast<CallInst>(&I);
587
+ if (!CI)
588
+ continue ;
589
+
590
+ Function *Callee = CI->getCalledFunction ();
591
+ if (!Callee || !Callee->isIntrinsic ())
592
+ continue ;
593
+
594
+ // Check memset/memcpy/memmove
595
+ Intrinsic::ID ID = Callee->getIntrinsicID ();
596
+ bool IsSet = ID == Intrinsic::memset;
597
+ bool IsCpy = ID == Intrinsic::memcpy;
598
+ bool IsMove = ID == Intrinsic::memmove;
599
+ if (!IsSet && !IsCpy && !IsMove)
600
+ continue ;
601
+
602
+ auto isAS = [&](unsigned ArgIdx) {
603
+ Value *V = CI->getArgOperand (ArgIdx);
604
+ if (auto *PTy = dyn_cast<PointerType>(V->getType ()))
605
+ return PTy->getAddressSpace () != 0 ;
606
+ return false ;
607
+ };
608
+
609
+ // For memset: only dest is a pointer; for memcpy/memmove: dest & src.
610
+ bool HasAS = IsSet ? isAS (0 ) : (isAS (0 ) || isAS (1 ));
611
+ if (!HasAS)
612
+ continue ;
613
+
614
+ Instruction *New;
615
+ if (IsSet)
616
+ New = aspaceMemSet (CastsCache, CI);
617
+ else if (IsCpy)
618
+ New = aspaceMemCpy (CastsCache, CI);
619
+ else
620
+ New = aspaceMemMove (CastsCache, CI);
509
621
510
- aspaceWrapOperand (CastsCache, &I, PtrOpNum);
622
+ I.replaceAllUsesWith (New);
623
+ New->takeName (&I);
624
+ I.eraseFromParent ();
511
625
}
512
626
}
513
627
Changed |= !CastsCache.empty ();
0 commit comments