[CIR] Fix alignment when lowering set/get bitfield operations #148999

Merged: 3 commits, Jul 18, 2025
27 changes: 18 additions & 9 deletions clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -1720,20 +1720,24 @@ def SetBitfieldOp : CIR_Op<"set_bitfield"> {
%2 = cir.load %0 : !cir.ptr<!cir.ptr<!record_type>>, !cir.ptr<!record_type>
%3 = cir.get_member %2[1] {name = "e"} : !cir.ptr<!record_type>
-> !cir.ptr<!u16i>
%4 = cir.set_bitfield(#bfi_e, %3 : !cir.ptr<!u16i>, %1 : !s32i) -> !s32i
%4 = cir.set_bitfield align(4) (#bfi_e, %3 : !cir.ptr<!u16i>, %1 : !s32i)
-> !s32i
```
}];

let arguments = (ins
Arg<CIR_PointerType, "the address to store the value", [MemWrite]>:$addr,
CIR_AnyType:$src,
BitfieldInfoAttr:$bitfield_info,
DefaultValuedOptionalAttr<I64Attr, "0">:$alignment,
UnitAttr:$is_volatile
);

let results = (outs CIR_IntType:$result);

let assemblyFormat = [{ `(`$bitfield_info`,` $addr`:`qualified(type($addr))`,`
let assemblyFormat = [{
(`align` `(` $alignment^ `)`)?
`(`$bitfield_info`,` $addr`:`qualified(type($addr))`,`
$src`:`type($src) `)` attr-dict `->` type($result) }];

let builders = [
@@ -1745,14 +1749,15 @@ def SetBitfieldOp : CIR_Op<"set_bitfield"> {
"unsigned":$size,
"unsigned":$offset,
"bool":$is_signed,
"bool":$is_volatile
"bool":$is_volatile,
CArg<"unsigned", "0">:$alignment
),
[{
BitfieldInfoAttr info =
BitfieldInfoAttr::get($_builder.getContext(),
name, storage_type,
size, offset, is_signed);
build($_builder, $_state, type, addr, src, info, is_volatile);
build($_builder, $_state, type, addr, src, info, alignment, is_volatile);
}]>
];
}
@@ -1804,20 +1809,23 @@ def GetBitfieldOp : CIR_Op<"get_bitfield"> {
%2 = cir.load %0 : !cir.ptr<!cir.ptr<!record_type>>, !cir.ptr<!record_type>
%3 = cir.get_member %2[1] {name = "e"} : !cir.ptr<!record_type>
-> !cir.ptr<!u16i>
%4 = cir.get_bitfield(#bfi_e, %3 : !cir.ptr<!u16i>) -> !s32i
%4 = cir.get_bitfield align(4) (#bfi_e, %3 : !cir.ptr<!u16i>) -> !s32i
```
}];

let arguments = (ins
Arg<CIR_PointerType, "the address to load from", [MemRead]>:$addr,
BitfieldInfoAttr:$bitfield_info,
DefaultValuedOptionalAttr<I64Attr, "0">:$alignment,
UnitAttr:$is_volatile
);

let results = (outs CIR_IntType:$result);

let assemblyFormat = [{ `(`$bitfield_info `,` $addr attr-dict `:`
qualified(type($addr)) `)` `->` type($result) }];
let assemblyFormat = [{
(`align` `(` $alignment^ `)`)?
`(`$bitfield_info `,` $addr attr-dict `:`
qualified(type($addr)) `)` `->` type($result) }];

let builders = [
OpBuilder<(ins "mlir::Type":$type,
@@ -1827,14 +1835,15 @@ def GetBitfieldOp : CIR_Op<"get_bitfield"> {
"unsigned":$size,
"unsigned":$offset,
"bool":$is_signed,
"bool":$is_volatile
"bool":$is_volatile,
CArg<"unsigned", "0">:$alignment
),
[{
BitfieldInfoAttr info =
BitfieldInfoAttr::get($_builder.getContext(),
name, storage_type,
size, offset, is_signed);
build($_builder, $_state, type, addr, info, is_volatile);
build($_builder, $_state, type, addr, info, alignment, is_volatile);
}]>
];
}
22 changes: 12 additions & 10 deletions clang/lib/CIR/CodeGen/CIRGenBuilder.h
@@ -424,21 +424,23 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy {
}

mlir::Value createSetBitfield(mlir::Location loc, mlir::Type resultType,
mlir::Value dstAddr, mlir::Type storageType,
Address dstAddr, mlir::Type storageType,
mlir::Value src, const CIRGenBitFieldInfo &info,
bool isLvalueVolatile, bool useVolatile) {
return create<cir::SetBitfieldOp>(loc, resultType, dstAddr, storageType,
src, info.name, info.size, info.offset,
info.isSigned, isLvalueVolatile);
bool isLvalueVolatile) {
return create<cir::SetBitfieldOp>(
loc, resultType, dstAddr.getPointer(), storageType, src, info.name,
info.size, info.offset, info.isSigned, isLvalueVolatile,
dstAddr.getAlignment().getAsAlign().value());
}

mlir::Value createGetBitfield(mlir::Location loc, mlir::Type resultType,
mlir::Value addr, mlir::Type storageType,
Address addr, mlir::Type storageType,
const CIRGenBitFieldInfo &info,
bool isLvalueVolatile, bool useVolatile) {
return create<cir::GetBitfieldOp>(loc, resultType, addr, storageType,
info.name, info.size, info.offset,
info.isSigned, isLvalueVolatile);
bool isLvalueVolatile) {
return create<cir::GetBitfieldOp>(
loc, resultType, addr.getPointer(), storageType, info.name, info.size,
info.offset, info.isSigned, isLvalueVolatile,
addr.getAlignment().getAsAlign().value());
}
};

13 changes: 7 additions & 6 deletions clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -333,13 +333,12 @@ mlir::Value CIRGenFunction::emitStoreThroughBitfieldLValue(RValue src,
Address ptr = dst.getBitFieldAddress();

assert(!cir::MissingFeatures::armComputeVolatileBitfields());
const bool useVolatile = false;

mlir::Value dstAddr = dst.getAddress().getPointer();

return builder.createSetBitfield(dstAddr.getLoc(), resLTy, dstAddr,
return builder.createSetBitfield(dstAddr.getLoc(), resLTy, ptr,
ptr.getElementType(), src.getValue(), info,
dst.isVolatileQualified(), useVolatile);
dst.isVolatileQualified());
}

RValue CIRGenFunction::emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc) {
@@ -352,8 +351,7 @@ RValue CIRGenFunction::emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc) {
assert(!cir::MissingFeatures::armComputeVolatileBitfields());

mlir::Value field = builder.createGetBitfield(
getLoc(loc), resLTy, ptr.getPointer(), ptr.getElementType(), info,
lv.isVolatile(), false);
getLoc(loc), resLTy, ptr, ptr.getElementType(), info, lv.isVolatile());
assert(!cir::MissingFeatures::opLoadEmitScalarRangeCheck() && "NYI");
return RValue::get(field);
}
@@ -366,7 +364,10 @@ Address CIRGenFunction::getAddrOfBitFieldStorage(LValue base,
cir::PointerType fieldPtr = cir::PointerType::get(fieldType);
cir::GetMemberOp sea = getBuilder().createGetMember(
loc, fieldPtr, base.getPointer(), field->getName(), index);
return Address(sea, CharUnits::One());
auto rec = cast<cir::RecordType>(base.getAddress().getElementType());
CharUnits offset = CharUnits::fromQuantity(
rec.getElementOffset(cgm.getDataLayout().layout, index));
return Address(sea, base.getAlignment().alignmentAtOffset(offset));
}
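
The rewritten getAddrOfBitFieldStorage above no longer pins the bitfield address to CharUnits::One(); it takes the base lvalue's alignment and refines it by the member's byte offset via alignmentAtOffset. Below is a minimal standalone sketch of that refinement, assuming CharUnits::alignmentAtOffset computes the same thing as llvm::MinAlign (the largest power of two dividing both quantities); the helper name and the sample values are illustrative only, not taken from the patch.

```cpp
#include <cstdint>
#include <iostream>

// Largest power of two dividing both the base alignment and the byte offset,
// i.e. the lowest set bit of (baseAlign | offset).
static uint64_t alignmentAtOffset(uint64_t baseAlign, uint64_t offset) {
  uint64_t bits = baseAlign | offset;
  return bits & (~bits + 1);
}

int main() {
  // A 4-byte-aligned record: a storage unit at offset 0 keeps alignment 4,
  // one at offset 2 only guarantees alignment 2, one at offset 4 keeps 4.
  std::cout << alignmentAtOffset(4, 0) << "\n"; // 4
  std::cout << alignmentAtOffset(4, 2) << "\n"; // 2
  std::cout << alignmentAtOffset(4, 4) << "\n"; // 4
  return 0;
}
```

With a 4-byte-aligned record, a storage unit that starts on a 4-byte boundary keeps alignment 4, which is what the updated tests later in this diff check.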

LValue CIRGenFunction::emitLValueForBitField(LValue base,
7 changes: 4 additions & 3 deletions clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -2555,7 +2555,7 @@ mlir::LogicalResult CIRToLLVMSetBitfieldOpLowering::matchAndRewrite(
assert(storageSize > size && "Invalid bitfield size.");

mlir::Value val = rewriter.create<mlir::LLVM::LoadOp>(
op.getLoc(), intType, adaptor.getAddr(), /* alignment */ 0,
op.getLoc(), intType, adaptor.getAddr(), op.getAlignment(),
op.getIsVolatile());

srcVal =
@@ -2572,7 +2572,7 @@
}

rewriter.create<mlir::LLVM::StoreOp>(op.getLoc(), srcVal, adaptor.getAddr(),
/* alignment */ 0, op.getIsVolatile());
op.getAlignment(), op.getIsVolatile());

mlir::Type resultTy = getTypeConverter()->convertType(op.getType());

Expand Down Expand Up @@ -2646,7 +2646,8 @@ mlir::LogicalResult CIRToLLVMGetBitfieldOpLowering::matchAndRewrite(
computeBitfieldIntType(storageType, context, storageSize);

mlir::Value val = rewriter.create<mlir::LLVM::LoadOp>(
op.getLoc(), intType, adaptor.getAddr(), 0, op.getIsVolatile());
op.getLoc(), intType, adaptor.getAddr(), op.getAlignment(),
op.getIsVolatile());
val = rewriter.create<mlir::LLVM::BitcastOp>(op.getLoc(), intType, val);

if (info.getIsSigned()) {
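With the alignment attribute now forwarded to the LLVM load and store above, the test expectations change: bitfield storage accesses use the alignment implied by the member's position in the record rather than one derived from the storage integer type. In the checks below that means, for example, align 4 instead of align 8 for the 64-bit unit of struct.S and align 1 instead of align 2 for the more_bits unit of struct.A, while store_field moves up from align 2 to align 4. The struct in the sketch below is hypothetical (the real S from bitfields.c is not part of this diff); it only illustrates, as a compilable example, that a record packing its bitfields into a wide storage unit can still be just 4-byte aligned.

```cpp
#include <iostream>

// Hypothetical record, for illustration only: several int bitfields that the
// record layout may pack into one wide storage unit. The record's alignment
// comes from the declared member type (int, i.e. 4 bytes on typical 64-bit
// targets), not from the width of that storage unit.
struct Hypothetical {
  int a : 4;
  int b : 27;
  int c : 17;
  unsigned d : 2;
  int e : 15;
};

int main() {
  std::cout << "alignof = " << alignof(Hypothetical) << "\n"; // typically 4
  std::cout << "sizeof  = " << sizeof(Hypothetical) << "\n";  // typically 12
  return 0;
}
```
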
46 changes: 23 additions & 23 deletions clang/test/CIR/CodeGen/bitfields.c
@@ -87,14 +87,14 @@ int load_field(S* s) {
// CIR: [[TMP0:%.*]] = cir.alloca !cir.ptr<!rec_S>, !cir.ptr<!cir.ptr<!rec_S>>, ["s", init]
// CIR: [[TMP1:%.*]] = cir.load{{.*}} [[TMP0]] : !cir.ptr<!cir.ptr<!rec_S>>, !cir.ptr<!rec_S>
// CIR: [[TMP2:%.*]] = cir.get_member [[TMP1]][0] {name = "c"} : !cir.ptr<!rec_S> -> !cir.ptr<!u64i>
// CIR: [[TMP3:%.*]] = cir.get_bitfield(#bfi_c, [[TMP2]] : !cir.ptr<!u64i>) -> !s32i
// CIR: [[TMP3:%.*]] = cir.get_bitfield align(4) (#bfi_c, [[TMP2]] : !cir.ptr<!u64i>) -> !s32i

// LLVM: define dso_local i32 @load_field
// LLVM: [[TMP0:%.*]] = alloca ptr, i64 1, align 8
// LLVM: [[TMP1:%.*]] = alloca i32, i64 1, align 4
// LLVM: [[TMP2:%.*]] = load ptr, ptr [[TMP0]], align 8
// LLVM: [[TMP3:%.*]] = getelementptr %struct.S, ptr [[TMP2]], i32 0, i32 0
// LLVM: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8
// LLVM: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 4
// LLVM: [[TMP5:%.*]] = shl i64 [[TMP4]], 15
// LLVM: [[TMP6:%.*]] = ashr i64 [[TMP5]], 47
// LLVM: [[TMP7:%.*]] = trunc i64 [[TMP6]] to i32
@@ -115,13 +115,13 @@ unsigned int load_field_unsigned(A* s) {
//CIR: [[TMP0:%.*]] = cir.alloca !cir.ptr<!rec_A>, !cir.ptr<!cir.ptr<!rec_A>>, ["s", init] {alignment = 8 : i64}
//CIR: [[TMP1:%.*]] = cir.load align(8) [[TMP0]] : !cir.ptr<!cir.ptr<!rec_A>>, !cir.ptr<!rec_A>
//CIR: [[TMP2:%.*]] = cir.get_member [[TMP1]][3] {name = "more_bits"} : !cir.ptr<!rec_A> -> !cir.ptr<!u16i>
//CIR: [[TMP3:%.*]] = cir.get_bitfield(#bfi_more_bits, [[TMP2]] : !cir.ptr<!u16i>) -> !u32i
//CIR: [[TMP3:%.*]] = cir.get_bitfield align(1) (#bfi_more_bits, [[TMP2]] : !cir.ptr<!u16i>) -> !u32i

//LLVM: define dso_local i32 @load_field_unsigned
//LLVM: [[TMP0:%.*]] = alloca ptr, i64 1, align 8
//LLVM: [[TMP1:%.*]] = load ptr, ptr [[TMP0]], align 8
//LLVM: [[TMP2:%.*]] = getelementptr %struct.A, ptr [[TMP1]], i32 0, i32 3
//LLVM: [[TMP3:%.*]] = load i16, ptr [[TMP2]], align 2
//LLVM: [[TMP3:%.*]] = load i16, ptr [[TMP2]], align 1
//LLVM: [[TMP4:%.*]] = lshr i16 [[TMP3]], 3
//LLVM: [[TMP5:%.*]] = and i16 [[TMP4]], 15
//LLVM: [[TMP6:%.*]] = zext i16 [[TMP5]] to i32
@@ -143,15 +143,15 @@ void store_field() {
// CIR: [[TMP0:%.*]] = cir.alloca !rec_S, !cir.ptr<!rec_S>
// CIR: [[TMP1:%.*]] = cir.const #cir.int<3> : !s32i
// CIR: [[TMP2:%.*]] = cir.get_member [[TMP0]][1] {name = "e"} : !cir.ptr<!rec_S> -> !cir.ptr<!u16i>
// CIR: cir.set_bitfield(#bfi_e, [[TMP2]] : !cir.ptr<!u16i>, [[TMP1]] : !s32i)
// CIR: cir.set_bitfield align(4) (#bfi_e, [[TMP2]] : !cir.ptr<!u16i>, [[TMP1]] : !s32i)

// LLVM: define dso_local void @store_field()
// LLVM: [[TMP0:%.*]] = alloca %struct.S, i64 1, align 4
// LLVM: [[TMP1:%.*]] = getelementptr %struct.S, ptr [[TMP0]], i32 0, i32 1
// LLVM: [[TMP2:%.*]] = load i16, ptr [[TMP1]], align 2
// LLVM: [[TMP2:%.*]] = load i16, ptr [[TMP1]], align 4
// LLVM: [[TMP3:%.*]] = and i16 [[TMP2]], -32768
// LLVM: [[TMP4:%.*]] = or i16 [[TMP3]], 3
// LLVM: store i16 [[TMP4]], ptr [[TMP1]], align 2
// LLVM: store i16 [[TMP4]], ptr [[TMP1]], align 4

// OGCG: define dso_local void @store_field()
// OGCG: [[TMP0:%.*]] = alloca %struct.S, align 4
@@ -169,24 +169,24 @@ void store_bitfield_to_bitfield() {
// CIR: cir.func {{.*@store_bitfield_to_bitfield}}
// CIR: [[TMP0:%.*]] = cir.alloca !rec_S, !cir.ptr<!rec_S>, ["s"] {alignment = 4 : i64}
// CIR: [[TMP1:%.*]] = cir.get_member [[TMP0]][0] {name = "c"} : !cir.ptr<!rec_S> -> !cir.ptr<!u64i>
// CIR: [[TMP2:%.*]] = cir.get_bitfield(#bfi_c, [[TMP1]] : !cir.ptr<!u64i>) -> !s32i
// CIR: [[TMP2:%.*]] = cir.get_bitfield align(4) (#bfi_c, [[TMP1]] : !cir.ptr<!u64i>) -> !s32i
// CIR: [[TMP3:%.*]] = cir.get_member [[TMP0]][0] {name = "a"} : !cir.ptr<!rec_S> -> !cir.ptr<!u64i>
// CIR: [[TMP4:%.*]] = cir.set_bitfield(#bfi_a, [[TMP3]] : !cir.ptr<!u64i>, [[TMP2]] : !s32i) -> !s32i
// CIR: [[TMP4:%.*]] = cir.set_bitfield align(4) (#bfi_a, [[TMP3]] : !cir.ptr<!u64i>, [[TMP2]] : !s32i) -> !s32i

// LLVM: define dso_local void @store_bitfield_to_bitfield()
// LLVM: [[TMP0:%.*]] = alloca %struct.S, i64 1, align 4
// LLVM: [[TMP1:%.*]] = getelementptr %struct.S, ptr [[TMP0]], i32 0, i32 0
// LLVM: [[TMP2:%.*]] = load i64, ptr [[TMP1]], align 8
// LLVM: [[TMP2:%.*]] = load i64, ptr [[TMP1]], align 4
// LLVM: [[TMP3:%.*]] = shl i64 [[TMP2]], 15
// LLVM: [[TMP4:%.*]] = ashr i64 [[TMP3]], 47
// LLVM: [[TMP5:%.*]] = trunc i64 [[TMP4]] to i32
// LLVM: [[TMP6:%.*]] = getelementptr %struct.S, ptr [[TMP0]], i32 0, i32 0
// LLVM: [[TMP7:%.*]] = zext i32 [[TMP5]] to i64
// LLVM: [[TMP8:%.*]] = load i64, ptr [[TMP6]], align 8
// LLVM: [[TMP8:%.*]] = load i64, ptr [[TMP6]], align 4
// LLVM: [[TMP9:%.*]] = and i64 [[TMP7]], 15
// LLVM: [[TMP10:%.*]] = and i64 [[TMP8]], -16
// LLVM: [[TMP11:%.*]] = or i64 [[TMP10]], [[TMP9]]
// LLVM: store i64 [[TMP11]], ptr [[TMP6]], align 8
// LLVM: store i64 [[TMP11]], ptr [[TMP6]], align 4
// LLVM: [[TMP12:%.*]] = shl i64 [[TMP9]], 60
// LLVM: [[TMP13:%.*]] = ashr i64 [[TMP12]], 60
// LLVM: [[TMP15:%.*]] = trunc i64 [[TMP13]] to i32
@@ -222,16 +222,16 @@ void get_volatile(V* v) {
// CIR: [[TMP1:%.*]] = cir.const #cir.int<3> : !s32i
// CIR: [[TMP2:%.*]] = cir.load align(8) [[TMP0]] : !cir.ptr<!cir.ptr<!rec_V>>, !cir.ptr<!rec_V>
// CIR: [[TMP3:%.*]] = cir.get_member [[TMP2]][0] {name = "b"} : !cir.ptr<!rec_V> -> !cir.ptr<!u64i>
// CIR: [[TMP4:%.*]] = cir.set_bitfield(#bfi_b, [[TMP3]] : !cir.ptr<!u64i>, [[TMP1]] : !s32i) {is_volatile} -> !s32i
// CIR: [[TMP4:%.*]] = cir.set_bitfield align(4) (#bfi_b, [[TMP3]] : !cir.ptr<!u64i>, [[TMP1]] : !s32i) {is_volatile} -> !s32i

// LLVM: define dso_local void @get_volatile
// LLVM: [[TMP0:%.*]] = alloca ptr, i64 1, align 8
// LLVM: [[TMP1:%.*]] = load ptr, ptr [[TMP0]], align 8
// LLVM: [[TMP2:%.*]] = getelementptr %struct.V, ptr [[TMP1]], i32 0, i32 0
// LLVM: [[TMP3:%.*]] = load volatile i64, ptr [[TMP2]], align 8
// LLVM: [[TMP3:%.*]] = load volatile i64, ptr [[TMP2]], align 4
// LLVM: [[TMP4:%.*]] = and i64 [[TMP3]], -1095216660481
// LLVM: [[TMP5:%.*]] = or i64 [[TMP4]], 12884901888
// LLVM: store volatile i64 [[TMP5]], ptr [[TMP2]], align 8
// LLVM: store volatile i64 [[TMP5]], ptr [[TMP2]], align 4

// OCGC: define dso_local void @get_volatile
// OCGC: [[TMP0:%.*]] = alloca ptr, align 8
@@ -249,16 +249,16 @@ void set_volatile(V* v) {
//CIR: [[TMP1:%.*]] = cir.const #cir.int<3> : !s32i
//CIR: [[TMP2:%.*]] = cir.load align(8) [[TMP0]] : !cir.ptr<!cir.ptr<!rec_V>>, !cir.ptr<!rec_V>
//CIR: [[TMP3:%.*]] = cir.get_member [[TMP2]][0] {name = "b"} : !cir.ptr<!rec_V> -> !cir.ptr<!u64i>
//CIR: [[TMP4:%.*]] = cir.set_bitfield(#bfi_b, [[TMP3]] : !cir.ptr<!u64i>, [[TMP1]] : !s32i) {is_volatile} -> !s32i
//CIR: [[TMP4:%.*]] = cir.set_bitfield align(4) (#bfi_b, [[TMP3]] : !cir.ptr<!u64i>, [[TMP1]] : !s32i) {is_volatile} -> !s32i

// LLVM: define dso_local void @set_volatile
// LLVM: [[TMP0:%.*]] = alloca ptr, i64 1, align 8
// LLVM: [[TMP1:%.*]] = load ptr, ptr [[TMP0]], align 8
// LLVM: [[TMP2:%.*]] = getelementptr %struct.V, ptr [[TMP1]], i32 0, i32 0
// LLVM: [[TMP3:%.*]] = load volatile i64, ptr [[TMP2]], align 8
// LLVM: [[TMP3:%.*]] = load volatile i64, ptr [[TMP2]], align 4
// LLVM: [[TMP4:%.*]] = and i64 [[TMP3]], -1095216660481
// LLVM: [[TMP5:%.*]] = or i64 [[TMP4]], 12884901888
// LLVM: store volatile i64 [[TMP5]], ptr [[TMP2]], align 8
// LLVM: store volatile i64 [[TMP5]], ptr [[TMP2]], align 4

// OGCG: define dso_local void @set_volatile
// OGCG: [[TMP0:%.*]] = alloca ptr, align 8
@@ -276,24 +276,24 @@ void unOp(S* s) {
// CIR: [[TMP0:%.*]] = cir.alloca !cir.ptr<!rec_S>, !cir.ptr<!cir.ptr<!rec_S>>, ["s", init] {alignment = 8 : i64}
// CIR: [[TMP1:%.*]] = cir.load align(8) [[TMP0]] : !cir.ptr<!cir.ptr<!rec_S>>, !cir.ptr<!rec_S>
// CIR: [[TMP2:%.*]] = cir.get_member [[TMP1]][0] {name = "d"} : !cir.ptr<!rec_S> -> !cir.ptr<!u64i>
// CIR: [[TMP3:%.*]] = cir.get_bitfield(#bfi_d, [[TMP2]] : !cir.ptr<!u64i>) -> !s32i
// CIR: [[TMP3:%.*]] = cir.get_bitfield align(4) (#bfi_d, [[TMP2]] : !cir.ptr<!u64i>) -> !s32i
// CIR: [[TMP4:%.*]] = cir.unary(inc, [[TMP3]]) nsw : !s32i, !s32i
// CIR: cir.set_bitfield(#bfi_d, [[TMP2]] : !cir.ptr<!u64i>, [[TMP4]] : !s32i)
// CIR: cir.set_bitfield align(4) (#bfi_d, [[TMP2]] : !cir.ptr<!u64i>, [[TMP4]] : !s32i)

// LLVM: define {{.*@unOp}}
// LLVM: [[TMP0:%.*]] = getelementptr %struct.S, ptr [[LOAD0:%.*]], i32 0, i32 0
// LLVM: [[TMP1:%.*]] = load i64, ptr [[TMP0]], align 8
// LLVM: [[TMP1:%.*]] = load i64, ptr [[TMP0]], align 4
// LLVM: [[TMP2:%.*]] = shl i64 [[TMP1]], 13
// LLVM: [[TMP3:%.*]] = ashr i64 [[TMP2]], 62
// LLVM: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
// LLVM: [[TMP5:%.*]] = add nsw i32 [[TMP4]], 1
// LLVM: [[TMP6:%.*]] = zext i32 [[TMP5]] to i64
// LLVM: [[TMP7:%.*]] = load i64, ptr [[TMP0]], align 8
// LLVM: [[TMP7:%.*]] = load i64, ptr [[TMP0]], align 4
// LLVM: [[TMP8:%.*]] = and i64 [[TMP6]], 3
// LLVM: [[TMP9:%.*]] = shl i64 [[TMP8]], 49
// LLVM: [[TMP10:%.*]] = and i64 [[TMP7]], -1688849860263937
// LLVM: [[TMP11:%.*]] = or i64 [[TMP10]], [[TMP9]]
// LLVM: store i64 [[TMP11]], ptr [[TMP0]], align 8
// LLVM: store i64 [[TMP11]], ptr [[TMP0]], align 4
// LLVM: [[TMP12:%.*]] = shl i64 [[TMP8]], 62
// LLVM: [[TMP13:%.*]] = ashr i64 [[TMP12]], 62
// LLVM: [[TMP14:%.*]] = trunc i64 [[TMP13]] to i32