diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index 676ff76dff661..bbbfd96d97315 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -1720,7 +1720,8 @@ def SetBitfieldOp : CIR_Op<"set_bitfield"> {
     %2 = cir.load %0 : !cir.ptr<!cir.ptr<!rec_S>>, !cir.ptr<!rec_S>
     %3 = cir.get_member %2[1] {name = "e"} : !cir.ptr<!rec_S> -> !cir.ptr<!u16i>
-    %4 = cir.set_bitfield(#bfi_e, %3 : !cir.ptr<!u16i>, %1 : !s32i) -> !s32i
+    %4 = cir.set_bitfield align(4) (#bfi_e, %3 : !cir.ptr<!u16i>, %1 : !s32i)
+         -> !s32i
     ```
   }];
@@ -1728,12 +1729,15 @@ def SetBitfieldOp : CIR_Op<"set_bitfield"> {
     Arg<CIR_PointerType, "the address to store the value", [MemWrite]>:$addr,
     CIR_AnyType:$src,
     BitfieldInfoAttr:$bitfield_info,
+    DefaultValuedOptionalAttr<I64Attr, "0">:$alignment,
     UnitAttr:$is_volatile
   );
 
   let results = (outs CIR_IntType:$result);
 
-  let assemblyFormat = [{ `(`$bitfield_info`,` $addr`:`qualified(type($addr))`,`
+  let assemblyFormat = [{
+    (`align` `(` $alignment^ `)`)?
+    `(`$bitfield_info`,` $addr`:`qualified(type($addr))`,`
     $src`:`type($src) `)` attr-dict `->` type($result) }];
 
   let builders = [
@@ -1745,14 +1749,15 @@ def SetBitfieldOp : CIR_Op<"set_bitfield"> {
                    "unsigned":$size,
                    "unsigned":$offset,
                    "bool":$is_signed,
-                   "bool":$is_volatile
+                   "bool":$is_volatile,
+                   CArg<"unsigned", "0">:$alignment
                    ),
                    [{
                      BitfieldInfoAttr info =
                          BitfieldInfoAttr::get($_builder.getContext(), name,
                                                storage_type, size, offset,
                                                is_signed);
-                     build($_builder, $_state, type, addr, src, info, is_volatile);
+                     build($_builder, $_state, type, addr, src, info, alignment, is_volatile);
                    }]>
   ];
 }
@@ -1804,20 +1809,23 @@ def GetBitfieldOp : CIR_Op<"get_bitfield"> {
     %2 = cir.load %0 : !cir.ptr<!cir.ptr<!rec_S>>, !cir.ptr<!rec_S>
     %3 = cir.get_member %2[1] {name = "e"} : !cir.ptr<!rec_S> -> !cir.ptr<!u16i>
-    %4 = cir.get_bitfield(#bfi_e, %3 : !cir.ptr<!u16i>) -> !s32i
+    %4 = cir.get_bitfield align(4) (#bfi_e, %3 : !cir.ptr<!u16i>) -> !s32i
     ```
   }];
 
   let arguments = (ins
     Arg<CIR_PointerType, "the address to load from", [MemRead]>:$addr,
     BitfieldInfoAttr:$bitfield_info,
+    DefaultValuedOptionalAttr<I64Attr, "0">:$alignment,
     UnitAttr:$is_volatile
   );
 
   let results = (outs CIR_IntType:$result);
 
-  let assemblyFormat = [{ `(`$bitfield_info `,` $addr attr-dict `:`
-    qualified(type($addr)) `)` `->` type($result) }];
+  let assemblyFormat = [{
+    (`align` `(` $alignment^ `)`)?
+    `(`$bitfield_info `,` $addr attr-dict `:`
+    qualified(type($addr)) `)` `->` type($result) }];
 
   let builders = [
     OpBuilder<(ins "mlir::Type":$type,
@@ -1827,14 +1835,15 @@ def GetBitfieldOp : CIR_Op<"get_bitfield"> {
                    "unsigned":$size,
                    "unsigned":$offset,
                    "bool":$is_signed,
-                   "bool":$is_volatile
+                   "bool":$is_volatile,
+                   CArg<"unsigned", "0">:$alignment
                    ),
                    [{
                      BitfieldInfoAttr info =
                          BitfieldInfoAttr::get($_builder.getContext(), name,
                                                storage_type, size, offset,
                                                is_signed);
-                     build($_builder, $_state, type, addr, info, is_volatile);
+                     build($_builder, $_state, type, addr, info, alignment, is_volatile);
                    }]>
   ];
 }
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
index 5bd53ebc52ab5..a46643a89b881 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h
+++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
@@ -424,21 +424,23 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy {
   }
 
   mlir::Value createSetBitfield(mlir::Location loc, mlir::Type resultType,
-                                mlir::Value dstAddr, mlir::Type storageType,
+                                Address dstAddr, mlir::Type storageType,
                                 mlir::Value src, const CIRGenBitFieldInfo &info,
-                                bool isLvalueVolatile, bool useVolatile) {
-    return create<cir::SetBitfieldOp>(loc, resultType, dstAddr, storageType,
-                                      src, info.name, info.size, info.offset,
-                                      info.isSigned, isLvalueVolatile);
+                                bool isLvalueVolatile) {
+    return create<cir::SetBitfieldOp>(
+        loc, resultType, dstAddr.getPointer(), storageType, src, info.name,
+        info.size, info.offset, info.isSigned, isLvalueVolatile,
+        dstAddr.getAlignment().getAsAlign().value());
   }
 
   mlir::Value createGetBitfield(mlir::Location loc, mlir::Type resultType,
-                                mlir::Value addr, mlir::Type storageType,
+                                Address addr, mlir::Type storageType,
                                 const CIRGenBitFieldInfo &info,
-                                bool isLvalueVolatile, bool useVolatile) {
-    return create<cir::GetBitfieldOp>(loc, resultType, addr, storageType,
-                                      info.name, info.size, info.offset,
-                                      info.isSigned, isLvalueVolatile);
+                                bool isLvalueVolatile) {
+    return create<cir::GetBitfieldOp>(
+        loc, resultType, addr.getPointer(), storageType, info.name, info.size,
+        info.offset, info.isSigned, isLvalueVolatile,
+        addr.getAlignment().getAsAlign().value());
   }
 };
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index 51da48d330f55..d63c18fc5056b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -333,13 +333,12 @@ mlir::Value CIRGenFunction::emitStoreThroughBitfieldLValue(RValue src,
   Address ptr = dst.getBitFieldAddress();
 
   assert(!cir::MissingFeatures::armComputeVolatileBitfields());
-  const bool useVolatile = false;
 
   mlir::Value dstAddr = dst.getAddress().getPointer();
 
-  return builder.createSetBitfield(dstAddr.getLoc(), resLTy, dstAddr,
+  return builder.createSetBitfield(dstAddr.getLoc(), resLTy, ptr,
                                    ptr.getElementType(), src.getValue(), info,
-                                   dst.isVolatileQualified(), useVolatile);
+                                   dst.isVolatileQualified());
 }
 
 RValue CIRGenFunction::emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc) {
@@ -352,8 +351,7 @@ RValue CIRGenFunction::emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc) {
   assert(!cir::MissingFeatures::armComputeVolatileBitfields());
 
   mlir::Value field = builder.createGetBitfield(
-      getLoc(loc), resLTy, ptr.getPointer(), ptr.getElementType(), info,
-      lv.isVolatile(), false);
+      getLoc(loc), resLTy, ptr, ptr.getElementType(), info, lv.isVolatile());
   assert(!cir::MissingFeatures::opLoadEmitScalarRangeCheck() && "NYI");
   return RValue::get(field);
 }
@@ -366,7 +364,10 @@ Address CIRGenFunction::getAddrOfBitFieldStorage(LValue base,
   cir::PointerType fieldPtr = cir::PointerType::get(fieldType);
   cir::GetMemberOp sea = getBuilder().createGetMember(
       loc, fieldPtr, base.getPointer(), field->getName(), index);
-  return Address(sea, CharUnits::One());
+  auto rec = cast<cir::RecordType>(base.getAddress().getElementType());
+  CharUnits offset = CharUnits::fromQuantity(
+      rec.getElementOffset(cgm.getDataLayout().layout, index));
+  return Address(sea, base.getAlignment().alignmentAtOffset(offset));
 }
 
 LValue CIRGenFunction::emitLValueForBitField(LValue base,
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 7dcea0c8eb529..06ea46d28404b 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -2555,7 +2555,7 @@ mlir::LogicalResult CIRToLLVMSetBitfieldOpLowering::matchAndRewrite(
     assert(storageSize > size && "Invalid bitfield size.");
 
     mlir::Value val = rewriter.create<mlir::LLVM::LoadOp>(
-        op.getLoc(), intType, adaptor.getAddr(), /* alignment */ 0,
+        op.getLoc(), intType, adaptor.getAddr(), op.getAlignment(),
         op.getIsVolatile());
 
     srcVal =
@@ -2572,7 +2572,7 @@ mlir::LogicalResult CIRToLLVMSetBitfieldOpLowering::matchAndRewrite(
   }
 
   rewriter.create<mlir::LLVM::StoreOp>(op.getLoc(), srcVal, adaptor.getAddr(),
-                                       /* alignment */ 0, op.getIsVolatile());
+                                       op.getAlignment(), op.getIsVolatile());
 
   mlir::Type resultTy = getTypeConverter()->convertType(op.getType());
 
@@ -2646,7 +2646,8 @@ mlir::LogicalResult CIRToLLVMGetBitfieldOpLowering::matchAndRewrite(
       computeBitfieldIntType(storageType, context, storageSize);
 
   mlir::Value val = rewriter.create<mlir::LLVM::LoadOp>(
-      op.getLoc(), intType, adaptor.getAddr(), 0, op.getIsVolatile());
+      op.getLoc(), intType, adaptor.getAddr(), op.getAlignment(),
+      op.getIsVolatile());
   val = rewriter.create<mlir::LLVM::BitcastOp>(op.getLoc(), intType, val);
 
   if (info.getIsSigned()) {
diff --git a/clang/test/CIR/CodeGen/bitfields.c b/clang/test/CIR/CodeGen/bitfields.c
index 896acbfc854a4..a73c076ea81ab 100644
--- a/clang/test/CIR/CodeGen/bitfields.c
+++ b/clang/test/CIR/CodeGen/bitfields.c
@@ -87,14 +87,14 @@ int load_field(S* s) {
 // CIR: [[TMP0:%.*]] = cir.alloca !cir.ptr<!rec_S>, !cir.ptr<!cir.ptr<!rec_S>>, ["s", init]
 // CIR: [[TMP1:%.*]] = cir.load{{.*}} [[TMP0]] : !cir.ptr<!cir.ptr<!rec_S>>, !cir.ptr<!rec_S>
 // CIR: [[TMP2:%.*]] = cir.get_member [[TMP1]][0] {name = "c"} : !cir.ptr<!rec_S> -> !cir.ptr<!u64i>
-// CIR: [[TMP3:%.*]] = cir.get_bitfield(#bfi_c, [[TMP2]] : !cir.ptr<!u64i>) -> !s32i
+// CIR: [[TMP3:%.*]] = cir.get_bitfield align(4) (#bfi_c, [[TMP2]] : !cir.ptr<!u64i>) -> !s32i
 
 // LLVM: define dso_local i32 @load_field
 // LLVM: [[TMP0:%.*]] = alloca ptr, i64 1, align 8
 // LLVM: [[TMP1:%.*]] = alloca i32, i64 1, align 4
 // LLVM: [[TMP2:%.*]] = load ptr, ptr [[TMP0]], align 8
 // LLVM: [[TMP3:%.*]] = getelementptr %struct.S, ptr [[TMP2]], i32 0, i32 0
-// LLVM: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8
+// LLVM: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 4
 // LLVM: [[TMP5:%.*]] = shl i64 [[TMP4]], 15
 // LLVM: [[TMP6:%.*]] = ashr i64 [[TMP5]], 47
 // LLVM: [[TMP7:%.*]] = trunc i64 [[TMP6]] to i32
@@ -115,13 +115,13 @@ unsigned int load_field_unsigned(A* s) {
 //CIR: [[TMP0:%.*]] = cir.alloca !cir.ptr<!rec_A>, !cir.ptr<!cir.ptr<!rec_A>>, ["s", init] {alignment = 8 : i64}
 //CIR: [[TMP1:%.*]] = cir.load align(8) [[TMP0]] : !cir.ptr<!cir.ptr<!rec_A>>, !cir.ptr<!rec_A>
 //CIR: [[TMP2:%.*]] = cir.get_member [[TMP1]][3] {name = "more_bits"} : !cir.ptr<!rec_A> -> !cir.ptr<!u16i>
-//CIR: [[TMP3:%.*]] = cir.get_bitfield(#bfi_more_bits, [[TMP2]] : !cir.ptr<!u16i>) -> !u32i
+//CIR: [[TMP3:%.*]] = cir.get_bitfield align(1) (#bfi_more_bits, [[TMP2]] : !cir.ptr<!u16i>) -> !u32i
 
 //LLVM: define dso_local i32 @load_field_unsigned
 //LLVM: [[TMP0:%.*]] = alloca ptr, i64 1, align 8
 //LLVM: [[TMP1:%.*]] = load ptr, ptr [[TMP0]], align 8
 //LLVM: [[TMP2:%.*]] = getelementptr %struct.A, ptr [[TMP1]], i32 0, i32 3
-//LLVM: [[TMP3:%.*]] = load i16, ptr [[TMP2]], align 2
+//LLVM: [[TMP3:%.*]] = load i16, ptr [[TMP2]], align 1
 //LLVM: [[TMP4:%.*]] = lshr i16 [[TMP3]], 3
 //LLVM: [[TMP5:%.*]] = and i16 [[TMP4]], 15
 //LLVM: [[TMP6:%.*]] = zext i16 [[TMP5]] to i32
@@ -143,15 +143,15 @@ void store_field() {
 // CIR: [[TMP0:%.*]] = cir.alloca !rec_S, !cir.ptr<!rec_S>
 // CIR: [[TMP1:%.*]] = cir.const #cir.int<3> : !s32i
 // CIR: [[TMP2:%.*]] = cir.get_member [[TMP0]][1] {name = "e"} : !cir.ptr<!rec_S> -> !cir.ptr<!u16i>
-// CIR: cir.set_bitfield(#bfi_e, [[TMP2]] : !cir.ptr<!u16i>, [[TMP1]] : !s32i)
+// CIR: cir.set_bitfield align(4) (#bfi_e, [[TMP2]] : !cir.ptr<!u16i>, [[TMP1]] : !s32i)
 
 // LLVM: define dso_local void @store_field()
 // LLVM: [[TMP0:%.*]] = alloca %struct.S, i64 1, align 4
 // LLVM: [[TMP1:%.*]] = getelementptr %struct.S, ptr [[TMP0]], i32 0, i32 1
-// LLVM: [[TMP2:%.*]] = load i16, ptr [[TMP1]], align 2
+// LLVM: [[TMP2:%.*]] = load i16, ptr [[TMP1]], align 4
 // LLVM: [[TMP3:%.*]] = and i16 [[TMP2]], -32768
 // LLVM: [[TMP4:%.*]] = or i16 [[TMP3]], 3
-// LLVM: store i16 [[TMP4]], ptr [[TMP1]], align 2
+// LLVM: store i16 [[TMP4]], ptr [[TMP1]], align 4
 
 // OGCG: define dso_local void @store_field()
 // OGCG: [[TMP0:%.*]] = alloca %struct.S, align 4
@@ -169,24 +169,24 @@ void store_bitfield_to_bitfield() {
 // CIR: cir.func {{.*@store_bitfield_to_bitfield}}
 // CIR: [[TMP0:%.*]] = cir.alloca !rec_S, !cir.ptr<!rec_S>, ["s"] {alignment = 4 : i64}
 // CIR: [[TMP1:%.*]] = cir.get_member [[TMP0]][0] {name = "c"} : !cir.ptr<!rec_S> -> !cir.ptr<!u64i>
-// CIR: [[TMP2:%.*]] = cir.get_bitfield(#bfi_c, [[TMP1]] : !cir.ptr<!u64i>) -> !s32i
+// CIR: [[TMP2:%.*]] = cir.get_bitfield align(4) (#bfi_c, [[TMP1]] : !cir.ptr<!u64i>) -> !s32i
 // CIR: [[TMP3:%.*]] = cir.get_member [[TMP0]][0] {name = "a"} : !cir.ptr<!rec_S> -> !cir.ptr<!u64i>
-// CIR: [[TMP4:%.*]] = cir.set_bitfield(#bfi_a, [[TMP3]] : !cir.ptr<!u64i>, [[TMP2]] : !s32i) -> !s32i
+// CIR: [[TMP4:%.*]] = cir.set_bitfield align(4) (#bfi_a, [[TMP3]] : !cir.ptr<!u64i>, [[TMP2]] : !s32i) -> !s32i
 
 // LLVM: define dso_local void @store_bitfield_to_bitfield()
 // LLVM: [[TMP0:%.*]] = alloca %struct.S, i64 1, align 4
 // LLVM: [[TMP1:%.*]] = getelementptr %struct.S, ptr [[TMP0]], i32 0, i32 0
-// LLVM: [[TMP2:%.*]] = load i64, ptr [[TMP1]], align 8
+// LLVM: [[TMP2:%.*]] = load i64, ptr [[TMP1]], align 4
 // LLVM: [[TMP3:%.*]] = shl i64 [[TMP2]], 15
 // LLVM: [[TMP4:%.*]] = ashr i64 [[TMP3]], 47
 // LLVM: [[TMP5:%.*]] = trunc i64 [[TMP4]] to i32
 // LLVM: [[TMP6:%.*]] = getelementptr %struct.S, ptr [[TMP0]], i32 0, i32 0
 // LLVM: [[TMP7:%.*]] = zext i32 [[TMP5]] to i64
-// LLVM: [[TMP8:%.*]] = load i64, ptr [[TMP6]], align 8
+// LLVM: [[TMP8:%.*]] = load i64, ptr [[TMP6]], align 4
 // LLVM: [[TMP9:%.*]] = and i64 [[TMP7]], 15
 // LLVM: [[TMP10:%.*]] = and i64 [[TMP8]], -16
 // LLVM: [[TMP11:%.*]] = or i64 [[TMP10]], [[TMP9]]
-// LLVM: store i64 [[TMP11]], ptr [[TMP6]], align 8
+// LLVM: store i64 [[TMP11]], ptr [[TMP6]], align 4
 // LLVM: [[TMP12:%.*]] = shl i64 [[TMP9]], 60
 // LLVM: [[TMP13:%.*]] = ashr i64 [[TMP12]], 60
 // LLVM: [[TMP15:%.*]] = trunc i64 [[TMP13]] to i32
@@ -222,16 +222,16 @@ void get_volatile(V* v) {
 // CIR: [[TMP1:%.*]] = cir.const #cir.int<3> : !s32i
 // CIR: [[TMP2:%.*]] = cir.load align(8) [[TMP0]] : !cir.ptr<!cir.ptr<!rec_V>>, !cir.ptr<!rec_V>
 // CIR: [[TMP3:%.*]] = cir.get_member [[TMP2]][0] {name = "b"} : !cir.ptr<!rec_V> -> !cir.ptr<!u64i>
-// CIR: [[TMP4:%.*]] = cir.set_bitfield(#bfi_b, [[TMP3]] : !cir.ptr<!u64i>, [[TMP1]] : !s32i) {is_volatile} -> !s32i
+// CIR: [[TMP4:%.*]] = cir.set_bitfield align(4) (#bfi_b, [[TMP3]] : !cir.ptr<!u64i>, [[TMP1]] : !s32i) {is_volatile} -> !s32i
 
 // LLVM: define dso_local void @get_volatile
 // LLVM: [[TMP0:%.*]] = alloca ptr, i64 1, align 8
 // LLVM: [[TMP1:%.*]] = load ptr, ptr [[TMP0]], align 8
 // LLVM: [[TMP2:%.*]] = getelementptr %struct.V, ptr [[TMP1]], i32 0, i32 0
-// LLVM: [[TMP3:%.*]] = load volatile i64, ptr [[TMP2]], align 8
+// LLVM: [[TMP3:%.*]] = load volatile i64, ptr [[TMP2]], align 4
 // LLVM: [[TMP4:%.*]] = and i64 [[TMP3]], -1095216660481
 // LLVM: [[TMP5:%.*]] = or i64 [[TMP4]], 12884901888
-// LLVM: store volatile i64 [[TMP5]], ptr [[TMP2]], align 8
+// LLVM: store volatile i64 [[TMP5]], ptr [[TMP2]], align 4
 
 // OCGC: define dso_local void @get_volatile
 // OCGC: [[TMP0:%.*]] = alloca ptr, align 8
@@ -249,16 +249,16 @@ void set_volatile(V* v) {
 //CIR: [[TMP1:%.*]] = cir.const #cir.int<3> : !s32i
 //CIR: [[TMP2:%.*]] = cir.load align(8) [[TMP0]] : !cir.ptr<!cir.ptr<!rec_V>>, !cir.ptr<!rec_V>
 //CIR: [[TMP3:%.*]] = cir.get_member [[TMP2]][0] {name = "b"} : !cir.ptr<!rec_V> -> !cir.ptr<!u64i>
-//CIR: [[TMP4:%.*]] = cir.set_bitfield(#bfi_b, [[TMP3]] : !cir.ptr<!u64i>, [[TMP1]] : !s32i) {is_volatile} -> !s32i
+//CIR: [[TMP4:%.*]] = cir.set_bitfield align(4) (#bfi_b, [[TMP3]] : !cir.ptr<!u64i>, [[TMP1]] : !s32i) {is_volatile} -> !s32i
 
 // LLVM: define dso_local void @set_volatile
 // LLVM: [[TMP0:%.*]] = alloca ptr, i64 1, align 8
 // LLVM: [[TMP1:%.*]] = load ptr, ptr [[TMP0]], align 8
 // LLVM: [[TMP2:%.*]] = getelementptr %struct.V, ptr [[TMP1]], i32 0, i32 0
-// LLVM: [[TMP3:%.*]] = load volatile i64, ptr [[TMP2]], align 8
+// LLVM: [[TMP3:%.*]] = load volatile i64, ptr [[TMP2]], align 4
 // LLVM: [[TMP4:%.*]] = and i64 [[TMP3]], -1095216660481
 // LLVM: [[TMP5:%.*]] = or i64 [[TMP4]], 12884901888
-// LLVM: store volatile i64 [[TMP5]], ptr [[TMP2]], align 8
+// LLVM: store volatile i64 [[TMP5]], ptr [[TMP2]], align 4
 
 // OGCG: define dso_local void @set_volatile
 // OGCG: [[TMP0:%.*]] = alloca ptr, align 8
@@ -276,24 +276,24 @@ void unOp(S* s) {
 // CIR: [[TMP0:%.*]] = cir.alloca !cir.ptr<!rec_S>, !cir.ptr<!cir.ptr<!rec_S>>, ["s", init] {alignment = 8 : i64}
 // CIR: [[TMP1:%.*]] = cir.load align(8) [[TMP0]] : !cir.ptr<!cir.ptr<!rec_S>>, !cir.ptr<!rec_S>
 // CIR: [[TMP2:%.*]] = cir.get_member [[TMP1]][0] {name = "d"} : !cir.ptr<!rec_S> -> !cir.ptr<!u64i>
-// CIR: [[TMP3:%.*]] = cir.get_bitfield(#bfi_d, [[TMP2]] : !cir.ptr<!u64i>) -> !s32i
+// CIR: [[TMP3:%.*]] = cir.get_bitfield align(4) (#bfi_d, [[TMP2]] : !cir.ptr<!u64i>) -> !s32i
 // CIR: [[TMP4:%.*]] = cir.unary(inc, [[TMP3]]) nsw : !s32i, !s32i
-// CIR: cir.set_bitfield(#bfi_d, [[TMP2]] : !cir.ptr<!u64i>, [[TMP4]] : !s32i)
+// CIR: cir.set_bitfield align(4) (#bfi_d, [[TMP2]] : !cir.ptr<!u64i>, [[TMP4]] : !s32i)
 
 // LLVM: define {{.*@unOp}}
 // LLVM: [[TMP0:%.*]] = getelementptr %struct.S, ptr [[LOAD0:%.*]], i32 0, i32 0
-// LLVM: [[TMP1:%.*]] = load i64, ptr [[TMP0]], align 8
+// LLVM: [[TMP1:%.*]] = load i64, ptr [[TMP0]], align 4
 // LLVM: [[TMP2:%.*]] = shl i64 [[TMP1]], 13
 // LLVM: [[TMP3:%.*]] = ashr i64 [[TMP2]], 62
 // LLVM: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
 // LLVM: [[TMP5:%.*]] = add nsw i32 [[TMP4]], 1
 // LLVM: [[TMP6:%.*]] = zext i32 [[TMP5]] to i64
-// LLVM: [[TMP7:%.*]] = load i64, ptr [[TMP0]], align 8
+// LLVM: [[TMP7:%.*]] = load i64, ptr [[TMP0]], align 4
 // LLVM: [[TMP8:%.*]] = and i64 [[TMP6]], 3
 // LLVM: [[TMP9:%.*]] = shl i64 [[TMP8]], 49
 // LLVM: [[TMP10:%.*]] = and i64 [[TMP7]], -1688849860263937
 // LLVM: [[TMP11:%.*]] = or i64 [[TMP10]], [[TMP9]]
-// LLVM: store i64 [[TMP11]], ptr [[TMP0]], align 8
+// LLVM: store i64 [[TMP11]], ptr [[TMP0]], align 4
 // LLVM: [[TMP12:%.*]] = shl i64 [[TMP8]], 62
 // LLVM: [[TMP13:%.*]] = ashr i64 [[TMP12]], 62
 // LLVM: [[TMP14:%.*]] = trunc i64 [[TMP13]] to i32
diff --git a/clang/test/CIR/CodeGen/bitfields.cpp b/clang/test/CIR/CodeGen/bitfields.cpp
index 6715ebf1f48b6..7650e0b83faf6 100644
--- a/clang/test/CIR/CodeGen/bitfields.cpp
+++ b/clang/test/CIR/CodeGen/bitfields.cpp
@@ -39,14 +39,14 @@ int load_field(S* s) {
 // CIR: [[TMP0:%.*]] = cir.alloca !cir.ptr<!rec_S>, !cir.ptr<!cir.ptr<!rec_S>>, ["s", init]
 // CIR: [[TMP1:%.*]] = cir.load{{.*}} [[TMP0]] : !cir.ptr<!cir.ptr<!rec_S>>, !cir.ptr<!rec_S>
 // CIR: [[TMP2:%.*]] = cir.get_member [[TMP1]][0] {name = "c"} : !cir.ptr<!rec_S> -> !cir.ptr<!u64i>
-// CIR: [[TMP3:%.*]] = cir.get_bitfield(#bfi_c, [[TMP2]] : !cir.ptr<!u64i>) -> !s32i
+// CIR: [[TMP3:%.*]] = cir.get_bitfield align(4) (#bfi_c, [[TMP2]] : !cir.ptr<!u64i>) -> !s32i
 
 // LLVM: define dso_local i32 @_Z10load_fieldP1S
 // LLVM: [[TMP0:%.*]] = alloca ptr, i64 1, align 8
 // LLVM: [[TMP1:%.*]] = alloca i32, i64 1, align 4
 // LLVM: [[TMP2:%.*]] = load ptr, ptr [[TMP0]], align 8
 // LLVM: [[TMP3:%.*]] = getelementptr %struct.S, ptr [[TMP2]], i32 0, i32 0
-// LLVM: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8
+// LLVM: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 4
 // LLVM: [[TMP5:%.*]] = shl i64 [[TMP4]], 15
 // LLVM: [[TMP6:%.*]] = ashr i64 [[TMP5]], 47
 // LLVM: [[TMP7:%.*]] = trunc i64 [[TMP6]] to i32
@@ -67,15 +67,15 @@ void store_field() {
 // CIR: [[TMP0:%.*]] = cir.alloca !rec_S, !cir.ptr<!rec_S>
 // CIR: [[TMP1:%.*]] = cir.const #cir.int<3> : !s32i
 // CIR: [[TMP2:%.*]] = cir.get_member [[TMP0]][0] {name = "a"} : !cir.ptr<!rec_S> -> !cir.ptr<!u64i>
-// CIR: cir.set_bitfield(#bfi_a, [[TMP2]] : !cir.ptr<!u64i>, [[TMP1]] : !s32i)
+// CIR: cir.set_bitfield align(4) (#bfi_a, [[TMP2]] : !cir.ptr<!u64i>, [[TMP1]] : !s32i)
 
 // LLVM: define dso_local void @_Z11store_fieldv
 // LLVM: [[TMP0:%.*]] = alloca %struct.S, i64 1, align 4
 // LLVM: [[TMP1:%.*]] = getelementptr %struct.S, ptr [[TMP0]], i32 0, i32 0
-// LLVM: [[TMP2:%.*]] = load i64, ptr [[TMP1]], align 8
+// LLVM: [[TMP2:%.*]] = load i64, ptr [[TMP1]], align 4
 // LLVM: [[TMP3:%.*]] = and i64 [[TMP2]], -16
 // LLVM: [[TMP4:%.*]] = or i64 [[TMP3]], 3
-// LLVM: store i64 [[TMP4]], ptr [[TMP1]], align 8
+// LLVM: store i64 [[TMP4]], ptr [[TMP1]], align 4
 
 // OGCG: define dso_local void @_Z11store_fieldv()
 // OGCG: [[TMP0:%.*]] = alloca %struct.S, align 4
@@ -93,25 +93,25 @@ void store_bitfield_to_bitfield(S* s) {
 // CIR: [[TMP1:%.*]] = cir.const #cir.int<3> : !s32i
 // CIR: [[TMP2:%.*]] = cir.load align(8) [[TMP0]] : !cir.ptr<!cir.ptr<!rec_S>>, !cir.ptr<!rec_S>
 // CIR: [[TMP3:%.*]] = cir.get_member [[TMP2]][0] {name = "b"} : !cir.ptr<!rec_S> -> !cir.ptr<!u64i>
-// CIR: [[TMP4:%.*]] = cir.set_bitfield(#bfi_b, [[TMP3]] : !cir.ptr<!u64i>, [[TMP1]] : !s32i) -> !s32i
+// CIR: [[TMP4:%.*]] = cir.set_bitfield align(4) (#bfi_b, [[TMP3]] : !cir.ptr<!u64i>, [[TMP1]] : !s32i) -> !s32i
 // CIR: [[TMP5:%.*]] = cir.load align(8) [[TMP0]] : !cir.ptr<!cir.ptr<!rec_S>>, !cir.ptr<!rec_S>
 // CIR: [[TMP6:%.*]] = cir.get_member [[TMP5]][0] {name = "a"} : !cir.ptr<!rec_S> -> !cir.ptr<!u64i>
-// CIR: [[TMP7:%.*]] = cir.set_bitfield(#bfi_a, [[TMP6]] : !cir.ptr<!u64i>, [[TMP4]] : !s32i) -> !s32i
+// CIR: [[TMP7:%.*]] = cir.set_bitfield align(4) (#bfi_a, [[TMP6]] : !cir.ptr<!u64i>, [[TMP4]] : !s32i) -> !s32i
 
 // LLVM: define dso_local void @_Z26store_bitfield_to_bitfieldP1S
 // LLVM: [[TMP0:%.*]] = alloca ptr, i64 1, align 8
 // LLVM: [[TMP1:%.*]] = load ptr, ptr [[TMP0]], align 8
 // LLVM: [[TMP2:%.*]] = getelementptr %struct.S, ptr [[TMP1]], i32 0, i32 0
-// LLVM: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 8
+// LLVM: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 4
 // LLVM: [[TMP4:%.*]] = and i64 [[TMP3]], -2147483633
 // LLVM: [[TMP5:%.*]] = or i64 [[TMP4]], 48
-// LLVM: store i64 [[TMP5]], ptr [[TMP2]], align 8
+// LLVM: store i64 [[TMP5]], ptr [[TMP2]], align 4
 // LLVM: [[TMP6:%.*]] = load ptr, ptr [[TMP0]], align 8
 // LLVM: [[TMP7:%.*]] = getelementptr %struct.S, ptr [[TMP6]], i32 0, i32 0
-// LLVM: [[TMP8:%.*]] = load i64, ptr [[TMP7]], align 8
+// LLVM: [[TMP8:%.*]] = load i64, ptr [[TMP7]], align 4
 // LLVM: [[TMP9:%.*]] = and i64 [[TMP8]], -16
 // LLVM: [[TMP10:%.*]] = or i64 [[TMP9]], 3
-// LLVM: store i64 [[TMP10]], ptr [[TMP7]], align 8
+// LLVM: store i64 [[TMP10]], ptr [[TMP7]], align 4
 
 // OGCG: define dso_local void @_Z26store_bitfield_to_bitfieldP1S
 // OGCG: [[TMP0:%.*]] = alloca ptr, align 8
diff --git a/clang/test/CIR/CodeGen/bitfields_be.c b/clang/test/CIR/CodeGen/bitfields_be.c
index 6133927b67d21..77741ba74870b 100644
--- a/clang/test/CIR/CodeGen/bitfields_be.c
+++ b/clang/test/CIR/CodeGen/bitfields_be.c
@@ -25,7 +25,7 @@ int init(S* s) {
 //CIR: [[TMP0:%.*]] = cir.alloca !cir.ptr<!rec_S>, !cir.ptr<!cir.ptr<!rec_S>>, ["s", init] {alignment = 8 : i64}
 //CIR: [[TMP1:%.*]] = cir.load align(8) [[TMP0]] : !cir.ptr<!cir.ptr<!rec_S>>, !cir.ptr<!rec_S>
 //CIR: [[TMP2:%.*]] = cir.get_member [[TMP1]][0] {name = "c"} : !cir.ptr<!rec_S> -> !cir.ptr<!u32i>
-//CIR: [[TMP3:%.*]] = cir.get_bitfield(#bfi_c, [[TMP2]] : !cir.ptr<!u32i>) -> !s32i
+//CIR: [[TMP3:%.*]] = cir.get_bitfield align(4) (#bfi_c, [[TMP2]] : !cir.ptr<!u32i>) -> !s32i
 
 //LLVM: define dso_local i32 @init(ptr %0) {
 //LLVM: [[TMP0:%.*]] = alloca ptr, i64 1, align 8
@@ -57,7 +57,7 @@ void load(S* s) {
 // CIR: %[[MIN1:.*]] = cir.unary(minus, %[[CONST1]]) nsw : !s32i, !s32i
 // CIR: %[[VAL0:.*]] = cir.load align(8) %[[PTR0]] : !cir.ptr<!cir.ptr<!rec_S>>, !cir.ptr<!rec_S>
 // CIR: %[[GET0:.*]] = cir.get_member %[[VAL0]][0] {name = "a"} : !cir.ptr<!rec_S> -> !cir.ptr<!u32i>
-// CIR: %[[SET0:.*]] = cir.set_bitfield(#bfi_a, %[[GET0]] : !cir.ptr<!u32i>, %[[MIN1]] : !s32i) -> !s32i
+// CIR: %[[SET0:.*]] = cir.set_bitfield align(4) (#bfi_a, %[[GET0]] : !cir.ptr<!u32i>, %[[MIN1]] : !s32i) -> !s32i
 
 // LLVM: define dso_local void @load
 // LLVM: %[[PTR0:.*]] = load ptr
@@ -65,50 +65,50 @@ void load(S* s) {
 // LLVM: %[[VAL0:.*]] = load i32, ptr %[[GET0]], align 4
 // LLVM: %[[AND0:.*]] = and i32 %[[VAL0]], 268435455
 // LLVM: %[[OR0:.*]] = or i32 %[[AND0]], -1073741824
-// LLVM: store i32 %[[OR0]], ptr %[[GET0]]
+// LLVM: store i32 %[[OR0]], ptr %[[GET0]], align 4
 
 // OGCG: define dso_local void @load
 // OGCG: %[[PTR0:.*]] = load ptr
-// OGCG: %[[VAL0:.*]] = load i32, ptr %[[PTR0]]
+// OGCG: %[[VAL0:.*]] = load i32, ptr %[[PTR0]], align 4
 // OGCG: %[[AND0:.*]] = and i32 %[[VAL0]], 268435455
 // OGCG: %[[OR0:.*]] = or i32 %[[AND0]], -1073741824
-// OGCG: store i32 %[[OR0]], ptr %[[PTR0]]
+// OGCG: store i32 %[[OR0]], ptr %[[PTR0]], align 4
 
 // field 'b'
 // CIR: %[[CONST2:.*]] = cir.const #cir.int<42> : !s32i
 // CIR: %[[VAL1:.*]] = cir.load align(8) %[[PTR0]] : !cir.ptr<!cir.ptr<!rec_S>>, !cir.ptr<!rec_S>
 // CIR: %[[GET1:.*]] = cir.get_member %[[VAL1]][0] {name = "b"} : !cir.ptr<!rec_S> -> !cir.ptr<!u32i>
-// CIR: %[[SET1:.*]] = cir.set_bitfield(#bfi_b, %[[GET1]] : !cir.ptr<!u32i>, %[[CONST2]] : !s32i) -> !s32i
+// CIR: %[[SET1:.*]] = cir.set_bitfield align(4) (#bfi_b, %[[GET1]] : !cir.ptr<!u32i>, %[[CONST2]] : !s32i) -> !s32i
 
 // LLVM: %[[PTR1:.*]] = load ptr
 // LLVM: %[[GET1:.*]] = getelementptr %struct.S, ptr %[[PTR1]], i32 0, i32 0
 // LLVM: %[[VAL1:.*]] = load i32, ptr %[[GET1]], align 4
 // LLVM: %[[AND1:.*]] = and i32 %[[VAL1]], -268304385
 // LLVM: %[[OR1:.*]] = or i32 %[[AND1]], 5505024
-// LLVM: store i32 %[[OR1]], ptr %[[GET1]]
+// LLVM: store i32 %[[OR1]], ptr %[[GET1]], align 4
 
 // OGCG: %[[PTR1:.*]] = load ptr
-// OGCG: %[[VAL1:.*]] = load i32, ptr %[[PTR1]]
+// OGCG: %[[VAL1:.*]] = load i32, ptr %[[PTR1]], align 4
 // OGCG: %[[AND1:.*]] = and i32 %[[VAL1]], -268304385
 // OGCG: %[[OR1:.*]] = or i32 %[[AND1]], 5505024
-// OGCG: store i32 %[[OR1]], ptr %[[PTR1]]
+// OGCG: store i32 %[[OR1]], ptr %[[PTR1]], align 4
 
 // field 'c'
 // CIR: %[[CONST3:.*]] = cir.const #cir.int<12345> : !s32i
 // CIR: %[[MIN2:.*]] = cir.unary(minus, %[[CONST3]]) nsw : !s32i, !s32i
 // CIR: %[[VAL2:.*]] = cir.load align(8) %[[PTR0]] : !cir.ptr<!cir.ptr<!rec_S>>, !cir.ptr<!rec_S>
 // CIR: %[[GET2:.*]] = cir.get_member %[[VAL2]][0] {name = "c"} : !cir.ptr<!rec_S> -> !cir.ptr<!u32i>
-// CIR: %[[SET2:.*]] = cir.set_bitfield(#bfi_c, %[[GET2]] : !cir.ptr<!u32i>, %[[MIN2]] : !s32i) -> !s32i
+// CIR: %[[SET2:.*]] = cir.set_bitfield align(4) (#bfi_c, %[[GET2]] : !cir.ptr<!u32i>, %[[MIN2]] : !s32i) -> !s32i
 
 // LLVM: %[[PTR2:.*]] = load ptr
 // LLVM: %[[GET2:.*]] = getelementptr %struct.S, ptr %[[PTR2]], i32 0, i32 0
 // LLVM: %[[VAL2:.*]] = load i32, ptr %[[GET2]], align 4
 // LLVM: %[[AND2:.*]] = and i32 %[[VAL2]], -131072
 // LLVM: %[[OR2:.*]] = or i32 %[[AND2]], 118727
-// LLVM: store i32 %[[OR2]], ptr %[[GET2]]
+// LLVM: store i32 %[[OR2]], ptr %[[GET2]], align 4
 
 // OGCG: %[[PTR2:.*]] = load ptr
-// OGCG: %[[VAL2:.*]] = load i32, ptr %[[PTR2]]
+// OGCG: %[[VAL2:.*]] = load i32, ptr %[[PTR2]], align 4
 // OGCG: %[[AND2:.*]] = and i32 %[[VAL2]], -131072
 // OGCG: %[[OR2:.*]] = or i32 %[[AND2]], 118727
-// OGCG: store i32 %[[OR2]], ptr %[[PTR2]]
+// OGCG: store i32 %[[OR2]], ptr %[[PTR2]], align 4
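For readers going through the test updates, here is a small illustrative C case (hypothetical field widths, not copied from the patch's test structs) showing where the new `align(4)` on `cir.get_bitfield`/`cir.set_bitfield` comes from: the alignment is now derived from the bit-field storage's position inside the record (the `Address` handed to `createGetBitfield`/`createSetBitfield`), instead of the lowering passing 0 to the LLVM load/store builders, which previously let the storage integer's natural alignment (e.g. `align 8` for an `i64` unit) show up in the output.

```c
/* Illustrative only: the field widths below are made up, and the CIR in the
 * comments is a sketch of the expected shape, not output copied from the
 * patch's tests. */
typedef struct {
  int a : 4;
  int b : 11;
  int c : 17;
} S; /* alignof(S) == 4, so accesses to the bit-field storage are 4-aligned */

int load_c(S *s) {
  return s->c; /* expected: cir.get_bitfield align(4) (#bfi_c, ...) */
}

void store_c(S *s) {
  s->c = 3;    /* expected: cir.set_bitfield align(4) (#bfi_c, ...) */
}
```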