|
25 | 25 | #include "asm/macroAssembler.inline.hpp" |
26 | 26 | #include "interpreter/interp_masm.hpp" |
27 | 27 | #include "mmtkBarrierSet.hpp" |
28 | | -#include "mmtkBarrierSetAssembler_aarch64.hpp" |
29 | 28 | #include "mmtkBarrierSetC1.hpp" |
30 | 29 | #include "mmtkMutator.hpp" |
31 | 30 | #include "runtime/sharedRuntime.hpp" |
32 | 31 | #include "utilities/macros.hpp" |
33 | 32 | #include "c1/c1_LIRAssembler.hpp" |
34 | 33 | #include "c1/c1_MacroAssembler.hpp" |
35 | 34 |
|
| 35 | +#include "utilities/macros.hpp" |
| 36 | +#include CPU_HEADER(mmtkBarrierSetAssembler) |
| 37 | + |
36 | 38 | #define __ masm-> |
37 | 39 |
|
38 | 40 | void MMTkBarrierSetAssembler::eden_allocate(MacroAssembler* masm, Register obj, Register var_size_in_bytes, int con_size_in_bytes, Register tmp1, Register tmp2, Label& slow_case) { |
@@ -129,61 +131,95 @@ void MMTkBarrierSetAssembler::eden_allocate(MacroAssembler* masm, Register obj, |
129 | 131 |
|
130 | 132 | #undef __ |
131 | 133 |
|
132 | | -#define __ sasm-> |
133 | | - |
134 | | -void MMTkBarrierSetAssembler::generate_c1_write_barrier_runtime_stub(StubAssembler* sasm) const { |
135 | | - // printf("xxx MMTkBarrierSetAssembler::generate_c1_write_barrier_runtime_stub\n"); |
136 | | - // See also void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) |
137 | | - __ prologue("mmtk_write_barrier", false); |
| 134 | +//////////////////// Assembler for C1 //////////////////// |
138 | 135 |
|
139 | | - Label done, runtime; |
| 136 | +// Generate runtime stubs for the "runtime code blobs" in MMTkBarrierSetC1 |
140 | 137 |
|
141 | | - // void C1_MacroAssembler::load_parameter(int offset_in_words, Register reg) |
142 | | - // ld(reg, Address(fp, offset_in_words * BytesPerWord)); |
143 | | - // ra is free to use here, because call prologue/epilogue handles it |
144 | | - // Zheyuan: Code works by swaping rscratch2 and rscratch1, and I dont know why |
145 | | - const Register src = rscratch2; |
146 | | - const Register slot = rscratch1; |
147 | | - const Register new_val = lr; |
148 | | - __ load_parameter(0, src); |
149 | | - __ load_parameter(1, slot); |
150 | | - __ load_parameter(2, new_val); |
151 | | - |
152 | | - __ bind(runtime); |
| 138 | +#define __ sasm-> |
153 | 139 |
|
154 | | - // Push integer registers x7, x10-x17, x28-x31. |
155 | | - // t2, a0-a7, t3-t6 |
| 140 | +void MMTkBarrierSetAssembler::generate_c1_runtime_stub_general(StubAssembler* sasm, const char* name, address entry_point, int argc) { |
| 141 | + __ prologue(name, false); |
156 | 142 | __ push_call_clobbered_registers(); |
157 | 143 |
|
158 | | - if (mmtk_enable_barrier_fastpath) { |
159 | | - __ call_VM_leaf(FN_ADDR(MMTkBarrierSetRuntime::object_reference_write_slow_call), src, slot, new_val); |
160 | | - } else { |
161 | | - __ call_VM_leaf(FN_ADDR(MMTkBarrierSetRuntime::object_reference_write_post_call), src, slot, new_val); |
| 144 | + if (argc > 0) __ load_parameter(0, c_rarg0); |
| 145 | + if (argc > 1) __ load_parameter(1, c_rarg1); |
| 146 | + if (argc > 2) __ load_parameter(2, c_rarg2); |
| 147 | + if (argc > 3) { |
| 148 | + guarantee(false, "Too many args"); |
162 | 149 | } |
163 | 150 |
|
| 151 | + __ call_VM_leaf(entry_point, 3); |
| 152 | + |
164 | 153 | __ pop_call_clobbered_registers(); |
| 154 | + __ epilogue(); |
| 155 | +} |
165 | 156 |
|
166 | | - __ bind(done); |
| 157 | +// void foo(){ |
| 158 | +// Label done, runtime; |
167 | 159 |
|
168 | | - __ epilogue(); |
| 160 | +// // void C1_MacroAssembler::load_parameter(int offset_in_words, Register reg) |
| 161 | +// // ld(reg, Address(fp, offset_in_words * BytesPerWord)); |
| 162 | +// // ra is free to use here, because call prologue/epilogue handles it |
| 163 | +// // Zheyuan: Code works by swaping rscratch2 and rscratch1, and I dont know why |
| 164 | +// const Register src = rscratch2; |
| 165 | +// const Register slot = rscratch1; |
| 166 | +// const Register new_val = lr; |
| 167 | +// __ load_parameter(0, src); |
| 168 | +// __ load_parameter(1, slot); |
| 169 | +// __ load_parameter(2, new_val); |
| 170 | + |
| 171 | +// __ bind(runtime); |
| 172 | + |
| 173 | +// // Push integer registers x7, x10-x17, x28-x31. |
| 174 | +// // t2, a0-a7, t3-t6 |
| 175 | +// __ push_call_clobbered_registers(); |
| 176 | + |
| 177 | +// if (mmtk_enable_barrier_fastpath) { |
| 178 | +// __ call_VM_leaf(FN_ADDR(MMTkBarrierSetRuntime::object_reference_write_slow_call), src, slot, new_val); |
| 179 | +// } else { |
| 180 | +// __ call_VM_leaf(FN_ADDR(MMTkBarrierSetRuntime::object_reference_write_post_call), src, slot, new_val); |
| 181 | +// } |
| 182 | + |
| 183 | +// __ pop_call_clobbered_registers(); |
| 184 | + |
| 185 | +// __ bind(done); |
| 186 | + |
| 187 | +// __ epilogue(); |
| 188 | +// } |
| 189 | + |
// C1 runtime stub for the reference-load barrier: forwards one argument
// (the loaded reference) to MMTkBarrierSetRuntime::load_reference_call.
void MMTkBarrierSetAssembler::generate_c1_load_reference_runtime_stub(StubAssembler* sasm) {
  generate_c1_runtime_stub_general(sasm, "c1_load_reference_runtime_stub", FN_ADDR(MMTkBarrierSetRuntime::load_reference_call), 1);
}
| 193 | + |
// C1 runtime stub for the pre-write barrier: forwards three arguments
// (presumably src, slot, new_val — matching the old inline stub's
// parameters; confirm against MMTkBarrierSetC1's stub call sites) to
// MMTkBarrierSetRuntime::object_reference_write_pre_call.
void MMTkBarrierSetAssembler::generate_c1_object_reference_write_pre_runtime_stub(StubAssembler* sasm) {
  generate_c1_runtime_stub_general(sasm, "c1_object_reference_write_pre_stub", FN_ADDR(MMTkBarrierSetRuntime::object_reference_write_pre_call), 3);
}
| 197 | + |
// C1 runtime stub for the post-write barrier: forwards three arguments
// (presumably src, slot, new_val — see the removed inline stub above) to
// MMTkBarrierSetRuntime::object_reference_write_post_call.
void MMTkBarrierSetAssembler::generate_c1_object_reference_write_post_runtime_stub(StubAssembler* sasm) {
  generate_c1_runtime_stub_general(sasm, "c1_object_reference_write_post_stub", FN_ADDR(MMTkBarrierSetRuntime::object_reference_write_post_call), 3);
}
| 201 | + |
// C1 runtime stub for the write-barrier slow path: forwards three arguments
// (presumably src, slot, new_val) to
// MMTkBarrierSetRuntime::object_reference_write_slow_call.
void MMTkBarrierSetAssembler::generate_c1_object_reference_write_slow_runtime_stub(StubAssembler* sasm) {
  generate_c1_runtime_stub_general(sasm, "c1_object_reference_write_slow_stub", FN_ADDR(MMTkBarrierSetRuntime::object_reference_write_slow_call), 3);
}
170 | 205 |
|
171 | 206 | #undef __ |
172 | 207 |
|
| 208 | +// Generate code stubs |
| 209 | + |
173 | 210 | #define __ ce->masm()-> |
174 | 211 |
|
175 | | -void MMTkBarrierSetAssembler::generate_c1_write_barrier_stub_call(LIR_Assembler* ce, MMTkC1BarrierStub* stub) { |
176 | | - // printf("xxx MMTkBarrierSetAssembler::generate_c1_write_barrier_stub_call\n"); |
177 | | - // See also void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) |
178 | | - MMTkBarrierSetC1* bs = (MMTkBarrierSetC1*) BarrierSet::barrier_set()->barrier_set_c1(); |
// Emits the out-of-line call sequence for a C1 reference-load barrier stub:
// binds the stub's entry label, skips the runtime call for a null value,
// otherwise stores the loaded reference as stub parameter 0 and far-calls
// the load-reference runtime code blob generated above.
void MMTkBarrierSetAssembler::generate_c1_ref_load_barrier_stub_call(LIR_Assembler* ce, MMTkC1ReferenceLoadBarrierStub* stub) {
  MMTkBarrierSetC1* bs = (MMTkBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();

  __ bind(*stub->entry());
  assert(stub->val->is_register(), "Precondition.");

  Register val_reg = stub->val->as_register();

  // Null references need no barrier work — jump straight back.
  __ cbz(val_reg, *stub->continuation());
  ce->store_parameter(stub->val->as_register(), 0);
  // far_call: the runtime code blob may be out of direct branch range.
  __ far_call(RuntimeAddress(bs->load_reference_c1_runtime_code_blob()->code_begin()));
  __ b(*stub->continuation());
}
189 | 225 |
|
|
0 commit comments