Refactoring: Make handling of the predictable code size flag architecture-independent.

Review URL: https://codereview.chromium.org/11359127

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@12920 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 92c6fe5c
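For orientation, a minimal sketch of the shape of this change (simplified and abridged, not the literal V8 headers): the predictable-code-size flag and its accessors move out of each architecture-specific Assembler and into the shared AssemblerBase, so every backend inherits a single copy.

    class AssemblerBase: public Malloced {
     public:
      explicit AssemblerBase(Isolate* isolate);

      bool predictable_code_size() const { return predictable_code_size_; }
      void set_predictable_code_size(bool value) { predictable_code_size_ = value; }

     private:
      Isolate* isolate_;
      int jit_cookie_;
      bool predictable_code_size_;  // single, architecture-independent copy
    };

    // The arm/ia32/x64 assemblers drop their own predictable_code_size_ member
    // and simply use the inherited accessors.
    class Assembler : public AssemblerBase { /* ... */ };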
@@ -326,8 +326,7 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
: AssemblerBase(arg_isolate),
recorded_ast_id_(TypeFeedbackId::None()),
positions_recorder_(this),
emit_debug_code_(FLAG_debug_code),
predictable_code_size_(false) {
emit_debug_code_(FLAG_debug_code) {
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
......
@@ -652,11 +652,6 @@ class Assembler : public AssemblerBase {
// Overrides the default provided by FLAG_debug_code.
void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
// Avoids using instructions that vary in size in unpredictable ways between
// the snapshot and the running VM. This is needed by the full compiler so
// that it can recompile code with debug support and fix the PC.
void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
@@ -1185,8 +1180,6 @@ class Assembler : public AssemblerBase {
// Jump unconditionally to given label.
void jmp(Label* L) { b(L, al); }
bool predictable_code_size() const { return predictable_code_size_; }
static bool use_immediate_embedded_pointer_loads(
const Assembler* assembler) {
#ifdef USE_BLX
@@ -1499,7 +1492,6 @@ class Assembler : public AssemblerBase {
PositionsRecorder positions_recorder_;
bool emit_debug_code_;
bool predictable_code_size_;
friend class PositionsRecorder;
friend class EnsureSpace;
@@ -1514,26 +1506,6 @@ class EnsureSpace BASE_EMBEDDED {
};
class PredictableCodeSizeScope {
public:
explicit PredictableCodeSizeScope(Assembler* assembler)
: asm_(assembler) {
old_value_ = assembler->predictable_code_size();
assembler->set_predictable_code_size(true);
}
~PredictableCodeSizeScope() {
if (!old_value_) {
asm_->set_predictable_code_size(false);
}
}
private:
Assembler* asm_;
bool old_value_;
};
} } // namespace v8::internal
#endif // V8_ARM_ASSEMBLER_ARM_H_
@@ -108,7 +108,8 @@ const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
AssemblerBase::AssemblerBase(Isolate* isolate)
: isolate_(isolate),
jit_cookie_(0) {
jit_cookie_(0),
predictable_code_size_(false) {
if (FLAG_mask_constants_with_cookie && isolate != NULL) {
jit_cookie_ = V8::RandomPrivate(isolate);
}
......
@@ -59,7 +59,10 @@ class AssemblerBase: public Malloced {
explicit AssemblerBase(Isolate* isolate);
Isolate* isolate() const { return isolate_; }
int jit_cookie() { return jit_cookie_; }
int jit_cookie() const { return jit_cookie_; }
bool predictable_code_size() const { return predictable_code_size_; }
void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
// Overwrite a host NaN with a quiet target NaN. Used by mksnapshot for
// cross-snapshotting.
@@ -68,6 +71,27 @@ class AssemblerBase: public Malloced {
private:
Isolate* isolate_;
int jit_cookie_;
bool predictable_code_size_;
};
// Avoids using instructions that vary in size in unpredictable ways between the
// snapshot and the running VM.
class PredictableCodeSizeScope {
public:
explicit PredictableCodeSizeScope(AssemblerBase* assembler)
: assembler_(assembler) {
old_value_ = assembler_->predictable_code_size();
assembler_->set_predictable_code_size(true);
}
~PredictableCodeSizeScope() {
assembler_->set_predictable_code_size(old_value_);
}
private:
AssemblerBase* assembler_;
bool old_value_;
};
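For context, a hedged usage sketch (the function name below is illustrative, not taken from this change): the scope is meant to wrap an emission region whose byte length must be identical between the snapshot and a later recompilation with debug support; since it now takes an AssemblerBase*, any architecture's assembler or macro assembler can be passed.

    // Hypothetical call site, for illustration only.
    void EmitFixedSizeSequence(MacroAssembler* masm) {
      // Force size-stable instruction selection for everything emitted in this
      // scope; the destructor restores the previous flag value on exit.
      PredictableCodeSizeScope predictable(masm);
      // ... emit instructions whose total size must not vary ...
    }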
......
@@ -587,11 +587,6 @@ class Assembler : public AssemblerBase {
// Overrides the default provided by FLAG_debug_code.
void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
// Avoids using instructions that vary in size in unpredictable ways between
// the snapshot and the running VM. This is needed by the full compiler so
// that it can recompile code with debug support and fix the PC.
void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
@@ -1126,7 +1121,6 @@ class Assembler : public AssemblerBase {
protected:
bool emit_debug_code() const { return emit_debug_code_; }
bool predictable_code_size() const { return predictable_code_size_ ; }
void movsd(XMMRegister dst, const Operand& src);
void movsd(const Operand& dst, XMMRegister src);
@@ -1202,7 +1196,6 @@ class Assembler : public AssemblerBase {
PositionsRecorder positions_recorder_;
bool emit_debug_code_;
bool predictable_code_size_;
friend class PositionsRecorder;
};
......
@@ -350,8 +350,7 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
: AssemblerBase(arg_isolate),
code_targets_(100),
positions_recorder_(this),
emit_debug_code_(FLAG_debug_code),
predictable_code_size_(false) {
emit_debug_code_(FLAG_debug_code) {
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
@@ -1238,13 +1237,13 @@ void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
// Determine whether we can use 1-byte offsets for backwards branches,
// which have a max range of 128 bytes.
// We also need to check the predictable_code_size_ flag here, because
// on x64, when the full code generator recompiles code for debugging, some
// places need to be padded out to a certain size. The debugger is keeping
// track of how often it did this so that it can adjust return addresses on
// the stack, but if the size of jump instructions can also change, that's
// not enough and the calculated offsets would be incorrect.
if (is_int8(offs - short_size) && !predictable_code_size_) {
// We also need to check the predictable_code_size() flag here, because on x64,
// when the full code generator recompiles code for debugging, some places
// need to be padded out to a certain size. The debugger is keeping track of
// how often it did this so that it can adjust return addresses on the
// stack, but if the size of jump instructions can also change, that's not
// enough and the calculated offsets would be incorrect.
if (is_int8(offs - short_size) && !predictable_code_size()) {
// 0111 tttn #8-bit disp.
emit(0x70 | cc);
emit((offs - short_size) & 0xFF);
@@ -1301,7 +1300,7 @@ void Assembler::jmp(Label* L, Label::Distance distance) {
if (L->is_bound()) {
int offs = L->pos() - pc_offset() - 1;
ASSERT(offs <= 0);
if (is_int8(offs - short_size) && !predictable_code_size_) {
if (is_int8(offs - short_size) && !predictable_code_size()) {
// 1110 1011 #8-bit disp.
emit(0xEB);
emit((offs - short_size) & 0xFF);
......
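For context, a hedged illustration of why the flag matters in Assembler::j and Assembler::jmp above (the helper below is illustrative, not V8 code): a backward conditional jump can be encoded in 2 bytes (0x70 | cc, 8-bit displacement) or 6 bytes (0x0F, 0x80 | cc, 32-bit displacement), and an unconditional jmp in 2 bytes (0xEB) or 5 bytes (0xE9). With predictable_code_size() set, the short forms are skipped so the instruction size does not depend on how far away the label is bound.

    // Illustrative helper only: size in bytes of a conditional backward jump.
    int ConditionalJumpSize(int offset, bool predictable_code_size) {
      const int kShortSize = 2;  // 0x70 | cc, disp8
      const int kNearSize = 6;   // 0x0F, 0x80 | cc, disp32
      bool fits_in_disp8 = offset >= -128 && offset <= 127;
      // Under predictable code size, always use the long form so the emitted
      // length does not change when code is recompiled for debugging.
      return (fits_in_disp8 && !predictable_code_size) ? kShortSize : kNearSize;
    }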
@@ -561,11 +561,6 @@ class Assembler : public AssemblerBase {
// Overrides the default provided by FLAG_debug_code.
void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
// Avoids using instructions that vary in size in unpredictable ways between
// the snapshot and the running VM. This is needed by the full compiler so
// that it can recompile code with debug support and fix the PC.
void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
@@ -1451,7 +1446,6 @@ class Assembler : public AssemblerBase {
protected:
bool emit_debug_code() const { return emit_debug_code_; }
bool predictable_code_size() const { return predictable_code_size_; }
private:
byte* addr_at(int pos) { return buffer_ + pos; }
@@ -1656,7 +1650,6 @@ class Assembler : public AssemblerBase {
PositionsRecorder positions_recorder_;
bool emit_debug_code_;
bool predictable_code_size_;
friend class PositionsRecorder;
};
......