diff --git a/src/compiler/byte_gen.cc b/src/compiler/byte_gen.cc
index f6741f818..a1e9f6ae1 100644
--- a/src/compiler/byte_gen.cc
+++ b/src/compiler/byte_gen.cc
@@ -593,7 +593,7 @@ void ByteGen::_generate_call(Call* node,
 
   if (node->range().is_valid()) {
     int bytecode_position = emitter()->position();
-    method_mapper_.register_call(bytecode_position, node->range());
+    method_mapper_.register_call(node, bytecode_position);
   }
 
   if (is_for_effect()) __ pop(1);
@@ -811,9 +811,7 @@ void ByteGen::visit_Typecheck(Typecheck* node) {
       int height = local_height(target->as_Local()->index());
       bytecode_position = __ typecheck_local(height, typecheck_index);
     }
-    method_mapper_.register_as_check(bytecode_position,
-                                     node->range(),
-                                     node->type_name().c_str());
+    method_mapper_.register_as_check(node, bytecode_position);
     return;
   }
 
@@ -828,9 +826,7 @@ void ByteGen::visit_Typecheck(Typecheck* node) {
 
   if (is_as_check) {
     int bytecode_position = emitter()->position();
-    method_mapper_.register_as_check(bytecode_position,
-                                     node->range(),
-                                     node->type_name().c_str());
+    method_mapper_.register_as_check(node, bytecode_position);
   }
   if (is_for_effect()) __ pop(1);
 }
@@ -974,7 +970,7 @@ void ByteGen::visit_ReferenceGlobal(ReferenceGlobal* node) {
   __ load_global_var(node->target()->global_id(), is_lazy);
 
   int bytecode_position = emitter()->position();
-  method_mapper_.register_call(bytecode_position, node->range());
+  method_mapper_.register_call(node, bytecode_position);
 
   if (is_for_effect()) __ pop(1);
 }
diff --git a/src/compiler/compiler.cc b/src/compiler/compiler.cc
index a8091f762..0052eef5c 100644
--- a/src/compiler/compiler.cc
+++ b/src/compiler/compiler.cc
@@ -1504,6 +1504,7 @@ static void check_sdk(const std::string& constraint, Diagnostics* diagnostics) {
 
 toit::Program* construct_program(ir::Program* ir_program,
                                  SourceMapper* source_mapper,
+                                 TypeOracle* oracle,
                                  TypeDatabase* propagated_types,
                                  bool run_optimizations) {
   source_mapper->register_selectors(ir_program->classes());
@@ -1515,9 +1516,21 @@ toit::Program* construct_program(ir::Program* ir_program,
   ASSERT(_sorted_by_inheritance(ir_program->classes()));
 
-  if (run_optimizations) optimize(ir_program, propagated_types);
+  if (run_optimizations) optimize(ir_program, oracle);
   tree_shake(ir_program);
 
+  // It is important that we seed and finalize the oracle in the same
+  // state, so the IR nodes used to produce the somewhat unoptimized
+  // program that we propagate types through can be matched up to the
+  // corresponding IR nodes for the fully optimized version.
+  if (propagated_types) {
+    oracle->finalize(ir_program, propagated_types);
+    optimize(ir_program, oracle);
+    tree_shake(ir_program);
+  } else {
+    oracle->seed(ir_program);
+  }
+
   // We assign the field ids very late in case we can inline field-accesses.
   assign_field_indexes(ir_program->classes());
 
   // Similarly, assign the global ids at the end, in case they can be tree
@@ -1606,7 +1619,8 @@ Pipeline::Result Pipeline::run(List source_paths, bool propagate) {
 
   SourceMapper unoptimized_source_mapper(source_manager());
   auto source_mapper = &unoptimized_source_mapper;
-  auto program = construct_program(ir_program, source_mapper, null, run_optimizations);
+  TypeOracle oracle(source_mapper);
+  auto program = construct_program(ir_program, source_mapper, &oracle, null, run_optimizations);
 
   SourceMapper optimized_source_mapper(source_manager());
   if (run_optimizations && configuration_.optimization_level >= 2) {
@@ -1621,14 +1635,14 @@ Pipeline::Result Pipeline::run(List source_paths, bool propagate) {
     // to behave the same way for the output to be correct.
     check_types_and_deprecations(ir_program, quiet);
     ASSERT(!diagnostics()->encountered_error());
-    TypeDatabase* types = TypeDatabase::compute(program, source_mapper);
+    TypeDatabase* types = TypeDatabase::compute(program);
     source_mapper = &optimized_source_mapper;
-    program = construct_program(ir_program, source_mapper, types, true);
+    program = construct_program(ir_program, source_mapper, &oracle, types, true);
     delete types;
   }
 
   if (propagate) {
-    TypeDatabase* types = TypeDatabase::compute(program, null);
+    TypeDatabase* types = TypeDatabase::compute(program);
     auto json = types->as_json();
     printf("%s", json.c_str());
     delete types;
diff --git a/src/compiler/optimizations/dead_code.cc b/src/compiler/optimizations/dead_code.cc
index fa04025cf..de45fd41a 100644
--- a/src/compiler/optimizations/dead_code.cc
+++ b/src/compiler/optimizations/dead_code.cc
@@ -105,8 +105,8 @@ class DeadCodeEliminator : public ReturningVisitor {
     }
   };
 
-  DeadCodeEliminator(TypeDatabase* propagated_types)
-      : propagated_types_(propagated_types)
+  explicit DeadCodeEliminator(TypeOracle* oracle)
+      : oracle_(oracle)
       , terminator_(null, Symbol::invalid()) {}
 
   Expression* visit(Expression* node, bool* terminates) {
@@ -254,9 +254,7 @@ class DeadCodeEliminator : public ReturningVisitor {
 
   Node* visit_ReferenceGlobal(ReferenceGlobal* node) {
     Global* global = node->target();
-    if (global->is_dead()) {
-      return is_for_effect() ? terminate(null) : terminate(_new Nop(node->range()));
-    }
+    if (global->is_dead()) return terminate(null);
     return (global->is_lazy() || is_for_value()) ? node : null;
   }
 
@@ -304,8 +302,7 @@ class DeadCodeEliminator : public ReturningVisitor {
     int used = 0;
     while (used < length && !terminates) {
      Expression* result = visit_for_value(arguments[used], &terminates);
-      arguments[used] = result;
-      used++;
+      if (result) arguments[used++] = result;
     }
 
     Expression* result = node;
@@ -321,11 +318,11 @@ class DeadCodeEliminator : public ReturningVisitor {
       }
       result = _new Sequence(arguments.sublist(0, used), node->range());
       ASSERT(terminates);
-    } else if (propagated_types_ != null && !node->is_CallBuiltin()) {
+    } else if (oracle_ != null && !node->is_CallBuiltin()) {
       // If we have propagated type information, we might know that
       // this call does not return. If so, we make sure to tag the
       // result correctly, so we drop code that follows the call.
-      terminates = propagated_types_->does_not_return(node);
+      terminates = oracle_->does_not_return(node);
     }
     return tag(result, terminates);
   }
@@ -350,7 +347,7 @@ class DeadCodeEliminator : public ReturningVisitor {
       if (terminates) break;
     }
     if (index == 0) {
-      return is_for_effect() ? terminate(null) : terminate(_new Nop(node->range()));
+      return terminate(null);
     } else {
       return terminate(_new Sequence(arguments.sublist(0, index), node->range()));
     }
@@ -428,7 +425,7 @@ class DeadCodeEliminator : public ReturningVisitor {
   Node* visit_FieldStub(FieldStub* node) { return visit_Method(node); }
 
  private:
-  TypeDatabase* propagated_types_;
+  TypeOracle* const oracle_;
 
   bool is_for_value_ = false;
   bool is_for_value() const { return is_for_value_; }
@@ -450,8 +447,8 @@ class DeadCodeEliminator : public ReturningVisitor {
   }
 };
 
-void eliminate_dead_code(Method* method, TypeDatabase* propagated_types) {
-  DeadCodeEliminator eliminator(propagated_types);
+void eliminate_dead_code(Method* method, TypeOracle* oracle) {
+  DeadCodeEliminator eliminator(oracle);
   Expression* body = method->body();
   if (body == null) return;
 
diff --git a/src/compiler/optimizations/dead_code.h b/src/compiler/optimizations/dead_code.h
index 30f4b32d5..3c4b73ad8 100644
--- a/src/compiler/optimizations/dead_code.h
+++ b/src/compiler/optimizations/dead_code.h
@@ -21,7 +21,7 @@ namespace toit {
 namespace compiler {
 
-void eliminate_dead_code(ir::Method* method, TypeDatabase* propagated_types);
+void eliminate_dead_code(ir::Method* method, TypeOracle* oracle);
 
 } // namespace toit::compiler
 } // namespace toit
diff --git a/src/compiler/optimizations/optimizations.cc b/src/compiler/optimizations/optimizations.cc
index 7cbbbe031..e0761cc48 100644
--- a/src/compiler/optimizations/optimizations.cc
+++ b/src/compiler/optimizations/optimizations.cc
@@ -33,34 +33,27 @@ using namespace ir;
 
 class KillerVisitor : public TraversingVisitor {
  public:
-  KillerVisitor(TypeDatabase* propagated_types)
-      : propagated_types_(propagated_types) {}
+  explicit KillerVisitor(TypeOracle* oracle)
+      : oracle_(oracle) {}
 
   void visit_Method(Method* node) {
     TraversingVisitor::visit_Method(node);
-    if (propagated_types_ && propagated_types_->is_dead(node)) {
-      node->kill();
-    }
+    if (oracle_->is_dead(node)) node->kill();
   }
 
   void visit_Code(Code* node) {
     TraversingVisitor::visit_Code(node);
-    if (propagated_types_ && propagated_types_->is_dead(node)) {
-      node->kill();
-    }
+    if (oracle_->is_dead(node)) node->kill();
   }
 
   void visit_Global(Global* node) {
     TraversingVisitor::visit_Method(node);
     mark_if_eager(node);
-    if (!node->is_lazy()) return;
-    if (propagated_types_ && propagated_types_->is_dead(node)) {
-      node->kill();
-    }
+    if (node->is_lazy() && oracle_->is_dead(node)) node->kill();
  }
 
  private:
-  TypeDatabase* const propagated_types_;
+  TypeOracle* const oracle_;
 
   void mark_if_eager(Global* global) {
     // This runs after the constant propagation phase, so it is
@@ -83,10 +76,10 @@ class OptimizationVisitor : public ReplacingVisitor {
  public:
-  OptimizationVisitor(TypeDatabase* propagated_types,
+  OptimizationVisitor(TypeOracle* oracle,
                       const UnorderedMap queryables,
                       const UnorderedSet& field_names)
-      : propagated_types_(propagated_types)
+      : oracle_(oracle)
       , holder_(null)
       , method_(null)
       , queryables_(queryables)
@@ -95,9 +88,9 @@ class OptimizationVisitor : public ReplacingVisitor {
   Node* visit_Method(Method* node) {
     if (node->is_dead()) return node;
     method_ = node;
-    eliminate_dead_code(node, null);
+    eliminate_dead_code(node, oracle_);
     Node* result = ReplacingVisitor::visit_Method(node);
-    eliminate_dead_code(node, propagated_types_);
+    eliminate_dead_code(node, oracle_);
     method_ = null;
     return result;
   }
@@ -134,7 +127,7 @@ class OptimizationVisitor : public ReplacingVisitor {
   void set_class(Class* klass) { holder_ = klass; }
 
  private:
-  TypeDatabase* const propagated_types_;
+  TypeOracle* const oracle_;
   Class* holder_;  // Null, if not in class (or a static method/field).
   Method* method_;
@@ -142,11 +135,11 @@ class OptimizationVisitor : public ReplacingVisitor {
   UnorderedMap queryables_;
   UnorderedSet field_names_;
 };
 
-void optimize(Program* program, TypeDatabase* propagated_types) {
+void optimize(Program* program, TypeOracle* oracle) {
   // The constant propagation runs independently, as it builds up its own
   // dependency graph.
   propagate_constants(program);
 
-  KillerVisitor killer(propagated_types);
+  KillerVisitor killer(oracle);
   killer.visit(program);
 
   auto classes = program->classes();
@@ -176,7 +169,7 @@ void optimize(Program* program, TypeDatabase* propagated_types) {
     }
   }
 
-  OptimizationVisitor visitor(propagated_types, queryables, field_names);
+  OptimizationVisitor visitor(oracle, queryables, field_names);
 
   for (auto klass : classes) {
     visitor.set_class(klass);
diff --git a/src/compiler/optimizations/optimizations.h b/src/compiler/optimizations/optimizations.h
index 5ac0631f8..20cf53988 100644
--- a/src/compiler/optimizations/optimizations.h
+++ b/src/compiler/optimizations/optimizations.h
@@ -22,7 +22,7 @@ namespace toit {
 namespace compiler {
 
 // Optimizes the program by combining all available sub-optimizations.
-void optimize(ir::Program* program, TypeDatabase* propagated_types);
+void optimize(ir::Program* program, TypeOracle* oracle);
 
 } // namespace toit::compiler
 } // namespace toit
diff --git a/src/compiler/propagation/type_database.cc b/src/compiler/propagation/type_database.cc
index 3f4098a80..88bc1b5a1 100644
--- a/src/compiler/propagation/type_database.cc
+++ b/src/compiler/propagation/type_database.cc
@@ -31,9 +31,8 @@ static int opcode_length[] { BYTECODES(BYTECODE_LENGTH) -1 };
 static const int TYPES_BLOCK_SIZE = 1024;
 
 std::unordered_map TypeDatabase::cache_;
-TypeDatabase::TypeDatabase(Program* program, SourceMapper* source_mapper, int words_per_type)
+TypeDatabase::TypeDatabase(Program* program, int words_per_type)
     : program_(program)
-    , source_mapper_(source_mapper)
     , words_per_type_(words_per_type) {
   add_types_block();
 }
@@ -120,14 +119,14 @@ void TypeDatabase::check_method_entry(Method method, Object** sp) const {
   }
 }
 
-TypeDatabase* TypeDatabase::compute(Program* program, SourceMapper* source_mapper) {
+TypeDatabase* TypeDatabase::compute(Program* program) {
   auto probe = cache_.find(program);
   if (probe != cache_.end()) return probe->second;
 
   AllowThrowingNew allow;
   uint64 start = OS::get_monotonic_time();
   TypePropagator propagator(program);
-  TypeDatabase* types = new TypeDatabase(program, source_mapper, propagator.words_per_type());
+  TypeDatabase* types = new TypeDatabase(program, propagator.words_per_type());
   propagator.propagate(types);
   uint64 elapsed = OS::get_monotonic_time() - start;
   if (false) {
@@ -169,32 +168,21 @@ const TypeSet TypeDatabase::usage(int position) const {
   }
 }
 
-bool TypeDatabase::is_dead(ir::Method* method) const {
-  if (method->is_IsInterfaceStub()) return false;
-  int id = source_mapper_->id_for_method(method);
-  if (id < 0) return true;
-  auto probe = methods_.find(id);
-  return probe == methods_.end();
-}
-
-bool TypeDatabase::is_dead(ir::Code* code) const {
-  int id = source_mapper_->id_for_code(code);
-  if (id < 0) return true;
-  auto probe = methods_.find(id);
+bool TypeDatabase::is_dead_method(int position) const {
+  if (position < 0) return true;
+  auto probe = methods_.find(position);
   return probe == methods_.end();
 }
 
-bool TypeDatabase::is_dead(ir::Call* call) const {
-  int id = source_mapper_->id_for_call(call);
-  if (id < 0) return true;
-  auto probe = returns_.find(id);
-  return (probe == returns_.end());
+bool TypeDatabase::is_dead_call(int position) const {
+  if (position < 0) return true;
+  auto probe = returns_.find(position);
+  return probe == returns_.end();
 }
 
-bool TypeDatabase::does_not_return(ir::Call* call) const {
-  int id = source_mapper_->id_for_call(call);
-  if (id < 0) return true;
-  auto probe = returns_.find(id);
+bool TypeDatabase::does_not_return(int position) const {
+  if (position < 0) return true;
+  auto probe = returns_.find(position);
   if (probe == returns_.end()) return true;
   TypeSet type = probe->second;
   return type.is_empty(words_per_type_);
@@ -280,5 +268,91 @@ TypeStack* TypeDatabase::add_types_block() {
   return stack;
 }
 
+class TypeOraclePopulator : public ir::TraversingVisitor {
+ public:
+  explicit TypeOraclePopulator(TypeOracle* oracle)
+      : oracle_(oracle) {}
+
+  void visit_Method(ir::Method* node) {
+    ir::TraversingVisitor::visit_Method(node);
+    oracle_->add(node);
+  }
+
+  void visit_Code(ir::Code* node) {
+    ir::TraversingVisitor::visit_Code(node);
+    oracle_->add(node);
+  }
+
+  void visit_Call(ir::Call* node) {
+    ir::TraversingVisitor::visit_Call(node);
+    oracle_->add(node);
+  }
+
+ private:
+  TypeOracle* const oracle_;
+};
+
+void TypeOracle::seed(ir::Program* program) {
+  ASSERT(types_ == null);
+  TypeOraclePopulator populator(this);
+  program->accept(&populator);
+}
+
+void TypeOracle::finalize(ir::Program* program, TypeDatabase* types) {
+  types_ = types;
+  TypeOraclePopulator populator(this);
+  program->accept(&populator);
+  ASSERT(nodes_.size() == map_.size());
+}
+
+void TypeOracle::add(ir::Node* node) {
+  if (types_ == null) {
+    nodes_.push_back(node);
+  } else {
+    int index = map_.size();
+    ir::Node* existing = nodes_[index];
+    map_[node] = existing;
+    ASSERT(strcmp(node->node_type(), existing->node_type()) == 0);
+    ASSERT(!node->is_Method() || node->as_Method()->range() == existing->as_Method()->range());
+    ASSERT(!node->is_Expression() || node->as_Expression()->range() == existing->as_Expression()->range());
+  }
+}
+
+ir::Node* TypeOracle::lookup(ir::Node* node) const {
+  if (types_ == null) return null;
+  auto probe = map_.find(node);
+  if (probe == map_.end()) return null;
+  return probe->second;
+}
+
+bool TypeOracle::is_dead(ir::Method* method) const {
+  if (method->is_IsInterfaceStub()) return false;
+  auto probe = lookup(method);
+  if (!probe) return false;
+  int position = source_mapper_->position_for_method(probe->as_Method());
+  return types_->is_dead_method(position);
+}
+
+bool TypeOracle::is_dead(ir::Code* code) const {
+  auto probe = lookup(code);
+  if (!probe) return false;
+  int position = source_mapper_->position_for_method(probe->as_Code());
+  return types_->is_dead_method(position);
+}
+
+bool TypeOracle::is_dead(ir::Call* call) const {
+  auto probe = lookup(call);
+  if (!probe) return false;
+  int position = source_mapper_->position_for_expression(probe->as_Call());
+  return types_->is_dead_call(position);
+}
+
+bool TypeOracle::does_not_return(ir::Call* call) const {
+  auto probe = lookup(call);
+  if (!probe) return false;
+  int position = source_mapper_->position_for_expression(probe->as_Call());
+  return types_->does_not_return(position);
+}
+
 } // namespace toit::compiler
 } // namespace toit
diff --git a/src/compiler/propagation/type_database.h b/src/compiler/propagation/type_database.h
index 2a7fdd779..21e2040b7 100644
--- a/src/compiler/propagation/type_database.h
+++ b/src/compiler/propagation/type_database.h
@@ -35,7 +35,7 @@ class SourceMapper;
 
 class TypeDatabase {
  public:
-  static TypeDatabase* compute(Program* program, SourceMapper* source_mapper);
+  static TypeDatabase* compute(Program* program);
   ~TypeDatabase();
 
   const std::vector methods() const;
@@ -46,10 +46,9 @@ class TypeDatabase {
   std::string as_json() const;
 
   // Helpers for optimization phase.
-  bool is_dead(ir::Method* method) const;
-  bool is_dead(ir::Code* code) const;
-  bool is_dead(ir::Call* call) const;
-  bool does_not_return(ir::Call* call) const;
+  bool is_dead_method(int position) const;
+  bool is_dead_call(int position) const;
+  bool does_not_return(int position) const;
 
   // Helpers for type checking interpreter variant.
   void check_top(uint8* bcp, Object* top) const;
@@ -58,8 +57,6 @@ class TypeDatabase {
 
  private:
   Program* const program_;
-  SourceMapper* const source_mapper_;
-
   const int words_per_type_;
 
   std::vector types_;
@@ -69,7 +66,7 @@ class TypeDatabase {
 
   static std::unordered_map cache_;
 
-  TypeDatabase(Program* program, SourceMapper* source_mapper, int words_per_type);
+  TypeDatabase(Program* program, int words_per_type);
   void add_method(Method method);
   void add_argument(Method method, int n, const TypeSet type);
@@ -81,5 +78,32 @@ class TypeDatabase {
   friend class TypePropagator;
 };
 
+class TypeOracle {
+ public:
+  explicit TypeOracle(SourceMapper* source_mapper)
+      : source_mapper_(source_mapper) {}
+
+  void seed(ir::Program* program);
+  void finalize(ir::Program* program, TypeDatabase* types);
+
+  // Helpers for optimization phase.
+  bool is_dead(ir::Method* method) const;
+  bool is_dead(ir::Code* code) const;
+  bool is_dead(ir::Call* call) const;
+  bool does_not_return(ir::Call* call) const;
+
+ private:
+  SourceMapper* const source_mapper_;
+  TypeDatabase* types_ = null;
+
+  std::vector nodes_;
+  std::unordered_map map_;
+
+  void add(ir::Node* node);
+  ir::Node* lookup(ir::Node* node) const;
+
+  friend class TypeOraclePopulator;
+};
+
 } // namespace toit::compiler
 } // namespace toit
diff --git a/src/compiler/source_mapper.cc b/src/compiler/source_mapper.cc
index b1a89b0c8..8bad1944d 100644
--- a/src/compiler/source_mapper.cc
+++ b/src/compiler/source_mapper.cc
@@ -130,8 +130,20 @@ class SourceInfoEmitter: public SourceInfoCollector {
   }
 };
 
+void SourceMapper::MethodMapper::register_call(ir::Call* call, int bytecode_offset) {
+  source_mapper()->register_expression(call, method_index_, bytecode_offset);
+}
+
+void SourceMapper::MethodMapper::register_call(ir::ReferenceGlobal* call, int bytecode_offset) {
+  source_mapper()->register_expression(call, method_index_, bytecode_offset);
+}
+
+void SourceMapper::MethodMapper::register_as_check(ir::Typecheck* check, int bytecode_offset) {
+  source_mapper()->register_expression(check, method_index_, bytecode_offset);
+  source_mapper()->register_as_check(check, method_index_, bytecode_offset);
+}
+
 void SourceMapper::visit_selectors(SourceInfoCollector* collector) {
-  // For now just write unique
   collector->write_int(selectors_.size());
   for (auto location_id : selectors_.keys()) {
     collector->write_int(location_id);
@@ -288,25 +300,16 @@ uint8* SourceMapper::cook(int* size) {
 }
 
-SourceMapper::MethodEntry SourceMapper::build_method_entry(int index,
+SourceMapper::MethodEntry SourceMapper::build_method_entry(ir::Node* node,
+                                                           int index,
                                                            MethodType type,
                                                            int outer,
                                                            const char* name,
                                                            const char* holder_name,
                                                            Source::Range range) {
-  auto from = range.from();
-
-  // TODO(kasper): We end up registering multiple different methods with the same
-  // source position because of adapter stubs, etc. Things work out okay if we
-  // prefer the first method because these aren't the synthetic stubs that the
-  // compiler inserts. We need to base this on something unique instead of the
-  // source positions that we tend to reuse.
-  auto probe = method_positions_.find(from.token());
-  if (probe == method_positions_.end()) {
-    method_positions_[from.token()] = index;
-  }
-
-  auto location = manager_->compute_location(from);
+  ASSERT(!method_indexes_.contains_key(node));
+  method_indexes_[node] = index;
+  auto location = manager_->compute_location(range.from());
   return {
     .index = index,
     .id = -1,  // Set to -1, and must be updated later.
@@ -390,24 +393,17 @@ void SourceMapper::add_global_entry(ir::Global* global) {
   });
 }
-int SourceMapper::id_for_method(ir::Method* method) {
-  auto probe = method_positions_.find(method->range().from().token());
-  if (probe == method_positions_.end()) return -1;
+int SourceMapper::position_for_method(ir::Node* node) const {
+  auto probe = method_indexes_.find(node);
+  if (probe == method_indexes_.end()) return -1;
   auto& method_data = source_information_[probe->second];
   return method_data.id;
 }
 
-int SourceMapper::id_for_code(ir::Code* code) {
-  auto probe = method_positions_.find(code->range().from().token());
-  if (probe == method_positions_.end()) return -1;
-  auto& method_data = source_information_[probe->second];
-  return method_data.id;
-}
-
-int SourceMapper::id_for_call(ir::Call* call) {
-  auto probe = bytecode_positions_.find(call->range().from().token());
-  if (probe == bytecode_positions_.end()) return -1;
-  std::pair& entry = probe->second;
+int SourceMapper::position_for_expression(ir::Expression* expression) const {
+  auto probe = expression_positions_.find(expression);
+  if (probe == expression_positions_.end()) return -1;
+  const std::pair& entry = probe->second;
   int method_index = entry.first;
   int bytecode_offset = entry.second;
   auto& method_data = source_information_[method_index];
   return method_data.id;
@@ -446,7 +442,7 @@ SourceMapper::MethodMapper SourceMapper::register_method(ir::Method* method) {
   int holder_id;
   const char* holder_name;
   extract_holder_information(method->holder(), &holder_id, &holder_name);
-  source_information_.push_back(build_method_entry(index, type, holder_id, name, holder_name, range));
+  source_information_.push_back(build_method_entry(method, index, type, holder_id, name, holder_name, range));
   return MethodMapper(this, index);
 }
 
@@ -459,7 +455,7 @@ SourceMapper::MethodMapper SourceMapper::register_global(ir::Global* global) {
   int holder_id;
   const char* holder_name;
   extract_holder_information(global->holder(), &holder_id, &holder_name);
-  source_information_.push_back(build_method_entry(index, MethodType::GLOBAL, holder_id, name, holder_name, range));
+  source_information_.push_back(build_method_entry(global, index, MethodType::GLOBAL, holder_id, name, holder_name, range));
   return MethodMapper(this, index);
 }
 
@@ -468,7 +464,7 @@ SourceMapper::MethodMapper SourceMapper::register_lambda(int outer_index, ir::Co
   auto name = "";
   auto range = code->range();
   int encoded_outer = encode_outer_index(outer_index);
-  source_information_.push_back(build_method_entry(index, MethodType::LAMBDA, encoded_outer, name, "", range));
+  source_information_.push_back(build_method_entry(code, index, MethodType::LAMBDA, encoded_outer, name, "", range));
   return MethodMapper(this, index);
 }
 
@@ -477,37 +473,27 @@ SourceMapper::MethodMapper SourceMapper::register_block(int outer_index, ir::Cod
   auto name = "";
   auto range = code->range();
   int encoded_outer = encode_outer_index(outer_index);
-  source_information_.push_back(build_method_entry(index, MethodType::BLOCK, encoded_outer, name, "", range));
+  source_information_.push_back(build_method_entry(code, index, MethodType::BLOCK, encoded_outer, name, "", range));
   return MethodMapper(this, index);
 }
 
-void SourceMapper::register_bytecode(int method_index, int bytecode_offset, Source::Range range) {
+void SourceMapper::register_expression(ir::Expression* expression, int method_index, int bytecode_offset) {
+  ASSERT(!expression_positions_.contains_key(expression));
+  expression_positions_[expression] = std::pair(method_index, bytecode_offset);
   ASSERT(method_index >= 0);
-  auto from = range.from();
-
-  // TODO(kasper): We end up registering multiple different bytecodes with the same
-  // source position. This is not ideal, but things work out okay if we prefer to
-  // keep the information for the first bytecode in the first method. This should
-  // be reworked and depend on something that is unique -- unlike source positions.
-  auto probe = bytecode_positions_.find(from.token());
-  if (probe == bytecode_positions_.end() ||
-      ((probe->second).first > method_index) ||
-      ((probe->second).first == method_index && (probe->second).second > bytecode_offset)) {
-    bytecode_positions_[from.token()] = std::pair(method_index, bytecode_offset);
-  }
-
-  auto& method_data = source_information_[method_index];
-  auto location = manager_->compute_location(from);
+  auto& method_data = source_information_[method_index];
+  auto range = expression->range();
+  auto location = manager_->compute_location(range.from());
   method_data.bytecode_positions[bytecode_offset] = {
     .line = location.line_number,
     .column = location.offset_in_line + 1,  // Offsets are 0-based, but columns are 1-based.
   };
 }
 
-void SourceMapper::register_as(int method_index, int bytecode_offset, const char* class_name) {
+void SourceMapper::register_as_check(ir::Typecheck* check, int method_index, int bytecode_offset) {
   ASSERT(method_index >= 0);
   auto& method_data = source_information_[method_index];
-  method_data.as_class_names[bytecode_offset] = class_name;
+  method_data.as_class_names[bytecode_offset] = check->type_name().c_str();
 }
 
 void SourceMapper::extract_holder_information(ir::Class* holder,
diff --git a/src/compiler/source_mapper.h b/src/compiler/source_mapper.h
index b227239fb..54c944862 100644
--- a/src/compiler/source_mapper.h
+++ b/src/compiler/source_mapper.h
@@ -26,11 +26,15 @@ namespace toit {
 namespace compiler {
 
 namespace ir {
+class Call;
+class Class;
+class Code;
+class Expression;
 class Global;
 class Method;
-class Code;
-class Class;
-class Call;
+class Node;
+class ReferenceGlobal;
+class Typecheck;
 } // namespace toit::compiler::ir
 
 class SourceInfoCollector;
@@ -51,38 +55,27 @@ class SourceMapper {
 
     bool is_valid() const { return source_mapper_ != null; }
 
-    void register_call(int bytecode_offset, Source::Range range) {
-      ASSERT(is_valid());
-      ASSERT(!is_finalized_);
-      source_mapper_->register_bytecode(method_index_, bytecode_offset, range);
+    void register_call(ir::Call* call, int bytecode_offset);
+    void register_call(ir::ReferenceGlobal* call, int bytecode_offset);
+    void register_as_check(ir::Typecheck* check, int bytecode_offset);
+
+    MethodMapper register_lambda(ir::Code* code) {
+      return source_mapper()->register_lambda(method_index_, code);
     }
 
-    void register_as_check(int bytecode_offset, Source::Range range, const char* class_name) {
-      ASSERT(is_valid());
-      ASSERT(!is_finalized_);
-      source_mapper_->register_bytecode(method_index_, bytecode_offset, range);
-      source_mapper_->register_as(method_index_, bytecode_offset, class_name);
+    MethodMapper register_block(ir::Code* code) {
+      return source_mapper()->register_block(method_index_, code);
     }
 
     void finalize(int method_id, int size) {
-      ASSERT(is_valid());
-      ASSERT(!is_finalized_);
+      SourceMapper* mapper = source_mapper();
       is_finalized_ = true;
       ASSERT(method_id >= 0);
       ASSERT(size >= 0);
-      ASSERT(source_mapper_->source_information_[method_index_].id == -1);
-      source_mapper_->source_information_[method_index_].id = method_id;
-      ASSERT(source_mapper_->source_information_[method_index_].bytecode_size == -1);
-      source_mapper_->source_information_[method_index_].bytecode_size = size;
-    }
-
-    MethodMapper register_lambda(ir::Code* code) {
-      ASSERT(is_valid());
-      return source_mapper_->register_lambda(method_index_, code);
-    }
-    MethodMapper register_block(ir::Code* code) {
-      ASSERT(is_valid());
-      return source_mapper_->register_block(method_index_, code);
+      ASSERT(mapper->source_information_[method_index_].id == -1);
+      mapper->source_information_[method_index_].id = method_id;
+      ASSERT(mapper->source_information_[method_index_].bytecode_size == -1);
+      mapper->source_information_[method_index_].bytecode_size = size;
     }
 
    private:
@@ -93,6 +86,12 @@ class SourceMapper {
     SourceMapper* source_mapper_;
     int method_index_;
     bool is_finalized_ = false;
+
+    SourceMapper* source_mapper() const {
+      ASSERT(is_valid());
+      ASSERT(!is_finalized_);
+      return source_mapper_;
+    }
   };
 
   explicit SourceMapper(SourceManager* manager) : manager_(manager) {}
@@ -109,22 +108,21 @@ class SourceMapper {
   // introducing stub-methods. (At least as much as possible).
   void register_selectors(List classes);
 
+  void register_selector_offset(int offset, const char* name) {
+    selector_offsets_[offset] = name;
+  }
+
   void add_class_entry(int id, ir::Class* klass);
   void add_global_entry(ir::Global* global);
 
-  int id_for_class(ir::Class* klass) {
+  int id_for_class(ir::Class* klass) const {
     auto probe = class_information_.find(klass);
     if (probe == class_information_.end()) return -1;
     return probe->second.id;
   }
 
-  int id_for_method(ir::Method* method);
-  int id_for_code(ir::Code* code);
-  int id_for_call(ir::Call* call);
-
-  void register_selector_offset(int offset, const char* name) {
-    selector_offsets_[offset] = name;
-  }
+  int position_for_method(ir::Node* node) const;
+  int position_for_expression(ir::Expression* expression) const;
 
  private:
  struct FilePosition {
@@ -200,14 +198,15 @@ class SourceMapper {
   void visit_selector_offset_info(SourceInfoCollector* collector);
   void visit_global_info(SourceInfoCollector* collector);
 
-  MethodEntry build_method_entry(int id,
+  MethodEntry build_method_entry(ir::Node* node,
+                                 int id,
                                  MethodType type,
                                  int outer,
                                  const char* name,
                                  const char* holder_name,
                                  Source::Range range);
-  void register_bytecode(int method_id, int bytecode_offset, Source::Range range);
-  void register_as(int method_id, int bytecode_offset, const char* class_name);
+  void register_expression(ir::Expression* expression, int method_id, int bytecode_offset);
+  void register_as_check(ir::Typecheck* check, int method_id, int bytecode_offset);
 
   std::vector source_information_;
   Map class_information_;
@@ -216,10 +215,10 @@ class SourceMapper {
   // Map from location-id to selector class-entry.
   Map selectors_;
 
-  // Map from method source position to bytecode index.
-  Map method_positions_;
-  // Map from call source position to method index and bytecode offset.
-  Map> bytecode_positions_;
+  // Map from method or code to method index.
+  Map method_indexes_;
+  // Map from expressions to method index and bytecode offset.
+  Map> expression_positions_;
 
   void extract_holder_information(ir::Class* holder,
                                   int* holder_id,
diff --git a/src/interpreter_run.cc b/src/interpreter_run.cc
index dba912299..695755957 100644
--- a/src/interpreter_run.cc
+++ b/src/interpreter_run.cc
@@ -278,8 +278,7 @@ Interpreter::Result Interpreter::run() {
   // Interpretation state.
   Program* program = process_->program();
 #ifdef TOIT_CHECK_PROPAGATED_TYPES
-  compiler::TypeDatabase* propagated_types =
-      compiler::TypeDatabase::compute(program, null);
+  compiler::TypeDatabase* propagated_types = compiler::TypeDatabase::compute(program);
 #endif
   preemption_method_header_bcp_ = null;
   uword index__ = 0;
diff --git a/tests/type_propagation/gold/nlr_test.gold-O2 b/tests/type_propagation/gold/nlr_test.gold-O2
index 0dfb75660..9d1415117 100644
--- a/tests/type_propagation/gold/nlr_test.gold-O2
+++ b/tests/type_propagation/gold/nlr_test.gold-O2
@@ -20,7 +20,7 @@ always_return tests/type_propagation/nlr_test.toit
   0[029] - load [block] in always_return tests/type_propagation/nlr_test.toit
   5[038] - load block 0
   7[053] - invoke static invoke tests/type_propagation/nlr_test.toit // {}
-10[040] - pop 2
+10[041] - pop 1
 
 [block] in always_return tests/type_propagation/nlr_test.toit
 - argument 0: [block]
diff --git a/tests/type_propagation/gold/non_local_return_test.gold-O2 b/tests/type_propagation/gold/non_local_return_test.gold-O2
index 39f17350a..8c9c56980 100644
--- a/tests/type_propagation/gold/non_local_return_test.gold-O2
+++ b/tests/type_propagation/gold/non_local_return_test.gold-O2
@@ -8,7 +8,7 @@ test_simple tests/type_propagation/non_local_return_test.toit
   0[029] - load [block] in test_simple tests/type_propagation/non_local_return_test.toit
   5[038] - load block 0
   7[053] - invoke static invoke tests/type_propagation/non_local_return_test.toit // {}
-10[040] - pop 2
+10[041] - pop 1
 
 [block] in test_simple tests/type_propagation/non_local_return_test.toit
 - argument 0: [block]
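
Note: the TypeOracle introduced in this patch works in two phases. seed() records the IR nodes of the not-yet-optimized program in traversal order; finalize() later walks the freshly built program in the same order and pairs the i-th node it visits with the i-th seeded node, so positions recorded for the analyzed program can be looked up for the optimized one. The sketch below is a minimal stand-alone illustration of that order-based pairing; it uses hypothetical Node and Oracle stand-ins, not the compiler's real classes, and is not part of the patch.

// Illustrative sketch only -- hypothetical types, not part of the patch.
// It mimics how TypeOracle::seed() and TypeOracle::finalize() pair up nodes
// from two traversals that visit structurally matching programs in the
// same order.
#include <cassert>
#include <string>
#include <unordered_map>
#include <vector>

struct Node {
  std::string kind;  // stands in for ir::Node::node_type()
};

class Oracle {
 public:
  // First pass: remember every node of the program that types were
  // propagated through, in visitation order.
  void seed(const std::vector<Node*>& program) {
    nodes_ = program;
  }

  // Second pass: the rebuilt program is visited in the same order, so the
  // i-th node seen here corresponds to the i-th seeded node.
  void finalize(const std::vector<Node*>& program) {
    for (Node* node : program) {
      Node* existing = nodes_[map_.size()];
      assert(node->kind == existing->kind);  // expect the same shape
      map_[node] = existing;
    }
  }

  // Find the counterpart that the type propagation actually analyzed.
  Node* lookup(Node* node) const {
    auto probe = map_.find(node);
    return probe == map_.end() ? nullptr : probe->second;
  }

 private:
  std::vector<Node*> nodes_;
  std::unordered_map<Node*, Node*> map_;
};

int main() {
  Node a{"Method"}, b{"Call"};    // nodes of the analyzed program
  Node a2{"Method"}, b2{"Call"};  // matching nodes of the rebuilt program
  Oracle oracle;
  oracle.seed({&a, &b});
  oracle.finalize({&a2, &b2});
  assert(oracle.lookup(&b2) == &b);
  return 0;
}

The pairing is only sound because both traversals see structurally identical programs, which is exactly why construct_program seeds and finalizes the oracle at the same optimization state.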