diff --git a/Compiler/src/typeinfer.jl b/Compiler/src/typeinfer.jl
index 1afe4c297e5ba..43b743982ea89 100644
--- a/Compiler/src/typeinfer.jl
+++ b/Compiler/src/typeinfer.jl
@@ -399,7 +399,7 @@ function transform_result_for_cache(interp::AbstractInterpreter, result::Inferen
     if isa(src, OptimizationState)
         opt = src
         inlining_cost = compute_inlining_cost(interp, result, opt.optresult)
-        discard_optimized_result(interp, opt, inlining_cost) && return nothing
+        discard_optimized_result(interp, opt, inlining_cost, result.ipo_effects) && return nothing
         src = ir_to_codeinf!(opt)
     end
     if isa(src, CodeInfo)
@@ -409,7 +409,7 @@ function transform_result_for_cache(interp::AbstractInterpreter, result::Inferen
     return src
 end
 
-function discard_optimized_result(interp::AbstractInterpreter, opt#=::OptimizationState=#, inlining_cost#=::InlineCostType=#)
+function discard_optimized_result(interp::AbstractInterpreter, opt#=::OptimizationState=#, inlining_cost#=::InlineCostType=#, effects::Effects)
     may_discard_trees(interp) || return false
     return inlining_cost == MAX_INLINE_COST
 end
diff --git a/src/aotcompile.cpp b/src/aotcompile.cpp
index d687f44808409..5cdecda9d8582 100644
--- a/src/aotcompile.cpp
+++ b/src/aotcompile.cpp
@@ -675,20 +675,6 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm
     fargs[0] = (jl_value_t*)codeinfos;
     void *data = jl_emit_native(codeinfos, llvmmod, &cgparams, external_linkage);
 
-    // examine everything just emitted and save it to the caches
-    if (!external_linkage) {
-        for (size_t i = 0, l = jl_array_nrows(codeinfos); i < l; i++) {
-            jl_value_t *item = jl_array_ptr_ref(codeinfos, i);
-            if (jl_is_code_instance(item)) {
-                // now add it to our compilation results
-                jl_code_instance_t *codeinst = (jl_code_instance_t*)item;
-                jl_code_info_t *src = (jl_code_info_t*)jl_array_ptr_ref(codeinfos, ++i);
-                assert(jl_is_code_info(src));
-                jl_add_codeinst_to_cache(codeinst, src);
-            }
-        }
-    }
-
     // move everything inside, now that we've merged everything
     // (before adding the exported headers)
     ((jl_native_code_desc_t*)data)->M.withModuleDo([&](Module &M) {
diff --git a/src/gf.c b/src/gf.c
index 6583262798806..1a51f7cd817ec 100644
--- a/src/gf.c
+++ b/src/gf.c
@@ -2836,30 +2836,10 @@ void jl_read_codeinst_invoke(jl_code_instance_t *ci, uint8_t *specsigflags, jl_c
 
 jl_method_instance_t *jl_normalize_to_compilable_mi(jl_method_instance_t *mi JL_PROPAGATES_ROOT);
 
-JL_DLLEXPORT void jl_add_codeinst_to_cache(jl_code_instance_t *codeinst, jl_code_info_t *src)
-{
-    assert(jl_is_code_info(src));
-    jl_method_instance_t *mi = jl_get_ci_mi(codeinst);
-    if (jl_generating_output() && jl_is_method(mi->def.method) && jl_atomic_load_relaxed(&codeinst->inferred) == jl_nothing) {
-        jl_value_t *compressed = jl_compress_ir(mi->def.method, src);
-        // These should already be compatible (and should be an assert), but make sure of it anyways
-        if (jl_is_svec(src->edges)) {
-            jl_atomic_store_release(&codeinst->edges, (jl_svec_t*)src->edges);
-            jl_gc_wb(codeinst, src->edges);
-        }
-        jl_atomic_store_release(&codeinst->debuginfo, src->debuginfo);
-        jl_gc_wb(codeinst, src->debuginfo);
-        jl_atomic_store_release(&codeinst->inferred, compressed);
-        jl_gc_wb(codeinst, compressed);
-    }
-}
-
-
 JL_DLLEXPORT void jl_add_codeinst_to_jit(jl_code_instance_t *codeinst, jl_code_info_t *src)
 {
     assert(jl_is_code_info(src));
     jl_emit_codeinst_to_jit(codeinst, src);
-    jl_add_codeinst_to_cache(codeinst, src);
 }
 
 jl_code_instance_t *jl_compile_method_internal(jl_method_instance_t *mi, size_t world)
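A note on the aotcompile.cpp deletion above: `codeinfos` is a flat array in which each CodeInstance is immediately followed by the CodeInfo it was inferred from, which is why the removed loop advanced `i` a second time inside the match and asserted on the type of the trailing slot. Below is a standalone model of that paired walk; the types, tags, and strings are illustrative stand-ins, not Julia's runtime types.

#include <assert.h>
#include <stdio.h>

/* Stand-ins for the jl_is_code_instance / jl_is_code_info discrimination. */
typedef enum { CODE_INSTANCE, CODE_INFO, OTHER } kind_t;
typedef struct { kind_t kind; const char *name; } item_t;

static void walk_pairs(item_t *items, size_t n)
{
    for (size_t i = 0; i < n; i++) {
        if (items[i].kind == CODE_INSTANCE) {
            item_t *src = &items[++i];      /* payload travels with its tag */
            assert(src->kind == CODE_INFO); /* the pairing invariant */
            printf("would cache %s using %s\n", items[i - 1].name, src->name);
        }
    }
}

int main(void)
{
    item_t codeinfos[] = {
        { CODE_INSTANCE, "f(Int)" },     { CODE_INFO, "src_f" },
        { OTHER,         "other entry" },
        { CODE_INSTANCE, "g(Float64)" }, { CODE_INFO, "src_g" },
    };
    walk_pairs(codeinfos, sizeof codeinfos / sizeof codeinfos[0]);
    return 0;
}

With the loop gone, jl_create_native_impl no longer writes anything back to the runtime caches; the pair layout of the array itself is unchanged.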
diff --git a/src/julia_internal.h b/src/julia_internal.h
index 9966d36027473..fd8720b7f1701 100644
--- a/src/julia_internal.h
+++ b/src/julia_internal.h
@@ -685,7 +685,6 @@ JL_DLLEXPORT jl_method_instance_t *jl_get_unspecialized(jl_method_t *def JL_PROP
 
 JL_DLLEXPORT void jl_read_codeinst_invoke(jl_code_instance_t *ci, uint8_t *specsigflags, jl_callptr_t *invoke, void **specptr, int waitcompile);
 JL_DLLEXPORT jl_method_instance_t *jl_method_match_to_mi(jl_method_match_t *match, size_t world, size_t min_valid, size_t max_valid, int mt_cache);
 JL_DLLEXPORT void jl_add_codeinst_to_jit(jl_code_instance_t *codeinst, jl_code_info_t *src);
-JL_DLLEXPORT void jl_add_codeinst_to_cache(jl_code_instance_t *codeinst, jl_code_info_t *src);
 JL_DLLEXPORT jl_code_instance_t *jl_new_codeinst_uninit(jl_method_instance_t *mi, jl_value_t *owner);
 JL_DLLEXPORT jl_code_instance_t *jl_new_codeinst(
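The staticdata.c hunks below queue backedge arrays without recursing into them and then walk their entries with get_next_edge. Backedge lists use a flat encoding: an entry is either a caller on its own, or an invoke signature followed by the caller it belongs to. Here is a minimal self-contained sketch of that decoding, under my reading of the encoding; the explicit kind tag is a simplification (the real code discriminates by the object's type), and all names are stand-ins.

#include <stdio.h>

typedef enum { CALLER, SIGNATURE } kind_t;
typedef struct { kind_t kind; const char *name; } entry_t;

/* Decode one edge starting at index i; return the index just past it. */
static size_t get_next_edge_model(entry_t *list, size_t i,
                                  const char **invoke_sig, const char **caller)
{
    *invoke_sig = NULL;
    if (list[i].kind == SIGNATURE) { /* optional invoke-signature slot */
        *invoke_sig = list[i].name;
        i++;                         /* the caller follows its signature */
    }
    *caller = list[i].name;
    return i + 1;
}

int main(void)
{
    entry_t backedges[] = {
        { CALLER,    "ci1" },                                     /* plain edge */
        { SIGNATURE, "Tuple{typeof(f),Int}" }, { CALLER, "ci2" }, /* invoke edge */
    };
    size_t i = 0, n = sizeof backedges / sizeof backedges[0];
    while (i < n) {
        const char *sig, *caller;
        i = get_next_edge_model(backedges, i, &sig, &caller);
        printf("caller=%s invoke=%s\n", caller, sig ? sig : "-");
    }
    return 0;
}

set_next_edge, used by the pruning pass further down, is the writing counterpart: it stores an edge at a compaction cursor and returns the next free index.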
diff --git a/src/staticdata.c b/src/staticdata.c
index c96faf5a71a54..4c1489770aa7e 100644
--- a/src/staticdata.c
+++ b/src/staticdata.c
@@ -863,40 +863,60 @@ static void jl_insert_into_serialization_queue(jl_serializer_state *s, jl_value_
         }
         goto done_fields; // for now
     }
-    if (s->incremental && jl_is_method_instance(v)) {
+    if (jl_is_method_instance(v)) {
         jl_method_instance_t *mi = (jl_method_instance_t*)v;
-        jl_value_t *def = mi->def.value;
-        if (needs_uniquing(v, s->query_cache)) {
-            // we only need 3 specific fields of this (the rest are not used)
-            jl_queue_for_serialization(s, mi->def.value);
-            jl_queue_for_serialization(s, mi->specTypes);
-            jl_queue_for_serialization(s, (jl_value_t*)mi->sparam_vals);
-            goto done_fields;
-        }
-        else if (jl_is_method(def) && jl_object_in_image(def)) {
-            // we only need 3 specific fields of this (the rest are restored afterward, if valid)
-            // in particular, cache is repopulated by jl_mi_cache_insert for all foreign function,
-            // so must not be present here
-            record_field_change((jl_value_t**)&mi->backedges, NULL);
-            record_field_change((jl_value_t**)&mi->cache, NULL);
+        if (s->incremental) {
+            jl_value_t *def = mi->def.value;
+            if (needs_uniquing(v, s->query_cache)) {
+                // we only need 3 specific fields of this (the rest are not used)
+                jl_queue_for_serialization(s, mi->def.value);
+                jl_queue_for_serialization(s, mi->specTypes);
+                jl_queue_for_serialization(s, (jl_value_t*)mi->sparam_vals);
+                goto done_fields;
+            }
+            else if (jl_is_method(def) && jl_object_in_image(def)) {
+                // we only need 3 specific fields of this (the rest are restored afterward, if valid)
+                // in particular, cache is repopulated by jl_mi_cache_insert for all foreign function,
+                // so must not be present here
+                record_field_change((jl_value_t**)&mi->backedges, NULL);
+                record_field_change((jl_value_t**)&mi->cache, NULL);
+            }
+            else {
+                assert(!needs_recaching(v, s->query_cache));
+            }
+            // n.b. opaque closures cannot be inspected and relied upon like a
+            // normal method since they can get improperly introduced by generated
+            // functions, so if they appeared at all, we will probably serialize
+            // them wrong and segfault. The jl_code_for_staged function should
+            // prevent this from happening, so we do not need to detect that user
+            // error now.
         }
-        else {
-            assert(!needs_recaching(v, s->query_cache));
+        // don't recurse into all backedges memory (yet)
+        jl_value_t *backedges = get_replaceable_field((jl_value_t**)&mi->backedges, 1);
+        if (backedges) {
+            jl_queue_for_serialization_(s, (jl_value_t*)((jl_array_t*)backedges)->ref.mem, 0, 1);
+            size_t i = 0, n = jl_array_nrows(backedges);
+            while (i < n) {
+                jl_value_t *invokeTypes;
+                jl_code_instance_t *caller;
+                i = get_next_edge((jl_array_t*)backedges, i, &invokeTypes, &caller);
+                if (invokeTypes)
+                    jl_queue_for_serialization(s, invokeTypes);
+            }
         }
-        // n.b. opaque closures cannot be inspected and relied upon like a
-        // normal method since they can get improperly introduced by generated
-        // functions, so if they appeared at all, we will probably serialize
-        // them wrong and segfault. The jl_code_for_staged function should
-        // prevent this from happening, so we do not need to detect that user
-        // error now.
-    }
-    if (s->incremental && jl_is_binding(v)) {
-        if (needs_uniquing(v, s->query_cache)) {
-            jl_binding_t *b = (jl_binding_t*)v;
+    }
+    if (jl_is_binding(v)) {
+        jl_binding_t *b = (jl_binding_t*)v;
+        if (s->incremental && needs_uniquing(v, s->query_cache)) {
             jl_queue_for_serialization(s, b->globalref->mod);
             jl_queue_for_serialization(s, b->globalref->name);
             goto done_fields;
         }
+        // don't recurse into backedges memory (yet)
+        jl_value_t *backedges = get_replaceable_field((jl_value_t**)&b->backedges, 1);
+        if (backedges) {
+            jl_queue_for_serialization_(s, (jl_value_t*)((jl_array_t*)backedges)->ref.mem, 0, 1);
+        }
     }
     if (s->incremental && jl_is_globalref(v)) {
         jl_globalref_t *gr = (jl_globalref_t*)v;
@@ -914,18 +934,20 @@ static void jl_insert_into_serialization_queue(jl_serializer_state *s, jl_value_
             assert(!jl_object_in_image((jl_value_t*)tn->wrapper));
         }
     }
-    if (s->incremental && jl_is_code_instance(v)) {
+    if (jl_is_code_instance(v)) {
         jl_code_instance_t *ci = (jl_code_instance_t*)v;
         jl_method_instance_t *mi = jl_get_ci_mi(ci);
-        // make sure we don't serialize other reachable cache entries of foreign methods
-        // Should this now be:
-        // if (ci !in ci->defs->cache)
-        //     record_field_change((jl_value_t**)&ci->next, NULL);
-        // Why are we checking that the method/module this originates from is in_image?
-        // and then disconnect this CI?
-        if (jl_object_in_image((jl_value_t*)mi->def.value)) {
-            // TODO: if (ci in ci->defs->cache)
-            record_field_change((jl_value_t**)&ci->next, NULL);
+        if (s->incremental) {
+            // make sure we don't serialize other reachable cache entries of foreign methods
+            // Should this now be:
+            // if (ci !in ci->defs->cache)
+            //     record_field_change((jl_value_t**)&ci->next, NULL);
+            // Why are we checking that the method/module this originates from is in_image?
+            // and then disconnect this CI?
+            if (jl_object_in_image((jl_value_t*)mi->def.value)) {
+                // TODO: if (ci in ci->defs->cache)
+                record_field_change((jl_value_t**)&ci->next, NULL);
+            }
         }
         jl_value_t *inferred = jl_atomic_load_relaxed(&ci->inferred);
         if (inferred && inferred != jl_nothing) { // disregard if there is nothing here to delete (e.g. builtins, unspecialized)
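record_field_change and get_replaceable_field, which appear throughout the hunk above, let the serializer pretend a field holds a different value (typically NULL) without mutating the live object: the override is recorded in a side table that readers consult before falling back to the real field. A self-contained model of the idea follows; the table layout and the _model names are mine, not staticdata.c's.

#include <assert.h>
#include <stdio.h>

#define MAX_CHANGES 16

typedef struct { void **field; void *replacement; } change_t;
static change_t changes[MAX_CHANGES];
static size_t nchanges;

/* Serialize `*field` as `replacement` without touching the live object. */
static void record_field_change_model(void **field, void *replacement)
{
    assert(nchanges < MAX_CHANGES);
    changes[nchanges].field = field;
    changes[nchanges].replacement = replacement;
    nchanges++;
}

/* What the serializer reads: the recorded override, else the live value. */
static void *get_replaceable_field_model(void **field)
{
    for (size_t i = 0; i < nchanges; i++)
        if (changes[i].field == field)
            return changes[i].replacement;
    return *field;
}

typedef struct ci { const char *name; struct ci *next; } ci_t;

int main(void)
{
    ci_t foreign = { "foreign cache entry", NULL };
    ci_t mine = { "local cache entry", &foreign };
    /* Disconnect mine.next in the serialized image only: */
    record_field_change_model((void**)&mine.next, NULL);
    printf("live: %s -> %s\n", mine.name, mine.next ? mine.next->name : "NULL");
    printf("serialized next: %p\n", get_replaceable_field_model((void**)&mine.next));
    return 0;
}

This is what allows the hunks above to drop ci->next chains and backedge arrays from the image while the running session keeps using them.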
@@ -953,7 +975,7 @@ static void jl_insert_into_serialization_queue(jl_serializer_state *s, jl_value_
         if (inferred == jl_nothing) {
             record_field_change((jl_value_t**)&ci->inferred, jl_nothing);
         }
-        else if (jl_is_string(inferred)) {
+        else if (s->incremental && jl_is_string(inferred)) {
             // New roots for external methods
             if (jl_object_in_image((jl_value_t*)def)) {
                 void **pfound = ptrhash_bp(&s->method_roots_index, def);
@@ -2572,6 +2594,35 @@ static void jl_prune_type_cache_linear(jl_svec_t *cache)
             jl_svecset(cache, ins++, jl_nothing);
 }
 
+static void jl_prune_mi_backedges(jl_array_t *backedges)
+{
+    if (backedges == NULL)
+        return;
+    size_t i = 0, ins = 0, n = jl_array_nrows(backedges);
+    while (i < n) {
+        jl_value_t *invokeTypes;
+        jl_code_instance_t *caller;
+        i = get_next_edge(backedges, i, &invokeTypes, &caller);
+        if (ptrhash_get(&serialization_order, caller) != HT_NOTFOUND)
+            ins = set_next_edge(backedges, ins, invokeTypes, caller);
+    }
+    jl_array_del_end(backedges, n - ins);
+}
+
+static void jl_prune_binding_backedges(jl_array_t *backedges)
+{
+    if (backedges == NULL)
+        return;
+    size_t i = 0, ins = 0, n = jl_array_nrows(backedges);
+    for (i = 0; i < n; i++) {
+        jl_value_t *b = jl_array_ptr_ref(backedges, i);
+        if (ptrhash_get(&serialization_order, b) != HT_NOTFOUND)
+            jl_array_ptr_set(backedges, ins++, b);
+    }
+    jl_array_del_end(backedges, n - ins);
+}
+
+
 uint_t bindingkey_hash(size_t idx, jl_value_t *data);
 
 static void jl_prune_module_bindings(jl_module_t * m) JL_GC_DISABLED
@@ -3145,12 +3196,11 @@ static void jl_save_system_image_to_stream(ios_t *f, jl_array_t *mod_array,
         jl_queue_for_serialization(&s, global_roots_keyset);
         jl_serialize_reachable(&s);
     }
-    // step 1.5: prune (garbage collect) some special weak references from
-    // built-in type caches too
+    // step 1.5: prune (garbage collect) some special weak references from known caches
     for (i = 0; i < serialization_queue.len; i++) {
         jl_value_t *v = (jl_value_t*)serialization_queue.items[i];
         if (jl_options.trim) {
-            if (jl_is_method(v)){
+            if (jl_is_method(v)) {
                 jl_method_t *m = (jl_method_t*)v;
                 jl_value_t *specializations_ = jl_atomic_load_relaxed(&m->specializations);
                 if (!jl_is_svec(specializations_))
@@ -3178,6 +3228,16 @@ static void jl_save_system_image_to_stream(ios_t *f, jl_array_t *mod_array,
             jl_gc_wb(tn, jl_atomic_load_relaxed(&tn->cache));
             jl_prune_type_cache_linear(jl_atomic_load_relaxed(&tn->linearcache));
         }
+        else if (jl_is_method_instance(v)) {
+            jl_method_instance_t *mi = (jl_method_instance_t*)v;
+            jl_value_t *backedges = get_replaceable_field((jl_value_t**)&mi->backedges, 1);
+            jl_prune_mi_backedges((jl_array_t*)backedges);
+        }
+        else if (jl_is_binding(v)) {
+            jl_binding_t *b = (jl_binding_t*)v;
+            jl_value_t *backedges = get_replaceable_field((jl_value_t**)&b->backedges, 1);
+            jl_prune_binding_backedges((jl_array_t*)backedges);
+        }
     }
 }
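Both pruning helpers added above follow the same in-place compaction idiom: a read cursor visits every entry, a write cursor advances only when an entry survives, and jl_array_del_end truncates the dead tail. A runnable distillation, with a stand-in predicate in place of the serialization_order membership test:

#include <stdio.h>

static int survives(int v) { return v % 2 == 0; } /* stand-in predicate */

/* Compact surviving entries to the front; return the new length. */
static size_t prune(int *a, size_t n)
{
    size_t ins = 0;
    for (size_t i = 0; i < n; i++)
        if (survives(a[i]))
            a[ins++] = a[i];  /* write cursor advances only on keep */
    return ins;               /* caller truncates the tail (n - ins) */
}

int main(void)
{
    int a[] = { 1, 2, 3, 4, 6, 7 };
    size_t n = prune(a, sizeof a / sizeof a[0]);
    for (size_t i = 0; i < n; i++)
        printf("%d ", a[i]);  /* prints: 2 4 6 */
    printf("\n");
    return 0;
}

The write cursor has to advance on every kept entry (ins++ here, or the index returned by set_next_edge in the multi-slot case); otherwise the final truncation of n - ins elements would throw away survivors as well.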