24 | 24 | extern "C" {
25 | 25 | #endif
26 | 26 |
| 27 | +_Atomic(int) allow_new_worlds = 1;
27 | 28 | JL_DLLEXPORT _Atomic(size_t) jl_world_counter = 1; // uses atomic acquire/release
28 | 29 | jl_mutex_t world_counter_lock;
29 | 30 | JL_DLLEXPORT size_t jl_get_world_counter(void) JL_NOTSAFEPOINT
@@ -1819,76 +1820,84 @@ static void invalidate_backedges(jl_method_instance_t *replaced_mi, size_t max_w
1819 | 1820 | // add a backedge from callee to caller
1820 | 1821 | JL_DLLEXPORT void jl_method_instance_add_backedge(jl_method_instance_t *callee, jl_value_t *invokesig, jl_code_instance_t *caller)
1821 | 1822 | {
| 1823 | +    if (!jl_atomic_load_relaxed(&allow_new_worlds))
| 1824 | +        return;
1822 | 1825 |     if (invokesig == jl_nothing)
1823 | 1826 |         invokesig = NULL; // julia uses `nothing` but C uses NULL (#undef)
1824 | 1827 |     assert(jl_is_method_instance(callee));
1825 | 1828 |     assert(jl_is_code_instance(caller));
1826 | 1829 |     assert(invokesig == NULL || jl_is_type(invokesig));
1827 | 1830 |     JL_LOCK(&callee->def.method->writelock);
1828 | | -    int found = 0;
1829 | | -    // TODO: use jl_cache_type_(invokesig) like cache_method does to save memory
1830 | | -    if (!callee->backedges) {
1831 | | -        // lazy-init the backedges array
1832 | | -        callee->backedges = jl_alloc_vec_any(0);
1833 | | -        jl_gc_wb(callee, callee->backedges);
1834 | | -    }
1835 | | -    else {
1836 | | -        size_t i = 0, l = jl_array_nrows(callee->backedges);
1837 | | -        for (i = 0; i < l; i++) {
1838 | | -            // optimized version of while (i < l) i = get_next_edge(callee->backedges, i, &invokeTypes, &mi);
1839 | | -            jl_value_t *mi = jl_array_ptr_ref(callee->backedges, i);
1840 | | -            if (mi != (jl_value_t*)caller)
1841 | | -                continue;
1842 | | -            jl_value_t *invokeTypes = i > 0 ? jl_array_ptr_ref(callee->backedges, i - 1) : NULL;
1843 | | -            if (invokeTypes && jl_is_method_instance(invokeTypes))
1844 | | -                invokeTypes = NULL;
1845 | | -            if ((invokesig == NULL && invokeTypes == NULL) ||
1846 | | -                (invokesig && invokeTypes && jl_types_equal(invokesig, invokeTypes))) {
1847 | | -                found = 1;
1848 | | -                break;
| 1831 | +    if (jl_atomic_load_relaxed(&allow_new_worlds)) {
| 1832 | +        int found = 0;
| 1833 | +        // TODO: use jl_cache_type_(invokesig) like cache_method does to save memory
| 1834 | +        if (!callee->backedges) {
| 1835 | +            // lazy-init the backedges array
| 1836 | +            callee->backedges = jl_alloc_vec_any(0);
| 1837 | +            jl_gc_wb(callee, callee->backedges);
| 1838 | +        }
| 1839 | +        else {
| 1840 | +            size_t i = 0, l = jl_array_nrows(callee->backedges);
| 1841 | +            for (i = 0; i < l; i++) {
| 1842 | +                // optimized version of while (i < l) i = get_next_edge(callee->backedges, i, &invokeTypes, &mi);
| 1843 | +                jl_value_t *mi = jl_array_ptr_ref(callee->backedges, i);
| 1844 | +                if (mi != (jl_value_t*)caller)
| 1845 | +                    continue;
| 1846 | +                jl_value_t *invokeTypes = i > 0 ? jl_array_ptr_ref(callee->backedges, i - 1) : NULL;
| 1847 | +                if (invokeTypes && jl_is_method_instance(invokeTypes))
| 1848 | +                    invokeTypes = NULL;
| 1849 | +                if ((invokesig == NULL && invokeTypes == NULL) ||
| 1850 | +                    (invokesig && invokeTypes && jl_types_equal(invokesig, invokeTypes))) {
| 1851 | +                    found = 1;
| 1852 | +                    break;
| 1853 | +                }
1849 | 1854 |             }
1850 | 1855 |         }
| 1856 | +        if (!found)
| 1857 | +            push_edge(callee->backedges, invokesig, caller);
1851 | 1858 |     }
1852 | | -    if (!found)
1853 | | -        push_edge(callee->backedges, invokesig, caller);
1854 | 1859 |     JL_UNLOCK(&callee->def.method->writelock);
1855 | 1860 | }
1856 | 1861 |
1857 | 1862 | // add a backedge from a non-existent signature to caller
1858 | 1863 | JL_DLLEXPORT void jl_method_table_add_backedge(jl_methtable_t *mt, jl_value_t *typ, jl_code_instance_t *caller)
1859 | 1864 | {
1860 | 1865 |     assert(jl_is_code_instance(caller));
| 1866 | +    if (!jl_atomic_load_relaxed(&allow_new_worlds))
| 1867 | +        return;
1861 | 1868 |     JL_LOCK(&mt->writelock);
1862 | | -    if (!mt->backedges) {
1863 | | -        // lazy-init the backedges array
1864 | | -        mt->backedges = jl_alloc_vec_any(2);
1865 | | -        jl_gc_wb(mt, mt->backedges);
1866 | | -        jl_array_ptr_set(mt->backedges, 0, typ);
1867 | | -        jl_array_ptr_set(mt->backedges, 1, caller);
1868 | | -    }
1869 | | -    else {
1870 | | -        // check if the edge is already present and avoid adding a duplicate
1871 | | -        size_t i, l = jl_array_nrows(mt->backedges);
1872 | | -        for (i = 1; i < l; i += 2) {
1873 | | -            if (jl_array_ptr_ref(mt->backedges, i) == (jl_value_t*)caller) {
1874 | | -                if (jl_types_equal(jl_array_ptr_ref(mt->backedges, i - 1), typ)) {
1875 | | -                    JL_UNLOCK(&mt->writelock);
1876 | | -                    return;
| 1869 | +    if (jl_atomic_load_relaxed(&allow_new_worlds)) {
| 1870 | +        if (!mt->backedges) {
| 1871 | +            // lazy-init the backedges array
| 1872 | +            mt->backedges = jl_alloc_vec_any(2);
| 1873 | +            jl_gc_wb(mt, mt->backedges);
| 1874 | +            jl_array_ptr_set(mt->backedges, 0, typ);
| 1875 | +            jl_array_ptr_set(mt->backedges, 1, caller);
| 1876 | +        }
| 1877 | +        else {
| 1878 | +            // check if the edge is already present and avoid adding a duplicate
| 1879 | +            size_t i, l = jl_array_nrows(mt->backedges);
| 1880 | +            for (i = 1; i < l; i += 2) {
| 1881 | +                if (jl_array_ptr_ref(mt->backedges, i) == (jl_value_t*)caller) {
| 1882 | +                    if (jl_types_equal(jl_array_ptr_ref(mt->backedges, i - 1), typ)) {
| 1883 | +                        JL_UNLOCK(&mt->writelock);
| 1884 | +                        return;
| 1885 | +                    }
1877 | 1886 |                 }
1878 | 1887 |             }
1879 | | -        }
1880 | | -        // reuse an already cached instance of this type, if possible
1881 | | -        // TODO: use jl_cache_type_(tt) like cache_method does, instead of this linear scan?
1882 | | -        for (i = 1; i < l; i += 2) {
1883 | | -            if (jl_array_ptr_ref(mt->backedges, i) != (jl_value_t*)caller) {
1884 | | -                if (jl_types_equal(jl_array_ptr_ref(mt->backedges, i - 1), typ)) {
1885 | | -                    typ = jl_array_ptr_ref(mt->backedges, i - 1);
1886 | | -                    break;
| 1888 | +            // reuse an already cached instance of this type, if possible
| 1889 | +            // TODO: use jl_cache_type_(tt) like cache_method does, instead of this linear scan?
| 1890 | +            for (i = 1; i < l; i += 2) {
| 1891 | +                if (jl_array_ptr_ref(mt->backedges, i) != (jl_value_t*)caller) {
| 1892 | +                    if (jl_types_equal(jl_array_ptr_ref(mt->backedges, i - 1), typ)) {
| 1893 | +                        typ = jl_array_ptr_ref(mt->backedges, i - 1);
| 1894 | +                        break;
| 1895 | +                    }
1887 | 1896 |                 }
1888 | 1897 |             }
| 1898 | +            jl_array_ptr_1d_push(mt->backedges, typ);
| 1899 | +            jl_array_ptr_1d_push(mt->backedges, (jl_value_t*)caller);
1889 | 1900 |         }
1890 | | -        jl_array_ptr_1d_push(mt->backedges, typ);
1891 | | -        jl_array_ptr_1d_push(mt->backedges, (jl_value_t*)caller);
1892 | 1901 |     }
1893 | 1902 |     JL_UNLOCK(&mt->writelock);
1894 | 1903 | }
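
Editorial note, not part of the commit: the duplicate-detection loop above scans `callee->backedges`, a flat vector in which each edge is a caller CodeInstance, optionally preceded by the `invoke` signature it was recorded through. The sketch below is a simplified, self-contained model of that layout; `edge_list`, `push_edge`, and `has_edge` here are illustrative stand-ins, while the real code stores `jl_value_t*` entries in a `jl_array_t` and distinguishes the two entry kinds with runtime type checks (`jl_is_method_instance` / `jl_is_type`).

/* Simplified model only: strings stand in for jl_value_t* entries, and an
 * explicit is_sig flag stands in for the runtime type checks used in gf.c. */
#include <stdio.h>
#include <string.h>

#define MAX_EDGES 16

typedef struct {
    const char *entries[MAX_EDGES]; /* invoke signatures and callers, flattened */
    int is_sig[MAX_EDGES];          /* 1 if entries[k] is an invoke signature */
    int len;
} edge_list;

/* Append one edge: an optional invoke signature immediately followed by its caller. */
static void push_edge(edge_list *list, const char *invokesig, const char *caller)
{
    if (invokesig) {
        list->is_sig[list->len] = 1;
        list->entries[list->len++] = invokesig;
    }
    list->is_sig[list->len] = 0;
    list->entries[list->len++] = caller;
}

/* Duplicate check mirroring the loop in jl_method_instance_add_backedge: an edge
 * counts as present only if both the caller and the (possibly absent) invoke
 * signature match. */
static int has_edge(const edge_list *list, const char *invokesig, const char *caller)
{
    for (int i = 0; i < list->len; i++) {
        if (list->is_sig[i] || strcmp(list->entries[i], caller) != 0)
            continue;
        const char *sig = (i > 0 && list->is_sig[i - 1]) ? list->entries[i - 1] : NULL;
        if ((invokesig == NULL && sig == NULL) ||
            (invokesig && sig && strcmp(invokesig, sig) == 0))
            return 1;
    }
    return 0;
}

int main(void)
{
    edge_list list = {0};
    push_edge(&list, NULL, "caller_A");                    /* plain call edge */
    push_edge(&list, "Tuple{typeof(f),Int}", "caller_B");  /* invoke edge */
    printf("%d\n", has_edge(&list, NULL, "caller_A"));                   /* 1: duplicate */
    printf("%d\n", has_edge(&list, NULL, "caller_B"));                   /* 0: only an invoke edge exists */
    printf("%d\n", has_edge(&list, "Tuple{typeof(f),Int}", "caller_B")); /* 1: duplicate */
    return 0;
}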
@@ -2024,10 +2033,55 @@ static void jl_method_table_invalidate(jl_methtable_t *mt, jl_method_t *replaced
2024 | 2033 | }
2025 | 2034 | }
2026 | 2035 |
| 2036 | +static int erase_method_backedges(jl_typemap_entry_t *def, void *closure)
| 2037 | +{
| 2038 | +    jl_method_t *method = def->func.method;
| 2039 | +    JL_LOCK(&method->writelock);
| 2040 | +    jl_value_t *specializations = jl_atomic_load_relaxed(&method->specializations);
| 2041 | +    if (jl_is_svec(specializations)) {
| 2042 | +        size_t i, l = jl_svec_len(specializations);
| 2043 | +        for (i = 0; i < l; i++) {
| 2044 | +            jl_method_instance_t *mi = (jl_method_instance_t*)jl_svecref(specializations, i);
| 2045 | +            if ((jl_value_t*)mi != jl_nothing) {
| 2046 | +                mi->backedges = NULL;
| 2047 | +            }
| 2048 | +        }
| 2049 | +    }
| 2050 | +    else {
| 2051 | +        jl_method_instance_t *mi = (jl_method_instance_t*)specializations;
| 2052 | +        mi->backedges = NULL;
| 2053 | +    }
| 2054 | +    JL_UNLOCK(&method->writelock);
| 2055 | +    return 1;
| 2056 | +}
| 2057 | +
| 2058 | +static int erase_all_backedges(jl_methtable_t *mt, void *env)
| 2059 | +{
| 2060 | +    // removes all method caches
| 2061 | +    // this might not be entirely safe (GC or MT), thus we only do it very early in bootstrapping
| 2062 | +    JL_LOCK(&mt->writelock);
| 2063 | +    mt->backedges = NULL;
| 2064 | +    JL_UNLOCK(&mt->writelock);
| 2065 | +    jl_typemap_visitor(jl_atomic_load_relaxed(&mt->defs), erase_method_backedges, env);
| 2066 | +    return 1;
| 2067 | +}
| 2068 | +
| 2069 | +JL_DLLEXPORT void jl_disable_new_worlds(void)
| 2070 | +{
| 2071 | +    if (jl_generating_output())
| 2072 | +        jl_error("Disabling Method changes is not possible when generating output.");
| 2073 | +    JL_LOCK(&world_counter_lock);
| 2074 | +    jl_atomic_store_relaxed(&allow_new_worlds, 0);
| 2075 | +    JL_UNLOCK(&world_counter_lock);
| 2076 | +    jl_foreach_reachable_mtable(erase_all_backedges, (void*)NULL);
| 2077 | +}
| 2078 | +
2027 | 2079 | JL_DLLEXPORT void jl_method_table_disable(jl_methtable_t *mt, jl_method_t *method)
2028 | 2080 | {
2029 | 2081 |     jl_typemap_entry_t *methodentry = do_typemap_search(mt, method);
2030 | 2082 |     JL_LOCK(&world_counter_lock);
| 2083 | +    if (!jl_atomic_load_relaxed(&allow_new_worlds))
| 2084 | +        jl_error("Method changes have been disabled via a call to disable_new_worlds.");
2031 | 2085 |     JL_LOCK(&mt->writelock);
2032 | 2086 |     // Narrow the world age on the method to make it uncallable
2033 | 2087 |     size_t world = jl_atomic_load_relaxed(&jl_world_counter);
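
Editorial note on the synchronization pattern, not part of the commit: `allow_new_worlds` is only ever cleared while `world_counter_lock` is held, and the loads that decide whether a world-changing operation may proceed (in `jl_method_table_disable` above and `jl_method_table_insert` below) also happen under that lock, so relaxed atomic ordering appears to be sufficient there; the unlocked relaxed loads in the backedge functions look like a best-effort fast path, since a stale read at worst records a backedge that will no longer be used for invalidation. The standalone sketch below models that discipline with C11 atomics and a pthread mutex standing in for `jl_mutex_t`; the function names are hypothetical.

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

static _Atomic(int) allow_new_worlds = 1;
static _Atomic(size_t) world_counter = 1;
static pthread_mutex_t world_counter_lock = PTHREAD_MUTEX_INITIALIZER;

/* Clear the flag under the same lock that guards world-counter bumps, so any
 * thread that later takes the lock is guaranteed to observe the store. */
static void disable_new_worlds(void)
{
    pthread_mutex_lock(&world_counter_lock);
    atomic_store_explicit(&allow_new_worlds, 0, memory_order_relaxed);
    pthread_mutex_unlock(&world_counter_lock);
}

/* Models the guarded world bump in jl_method_table_insert: check the flag only
 * after acquiring the lock, then publish the new world. */
static long try_new_world(void)
{
    pthread_mutex_lock(&world_counter_lock);
    if (!atomic_load_explicit(&allow_new_worlds, memory_order_relaxed)) {
        pthread_mutex_unlock(&world_counter_lock);
        return -1; /* the real code raises a Julia error here */
    }
    size_t world = atomic_load_explicit(&world_counter, memory_order_relaxed) + 1;
    atomic_store_explicit(&world_counter, world, memory_order_release);
    pthread_mutex_unlock(&world_counter_lock);
    return (long)world;
}

int main(void)
{
    printf("%ld\n", try_new_world()); /* 2 */
    disable_new_worlds();
    printf("%ld\n", try_new_world()); /* -1: new worlds are disabled */
    return 0;
}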
@@ -2341,6 +2395,8 @@ JL_DLLEXPORT void jl_method_table_insert(jl_methtable_t *mt, jl_method_t *method
2341 | 2395 |     jl_typemap_entry_t *newentry = jl_method_table_add(mt, method, simpletype);
2342 | 2396 |     JL_GC_PUSH1(&newentry);
2343 | 2397 |     JL_LOCK(&world_counter_lock);
| 2398 | +    if (!jl_atomic_load_relaxed(&allow_new_worlds))
| 2399 | +        jl_error("Method changes have been disabled via a call to disable_new_worlds.");
2344 | 2400 |     size_t world = jl_atomic_load_relaxed(&jl_world_counter) + 1;
2345 | 2401 |     jl_atomic_store_relaxed(&method->primary_world, world);
2346 | 2402 |     jl_atomic_store_relaxed(&method->deleted_world, ~(size_t)0);
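
The end-to-end effect, sketched below from an embedder's point of view (editorial illustration, not code from this commit): once everything that will ever be needed has been defined and compiled, `jl_disable_new_worlds()` drops the backedge bookkeeping, and any later method definition or deletion raises the "Method changes have been disabled" error. The snippet assumes the standard embedding entry points from julia.h; error handling is kept minimal.

#include <julia.h>

int main(void)
{
    jl_init();

    /* Define and exercise everything up front, while method changes are still allowed. */
    jl_eval_string("f(x) = x + 1; f(1)");

    /* Exported by this commit: forbid new worlds and erase backedges. */
    jl_disable_new_worlds();

    /* Already-defined code keeps working; no new method table entries are needed. */
    jl_eval_string("f(2)");

    /* Defining a new method now fails inside jl_method_table_insert with
     * "Method changes have been disabled via a call to disable_new_worlds." */
    if (jl_eval_string("g() = 0") == NULL && jl_exception_occurred())
        jl_printf(jl_stderr_stream(), "method definition rejected, as expected\n");

    jl_atexit_hook(0);
    return 0;
}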