
Commit 70e404b

* **BREAKING** total cleanup of the various lookup and refresh functions, too many to mention
* TAO (thread affine objects) added. Your multi-threaded code will now bail if a method accesses an instance that is living in a different thread
* the methodcache of a class has been renamed to an impcache, because that's what it's really caching
* you can now specify that a class should NOT search through its own (non-category) methods with `MULLE_OBJC_CLASS_DONT_INHERIT_CLASS`; you can also specify to keep the search scope, instead of using the superclass inheritance, with `MULLE_OBJC_CLASS_DONT_INHERIT_INHERITANCE`
* redid the method calls to support TAO and improved tracing. `mulle_objc_implementation_invoke` should now always be used instead of just `(*imp)( self, _cmd, _param)`, so that tracing works everywhere (memo: could typedef IMP to `void *` maybe); see the sketch below
* changed informational cache statistics code
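
For illustration, a minimal sketch of the changed call convention. It assumes the runtime umbrella header path and that `mulle_objc_implementation_invoke` takes the implementation followed by the same `self`, `_cmd`, `_param` arguments as the raw call; treat both as assumptions, not as this commit's exact API surface:

```c
#include <mulle-objc-runtime/mulle-objc-runtime.h>   // umbrella header (path assumed)

// Sketch: route the call through mulle_objc_implementation_invoke instead of
// jumping through the IMP directly, so tracing and TAO checks hook in everywhere.
static void   *call_through_invoke( mulle_objc_implementation_t imp,
                                    void *self,
                                    mulle_objc_methodid_t _cmd,
                                    void *_param)
{
   // before: return( (*imp)( self, _cmd, _param));
   return( mulle_objc_implementation_invoke( imp, self, _cmd, _param));
}
```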
1 parent 8a914d9 commit 70e404b

104 files changed: +3205 -667 lines changed


src/mulle-objc-cache.c

+144 -1
@@ -237,7 +237,58 @@ unsigned int mulle_objc_cache_calculate_fillpercentage( struct _mulle_objc_cac
 }


-unsigned int   mulle_objc_cache_calculate_hitpercentage( struct _mulle_objc_cache *cache,
+unsigned int   mulle_objc_cache_calculate_hits( struct _mulle_objc_cache *cache,
+                                                unsigned int counts[ 4])
+{
+   unsigned int                    total;
+   mulle_objc_cache_uint_t         offset;
+   mulle_objc_cache_uint_t         mask;
+   mulle_objc_cache_uint_t         steps;
+   mulle_objc_cache_uint_t         expected;
+   struct _mulle_objc_cacheentry   *p;
+   struct _mulle_objc_cacheentry   *sentinel;
+
+   memset( counts, 0, sizeof( unsigned int) * 4);
+
+   if( ! cache || ! cache->size)
+      return( 0);
+
+   if( ! _mulle_atomic_pointer_read( &cache->n))
+      return( 0);
+
+   p        = cache->entries;
+   sentinel = &p[ cache->size];
+   mask     = cache->mask;
+   total    = 0;
+   offset   = 0;
+
+   for( ; p < sentinel; p++)
+   {
+      if( p->key.uniqueid)
+      {
+         expected = (p->key.uniqueid & mask);
+         if( expected > offset) // wrap around
+         {
+            steps  = cache->size * sizeof( struct _mulle_objc_cacheentry) - expected;
+            steps += offset;
+         }
+         else
+            steps = (offset - expected);
+         steps /= sizeof( struct _mulle_objc_cacheentry);
+         if( steps > 3)
+            steps = 3;
+         counts[ steps]++;
+         ++total;
+      }
+
+      offset += sizeof( struct _mulle_objc_cacheentry);
+   }
+
+   return( total);
+}
+
+
+unsigned int   mulle_objc_cache_calculate_percentage( struct _mulle_objc_cache *cache,
                                                       unsigned int *percentages,
                                                       unsigned int size)
 {
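
The new `mulle_objc_cache_calculate_hits` can be read off the hunk above: `counts[ i]` ends up holding how many occupied entries sit `i` probe steps (capped at 3) away from their ideal slot, and the return value is the number of occupied entries. A hypothetical reporting helper built on that, illustrative only and not part of this commit (it assumes the cache header is in scope):

```c
#include <stdio.h>

// Illustrative: print the probe-distance histogram as percentages.
static void   example_print_hit_distribution( struct _mulle_objc_cache *cache)
{
   unsigned int   counts[ 4];
   unsigned int   total;
   unsigned int   i;

   total = mulle_objc_cache_calculate_hits( cache, counts);
   if( ! total)
      return;

   for( i = 0; i < 4; i++)
      fprintf( stderr, "%u%s step(s) away: %u%%\n",
               i, (i == 3) ? "+" : "", counts[ i] * 100 / total);
}
```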
@@ -287,6 +338,7 @@ unsigned int mulle_objc_cache_calculate_hitpercentage( struct _mulle_objc_cach
 }


+
 # pragma mark - add

 // this only works for a cache, that isn't active in the universe yet and that
@@ -452,6 +504,97 @@ struct _mulle_objc_cacheentry *
 }


+// MEMO: used by MulleThreadSafeObject
+
+int   _mulle_objc_cachepivot_swap( struct _mulle_objc_cachepivot *pivot,
+                                   struct _mulle_objc_cache *cache,
+                                   struct _mulle_objc_cache *old_cache,
+                                   struct mulle_allocator *allocator)
+{
+   //
+   // an initial_impcache ? this is getting called too early
+   // an empty_cache ? this is getting called wrong
+   //
+   if( _mulle_objc_cachepivot_cas_entries( pivot,
+                                           cache->entries,
+                                           old_cache ? old_cache->entries : NULL))
+   {
+      // cas failed, so get rid of this and punt
+      _mulle_objc_cache_free( cache, allocator); // sic, can be unsafe deleted now
+      return( -1);
+   }
+
+
+   _mulle_objc_cache_abafree( old_cache, allocator);
+   return( 0);
+}
+
+
+struct _mulle_objc_cache *
+   _mulle_objc_cache_grow_with_strategy( struct _mulle_objc_cache *old_cache,
+                                         enum mulle_objc_cachesizing_t strategy,
+                                         struct mulle_allocator *allocator)
+{
+   struct _mulle_objc_cache   *cache;
+   mulle_objc_cache_uint_t    new_size;
+
+   // a new beginning.. let it be filled anew
+   // could ask the universe here what to do as new size
+
+   new_size = _mulle_objc_cache_get_resize( old_cache, strategy);
+   cache    = mulle_objc_cache_new( new_size, allocator);
+
+   return( cache);
+}
+
+
+
+// uniqueid can be a methodid or superid!
+struct _mulle_objc_cacheentry *
+   _mulle_objc_cachepivot_fill_functionpointer( struct _mulle_objc_cachepivot *pivot,
+                                                mulle_functionpointer_t imp,
+                                                mulle_objc_uniqueid_t uniqueid,
+                                                unsigned int fillrate,
+                                                struct mulle_allocator *allocator)
+{
+   struct _mulle_objc_cache        *cache;
+   struct _mulle_objc_cache        *old_cache;
+   struct _mulle_objc_cacheentry   *entry;
+
+   assert( pivot);
+   assert( imp);
+
+   //
+   // try to get most up to date value
+   //
+   for(;;)
+   {
+      cache = _mulle_objc_cachepivot_get_cache_atomic( pivot);
+      if( _mulle_objc_cache_should_grow( cache, fillrate))
+      {
+         old_cache = cache;
+         cache     = _mulle_objc_cache_grow_with_strategy( old_cache,
+                                                           MULLE_OBJC_CACHESIZE_GROW,
+                                                           allocator);
+
+         // doesn't really matter, if this fails or succeeds we just try
+         // again
+         _mulle_objc_cachepivot_swap( pivot, cache, old_cache, allocator);
+         continue;
+      }
+
+      entry = _mulle_objc_cache_add_functionpointer_entry( cache,
+                                                           (mulle_functionpointer_t) imp,
+                                                           uniqueid);
+      if( entry)
+         break;
+   }
+
+   return( entry);
+}
+
+
+

 // #1#
 // the atomicity of this.
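
To show how the new fill path might be driven, here is a hypothetical cache-miss handler in the spirit of the `MulleThreadSafeObject` MEMO above; the slow-lookup callback and the helper name are placeholders, not APIs introduced by this commit, and the cachepivot header is assumed to be in scope:

```c
// Hypothetical miss path: resolve the function pointer slowly once, then
// memoize it via _mulle_objc_cachepivot_fill_functionpointer, which grows
// and CAS-swaps the pivot's cache as needed.
static mulle_functionpointer_t
   example_lookup_or_fill( struct _mulle_objc_cachepivot *pivot,
                           mulle_objc_uniqueid_t uniqueid,
                           mulle_functionpointer_t (*slow_lookup)( mulle_objc_uniqueid_t),
                           unsigned int fillrate,
                           struct mulle_allocator *allocator)
{
   mulle_functionpointer_t   imp;

   imp = (*slow_lookup)( uniqueid);
   if( imp)
      _mulle_objc_cachepivot_fill_functionpointer( pivot, imp, uniqueid,
                                                   fillrate, allocator);
   return( imp);
}
```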
