
namespace SkSL {

+ #if defined(SK_BUILD_FOR_IOS) && \
+         (!defined(__IPHONE_9_0) || __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0)
+
+ // iOS did not support C++11 `thread_local` variables until iOS 9.
+ // Pooling is not supported here; we allocate all nodes directly.
+ struct PoolData {};
+
+ Pool::~Pool() {}
+ std::unique_ptr<Pool> Pool::CreatePoolOnThread(int nodesInPool) { return std::unique_ptr<Pool>(new Pool); }
+ void Pool::detachFromThread() {}
+ void Pool::attachToThread() {}
+ void* Pool::AllocIRNode() { return ::operator new(sizeof(IRNode)); }
+ void Pool::FreeIRNode(void* node) { ::operator delete(node); }
+
+ #else  // !defined(SK_BUILD_FOR_IOS)...
+
namespace { struct IRNodeData {
    union {
        uint8_t fBuffer[sizeof(IRNode)];
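
The IRNodeData union above is the core trick of the pool: while a slot is unoccupied, its bytes double as a free-list link, so no separate bookkeeping structure is needed. Below is a minimal standalone sketch of the same slot/free-list technique, using a stand-in Payload type instead of SkSL::IRNode; all names here are illustrative and not part of this commit.

#include <cstdint>
#include <new>

// Illustrative sketch of the slot/free-list union idea; not Skia code.
struct Payload { int fValue[4]; };           // stand-in for SkSL::IRNode

struct Slot {
    union {
        uint8_t fBuffer[sizeof(Payload)];    // storage while the slot is in use
        Slot*   fFreeListNext;               // link to the next free slot otherwise
    };
};

struct MiniPool {
    Slot* fFreeListHead = nullptr;
    Slot  fSlots[8];

    MiniPool() {
        // Thread every slot onto the free list up front.
        for (Slot& s : fSlots) {
            s.fFreeListNext = fFreeListHead;
            fFreeListHead = &s;
        }
    }

    void* alloc() {
        if (Slot* s = fFreeListHead) {            // pop a free slot if one is available
            fFreeListHead = s->fFreeListNext;
            return s->fBuffer;
        }
        return ::operator new(sizeof(Payload));   // pool exhausted: plain heap allocation
    }

    void free(void* p) {
        Slot* s = static_cast<Slot*>(p);
        if (s >= &fSlots[0] && s < &fSlots[8]) {  // came from the pool: push it back
            s->fFreeListNext = fFreeListHead;
            fFreeListHead = s;
        } else {
            ::operator delete(p);                 // came from the heap
        }
    }
};

FreeIRNode() later in this diff uses the same range check to decide whether a node belongs to the pool or must go back to ::operator delete.
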
@@ -33,52 +49,15 @@ struct PoolData {
    // Accessors.
    ptrdiff_t nodeCount() { return fNodesEnd - fNodes; }

-     int nodeIndex(IRNodeData* node) {
+     ptrdiff_t nodeIndex(IRNodeData* node) {
        SkASSERT(node >= fNodes);
        SkASSERT(node < fNodesEnd);
-         return SkToInt(node - fNodes);
+         return node - fNodes;
    }
};

- #if defined(SK_BUILD_FOR_IOS) && \
-         (!defined(__IPHONE_9_0) || __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0)
-
- #include <pthread.h>
-
- static pthread_key_t get_pthread_key() {
-     static pthread_key_t sKey = []{
-         pthread_key_t key;
-         int result = pthread_key_create(&key, /*destructor=*/nullptr);
-         if (result != 0) {
-             SK_ABORT("pthread_key_create failure: %d", result);
-         }
-         return key;
-     }();
-     return sKey;
- }
-
- static PoolData* get_thread_local_pool_data() {
-     return static_cast<PoolData*>(pthread_getspecific(get_pthread_key()));
- }
-
- static void set_thread_local_pool_data(PoolData* poolData) {
-     pthread_setspecific(get_pthread_key(), poolData);
- }
-
- #else
-
static thread_local PoolData* sPoolData = nullptr;

- static PoolData* get_thread_local_pool_data() {
-     return sPoolData;
- }
-
- static void set_thread_local_pool_data(PoolData* poolData) {
-     sPoolData = poolData;
- }
-
- #endif
-
static PoolData* create_pool_data(int nodesInPool) {
    // Create a PoolData structure with extra space at the end for additional IRNode data.
    int numExtraIRNodes = nodesInPool - 1;
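
The block deleted in the hunk above existed only to emulate thread-local storage with pthread keys on pre-iOS-9 toolchains; the surviving code relies on a single C++11 thread_local pointer and needs no accessor functions at all. A side-by-side sketch of the two approaches, using a stand-in MyData type (illustrative only, not part of the commit):

#include <pthread.h>

struct MyData {};

// (a) Emulated TLS, as in the deleted code: one process-wide key, one value slot per thread.
static pthread_key_t get_key() {
    static pthread_key_t sKey = []{
        pthread_key_t key;
        pthread_key_create(&key, /*destructor=*/nullptr);
        return key;
    }();
    return sKey;
}
static MyData* get_tls_pthread()          { return static_cast<MyData*>(pthread_getspecific(get_key())); }
static void    set_tls_pthread(MyData* d) { pthread_setspecific(get_key(), d); }

// (b) C++11 TLS, as in the surviving code: the compiler and runtime do the bookkeeping.
static thread_local MyData* sData = nullptr;
static MyData* get_tls_cpp11()            { return sData; }
static void    set_tls_cpp11(MyData* d)   { sData = d; }

This commit keeps only variant (b); on the pre-iOS-9 configurations that cannot use it, pooling is disabled outright rather than emulated.
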
@@ -98,9 +77,9 @@ static PoolData* create_pool_data(int nodesInPool) {


Pool::~Pool() {
-     if (get_thread_local_pool_data() == fData) {
+     if (sPoolData == fData) {
        SkDEBUGFAIL("SkSL pool is being destroyed while it is still attached to the thread");
-         set_thread_local_pool_data(nullptr);
+         sPoolData = nullptr;
    }

    // In debug mode, report any leaked nodes.
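
The reworked destructor above only trips its debug check if a Pool is destroyed while still bound to the current thread. A hypothetical scope guard, not part of this change or of Skia, shows how callers can keep attachToThread() and detachFromThread() paired so the check never fires; it assumes the Pool declarations in src/sksl/SkSLPool.h.

#include "src/sksl/SkSLPool.h"

// Hypothetical helper, shown only to illustrate the intended attach/detach pairing.
class AutoPoolAttach {
public:
    explicit AutoPoolAttach(SkSL::Pool* pool) : fPool(pool) {
        fPool->attachToThread();      // bind the pool's node storage to this thread
    }
    ~AutoPoolAttach() {
        fPool->detachFromThread();    // unbind before the Pool itself can be destroyed
    }
    AutoPoolAttach(const AutoPoolAttach&) = delete;
    AutoPoolAttach& operator=(const AutoPoolAttach&) = delete;

private:
    SkSL::Pool* fPool;
};
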
@@ -138,59 +117,59 @@ std::unique_ptr<Pool> Pool::CreatePoolOnThread(int nodesInPool) {
}

void Pool::detachFromThread() {
-     VLOG("DETACH Pool:0x%016llX\n", (uint64_t)get_thread_local_pool_data());
-     SkASSERT(get_thread_local_pool_data() != nullptr);
-     set_thread_local_pool_data(nullptr);
+     VLOG("DETACH Pool:0x%016llX\n", (uint64_t)sPoolData);
+     SkASSERT(sPoolData != nullptr);
+     sPoolData = nullptr;
}

void Pool::attachToThread() {
    VLOG("ATTACH Pool:0x%016llX\n", (uint64_t)fData);
-     SkASSERT(get_thread_local_pool_data() == nullptr);
-     set_thread_local_pool_data(fData);
+     SkASSERT(sPoolData == nullptr);
+     sPoolData = fData;
}

void* Pool::AllocIRNode() {
    // Is a pool attached?
-     PoolData* poolData = get_thread_local_pool_data();
-     if (poolData) {
+     if (sPoolData) {
        // Does the pool contain a free node?
-         IRNodeData* node = poolData->fFreeListHead;
+         IRNodeData* node = sPoolData->fFreeListHead;
        if (node) {
            // Yes. Take a node from the freelist.
-             poolData->fFreeListHead = node->fFreeListNext;
+             sPoolData->fFreeListHead = node->fFreeListNext;
            VLOG("ALLOC Pool:0x%016llX Index:%04d 0x%016llX\n",
-                  (uint64_t)poolData, poolData->nodeIndex(node), (uint64_t)node);
+                  (uint64_t)sPoolData, (int)(node - &sPoolData->fNodes[0]), (uint64_t)node);
            return node->fBuffer;
        }
    }

    // The pool is detached or full; allocate nodes using malloc.
    void* ptr = ::operator new(sizeof(IRNode));
    VLOG("ALLOC Pool:0x%016llX Index:____ malloc 0x%016llX\n",
-          (uint64_t)poolData, (uint64_t)ptr);
+          (uint64_t)sPoolData, (uint64_t)ptr);
    return ptr;
}

void Pool::FreeIRNode(void* node_v) {
    // Is a pool attached?
-     PoolData* poolData = get_thread_local_pool_data();
-     if (poolData) {
+     if (sPoolData) {
        // Did this node come from our pool?
        auto* node = static_cast<IRNodeData*>(node_v);
-         if (node >= &poolData->fNodes[0] && node < poolData->fNodesEnd) {
+         if (node >= &sPoolData->fNodes[0] && node < sPoolData->fNodesEnd) {
            // Yes. Push it back onto the freelist.
            VLOG("FREE Pool:0x%016llX Index:%04d 0x%016llX\n",
-                  (uint64_t)poolData, poolData->nodeIndex(node), (uint64_t)node);
-             node->fFreeListNext = poolData->fFreeListHead;
-             poolData->fFreeListHead = node;
+                  (uint64_t)sPoolData, (int)(node - &sPoolData->fNodes[0]), (uint64_t)node);
+             node->fFreeListNext = sPoolData->fFreeListHead;
+             sPoolData->fFreeListHead = node;
            return;
        }
    }

    // No pool is attached or the node was malloced; it must be freed.
    VLOG("FREE Pool:0x%016llX Index:____ free 0x%016llX\n",
-          (uint64_t)poolData, (uint64_t)node_v);
+          (uint64_t)sPoolData, (uint64_t)node_v);
    ::operator delete(node_v);
}

+ #endif  // !defined(SK_BUILD_FOR_IOS)...
+
}  // namespace SkSL
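
Taken together, the non-iOS path now keeps a single thread_local PoolData* per thread and serves IRNode-sized allocations from its free list, falling back to the heap whenever the pool is detached or exhausted. A rough usage sketch of the resulting API follows; it assumes the declarations in src/sksl/SkSLPool.h, that CreatePoolOnThread() attaches the new pool as its name suggests, and that AllocIRNode()/FreeIRNode() are static (their bodies above touch only the thread-local pointer). The node count is arbitrary.

#include "src/sksl/SkSLPool.h"

#include <memory>

void compileOnThisThread() {
    // Create a pool sized for this compilation and bind it to the current thread.
    std::unique_ptr<SkSL::Pool> pool = SkSL::Pool::CreatePoolOnThread(512);

    // While attached, IRNode-sized allocations come from the pool's free list;
    // anything beyond its capacity silently falls back to ::operator new.
    void* node = SkSL::Pool::AllocIRNode();
    SkSL::Pool::FreeIRNode(node);

    // Detach before the Pool is destroyed, or ~Pool() will complain in debug builds.
    pool->detachFromThread();
}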