
Commit 4289259

lint
1 parent fc70f1c commit 4289259

1 file changed: +37 -37 lines changed


spec/memory_leak_prevention_spec.rb

Lines changed: 37 additions & 37 deletions
@@ -4,7 +4,7 @@
 
 describe 'Memory Leak Prevention' do
   let(:datafile) { '{"version": "4", "experiments": [], "groups": [], "events": [], "featureFlags": []}' }
-
+
   before do
     # Clean up any existing instances
     Optimizely::Project.clear_instance_cache!
@@ -18,31 +18,31 @@
   describe 'Thread Creation Prevention' do
     it 'should not create new threads when using get_or_create_instance repeatedly' do
       initial_thread_count = Thread.list.size
-
+
       # Simulate the problematic pattern that was causing memory leaks
       # In the real world, this would be called once per request
       threads_created = []
-
+
       10.times do |i|
         # Use the safe caching method
         optimizely = Optimizely::Project.get_or_create_instance(datafile: datafile)
-
+
         # Make a decision to trigger thread creation if any
         optimizely.create_user_context("user_#{i}")
-
+
         # Track thread count after each creation
         threads_created << Thread.list.size
       end
-
+
       final_thread_count = Thread.list.size
-
+
       # Should only have created one cached instance
       expect(Optimizely::Project.cached_instance_count).to eq(1)
-
+
       # Thread count should not have grown significantly per instance
       # Allow for some variance due to initialization of first instance
       expect(final_thread_count).to be <= initial_thread_count + 5
-
+
       # Verify that we're not creating more threads with each call
       # After the first few calls, thread count should stabilize
       stable_count = threads_created[3]
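
The behavior this test exercises amounts to a process-wide, mutex-guarded cache keyed on configuration. A minimal sketch of that idea follows; the class name, variable names, and key derivation are assumptions for illustration, not the SDK's actual internals.

# Minimal sketch of a config-keyed instance cache (assumed names and
# key derivation; not the real Optimizely::Project internals).
require 'digest'

class ProjectCacheSketch
  @cache = {}
  @mutex = Mutex.new

  class << self
    def get_or_create_instance(datafile:)
      key = Digest::SHA256.hexdigest(datafile)
      @mutex.synchronize do
        # Reusing the cached instance means its background threads are
        # created once, not once per request.
        @cache[key] ||= new(datafile: datafile)
      end
    end

    def cached_instance_count
      @cache.size
    end
  end

  def initialize(datafile:)
    @datafile = datafile
  end
end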
@@ -51,22 +51,22 @@
 
     it 'demonstrates the memory leak that would occur with repeated Project.new calls' do
       instances = []
-
+
       # Simulate the problematic pattern (commented out to avoid actual leak in tests)
       # This is what users were doing that caused the memory leak:
       5.times do
         # instances << Optimizely::Project.new(datafile: datafile)
-        #
+        #
         # Instead, show what happens when we create instances without caching
         # and don't clean them up (simulating the leak condition)
         instances << Optimizely::Project.new(datafile: datafile)
       end
-
+
       # Each instance would create its own background threads
       # In the real memory leak scenario, these would accumulate indefinitely
       expect(instances.size).to eq(5)
       expect(instances.uniq.size).to eq(5) # All different instances
-
+
       # Clean up instances to prevent actual memory leak in test
       instances.each(&:close)
     end
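
When a per-call instance is genuinely unavoidable, the leak is prevented by closing it, mirroring the `instances.each(&:close)` cleanup this test performs. A usage sketch built only from calls the spec itself uses:

# If a one-off instance really is needed, close it so its background
# threads are stopped instead of accumulating.
instance = Optimizely::Project.new(datafile: datafile)
begin
  instance.create_user_context('one_off_user')
ensure
  instance.close
end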
@@ -76,7 +76,7 @@
     it 'should create same cache key for identical configurations' do
       instance1 = Optimizely::Project.get_or_create_instance(datafile: datafile)
       instance2 = Optimizely::Project.get_or_create_instance(datafile: datafile)
-
+
       expect(instance1).to be(instance2)
       expect(Optimizely::Project.cached_instance_count).to eq(1)
     end
@@ -90,7 +90,7 @@
         datafile: datafile,
         skip_json_validation: false
       )
-
+
       expect(instance1).not_to be(instance2)
       expect(Optimizely::Project.cached_instance_count).to eq(2)
     end
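
These two tests pin down the cache key's contract: identical options map to one key, differing options to different keys. One plausible derivation, assuming the key covers every behavior-affecting option (the diff does not show the real implementation):

# Hypothetical key derivation: hash the full option set, so any
# differing option (e.g. skip_json_validation) yields a new key.
require 'digest'

def instance_cache_key(datafile:, skip_json_validation: true)
  Digest::SHA256.hexdigest(
    { datafile: datafile, skip_json_validation: skip_json_validation }.inspect
  )
end

key_a = instance_cache_key(datafile: '{}')
key_b = instance_cache_key(datafile: '{}', skip_json_validation: false)
puts key_a == key_b # => false, so the two configs get distinct instances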
@@ -99,14 +99,14 @@
   describe 'Resource Cleanup' do
     it 'should properly stop background threads when instance is closed' do
       instance = Optimizely::Project.get_or_create_instance(datafile: datafile)
-
+
       # Trigger thread creation by making a decision
       instance.create_user_context('test_user')
-
+
       expect(instance.stopped).to be_falsy
-
+
       instance.close
-
+
       expect(instance.stopped).to be_truthy
       expect(Optimizely::Project.cached_instance_count).to eq(0)
     end
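
The assertions imply a specific close lifecycle: `close` stops the instance's background threads, flips `stopped`, and evicts the instance from the cache. A sketch of that contract, with the method body and the eviction helper as assumptions:

# What the assertions above imply close must do (sketch; the thread
# shutdown step and the evict_from_cache helper are assumptions).
def close
  return if @stopped

  @stopped = true
  # ... stop event-processor and datafile-polling threads here ...
  self.class.evict_from_cache(self) # hypothetical cache-eviction helper
end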
@@ -116,13 +116,13 @@
       instance2 = Optimizely::Project.get_or_create_instance(
         datafile: '{"version": "4", "experiments": [{"id": "test"}], "groups": [], "events": [], "featureFlags": []}'
       )
-
+
       expect(Optimizely::Project.cached_instance_count).to eq(2)
       expect(instance1.stopped).to be_falsy
       expect(instance2.stopped).to be_falsy
-
+
       Optimizely::Project.clear_instance_cache!
-
+
       expect(Optimizely::Project.cached_instance_count).to eq(0)
       expect(instance1.stopped).to be_truthy
       expect(instance2.stopped).to be_truthy
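
Per these assertions, `clear_instance_cache!` must do more than empty a hash: every cached instance has to be closed so its threads stop. A sketch consistent with that contract, reusing the assumed `@cache`/`@mutex` names from the earlier sketch:

# clear_instance_cache! as the assertions describe it: close each
# cached instance (stopping its threads), then empty the cache.
def self.clear_instance_cache!
  @mutex.synchronize do
    @cache.each_value(&:close)
    @cache.clear
  end
end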
@@ -132,61 +132,61 @@
   describe 'Production Usage Patterns' do
     it 'should handle Rails-like request pattern efficiently' do
       initial_thread_count = Thread.list.size
-
+
       # Simulate Rails controller pattern with cached datafile
       cached_datafile = datafile
       request_results = []
-
+
       # Simulate 50 requests (what would cause significant memory growth before)
       50.times do |request_id|
         # This is the safe pattern that should be used in production
         optimizely = Optimizely::Project.get_or_create_instance(datafile: cached_datafile)
-
+
         # Simulate making decisions in the request
         optimizely.create_user_context("user_#{request_id}")
-
+
         # Store result (in real app this would be returned to user)
         request_results << {
           request_id: request_id,
           optimizely_instance_id: optimizely.object_id,
           thread_count: Thread.list.size
         }
       end
-
+
       # Verify efficiency:
       # 1. All requests should use the same instance
       unique_instance_ids = request_results.map { |r| r[:optimizely_instance_id] }.uniq
       expect(unique_instance_ids.size).to eq(1)
-
+
       # 2. Only one instance should be cached
       expect(Optimizely::Project.cached_instance_count).to eq(1)
-
+
       # 3. Thread count should be stable after initial ramp-up
       final_thread_counts = request_results.last(10).map { |r| r[:thread_count] }
      expect(final_thread_counts.uniq.size).to be <= 2 # Allow for minimal variance
-
+
       # 4. No significant thread growth
       final_thread_count = Thread.list.size
       expect(final_thread_count).to be <= initial_thread_count + 10
     end
   end
-
+
   describe 'Memory Safety Guarantees' do
     it 'should not cache instances with dynamic configuration' do
      # These should not be cached due to having dynamic config
       instance_with_sdk_key = Optimizely::Project.get_or_create_instance(
         datafile: datafile,
         sdk_key: 'test_key'
       )
-
+
       instance_with_user_profile = Optimizely::Project.get_or_create_instance(
         datafile: datafile,
         user_profile_service: double('user_profile_service')
       )
-
+
       # Should have 0 cached instances since these shouldn't be cached
       expect(Optimizely::Project.cached_instance_count).to eq(0)
-
+
       # Clean up the non-cached instances
       instance_with_sdk_key.close
       instance_with_user_profile.close
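
The request loop above maps onto a Rails controller roughly as follows. This is an illustrative sketch: the controller, the datafile-caching strategy, and the 'my_flag' key are assumptions, not part of the spec.

# Illustrative Rails usage of the safe pattern (controller name, cache
# key, and flag key are assumptions).
class ExperimentsController < ApplicationController
  def show
    datafile = Rails.cache.read('optimizely_datafile') # cached elsewhere
    # Shared, cached instance: no new Project (and no new background
    # threads) per request.
    optimizely = Optimizely::Project.get_or_create_instance(datafile: datafile)
    user = optimizely.create_user_context(current_user.id.to_s)
    render json: { variation: user.decide('my_flag').variation_key }
  end
end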
@@ -195,12 +195,12 @@
     it 'should handle finalizer cleanup gracefully' do
       # Test that finalizers work when instances are not explicitly closed
       Optimizely::Project.get_or_create_instance(datafile: datafile)
-
+
       expect(Optimizely::Project.cached_instance_count).to eq(1)
-
+
       # Force garbage collection to trigger finalizer
       GC.start
-
+
       # The finalizer should have been called, but the instance might still be
       # in cache until explicitly removed. This tests that the finalizer
       # doesn't crash the system.
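
A finalizer that behaves well under this test has to avoid capturing the instance itself, since a finalizer proc that closes over `self` keeps the object alive and `GC.start` never collects it. One safe registration pattern, offered as an assumption about how such cleanup might be wired:

# Safe finalizer registration sketch (assumed wiring): the proc closes
# over plain data, never over the instance, so GC can collect it.
class ProjectFinalizerSketch
  def initialize(datafile:)
    @datafile = datafile
    ObjectSpace.define_finalizer(self, self.class.finalizer(object_id))
  end

  # Returns a proc bound only to the object_id, not to the object.
  def self.finalizer(id)
    proc { |_| warn "instance #{id} was garbage-collected without close" }
  end
end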
