@@ -48,11 +48,10 @@ def __init__(
         command_id: CommandId,
         status: CommandState,
         has_been_closed_server_side: bool = False,
+        has_more_rows: bool = False,
         results_queue=None,
         description=None,
         is_staging_operation: bool = False,
-        lz4_compressed: bool = False,
-        arrow_schema_bytes: bytes = b"",
     ):
         """
         A ResultSet manages the results of a single command.
@@ -80,10 +79,9 @@ def __init__(
         self.command_id = command_id
         self.status = status
         self.has_been_closed_server_side = has_been_closed_server_side
+        self.has_more_rows = has_more_rows
         self.results = results_queue
         self._is_staging_operation = is_staging_operation
-        self.lz4_compressed = lz4_compressed
-        self._arrow_schema_bytes = arrow_schema_bytes

     def __iter__(self):
         while True:
@@ -187,24 +185,9 @@ def __init__(
             has_more_rows: Whether there are more rows to fetch
         """
         # Initialize ThriftResultSet-specific attributes
+        self._arrow_schema_bytes = execute_response.arrow_schema_bytes
         self._use_cloud_fetch = use_cloud_fetch
-        self.has_more_rows = has_more_rows
-
-        # Build the results queue if t_row_set is provided
-        results_queue = None
-        if t_row_set and execute_response.result_format is not None:
-            from databricks.sql.utils import ThriftResultSetQueueFactory
-
-            # Create the results queue using the provided format
-            results_queue = ThriftResultSetQueueFactory.build_queue(
-                row_set_type=execute_response.result_format,
-                t_row_set=t_row_set,
-                arrow_schema_bytes=execute_response.arrow_schema_bytes or b"",
-                max_download_threads=max_download_threads,
-                lz4_compressed=execute_response.lz4_compressed,
-                description=execute_response.description,
-                ssl_options=ssl_options,
-            )
+        self.lz4_compressed = execute_response.lz4_compressed

         # Call parent constructor with common attributes
         super().__init__(
@@ -215,6 +198,7 @@ def __init__(
             command_id=execute_response.command_id,
             status=execute_response.status,
             has_been_closed_server_side=execute_response.has_been_closed_server_side,
+            has_more_rows=has_more_rows,
             results_queue=results_queue,
             description=execute_response.description,
             is_staging_operation=execute_response.is_staging_operation,
@@ -450,7 +434,7 @@ def map_col_type(type_):


 class SeaResultSet(ResultSet):
-    """ResultSet implementation for SEA backend."""
+    """ResultSet implementation for the SEA backend."""

     def __init__(
         self,
@@ -467,12 +451,11 @@ def __init__(

         Args:
             connection: The parent connection
-            execute_response: Response from the execute command
             sea_client: The SeaDatabricksClient instance for direct access
             buffer_size_bytes: Buffer size for fetching results
             arraysize: Default number of rows to fetch
-            result_data: Result data from SEA response (optional)
-            manifest: Manifest from SEA response (optional)
+            execute_response: Response from the execute command (new style)
+            sea_response: Direct SEA response (legacy style)
         """

         if result_data:
@@ -498,8 +481,6 @@ def __init__(
             results_queue=queue,
             description=execute_response.description,
             is_staging_operation=execute_response.is_staging_operation,
-            lz4_compressed=execute_response.lz4_compressed,
-            arrow_schema_bytes=execute_response.arrow_schema_bytes,
         )

     def _convert_to_row_objects(self, rows):
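
Note: the queue-construction block deleted from ThriftResultSet.__init__ presumably still runs somewhere before the result set is created, with the finished queue handed in via results_queue. A minimal sketch of that relocated logic, reusing the exact factory arguments from the removed lines; the helper name and its placement at the call site are assumptions, not part of this diff:

from databricks.sql.utils import ThriftResultSetQueueFactory


def _build_results_queue(execute_response, t_row_set, max_download_threads, ssl_options):
    """Hypothetical caller-side helper: the queue-building logic removed from __init__ above."""
    if not t_row_set or execute_response.result_format is None:
        return None
    # Same arguments the constructor passed to the factory before the refactor.
    return ThriftResultSetQueueFactory.build_queue(
        row_set_type=execute_response.result_format,
        t_row_set=t_row_set,
        arrow_schema_bytes=execute_response.arrow_schema_bytes or b"",
        max_download_threads=max_download_threads,
        lz4_compressed=execute_response.lz4_compressed,
        description=execute_response.description,
        ssl_options=ssl_options,
    )

# Assumed usage: build the queue first, then pass it to the constructor, e.g.
#   queue = _build_results_queue(execute_response, t_row_set, max_download_threads, ssl_options)
#   ThriftResultSet(..., has_more_rows=has_more_rows, results_queue=queue, ...)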