
Commit 7ba92b0

Merge branch 'comma-missing-in-trainer-docs' of https://github.com/Yacklin/transformers into comma-missing-in-trainer-docs
2 parents 896d862 + 4fe0092

File tree

3 files changed: +7 -7 lines changed

src/transformers/models/owlv2/image_processing_owlv2.py

Lines changed: 1 addition & 1 deletion

@@ -291,7 +291,7 @@ def pad(
             image = pad(
                 image=image,
                 padding=((0, size - height), (0, size - width)),
-                constant_values=0.5,
+                constant_values=0.0,
                 data_format=data_format,
                 input_data_format=input_data_format,
             )
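
The change above swaps the fill value used when the slow OWLv2 processor pads an image to a square: instead of 0.5, the bottom/right border is now filled with 0.0. A minimal NumPy sketch of this kind of bottom/right constant padding (the `pad_to_square` helper, shapes, and channel layout are illustrative assumptions, not the library's actual code):

import numpy as np

def pad_to_square(image: np.ndarray, constant_value: float = 0.0) -> np.ndarray:
    """Pad a (height, width, channels) array on the bottom/right to a square canvas."""
    height, width = image.shape[:2]
    size = max(height, width)
    # Only the spatial dimensions are padded; the channel axis is left untouched.
    return np.pad(
        image,
        pad_width=((0, size - height), (0, size - width), (0, 0)),
        mode="constant",
        constant_values=constant_value,
    )

# A 480x640 RGB image becomes 640x640, with the added rows filled with 0.0.
image = np.random.rand(480, 640, 3).astype(np.float32)
print(pad_to_square(image).shape)  # (640, 640, 3)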

src/transformers/models/owlv2/image_processing_owlv2_fast.py

Lines changed: 3 additions & 3 deletions

@@ -228,7 +228,7 @@ def post_process_image_guided_detection(self, outputs, threshold=0.0, nms_thresh
 
         return results
 
-    def _pad_images(self, images: "torch.Tensor", constant_value: float = 0.5) -> "torch.Tensor":
+    def _pad_images(self, images: "torch.Tensor", constant_value: float = 0.0) -> "torch.Tensor":
         """
         Pad an image with zeros to the given size.
         """
@@ -245,7 +245,7 @@ def pad(
         self,
         images: list["torch.Tensor"],
         disable_grouping: Optional[bool],
-        constant_value: float = 0.5,
+        constant_value: float = 0.0,
         **kwargs,
     ) -> list["torch.Tensor"]:
         """
@@ -351,7 +351,7 @@ def _preprocess(
         processed_images = reorder_images(processed_images_grouped, grouped_images_index)
 
         if do_pad:
-            processed_images = self.pad(processed_images, constant_value=0.5, disable_grouping=disable_grouping)
+            processed_images = self.pad(processed_images, constant_value=0.0, disable_grouping=disable_grouping)
 
         grouped_images, grouped_images_index = group_images_by_shape(
             processed_images, disable_grouping=disable_grouping
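
The fast processor applies the same padding to batched torch tensors via `_pad_images` and `pad`. Below is a hedged sketch of bottom/right constant padding with `torch.nn.functional.pad`, under an assumed (batch, channels, height, width) layout; it is an illustration, not the actual `_pad_images` implementation:

import torch
import torch.nn.functional as F

def pad_batch_to_square(images: torch.Tensor, constant_value: float = 0.0) -> torch.Tensor:
    """Pad a (batch, channels, height, width) tensor on the bottom/right to a square."""
    height, width = images.shape[-2:]
    size = max(height, width)
    # F.pad pads the last dimension first: (left, right, top, bottom).
    return F.pad(images, (0, size - width, 0, size - height), mode="constant", value=constant_value)

batch = torch.zeros(2, 3, 480, 640)
print(pad_batch_to_square(batch).shape)  # torch.Size([2, 3, 640, 640])

With the default now 0.0, the behaviour matches the "Pad an image with zeros to the given size." docstring shown in the hunk above.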

src/transformers/models/owlv2/modular_owlv2.py

Lines changed: 3 additions & 3 deletions

@@ -52,7 +52,7 @@ class Owlv2ImageProcessorFast(OwlViTImageProcessorFast):
     crop_size = None
     do_center_crop = None
 
-    def _pad_images(self, images: "torch.Tensor", constant_value: float = 0.5) -> "torch.Tensor":
+    def _pad_images(self, images: "torch.Tensor", constant_value: float = 0.0) -> "torch.Tensor":
         """
         Pad an image with zeros to the given size.
         """
@@ -69,7 +69,7 @@ def pad(
         self,
         images: list["torch.Tensor"],
        disable_grouping: Optional[bool],
-        constant_value: float = 0.5,
+        constant_value: float = 0.0,
         **kwargs,
     ) -> list["torch.Tensor"]:
         """
@@ -175,7 +175,7 @@ def _preprocess(
         processed_images = reorder_images(processed_images_grouped, grouped_images_index)
 
         if do_pad:
-            processed_images = self.pad(processed_images, constant_value=0.5, disable_grouping=disable_grouping)
+            processed_images = self.pad(processed_images, constant_value=0.0, disable_grouping=disable_grouping)
 
         grouped_images, grouped_images_index = group_images_by_shape(
             processed_images, disable_grouping=disable_grouping

0 commit comments
