Revert "update batch evaluate (#154)"
This reverts commit b5c716c.
Bobholamovic authored Aug 17, 2023
1 parent b7d4ad7 commit a84a2ba
Showing 6 changed files with 171 additions and 353 deletions.
31 changes: 16 additions & 15 deletions paddlers/tasks/change_detector.py
@@ -29,7 +29,7 @@
 import paddlers.utils.logging as logging
 from paddlers.models import seg_losses
 from paddlers.transforms import Resize, decode_image, construct_sample
-from paddlers.utils import to_data_parallel
+from paddlers.utils import get_single_card_bs
 from paddlers.utils.checkpoint import cd_pretrain_weights_dict
 from .base import BaseModel
 from .utils import seg_metrics as metrics
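The import swap above is the crux of the revert: the distributed-evaluation helper `to_data_parallel` introduced by #154 goes away, and the per-card batch-size helper `get_single_card_bs` comes back. A minimal sketch of what such a helper presumably does, inferred only from how the restored code uses it (the card-count source below is an assumption, not PaddleRS's actual implementation):

```python
import os

def get_single_card_bs(batch_size):
    # Sketch: derive the per-card batch size from a global batch size.
    # Assumes the card count comes from CUDA_VISIBLE_DEVICES, which may
    # differ from how paddlers.utils really determines it.
    num_cards = len(os.environ.get('CUDA_VISIBLE_DEVICES', '0').split(','))
    if batch_size % num_cards != 0:
        raise ValueError(
            "batch_size ({}) is not divisible by the number of cards ({}).".
            format(batch_size, num_cards))
    return batch_size // num_cards
```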
@@ -447,22 +447,25 @@ def evaluate(self, eval_dataset, batch_size=1, return_details=False):
"""

self._check_transforms(eval_dataset.transforms)
net = self.net
net.eval()

# XXX: Hard-coding
self.net.eval()
nranks = paddle.distributed.get_world_size()
local_rank = paddle.distributed.get_rank()
if nranks > 1:
# Initialize parallel environment if not done.
if not paddle.distributed.parallel.parallel_helper._is_parallel_ctx_initialized(
):
if not (paddle.distributed.parallel.parallel_helper.
_is_parallel_ctx_initialized()):
paddle.distributed.init_parallel_env()
net = to_data_parallel(
net, find_unused_parameters=self.find_unused_parameters)
else:
net = to_data_parallel(
net, find_unused_parameters=self.find_unused_parameters)

batch_size_each_card = get_single_card_bs(batch_size)
if batch_size_each_card > 1:
batch_size_each_card = 1
batch_size = batch_size_each_card * paddlers.env_info['num']
logging.warning(
"ChangeDetector only supports batch_size=1 for each gpu/cpu card " \
"during evaluation, so batch_size " \
"is forcibly set to {}.".format(batch_size)
)
self.eval_data_loader = self.build_data_loader(
eval_dataset, batch_size=batch_size, mode='eval')

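Two things are restored by this hunk: the distributed context is still initialized exactly once when running on multiple cards, but the model is no longer wrapped with `to_data_parallel` (evaluation runs the raw `self.net`), and the per-card batch size is forced back to 1. The once-only guard can be read in isolation as follows, a minimal sketch built on the same private helper the diff itself relies on:

```python
import paddle

def init_parallel_env_once():
    # Initialize Paddle's distributed context only on the first call;
    # calling init_parallel_env() twice would raise. The private
    # _is_parallel_ctx_initialized check is the one used in the diff above.
    helper = paddle.distributed.parallel.parallel_helper
    if not helper._is_parallel_ctx_initialized():
        paddle.distributed.init_parallel_env()
```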
@@ -482,9 +485,9 @@ def evaluate(self, eval_dataset, batch_size=1, return_details=False):
                         enable=True,
                         custom_white_list=self.custom_white_list,
                         custom_black_list=self.custom_black_list):
-                        outputs = self.run(net, data, 'eval')
+                        outputs = self.run(self.net, data, 'eval')
                 else:
-                    outputs = self.run(net, data, 'eval')
+                    outputs = self.run(self.net, data, 'eval')
                 pred_area = outputs['pred_area']
                 label_area = outputs['label_area']
                 intersect_area = outputs['intersect_area']
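The loop accumulates `pred_area`, `label_area`, and `intersect_area` per batch; segmentation metrics such as IoU then fall out of these accumulated pixel counts. A hedged sketch of the arithmetic (the actual reduction lives in `paddlers/tasks/utils/seg_metrics.py` and may differ in detail):

```python
import numpy as np

def iou_from_areas(intersect_area, pred_area, label_area):
    # Per-class IoU from accumulated pixel areas:
    # union = pred + label - intersection.
    intersect_area = np.asarray(intersect_area, dtype='float64')
    union = (np.asarray(pred_area, dtype='float64') +
             np.asarray(label_area, dtype='float64') - intersect_area)
    class_iou = intersect_area / np.maximum(union, 1)  # avoid 0/0
    return class_iou, float(np.mean(class_iou))
```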
@@ -691,8 +694,6 @@ def postprocess(self, batch_pred, batch_restore_list):
             else:
                 raise RuntimeError
             results.append(pred)
-        if len(results) > 1:
-            results = [paddle.concat(results, axis=0)]
         return results
 
     def _infer_postprocess(self, batch_label_map, batch_score_map,
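The two lines dropped from `postprocess` change its return contract: after the revert, callers get a list of per-sample prediction tensors instead of a single batched tensor. A small illustration of the difference (shapes are made up for the example):

```python
import paddle

# Four single-sample prediction maps, e.g. [1, num_classes, H, W] each.
preds = [paddle.zeros([1, 2, 256, 256]) for _ in range(4)]

# Pre-revert behavior: collapse the list into one batched tensor.
batched = [paddle.concat(preds, axis=0)]  # one tensor of shape [4, 2, 256, 256]

# Post-revert behavior: the per-sample list is returned unchanged.
assert len(batched) == 1 and len(preds) == 4
```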
94 changes: 40 additions & 54 deletions paddlers/tasks/classifier.py
@@ -25,7 +25,6 @@
 import paddlers.rs_models.clas as cmcls
 import paddlers.utils.logging as logging
 from paddlers.models.ppcls.metric import build_metrics
-from paddlers.utils import to_data_parallel
 from paddlers.models import clas_losses
 from paddlers.models.ppcls.data.postprocess import build_postprocess
 from paddlers.utils.checkpoint import cls_pretrain_weights_dict
@@ -403,67 +402,54 @@ def evaluate(self, eval_dataset, batch_size=1, return_details=False):
"""

self._check_transforms(eval_dataset.transforms)
net = self.net
net.eval()

# XXX: Hard-coding
self.net.eval()
nranks = paddle.distributed.get_world_size()
local_rank = paddle.distributed.get_rank()
if nranks > 1:
# Initialize parallel environment if not done.
if not paddle.distributed.parallel.parallel_helper._is_parallel_ctx_initialized(
):
paddle.distributed.init_parallel_env()
net = to_data_parallel(
net, find_unused_parameters=self.find_unused_parameters)
else:
net = to_data_parallel(
net, find_unused_parameters=self.find_unused_parameters)

self.eval_data_loader = self.build_data_loader(
eval_dataset, batch_size=batch_size, mode='eval')
logging.info("Start to evaluate (total_samples={}, total_steps={})...".
format(eval_dataset.num_samples, eval_dataset.num_samples))

top1s = []
top5s = []
with paddle.no_grad():
for step, data in enumerate(self.eval_data_loader):
if self.precision == 'fp16':
with paddle.amp.auto_cast(
level=self.amp_level,
enable=True,
custom_white_list=self.custom_white_list,
custom_black_list=self.custom_black_list):
outputs = self.run(net, data, 'eval')
else:
outputs = self.run(net, data, 'eval')
if nranks > 1:
t1 = outputs["top1"]
t5 = outputs["top5"]
t1s = []
t5s = []
paddle.distributed.all_gather(t1s, t1)
paddle.distributed.all_gather(t5s, t5)
for rank_id in range(nranks):
top1 = t1s[rank_id]
top5 = t5s[rank_id]
for i in range(data['image'].shape[0]):
top1s.append(top1)
top5s.append(top5)
else:
for i in range(data['image'].shape[0]):
top1s.append(outputs["top1"])
top5s.append(outputs["top5"])

top1 = np.mean(top1s)
top5 = np.mean(top5s)
eval_metrics = OrderedDict(zip(['top1', 'top5'], [top1, top5]))

if return_details:
# TODO: Add details
return eval_metrics, None

return eval_metrics
if batch_size > 1:
logging.warning(
"Classifier only supports single card evaluation with batch_size=1 "
"during evaluation, so batch_size is forcibly set to 1.")
batch_size = 1

if nranks < 2 or local_rank == 0:
self.eval_data_loader = self.build_data_loader(
eval_dataset, batch_size=batch_size, mode='eval')
logging.info(
"Start to evaluate (total_samples={}, total_steps={})...".
format(eval_dataset.num_samples, eval_dataset.num_samples))

top1s = []
top5s = []
with paddle.no_grad():
for step, data in enumerate(self.eval_data_loader):
if self.precision == 'fp16':
with paddle.amp.auto_cast(
level=self.amp_level,
enable=True,
custom_white_list=self.custom_white_list,
custom_black_list=self.custom_black_list):
outputs = self.run(self.net, data, 'eval')
else:
outputs = self.run(self.net, data, 'eval')
top1s.append(outputs["top1"])
top5s.append(outputs["top5"])

top1 = np.mean(top1s)
top5 = np.mean(top5s)
eval_metrics = OrderedDict(zip(['top1', 'top5'], [top1, top5]))

if return_details:
# TODO: Add details
return eval_metrics, None

return eval_metrics

@paddle.no_grad()
def predict(self, img_file, transforms=None):
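The deleted classifier branch gathered per-rank top-1/top-5 tensors with `paddle.distributed.all_gather` before averaging; the restored code instead evaluates on a single card (rank 0 only, with batch_size forced to 1). For reference, the gather pattern that was removed looks like this in isolation, only meaningful under a `paddle.distributed.launch` run:

```python
import paddle
import paddle.distributed as dist

def gather_and_average(metric):
    # Collect a scalar metric tensor from every rank, then average.
    # Mirrors the all_gather usage deleted above; not a drop-in for the
    # removed code, which also repeated each rank's value once per
    # sample in the batch before averaging.
    gathered = []
    dist.all_gather(gathered, metric)
    return float(paddle.mean(paddle.stack(gathered)))
```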