-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathpredict.py
More file actions
72 lines (55 loc) · 2.42 KB
/
predict.py
File metadata and controls
72 lines (55 loc) · 2.42 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
import os
import argparse
import torch
from PIL import Image
from torchvision import transforms
from model import UNet
from tqdm import tqdm
def predict(args):
    """Run the trained UNet over every image in args.input_dir and save results.

    Args:
        args: argparse.Namespace with attributes ``input_dir``, ``output_dir``,
            ``model_path`` and ``img_size`` (see the CLI definition below).

    Each input image is resized to (img_size, img_size) for inference, and the
    network output is resized back to the image's original dimensions before
    being saved under the same filename in ``args.output_dir``.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f'Using device: {device}')
    # Validate paths up front, before constructing the model or doing any
    # heavy work (the original built the UNet before checking the checkpoint).
    if not os.path.exists(args.model_path):
        print(f"Error: Model not found at {args.model_path}")
        return
    if not os.path.isdir(args.input_dir):
        # Consistent print-and-return error style instead of letting
        # os.listdir raise an unhandled FileNotFoundError.
        print(f"Error: Input directory not found at {args.input_dir}")
        return
    # Load Model — architecture must match the training configuration.
    model = UNet(n_channels=3, n_classes=3, bilinear=True)
    model.load_state_dict(torch.load(args.model_path, map_location=device))
    model.to(device)
    model.eval()
    # Setup output directory
    os.makedirs(args.output_dir, exist_ok=True)
    # Transforms: resize to the training resolution, then scale to [0, 1].
    transform = transforms.Compose([
        transforms.Resize((args.img_size, args.img_size)),
        transforms.ToTensor()
    ])
    # Process images (sorted for deterministic order).
    images = sorted([f for f in os.listdir(args.input_dir)
                     if f.lower().endswith(('.jpg', '.jpeg', '.png'))])
    print(f"Found {len(images)} images in {args.input_dir}")
    if not images:
        # Nothing to do; avoid printing a misleading "Done!" message.
        return
    with torch.no_grad():
        for img_name in tqdm(images):
            img_path = os.path.join(args.input_dir, img_name)
            image = Image.open(img_path).convert("RGB")
            original_size = image.size  # (width, height), PIL convention
            # Preprocess: add a batch dimension and move to the device.
            input_tensor = transform(image).unsqueeze(0).to(device)
            # Inference
            output = model(input_tensor)
            # Postprocess: drop the batch dim and clamp to the valid [0, 1]
            # range before converting back to a PIL image.
            output = output.squeeze(0).cpu()
            output = torch.clamp(output, 0, 1)
            output_img = transforms.ToPILImage()(output)
            # Resize back to original size
            output_img = output_img.resize(original_size, Image.BICUBIC)
            # Save under the same filename as the input.
            save_path = os.path.join(args.output_dir, img_name)
            output_img.save(save_path)
    print(f"Done! Results saved to {args.output_dir}")
if __name__ == '__main__':
    # CLI entry point: declare the options as data, register them in a loop,
    # then hand the parsed namespace straight to predict().
    parser = argparse.ArgumentParser(description='Inference for Handwriting Removal')
    cli_options = [
        ('--input_dir', str, 'data/test', 'Path to input images'),
        ('--output_dir', str, 'results', 'Path to save results'),
        ('--model_path', str, 'checkpoints/best_model.pth', 'Path to trained model'),
        ('--img_size', int, 576, 'Image size used during training'),
    ]
    for flag, value_type, default_value, help_text in cli_options:
        parser.add_argument(flag, type=value_type, default=default_value, help=help_text)
    predict(parser.parse_args())