Mirror of https://github.com/20kaushik02/CSE546_Cloud_Computing_Projects.git (synced 2026-01-25 14:44:04 +00:00)
Did not do part 2, but must complete it even if it's of no use.
39 Tooling/model/facenet_pytorch/tests/perf_test.py Normal file
@@ -0,0 +1,39 @@
from facenet_pytorch import MTCNN, training
import torch
from torchvision import datasets, transforms
from torch.utils.data import DataLoader, RandomSampler
from tqdm import tqdm
import time


def main():
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print(f'Running on device "{device}"')

    mtcnn = MTCNN(device=device)

    batch_size = 32

    # Generate data loader
    ds = datasets.ImageFolder(
        root='data/test_images/',
        transform=transforms.Resize((512, 512))
    )
    dl = DataLoader(
        dataset=ds,
        num_workers=4,
        collate_fn=training.collate_pil,
        batch_size=batch_size,
        sampler=RandomSampler(ds, replacement=True, num_samples=960),
    )

    start = time.time()
    faces = []
    for x, _ in tqdm(dl):
        faces.extend(mtcnn(x))
    elapsed = time.time() - start
    print(f'Elapsed: {elapsed} | EPS: {len(dl) * batch_size / elapsed}')


if __name__ == '__main__':
    main()
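Editor's note on the throughput figure above (not part of the commit): with RandomSampler's num_samples=960 and batch_size=32, the loader yields 30 batches per pass, so len(dl) * batch_size is the 960 images processed and EPS is images per second of wall-clock time. A minimal sketch of that arithmetic, with the elapsed time as a hypothetical value:

# Editor's sketch (not from the commit): the EPS arithmetic used in perf_test.py.
num_samples = 960                         # RandomSampler num_samples above
batch_size = 32
num_batches = num_samples // batch_size   # len(dl) == 30
elapsed = 12.0                            # hypothetical wall-clock seconds
eps = num_batches * batch_size / elapsed  # images per second; 80.0 in this example
print(eps)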
14 Tooling/model/facenet_pytorch/tests/travis_requirements.txt Normal file
@@ -0,0 +1,14 @@
numpy==1.16.2
requests==2.21.0
torch==1.3.1
torchvision==0.4.2
Pillow==6.1.0
opencv-python>=4.1.0
scipy==1.3.0
pandas==0.24.2
coverage==4.5.3
codecov==2.0.15
jupyter==1.0.0
tensorboard==1.14.0
future==0.17.1
./
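Editor's note (not part of the commit): the trailing "./" entry makes pip install the package from the repository root itself, so installing this requirements file also installs facenet_pytorch in the same step; the pinned versions presumably reflect the Travis CI environment the test below was written against.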
230 Tooling/model/facenet_pytorch/tests/travis_test.py Normal file
@@ -0,0 +1,230 @@
"""
The following code is intended to be run only by Travis for continuous integration and testing
purposes. For implementation examples see notebooks in the examples folder.
"""

from PIL import Image, ImageDraw
import torch
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
import numpy as np
import pandas as pd
from time import time
import sys, os
import glob

from models.mtcnn import MTCNN, fixed_image_standardization
from models.inception_resnet_v1 import InceptionResnetV1, get_torch_home


#### CLEAR ALL OUTPUT FILES ####

checkpoints = glob.glob(os.path.join(get_torch_home(), 'checkpoints/*'))
for c in checkpoints:
    print('Removing {}'.format(c))
    os.remove(c)

crop_files = glob.glob('data/test_images_aligned/**/*.png')
for c in crop_files:
    print('Removing {}'.format(c))
    os.remove(c)


#### TEST EXAMPLE IPYNB'S ####

os.system('jupyter nbconvert --to script --stdout examples/infer.ipynb examples/finetune.ipynb > examples/tmptest.py')
os.chdir('examples')
try:
    import examples.tmptest
except:
    import tmptest
os.chdir('..')


#### TEST MTCNN ####

def get_image(path, trans):
    img = Image.open(path)
    img = trans(img)
    return img

trans = transforms.Compose([
    transforms.Resize(512)
])

trans_cropped = transforms.Compose([
    np.float32,
    transforms.ToTensor(),
    fixed_image_standardization
])

dataset = datasets.ImageFolder('data/test_images', transform=trans)
dataset.idx_to_class = {k: v for v, k in dataset.class_to_idx.items()}

mtcnn_pt = MTCNN(device=torch.device('cpu'))

names = []
aligned = []
aligned_fromfile = []
for img, idx in dataset:
    name = dataset.idx_to_class[idx]
    start = time()
    img_align = mtcnn_pt(img, save_path='data/test_images_aligned/{}/1.png'.format(name))
    print('MTCNN time: {:6f} seconds'.format(time() - start))

    # Comparison between types
    img_box = mtcnn_pt.detect(img)[0]
    assert (img_box - mtcnn_pt.detect(np.array(img))[0]).sum() < 1e-2
    assert (img_box - mtcnn_pt.detect(torch.as_tensor(np.array(img)))[0]).sum() < 1e-2

    # Batching test
    assert (img_box - mtcnn_pt.detect([img, img])[0]).sum() < 1e-2
    assert (img_box - mtcnn_pt.detect(np.array([np.array(img), np.array(img)]))[0]).sum() < 1e-2
    assert (img_box - mtcnn_pt.detect(torch.as_tensor([np.array(img), np.array(img)]))[0]).sum() < 1e-2

    # Box selection
    mtcnn_pt.selection_method = 'probability'
    print('\nprobability - ', mtcnn_pt.detect(img))
    mtcnn_pt.selection_method = 'largest'
    print('largest - ', mtcnn_pt.detect(img))
    mtcnn_pt.selection_method = 'largest_over_theshold'
    print('largest_over_theshold - ', mtcnn_pt.detect(img))
    mtcnn_pt.selection_method = 'center_weighted_size'
    print('center_weighted_size - ', mtcnn_pt.detect(img))

    if img_align is not None:
        names.append(name)
        aligned.append(img_align)
        aligned_fromfile.append(get_image('data/test_images_aligned/{}/1.png'.format(name), trans_cropped))

aligned = torch.stack(aligned)
aligned_fromfile = torch.stack(aligned_fromfile)


#### TEST EMBEDDINGS ####

expected = [
    [
        [0.000000, 1.482895, 0.886342, 1.438450, 1.437583],
        [1.482895, 0.000000, 1.345686, 1.029880, 1.061939],
        [0.886342, 1.345686, 0.000000, 1.363125, 1.338803],
        [1.438450, 1.029880, 1.363125, 0.000000, 1.066040],
        [1.437583, 1.061939, 1.338803, 1.066040, 0.000000]
    ],
    [
        [0.000000, 1.430769, 0.992931, 1.414197, 1.329544],
        [1.430769, 0.000000, 1.253911, 1.144899, 1.079755],
        [0.992931, 1.253911, 0.000000, 1.358875, 1.337322],
        [1.414197, 1.144899, 1.358875, 0.000000, 1.204118],
        [1.329544, 1.079755, 1.337322, 1.204118, 0.000000]
    ]
]

for i, ds in enumerate(['vggface2', 'casia-webface']):
    resnet_pt = InceptionResnetV1(pretrained=ds).eval()

    start = time()
    embs = resnet_pt(aligned)
    print('\nResnet time: {:6f} seconds\n'.format(time() - start))

    embs_fromfile = resnet_pt(aligned_fromfile)

    dists = [[(emb - e).norm().item() for e in embs] for emb in embs]
    dists_fromfile = [[(emb - e).norm().item() for e in embs_fromfile] for emb in embs_fromfile]

    print('\nOutput:')
    print(pd.DataFrame(dists, columns=names, index=names))
    print('\nOutput (from file):')
    print(pd.DataFrame(dists_fromfile, columns=names, index=names))
    print('\nExpected:')
    print(pd.DataFrame(expected[i], columns=names, index=names))

    total_error = (torch.tensor(dists) - torch.tensor(expected[i])).norm()
    total_error_fromfile = (torch.tensor(dists_fromfile) - torch.tensor(expected[i])).norm()

    print('\nTotal error: {}, {}'.format(total_error, total_error_fromfile))

    if sys.platform != 'win32':
        assert total_error < 1e-4
        assert total_error_fromfile < 1e-4


#### TEST CLASSIFICATION ####

resnet_pt = InceptionResnetV1(pretrained=ds, classify=True).eval()
prob = resnet_pt(aligned)


#### MULTI-FACE TEST ####

mtcnn = MTCNN(keep_all=True)
img = Image.open('data/multiface.jpg')
boxes, probs = mtcnn.detect(img)

draw = ImageDraw.Draw(img)
for i, box in enumerate(boxes):
    draw.rectangle(box.tolist())

mtcnn(img, save_path='data/tmp.png')


#### MTCNN TYPES TEST ####

img = Image.open('data/multiface.jpg')

mtcnn = MTCNN(keep_all=True)
boxes_ref, _ = mtcnn.detect(img)
_ = mtcnn(img)

mtcnn = MTCNN(keep_all=True).double()
boxes_test, _ = mtcnn.detect(img)
_ = mtcnn(img)

box_diff = boxes_ref[np.argsort(boxes_ref[:,1])] - boxes_test[np.argsort(boxes_test[:,1])]
total_error = np.sum(np.abs(box_diff))
print('\nfp64 Total box error: {}'.format(total_error))

assert total_error < 1e-2


# half is not supported on CPUs, only GPUs
if torch.cuda.is_available():

    mtcnn = MTCNN(keep_all=True, device='cuda').half()
    boxes_test, _ = mtcnn.detect(img)
    _ = mtcnn(img)

    box_diff = boxes_ref[np.argsort(boxes_ref[:,1])] - boxes_test[np.argsort(boxes_test[:,1])]
    print('fp16 Total box error: {}'.format(np.sum(np.abs(box_diff))))

    # test new automatic multi precision to compare
    if hasattr(torch.cuda, 'amp'):
        with torch.cuda.amp.autocast():
            mtcnn = MTCNN(keep_all=True, device='cuda')
            boxes_test, _ = mtcnn.detect(img)
            _ = mtcnn(img)

        box_diff = boxes_ref[np.argsort(boxes_ref[:,1])] - boxes_test[np.argsort(boxes_test[:,1])]
        print('AMP total box error: {}'.format(np.sum(np.abs(box_diff))))


#### MULTI-IMAGE TEST ####

mtcnn = MTCNN(keep_all=True)
img = [
    Image.open('data/multiface.jpg'),
    Image.open('data/multiface.jpg')
]
batch_boxes, batch_probs = mtcnn.detect(img)

mtcnn(img, save_path=['data/tmp1.png', 'data/tmp1.png'])
tmp_files = glob.glob('data/tmp*')
for f in tmp_files:
    os.remove(f)


#### NO-FACE TEST ####

img = Image.new('RGB', (512, 512))
mtcnn(img)
mtcnn(img, return_prob=True)
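Editor's note (not part of the commit): the core pattern this test file exercises is detect-and-crop with MTCNN followed by embedding with InceptionResnetV1. A minimal sketch of that pipeline using the public facenet_pytorch API; the image path and the 'vggface2' weights choice are illustrative assumptions, not taken from the diff:

# Editor's sketch (not from the commit): detect one face, then embed it.
from PIL import Image
import torch
from facenet_pytorch import MTCNN, InceptionResnetV1

mtcnn = MTCNN(image_size=160)                              # detect, align, and crop a single face
resnet = InceptionResnetV1(pretrained='vggface2').eval()   # 512-d face embedding network

img = Image.open('data/test_images/some_person/1.jpg')     # hypothetical path
face = mtcnn(img)                                          # cropped face tensor, or None if no face found
if face is not None:
    with torch.no_grad():
        emb = resnet(face.unsqueeze(0))                    # shape [1, 512]
    print(emb.shape)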