AI Blitz XIII
Face Mask Detection using Detecto (PyTorch), 98.1 AP
Bounding-box detection and classification of face masks for AI Blitz XIII
Face Mask Bounding-Box Detection using Object Detection
Official Documentation: https://detecto.readthedocs.io/en/latest/index.html
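Detecto is a small wrapper around torchvision's Faster R-CNN: given a folder of images and a CSV of box labels, it handles model construction, training, and prediction in a few lines, which is the workflow this notebook follows.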
In [2]:
!pip install detecto==1.1.6
In [3]:
import os
import time
import ast
import random
import shutil
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
from IPython.display import clear_output
import cv2
import sklearn
from detecto import core, utils
from torchvision import transforms
from detecto.visualize import show_labeled_image
from detecto.utils import read_image
In [ ]:
os.makedirs('train', exist_ok=True)  # local folder to collect the training images
In [ ]:
base="../input/face-mask-bbox/train/"
for iname in tqdm(os.listdir(base)):
src = base+iname
dst = "./train/"+iname
shutil.copy(src, dst)
Annotation processing
Importing annotations from CSV
In [4]:
train_df = pd.read_csv("../input/face-mask-bbox/train.csv")
val_df = pd.read_csv("../input/face-mask-bbox/val.csv")
print(train_df)
print()
print(val_df)
In [ ]:
base = "../input/face-mask-bbox/val/"
for iname in tqdm(val_df['ImageID']):
src = base+iname+".jpg"
dst = "./train/"+iname+".jpg"
shutil.copy(src, dst)
In [ ]:
train_df = pd.concat([train_df, val_df[:-10]])  # fold most of the validation rows into training
val_df = val_df[2000:]
print(train_df)
print()
print(val_df)
In [5]:
def df2labels(df):
    """Flatten the competition annotations into Detecto-style label rows."""
    labels_lst = []
    for idx in range(len(df)):
        row = list(df.iloc[idx])
        fname = row[0] + ".jpg"          # ImageID -> filename
        wd, ht = 512, 512                # hard-coded image size used by this dataset
        cl = row[2]                      # mask type (class label)
        bbox = ast.literal_eval(row[1])  # bbox stored as a string, e.g. "[xmin, ymin, xmax, ymax]"
        xmin, ymin, xmax, ymax = bbox
        labels_lst.append([fname, wd, ht, cl, xmin, ymin, xmax, ymax])
    labels_df = pd.DataFrame(labels_lst)
    labels_df.columns = ["filename", "width", "height", "class", "xmin", "ymin", "xmax", "ymax"]
    return labels_df
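Detecto's Dataset reads labels from a CSV with exactly these columns (filename, width, height, class, xmin, ymin, xmax, ymax), so the helper above converts each annotation into that layout. The hard-coded 512x512 width and height assume every image in this dataset has that size.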
In [6]:
train_labels = df2labels(train_df)
val_labels = df2labels(val_df)
print(train_labels.head(), "\n")
print(val_labels.head())
In [7]:
classes = sorted(set(train_labels['class']))  # sorted for a deterministic class order
print(classes)
sns.countplot(x=train_labels['class'])
plt.show()
Verifying Labels
In [ ]:
train_images_path = "./train"
val_images_path = "../input/face-mask-bbox/val/"
def verify_labels(images_path,input_labels):
df=input_labels
drop_lst=[]
for i in tqdm(range(len(df))):
try:
imgname=df['filename'].iloc[i]
fname=images_path+"/"+imgname
img=cv2.imread(fname)
if len(img)<1:
pass
except Exception as e:
print("Error", e)
drop_lst.append(imgname)
print("Error File : ", fname)
continue
if len(drop_lst)==0:
print("\nLabels verified - None dropped")
else:
print("\nDiscrepancies Found -",len(drop_lst))
print(drop_lst)
for fname in drop_lst:
df=df.drop(df[df['filename']==fname].index)
print("Labels dropped :", len(drop_lst),"\n")
return df
train_df=verify_labels(train_images_path,train_labels)
val_df=verify_labels(val_images_path,val_labels)
In [ ]:
# !nvidia-smi
Train-Test Split
In [8]:
#from sklearn.model_selection import train_test_split
#train_labels, valid_labels = train_test_split(labels_df, test_size=0.15, random_state=42)
train_labels.to_csv("train_labels.csv", index=False)
val_labels.to_csv("val_labels.csv", index=False)
Transforms
In [9]:
custom_transforms = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize(324),
    transforms.RandomHorizontalFlip(),
    transforms.ColorJitter(saturation=0.2),
    transforms.ToTensor(),
    utils.normalize_transform(),
])
In [11]:
train_images_path = "../input/face-mask-bbox/train"
val_images_path = "../input/face-mask-bbox/val/"
print("Train Images Count :", len(os.listdir(train_images_path)))
print("Val Images Count :", len(os.listdir(val_images_path)))
Training Dataset Prep
In [12]:
from detecto.core import Model, Dataset, DataLoader

train_dataset = Dataset(label_data='train_labels.csv',
                        image_folder=train_images_path,
                        transform=custom_transforms)
valid_dataset = Dataset(label_data='val_labels.csv',
                        image_folder=val_images_path,
                        transform=custom_transforms)
In [14]:
samples_count = 1
for _ in range(samples_count):
    rand_idx = random.randint(0, len(train_dataset) - 1)
    image, targets = train_dataset[rand_idx]
    print(rand_idx, targets)
    show_labeled_image(image, targets['boxes'], targets['labels'])
    time.sleep(1)
    clear_output(wait=True)
Model Training
In [ ]:
batch_size = 16
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
valid_loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=False)
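Detecto's DataLoader is a thin subclass of torch.utils.data.DataLoader with a collate function that handles a varying number of boxes per image, so the loaders above can be passed to model.fit in place of the datasets themselves.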
In [ ]:
# To resume from a previously saved checkpoint instead of training from scratch:
# model_save_path = '/home/advenio/Desktop/Hemanth/OD_segmentation/'
# model = Model.load(model_save_path + "OD_seg_pool_v1.pth", ['od'])

# Create a new model (a pretrained Faster R-CNN with a head for the dataset's classes)
model = core.Model(classes)
model.fit(dataset=train_loader,
          val_dataset=valid_loader,
          epochs=7,
          learning_rate=0.0003,
          verbose=True)
In [ ]:
losses = model.fit(dataset=train_loader,
                   val_dataset=valid_loader,
                   epochs=6,
                   learning_rate=0.0001,
                   verbose=True)
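When a validation loader is supplied, model.fit returns the average validation loss per epoch, so the losses list captured above can be plotted to check that the lower-learning-rate run is still improving. A minimal sketch:
In [ ]:
# Sketch: plot the per-epoch validation losses returned by the fit() call above.
plt.plot(losses)
plt.xlabel("Epoch")
plt.ylabel("Validation loss")
plt.show()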
Inference
In [15]:
test_df = pd.read_csv("../input/face-mask-bbox/test.csv")
test_df.head()
In [ ]:
test_images_path = "../input/face-mask-bbox/test/"
for idx in tqdm(range(len(test_df))):
    try:
        iname = test_df.iloc[idx, 0] + ".jpg"
        fname = test_images_path + iname
        image = utils.read_image(fname)
        mask_prediction = model.predict(image)  # (labels, boxes, scores), highest score first
        label = mask_prediction[0][0]            # top-scoring label
        bbox = str(list(map(int, mask_prediction[1][0])))  # top box as "[xmin, ymin, xmax, ymax]"
        test_df.iloc[idx, 1] = bbox
        test_df.iloc[idx, 2] = label
    except Exception as e:
        # If an image fails to load or predict, reuse the previous prediction as a fallback
        print("Error on", test_df.iloc[idx, 0], ":", e)
        test_df.iloc[idx, 1] = bbox
        test_df.iloc[idx, 2] = label
        continue
test_df.head()
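Before writing the submission it can help to eyeball one prediction. This is an optional sketch; index 0 is arbitrary, it assumes at least one box is detected, and only the top-scoring box is drawn.
In [ ]:
# Sketch: visualize the highest-scoring predicted box for one test image.
sample_name = test_df.iloc[0, 0] + ".jpg"
sample_image = utils.read_image(test_images_path + sample_name)
pred_labels, pred_boxes, pred_scores = model.predict(sample_image)
show_labeled_image(sample_image, pred_boxes[:1], [pred_labels[0]])
print(pred_labels[0], pred_scores[0])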
In [ ]:
test_df.to_csv("submission2.csv", index=False)