Starship Detection
Solution for submission 152422
A detailed solution for submission 152422, submitted for the Starship Detection challenge.
Loading the dataset
In [1]:
!pip install aicrowd-cli
%load_ext aicrowd.magic
In [2]:
%aicrowd login
In [3]:
# Downloading the Dataset
!rm -rf data
!mkdir data
%aicrowd ds dl -c starship-detection -o data
In [4]:
!unzip data/data.zip -d /content/images >> /dev/null
Actual code starts from here
In [5]:
# Image Reading & Preprocessing
from PIL import Image, ImageDraw
import cv2
import random
import matplotlib.pyplot as plt
import numpy as np
# Misc.
import pandas as pd
from tqdm.notebook import tqdm
import os
from natsort import natsorted
In [42]:
# Edge detection + contour extraction with brightness-adaptive Canny thresholds
def load_image_n_transform(img):
    np_img = np.array(img)
    gray = cv2.cvtColor(np_img, cv2.COLOR_BGR2GRAY)
    if gray.mean() > 200:
        # Bright / washed-out image: use a tighter threshold band
        canny = cv2.Canny(gray, 120, 150)
    else:
        # Darker image: use a wider threshold band
        canny = cv2.Canny(gray, 50, 200)
    contours, hierarchy = cv2.findContours(canny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    # Sort contours by area (ascending), so the largest ones are at the end
    contours = sorted(contours, key=cv2.contourArea)
    return contours
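The helper above adapts the Canny thresholds to the overall brightness of the frame: washed-out images (mean grey level above 200) get the tight 120-150 band, darker ones the wider 50-200 band, and the contours come back sorted by area so the largest ones sit at the end of the list. Below is a minimal sketch for eyeballing the intermediate edge map on one image; the file name images/1.jpg is only an example and is assumed to exist after the unzip step above.
In [ ]:
# Sketch: visualize the edge map that feeds the contour search
sample = Image.open("images/1.jpg")                       # example file, assumed to exist
gray = cv2.cvtColor(np.array(sample), cv2.COLOR_BGR2GRAY)
low, high = (120, 150) if gray.mean() > 200 else (50, 200)
edges = cv2.Canny(gray, low, high)

fig, axes = plt.subplots(1, 2, figsize=(10, 4))
axes[0].imshow(sample)
axes[0].set_title("Input")
axes[1].imshow(edges, cmap="gray")
axes[1].set_title(f"Canny edges ({low}, {high})")
plt.show()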
In [43]:
def get_parameters(contours, extent):
    '''
    Collect the bounding boxes of the `extent` largest contours
    (contours are sorted by area, so the list is walked from the end).
    '''
    bounds = min(len(contours), extent)
    x_axis, y_axis, width, height = [], [], [], []
    for i in range(1, bounds + 1):
        x, y, w, h = cv2.boundingRect(contours[-i])
        x_axis.append(x)
        y_axis.append(y)
        width.append(w)
        height.append(h)
    return (x_axis, y_axis, width, height)

def get_optimal_parameters(contours, extent):
    '''
    Merge the individual boxes into one enclosing box,
    returned as [x_min, y_min, x_max, y_max].
    '''
    x_parameter, y_parameter, width_param, height_param = map(np.array, get_parameters(contours, extent))
    x = np.min(x_parameter)
    y = np.min(y_parameter)
    width = np.max(x_parameter + width_param) - x
    height = np.max(y_parameter + height_param) - y
    return [x, y, x + width, y + height]
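get_optimal_parameters therefore returns the smallest axis-aligned box that encloses the individual boxes of the `extent` largest contours. An equivalent, more compact formulation (a sketch, not part of the submitted code) stacks the kept contour points and lets OpenCV compute a single rectangle:
In [ ]:
# Sketch: union bounding box of the `extent` largest contours in one call
def union_bounding_box(contours, extent):
    largest = contours[-min(len(contours), extent):]  # contours are sorted by area (ascending)
    points = np.vstack(largest)                       # stack all points of the kept contours
    x, y, w, h = cv2.boundingRect(points)
    return [x, y, x + w, y + h]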
In [52]:
# Spot-check the pipeline on 20 random images
for i in range(20):
    image_ind = str(random.choice(range(1, 10000 + 1)))
    print(f'Reading image {image_ind}')
    img = Image.open(f"images/{image_ind}.jpg")
    contours = load_image_n_transform(img)
    parameters = get_optimal_parameters(contours, 13)
    # Drawing the predicted box on a copy of the image
    draw_img = img.copy()
    draw = ImageDraw.Draw(draw_img)
    draw.rectangle(parameters, outline="red")
    plt.imshow(draw_img)
    plt.show()
In [53]:
# List the test images in natural (numeric) order
test_imgs = natsorted(os.listdir("images"))
print(len(test_imgs))
In [54]:
# Function to generate the bounding boxes
def gen_bounding_boxes(img):
    # Converting the image to a numpy array
    img = np.array(img)
    try:
        # Return the enclosing bounding box of the largest contours
        contours = load_image_n_transform(img)
        parameters = get_optimal_parameters(contours, 13)
        x = parameters[0]
        y = parameters[1]
        w = parameters[2] - x
        h = parameters[3] - y
    except Exception:
        # Fall back to a dummy box if no usable contours were found
        x, y, w, h = [1, 1, 1, 1]
    return x, y, w, h
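A quick spot check of the wrapped helper on a single image; the file name is only an example.
In [ ]:
# Sketch: run the helper on one image and print the resulting (x, y, w, h) box
sample = Image.open("images/1.jpg")   # example file, assumed to exist
print(gen_bounding_boxes(sample))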
In [55]:
bboxes = []
image_ids = []
# Go through each test image
for img_name in tqdm(test_imgs):
    # Reading the test image
    img = Image.open(os.path.join("images", img_name))
    # Generating the bounding box
    x, y, w, h = gen_bounding_boxes(img)
    # Adding the bounding box and image id
    bboxes.append([x, y, w, h])
    image_ids.append(int(img_name.split(".")[0]))
In [56]:
# Adding the image id and bounding boxes to a dataframe
df = pd.DataFrame({"ImageID":image_ids, "bbox":bboxes})
df = df.sort_values("ImageID").reset_index(drop=True)
df
Out[56]:
In [57]:
!rm -rf assets
!mkdir assets
df.to_csv(os.path.join("assets", "submission.csv"), index=False)
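Before submitting, it can help to read the file back and confirm that every test image got exactly one row; this is only a sketch, and the column layout checked here is simply the one written above.
In [ ]:
# Sketch: sanity-check the submission file written above
check = pd.read_csv(os.path.join("assets", "submission.csv"))
assert len(check) == len(test_imgs), "expected one row per test image"
assert list(check.columns) == ["ImageID", "bbox"], "unexpected column names"
check.head()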