Practical Aspects of OpenCV
--
1. Angle Finder
Code
import cv2
import math
# Image to annotate.  The clicked points are stored in the module-level
# pointsList and shared with the mouse callback / angle functions below.
path = 'test.jpg'
img = cv2.imread(path)  # NOTE(review): None if 'test.jpg' is missing — confirm the file exists
pointsList = []  # [x, y] pairs appended per left-click; every group of 3 defines one angle
def mousePoints(event, x, y, flags, params):
    """Mouse callback: record each left-click and draw it on the image.

    Clicks are grouped in triples (vertex + two ray endpoints).  Every
    click after the first in a triple is joined by a line back to the
    first point of that triple.
    """
    if event != cv2.EVENT_LBUTTONDOWN:
        return
    count = len(pointsList)
    if count != 0 and count % 3 != 0:
        # Anchor is the first point of the current triple.
        anchor = pointsList[round((count - 1) / 3) * 3]
        cv2.line(img, tuple(anchor), (x, y), (0, 0, 255), 2)
    cv2.circle(img, (x, y), 5, (0, 0, 255), cv2.FILLED)
    pointsList.append([x, y])
def gradient(pt1, pt2):
    """Return the slope of the line through pt1 and pt2.

    Each point is an (x, y) pair.  A vertical segment (equal x
    coordinates) has no finite slope; return infinity instead of
    raising ZeroDivisionError, which the original code did whenever
    the user clicked two vertically aligned points.
    """
    dx = pt2[0] - pt1[0]
    dy = pt2[1] - pt1[1]
    if dx == 0:
        # Vertical line: undefined slope — avoid ZeroDivisionError.
        return math.inf
    return dy / dx
def getAngle(pointsList):
    """Compute and draw the angle formed by the last three clicked points.

    pt1 is the vertex; pt2 and pt3 are the endpoints of the two rays.
    The angle (rounded degrees) is written on the global image just
    above-left of the vertex.
    """
    pt1, pt2, pt3 = pointsList[-3:]
    m1 = gradient(pt1, pt2)
    m2 = gradient(pt1, pt3)
    denom = 1 + (m2 * m1)
    if denom == 0:
        # Perpendicular rays: tan formula is undefined, angle is exactly 90°.
        # The original crashed with ZeroDivisionError here.
        angR = math.pi / 2
    else:
        angR = math.atan((m2 - m1) / denom)
    angD = round(math.degrees(angR))
    cv2.putText(img, str(angD), (pt1[0] - 40, pt1[1] - 20),
                cv2.FONT_HERSHEY_COMPLEX, 1.5, (0, 0, 255), 2)
# --- Main display loop -------------------------------------------------
# Create the window and register the mouse callback ONCE; the original
# re-registered the callback on every frame inside the loop.
cv2.namedWindow('Image')
cv2.setMouseCallback('Image', mousePoints)
while True:
    # Once a full triple of points exists, annotate its angle.
    if len(pointsList) % 3 == 0 and len(pointsList) != 0:
        getAngle(pointsList)
    cv2.imshow('Image', img)
    # 'q' clears all points and reloads a clean copy of the image.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        pointsList = []
        img = cv2.imread(path)
Thresholding, Binarization & Adaptive Thresholding
# --- Thresholding, binarization & adaptive thresholding -----------------
# Load the image as single-channel grayscale (flag 0).
# NOTE(review): 'imagePath' is a placeholder filename — replace with a real path.
image = cv2.imread('imagePath', 0)

plt.figure(figsize=(30, 30))
plt.subplot(3, 2, 1)
plt.title("Original")
# Single-channel data: force a gray colormap, otherwise matplotlib
# renders it with its default (viridis) false-color map.
plt.imshow(image, cmap='gray')

# Values below 127 go to 0 (black); everything above goes to 255 (white).
ret, thresh1 = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY)
plt.subplot(3, 2, 2)
plt.title("Threshold Binary")
plt.imshow(thresh1, cmap='gray')

# It's good practice to blur images first, as it removes noise.
image = cv2.GaussianBlur(image, (3, 3), 0)

# Adaptive thresholding: threshold is computed per 3x3 neighbourhood
# (local mean minus the constant 5) instead of one global value.
thresh = cv2.adaptiveThreshold(image, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                               cv2.THRESH_BINARY, 3, 5)
plt.subplot(3, 2, 3)
plt.title("Adaptive Mean Thresholding")
plt.imshow(thresh, cmap='gray')

# Otsu's method picks the global threshold automatically (the 0 passed as
# the threshold argument is ignored when THRESH_OTSU is set).
_, th2 = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
plt.subplot(3, 2, 4)
plt.title("Otsu's Thresholding")
plt.imshow(th2, cmap='gray')

plt.subplot(3, 2, 5)
# Otsu's thresholding after Gaussian filtering.
blur = cv2.GaussianBlur(image, (5, 5), 0)
_, th3 = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
plt.title("Gaussian Otsu's Thresholding")
plt.imshow(th3, cmap='gray')

plt.show()
Edge Detection & Image Gradients
# --- Edge detection & contours ------------------------------------------
# Convert the image to grayscale before edge detection.
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Canny edge detection with hysteresis thresholds 40 (low) / 250 (high).
canny_det_edges = cv2.Canny(img_gray, 40, 250)
# NOTE: the original used typographic quotes (‘ ’), which are a syntax error.
cv2.imshow('Canny Edges', canny_det_edges)
cv2.waitKey(0)
cv2.destroyAllWindows()

# Find only the outermost contours, compressing straight segments.
# NOTE(review): the original referenced an undefined name `edge_dup`;
# the Canny output is the intended edge map here.
contours, hierarchy = cv2.findContours(canny_det_edges, cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)
# drawContours draws in place and returns the annotated image.
k = cv2.drawContours(img, contours, -1, (0, 255, 0), 3)
cv2.imshow('Contours', img)

plt.figure(figsize=(10, 10))
plt.imshow(k)
plt.show()
Identify Contours by Shape
# --- Identify contours by shape ------------------------------------------

def _label_shape(image, cnt, shape_name, fill_color):
    """Fill the contour and write its shape name at the contour centroid."""
    cv2.drawContours(image, [cnt], 0, fill_color, -1)
    M = cv2.moments(cnt)
    if M['m00'] == 0:
        return  # degenerate (zero-area) contour: centroid undefined
    cx = int(M['m10'] / M['m00'])
    cy = int(M['m01'] / M['m00'])
    cv2.putText(image, shape_name, (cx - 50, cy),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)

# NOTE(review): 'sampleImage' is a placeholder filename — replace with a real path.
image = cv2.imread('sampleImage')
# Compute grayscale from the ORIGINAL BGR image.  The original code
# converted the already-RGB image with COLOR_BGR2GRAY, which swaps the
# red/blue channel weights and yields a slightly wrong gray image.
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # RGB for matplotlib display

plt.figure(figsize=(20, 20))
plt.subplot(2, 2, 1)
plt.title("Original")
plt.imshow(image)

# THRESH_BINARY_INV (the magic `1` in the original): shapes darker than
# 127 become white foreground for contour extraction.
ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV)

# Extract all contours, keeping every boundary point.
contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_LIST,
                                       cv2.CHAIN_APPROX_NONE)
for cnt in contours:
    # Approximate the contour by a polygon (1% of arc length tolerance);
    # the vertex count classifies the shape.
    approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
    vertices = len(approx)
    if vertices == 3:
        _label_shape(image, cnt, "Triangle", (0, 255, 0))
    elif vertices == 4:
        # boundingRect returns top-left corner plus width and height;
        # near-equal sides mean a square, otherwise a rectangle.
        x, y, w, h = cv2.boundingRect(cnt)
        if abs(w - h) <= 3:
            _label_shape(image, cnt, "Square", (0, 125, 255))
        else:
            _label_shape(image, cnt, "Rectangle", (0, 0, 255))
    elif vertices == 10:
        _label_shape(image, cnt, "Star", (255, 255, 0))
    elif vertices >= 15:
        _label_shape(image, cnt, "Circle", (0, 255, 255))

plt.subplot(2, 2, 2)
plt.title("Identifying Shapes")
plt.imshow(image)
Scale Invariant Feature Transform (SIFT)
Keypoints are a concept you should be aware of when working with images. These are basically the points of interest in an image. Keypoints are analogous to the features of a given image.
They are locations that define what is interesting in the image. Keypoints are important, because no matter how the image is modified (rotation, shrinking, expanding, distortion), we will always find the same keypoints for the image.
Scale Invariant Feature Transform (SIFT) is a very popular keypoint detection algorithm. It consists of the following steps:
- Scale-space extrema detection
- Keypoint localization
- Orientation assignment
- Keypoint descriptor
- Keypoint matching