Load an image and create a generator of a random walk trajectory.
from nbdev.showdoc import *

Basic functions

show_img[source]

show_img(im, figsize=None, ax=None, alpha=None)

Show a numpy image

sliceImage[source]

sliceImage(img, size, pos)

Take a small rectangular slice of a larger image img

The other args are the size and position of the slice. If the position overlaps an edge the center is adjusted. The slice and actual position are returned.

rotate[source]

rotate(image, angle, center=None, scale=1.0)

Rotate an image, about center and optionally scale

get_image_patch[source]

get_image_patch(img, pos, size=[400, 600], rot=0, scale=1.0)

get smaller image patch from img

class Trajectory[source]

Trajectory(img, length, xyscale=1, zscale=0.1, zcycles=3, acycles=3, deg=5, startpos=[1000, 1000])

Creates a random walk trajectory of length steps based on the img

Examples

make a trajectory of 5 steps

  
f = Trajectory(np.asarray(PIL.Image.open("Yosemite_NP_M.jpg")), 5)

for i in f:
    # print(next(f)[1]) 
    print(i[1]) 
    
(1000, 1000, 2.5)
(1000, 1000, 0.0)
(1001, 1000, 2.500000000000001)
(1002, 1000, 5.0)
(1003, 1000, 2.4999999999999982)

make a trajectory of 50 steps and plot xy

f = Trajectory(np.asarray(PIL.Image.open("Yosemite_NP_M.jpg")), 50)
plt.plot(f.xyza[:,0], f.xyza[:,1])
plt.legend(['Ground Truth XY trajectory'])
plt.title('2d XY')
plt.show()
# Show the last image
ax = show_img(i[0])
t = ax.set_title('The last image')

mark_key_points[source]

mark_key_points(cur_img, key_pts)

def new_track_image_keypnts(img, trajectory, i):
    """Advance to step ``i`` of ``trajectory`` and track inter-frame motion.

    Fetches the current and previous frames via ``next_image``, estimates the
    motion between them with ``track_features``, and draws the tracked key
    points on the current frame.

    Returns a 4-tuple: (marked image, true move, calculated move, key points).
    """
    cur_img, last_img, move_true = next_image(img, trajectory, i)
    move_calc, key_pts = track_features(cur_img, last_img)
    marked = mark_key_points(cur_img, key_pts)
    return marked, move_true, move_calc, key_pts
def track_features(img0, img1):
    """Estimate the rigid 2D motion (translation + rotation) between two frames.

    Detects corner features in ``img0``, tracks them into ``img1`` with
    pyramidal Lucas-Kanade optical flow, and fits a partial affine transform
    to the surviving point correspondences.

    Parameters
    ----------
    img0 : BGR image (numpy array) — the reference frame.
    img1 : BGR image (numpy array) — the frame to track into.

    Returns
    -------
    [dx, dy, da] : x/y translation in pixels and rotation angle in radians.
    img0_pts : the feature points detected in ``img0`` that were tracked.

    Raises
    ------
    ValueError
        If no trackable features are found or the transform cannot be
        estimated from the tracked points.
    """
    img0_gray = cv2.cvtColor(img0, cv2.COLOR_BGR2GRAY)
    # Detect corner features in the reference frame
    img0_pts = cv2.goodFeaturesToTrack(img0_gray,
                                       maxCorners=200,
                                       qualityLevel=0.2,
                                       minDistance=30,
                                       blockSize=5)
    if img0_pts is None:
        # goodFeaturesToTrack returns None (not an empty array) when no
        # corner passes the quality threshold; fail with a clear message
        # instead of letting calcOpticalFlowPyrLK crash on a None input.
        raise ValueError("No trackable features found in the reference frame")

    # Convert the second frame to grayscale
    img1_gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)

    # Track the feature points into the second frame (pyramidal Lucas-Kanade)
    img1_pts, status, err = cv2.calcOpticalFlowPyrLK(img0_gray, img1_gray, img0_pts, None)

    # Sanity check — LK returns one output point per input point
    assert img0_pts.shape == img1_pts.shape

    # Keep only the points that were successfully tracked (status == 1)
    idx = np.where(status == 1)[0]
    img0_pts = img0_pts[idx]
    img1_pts = img1_pts[idx]

    # Fit a partial affine (rotation + uniform scale + translation) transform.
    # Available in OpenCV >= 3.2; replaces the removed estimateRigidTransform.
    (m, inliers) = cv2.estimateAffinePartial2D(img0_pts, img1_pts)
    if m is None:
        # Estimation fails (returns None) when too few valid correspondences
        # survive the RANSAC inlier selection.
        raise ValueError("Could not estimate the inter-frame transform")

    # Translation is the last column of the 2x3 affine matrix
    dx = m[0, 2]
    dy = m[1, 2]

    # Rotation angle recovered from the rotation part of the matrix
    da = np.arctan2(m[1, 0], m[0, 0])
    # print("Tracked points : " + str(len(img0_pts)))

    return [dx, dy, da], img0_pts