Step Linking
Back to [[2014]].
This Python script reads in the particle lists that the Python image processing step generates for each image, and outputs a list of steps in "steps.txt" together with images of the particle associations (in pixel units) found between consecutive frames, saved as "matches*.png". Starting in version 1.1, the matches*.png figures are color coded to show which steps are flagged as outliers, so you can check whether they are misidentified associations or actual steps. The associations are also numbered so you can match them to the entries in the steps.txt list.
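For reference, each per-image text file is expected to hold one particle per line with three whitespace-separated columns: two position coordinates and a pixel count, which is how the scripts below index p1[0], p1[1], and p1[2] (the plotting code treats p[0] as the vertical coordinate and p[1] as the horizontal one). The following minimal sketch, with invented numbers rather than real data, shows the association test that gets applied to one pair of particles from consecutive frames:
<pre>
# Minimal sketch of the association test used below.
# The particle values are invented for illustration, not real data.
import numpy as np

min_pix = 40       #same cuts as in the full scripts below
max_dist = 60
max_pix_diff = 10

# Assumed column order: [vertical position, horizontal position, pixel count]
p1 = np.array([310.0, 512.0, 55.0])   #particle in image i
p2 = np.array([318.0, 505.0, 54.0])   #candidate particle in image i+1

if p1[2] >= min_pix and p2[2] >= min_pix:
    dist = np.sqrt((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2)
    if dist <= max_dist and abs(p1[2]-p2[2]) <= max_pix_diff:
        print "Associated; step (dy, dx) =", p1[0]-p2[0], p1[1]-p2[1]
</pre>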
----
Version 1.11 (No longer reads in steps.txt together with data)
<pre>
# -*- coding: utf-8 -*-
import numpy as np
import pylab as plt
from glob import glob
import os

#Adjustable parameters:
image_width=1600
image_height=1200
min_pix = 40        #minimum number of pixels a particle must have
max_dist = 60       #particle association radius (pixel units)
max_pix_diff = 10   #maximum difference in pixel count in particle association
max_num_steps = 120 #maximum number of steps to find (b/f removing outliers)
outlier_threshold = 35 #maximum dx or dy allowed for a step; consult
                       #xy_step_histogram.png & variance calculation.
                       #For ~100 steps a 3.5 sigma cut seems reasonable.

#Read in the particle locations and pixel count for all images
#(the glob pattern skips files whose names start with 's','t','e', or 'p',
# which keeps the steps.txt output of a previous run out of the input list)
filelist = np.array(glob('[!steps]*.txt'))
print filelist
particle_lists=[] #list of lists for all images
for filename in filelist:
    particle_lists.append(np.loadtxt(filename))

dx = []
dy = []
nparticles=0
for i in range(len(particle_lists)-1):
    matches = []
    print "Processing steps between %s and %s." % (filelist[i], filelist[i+1])
    for p1 in particle_lists[i]:
        displacements = []
        diffs = []
        p2s = []
        if p1[2] < min_pix: #Skip if pixels in p1 are too few
            continue
        for p2 in particle_lists[i+1]:
            if p2[2] <= min_pix: #Skip if pixels in p2 are too few
                continue
            dist = np.sqrt((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2)
            if dist <= max_dist: # Are p1 and p2 associated?
                displacements.append([(p1[0]-p2[0]),(p1[1]-p2[1])])
                diffs.append(abs(p1[2]-p2[2]))
                p2s.append(p2)
        #Veto p1 if it has no associations or if it has more than one
        if len(displacements)==0 or len(displacements) > 1:
            #print displacements
            continue
        if diffs[0] > max_pix_diff:
            continue
        #If the particle 2 has already been matched to another p1 veto both p1s
        double_match=False
        for k in range(len(matches)):
            if (p2s[0][0] == matches[k][0]) and (p2s[0][1] == matches[k][1]):
                matches.pop(k)
                double_match=True
                break
        if double_match: continue
        #Otherwise we've found a good association! Record the displacements.
        matches.append(p2s[0])
        dy.append(displacements[0][0])
        dx.append(displacements[0][1])
        #Plot the association in a scatter plot "matches*.png"
        dist = np.sqrt(displacements[0][0]**2 + displacements[0][1]**2)
        if dist < outlier_threshold:
            nparticles+=1
            plt.scatter([p1[1], (p1[1]-dx[-1])], [image_height-p1[0], image_height-(p1[0]-dy[-1])], color='blue')
            plt.plot([p1[1], (p1[1]-dx[-1])], [image_height-p1[0], image_height-(p1[0]-dy[-1])], color='blue')
            plt.text(p1[1]+5, image_height-p1[0]+5, "%d" % nparticles)
        else: #Plot outliers but don't number them b/c we want to reference the output list from the figure
            plt.scatter([p1[1], (p1[1]-dx[-1])], [image_height-p1[0], image_height-(p1[0]-dy[-1])], color='red')
            plt.plot([p1[1], (p1[1]-dx[-1])], [image_height-p1[0], image_height-(p1[0]-dy[-1])], color='red')
        plt.xlim([0,image_width])
        plt.ylim([0,image_height])
        if len(dx)>max_num_steps: #Stop after we have max_num_steps
            break
    plt.savefig('matches_%s_%s.png' % (filelist[i].split('.')[0], filelist[i+1].split('.')[0]))
    plt.clf()
    if len(dx)>max_num_steps: #Stop after we have max_num_steps
        break

#Convert from list type to numpy array
dx = np.array(dx)
dy = np.array(dy)
print "Standard deviation in dx:", dx.std()
print "Standard deviation in dy:", dy.std()

#Plot histograms
plt.hist(dx, bins=15, range=[-max_dist,max_dist], histtype='step', color='blue', label=r'$\Delta$x')
plt.hist(dy, bins=15, range=[-max_dist,max_dist], histtype='step', color='red', label=r'$\Delta$y')
plt.xlabel('Distance (pixels)')
plt.legend()
plt.savefig('xy_step_histogram.png')
plt.clf()

#Remove outlier steps: require dx AND dy be less than outlier threshold
inds = np.where((abs(dx)<outlier_threshold)*(abs(dy)<outlier_threshold))
dx = dx[inds]
dy = dy[inds]
print "%d steps remain after outlier cut." % len(dx)
print "New standard deviation in dx:", dx.std()
print "New standard deviation in dy:", dy.std()

#Write out steps:
np.savetxt('steps.txt', np.array([dx,dy]).transpose())
</pre>
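The steps.txt file written at the end is a two-column text file, dx then dy in pixel units, one row per accepted step (see the np.savetxt call above). A quick way to sanity-check the output is to load it back in and look at the per-axis spread; this is just a usage sketch, not part of the script itself:
<pre>
# Usage sketch: read steps.txt back in and summarize the accepted steps.
import numpy as np

dx, dy = np.loadtxt('steps.txt', unpack=True)
print "Number of steps:", len(dx)
print "Mean step (dx, dy):", dx.mean(), dy.mean()
print "Std dev (dx, dy):", dx.std(), dy.std()
print "RMS step length (pixels):", np.sqrt(np.mean(dx**2 + dy**2))
</pre>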
----
Version 1.1 (Some improvements and bug fixes pertaining to particular cases of user input.)
<pre>
# -*- coding: utf-8 -*-
import numpy as np
import pylab as plt
from glob import glob

#Adjustable parameters:
image_width=1600
image_height=1200
min_pix = 40        #minimum number of pixels a particle must have
max_dist = 60       #particle association radius (pixel units)
max_pix_diff = 10   #maximum difference in pixel count in particle association
max_num_steps = 120 #maximum number of steps to find (b/f removing outliers)
outlier_threshold = 35 #maximum dx or dy allowed for a step; consult
                       #xy_step_histogram.png & variance calculation.
                       #For ~100 steps a 3.5 sigma cut seems reasonable.

#Read in the particle locations and pixel count for all images
filelist = glob('*.txt')
particle_lists=[] #list of lists for all images
for filename in filelist:
    particle_lists.append(np.loadtxt(filename))

dx = []
dy = []
nparticles=0
for i in range(len(particle_lists)-1):
    matches = []
    print "Processing steps between %s and %s." % (filelist[i], filelist[i+1])
    for p1 in particle_lists[i]:
        displacements = []
        diffs = []
        p2s = []
        if p1[2] < min_pix: #Skip if pixels in p1 are too few
            continue
        for p2 in particle_lists[i+1]:
            if p2[2] <= min_pix: #Skip if pixels in p2 are too few
                continue
            dist = np.sqrt((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2)
            if dist <= max_dist: # Are p1 and p2 associated?
                displacements.append([(p1[0]-p2[0]),(p1[1]-p2[1])])
                diffs.append(abs(p1[2]-p2[2]))
                p2s.append(p2)
        #Veto p1 if it has no associations or if it has more than one
        if len(displacements)==0 or len(displacements) > 1:
            #print displacements
            continue
        if diffs[0] > max_pix_diff:
            continue
        #If the particle 2 has already been matched to another p1 veto both p1s
        double_match=False
        for k in range(len(matches)):
            if (p2s[0][0] == matches[k][0]) and (p2s[0][1] == matches[k][1]):
                matches.pop(k)
                double_match=True
                break
        if double_match: continue
        #Otherwise we've found a good association! Record the displacements.
        matches.append(p2s[0])
        dy.append(displacements[0][0])
        dx.append(displacements[0][1])
        #Plot the association in a scatter plot "matches*.png"
        dist = np.sqrt(displacements[0][0]**2 + displacements[0][1]**2)
        if dist < outlier_threshold:
            nparticles+=1
            plt.scatter([p1[1], (p1[1]-dx[-1])], [image_height-p1[0], image_height-(p1[0]-dy[-1])], color='blue')
            plt.plot([p1[1], (p1[1]-dx[-1])], [image_height-p1[0], image_height-(p1[0]-dy[-1])], color='blue')
            plt.text(p1[1]+5, image_height-p1[0]+5, "%d" % nparticles)
        else: #Plot outliers but don't number them b/c we want to reference the output list from the figure
            plt.scatter([p1[1], (p1[1]-dx[-1])], [image_height-p1[0], image_height-(p1[0]-dy[-1])], color='red')
            plt.plot([p1[1], (p1[1]-dx[-1])], [image_height-p1[0], image_height-(p1[0]-dy[-1])], color='red')
        plt.xlim([0,image_width])
        plt.ylim([0,image_height])
        if len(dx)>max_num_steps: #Stop after we have max_num_steps
            break
    plt.savefig('matches_%s_%s.png' % (filelist[i].split('.')[0], filelist[i+1].split('.')[0]))
    plt.clf()
    if len(dx)>max_num_steps: #Stop after we have max_num_steps
        break

#Convert from list type to numpy array
dx = np.array(dx)
dy = np.array(dy)
print "Standard deviation in dx:", dx.std()
print "Standard deviation in dy:", dy.std()

#Plot histograms
plt.hist(dx, bins=15, range=[-max_dist,max_dist], histtype='step', color='blue', label=r'$\Delta$x')
plt.hist(dy, bins=15, range=[-max_dist,max_dist], histtype='step', color='red', label=r'$\Delta$y')
plt.xlabel('Distance (pixels)')
plt.legend()
plt.savefig('xy_step_histogram.png')
plt.clf()

#Remove outlier steps: require dx AND dy be less than outlier threshold
inds = np.where((abs(dx)<outlier_threshold)*(abs(dy)<outlier_threshold))
dx = dx[inds]
dy = dy[inds]
print "%d steps remain after outlier cut." % len(dx)
print "New standard deviation in dx:", dx.std()
print "New standard deviation in dy:", dy.std()

#Write out steps:
np.savetxt('steps.txt', np.array([dx,dy]).transpose())
</pre>
----
Version 1.0 (Original version)
<pre>
# -*- coding: utf-8 -*-
import numpy as np
import pylab as plt
from glob import glob

#Adjustable parameters:
image_width=1600
image_height=1200
min_pix = 40        #minimum number of pixels a particle must have
max_dist = 60       #particle association radius (pixel units)
max_pix_diff = 10   #maximum difference in pixel count in particle association
max_num_steps = 120 #maximum number of steps to find (b/f removing outliers)
outlier_threshold = 35 #maximum dx or dy allowed for a step; consult
                       #xy_step_histogram.png & variance calculation.
                       #For ~100 steps a 3.5 sigma cut seems reasonable.

#Read in the particle locations and pixel count for all images
filelist = glob('*.txt')
particle_lists=[] #list of lists for all images
for filename in filelist:
    particle_lists.append(np.loadtxt(filename))

dx = []
dy = []
for i in range(len(particle_lists)-1):
    print "Processing steps between %s and %s." % (filelist[i], filelist[i+1])
    for p1 in particle_lists[i]:
        displacements = []
        if p1[2] < min_pix: #Skip if pixels in p1 are too few
            continue
        for p2 in particle_lists[i+1]:
            if p2[2] <= min_pix: #Skip if pixels in p2 are too few
                continue
            if abs(p1[2]-p2[2])>max_pix_diff:
                continue
            dist = np.sqrt((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2)
            if dist <= max_dist: # Are p1 and p2 associated?
                displacements.append([(p1[0]-p2[0]),(p1[1]-p2[1])])
        #Veto p1 if it has no associations or if it has more than one
        if len(displacements)==0 or len(displacements) > 1:
            #print displacements
            continue
        #Otherwise we've found a good association! Record the displacements.
        dy.append(displacements[0][0])
        dx.append(displacements[0][1])
        #Plot the association in a scatter plot "matches*.png"
        plt.scatter([p1[1], (p1[1]-dx[-1])], [image_height-p1[0], image_height-(p1[0]-dy[-1])])
        plt.xlim([0,image_width])
        plt.ylim([0,image_height])
        if len(dx)>max_num_steps: #Stop after we have max_num_steps
            break
    if len(dx)>max_num_steps: #Stop after we have max_num_steps
        break
    plt.savefig('matches_%s_%s.png' % (filelist[i].split('.')[0], filelist[i+1].split('.')[0]))
    plt.clf()
plt.clf()

#Convert from list type to numpy array
dx = np.array(dx)
dy = np.array(dy)
print "Standard deviation in dx:", dx.std()
print "Standard deviation in dy:", dy.std()

#Plot histograms
plt.hist(dx, bins=15, range=[-max_dist,max_dist], histtype='step', color='blue', label=r'$\Delta$x')
plt.hist(dy, bins=15, range=[-max_dist,max_dist], histtype='step', color='red', label=r'$\Delta$y')
plt.xlabel('Distance (pixels)')
plt.legend()
plt.savefig('xy_step_histogram.png')
plt.clf()

#Remove outlier steps: require dx AND dy be less than outlier threshold
inds = np.where((abs(dx)<outlier_threshold)*(abs(dy)<outlier_threshold))
dx = dx[inds]
dy = dy[inds]
print "%d steps remain after outlier cut." % len(dx)

#Write out steps:
np.savetxt('steps.txt', np.array([dx,dy]).transpose())
</pre>