# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_analyze3dmarkers(self):
    """Recover a known translation and orientation from simulated 3D marker data."""
    t = np.arange(0, 10, 0.1)
    # Linear drift along the (1, 1, 0) direction, one row per time sample
    translation = (np.c_[[1, 1, 0]] * t).T

    # Three markers forming a triangle, centered on their centroid
    M = np.array([[0., 0., 0.],
                  [1., 0., 0.],
                  [1., 1., 0.]])
    M -= np.mean(M, axis=0)

    # Rotation about the z-axis at 100 deg/s, expressed as quaternion vectors
    q = np.vstack((np.zeros_like(t),
                   np.zeros_like(t),
                   quat.deg2quat(100 * t))).T

    # Move each marker through the simulated rotation + translation
    moved = [vector.rotate_vector(marker, q) + translation for marker in M]
    data = np.hstack(moved)

    (pos, ori) = markers.analyze3Dmarkers(data, data[0])

    # The analysis must reproduce the simulated movement exactly
    self.assertAlmostEqual(np.max(np.abs(pos - translation)), 0)
    self.assertAlmostEqual(np.max(np.abs(ori - q)), 0)
def test_rotate_vector(self):
    """Rotating the identity basis about z gives the known rotation matrix."""
    basis = [[1, 0, 0],
             [0, 1, 0],
             [0, 0, 1]]
    # Quaternion vector part [0, 0, sin(0.1)] encodes a 0.2 rad z-rotation
    rotated = vector.rotate_vector(basis, [0, 0, sin(0.1)])
    expected = array([[ 0.98006658,  0.19866933,  0.        ],
                      [-0.19866933,  0.98006658,  0.        ],
                      [ 0.        ,  0.        ,  1.        ]])
    # Compare via the Frobenius norm of the difference
    self.assertTrue(norm(rotated - expected) < self.delta)
# NOTE(review): fragment of a pygame/OpenGL render loop — the enclosing
# 'while' (and the event handling that triggers this 'break') is outside
# this view.
break
# Clear color and depth buffers, and enable depth testing for 3D drawing
gl.glClear(gl.GL_COLOR_BUFFER_BIT|gl.GL_DEPTH_BUFFER_BIT)
gl.glEnable(gl.GL_DEPTH_TEST)
# Camera position
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glLoadIdentity()
glu.gluLookAt(
self.cam_pos[0], self.cam_pos[1], self.cam_pos[2],
self.cam_target[0], self.cam_target[1], self.cam_target[2],
self.cam_up[0], self.cam_up[1], self.cam_up[2] )
# Scene elements
gl.glPushMatrix()
# Rotate the pointer vertices by the current frame's quaternion, then map
# from the OpenGL coordinate frame into the skin/sensor frame
cur_corners = vector.rotate_vector(self.vertices, self.quat[counter]) @ self.openGL2skin.T
#cur_corners = cur_corners * np.r_[1, 1, -1] # This seems to be required
##to get things right - but I don't understand OpenGL at this point
self.draw_pointer(cur_corners)
gl.glPopMatrix()
self.draw_axes()
# Show the finished frame, then pause; pygame.time.wait takes milliseconds,
# so dt is presumably ms per frame — TODO confirm at the caller
pygame.display.flip()
pygame.time.wait(dt)
# NOTE(review): fragment of an IMU analysis routine — the function definition
# and the variables omega, accMeasured, rate, g, g0, R_initialOrientation and
# initialPosition come from code outside this view.
# Shortest rotation taking the first acceleration sample onto gravity g0
q0 = vector.q_shortest_rotation(accMeasured[0], g0)
q_initial = rotmat.convert(R_initialOrientation, to='quat')
# combine the two, to form a reference orientation. Note that the sequence
# is very important!
q_ref = quat.q_mult(q_initial, q0)
# Calculate orientation q by "integrating" omega -----------------
q = quat.calc_quat(omega, q_ref, rate, 'bf')
# Acceleration, velocity, and position ----------------------------
# From q and the measured acceleration, get the \frac{d^2x}{dt^2}
g_v = np.r_[0, 0, g]
# Remove gravity (rotated into sensor coordinates), then express the
# residual acceleration in space-fixed coordinates
accReSensor = accMeasured - vector.rotate_vector(g_v, quat.q_inv(q))
accReSpace = vector.rotate_vector(accReSensor, q)
# Make the first orientation the reference orientation (this line normalizes
# the orientation q, not the position)
q = quat.q_mult(q, quat.q_inv(q[0]))
# compensate for drift
#drift = np.mean(accReSpace, 0)
#accReSpace -= drift*0.7
# Position and Velocity through integration, assuming 0-velocity at t=0
vel = np.nan*np.ones_like(accReSpace)
pos = np.nan*np.ones_like(accReSpace)
# Integrate each spatial component separately with the trapezoidal rule
for ii in range(accReSpace.shape[1]):
vel[:,ii] = cumtrapz(accReSpace[:,ii], dx=1./rate, initial=0)
pos[:,ii] = cumtrapz(vel[:,ii], dx=1./rate, initial=initialPosition[ii])
def calc_position(self):
    '''Calculate the position, assuming that the orientation is already known.

    Reads self.acc (measured acceleration), self.quat (orientation
    quaternions), self.rate (sample rate, Hz) and self.pos_init (initial
    position), and stores the integrated results in self.vel and self.pos.
    '''
    initialPosition = self.pos_init

    # Acceleration, velocity, and position ----------------------------
    # From q and the measured acceleration, get the \frac{d^2x}{dt^2}
    g = constants.g
    g_v = np.r_[0, 0, g]
    # Remove gravity (rotated into sensor coordinates), then express the
    # residual acceleration in space-fixed coordinates
    accReSensor = self.acc - vector.rotate_vector(g_v, quat.q_inv(self.quat))
    accReSpace = vector.rotate_vector(accReSensor, self.quat)

    # Position and Velocity through integration, assuming 0-velocity at t=0.
    # BUGFIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float does the same conversion. Hoist the loop-invariant
    # sample spacing while we are at it.
    dx = 1. / float(self.rate)
    vel = np.nan * np.ones_like(accReSpace)
    pos = np.nan * np.ones_like(accReSpace)
    # Integrate each spatial component separately with the trapezoidal rule
    for ii in range(accReSpace.shape[1]):
        vel[:, ii] = cumtrapz(accReSpace[:, ii], dx=dx, initial=0)
        pos[:, ii] = cumtrapz(vel[:, ii], dx=dx, initial=initialPosition[ii])

    self.vel = vel
    self.pos = pos
>>> M[0] = np.r_[0,0,0]
>>> M[1]= np.r_[1,0,0]
>>> M[2] = np.r_[1,1,0]
>>> M -= np.mean(M, axis=0)
>>> q = np.vstack((np.zeros_like(t), np.zeros_like(t),quat.deg2quat(100*t))).T
>>> M0 = vector.rotate_vector(M[0], q) + translation
>>> M1 = vector.rotate_vector(M[1], q) + translation
>>> M2 = vector.rotate_vector(M[2], q) + translation
>>> data = np.hstack((M0,M1,M2))
>>> (pos, ori) = signals.analyze_3Dmarkers(data, data[0])
>>> r0 = np.r_[1,2,3]
>>> movement = find_trajectory(r0, pos, ori)
'''
# Express the object-fixed point r0 in space coordinates: rotate it by the
# current orientation, then offset by the current position (per time sample).
# NOTE(review): the 'def' line and docstring opening lie before this view.
return Position + vector.rotate_vector(r0, Orientation)
# NOTE(review): fragment of an animation routine — fig, ax, quats, delta,
# deltaT, title_text, out_file and _update_func are defined outside this view.
# Arrow-shaped pointer outline, one (x, y, z) point per row
corners = [[0, 0, 0.6],
[0.2, -0.2, 0],
[0, 0, 0]]
colors = ['r', 'b']
# Calculate the arrow corners
corner_array = np.column_stack(corners)
corner_arrays = []
# Two copies of the outline, offset +/- delta along z (front and back faces)
corner_arrays.append( corner_array + np.r_[0., 0., delta] )
corner_arrays.append( corner_array - np.r_[0., 0., delta] )
# Calculate the new orientations, given the quaternion orientation
all_corners = []
# NOTE(review): the loop variable 'quat' shadows the 'quat' module used
# elsewhere in this file — consider renaming it (e.g. to 'q')
for quat in quats:
all_corners.append([vector.rotate_vector(corner_arrays[0], quat),
vector.rotate_vector(corner_arrays[1], quat)])
# Animate the whole thing, using 'update_func'
num_frames = len(quats)
ani = animation.FuncAnimation(fig, _update_func, num_frames,
fargs=[all_corners, colors, ax, title_text],
interval=deltaT)
# If requested, save the animation to a file
if out_file is not None:
try:
ani.save(out_file)
print('Animation saved to {0}'.format(out_file))
# ani.save raises ValueError when no movie writer (e.g. ffmpeg) is available
except ValueError:
print('Sorry, no animation saved!')
print('You probably have to install "ffmpeg", and add it to your PATH.')