The learning seems to get butchered for any duration other than 1.
if __name__ == '__main__':
import pylab as plt
from plot_tools import plot_pos_vel_acc_trajectory
# only Transformation system (f=0)
dmp = DiscreteDMP()
end_time = 1 # Definitely don't change this.
frequency = 1000 # Changing this also seems to break things.
trajectory_time_points = np.linspace(0, end_time, end_time * frequency)
print "Time begins with %d and ends with %d" %(trajectory_time_points[0], trajectory_time_points[-1])
trajectory_y_values = [np.sin(10*t) for t in trajectory_time_points]
dmp.setup(trajectory_y_values[0], trajectory_y_values[-1], end_time)
dmp.learn_batch(trajectory_y_values, frequency)
traj = []
for x in range(end_time * frequency):
#if x == 500:
# dmp.goal = 4.0
dmp.run_step()
traj.append([dmp.x, dmp.xd, dmp.xdd])
fig = plt.figure('f=0 (transformation system only)', figsize=(10, 3))
ax1 = fig.add_subplot(131)
ax2 = fig.add_subplot(132)
ax3 = fig.add_subplot(133)
plot_pos_vel_acc_trajectory((ax1, ax2, ax3), traj, dmp.delta_t, label='DMP $f=0$', linewidth=1)
fig.tight_layout()
plt.show()
Produces a nice sine trajectory, but changing "end_time = 1" to "end_time = 2" produces this:
.
The learning seems to get butchered for any duration other than 1.
For example:
Produces a nice sine trajectory, but changing "end_time = 1" to "end_time = 2" produces this:
.