diff --git a/built_in_tasks/target_capture_task.py b/built_in_tasks/target_capture_task.py index 8e1e0bed..c09a3c61 100644 --- a/built_in_tasks/target_capture_task.py +++ b/built_in_tasks/target_capture_task.py @@ -50,6 +50,7 @@ class TargetCapture(Sequence): tries = 0 # Helper variable to keep track of the number of failed attempts at a given trial. sequence_generators = [] + cursor_traj = [] reward_time = traits.Float(.5, desc="Length of reward dispensation") hold_time = traits.Float(.2, desc="Length of hold required at targets before next target appears") @@ -344,6 +345,9 @@ def _cycle(self): # Update the trial index self.task_data['trial'] = self.calc_trial_num() + # Update cursor trajectory + self.cursor_traj.append(self.plant.get_endpoint_pos().copy()) + super()._cycle() def move_effector(self): @@ -410,6 +414,7 @@ def _start_target(self): target.show() self.sync_event('TARGET_ON', self.gen_indices[self.target_index]) self.target_location = self.targs[self.target_index] # save for BMILoop + self.cursor_traj.clear() def _start_hold(self): super()._start_hold() diff --git a/built_in_tasks/target_graphics.py b/built_in_tasks/target_graphics.py index 4f6a4a3f..608b1d42 100644 --- a/built_in_tasks/target_graphics.py +++ b/built_in_tasks/target_graphics.py @@ -261,7 +261,7 @@ def __init__(self, text, color, height=1, starting_pos=np.zeros(3)): self._pickle_init() def _pickle_init(self): - self.model = Text(self.size, self.text, color=self.color, justify='right', + self.model = Text(self.size, self.text, color=self.color, justify='left', font_size=28) self.graphics_models = [self.model] self.model.translate(*self.position) diff --git a/features/reward_features.py b/features/reward_features.py index dd53bd81..838f6e67 100644 --- a/features/reward_features.py +++ b/features/reward_features.py @@ -10,6 +10,7 @@ from built_in_tasks.target_graphics import TextTarget, VirtualRectangularTarget import numpy as np import serial, glob +import aopy ###### CONSTANTS sec_per_min 
= 60 @@ -294,14 +295,14 @@ def _start_tracking_out(self): class ScoreRewards(traits.HasTraits): ''' - Add a "score" to the task that awards points based on target acquisition speed. + Add a "score" to the task that awards points based on movement error in 2D. The score is displayed after each reward and on the web GUI. The running score also gets saved as a value in the task data called 'reward_score'. Note: Only works with target acquisition tasks. ''' - score_display_location = traits.Tuple((10, 0, 10), desc="Location to display the score (in cm)") + score_display_location = traits.Tuple((0, 0, 0), desc="Location to display the score (in cm)") score_display_height = traits.Float(1, desc="Height of the score display (in cm)") score_display_color = traits.Tuple((1, 1, 1, 1), desc="Color of the score display") score_timed_state = traits.String("target", desc="State to display the score after") @@ -315,19 +316,20 @@ def init(self): super().init() self.task_data['reward_score'] = 0 + def _start_reward(self): if hasattr(super(), '_start_reward'): super()._start_reward() - timed_state = None - idx = -1 - while timed_state is None and -idx-1 < len(self.state_log): - if self.state_log[idx][0] == self.score_timed_state: - timed_state = self.state_log[-1][1] - self.state_log[idx][1] - idx -= 1 - if timed_state is None or timed_state == 0.: - score = 0. 
- else: - score = 10*int(10./timed_state) + + move_error = aopy.analysis.behavior.compute_movement_error(np.array(self.cursor_traj)[:,[0,2]], self.target_location[[0,2]]) + avg_error = np.mean(abs(move_error)) + score = int(100 - min(100, avg_error*20)) + + # import matplotlib.pyplot as plt + # plt.figure() + # plt.plot(move_error) + # plt.show() + self.reportstats['Score'] += score self.score_display = TextTarget(str(score), height=self.score_display_height, color=self.score_display_color) @@ -340,6 +342,30 @@ def _end_reward(self): super()._end_reward() self.remove_model(self.score_display.model) +class TimedScoreRewards(ScoreRewards): + ''' + Add a "score" to the task that awards points based on target acquisition speed. + ''' + def _start_reward(self): + if hasattr(super(), '_start_reward'): + super()._start_reward() + timed_state = None + idx = -1 + while timed_state is None and -idx-1 < len(self.state_log): + if self.state_log[idx][0] == self.score_timed_state: + timed_state = self.state_log[-1][1] - self.state_log[idx][1] + idx -= 1 + if timed_state is None or timed_state == 0.: + score = 0. 
+ else: + score = 10*int(10./timed_state) + self.reportstats['Score'] += score + self.score_display = TextTarget(str(score), height=self.score_display_height, + color=self.score_display_color) + self.score_display.move_to_position(self.score_display_location) + self.add_model(self.score_display.model) + self.task_data['reward_score'] += score + """"" BELOW THIS IS ALL THE OLD CODE ASSOCIATED WITH REWARD FEATURES""" diff --git a/riglib/pupillabs/__init__.py b/riglib/pupillabs/__init__.py new file mode 100644 index 00000000..1af57c44 --- /dev/null +++ b/riglib/pupillabs/__init__.py @@ -0,0 +1,133 @@ +import time +import traceback +import numpy as np +import zmq +from msgpack import loads +import msgpack +import time +from .surface_no_delay import NoDelaySurfaceGazeMapper, RadialDistortionCamera +from .pupillab_timesync import measure_clock_offset + +from riglib.source import DataSourceSystem + +class System(DataSourceSystem): + ''' + + ''' + dtype = np.dtype((float, (6,))) + update_freq = 200 + + def __init__(self, ip="128.95.215.191", port="50020"): + ''' + For eye tracking, need Pupil Capture running in the background (after calibration in Pupil Capture) + ''' + # define a surface AOI + + # open a req port to talk to pupil + self.ip = ip # remote ip or localhost + self.port = port # same as in the pupil remote gui + + # # matrix for camera distortion + camera = RadialDistortionCamera( + resolution=(1280, 720), + cam_matrix=[ + [794.3311439869655, 0.0, 633.0104437728625], + [0.0, 793.5290139393004, 397.36927353414865], + [0.0, 0.0, 1.0], + ], + dist_coefs=[ + [ + -0.3758628065070806, + 0.1643326166951343, + 0.00012182540692089567, + 0.00013422608638039466, + 0.03343691733865076, + 0.08235235770849726, + -0.08225804883227375, + 0.14463365333602152, + ] + ], + ) + + self.mapper = NoDelaySurfaceGazeMapper(camera) + self.mapped_points = [] + + def start(self): + ''' + + ''' + self.ctx = zmq.Context() + self.pupil_remote = self.ctx.socket(zmq.REQ) + # The REQ talks to
Pupil remote and receives the session unique IPC SUB PORT + self.pupil_remote.connect(f'tcp://{self.ip}:{self.port}') #connect to eye tracker + + # rec_name = 'test' + # req.send_string(f'R {rec_name}') # start recording + # print('pupillab starting recording in Pupil Capture') + + # # sync pupil internal clock with the local time + # local_clock = time.time # Unix time, less accurate + + # # Measure clock offset once + # self.offset = measure_clock_offset(pupil_remote, clock_function=local_clock) + # print(f"\n Pupillab Clock offset (1 measurement): {self.offset} seconds") + + self.pupil_remote.send_string('SUB_PORT') # Request 'SUB_PORT' for reading data + sub_port = self.pupil_remote.recv_string() + + # open sub ports to listen to pupil; sub: subport that receives surface data + self.sub = self.ctx.socket(zmq.SUB) # open a sub port to listen to pupil surface topic + self.sub.connect(f'tcp://{self.ip}:{sub_port}') + # self.sub.setsockopt_string(zmq.SUBSCRIBE, f"surfaces.{surface_name}") + self.sub.subscribe(f"surfaces") # receive all surface messages + self.sub.subscribe("gaze") # receive all gaze messages + self.sub.subscribe('pupil.0.2d') # receive all pupil0 messages, right eye + self.sub.subscribe('pupil.1.2d') # receive all pupil1 messages, left eye + + def stop(self): + self.sub.close() + # req.send_string('r') # stop recording + # print('pupillab stopped recording in Pupil Capture') + self.pupil_remote.close() + self.ctx.term() + + def get(self): + """ + read in a batch of eye data and return x, y on surface & pupil diameters for each eye + """ + raw = [np.nan, np.nan] + confidence = np.nan + diameter0, diameter1 = (np.nan, np.nan) + timestamp = np.nan + coords = [raw[0], raw[1], confidence, timestamp, diameter0, diameter1] + + while np.count_nonzero(np.isnan(coords)) > 0: + if not self.sub.poll(0) == zmq.POLLIN: + continue + + topic, payload = self.sub.recv_multipart(flags=zmq.NOBLOCK) + message = msgpack.loads(payload, raw=False) + + if 
message["topic"].startswith("surfaces"): # get the surface datum when gaze is on the surface + self.mapper.update_homography(message["img_to_surf_trans"]) + + elif message["topic"].startswith("gaze.3d.01"): + mapped_gaze = self.mapper.gaze_to_surface(message["norm_pos"]) + raw[0] = float(mapped_gaze.norm_x) + raw[1] = float(mapped_gaze.norm_y) + timestamp = message["timestamp"] + confidence = message["confidence"] + + elif topic.startswith(b"pupil.0.2d"): + diameter0 = float(message["diameter"]) # pupil 0 diameter, right eye, unit: pixel + elif topic.startswith(b"pupil.1.2d"): + diameter1 = float(message["diameter"]) # pupil 1 diameter, left eye, unit: pixel + + coords = [raw[0], raw[1], confidence, timestamp, diameter0, diameter1] + time.sleep(0.001) # sleep for 1 ms to avoid busy waiting + + coords = np.expand_dims(coords, axis=0) + return coords + + 