From 0e9f1bdc061ea88f2c4c3f8af3962658bd1d4d42 Mon Sep 17 00:00:00 2001
From: Hayden Rundle
Date: Thu, 14 Nov 2024 14:13:49 -0800
Subject: [PATCH 1/5] First attempt at movement score (not tested)

---
 built_in_tasks/target_capture_task.py |  5 +++++
 features/reward_features.py           | 19 ++++++++++++++++++-
 2 files changed, 23 insertions(+), 1 deletion(-)

diff --git a/built_in_tasks/target_capture_task.py b/built_in_tasks/target_capture_task.py
index 8e1e0bed..c09a3c61 100644
--- a/built_in_tasks/target_capture_task.py
+++ b/built_in_tasks/target_capture_task.py
@@ -50,6 +50,7 @@ class TargetCapture(Sequence):
     tries = 0 # Helper variable to keep track of the number of failed attempts at a given trial.
 
     sequence_generators = []
+    cursor_traj = []
 
     reward_time = traits.Float(.5, desc="Length of reward dispensation")
     hold_time = traits.Float(.2, desc="Length of hold required at targets before next target appears")
@@ -344,6 +345,9 @@ def _cycle(self):
         # Update the trial index
         self.task_data['trial'] = self.calc_trial_num()
 
+        # Update cursor trajectory
+        self.cursor_traj.append(self.plant.get_endpoint_pos().copy())
+
         super()._cycle()
 
     def move_effector(self):
@@ -410,6 +414,7 @@ def _start_target(self):
         target.show()
         self.sync_event('TARGET_ON', self.gen_indices[self.target_index])
         self.target_location = self.targs[self.target_index] # save for BMILoop
+        self.cursor_traj.clear()
 
     def _start_hold(self):
         super()._start_hold()
diff --git a/features/reward_features.py b/features/reward_features.py
index dd53bd81..966a9ca3 100644
--- a/features/reward_features.py
+++ b/features/reward_features.py
@@ -10,6 +10,7 @@ from built_in_tasks.target_graphics import TextTarget, VirtualRectangularTarget
 import numpy as np
 import serial, glob
+import aopy
 
 ###### CONSTANTS
 sec_per_min = 60
@@ -315,7 +316,8 @@ def init(self):
         super().init()
         self.task_data['reward_score'] = 0
 
-    def _start_reward(self):
+'''
+    def _start_reward(self):
         if hasattr(super(), '_start_reward'):
             super()._start_reward()
         timed_state = None
         idx = -1
         while timed_state is None and -idx-1 < len(self.state_log):
             if self.state_log[idx][0] == self.score_timed_state:
                 timed_state = self.state_log[-1][1] - self.state_log[idx][1]
             idx -= 1
         if timed_state is None or timed_state == 0.:
             score = 0.
         else:
             score = 10*int(10./timed_state)
         self.reportstats['Score'] += score
         self.score_display = TextTarget(str(score), height=self.score_display_height,
                                         color=self.score_display_color)
         self.score_display.move_to_position(self.score_display_location)
         self.add_model(self.score_display.model)
         self.task_data['reward_score'] += score
+'''
+    def _start_reward(self):
+        if hasattr(super(), '_start_reward'):
+            super()._start_reward()
+        move_error = aopy.analysis.behavior.compute_movement_error(np.array(self.cursor_traj)[:,:2], self.targs[self.target_index][:2])
+        avg_error = np.mean(move_error)
+        score = int(100.0 - avg_error)
+
+        self.reportstats['Score'] += score
+        self.score_display = TextTarget(str(score), height=self.score_display_height,
+                                        color=self.score_display_color)
+        self.score_display.move_to_position(self.score_display_location)
+        self.add_model(self.score_display.model)
+        self.task_data['reward_score'] += score
 
     def _end_reward(self):
         if hasattr(super(), '_end_reward'):
             super()._end_reward()
         self.remove_model(self.score_display.model)
 
 
 """"" BELOW THIS IS ALL THE OLD CODE ASSOCIATED WITH REWARD FEATURES"""
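The new score has two pieces: a per-sample movement error from aopy and a linear mapping to points. `aopy.analysis.behavior.compute_movement_error` is untested in this commit and its exact signature and semantics are assumed here; a minimal hypothetical stand-in, assuming it returns the perpendicular deviation of each 2D cursor sample from the straight line between the origin and the target (center-out convention), might look like:

    import numpy as np

    def movement_error_sketch(traj, target):
        """Hypothetical stand-in for compute_movement_error; the real aopy
        function may differ. traj: (n_samples, 2); target: (2,)."""
        traj = np.asarray(traj, dtype=float)
        target = np.asarray(target, dtype=float)
        direction = target / np.linalg.norm(target)  # unit vector toward the target
        along = traj @ direction                     # signed progress along the straight path
        perp = traj - np.outer(along, direction)     # component off the straight path
        return np.linalg.norm(perp, axis=1)          # one deviation per sample, in cm

Under that reading, `int(100.0 - avg_error)` only behaves sensibly for small average errors and can go negative; a later patch in this series clamps it.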
From de9d360615c8463282ce63011d9e1004bea59502 Mon Sep 17 00:00:00 2001
From: Hayden Rundle
Date: Thu, 14 Nov 2024 14:14:13 -0800
Subject: [PATCH 2/5] whitespace issue

---
 features/reward_features.py  |  44 ++++++------
 riglib/pupillabs/__init__.py | 133 +++++++++++++++++++++++++++++++++++
 2 files changed, 155 insertions(+), 22 deletions(-)
 create mode 100644 riglib/pupillabs/__init__.py

diff --git a/features/reward_features.py b/features/reward_features.py
index 966a9ca3..395227aa 100644
--- a/features/reward_features.py
+++ b/features/reward_features.py
@@ -316,28 +316,28 @@ def init(self):
         super().init()
         self.task_data['reward_score'] = 0
 
-'''
-    def _start_reward(self):
-        if hasattr(super(), '_start_reward'):
-            super()._start_reward()
-        timed_state = None
-        idx = -1
-        while timed_state is None and -idx-1 < len(self.state_log):
-            if self.state_log[idx][0] == self.score_timed_state:
-                timed_state = self.state_log[-1][1] - self.state_log[idx][1]
-            idx -= 1
-        if timed_state is None or timed_state == 0.:
-            score = 0.
-        else:
-            score = 10*int(10./timed_state)
-        self.reportstats['Score'] += score
-        self.score_display = TextTarget(str(score), height=self.score_display_height,
-                                        color=self.score_display_color)
-        self.score_display.move_to_position(self.score_display_location)
-        self.add_model(self.score_display.model)
-        self.task_data['reward_score'] += score
-'''
-    def _start_reward(self):
+
+# def _start_reward(self):
+#     if hasattr(super(), '_start_reward'):
+#         super()._start_reward()
+#     timed_state = None
+#     idx = -1
+#     while timed_state is None and -idx-1 < len(self.state_log):
+#         if self.state_log[idx][0] == self.score_timed_state:
+#             timed_state = self.state_log[-1][1] - self.state_log[idx][1]
+#         idx -= 1
+#     if timed_state is None or timed_state == 0.:
+#         score = 0.
+#     else:
+#         score = 10*int(10./timed_state)
+#     self.reportstats['Score'] += score
+#     self.score_display = TextTarget(str(score), height=self.score_display_height,
+#                                     color=self.score_display_color)
+#     self.score_display.move_to_position(self.score_display_location)
+#     self.add_model(self.score_display.model)
+#     self.task_data['reward_score'] += score
+
+    def _start_reward(self):
         if hasattr(super(), '_start_reward'):
             super()._start_reward()
         move_error = aopy.analysis.behavior.compute_movement_error(np.array(self.cursor_traj)[:,:2], self.targs[self.target_index][:2])
diff --git a/riglib/pupillabs/__init__.py b/riglib/pupillabs/__init__.py
new file mode 100644
index 00000000..1af57c44
--- /dev/null
+++ b/riglib/pupillabs/__init__.py
@@ -0,0 +1,133 @@
+import time
+import traceback
+import numpy as np
+import zmq
+import msgpack
+
+from .surface_no_delay import NoDelaySurfaceGazeMapper, RadialDistortionCamera
+from .pupillab_timesync import measure_clock_offset
+
+from riglib.source import DataSourceSystem
+
+class System(DataSourceSystem):
+    '''
+    Data source that streams gaze position on a surface and pupil diameters
+    from Pupil Capture over its ZMQ network API.
+    '''
+    dtype = np.dtype((float, (6,)))
+    update_freq = 200
+
+    def __init__(self, ip="128.95.215.191", port="50020"):
+        '''
+        For eye tracking, need Pupil Capture running in the background
+        (after calibration in Pupil Capture).
+        '''
+        # define a surface AOI
+
+        # open a req port to talk to pupil
+        self.ip = ip # remote ip or localhost
+        self.port = port # same as in the pupil remote gui
+
+        # camera matrix and distortion coefficients for the world camera
+        camera = RadialDistortionCamera(
+            resolution=(1280, 720),
+            cam_matrix=[
+                [794.3311439869655, 0.0, 633.0104437728625],
+                [0.0, 793.5290139393004, 397.36927353414865],
+                [0.0, 0.0, 1.0],
+            ],
+            dist_coefs=[
+                [
+                    -0.3758628065070806,
+                    0.1643326166951343,
+                    0.00012182540692089567,
+                    0.00013422608638039466,
+                    0.03343691733865076,
+                    0.08235235770849726,
+                    -0.08225804883227375,
+                    0.14463365333602152,
+                ]
+            ],
+        )
+
+        self.mapper = NoDelaySurfaceGazeMapper(camera)
+        self.mapped_points = []
+
+    def start(self):
+        '''
+        Connect to Pupil Remote and subscribe to surface, gaze and pupil topics.
+        '''
+        self.ctx = zmq.Context()
+        self.pupil_remote = self.ctx.socket(zmq.REQ)
+        # The REQ talks to Pupil remote and receives the session unique IPC SUB PORT
+        self.pupil_remote.connect(f'tcp://{self.ip}:{self.port}') # connect to eye tracker
+
+        # rec_name = 'test'
+        # req.send_string(f'R {rec_name}') # start recording
+        # print('pupillab starting recording in Pupil Capture')
+
+        # # sync pupil internal clock with the local time
+        # local_clock = time.time # Unix time, less accurate
+
+        # # Measure clock offset once
+        # self.offset = measure_clock_offset(pupil_remote, clock_function=local_clock)
+        # print(f"\n Pupillab Clock offset (1 measurement): {self.offset} seconds")
+
+        self.pupil_remote.send_string('SUB_PORT') # Request 'SUB_PORT' for reading data
+        sub_port = self.pupil_remote.recv_string()
+
+        # open sub ports to listen to pupil; sub: subport that receives surface data
+        self.sub = self.ctx.socket(zmq.SUB) # open a sub port to listen to pupil surface topic
+        self.sub.connect(f'tcp://{self.ip}:{sub_port}')
+        # self.sub.setsockopt_string(zmq.SUBSCRIBE, f"surfaces.{surface_name}")
+        self.sub.subscribe("surfaces") # receive all surface messages
+        self.sub.subscribe("gaze") # receive all gaze messages
+        self.sub.subscribe('pupil.0.2d') # receive all pupil0 messages, right eye
+        self.sub.subscribe('pupil.1.2d') # receive all pupil1 messages, left eye
+
+    def stop(self):
+        self.sub.close()
+        # req.send_string('r') # stop recording
+        # print('pupillab stopped recording in Pupil Capture')
+        self.pupil_remote.close()
+        self.ctx.term()
+
+    def get(self):
+        """
+        Read in a batch of eye data and return x, y on the surface plus pupil diameters for each eye.
+        """
+        raw = [np.nan, np.nan] # list (mutable) so the gaze handler below can fill it in
+        confidence = np.nan
+        diameter0, diameter1 = (np.nan, np.nan)
+        timestamp = np.nan
+        coords = [raw[0], raw[1], confidence, timestamp, diameter0, diameter1]
+
+        while np.count_nonzero(np.isnan(coords)) > 0:
+            if not self.sub.poll(0) == zmq.POLLIN:
+                continue
+
+            topic, payload = self.sub.recv_multipart(flags=zmq.NOBLOCK)
+            message = msgpack.loads(payload, raw=False)
+
+            if message["topic"].startswith("surfaces"): # update the homography when the surface is detected
+                self.mapper.update_homography(message["img_to_surf_trans"])
+
+            elif message["topic"].startswith("gaze.3d.01"):
+                mapped_gaze = self.mapper.gaze_to_surface(message["norm_pos"])
+                raw[0] = float(mapped_gaze.norm_x)
+                raw[1] = float(mapped_gaze.norm_y)
+                timestamp = message["timestamp"]
+                confidence = message["confidence"]
+
+            elif topic.startswith(b"pupil.0.2d"):
+                diameter0 = float(message["diameter"]) # pupil 0 diameter, right eye, unit: pixels
+            elif topic.startswith(b"pupil.1.2d"):
+                diameter1 = float(message["diameter"]) # pupil 1 diameter, left eye, unit: pixels
+
+            coords = [raw[0], raw[1], confidence, timestamp, diameter0, diameter1]
+            time.sleep(0.001) # sleep for 1 ms to avoid busy waiting
+
+        coords = np.expand_dims(coords, axis=0)
+        return coords
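Before wiring this into a task, the Pupil Remote handshake that `System.start()` performs can be checked standalone: request the session's SUB port over the REQ socket, subscribe, and read one message. This sketch uses the class's default IP and port and assumes Pupil Capture is running and calibrated:

    import zmq, msgpack

    ctx = zmq.Context()
    req = ctx.socket(zmq.REQ)
    req.connect("tcp://128.95.215.191:50020")  # Pupil Remote; defaults from System
    req.send_string("SUB_PORT")                # ask for the session's data port
    sub_port = req.recv_string()

    sub = ctx.socket(zmq.SUB)
    sub.connect(f"tcp://128.95.215.191:{sub_port}")
    sub.subscribe("gaze")                      # same topic System subscribes to

    topic, payload = sub.recv_multipart()      # blocks until a gaze datum arrives
    print(topic, msgpack.loads(payload, raw=False)["timestamp"])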
From 3286876518262065a13184aec90551551ad41d59 Mon Sep 17 00:00:00 2001
From: Hayden Rundle
Date: Wed, 20 Nov 2024 11:43:14 -0800
Subject: [PATCH 3/5] Fixed error with target selection

---
 features/reward_features.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/features/reward_features.py b/features/reward_features.py
index 395227aa..77e3fa10 100644
--- a/features/reward_features.py
+++ b/features/reward_features.py
@@ -340,7 +340,7 @@ def init(self):
     def _start_reward(self):
         if hasattr(super(), '_start_reward'):
             super()._start_reward()
-        move_error = aopy.analysis.behavior.compute_movement_error(np.array(self.cursor_traj)[:,:2], self.targs[self.target_index][:2])
+        move_error = aopy.analysis.behavior.compute_movement_error(np.array(self.cursor_traj)[:,:2], self.target_location[[0,2]])
         avg_error = np.mean(move_error)
         score = int(100.0 - avg_error)
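Besides switching to the already-saved `self.target_location`, the fix replaces the `[:2]` slice with fancy indexing `[[0,2]]`: the slice takes x and y, while `[[0,2]]` picks x and z, presumably the on-screen plane in this rig's convention with y as the depth axis (the next patch applies the same indexing to the cursor trajectory for consistency):

    import numpy as np

    pos = np.array([3.0, 12.0, -5.0])  # (x, y, z) endpoint position
    print(pos[:2])      # [ 3. 12.] -> x and y: includes the depth axis
    print(pos[[0, 2]])  # [ 3. -5.] -> x and z: the plane the cursor moves in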
From 90891588ce09cede703ee1d2efcf4c75570ee70b Mon Sep 17 00:00:00 2001
From: Leo
Date: Mon, 16 Dec 2024 14:03:17 -0800
Subject: [PATCH 4/5] working

---
 built_in_tasks/target_graphics.py |  2 +-
 features/reward_features.py       | 16 +++++++++++-----
 2 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/built_in_tasks/target_graphics.py b/built_in_tasks/target_graphics.py
index 4f6a4a3f..608b1d42 100644
--- a/built_in_tasks/target_graphics.py
+++ b/built_in_tasks/target_graphics.py
@@ -261,7 +261,7 @@ def __init__(self, text, color, height=1, starting_pos=np.zeros(3)):
         self._pickle_init()
 
     def _pickle_init(self):
-        self.model = Text(self.size, self.text, color=self.color, justify='right',
+        self.model = Text(self.size, self.text, color=self.color, justify='left',
                           font_size=28)
         self.graphics_models = [self.model]
         self.model.translate(*self.position)
diff --git a/features/reward_features.py b/features/reward_features.py
index 77e3fa10..c99bb370 100644
--- a/features/reward_features.py
+++ b/features/reward_features.py
@@ -302,7 +302,7 @@ class ScoreRewards(traits.HasTraits):
 
     Note: Only works with target acquisition tasks.
     '''
-    score_display_location = traits.Tuple((10, 0, 10), desc="Location to display the score (in cm)")
+    score_display_location = traits.Tuple((0, 0, 0), desc="Location to display the score (in cm)")
     score_display_height = traits.Float(1, desc="Height of the score display (in cm)")
     score_display_color = traits.Tuple((1, 1, 1, 1), desc="Color of the score display")
     score_timed_state = traits.String("target", desc="State to display the score after")
@@ -340,10 +340,16 @@ def init(self):
     def _start_reward(self):
         if hasattr(super(), '_start_reward'):
             super()._start_reward()
-        move_error = aopy.analysis.behavior.compute_movement_error(np.array(self.cursor_traj)[:,:2], self.target_location[[0,2]])
-        avg_error = np.mean(move_error)
-        score = int(100.0 - avg_error)
-
+
+        move_error = aopy.analysis.behavior.compute_movement_error(np.array(self.cursor_traj)[:,[0,2]], self.target_location[[0,2]])
+        avg_error = np.mean(abs(move_error))
+        score = int(100 - min(100, avg_error*20))
+
+        # import matplotlib.pyplot as plt
+        # plt.figure()
+        # plt.plot(move_error)
+        # plt.show()
+
         self.reportstats['Score'] += score
         self.score_display = TextTarget(str(score), height=self.score_display_height,
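With the clamped scaling introduced above, the score maps average error linearly onto points: zero error earns 100, and 5 cm or more earns 0. A quick check of the formula:

    for avg_error in (0.0, 1.0, 2.5, 5.0, 8.0):
        score = int(100 - min(100, avg_error * 20))
        print(f"{avg_error:3.1f} cm -> {score:3d} points")
    # 0.0 cm -> 100, 1.0 cm -> 80, 2.5 cm -> 50, 5.0 cm -> 0, 8.0 cm -> 0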
From 173b1ee5d4944a039e5900b8dec07d778927accf Mon Sep 17 00:00:00 2001
From: Leo
Date: Mon, 16 Dec 2024 14:05:04 -0800
Subject: [PATCH 5/5] comments

---
 features/reward_features.py | 45 ++++++++++++++++++++-----------------
 1 file changed, 24 insertions(+), 21 deletions(-)

diff --git a/features/reward_features.py b/features/reward_features.py
index c99bb370..838f6e67 100644
--- a/features/reward_features.py
+++ b/features/reward_features.py
@@ -295,7 +295,7 @@ class ScoreRewards(traits.HasTraits):
 class ScoreRewards(traits.HasTraits):
     '''
-    Add a "score" to the task that awards points based on target acquisition speed.
+    Add a "score" to the task that awards points based on movement error in 2D.
     The score is displayed after each reward and on the web GUI. The running score
     also gets saved as a value in the task data called 'reward_score'.
@@ -317,26 +317,6 @@ def init(self):
         super().init()
         self.task_data['reward_score'] = 0
 
-
-# def _start_reward(self):
-#     if hasattr(super(), '_start_reward'):
-#         super()._start_reward()
-#     timed_state = None
-#     idx = -1
-#     while timed_state is None and -idx-1 < len(self.state_log):
-#         if self.state_log[idx][0] == self.score_timed_state:
-#             timed_state = self.state_log[-1][1] - self.state_log[idx][1]
-#         idx -= 1
-#     if timed_state is None or timed_state == 0.:
-#         score = 0.
-#     else:
-#         score = 10*int(10./timed_state)
-#     self.reportstats['Score'] += score
-#     self.score_display = TextTarget(str(score), height=self.score_display_height,
-#                                     color=self.score_display_color)
-#     self.score_display.move_to_position(self.score_display_location)
-#     self.add_model(self.score_display.model)
-#     self.task_data['reward_score'] += score
-
     def _start_reward(self):
         if hasattr(super(), '_start_reward'):
             super()._start_reward()
@@ -362,6 +342,29 @@ def _end_reward(self):
             super()._end_reward()
         self.remove_model(self.score_display.model)
 
+class TimedScoreRewards(ScoreRewards):
+    '''
+    Add a "score" to the task that awards points based on target acquisition speed.
+    '''
+    def _start_reward(self):
+        if hasattr(super(), '_start_reward'):
+            super()._start_reward()
+        timed_state = None
+        idx = -1
+        while timed_state is None and -idx-1 < len(self.state_log):
+            if self.state_log[idx][0] == self.score_timed_state:
+                timed_state = self.state_log[-1][1] - self.state_log[idx][1]
+            idx -= 1
+        if timed_state is None or timed_state == 0.:
+            score = 0.
+        else:
+            score = 10*int(10./timed_state)
+        self.reportstats['Score'] += score
+        self.score_display = TextTarget(str(score), height=self.score_display_height,
+                                        color=self.score_display_color)
+        self.score_display.move_to_position(self.score_display_location)
+        self.add_model(self.score_display.model)
+        self.task_data['reward_score'] += score
 
 """"" BELOW THIS IS ALL THE OLD CODE ASSOCIATED WITH REWARD FEATURES"""
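For reference, the speed-based formula preserved in TimedScoreRewards rewards faster acquisitions and quantizes points to multiples of 10 via the int() truncation:

    for t in (0.5, 1.0, 2.0, 3.0, 7.0):
        print(f"{t:3.1f} s -> {10 * int(10.0 / t):3d} points")
    # 0.5 s -> 200, 1.0 s -> 100, 2.0 s -> 50, 3.0 s -> 30, 7.0 s -> 10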