diff --git a/+Base/QRconv.m b/+Base/QRconv.m new file mode 100644 index 000000000..49008c42a --- /dev/null +++ b/+Base/QRconv.m @@ -0,0 +1,379 @@ +function [v, V, options_fit, stages] = QRconv(img, options_guess, QR_parameters) + % QRconv finds QRs in an image via a convoluation algorithm. + % img is a NxM image. + % options_guess is a struct of the same format as options_fit: struct('ang', [deg], 'calibration', [pix/um], 'X_expected', [QR-X], 'Y_expected', [QR-Y]); + % v and V are arrays of column vectors (2xN) of the same size: + % - v are positions of candidate QR codes in the coordinate system of the image img, while + % - V are the corresponding positons in QR space. + % If a candidate does not pass the checksum, [NaN; NaN] is returned for V. + % Also returns transform from QR-space V to position-space v according to + % v = M * V + b + + % Default QR parameters. + if nargin < 3 + QR_parameters = struct( 'r', .3, ... % Circle radius [um], + 'l', 6.25, ... % Arm length [um], + 'd', 40); % QR spacing [um]. + end + + % Step 0: prepare some helper variables + ang0 = options_guess.ang; + r = QR_parameters.r / options_guess.calibration; + l = QR_parameters.l / options_guess.calibration; + options_guess.d = QR_parameters.d; + + if isfield(options_guess, 'X_expected') && isfield(options_guess, 'Y_expected') + V_expected = [options_guess.X_expected; options_guess.Y_expected]; + else + V_expected = [NaN; NaN]; + end + + % Step 1: remove global gradients in the image (see flatten for more details) + flat = flatten(img); + + % Step 2: perform a convolutional filter to identify QR candidates + [conv, convH, convV] = doConv(flat, ang0, r, l); + + % Step 3: generate a logical image to determine the bit-code of the QR. + bw = threshold(flat); + + stages = struct('flat', flat, 'conv', conv, 'convH', convH, 'convV', convV, 'bw', bw); + + % Step 4: using the candidate locations, return the locations and decoded locations of the QR codes. + [cx, cy, CX, CY] = findQRs(bw, conv, ang0, r, l, V_expected); + + v = [cx; cy]; + V = [CX; CY]; + + if isempty(V) || all(isnan(V(:))) + M = [[NaN NaN]; [NaN NaN]]; + b = [NaN; NaN]; + + options_guess.Vcen = b; + options_guess.M = M; + options_guess.b = b; + options_guess.M2 = M; + options_guess.b2 = b; + options_guess.outliers = false(1, size(V, 2)); + options_fit = options_guess; + return + end + + options_guess.img_W = size(img,1); + options_guess.img_H = size(img,2); + + % Step 5: fit a coordinate system to the positions of our QR codes. + [M, b, M2, b2, outliers] = majorityVoteCoordinateFit(v, V, options_guess); + + if ~any(isnan([M(:); b(:)])) + Vcen = invaffine([options_guess.img_H; options_guess.img_W]/2, M, b); + else + Vcen = [NaN; NaN]; + end + + xaxis = affine([1;0], M, [0; 0]); + if xaxis(1) == 0 + ang1 = (pi/2) + pi * (xaxis(2) < 0); + else + ang1 = atan(xaxis(2)/xaxis(1)); + end + + call = options_guess.d / norm(xaxis); + + options_fit = struct('ang', ang1, 'calibration', call, 'Vcen', Vcen, 'M', M, 'b', b, 'M2', M2, 'b2', b2, 'outliers', outliers); +end + +function [conv, convH, convV] = doConv(img, ang0, r, l) + r = ceil(r); + + ang = mod(ang0, pi); + + invx = sin(ang0) > 0; + invy = cos(ang0) > 0; + + ca = cos(ang); + sa = sin(ang); + + lx = ceil(l*ca); + ly = ceil(l*sa); + + X = -r:lx+r; + Y = -r:ly+r; + + % Replace with meshgrid? + XX = repmat(X, [length(Y) 1]); + YY = repmat(Y', [1 length(X)]); + + % Amplitudes for the QR arm kernel. Derived in part by finding the parameters that work best. 
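+    % Sketch of the roles (as used in the kernel below): B scales the two large
+    % circles at the arm ends, sp the positive small circles at the quarter points,
+    % and sn the negative circles at the eighth points that suppress spurious matches.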
+ B = .5; + sp = 2; + sn = 1; + + % This filter descirbes the shape of a QR arm, with large holes on each end and small holes on the arm. Negative values are to increase reliablility. + fil = B*circleFunc(XX, YY, 0, 0, r) ... + - sn*circleFunc(XX, YY, 1*lx/8, 1*ly/8, r/3) ... + + sp*circleFunc(XX, YY, lx/4, ly/4, r/3) ... + - sn*circleFunc(XX, YY, 3*lx/8, 3*ly/8, r/3) ... + + sp*circleFunc(XX, YY, lx/2, ly/2, r/3) ... + - sn*circleFunc(XX, YY, 5*lx/8, 5*ly/8, r/3) ... + + sp*circleFunc(XX, YY, 3*lx/4, 3*ly/4, r/3) ... + - sn*circleFunc(XX, YY, 7*lx/8, 7*ly/8, r/3) ... + + B*circleFunc(XX, YY, lx, ly, r); + + % Normalize + S1 = size(fil); + fil = fil - sum(sum(fil))/S1(1)/S1(2); + fil = fil / sqrt(sum(sum(fil.^2))); + + % Uncomment this line to get a better idea of what the filter looks like. +% imwrite(.5 + fil/max(max(fil))/2, 'fil.png'); + + lx = ceil(l*ca); + ly = ceil(l*sa); + + % The heart of the function, doing the convolution on the images with our filter, on both arms (H&V) of the QRs. + convH = conv2(img, fil); + convV = conv2(img, rot90(fil)); + + S = size(img); + X = (1:S(1)) + r; + Y = (1:S(2)) + r; + + convH = convH(X + invx*ly, Y + invy*lx); + convV = convV(X + (~invy)*lx, Y + invx*ly); + + % Our final result is the sum of the cubes of the H and V components. Candidate QRs are where there is both H and V signal. Cube tended to work best. + conv = convH.*convH.*convH + convV.*convV.*convV; +end +function [cx, cy, CX, CY] = findQRs(bw, conv, ang0, r, l, V_expected) + S = size(conv); + + ca0 = cos(ang0); + sa0 = sin(ang0); + + lxx = l*(sa0+ca0)/2; + lyy = l*(sa0-ca0)/2; + + [XX, YY] = meshgrid(1:S(2), 1:S(1)); + + CC = bwconncomp(conv > max(max(conv))/8); + + NQR = CC.NumObjects; + cx = NaN(1,NQR); + cy = NaN(1,NQR); + + for ii = 1:NQR + cx(ii) = mean(XX(CC.PixelIdxList{ii})); + cy(ii) = mean(YY(CC.PixelIdxList{ii})); + end + + pad = abs(l*(sa0+ca0)/2) + 2*r; + + isQR = true(1, NQR); + + bitcoord = .75*(-2:2)/l; + % Replace with meshgrid? + bity = repmat(-bitcoord, [5 1]); + bitx = repmat(-bitcoord', [1 5]); + + A = l * [ca0, sa0; -sa0, ca0]; + + BITX = A(1,1) * bitx + A(2,1) * bity; + BITY = A(1,2) * bitx + A(2,2) * bity; + + CX = NaN*cx; + CY = NaN*cx; + + m_vectors = NaN(25, NQR); + + for ii = 1:NQR + if cx(ii) + lxx < pad || cx(ii) + lxx > S(2) - pad || cy(ii) + lyy < pad || cy(ii) + lyy > S(1) - pad + isQR(ii) = false; % QR is clipping the edge of screen and decoding should not be attempted. + else + m = NaN(5); + bitave = -1:1; + + for jj = 1:length(bitcoord) % TODO: Replace for loop with one-liner. + for kk = 1:length(bitcoord) + m(kk,jj) = mean(mean(bw(round(BITY(jj,kk) + cy(ii) + lyy) + bitave, round(BITX(jj,kk) + cx(ii) + lxx) + bitave))) > .5; + end + end + + [CX(ii), CY(ii), ~, isQR(ii)] = interpretQR(m(:)); + + if ~isQR(ii) && ~any(isnan(V_expected)) + dist = 2; + + for jj = 1:25 + m_ = m(:); + m_(jj) = ~m_(jj); + [CX_, CY_, ~, isQR_] = interpretQR(m_(:)); + + if isQR_ + dist_ = norm([CX_; CY_] - V_expected); + + if dist_ < dist + CX(ii) = CX_; + CY(ii) = CY_; + isQR(ii) = true; + dist = dist_; + end + end + end + end + + m_vectors(:,ii) = m(:); + + if CX(ii) == 0 && CY(ii) == 0 % Empty bits reads as [0,0] QR, so most [0,0] are false positives. + isQR(ii) = false; + end + end + end + + CX(~isQR) = NaN; + CY(~isQR) = NaN; +end +function [CX, CY, version, checksum0] = interpretQR(m) + % From the code contained in m, attempt to read information. 
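+    % Bit layout (25 bits, as defined below): pad bits at positions 1 and 6, then
+    % 4 version bits, 8 row bits, 8 column bits, and a 3-bit checksum. The checksum
+    % is the sum of the 20 data bits modulo 8.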
+ + codelength = 25; % Total length of code + pad = [1 6]; % Pad locations of bits (indexed from 1) + vb = 4; % Version bits + rb = 8; % Number of bits to encode the row + cb = 8; % Number of bits to encode the col + cs = 3; % Checksum + + assert(numel(m) == codelength, 'code m is the wrong length') + + if size(m, 1) > 1 + m = m'; + end + + b = 2 .^ (0:7); + m(pad) = []; + p = 1; + + version = sum(m(p:p+vb-1) .* b(vb:-1:1)); p = p + vb; + CY = sum(m(p:p+rb-1) .* b(rb:-1:1)); p = p + rb; + CX = sum(m(p:p+cb-1) .* b(cb:-1:1)); p = p + cb; + checksum = sum(m(p:p+cs-1) .* b(cs:-1:1)); + + % Remove checksum, and test + m(end-cs+1:end) = []; + + checksum0 = false; + if ~isempty(checksum) + checksum0 = mod(sum(m), 2^cs) == checksum; + end + + % The 120 should be removed if we ever make diamonds this big. For now, this filters away a lot of the noise that accidently satisfies checksum. + checksum0 = checksum0 && CX <= 120 && CY <= 120; +end +function cir = circleFunc(XX, YY, x0, y0, r) + cir = (XX - x0).^2 + (YY - y0).^2 < r^2; +end +function [M, b, M2, b2, outliers] = majorityVoteCoordinateFit(v, V, options_guess) + c = cos(options_guess.ang); + s = sin(options_guess.ang); + M_guess = [[s, c]; [-c, s]] / options_guess.calibration * options_guess.d; + + % The positions and labels of candidate QR codes define candidate coordinate systems. + % We want to find which candidate is correct. + b_guesses = v - M_guess * V; + + duplicates = false(1, size(b_guesses,2)); + + for ii = 1:size(V,2) + dduplicates = V(1,:) == V(1, ii) & V(2,:) == V(2, ii); + + if sum(dduplicates) > 1 + duplicates = duplicates | dduplicates; + end + end + + if isfield(options_guess, 'X_expected') && isfield(options_guess, 'Y_expected') + V_expected = [options_guess.X_expected; options_guess.Y_expected]; + else + V_expected = [NaN; NaN]; + end + b_expected = [options_guess.img_W; options_guess.img_H]/2 - M_guess * V_expected; + + % Setup variables that we will change as we loop. + mostvotes = 1; + b_guess = [NaN; NaN]; + outliers = true(1, size(b_guesses,2)); + dist = 3*options_guess.d; + + % Radius within which b guesses are considered the same guess. + R = options_guess.d; + + for ii = 1:size(b_guesses,2) % For every candidate... + if ~any(isnan(b_guesses(:, ii))) && ~duplicates(ii) + votes = sum((b_guesses - b_guesses(:, ii)).^2) < R*R & ~duplicates; % How many other candidates agree? + + if mostvotes <= sum(votes) % If this is a new record... + b_guess = mean(b_guesses(:, votes), 2); % Estimate b as the average. + dist_ = norm(b_guess - b_expected); + + if (sum(votes) > 0 && dist_ < dist) || (mostvotes < sum(votes) || dist_ < dist) + dist = dist_; + mostvotes = sum(votes); % Record the record. + outliers = ~votes; % Record the candidates that were outside. + end + end + end + end + + % Trim the outliers. + v_trim = v(:, ~outliers); + V_trim = V(:, ~outliers); + + if isempty(v_trim) + M = [[NaN NaN]; [NaN NaN]]; + b = [NaN; NaN]; + + M2 = M; + b2 = b; + + return; + end + + M2 = M_guess; + b2 = b_guess; + + if sum(~outliers) >= 2 + % Fit the candidates fully to an affine transformation. 
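+        % The six fit parameters are the two columns of M followed by b; the
+        % objective below is the summed squared residual sum(||v - (M*V + b)||^2)
+        % over the inlier candidates.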
+ fun = @(p)( leastsquares(v_trim, V_trim, [p(1:2)', p(3:4)'], p(5:6)') ); + p_guess = [M_guess(:); b_guess]'; + p_full = fminsearch(fun, p_guess, struct('TolFun', 1, 'TolX', 1e-1)); + + M = [p_full(1:2)', p_full(3:4)']; + b = p_full(5:6)'; + else + M = M2; + b = b2; + end +end + +function img = threshold(img) + img = imbinarize(imgaussfilt(img,1)); +end +function img = flatten(img) + img = imgaussfilt(img,10) - imgaussfilt(img,2); +end + +function v_ = affine(v, M, b) + % v and v_ are either column vectors (2x1) or arrays of column vectors (2xN) of the same size + % M is a matrix (2x2) + % b is a column vector (2x1) + v_ = M * v + b; +end +function v = invaffine(v_, M, b) + % v and v_ are either column vectors (2x1) or arrays of column vectors (2xN) of the same size + % M is a matrix (2x2) + % b is a column vector (2x1) + v = M \ (v_ - b); +end +function fom = leastsquares(v_, v, M, b) + fom = sum(sum((v_ - affine(v, M, b)).^2)); +end diff --git a/+Modules/Imaging.m b/+Modules/Imaging.m index 9dcd68876..9f07d105a 100644 --- a/+Modules/Imaging.m +++ b/+Modules/Imaging.m @@ -3,17 +3,19 @@ % Simply enforces required properties. For future use. properties - calibration = 1; % Calibration set and used by CommandCenter (um/#) % When saving, instructs CommandCenter to ignore the last stage (finest moving) % This can be useful for confocal setups, where the stage is also % used for scanning. uses_stage = ''; path = ''; end - properties(Constant,Hidden) + properties(Constant, Hidden) modules_package = 'Imaging'; end - properties(Abstract,SetObservable) + properties(GetObservable, SetObservable) + calibration = 1; % Calibration set and used by CommandCenter (um/#) + end + properties(Abstract, GetObservable, SetObservable) % Region of Interest. Set and Get methods should be used to make sure this works well!!! 
% Format should be [xMin xMax; yMin yMax] % Note, pixels are the unit for a CCD, voltage for galvos diff --git a/CommandCenter.m b/CommandCenter.m index 26cb9a9a8..9ad401544 100644 --- a/CommandCenter.m +++ b/CommandCenter.m @@ -915,3 +915,4 @@ function ui_module_build_Callback(hObject, eventdata, handles) % eventdata reserved - to be defined in a future version of MATLAB % handles structure with handles and user data (see GUIDATA) Base.Module.uibuild; +o \ No newline at end of file diff --git a/HelperFunctions/PulseSequence/+calibrate/CalibrateDelay.m b/HelperFunctions/PulseSequence/+calibrate/CalibrateDelay.m index 1c2241327..42f05a6cf 100644 --- a/HelperFunctions/PulseSequence/+calibrate/CalibrateDelay.m +++ b/HelperFunctions/PulseSequence/+calibrate/CalibrateDelay.m @@ -3,7 +3,7 @@ % laserline = hardware line for laser being calibrated % APDline = hardware line for APD being used for calibration -apdBin = 0.1; %resolution of APD bin in us +apdBin = 0.05; %resolution of APD bin in us maxDelay = 10; %maximum expeceted delay in us maxCounts = 1e2; nidaq = Drivers.NIDAQ.dev.instance(NIDAQ_dev); diff --git a/Modules/+Drivers/+SignalGenerators/@SignalGenerator/SignalGenerator.m b/Modules/+Drivers/+SignalGenerators/SignalGenerator.m similarity index 100% rename from Modules/+Drivers/+SignalGenerators/@SignalGenerator/SignalGenerator.m rename to Modules/+Drivers/+SignalGenerators/SignalGenerator.m diff --git a/Modules/+Drivers/ArduinoServo.m b/Modules/+Drivers/ArduinoServo.m new file mode 100644 index 000000000..fc2f72314 --- /dev/null +++ b/Modules/+Drivers/ArduinoServo.m @@ -0,0 +1,70 @@ +classdef ArduinoServo < Modules.Driver + %ARDUINOSERVO Connects with a hwserver Arduino and uses one pin to control a servo. + % + % Call with the 1) hostname of the host computer (singleton based on host), and 2) the (integer) pin. + + properties (Constant) + hwname = 'Arduino'; + end + properties (SetAccess=immutable) + connection + pin + end + properties (GetObservable, SetObservable) + angle = Prefs.Double(NaN, 'min', 0, 'max', 180, 'set', 'set_angle', 'allow_nan', true); + end + methods(Static) + function obj = instance(host, pin) + mlock; + persistent Objects + if isempty(Objects) + Objects = Drivers.ArduinoServo.empty(1,0); + end + [~,resolvedIP] = resolvehost(host); + for i = 1:length(Objects) + if isvalid(Objects(i)) && isequal({resolvedIP, pin}, Objects(i).singleton_id) + obj = Objects(i); + return + end + end + obj = Drivers.ArduinoServo(resolvedIP, pin); + obj.singleton_id = {resolvedIP, pin}; + Objects(end+1) = obj; + end + end + methods(Access=private) + function obj = ArduinoServo(host, pin) + obj.connection = hwserver(host); + obj.com('?'); % This command pings the server for an appropriate response. If something is wrong, we will catch it here. + obj.pin = pin; + end + end + methods + function response = com(obj,funcname,varargin) %keep this + response = obj.connection.com(obj.hwname,funcname,varargin{:}); + end + end + methods + function delete(obj) + delete(obj.connection) + end + function val = set_angle(obj,val,~) % Locks to new angle (0 -> 180 standard), then unlocks. + try + errorIfNotOK(obj.com(['s ' num2str(obj.pin) ' ' num2str(val)])); + catch % If fail, reload and try again. + obj.connection.reload('Arduino'); + errorIfNotOK(obj.com(['s ' num2str(obj.pin) ' ' num2str(val)])); + end + end + function lock(obj) % Tells the arduino to get the servo to apply electronic feedback against any force. Without the lock, the servo can spin ~freely by hand. 
With the lock, this is more difficult. Only works for one pin at a time at the moment. + errorIfNotOK(obj.com(['l ' num2str(obj.pin)])); + end + function unlock(obj) % Unlocks any locked pin. + errorIfNotOK(obj.com('u')); + end + end +end + +function errorIfNotOK(str) + assert(strcmp(str, 'OK'), ['Arduino Error: ' str]); +end \ No newline at end of file diff --git a/Modules/+Drivers/Keithley2400.m b/Modules/+Drivers/Keithley2400.m index b07822b23..166f1c1cb 100644 --- a/Modules/+Drivers/Keithley2400.m +++ b/Modules/+Drivers/Keithley2400.m @@ -2,6 +2,7 @@ %KEITHLEY2400 Interfaces with the eponymous signal generator. properties (SetAccess=protected, Hidden) + GPIBNumber % GPIB number GPIBAddr % GPIB address RsrcName % Resource name of the VISA instrument VisaHandle % Handle of the VISA object @@ -20,7 +21,7 @@ % Constructor functions methods (Static) - function obj = instance(GPIBAddr) + function obj = instance(GPIBNumber,GPIBAddr) mlock; persistent Objects if isempty(Objects) @@ -32,15 +33,16 @@ return end end - obj = Drivers.Keithley2400(GPIBAddr); + obj = Drivers.Keithley2400(GPIBNumber,GPIBAddr); obj.singleton_id = GPIBAddr; Objects(end+1) = obj; end end methods (Access=private) - function obj = Keithley2400(GPIBAddr) + function obj = Keithley2400(GPIBNumber,GPIBAddr) + obj.GPIBNumber = GPIBNumber; obj.GPIBAddr = GPIBAddr; - obj.RsrcName = ['GPIB0::' num2str(obj.GPIBAddr) '::INSTR']; + obj.RsrcName = ['GPIB' num2str(obj.GPIBNumber) '::' num2str(obj.GPIBAddr) '::INSTR']; obj.VisaHandle = visa('ni',obj.RsrcName); obj.openConnection(); end diff --git a/Modules/+Drivers/PM100.m b/Modules/+Drivers/PM100.m index 9ded47976..1c562a0b5 100644 --- a/Modules/+Drivers/PM100.m +++ b/Modules/+Drivers/PM100.m @@ -87,6 +87,15 @@ function set_average_count(obj,count) out = str2num(out); end + function set_range(obj,auto) + obj.command(sprintf('CURR:RANG:AUTO %i',round(auto))); + end + + function out = get_range(obj) + out = obj.query('CURR:RANG:AUTO?'); + out = str2num(out); + end + function out = get_power(obj, units) if ~strcmp(obj.unit_status, units) if strcmp(units, 'DBM') diff --git a/Modules/+Drivers/slm_6spot.m b/Modules/+Drivers/slm_6spot.m new file mode 100644 index 000000000..64f4b740e --- /dev/null +++ b/Modules/+Drivers/slm_6spot.m @@ -0,0 +1,37 @@ +kangle= 60*pi/180; +kangle = 93*pi/180; +kstep = 13e-4; +% kstep = 25e-4; +% nspots = 6; +% ki=[kstep*(-1) kstep*(-2) kstep*(-3) kstep*(-4) kstep*(-5) kstep*(-6)]; +% %w=1./[1:nspots]; +% w=[1 0.45 0.52 0.5 0.45 0.39]; +w=[1.7 2 1.1 0.5 0.9 1]; +w=[1.5 3 0.3 2.5 2.5 2]; +% w=[0 0 1 0 0 0] +nspots = 6; +ki=[kstep*(-1) kstep*(-2) kstep*(0) kstep*(2) kstep*(1) kstep*(-3)]; +phi=[0 pi/2 0 pi/2 pi pi]; +%w=1./[1:nspots]; + +%w=[1 0.65 0.52 0.51 0.4 0.5]; + + + +% setting the range of sweep in weight to calculate the real matrix +% component +weights = cell(1, nspots); +phis = cell(1, nspots); +images = cell(1, nspots); +indices = cell(1, nspots); +maxes = cell(1, nspots); +masks = zeros(1920,1200,nspots); +fullmask = zeros(1920,1200); + for j=1:nspots + k = ki(j); + slm.blaze(k*cos(kangle),k*sin(kangle),phi(j)); + masks(:,:,j) = slm.blaze(k*cos(kangle),k*sin(kangle),phi(j)); %all 0 + fullmask = fullmask + sqrt(w(j)/sum(w))*exp(1i*masks(:,:,j)); + end +fullmask = angle(fullmask); +slm.load_data(fullmask) diff --git a/Modules/+Experiments/+PulseSequenceSweep/@AllOpticalT1/AllOpticalT1.m b/Modules/+Experiments/+PulseSequenceSweep/@AllOpticalT1/AllOpticalT1.m index 38dd79f2a..e7f9ecff4 100644 --- 
a/Modules/+Experiments/+PulseSequenceSweep/@AllOpticalT1/AllOpticalT1.m +++ b/Modules/+Experiments/+PulseSequenceSweep/@AllOpticalT1/AllOpticalT1.m @@ -9,6 +9,7 @@ resOffset_us = 0.1; resPulse1Time_us = 10; resPulse2Time_us = 10; + APDreadouttime_us = 10; tauTimes_us = 'linspace(0,100,101)'; %eval(tauTimes_us) will define sweepTimes end properties @@ -24,7 +25,7 @@ methods(Access=private) function obj = AllOpticalT1() obj.prefs = [obj.prefs,{'resLaser','repumpLaser','APDline','repumpTime_us','resOffset_us',... - 'resPulse1Time_us','resPulse2Time_us','tauTimes_us'}]; %additional preferences not in superclass + 'resPulse1Time_us','resPulse2Time_us','APDreadouttime_us','tauTimes_us'}]; %additional preferences not in superclass obj.loadPrefs; end end @@ -35,16 +36,20 @@ function PreRun(obj,~,~,ax) %prepare axes for plotting hold(ax,'on'); + % + plotH = plot(ax,obj.tauTimes,NaN(length(obj.tauTimes),1),'color','k'); + plotH(2) = plot(ax,obj.tauTimes,NaN(length(obj.tauTimes),1),'color','b'); %plot data bin 1 - plotH = plot(ax,obj.tauTimes,obj.data.sumCounts(:,1,1),'color','b'); + %plotH = plot(ax,obj.tauTimes,obj.data.sumCounts(:,1,1),'color','b'); %plot data bin 1 errors - plotH(2) = plot(ax,obj.tauTimes,obj.data.sumCounts(:,1,1)+obj.data.stdCounts(:,1,1),'color',[1 .5 0],'LineStyle','--'); %upper bound - plotH(3) = plot(ax,obj.tauTimes,obj.data.sumCounts(:,1,1)-obj.data.stdCounts(:,1,1),'color',[1 .5 0],'LineStyle','--'); %lower bound + %plotH(2) = plot(ax,obj.tauTimes,obj.data.sumCounts(:,1,1)+obj.data.stdCounts(:,1,1),'color',[1 .5 0],'LineStyle','--'); %upper bound + %plotH(3) = plot(ax,obj.tauTimes,obj.data.sumCounts(:,1,1)-obj.data.stdCounts(:,1,1),'color',[1 .5 0],'LineStyle','--'); %lower bound %plot data bin 2 - plotH(4) = plot(ax,obj.tauTimes,obj.data.sumCounts(:,2,1),'color','b'); + %plotH(4) = plot(ax,obj.tauTimes,obj.data.sumCounts(:,2,1),'color','b'); + %plotH = plot(ax,obj.tauTimes,obj.data.sumCounts(1,:,2),'color','b'); %plot data bin 2 errors - plotH(5) = plot(ax,obj.tauTimes,obj.data.sumCounts(:,2,1)+obj.data.stdCounts(:,2,1),'color',[1 .5 0],'LineStyle','--'); %upper bound - plotH(6) = plot(ax,obj.tauTimes,obj.data.sumCounts(:,2,1)-obj.data.stdCounts(:,2,1),'color',[1 .5 0],'LineStyle','--'); %lower bound + %plotH(5) = plot(ax,obj.tauTimes,obj.data.sumCounts(:,2,1)+obj.data.stdCounts(:,2,1),'color',[1 .5 0],'LineStyle','--'); %upper bound + %plotH(6) = plot(ax,obj.tauTimes,obj.data.sumCounts(:,2,1)-obj.data.stdCounts(:,2,1),'color',[1 .5 0],'LineStyle','--'); %lower bound ax.UserData.plots = plotH; ylabel(ax,'Normalized PL'); xlabel(ax,'Delay Time \tau (\mus)'); @@ -53,21 +58,23 @@ function PreRun(obj,~,~,ax) end function UpdateRun(obj,~,~,ax,~,~) - if obj.averages > 1 - averagedData = squeeze(nanmean(obj.data.sumCounts,3)); - meanError = squeeze(nanmean(obj.data.stdCounts,3)); - else - averagedData = obj.data.sumCounts; - meanError = obj.data.stdCounts; - end + %if obj.averages > 1 + averagedData = squeeze(nanmean(obj.data.sumCounts,1)); + meanError = squeeze(nanmean(obj.data.stdCounts,1)); +% else +% averagedData = obj.data.sumCounts; +% meanError = obj.data.stdCounts; +% end %grab handles to data from axes plotted in PreRun - ax.UserData.plots(1).YData = averagedData(:,1); - ax.UserData.plots(2).YData = averagedData(:,1) + meanError(:,1); - ax.UserData.plots(3).YData = averagedData(:,1) - meanError(:,1); - ax.UserData.plots(4).YData = averagedData(:,2); - ax.UserData.plots(5).YData = averagedData(:,2) + meanError(:,2); - ax.UserData.plots(6).YData = averagedData(:,2) - 
meanError(:,2); +% ax.UserData.plots(1).YData = averagedData(:,1); +% ax.UserData.plots(2).YData = averagedData(:,1) + meanError(:,1); +% ax.UserData.plots(3).YData = averagedData(:,1) - meanError(:,1); +% ax.UserData.plots(4).YData = averagedData(:,2); +% ax.UserData.plots(5).YData = averagedData(:,2) + meanError(:,2); +% ax.UserData.plots(6).YData = averagedData(:,2) - meanError(:,2); + ax.UserData.plots(1).YData = averagedData(:,1)'; + ax.UserData.plots(2).YData = averagedData(:,2)'; drawnow; end diff --git a/Modules/+Experiments/+PulseSequenceSweep/@AllOpticalT1/BuildPulseSequence.m b/Modules/+Experiments/+PulseSequenceSweep/@AllOpticalT1/BuildPulseSequence.m index a9e85b581..64a0fd5d3 100644 --- a/Modules/+Experiments/+PulseSequenceSweep/@AllOpticalT1/BuildPulseSequence.m +++ b/Modules/+Experiments/+PulseSequenceSweep/@AllOpticalT1/BuildPulseSequence.m @@ -2,21 +2,26 @@ %BuildPulseSequence Builds pulse sequence for performing all-optical T1 %characterization given the index (tauIndex) in tauTimes +assert(obj.APDreadouttime_us<=obj.resPulse1Time_us & obj.APDreadouttime_us<=obj.resPulse2Time_us,'APD readout time too short'); + s = sequence('AllOpticalT1'); -repumpChannel = channel('repump','color','g','hardware',obj.repumpLaser.PBline-1); -resChannel = channel('resonant','color','r','hardware',obj.resLaser.PBline-1); -APDchannel = channel('APDgate','color','b','hardware',obj.APDline,'counter','APD1'); +repumpChannel = channel('repump','color','g','hardware',obj.repumpLaser.PB_line-1); +resChannel = channel('resonant','color','r','hardware',obj.resLaser.PB_line-1); +APDchannel = channel('APDgate','color','b','hardware',obj.APDline-1,'counter','APD1'); s.channelOrder = [repumpChannel, resChannel, APDchannel]; g = node(s.StartNode,repumpChannel,'delta',0); -g = node(g,repumpChannel,'units','us','delta',obj.repumpTime_us); -r = node(g,resChannel,'units','us','delta',obj.resOffset_us); -node(r,APDchannel,'delta',0); -r = node(r,resChannel,'units','us','delta',obj.resPulse1Time_us); -node(r,APDchannel,'delta',0); -r = node(g,resChannel,'units','us','delta',obj.tauTimes(tauIndex)); -node(r,APDchannel,'delta',0); -r = node(r,resChannel,'units','us','delta',obj.resPulse2Time_us); -node(r,APDchannel,'delta',0); +gstop = node(g,repumpChannel,'units','us','delta',obj.repumpTime_us); +r1start = node(gstop,resChannel,'units','us','delta',obj.resOffset_us); +%node(r,APDchannel,'delta',0); +r1stop = node(r1start,resChannel,'units','us','delta',obj.resPulse1Time_us); +node(r1stop,APDchannel,'units','us','delta',-obj.APDreadouttime_us); +node(r1stop,APDchannel,'units','us','delta',0); +r2start = node(r1stop,resChannel,'units','us','delta',obj.tauTimes(tauIndex)); +node(r2start,APDchannel,'units','us','delta',0); +node(r2start,APDchannel,'units','us','delta',obj.APDreadouttime_us); +%node(r,APDchannel,'delta',0); +r2stop = node(r2start,resChannel,'units','us','delta',obj.resPulse2Time_us); +%node(r,APDchannel,'delta',0); end diff --git a/Modules/+Experiments/+PulseSequenceSweep/@OpticalSpinPolarization/BuildPulseSequence.m b/Modules/+Experiments/+PulseSequenceSweep/@OpticalSpinPolarization/BuildPulseSequence.m index 6a1fb73a0..3532590ce 100644 --- a/Modules/+Experiments/+PulseSequenceSweep/@OpticalSpinPolarization/BuildPulseSequence.m +++ b/Modules/+Experiments/+PulseSequenceSweep/@OpticalSpinPolarization/BuildPulseSequence.m @@ -5,8 +5,8 @@ nCounters = obj.nCounterBins; s = sequence('OpticalSpinPolarization'); -repumpChannel = channel('repump','color','g','hardware',obj.repumpLaser.PBline-1); -resChannel 
= channel('resonant','color','r','hardware',obj.resLaser.PBline-1); +repumpChannel = channel('repump','color','g','hardware',obj.repumpLaser.PB_line-1); +resChannel = channel('resonant','color','r','hardware',obj.resLaser.PB_line-1); APDchannel = channel('APDgate','color','b','hardware',obj.APDline-1,'counter','APD1'); s.channelOrder = [repumpChannel, resChannel, APDchannel]; g = node(s.StartNode,repumpChannel,'delta',0); diff --git a/Modules/+Experiments/+SlowScan/@CWAVEscan/CWAVEscan.m b/Modules/+Experiments/+SlowScan/@CWAVEscan/CWAVEscan.m new file mode 100644 index 000000000..f8a2bd91e --- /dev/null +++ b/Modules/+Experiments/+SlowScan/@CWAVEscan/CWAVEscan.m @@ -0,0 +1,112 @@ +classdef CWAVEscan < Experiments.SlowScan.SlowScan_invisible + %Open Open-loop laser sweep for slowscan + % Set center freq_THz + % Sweeps over percents (usually corresponding to a piezo in a resonator) + % - If tune_coarse = true, first moves laser to that frequency; + % otherwise scan is perfomed wherever the laser is + % - If center_scan = true, percents are relative to wherever the + % initial percentage is prior to starting sweep. This can be quite + % useful in combination with tune_coarse for lasers that don't leave + % the percent centered at 50 after tuning. + % + % NOTE: plotting averages over average loop, which might not be same + % frequencies, or even close if laser mode hops. All averages are saved. + + properties(SetObservable,GetObservable,AbortSet) + tune_coarse = Prefs.Boolean(true, 'help_text', 'Whether to tune to the coarse value before the scan.'); + center_scan = Prefs.Boolean(false, 'help_text', 'When true, percents will be shifted after tune_coarse completes to compensate position of percent.'); + post_scan_tune_max = Prefs.Boolean(true, 'help_text', 'Whether to tune to the maximum value after the scan has completed.'); + end + properties(SetObservable,AbortSet) + freq_THz = 470; + CWAVE_voltage_in = 'linspace(0,100,101)'; %eval(percents) will define percents for open-loop scan [scan_points] + end + properties(SetAccess=private,Hidden) + percentInitialPosition = 50; % used to center scan if user wants + keithley; + start_volt; + end + properties(Constant) + xlabel = 'Percent (%)'; + end + methods(Static) + % Static instance method is how to call this experiment + % This is a separate file + obj = instance() + end + methods(Access=private) + function obj = CWAVEscan() + obj.scan_points = eval(obj.CWAVE_voltage_in); + obj.prefs = [{'freq_THz','center_scan','tune_coarse','post_scan_tune_max','CWAVE_voltage_in'}, obj.prefs]; + obj.loadPrefs; % Load prefs specified as obj.prefs + end + end + + methods + function s = BuildPulseSequence(obj,freqIndex) + %BuildPulseSequence Builds pulse sequence for repump pulse followed by APD + %collection during resonant driving + tunePoint = obj.scan_points(freqIndex); + if obj.center_scan + tunePoint = tunePoint - (50-obj.percentInitialPosition); + % Only allow skipping points if center_scan enabled; + % otherwise user entered a bad range for percents should error + if tunePoint < 0 || tunePoint > 100 + s = false; + return % Skip point by returning false + end + end + %obj.resLaser.TunePercent(tunePoint); + obj.keithley.set_voltage(tunePoint); + s = BuildPulseSequence@Experiments.SlowScan.SlowScan_invisible(obj,freqIndex); + end + function PreRun(obj,~,managers,ax) + if obj.tune_coarse + obj.resLaser.TuneCoarse(obj.freq_THz); + end + %obj.percentInitialPosition = obj.resLaser.GetPercent; + PreRun@Experiments.SlowScan.SlowScan_invisible(obj,[],managers,ax); + 
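+            % Connect to the Keithley 2400 (GPIB board 0, address 16), then ramp its
+            % output from the present voltage to the first scan point in roughly 1 V
+            % steps (50 ms pause per step) before enabling the output.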
obj.keithley = Drivers.Keithley2400.instance(0,16); + obj.start_volt = obj.keithley.get_voltage; + % + for volt = linspace(obj.start_volt,obj.scan_points(1),abs(ceil(obj.start_volt-obj.scan_points(1)))) + pause(0.05); + obj.keithley.set_voltage(volt); + end + % + obj.keithley.set_voltage(obj.scan_points(1)); + obj.keithley.set_output(1); + end + function PostRun(obj,~,managers,ax) + curr_volt = obj.keithley.get_voltage; + for volt = linspace(curr_volt,obj.start_volt,abs(ceil(curr_volt-obj.start_volt))) + obj.keithley.set_voltage(volt); + pause(0.05); + end + obj.keithley.set_voltage(obj.start_volt); + obj.keithley.set_output(0); + if obj.post_scan_tune_max + x = obj.data.freqs_measured; + y = obj.data.sumCounts; + + % Find the frequency of the maximum value. + arg = find(y == nanmax(y)); + if isempty(arg) + target_max = NaN; + else + target_max = x(arg(1)); + end + + obj.meta.post_scan_freq_max = target_max; + obj.resLaser.tune(obj.resLaser.c/target_max); + end + end + function set.CWAVE_voltage_in(obj,val) + numeric_vals = str2num(val); %#ok str2num uses eval but is more robust for numeric input + assert(~isempty(numeric_vals),'Must have at least one value for percents.'); + assert(min(numeric_vals)>=0&&max(numeric_vals)<=100,'Voltage must be between 0 and 100 (inclusive).'); + obj.scan_points = numeric_vals; + obj.CWAVE_voltage_in = val; + end + end +end diff --git a/Modules/+Experiments/+SlowScan/@CWAVEscan/instance.m b/Modules/+Experiments/+SlowScan/@CWAVEscan/instance.m new file mode 100644 index 000000000..20511035d --- /dev/null +++ b/Modules/+Experiments/+SlowScan/@CWAVEscan/instance.m @@ -0,0 +1,20 @@ +function obj = instance(varargin) + % This file is what locks the instance in memory such that singleton + % can perform properly. + % For the most part, varargin will be empty, but if you know what you + % are doing, you can modify/use the input (just be aware of singleton_id) + mlock; + persistent Objects + if isempty(Objects) + Objects = Experiments.SlowScan.CWAVEscan.empty(1,0); + end + for i = 1:length(Objects) + if isvalid(Objects(i)) && isequal(varargin,Objects(i).singleton_id) + obj = Objects(i); + return + end + end + obj = Experiments.SlowScan.CWAVEscan(varargin{:}); + obj.singleton_id = varargin; + Objects(end+1) = obj; +end \ No newline at end of file diff --git a/Modules/+Experiments/+SlowScan/@EOMscan/EOMscan.m b/Modules/+Experiments/+SlowScan/@EOMscan/EOMscan.m new file mode 100644 index 000000000..89dec8ecf --- /dev/null +++ b/Modules/+Experiments/+SlowScan/@EOMscan/EOMscan.m @@ -0,0 +1,140 @@ +classdef EOMscan < Experiments.SlowScan.SlowScan_invisible + %Open Open-loop laser sweep for slowscan + % Set center freq_THz + % Sweeps over percents (usually corresponding to a piezo in a resonator) + % - If tune_coarse = true, first moves laser to that frequency; + % otherwise scan is perfomed wherever the laser is + % - If center_scan = true, percents are relative to wherever the + % initial percentage is prior to starting sweep. This can be quite + % useful in combination with tune_coarse for lasers that don't leave + % the percent centered at 50 after tuning. + % + % NOTE: plotting averages over average loop, which might not be same + % frequencies, or even close if laser mode hops. All averages are saved. 
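+    % Unlike the piezo-based scans, this variant sweeps the frequency of an external
+    % MW source (MW_freq_MHz), presumably driving an EOM that puts sidebands on the
+    % resonant laser; the laser itself is only coarse-tuned if tune_coarse is enabled.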
+ + properties(SetObservable,GetObservable,AbortSet) + tune_coarse = Prefs.Boolean(false, 'help_text', 'Whether to tune to the coarse value before the scan.'); + center_scan = Prefs.Boolean(false, 'help_text', 'When true, percents will be shifted after tune_coarse completes to compensate position of percent.'); + post_scan_tune_max = Prefs.Boolean(false, 'help_text', 'Whether to tune to the maximum value after the scan has completed.'); + measure_laser_freq = Prefs.Boolean(false, 'help_text', 'Whether to measure the laser wavelength with the wavemeter.'); + end + properties(SetObservable,AbortSet) + laser_freq_THz = 470; + MW_freq_MHz = 'linspace(0,100,101)'; %eval(percents) will define percents for open-loop scan [scan_points] + MWsource = Modules.Source.empty(1,0); + end + properties(SetAccess=private,Hidden) + percentInitialPosition = 50; % used to center scan if user wants + end + properties(Constant) + xlabel = 'Percent (%)'; + end + methods(Static) + % Static instance method is how to call this experiment + % This is a separate file + obj = instance() + end + methods(Access=private) + function obj = EOMscan() + obj.scan_points = eval(obj.MW_freq_MHz); + obj.prefs = [{'MWsource','laser_freq_THz','MW_freq_MHz','measure_laser_freq'}, obj.prefs]; + obj.loadPrefs; % Load prefs specified as obj.prefs + end + end + + methods + function s = BuildPulseSequence(obj,freqIndex) + %BuildPulseSequence Builds pulse sequence for repump pulse followed by APD + %collection during resonant driving + tunePoint = obj.scan_points(freqIndex); +% if obj.center_scan +% tunePoint = tunePoint - (50-obj.percentInitialPosition); +% % Only allow skipping points if center_scan enabled; +% % otherwise user entered a bad range for percents should error +% if tunePoint < 0 || tunePoint > 100 +% s = false; +% return % Skip point by returning false +% end +% end +% obj.resLaser.TunePercent(tunePoint); + obj.MWsource.set_frequency(tunePoint); + %BuildPulseSequence Builds pulse sequence for repump pulse followed by APD + %collection during resonant driving + if freqIndex > 1 + s = obj.sequence; + else + s = sequence('SlowScan'); %#ok Calling HelperFunction + repumpChannel = channel('Repump','color','g','hardware',obj.repumpLaser.PB_line-1); + resChannel = channel('Resonant','color','r','hardware',obj.resLaser.PB_line-1); + MWChannel = channel('Resonant','color','r','hardware',obj.MWsource.PB_line-1); + APDchannel = channel('APDgate','color','b','hardware',obj.APDline-1,'counter','APD1'); + s.channelOrder = [repumpChannel, resChannel, APDchannel]; + g = node(s.StartNode,repumpChannel,'units','us','delta',0); + g = node(g,repumpChannel,'units','us','delta',obj.repumpTime_us); + r = node(g,resChannel,'units','us','delta',obj.resOffset_us); + node(r,MWChannel,'units','us','delta',0); + node(r,APDchannel,'units','us','delta',0); + r = node(r,resChannel,'units','us','delta',obj.resTime_us); + node(r,MWChannel,'units','us','delta',0); + node(r,APDchannel,'units','us','delta',0); + + obj.sequence = s; + end + end + function PreRun(obj,~,managers,ax) + if obj.tune_coarse + obj.resLaser.TuneCoarse(obj.freq_THz); + end + %obj.percentInitialPosition = obj.resLaser.GetPercent; + PreRun@Experiments.SlowScan.SlowScan_invisible(obj,[],managers,ax); + end + function UpdateRun(obj,~,~,ax,average,freqIndex) + %pull frequency that latest sequence was run at + if obj.measure_laser_freq + if obj.wavemeter_override + obj.data.freqs_measured(average,freqIndex) = obj.wavemeter.getFrequency; + else + obj.data.freqs_measured(average,freqIndex) = 
obj.resLaser.getFrequency; + end + end + if obj.averages > 1 + averagedData = squeeze(nanmean(obj.data.sumCounts,3)); + meanError = squeeze(nanmean(obj.data.stdCounts,3))*sqrt(obj.samples); + else + averagedData = obj.data.sumCounts; + meanError = obj.data.stdCounts*sqrt(obj.samples); + end + + %grab handles to data from axes plotted in PreRun + ax.UserData.plots{1}.YData = averagedData(1,:); + ax.UserData.plots{1}.YNegativeDelta = meanError(1,:); + ax.UserData.plots{1}.YPositiveDelta = meanError(1,:); + ax.UserData.plots{1}.update; + ax.UserData.plots{2}.YData = nanmean(obj.data.freqs_measured,1); + drawnow limitrate; + end + function PostRun(obj,~,managers,ax) + if obj.post_scan_tune_max + x = obj.data.freqs_measured; + y = obj.data.sumCounts; + + % Find the frequency of the maximum value. + arg = find(y == nanmax(y)); + if isempty(arg) + target_max = NaN; + else + target_max = x(arg(1)); + end + + obj.meta.post_scan_freq_max = target_max; + obj.resLaser.tune(obj.resLaser.c/target_max); + end + end + function set.MW_freq_MHz(obj,val) + numeric_vals = str2num(val); %#ok str2num uses eval but is more robust for numeric input + assert(~isempty(numeric_vals),'Must have at least one value for percents.'); + obj.scan_points = numeric_vals; + obj.MW_freq_MHz = val; + end + end +end diff --git a/Modules/+Experiments/+SlowScan/@EOMscan/instance.m b/Modules/+Experiments/+SlowScan/@EOMscan/instance.m new file mode 100644 index 000000000..74e223bb8 --- /dev/null +++ b/Modules/+Experiments/+SlowScan/@EOMscan/instance.m @@ -0,0 +1,20 @@ +function obj = instance(varargin) + % This file is what locks the instance in memory such that singleton + % can perform properly. + % For the most part, varargin will be empty, but if you know what you + % are doing, you can modify/use the input (just be aware of singleton_id) + mlock; + persistent Objects + if isempty(Objects) + Objects = Experiments.SlowScan.EOMscan.empty(1,0); + end + for i = 1:length(Objects) + if isvalid(Objects(i)) && isequal(varargin,Objects(i).singleton_id) + obj = Objects(i); + return + end + end + obj = Experiments.SlowScan.EOMscan(varargin{:}); + obj.singleton_id = varargin; + Objects(end+1) = obj; +end \ No newline at end of file diff --git a/Modules/+Experiments/Spectrum.m b/Modules/+Experiments/Spectrum.m index c2c42037e..37e72c2fc 100644 --- a/Modules/+Experiments/Spectrum.m +++ b/Modules/+Experiments/Spectrum.m @@ -128,6 +128,9 @@ function abort(obj) dat.wavelength = obj.data.x; dat.intensity = obj.data.y; dat.meta = rmfield(obj.data,{'x','y'}); +% if ~isempty(managers.Imaging.current_image.info.image) +% dat.image = managers.Imaging.current_image.info; +% end end end diff --git a/Modules/+Imaging/+Thorlabs/uc480.m b/Modules/+Imaging/+Thorlabs/uc480.m new file mode 100644 index 000000000..4de27990c --- /dev/null +++ b/Modules/+Imaging/+Thorlabs/uc480.m @@ -0,0 +1,125 @@ +classdef uc480 < Modules.Imaging + % Connects with old-style Thorlabs cameras. 
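+    % Assumes the ThorCam uc480DotNet.dll is installed at its default path; the
+    % constructor opens the first available camera in software-trigger mode with
+    % 8-bit RGBA readout.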
+ + properties + maxROI = [-1 1; -1 1]; + + prefs = {'exposure'}; + + cam = [] + MemId = [] + end + properties(GetObservable, SetObservable) + exposure = Prefs.Double(NaN, 'units', 'ms', 'min', 0, 'max', inf, 'allow_nan', true, 'set', 'set_exposure'); + end + properties(GetObservable, SetObservable) + bitdepth = 0; + resolution = [120 120]; % Pixels + ROI = [-1 1;-1 1]; + continuous = false; + end + methods(Access=private) + function obj = uc480() + % Open camera connection + try + NET.addAssembly('C:\Program Files\Thorlabs\Scientific Imaging\DCx Camera Support\Develop\DotNet\uc480DotNet.dll'); + catch + error('Could not load uc480 NET. Make sure that ThorCam is installed.') + end + + % Create camera object handle and open the 1st available camera + obj.cam = uc480.Camera; + obj.cam.Init(0); + + % Set display mode to bitmap (DiB) and color mode to 8-bit RGB + obj.cam.Display.Mode.Set(uc480.Defines.DisplayMode.DiB); + obj.cam.PixelFormat.Set(uc480.Defines.ColorMode.RGBA8Packed); + + % Set trigger mode to software (single image acquisition) + obj.cam.Trigger.Set(uc480.Defines.TriggerMode.Software); + + % Allocate memory and take image + [~, obj.MemId] = obj.cam.Memory.Allocate(true); + [~, W, H, B, ~] = obj.cam.Memory.Inquire(obj.MemId); + + % Deal with CC stuff + obj.bitdepth = B; + obj.resolution = [double(W), double(H)]; + obj.maxROI = [1 obj.resolution(1); 1 obj.resolution(2)]; + obj.ROI = obj.maxROI; + obj.loadPrefs; + end + end + methods + function delete(obj) + try + if ~isempty(obj.cam) + obj.cam.Exit; + end + end + end + end + methods(Static) + function obj = instance() + mlock; + persistent Object + if isempty(Object) || ~isvalid(Object) + Object = Imaging.Thorlabs.uc480(); + end + obj = Object; + end + end + methods + function milliseconds = set_exposure(obj, milliseconds, ~) + % set exposure time + obj.cam.Timing.Exposure.Set(milliseconds) %ms + [~, milliseconds] = obj.cam.Timing.Exposure.Get; + end + function set.ROI(obj,val) + % Update ROI without going outside maxROI + val(1,1) = max(obj.maxROI(1,1),val(1,1)); %#ok<*MCSUP> + val(1,2) = min(obj.maxROI(1,2),val(1,2)); + val(2,1) = max(obj.maxROI(2,1),val(2,1)); + val(2,2) = min(obj.maxROI(2,2),val(2,2)); + % Now make sure no cross over + val(1,2) = max(val(1,1),val(1,2)); + val(2,2) = max(val(2,1),val(2,2)); + obj.ROI = val; + end + end + methods + function focus(obj,ax,stageHandle) %#ok + error('Thorlabs.uc480.focus() NotImplemented') + end + function img = snapImage(obj) + % Acquire image + obj.cam.Acquisition.Freeze(uc480.Defines.DeviceParameter.Wait); + + % Copy image from memory + [~, tmp] = obj.cam.Memory.CopyToArray(obj.MemId); + + % Reshape image (make more efficient) + img = reshape(uint8(tmp), [obj.bitdepth/8, obj.resolution(1), obj.resolution(2)]); + img = img(1:3, 1:obj.resolution(1), 1:obj.resolution(2)); + img = permute(img, [3,2,1]); + img = sum(img, 3); + end + % Required method of Modules.Imaging. The "snap button" in the UI calls this and displays the camera result on the imaging axis. 
+ function snap(obj, im, ~) + im.CData = obj.snapImage(); + end + function startVideo(obj, im) + obj.continuous = true; + while obj.continuous + obj.snap(im, true); + drawnow; + end + end + function stopVideo(obj) + obj.continuous = false; + end + + end + +end + diff --git a/Modules/+Imaging/@Hamamatsu/ContrastFocus.m b/Modules/+Imaging/@Hamamatsu/ContrastFocus.m new file mode 100644 index 000000000..66ed8ccc9 --- /dev/null +++ b/Modules/+Imaging/@Hamamatsu/ContrastFocus.m @@ -0,0 +1,85 @@ +function metric = ContrastFocus(obj,Managers ) +%CONTRASTDETECTION Summary of this function goes here +% Detailed explanation goes here + + stageManager = Managers.Stages; + stageHandle = stageManager.modules{1}; + + xlen = obj.resolution(1); % Could have huge speed up with smaller ROI + ylen = obj.resolution(2); + + searchRange = [-1 1]*6; % Range to find maximum + stepSize = 0.5; % um (size of each step) + + n = 2; % Number of points to be sure of slope + dx = 7; + dy = dx; + + xrange = (dx+1):(xlen-dx); + yrange = (dy+1):(ylen-dy); + + startPos = stageHandle.position; + limits = searchRange+startPos(3); + + data = []; + pos_track = []; + + f=figure('name','Contrast Focus Metric'); + newAx = axes('parent',f); + p = plot(newAx,0,0); + xlabel('Z Position (um)') + ylabel('Focus metric') + + increasing_flag = false; + + zpos = startPos(3); + frame = obj.snapImage(obj.binning); % Specify binning to be default + d = contrast_detection(frame,dx,dy,xrange,yrange); + pos_track(end+1) = zpos; + data(end+1) = d; + direction = -1; + + while true + zpos = zpos + direction*stepSize; + assert(zpos>min(limits) && zpos=n + if prod(diff(data(end-n+1:end)) < 0) % If vals are decreasing + direction = 1; + if increasing_flag + % Means we found a peak + break + end + else + increasing_flag = true; + end + + end + end + + [metric,index] = max(data); + stageHandle.move(startPos(1),startPos(2),pos_track(index)) + stageManager.waitUntilStopped(0.1); + close(f) + +end + +function data = contrast_detection(frame,dx,dy,xrange,yrange) +frame = double(frame); +image = frame(yrange,xrange); +imagex = frame(yrange,xrange+dx); +imagey = frame(yrange+dy,xrange); +dI = (imagex-image).^2+(imagey-image).^2; +data = mean2(dI); +end diff --git a/Modules/+Imaging/@Hamamatsu/Hamamatsu.m b/Modules/+Imaging/@Hamamatsu/Hamamatsu.m new file mode 100644 index 000000000..6c32dfc02 --- /dev/null +++ b/Modules/+Imaging/@Hamamatsu/Hamamatsu.m @@ -0,0 +1,438 @@ +classdef Hamamatsu < Modules.Imaging + %AxioCam Control Zeiss AxioCam camera + % + + properties + exposure = 100 % Exposure time in ms + binning = 1 % Bin pixels + EMGain = 4 + ImRot90 + FlipVer + FlipHor + maxROI % Set in constructor + CamCenterCoord = [0,0] % camera's center of coordinates (in same units as camera calibration, i.e. 
um) + data_name = 'Widefield'; % For diamondbase (via ImagingManager) + data_type = 'General'; % For diamondbase (via ImagingManager) + prefs = {'binning','exposure','EMGain','ImRot90','FlipVer','FlipHor','CamCenterCoord'}; + end + properties(Hidden) + core % The Micro-Manager core utility (java) + dev = 'HamamatsuHam_DCAM'; % Device label (from the cfg file) + end + properties(SetObservable) + resolution = [NaN NaN]; % Set in constructor and set.binning + ROI % Region of Interest in pixels [startX startY; stopX stopY] + continuous = false; + end + properties(Access=private) + setBinning % Handle to GUI settings object for binning + setExposure % Handle to GUI settings object for Exposure + setEMGain + setImRot90 + setFlipHor + setFlipVer + setCamCenterCoordX + setCamCenterCoordY + videoTimer % Handle to video timer object for capturing frames + end + + methods(Access=private) + function obj = Hamamatsu() + % Initialize Java Core + addpath 'C:\Micro-Manager-1.4.22\'; + import mmcorej.*; + core=CMMCore; + core.loadSystemConfiguration('C:\Micro-Manager-1.4.22\HamamatsuEMCCD.cfg'); + obj.core = core; + % Load preferences + obj.core.setCircularBufferMemoryFootprint(3); % 3 MB is enough for one full image + obj.loadPrefs; + obj.setFlipVer; + obj.setFlipHor; + res(1) = core.getImageWidth(); + res(2) = core.getImageHeight(); + obj.resolution = res; + obj.maxROI = [-obj.resolution(1)/2 obj.resolution(1)/2;... + -obj.resolution(2)/2 obj.resolution(2)/2]*obj.binning; + end + end + methods(Static) + function obj = instance() + mlock; + persistent Object + if isempty(Object) || ~isvalid(Object) + Object = Imaging.Hamamatsu(); + end + obj = Object; + end + end + methods + function set.CamCenterCoord(obj,val) + obj.CamCenterCoord = val; + end + + function set.ImRot90(obj,val) + obj.ImRot90 = val; + if ~isempty(obj.setImRot90) + set(obj.setImRot90,'string',num2str(obj.ImRot90)) + end + end + function set.FlipVer(obj,val) + obj.FlipVer = val; + if ~isempty(obj.setFlipVer) + set(obj.setFlipVer,'string',num2str(obj.FlipVer)) + end + end + function set.FlipHor(obj,val) + obj.FlipHor = val; + if ~isempty(obj.setFlipHor) + set(obj.setFlipHor,'string',num2str(obj.FlipHor)) + end + end + function set.EMGain(obj,val) + if val == obj.core.getProperty('HamamatsuHam_DCAM', 'EMGain') + obj.EMGain = val; + return + end + wasRunning = false; + if obj.core.isSequenceRunning() + % Pause camera acquisition, but leave the video going + % (just wont be frames until we resume acquisition) + obj.core.stopSequenceAcquisition(); + wasRunning = true; + end + obj.core.setProperty('HamamatsuHam_DCAM', 'EMGain',num2str(val)) + % Incase an invalid exposure was set, grab what core set it to + obj.EMGain = str2double(obj.core.getProperty('HamamatsuHam_DCAM', 'EMGain')); + if ~isempty(obj.setEMGain) + set(obj.setEMGain,'string',num2str(obj.EMGain)) + end + if wasRunning + obj.core.startContinuousSequenceAcquisition(100); + end + end + function set.exposure(obj,val) + if val == obj.core.getExposure() + obj.exposure = val; + return + end + wasRunning = false; + if obj.core.isSequenceRunning() + % Pause camera acquisition, but leave the video going + % (just wont be frames until we resume acquisition) + obj.core.stopSequenceAcquisition(); + wasRunning = true; + end + obj.core.setExposure(val) + % Incase an invalid exposure was set, grab what core set it to + obj.exposure = obj.core.getExposure(); + if ~isempty(obj.setExposure) + set(obj.setExposure,'string',num2str(obj.exposure)) + end + if wasRunning + 
obj.core.startContinuousSequenceAcquisition(100); + end + end + function set.binning(obj,val) + if val==str2double(obj.core.getProperty(obj.dev,'Binning')) + obj.binning = val; + return + end + wasRunning = false; + if obj.core.isSequenceRunning() + % Pause camera acquisition, but leave the video going + % (just wont be frames until we resume acquisition) + obj.core.stopSequenceAcquisition(); + wasRunning = true; + end + val = sprintf('%ix%i',val,val); % e.g. 1x1 + obj.core.setProperty(obj.dev,'Binning',val) + bin = char(obj.core.getProperty(obj.dev,'Binning')); + bin = strsplit(bin,'x'); + obj.binning = str2double(bin{1}); + res(1) = obj.core.getImageWidth(); + res(2) = obj.core.getImageHeight(); + obj.resolution = res; + if ~isempty(obj.setBinning) + set(obj.setBinning,'string',num2str(obj.binning)) + end + if wasRunning + obj.core.startContinuousSequenceAcquisition(100); + end + end + function set.ROI(obj,val) + % Because this has a draggable rectangle in CommandCenter, it + % is best to not stop and start acquisition like we do with + % exposure and binning + assert(~obj.core.isSequenceRunning(),'Cannot set while video running.') + val = val/obj.binning; + val(2,:) = fliplr(val(2,:))*-1; + val(1,:) = val(1,:) + obj.resolution(1)/2; + val(2,:) = val(2,:) + obj.resolution(2)/2; + val = round([val(1,1) val(2,1) val(1,2)-val(1,1) val(2,2)-val(2,1)]); + % Use the full ROI as bounds + obj.core.clearROI(); + roi = obj.core.getROI(); + xstart = max(roi.x,val(1)); + ystart = max(roi.y,val(2)); + width = min(roi.width-xstart,val(3)); + height = min(roi.height-ystart,val(4)); + obj.core.setROI(xstart,ystart,width,height); + end + function val = get.ROI(obj) + val = obj.core.getROI(); + val = [val.x val.x+val.width; val.y val.y+val.height]; + val(1,:) = val(1,:) - obj.resolution(1)/2; + val(2,:) = val(2,:) - obj.resolution(2)/2; + val(2,:) = fliplr(val(2,:))*-1; + val = val*obj.binning; + val = val*obj.calibration; + val = val + obj.CamCenterCoord.'*ones(1,2); + val = val/obj.calibration; + end + function delete(obj) + if obj.core.isSequenceRunning() + obj.stopVideo; + end + obj.core.reset() % Unloads all devices, and clears config data + delete(obj.core) + end + + function metric = focus(obj,ax,Managers) + stageManager = Managers.Stages; + stageManager.update_gui = 'off'; + % oldBin = obj.binning; + % oldExp = obj.exposure; + % if oldBin < 3 + % obj.exposure = oldExp*(oldBin/3)^2; + % obj.binning = 3; + % end + try + metric = obj.ContrastFocus(Managers); + catch err + stageManager.update_gui = 'on'; + rethrow(err) + end + % if oldBin < 3 + % obj.exposure = oldExp; + % obj.binning = oldBin; + % end + stageManager.update_gui = 'on'; + end + function dat = snapImage(obj,binning,exposure) + % This function returns the image (unlike snap) + % Default is to use bin of 1. Exposure is configured based on + % bin size before executing this function. Settings are + % restored after function completes. This can be overridden + % using the optional inputs. 
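+            % Illustrative calls (not from the original code):
+            %   img = obj.snapImage();        % default: 1x1 binning, exposure rescaled for the bin change
+            %   img = obj.snapImage(2, 50);   % 2x2 binning, 50 ms exposure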
+ oldBin = obj.binning; + oldExp = obj.exposure; + % Parse inputs + switch nargin + case 1 % No optional inputs + newBin = 1; + newExp = oldExp*(oldBin^2); + case 2 % Binning specified + newBin = binning; + newExp = oldExp*(oldBin/newBin)^2; + case 3 % Binning and exposure specified + newBin = binning; + newExp = exposure; + end + % Update state + wasRunning = false; + if obj.core.isSequenceRunning() + wasRunning = true; + obj.core.stopSequenceAcquisition(); + end + obj.binning = newBin; + obj.exposure = newExp; + % Take Image + obj.core.snapImage(); + dat = obj.core.getImage(); + width = obj.core.getImageWidth(); + height = obj.core.getImageHeight(); + dat = typecast(dat, 'uint16'); + dat = reshape(dat, [width, height]); + im = flipud(transpose(dat)); % Fix Y inversion + if obj.ImRot90 > 0 + dat = rot90(dat,obj.ImRot90); + end + if obj.FlipVer + dat = flipud(dat); + end + if obj.FlipHor + dat = fliplr(dat); + end + % Restore last state + obj.exposure = oldExp; + obj.binning = oldBin; + if wasRunning + obj.core.startContinuousSequenceAcquisition(100); + end + end + function snap(obj,hImage) + % This function calls snapImage and applies to hImage. + im = obj.snapImage; + set(hImage,'cdata',im) + end + function startVideo(obj,hImage) + obj.continuous = true; + if obj.core.isSequenceRunning() + warndlg('Video already started.') + return + end + obj.core.startContinuousSequenceAcquisition(100); + obj.videoTimer = timer('tag','Video Timer',... + 'ExecutionMode','FixedSpacing',... + 'BusyMode','drop',... + 'Period',0.01,... + 'TimerFcn',{@obj.grabFrame,hImage}); + start(obj.videoTimer) + end + function grabFrame(obj,~,~,hImage) + % Timer Callback for frame acquisition + if obj.core.isSequenceRunning()&&obj.core.getRemainingImageCount()>0 + dat = obj.core.popNextImage(); + width = obj.core.getImageWidth(); + height = obj.core.getImageHeight(); + dat = typecast(dat, 'uint16'); + dat = reshape(dat, [width, height]); + dat = flipud(dat'); % Fix Y inversion + if obj.ImRot90 > 0 + dat = rot90(dat,obj.ImRot90); + end + if obj.FlipVer + dat = flipud(dat); + end + if obj.FlipHor + dat = fliplr(dat); + end + set(hImage,'cdata',dat); + end + drawnow; + end + function stopVideo(obj) + if ~obj.core.isSequenceRunning() + warndlg('No video started.') + obj.continuous = false; + return + end + obj.core.stopSequenceAcquisition(); + stop(obj.videoTimer) + delete(obj.videoTimer) + obj.continuous = false; + end + + % Settings and Callbacks + function settings(obj,panelH) + spacing = 1.5; + num_lines = 4; + line = 1; + xwidth1 = 14; + xwidth2 = 10; + xwidth3 = 12; + xwidth4 = 10; + uicontrol(panelH,'style','text','string','Exposure (ms):','horizontalalignment','right',... + 'units','characters','position',[0 spacing*(num_lines-line) xwidth1 1.25]); + obj.setExposure = uicontrol(panelH,'style','edit','string',num2str(obj.exposure),... + 'units','characters','callback',@obj.exposureCallback,... + 'horizontalalignment','left','position',[xwidth1+1 spacing*(num_lines-line) xwidth2 1.5]); + + uicontrol(panelH,'style','text','string','Im. rot 90','horizontalalignment','right',... + 'units','characters','position',[xwidth1+xwidth2+1 spacing*(num_lines-line) xwidth3 1.25]); + obj.setImRot90 = uicontrol(panelH,'style','edit','string',num2str(obj.ImRot90),... + 'units','characters','callback',@obj.ImRot90Callback,... + 'horizontalalignment','left','position',[xwidth1+xwidth2+xwidth3+1 spacing*(num_lines-line) xwidth4 1.5]); + + +% uicontrol(panelH,'style','edit','string',num2str(obj.ImRot90),... 
+% 'units','characters','callback',@obj.setImRot90,... +% 'horizontalalignment','left','position',[43 spacing*(num_lines-line) 10 1.5]); + + line = 2; + uicontrol(panelH,'style','text','string','Binning:','horizontalalignment','right',... + 'units','characters','position',[0 spacing*(num_lines-line) xwidth1 1.25]); + obj.setBinning = uicontrol(panelH,'style','edit','string',num2str(obj.binning),... + 'units','characters','callback',@obj.binningCallback,... + 'horizontalalignment','left','position',[xwidth1+1 spacing*(num_lines-line) xwidth2 1.5]); + + uicontrol(panelH,'style','text','string','Flip Hor.','horizontalalignment','right',... + 'units','characters','position',[xwidth1+xwidth2+1 spacing*(num_lines-line) xwidth3 1.25]); + obj.setFlipHor = uicontrol(panelH,'style','checkbox','value',obj.FlipHor,... + 'units','characters','position',[xwidth1+xwidth2+xwidth3+1 spacing*(num_lines-line) xwidth4 1.5],... + 'tag','Flip Hor.','callback',@obj.FlipHorCallback); + + line = 3; + uicontrol(panelH,'style','text','string','EM Gain:','horizontalalignment','right',... + 'units','characters','position',[0 spacing*(num_lines-line) xwidth1 1.25]); + obj.setEMGain = uicontrol(panelH,'style','edit','string',num2str(obj.EMGain),... + 'units','characters','callback',@obj.EMGainCallback,... + 'horizontalalignment','left','position',[xwidth1+1 spacing*(num_lines-line) xwidth2 1.5]); + + uicontrol(panelH,'style','text','string','Flip Ver.','horizontalalignment','right',... + 'units','characters','position',[xwidth1+xwidth2+1 spacing*(num_lines-line) xwidth3 1.25]); + obj.setFlipVer = uicontrol(panelH,'style','checkbox','value',obj.FlipVer,... + 'units','characters','position',[xwidth1+xwidth2+xwidth3+1 spacing*(num_lines-line) xwidth4 1.5],... + 'tag','Flip Ver.','callback',@obj.FlipVerCallback); + + line = 4; + uicontrol(panelH,'style','text','string','XCenter','horizontalalignment','right',... + 'units','characters','position',[0 spacing*(num_lines-line) xwidth1 1.25]); + obj.setCamCenterCoordX = uicontrol(panelH,'style','edit','string',num2str(obj.CamCenterCoord(1)),... + 'units','characters','callback',@obj.CamCenterCoordXCallback,... + 'horizontalalignment','left','position',[xwidth1+1 spacing*(num_lines-line) xwidth2 1.5]); + + uicontrol(panelH,'style','text','string','YCenter','horizontalalignment','right',... + 'units','characters','position',[xwidth1+xwidth2+1 spacing*(num_lines-line) xwidth3 1.25]); + obj.setCamCenterCoordY = uicontrol(panelH,'style','edit','string',num2str(obj.CamCenterCoord(2)),... + 'units','characters','callback',@obj.CamCenterCoordYCallback,... 
+ 'horizontalalignment','left','position',[xwidth1+xwidth2+xwidth3+1 spacing*(num_lines-line) xwidth4 1.5]); + end + function exposureCallback(obj,hObj,eventdata) + val = str2double((get(hObj,'string'))); + obj.exposure = val; + end + function binningCallback(obj,hObj,eventdata) + val = str2double((get(hObj,'string'))); + obj.binning = val; + end + function EMGainCallback(obj,hObj,eventdata) + val = str2double((get(hObj,'string'))); + obj.EMGain = val; + end + function CamCenterCoordXCallback(obj,hObj,eventdata) + cur = obj.CamCenterCoord; + cur(1) = str2double((get(hObj,'string'))); + obj.CamCenterCoord = cur; + warning('Need to reset ROI for changes to take effect.') + end + function CamCenterCoordYCallback(obj,hObj,eventdata) + cur = obj.CamCenterCoord; + cur(2) = str2double((get(hObj,'string'))); + obj.CamCenterCoord = cur; + warning('Need to reset ROI for changes to take effect.') + end + function ImRot90Callback(obj,hObj,eventdata) + val = str2double((get(hObj,'string'))); + obj.ImRot90 = val; + warning('Only works with full ROI.') + end + function FlipHorCallback(obj,hObj,~) + if (get(hObj,'Value') == get(hObj,'Max')) + obj.FlipHor = 1; + else + obj.FlipHor = 0; + end + end + function FlipVerCallback(obj,hObj,~) + if (get(hObj,'Value') == get(hObj,'Max')) + obj.FlipVer = 1; + else + obj.FlipVer = 0; + end + end + + end +end + diff --git a/Modules/+Imaging/QR.m b/Modules/+Imaging/QR.m new file mode 100644 index 000000000..9ab2f0289 --- /dev/null +++ b/Modules/+Imaging/QR.m @@ -0,0 +1,382 @@ +classdef QR < Modules.Imaging + % Decodes QRs in images from an external imaging module. + + % Internal helper variables. + properties(Hidden) + current_img; % Cache for the previous image. + graphics = []; % Contains handles for graphics objects for QR drawing. + end + properties(SetAccess=private) + % Internal variables for the positions of each potential QR code. + % v are the positions in camera-space (pixels), V are the positions in QR-space. + % v_all and V_all include potential QRs that did not decode in a self-consistent manner. + v_good = []; + V_good = []; + v_all = []; + V_all = []; + end + properties + % This system assumes only local movements, and ignores decoded QRs far from expected. Expected is updated when 3+ QRs self-consistently decode. + X_expected = NaN; + Y_expected = NaN; + end + properties(Constant, Hidden) + displaymodes = {'Raw', 'Flattened', 'Convolution X', 'Convolution Y', '(Convolution X)^3 + (Convolution Y)^3', 'Thresholded'}; + displaylevels = {'All candidates', 'Valid checksum only', 'Self-consistent only'}; + end + + % Prefs which display in the UI pane. See help_text for context. + properties(GetObservable,SetObservable) + QR_len = Prefs.Double(6.25, 'units', 'um', 'readonly', true, 'help_text', 'Length of QR arm. This is set to the standard value.'); + QR_rad = Prefs.Double(.3, 'units', 'um', 'readonly', true, 'help_text', 'Radius of the three large QR dots. This is set to the standard value.'); + QR_ang = Prefs.Double(0, 'units', 'deg', 'help_text', 'QR code angle in the image coordinates (CCW).'); + + image = Prefs.ModuleInstance('inherits', {'Modules.Imaging'}, 'set', 'set_image', ... + 'help_text', 'The imaging module to do QR decoding upon.'); + + flip = Prefs.Boolean('set', 'set_variable', ... + 'help_text', 'Whether the image should be flipped across the x axis. This should be used along with rotate to put the image in a user-friendly frame.'); + rotate = Prefs.MultipleChoice(0, 'set', 'set_rotate', 'allow_empty', true, 'choices', {0, 90, 180, 270}, ... 
+ 'help_text', 'Rotation (CCW) of the image after flipping. This should be used along with flip to put the image in a user-friendly frame.'); + + displaymode = Prefs.MultipleChoice(Imaging.QR.displaymodes{1}, 'set', 'set_variable', 'allow_empty', false, 'choices', Imaging.QR.displaymodes, ... + 'help_text', 'These displaymodes allow the user to see the various stages in the algorithm that allows robust and fast convolutional QR detection.'); + displaylevel = Prefs.MultipleChoice(Imaging.QR.displaylevels{1}, 'set', 'set_variable', 'allow_empty', false, 'choices', Imaging.QR.displaylevels, ... + 'help_text', 'These displaylevels allow the user to hide unnecessary information such as QRs which do not decode.'); + + X = Prefs.Double(NaN, 'readonly', true, 'allow_nan', true, 'help_text', 'Detected X position in QR-space of the center of the field of view.'); + Y = Prefs.Double(NaN, 'readonly', true, 'allow_nan', true, 'help_text', 'Detected Y position in QR-space of the center of the field of view.'); + N = Prefs.Integer(0, 'readonly', true, 'help_text', 'Number of self-consistent QR codes within a field of view.') + end + + % Variables required for imaging modules. This should be cleaned up in the future. + properties + maxROI = [-1 1; -1 1]; + prefs = {'image', 'flip', 'rotate', 'displaymode', 'displaylevel', 'calibration', 'QR_ang'}; + end + properties(GetObservable,SetObservable) + resolution = [120 120]; + ROI = [-1 1;-1 1]; + continuous = false; + end + + % Constructor variables. + methods(Access=private) + function obj = QR() + obj.loadPrefs; + end + end + methods(Static) + function obj = instance() + mlock; + persistent Object + if isempty(Object) || ~isvalid(Object) + Object = Imaging.QR(); + end + obj = Object; + end + end + + % Set variables to handle UI events / etc. + methods + function image = set_image(obj, image, ~) + obj.maxROI = image.maxROI; + obj.ROI = image.ROI; + end + function set.ROI(obj,val) + % Update ROI without going outside maxROI + val(1,1) = max(obj.maxROI(1,1),val(1,1)); %#ok<*MCSUP> + val(1,2) = min(obj.maxROI(1,2),val(1,2)); + val(2,1) = max(obj.maxROI(2,1),val(2,1)); + val(2,2) = min(obj.maxROI(2,2),val(2,2)); + + % Now make sure no cross over + val(1,2) = max(val(1,1),val(1,2)); + val(2,2) = max(val(2,1),val(2,2)); + obj.ROI = val; + end + function val = set_rotate(obj, val, ~) + if val == 90 || val == 270 + obj.image.maxROI + obj.maxROI = obj.image.maxROI([2, 1], :); + else + obj.maxROI = obj.image.maxROI; + end + obj.ROI = obj.maxROI; + obj.analyze(); + end + function val = set_variable(obj, val, ~) + obj.analyze(); + end + end + + % Image acquisition and processing variables. + methods + function img = snapImage(obj) + obj.current_img = obj.image.snapImage(); + + img = obj.analyze(); + end + % Required method of Modules.Imaging. The "snap button" in the UI calls this and displays the camera result on the imaging axis. + function snap(obj, im, continuous) + im.CData = obj.snapImage(); + end + % Analysis method to detect QRs and display them in our graphics figure. + function displayimg = analyze(obj) + img = obj.current_img; + + % No image to analyze: break. + if isempty(img) + return + end + + % Transform the image according to the user's desire. + if obj.flip + img = flipud(img); + end + if obj.rotate ~= 0 + img = rot90(img, round(obj.rotate/90)); + end + + % Provide a guess starting point for the convolutional algorithm. 
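% The guess passed to Base.QRconv below uses these same fields; as a rough sketch, the detector can
% also be run directly on a standalone image (the calibration number here is only a placeholder, in
% whatever units obj.calibration uses):
%   opts = struct('ang', pi/2, 'calibration', 0.2, 'X_expected', NaN, 'Y_expected', NaN);
%   [v, V] = Base.QRconv(some_img, opts);  % v: candidate positions (pixels); V: decoded QR-space positions
% With QR_ang in degrees, the conversion below maps e.g. QR_ang = 0 to ang = pi/2 and QR_ang = -90 to ang = 0.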
+            options_guess = struct('ang', (obj.QR_ang + 90) * pi / 180, 'calibration', obj.calibration, 'X_expected', obj.X_expected, 'Y_expected', obj.Y_expected);
+
+            % Perform the convolution, which returns candidate QR positions in image space (v), the
+            % corresponding decoded QR-space positions (V), the fitted coordinate transform
+            % (options_fit), and the intermediate stages of the algorithm for display.
+            [v, V, options_fit, stages] = Base.QRconv(img, options_guess);
+
+            % Coordinates in QR-space of the center of the field of view.
+            obj.X = options_fit.Vcen(1);
+            obj.Y = options_fit.Vcen(2);
+
+            % Number of QRs which decoded and agree with the fitted coordinate system (self-consistent).
+            obj.N = sum(~options_fit.outliers & ~isnan(V(1,:)));
+
+            % Store the detected QRs in memory for later user access.
+            obj.v_all = v; % Camera-space (pixels).
+            obj.V_all = V; % QR-space.
+
+            % Keep only the QRs which decoded (passed the checksum) for the good list.
+            obj.v_good = reshape(v(~isnan(V)),2,[]);
+            obj.V_good = reshape(V(~isnan(V)),2,[]);
+
+            % If two or more QRs agree, trust the result and update our running estimates.
+            if obj.N >= 2
+                % Update the expected values for next time (as movement will generally be local, we can expect similar values).
+                obj.X_expected = obj.X;
+                obj.Y_expected = obj.Y;
+
+                % Update the pixels-to-microns calibration.
+                if abs(options_fit.calibration / obj.calibration - 1) < .05
+                    obj.calibration = mean([options_fit.calibration, obj.calibration]);
+                    obj.image.calibration = obj.calibration;
+                end
+
+                if obj.N >= 3
+                    QR_ang2 = (options_fit.ang * 180 / pi);
+
+                    % Update the QR angle with our fit value.
+                    if abs(QR_ang2 - obj.QR_ang) < 2
+                        obj.QR_ang = mean([QR_ang2, obj.QR_ang]);
+                    end
+                end
+            end
+
+            % Change coordinates from pixels to microns for display.
+            v = (v + obj.ROI(:,1)) * obj.calibration;
+
+            % Determine which stage of the convolutional algorithm we will display. This is useful for understanding and debugging.
+            switch obj.displaymode
+                case Imaging.QR.displaymodes{1}
+                    displayimg = img;
+                case Imaging.QR.displaymodes{2}
+                    displayimg = stages.flat;
+                case Imaging.QR.displaymodes{3}
+                    displayimg = stages.convH;
+                case Imaging.QR.displaymodes{4}
+                    displayimg = stages.convV;
+                case Imaging.QR.displaymodes{5}
+                    displayimg = stages.conv;
+                case Imaging.QR.displaymodes{6}
+                    displayimg = stages.bw;
+                otherwise
+                    displayimg = img;
+            end
+
+            % We use an external figure to display the QRs. If this figure has not been created, create it.
+            cx = obj.calibration * (obj.ROI(1,1) + obj.ROI(1,2))/2;
+            cy = obj.calibration * (obj.ROI(2,1) + obj.ROI(2,2))/2;
+            if isempty(obj.graphics) || isempty(obj.graphics.figure) || ~isvalid(obj.graphics.figure)
+                % Make the figure and axes.
+                obj.graphics.figure = figure('Name', 'QR Navigation', 'NumberTitle', 'off', 'Menubar', 'none', 'Toolbar', 'none');
+                obj.graphics.figure.Position(2) = obj.graphics.figure.Position(2) - obj.graphics.figure.Position(3) + obj.graphics.figure.Position(4);
+                obj.graphics.figure.Position(4) = obj.graphics.figure.Position(3);
+                obj.graphics.axes = axes('Units', 'normalized', 'Position', [0 0 1 1], 'PickableParts', 'none', 'DataAspectRatio', [1 1 1]);
+                hold(obj.graphics.axes, 'on');
+                try
+                    disableDefaultInteractivity(obj.graphics.axes)
+                end
+
+                % Make the image to display.
+                obj.graphics.img = imagesc(obj.graphics.axes, ...
+                    [ obj.ROI(1,1), obj.ROI(1,2)] * obj.calibration, ...
+                    [ obj.ROI(2,1), obj.ROI(2,2)] * obj.calibration, ...
+                    NaN(obj.resolution));
+                colormap('gray');
+
+                % Make other graphics objects.
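% Worked example of the calibration self-update earlier in analyze() (illustrative numbers): if the
% fit returns 0.204 while the stored value is 0.200, the ratio is 1.02, i.e. within the 5 percent
% gate, so the stored calibration is nudged toward the fit:
%   obj.calibration = mean([0.204, 0.200]);   % -> 0.202
% A fit that disagrees by more than 5 percent is ignored rather than averaged in.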
+ obj.graphics.text = []; + + obj.graphics.grid = plot(obj.graphics.axes, NaN, NaN, 'c-', 'LineWidth', .5); + obj.graphics.grid.Color(4) = 0.25; + + obj.graphics.center = scatter(obj.graphics.axes, cx, cy, 'go'); + obj.graphics.centertext = text(obj.graphics.axes, cx, cy, 'Center', 'color', 'g'); + + obj.graphics.p3 = plot(obj.graphics.axes, NaN, NaN, 'r-', 'LineWidth',.5); + obj.graphics.p2 = plot(obj.graphics.axes, NaN, NaN, 'y-', 'LineWidth',1); + obj.graphics.p1 = plot(obj.graphics.axes, NaN, NaN, 'g-', 'LineWidth',2); + obj.graphics.p3.Color(4) = 0.25; + end + + obj.graphics.center.XData = cx; + obj.graphics.center.YData = cy; + obj.graphics.centertext.Position = [cx, cy, 0]; + + % Update the graphics objects with our image. + obj.graphics.img.CData = displayimg; + + % Helper variables to construct the shape of the squares that outline where we have detected QRs. These are centered on the lower left QR + % corner. The NaN at the end removes lines which would connect squares, leaving them disjointed as desired. + p = .15; + lx0 = obj.QR_len * cosd(obj.QR_ang + 90); + ly0 = obj.QR_len * sind(obj.QR_ang + 90); + sqx = [-p*(ly0+lx0) (1+p)*lx0-p*ly0 (1+p)*(ly0+lx0) (1+p)*ly0-p*lx0 -p*(ly0+lx0) NaN]; + sqy = [-p*(ly0-lx0) (1+p)*ly0-p*lx0 (1+p)*(ly0-lx0) -(1+p)*lx0-p*ly0 -p*(ly0-lx0) NaN]; + + % Make some empty variables which will hold the squares for displaying different types of detected QRs + p1x = []; p1y = []; % Self-consistent QRs. + p2x = []; p2y = []; % QRs which decode, but are not self-consistent (do not make sense). + p3x = []; p3y = []; % QRs which do not decode (violate checksum or otherwise). + + kk = 1; % Empty iterator. + + for ii = 1:size(v,2) + squarex = v(1,ii) + sqx; + squarey = v(2,ii) + sqy; + + if isnan(V(1,ii)) + p3x = [p3x squarex]; %#ok + p3y = [p3y squarey]; %#ok + else + if options_fit.outliers(ii) + p2x = [p2x squarex]; %#ok + p2y = [p2y squarey]; %#ok + else + p1x = [p1x squarex]; %#ok + p1y = [p1y squarey]; %#ok + end + + shoulddisplay = true; + + switch obj.displaylevel + case Imaging.QR.displaylevels{3} + shoulddisplay = ~options_fit.outliers(ii); + end + + if shoulddisplay + % Make more text objects if there are not enough. + if kk > length(obj.graphics.text) + obj.graphics.text(kk) = text(obj.graphics.axes, NaN, NaN, '', 'HorizontalAlignment', 'center', 'VerticalAlignment', 'middle'); + end + + % QRs which decoded successfully additionally get a text label. Set the text objects to the proper values. + set(obj.graphics.text(kk), 'String', ['[' num2str(V(1,ii)) ', ' num2str(V(2,ii)) ']']); + set(obj.graphics.text(kk), 'Position', [v(1,ii) + lx0/2 + ly0/2, v(2,ii) + ly0/2 - lx0/2]); + set(obj.graphics.text(kk), 'Color', 'g'); + if options_fit.outliers(ii) + set(obj.graphics.text(kk), 'Color', 'y'); + end + + kk = kk + 1; + end + end + end + + % Set all unneeded text objects to not display. + while kk <= length(obj.graphics.text) + set(obj.graphics.text(kk), 'String', ''); + set(obj.graphics.text(kk), 'Position', [NaN, NaN]); + set(obj.graphics.text(kk), 'Color', 'k'); + + kk = kk + 1; + end + + % Color the pointer at the center of the FoV according to what was decoded. + if any(isnan(options_fit.Vcen)) + obj.graphics.centertext.String = ''; + obj.graphics.center.MarkerEdgeColor = 'y'; + else + obj.graphics.centertext.String = [' [' num2str(options_fit.Vcen(1), '%.2f') ', ' num2str(options_fit.Vcen(2), '%.2f') ']']; + obj.graphics.center.MarkerEdgeColor = 'g'; + end + + % Update all our graphics with the lines we have created. 
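% The square outlines built above are drawn below as one line object per category; the trailing NaN
% in sqx/sqy keeps consecutive squares disjoint, because MATLAB line objects break the stroke at NaN
% samples. Minimal illustration of that behavior:
%   plot([0 1 NaN 2 3], [0 0 NaN 1 1])   % two separate segments rather than one connected path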
+ obj.graphics.p1.XData = p1x; obj.graphics.p1.YData = p1y; + obj.graphics.p2.XData = p2x; obj.graphics.p2.YData = p2y; + obj.graphics.p3.XData = p3x; obj.graphics.p3.YData = p3y; + + switch obj.displaylevel + case Imaging.QR.displaylevels{1} + obj.graphics.p2.Visible = true; + obj.graphics.p3.Visible = true; + case Imaging.QR.displaylevels{2} + obj.graphics.p2.Visible = true; + obj.graphics.p3.Visible = false; + case Imaging.QR.displaylevels{3} + obj.graphics.p2.Visible = false; + obj.graphics.p3.Visible = false; + end + + gdata = (affine(floor(options_fit.Vcen) + [[0, 1, 1, 0, 0]; [0, 0, 1, 1, 0]], options_fit.M, options_fit.b) - obj.ROI(:,1) ) * options_fit.calibration; + gdata = [gdata [NaN; NaN], (affine(floor(options_fit.Vcen) + [[0, 1, 0, 1, 0]; [0, 0, 1, 1, 0]], options_fit.M, options_fit.b) - obj.ROI(:,1) ) * options_fit.calibration]; + + obj.graphics.grid.XData = gdata(1,:); + obj.graphics.grid.YData = gdata(2,:); + + obj.graphics.img.XData = [ obj.ROI(1,1), obj.ROI(1,2)] * obj.calibration; + obj.graphics.img.YData = [ obj.ROI(2,1), obj.ROI(2,2)] * obj.calibration; + + try % These lines sometime break, so we will try them carefully. + xlim(obj.graphics.axes, obj.graphics.img.XData); + ylim(obj.graphics.axes, obj.graphics.img.YData); + catch + end + end + end + + % Video methods required by Modules.Imaging -- currently just a while loop implementation. + methods + function startVideo(obj,im) + obj.continuous = true; + while obj.continuous + obj.snap(im,true); + drawnow; + end + end + function stopVideo(obj) + obj.continuous = false; + end + + % Focus not implemented (see metastage) + function focus(obj,ax,stageHandle) %#ok + end + end +end + +% Helper function for affine transformations (scale + rotation + shear + translation) +function v_ = affine(v, M, b) + % v and v_ are either column vectors (2x1) or arrays of column vectors (2xN) of the same size + % M is a matrix (2x2) + % b is a column vector (2x1) + v_ = M * v + b; +end \ No newline at end of file diff --git a/Modules/+Drivers/+SignalGenerators/@SignalGenerator/ReadMe.pptx b/Modules/+Sources/+SignalGenerators/@SignalGenerator/ReadMe.pptx similarity index 100% rename from Modules/+Drivers/+SignalGenerators/@SignalGenerator/ReadMe.pptx rename to Modules/+Sources/+SignalGenerators/@SignalGenerator/ReadMe.pptx diff --git a/Modules/+Sources/+SignalGenerators/@SignalGenerator/SignalGenerator.m b/Modules/+Sources/+SignalGenerators/@SignalGenerator/SignalGenerator.m new file mode 100644 index 000000000..9628aecc5 --- /dev/null +++ b/Modules/+Sources/+SignalGenerators/@SignalGenerator/SignalGenerator.m @@ -0,0 +1,130 @@ +classdef SignalGenerator < Modules.Driver + + properties (Abstract) + comObjectInfo; + comObject; + end + + methods + function SG_init(obj) + % Call this on subclasses during instatiation + obj.loadPrefs; + + if ~isstruct(obj.comObjectInfo) + obj.comObjectInfo = struct('comType','','comAddress','','comProperties',''); + end + + % Note fopen(obj.comObject) can error if incorrect address supplied + if isempty(obj.comObjectInfo.comType)&& isempty(obj.comObjectInfo.comAddress)&& isempty(obj.comObjectInfo.comProperties) + %first time connecting should run the helper function + %Connect_Device to establish your connection + [obj.comObject,obj.comObjectInfo.comType,obj.comObjectInfo.comAddress,obj.comObjectInfo.comProperties] = Connect_Device; + fopen(obj.comObject); + else + try + %this is used for connecting every time after the first + %time + 
[obj.comObject,obj.comObjectInfo.comType,obj.comObjectInfo.comAddress,obj.comObjectInfo.comProperties] = ... + Connect_Device(obj.comObjectInfo.comType,obj.comObjectInfo.comAddress,obj.comObjectInfo.comProperties); + fopen(obj.comObject); + catch + %this is only called if you change a device property + %after the initial connection (ex: change GPIB + %address). This allows you to establish a new + %connection. + [obj.comObject,obj.comObjectInfo.comType,obj.comObjectInfo.comAddress,obj.comObjectInfo.comProperties] ... + = Connect_Device; + fopen(obj.comObject); + end + end + obj.reset; %set the SG to a known state + end + + function setUnitPower(obj) + %this should set the units of the SG to dBm. + error('Not implemented'); + end + + function setFreqCW(obj,Freq) + %Freq should be a double + error('Not implemented'); + end + + function setPowerCW(obj,Power) + %Power should be a double + error('Not implemented'); + end + + + %% + + function getUnitPower(obj) + %this should return the units of the SG power. + error('Not implemented'); + end + + function [Freq]=getFreqCW(obj) + %should return Freq as a double. If in List mode should error. + error('Not implemented'); + end + + function [Power]=getPowerCW(obj) + %should return Power as a double. Power should be in dBm. If in List mode should error. + error('Not implemented'); + end + + function [FreqMode]=getFreqMode(obj) + %should FreqMode as a string. Options should be CW,FIX or List. + error('Not implemented'); + end + + function [PowerMode]=getPowerMode(obj) + %should PowerMode as a string. Options should be CW,FIX or List. + error('Not implemented'); + end + + function [FreqList]=getFreqList(obj) + %should return the frequencies set for List mode. + error('Not implemented'); + end + + function [PowerList]=getPowerList(obj) + %should return the powers set for List mode. + error('Not implemented'); + end + + function [MWstate]=getMWstate(obj) + %should return the state of the SG.On or Off. + error('Not implemented'); + end + + + %% + + function program_list(obj,freq_list,power_list) + %this should program the SG to output a freq list that can be + %stepped through on trigger.Trigger should be a rising edge. + %freq_list and power_list should be a vector of + %doubles and they should be the same length. The SG should be + %off when done. + + error('Not implemented'); + end + + function off(obj) + %this should turn Off the signal generator. + error('Not implemented'); + end + + function on(obj) + %this should turn On the signal generator. + error('Not implemented'); + end + + function reset(obj) + %this should turn the SG to a known state. For instance to CW + %mode and a reference frequency and power. + error('Not implemented'); + end + end +end \ No newline at end of file diff --git a/Modules/+Sources/+SignalGenerators/SG_Source_invisible.m b/Modules/+Sources/+SignalGenerators/SG_Source_invisible.m index 37d487e96..e299010d1 100644 --- a/Modules/+Sources/+SignalGenerators/SG_Source_invisible.m +++ b/Modules/+Sources/+SignalGenerators/SG_Source_invisible.m @@ -15,7 +15,7 @@ PB_line = Prefs.Integer(1, 'min', 1, 'allow_nan', false, 'set', 'set_PB_line', ... 'help', 'Indexed from 1'); - reset_serial = Prefs.Button('string', 'Reset', 'set', 'set_reset_serial', ... + reset_serial = Prefs.Button('Reset', 'set', 'set_reset_serial', ... 'help', 'Push this to kill the current comport (serial, gpib, ...) and be able to reset it upon restart. 
Future: make this less terrible.') end @@ -116,7 +116,7 @@ function init(obj) % Called by signal generators after instantiation to load pr end end - function set_reset_serial(obj, ~, ~) + function obj = set_reset_serial(obj, ~, ~) obj.serial.comObjectInfo = []; obj.serial.savePrefs(); delete(obj); % Suicide. diff --git a/Modules/+Sources/CWAVE_PB.m b/Modules/+Sources/CWAVE_PB.m new file mode 100644 index 000000000..7828a0722 --- /dev/null +++ b/Modules/+Sources/CWAVE_PB.m @@ -0,0 +1,143 @@ +classdef CWAVE_PB < Modules.Source + % Cobolt_PB controls the Cobolt via USB and fast diode modulation via pulseblaster + + properties(SetObservable, GetObservable) + PB_line = Prefs.Integer(1, 'min', 1, 'help_text', 'Pulse Blaster flag bit (indexed from 1)'); + PB_host = Prefs.String(Sources.Cobolt_PB.noserver, 'set', 'set_pb_host', 'help_text', 'hostname of hwserver computer with PB'); + end + properties(SetAccess=private) + serial % hwserver handle + PulseBlaster % pulseblaster handle + end + properties(Constant) + noserver = 'No Server'; + end + properties + prefs = {'PB_line', 'PB_host'}; + show_prefs = {'PB_host', 'PB_line'}; + end + methods(Access=protected) + function obj = CWAVE_PB() + obj.loadPrefs; % note that this calls set.host + end + end + methods(Static) + function obj = instance() + mlock; + persistent Object + if isempty(Object) || ~isvalid(Object) + Object = Sources.CWAVE_PB(); + end + obj = Object; + end + end + methods + function task = inactive(obj) + try % If PB or line is incorrect, just eat that error + obj.off; + obj.blackout; + task = 'Turned diode off'; + catch + task = 'Attempted to turn diode off; FAILED'; + end + end + + function delete(obj) + delete(obj.serial) + end + + function val = set_source_on(obj, val, ~) + obj.PulseBlaster.lines(obj.PB_line).state = val; + end +% function val = set_armed(obj, val, ~) % Turn the diode on or off. +% if obj.isConnected() +% if val +% errorIfNotOK(obj.serial.com('Cobolt', '@cobas', 0)); % No autostart +% errorIfNotOK(obj.serial.com('Cobolt', 'em')); % Enter Modulation Mode +% errorIfNotOK(obj.serial.com('Cobolt', 'l1')); +% else +% errorIfNotOK(obj.serial.com('Cobolt', 'l0')); % Laser off +% end +% else +% val = NaN; +% end +% end +% function val = set_power(obj, val, ~) +% if obj.isConnected && ~isnan(val) +% errorIfNotOK(obj.serial.com('Cobolt', 'slmp', val)); % Set laser modulation power (mW) +% else +% val = NaN; +% end +% end + +% function val = get_armed(obj, ~) +% val = obj.com('l?'); +% end +% function val = get_power(obj, ~) +% val = obj.com('glmp?'); % Get laser modulation power (mW) +% end +% function val = get_temperature(obj, ~) +% val = obj.com('rbpt?'); +% end +% function val = get_diode_sn(obj, ~) +% val = obj.com('sn?'); +% end +% function val = get_firmware_ver(obj, ~) +% val = obj.com('ver?'); +% end +% function val = get_diode_age(obj, ~) +% val = obj.com('hrs?'); +% end +% +% function val = com(obj, str, varargin) +% if obj.isConnected() +% val = obj.serial.com('Cobolt', str); +% else +% val = NaN; +% end +% end +% function tf = isConnected(obj) +% tf = ~strcmp(Sources.Cobolt_PB.noserver, obj.cobolt_host); % If we are trying to connect to a real IP.... +% +% if tf +% tf = strcmp('OK', obj.serial.com('Cobolt', '?')); % ...If the device is not responding affirmatively... 
+% +% if ~tf +% obj.cobolt_host = Sources.Cobolt_PB.noserver; +% end +% end +% end + + function val = set_pb_host(obj,val,~) %this loads the pulseblaster driver + try + obj.PulseBlaster = Drivers.PulseBlaster.instance(val); %#ok<*MCSUP> + obj.source_on = obj.PulseBlaster.lines(obj.PB_line).state; + catch + obj.PulseBlaster = []; + obj.source_on = NaN; + val = Sources.Cobolt_PB.noserver; + end + end +% function val = set_cobolt_host(obj,val,~) %this loads the hwserver driver +% delete(obj.serial); +% +% try +% obj.serial = hwserver(val); %#ok<*MCSUP> +% +% obj.temperature = obj.get_temperature(); +% obj.diode_sn = obj.get_diode_sn(); +% obj.diode_age = obj.get_diode_age(); +% obj.power = obj.get_power(); +% obj.armed = obj.get_armed(); +% catch +% obj.serial = []; +% obj.armed = NaN; +% val = Sources.Cobolt_PB.noserver; +% end +% end + end +end +% +% function errorIfNotOK(str) +% assert(strcmp(str, 'OK'), ['Cobolt Error: ' str]); +% end diff --git a/Modules/+Sources/Msquared.m b/Modules/+Sources/Msquared.m index 829f70d4f..ecdbf9944 100644 --- a/Modules/+Sources/Msquared.m +++ b/Modules/+Sources/Msquared.m @@ -379,7 +379,21 @@ function tune(obj, target) % This is the tuning method that int if isnan(val); return; end % Short circuit on NaN obj.getFrequency(); obj.do_wavelength_lock = false; + + start = obj.resonator_voltage/2; + delta = val - start; + steps = abs(round(delta/2)); + powers = start:(delta/steps):val; + try + powers(1) = []; + powers(end) = []; + end + + for p = powers + obj.com('set_resonator_val', 'solstis', p); + end obj.com('set_resonator_val', 'solstis', val); + obj.getFrequency(); end function val = set_do_wavelength_lock(obj, val, pref) if val == pref.value; return; end diff --git a/PLEpowerSweep.m b/PLEpowerSweep.m new file mode 100644 index 000000000..9adcd1676 --- /dev/null +++ b/PLEpowerSweep.m @@ -0,0 +1,46 @@ +function PLEpowerSweep(managers) + % Additional OD2.5 on green. 
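    % Sketch of how the saved sweep could be reloaded later (assumes the fitted linewidths are saved
    % alongside cset and mang, as done at the end of this function):
    %   d = load('PLEpowerSweep.mat');
    %   imagesc(d.cset, d.mang, d.linewidths.'); colorbar
    %   xlabel('Cobolt Power [mW]'); ylabel('EMM OD Filter Wheel [deg]')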
+    c = Sources.Cobolt_PB.instance();
+    m = Sources.Msquared.instance();
+
+    ard = Drivers.ArduinoServo.instance('localhost', 2);
+%     k = Drivers.Keithley2400.instance(16);
+
+%     pm = Drivers.PM100.instance();
+%     pm.set_average_count(50)
+%     pm.set_range(1)
+
+    cset = [4, 5, 6];
+    mang = [0, 80, 180];
+
+    e = Experiments.SlowScan.Open.instance();
+
+    linewidths = NaN(length(cset), length(mang));
+
+    figure;
+    img = imagesc(cset, mang, linewidths.');    % rows of the displayed image correspond to mang (y axis)
+    colorbar
+
+    xlabel('Cobolt Power [mW]')
+    ylabel('EMM OD Filter Wheel [deg]')
+
+    for ii = 1:length(cset)
+        c.power = cset(ii);
+        for jj = 1:length(mang)
+            ard.angle = mang(jj);
+
+            disp(['Cobolt power: ' num2str(cset(ii)) ', Msquared wheel angle: ' num2str(mang(jj))])
+
+            managers.Experiment.run(managers)
+
+            s = fitpeaks(e.data.freqs_measured', e.data.sumCounts');
+
+            try
+                linewidths(ii, jj) = s.widths(1);
+            end
+            img.CData = linewidths.';
+        end
+    end
+
+    save('PLEpowerSweep', 'cset', 'mang', 'linewidths')
+end
\ No newline at end of file
diff --git a/PLEpowerSweep2.m b/PLEpowerSweep2.m
new file mode 100644
index 000000000..c6cf6bf25
--- /dev/null
+++ b/PLEpowerSweep2.m
@@ -0,0 +1,46 @@
+function PLEpowerSweep2(managers)
+%     c = Sources.Cobolt_PB.instance();
+    m = Sources.Msquared.instance();
+
+    ard = Drivers.ArduinoServo.instance('localhost', 2);
+    k = Drivers.Keithley2400.instance(16);
+
+%     pm = Drivers.PM100.instance();
+%     pm.set_average_count(50)
+%     pm.set_range(1)
+
+    cset = [0.005, 0.01, 0.02];
+    mang = [0, 80, 180];
+
+    e = Experiments.SlowScan.Open.instance();
+
+    linewidths = NaN(length(cset), length(mang));
+
+    figure;
+    img = imagesc(cset, mang, linewidths.');    % rows of the displayed image correspond to mang (y axis)
+    colorbar
+
+    xlabel('Blue current')
+    ylabel('EMM OD Filter Wheel [deg]')
+
+    k.output=1;
+    for ii = 1:length(cset)
+        k.current=cset(ii);
+        for jj = 1:length(mang)
+            ard.angle = mang(jj);
+
+            disp(['405nm current: ' num2str(cset(ii)) ', Msquared wheel angle: ' num2str(mang(jj))])
+
+            managers.Experiment.run(managers)
+
+            s = fitpeaks(e.data.freqs_measured', e.data.sumCounts');
+
+            try
+                linewidths(ii, jj) = s.widths(1);
+            end
+            img.CData = linewidths.';
+        end
+    end
+    k.output=0;
+    save('PLEpowerSweep2', 'cset', 'mang', 'linewidths')
+end
\ No newline at end of file
diff --git a/image_display.m b/image_display.m
new file mode 100644
index 000000000..4b0142ff8
--- /dev/null
+++ b/image_display.m
@@ -0,0 +1,47 @@
+% SLM input
+w=[1 0.65 0.65 0.55 0.48 0.33];
+
+kangle = 60*pi/180;
+kangle = 93*pi/180;
+kstep = 17e-4;
+% kstep = 25e-4;
+% nspots = 6;
+% ki=[kstep*(-1) kstep*(-2) kstep*(-3) kstep*(-4) kstep*(-5) kstep*(-6)];
+% %w=1./[1:nspots];
+% w=[1 0.45 0.52 0.5 0.45 0.39];
+w=[1 0.65 0.65 0.55 0.48 0.33];
+nspots = 6;
+ki=[kstep*(-1) kstep*(-2) kstep*(3) kstep*(2) kstep*(1) kstep*(-3)];
+%w=1./[1:nspots];
+
+%w=[1 0.65 0.52 0.51 0.4 0.5];
+
+for i=1:nspots
+
+    k = ki(i);
+    mask1 = slm.blaze(k*cos(kangle),k*sin(kangle),0); %all 0
+    if i==1
+        mask = mask1;
+    else
+        mask = slm.sum_mask_weighted(mask,mask1,w(i));
+    end
+end
+csvwrite('6spot.csv',mask')
+% slm.load_data(mask)
+%%
+camera1 = Imaging.Thorlabs.uc480.instance;
+camera1.exposure=20;
+images_camera=camera1.snapImage();
+figure(1)
+plot((images_camera(200:700,507)));
+[pks,locs]=findpeaks((images_camera(200:700,507)));
+location=locs(find(pks>50));
+p1=images_camera(120,507);
+p2=images_camera(170,507);
+p3=images_camera(219,507);
+p4=images_camera(268,507);
+p5=images_camera(318,507);
+p6=images_camera(367,507);
+pn=[p1 p2 p3 p4 p5 p6];
+std(pn)
+
diff --git a/plotLinescans.m b/plotLinescans.m
new file mode 100644
index 000000000..b52f617ad
---
/dev/null +++ b/plotLinescans.m @@ -0,0 +1,22 @@ +function plotLinescans + d = 'Z:\Diamond\EG345\2021_10_01 M4 Chiplet Screening\Wide Diamond'; + + files_ = dir(d); + files = {files_.name}; + + figure + + for ii = 1:length(files) + if files{ii}(1) ~= '.' + data = load([d filesep files{ii}]); + plot(data.data.data.data.freqs_measured, data.data.data.data.sumCounts); + hold on + + [vals, confs, fit_results, gofs] = fitpeaks(data.data.data.data.freqs_measured', data.data.data.data.sumCounts', 'fittype', "voigt"); + + vals + + plot(fit_results{1}) + end + end +end \ No newline at end of file diff --git a/powerCalibration.m b/powerCalibration.m new file mode 100644 index 000000000..399aeede3 --- /dev/null +++ b/powerCalibration.m @@ -0,0 +1,67 @@ +function powerCalibration + c = Sources.Cobolt_PB.instance(); + m = Sources.Msquared.instance(); + + ard = Drivers.ArduinoServo.instance('localhost', 2); + k = Drivers.Keithley2400.instance(16); + + pm = Drivers.PM100.instance(); + pm.set_average_count(50) + pm.set_range(1) + + cset = 0:1:80; + cpow = 0*cset; + mang = 0:5:180; + mpow = 0*mang; + kma = 0:.01:.6; + kpow = 0*kma; + + + %% Cobolt + pm.set_wavelength(515) + c.on + + for ii = 1:length(cset) + ii + c.power = cset(ii); + pause(.1) + cpow(ii) = pm.get_power('MW'); + end + c.off + figure + plot(cset, cpow) + + %% M^2 + pm.set_wavelength(620) + m.on + + for ii = 1:length(mang) + ii + ard.angle = mang(ii); + pause(.1) + mpow(ii) = pm.get_power('MW'); + end + + m.off + figure + plot(mang, mpow) + + save('powerCalibration', 'cset', 'cpow', 'mang', 'mpow'); + +% %% Blue +% pm.set_wavelength(405) +% k.output = true; +% +% for ii = 1:length(kma) +% ii +% k.current = kma(ii); +% pause(.1) +% kpow(ii) = pm.get_power('MW'); +% end +% k.output = false; +% figure +% plot(kma, kpow) +% +% save('powerCalibration', 'cset', 'cpow', 'mang', 'mpow', 'kma', 'kpow'); + +end \ No newline at end of file
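% Sketch of how the curves saved by powerCalibration could be used afterwards to look up a setpoint
% for a desired measured power (assumes each sweep is monotonic; the target value is illustrative and
% its units follow whatever pm.get_power('MW') returns):
%   cal = load('powerCalibration.mat');
%   target = 2;
%   cobolt_setting = interp1(cal.cpow, cal.cset, target);   % measured power -> Cobolt power setting
%   wheel_angle    = interp1(cal.mpow, cal.mang, target);   % measured power -> filter-wheel angle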