From 18242c956593780ea7458f4e137e8a5a716844d4 Mon Sep 17 00:00:00 2001 From: Oliver Lantwin Date: Wed, 1 Dec 2021 10:22:01 +0100 Subject: [PATCH 01/15] Correctly log number of events in case some are skipped --- lardon.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lardon.py b/lardon.py index fa15aec..09bb8d5 100644 --- a/lardon.py +++ b/lardon.py @@ -80,7 +80,7 @@ if(nevent > nb_evt or nevent < 0): nevent = nb_evt -print(" --->> Will process ", nevent, " events [ out of ", nb_evt, "] of run ", run) +print(f" --->> Will process {nevent - evt_skip} events [out of {nb_evt}] of run {run}") """ store basic informations """ store.store_run_infos(output, int(run), int(sub), elec, nevent, time.time()) From f750a3ca155ba148d5980dadb4227c48b72f7684 Mon Sep 17 00:00:00 2001 From: Oliver Lantwin Date: Wed, 1 Dec 2021 10:08:00 +0100 Subject: [PATCH 02/15] Default `evt_skip` to 0 for correct event count Otherwise lardon might claim to read n-(-1) events. No code depends on the magic value of -1. --- lardon.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lardon.py b/lardon.py index 09bb8d5..41e3edf 100644 --- a/lardon.py +++ b/lardon.py @@ -16,7 +16,7 @@ parser.add_argument('-det', dest='detector', help='which detector is looked at [default is coldbox]', default='coldbox') parser.add_argument('-period', help='which detector period is looked at [default is 1]', default='1') parser.add_argument('-out', dest='outname', help='extra name on the output', default='') -parser.add_argument('-skip', dest='evt_skip', type=int, help='nb of events to skip', default=-1) +parser.add_argument('-skip', dest='evt_skip', type=int, help='nb of events to skip', default=0) parser.add_argument('-f', '--file', help="Override derived filename") args = parser.parse_args() From 529915c5f13b04d3e4584db590bef6e26ac48073 Mon Sep 17 00:00:00 2001 From: Oliver Lantwin Date: Wed, 1 Dec 2021 10:15:03 +0100 Subject: [PATCH 03/15] Be explicit about what is output, as slash is confusing --- data_containers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data_containers.py b/data_containers.py index d408fcb..dd20772 100644 --- a/data_containers.py +++ b/data_containers.py @@ -87,7 +87,7 @@ def set_noise_filt(self, noise): self.noise_filt = noise def dump(self): - print("RUN ",self.run_nb, " of ", self.elec, " EVENT ", self.evt_nb, " / ", self.trigger_nb,) + print("RUN ",self.run_nb, " of ", self.elec, " EVENT ", self.evt_nb, " TRIGGER ", self.trigger_nb,) print("Taken at ", time.ctime(self.time_s), " + ", self.time_ns, " ns ") From 5f18abe8019a02eeff63f55510e1d37551d9a150 Mon Sep 17 00:00:00 2001 From: Oliver Lantwin Date: Wed, 1 Dec 2021 10:09:27 +0100 Subject: [PATCH 04/15] Warn when there are too few events in the file --- lardon.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lardon.py b/lardon.py index 41e3edf..524b0a4 100644 --- a/lardon.py +++ b/lardon.py @@ -78,6 +78,7 @@ nb_evt = reader.read_run_header() if(nevent > nb_evt or nevent < 0): + print(f"WARNING: Requested {nevent} events from a file containing only {nb_evt} events.") nevent = nb_evt print(f" --->> Will process {nevent - evt_skip} events [out of {nb_evt}] of run {run}") From c7acdad8c8ed34d5bf07df555ce27b3c94076122 Mon Sep 17 00:00:00 2001 From: Oliver Lantwin Date: Wed, 1 Dec 2021 10:07:28 +0100 Subject: [PATCH 05/15] Make sure detector is one of the possible choices --- lardon.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lardon.py b/lardon.py index 524b0a4..4571541 100644 --- 
a/lardon.py +++ b/lardon.py @@ -13,7 +13,7 @@ parser.add_argument('-run', help='Run number to be processed', required=True) parser.add_argument('-sub', help='Subfile to read', type=int, required=True) parser.add_argument('-n', '--nevent', type=int, help='number of events to process in the file [default (or -1) is all]', default=-1) -parser.add_argument('-det', dest='detector', help='which detector is looked at [default is coldbox]', default='coldbox') +parser.add_argument('-det', dest='detector', help='which detector is looked at [default is coldbox]', default='coldbox', choices=['coldbox',]) parser.add_argument('-period', help='which detector period is looked at [default is 1]', default='1') parser.add_argument('-out', dest='outname', help='extra name on the output', default='') parser.add_argument('-skip', dest='evt_skip', type=int, help='nb of events to skip', default=0) From 490af00f8f3e9d6e2b7d48724eab7496cdd30440 Mon Sep 17 00:00:00 2001 From: Yoann Kermaidic Date: Thu, 25 Nov 2021 16:47:04 +0100 Subject: [PATCH 06/15] Add analysis parameters reader/file --- analysis_parameters.py | 69 +++++++++++++++++++++++++++++++ lardon.py | 10 +++-- settings/analysis_parameters.json | 16 +++++++ 3 files changed, 92 insertions(+), 3 deletions(-) create mode 100644 analysis_parameters.py create mode 100644 settings/analysis_parameters.json diff --git a/analysis_parameters.py b/analysis_parameters.py new file mode 100644 index 0000000..02d4e39 --- /dev/null +++ b/analysis_parameters.py @@ -0,0 +1,69 @@ +import json as json + +class params: + def __init__(self): + self.ped_amp_sig_fst = 1 # default amplitude trigger threshold for 1st pass signal mask - in RMS + self.ped_amp_sig_oth = 1 # default amplitude trigger threshold for other pass signal mask - in RMS + + self.hit_amp_sig = [3,6,2] # default amplitude trigger threshold for hit search - in RMS + self.hit_dt_min = [10,10,10] # minimal delta t for hit search - in bins + self.hit_pad_left = 6 + self.hit_pad_right = 10 + + def read(self,config="1"): + try: + with open('settings/analysis_parameters.json','r') as f: + data = json.load(f) + if(config not in data): + print("WARNING: Thresholds configuration ",config," not found.") + print(" Default thresholds will be applied.") + else: + self.ped_amp_sig_fst = data[config]['pedestal']['first_pass_thrs'] + self.ped_amp_sig_oth = data[config]['pedestal']['other_pass_thrs'] + + self.hit_amp_sig = data[config]['hit_finder']['amp_sig_thrs'] + self.hit_dt_min = data[config]['hit_finder']['dt_min'] + self.hit_pad_left = data[config]['hit_finder']['pad']['left'] + self.hit_pad_right = data[config]['hit_finder']['pad']['right'] + except: + print("WARNING: Thresholds setting file (./settings/analysis_parameters.json) not found.") + print(" Default thresholds will be applied.") + + # setters and getters (potentially not useful now) + def set_ped_amp_sig_fst(self,value): + self.ped_amp_sig_fst = values + + def set_ped_amp_sig_oth(self,value): + self.ped_amp_sig_oth = values + + def get_ped_amp_fst(self): + return self.ped_amp_sig_fst + + def get_ped_amp_oth(self): + return self.ped_amp_oth_fst + + def set_hit_amp_sig(self,values): + self.hit_amp_sig = values + + def set_hit_dt_min(self,values): + self.hit_dt_min = values + + def set_hit_pad_left(self,value): + self.hit_pad_left = value + + def set_hit_pad_right(self,value): + self.hit_pad_right = value + + def get_hit_amp_sig_thrs(self): + return self.hit_amp_sig + + def get_hit_dt_min_thrs(self): + return self.hit_dt_min + + def get_hit_pad_left(self): + return 
self.hit_pad_left + + def get_hit_pad_right(self): + return self.hit_pad_right + + diff --git a/lardon.py b/lardon.py index 4571541..f6ee037 100644 --- a/lardon.py +++ b/lardon.py @@ -61,6 +61,10 @@ output = tab.open_file(name_out, mode="w", title="Reconstruction Output") store.create_tables(output) +""" set analysis parameters """ +pars = params.params() +pars.read(config=args.conf) + """ set the channel mapping """ print(" will use ", cf.channel_map) cmap.get_mapping(elec) @@ -135,7 +139,7 @@ tp = time.time() for i in range(2): ped.compute_pedestal(noise_type='filt') - ped.update_mask(4.) + ped.update_mask(pars.ped_amp_sig_fst) #plot.plot_noise_daqch(noise_type='filt',option='fft', vmin=0, vmax=vmax) #plot.plot_noise_vch(noise_type='filt', vmin=0, vmax=vmax,option='fft')#,to_be_shown=True) @@ -161,7 +165,7 @@ tpm = time.time() for i in range(2): ped.compute_pedestal(noise_type='filt') - ped.update_mask(4.) + ped.update_mask(pars.ped_amp_sig_oth) #plot.plot_noise_daqch(noise_type='filt',option='coherent', vmin=0, vmax=vmax) @@ -181,7 +185,7 @@ th = time.time() - hf.find_hits(6, 10, 10, 3., 6., 2.) + hf.find_hits(pars.hit_pad_left,pars.hit_pad_right, pars.hit_dt_min[0], pars.hit_amp_sig[0],pars.hit_amp_sig[1],pars.hit_amp_sig[2]) print("hit %.2f s"%(time.time()-th)) print(dc.evt_list[-1].n_hits) plot.plot_2dview_hits(to_be_shown=False) diff --git a/settings/analysis_parameters.json b/settings/analysis_parameters.json new file mode 100644 index 0000000..31c0057 --- /dev/null +++ b/settings/analysis_parameters.json @@ -0,0 +1,16 @@ +{ + "1":{ + "pedestal":{ + "first_pass_thrs": 4, + "other_pass_thrs": 3 + }, + "hit_finder":{ + "amp_sig_thrs": [3,6,2], + "dt_min": [10,10,10], + "pad":{ + "left": 6, + "right": 10, + } + } + } +} From be77f3351f8e8975c1508d001d5d654be76fc92f Mon Sep 17 00:00:00 2001 From: Yoann Kermaidic Date: Thu, 25 Nov 2021 17:06:48 +0100 Subject: [PATCH 07/15] Fix JSON file readout bugs --- analysis_parameters.py | 28 +++++++++++++++------------- lardon.py | 3 ++- settings/analysis_parameters.json | 2 +- 3 files changed, 18 insertions(+), 15 deletions(-) diff --git a/analysis_parameters.py b/analysis_parameters.py index 02d4e39..39f150c 100644 --- a/analysis_parameters.py +++ b/analysis_parameters.py @@ -12,23 +12,25 @@ def __init__(self): def read(self,config="1"): try: - with open('settings/analysis_parameters.json','r') as f: - data = json.load(f) - if(config not in data): - print("WARNING: Thresholds configuration ",config," not found.") - print(" Default thresholds will be applied.") - else: - self.ped_amp_sig_fst = data[config]['pedestal']['first_pass_thrs'] - self.ped_amp_sig_oth = data[config]['pedestal']['other_pass_thrs'] - - self.hit_amp_sig = data[config]['hit_finder']['amp_sig_thrs'] - self.hit_dt_min = data[config]['hit_finder']['dt_min'] - self.hit_pad_left = data[config]['hit_finder']['pad']['left'] - self.hit_pad_right = data[config]['hit_finder']['pad']['right'] + with open('settings/analysis_parameters.json','r') as f: + data = json.load(f) + if(config not in data): + print("WARNING: Thresholds configuration ",config," not found.") + print(" Default thresholds will be applied.") + else: + self.ped_amp_sig_fst = data[config]['pedestal']['first_pass_thrs'] + self.ped_amp_sig_oth = data[config]['pedestal']['other_pass_thrs'] + + self.hit_amp_sig = data[config]['hit_finder']['amp_sig_thrs'] + self.hit_dt_min = data[config]['hit_finder']['dt_min'] + self.hit_pad_left = data[config]['hit_finder']['pad']['left'] + self.hit_pad_right = 
data[config]['hit_finder']['pad']['right'] + except: print("WARNING: Thresholds setting file (./settings/analysis_parameters.json) not found.") print(" Default thresholds will be applied.") + # setters and getters (potentially not useful now) def set_ped_amp_sig_fst(self,value): self.ped_amp_sig_fst = values diff --git a/lardon.py b/lardon.py index f6ee037..aceea09 100644 --- a/lardon.py +++ b/lardon.py @@ -18,6 +18,7 @@ parser.add_argument('-out', dest='outname', help='extra name on the output', default='') parser.add_argument('-skip', dest='evt_skip', type=int, help='nb of events to skip', default=0) parser.add_argument('-f', '--file', help="Override derived filename") +parser.add_argument('-conf','--config',dest='conf', help='Ananlysis configuration ID', default='1') args = parser.parse_args() if(args.elec == 'top' or args.elec == 'tde'): @@ -46,7 +47,7 @@ import store as store import hit_finder as hf import track_2d as trk2d - +import analysis_parameters as params plot.set_style() diff --git a/settings/analysis_parameters.json b/settings/analysis_parameters.json index 31c0057..b9afbef 100644 --- a/settings/analysis_parameters.json +++ b/settings/analysis_parameters.json @@ -9,7 +9,7 @@ "dt_min": [10,10,10], "pad":{ "left": 6, - "right": 10, + "right": 10 } } } From e4c9678b7fcf3c58a5e390ae49ea42c8e8105a79 Mon Sep 17 00:00:00 2001 From: Yoann Kermaidic Date: Fri, 26 Nov 2021 12:01:02 +0100 Subject: [PATCH 08/15] Test numba compatible function for getting hits --- hit_finder.py | 150 ++++++++++++++++++++++++++++++++------------------ 1 file changed, 96 insertions(+), 54 deletions(-) diff --git a/hit_finder.py b/hit_finder.py index 869e3a4..989d7fd 100644 --- a/hit_finder.py +++ b/hit_finder.py @@ -3,28 +3,46 @@ import lar_param as lar import numpy as np +import numba as nb def hit_search(data, view, daq_chan, start, dt_min, thr1, thr2, thr3): - if(cf.view_type[view] == "Collection"): - return hit_search_collection(data, view, daq_chan, start, dt_min, thr1, thr2) - elif(cf.view_type[view] == "Induction"): - return hit_search_induction(data, view, daq_chan, start, dt_min, thr3) - else : + ll = [] + if(cf.view_type[view] != "Collection" and cf.view_type[view] != "Induction"): print(cf.view_type[view], " is not recognized") sys.exit() + elif(cf.view_type[view] == "Collection"): + h_num, h_start, h_stop, h_charge_int, h_max_t, h_max_adc, h_min_t, h_min_adc = hit_search_collection_nb(data, view, daq_chan, start, dt_min, thr1, thr2) + elif(cf.view_type[view] == "Induction"): + h_num, h_start, h_stop, h_charge_int, h_max_t, h_max_adc, h_min_t, h_min_adc = hit_search_induction_nb(data, view, daq_chan, start, dt_min, thr3) + if(h_num == 0): return ll + for start, stop, charge_int, max_t, max_adc, min_t, min_adc in zip(h_start, h_stop, h_charge_int, h_max_t, h_max_adc, h_min_t, h_min_adc): + h = dc.hits(view, daq_chan, start, stop, charge_int, max_t, max_adc, min_t, min_adc) + ll.append(h) + if(len(ll) == h_num): break + return ll -def hit_search_induction(data, view, daq_chan, start, dt_min, thr): +@nb.njit('Tuple((int64,int16[:],int16[:],float64[:],int16[:],float64[:],int16[:],float64[:]))(float64[:],int64,int64,int64,int64,float64)') +def hit_search_induction_nb(data, view, daq_chan, start, dt_min, thr): """ very basic induction-like hit finder """ """ WARNING : CANNOT FIND OVERLAPPING HITS """ + + npts = len(data) - ll = [] + h_num = 0 # store number of found hits + # list of hits parameters to be returned by this numba function (see dc.hits) + h_start = np.zeros(npts,dtype=np.int16) + h_stop = 
np.zeros(npts,dtype=np.int16) + h_charge_int= np.zeros(npts) + h_max_t = np.zeros(npts,dtype=np.int16) + h_max_adc = np.zeros(npts) + h_min_t = np.zeros(npts,dtype=np.int16) + h_min_adc = np.zeros(npts) - npts = len(data) hitPosFlag = False hitNegFlag = False @@ -33,8 +51,7 @@ def hit_search_induction(data, view, daq_chan, start, dt_min, thr): posSamp = 0 negSamp = 0 - h = dc.hits(view, daq_chan, start, 0, 0., 0., 0., 0., 0.) - + h_start[h_num] = start while(i= thr and hitNegFlag == False): + val = data[i] + while(i < npts and val >= thr and hitNegFlag == False): val = data[i] it = i+start posSamp += 1 @@ -51,19 +69,19 @@ def hit_search_induction(data, view, daq_chan, start, dt_min, thr): if(hitPosFlag == False): hitPosFlag = True - h.start = it + h_start[h_num] = it - h.max_t = it - h.max_adc = val + h_max_t[h_num] = it + h_max_adc[h_num] = val """ update the maximum case """ - if(val > h.max_adc): - h.max_t = it - h.max_adc = val + if(val > h_max_adc[h_num]): + h_max_t[h_num] = it + h_max_adc[h_num] = val if(hitPosFlag): - h.charge_int += val + h_charge_int[h_num] += val i+=1 @@ -71,13 +89,15 @@ def hit_search_induction(data, view, daq_chan, start, dt_min, thr): hitPosFlag = False posSamp = 0 - while(i < npts and hitPosFlag and hitNegFlag == False and np.fabs(data[i]) < thr): - h.charge_int += np.fabs(val) + val = data[i] + while(i < npts and hitPosFlag and hitNegFlag == False and np.fabs(val) < thr): + h_charge_int[h_num] += np.fabs(val) i += 1 - + val = data[i] """ now the negative part """ - while(i < npts and hitPosFlag and data[i] < -1.*thr): + val = data[i] + while(i < npts and hitPosFlag and val < -1.*thr): val = data[i] it = i+start negSamp += 1 @@ -86,19 +106,19 @@ def hit_search_induction(data, view, daq_chan, start, dt_min, thr): if(hitNegFlag == False): hitNegFlag = True - h.min_t = it - h.min_adc = val + h_min_t[h_num] = it + h_min_adc[h_num] = val """ update the minimum case """ - if(val < h.min_adc): - h.min_t = it - h.min_adc = val + if(val < h_min_adc[h_num]): + h_min_t[h_num] = it + h_min_adc[h_num] = val if(hitNegFlag): - h.charge_int += -1.*val + h_charge_int[h_num] += -1.*val - h.stop = it + h_stop[h_num] = it i+=1 if(negSamp < dt_min): @@ -106,20 +126,28 @@ def hit_search_induction(data, view, daq_chan, start, dt_min, thr): negSamp = 0 if(hitPosFlag and hitNegFlag): - ll.append(h) + h_num += 1 break - - return ll + return h_num, h_start, h_stop, h_charge_int, h_max_t, h_max_adc, h_min_t, h_min_adc - -def hit_search_collection(data, view, daq_chan, start, dt_min, thr1, thr2): +@nb.njit('Tuple((int64,int16[:],int16[:],float64[:],int16[:],float64[:],int16[:],float64[:]))(float64[:],int64,int64,int64,int64,float64,float64)') +def hit_search_collection_nb(data, view, daq_chan, start, dt_min, thr1, thr2): """search hit-shape in a list of points""" """algorithm from qscan""" - - ll = [] npts = len(data) + + h_num = 0 # store number of found hits + # list of hits parameters to be returned by this numba function (see dc.hits) + h_start = np.zeros(npts,dtype=np.int16) + h_stop = np.zeros(npts,dtype=np.int16) + h_charge_int= np.zeros(npts) + h_max_t = np.zeros(npts,dtype=np.int16) + h_max_adc = np.zeros(npts) + h_min_t = np.zeros(npts,dtype=np.int16) + h_min_adc = np.zeros(npts) + hitFlag = False i=0 @@ -127,8 +155,6 @@ def hit_search_collection(data, view, daq_chan, start, dt_min, thr1, thr2): minSamp = -1 singleHit = True - - while(i= thr1): val = data[i] @@ -138,42 +164,59 @@ def hit_search_collection(data, view, daq_chan, start, dt_min, thr1, thr2): hitFlag = True 
singleHit = True - h = dc.hits(view,daq_chan,it,0,0.,it,val, 0., 0.) + h_start[h_num] = it + h_stop[h_num] = 0 + h_charge_int[h_num]= 0. + h_max_t[h_num] = it + h_max_adc[h_num] = val + h_min_t[h_num] = 0. + h_min_adc[h_num] = 0. + + #h = dc.hits(view,daq_chan,it,0,0.,it,val, 0., 0.) minSamp = -1 - if(it > h.max_t and val < h.max_adc - thr2 and (minSamp==-1 or minimum >= val)): + if(it > h_max_t[h_num] and val < h_max_adc[h_num] - thr2 and (minSamp==-1 or minimum >= val)): minSamp = it minimum = val - if(minSamp >= 0 and it > minSamp and val > minimum + thr2 and (it-h.start) >= dt_min): - h.stop = minSamp-1 - ll.append(h) + if(minSamp >= 0 and it > minSamp and val > minimum + thr2 and (it-h_start[h_num]) >= dt_min): + h_stop[h_num] = minSamp-1 + h_num += 1 hitFlag = True singleHit = False - h = dc.hits(view,daq_chan,minSamp,0,0,it,val, 0., 0.) + + h_start[h_num] = minSamp + h_stop[h_num] = 0 + h_charge_int[h_num]= 0. + h_max_t[h_num] = it + h_max_adc[h_num] = val + h_min_t[h_num] = 0. + h_min_adc[h_num] = 0. + minSamp = -1 - if(h.stop == 0 and val > h.max_adc): - h.max_t = it - h.max_adc = val + if(h_stop[h_num] == 0 and val > h_max_adc[h_num]): + h_max_t[h_num] = it + h_max_adc[h_num] = val if(minSamp >= 0): minSamp = -1 minimum = val - h.charge_int += val + h_charge_int[h_num] += val i+=1 if(hitFlag == True): hitFlag = False - h.stop = it-1 + h_stop[h_num] = it-1 - #if((singleHit and (h.stop-h.start >= dt_min)) or not singleHit): - if(h.stop-h.start >= dt_min): - ll.append(h) + #if((singleHit and (h_stop[h_num]-h_start[h_num] >= dt_min)) or not singleHit): + if(h_stop[h_num]-h_start[h_num] >= dt_min): + h_num += 1 i+=1 - return ll + return h_num, h_start, h_stop, h_charge_int, h_max_t, h_max_adc, h_min_t, h_min_adc + def recompute_hit_charge(hit): daq_ch, start, stop = hit.daq_channel, hit.start, hit.stop @@ -186,7 +229,6 @@ def recompute_hit_charge(hit): hit.charge_int = val - def find_hits(pad_left, pad_right, dt_min, n_sig_coll_1, n_sig_coll_2, n_sig_ind): """ get boolean roi based on mask and alive channels """ From bffbf4005de900cce75c42c77e841714764bc315 Mon Sep 17 00:00:00 2001 From: Yoann Kermaidic Date: Mon, 29 Nov 2021 09:29:56 +0100 Subject: [PATCH 09/15] ORevert "Fix JSON file readout bugs" This reverts commit a440a682602dce62dadc956c784b221195434573. 
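The `@nb.njit` signature strings in the hit_finder.py changes above use numba's eager-compilation form: since jitted code cannot append `dc.hits` objects to a Python list, each finder returns pre-allocated result arrays plus a hit counter, and `hit_search()` rebuilds the hit objects on the Python side. A minimal standalone sketch of that pattern, using a hypothetical `peaks_above` helper that is not part of lardon:

import numpy as np
import numba as nb

# Eagerly compiled: the signature string declares a tuple return of
# (number of peaks, peak sample indices, peak amplitudes).
@nb.njit('Tuple((int64,int16[:],float64[:]))(float64[:],float64)')
def peaks_above(data, thr):
    n = len(data)
    idx = np.zeros(n, dtype=np.int16)   # sample index of each candidate peak
    amp = np.zeros(n)                   # amplitude of each candidate peak
    num = 0                             # number of peaks actually found
    for i in range(1, n - 1):
        if data[i] >= thr and data[i] > data[i - 1] and data[i] >= data[i + 1]:
            idx[num] = i
            amp[num] = data[i]
            num += 1
    return num, idx, amp

# Back in plain Python, keep only the filled entries, as hit_search() does:
num, idx, amp = peaks_above(np.random.randn(5000), 3.0)
peaks = [(int(i), float(a)) for i, a in zip(idx[:num], amp[:num])]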
--- analysis_parameters.py | 28 +++++++++++++--------------- lardon.py | 3 +-- settings/analysis_parameters.json | 2 +- 3 files changed, 15 insertions(+), 18 deletions(-) diff --git a/analysis_parameters.py b/analysis_parameters.py index 39f150c..02d4e39 100644 --- a/analysis_parameters.py +++ b/analysis_parameters.py @@ -12,25 +12,23 @@ def __init__(self): def read(self,config="1"): try: - with open('settings/analysis_parameters.json','r') as f: - data = json.load(f) - if(config not in data): - print("WARNING: Thresholds configuration ",config," not found.") - print(" Default thresholds will be applied.") - else: - self.ped_amp_sig_fst = data[config]['pedestal']['first_pass_thrs'] - self.ped_amp_sig_oth = data[config]['pedestal']['other_pass_thrs'] - - self.hit_amp_sig = data[config]['hit_finder']['amp_sig_thrs'] - self.hit_dt_min = data[config]['hit_finder']['dt_min'] - self.hit_pad_left = data[config]['hit_finder']['pad']['left'] - self.hit_pad_right = data[config]['hit_finder']['pad']['right'] - + with open('settings/analysis_parameters.json','r') as f: + data = json.load(f) + if(config not in data): + print("WARNING: Thresholds configuration ",config," not found.") + print(" Default thresholds will be applied.") + else: + self.ped_amp_sig_fst = data[config]['pedestal']['first_pass_thrs'] + self.ped_amp_sig_oth = data[config]['pedestal']['other_pass_thrs'] + + self.hit_amp_sig = data[config]['hit_finder']['amp_sig_thrs'] + self.hit_dt_min = data[config]['hit_finder']['dt_min'] + self.hit_pad_left = data[config]['hit_finder']['pad']['left'] + self.hit_pad_right = data[config]['hit_finder']['pad']['right'] except: print("WARNING: Thresholds setting file (./settings/analysis_parameters.json) not found.") print(" Default thresholds will be applied.") - # setters and getters (potentially not useful now) def set_ped_amp_sig_fst(self,value): self.ped_amp_sig_fst = values diff --git a/lardon.py b/lardon.py index aceea09..f6ee037 100644 --- a/lardon.py +++ b/lardon.py @@ -18,7 +18,6 @@ parser.add_argument('-out', dest='outname', help='extra name on the output', default='') parser.add_argument('-skip', dest='evt_skip', type=int, help='nb of events to skip', default=0) parser.add_argument('-f', '--file', help="Override derived filename") -parser.add_argument('-conf','--config',dest='conf', help='Ananlysis configuration ID', default='1') args = parser.parse_args() if(args.elec == 'top' or args.elec == 'tde'): @@ -47,7 +46,7 @@ import store as store import hit_finder as hf import track_2d as trk2d -import analysis_parameters as params + plot.set_style() diff --git a/settings/analysis_parameters.json b/settings/analysis_parameters.json index b9afbef..31c0057 100644 --- a/settings/analysis_parameters.json +++ b/settings/analysis_parameters.json @@ -9,7 +9,7 @@ "dt_min": [10,10,10], "pad":{ "left": 6, - "right": 10 + "right": 10, } } } From ef561c034a60379e7cc7404f5e934a25aeddacdf Mon Sep 17 00:00:00 2001 From: Yoann Kermaidic Date: Mon, 29 Nov 2021 10:55:25 +0100 Subject: [PATCH 10/15] Add more analysis parameters in JSON file - revert numba stuff --- analysis_parameters.py | 43 +++++++-- hit_finder.py | 150 +++++++++++------------------- lardon.py | 47 +++++----- plotting/event_display.py | 12 +-- plotting/noise.py | 13 +-- settings/analysis_parameters.json | 84 +++++++++++++++-- 6 files changed, 198 insertions(+), 151 deletions(-) diff --git a/analysis_parameters.py b/analysis_parameters.py index 02d4e39..27888d0 100644 --- a/analysis_parameters.py +++ b/analysis_parameters.py @@ -1,16 +1,31 @@ 
import json as json +# To validate JSON file: https://jsonlint.com + class params: def __init__(self): self.ped_amp_sig_fst = 1 # default amplitude trigger threshold for 1st pass signal mask - in RMS self.ped_amp_sig_oth = 1 # default amplitude trigger threshold for other pass signal mask - in RMS + self.noise_coh_group = [32] # coherent noise channel grouping + self.noise_fft_freq = -1 # ?? + self.noise_fft_lcut = 0.0225 # low-pass filter frequency cut + self.hit_amp_sig = [3,6,2] # default amplitude trigger threshold for hit search - in RMS self.hit_dt_min = [10,10,10] # minimal delta t for hit search - in bins self.hit_pad_left = 6 self.hit_pad_right = 10 - def read(self,config="1"): + self.plt_noise_show = 0 + self.plt_evt_disp_daq_show = 0 + self.plt_evt_disp_vch_show = 0 + + self.plt_noise_zrange= [0,900] # color scale for noise plots + self.plt_evt_disp_daq_zrange = [-1000,1000] # color scale for DAQ channels event display plots + self.plt_evt_disp_vch_ind_zrange = [-100,100] # color scale for induction view event display plots + self.plt_evt_disp_vch_col_zrange = [-50,50] # color scale for collection view event display plots + + def read(self,elec="top",config="1"): try: with open('settings/analysis_parameters.json','r') as f: data = json.load(f) @@ -18,13 +33,25 @@ def read(self,config="1"): print("WARNING: Thresholds configuration ",config," not found.") print(" Default thresholds will be applied.") else: - self.ped_amp_sig_fst = data[config]['pedestal']['first_pass_thrs'] - self.ped_amp_sig_oth = data[config]['pedestal']['other_pass_thrs'] - - self.hit_amp_sig = data[config]['hit_finder']['amp_sig_thrs'] - self.hit_dt_min = data[config]['hit_finder']['dt_min'] - self.hit_pad_left = data[config]['hit_finder']['pad']['left'] - self.hit_pad_right = data[config]['hit_finder']['pad']['right'] + self.ped_amp_sig_fst = data[config][elec]['pedestal']['first_pass_thrs'] + self.ped_amp_sig_oth = data[config][elec]['pedestal']['other_pass_thrs'] + + self.noise_coh_group = data[config][elec]['noise']['coherent']['groupings'] + self.noise_fft_freq = data[config][elec]['noise']['fft']['freq'] + self.noise_fft_lcut = data[config][elec]['noise']['fft']['low_cut'] + + self.hit_amp_sig = data[config][elec]['hit_finder']['amp_sig_thrs'] + self.hit_dt_min = data[config][elec]['hit_finder']['dt_min'] + self.hit_pad_left = data[config][elec]['hit_finder']['pad']['left'] + self.hit_pad_right = data[config][elec]['hit_finder']['pad']['right'] + + self.plt_noise_show = data[config][elec]['plot']['noise']['show'] + self.plt_noise_zrange = data[config][elec]['plot']['noise']['zrange'] + self.plt_evt_disp_daq_show = data[config][elec]['plot']['evt_display']['daqch']['show'] + self.plt_evt_disp_daq_zrange = data[config][elec]['plot']['evt_display']['daqch']['zrange'] + self.plt_evt_disp_vch_show = data[config][elec]['plot']['evt_display']['viewch']['show'] + self.plt_evt_disp_vch_ind_zrange = data[config][elec]['plot']['evt_display']['viewch']['ind_zrange'] + self.plt_evt_disp_vch_col_zrange = data[config][elec]['plot']['evt_display']['viewch']['col_zrange'] except: print("WARNING: Thresholds setting file (./settings/analysis_parameters.json) not found.") print(" Default thresholds will be applied.") diff --git a/hit_finder.py b/hit_finder.py index 989d7fd..869e3a4 100644 --- a/hit_finder.py +++ b/hit_finder.py @@ -3,46 +3,28 @@ import lar_param as lar import numpy as np -import numba as nb def hit_search(data, view, daq_chan, start, dt_min, thr1, thr2, thr3): + if(cf.view_type[view] == "Collection"): + 
return hit_search_collection(data, view, daq_chan, start, dt_min, thr1, thr2) - ll = [] - if(cf.view_type[view] != "Collection" and cf.view_type[view] != "Induction"): + elif(cf.view_type[view] == "Induction"): + return hit_search_induction(data, view, daq_chan, start, dt_min, thr3) + else : print(cf.view_type[view], " is not recognized") sys.exit() - elif(cf.view_type[view] == "Collection"): - h_num, h_start, h_stop, h_charge_int, h_max_t, h_max_adc, h_min_t, h_min_adc = hit_search_collection_nb(data, view, daq_chan, start, dt_min, thr1, thr2) - elif(cf.view_type[view] == "Induction"): - h_num, h_start, h_stop, h_charge_int, h_max_t, h_max_adc, h_min_t, h_min_adc = hit_search_induction_nb(data, view, daq_chan, start, dt_min, thr3) - if(h_num == 0): return ll - for start, stop, charge_int, max_t, max_adc, min_t, min_adc in zip(h_start, h_stop, h_charge_int, h_max_t, h_max_adc, h_min_t, h_min_adc): - h = dc.hits(view, daq_chan, start, stop, charge_int, max_t, max_adc, min_t, min_adc) - ll.append(h) - if(len(ll) == h_num): break - return ll -@nb.njit('Tuple((int64,int16[:],int16[:],float64[:],int16[:],float64[:],int16[:],float64[:]))(float64[:],int64,int64,int64,int64,float64)') -def hit_search_induction_nb(data, view, daq_chan, start, dt_min, thr): +def hit_search_induction(data, view, daq_chan, start, dt_min, thr): """ very basic induction-like hit finder """ """ WARNING : CANNOT FIND OVERLAPPING HITS """ - - npts = len(data) - h_num = 0 # store number of found hits - # list of hits parameters to be returned by this numba function (see dc.hits) - h_start = np.zeros(npts,dtype=np.int16) - h_stop = np.zeros(npts,dtype=np.int16) - h_charge_int= np.zeros(npts) - h_max_t = np.zeros(npts,dtype=np.int16) - h_max_adc = np.zeros(npts) - h_min_t = np.zeros(npts,dtype=np.int16) - h_min_adc = np.zeros(npts) + ll = [] + npts = len(data) hitPosFlag = False hitNegFlag = False @@ -51,7 +33,8 @@ def hit_search_induction_nb(data, view, daq_chan, start, dt_min, thr): posSamp = 0 negSamp = 0 - h_start[h_num] = start + h = dc.hits(view, daq_chan, start, 0, 0., 0., 0., 0., 0.) 
+ while(i= thr and hitNegFlag == False): + while(i < npts and data[i] >= thr and hitNegFlag == False): val = data[i] it = i+start posSamp += 1 @@ -69,19 +51,19 @@ def hit_search_induction_nb(data, view, daq_chan, start, dt_min, thr): if(hitPosFlag == False): hitPosFlag = True - h_start[h_num] = it + h.start = it - h_max_t[h_num] = it - h_max_adc[h_num] = val + h.max_t = it + h.max_adc = val """ update the maximum case """ - if(val > h_max_adc[h_num]): - h_max_t[h_num] = it - h_max_adc[h_num] = val + if(val > h.max_adc): + h.max_t = it + h.max_adc = val if(hitPosFlag): - h_charge_int[h_num] += val + h.charge_int += val i+=1 @@ -89,15 +71,13 @@ def hit_search_induction_nb(data, view, daq_chan, start, dt_min, thr): hitPosFlag = False posSamp = 0 - val = data[i] - while(i < npts and hitPosFlag and hitNegFlag == False and np.fabs(val) < thr): - h_charge_int[h_num] += np.fabs(val) + while(i < npts and hitPosFlag and hitNegFlag == False and np.fabs(data[i]) < thr): + h.charge_int += np.fabs(val) i += 1 - val = data[i] + """ now the negative part """ - val = data[i] - while(i < npts and hitPosFlag and val < -1.*thr): + while(i < npts and hitPosFlag and data[i] < -1.*thr): val = data[i] it = i+start negSamp += 1 @@ -106,19 +86,19 @@ def hit_search_induction_nb(data, view, daq_chan, start, dt_min, thr): if(hitNegFlag == False): hitNegFlag = True - h_min_t[h_num] = it - h_min_adc[h_num] = val + h.min_t = it + h.min_adc = val """ update the minimum case """ - if(val < h_min_adc[h_num]): - h_min_t[h_num] = it - h_min_adc[h_num] = val + if(val < h.min_adc): + h.min_t = it + h.min_adc = val if(hitNegFlag): - h_charge_int[h_num] += -1.*val + h.charge_int += -1.*val - h_stop[h_num] = it + h.stop = it i+=1 if(negSamp < dt_min): @@ -126,28 +106,20 @@ def hit_search_induction_nb(data, view, daq_chan, start, dt_min, thr): negSamp = 0 if(hitPosFlag and hitNegFlag): - h_num += 1 + ll.append(h) break + + return ll - return h_num, h_start, h_stop, h_charge_int, h_max_t, h_max_adc, h_min_t, h_min_adc -@nb.njit('Tuple((int64,int16[:],int16[:],float64[:],int16[:],float64[:],int16[:],float64[:]))(float64[:],int64,int64,int64,int64,float64,float64)') -def hit_search_collection_nb(data, view, daq_chan, start, dt_min, thr1, thr2): + +def hit_search_collection(data, view, daq_chan, start, dt_min, thr1, thr2): """search hit-shape in a list of points""" """algorithm from qscan""" - npts = len(data) - h_num = 0 # store number of found hits - # list of hits parameters to be returned by this numba function (see dc.hits) - h_start = np.zeros(npts,dtype=np.int16) - h_stop = np.zeros(npts,dtype=np.int16) - h_charge_int= np.zeros(npts) - h_max_t = np.zeros(npts,dtype=np.int16) - h_max_adc = np.zeros(npts) - h_min_t = np.zeros(npts,dtype=np.int16) - h_min_adc = np.zeros(npts) - + ll = [] + npts = len(data) hitFlag = False i=0 @@ -155,6 +127,8 @@ def hit_search_collection_nb(data, view, daq_chan, start, dt_min, thr1, thr2): minSamp = -1 singleHit = True + + while(i= thr1): val = data[i] @@ -164,59 +138,42 @@ def hit_search_collection_nb(data, view, daq_chan, start, dt_min, thr1, thr2): hitFlag = True singleHit = True - h_start[h_num] = it - h_stop[h_num] = 0 - h_charge_int[h_num]= 0. - h_max_t[h_num] = it - h_max_adc[h_num] = val - h_min_t[h_num] = 0. - h_min_adc[h_num] = 0. - - #h = dc.hits(view,daq_chan,it,0,0.,it,val, 0., 0.) + h = dc.hits(view,daq_chan,it,0,0.,it,val, 0., 0.) 
minSamp = -1 - if(it > h_max_t[h_num] and val < h_max_adc[h_num] - thr2 and (minSamp==-1 or minimum >= val)): + if(it > h.max_t and val < h.max_adc - thr2 and (minSamp==-1 or minimum >= val)): minSamp = it minimum = val - if(minSamp >= 0 and it > minSamp and val > minimum + thr2 and (it-h_start[h_num]) >= dt_min): - h_stop[h_num] = minSamp-1 - h_num += 1 + if(minSamp >= 0 and it > minSamp and val > minimum + thr2 and (it-h.start) >= dt_min): + h.stop = minSamp-1 + ll.append(h) hitFlag = True singleHit = False - - h_start[h_num] = minSamp - h_stop[h_num] = 0 - h_charge_int[h_num]= 0. - h_max_t[h_num] = it - h_max_adc[h_num] = val - h_min_t[h_num] = 0. - h_min_adc[h_num] = 0. - + h = dc.hits(view,daq_chan,minSamp,0,0,it,val, 0., 0.) minSamp = -1 - if(h_stop[h_num] == 0 and val > h_max_adc[h_num]): - h_max_t[h_num] = it - h_max_adc[h_num] = val + if(h.stop == 0 and val > h.max_adc): + h.max_t = it + h.max_adc = val if(minSamp >= 0): minSamp = -1 minimum = val - h_charge_int[h_num] += val + h.charge_int += val i+=1 if(hitFlag == True): hitFlag = False - h_stop[h_num] = it-1 + h.stop = it-1 - #if((singleHit and (h_stop[h_num]-h_start[h_num] >= dt_min)) or not singleHit): - if(h_stop[h_num]-h_start[h_num] >= dt_min): - h_num += 1 + #if((singleHit and (h.stop-h.start >= dt_min)) or not singleHit): + if(h.stop-h.start >= dt_min): + ll.append(h) i+=1 - return h_num, h_start, h_stop, h_charge_int, h_max_t, h_max_adc, h_min_t, h_min_adc - + return ll def recompute_hit_charge(hit): daq_ch, start, stop = hit.daq_channel, hit.start, hit.stop @@ -229,6 +186,7 @@ def recompute_hit_charge(hit): hit.charge_int = val + def find_hits(pad_left, pad_right, dt_min, n_sig_coll_1, n_sig_coll_2, n_sig_ind): """ get boolean roi based on mask and alive channels """ diff --git a/lardon.py b/lardon.py index f6ee037..fdfb9d8 100644 --- a/lardon.py +++ b/lardon.py @@ -18,6 +18,7 @@ parser.add_argument('-out', dest='outname', help='extra name on the output', default='') parser.add_argument('-skip', dest='evt_skip', type=int, help='nb of events to skip', default=0) parser.add_argument('-f', '--file', help="Override derived filename") +parser.add_argument('-conf','--config',dest='conf', help='Analysis configuration ID', default='1') args = parser.parse_args() if(args.elec == 'top' or args.elec == 'tde'): @@ -46,7 +47,7 @@ import store as store import hit_finder as hf import track_2d as trk2d - +import analysis_parameters as params plot.set_style() @@ -63,7 +64,7 @@ """ set analysis parameters """ pars = params.params() -pars.read(config=args.conf) +pars.read(config=args.conf,elec=elec) """ set the channel mapping """ print(" will use ", cf.channel_map) @@ -115,20 +116,16 @@ ped.compute_pedestal(noise_type='raw') - vmax = 900 if elec == 'bot' else 15 - - #plot.plot_noise_daqch(noise_type='raw',vmin=0,vmax=vmax,to_be_shown=False) - #plot.plot_noise_vch(noise_type='raw', vmin=0, vmax=vmax,to_be_shown=False) - #plot.plot_noise_globch(noise_type='raw', vmin=0, vmax=vmax,to_be_shown=False) - #plot.event_display_per_daqch(-1000,1000,option='raw',to_be_shown=False) + #plot.plot_noise_daqch(noise_type='raw',vrange=pars.plt_noise_zrange,to_be_shown=False) + #plot.plot_noise_vch(noise_type='raw', vrange=pars.plt_noise_zrange,to_be_shown=False) + #plot.plot_noise_globch(noise_type='raw', vrange=pars.plt_noise_zrange,to_be_shown=False) + #plot.event_display_per_daqch(pars.plt_evt_disp_daqch_zrange,option='raw',to_be_shown=False) #cmap.arange_in_view_channels() - #plot.event_display_per_view(-1000,1000,-500,500,option='raw', 
to_be_shown=False) + #plot.event_display_per_view(pars.plt_evt_disp_vch_ind_zrange,pars.plt_evt_disp_vch_col_zrange,option='raw', to_be_shown=False) - fft_low_cut = 0.6 if elec=='top' else 0.4 - fft_freq = -1 if elec=='top' else 0.0225 tf = time.time() - ps = noise.FFT_low_pass(fft_low_cut, fft_freq) + ps = noise.FFT_low_pass(pars.noise_fft_lcut,pars.noise_fft_freq) """ DO NOT STORE ALL FFT PS !! """ #store.store_fft(output, ps) @@ -140,24 +137,22 @@ for i in range(2): ped.compute_pedestal(noise_type='filt') ped.update_mask(pars.ped_amp_sig_fst) - #plot.plot_noise_daqch(noise_type='filt',option='fft', vmin=0, vmax=vmax) - #plot.plot_noise_vch(noise_type='filt', vmin=0, vmax=vmax,option='fft')#,to_be_shown=True) + #plot.plot_noise_daqch(noise_type='filt',option='fft', vrange=pars.plt_noise_zrange) + #plot.plot_noise_vch(noise_type='filt', vrange=pars.plt_noise_zrange,option='fft')#,to_be_shown=True) + - #plot.plot_FFT_daqch(ps,option='raw',to_be_shown=False) #plot.plot_FFT_vch(ps,option='raw',to_be_shown=False) #cmap.arange_in_view_channels() - #plot.event_display_per_view(-100,100,-50,50,option='fft', to_be_shown=False) + #plot.event_display_per_view(pars.plt_evt_disp_vch_ind_zrange,pars.plt_evt_disp_vch_col_zrange,option='fft', to_be_shown=False) #plot.plot_correlation_daqch(option='fft',to_be_shown=True) #plot.plot_correlation_globch(option='fft', to_be_shown=False) - noise_group = [32] if elec == 'top' else [32] - tcoh = time.time() - noise.coherent_noise(noise_group) + noise.coherent_noise(pars.noise_coh_group) print("coherent noise : ", time.time()-tcoh) @@ -167,16 +162,16 @@ ped.compute_pedestal(noise_type='filt') ped.update_mask(pars.ped_amp_sig_oth) - - #plot.plot_noise_daqch(noise_type='filt',option='coherent', vmin=0, vmax=vmax) - #plot.plot_noise_vch(noise_type='filt', vmin=0, vmax=vmax,option='coherent',to_be_shown=False) + #plot.plot_noise_daqch(noise_type='filt',option='coherent', vrange=pars.plt_noise_zrange) + #plot.plot_noise_vch(noise_type='filt', vrange=pars.plt_noise_zrange,option='coherent',to_be_shown=False) - #plot.event_display_per_daqch(-1000,1000,option='coherent',to_be_shown=False) + #plot.event_display_per_daqch(pars.plt_evt_disp_daqch_zrange,option='coherent',to_be_shown=False) #cmap.arange_in_view_channels() - #plot.event_display_per_view(-100, 100,-50,50,option='coherent', to_be_shown=False) - #plot.plot_noise_daqch(noise_type='filt',option='coherent', vmin=0, vmax=vmax) - #plot.plot_noise_vch(noise_type='filt', vmin=0, vmax=vmax,option='coherent',to_be_shown=False) + #plot.event_display_per_view(pars.plt_evt_disp_vch_ind_zrange,pars.plt_evt_disp_vch_col_zrange,option='coherent', to_be_shown=False) + #plot.plot_noise_daqch(noise_type='filt',option='coherent', vrange=pars.plt_noise_zrange) + #plot.plot_noise_vch(noise_type='filt', vrange=pars.plt_noise_zrange,option='coherent',to_be_shown=False) + store.store_pedestals(output) diff --git a/plotting/event_display.py b/plotting/event_display.py index e4c0b80..0052a2f 100644 --- a/plotting/event_display.py +++ b/plotting/event_display.py @@ -27,7 +27,7 @@ def draw(view, ax, adc_min, adc_max): return ax -def event_display_per_view(adc_min_ind=-10, adc_max_ind=10, adc_min_coll=-5, adc_max_coll=30, option=None, to_be_shown=False): +def event_display_per_view(adc_ind=[-10,10], adc_coll=[-5,30], option=None, to_be_shown=False): fig = plt.figure(figsize=(10,5)) gs = gridspec.GridSpec(nrows=2, @@ -46,10 +46,10 @@ def event_display_per_view(adc_min_ind=-10, adc_max_ind=10, adc_min_coll=-5, adc if(cf.view_type[iv] == 
'Induction'): - axs[iv] = draw(iv, axs[iv], adc_min_ind, adc_max_ind) + axs[iv] = draw(iv, axs[iv], adc_ind[0], adc_ind[1]) vname = 'Ind.' else: - axs[iv] = draw(iv, axs[iv], adc_min_coll, adc_max_coll) + axs[iv] = draw(iv, axs[iv], adc_coll[0], adc_coll[1]) vname = 'Coll.' axs[iv].set_title('View '+str(iv)+'/'+cf.view_name[iv]+' ('+vname+')') @@ -98,7 +98,7 @@ def event_display_per_view(adc_min_ind=-10, adc_max_ind=10, adc_min_coll=-5, adc -def event_display_per_daqch(adc_min=-10, adc_max=10, option=None, to_be_shown=False): +def event_display_per_daqch(adc_range=[-10,10], option=None, to_be_shown=False): fig = plt.figure(figsize=(9,4)) gs = gridspec.GridSpec(nrows=2, @@ -113,8 +113,8 @@ def event_display_per_daqch(adc_min=-10, adc_max=10, option=None, to_be_shown=Fa aspect = 'auto', interpolation='none', cmap = cmap_ed_ind, - vmin = adc_min, - vmax = adc_max) + vmin = adc_range[0], + vmax = adc_range[1]) ax.set_xlabel('DAQ Channel Number') diff --git a/plotting/noise.py b/plotting/noise.py index 9ac1269..d963459 100644 --- a/plotting/noise.py +++ b/plotting/noise.py @@ -17,7 +17,7 @@ -def plot_noise_daqch(noise_type, vmin=0, vmax=10, option=None, to_be_shown=False): +def plot_noise_daqch(noise_type, vrange=[0,10], option=None, to_be_shown=False): if(noise_type==""): print("plot_noise_vch needs to know which noise to show (raw or filt)") return @@ -41,7 +41,7 @@ def plot_noise_daqch(noise_type, vmin=0, vmax=10, option=None, to_be_shown=False ax_std.set_ylabel('RMS Ped [ADC]') ax_std.set_xlabel('DAQ Channel Number') - ax_std.set_ylim(vmin,vmax) + ax_std.set_ylim(vrange[0],vrange[1]) plt.tight_layout() @@ -65,7 +65,7 @@ def plot_noise_daqch(noise_type, vmin=0, vmax=10, option=None, to_be_shown=False -def plot_noise_globch(noise_type, vmin=0, vmax=10, option=None, to_be_shown=False): +def plot_noise_globch(noise_type, vrange=[0,10], option=None, to_be_shown=False): if(noise_type==""): print("plot_noise_vch needs to know which noise to show (raw or filt)") return @@ -99,7 +99,7 @@ def plot_noise_globch(noise_type, vmin=0, vmax=10, option=None, to_be_shown=Fals ax_std.set_ylabel('RMS Ped [ADC]') ax_std.set_xlabel('DAQ Channel Number') - ax_std.set_ylim(vmin,vmax) + ax_std.set_ylim(vrange[0],vrange[1]) plt.tight_layout() @@ -124,11 +124,12 @@ def plot_noise_globch(noise_type, vmin=0, vmax=10, option=None, to_be_shown=Fals -def plot_noise_vch(noise_type, vmin=0,vmax=10, option=None, to_be_shown=False): +def plot_noise_vch(noise_type, vrange=[0,10], option=None, to_be_shown=False): if(noise_type==""): print("plot_noise_vch needs to know which noise to show (raw or filt)") return + if(noise_type != 'raw' and noise_type != 'filt'): print("plot_noise_vch needs to know which noise to show (raw or filt) : ", noise_type, ' is not recognized') @@ -156,7 +157,7 @@ def plot_noise_vch(noise_type, vmin=0,vmax=10, option=None, to_be_shown=False): axs[iv].set_xlabel('Channel Number') axs[iv].set_title('View '+str(iv)+'/'+cf.view_name[iv]) axs[iv].set_xlim(0,cf.view_nchan[iv]) - axs[iv].set_ylim(vmin,vmax) + axs[iv].set_ylim(vrange[0],vrange[1]) plt.tight_layout() diff --git a/settings/analysis_parameters.json b/settings/analysis_parameters.json index 31c0057..cc0b183 100644 --- a/settings/analysis_parameters.json +++ b/settings/analysis_parameters.json @@ -1,15 +1,81 @@ { "1":{ - "pedestal":{ - "first_pass_thrs": 4, - "other_pass_thrs": 3 + "top":{ + "pedestal":{ + "first_pass_thrs": 4, + "other_pass_thrs": 3 + }, + "noise":{ + "coherent":{ + "groupings":[32] + }, + "fft":{ + "freq":-1, + "low_cut":0.6 + } + 
}, + "hit_finder":{ + "amp_sig_thrs": [3,6,2], + "dt_min": [10,10,10], + "pad":{ + "left": 6, + "right": 10 + } + }, + "plot":{ + "noise":{ + "vmin": 0, + "vmax": 900 + }, + "evt_display":{ + "daqch":{ + "zrange":[-1000,1000] + }, + "viewch":{ + "ind_zrange":[-100,100], + "col_zrange":[-50,50] + } + } + } }, - "hit_finder":{ - "amp_sig_thrs": [3,6,2], - "dt_min": [10,10,10], - "pad":{ - "left": 6, - "right": 10, + "bot":{ + "pedestal":{ + "first_pass_thrs": 4, + "other_pass_thrs": 3 + }, + "noise":{ + "coherent":{ + "groupings":[32] + }, + "fft":{ + "freq":0.0225, + "low_cut":0.4 + } + }, + "hit_finder":{ + "amp_sig_thrs": [3,6,2], + "dt_min": [10,10,10], + "pad":{ + "left": 6, + "right": 10 + } + }, + "plot":{ + "noise":{ + "show": 0, + "zrange": [0,900] + }, + "evt_display":{ + "daqch":{ + "show": 0, + "zrange":[-1000,1000] + }, + "viewch":{ + "show": 0, + "ind_zrange":[-100,100], + "col_zrange":[-50,50] + } + } } } } From c5872f9348251041bdd3a83ff26ad098ec23d781 Mon Sep 17 00:00:00 2001 From: Yoann Kermaidic Date: Wed, 1 Dec 2021 11:53:22 +0100 Subject: [PATCH 11/15] Add thresholds for refine_mask() --- analysis_parameters.py | 25 +++++++---- settings/analysis_parameters.json | 73 ++++++++++++++++++++++++++++--- 2 files changed, 83 insertions(+), 15 deletions(-) diff --git a/analysis_parameters.py b/analysis_parameters.py index 27888d0..9beac27 100644 --- a/analysis_parameters.py +++ b/analysis_parameters.py @@ -1,4 +1,5 @@ import json as json +import sys # To validate JSON file: https://jsonlint.com @@ -6,6 +7,11 @@ class params: def __init__(self): self.ped_amp_sig_fst = 1 # default amplitude trigger threshold for 1st pass signal mask - in RMS self.ped_amp_sig_oth = 1 # default amplitude trigger threshold for other pass signal mask - in RMS + self.ped_rise_thr = [5,5,30] # consecutive positive samples threshold + self.ped_ramp_thr = [1,1,2] # consecutive increasing sample threshold + self.ped_amp_thr = [2,2,3] # RMS amplitude threshold + self.ped_dt_thr = 100 # trigger window duration in which a signal is looked for + self.ped_zero_cross_thr = 15 # minimal number of samples after downward zero-crossing to look for upward zero-crossing self.noise_coh_group = [32] # coherent noise channel grouping self.noise_fft_freq = -1 # ?? @@ -13,8 +19,8 @@ def __init__(self): self.hit_amp_sig = [3,6,2] # default amplitude trigger threshold for hit search - in RMS self.hit_dt_min = [10,10,10] # minimal delta t for hit search - in bins - self.hit_pad_left = 6 - self.hit_pad_right = 10 + self.hit_pad_left = 6 # hit lower side band + self.hit_pad_right = 10 # hit upper side band self.plt_noise_show = 0 self.plt_evt_disp_daq_show = 0 @@ -26,15 +32,21 @@ def __init__(self): self.plt_evt_disp_vch_col_zrange = [-50,50] # color scale for collection view event display plots def read(self,elec="top",config="1"): - try: - with open('settings/analysis_parameters.json','r') as f: + with open('settings/analysis_parameters.json','r') as f: + print("AnalysisParameters: Loading analysis setting file: settings/analysis_parameters.json ... 
", end='') data = json.load(f) + print("done") if(config not in data): - print("WARNING: Thresholds configuration ",config," not found.") + print("WARNING: Analysis configuration ",config," not found.") print(" Default thresholds will be applied.") else: self.ped_amp_sig_fst = data[config][elec]['pedestal']['first_pass_thrs'] self.ped_amp_sig_oth = data[config][elec]['pedestal']['other_pass_thrs'] + self.ped_rise_thr = data[config][elec]['pedestal']['rise_thr'] + self.ped_ramp_thr = data[config][elec]['pedestal']['ramp_thr'] + self.ped_amp_thr = data[config][elec]['pedestal']['amp_thr'] + self.ped_dt_thr = data[config][elec]['pedestal']['dt_thr'] + self.ped_zero_cross_thr = data[config][elec]['pedestal']['zero_cross_thr'] self.noise_coh_group = data[config][elec]['noise']['coherent']['groupings'] self.noise_fft_freq = data[config][elec]['noise']['fft']['freq'] @@ -52,9 +64,6 @@ def read(self,elec="top",config="1"): self.plt_evt_disp_vch_show = data[config][elec]['plot']['evt_display']['viewch']['show'] self.plt_evt_disp_vch_ind_zrange = data[config][elec]['plot']['evt_display']['viewch']['ind_zrange'] self.plt_evt_disp_vch_col_zrange = data[config][elec]['plot']['evt_display']['viewch']['col_zrange'] - except: - print("WARNING: Thresholds setting file (./settings/analysis_parameters.json) not found.") - print(" Default thresholds will be applied.") # setters and getters (potentially not useful now) def set_ped_amp_sig_fst(self,value): diff --git a/settings/analysis_parameters.json b/settings/analysis_parameters.json index cc0b183..1e4d147 100644 --- a/settings/analysis_parameters.json +++ b/settings/analysis_parameters.json @@ -3,7 +3,12 @@ "top":{ "pedestal":{ "first_pass_thrs": 4, - "other_pass_thrs": 3 + "other_pass_thrs": 3, + "rise_thr": [5,5,30], + "ramp_thr": [1,1,2], + "amp_thr": [2,2,3], + "dt_thr": 100, + "zero_cross_thr": 15 }, "noise":{ "coherent":{ @@ -24,15 +29,17 @@ }, "plot":{ "noise":{ - "vmin": 0, - "vmax": 900 + "show": 0, + "zrange": [0,900] }, "evt_display":{ "daqch":{ - "zrange":[-1000,1000] + "show":0, + "zrange":[-50,50] }, "viewch":{ - "ind_zrange":[-100,100], + "show":0, + "ind_zrange":[-50,50], "col_zrange":[-50,50] } } @@ -41,7 +48,12 @@ "bot":{ "pedestal":{ "first_pass_thrs": 4, - "other_pass_thrs": 3 + "other_pass_thrs": 3, + "rise_thr": [5,5,30], + "ramp_thr": [1,1,2], + "amp_thr": [2,2,3], + "dt_thr": 100, + "zero_cross_thr": 15 }, "noise":{ "coherent":{ @@ -78,5 +90,52 @@ } } } - } + }, + "2":{ + "top":{ + "pedestal":{ + "first_pass_thrs": 4, + "other_pass_thrs": 3, + "rise_thr": [5,5,30], + "ramp_thr": [1,1,2], + "amp_thr": [2,2,3], + "dt_thr": 100, + "zero_cross_thr": 15 + }, + "noise":{ + "coherent":{ + "groupings":[32] + }, + "fft":{ + "freq":-1, + "low_cut":0.6 + } + }, + "hit_finder":{ + "amp_sig_thrs": [3,3,3], + "dt_min": [50,50,50], + "pad":{ + "left": 6, + "right": 10 + } + }, + "plot":{ + "noise":{ + "show": 0, + "zrange": [0,900] + }, + "evt_display":{ + "daqch":{ + "show": 0, + "zrange":[-1000,1000] + }, + "viewch":{ + "show": 0, + "ind_zrange":[-100,100], + "col_zrange":[-50,50] + } + } + } + } + } } From 00fda25ef894b8edebe05f1be1cc671083a8902c Mon Sep 17 00:00:00 2001 From: Yoann Kermaidic Date: Fri, 26 Nov 2021 12:01:02 +0100 Subject: [PATCH 12/15] Test numba compatible function for getting hits --- hit_finder.py | 150 ++++++++++++++++++++++++++++++++------------------ 1 file changed, 96 insertions(+), 54 deletions(-) diff --git a/hit_finder.py b/hit_finder.py index 869e3a4..989d7fd 100644 --- a/hit_finder.py +++ b/hit_finder.py @@ -3,28 
+3,46 @@ import lar_param as lar import numpy as np +import numba as nb def hit_search(data, view, daq_chan, start, dt_min, thr1, thr2, thr3): - if(cf.view_type[view] == "Collection"): - return hit_search_collection(data, view, daq_chan, start, dt_min, thr1, thr2) - elif(cf.view_type[view] == "Induction"): - return hit_search_induction(data, view, daq_chan, start, dt_min, thr3) - else : + ll = [] + if(cf.view_type[view] != "Collection" and cf.view_type[view] != "Induction"): print(cf.view_type[view], " is not recognized") sys.exit() + elif(cf.view_type[view] == "Collection"): + h_num, h_start, h_stop, h_charge_int, h_max_t, h_max_adc, h_min_t, h_min_adc = hit_search_collection_nb(data, view, daq_chan, start, dt_min, thr1, thr2) + elif(cf.view_type[view] == "Induction"): + h_num, h_start, h_stop, h_charge_int, h_max_t, h_max_adc, h_min_t, h_min_adc = hit_search_induction_nb(data, view, daq_chan, start, dt_min, thr3) + if(h_num == 0): return ll + for start, stop, charge_int, max_t, max_adc, min_t, min_adc in zip(h_start, h_stop, h_charge_int, h_max_t, h_max_adc, h_min_t, h_min_adc): + h = dc.hits(view, daq_chan, start, stop, charge_int, max_t, max_adc, min_t, min_adc) + ll.append(h) + if(len(ll) == h_num): break + return ll -def hit_search_induction(data, view, daq_chan, start, dt_min, thr): +@nb.njit('Tuple((int64,int16[:],int16[:],float64[:],int16[:],float64[:],int16[:],float64[:]))(float64[:],int64,int64,int64,int64,float64)') +def hit_search_induction_nb(data, view, daq_chan, start, dt_min, thr): """ very basic induction-like hit finder """ """ WARNING : CANNOT FIND OVERLAPPING HITS """ + + npts = len(data) - ll = [] + h_num = 0 # store number of found hits + # list of hits parameters to be returned by this numba function (see dc.hits) + h_start = np.zeros(npts,dtype=np.int16) + h_stop = np.zeros(npts,dtype=np.int16) + h_charge_int= np.zeros(npts) + h_max_t = np.zeros(npts,dtype=np.int16) + h_max_adc = np.zeros(npts) + h_min_t = np.zeros(npts,dtype=np.int16) + h_min_adc = np.zeros(npts) - npts = len(data) hitPosFlag = False hitNegFlag = False @@ -33,8 +51,7 @@ def hit_search_induction(data, view, daq_chan, start, dt_min, thr): posSamp = 0 negSamp = 0 - h = dc.hits(view, daq_chan, start, 0, 0., 0., 0., 0., 0.) 
- + h_start[h_num] = start while(i= thr and hitNegFlag == False): + val = data[i] + while(i < npts and val >= thr and hitNegFlag == False): val = data[i] it = i+start posSamp += 1 @@ -51,19 +69,19 @@ def hit_search_induction(data, view, daq_chan, start, dt_min, thr): if(hitPosFlag == False): hitPosFlag = True - h.start = it + h_start[h_num] = it - h.max_t = it - h.max_adc = val + h_max_t[h_num] = it + h_max_adc[h_num] = val """ update the maximum case """ - if(val > h.max_adc): - h.max_t = it - h.max_adc = val + if(val > h_max_adc[h_num]): + h_max_t[h_num] = it + h_max_adc[h_num] = val if(hitPosFlag): - h.charge_int += val + h_charge_int[h_num] += val i+=1 @@ -71,13 +89,15 @@ def hit_search_induction(data, view, daq_chan, start, dt_min, thr): hitPosFlag = False posSamp = 0 - while(i < npts and hitPosFlag and hitNegFlag == False and np.fabs(data[i]) < thr): - h.charge_int += np.fabs(val) + val = data[i] + while(i < npts and hitPosFlag and hitNegFlag == False and np.fabs(val) < thr): + h_charge_int[h_num] += np.fabs(val) i += 1 - + val = data[i] """ now the negative part """ - while(i < npts and hitPosFlag and data[i] < -1.*thr): + val = data[i] + while(i < npts and hitPosFlag and val < -1.*thr): val = data[i] it = i+start negSamp += 1 @@ -86,19 +106,19 @@ def hit_search_induction(data, view, daq_chan, start, dt_min, thr): if(hitNegFlag == False): hitNegFlag = True - h.min_t = it - h.min_adc = val + h_min_t[h_num] = it + h_min_adc[h_num] = val """ update the minimum case """ - if(val < h.min_adc): - h.min_t = it - h.min_adc = val + if(val < h_min_adc[h_num]): + h_min_t[h_num] = it + h_min_adc[h_num] = val if(hitNegFlag): - h.charge_int += -1.*val + h_charge_int[h_num] += -1.*val - h.stop = it + h_stop[h_num] = it i+=1 if(negSamp < dt_min): @@ -106,20 +126,28 @@ def hit_search_induction(data, view, daq_chan, start, dt_min, thr): negSamp = 0 if(hitPosFlag and hitNegFlag): - ll.append(h) + h_num += 1 break - - return ll + return h_num, h_start, h_stop, h_charge_int, h_max_t, h_max_adc, h_min_t, h_min_adc - -def hit_search_collection(data, view, daq_chan, start, dt_min, thr1, thr2): +@nb.njit('Tuple((int64,int16[:],int16[:],float64[:],int16[:],float64[:],int16[:],float64[:]))(float64[:],int64,int64,int64,int64,float64,float64)') +def hit_search_collection_nb(data, view, daq_chan, start, dt_min, thr1, thr2): """search hit-shape in a list of points""" """algorithm from qscan""" - - ll = [] npts = len(data) + + h_num = 0 # store number of found hits + # list of hits parameters to be returned by this numba function (see dc.hits) + h_start = np.zeros(npts,dtype=np.int16) + h_stop = np.zeros(npts,dtype=np.int16) + h_charge_int= np.zeros(npts) + h_max_t = np.zeros(npts,dtype=np.int16) + h_max_adc = np.zeros(npts) + h_min_t = np.zeros(npts,dtype=np.int16) + h_min_adc = np.zeros(npts) + hitFlag = False i=0 @@ -127,8 +155,6 @@ def hit_search_collection(data, view, daq_chan, start, dt_min, thr1, thr2): minSamp = -1 singleHit = True - - while(i= thr1): val = data[i] @@ -138,42 +164,59 @@ def hit_search_collection(data, view, daq_chan, start, dt_min, thr1, thr2): hitFlag = True singleHit = True - h = dc.hits(view,daq_chan,it,0,0.,it,val, 0., 0.) + h_start[h_num] = it + h_stop[h_num] = 0 + h_charge_int[h_num]= 0. + h_max_t[h_num] = it + h_max_adc[h_num] = val + h_min_t[h_num] = 0. + h_min_adc[h_num] = 0. + + #h = dc.hits(view,daq_chan,it,0,0.,it,val, 0., 0.) 
minSamp = -1 - if(it > h.max_t and val < h.max_adc - thr2 and (minSamp==-1 or minimum >= val)): + if(it > h_max_t[h_num] and val < h_max_adc[h_num] - thr2 and (minSamp==-1 or minimum >= val)): minSamp = it minimum = val - if(minSamp >= 0 and it > minSamp and val > minimum + thr2 and (it-h.start) >= dt_min): - h.stop = minSamp-1 - ll.append(h) + if(minSamp >= 0 and it > minSamp and val > minimum + thr2 and (it-h_start[h_num]) >= dt_min): + h_stop[h_num] = minSamp-1 + h_num += 1 hitFlag = True singleHit = False - h = dc.hits(view,daq_chan,minSamp,0,0,it,val, 0., 0.) + + h_start[h_num] = minSamp + h_stop[h_num] = 0 + h_charge_int[h_num]= 0. + h_max_t[h_num] = it + h_max_adc[h_num] = val + h_min_t[h_num] = 0. + h_min_adc[h_num] = 0. + minSamp = -1 - if(h.stop == 0 and val > h.max_adc): - h.max_t = it - h.max_adc = val + if(h_stop[h_num] == 0 and val > h_max_adc[h_num]): + h_max_t[h_num] = it + h_max_adc[h_num] = val if(minSamp >= 0): minSamp = -1 minimum = val - h.charge_int += val + h_charge_int[h_num] += val i+=1 if(hitFlag == True): hitFlag = False - h.stop = it-1 + h_stop[h_num] = it-1 - #if((singleHit and (h.stop-h.start >= dt_min)) or not singleHit): - if(h.stop-h.start >= dt_min): - ll.append(h) + #if((singleHit and (h_stop[h_num]-h_start[h_num] >= dt_min)) or not singleHit): + if(h_stop[h_num]-h_start[h_num] >= dt_min): + h_num += 1 i+=1 - return ll + return h_num, h_start, h_stop, h_charge_int, h_max_t, h_max_adc, h_min_t, h_min_adc + def recompute_hit_charge(hit): daq_ch, start, stop = hit.daq_channel, hit.start, hit.stop @@ -186,7 +229,6 @@ def recompute_hit_charge(hit): hit.charge_int = val - def find_hits(pad_left, pad_right, dt_min, n_sig_coll_1, n_sig_coll_2, n_sig_ind): """ get boolean roi based on mask and alive channels """ From b721d68b0cbd92533fe249e4bc968abb5df1530a Mon Sep 17 00:00:00 2001 From: Yoann Kermaidic Date: Wed, 1 Dec 2021 11:50:23 +0100 Subject: [PATCH 13/15] Add signal ID in refine mask --- hit_finder.py | 2 - lardon.py | 23 ++++--- pedestals.py | 165 ++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 179 insertions(+), 11 deletions(-) diff --git a/hit_finder.py b/hit_finder.py index 989d7fd..6e98bca 100644 --- a/hit_finder.py +++ b/hit_finder.py @@ -227,8 +227,6 @@ def recompute_hit_charge(hit): val += np.fabs(dc.data_daq[daq_ch, t] - mean) hit.charge_int = val - - def find_hits(pad_left, pad_right, dt_min, n_sig_coll_1, n_sig_coll_2, n_sig_ind): """ get boolean roi based on mask and alive channels """ diff --git a/lardon.py b/lardon.py index fdfb9d8..1f5fca4 100644 --- a/lardon.py +++ b/lardon.py @@ -158,9 +158,13 @@ #ped.set_mask_wf_rms_all() tpm = time.time() - for i in range(2): - ped.compute_pedestal(noise_type='filt') - ped.update_mask(pars.ped_amp_sig_oth) + + ped.compute_pedestal(noise_type='filt') + print(' %.2f s to process pedestal '%(time.time()-t0)) + ped.refine_mask(pars,debug=True) + print(' %.2f s to refine mask '%(time.time()-t0)) + ped.compute_pedestal(noise_type='filt') + print(' %.2f s to process pedestal '%(time.time()-t0)) #plot.plot_noise_daqch(noise_type='filt',option='coherent', vrange=pars.plt_noise_zrange) #plot.plot_noise_vch(noise_type='filt', vrange=pars.plt_noise_zrange,option='coherent',to_be_shown=False) @@ -168,19 +172,15 @@ #plot.event_display_per_daqch(pars.plt_evt_disp_daqch_zrange,option='coherent',to_be_shown=False) #cmap.arange_in_view_channels() - #plot.event_display_per_view(pars.plt_evt_disp_vch_ind_zrange,pars.plt_evt_disp_vch_col_zrange,option='coherent', to_be_shown=False) + 
#plot.event_display_per_view(pars.plt_evt_disp_vch_ind_zrange,pars.plt_evt_disp_vch_col_zrange,option='coherent', to_be_shown=True) #plot.plot_noise_daqch(noise_type='filt',option='coherent', vrange=pars.plt_noise_zrange) #plot.plot_noise_vch(noise_type='filt', vrange=pars.plt_noise_zrange,option='coherent',to_be_shown=False) - - store.store_pedestals(output) - print(' %.2f s to process '%(time.time()-t0)) - - th = time.time() hf.find_hits(pars.hit_pad_left,pars.hit_pad_right, pars.hit_dt_min[0], pars.hit_amp_sig[0],pars.hit_amp_sig[1],pars.hit_amp_sig[2]) + print(' %.2f s to process hits '%(time.time()-t0)) print("hit %.2f s"%(time.time()-th)) print(dc.evt_list[-1].n_hits) plot.plot_2dview_hits(to_be_shown=False) @@ -188,9 +188,14 @@ """parameters : min nb hits, rcut, chi2cut, y error, slope error, pbeta""" trk2d.find_tracks_rtree(5, 6., 8., 0.5, 1., 3.) + print(' %.2f s to process tracks '%(time.time()-t0)) [t.mini_dump() for t in dc.tracks2D_list] plot.plot_2dview_2dtracks(to_be_shown=True) + + plot.event_display_per_daqch(pars.plt_evt_disp_daq_zrange,option='coherent',to_be_shown=True) + cmap.arange_in_view_channels() + plot.event_display_per_view(pars.plt_evt_disp_vch_ind_zrange,pars.plt_evt_disp_vch_col_zrange,option='coherent', to_be_shown=True) reader.close_file() diff --git a/pedestals.py b/pedestals.py index 651057a..2264f5b 100644 --- a/pedestals.py +++ b/pedestals.py @@ -59,6 +59,171 @@ def update_mask(thresh): dc.mask_daq = ne.evaluate( "where((abs(data) > thresh*rms) | ~alive_chan, 0, 1)", global_dict={'data':dc.data_daq, 'alive_chan':dc.alive_chan, 'rms':dc.evt_list[-1].noise_filt.ped_rms[:,None]}).astype(bool) +def refine_mask(pars,debug=False): + if(debug == True): import matplotlib.pyplot as plt + + mask = dc.mask_daq + for ch in range(len(mask)): + data = dc.data_daq[ch] + rms = dc.evt_list[-1].noise_filt.ped_rms[ch] + view = dc.chmap[ch].view + + if(view == 2): mask_collection_signal(ch,mask,data,pars.ped_rise_thr[view],pars.ped_ramp_thr[view],rms*pars.ped_amp_thr[view],pars.ped_dt_thr) + else: mask_induction_signal(ch,mask,data,pars.ped_rise_thr[view],pars.ped_ramp_thr[view],rms*pars.ped_amp_thr[view],pars.ped_dt_thr,pars.ped_zero_cross_thr) + if(debug==True and ch > 1 and (ch % 50) == 0): + nrows, ncols = 10, 5 + fig = plt.figure() + for it,channel in enumerate(range(ch-50,ch)): + title = 'v'+str(view)+'-ch'+str(channel) + ax = fig.add_subplot(nrows, ncols, it+1) + ax.set_title(title) + ax.plot(dc.data_daq[channel],'o',markersize=0.2) + ax.axhline(y=dc.evt_list[-1].noise_filt.ped_rms[channel]*pars.ped_amp_thr[dc.chmap[channel].view], color='gray') + ax.axhline(y=0, color='lightgray') + ax.axhline(y=-dc.evt_list[-1].noise_filt.ped_rms[channel]*pars.ped_amp_thr[dc.chmap[channel].view], color='gray') + ax.plot(np.max(dc.data_daq[channel, :])*dc.mask_daq[channel,:],linestyle='-',linewidth=1,color='r') + ax.set(xlabel=None,ylabel=None) + ax.axis('off') + plt.subplots_adjust(left=0.01, bottom=0.01, right=0.99, top=0.95, wspace=0.05, hspace=1) + plt.show() + + + dc.mask_daq = mask + +@nb.jit('(int64,boolean[:,:],float64[:],int64,int64,float64,int64)',nopython = True) +def mask_collection_signal(ch,mask,data,rise_thr,ramp_thr,amp_thr,dt_thr): + + tmp_start = 0 # store candidate signal start to be registered when stop is found + trig_pos = 0 # store position at which sample exceed amp_thrs*ped_rms - 0 otherwise + find_pos = 0 # counter for consecutive positive sample + ramp = 0 # counter 3 consecutive rising samples + ongoing = 0 # flag triggered when signal start found + 
oldval1 = 0 # store previous sample + oldval2 = 0 # store pre-previous sample + zero_cross_check = 0 + + for x,val in np.ndenumerate(data): + + # set positive signal trigger + if(val >= amp_thr): trig_pos = x[0] + # set back positive trigger to zero if a certain period is over + elif(x[0] - trig_pos > dt_thr): trig_pos = 0 + + # if rising edge found - register begining of signal + if(trig_pos > 0 and find_pos >= rise_thr and ramp > ramp_thr and ongoing == 0): + tmp_start = x[0] - find_pos + ongoing = 1 + + # signal found - look when amp. reaches noise floor (neg. value) + if(find_pos >= rise_thr and ongoing == 1 and val < 0): + start = tmp_start + stop = x[0] + for it in range(start,stop): mask[ch][it] = 0 + + # reset counters + zero_cross_check = 0 + find_pos = 0 + ongoing = 0 + trig_pos = 0 + + if(val > 0): + # start counter for positive samples + find_pos += 1 + # identify ramping + if(val > oldval1 and val > oldval2): ramp += 1 + else: ramp -= 1 + else: + zero_cross_check += 1 + + # reset counters when negative sample is found (deep in the noise) for col. + if(zero_cross_check >= 1): + zero_cross_check = 0 + find_pos = 0 + ramp = 0 + ongoing = 0 + + oldval2 = oldval1 + oldval1 = val + +@nb.jit('(int64,boolean[:,:],float64[:],int64,int64,float64,int64,int64)',nopython = True) +def mask_induction_signal(ch,mask,data,rise_thr,ramp_thr,amp_thr,dt_thr,zero_cross_thr): + + tmp_start = 0 # store candidate signal start to be registered when stop is found + trig_pos = 0 # store position at which sample exceed amp_thrs*ped_rms - 0 otherwise + trig_neg = 0 # store position at which sample exceed -amp_thrs*ped_rms - 0 otherwise + find_pos = 0 # counter for consecutive positive sample + find_neg = 0 # counter for consecutive negative sample + ramp = 0 # counter 3 consecutive rising samples + ongoing = 0 # flag triggered when signal start found + oldval1 = 0 # store previous sample + oldval2 = 0 # store pre-previous sample + inv = 0 # invert flag for induction signal + zero_cross_check = 0 + + for x,val in np.ndenumerate(data): + + # set positive signal trigger + if(val >= amp_thr): trig_pos = x[0] + # set back positive trigger to zero if a certain period is over + elif(x[0] - trig_pos > dt_thr): trig_pos = 0 + + # check for sample above threshold for induction signal start + if(trig_pos > 0 and find_pos > rise_thr and val > 0 and ongoing == 0): + tmp_start = x[0] - find_pos + ongoing = 1 + + if(trig_pos > 0 and find_pos >= rise_thr and ongoing == 1): + # set negative signal trigger + if(val <= -amp_thr): trig_neg = x[0] + elif(x[0] - trig_neg > dt_thr): trig_neg = 0 + + # set invert flag when induction crosses zero + if(val < 0 and inv == 0): + inv = 1 + x_zero_cross = x[0] + + # set induction signal stop once a positive sample is found - 5 samples after zero crossing required + if(val > 0 and inv == 1 and x[0] > x_zero_cross+zero_cross_thr): + + # require a minimal signal length/negative amplitude/duration to remove spikes + if(x[0] - tmp_start > dt_thr/4 and trig_pos - tmp_start > 0 and trig_neg >0 and find_neg >= rise_thr): + start= tmp_start + stop = x[0] + for it in range(start,stop): mask[ch][it] = 0 + + # reset counters + zero_cross_check= 0 + find_pos = 0 + find_neg = 0 + ongoing = 0 + trig_pos = 0 + trig_neg = 0 + inv = 0 + + if(val > 0 and inv == 0): + # start counter for positive samples + find_pos += 1 + # identify ramping + if(val > oldval1 and val > oldval2): ramp += 1 + else: ramp -= 1 + elif(val < 0 and inv == 1): + # start counter for negative samples + find_neg += 1 + else: + 
zero_cross_check += 1 + + # reset counters when negative/positive sample is found (deep in the noise) for col. and ind. resp. + if(zero_cross_check >= 5): + zero_cross_check = 0 + find_pos = 0 + find_neg = 0 + ramp = 0 + ongoing = 0 + inv = 0 + + oldval2 = oldval1 + oldval1 = val + def set_mask_wf_rms_all(): # set signal mask bins from threshold - simplest method From 68eb162afdcfbe572f5b347b39609024dd1ab046 Mon Sep 17 00:00:00 2001 From: Yoann Kermaidic Date: Thu, 2 Dec 2021 17:15:54 +0100 Subject: [PATCH 14/15] Reshuffle lardon.py --- lardon.py | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/lardon.py b/lardon.py index 1f5fca4..36f5c65 100644 --- a/lardon.py +++ b/lardon.py @@ -158,13 +158,9 @@ #ped.set_mask_wf_rms_all() tpm = time.time() - ped.compute_pedestal(noise_type='filt') - print(' %.2f s to process pedestal '%(time.time()-t0)) ped.refine_mask(pars,debug=True) - print(' %.2f s to refine mask '%(time.time()-t0)) ped.compute_pedestal(noise_type='filt') - print(' %.2f s to process pedestal '%(time.time()-t0)) #plot.plot_noise_daqch(noise_type='filt',option='coherent', vrange=pars.plt_noise_zrange) #plot.plot_noise_vch(noise_type='filt', vrange=pars.plt_noise_zrange,option='coherent',to_be_shown=False) @@ -172,15 +168,19 @@ #plot.event_display_per_daqch(pars.plt_evt_disp_daqch_zrange,option='coherent',to_be_shown=False) #cmap.arange_in_view_channels() - #plot.event_display_per_view(pars.plt_evt_disp_vch_ind_zrange,pars.plt_evt_disp_vch_col_zrange,option='coherent', to_be_shown=True) + #plot.event_display_per_view(pars.plt_evt_disp_vch_ind_zrange,pars.plt_evt_disp_vch_col_zrange,option='coherent', to_be_shown=False) #plot.plot_noise_daqch(noise_type='filt',option='coherent', vrange=pars.plt_noise_zrange) #plot.plot_noise_vch(noise_type='filt', vrange=pars.plt_noise_zrange,option='coherent',to_be_shown=False) + + store.store_pedestals(output) + print(' %.2f s to process '%(time.time()-t0)) + + th = time.time() hf.find_hits(pars.hit_pad_left,pars.hit_pad_right, pars.hit_dt_min[0], pars.hit_amp_sig[0],pars.hit_amp_sig[1],pars.hit_amp_sig[2]) - print(' %.2f s to process hits '%(time.time()-t0)) print("hit %.2f s"%(time.time()-th)) print(dc.evt_list[-1].n_hits) plot.plot_2dview_hits(to_be_shown=False) @@ -188,14 +188,9 @@ """parameters : min nb hits, rcut, chi2cut, y error, slope error, pbeta""" trk2d.find_tracks_rtree(5, 6., 8., 0.5, 1., 3.) 
- print(' %.2f s to process tracks '%(time.time()-t0)) [t.mini_dump() for t in dc.tracks2D_list] plot.plot_2dview_2dtracks(to_be_shown=True) - - plot.event_display_per_daqch(pars.plt_evt_disp_daq_zrange,option='coherent',to_be_shown=True) - cmap.arange_in_view_channels() - plot.event_display_per_view(pars.plt_evt_disp_vch_ind_zrange,pars.plt_evt_disp_vch_col_zrange,option='coherent', to_be_shown=True) reader.close_file() From 59ec64e10cd8bb0603da72264c7b88ce1a0c33ed Mon Sep 17 00:00:00 2001 From: Yoann Kermaidic Date: Fri, 3 Dec 2021 09:19:05 +0100 Subject: [PATCH 15/15] refine_mask() instead of update_mask() --- lardon.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/lardon.py b/lardon.py index 36f5c65..c1bd9d6 100644 --- a/lardon.py +++ b/lardon.py @@ -134,9 +134,10 @@ #ped.set_mask_wf_rms_all() tp = time.time() - for i in range(2): - ped.compute_pedestal(noise_type='filt') - ped.update_mask(pars.ped_amp_sig_fst) + ped.compute_pedestal(noise_type='filt') + ped.refine_mask(pars,debug=True) + ped.compute_pedestal(noise_type='filt') + #plot.plot_noise_daqch(noise_type='filt',option='fft', vrange=pars.plt_noise_zrange) #plot.plot_noise_vch(noise_type='filt', vrange=pars.plt_noise_zrange,option='fft')#,to_be_shown=True)
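
Note on the numba conversion above: the hit-finder hunks replace the dc.hits-building loops with jitted kernels (e.g. hit_search_collection_nb) that return a hit count plus flat per-hit arrays, but the thin Python shim that turns those arrays back into dc.hits objects is not part of the excerpted hunks. The sketch below is illustrative only: it assumes dc.hits still takes the (view, daq_chan, start, stop, charge_int, max_t, max_adc, min_t, min_adc) arguments visible in the removed constructor calls, and the wrapper name hits_from_kernel is hypothetical, not taken from the repository.

    import numpy as np
    import data_containers as dc
    from hit_finder import hit_search_collection_nb  # jitted kernel introduced in the hunks above

    def hits_from_kernel(data, view, daq_chan, start, dt_min, thr1, thr2):
        """Hypothetical shim: run the jitted collection search, rebuild dc.hits objects."""
        (n_hits, h_start, h_stop, h_charge_int,
         h_max_t, h_max_adc, h_min_t, h_min_adc) = hit_search_collection_nb(
            np.ascontiguousarray(data, dtype=np.float64),
            view, daq_chan, start, dt_min, thr1, thr2)

        # The kernel allocates npts-sized scratch arrays; only the first n_hits
        # entries are filled, so the loop below drops the unused tail.
        return [dc.hits(view, daq_chan,
                        int(h_start[k]), int(h_stop[k]), float(h_charge_int[k]),
                        int(h_max_t[k]), float(h_max_adc[k]),
                        int(h_min_t[k]), float(h_min_adc[k]))
                for k in range(n_hits)]

Keeping the object construction in plain Python and only the sample loops in numba matches the split the patches make: the kernels stay restricted to scalar/array types that numba can compile, while dc.hits and the rest of the reconstruction chain are untouched.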