99__date__ = "2025-04-06"
1010__license__ = "MIT"
1111
12- import os
12+ import warnings
1313import pickle
1414import numpy as np
1515import pandas as pd
16- import warnings
1716import requests
1817from cedne import Worm , Fly , NervousSystem
1918from .config import *
@@ -38,7 +37,7 @@ def makeWorm(name='', import_parameters=None, chem_only=False, gapjn_only=False)
3837 input_file = 'SI 5 Connectome adjacency matrices, corrected July 2020.xlsx'
3938
4039 ## Chemical synapses
41- cook_chem = pd .read_excel (cook_connectome + input_file , sheet_name = 'male chemical' , engine = 'openpyxl' )
40+ cook_chem = pd .read_excel (cook_connectome / input_file , sheet_name = 'male chemical' , engine = 'openpyxl' )
4241 colnames = cook_chem .iloc [1 , 3 :- 1 ].astype (str ).tolist ()
4342 labels = cook_chem .loc [2 :383 ]['Unnamed: 2' ].tolist ()
4443
@@ -84,7 +83,7 @@ def makeWorm(name='', import_parameters=None, chem_only=False, gapjn_only=False)
8483 adj_chem [row ] = {col1 : {"weight" : chem_adj [i ,j ]} for j ,col1 in enumerate (cols ) if col1 in labels }
8584
8685 ## Gap junctions
87- cook_gapjn = pd .read_excel (cook_connectome + input_file , sheet_name = 'male gap jn symmetric' , engine = 'openpyxl' )
86+ cook_gapjn = pd .read_excel (cook_connectome / input_file , sheet_name = 'male gap jn symmetric' , engine = 'openpyxl' )
8887 colnames = cook_gapjn .iloc [1 ][3 :- 1 ].astype (str ).tolist ()
8988
9089 row_labels = cook_gapjn .loc [2 :383 ]['Unnamed: 2' ].tolist ()
@@ -108,28 +107,27 @@ def makeWorm(name='', import_parameters=None, chem_only=False, gapjn_only=False)
108107 nn .setup_chemical_connections (adj_chem )
109108 if not chem_only :
110109 nn .setup_gap_junctions (adj_gapjn )
110+ elif import_parameters ['style' ] == 'witvliet' :
111+ ind_dict = {'L1' : [1 ,2 ,3 ,4 ], 'L2' :[5 ] , 'L3' :[6 ], 'adult' :[7 ,8 ]}
112+ assert import_parameters ['stage' ] in ['L1' , 'L2' , 'L3' , 'adult' ], "stage should be one of 'L1', 'L2', 'L3', 'adult'"
113+ assert int (import_parameters ['dataset_ind' ]) in range (1 ,len (ind_dict [import_parameters ['stage' ]])+ 1 ) , f"Dataset id { int (import_parameters ['dataset_ind' ])} for stage { import_parameters ['stage' ]} should be in { list (range (1 ,len (ind_dict [import_parameters ['stage' ]])+ 1 ))} "
114+
115+ input_file = 'witvliet_2020_' + str (ind_dict [import_parameters ['stage' ]][int (import_parameters ['dataset_ind' ])- 1 ]) + ' ' + import_parameters ['stage' ] + '.xlsx'
116+ witvliet_input = pd .read_excel (witvliet_connectome / input_file , engine = 'openpyxl' )
117+ all_labels = set (witvliet_input ['pre' ]) | set (witvliet_input ['post' ])
118+ labels = [lab for lab in all_labels if not any (lab .startswith (k ) for k in ['BWM-' , 'CEPsh' , 'GLR' ])]
119+
120+ w = Worm (name = name , stage = import_parameters ['stage' ])
121+ w .citations .update ({'witvliet_connectome' :citations ['witvliet_connectome' ]})
122+ nn = NervousSystem (w , network = '_' .join ([import_parameters ['style' ], import_parameters ['stage' ], import_parameters ['dataset_ind' ]]))
123+ nn .create_neurons (labels = labels )
124+ witvliet_input .rename (columns = {'synapses' : 'weight' }, inplace = True )
125+ fin_input = witvliet_input [witvliet_input ['pre' ].isin (labels )]
126+ fin_input = fin_input [fin_input ['post' ].isin (labels )]
127+ for _ , conn in fin_input .iterrows ():
128+ nn .setup_connections (conn , conn ['type' ], input_type = 'edge' )
111129 else :
112- if import_parameters ['style' ] == 'witvliet' :
113- ind_dict = {'L1' : [1 ,2 ,3 ,4 ], 'L2' :[5 ] , 'L3' :[6 ], 'adult' :[7 ,8 ]}
114- assert import_parameters ['stage' ] in ['L1' , 'L2' , 'L3' , 'adult' ], "stage should be one of 'L1', 'L2', 'L3', 'adult'"
115- assert int (import_parameters ['dataset_ind' ]) in range (1 ,len (ind_dict [import_parameters ['stage' ]])+ 1 ) , f"Dataset id { int (import_parameters ['dataset_ind' ])} for stage { import_parameters ['stage' ]} should be in { list (range (1 ,len (ind_dict [import_parameters ['stage' ]])+ 1 ))} "
116-
117- input_file = 'witvliet_2020_' + str (ind_dict [import_parameters ['stage' ]][int (import_parameters ['dataset_ind' ])- 1 ]) + ' ' + import_parameters ['stage' ] + '.xlsx'
118- witvliet_input = pd .read_excel (witvliet_connectome + input_file , engine = 'openpyxl' )
119- all_labels = set (witvliet_input ['pre' ])| set (witvliet_input ['post' ])
120- labels = [lab for lab in all_labels if not any (lab .startswith (k ) for k in ['BWM-' , 'CEPsh' , 'GLR' ])]
121-
122- w = Worm (name = name , stage = import_parameters ['stage' ])
123- w .citations .update ({'witvliet_connectome' :citations ['witvliet_connectome' ]})
124- nn = NervousSystem (w , network = '_' .join ([import_parameters ['style' ],import_parameters ['stage' ], import_parameters ['dataset_ind' ]]))
125- nn .create_neurons (labels = labels )
126- witvliet_input .rename (columns = {'synapses' : 'weight' }, inplace = True )
127- fin_input = witvliet_input [witvliet_input ['pre' ].isin (labels )]
128- fin_input = fin_input [fin_input ['post' ].isin (labels )]
129- for iter , conn in fin_input .iterrows ():
130- nn .setup_connections (conn , conn ['type' ], input_type = 'edge' )
131-
132-
130+ raise ValueError ("Unsupported connectome style" )
133131 return w
134132
135133def makeFly (name = '' ):
@@ -140,7 +138,7 @@ def makeFly(name = ''):
140138 ## Neurons
141139
142140 ### Names
143- names = pd .read_csv (fly_wire + 'names.csv' )
141+ names = pd .read_csv (fly_wire / 'names.csv' )
144142 labs , neuron_types , lab_root_id = names ['name' ], names ['group' ], names ['root_id' ]
145143 neuron_dict = {r :lab for r ,lab in zip (lab_root_id , labs )}
146144 type_dict = {r :ntype for r ,ntype in zip (lab_root_id , neuron_types )}
@@ -150,12 +148,12 @@ def makeFly(name = ''):
150148 neuron_types = {neuron_dict [rid ]:type_dict [rid ] for rid in root_ids }
151149
152150 ### Positions
153- coordinates = pd .read_csv (fly_wire + 'coordinates.csv' )
151+ coordinates = pd .read_csv (fly_wire / 'coordinates.csv' )
154152 pos_root_id , position = coordinates ['root_id' ], coordinates ['position' ]
155153 position_dict = {neuron_dict [rid ]:np .array (list (filter (None , pos .split ('[' )[- 1 ].split (']' )[0 ].split (' ' ))), dtype = int ) for rid ,pos in zip (pos_root_id , position )}
156154
157155 ### Stats
158- stats = pd .read_csv (fly_wire + 'cell_stats.csv' )
156+ stats = pd .read_csv (fly_wire / 'cell_stats.csv' )
159157 stats_root_id , nlength , narea , nvolume = stats ['root_id' ], np .array (stats ['length_nm' ], dtype = int ), np .array (stats ['area_nm' ], dtype = int ), np .array (stats ['size_nm' ], dtype = int )
160158
161159 length_dict = {neuron_dict [rid ]:nlen for (rid ,nlen ) in zip (stats_root_id , nlength )}
@@ -165,7 +163,7 @@ def makeFly(name = ''):
165163 nn .create_neurons (labels , type = neuron_types , position = position_dict , length = length_dict , area = area_dict , volume = vol_dict )
166164
167165 ## Connections
168- conns = pd .read_csv (fly_wire + 'connections_no_threshold.csv' )
166+ conns = pd .read_csv (fly_wire / 'connections_no_threshold.csv' )
169167 pre_rid , post_rid , weights , nts = conns ['pre_root_id' ], conns ['post_root_id' ], conns ['syn_count' ], conns ['nt_type' ]
170168
171169 for pre , post , weight , nt in zip (pre_rid , post_rid , weights , nts ):
@@ -234,7 +232,7 @@ def load_lineage(neural_network, sex='Hermaphrodite'):
234232
235233def getLigands (neuron , sex = 'Hermaphrodite' ):
236234 ''' Returns ligand for each neuron'''
237- lig_file = DOWNLOAD_DIR + prefix_NT + 'ligand-table.xlsx'
235+ lig_file = DOWNLOAD_DIR / prefix_NT / 'ligand-table.xlsx'
238236 if sex in ['Hermaphrodite' , 'hermaphrodite' ]:
239237 ligtable = pd .read_excel (lig_file , sheet_name = 'Hermaphrodite, sorted by neuron' , skiprows = 7 , engine = 'openpyxl' )
240238 elif sex in ['Male' , 'male' ]:
@@ -271,7 +269,7 @@ def getLigandsAndReceptors(npr, ligmap, col):
271269def loadNeurotransmitters (nn , sex = 'Hermaphrodite' ):
272270 ''' Loads Neurotransmitters into neurons using Wang et al 2024'''
273271
274- npr_file = DOWNLOAD_DIR + prefix_NT + 'GenesExpressing-BATCH-thrs4_use.xlsx'
272+ npr_file = DOWNLOAD_DIR / prefix_NT / 'GenesExpressing-BATCH-thrs4_use.xlsx'
275273 npr = pd .read_excel (npr_file , sheet_name = 'npr' , true_values = 'TRUE' , false_values = 'FALSE' , engine = 'openpyxl' )
276274 ligmap = pd .read_excel (npr_file , sheet_name = 'ligmap' , engine = 'openpyxl' )
277275
@@ -306,10 +304,9 @@ def loadNeuropeptides(w, neuropeps:str= 'all'):
306304 ''' Loads Neuropeptides into neurons using Ripoll-Sanchez et al. 2023'''
307305
308306 #csvfile = DOWNLOAD_DIR + prefix_NP + 'neuropeptideConnectome.txt'
309- lrm = DOWNLOAD_DIR + prefix_NP + 'NPP_GPCR_networks_long_range_model_2.csv'
310- nid = DOWNLOAD_DIR + prefix_NP + '26012022_num_neuronID.txt'
311- np_order = DOWNLOAD_DIR + prefix_NP + '91-NPPGPCR networks'
312-
307+ lrm = DOWNLOAD_DIR / prefix_NP / 'NPP_GPCR_networks_long_range_model_2.csv'
308+ nid = DOWNLOAD_DIR / prefix_NP / '26012022_num_neuronID.txt'
309+ np_order = DOWNLOAD_DIR / prefix_NP / '91-NPPGPCR networks'
313310 model = pd .read_csv (lrm ,encoding = 'unicode_escape' , header = None )
314311 neuronID = pd .read_csv (nid ,encoding = 'unicode_escape' , sep = '\t ' , index_col = 0 , names = ['NID' , "Neuron" ])
315312 neuropep_rec = pd .read_csv (np_order , sep = ',' , index_col = 0 )
@@ -345,10 +342,10 @@ def loadNeuropeptides(w, neuropeps:str= 'all'):
345342 w .worm .citations .update ({'neuropeptide_atlas' :citations ['neuropeptide_atlas' ]})
346343
347344## Load CENGEN tables
348- thres_1 = DOWNLOAD_DIR + prefix_CENGEN + 'liberal_threshold1.csv'
349- thres_2 = DOWNLOAD_DIR + prefix_CENGEN + 'medium_threshold2.csv'
350- thres_3 = DOWNLOAD_DIR + prefix_CENGEN + 'conservative_threshold3.csv'
351- thres_4 = DOWNLOAD_DIR + prefix_CENGEN + 'stringent_threshold4.csv'
345+ thres_1 = DOWNLOAD_DIR / prefix_CENGEN / 'liberal_threshold1.csv'
346+ thres_2 = DOWNLOAD_DIR / prefix_CENGEN / 'medium_threshold2.csv'
347+ thres_3 = DOWNLOAD_DIR / prefix_CENGEN / 'conservative_threshold3.csv'
348+ thres_4 = DOWNLOAD_DIR / prefix_CENGEN / 'stringent_threshold4.csv'
352349
353350def returnThresholdDict (th1 , th2 , th3 , th4 , nnames , cengen_neurons ):
354351 """
@@ -546,7 +543,7 @@ def loadSynapticWeights(nn):
546543 None
547544 """
548545 ## Load synaptic weights from Excel file
549- weightMatrix = DOWNLOAD_DIR + prefix_synaptic_weights + "41586_2023_6683_MOESM13_ESM.xls"
546+ weightMatrix = DOWNLOAD_DIR / prefix_synaptic_weights / "41586_2023_6683_MOESM13_ESM.xls"
550547 wtMat = pd .read_excel (weightMatrix , index_col = 0 ).T
551548 for sid in nn .connections .keys ():
552549 if sid [0 ].name in wtMat :
@@ -566,14 +563,14 @@ def download_datasets(key=''):
566563 if not key :
567564 print ("Nothing downloaded. Pass key" )
568565 elif key == 'cengen' :
569- if not os . path . exists (DOWNLOAD_DIR + prefix_CENGEN ):
570- os . makedirs ( DOWNLOAD_DIR + prefix_CENGEN )
566+ cengen_dir = (DOWNLOAD_DIR / prefix_CENGEN ). resolve ()
567+ cengen_dir . mkdir ( parents = True , exist_ok = True )
571568 for link in cengen_links :
572569 response = requests .get (link , stream = True )
573570 response .raise_for_status () # Raises HTTPError for bad responses
574- local_dir = DOWNLOAD_DIR + prefix_CENGEN
571+ local_dir = cengen_dir
575572 local_filename = link .split ('021821_' )[- 1 ]
576- with open (local_dir + local_filename , "wb" ) as f :
573+ with open (local_dir / local_filename , "wb" ) as f :
577574 for chunk in response .iter_content (chunk_size = 8192 ):
578575 f .write (chunk )
579576 print (f"Downloaded { local_filename } at { local_dir } " )
@@ -594,8 +591,7 @@ def download_datasets(key=''):
594591
595592 elif key == 'atanas_whole_brain' :
596593 for stim , location in atanas_whole_brain .items ():
597- if not os .path .exists (location ):
598- os .makedirs (location )
594+ location .mkdir (parents = True , exist_ok = True )
599595
600596 for suff in atanas_links [stim ]:
601597 link = atanas_link_prefix + suff
@@ -604,7 +600,7 @@ def download_datasets(key=''):
604600 local_dir = location
605601 local_filename = suff
606602
607- with open (local_dir + local_filename , "wb" ) as f :
603+ with open (local_dir / local_filename , "wb" ) as f :
608604 for chunk in response .iter_content (chunk_size = 8192 ):
609605 f .write (chunk )
610606 print (f"Downloaded { local_filename } at { local_dir } " )