parallelize nested loop

General issues of interest both for network and
individual cell parallelization.

Moderator: hines

Post Reply
menica
Posts: 67
Joined: Wed Sep 02, 2015 11:02 am

parallelize nested loop

Post by menica » Wed Mar 14, 2018 12:57 pm

Hi,
I am trying to parallelize my code. The parallelization formalism is new to me.
I am performing a parameter sweep on 17 parameters. For each parameter, I stored in a vector the 3 values I wanted to sweep. Here is the nested-loop code I was using:

Code: Select all

// Parameter-sweep driver: 17 nested loops, 3 values each -> 3^17 (~129 million)
// simulations, one run() per parameter combination.
// NOTE(review): the run of 17 closing braces below closes only the 17 for-loops;
// batrun()'s own closing brace does not appear until after sweep(), so as posted
// sweep() is textually nested inside batrun(), which hoc does not support --
// most likely a copy/paste artifact.  Confirm brace balance in the real file.
// NOTE(review): the loop variable t (declared local) shadows NEURON's global
// time variable t inside this proc body -- confirm that is intentional.
proc batrun() { local i, j ,k,l,m,n, p, q, r, s, t, u, h,z, a, b, c

for i=0,2{
NetCon[1].weight = wE.x[i]      //E->I
	for j=0,2{
		NetCon[0].weight = wI.x[j]    //I_>E
		for k = 0,2 {
				Exp2Syn[0].tau1 = t1E.x[k]
				for l = 0,2 {
					Exp2Syn[0].tau2 = t2E.x[l]
					for m = 0,2 {
						is[0].tau1 = t1I.x[m]
							for n = 0,2 {

							is[0].tau2 = t2I.x[n]
							for p = 0,2 {

								somaI.gna_pnas = vna.x[p]
								for q = 0,2 {

									somaI.gkdr_pkdrs = vk.x[q]
										for r = 0,2 {

										somaI.gk_leak = lk.x[r]
											for s = 0,2 {

											somaI.gna_leak = lna.x[s]
												for t = 0,2 {

												somaI.gfix_leak = lfixi.x[t]
												for u = 0,2 {

													somaE.gcl_clleak = lcl.x[u]
														for h = 0,2 {

														somaE.gfix_clleak = lfixe.x[h]
															for z = 0,2 {

															somaE.tau_pump_pumpcltau = tclp.x[z]
															for a = 0,2 {														

																somaI.tau_pump_pumpnatau2 = tnap.x[a]
															for b = 0,2 {
																	somaE.ipumpmax_pumpcltau = maxclp.x[b]
																		for c = 0,2 {
																		somaI.ipumpmax_pumpnatau2 = maxnap.x[c]


// one simulation per parameter combination: run, label it, analyze/save output
run()
calclabel(i,j,k,l,m,n, p, q, r, s, t, u, h,z, a, b, c)
postvoltage()
}}}}}}}}}}}}}}}}}

// Top-level entry point: set up recording, output files, sweep vectors, then run.
proc sweep(){
	instrum()   //create vector where to save time course of v and currents in somaE and somaI
        savedata() //create folders and files where to save the output
	setparam()  // create vectors of 3 values for each of the 17 parameters I want to sweep
	batrun() 
}
// NOTE(review): this stray brace balances batrun()'s opening brace (see note above).
}
The procedure calclabel() returns a string that labels each run with the corresponding parameter combination:

Code: Select all

strdef cmd
// Build the run label into the global strdef cmd: one digit per swept
// parameter index, 17 digits total (e.g. "01202101201021021").
// NOTE(review): cmd already ends in \n, so callers that print it with
// printf("%s\n", cmd) emit a blank line after every record -- confirm intended.
proc calclabel() {
sprint(cmd,"%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d\n",$1,$2,$3,$4,$5,$6, $7, $8, $9, $10, $11, $12, $13,$14, $15, $16, $17)      
}
The procedure postvoltage() calculates the amplitude and duration of the spikes and the interspike intervals, and saves the results in files; here is the code:

Code: Select all

// to record spike times 
// NetCons with a nil target act as pure threshold detectors: each upward
// crossing of .threshold at the watched location is recorded as a spike time.
objref nccI,nccE, spveccI,spveccE, nilcI,nilcE 
somaI nccI = new NetCon(&v(0.5), nilcI)
somaE nccE = new NetCon(&v(0.5), nilcE)
nccI.threshold = -20 // mV
nccE.threshold = -20
spveccI = new Vector()
nccI.record(spveccI) // somaI spike times land here
spveccE = new Vector()
// NOTE(review): confirm these vectors are reset between runs (NetCon.record
// vectors are typically cleared at initialization) -- otherwise spike counts
// would accumulate across the sweep.
nccE.record(spveccE)

// Post-run analysis: classify the run by spike count in somaE and somaI and
// append the results, tagged with the global label string cmd, to the files:
//   0 spikes  -> label only           (f1/fa for E, f5/fe for I)
//   1 spike   -> amplitude, half-width (f2/fb for E, f6/ff for I)
//   >1 spike  -> per-spike metrics    (f3/fc, f7/fg) and
//                interspike intervals (f4/fd, f8/fh)
// Relies on calcolaE/calcolaI filling maxV*/hW* and calinterspike* filling
// isivec*.  Uses the global loop index i, matching the original code.
proc postvoltage() { local nspikesI, t1I, t2I, nspikesE, t1E, t2E

  nspikesI = spveccI.size()
  nspikesE = spveccE.size()

  if (nspikesE == 0) {
    f1.aopen(fa)
    f1.printf("%s\n", cmd)
    f1.close()
  }
  if (nspikesI == 0) {
    f5.aopen(fe)
    f5.printf("%s\n", cmd)
    f5.close()
  }

  if (nspikesE == 1) {
    calcolaE()
    f2.aopen(fb)
    f2.printf("%s \t%g \t%g\n", cmd, maxVE.x(0), hWE.x(0))
    f2.close()
  }
  if (nspikesI == 1) {
    calcolaI()
    f6.aopen(ff)
    f6.printf("%s \t%g \t%g\n", cmd, maxVI.x(0), hWI.x(0))
    f6.close()
  }

  if (nspikesE > 1) {
    calcolaE()
    calinterspikeE()
    f3.aopen(fc)
    for i = 0, hWE.size()-1 {
      f3.printf("%s \t%g \t%g\n", cmd, maxVE.x(i), hWE.x(i))
    }
    f3.close()

    f4.aopen(fd)
    for i = 0, isivecE.size()-1 {
      // BUG FIX: format was "%s \t%g \t%gn" -- three conversions for only two
      // arguments, and a literal 'n' instead of a newline.  Now matches the
      // equivalent f8 write for the I cell below.
      f4.printf("%s \t%g\n", cmd, isivecE.x(i))
    }
    f4.close()
  }
  if (nspikesI > 1) {
    calcolaI()
    calinterspikeI()
    f7.aopen(fg)
    for i = 0, hWI.size()-1 {
      f7.printf("%s \t%g \t%g\n", cmd, maxVI.x(i), hWI.x(i))
    }
    f7.close()
    f8.aopen(fh)
    for i = 0, isivecI.size()-1 {
      f8.printf("%s \t%g\n", cmd, isivecI.x(i))
    }
    f8.close()
  }
}
I would like to have your help in order to proceed with the parallelization. I am looking at the example on the bulletin board parallelization folder here:
https://www.neuron.yale.edu/ftp/ted/neu ... zation.zip
I tried to parallelize the code considering only one parameter, once the code will work I will also insert the others. Here is where I arrived so far...

Code: Select all

// after cell model specification I create ParallelContext instance

objref pc 
pc = new ParallelContext() 

// then I specify the stimulus and simulation parameters

// I modify the postvoltage processes ( I am not writing on the files anymore here)
// Post-run spike analysis, bulletin-board version: no file output here.
// Runs the per-cell metric procs when at least one spike was detected, and
// the interspike-interval procs when more than one spike was detected.
proc postvoltage() { local nspikesI, t1I, t2I, nspikesE, t1E, t2E
	nspikesE = spveccE.size()
	nspikesI = spveccI.size()
	if (nspikesE >= 1) {
		calcolaE()       // amplitude / half-width metrics for somaE
	}
	if (nspikesE > 1) {
		calinterspikeE() // ISIs require at least two spikes
	}
	if (nspikesI >= 1) {
		calcolaI()
	}
	if (nspikesI > 1) {
		calinterspikeI()
	}
}

// Set the E->I synaptic weight for sweep index $1 (0..2).
// BUG FIX: the original looped i=0,2 and ignored $1, so every call swept
// through all three values and left the weight stuck at wE.x[2].  A
// bulletin-board job must set exactly the value selected by its job index.
proc setlopar(){
	NetCon[1].weight = wE.x[$1]      //E->I
}

// One bulletin-board job: configure recording, set the swept parameter from
// job index $1, run the simulation, label and analyze it.
// BUG FIX: a hoc func must return a numeric value; the original fell off the
// end.  Return 0 as a placeholder success code (the master retrieves it via
// pc.retval()).
func fi() {         
	instrum()
    //savedata()      // folder/file creation belongs on the master, not in worker jobs
	setlopar($1)
	run()
	calclabel($1)     // NOTE(review): the 17-arg calclabel would fail here -- confirm a 1-arg version is in scope
	print cmd
	postvoltage()
	return 0
}

trun = startsw()

pc.runworker() 

objref svec

// Post one job per parameter index on the bulletin board, then collect them.
// BUG FIX: tmp must be declared with the "local" keyword -- the original
// "proc batchrun() {  tmp" made hoc evaluate an undefined variable (the
// "undefined variable tmp" error from the post).
// BUG FIX: fi() reads $1, so each pc.submit must pass an index argument;
// pc.submit("fi") with no args would make every job fail on $1.
proc batchrun() { local i, tmp
  svec = new Vector()
  for i = 0, 2 {
    pc.submit("fi", i) // post all jobs to bulletin board
  }
  while (pc.working) { // if a result is ready, get it; if not, pick a job and do it
    pc.unpack(&tmp)    // retrieve the index that was posted with the job
    svec.append(tmp)   // get job number
    printf(".")        // indicate progress
  }
}

batchrun()

pc.done() 

// I should here rearrange the output of each job and save the results in files

print " "
print startsw() - trun, "seconds" // report run time

quit()
It is not working... "undefined variable tmp"...

ted
Site Admin
Posts: 5169
Joined: Wed May 18, 2005 4:50 pm
Location: Yale University School of Medicine
Contact:

Re: parallelize nested loop

Post by ted » Wed Mar 14, 2018 10:51 pm

Search for tmp

menica
Posts: 67
Joined: Wed Sep 02, 2015 11:02 am

Re: parallelize nested loop

Post by menica » Fri Mar 16, 2018 1:21 pm

thanks
Last edited by menica on Wed Mar 21, 2018 12:57 pm, edited 1 time in total.

menica
Posts: 67
Joined: Wed Sep 02, 2015 11:02 am

Re: parallelize nested loop

Post by menica » Wed Mar 21, 2018 12:16 pm

I simplified the problem in order to learn how to parallelize step by step.
I managed to run in parallel, but I am now struggling with retrieving the desired output from each simulation.

I created, in this case, one folder with 4 files. At the moment I am using only 2. I would like to have as output: in the first file the records of the cmd string( which gives me the "label" of the run, in other words the parameters combinations of that run), in the case of no AP (nspikesE == 0), in the second file I would like to record the cmd relative to the cases of one AP released.

In the serial simulation, I obtain the same wrong results, so I guess it is related to the way I am writing in the files.

Any help in this direction will be appreciated.

Here is the revised code:

Code: Select all

load_file("nrngui.hoc")

create soma
soma {
	L = 10
	diam = 10
	nseg = 1
	insert hh
}

tstop = 1500
dt = 0.025
v_init = -65

objref stim
soma stim = new IClamp(0.5)
stim.del = 100
stim.dur = 5
stim.amp = 5

objref pc 
pc = new ParallelContext() 

objref g,s
g= new Vector(3)
s= new Vector(3)
g.x[0] = 0.0001
g.x[1] = 0.12
g.x[2] = 2
s.x[0] = 0.0001
s.x[1] = 0.12
s.x[2] = 2

// to record spike times 
objref nccE, spveccE, nilcE 

soma nccE = new NetCon(&v(0.5), nilcE)
nccE.threshold = -20

spveccE = new Vector()
nccE.record(spveccE)

objref vvecE, tvec
objref f1, f2, f3, f4

proc instrum(){
	tvec = new Vector()
	tvec.record(&t) // record time
	vvecE = new Vector()
	vvecE.record(&soma.v(0.5)) 

	strdef fa,fb,fc,fd,pa, folder1, string1
	pa = "somaE"
	f1 = new File()
	f2 = new File()
	f3 = new File()
	f4 = new File()
}

// Create the output directory Vanalisi-<pa> and build the four data-file
// path names fa..fd used by saveresults().
// BUG FIXES:
//  * chdir("getcwd()") passed the literal string "getcwd()" as a directory
//    name; the call itself must supply the argument: chdir(getcwd()).
//  * each path sprint has two %s conversions but was given three string
//    arguments (folder1, pa, pa); the spurious extra argument is removed.
proc savedata5() {
	sprint(folder1, "%s", pa)
	sprint(string1, "system(\"mkdir Vanalisi-%s\")", folder1)

	chdir(getcwd())
	execute(string1)

	sprint(fa, "Vanalisi-%s/%s-file1.dat", folder1, pa)
	sprint(fb, "Vanalisi-%s/%s-file2.dat", folder1, pa)
	sprint(fc, "Vanalisi-%s/%s-file3.dat", folder1, pa)
	sprint(fd, "Vanalisi-%s/%s-file4.dat", folder1, pa)  
}

instrum()
savedata5()

strdef cmd
// Build the two-digit run label "<i><j>" into the global strdef cmd.
// BUG FIX: "return cmd" is removed -- hoc procs cannot return a value, and
// funcs may only return numbers.  Callers simply read the global cmd after
// calling this proc.
proc calclabel() {
	sprint(cmd,"%d%d\n",$1,$2)  
}

pc.runworker() 

objref lab
strdef comd
// Run the 3x3 (gnabar, gkbar) sweep: serially when only one host, otherwise
// by submitting each run as an executable statement string to the bulletin board.
proc batchrun() { local tmp
lab= new Vector()
	if (pc.nhost == 1){
		for i = 0, 2 {
			soma.gnabar_hh = g.x[i]
				for j= 0, 2 {
				soma.gkbar_hh= s.x[j]
				run()
                calclabel(i,j)
				// NOTE(review): serial branch never records cmd anywhere --
				// each label is overwritten by the next iteration
			}
		}
	}else{
	  for i = 0, 2 {
	   for j= 0, 2 {
		// NOTE(review): "calclabel(i,j)" executes on the WORKER, where the
		// globals i and j are not the submitting loop's values -- bake the
		// indices into the string ("... calclabel(%d,%d)") and pass i, j twice.
		sprint(comd, "soma.gnabar_hh = g.x[%d] soma.gkbar_hh= s.x[%d] run() calclabel(i,j) \n", i, j)
		  pc.submit(comd)
	   }
	}
	  while (pc.working) { // if a result is ready, get it; if not, pick a job and do it
		// NOTE(review): id is never assigned here (pc.working's return value
		// is discarded by the loop condition), so this prints a stale value.
		print "id ", id
		// NOTE(review): pc.retval() may be consulted once per completed job;
		// the second call below is undefined.  Jobs submitted as statement
		// strings have no function return value in any case.
		print "return value ", pc.retval()	

		lab.append(pc.retval()) 
		pc.unpack(&tmp)
		lab.append(tmp) // get job number
		printf(".") // indicate progress
	  }
	}
}


batchrun()

pc.done() 

// Write cmd to file 1 when no somaE spike was detected, to file 2 when
// exactly one was.
// NOTE(review): this runs once on the master after pc.done(), so spveccE and
// cmd reflect only the LAST simulation the master itself executed -- not each
// submitted job.  Per-job results must be returned through the bulletin board
// (return values / pc.post) and saved from the collection loop instead.
// NOTE(review): local ii is declared but never used.
proc saveresults() { local ii  ,nspikesE
	nspikesE = spveccE.size() 

	if (nspikesE == 0) {
    	f1.aopen(fa)
    	f1.printf("%s\n",cmd)  
	f1.close()	
}

	if (nspikesE == 1) {
        f2.aopen(fb)
        f2.printf("%s\n",cmd)  
	f2.close()	
}
}

saveresults()

quit()
I guess I am not calling calclabel in the right way in the comd string... How can I pass the arguments in the right way? I tried calclabel(%d,%d) and calclabel($1,$2); both were wrong.

menica
Posts: 67
Joined: Wed Sep 02, 2015 11:02 am

Re: parallelize nested loop

Post by menica » Fri Mar 23, 2018 11:36 am

I discover a small error, I should have used the " " in the pc.submit(" ").
But I am still not sure if my first step to the parallelization is correct.
Any help in understanding this?

Here the simple code I tested:

Code: Select all

load_file("nrngui.hoc")

objref pc 
pc = new ParallelContext(2) 

print "number of hosts: ", pc.nhost(), "\thost id: ", pc.id()

create soma
soma {
	L = 10
	diam = 10
	nseg = 1
	insert hh
}

tstop = 1500
dt = 0.025
v_init = -65

objref stim
soma stim = new IClamp(0.5)
stim.del = 100
stim.dur = 5
stim.amp = 5



objref g,s
g= new Vector(3)
s= new Vector(3)
g.x[0] = 0.0001
g.x[1] = 0.12
g.x[2] = 2
s.x[0] = 0.0001
s.x[1] = 0.12
s.x[2] = 2


pc.runworker()

strdef pccmd

// Post one job per (gnabar, gkbar) index pair, then wait for all to finish.
// BUG FIX: pc.submit("pccmd") submitted the literal statement `pccmd` (the
// name of the strdef), not the command that was just formatted into it; the
// strdef itself must be passed so each job executes the built statement.
proc batrun4() { local i, j

	for i=0,2 {	
		for j=0,2 {
			sprint(pccmd, "soma.gnabar_hh = g.x[%d] soma.gkbar_hh= s.x[%d] run()\n", i, j)
			pc.submit(pccmd)
		}
	}
	// Statement-string jobs carry no return value; working() returns the
	// system-generated id of each completed job, 0 when none remain.
	while ((id=pc.working()) != 0) { 
		print "hello"
		//print "id", id
	}
}


batrun4() 
pc.done()
quit()


To me it seems that only for the case i=2 and j=2 the id returned by working() is positive.

Thanks

menica
Posts: 67
Joined: Wed Sep 02, 2015 11:02 am

Re: parallelize nested loop

Post by menica » Mon Mar 26, 2018 10:55 am

Hi, I changed the code. I used the initbatpar.hoc example file and now it is working, but I realized that I can sweep up to 14 parameters only; when I add the 15th one it returns the following error:

Code: Select all

0 unpack size=320 upkpos=70 type[0]=1   datatype=0  type[1]=1  count=1
Assertion failed: file bbsmpipack.c, line 64
nrniv: type[0] == my_datatype
 in test5.hoc near line 334
 batchrun()
           ^
        ParallelContext[0].working()
      batchrun()


I cannot figure out why. Any explanation for this?

here is the code:

Code: Select all

{load_file("stdgui.hoc") } // load standard library but don't open NMM toolbar

objref pc // create ParallelContext instance here
pc = new ParallelContext() // so it exists whenever we need it

///// Simulation parameters
dt = 0.025
celsius = 37
v_init = -70
TSTOP = 1500 // ms, more than long enough for 15 spikes at ISI = 25 ms

///// Model specification
{load_file("EIcells.hoc") }

///// Instrumentation
// experimental manipulations
objref stim
somaE stim = new IClamp(0.5)
stim.del = 1 // ms
stim.dur = 1e9
stim.amp = 0 // nA

objref wE,wI,t1E,t2E,t1I,t2I
objref vna,vk,lna,lk,lcl,lfixe,lfixi
objref tclp,tnap,maxclp,maxnap

  wE= new Vector(3)
  wI= new Vector(3)
  t1E= new Vector(3)
  t2E= new Vector(3)
  t1I= new Vector(3)
  t2I= new Vector(3)
  vna= new Vector(3)
  vk= new Vector(3)
  lna= new Vector(3)
  lk= new Vector(3)
  lcl= new Vector(3)
  lfixe= new Vector(3)
  lfixi= new Vector(3)
  tclp= new Vector(3)
  tnap= new Vector(3)
  maxclp= new Vector(3)
  maxnap= new Vector(3)
  wE.set(0,0.0001)
  wE.set(1,0.01)
  wE.set(2,1)
  wI.set(0,0.0001)
  wI.set(1,0.01)
  wI.set(2,1)
  t1E.set(0,3)
  t1E.set(1,9)
  t1E.set(2,15)
  t2E.set(0,40)
  t2E.set(1,70)
  t2E.set(2,100)
  t1I.set(0,0.5)
  t1I.set(1,0.75)
  t1I.set(2,1)
  t2I.set(0,2)
  t2I.set(1,4)
  t2I.set(2,6)
  vna.set(0,0.003)
  vna.set(1,0.05)
  vna.set(2,0.5)
  vk.set(0,0.00025)
  vk.set(1,0.005)
  vk.set(2,0.125)
  lna.set(0,0.00005)
  lna.set(1,0.0005)
  lna.set(2,0.005)
  lk.set(0,0.00005)
  lk.set(1,0.0005)
  lk.set(2,0.005)
  lcl.set(0,0.00005)
  lcl.set(1,0.0005)
  lcl.set(2,0.005)
  lfixe.set(0,0.00005)
  lfixe.set(1,0.0005)
  lfixe.set(2,0.005)
  lfixi.set(0,0.00005)
  lfixi.set(1,0.0005)
  lfixi.set(2,0.005)
  tclp.set(0,0.1)
  tclp.set(1,1)
  tclp.set(2,30)
  tnap.set(0,0.1)
  tnap.set(1,1)
  tnap.set(2,30)
  maxclp.set(0,0.001)
  maxclp.set(1,0.12)
  maxclp.set(2,10)
  maxnap.set(0,0.001)
  maxnap.set(1,0.12)
  maxnap.set(2,10)



// Accessor funcs: map a sweep index $1 (0..2) to the corresponding value in
// each of the 17 parameter Vectors (names are Italian ordinals, uno..diciasette).
func fparuno() {
  return wE.x[$1]
}
func fpardue() {
  return wI.x[$1]
}
func fpartre() {
  return t1E.x[$1]
}
func fparquattro() {
  return t2E.x[$1]
}
func fparcinque() {
  return t1I.x[$1]
}
func fparsei() {
  return t2I.x[$1]
}
func fparsette() {
  return vna.x[$1]
}
func fparotto() {
  return vk.x[$1]
}
func fparnove() {
  return lk.x[$1]
}
func fpardieci() {
  return lna.x[$1]
}
func fparundici() {
  return lfixi.x[$1]
}
func fpardodici() {
  return lcl.x[$1]
}
func fpartredici() {
  return lfixe.x[$1]
}
func fparquattordici() {
  return tclp.x[$1]
}
func fparquindici() {
  return tnap.x[$1]
}
func fparsedici() {
  return maxclp.x[$1]
}
func fpardiciasette() {
  return maxnap.x[$1]
}
// sets param to appropriate value 
// Setter procs: apply the value for sweep index $1 to the model attribute for
// each swept parameter (one setter per fpar* accessor above).
proc setparamsuno() {
  NetCon[1].weight = fparuno($1)
}
proc setparamsdue() {
  NetCon[0].weight = fpardue($1)
}
proc setpartre() {
  Exp2Syn[0].tau1 = fpartre($1)
}
proc setparquattro() {
  Exp2Syn[0].tau2 = fparquattro($1)
}
proc setparcinque() {
  is[0].tau1 = fparcinque($1)
}
proc setparsei() {
  is[0].tau2 = fparsei($1)
}
proc setparsette() {
  somaI.gna_pnas = fparsette($1)
}
proc setparotto() {
  somaI.gkdr_pkdrs = fparotto($1)
}
proc setparnove() {
  somaI.gk_leak = fparnove($1)
}
proc setpardieci() {
  somaI.gna_leak = fpardieci($1)
}
proc setparundici() {
  somaI.gfix_leak = fparundici($1)
}
proc setpardodici() {
  somaE.gcl_clleak = fpardodici($1)
}
proc setpartredici() {
  somaE.gfix_clleak = fpartredici($1)
}
proc setparquattordici() {
  somaE.tau_pump_pumpcltau = fparquattordici($1)
}
proc setparquindici() {
  somaI.tau_pump_pumpnatau2 = fparquindici($1)
}
proc setparsedici() {
  somaE.ipumpmax_pumpcltau = fparsedici($1)
}
proc setpardiciasette() {
  somaI.ipumpmax_pumpnatau2 = fpardiciasette($1)
}

// data recording and analysis fro somaE and somaI
objref ncE, spvecE, nilE // to record spike times
somaE ncE = new NetCon(&somaE.v(0.5), nilE)
ncE.threshold = -10 // mV
spvecE = new Vector()
{ ncE.record(spvecE) }

objref ncI, spvecI, nilI // to record spike times
// count only those spikes that get to distal end of dend
somaI ncI = new NetCon(&somaI.v(0.5), nilI)
ncI.threshold = -10 // mV
spvecI = new Vector()
{ ncI.record(spvecI) }

NSE=0
NSI=0
// Classify the outcome of the run just completed into the globals NSE / NSI:
// 0 = no spikes, 1 = exactly one spike, 2 = more than one spike.
proc postproc() { local countE, countI
  countE = spvecE.size()
  countI = spvecI.size()
  NSE = 0
  if (countE == 1) {
    NSE = 1
  }
  if (countE > 1) {
    NSE = 2
  }
  NSI = 0
  if (countI == 1) {
    NSI = 1
  }
  if (countI > 1) {
    NSI = 2
  }
}

///// Simulation control
tstop = TSTOP

// One bulletin-board job: $1..$15 are sweep indices for 15 of the 17
// parameters; runs the simulation and returns the spike classification NSI.
// NOTE(review): the "Assertion failed: file bbsmpipack.c" crash that appears
// when a 15th/16th argument is added points at pc.submit's scalar-argument
// packing, not at this function's logic.  Rather than posting 15+ scalars per
// job, encode all loop indices into a single integer (they are 0/1 here, so a
// base-2 code fits easily in a double) or post one Vector, and decode it
// here.  TODO: confirm against the ParallelContext documentation.
func fi() { // set params, execute a simulation, analyze and return results
  setparamsuno($1) // set parameters for this run
  setparamsdue($2)
  setpartre($3) 
  setparquattro($4)
  setparcinque($5)
  setparsei($6) 
  setparsette($7)
  setparotto($8) 
  setparnove($9)
  setpardieci($10) 
  setparundici($11) 
  setpardodici($12)
  setpartredici($13) 
  setparquattordici($14) 
  setparquindici($15) 
  //setparsedici($2) 
  //setpardiciasette($3) 
  run()
  postproc() // analyze the data
  return NSI
}

// batch control
trun = startsw() 

{ pc.runworker() } 

objref svec, NSvec
// Post one fi() job per combination of 15 binary sweep indices (2^15 jobs),
// then collect the NSI results as they complete.
proc batchrun() { local ii, jj, k, l, m, n, p, q, r, s, t, u, h, z, a,  tmp 
  svec = new Vector()
  NSvec = new Vector()
  for ii = 0, 1 { 
    for jj= 0, 1 {
        for k = 0, 1 {
          for l = 0, 1 {
            for m = 0,1 {           
             for n = 0,1 {             
              for p = 0,1 {
                for q = 0,1 {
                  for r = 0,1 {
                    for s = 0,1 {
                      for t = 0,1 {
                        for u = 0,1 {
                          for h = 0,1 {
                            for z = 0,1 {
                             for a = 0,1 {
                      //         for b = 0,1 {
                      //            for c = 0,1 {
          // NOTE(review): this many scalar args is what trips the
          // bbsmpipack.c assertion when one more is added -- encode the
          // indices into ONE integer (base-2 here) or post a Vector instead.
          pc.submit("fi", ii, jj, k, l, m, n, p, q, r, s, t, u, h, z, a) // post all jobs to bulletin board  
    }}}}}}}}}}}}}}}
  // retrieve results from bulletin board
  while (pc.working) { // if a result is ready, get it; if not, pick a job and do it
    NSvec.append(pc.retval()) // get frequency
    // NOTE(review): pc.unpack(&tmp) retrieves only the FIRST posted argument
    // (ii, always 0 or 1) -- not a unique job number, so sorting NSvec by
    // these values below cannot reconstruct job order.  Post a unique job
    // index as the first argument (or use pc.userid()).
    pc.unpack(&tmp)
    svec.append(tmp) // get job number
    printf(".") // indicate progress
  }
}

batchrun()
{ pc.done() }

///// Reporting of results
NSvec = NSvec.index(svec.sortindex()) // rearrange fvec according to svec sortindex
{
svec.sort()
}


// Write the (sorted) job ids and their NSI classifications to the file named
// by $s1, preceded by a two-line header: "label:f" and the record count.
proc saveresults() { local idx  localobj out
  out = new File($s1)
  out.wopen()
  out.printf("label:%s\n%d\n", "f", NSvec.size)
  idx = 0
  while (idx < NSvec.size) {
    out.printf("%g\t%g\n", svec.x[idx], NSvec.x[idx])
    idx = idx + 1
  }
  out.close()
}

saveresults("fi.dat")

print " "
print startsw() - trun, "seconds" // report run time

//quit()

ted
Site Admin
Posts: 5169
Joined: Wed May 18, 2005 4:50 pm
Location: Yale University School of Medicine
Contact:

Re: parallelize nested loop

Post by ted » Tue Mar 27, 2018 10:46 am

The first step is to determine where the failure occurred. hoc does not have a built-in method for tracing program execution, so you have to work this out for yourself. The easiest way is to insert one or more print statements into your program. Example: suppose you change

Code: Select all

maxnap.set(2,10)

func fparuno() {
  return wE.x[$1]
}
to

Code: Select all

maxnap.set(2,10)
print "here"

func fparuno() {
  return wE.x[$1]
}
and then run your program. If the program's output is

Code: Select all

here
0 unpack size=320 upkpos=70 type[0]=1   datatype=0  type[1]=1  count=1
Assertion failed: file bbsmpipack.c, line 64
. . .
then you know that the problem happens AFTER maxnap.set(2,10) is executed.

menica
Posts: 67
Joined: Wed Sep 02, 2015 11:02 am

Re: parallelize nested loop

Post by menica » Wed Mar 28, 2018 7:10 am

Dear Ted,
thanks for your reply.
I tested the code with a printing command at different points of the code.
the only thing I can conclude is that maybe the submit command has a maximum number of arguments that it can accept as input.

Post Reply