Add weights_path command-line argument to the msd executable to indicate
+where to store final weights.
+
+Add MSDBlock2d: this is a faster and slightly more memory-efficient
+implementation of the same MSD component. Many thanks to Jonas
+Adler for suggesting this way of structuring the code!
def train(model, epochs, train_dl, val_dl, weights_path):
+
+    weights_path = Path(weights_path).expanduser().resolve()
+    if weights_path.exists():
+        logging.warning(f"Overwriting weights file {weights_path}")
+
    # The network works best if the input data has mean zero and has a
    # standard deviation of 1. To achieve this, we get a rough estimate of
    # correction parameters from the training data. These parameters are
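In practice this estimate is installed through the model API before training starts; a minimal usage sketch, assuming a model and a training DataLoader have already been constructed as on the examples page:

    # Estimate mean and standard deviation from the training data and
    # store the corresponding scaling in the model.
    model.set_normalization(train_dl)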
@@ -307,16 +317,16 @@
Source code for msd_pytorch.main
        # Save network if worthwhile
        if validation_error < best_validation_error or val_dl is None:
            best_validation_error = validation_error
-            model.save(f"msd_network_epoch_{epoch}.torch", epoch)
-            ex.add_artifact(f"msd_network_epoch_{epoch}.torch")
+            model.save(f"{weights_path}_epoch_{epoch}.torch", epoch)
+            ex.add_artifact(f"{weights_path}_epoch_{epoch}.torch")

        end = timer()
        ex.log_scalar("Iteration time", end - start)
        logging.info(f"{epoch:05} Iteration time: {end-start: 0.6f}")

    # Always save final network parameters
-    model.save(f"msd_network_epoch_{epoch}.torch", epoch)
-    ex.add_artifact(f"msd_network_epoch_{epoch}.torch")
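For completeness, a hedged sketch of the naming scheme this change introduces and of how such a file could be reloaded later with the model API; the path and epoch number below are placeholders:

    # The loop above writes files named f"{weights_path}_epoch_{epoch}.torch".
    # A model with the same architecture can restore one of them later:
    model.load("weights/msd_network_epoch_42.torch")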
def forward(self, input):
+ # This is a bit of a hack, since we require but cannot assume
+ # that self.parameters() remains sorted in the order that we
+ # added the parameters.
+ #
+ # However, we need to obtain weights in this way, because
+ # self.weights may become obsolete when used in multi-gpu
+ # settings when the weights are automatically transferred (by,
+ # e.g., torch.nn.DataParallel). In that case, self.weights may
+ # continue to point to the weight parameters on the original
+ # device, even when the weight parameters have been
+ # transferred to a different gpu.
+        bias, *weights = self.parameters()
+        return MSDBlockImpl2d.apply(input, self.dilations, bias, *weights)
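As an aside, a minimal sketch (plain PyTorch, not part of this library) of the registration-order behaviour that the `bias, *weights = self.parameters()` unpacking relies on:

    import torch
    import torch.nn as nn

    class Toy(nn.Module):
        def __init__(self):
            super().__init__()
            # parameters() yields parameters in registration order, so
            # registering the bias first makes the unpacking below valid.
            self.bias = nn.Parameter(torch.zeros(1))
            self.weight0 = nn.Parameter(torch.randn(2, 2))
            self.weight1 = nn.Parameter(torch.randn(2, 2))

    bias, *weights = Toy().parameters()
    assert bias.numel() == 1 and len(weights) == 2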
    :rtype:
    """
- # The number of paramters in the convolution depends on whether
+    # The number of parameters in the convolution depends on whether
     # the convolution is 2D or 3D. We multiply all non-channel
     # dimensions of the weight here to get the right answer.
     num_params = np.product(conv_weight.shape[2:])
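A small illustration of what that product computes, using a hypothetical 2D weight shape:

    import numpy as np

    # A 2D convolution weight has shape (out_channels, in_channels, kH, kW);
    # the non-channel dimensions are the kernel height and width.
    conv_weight_shape = (1, 10, 3, 3)
    num_params = np.product(conv_weight_shape[2:])  # 3 * 3 == 9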
@@ -285,8 +284,7 @@
        stitch_layer = StitchCopyModule(buffer, 0)

        # Then we have `depth` number of hidden layers:
-        hidden_layers = [
+        self.hidden_layers = [
            MSDLayerModule(buffer, c_in, d, width, dilations[d % len(dilations)])
            for d in range(depth)
        ]

        # Initialize weights for hidden layers:
-        for m in hidden_layers:
+        for m in self.hidden_layers:
            init_convolution_weights(
                m.convolution.weight.data, c_in, c_out, width, depth
            )
            m.convolution.bias.data.zero_()

        in_front = units_in_front(c_in, width, depth)
-        c_final = MSDFinalLayer(in_front, c_out)
+        self.c_final = MSDFinalLayer(in_front, c_out)
-        self.net = nn.Sequential(stitch_layer, *hidden_layers, c_final)
+        self.net = nn.Sequential(stitch_layer, *self.hidden_layers, self.c_final)

        self.net.cuda()
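The `dilations[d % len(dilations)]` indexing cycles through the supplied dilations when the depth exceeds their number; a quick illustration with hypothetical values:

    dilations = [1, 2, 4, 8]
    depth = 10
    per_layer = [dilations[d % len(dilations)] for d in range(depth)]
    # per_layer == [1, 2, 4, 8, 1, 2, 4, 8, 1, 2]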
        *,
        dilations=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
        loss="L2",
+        parallel=False,
    ):
        """Create a new MSD network for regression.
@@ -195,6 +196,12 @@
Source code for msd_pytorch.msd_regression_model
* "L1" - ``nn.L1Loss()`` * "L2" - ``nn.MSELoss()``
+ :param parallel: `bool`
+
+ Whether or not to execute the model on multiple GPUs. Note
+ that the batch size must be a multiple of the number of
+ available GPUs.
+
        :returns:
        :rtype:
@@ -210,6 +217,8 @@
Source code for msd_pytorch.msd_regression_model
        # Define the whole network:
        self.net = nn.Sequential(self.scale_in, self.msd, self.scale_out)
        self.net.cuda()
+        if parallel:
+            self.net = nn.DataParallel(self.net)

        # Train only MSD parameters:
        self.init_optimizer(self.msd)
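A hedged usage sketch of the new option; the c_in, c_out, depth, and width keywords and their values are placeholders assumed from the rest of the documentation:

    from msd_pytorch.msd_regression_model import MSDRegressionModel

    # parallel=True wraps the internal network in torch.nn.DataParallel,
    # so the batch size must be a multiple of the number of visible GPUs.
    model = MSDRegressionModel(c_in=1, c_out=1, depth=30, width=1,
                               loss="L2", parallel=True)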
Source code for msd_pytorch.msd_segmentation_model
        width,
        *,
        dilations=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
+        parallel=False,
    ):
        """Create a new MSD network for segmentation.
@@ -185,6 +186,12 @@
Source code for msd_pytorch.msd_segmentation_model
good alternative is ``[1, 2, 4, 8]``. The dilations are
repeated when there are more layers than supplied dilations.
+ :param parallel: `bool`
+
+ Whether or not to execute the model on multiple GPUs. Note
+ that the batch size must be a multiple of the number of
+ available GPUs.
+
        :returns:
        :rtype:
@@ -200,6 +207,9 @@
Source code for msd_pytorch.msd_segmentation_model
        self.net = nn.Sequential(self.scale_in, net_trained)
        self.net.cuda()
+        if parallel:
+            self.net = nn.DataParallel(self.net)
+
        # Train all parameters apart from self.scale_in.
        self.init_optimizer(net_trained)
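Similarly for segmentation, a sketch that also checks the batch-size constraint mentioned in the docstring; the num_labels keyword and all values are assumptions for illustration:

    import torch
    from msd_pytorch.msd_segmentation_model import MSDSegmentationModel

    num_gpus = max(torch.cuda.device_count(), 1)
    batch_size = 4
    # DataParallel splits each batch across the available GPUs.
    assert batch_size % num_gpus == 0

    model = MSDSegmentationModel(c_in=1, num_labels=4, depth=30, width=1,
                                 parallel=True)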
diff --git a/docs/_sources/CHANGELOG.md.txt b/docs/_sources/CHANGELOG.md.txt
index 099818c..2d9091b 100644
--- a/docs/_sources/CHANGELOG.md.txt
+++ b/docs/_sources/CHANGELOG.md.txt
@@ -6,12 +6,25 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
### Added
--
+- Support for multi-gpu execution. Use `parallel=True` when
+  constructing an `MSDRegressionModel` or `MSDSegmentationModel`.
### Fixed
--
+- Make `model.forward()` more memory-efficient.
+### Removed
+
+## [0.7.1] - 2019-05-27
+### Added
+- Add `weights_path` command-line argument to the `msd` executable to indicate
+ where to store final weights.
+- Add `MSDBlock2d`: this is a faster and slightly more memory-efficient
+ implementation of the same MSD component. Many thanks to Jonas
+ Adler for suggesting this way of structuring the code!
+### Changed
+- The MSD models use the `MSDBlock2d` implementation by default now.
## 0.6.2 - 2019-05-23
### Added
- Initial release.
[Unreleased]: https://www.github.com/ahendriksen/msd_pytorch/compare/v0.6.2...master
+[0.7.1]: https://www.github.com/ahendriksen/msd_pytorch/compare/v0.6.2...v0.7.1
diff --git a/docs/_sources/README.md.txt b/docs/_sources/README.md.txt
index e08995d..1a2f790 100644
--- a/docs/_sources/README.md.txt
+++ b/docs/_sources/README.md.txt
@@ -127,6 +127,7 @@ examples folder.
## Authors and contributors
* **Allard Hendriksen** - *Initial work*
+* **Jonas Adler** - *Discussions and code*
See also the list of [contributors](https://github.com/ahendriksen/msd_pytorch/contributors) who participated in this project.
diff --git a/docs/_sources/msd_pytorch.rst.txt b/docs/_sources/msd_pytorch.rst.txt
index 90a99ca..e955df8 100644
--- a/docs/_sources/msd_pytorch.rst.txt
+++ b/docs/_sources/msd_pytorch.rst.txt
@@ -52,6 +52,14 @@ msd\_pytorch.main module
:undoc-members:
:show-inheritance:
+msd\_pytorch.msd\_block module
+------------------------------
+
+.. automodule:: msd_pytorch.msd_block
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
msd\_pytorch.msd\_model module
------------------------------
diff --git a/docs/genindex.html b/docs/genindex.html
index 3d020b1..ebb43cf 100644
--- a/docs/genindex.html
+++ b/docs/genindex.html
@@ -185,6 +185,10 @@
Although the recipe for forward pass needs to be defined within
+this function, one should call the Module instance afterwards
+instead of this since the former takes care of running the
+registered hooks while the latter silently ignores them.
Defines a formula for differentiating the operation.
+
This function is to be overridden by all subclasses.
+
It must accept a context ctx as the first argument, followed by
+as many outputs as forward() returned, and it should return as many
+tensors as there were inputs to forward(). Each argument is the
+gradient w.r.t. the given output, and each returned value should be the
+gradient w.r.t. the corresponding input.
+
The context can be used to retrieve tensors saved during the forward
+pass. It also has an attribute ctx.needs_input_grad as a tuple
+of booleans representing whether each input needs gradient. E.g.,
+backward() will have ctx.needs_input_grad[0]=True if the
+first input to forward() needs gradient computed w.r.t. the
+output.
Although the recipe for forward pass needs to be defined within
+this function, one should call the Module instance afterwards
+instead of this since the former takes care of running the
+registered hooks while the latter silently ignores them.