[27c943]: /pathflowai/__pycache__/models.cpython-36.pyc


"""
models.py
=======================
Houses all of the PyTorch models to access and the corresponding Scikit-Learn like model trainer.
"""

# Imports as recorded in the compiled module's import table; aliases follow the usual conventions.
from pathflowai.unet import UNet
from pathflowai.unet2 import NestedUNet
from pathflowai.unet4 import UNetSmall
from pathflowai.fast_scnn import get_fast_scnn
import torch
from torchvision import models
from torchvision.models import segmentation
from torch import nn
from torch.nn import functional as F
import time
import copy
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from pathflowai.schedulers import *
import pysnooper
from torch.autograd import Variable
from sklearn.metrics import roc_curve, confusion_matrix, classification_report, r2_score
sns.set()
from pathflowai.losses import GeneralizedDiceLoss, FocalLoss
from apex import amp
class MLP(nn.Module):
	"""Multi-layer perceptron model.

	Parameters
	----------
	n_input:int
		Number of input dimensions.
	hidden_topology:list
		List of hidden layer sizes.
	dropout_p:float
		Amount of dropout.
	n_outputs:int
		Number of outputs.
	binary:bool
		Binary output with sigmoid transform.
	softmax:bool
		Whether to apply softmax on output.
	"""

	def __init__(self, n_input, hidden_topology, dropout_p, n_outputs=1, binary=True, softmax=False):
		super(MLP, self).__init__()
		self.topology = [n_input] + hidden_topology + [n_outputs]
		...  # stacks Linear/LeakyReLU/Dropout blocks with Xavier-initialized weights and a final
		     # sigmoid or softmax transform into self.mlp; see the sketch below

	Parameters
	----------
	segnet:nn.Module
		Segmentation network
	cstt|âjâ||_dS)N)r&r<r'┌segnet)rr=)r6rrr'Ksz FixedSegmentationModule.__init__cCs|j|âdS)ztForward pass.

		Parameters
		----------
		x:Tensor
			Input

		Returns
		-------
		Tensor
			Output from model.

		┌out)r=)r┌xrrr┌forwardOszFixedSegmentationModule.forward)r7r8r9r:r'r@r;rr)r6rr<Csr<TÚdFcCs■d}|dkrtd|dŹ}ÉnŮ|dkr2td|â}Én╚|dkrFt|â}Én┤|dkr\td|â}Én×|jdârČd	d
lm}|rî|j|t|dŹdŹ}n|j	|t|dŹdŹ}t
|âÉnNd
dättâDâ}ddätt
âDâ}	||krŔtt|â|dŹ}|Érp||	kÉr
tt
|â|dŹ}n
tddŹ}|jdâÉrBtjd|d#d$dŹ|jd<t|â}n,|jdâÉr˙tjd|d%d&dŹ|jd<t|â}nŐ|jdâÉsł|jdâÉrČ|jj}
t|
dgd||ddŹj|_nN|jdâÉsđ|jd âÉsđ|jd!âÉr˙|jd"j}
t|
dgd||ddŹj|jd"<|S)'aÂGenerate a nn.Module for use.

	Parameters
	----------
	pretrain:bool
		Pretrain using ImageNet?
	architecture:str
		See model_training for list of all architectures you can train with.
	num_classes:int
		Number of classes to predict.
	add_sigmoid:type
		Add sigmoid non-linearity at end.
	n_hidden:int
		Number of hidden fully connected layers.
	segmentation:bool
		Whether segment task?

	Returns
	-------
	nn.Module
		Pytorch model.

	N┌unetÚ)┌
n_channels┌	n_classes┌unet2┌	fast_scnn┌nested_unetZefficientnetr)┌EfficientNet)┌num_classes)Zoverride_paramscSsg|]}|jdâs|ĹqS)┌__)┌
startswith)r┌mrrrrîsz"generate_model.<locals>.<listcomp>cSsg|]}|jdâs|ĹqS)rK)rL)rrMrrrrŹs)┌
pretrained)rD┌deeplabÚr)┌kernel_size┌strideÚ┌fcni┌resnet┌	inceptioniŔgF)r#r3r4r5┌alexnet┌vggZ	densenetsÚ)rr)rr)rr)rr)r┌UNet2rrrLZefficientnet_pytorchrI┌from_pretrained┌dict┌	from_name┌print┌dirr┌	segmodels┌getattrr┌Conv2d┌
classifierr<┌fc┌in_featuresrr2)┌pretrain┌architecturerJ┌add_sigmoid┌n_hiddenr┌modelrIZmodel_namesZsegmentation_model_namesZnum_ftrsrrr┌generate_model_sH




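# Hypothetical usage sketch of generate_model (the architecture names follow the dispatch noted
# above; the chosen values are illustrative, not configuration recovered from the compiled file):
def _generate_model_example():
	patch_classifier = generate_model(pretrain=True, architecture='resnet50', num_classes=2)
	segmenter = generate_model(pretrain=False, architecture='unet', num_classes=2, segmentation=True)
	return patch_classifier, segmenter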
def dice_loss(logits, true, eps=1e-7):
	"""https://github.com/kevinzakka/pytorch-goodies
	Computes the Sørensen–Dice loss.

	Note that PyTorch optimizers minimize a loss. In this
	case, we would like to maximize the dice loss so we
	return the negated dice loss.

	Args:
		true: a tensor of shape [B, 1, H, W].
		logits: a tensor of shape [B, C, H, W]. Corresponds to
			the raw output or logits of the model.
		eps: added to the denominator for numerical stability.

	Returns:
		dice_loss: the Sørensen–Dice loss.
	"""
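	# Body reconstructed from the pytorch-goodies snippet linked above; the intermediate names
	# (true_1_hot, pos_prob, neg_prob, probas, dims, intersection, cardinality) all appear in the
	# compiled constants, so this is assumed to match the original closely.
	num_classes = logits.shape[1]
	if num_classes == 1:
		true_1_hot = torch.eye(num_classes + 1)[true.squeeze(1)]
		true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()
		true_1_hot_f = true_1_hot[:, 0:1, :, :]
		true_1_hot_s = true_1_hot[:, 1:2, :, :]
		true_1_hot = torch.cat([true_1_hot_s, true_1_hot_f], dim=1)
		pos_prob = torch.sigmoid(logits)
		neg_prob = 1 - pos_prob
		probas = torch.cat([pos_prob, neg_prob], dim=1)
	else:
		true_1_hot = torch.eye(num_classes)[true.squeeze(1)]
		true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()
		probas = F.softmax(logits, dim=1)
	true_1_hot = true_1_hot.type(logits.type())
	dims = (0,) + tuple(range(2, true.ndimension()))
	intersection = torch.sum(probas * true_1_hot, dims)
	cardinality = torch.sum(probas + true_1_hot, dims)
	dice = (2. * intersection / (cardinality + eps)).mean()
	return 1 - dice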
class ModelTrainer:
	"""Trainer for the neural network model that wraps it into a scikit-learn like interface.

	Parameters
	----------
	model:nn.Module
		Deep learning pytorch model.
	n_epoch:int
		Number of training epochs.
	validation_dataloader:DataLoader
		Dataloader of validation dataset.
	optimizer_opts:dict
		Options for optimizer.
	scheduler_opts:dict
		Options for learning rate scheduler.
	loss_fn:str
		String to call a particular loss function for model.
	reduction:str
		Mean or sum reduction of loss.
	num_train_batches:int
		Number of training batches per epoch.
	"""
	def __init__(self, model, n_epoch=300, validation_dataloader=None,
				 optimizer_opts=dict(name='adam', lr=1e-3, weight_decay=1e-4),
				 scheduler_opts=dict(scheduler='warm_restarts', lr_scheduler_decay=0.5,
									 T_max=10, eta_min=5e-8, T_mult=1),
				 loss_fn='ce', reduction='mean', num_train_batches=None):
		self.model = model
		# Visible in the compiled constants: an optimizer table ('adam' -> torch.optim.Adam,
		# 'sgd' -> torch.optim.SGD), a loss table keyed by 'bce', 'ce', 'mse', 'nll', 'dice',
		# 'focal', 'gdl' and 'dice+ce', mixed-precision setup via amp.initialize(..., opt_level='O2'),
		# and a learning-rate Scheduler built from scheduler_opts; see the sketch below.
		...
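	# A sketch of the loss dispatch suggested by the names in the compiled constructor
	# ('bce', 'ce', 'mse', 'nll', 'dice', 'focal', 'gdl', 'dice+ce'); the constructor keywords
	# shown here (reduction=..., and the num_class/add_softmax options the constants mention for
	# the pathflowai losses) are assumptions about how the originals were wired together.
	@staticmethod
	def _loss_table_sketch(reduction='mean', num_class=2):
		ce = nn.CrossEntropyLoss(reduction=reduction)
		loss_functions = {
			'bce': nn.BCEWithLogitsLoss(reduction=reduction),
			'ce': ce,
			'mse': nn.MSELoss(reduction=reduction),
			'nll': nn.NLLLoss(reduction=reduction),
			'dice': dice_loss,
			'focal': FocalLoss(num_class=num_class),
			'gdl': GeneralizedDiceLoss(add_softmax=True),
		}
		# the composite key simply adds the dice and cross-entropy terms
		loss_functions['dice+ce'] = lambda y_pred, y_true: dice_loss(y_pred, y_true) + ce(y_pred, y_true)
		return loss_functions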
	def calc_loss(self, y_pred, y_true):
		"""Calculates loss supplied in init statement and modified by reweighting.

		Parameters
		----------
		y_pred:tensor
			Predictions.
		y_true:tensor
			True values.

		Returns
		-------
		loss
		"""
		return self.loss_fn(y_pred, y_true)

	def calc_val_loss(self, y_pred, y_true):
		"""Calculates loss supplied in init statement on validation set.

		Parameters
		----------
		y_pred:tensor
			Predictions.
		y_true:tensor
			True values.

		Returns
		-------
		val_loss
		"""
		return self.original_loss_fn(y_pred, y_true)

	def reset_loss_fn(self):
		"""Resets loss to original specified loss."""
		self.loss_fn = self.original_loss_fn

	def add_class_balance_loss(self, dataset):
		"""Updates loss function to handle class imbalance by weighting inverse to class appearance.

		Parameters
		----------
		dataset:DynamicImageDataset
			Dataset to balance by.
		"""
		self.class_weights = dataset.get_class_weights()
		self.original_loss_fn = copy.deepcopy(self.loss_fn)
		...  # rebuilds self.loss_fn with per-class weights (moved to CUDA when available);
		     # see the reweighting sketch below
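	# A sketch of the reweighting this method appears to perform: 'ce' and 'nll' losses are
	# rebuilt with a weight tensor, anything else falls back to weighting each class's
	# contribution to the original loss. The fallback lambda's exact shape is an assumption.
	def _weighted_loss_sketch(self, weight):
		if self.loss_fn_name == 'ce':
			return nn.CrossEntropyLoss(weight=weight)
		elif self.loss_fn_name == 'nll':
			return nn.NLLLoss(weight=weight)
		return lambda y_pred, y_true: sum(
			self.class_weights[i] * self.original_loss_fn(y_pred[y_true == i], y_true[y_true == i])
			for i in range(len(self.class_weights)) if (y_true == i).sum() > 0)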
	def calc_best_confusion(self, y_pred, y_true):
		"""Calculate confusion matrix on validation set for classification/segmentation tasks, optimize threshold where positive.

		Parameters
		----------
		y_pred:array
			Predictions.
		y_true:array
			Ground truth.

		Returns
		-------
		float
			Optimized threshold to use on test set.
		dataframe
			Confusion matrix.
		"""
		...  # picks the ROC threshold closest to the ideal (fpr, tpr) = (0, 1) corner (see the
		     # sketch below) and returns it together with a '+'/'-' labelled pandas DataFrame of
		     # the confusion_matrix at that threshold
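	# Threshold-selection sketch: roc_curve, then np.argmin over the distance of each (fpr, tpr)
	# pair to the ideal (0, 1) point, as the compiled constants suggest; the exact array
	# manipulation is an assumption.
	@staticmethod
	def _best_threshold_sketch(y_true, y_pred):
		fpr, tpr, thresholds = roc_curve(y_true, y_pred)
		distances = np.abs(np.vstack((fpr, tpr)).T - np.array([0., 1.])).sum(axis=1)
		return thresholds[np.argmin(distances)]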
	def loss_backward(self, loss):
		"""Backprop using mixed precision for added speed boost.

		Parameters
		----------
		loss:loss
			Torch loss calculated.
		"""
		with amp.scale_loss(loss, self.optimizer) as scaled_loss:
			scaled_loss.backward()
	def train_loop(self, epoch, train_dataloader):
		"""One training epoch, calculate predictions, loss, backpropagate.

		Parameters
		----------
		epoch:int
			Current epoch.
		train_dataloader:DataLoader
			Training data.

		Returns
		-------
		float
			Training loss for epoch.
		"""
		self.model.train(True)
		...  # per batch: move X and y_true to CUDA when available, forward pass, calc_loss,
		     # optimizer.zero_grad, loss_backward, optimizer.step, and a
		     # "Epoch {}[{}/{}] Train Loss:{}" print; the scheduler then steps once and the
		     # mean batch loss is returned (see the sketch below)
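	# Minimal sketch of the batch loop described above (Variable wrapping and the per-batch
	# logging are omitted; assumes (X, y_true) batches as the compiled variable names suggest):
	def _train_epoch_sketch(self, train_dataloader):
		self.model.train(True)
		running_loss = 0.
		n_batch = 0
		for X, y_true in train_dataloader:
			if torch.cuda.is_available():
				X, y_true = X.cuda(), y_true.cuda()
			y_pred = self.model(X)
			loss = self.calc_loss(y_pred, y_true)
			self.optimizer.zero_grad()
			self.loss_backward(loss)  # mixed-precision backward via apex.amp
			self.optimizer.step()
			running_loss += loss.item()
			n_batch += 1
		self.scheduler.step()  # per-epoch learning-rate step (attribute name assumed)
		return running_loss / n_batch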
	def val_loop(self, epoch, val_dataloader, print_val_confusion=True, save_predictions=True):
		"""Calculate loss over validation set.

		Parameters
		----------
		epoch:int
			Current epoch.
		val_dataloader:DataLoader
			Validation iterator.
		print_val_confusion:bool
			Calculate confusion matrix and plot.
		save_predictions:bool
			Print validation results.

		Returns
		-------
		float
			Validation loss for epoch.
		"""
		self.model.train(False)
		...  # under torch.no_grad(): accumulates calc_val_loss over batches with a
		     # "Epoch {}[{}/{}] Val Loss:{}" print, then reports either the optimized confusion
		     # matrix and threshold ("Epoch {} Val Confusion, Threshold {}:") for classification,
		     # an R2 score ("Epoch {} Val Regression, R2 Score {}") for regression targets, or a
		     # classification_report for segmentation maps; returns the mean validation loss
	def test_loop(self, test_dataloader):
		"""Calculate final predictions on the test dataloader.

		Parameters
		----------
		test_dataloader:DataLoader
			Test dataset.

		Returns
		-------
		array
			Predictions or embeddings.
		"""
		...  # under torch.no_grad(): collects argmax class maps for segmentation, otherwise the
		     # raw (optionally sigmoid/softmax-transformed) scores, and returns them stacked with
		     # np.concatenate
	def fit(self, train_dataloader, verbose=False, print_every=10, save_model=True,
			plot_training_curves=False, plot_save_file=None, print_val_confusion=True,
			save_val_predictions=True):
		"""Fits the segmentation or classification model to the patches, saving the model with the lowest validation score.

		Parameters
		----------
		train_dataloader:DataLoader
			Training dataset.
		verbose:bool
			Print training and validation loss?
		print_every:int
			Number of epochs until print?
		save_model:bool
			Whether to save model when reaching lowest validation loss.
		plot_training_curves:bool
			Plot training curves over epochs.
		plot_save_file:str
			File to save training curves.
		print_val_confusion:bool
			Print validation confusion matrix.
		save_val_predictions:bool
			Print validation results.

		Returns
		-------
		self
			Trainer.
		float
			Minimum val loss.
		int
			Best validation epoch with lowest loss.
		"""
		self.train_losses = []
		self.val_losses = []
		...  # per epoch: time train_loop and val_loop, optionally plot the curves and print
		     # "Epoch {}: Train Loss {}, Val Loss {}, Train Time {}, Val Time {}", and keep a
		     # deepcopy of the model from the epoch with the lowest validation loss when
		     # save_model is set (see the sketch below)
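	# Sketch of the epoch loop and model selection described above (the best_epoch / best_model /
	# min_val_loss names appear in the compiled code; timing and plotting are omitted here):
	def _fit_sketch(self, train_dataloader, save_model=True):
		self.train_losses, self.val_losses = [], []
		min_val_loss, best_epoch, best_model = float('inf'), 0, None
		for epoch in range(self.n_epoch):
			self.train_losses.append(self.train_loop(epoch, train_dataloader))
			val_loss = self.val_loop(epoch, self.validation_dataloader)
			self.val_losses.append(val_loss)
			if val_loss < min_val_loss:
				min_val_loss, best_epoch, best_model = val_loss, epoch, copy.deepcopy(self.model)
		if save_model and best_model is not None:
			self.model = best_model
		return self, min_val_loss, best_epoch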
	def plot_train_val_curves(self, save_file=None):
		"""Plots training and validation curves.

		Parameters
		----------
		save_file:str
			File to save to.
		"""
		plt.figure()
		...  # seaborn lineplot over a melted pandas DataFrame of the stored per-epoch train/val
		     # losses (hue='variable'); written out with plt.savefig(save_file, dpi=300) when a
		     # save_file is supplied

	def predict(self, test_dataloader):
		"""Make classification segmentation predictions on testing data.

		Parameters
		----------
		test_dataloader:DataLoader
			Test data.

		Returns
		-------
		array
			Predictions.
		"""
		y_pred = self.test_loop(test_dataloader)
		return y_pred
	def fit_predict(self, train_dataloader, test_dataloader):
		"""Fit model to training data and make classification segmentation predictions on testing data.

		Parameters
		----------
		train_dataloader:DataLoader
			Train data.
		test_dataloader:DataLoader
			Test data.

		Returns
		-------
		array
			Predictions.
		"""
		return self.fit(train_dataloader)[0].predict(test_dataloader)

	def return_model(self):
		"""Returns pytorch model."""
		return self.model
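# End-to-end usage sketch of the scikit-learn like interface (the dataloaders and the concrete
# option values are illustrative assumptions, not configuration recovered from the compiled file):
def _model_trainer_example(train_dataloader, val_dataloader, test_dataloader):
	model = generate_model(pretrain=True, architecture='resnet50', num_classes=2)
	trainer = ModelTrainer(model,
						   n_epoch=10,
						   validation_dataloader=val_dataloader,
						   optimizer_opts=dict(name='adam', lr=1e-3, weight_decay=1e-4),
						   scheduler_opts=dict(scheduler='warm_restarts', lr_scheduler_decay=0.5,
											   T_max=10, eta_min=5e-8, T_mult=1),
						   loss_fn='ce')
	trainer.fit(train_dataloader, verbose=True, print_every=1)
	return trainer.predict(test_dataloader)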