[dff9e0]: / __pycache__ / losses.cpython-310.pyc

import torch
import torch.nn.functional as F
from torch import nn
from torch.autograd import Variable
from torch.nn import MSELoss, SmoothL1Loss, L1Loss


def compute_per_channel_dice(input, target, epsilon=1e-6, weight=None):
    """
    Computes DiceCoefficient as defined in https://arxiv.org/abs/1606.04797 given a multi channel input and target.
    Assumes the input is a normalized probability, e.g. a result of Sigmoid or Softmax function.

    Args:
         input (torch.Tensor): NxCxSpatial input tensor
         target (torch.Tensor): NxCxSpatial target tensor
         epsilon (float): prevents division by zero
         weight (torch.Tensor): Cx1 tensor of weight per channel/class
    """
    # input and target shapes must match
    assert input.size() == target.size(), "'input' and 'target' must have the same shape"

    input = flatten(input)
    target = flatten(target)
    target = target.float()

    # per-channel intersection, optionally weighted per class
    intersect = (input * target).sum(-1)
    if weight is not None:
        intersect = weight * intersect

    # denominator uses the squared-terms variant from the V-Net paper
    denominator = (input * input).sum(-1) + (target * target).sum(-1)
    return 2 * (intersect / denominator.clamp(min=epsilon))
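
# Illustrative sanity check (sketch): a perfect prediction gives a per-channel
# Dice coefficient of 1 for every channel, e.g.
#   probs = torch.ones(1, 2, 4, 4)
#   compute_per_channel_dice(probs, torch.ones(1, 2, 4, 4))  # -> tensor([1., 1.])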


class _MaskingLossWrapper(nn.Module):
    """
    Loss wrapper which prevents the gradient of the loss to be computed where target is equal to `ignore_index`.
    """

    def __init__(self, loss, ignore_index):
        super(_MaskingLossWrapper, self).__init__()
        assert ignore_index is not None, 'ignore_index cannot be None'
        self.loss = loss
        self.ignore_index = ignore_index

    def forward(self, input, target):
        mask = target.clone().ne_(self.ignore_index)
        mask.requires_grad = False

        # mask out input/target so that the gradient is zero where target == ignore_index
        input = input * mask
        target = target * mask

        return self.loss(input, target)


class SkipLastTargetChannelWrapper(nn.Module):
    """
    Loss wrapper which removes additional target channel
    """

    def __init__(self, loss, squeeze_channel=False):
        super(SkipLastTargetChannelWrapper, self).__init__()
        self.loss = loss
        self.squeeze_channel = squeeze_channel

    def forward(self, input, target):
        assert target.size(1) > 1, 'Target tensor has a singleton channel dimension, cannot remove channel'

        # skip the last target channel
        target = target[:, :-1, ...]

        if self.squeeze_channel:
            # squeeze the singleton channel dimension
            target = torch.squeeze(target, dim=1)
        return self.loss(input, target)


class _AbstractDiceLoss(nn.Module):
    """
    Base class for different implementations of Dice loss.
    """

    def __init__(self, weight=None, normalization='sigmoid'):
        super(_AbstractDiceLoss, self).__init__()
        self.register_buffer('weight', weight)
        # The network output is expected to be normalized to [0, 1] probabilities:
        # 'sigmoid' treats channels independently, 'softmax' normalizes across channels,
        # 'none' assumes the input already contains probabilities.
        assert normalization in ['sigmoid', 'softmax', 'none']
        if normalization == 'sigmoid':
            self.normalization = nn.Sigmoid()
        elif normalization == 'softmax':
            self.normalization = nn.Softmax(dim=1)
        else:
            self.normalization = lambda x: x

    def dice(self, input, target, weight):
        # actual Dice score computation; to be implemented by the subclass
        raise NotImplementedError

    def forward(self, input, target):
        # get probabilities from logits
        input = self.normalization(input)

        # compute per-channel Dice coefficient
        per_channel_dice = self.dice(input, target, weight=self.weight)

        # average Dice score across all channels/classes
        return 1. - torch.mean(per_channel_dice)


class DiceLoss(_AbstractDiceLoss):
    """Computes Dice Loss according to https://arxiv.org/abs/1606.04797.
    For multi-class segmentation `weight` parameter can be used to assign different weights per class.
    The input to the loss function is assumed to be a logit and will be normalized by the Sigmoid function.
    """

    def __init__(self, weight=None, normalization='sigmoid'):
        super().__init__(weight, normalization)

    def dice(self, input, target, weight):
        return compute_per_channel_dice(input, target, weight=self.weight)


class GeneralizedDiceLoss(_AbstractDiceLoss):
    """Computes Generalized Dice Loss (GDL) as described in https://arxiv.org/pdf/1707.03237.pdf.
    """

    def __init__(self, normalization='sigmoid', epsilon=1e-6):
        super().__init__(weight=None, normalization=normalization)
        self.epsilon = epsilon

    def dice(self, input, target, weight):
        assert input.size() == target.size(), "'input' and 'target' must have the same shape"

        input = flatten(input)
        target = flatten(target)
        target = target.float()

        if input.size(0) == 1:
            # GDL needs at least two channels; for a single-channel prediction put the
            # foreground and background probabilities into separate channels
            input = torch.cat((input, 1 - input), dim=0)
            target = torch.cat((target, 1 - target), dim=0)

        # GDL weighting: the contribution of each label is corrected by the inverse of its volume
        w_l = target.sum(-1)
        w_l = 1 / (w_l * w_l).clamp(min=self.epsilon)
        w_l.requires_grad = False

        intersect = (input * target).sum(-1)
        intersect = intersect * w_l

        denominator = (input + target).sum(-1)
        denominator = (denominator * w_l).clamp(min=self.epsilon)

        return 2 * (intersect.sum() / denominator.sum())


class BCEDiceLoss(nn.Module):
    """Linear combination of BCE and Dice losses"""

    def __init__(self, alpha, beta):
        super(BCEDiceLoss, self).__init__()
        self.alpha = alpha
        self.bce = nn.BCEWithLogitsLoss()
        self.beta = beta
        self.dice = DiceLoss()

    def forward(self, input, target):
        return self.alpha * self.bce(input, target) + self.beta * self.dice(input, target)


class WeightedCrossEntropyLoss(nn.Module):
    """WeightedCrossEntropyLoss (WCE) as described in https://arxiv.org/pdf/1707.03237.pdf
    """

    def __init__(self, ignore_index=-1):
        super(WeightedCrossEntropyLoss, self).__init__()
        self.ignore_index = ignore_index

    def forward(self, input, target):
        weight = self._class_weights(input)
        return F.cross_entropy(input, target, weight=weight, ignore_index=self.ignore_index)

    @staticmethod
    def _class_weights(input):
        # normalize the input first
        input = F.softmax(input, dim=1)
        flattened = flatten(input)
        nominator = (1. - flattened).sum(-1)
        denominator = flattened.sum(-1)
        class_weights = Variable(nominator / denominator, requires_grad=False)
        return class_weights


class WeightedSmoothL1Loss(nn.SmoothL1Loss):
    def __init__(self, threshold, initial_weight, apply_below_threshold=True):
        super().__init__(reduction='none')
        self.threshold = threshold
        self.apply_below_threshold = apply_below_threshold
        self.weight = initial_weight

    def forward(self, input, target):
        l1 = super().forward(input, target)

        if self.apply_below_threshold:
            mask = target < self.threshold
        else:
            mask = target >= self.threshold

        l1[mask] = l1[mask] * self.weight

        return l1.mean()


def flatten(tensor):
    """Flattens a given tensor such that the channel axis is first.
    The shapes are transformed as follows:
       (N, C, D, H, W) -> (C, N * D * H * W)
    """
    # number of channels
    C = tensor.size(1)
    # new axis order: channel axis first
    axis_order = (1, 0) + tuple(range(2, tensor.dim()))
    # Transpose: (N, C, D, H, W) -> (C, N, D, H, W)
    transposed = tensor.permute(axis_order)
    # Flatten: (C, N, D, H, W) -> (C, N * D * H * W)
    return transposed.contiguous().view(C, -1)


def get_loss_criterion(config):
    """
    Returns the loss function based on provided configuration
    :param config: (dict) a top level configuration object containing the 'loss' key
    :return: an instance of the loss function
    """
    assert 'loss' in config, 'Could not find loss function configuration'
    loss_config = config['loss']
    name = loss_config.pop('name')

    ignore_index = loss_config.pop('ignore_index', None)
    skip_last_target = loss_config.pop('skip_last_target', False)
    weight = loss_config.pop('weight', None)

    if weight is not None:
        # convert to a tensor on the configured device if necessary
        weight = torch.tensor(weight).to(config['device'])

    pos_weight = loss_config.pop('pos_weight', None)
    if pos_weight is not None:
        # convert to a tensor on the configured device if necessary
        pos_weight = torch.tensor(pos_weight).to(config['device'])

    loss = _create_loss(name, loss_config, weight, ignore_index, pos_weight)

    if not (ignore_index is None or name in ['CrossEntropyLoss', 'WeightedCrossEntropyLoss']):
        # use the masking wrapper only for losses which cannot handle 'ignore_index' themselves
        loss = _MaskingLossWrapper(loss, ignore_index)

    if skip_last_target:
        loss = SkipLastTargetChannelWrapper(loss, loss_config.get('squeeze_channel', False))

    return loss


def _create_loss(name, loss_config, weight, ignore_index, pos_weight):
    if name == 'BCEWithLogitsLoss':
        return nn.BCEWithLogitsLoss(pos_weight=pos_weight)
    elif name == 'BCEDiceLoss':
        # note: the config key for the BCE coefficient is spelled 'alphs'
        alpha = loss_config.get('alphs', 1.)
        beta = loss_config.get('beta', 1.)
        return BCEDiceLoss(alpha, beta)
    elif name == 'CrossEntropyLoss':
        if ignore_index is None:
            ignore_index = -100  # default 'ignore_index' of nn.CrossEntropyLoss
        return nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index)
    elif name == 'WeightedCrossEntropyLoss':
        if ignore_index is None:
            ignore_index = -100  # default 'ignore_index' of nn.CrossEntropyLoss
        return WeightedCrossEntropyLoss(ignore_index=ignore_index)
    elif name == 'PixelWiseCrossEntropyLoss':
        # referenced by the configuration interface but not defined or imported in this module
        return PixelWiseCrossEntropyLoss(class_weights=weight, ignore_index=ignore_index)
    elif name == 'GeneralizedDiceLoss':
        normalization = loss_config.get('normalization', 'sigmoid')
        return GeneralizedDiceLoss(normalization=normalization)
    elif name == 'DiceLoss':
        normalization = loss_config.get('normalization', 'sigmoid')
        return DiceLoss(weight=weight, normalization=normalization)
    elif name == 'MSELoss':
        return MSELoss()
    elif name == 'SmoothL1Loss':
        return SmoothL1Loss()
    elif name == 'L1Loss':
        return L1Loss()
    elif name == 'WeightedSmoothL1Loss':
        return WeightedSmoothL1Loss(threshold=loss_config['threshold'],
                                    initial_weight=loss_config['initial_weight'],
                                    apply_below_threshold=loss_config.get('apply_below_threshold', True))
    else:
        raise RuntimeError(f"Unsupported loss function: '{name}'")