How to use the torchattacks.attack.Attack class in torchattacks

To help you get started, we’ve selected a few torchattacks examples, based on popular ways it is used in public projects.

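Every attack in the library subclasses torchattacks.attack.Attack, which stores the model and device and makes the attack callable. Before the individual snippets, here is a minimal usage sketch; the toy model, tensors, and parameter values are illustrative, and newer releases of torchattacks may name some arguments differently:

import torch
import torch.nn as nn
import torchattacks

# Toy classifier standing in for a real trained model (illustrative only).
model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10)).eval()

images = torch.rand(4, 1, 28, 28)      # batch of images scaled to [0, 1]
labels = torch.randint(0, 10, (4,))    # ground-truth class indices

# Attack subclasses are all used the same way: construct, then call.
atk = torchattacks.PGD(model, eps=0.3, alpha=2/255, iters=40)
adv_images = atk(images, labels)       # __call__ dispatches to the attack's forward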

github Harry24k / adversarial-attacks-pytorch / torchattacks / attacks / ifgsm.py View on GitHub
import torch
import torch.nn as nn

from ..attack import Attack

class IFGSM(Attack):
    """
    I-FGSM attack in the paper 'Adversarial Examples in the Physical World'
    [https://arxiv.org/abs/1607.02533]

    Arguments:
        model (nn.Module): a model to attack.
        eps (float): epsilon in the paper. (DEFAULT: 4/255)
        alpha (float): alpha in the paper. (DEFAULT: 1/255)
        iters (int): max iterations. (DEFAULT: 0)
    
    .. note:: If iters is 0, the number of iterations is chosen automatically with the formula from the paper (see the sketch after this snippet).
    """
    def __init__(self, model, eps=4/255, alpha=1/255, iters=0):
        super(IFGSM, self).__init__("IFGSM", model)
        self.eps = eps
        self.alpha = alpha
        self.iters = iters
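
The iteration rule referenced in the note comes from Kurakin et al.: the number of steps is min(eps + 4, 1.25 * eps), with eps measured in pixel units (0-255). A hypothetical helper illustrating that rule for an eps given in [0, 1] (default_iters is not part of the library):

def default_iters(eps: float) -> int:
    # Kurakin et al. (2017): iters = min(eps + 4, 1.25 * eps), eps in pixel units.
    pixel_eps = eps * 255
    return int(min(pixel_eps + 4, 1.25 * pixel_eps))

print(default_iters(4 / 255))  # -> 5 for the default eps of 4/255
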
github Harry24k / adversarial-attacks-pytorch / torchattacks / attacks / iterll.py View on GitHub
import torch
import torch.nn as nn

from ..attack import Attack

class IterLL(Attack):
    """
    Iterative least-likely class attack in the paper 'Adversarial Examples in the Physical World'
    [https://arxiv.org/abs/1607.02533]

    Arguments:
        model (nn.Module): a model to attack.
        eps (float): epsilon in the paper. (DEFAULT: 4/255)
        alpha (float): alpha in the paper. (DEFAULT: 1/255)
        iters (int): max iterations. (DEFAULT: 0)
    
    .. note:: If iters is 0, the number of iterations is chosen automatically with the formula from the paper (the same rule sketched under I-FGSM above).
    """
    def __init__(self, model, eps=4/255, alpha=1/255, iters=0):
        super(IterLL, self).__init__("IterLL", model)
        self.eps = eps
        self.alpha = alpha
        self.iters = iters
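
IterLL drives the image toward the class the model rates least likely, rather than away from the true label. A minimal sketch of how such a target is usually picked; the toy model and tensors are illustrative, not the library's code:

import torch
import torch.nn as nn

model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10)).eval()
images = torch.rand(4, 1, 28, 28)

with torch.no_grad():
    logits = model(images)
target_labels = logits.argmin(dim=1)  # least-likely class per image
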
github Harry24k / adversarial-attacks-pytorch / torchattacks / attacks / cw.py View on GitHub
import torch
import torch.nn as nn
import torch.optim as optim

from ..attack import Attack

class CW(Attack):
    """
    CW(L2) attack in the paper 'Towards Evaluating the Robustness of Neural Networks'
    [https://arxiv.org/abs/1608.04644]

    Arguments:
        model (nn.Module): a model to attack.
        targeted (bool): (DEFAULT: False)
            True: move the image closer to the given target label.
            False: move the image away from the correct label.
        c (float): c in the paper. (DEFAULT: 1e-4)
        kappa (float): kappa (also written as 'confidence') in the paper. (DEFAULT: 0)
        iters (int): max iterations. (DEFAULT: 1000)
        lr (float): learning rate of the optimizer. (DEFAULT: 0.01)

    .. note:: Several parts of the paper and of other implementations are NOT IMPLEMENTED here (a sketch of the paper's margin function f follows this snippet).
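
The attack minimizes the perturbation size plus c times the paper's margin function f, where f(x') = max(max_{i != t} Z(x')_i - Z(x')_t, -kappa) for logits Z and target class t. A sketch of that margin term; cw_f is an illustrative helper, not the library's code:

import torch

def cw_f(logits: torch.Tensor, target: torch.Tensor, kappa: float = 0.0) -> torch.Tensor:
    one_hot = torch.nn.functional.one_hot(target, logits.size(1)).bool()
    target_logit = logits[one_hot]                                   # Z(x')_t
    best_other = logits.masked_fill(one_hot, float('-inf')).max(dim=1).values
    # Targeted form: push the target logit above all others by at least kappa.
    return torch.clamp(best_other - target_logit, min=-kappa)
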
github Harry24k / adversarial-attacks-pytorch / torchattacks / attacks / fgsm.py View on GitHub
import torch
import torch.nn as nn

from ..attack import Attack

class FGSM(Attack):
    """
    FGSM attack in the paper 'Explaining and harnessing adversarial examples'
    [https://arxiv.org/abs/1412.6572]

    Arguments:
        model (nn.Module): a model to attack.
        eps (float): epsilon in the paper. (DEFAULT: 0.007)
    
    """
    def __init__(self, model, eps=0.007):
        super(FGSM, self).__init__("FGSM", model)
        self.eps = eps
    
    def forward(self, images, labels):
        images = images.to(self.device)
        labels = labels.to(self.device)
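        # --- The excerpt is cut off here. What follows is a hedged sketch of how
        # an FGSM forward pass typically finishes (the single signed-gradient step
        # from the paper); the exact upstream code may differ. ---
        loss = nn.CrossEntropyLoss()

        images.requires_grad = True
        outputs = self.model(images)
        cost = loss(outputs, labels)

        # One FGSM step: perturb each pixel by eps in the gradient-sign direction,
        # then clamp back to the valid [0, 1] image range.
        grad = torch.autograd.grad(cost, images)[0]
        adv_images = images + self.eps * grad.sign()
        adv_images = torch.clamp(adv_images, min=0, max=1).detach()

        return adv_images
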
github Harry24k / adversarial-attacks-pytorch / torchattacks / attacks / pgd.py View on GitHub
import torch
import torch.nn as nn

from ..attack import Attack

class PGD(Attack):
    """
    PGD attack in the paper 'Towards Deep Learning Models Resistant to Adversarial Attacks'
    [https://arxiv.org/abs/1706.06083]

    Arguments:
        model (nn.Module): a model to attack.
        eps (float): epsilon in the paper. (DEFAULT: 0.3)
        alpha (float): alpha in the paper. (DEFAULT: 2/255)
        iters (int): max iterations. (DEFAULT: 40)
        
    """
    def __init__(self, model, eps=0.3, alpha=2/255, iters=40):
        super(PGD, self).__init__("PGD", model)
        self.eps = eps
        self.alpha = alpha
        self.iters = iters
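
Each PGD iteration takes a signed-gradient step of size alpha and then projects the result back into the eps-ball around the original image (and into the valid pixel range). A sketch of one such iteration; pgd_step is an illustrative helper, not the library's forward:

import torch
import torch.nn as nn

def pgd_step(model, images, adv_images, labels, eps, alpha):
    adv_images = adv_images.clone().detach().requires_grad_(True)
    cost = nn.CrossEntropyLoss()(model(adv_images), labels)
    grad = torch.autograd.grad(cost, adv_images)[0]
    # Ascent step, then projection onto the L-inf eps-ball and the [0, 1] range.
    adv_images = adv_images + alpha * grad.sign()
    delta = torch.clamp(adv_images - images, min=-eps, max=eps)
    return torch.clamp(images + delta, min=0, max=1).detach()

Looping this step iters times, starting from the clean image (or a random point inside the eps-ball), gives the full attack.
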
github Harry24k / adversarial-attacks-pytorch / torchattacks / attacks / apgd.py View on GitHub
import torch
import torch.nn as nn

from ..attack import Attack

class APGD(Attack):
    """
    Comment on "Adv-BNN: Improved Adversarial Defense through Robust Bayesian Neural Network"
    [https://arxiv.org/abs/1907.00895]

    Arguments:
        model (nn.Module): a model to attack.
        eps (float): epsilon in the PGD paper. (DEFAULT: 0.3)
        alpha (float): alpha in the PGD paper. (DEFAULT: 2/255)
        iters (int): max iterations. (DEFAULT: 40)
        sampling (int): the number of sampled models. (DEFAULT: 10)
        
    """
    def __init__(self, model, eps=0.3, alpha=2/255, iters=40, sampling=10):
        super(APGD, self).__init__("APGD", model)
        self.eps = eps
        self.alpha = alpha
        self.iters = iters
        self.sampling = sampling
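
APGD targets stochastic (Bayesian) networks by averaging the gradient over sampling forward passes before each PGD step, an expectation-over-models estimate. A sketch of that averaging; averaged_grad is an illustrative helper, assuming the model's forward pass is stochastic:

import torch

def averaged_grad(model, adv_images, labels, loss_fn, sampling):
    grad = torch.zeros_like(adv_images)
    for _ in range(sampling):  # each pass samples a new realization of the model
        adv = adv_images.clone().detach().requires_grad_(True)
        cost = loss_fn(model(adv), labels)
        grad += torch.autograd.grad(cost, adv)[0]
    return grad / sampling
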
github Harry24k / adversarial-attacks-pytorch / torchattacks / attacks / rfgsm.py View on GitHub
import torch
import torch.nn as nn

from ..attack import Attack

class RFGSM(Attack):
    """
    R+FGSM attack in the paper 'Ensemble Adversarial Training: Attacks and Defenses'
    [https://arxiv.org/abs/1705.07204]

    Arguments:
        model (nn.Module): a model to attack.
        eps (float): epsilon in the paper. (DEFAULT: 16/255)
        alpha (float): alpha in the paper. (DEFAULT: 8/255)
        iters (int): max iterations. (DEFAULT: 1)
    
    """
    def __init__(self, model, eps=16/255, alpha=8/255, iters=1):
        super(RFGSM, self).__init__("RFGSM", model)
        self.eps = eps
        self.alpha = alpha
        self.iters = iters
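
R+FGSM prepends a random step to FGSM: first move the image by alpha in a random sign direction, then spend the remaining eps - alpha budget on the usual signed-gradient step. A sketch of the update from Tramèr et al.; rfgsm_step is an illustrative helper, not the library's forward:

import torch
import torch.nn as nn

def rfgsm_step(model, images, labels, eps, alpha):
    # Random step: start slightly away from the clean image, as in the paper.
    adv = (images + alpha * torch.randn_like(images).sign()).detach().requires_grad_(True)
    cost = nn.CrossEntropyLoss()(model(adv), labels)
    grad = torch.autograd.grad(cost, adv)[0]
    # Remaining budget goes to the signed-gradient step; clamp to valid range.
    return torch.clamp(adv + (eps - alpha) * grad.sign(), min=0, max=1).detach()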