How to use the torchattacks.torchattacks.Attacks class in torchattacks

To help you get started, we’ve selected a few torchattacks examples, based on popular ways it is used in public projects.
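As the snippets below show, every attack subclasses Attacks, receives its hyperparameters in the constructor, and is then called directly on a batch of images and labels. Here is a minimal, self-contained sketch of that usage pattern; the toy model, the random data, and the choice of FGSM with its documented default eps are illustrative only, and the top-level import assumes the attack classes are re-exported from the package root as in the repo's __init__.py.

import torch
import torch.nn as nn
import torchattacks

# Toy classifier and random data purely for illustration; any nn.Module that
# maps images with pixel values in [0, 1] to class logits works the same way.
model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10)).eval()
images = torch.rand(8, 3, 32, 32)
labels = torch.randint(0, 10, (8,))

# Construct an attack with the model and its hyperparameters, then call it
# on (images, labels) to obtain the adversarial batch.
atk = torchattacks.FGSM(model, eps=0.007)
adv_images = atk(images, labels)
print(model(adv_images).argmax(dim=1))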


github Harry24k / adversarial-attacks-pytorch / torchattacks / torchattacks.py View on Github
            cost = loss(outputs, labels).to(self.device)
            cost.backward()

            adv_images = images + self.alpha*images.grad.sign()
            
            # Project adv_images back into the eps-ball around the original images
            # (elementwise max with images-eps, min with images+eps), then clamp
            # to the valid pixel range [0, 1].
            a = torch.clamp(images - self.eps, min=0)
            b = (adv_images >= a).float()*adv_images + (a > adv_images).float()*a
            c = (b > images+self.eps).float()*(images+self.eps) + (images+self.eps >= b).float()*b
            images = torch.clamp(c, max=1).detach_()

        adv_images = images

        return adv_images
        
        
class IterLL(Attacks):
    """
    iterative least-likely class attack in the paper 'Adversarial Examples in the Physical World'
    [https://arxiv.org/abs/1607.02533]

    Arguments:
        model (nn.Module): a model to attack.
        eps (float): epsilon in the paper. (DEFAULT: 4/255)
        alpha (float): alpha in the paper. (DEFAULT: 1/255)
        iters (int): max iterations. (DEFAULT: 0)
    
    .. note:: If iters is 0, the number of iterations is chosen automatically using the formula in the paper.
    """
    def __init__(self, model, eps=4/255, alpha=1/255, iters=0):
        super(IterLL, self).__init__("IterLL", model)
        self.eps = eps
        self.alpha = alpha
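The a/b/c lines in the snippet above project the perturbed images back into the eps-ball around the original images and then clamp them to the valid pixel range. A sketch of an equivalent, more direct formulation (this helper is illustrative, not part of the library):

import torch

def project(adv_images: torch.Tensor, images: torch.Tensor, eps: float) -> torch.Tensor:
    """Clip adv_images into [images - eps, images + eps] intersected with [0, 1]."""
    lower = torch.clamp(images - eps, min=0)                     # 'a' in the snippet above
    upper = images + eps
    projected = torch.min(torch.max(adv_images, lower), upper)   # 'b' and 'c' combined
    return torch.clamp(projected, max=1).detach()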
github Harry24k / adversarial-attacks-pytorch / torchattacks / torchattacks.py View on Github
            optimizer.step()

            # Stop early if the loss has stopped decreasing (checked every iters//10 steps).
            if step % (self.iters//10) == 0:
                if cost > prev :
                    print('CW Attack is stopped due to CONVERGENCE....')
                    return a
                prev = cost
            
            print('- CW Attack Progress : %2.2f %%        ' %((step+1)/self.iters*100), end='\r')
            
        adv_images = (1/2*(nn.Tanh()(w) + 1)).detach_()

        return adv_images
        
class PGD(Attacks):
    """
    PGD attack in the paper 'Towards Deep Learning Models Resistant to Adversarial Attacks'
    [https://arxiv.org/abs/1706.06083]

    Arguments:
        model (nn.Module): a model to attack.
        eps (float): epsilon in the paper. (DEFAULT: 0.3)
        alpha (float): alpha in the paper. (DEFAULT: 2/255)
        iters (int): max iterations. (DEFAULT: 40)
        
    """
    def __init__(self, model, eps=0.3, alpha=2/255, iters=40):
        super(PGD, self).__init__("PGD", model)
        self.eps = eps
        self.alpha = alpha
        self.iters = iters
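PGD repeats a signed-gradient step of size alpha and, after every step, projects the result back into the eps-ball of the original images. A minimal usage sketch for the class above, reusing the model/images/labels from the first example (the eps, alpha, and iters values are illustrative):

from torchattacks import PGD   # assumes the top-level re-export from the repo's __init__.py

pgd = PGD(model, eps=0.3, alpha=2/255, iters=40)   # model/images/labels as in the first example
adv_images = pgd(images, labels)                   # __call__ runs the projected-gradient loop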
github Harry24k / adversarial-attacks-pytorch / torchattacks / torchattacks.py View on Github
            outputs = model(adv_images)

            _, predicted = torch.max(outputs.data, 1)

            total += labels.size(0)
            correct += (predicted == labels.to(self.device)).sum()
            
            print('- Evaluation Progress : %2.2f %%        ' %((step+1)/total_batch*100), end='\r')

        accuracy = 100 * float(correct) / total
        print('\n- Accuracy of model : %f %%' % (accuracy))

        return accuracy
    '''
    
class FGSM(Attacks):
    """
    FGSM attack in the paper 'Explaining and Harnessing Adversarial Examples'
    [https://arxiv.org/abs/1412.6572]

    Arguments:
        model (nn.Module): a model to attack.
        eps (float): epsilon in the paper. (DEFAULT: 0.007)
    
    """
    def __init__(self, model, eps=0.007):
        super(FGSM, self).__init__("FGSM", model)
        self.eps = eps
    
    def __call__(self, images, labels):
        images = images.to(self.device)
        labels = labels.to(self.device)
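The evaluation loop at the top of this snippet reports accuracy on adversarial examples. A compact sketch of the same idea, assuming a DataLoader named test_loader and an attack instance atk constructed as in the first example:

correct, total = 0, 0
for images, labels in test_loader:
    adv_images = atk(images, labels)                  # generate the adversarial batch
    predicted = model(adv_images).argmax(dim=1)
    total += labels.size(0)
    correct += (predicted == labels).sum().item()
print('Robust accuracy: %.2f %%' % (100 * correct / total))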
github Harry24k / adversarial-attacks-pytorch / torchattacks / torchattacks.py View on Github
            self.model.zero_grad()
            cost = loss(outputs, labels).to(self.device)
            cost.backward()

            # Gradient *descent* step (note the minus sign), pushing the loss down.
            adv_images = images - self.alpha*images.grad.sign()

            # Same eps-ball projection and [0, 1] clamp as in the first snippet above.
            a = torch.clamp(images - self.eps, min=0)
            b = (adv_images >= a).float()*adv_images + (a > adv_images).float()*a
            c = (b > images+self.eps).float()*(images+self.eps) + (images+self.eps >= b).float()*b
            images = torch.clamp(c, max=1).detach_()

        adv_images = images

        return adv_images
        
class RFGSM(Attacks):
    """
    R+FGSM attack in the paper 'Ensemble Adversarial Training: Attacks and Defenses'
    [https://arxiv.org/abs/1705.07204]

    Arguments:
        model (nn.Module): a model to attack.
        eps (float): epsilon in the paper. (DEFAULT: 16/255)
        alpha (float): alpha in the paper. (DEFAULT: 8/255)
        iters (int): max iterations. (DEFAULT: 1)
    
    """
    def __init__(self, model, eps=16/255, alpha=8/255, iters=1):
        super(RFGSM, self).__init__("RFGSM", model)
        self.eps = eps
        self.alpha = alpha
        self.iters = iters
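As described in 'Ensemble Adversarial Training: Attacks and Defenses', R+FGSM first takes a small random step of size alpha and then a gradient-sign step with the remaining budget eps - alpha. A standalone sketch of that update (written here for illustration; it is not the library's code):

import torch

def rfgsm_step(model, loss_fn, images, labels, eps=16/255, alpha=8/255):
    """One R+FGSM update: a random sign step of size alpha, then a gradient-sign step of size eps - alpha."""
    # Random step away from the original images, kept inside the valid pixel range.
    x = torch.clamp(images + alpha * torch.randn_like(images).sign(), 0, 1).detach()
    x.requires_grad = True
    cost = loss_fn(model(x), labels)
    grad = torch.autograd.grad(cost, x)[0]
    # Gradient-sign step with the remaining perturbation budget.
    return torch.clamp(x + (eps - alpha) * grad.sign(), 0, 1).detach()

# e.g. adv_images = rfgsm_step(model, nn.CrossEntropyLoss(), images, labels)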
github Harry24k / adversarial-attacks-pytorch / torchattacks / torchattacks.py View on Github
        loss = nn.CrossEntropyLoss()
        
        images.requires_grad = True
        outputs = self.model(images)

        self.model.zero_grad()
        cost = loss(outputs, labels).to(self.device)
        cost.backward()

        adv_images = images + self.eps*images.grad.sign()
        adv_images = torch.clamp(adv_images, min=0, max=1).detach_()

        return adv_images
        
        
class IFGSM(Attacks):
    """
    I-FGSM attack in the paper 'Adversarial Examples in the Physical World'
    [https://arxiv.org/abs/1607.02533]

    Arguments:
        model (nn.Module): a model to attack.
        eps (float): epsilon in the paper. (DEFAULT: 4/255)
        alpha (float): alpha in the paper. (DEFAULT: 1/255)
        iters (int): max iterations. (DEFAULT: 0)
    
    .. note:: If iters is 0, the number of iterations is chosen automatically using the formula in the paper.
    """
    def __init__(self, model, eps=4/255, alpha=1/255, iters=0):
        super(IFGSM, self).__init__("IFGSM", model)
        self.eps = eps
        self.alpha = alpha
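The note above says that passing iters=0 falls back to the formula in the paper. In 'Adversarial Examples in the Physical World' the number of iterations is min(eps + 4, 1.25*eps) with eps measured on the 0-255 pixel scale; a sketch of that computation for an eps given in [0, 1] (this helper is illustrative, not taken from the library):

def auto_iters(eps: float) -> int:
    """Iteration count suggested by Kurakin et al. for eps expressed in [0, 1]."""
    return int(min(eps * 255 + 4, 1.25 * eps * 255))

print(auto_iters(4/255))   # -> 5 for the default eps of 4/255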
github Harry24k / adversarial-attacks-pytorch / torchattacks / torchattacks.py View on Github
        for i in range(self.iters):
            images.requires_grad = True
            outputs = self.model(images)

            self.model.zero_grad()
            cost = loss(outputs, labels).to(self.device)
            cost.backward()

            adv_images = images + (self.eps-self.alpha)*images.grad.sign()
            images = torch.clamp(adv_images, min=0, max=1).detach_()

        adv_images = images
        
        return adv_images
    
class CW(Attacks):
    """
    CW(L2) attack in the paper 'Towards Evaluating the Robustness of Neural Networks'
    [https://arxiv.org/abs/1608.04644]

    Arguments:
        model (nn.Module): a model to attack.
        targeted (bool): (DEFAULT: False)
            True - push the adversarial image towards the given target label.
            False - push the adversarial image away from the correct label.
        c (float): c in the paper. (DEFAULT: 1e-4)
        kappa (float): kappa (also written as 'confidence') in the paper. (DEFAULT: 0)
        iters (int): max iterations. (DEFAULT: 1000)
        lr (float): learning rate of the optimizer. (DEFAULT: 0.01)
        
    .. note:: Several parts of the paper / other implementations are NOT IMPLEMENTED here.
    (1) Binary search for c : it takes too much time.