# AutoFMN Algorithm
## def FMN_plus(norm, num_steps, starting_point):
##### Given an epsilon, the attack makes the adversarial update following the rule:
```
with torch.no_grad():
    # keep the best adversarial example found so far
    # (is_both: mask of points that are adversarial and improve the objective)
    best_adv = torch.where(batch_view(is_both), x_adv.detach(), best_adv)
    x_adv = x_adv.detach()
    grad2 = x_adv - x_adv_old   # previous displacement, used as a momentum term
    x_adv_old = x_adv.clone()

    a = 0.75 if i > 0 else 1.0  # momentum coefficient (plain step at the first iteration)

    if self.norm == 'Linf':
        # gradient step, then projection onto the Linf eps-ball and the [0, 1] box
        x_adv_1 = x_adv + step_size * torch.sign(grad)
        x_adv_1 = torch.clamp(torch.min(torch.max(x_adv_1, x - eps), x + eps), 0.0, 1.0)
        # combine with the momentum term and project again
        x_adv_1 = torch.clamp(torch.min(torch.max(
            x_adv + (x_adv_1 - x_adv) * a + grad2 * (1 - a), x - eps), x + eps), 0.0, 1.0)
    elif self.norm == 'L2':
        # step along the L2-normalized gradient
        x_adv_1 = x_adv + step_size * grad / ((grad ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12)
        # projection onto the L2 eps-ball and the [0, 1] box
        x_adv_1 = torch.clamp(x + (x_adv_1 - x) / (((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12) * torch.min(
            eps * torch.ones(x.shape).to(self.device).detach(),
            ((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt()), 0.0, 1.0)
        # combine with the momentum term and project again
        x_adv_1 = x_adv + (x_adv_1 - x_adv) * a + grad2 * (1 - a)
        x_adv_1 = torch.clamp(x + (x_adv_1 - x) / (((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12) * torch.min(
            eps * torch.ones(x.shape).to(self.device).detach(),
            ((x_adv_1 - x) ** 2).sum(dim=(1, 2, 3), keepdim=True).sqrt() + 1e-12), 0.0, 1.0)

    x_adv = x_adv_1
```
This update rule is taken from the AutoPGD code used in Adaptive AutoAttack, while epsilon itself is updated at each iteration with the linear extrapolation rule from vanilla FMN, sketched below.
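
For reference, here is a minimal sketch of that vanilla-FMN epsilon schedule, following the Fast Minimum-Norm attack; every variable name below is an illustrative assumption, not an identifier from the AutoFMN code:

```
import torch

def fmn_epsilon_update(epsilon, delta_norm, loss, grad_norm,
                       is_adv, adv_found, best_norm, gamma):
    # delta_norm: current perturbation norm, loss: margin loss at x + delta,
    # grad_norm:  dual norm of the loss gradient, gamma: annealed step factor,
    # is_adv:     mask of samples that are currently adversarial,
    # adv_found:  mask of samples for which any adversarial example was found,
    # best_norm:  smallest perturbation norm found so far per sample.

    # Linear extrapolation: first-order estimate of the distance still
    # needed to cross the decision boundary.
    distance_to_boundary = loss.abs() / (grad_norm + 1e-12)
    epsilon = torch.where(
        is_adv,
        # adversarial: shrink the constraint toward the best norm found
        torch.minimum(epsilon * (1 - gamma), best_norm),
        torch.where(
            adv_found,
            # not adversarial, but one was found before: relax slightly
            epsilon * (1 + gamma),
            # no adversarial example yet: extrapolate past the boundary
            delta_norm + distance_to_boundary,
        ),
    )
    return epsilon
```

In short: when the current point is adversarial the constraint shrinks toward the best norm found, otherwise it is extrapolated linearly past the estimated decision boundary.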
## def autoFMN(max_steps=500, search_steps=10, norm='Linf'):
```
# hyperparameter search: run a short FMN_plus attack under every configuration
for each configuration of (loss, starting_point, initial_step_size,
                           initial_epsilon, step_size_divider):
    FMN_plus(norm=norm, num_steps=search_steps, starting_point=starting_point)
```
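
As a concrete illustration, the sweep could be implemented as a Cartesian product over candidate values. This is only a sketch: the candidate lists and the `evaluate` scorer are hypothetical placeholders, and `fmn_plus` stands for the FMN_plus routine described above:

```
from itertools import product

def search_configurations(norm, search_steps, fmn_plus, evaluate):
    # Hypothetical candidate values (placeholders, not AutoFMN's actual grid).
    losses = ['CE', 'DLR']
    starting_points = ['clean', 'random']
    initial_step_sizes = [1.0, 0.1]
    initial_epsilons = [8 / 255, 16 / 255]
    step_size_dividers = [2.0, 10.0]

    best_config, best_score = None, float('-inf')
    for config in product(losses, starting_points, initial_step_sizes,
                          initial_epsilons, step_size_dividers):
        _, start, _, _, _ = config
        # short run under this configuration, scored by a hypothetical
        # evaluate() helper (e.g. success rate or median perturbation norm)
        adv = fmn_plus(norm=norm, num_steps=search_steps, starting_point=start)
        score = evaluate(adv)
        if score > best_score:
            best_config, best_score = config, score
    return best_config
```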
With the best configuration found during the search, the remaining budget is split into two halves:
```
# first half of the budget: warm-start run in a different norm
starting_point_final = FMN_plus(norm=other_norm,  # a norm different from the given norm
                                num_steps=max_steps // 2,
                                starting_point=best_starting_point_from_search)
# second half: final run in the target norm, starting from the cross-norm result
final_adv_example = FMN_plus(norm=norm, num_steps=max_steps // 2,
                             starting_point=starting_point_final)
```
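
The section does not fix how the cross-norm is chosen; one simple rule, shown here as a hypothetical helper, is to alternate between the two norms FMN_plus supports:

```
def pick_other_norm(norm):
    # hypothetical helper: alternate between the two supported norms
    return 'L2' if norm == 'Linf' else 'Linf'
```

The cross-norm warm start presumably serves to diversify the final attack: the first half of the budget shapes the perturbation under a different geometry before the target-norm run refines it.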