#!/usr/bin/env python
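"""Toy regression example: fit a random fourth-degree polynomial with a
single linear layer trained on polynomial features [x, x^2, x^3, x^4]."""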
from __future__ import print_function
from itertools import count

import torch
import torch.nn.functional as F
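
# Ground truth: random coefficients of the degree-4 polynomial to recover.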
POLY_DEGREE = 4
W_target = torch.randn(POLY_DEGREE, 1) * 5
b_target = torch.randn(1) * 5


def make_features(x):
    """Builds features, i.e. a matrix with columns [x, x^2, x^3, x^4]."""
    x = x.unsqueeze(1)
    return torch.cat([x ** i for i in range(1, POLY_DEGREE + 1)], 1)
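
# e.g. make_features(torch.randn(32)) returns a (32, POLY_DEGREE) matrix.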


def f(x):
    """The ground-truth function that the model approximates."""
    return x.mm(W_target) + b_target.item()


def poly_desc(W, b):
    """Creates a string description of a polynomial."""
    result = 'y = '
    for i, w in enumerate(W):
        result += '{:+.2f} x^{} '.format(w, i + 1)
    result += '{:+.2f}'.format(b[0])
    return result
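
# e.g. poly_desc(torch.tensor([1., 2.]), torch.tensor([3.]))
# gives 'y = +1.00 x^1 +2.00 x^2 +3.00'.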


def get_batch(batch_size=32):
    """Builds a batch, i.e. an (x, f(x)) pair."""
    random = torch.randn(batch_size)
    x = make_features(random)
    y = f(x)
    return x, y
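

# Because the powers of x are precomputed as features, fitting the
# polynomial reduces to ordinary linear regression.
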
# Define model
fc = torch.nn.Linear(W_target.size(0), 1)
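# fc holds POLY_DEGREE weights and one bias, mirroring W_target and b_target.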

for batch_idx in count(1):
    # Get data
    batch_x, batch_y = get_batch()

    # Reset gradients
    fc.zero_grad()

    # Forward pass
    output = F.smooth_l1_loss(fc(batch_x), batch_y)
    loss = output.item()

    # Backward pass
    output.backward()

    # Apply gradients
    for param in fc.parameters():
        param.data.add_(-0.1 * param.grad)
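    # (This manual update is equivalent to stepping torch.optim.SGD with lr=0.1.)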

    # Stop criterion
    if loss < 1e-3:
        break

print('Loss: {:.6f} after {} batches'.format(loss, batch_idx))
print('==> Learned function:\t' + poly_desc(fc.weight.view(-1), fc.bias))
print('==> Actual function:\t' + poly_desc(W_target.view(-1), b_target))