# torchModel.py — polynomial regression example with PyTorch:
# fit a random degree-4 polynomial with a single linear layer.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from itertools import count
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.autograd
import torch.nn.functional as F
from torch.autograd import Variable  # NOTE(review): Variable is deprecated in modern PyTorch; kept for the original API style.

# Fix the RNG seed so the randomly drawn target polynomial is reproducible.
random_state = 5000
torch.manual_seed(random_state)

# Degree of the target polynomial and its random ground-truth parameters.
# NOTE: W_target/b_target are drawn AFTER seeding — statement order matters here.
POLY_DEGREE = 4
W_target = torch.randn(POLY_DEGREE, 1) * 5
b_target = torch.randn(1) * 5
  16. def make_features(x):
  17. """创建一个特征矩阵结构为[x, x^2, x^3, x^4]."""
  18. x = x.unsqueeze(1)
  19. return torch.cat([x ** i for i in range(1, POLY_DEGREE + 1)], 1)
  20. def f(x):
  21. """近似函数."""
  22. return x.mm(W_target) + b_target[0]
  23. def poly_desc(W, b):
  24. """生成多向式描述内容."""
  25. result = 'y = '
  26. for i, w in enumerate(W):
  27. result += '{:+.2f} x^{} '.format(w, len(W) - i)
  28. result += '{:+.2f}'.format(b[0])
  29. return result
  30. def get_batch(batch_size=32):
  31. """创建类似 (x, f(x))的批数据."""
  32. random = torch.from_numpy(np.sort(torch.randn(batch_size)))
  33. x = make_features(random)
  34. y = f(x)
  35. return Variable(x), Variable(y)
# Declare the model: a single linear layer with one weight per polynomial
# feature, i.e. y_hat = x @ W^T + b.
fc = torch.nn.Linear(W_target.size(0), 1)
for batch_idx in count(1):
    # Fetch a fresh random batch of (features, targets).
    batch_x, batch_y = get_batch()
    print(len(batch_x))
    # Reset accumulated gradients.
    fc.zero_grad()
    # Forward pass: smooth L1 (Huber) loss between prediction and target.
    output = F.smooth_l1_loss(fc(batch_x), batch_y)
    loss = output.data.item()
    # Backward pass.
    output.backward()
    # Manual SGD step with a fixed learning rate of 0.1.
    for param in fc.parameters():
        param.data.add_(-0.1 * param.grad.data)
    # Stop condition: once the loss is small enough, plot the fit and exit.
    # Column 0 of the feature matrix is x itself; inputs are sorted by
    # get_batch, so plotting column 0 yields a left-to-right curve.
    if loss < 1e-3:
        plt.cla()
        plt.scatter(batch_x.data.numpy()[:, 0], batch_y.data.numpy()[:, 0], label='real curve', color='b')
        plt.plot(batch_x.data.numpy()[:, 0], fc(batch_x).data.numpy()[:, 0], label='fitting curve', color='r')
        plt.legend()
        plt.show()
        break
# Report the final loss and compare learned vs. ground-truth polynomials.
print('Loss: {:.6f} after {} batches'.format(loss, batch_idx))
print('==> Learned function:\t' + poly_desc(fc.weight.data.view(-1), fc.bias.data))
print('==> Actual function:\t' + poly_desc(W_target.view(-1), b_target))