relay.Expr

    from tvm import relay

    def test_print_expr():
        data = relay.var("data", shape=(1, 1, 5, 5))
        weight = relay.var("weight")
        conv = relay.nn.conv2d(data, weight,
                               kernel_size=(3, 3),
                               padding=(1, 1),
                               channels=1)
        z = relay.nn.relu(data=conv)
        print(z)  # prints the text form of a bare Relay expression

Printing it gives the following output:

    v0.0.4
    free_var %data: Tensor[(1, 1, 5, 5), float32]
    free_var %weight
    %0 = nn.conv2d(%data, %weight, padding=[1, 1], channels=1, kernel_size=[3, 3]);
    nn.relu(%0)
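
Because the expression is not yet wrapped in a function, %data and %weight are printed as free_var, i.e. unbound parameters. They can also be collected programmatically; the sketch below is an assumption based on the relay.analysis.free_vars helper available in recent TVM releases:

    from tvm import relay

    data = relay.var("data", shape=(1, 1, 5, 5))
    weight = relay.var("weight")
    conv = relay.nn.conv2d(data, weight, kernel_size=(3, 3), padding=(1, 1), channels=1)
    z = relay.nn.relu(conv)

    # free_vars returns the unbound variables of the expression in post-DFS order;
    # for this graph it prints ['data', 'weight'].
    print([v.name_hint for v in relay.analysis.free_vars(z)])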

relay.Function

    def test_print_function():
        data = relay.var("data", shape=(1, 1, 5, 5))
        weight = relay.var("weight")
        conv = relay.nn.conv2d(data, weight,
                               kernel_size=(3, 3),
                               padding=(1, 1),
                               channels=1)
        z = relay.nn.relu(data=conv)
        # print(z)
        f = relay.Function([data, weight], z)
        print(f)

The printed result is:

    v0.0.4
    fn (%data: Tensor[(1, 1, 5, 5), float32], %weight) {
      %0 = nn.conv2d(%data, %weight, padding=[1, 1], channels=1, kernel_size=[3, 3]);
      nn.relu(%0)
    }
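
A common idiom builds the same function without listing the parameters by hand, by letting free_vars discover them; a minimal sketch, again assuming the relay.analysis.free_vars helper:

    from tvm import relay

    data = relay.var("data", shape=(1, 1, 5, 5))
    weight = relay.var("weight")
    conv = relay.nn.conv2d(data, weight, kernel_size=(3, 3), padding=(1, 1), channels=1)
    z = relay.nn.relu(conv)

    # Equivalent to relay.Function([data, weight], z): the parameters are the
    # free variables of z, so the function prints exactly as shown above.
    f = relay.Function(relay.analysis.free_vars(z), z)
    print(f)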

relay.Module

    def test_print_module():
        data = relay.var("data", shape=(1, 1, 5, 5))
        weight = relay.var("weight")
        conv = relay.nn.conv2d(data, weight,
                               kernel_size=(3, 3),
                               padding=(1, 1),
                               channels=1)
        z = relay.nn.relu(data=conv)
        # print(z)
        # f = relay.Function([data, weight], z)
        # print(f)
        mod = relay.Module.from_expr(z)
        print(mod.astext())

The printed result is:

    v0.0.4
    def @main(%data: Tensor[(1, 1, 5, 5), float32], %weight: Tensor[(1, 1, 3, 3), float32]) -> Tensor[(1, 1, 5, 5), float32] {
      %0 = nn.conv2d(%data, %weight, padding=[1, 1], channels=1, kernel_size=[3, 3]) /* ty=Tensor[(1, 1, 5, 5), float32] */;
      nn.relu(%0) /* ty=Tensor[(1, 1, 5, 5), float32] */
    }
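
Unlike the first two printouts, the module version shows inferred types (the ty=... annotations and the return type), because type inference runs once the expression is packed into a module. In more recent TVM releases relay.Module has been renamed to tvm.IRModule and the InferType pass is usually invoked explicitly; a minimal sketch under that assumption:

    import tvm
    from tvm import relay

    data = relay.var("data", shape=(1, 1, 5, 5))
    weight = relay.var("weight")
    conv = relay.nn.conv2d(data, weight, kernel_size=(3, 3), padding=(1, 1), channels=1)
    z = relay.nn.relu(conv)

    # relay.Module became tvm.IRModule in later releases; running the InferType
    # pass fills in the ty=... annotations seen in the printout above.
    mod = tvm.IRModule.from_expr(z)
    mod = relay.transform.InferType()(mod)
    print(mod.astext())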