import numpy as np
import sys
sys.path.append(r'D:\WorkSpace\DeepLearning\WebsiteV2') # 定义自定义模块保存位置
from codes.paddle import common
import paddle
# codes06013_conv2d_in_single_out_single
def conv2d_in_single_out_single(X, K):
# 对输入和卷积核执行互相关运算,并直接返回运算结果
return common.cross_correlation(X, K)
# codes06014_print_conv2d_in_single_out_single
# 1.定义输入张量和核张量(输入:6×6,卷积核:3×3)
X = paddle.to_tensor([[7,2,4,3,1,5],[6,5,5,6,6,9],[5,8,9,9,5,0],[7,9,7,1,7,6],[4,0,6,9,9,6],[0,9,5,8,6,3]], dtype='float32')
W = paddle.to_tensor([[-9,3,2],[1,2,-9],[8,-8,2]], dtype='float32')
# 2.计算多输入单输出的运算结果
print(conv2d_in_single_out_single(X, W))
Tensor(shape=[4, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True, [[-84. , -29. , -52. , -45. ], [-91. , -55. , 29. , -35. ], [ 3. , -43. , -104., -93. ], [-134., -79. , -115., 19. ]])
# codes06015_conv2d_in_multi_out_single
def conv2d_in_multi_out_single(X, K):
# 按通道遍历计算每一层的互相关运算结果,然后再按通道进行累加
return np.sum(common.cross_correlation(x, k) for x, k in zip(X, K))
# codes06016_print_conv2d_in_multi_out_single
# 1.定义输入张量和核张量(输入:3×6×6,卷积核:3×3×3)
X = paddle.to_tensor([[[7,2,4,3,1,5],[6,5,5,6,6,9],[5,8,9,9,5,0],[7,9,7,1,7,6],[4,0,6,9,9,6],[0,9,5,8,6,3]],
[[1,3,1,7,2,4],[3,1,0,5,3,4],[8,4,8,2,5,7],[5,1,2,5,7,6],[9,1,1,7,3,2],[7,9,4,9,4,0]],
[[3,3,3,8,6,6],[2,8,0,4,9,0],[1,3,7,8,3,3],[7,9,3,8,6,9],[2,5,1,8,7,5],[2,3,0,8,8,1]]], dtype='float32')
W = paddle.to_tensor([[[-9,3,2],[1,2,-9],[8,-8,2]],
[[1,-2,1],[2,3,-6],[-7,6,2]],
[[3,9,2],[-1,7,4],[2,6,-6]]], dtype='float32')
# 2.计算多输入单输出的运算结果
print(conv2d_in_multi_out_single(X, W))
Tensor(shape=[4, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True, [[-21., 35., 101., 152.], [ 41., 84., 177., 80.], [ 97., 38., 107., 29.], [ 66., -73., 99., 203.]])
# codes06017_conv2d_multi_in_multi_out
def conv2d_in_multi_out_multi(X, K):
# 迭代卷积核的输出通道 c_o,每次都与输入X进行互相关运算,并在最后进行组合,生成c_o维张量
return np.stack([conv2d_in_multi_out_single(X, k) for k in K], 0)
# codes06018_print_conv2d_in_multi_out_multi
# 1.定义输入张量和核张量(输入:3×6×6,卷积核:3×3×3×3)
X = paddle.to_tensor([[[7,2,4,3,1,5],[6,5,5,6,6,9],[5,8,9,9,5,0],[7,9,7,1,7,6],[4,0,6,9,9,6],[0,9,5,8,6,3]],
[[1,3,1,7,2,4],[3,1,0,5,3,4],[8,4,8,2,5,7],[5,1,2,5,7,6],[9,1,1,7,3,2],[7,9,4,9,4,0]],
[[3,3,3,8,6,6],[2,8,0,4,9,0],[1,3,7,8,3,3],[7,9,3,8,6,9],[2,5,1,8,7,5],[2,3,0,8,8,1]]], dtype='float32')
W = paddle.to_tensor([[[[-9,3,2],[1,2,-9],[8,-8,2]],
[[1,-2,1],[2,3,-6],[-7,6,2]],
[[3,9,2],[-1,7,4],[2,6,-6]]],
[[[3,-2,2],[-1,2,7],[-5,-3,2]],
[[1,4,-1],[2,-3,5],[4,-6,3]],
[[3,-8,2],[-2,1,4],[-1,2,2]]],
[[[1,-2,2],[-1,2,-4],[-4,-3,4]],
[[1,3,-2],[2,-2,5],[3,5,1]],
[[2,-6,4],[3,-2,4],[-1,-2,-2]]]], dtype='float32')
# 2.计算多输入单输出的运算结果
print(conv2d_in_multi_out_multi(X, W))
[[[-21. 35. 101. 152.] [ 41. 84. 177. 80.] [ 97. 38. 107. 29.] [ 66. -73. 99. 203.]] [[ 94. 31. 69. 50.] [103. 101. 103. 5.] [145. 109. 48. 110.] [ 15. 178. 79. 69.]] [[ 20. 58. -17. -25.] [-20. -33. 73. 49.] [ 66. 115. -9. 42.] [ 27. 100. 7. 45.]]]
# codes06019_conv2d_in_multi_out_1x1
def conv2d_in_multi_out_1x1(X, K):
c_i, h, w = X.shape
c_o = K.shape[0]
X = X.reshape((c_i, h * w))
K = K.reshape((c_o, c_i))
# 全连接层中的矩阵乘法
Y = paddle.matmul(K, X)
return Y.reshape((c_o, h, w)).numpy()
# codes06020_print_conv2d_in_multi_out_1x1
# 1.定义输入张量和核张量
X = paddle.to_tensor([[[1,1,1,0,0],[0,1,1,1,0],[0,0,1,1,1],[0,0,1,1,0],[0,1,1,0,0]],
[[1,0,1,1,0],[0,1,1,1,1],[0,0,1,1,1],[0,0,1,1,0],[0,1,1,0,1]],
[[1,1,1,1,0],[0,1,1,1,0],[0,0,1,1,1],[0,0,1,1,0],[0,1,1,0,0]]], dtype='float32')
W = paddle.rand([32,3,1,1])
# 2.计算多输入单输出的运算结果
Y1 = conv2d_in_multi_out_1x1(X, W)
Y2 = conv2d_in_multi_out_multi(X, W)
print('Y1与Y2的关系是:{}。\n 其形态为:{}。'.format('完全相同' if (Y1==Y2).any() else '不相同', Y2.shape))
Y1与Y2的关系是:完全相同。 其形态为:(32, 5, 5)。