PyTorch Foundations (1)
Basic data types and tensors
import torch
import numpy as np

# Converting between NumPy arrays and tensors
array = np.array([1.1, 2, 3])
tensorArray = torch.from_numpy(array)  # NumPy array -> tensor
array1 = tensorArray.numpy()           # tensor -> NumPy array
print(array, '\t', tensorArray, '\t', array1)
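# Added illustration (not in the original): from_numpy shares memory with the
# source array, so an in-place change to one is visible in the other.
array[0] = 99.9
print(tensorArray)  # tensor([99.9000, 2.0000, 3.0000], dtype=torch.float64)
array[0] = 1.1      # restore the original value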

# torch offers the same numeric operations as numpy
print(torch.sin(tensorArray))
print(np.ones([2, 5]))  # 2 rows, 5 columns
print(np.ones(2))       # 2 numbers in one row
a = torch.randn(2, 3)   # 2*3 tensor drawn from the standard normal distribution
print(a)
print(a.size(0), a.size(1), a.shape[1])  # 2, 3, 3 -- index 0 is rows, index 1 is columns
print(a.shape)   # torch.Size([2, 3])
print(a.type())  # torch.FloatTensor
isinstance(a, torch.DoubleTensor)  # False
isinstance(a, torch.FloatTensor)   # True
a1 = a.cuda()  # requires a CUDA-capable GPU
print(isinstance(a1, torch.FloatTensor))       # False
print(isinstance(a1, torch.cuda.FloatTensor))  # True -- CPU and CUDA tensors have distinct types

# torch tensor objects
tensor1 = torch.tensor(1)
print(tensor1)  # tensor(1)
tensor2 = torch.tensor(1.2)
print(tensor2)        # tensor(1.2000)
print(tensor2.shape)  # torch.Size([])
print(len(tensor2.shape))  # 0 -- a scalar tensor has dimension 0, so its shape is torch.Size([])
tensor3 = torch.tensor([1.1])  # a one-element list, so the result is one-dimensional
print(tensor3, tensor3.shape)  # tensor([1.1000]) torch.Size([1])
tensor4 = torch.FloatTensor(1)  # note: here 1 is a size -- an uninitialized FloatTensor of length 1
print(tensor4)  # one arbitrary uninitialized value
tensor5 = torch.FloatTensor(3)  # compare torch.tensor(3) with torch.FloatTensor(3)
print(tensor5)  # e.g. tensor([0.0000e+00, 0.0000e+00, 6.8645e+36]) -- uninitialized memory
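The last two lines are worth pinning down: torch.tensor treats its argument as data, while the legacy torch.FloatTensor(n) constructor treats a bare integer as a size and returns uninitialized memory. A minimal sketch of the contrast, added here for illustration (outputs for the uninitialized cases will vary):

import torch

print(torch.tensor(3))         # tensor(3): 3 is data, a 0-d LongTensor
print(torch.FloatTensor(3))    # three arbitrary uninitialized float values
print(torch.FloatTensor([3]))  # tensor([3.]): with a list, 3 is data again
print(torch.empty(3))          # the modern spelling of the uninitialized constructor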
Tensor creation and slicing
import torch
import numpy as np

# Tensors and random numbers
a = torch.rand(2, 3, 28, 28)
print(a, a.shape)  # a random four-dimensional tensor, torch.Size([2, 3, 28, 28])
# 4-D tensors suit CNNs, 3-D suit RNNs, 2-D suit batches of flat features
print(a.numel())  # 4704 -- the number of elements, 2*3*28*28
print(a.dim())    # 4
print(torch.tensor(1).dim())  # 0
print(torch.empty(1))  # a 1-D tensor with one uninitialized value, e.g. tensor([0.])
print(torch.Tensor(2, 3).type())  # the default type is torch.FloatTensor
print(torch.IntTensor(2, 3))      # an uninitialized 2*3 IntTensor
print(torch.tensor([1, 1]).type())    # torch.LongTensor -- integer data infers long
print(torch.tensor([1.2, 1]).type())  # torch.FloatTensor -- float data infers float
print(torch.rand(3, 3))  # values uniform in [0, 1)
print(torch.rand_like(torch.rand(3, 3)))  # rand_like inherits the argument's shape: a 3*3 matrix of values in [0, 1)
print(torch.randint(1, 10, (3, 3)))  # values in [1, 10) -- left-closed, right-open -- in a 3*3 matrix
print(torch.randn(3, 3))  # 3*3 matrix from the standard normal distribution (mean 0, variance 1)
print(torch.normal(mean=torch.full([10], 0.), std=torch.arange(1, 0, -0.1)))  # 10-element 1-D tensor, mean 0, decreasing std
print(torch.full([2, 3], 7))  # 2*3 matrix filled with 7
print(torch.full([], 7))      # the 0-dimensional scalar 7
print(torch.full([1], 7))     # a 1-D, one-element tensor holding 7
print(torch.logspace(0, 1, steps=10))  # 10 values from 10^0 to 10^1, evenly spaced in the exponent
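# Added comparison (not in the original): linspace spaces the values evenly,
# logspace spaces the exponents evenly.
print(torch.linspace(0, 10, steps=5))  # tensor([ 0.0000,  2.5000,  5.0000,  7.5000, 10.0000])
print(torch.logspace(0, 1, steps=5))   # tensor([ 1.0000,  1.7783,  3.1623,  5.6234, 10.0000])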

# Slicing and indexing
a = torch.rand(4, 3, 28, 28)
print(a[0].shape)     # torch.Size([3, 28, 28])
print(a[0, 0].shape)  # torch.Size([28, 28])
print(a[0, 0, 2, 4])  # a scalar, e.g. tensor(0.6186)
print(a[:2].shape)             # torch.Size([2, 3, 28, 28])
print(a[:2, 1:, :, :].shape)   # torch.Size([2, 2, 28, 28])
print(a[:2, -1:, :, :].shape)  # torch.Size([2, 1, 28, 28])

print(a[:, :, 0:28:2, 0:28:2].shape)  # torch.Size([4, 3, 14, 14])
print(a[:, :, ::2, ::2].shape)        # torch.Size([4, 3, 14, 14]) -- the same thing, written with defaults
print(a.index_select(1, torch.arange(1)).shape)  # torch.Size([4, 1, 28, 28])
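# Added illustration (not in the original): index_select works along any
# dimension; here it picks images 0 and 2 along the batch dimension.
print(a.index_select(0, torch.tensor([0, 2])).shape)  # torch.Size([2, 3, 28, 28])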

x = torch.randn(3, 4)
mask = x.ge(0.5)  # elements greater than or equal to 0.5 are marked True
print(mask)
print(torch.masked_select(x, mask))        # pick out the True elements
print(torch.masked_select(x, mask).shape)  # the result is flattened to 1-D

src = torch.tensor([[4, 3, 5], [6, 7, 8]])
taa = torch.take(src, torch.tensor([0, 3, 5]))  # select by position after flattening
print(taa)  # tensor([4, 6, 8])
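Since torch.take indexes the flattened tensor, it matches flattening first and then indexing; a quick sketch, added for illustration, to confirm:

import torch

src = torch.tensor([[4, 3, 5], [6, 7, 8]])
idx = torch.tensor([0, 3, 5])
print(torch.take(src, idx))  # tensor([4, 6, 8])
print(src.flatten()[idx])    # same result: take works on the flattened view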
Dimension transformations
import torch
import numpy as np

# Dimension changes
# view / reshape
a = torch.rand(4, 1, 28, 28)  # four images, one channel, 28*28 each
print(a.shape)  # torch.Size([4, 1, 28, 28])
print(a.view(4, 1*28*28).shape)   # torch.Size([4, 784]) -- flatten the last three dimensions into one row per image
print(a.view(4*28, 28).shape)     # torch.Size([112, 28]) -- 112 rows and 28 columns
print(a.view(4*1, 28, 28).shape)  # torch.Size([4, 28, 28]) -- keep the physical meaning of the images in mind
b = a.view(4, 784)
print(b.view(4, 28, 28, 1).shape)  # torch.Size([4, 28, 28, 1]) -- note this layout no longer matches a
# print(a.view(4, 783))  # error: the element counts must match

# unsqueeze inserts a dimension of size 1 without changing the data
# the valid index range is [-a.dim()-1, a.dim()+1)
# for this example that is [-5, 5)
print(a.unsqueeze(0).shape)   # torch.Size([1, 4, 1, 28, 28])
print(a.unsqueeze(-1).shape)  # torch.Size([4, 1, 28, 28, 1])
print(a.unsqueeze(4).shape)   # torch.Size([4, 1, 28, 28, 1])
print(a.unsqueeze(-4).shape)  # torch.Size([4, 1, 1, 28, 28])
print(a.unsqueeze(-5).shape)  # torch.Size([1, 4, 1, 28, 28])
# print(a.unsqueeze(5).shape)  # error: index 5 is out of range
a = torch.tensor([1.2, 2.3])  # shape [2]
print(a.unsqueeze(-1))  # tensor([[1.2000], [2.3000]]) -- becomes 2 rows by 1 column
print(a.unsqueeze(0))   # tensor([[1.2000, 2.3000]]) -- shape becomes [1, 2]: one row, two columns
b = torch.rand(32)             # a per-channel bias
f = torch.rand(4, 32, 14, 14)  # a 32-channel feature map for the bias to broadcast over
b = b.unsqueeze(1).unsqueeze(2).unsqueeze(0)  # torch.Size([1, 32, 1, 1])
print(b.shape)
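# Added illustration (not in the original): with shape [1, 32, 1, 1] the bias
# broadcasts cleanly against the feature map f.
print((f + b).shape)  # torch.Size([4, 32, 14, 14])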

# squeeze removes dimensions of size 1
print()
print(b.shape)              # torch.Size([1, 32, 1, 1])
print(b.squeeze().shape)    # torch.Size([32]) -- every size-1 dimension is squeezed out
print(b.squeeze(-1).shape)  # torch.Size([1, 32, 1])
print(b.squeeze(0).shape)   # torch.Size([32, 1, 1])
print(b.squeeze(1).shape)   # torch.Size([1, 32, 1, 1]) -- dimension 1 has size 32, not 1, so nothing is squeezed
print(b.squeeze(-4).shape)  # torch.Size([32, 1, 1])

# expand broadcasts the data without copying it; memory is only copied when
# actually needed, so it is the recommended choice
print()
print(b.shape)  # torch.Size([1, 32, 1, 1])
print(b.expand(4, 32, 14, 14).shape)   # torch.Size([4, 32, 14, 14]) -- only size-1 dimensions can be expanded
print(b.expand(-1, 32, -1, -1).shape)  # torch.Size([1, 32, 1, 1]) -- -1 keeps a dimension at its current size
# b.expand(-1, 32, -1, -4)  # a negative size other than -1 is meaningless

# repeat gives the number of copies along each dimension, not the target size;
# it allocates new memory, so it uses more space
print()
print(b.shape)  # torch.Size([1, 32, 1, 1])
print(b.repeat(4, 32, 1, 1).shape)   # torch.Size([4, 1024, 1, 1]) -- dimension 1 is copied 32 times
print(b.repeat(4, 1, 1, 1).shape)    # torch.Size([4, 32, 1, 1])
print(b.repeat(4, 1, 32, 32).shape)  # torch.Size([4, 32, 32, 32])

# transpose swaps two specified dimensions
a = torch.rand(4, 3, 32, 32)
print(a.shape)  # torch.Size([4, 3, 32, 32])
a1 = a.transpose(1, 3).contiguous().view(4, 3*32*32).view(4, 32, 32, 3).transpose(1, 3)
print(a1.shape)  # torch.Size([4, 3, 32, 32])
print(torch.all(torch.eq(a, a1)))  # tensor(True) -- the round trip preserves the data
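# Added note (not in the original): view requires contiguous memory; after
# transpose the tensor is no longer contiguous, which is why .contiguous() is
# needed above. reshape() handles both cases.
print(a.transpose(1, 3).is_contiguous())  # False
print(a.transpose(1, 3).reshape(4, 3*32*32).shape)  # torch.Size([4, 3072])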

# permute moves every dimension to a specified position in one call
print(a.permute(0, 2, 3, 1).shape)  # torch.Size([4, 32, 32, 3])
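To make the expand-versus-repeat distinction concrete, here is a minimal sketch, added for illustration, that compares the underlying data pointers:

import torch

b = torch.rand(1, 32, 1, 1)
e = b.expand(4, 32, 14, 14)  # a view: expanded dimensions get stride 0
r = b.repeat(4, 1, 14, 14)   # a real copy with freshly allocated storage
print(b.data_ptr() == e.data_ptr())  # True -- expand shares the underlying memory
print(b.data_ptr() == r.data_ptr())  # False -- repeat allocated new memory
print(e.stride())  # (0, 1, 0, 0): stride-0 dimensions reuse the same elements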
Author: Your Rego
Copyright belongs to the author. Reprinting is welcome, but without the author's consent a link to the original must be given on the article page; otherwise the right to pursue legal liability is reserved.