- Recitation 2
- numpy operations
- array indexing
import numpy as np

x = np.arange(10) ** 2  # x: [ 0 1 4 9 16 25 36 49 64 81]
print(x[::-1])    # all reversed
print(x[8:1:-1])  # reversed slice
print(x[::2])     # every other element
print(x[:])       # no-op (but useful syntax when dealing with n-d arrays)
--- output:
[81 64 49 36 25 16 9 4 1 0]
[64 49 36 25 16 9 4]
[ 0 4 16 36 64]
[ 0 1 4 9 16 25 36 49 64 81]
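The same slice syntax carries over to n-d arrays, which is why the no-op x[:] form matters; a quick sketch (the 3x4 shape is just for illustration):

import numpy as np

x = np.arange(12).reshape(3, 4)
print(x[:, 0])     # first column
print(x[1, :])     # second row
print(x[:, ::-1])  # reverse each row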
- boolean indexing
# Boolean indexing
np.random.seed(123)
x = np.random.random((10,))
print(x)
print(x > 0.5)        # elementwise comparison gives a boolean mask
print(x[x > 0.5])     # mask selects the matching elements
---
[ 0.69646919 0.28613933 0.22685145 0.55131477 0.71946897 0.42310646
  0.9807642 0.68482974 0.4809319 0.39211752]
[ True False False True True False True True False False]
[ 0.69646919 0.55131477 0.71946897 0.9807642 0.68482974]
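Boolean masks also work for in-place assignment and elementwise selection; a small sketch (values are illustrative):

import numpy as np

x = np.array([0.2, 0.7, 0.4, 0.9])
x[x > 0.5] = 0.0                 # zero out entries above the threshold
print(x)                         # [0.2 0.  0.4 0. ]
print(np.where(x > 0.3, 1, -1))  # 1 where the condition holds, -1 elsewhere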
- get diagonal elements
# Create a random matrix
x = np.random.random((5,5))
print(x)
# Get diagonal elements
print(np.diag(x))
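np.diag also works in the other direction: given a 1-D array it builds a diagonal matrix rather than extracting one.

import numpy as np

v = np.array([1., 2., 3.])
print(np.diag(v))
# [[1. 0. 0.]
#  [0. 2. 0.]
#  [0. 0. 3.]]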
- save a single array
x = np.random.random((5,))
np.save('temp.npy', x)   # the .npy extension is appended automatically if missing
y = np.load('temp.npy')
print(y)
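For arrays too big to read eagerly, np.load can memory-map the saved file instead of loading it all at once; a minimal sketch:

import numpy as np

x = np.random.random((5,))
np.save('temp.npy', x)
y = np.load('temp.npy', mmap_mode='r')  # lazily maps the file on disk
print(y[2])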
- save dict of arrays
x1 = np.random.random((2,))
y1 = np.random.random((2,))
np.savez('temp.npz', x=x1, y=y1)  # savez writes a .npz archive
data = np.load('temp.npz')
print(data['x'])
print(data['y'])
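np.savez_compressed has the same interface but compresses the archive, which helps for large arrays (file name here is illustrative):

import numpy as np

big = np.zeros((1000, 1000))
np.savez_compressed('temp_c.npz', big=big)
data = np.load('temp_c.npz')
print(data['big'].shape)  # (1000, 1000)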
- transpose
x = np.random.random((2,3))
print(x)
print(x.T)                     # simple transpose
print(np.transpose(x, (1,0)))  # syntax for multiple dimensions
---
[[ 0.6919703 0.55438325 0.38895057]
 [ 0.92513249 0.84167 0.35739757]]
[[ 0.6919703 0.92513249]
 [ 0.55438325 0.84167 ]
 [ 0.38895057 0.35739757]]
[[ 0.6919703 0.92513249]
 [ 0.55438325 0.84167 ]
 [ 0.38895057 0.35739757]]
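The tuple argument is what makes np.transpose useful beyond 2-D; a sketch permuting the axes of a 3-D array (shape chosen for illustration):

import numpy as np

x = np.random.random((2, 3, 4))
print(np.transpose(x, (1, 0, 2)).shape)  # (3, 2, 4): swap the first two axes
print(np.transpose(x, (2, 1, 0)).shape)  # (4, 3, 2): reverse all axes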
- Add/remove a dim
# Special functions for adding and removing dims
x = np.random.random((2,3,1))
print(np.expand_dims(x, 1).shape)  # add a new dimension
print(np.squeeze(x, 2).shape)      # remove a dimension (must be of size 1)
---
(2, 1, 3, 1)
(2, 3)
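Indexing with np.newaxis (an alias for None) is an equivalent way to add a dimension:

import numpy as np

x = np.random.random((2, 3))
print(x[:, np.newaxis, :].shape)  # (2, 1, 3), same as np.expand_dims(x, 1)
print(x[None, ...].shape)         # (1, 2, 3)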
- PyTorch operations
- tensor basics and numpy interop
import torch
import numpy as np
from torch.autograd import Variable

x = torch.FloatTensor(2,3)  # uninitialized 2x3 tensor (contents are arbitrary)
print(x)
x.zero_()                   # in-place ops end with an underscore
print(x)

np.random.seed(123)
np_array = np.random.random((2,3))
print(torch.FloatTensor(np_array))  # copies the numpy data
print(torch.from_numpy(np_array))   # shares memory with the numpy array

torch.manual_seed(123)
print(torch.randn(2,3))  # standard normal
print(torch.eye(3))      # identity matrix
print(torch.ones(2,3))
print(torch.zeros(2,3))
print(torch.arange(0,3))
x = torch.FloatTensor(3,4)
print(x.size())  # torch.Size([3, 4])
print(x.type())  # 'torch.FloatTensor'
- CUDA tensors
x = torch.rand(3,2)
print(x)
y = x.cuda()  # copy to GPU
print(y)
z = y.cpu()   # copy back to CPU
print(z)
print(z.numpy())  # only CPU tensors can be converted to numpy
x = torch.rand(3,5).cuda()
y = torch.rand(5,4).cuda()
print(torch.mm(x,y))       # matrix multiply runs on the GPU
print(x.new(1,2).zero_())  # new tensor with the same type/device as x
- timing CPU vs GPU
from timeit import timeit

x = torch.rand(1000,64)
y = torch.rand(64,32)
number = 10000

def square():
    z = torch.mm(x,y)

print('CPU: {}ms'.format(timeit(square, number=number) * 1000))
x, y = x.cuda(), y.cuda()
print('GPU: {}ms'.format(timeit(square, number=number) * 1000))
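One caveat: CUDA kernels launch asynchronously, so the loop above can under-count GPU time because mm returns before the kernel finishes. A sketch of a fairer measurement using torch.cuda.synchronize:

import torch
from timeit import timeit

x = torch.rand(1000, 64).cuda()
y = torch.rand(64, 32).cuda()

def square_sync():
    z = torch.mm(x, y)
    torch.cuda.synchronize()  # block until the kernel has actually finished

print('GPU (synced): {}ms'.format(timeit(square_sync, number=10000) * 1000))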
- reductions and indexing
x = torch.arange(0,5)
print(torch.sum(x))             # 0+1+2+3+4 = 10
print(torch.sum(torch.exp(x)))
print(torch.mean(x))            # 10 / 5 = 2
x = torch.rand(3,2)
print(x)
print(x[1,:])  # numpy-style indexing works on tensors
- autograd
x = Variable(torch.arange(0,4), requires_grad=True)
y = torch.sum(x**2)
y.backward()   # backpropagate through the graph
print(x)
print(y)
print(x.grad)  # d/dx sum(x^2) = 2x, i.e. [0, 2, 4, 6]
x = torch.rand(3,5)
y = torch.rand(5,4)
xv = Variable(x)
yv = Variable(y)
print(torch.mm(x,y))    # plain tensor op
print(torch.mm(xv,yv))  # same op on Variables gives the same result
x = Variable(torch.arange(0,4), requires_grad=True)
torch.sum(x ** 2).backward()
print(x.grad)        # [0, 2, 4, 6]
torch.sum(x ** 2).backward()
print(x.grad)        # gradients accumulate: [0, 4, 8, 12]
x.grad.data.zero_()  # reset accumulated gradients before the next backward
torch.sum(x ** 2).backward()
print(x.grad)        # [0, 2, 4, 6] again
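In a training loop the same reset is normally done through an optimizer rather than by calling grad.data.zero_() by hand; a minimal sketch with the same toy objective (pre-0.4 Variable API as above):

import torch
from torch.autograd import Variable

x = Variable(torch.arange(0, 4), requires_grad=True)
opt = torch.optim.SGD([x], lr=0.1)

for step in range(3):
    opt.zero_grad()           # clear gradients accumulated in the previous step
    loss = torch.sum(x ** 2)
    loss.backward()
    opt.step()                # x <- x - lr * grad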
- saving and loading a model
net = torch.nn.Sequential(
    torch.nn.Linear(28*28, 256),
    torch.nn.Sigmoid(),
    torch.nn.Linear(256, 10)
)
print(net.state_dict().keys())  # parameter names, e.g. '0.weight', '0.bias', ...
print(net.state_dict())
torch.save(net.state_dict(), 'test.t7')     # save only the parameters
net.load_state_dict(torch.load('test.t7'))  # load them back into the same architecture
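torch.save can also pickle the entire module instead of just its state_dict; this is shorter but more fragile, since loading requires the class definition to be importable (file name here is illustrative):

import torch

net = torch.nn.Sequential(
    torch.nn.Linear(28*28, 256),
    torch.nn.Sigmoid(),
    torch.nn.Linear(256, 10)
)
torch.save(net, 'whole_model.t7')    # pickles the whole module
net2 = torch.load('whole_model.t7')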
- defining a custom module
class MyNetwork(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.layer1 = torch.nn.Linear(28*28, 256)
        self.layer2 = torch.nn.Sigmoid()
        self.layer3 = torch.nn.Linear(256, 10)

    def forward(self, input_val):
        h = input_val
        h = self.layer1(h)
        h = self.layer2(h)
        h = self.layer3(h)
        return h

net = MyNetwork()
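A quick sanity check of the forward pass (batch size is illustrative; with this pre-0.4 API the input is wrapped in a Variable):

import torch
from torch.autograd import Variable

net = MyNetwork()                   # the class defined above
x = Variable(torch.rand(8, 28*28))  # batch of 8 flattened 28x28 images
out = net(x)                        # invokes forward()
print(out.size())                   # torch.Size([8, 10])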