torch
torch.autograd
模块用于自动计算梯度。示例代码:
# Autograd demo: add two tensors, then backpropagate through their sum.
import torch

# x participates in gradient tracking; y is a plain constant tensor.
x = torch.tensor([1.0, 2.0, 3.0], requires_grad=True)
y = torch.tensor([4.0, 5.0, 6.0])

# Element-wise addition.
z = torch.add(x, y)
print(f'z: {z}')

# backward() requires a scalar, so reduce with sum() first.
total = z.sum()
total.backward()
print(f'Gradients of x: {x.grad}')
torch.nn
nn.Module
是所有神经网络模块的基类。示例代码:
# A minimal feed-forward network (10 -> 5 -> 1) built on nn.Module.
import torch  # fix: torch.relu is used in forward() but torch was not imported
import torch.nn as nn


class SimpleNet(nn.Module):
    """Fully-connected net: Linear(10, 5) -> ReLU -> Linear(5, 1)."""

    def __init__(self):
        super().__init__()  # modern zero-arg super()
        self.fc1 = nn.Linear(10, 5)
        self.fc2 = nn.Linear(5, 1)

    def forward(self, x):
        # ReLU non-linearity between the two affine layers.
        x = torch.relu(self.fc1(x))
        x = self.fc2(x)
        return x


model = SimpleNet()
print(model)
torch.optim
常用的学习率调度器包括 StepLR 和 ExponentialLR。示例代码:
# Optimizer demo: one Adam step on SimpleNet, then a StepLR scheduler step.
import torch  # fix: torch.randn / torch.mean are used but torch was not imported
import torch.optim as optim

# Model under optimization (SimpleNet is defined in the nn.Module example above).
model = SimpleNet()

# Adam with a fixed initial learning rate.
optimizer = optim.Adam(model.parameters(), lr=0.001)

# One full update: clear stale grads, forward, scalar loss, backward, step.
optimizer.zero_grad()
output = model(torch.randn(1, 10))
loss = torch.mean(output)
loss.backward()
optimizer.step()

# Multiply the learning rate by gamma every `step_size` scheduler steps.
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
scheduler.step()
torch.utils.data
Dataset
类用于自定义数据集。DataLoader
用于批量加载数据,支持多线程加载。torchvision.transforms
可以对数据进行预处理和增强。示例代码:
# Custom Dataset + DataLoader demo: wrap an in-memory list and batch-iterate it.
from torch.utils.data import Dataset, DataLoader


class MyDataset(Dataset):
    """Dataset backed by a plain in-memory sequence."""

    def __init__(self, data):
        self.data = data

    def __len__(self):
        # Dataset size == length of the wrapped sequence.
        return len(self.data)

    def __getitem__(self, idx):
        # Direct positional lookup; no transform applied.
        return self.data[idx]


dataset = MyDataset([1, 2, 3, 4])
dataloader = DataLoader(dataset, batch_size=2, shuffle=True)
for batch in dataloader:
    print(batch)
torchvision
示例代码:
# torchvision demo: preprocessing pipeline + batched MNIST loading.
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.utils.data import DataLoader  # fix: DataLoader was used without being imported

# Resize -> tensor -> normalize a single channel to roughly [-1, 1].
transform = transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,)),
])

# Download the MNIST training split and apply the pipeline per sample.
dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
dataloader = DataLoader(dataset, batch_size=64, shuffle=True)

for images, labels in dataloader:
    print(images.shape, labels.shape)
    break  # one batch is enough for the demo
torch.jit
torch.jit.script
用于将 Python 代码转换为 TorchScript 代码。torch.jit.trace
用于通过追踪模型的执行流程创建 TorchScript 模型。示例代码:
# TorchScript demo: script and trace the same tiny module.
import torch.jit
import torch.nn as nn  # fix: nn.Module is referenced below but nn was not imported


class SimpleNet(nn.Module):
    """Trivial module that doubles its input."""

    def forward(self, x):
        return x * 2


model = SimpleNet()

# Scripting compiles the Python source of forward() into TorchScript.
scripted_model = torch.jit.script(model)
print(scripted_model)

# Tracing instead records the ops executed on the example input.
traced_model = torch.jit.trace(model, torch.randn(1, 10))
print(traced_model)
torch.cuda
示例代码:
# Device demo: move a tensor to the GPU when CUDA is available.
import torch  # fix: this snippet used torch without importing it

if torch.cuda.is_available():
    device = torch.device("cuda")
    x = torch.tensor([1.0, 2.0, 3.0]).to(device)
    print(f'GPU tensor: {x}')
else:
    # Graceful fallback on CPU-only machines.
    print("CUDA is not available.")
torch.autograd
示例代码:
x = torch.tensor([1.0, 2.0, 3.0], requires_grad=True) y = x + 2 z = y * y * 3 out = z.mean() # 反向传播计算梯度 out.backward() print(x.grad) # 输出 x 的梯度
torch.multiprocessing
示例代码:
# Multiprocessing demo: fan out one worker per process with mp.spawn.
import torch.multiprocessing as mp


def worker(rank, data):
    """Report which spawned process handled the shared payload."""
    print(f'Worker {rank} processing data: {data}')


if __name__ == '__main__':
    data = [1, 2, 3, 4]
    # spawn() calls worker(rank, *args) once in each of the nprocs processes.
    mp.spawn(worker, args=(data,), nprocs=4)
torch.distributed
示例代码:
# Distributed demo: point-to-point send/recv between two gloo-backend processes.
import torch
import torch.distributed as dist
import torch.multiprocessing as mp  # fix: mp.Process was used but mp was never imported


def init_process(rank, size, fn, backend='gloo'):
    """Join the process group, then run the payload function for this rank."""
    dist.init_process_group(backend, rank=rank, world_size=size)
    fn(rank, size)


def example(rank, size):
    """Rank 0 sends a tensor holding 1.0 to rank 1; rank 1 receives it."""
    tensor = torch.zeros(1)
    if rank == 0:
        tensor += 1
        dist.send(tensor, dst=1)
    else:
        dist.recv(tensor, src=0)
    print(f'Rank {rank} has data {tensor[0]}')


if __name__ == "__main__":
    size = 2
    processes = []
    for rank in range(size):
        p = mp.Process(target=init_process, args=(rank, size, example))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()
通过这些模块,PyTorch 提供了构建、训练、优化和部署深度学习模型所需的全面支持。
上一篇:电脑配置10代是什么意思
下一篇:关键词表_更新热词表