13 Practice Exercises

These hands-on exercises cover chapters 1-12. Each includes a problem statement, starter code, and a reference solution, with both PyTorch and TensorFlow variants.

13.1 Exercise 1: Build a Custom CNN

Task: Build a CNN for CIFAR-10 with 3 convolutional blocks.

Requirements:
1. Use 32, 64, and 128 filters in the conv blocks
2. Add batch normalization after each conv layer
3. Use dropout (0.5) before the final layer
4. Achieve >75% test accuracy

Starter code (PyTorch):

import torch
import torch.nn as nn

class MyCNN(nn.Module):
    def __init__(self):
        super().__init__()
        # TODO: Define layers
        pass

    def forward(self, x):
        # TODO: Implement forward pass
        pass

model = MyCNN()

Solution (PyTorch):

class MyCNN(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 32, 3, padding=1)
        self.bn1 = nn.BatchNorm2d(32)
        self.conv2 = nn.Conv2d(32, 64, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(64)
        self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
        self.bn3 = nn.BatchNorm2d(128)
        self.pool = nn.MaxPool2d(2, 2)
        # Three 2x2 poolings reduce 32x32 inputs to 4x4 feature maps
        self.fc1 = nn.Linear(128 * 4 * 4, 256)
        self.dropout = nn.Dropout(0.5)
        self.fc2 = nn.Linear(256, 10)

    def forward(self, x):
        x = self.pool(torch.relu(self.bn1(self.conv1(x))))
        x = self.pool(torch.relu(self.bn2(self.conv2(x))))
        x = self.pool(torch.relu(self.bn3(self.conv3(x))))
        x = x.view(-1, 128 * 4 * 4)
        x = torch.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.fc2(x)
        return x

Starter code (TensorFlow):

from tensorflow import keras

# Your code here
# TODO: Build model with Sequential API
# model = keras.Sequential([...])

Solution (TensorFlow):

model = keras.Sequential([
    keras.layers.Conv2D(32, 3, padding='same', input_shape=(32, 32, 3)),
    keras.layers.BatchNormalization(),
    keras.layers.Activation('relu'),
    keras.layers.MaxPooling2D(2, 2),
    keras.layers.Conv2D(64, 3, padding='same'),
    keras.layers.BatchNormalization(),
    keras.layers.Activation('relu'),
    keras.layers.MaxPooling2D(2, 2),
    keras.layers.Conv2D(128, 3, padding='same'),
    keras.layers.BatchNormalization(),
    keras.layers.Activation('relu'),
    keras.layers.MaxPooling2D(2, 2),
    keras.layers.Flatten(),
    keras.layers.Dense(256, activation='relu'),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(10, activation='softmax')
])
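
Neither solution above includes the training loop needed to reach the >75% accuracy target. Here is a minimal PyTorch sketch, assuming the MyCNN class defined earlier; the normalization values, batch size, and epoch count are illustrative, not tuned:

import torch
import torch.nn as nn
from torchvision import datasets, transforms

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
train_set = datasets.CIFAR10('data', train=True, download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=128, shuffle=True)

device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = MyCNN().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

model.train()
for epoch in range(20):  # on the order of 20 epochs is often enough for this target
    for images, labels in train_loader:
        images, labels = images.to(device), labels.to(device)
        optimizer.zero_grad()
        loss = criterion(model(images), labels)
        loss.backward()
        optimizer.step()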

13.2 Exercise 2: Transfer Learning

Task: Use a pre-trained model for binary classification (cats vs dogs).
Requirements:
1. Load ResNet18 (PyTorch) or ResNet50 (TensorFlow)
2. Freeze all layers except the last
3. Add a custom classifier for 2 classes
4. Use appropriate preprocessing

Starter code (PyTorch):

from torchvision import models
import torch.nn as nn
# Your code here
# Hint: models.resnet18(weights=models.ResNet18_Weights.DEFAULT)

Solution (PyTorch):

# pretrained=True is deprecated in recent torchvision; use the weights argument
model = models.resnet18(weights=models.ResNet18_Weights.DEFAULT)
# Freeze all layers
for param in model.parameters():
    param.requires_grad = False
# Replace classifier
model.fc = nn.Linear(model.fc.in_features, 2)
# Only train classifier
optimizer = torch.optim.Adam(model.fc.parameters(), lr=0.001)

Starter code (TensorFlow):

from tensorflow.keras.applications import ResNet50

# Your code here
# Hint: ResNet50(weights='imagenet', include_top=False)

Solution (TensorFlow):

base_model = ResNet50(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
base_model.trainable = False
model = keras.Sequential([
    base_model,
    keras.layers.GlobalAveragePooling2D(),
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(2, activation='softmax')
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
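
Requirement 4 (appropriate preprocessing) is not shown in either solution. A minimal PyTorch sketch, assuming the standard ImageNet statistics that the pre-trained ResNet weights expect:

from torchvision import transforms

# ImageNet-trained ResNets expect 224x224 inputs normalized with these statistics
preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

On the TensorFlow side, tf.keras.applications.resnet50.preprocess_input applies the matching ImageNet preprocessing for ResNet50.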

13.3 Exercise 3: Sentiment Analysis LSTM

Task: Build an LSTM for binary sentiment classification.
Requirements:
1. Use embedding layer (vocab=5000, dim=128)
2. Add bidirectional LSTM (128 units)
3. Add dropout (0.3)
4. Binary output

Starter code (PyTorch):

# Your code here
class SentimentLSTM(nn.Module):
    def __init__(self):
        super().__init__()
        # TODO: Define layers
        pass

Solution (PyTorch):

class SentimentLSTM(nn.Module):
    def __init__(self, vocab_size=5000, embedding_dim=128, hidden_dim=128):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True, bidirectional=True)
        self.dropout = nn.Dropout(0.3)
        self.fc = nn.Linear(hidden_dim * 2, 2)  # *2 for the two LSTM directions

    def forward(self, x):
        embedded = self.embedding(x)
        lstm_out, (hidden, _) = self.lstm(embedded)
        # Concatenate the final forward and backward hidden states
        hidden = torch.cat((hidden[-2], hidden[-1]), dim=1)
        out = self.dropout(hidden)
        return self.fc(out)

Starter code (TensorFlow):

# Your code here
# TODO: Build LSTM model

Solution (TensorFlow):

model = keras.Sequential([
    keras.layers.Embedding(5000, 128),  # input_length is deprecated in recent Keras
    keras.layers.Bidirectional(keras.layers.LSTM(128)),
    keras.layers.Dropout(0.3),
    keras.layers.Dense(2, activation='softmax')
])
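
A quick smoke test for the PyTorch model, assuming token IDs already padded to a fixed length (the batch size and sequence length here are arbitrary):

torch_model = SentimentLSTM()
dummy_batch = torch.randint(0, 5000, (8, 100))  # 8 sequences of 100 token IDs
logits = torch_model(dummy_batch)
print(logits.shape)  # torch.Size([8, 2])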

13.4 Exercise 4: Learning Rate Schedule

Task: Implement a cosine annealing learning rate schedule.

Starter code (PyTorch):

import torch.optim.lr_scheduler as lr_scheduler
# Your code here
# Hint: Use CosineAnnealingLR

Solution (PyTorch):

optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=50, eta_min=1e-6)
# In training loop:
# for epoch in range(50):
# train(...)
# scheduler.step()

Starter code (TensorFlow):

# Your code here
# Hint: Use keras.optimizers.schedules.CosineDecay

Solution (TensorFlow):

lr_schedule = keras.optimizers.schedules.CosineDecay(
    initial_learning_rate=0.001,
    decay_steps=1000,
    alpha=1e-3  # alpha is a fraction of the initial LR, so the floor is 1e-6
)
optimizer = keras.optimizers.Adam(lr_schedule)
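
To sanity-check the PyTorch schedule, you can advance it and read back the learning rate; a sketch assuming the optimizer and scheduler from the PyTorch solution above (no training happens here, so PyTorch may warn about scheduler.step() running before optimizer.step()):

lrs = []
for epoch in range(50):
    scheduler.step()
    lrs.append(optimizer.param_groups[0]['lr'])
print(lrs[0], lrs[-1])  # decays from ~1e-3 down to eta_min=1e-6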

13.5 Exercise 5: Data Augmentation Pipeline

Task: Create an aggressive augmentation pipeline for CIFAR-10.

Starter code (PyTorch):

from torchvision import transforms
# Your code here
# Include: RandomCrop, RandomHorizontalFlip, ColorJitter, RandomRotation

Solution (PyTorch):

train_transform = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(p=0.5),
    transforms.RandomRotation(15),
    transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

Starter code (TensorFlow):

# Your code here
# Use keras.Sequential with augmentation layers

Solution (TensorFlow):

data_augmentation = keras.Sequential([
    keras.layers.RandomFlip("horizontal"),
    keras.layers.RandomRotation(0.1),
    keras.layers.RandomZoom(0.1),
    keras.layers.RandomContrast(0.2),
])
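
To put the PyTorch pipeline to work, pass it to the dataset; a sketch assuming torchvision's CIFAR-10 loader (the path and batch size are illustrative):

import torch
from torchvision import datasets

train_set = datasets.CIFAR10('data', train=True, download=True, transform=train_transform)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=128, shuffle=True)

The Keras augmentation layers are active only in training mode, so the usual pattern is to place data_augmentation as the first block of a model, e.g. keras.Sequential([data_augmentation, ...]); at inference time they pass inputs through unchanged.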

13.6 Summary

Practice these exercises to solidify your deep learning skills! Each exercise builds on concepts from previous chapters.
Next: Chapter 14 covers advanced topics like Transformers, GANs, and Autoencoders!