12  Real-World Deep Learning Projects

This chapter brings everything together with three complete projects: an image classifier API, a sentiment analysis service, and a time series forecaster.

12.1 Project 1: Image Classifier API

Build a deployable image classifier served through a small REST API. The example uses Flask; the same structure carries over to FastAPI.

12.1.1 Step 1: Train and Save Model

PyTorch:

import torch
import torch.nn as nn
from torchvision import models

# Use a pre-trained ResNet (torchvision >= 0.13 uses the weights argument
# in place of the deprecated pretrained=True)
model = models.resnet18(weights=models.ResNet18_Weights.DEFAULT)
model.fc = nn.Linear(model.fc.in_features, 10)  # 10 classes

# Train model (abbreviated)
# train(model, ...)

# Save for deployment
torch.save(model.state_dict(), 'classifier.pth')
print("✅ Model saved: classifier.pth")

TensorFlow/Keras:

from tensorflow import keras

# Use pre-trained ResNet
base_model = keras.applications.ResNet50(weights='imagenet', include_top=False)
model = keras.Sequential([
    base_model,
    keras.layers.GlobalAveragePooling2D(),
    keras.layers.Dense(10, activation='softmax')
])

# Train model (abbreviated)
# model.fit(...)

# Save for deployment
model.save('classifier.h5')
print("✅ Model saved: classifier.h5")

12.1.2 Step 2: Create API

# app.py - Flask API (framework-agnostic)
from flask import Flask, request, jsonify
from PIL import Image
import numpy as np

app = Flask(__name__)

# Load the trained model once at startup (PyTorch or TensorFlow)
# model = load_model()

@app.route('/predict', methods=['POST'])
def predict():
    if 'file' not in request.files:
        return jsonify({'error': 'No file uploaded'}), 400

    file = request.files['file']
    image = Image.open(file.stream)

    # Preprocess
    image = preprocess(image)

    # Predict
    prediction = model.predict(image)[0]  # drop the batch dimension
    class_idx = prediction.argmax()
    confidence = prediction[class_idx]

    return jsonify({
        'class': int(class_idx),
        'confidence': float(confidence)
    })

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)
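
The preprocess helper is deliberately left abstract. For the PyTorch ResNet it might look like the sketch below; the mean/std values are the standard ImageNet normalization constants, and the function name matches the placeholder in app.py:

from torchvision import transforms

_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

def preprocess(image):
    image = image.convert('RGB')           # handle grayscale/RGBA uploads
    return _transform(image).unsqueeze(0)  # add the batch dimension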

12.1.3 Step 3: Test API

# Terminal
curl -X POST -F "file=@cat.jpg" http://localhost:5000/predict
# Response: {"class": 3, "confidence": 0.95}
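
The same request can be sent from Python with the requests library, which is handy for automated tests (assumes the API is running locally as above):

import requests

with open('cat.jpg', 'rb') as f:
    response = requests.post('http://localhost:5000/predict',
                             files={'file': f})
print(response.json())  # e.g. {'class': 3, 'confidence': 0.95}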

12.2 Project 2: Sentiment Analysis Service

12.2.1 Complete Pipeline

PyTorch:

import torch
import torch.nn as nn

class SentimentModel(nn.Module):
    def __init__(self, vocab_size):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, 128)
        self.lstm = nn.LSTM(128, 256, batch_first=True)
        self.fc = nn.Linear(256, 2)

    def forward(self, x):
        emb = self.embedding(x)
        _, (hidden, _) = self.lstm(emb)
        return self.fc(hidden[-1])

# Instantiate (vocab_size would come from your tokenizer's vocabulary)
model = SentimentModel(vocab_size=10000)

# Inference function
def predict_sentiment(text):
    tokens = tokenize(text)  # your tokenizer; a minimal sketch follows below
    indices = torch.tensor([tokens])

    model.eval()
    with torch.no_grad():
        output = model(indices)
        prob = torch.softmax(output, dim=1)
        sentiment = "Positive" if prob[0][1] > 0.5 else "Negative"
        confidence = prob.max().item()

    return sentiment, confidence

# Example
text = "This movie was amazing!"
sentiment, conf = predict_sentiment(text)
print(f"{sentiment} (confidence: {conf:.2f})")

TensorFlow/Keras:

from tensorflow import keras
import numpy as np

# Load trained model
model = keras.models.load_model('sentiment_model.h5')

# Inference function
def predict_sentiment(text, tokenizer, maxlen=200):
    sequence = tokenizer.texts_to_sequences([text])
    padded = keras.preprocessing.sequence.pad_sequences(sequence, maxlen=maxlen)

    prediction = model.predict(padded, verbose=0)
    sentiment = "Positive" if prediction[0][1] > 0.5 else "Negative"
    confidence = prediction[0].max()

    return sentiment, confidence

# Example (assumes the Tokenizer fitted during training has been reloaded)
text = "This movie was amazing!"
sentiment, conf = predict_sentiment(text, tokenizer)
print(f"{sentiment} (confidence: {conf:.2f})")

12.3 Project 3: Time Series Forecasting

Predict future values from historical data.

PyTorch:

import torch
import torch.nn as nn
import numpy as np

class TimeSeriesLSTM(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers=2):
        super().__init__()
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, 1)

    def forward(self, x):
        lstm_out, _ = self.lstm(x)
        prediction = self.fc(lstm_out[:, -1, :])
        return prediction

# Create sequences
def create_sequences(data, seq_length):
    xs, ys = [], []
    for i in range(len(data) - seq_length):
        x = data[i:i+seq_length]
        y = data[i+seq_length]
        xs.append(x)
        ys.append(y)
    # Convert via NumPy first; torch.tensor on a list of arrays is slow
    xs, ys = np.array(xs), np.array(ys)
    return torch.tensor(xs, dtype=torch.float32), torch.tensor(ys, dtype=torch.float32)

# Example usage
data = np.sin(np.linspace(0, 100, 1000))  # Synthetic data
X, y = create_sequences(data, seq_length=10)
X = X.unsqueeze(-1)  # add the feature dimension the LSTM expects: [990, 10] -> [990, 10, 1]

model = TimeSeriesLSTM(input_size=1, hidden_size=64)
print(f"Input shape: {X.shape}")  # [990, 10, 1]
print(f"Output shape: {y.shape}")  # [990]

TensorFlow/Keras:

from tensorflow import keras
import numpy as np

# Create sequences
def create_sequences(data, seq_length):
    xs, ys = [], []
    for i in range(len(data) - seq_length):
        x = data[i:i+seq_length]
        y = data[i+seq_length]
        xs.append(x)
        ys.append(y)
    return np.array(xs), np.array(ys)

# Build model
model = keras.Sequential([
    keras.layers.LSTM(64, input_shape=(None, 1)),
    keras.layers.Dense(1)
])

model.compile(optimizer='adam', loss='mse')

# Example usage
data = np.sin(np.linspace(0, 100, 1000))  # Synthetic data
X, y = create_sequences(data, seq_length=10)
X = X.reshape(-1, 10, 1)  # add the feature dimension the LSTM expects

print(f"Input shape: {X.shape}")
print(f"Output shape: {y.shape}")

12.4 Model Deployment Checklist

Before deployment:

1. Save model weights and architecture
2. Save preprocessing parameters (mean, std, vocabulary); a sketch follows this list
3. Test inference speed
4. Create requirements.txt
5. Write API documentation
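
For item 2, a simple convention is to serialize the preprocessing parameters next to the weights so the serving code never hard-codes them. A minimal sketch (the file name and values are illustrative):

import json

preprocess_params = {
    'mean': [0.485, 0.456, 0.406],  # e.g. ImageNet normalization constants
    'std': [0.229, 0.224, 0.225],
    'input_size': 224,
}
with open('preprocess.json', 'w') as f:
    json.dump(preprocess_params, f)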

Deployment options:

  • Simple: Flask/FastAPI on a cloud VM
  • Scalable: Docker + Kubernetes
  • Serverless: AWS Lambda, Google Cloud Functions
  • Edge: TensorFlow Lite or ONNX for mobile/embedded

12.5 Optimization for Production

12.5.1 Model Quantization

Reduce model size and speed up inference:

PyTorch:

# Quantize model (int8 instead of float32)
# quantized_model = torch.quantization.quantize_dynamic(
#     model, {nn.Linear, nn.LSTM}, dtype=torch.qint8
# )

print("Quantization reduces model size by 4x")
print("Speeds up CPU inference by 2-3x")

TensorFlow/Keras:

import tensorflow as tf

# Convert to TensorFlow Lite
# converter = tf.lite.TFLiteConverter.from_keras_model(model)
# converter.optimizations = [tf.lite.Optimize.DEFAULT]
# tflite_model = converter.convert()

print("TensorFlow Lite for mobile deployment")
print("Reduces size by 4x, faster inference")

12.6 Summary

  • Project 1: Image classifier with REST API
  • Project 2: Sentiment analysis service
  • Project 3: Time series forecasting
  • Deployment: Flask, Docker, cloud platforms
  • Optimization: Quantization for production

12.7 What’s Next?

Chapter 13: Practice exercises to solidify your skills!