Using AWS CDK to Deploy a Static Site like Hugo

First we will set up our development environment using Docker and add a new aws service to docker-compose.yml.

# aws/Dockerfile

FROM node:18-slim

# AWS CDK will use the folder name for the modules
WORKDIR /site

ARG AWS_CDK_VERSION=2.53.0

RUN apt-get update -y &&\
    apt-get install -y --no-install-recommends \
      ca-certificates \
      curl \
      groff \
      less \
      unzip

RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o awscli.zip &&\
    unzip awscli.zip &&\
    ./aws/install

RUN npm install -g \
    aws-cdk@${AWS_CDK_VERSION} \
    esbuild

# NOTE: Uncomment these lines after you have run `cdk init app`
# COPY src/package.json src/package-lock.json ./
# RUN npm install

# docker-compose.yml

version: '3.7'
services:

  aws:
    restart: unless-stopped
    env_file:
      - .env
      - aws/.env
    build:
      context: ./aws
      dockerfile: Dockerfile
    volumes:
      - type: bind
        source: ./aws/src
        target: /site
      # NOTE: Uncomment these lines after you have run `cdk init app`
      # - /site/node_modules/

# ...

You will need to put the AWS credentials used to manage your AWS account into the aws/.env file.

# aws/.env

AWS_ACCESS_KEY_ID=
AWS_SECRET_ACCESS_KEY=

Now we can build the service with ./build.sh aws and initialize our CDK application.

mkdir aws/src
./run.sh aws bash
cdk init app --language typescript

Now that we have a package.json file we need to uncomment the lines in aws/Dockerfile and docker-compose.yml, then run ./build.sh aws to apply the changes. From now on, whenever we run ./up.sh or ./build.sh, Docker will install the node modules automatically for us.

// aws/src/lib/site-stack.ts
import { Stack, StackProps, CfnOutput } from 'aws-cdk-lib';
import { Construct } from 'constructs';
import { Bucket, BucketAccessControl } from 'aws-cdk-lib/aws-s3';
import { Distribution, AllowedMethods, ViewerProtocolPolicy, OriginProtocolPolicy } from 'aws-cdk-lib/aws-cloudfront';
import { User, Policy, AccessKey, AnyPrincipal, PolicyStatement, Effect } from 'aws-cdk-lib/aws-iam';
import { HttpOrigin } from 'aws-cdk-lib/aws-cloudfront-origins';
import { Certificate, CertificateValidation } from 'aws-cdk-lib/aws-certificatemanager';

interface SiteStackProps extends StackProps {
  domainNames?: string[],
}

export class SiteStack extends Stack {
  /**
   * Create the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` outputs for the
   * deployment user. This user will have minimal permissions for managing the
   * S3 bucket and purging the CloudFront cache.
   */
  createHugoUser(): User {
    const user = new User(this, 'user');
    const credentials = new AccessKey(this, 'credentials', { user });

    new CfnOutput(this, 'deployKey', {
      value: credentials.accessKeyId,
      description: 'AWS_ACCESS_KEY_ID',
    });

    new CfnOutput(this, 'deploySecret', {
      value: credentials.secretAccessKey.unsafeUnwrap(),
      description: 'AWS_SECRET_ACCESS_KEY',
    });

    return user;
  }

  /**
   * Creates the S3 bucket, grants the provided `user` the minimal
   * permissions needed to deploy, and allows read-only access to
   * the world.
   *
   * @param user The deployment user created by `createHugoUser()`
   */
  createDestination(user: User): Bucket {
    const bucket = new Bucket(this, 'bucket', {
      accessControl: BucketAccessControl.PRIVATE,
      publicReadAccess: true,
      versioned: true,
      websiteIndexDocument: 'index.html',
    });

    bucket.addToResourcePolicy(new PolicyStatement({
      actions: [
        's3:GetObject'
      ],
      effect: Effect.ALLOW,
      principals: [
        new AnyPrincipal(),
      ],
      resources: [
        bucket.arnForObjects('*'),
      ],
    }));

    new Policy(this, 'publishPolicy', {
      statements: [
        new PolicyStatement({
          actions: [
            'cloudfront:CreateInvalidation',
          ],
          effect: Effect.ALLOW,
          resources: ['*'],
        }),
        new PolicyStatement({
          actions: [
            's3:GetObject',
            's3:ListBucket',
            's3:PutObject',
            's3:DeleteObject',
            's3:PutObjectAcl',
          ],
          effect: Effect.ALLOW,
          resources: [
            bucket.bucketArn,
            bucket.arnForObjects('*'),
          ],
        })
      ],
      users: [user],
    });

    new CfnOutput(this, 'bucketName', {
      description: 'AWS_BUCKET',
      value: bucket.bucketName,
    });

    return bucket;
  }

  /**
   * Creates the CloudFront distribution and the ACM certificate for
   * the `domainNames`.
   *
   * @param props Stack props providing the optional `domainNames`
   * @param destination The S3 bucket that serves as the distribution origin
   */
  createService({ domainNames }: SiteStackProps, destination: Bucket): Distribution {
    const [domainName] = domainNames || [];

    const certificate = domainName ?
      new Certificate(this, 'certificate', {
        domainName,
        validation: CertificateValidation.fromDns(),
        subjectAlternativeNames: domainNames,
      }) :
      undefined;

    // NOTE: The construct id must not clash with the `distribution` output defined below
    const distribution = new Distribution(this, 'cdn', {
      certificate,
      defaultBehavior: {
        allowedMethods: AllowedMethods.ALLOW_GET_HEAD_OPTIONS,
        compress: true,
        origin: new HttpOrigin(destination.bucketWebsiteDomainName, {
          protocolPolicy: OriginProtocolPolicy.HTTP_ONLY,
        }),

        // NOTE: If using Cloudflare as a CDN you will need to use `ViewerProtocolPolicy.ALLOW_ALL`
        //       Otherwise you will get an infinite redirect. Or, you can change Cloudflare's
        //       "SSL/TLS encryption mode" to "Full (strict)".
        viewerProtocolPolicy: ViewerProtocolPolicy.ALLOW_ALL,
        // viewerProtocolPolicy: ViewerProtocolPolicy.REDIRECT_TO_HTTPS,
      },
      defaultRootObject: 'index.html',
      domainNames,
      errorResponses: [
        // Wire up Hugo's 404 page
        { httpStatus: 404, responsePagePath: `/404.html` },
      ],
    });

    new CfnOutput(this, 'distribution', {
      description: 'distributionID',
      value: distribution.distributionId,
    });

    new CfnOutput(this, 'hostname', {
      description: 'hostname',
      value: distribution.distributionDomainName,
    });

    new CfnOutput(this, 'website', {
      description: 'website',
      value: `https://${distribution.distributionDomainName}`,
    });

    return distribution;
  }

  constructor(scope: Construct, id: string, props: SiteStackProps) {
    super(scope, id, props);

    const user = this.createHugoUser();
    const destination = this.createDestination(user);

    this.createService(props, destination);
  }
}

// aws/src/bin/site.ts
#!/usr/bin/env node
import 'source-map-support/register';
import * as cdk from 'aws-cdk-lib';
import { SiteStack } from '../lib/site-stack';

const app = new cdk.App();
const stackName = app.node.tryGetContext('name');
const domainNames = app.node.tryGetContext('domains')?.split(',').map((domain: string) => domain.trim()) || [];

new SiteStack(app, stackName, { domainNames });

Now we can deploy our stack by running ./run.sh aws bash to open a terminal in our aws Docker service and using:

cdk deploy -c name=<name> -c domains=<comma separated list of domains>

I like to create a .sh file as a shortcut for this command so I don't have to remember the stack name or domains.
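
For example, a minimal sketch of such a script; the aws/src/deploy.sh path, the stack name site, and the example domains are all placeholders, so swap in your own values:

# aws/src/deploy.sh

#!/usr/bin/env bash
set -euo pipefail

# Deploy the stack with a fixed name and domain list so we never have to retype them.
# Any extra arguments are passed straight through to `cdk deploy`.
cdk deploy \
  -c name=site \
  -c domains="example.com,www.example.com" \
  "$@"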

On the first cdk deploy run you will be prompted to bootstrap the CDK via cdk bootstrap, which sets up the resources the CDK needs to operate.
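
If you prefer to run the bootstrap yourself from the aws container, the command takes an optional environment; the account ID below is a placeholder:

cdk bootstrap aws://<account-id>/us-east-1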

If you added domain names, you will need to log into the AWS ACM console and find the certificate that has the “Pending validation” status. Clicking that record will open the list of domains with “Pending validation”. You will need to add each record to your DNS so AWS can validate you are the owner.
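
You can also list the required validation records with the AWS CLI from the aws container instead of the console; the certificate ARN is a placeholder you can copy from the list-certificates output, and the region assumes the stack lives in us-east-1 like the rest of this setup:

aws acm list-certificates --region us-east-1
aws acm describe-certificate \
    --certificate-arn <certificate-arn> \
    --region us-east-1 \
    --query 'Certificate.DomainValidationOptions[].ResourceRecord'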

Once the validation is complete, the CDK process will continue. The CloudFront distribution can take around 5 minutes to complete. Once complete, we will get a list of “Outputs”:

  • SiteStack.bucketName
  • SiteStack.deployKey
  • SiteStack.deploySecret
  • SiteStack.distribution
  • SiteStack.hostname
  • SiteStack.website
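
If you would rather not copy these out of the terminal, cdk deploy can also write them to a JSON file; outputs.json is just an example file name:

cdk deploy -c name=<name> -c domains=<comma separated list of domains> --outputs-file outputs.json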

We will first set up the DNS for our site using the SiteStack.hostname output. We will need to add a CNAME record for each of the domain names we passed to cdk deploy, with the SiteStack.hostname as the value.
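
Once the CNAME records have propagated you can check them from any terminal; www.example.com stands in for one of your own domains, and the answer should be the SiteStack.hostname value:

dig +short www.example.com CNAME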

Next we will add the hugo deployment user credentials to hugo/.env. These credentials are different from the credentials used in aws/.env. While the credentials in aws/.env will work to deploy the Hugo site, those credentials have elevated permissions with access to our entire AWS account. The credentials created by the CDK have the minimal permissions required to upload our site and clear the CDN cache.

# hugo/.env

AWS_ACCESS_KEY_ID=# SiteStack.deployKey
AWS_SECRET_ACCESS_KEY=# SiteStack.deploySecret

These are also the credentials you will want to add to your CI environment secrets.
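
If you manage your repository with the GitHub CLI, one way to add them is shown below, with the placeholder values taken from the CDK outputs:

gh secret set AWS_ACCESS_KEY_ID --body "<SiteStack.deployKey>"
gh secret set AWS_SECRET_ACCESS_KEY --body "<SiteStack.deploySecret>"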

The last bit is to configure Hugo deployment in our hugo/src/config.yaml. We need to replace the <SiteStack.bucketName> and <SiteStack.distribution> tokens with the actual values from the CDK outputs.

# hugo/src/config.yaml
# ...

deployment:
  # Upload `css` and `js` files first to prevent errors when a page is accessed while a deployment is in progress
  order: ['.css$', '.js$']
  targets:
    - name: production
      URL: 's3://<SiteStack.bucketName>?region=us-east-1'
      cloudFrontDistributionID: <SiteStack.distribution>

If you have a baseURL with a subfolder, you will need to add a prefix query parameter to the URL. For instance, if the baseURL is https://example.com/blog/ the URL should be s3://<SiteStack.bucketName>?region=us-east-1&prefix=/blog.

If you have not already uncommented the lines in aws/Dockerfile and docker-compose.yml, do so now so we don't have to npm install every time we need to manage the CDK stack. Once uncommented, you can run ./build.sh aws to apply the changes to your current container.

Now we can manually deploy the site by running ./run.sh hugo npm run release. Be sure that you are not still in the aws container terminal.

Opening any of the custom domains, or the SiteStack.website URL from the outputs, in a browser should show our Hugo page.
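
For a quick smoke test from the command line, with example.com standing in for one of your domains:

curl -sI https://example.com/ | head -n 1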

If you are using Cloudflare and you are seeing a “Failed to find a valid digest in the ‘integrity’ attribute” error in your developer tools console while visiting your site, it's likely because you have the “Auto Minify” option selected in Cloudflare's “Speed -> Optimization” settings.
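
Finally, we can automate the deployment with a GitHub Actions workflow that installs Hugo, restores the npm cache, and runs npm run release on every push to main using the credentials we added to the repository secrets.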

# .github/workflows/release.yml
name: Release

on:
  push:
    branches: [ main ]

jobs:
  deploy:
    env:
      HUGO_VERSION: 0.107.0
      AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
      AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
      # NOTE: Add any other `HUGO_` variables here and be sure to add them to the GitHub Actions secrets

    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - uses: actions/setup-node@v3
        with:
          node-version: 18

      - name: Install Hugo
        shell: bash
        run: |
          curl -L https://github.com/gohugoio/hugo/releases/download/v${HUGO_VERSION}/hugo_${HUGO_VERSION}_linux-amd64.tar.gz \
               -o hugo.tar.gz &&\
          tar -xvzf hugo.tar.gz -C $HOME &&\
          echo $HOME >> $GITHUB_PATH          

      - uses: actions/cache@v3
        env:
          cache-name: cache-node-modules
        with:
          path: ~/.npm
          key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('**/package*.json') }}
          restore-keys: |
            ${{ runner.os }}-build-${{ env.cache-name }}-
            ${{ runner.os }}-build-
            ${{ runner.os }}-            

      - name: Install Dependencies
        shell: bash
        working-directory: hugo/src
        run: npm ci
      
      - name: Deploy
        shell: bash
        working-directory: hugo/src
        run: npm run release