New Upstream Release - golang-github-digitalocean-godo

Ready changes

Summary

Merged new upstream version: 1.99.0 (was: 1.37.0).

Diff

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index c557202..fb84b4d 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -2,27 +2,33 @@ name: CI
 
 on:
   push:
-    branches: [ master ]
+    branches: [ main ]
   pull_request:
-    branches: [ master ]
+    branches: [ main ]
 
 jobs:
-  go-pipeline:
-    name: test
-    runs-on: ubuntu-latest
-
+  test:
+    strategy:
+      matrix:
+        go-version: [ 1.19.x, 1.20.x ]
+        os: [ ubuntu-latest, macos-latest, windows-latest ]
+    runs-on: ${{ matrix.os }}
     steps:
-    - name: checkout
+    - name: Install Go
+      uses: actions/setup-go@v2
+      with:
+        go-version: ${{ matrix.go-version }}
+    - name: Checkout code
       uses: actions/checkout@v2
-
-    - name: go1.11 test
-      uses: digitalocean/golang-pipeline/go1.11/test@master
-
-    - name: go1.12 test
-      uses: digitalocean/golang-pipeline/go1.12/test@master
-
-    - name: go1.13 test
-      uses: digitalocean/golang-pipeline/go1.13/test@master
-
-    - name: go1.14 test
-      uses: digitalocean/golang-pipeline/go1.14/test@master
+    - name: Restore cache
+      uses: actions/cache@v2
+      with:
+        path: ~/go/pkg/mod
+        key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+        restore-keys: |
+          ${{ runner.os }}-go-
+    - name: Format
+      run: if [ "$(gofmt -s -l . | wc -l)" -gt 0 ]; then exit 1; fi
+      if: matrix.os == 'ubuntu-latest'
+    - name: Test
+      run: go test -race ./...
diff --git a/.whitesource b/.whitesource
index e0aaa3e..6b6a735 100644
--- a/.whitesource
+++ b/.whitesource
@@ -5,4 +5,4 @@
   "issueSettings": {
     "minSeverityLevel": "LOW"
   }
-}
\ No newline at end of file
+}
diff --git a/1-click.go b/1-click.go
index fab04fe..2e07cf6 100644
--- a/1-click.go
+++ b/1-click.go
@@ -10,9 +10,10 @@ const oneClickBasePath = "v2/1-clicks"
 
 // OneClickService is an interface for interacting with 1-clicks with the
 // DigitalOcean API.
-// See: https://developers.digitalocean.com/documentation/v2/#1-click-applications
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/1-Click-Applications
 type OneClickService interface {
 	List(context.Context, string) ([]*OneClick, *Response, error)
+	InstallKubernetes(context.Context, *InstallKubernetesAppsRequest) (*InstallKubernetesAppsResponse, *Response, error)
 }
 
 var _ OneClickService = &OneClickServiceOp{}
@@ -33,6 +34,17 @@ type OneClicksRoot struct {
 	List []*OneClick `json:"1_clicks"`
 }
 
+// InstallKubernetesAppsRequest represents a request required to install 1-click kubernetes apps
+type InstallKubernetesAppsRequest struct {
+	Slugs       []string `json:"addon_slugs"`
+	ClusterUUID string   `json:"cluster_uuid"`
+}
+
+// InstallKubernetesAppsResponse is the response of a kubernetes 1-click install request
+type InstallKubernetesAppsResponse struct {
+	Message string `json:"message"`
+}
+
 // List returns a list of the available 1-click applications.
 func (ocs *OneClickServiceOp) List(ctx context.Context, oneClickType string) ([]*OneClick, *Response, error) {
 	path := fmt.Sprintf(`%s?type=%s`, oneClickBasePath, oneClickType)
@@ -50,3 +62,20 @@ func (ocs *OneClickServiceOp) List(ctx context.Context, oneClickType string) ([]
 
 	return root.List, resp, nil
 }
+
+// InstallKubernetes installs an addon on a kubernetes cluster
+func (ocs *OneClickServiceOp) InstallKubernetes(ctx context.Context, install *InstallKubernetesAppsRequest) (*InstallKubernetesAppsResponse, *Response, error) {
+	path := fmt.Sprintf(oneClickBasePath + "/kubernetes")
+
+	req, err := ocs.client.NewRequest(ctx, http.MethodPost, path, install)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	responseMessage := new(InstallKubernetesAppsResponse)
+	resp, err := ocs.client.Do(ctx, req, responseMessage)
+	if err != nil {
+		return nil, resp, err
+	}
+	return responseMessage, resp, err
+}
diff --git a/1-click_test.go b/1-click_test.go
index 7b11f40..fca5d2a 100644
--- a/1-click_test.go
+++ b/1-click_test.go
@@ -20,6 +20,20 @@ var testOneClickJSON = `
       "type":"droplet"
     }
 `
+var testMessage = &InstallKubernetesAppsResponse{
+	Message: "test message",
+}
+
+var testMessageJSON = `
+{
+  "message" : "test message"
+}
+`
+
+var kubernetesPayload = &InstallKubernetesAppsRequest{
+	ClusterUUID: "123",
+	Slugs:       []string{"slug1", "slug2"},
+}
 
 func TestOneClick_List(t *testing.T) {
 	setup()
@@ -47,3 +61,20 @@ func TestOneClick_List(t *testing.T) {
 	require.NoError(t, err)
 	assert.Equal(t, want, got)
 }
+
+func TestOneClick_InstallKubernetes(t *testing.T) {
+	setup()
+	defer teardown()
+
+	svc := client.OneClick
+	path := "/v2/1-clicks/kubernetes"
+
+	mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodPost)
+		fmt.Fprint(w, testMessageJSON)
+	})
+
+	got, _, err := svc.InstallKubernetes(ctx, kubernetesPayload)
+	require.NoError(t, err)
+	assert.Equal(t, testMessage, got)
+}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c826205..9c1849b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,353 @@
 # Change Log
 
+## [v1.99.0] - 2023-04-24
+
+- #616 - @bentranter - Bump CI version for Go 1.20
+- #615 - @bentranter - Remove beta support for tokens API
+- #604 - @dvigueras - Add support for "Validate a Container Registry Name"
+- #613 - @ibilalkayy - updated the README file by showing up the build status icon
+
+## [v1.98.0] - 2023-03-09
+
+- #608 - @anitgandhi - client: don't process body upon 204 response
+- #607 - @gregmankes - add apps rewrites/redirects to app spec
+
+## [v1.97.0] - 2023-02-10
+
+- #601 - @jcodybaker - APPS-6813: update app platform - pending_deployment + timing
+- #602 - @jcodybaker - Use App Platform active deployment for GetLogs if not specified
+
+## [v1.96.0] - 2023-01-23
+
+- #599 - @markpaulson - Adding PromoteReplicaToPrimary to client interface.
+
+## [v1.95.0] - 2023-01-23
+
+- #595 - @dweinshenker - Add UpgradeMajorVersion to godo
+
+## [v1.94.0] - 2023-01-23
+
+- #596 - @DMW2151 - DBAAS-3906: Include updatePool for DB Clusters
+- #593 - @danaelhe - Add Uptime Checks and Alerts Support
+
+## [v1.93.0] - 2022-12-15
+
+- #591 - @andrewsomething - tokens: Add initial support for new API.
+
+## [v1.92.0] - 2022-12-14
+
+- #589 - @wez470 - load-balancers: Minor doc fixup
+- #585 - @StephenVarela - Add firewall support for load balancers
+- #587 - @StephenVarela - Support new http alerts for load balancers
+- #586 - @andrewsomething - godo.go: Sort service lists.
+- #583 - @ddebarros - Adds support for functions trigger API
+
+## [v1.91.1] - 2022-11-23
+
+- #582 - @StephenVarela - Load Balancers: Support new endpoints for http alerts
+
+## [v1.90.0] - 2022-11-16
+
+- #571 - @kraai - Add WaitForAvailable
+- #579 - @bentranter - Deprecate old pointer helpers, use generic one
+- #580 - @StephenVarela - LBAAS Fixup default http idle timeout behaviour
+- #578 - @StephenVarela - LBAAS-2430 Add support for HTTP idle timeout seconds
+- #577 - @ddebarros - Functions api support
+
+## [v1.89.0] - 2022-11-02
+
+- #575 - @ghostlandr - apps: add option to get projects data from Apps List endpoint
+
+## [v1.88.0] - 2022-10-31
+
+- #573 - @kamaln7 - apps: add ListBuildpacks, UpgradeBuildpack
+- #572 - @ghostlandr - Apps: add project id as a parameter to CreateApp and to the App struct
+- #570 - @kraai - Fix copy-and-paste error in comment
+- #568 - @StephenVarela - LBAAS-2321 Add project_id to load balancers structs
+
+## [v1.87.0] - 2022-10-12
+
+- #564 - @DWizGuy58 - Add public monitoring alert policies for dbaas
+- #565 - @dylanrhysscott - CON-5657 (Re-)expose public HA enablement flags in godo
+- #563 - @andrewsomething - Add option to configure a rate.Limiter for the client.
+
+## [v1.86.0] - 2022-09-23
+
+- #561 - @jonfriesen - apps: add docr image deploy on push
+
+## [v1.85.0] - 2022-09-21
+
+- #560 - @andrewsomething - Bump golang.org/x/net (fixes: #557).
+- #559 - @kamaln7 - apps: update component spec interfaces
+- #555 - @kamaln7 - apps: add accessor methods and spec helpers
+- #556 - @kamaln7 - update CI for go 1.18 & 1.19
+
+## [v1.84.1] - 2022-09-16
+
+- #554 - @andrewsomething - reserved IPs: project_id should have omitempty in create req.
+
+## [v1.84.0] - 2022-09-16
+
+- #552 - @andrewsomething - reserved IPs: Expose project_id and locked attributes.
+- #549 - @rpmoore - adding the replica id to the database replica model
+
+## [v1.83.0] - 2022-08-10
+
+- #546 - @DWizGuy58 - Add support for database options
+
+## [v1.82.0] - 2022-08-04
+
+- #544 - @andrewsomething - apps: Add URN() method.
+- #542 - @andrewsomething - databases: Support advanced config endpoints.
+- #543 - @nicktate - Ntate/detection models
+- #541 - @andrewsomething - droplets: Support listing Droplets filtered by name.
+- #540 - @bentranter - Update links to API documentation
+
+## [v1.81.0] - 2022-06-15
+
+- #532 - @senorprogrammer - Add support for Reserved IP addresses
+- #538 - @bentranter - util: update droplet create example
+- #537 - @rpmoore - Adding project_id to databases
+- #536 - @andrewsomething - account: Now may include info on current team.
+- #535 - @ElanHasson - APPS-5636 Update App Platform for functions and Starter Tier App Proposals.
+
+## [v1.80.0] - 2022-05-23
+
+- #533 - @ElanHasson - APPS-5636 - App Platform updates
+
+## [v1.79.0] - 2022-04-29
+
+- #530 - @anitgandhi - monitoring: alerts for Load Balancers TLS conns/s utilization
+- #529 - @ChiefMateStarbuck - Test against Go 1.18
+- #528 - @senorprogrammer - Remove DisablePublicNetworking option from the Create path
+- #527 - @senorprogrammer - Remove the WithFloatingIPAddress create option
+
+## [v1.78.0] - 2022-03-31
+
+- #522 - @jcodybaker - app platform: add support for features field
+
+## [v1.77.0] - 2022-03-16
+
+- #518 - @rcj4747 - apps: Update apps protos
+
+## [v1.76.0] - 2022-03-09
+
+- #516 - @CollinShoop - Add registry region support
+
+## [v1.75.0] - 2022-01-27
+
+- #508 - @ElanHasson - Synchronize public protos and add multiple specs
+
+## [v1.74.0] - 2022-01-20
+
+- #506 - @ZachEddy - Add new component type to apps-related structs
+
+## [v1.73.0] - 2021-12-03
+
+- #501 - @CollinShoop - Add support for Registry ListManifests and ListRepositoriesV2
+
+## [v1.72.0] - 2021-11-29
+
+- #500 - @ElanHasson - APPS-4420: Add PreservePathPrefix to AppRouteSpec
+
+## [v1.71.0] - 2021-11-09
+
+- #498 - @bojand - apps: update spec to include log destinations
+
+## [v1.70.0] - 2021-11-01
+
+- #491 - @andrewsomething - Add support for retrieving Droplet monitoring metrics.
+- #494 - @alexandear - Refactor tests: replace t.Errorf with assert/require
+- #495 - @alexandear - Fix typos and grammar issues in comments
+- #492 - @andrewsomething - Update golang.org/x/net
+- #486 - @abeltay - Fix typo on "DigitalOcean"
+
+## [v1.69.1] - 2021-10-06
+
+- #484 - @sunny-b - k8s/godo: remove ha field from update request
+
+## [v1.69.0] - 2021-10-04
+
+- #482 - @dikshant - godo/load-balancers: add DisableLetsEncryptDNSRecords field for LBaaS
+
+## [v1.68.0] - 2021-09-29
+
+- #480 - @sunny-b - kubernetes: add support for HA control plane
+
+## [v1.67.0] - 2021-09-22
+
+- #478 - @sunny-b - kubernetes: add supported_features field to the kubernetes/options response
+- #477 - @wez470 - Add size unit to LB API.
+
+## [v1.66.0] - 2021-09-21
+
+- #473 - @andrewsomething - Add Go 1.17.x to test matrix and drop unsupported versions.
+- #472 - @bsnyder788 - insights: add private (in/out)bound and public inbound bandwidth aler…
+- #470 - @gottwald - domains: remove invalid json struct tag option
+
+## [v1.65.0] - 2021-08-05
+
+- #468 - @notxarb - New alerts feature for App Platform
+- #467 - @andrewsomething - docs: Update links to API documentation.
+- #466 - @andrewsomething - Mark Response.Monitor as deprecated.
+
+## [v1.64.2] - 2021-07-23
+
+- #464 - @bsnyder788 - insights: update HTTP method for alert policy update
+
+## [v1.64.1] - 2021-07-19
+
+- #462 - @bsnyder788 - insights: fix alert policy update endpoint
+
+## [v1.64.0] - 2021-07-19
+
+- #460 - @bsnyder788 - insights: add CRUD APIs for alert policies
+
+## [v1.63.0] - 2021-07-06
+
+- #458 - @ZachEddy - apps: Add tail_lines query parameter to GetLogs function
+
+## [v1.62.0] - 2021-06-07
+
+- #454 - @house-lee - add with_droplet_agent option to create requests
+
+## [v1.61.0] - 2021-05-12
+
+- #452 - @caiofilipini - Add support for DOKS clusters as peers in Firewall rules
+- #448 - @andrewsomething - flip: Set omitempty for Region in FloatingIPCreateRequest.
+- #451 - @andrewsomething - CheckResponse: Add RequestID from header to ErrorResponse when missing from body.
+- #450 - @nanzhong - dbaas: handle ca certificates as base64 encoded
+- #449 - @nanzhong - dbaas: add support for getting cluster CA
+- #446 - @kamaln7 - app spec: update cors policy
+
+## [v1.60.0] - 2021-04-04
+
+- #443 - @andrewsomething - apps: Support pagination.
+- #442 - @andrewsomething - dbaas: Support restoring from a backup.
+- #441 - @andrewsomething - k8s: Add URN method to KubernetesCluster.
+
+## [v1.59.0] - 2021-03-29
+
+- #439 - @andrewsomething - vpcs: Support listing members of a VPC.
+- #438 - @andrewsomething - Add Go 1.16.x to the testing matrix.
+
+## [v1.58.0] - 2021-02-17
+
+- #436 - @MorrisLaw - kubernetes: add name field to associated resources
+- #434 - @andrewsomething - sizes: Add description field.
+- #433 - @andrewsomething - Deprecate Name field in godo.DropletCreateVolume
+
+## [v1.57.0] - 2021-01-15
+
+- #429 - @varshavaradarajan - kubernetes: support optional cascading deletes for clusters
+- #430 - @jonfriesen - apps: updates apps.gen.go for gitlab addition
+- #431 - @nicktate - apps: update proto to support dockerhub registry type
+
+## [v1.56.0] - 2021-01-08
+
+- #422 - @kamaln7 - apps: add ProposeApp method
+
+## [v1.55.0] - 2021-01-07
+
+- #425 - @adamwg - registry: Support the storage usage indicator
+- #423 - @ChiefMateStarbuck - Updated README example
+- #421 - @andrewsomething - Add some basic input cleaning to NewFromToken
+- #420 - @bentranter - Don't set "Content-Type" header on GET requests
+
+## [v1.54.0] - 2020-11-24
+
+- #417 - @waynr - registry: add support for garbage collection types
+
+## [v1.53.0] - 2020-11-20
+
+- #414 - @varshavaradarajan - kubernetes: add clusterlint support
+- #413 - @andrewsomething - images: Support updating distribution and description.
+
+## [v1.52.0] - 2020-11-05
+
+- #411 - @nicktate - apps: add unspecified type to image source registry types
+- #409 - @andrewsomething - registry: Add support for updating a subscription.
+- #408 - @nicktate - apps: update spec to include image source
+- #407 - @kamaln7 - apps: add the option to force build a new deployment
+
+## [v1.51.0] - 2020-11-02
+
+- #405 - @adamwg - registry: Support subscription options
+- #398 - @reeseconor - Add support for caching dependencies between GitHub Action runs
+- #404 - @andrewsomething - CONTRIBUTING.md: Suggest using github-changelog-generator.
+
+## [v1.50.0] - 2020-10-26
+
+- #400 - @waynr - registry: add garbage collection support
+- #402 - @snormore - apps: add catchall_document static site spec field and failed-deploy job type
+- #401 - @andrewlouis93 - VPC: adds option to set a VPC as the regional default
+
+## [v1.49.0] - 2020-10-21
+
+- #383 - @kamaln7 - apps: add ListRegions, Get/ListTiers, Get/ListInstanceSizes
+- #390 - @snormore - apps: add service spec internal_ports
+
+## [v1.48.0] - 2020-10-16
+
+- #388 - @varshavaradarajan - kubernetes - change docr integration api routes
+- #386 - @snormore - apps: pull in recent updates to jobs and domains
+
+## [v1.47.0] - 2020-10-14
+
+- #384 kubernetes - add registry related doks apis - @varshavaradarajan
+- #385 Fixed some typo in apps.gen.go and databases.go file - @devil-cyber
+- #382 Add GetKubeConfigWithExpiry (#334) - @ivanlemeshev
+- #381 Fix golint issues #377 - @sidsbrmnn
+- #380 refactor: Cyclomatic complexity issue - @DonRenando
+- #379 Run gofmt to fix some issues in codebase - @mycodeself
+
+## [v1.46.0] - 2020-10-05
+
+- #373 load balancers: add LB size field, currently in closed beta - @anitgandhi
+
+## [v1.45.0] - 2020-09-25
+
+**Note**: This release contains breaking changes to App Platform features currently in closed beta.
+
+- #369 update apps types to latest - @kamaln7
+- #368 Kubernetes: add taints field to node pool create and update requests - @timoreimann
+- #367 update apps types, address marshaling bug - @kamaln7
+
+## [v1.44.0] - 2020-09-08
+
+- #364 apps: support aggregate deployment logs - @kamaln7
+
+## [v1.43.0] - 2020-09-08
+
+- #362 update apps types - @kamaln7
+
+## [v1.42.1] - 2020-08-06
+
+- #360 domains: Allow for SRV records with port 0. - @andrewsomething
+
+## [v1.42.0] - 2020-07-22
+
+- #357 invoices: add category to InvoiceItem - @rbutler
+- #358 apps: add support for following logs - @nanzhong
+
+## [v1.41.0] - 2020-07-17
+
+- #355 kubernetes: Add support for surge upgrades - @varshavaradarajan
+
+## [v1.40.0] - 2020-07-16
+
+- #347 Make Rate limits thread safe - @roidelapluie
+- #353 Reuse TCP connection - @itsksaurabh
+
+## [v1.39.0] - 2020-07-14
+
+- #345, #346 Add app platform support [beta] - @nanzhong
+
+## [v1.38.0] - 2020-06-18
+
+- #341 Install 1-click applications on a Kubernetes cluster - @keladhruv
+- #340 Add RecordsByType, RecordsByName and RecordsByTypeAndName to the DomainsService - @viola
+
 ## [v1.37.0] - 2020-06-01
 
 - #336 registry: URL encode repository names when building URLs. @adamwg
@@ -238,16 +586,19 @@
 ## [v1.1.0] - 2017-06-06
 
 ### Added
+
 - #145 Add FirewallsService for managing Firewalls with the DigitalOcean API. - @viola
 - #139 Add TTL field to the Domains. - @xmudrii
 
 ### Fixed
+
 - #143 Fix oauth2.NoContext depreciation. - @jbowens
 - #141 Fix DropletActions on tagged resources. - @xmudrii
 
 ## [v1.0.0] - 2017-03-10
 
 ### Added
+
 - #130 Add Convert to ImageActionsService. - @xmudrii
 - #126 Add CertificatesService for managing certificates with the DigitalOcean API. - @viola
 - #125 Add LoadBalancersService for managing load balancers with the DigitalOcean API. - @viola
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 33f0313..23bbe20 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -25,11 +25,11 @@ go test -mod=vendor .
 
 Godo follows [semver](https://www.semver.org) versioning semantics.
 New functionality should be accompanied by increment to the minor
-version number. Any code merged to master is subject to release.
+version number. Any code merged to main is subject to release.
 
 ## Releasing
 
-Releasing a new version of godo is currently a manual process. 
+Releasing a new version of godo is currently a manual process.
 
 Submit a separate pull request for the version change from the pull
 request with your changes.
@@ -38,17 +38,32 @@ request with your changes.
    for the next (unreleased) version does not exist, create one.
    Include one bullet point for each piece of new functionality in the
    release, including the pull request ID, description, and author(s).
+   For example:
 
 ```
 ## [v1.8.0] - 2019-03-13
 
-- #210 Expose tags on storage volume create/list/get. - @jcodybaker
-- #123 Update test dependencies - @digitalocean
+- #210 - @jcodybaker - Expose tags on storage volume create/list/get.
+- #123 - @digitalocean - Update test dependencies
+```
+
+   To generate a list of changes since the previous release in the correct
+   format, you can use [github-changelog-generator](https://github.com/digitalocean/github-changelog-generator).
+   It can be installed from source by running:
+
+```
+go get -u github.com/digitalocean/github-changelog-generator
+```
+
+   Next, list the changes by running:
+
+```
+github-changelog-generator -org digitalocean -repo godo
 ```
 
 2. Update the `libraryVersion` number in `godo.go`.
 3. Make a pull request with these changes.  This PR should be separate from the PR containing the godo changes.
-4. Once the pull request has been merged, [draft a new release](https://github.com/digitalocean/godo/releases/new).  
-5. Update the `Tag version` and `Release title` field with the new godo version.  Be sure the version has a `v` prefixed in both places. Ex `v1.8.0`.  
+4. Once the pull request has been merged, [draft a new release](https://github.com/digitalocean/godo/releases/new).
+5. Update the `Tag version` and `Release title` field with the new godo version.  Be sure the version has a `v` prefixed in both places. Ex `v1.8.0`.
 6. Copy the changelog bullet points to the description field.
 7. Publish the release.
diff --git a/README.md b/README.md
index cadeb69..4c9ee2d 100644
--- a/README.md
+++ b/README.md
@@ -1,13 +1,13 @@
 # Godo
 
-[![Build Status](https://travis-ci.org/digitalocean/godo.svg)](https://travis-ci.org/digitalocean/godo)
+[![GitHub Actions CI](https://github.com/digitalocean/godo/actions/workflows/ci.yml/badge.svg)](https://github.com/digitalocean/godo/actions/workflows/ci.yml)
 [![GoDoc](https://godoc.org/github.com/digitalocean/godo?status.svg)](https://godoc.org/github.com/digitalocean/godo)
 
 Godo is a Go client library for accessing the DigitalOcean V2 API.
 
 You can view the client API docs here: [http://godoc.org/github.com/digitalocean/godo](http://godoc.org/github.com/digitalocean/godo)
 
-You can view DigitalOcean API docs here: [https://developers.digitalocean.com/documentation/v2/](https://developers.digitalocean.com/documentation/v2/)
+You can view DigitalOcean API docs here: [https://docs.digitalocean.com/reference/api/api-reference/](https://docs.digitalocean.com/reference/api/api-reference/)
 
 ## Install
 ```sh
@@ -66,7 +66,7 @@ createRequest := &godo.DropletCreateRequest{
     Region: "nyc3",
     Size:   "s-1vcpu-1gb",
     Image: godo.DropletCreateImage{
-        Slug: "ubuntu-14-04-x64",
+        Slug: "ubuntu-20-04-x64",
     },
 }
 
@@ -98,9 +98,7 @@ func DropletList(ctx context.Context, client *godo.Client) ([]godo.Droplet, erro
         }
 
         // append the current page's droplets to our list
-        for _, d := range droplets {
-            list = append(list, d)
-        }
+        list = append(list, droplets...)
 
         // if we are at the last page, break out the for loop
         if resp.Links == nil || resp.Links.IsLastPage() {
@@ -120,6 +118,43 @@ func DropletList(ctx context.Context, client *godo.Client) ([]godo.Droplet, erro
 }
 ```
 
+Some endpoints offer token based pagination. For example, to fetch all Registry Repositories:
+
+```go
+func ListRepositoriesV2(ctx context.Context, client *godo.Client, registryName string) ([]*godo.RepositoryV2, error) {
+    // create a list to hold our registries
+    list := []*godo.RepositoryV2{}
+
+    // create options. initially, these will be blank
+    opt := &godo.TokenListOptions{}
+    for {
+        repositories, resp, err := client.Registry.ListRepositoriesV2(ctx, registryName, opt)
+        if err != nil {
+            return nil, err
+        }
+
+        // append the current page's registries to our list
+        list = append(list, repositories...)
+
+        // if we are at the last page, break out the for loop
+        if resp.Links == nil || resp.Links.IsLastPage() {
+            break
+        }
+
+        // grab the next page token
+        nextPageToken, err := resp.Links.NextPageToken()
+        if err != nil {
+            return nil, err
+        }
+
+        // provide the next page token for the next request
+        opt.Token = nextPageToken
+    }
+
+    return list, nil
+}
+```
+
 ## Versioning
 
 Each version of the client is tagged and the version is updated accordingly.
@@ -129,7 +164,7 @@ To see the list of past versions, run `git tag`.
 
 ## Documentation
 
-For a comprehensive list of examples, check out the [API documentation](https://developers.digitalocean.com/documentation/v2/).
+For a comprehensive list of examples, check out the [API documentation](https://docs.digitalocean.com/reference/api/api-reference/#tag/SSH-Keys).
 
 For details on all the functionality in this library, see the [GoDoc](http://godoc.org/github.com/digitalocean/godo) documentation.
 
diff --git a/account.go b/account.go
index 7d3e105..48582c9 100644
--- a/account.go
+++ b/account.go
@@ -7,7 +7,7 @@ import (
 
 // AccountService is an interface for interfacing with the Account
 // endpoints of the DigitalOcean API
-// See: https://developers.digitalocean.com/documentation/v2/#account
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Account
 type AccountService interface {
 	Get(context.Context) (*Account, *Response, error)
 }
@@ -22,14 +22,22 @@ var _ AccountService = &AccountServiceOp{}
 
 // Account represents a DigitalOcean Account
 type Account struct {
-	DropletLimit    int    `json:"droplet_limit,omitempty"`
-	FloatingIPLimit int    `json:"floating_ip_limit,omitempty"`
-	VolumeLimit     int    `json:"volume_limit,omitempty"`
-	Email           string `json:"email,omitempty"`
-	UUID            string `json:"uuid,omitempty"`
-	EmailVerified   bool   `json:"email_verified,omitempty"`
-	Status          string `json:"status,omitempty"`
-	StatusMessage   string `json:"status_message,omitempty"`
+	DropletLimit    int       `json:"droplet_limit,omitempty"`
+	FloatingIPLimit int       `json:"floating_ip_limit,omitempty"`
+	ReservedIPLimit int       `json:"reserved_ip_limit,omitempty"`
+	VolumeLimit     int       `json:"volume_limit,omitempty"`
+	Email           string    `json:"email,omitempty"`
+	UUID            string    `json:"uuid,omitempty"`
+	EmailVerified   bool      `json:"email_verified,omitempty"`
+	Status          string    `json:"status,omitempty"`
+	StatusMessage   string    `json:"status_message,omitempty"`
+	Team            *TeamInfo `json:"team,omitempty"`
+}
+
+// TeamInfo contains information about the current team context.
+type TeamInfo struct {
+	Name string `json:"name,omitempty"`
+	UUID string `json:"uuid,omitempty"`
 }
 
 type accountRoot struct {
diff --git a/account_test.go b/account_test.go
index 47ca389..934cddf 100644
--- a/account_test.go
+++ b/account_test.go
@@ -18,6 +18,7 @@ func TestAccountGet(t *testing.T) {
 		{ "account": {
 			"droplet_limit": 25,
 			"floating_ip_limit": 25,
+			"reserved_ip_limit": 25,
 			"volume_limit": 22,
 			"email": "sammy@digitalocean.com",
 			"uuid": "b6fr89dbf6d9156cace5f3c78dc9851d957381ef",
@@ -33,7 +34,7 @@ func TestAccountGet(t *testing.T) {
 		t.Errorf("Account.Get returned error: %v", err)
 	}
 
-	expected := &Account{DropletLimit: 25, FloatingIPLimit: 25, Email: "sammy@digitalocean.com",
+	expected := &Account{DropletLimit: 25, FloatingIPLimit: 25, ReservedIPLimit: 25, Email: "sammy@digitalocean.com",
 		UUID: "b6fr89dbf6d9156cace5f3c78dc9851d957381ef", EmailVerified: true, VolumeLimit: 22}
 	if !reflect.DeepEqual(acct, expected) {
 		t.Errorf("Account.Get returned %+v, expected %+v", acct, expected)
@@ -44,18 +45,91 @@ func TestAccountString(t *testing.T) {
 	acct := &Account{
 		DropletLimit:    25,
 		FloatingIPLimit: 25,
+		ReservedIPLimit: 25,
+		VolumeLimit:     22,
 		Email:           "sammy@digitalocean.com",
 		UUID:            "b6fr89dbf6d9156cace5f3c78dc9851d957381ef",
 		EmailVerified:   true,
 		Status:          "active",
 		StatusMessage:   "message",
+	}
+
+	stringified := acct.String()
+	expected := `godo.Account{DropletLimit:25, FloatingIPLimit:25, ReservedIPLimit:25, VolumeLimit:22, Email:"sammy@digitalocean.com", UUID:"b6fr89dbf6d9156cace5f3c78dc9851d957381ef", EmailVerified:true, Status:"active", StatusMessage:"message"}`
+	if expected != stringified {
+		t.Errorf("\n     got %+v\nexpected %+v", stringified, expected)
+	}
+
+}
+
+func TestAccountGetWithTeam(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/v2/account", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+
+		response := `
+		{ "account": {
+			"droplet_limit": 25,
+			"floating_ip_limit": 25,
+			"volume_limit": 22,
+			"email": "sammy@digitalocean.com",
+			"uuid": "b6fr89dbf6d9156cace5f3c78dc9851d957381ef",
+			"email_verified": true,
+			"team": {
+				"name": "My Team",
+				"uuid": "b6fr89dbf6d9156cace5f3c78dc9851d957381ef"
+			}
+			}
+		}`
+
+		fmt.Fprint(w, response)
+	})
+
+	acct, _, err := client.Account.Get(ctx)
+	if err != nil {
+		t.Errorf("Account.Get returned error: %v", err)
+	}
+
+	expected := &Account{
+		DropletLimit:    25,
+		FloatingIPLimit: 25,
+		Email:           "sammy@digitalocean.com",
+		UUID:            "b6fr89dbf6d9156cace5f3c78dc9851d957381ef",
+		EmailVerified:   true,
 		VolumeLimit:     22,
+		Team: &TeamInfo{
+			Name: "My Team",
+			UUID: "b6fr89dbf6d9156cace5f3c78dc9851d957381ef",
+		},
+	}
+	if !reflect.DeepEqual(acct, expected) {
+		t.Errorf("Account.Get returned %+v, expected %+v", acct, expected)
+	}
+}
+
+func TestAccountStringWithTeam(t *testing.T) {
+	acct := &Account{
+		DropletLimit:    25,
+		FloatingIPLimit: 25,
+		ReservedIPLimit: 25,
+		VolumeLimit:     22,
+		Email:           "sammy@digitalocean.com",
+		UUID:            "b6fr89dbf6d9156cace5f3c78dc9851d957381ef",
+		EmailVerified:   true,
+		Status:          "active",
+		StatusMessage:   "message",
+		Team: &TeamInfo{
+			Name: "My Team",
+			UUID: "b6fr89dbf6d9156cace5f3c78dc9851d957381ef",
+		},
 	}
 
 	stringified := acct.String()
-	expected := `godo.Account{DropletLimit:25, FloatingIPLimit:25, VolumeLimit:22, Email:"sammy@digitalocean.com", UUID:"b6fr89dbf6d9156cace5f3c78dc9851d957381ef", EmailVerified:true, Status:"active", StatusMessage:"message"}`
+	expected := `godo.Account{DropletLimit:25, FloatingIPLimit:25, ReservedIPLimit:25, VolumeLimit:22, Email:"sammy@digitalocean.com", UUID:"b6fr89dbf6d9156cace5f3c78dc9851d957381ef", EmailVerified:true, Status:"active", StatusMessage:"message", Team:godo.TeamInfo{Name:"My Team", UUID:"b6fr89dbf6d9156cace5f3c78dc9851d957381ef"}}`
 	if expected != stringified {
-		t.Errorf("Account.String returned %+v, expected %+v", stringified, expected)
+		t.Errorf("\n     got %+v\nexpected %+v", stringified, expected)
 	}
 
 }
diff --git a/action.go b/action.go
index e317600..07ee914 100644
--- a/action.go
+++ b/action.go
@@ -16,14 +16,14 @@ const (
 	ActionCompleted = "completed"
 )
 
-// ActionsService handles communction with action related methods of the
-// DigitalOcean API: https://developers.digitalocean.com/documentation/v2#actions
+// ActionsService handles communication with action related methods of the
+// DigitalOcean API: https://docs.digitalocean.com/reference/api/api-reference/#tag/Actions
 type ActionsService interface {
 	List(context.Context, *ListOptions) ([]Action, *Response, error)
 	Get(context.Context, int) (*Action, *Response, error)
 }
 
-// ActionsServiceOp handles communition with the image action related methods of the
+// ActionsServiceOp handles communication with the image action related methods of the
 // DigitalOcean API.
 type ActionsServiceOp struct {
 	client *Client
diff --git a/apps.gen.go b/apps.gen.go
new file mode 100644
index 0000000..8bb8885
--- /dev/null
+++ b/apps.gen.go
@@ -0,0 +1,1118 @@
+// Code generated automatically. DO NOT EDIT.
+
+package godo
+
+import (
+	"time"
+)
+
+// AppAlert Represents an alert configured for an app or component.
+type AppAlert struct {
+	// The ID of the alert. This will be auto-generated by App Platform once the spec is submitted.
+	ID string `json:"id,omitempty"`
+	// Name of the component this alert applies to.
+	ComponentName string        `json:"component_name,omitempty"`
+	Spec          *AppAlertSpec `json:"spec,omitempty"`
+	// Email destinations for the alert when triggered.
+	Emails []string `json:"emails,omitempty"`
+	// Slack webhook destinations for the alert when triggered.
+	SlackWebhooks []*AppAlertSlackWebhook `json:"slack_webhooks,omitempty"`
+	Phase         AppAlertPhase           `json:"phase,omitempty"`
+	Progress      *AppAlertProgress       `json:"progress,omitempty"`
+}
+
+// AppAlertPhase the model 'AppAlertPhase'
+type AppAlertPhase string
+
+// List of AppAlertPhase
+const (
+	AppAlertPhase_Unknown     AppAlertPhase = "UNKNOWN"
+	AppAlertPhase_Pending     AppAlertPhase = "PENDING"
+	AppAlertPhase_Configuring AppAlertPhase = "CONFIGURING"
+	AppAlertPhase_Active      AppAlertPhase = "ACTIVE"
+	AppAlertPhase_Error       AppAlertPhase = "ERROR"
+)
+
+// AppAlertProgress struct for AppAlertProgress
+type AppAlertProgress struct {
+	Steps []*AppAlertProgressStep `json:"steps,omitempty"`
+}
+
+// AppAlertProgressStep struct for AppAlertProgressStep
+type AppAlertProgressStep struct {
+	Name      string                      `json:"name,omitempty"`
+	Status    AppAlertProgressStepStatus  `json:"status,omitempty"`
+	Steps     []*AppAlertProgressStep     `json:"steps,omitempty"`
+	StartedAt time.Time                   `json:"started_at,omitempty"`
+	EndedAt   time.Time                   `json:"ended_at,omitempty"`
+	Reason    *AppAlertProgressStepReason `json:"reason,omitempty"`
+}
+
+// AppAlertProgressStepReason struct for AppAlertProgressStepReason
+type AppAlertProgressStepReason struct {
+	Code    string `json:"code,omitempty"`
+	Message string `json:"message,omitempty"`
+}
+
+// AppAlertProgressStepStatus the model 'AppAlertProgressStepStatus'
+type AppAlertProgressStepStatus string
+
+// List of AppAlertProgressStepStatus
+const (
+	AppAlertProgressStepStatus_Unknown AppAlertProgressStepStatus = "UNKNOWN"
+	AppAlertProgressStepStatus_Pending AppAlertProgressStepStatus = "PENDING"
+	AppAlertProgressStepStatus_Running AppAlertProgressStepStatus = "RUNNING"
+	AppAlertProgressStepStatus_Error   AppAlertProgressStepStatus = "ERROR"
+	AppAlertProgressStepStatus_Success AppAlertProgressStepStatus = "SUCCESS"
+)
+
+// AppAlertSlackWebhook Configuration of a Slack alerting destination.
+type AppAlertSlackWebhook struct {
+	// URL for the Slack webhook. The value will be encrypted on the app spec after it is submitted.
+	URL string `json:"url,omitempty"`
+	// Name of the Slack channel.
+	Channel string `json:"channel,omitempty"`
+}
+
+// App An application's configuration and status.
+type App struct {
+	ID                      string          `json:"id,omitempty"`
+	OwnerUUID               string          `json:"owner_uuid,omitempty"`
+	Spec                    *AppSpec        `json:"spec"`
+	LastDeploymentActiveAt  time.Time       `json:"last_deployment_active_at,omitempty"`
+	DefaultIngress          string          `json:"default_ingress,omitempty"`
+	CreatedAt               time.Time       `json:"created_at,omitempty"`
+	UpdatedAt               time.Time       `json:"updated_at,omitempty"`
+	ActiveDeployment        *Deployment     `json:"active_deployment,omitempty"`
+	InProgressDeployment    *Deployment     `json:"in_progress_deployment,omitempty"`
+	PendingDeployment       *Deployment     `json:"pending_deployment,omitempty"`
+	LastDeploymentCreatedAt time.Time       `json:"last_deployment_created_at,omitempty"`
+	LiveURL                 string          `json:"live_url,omitempty"`
+	Region                  *AppRegion      `json:"region,omitempty"`
+	TierSlug                string          `json:"tier_slug,omitempty"`
+	LiveURLBase             string          `json:"live_url_base,omitempty"`
+	LiveDomain              string          `json:"live_domain,omitempty"`
+	Domains                 []*AppDomain    `json:"domains,omitempty"`
+	PinnedDeployment        *Deployment     `json:"pinned_deployment,omitempty"`
+	BuildConfig             *AppBuildConfig `json:"build_config,omitempty"`
+	// The id of the project for the app. This will be empty if there is a fleet (project) lookup failure.
+	ProjectID string `json:"project_id,omitempty"`
+}
+
+// AppAlertSpec Configuration of an alert for the app or a individual component.
+type AppAlertSpec struct {
+	Rule AppAlertSpecRule `json:"rule,omitempty"`
+	// Determines whether or not the alert is disabled.
+	Disabled bool                 `json:"disabled,omitempty"`
+	Operator AppAlertSpecOperator `json:"operator,omitempty"`
+	// The meaning is dependent upon the rule. It is used in conjunction with the operator and window to determine when an alert should trigger.
+	Value  float32            `json:"value,omitempty"`
+	Window AppAlertSpecWindow `json:"window,omitempty"`
+}
+
+// AppAlertSpecOperator the model 'AppAlertSpecOperator'
+type AppAlertSpecOperator string
+
+// List of AppAlertSpecOperator
+const (
+	AppAlertSpecOperator_UnspecifiedOperator AppAlertSpecOperator = "UNSPECIFIED_OPERATOR"
+	AppAlertSpecOperator_GreaterThan         AppAlertSpecOperator = "GREATER_THAN"
+	AppAlertSpecOperator_LessThan            AppAlertSpecOperator = "LESS_THAN"
+)
+
+// AppAlertSpecRule  - CPU_UTILIZATION: Represents CPU for a given container instance. Only applicable at the component level.  - MEM_UTILIZATION: Represents RAM for a given container instance. Only applicable at the component level.  - RESTART_COUNT: Represents restart count for a given container instance. Only applicable at the component level.  - DEPLOYMENT_FAILED: Represents whether a deployment has failed. Only applicable at the app level.  - DEPLOYMENT_LIVE: Represents whether a deployment has succeeded. Only applicable at the app level.  - DOMAIN_FAILED: Represents whether a domain configuration has failed. Only applicable at the app level.  - DOMAIN_LIVE: Represents whether a domain configuration has succeeded. Only applicable at the app level.  - FUNCTIONS_ACTIVATION_COUNT: Represents an activation count for a given functions instance. Only applicable to functions components.  - FUNCTIONS_AVERAGE_DURATION_MS: Represents the average duration for function runtimes. Only applicable to functions components.  - FUNCTIONS_ERROR_RATE_PER_MINUTE: Represents an error rate per minute for a given functions instance. Only applicable to functions components.  - FUNCTIONS_AVERAGE_WAIT_TIME_MS: Represents the average wait time for functions. Only applicable to functions components.  - FUNCTIONS_ERROR_COUNT: Represents an error count for a given functions instance. Only applicable to functions components.  - FUNCTIONS_GB_RATE_PER_SECOND: Represents the rate of memory consumption (GB x seconds) for functions. Only applicable to functions components.
+type AppAlertSpecRule string
+
+// List of AppAlertSpecRule
+const (
+	AppAlertSpecRule_UnspecifiedRule             AppAlertSpecRule = "UNSPECIFIED_RULE"
+	AppAlertSpecRule_CPUUtilization              AppAlertSpecRule = "CPU_UTILIZATION"
+	AppAlertSpecRule_MemUtilization              AppAlertSpecRule = "MEM_UTILIZATION"
+	AppAlertSpecRule_RestartCount                AppAlertSpecRule = "RESTART_COUNT"
+	AppAlertSpecRule_DeploymentFailed            AppAlertSpecRule = "DEPLOYMENT_FAILED"
+	AppAlertSpecRule_DeploymentLive              AppAlertSpecRule = "DEPLOYMENT_LIVE"
+	AppAlertSpecRule_DomainFailed                AppAlertSpecRule = "DOMAIN_FAILED"
+	AppAlertSpecRule_DomainLive                  AppAlertSpecRule = "DOMAIN_LIVE"
+	AppAlertSpecRule_FunctionsActivationCount    AppAlertSpecRule = "FUNCTIONS_ACTIVATION_COUNT"
+	AppAlertSpecRule_FunctionsAverageDurationMS  AppAlertSpecRule = "FUNCTIONS_AVERAGE_DURATION_MS"
+	AppAlertSpecRule_FunctionsErrorRatePerMinute AppAlertSpecRule = "FUNCTIONS_ERROR_RATE_PER_MINUTE"
+	AppAlertSpecRule_FunctionsAverageWaitTimeMs  AppAlertSpecRule = "FUNCTIONS_AVERAGE_WAIT_TIME_MS"
+	AppAlertSpecRule_FunctionsErrorCount         AppAlertSpecRule = "FUNCTIONS_ERROR_COUNT"
+	AppAlertSpecRule_FunctionsGBRatePerSecond    AppAlertSpecRule = "FUNCTIONS_GB_RATE_PER_SECOND"
+)
+
+// AppAlertSpecWindow the model 'AppAlertSpecWindow'
+type AppAlertSpecWindow string
+
+// List of AppAlertSpecWindow
+const (
+	AppAlertSpecWindow_UnspecifiedWindow AppAlertSpecWindow = "UNSPECIFIED_WINDOW"
+	AppAlertSpecWindow_FiveMinutes       AppAlertSpecWindow = "FIVE_MINUTES"
+	AppAlertSpecWindow_TenMinutes        AppAlertSpecWindow = "TEN_MINUTES"
+	AppAlertSpecWindow_ThirtyMinutes     AppAlertSpecWindow = "THIRTY_MINUTES"
+	AppAlertSpecWindow_OneHour           AppAlertSpecWindow = "ONE_HOUR"
+)
+
+// AppBuildConfig struct for AppBuildConfig
+type AppBuildConfig struct {
+	CNBVersioning *AppBuildConfigCNBVersioning `json:"cnb_versioning,omitempty"`
+}
+
+// AppBuildConfigCNBVersioning struct for AppBuildConfigCNBVersioning
+type AppBuildConfigCNBVersioning struct {
+	// List of versioned buildpacks used for the application.  Buildpacks are only versioned based on the major semver version, therefore exact versions will not be available at the app build config.
+	Buildpacks []*Buildpack `json:"buildpacks,omitempty"`
+	// A version id that represents the underlying CNB stack. The version of the stack indicates what buildpacks are supported.
+	StackID string `json:"stack_id,omitempty"`
+}
+
+// AppDatabaseSpec struct for AppDatabaseSpec
+type AppDatabaseSpec struct {
+	// The name. Must be unique across all components within the same app.
+	Name    string                `json:"name"`
+	Engine  AppDatabaseSpecEngine `json:"engine,omitempty"`
+	Version string                `json:"version,omitempty"`
+	// Deprecated.
+	Size string `json:"size,omitempty"`
+	// Deprecated.
+	NumNodes int64 `json:"num_nodes,omitempty"`
+	// Whether this is a production or dev database.
+	Production bool `json:"production,omitempty"`
+	// The name of the underlying DigitalOcean DBaaS cluster. This is required for production databases. For dev databases, if cluster_name is not set, a new cluster will be provisioned.
+	ClusterName string `json:"cluster_name,omitempty"`
+	// The name of the MySQL or PostgreSQL database to configure.
+	DBName string `json:"db_name,omitempty"`
+	// The name of the MySQL or PostgreSQL user to configure.
+	DBUser string `json:"db_user,omitempty"`
+}
+
+// AppDatabaseSpecEngine the model 'AppDatabaseSpecEngine'
+type AppDatabaseSpecEngine string
+
+// List of AppDatabaseSpecEngine
+const (
+	AppDatabaseSpecEngine_Unset   AppDatabaseSpecEngine = "UNSET"
+	AppDatabaseSpecEngine_MySQL   AppDatabaseSpecEngine = "MYSQL"
+	AppDatabaseSpecEngine_PG      AppDatabaseSpecEngine = "PG"
+	AppDatabaseSpecEngine_Redis   AppDatabaseSpecEngine = "REDIS"
+	AppDatabaseSpecEngine_MongoDB AppDatabaseSpecEngine = "MONGODB"
+)
+
+// AppDomainSpec struct for AppDomainSpec
+type AppDomainSpec struct {
+	Domain   string            `json:"domain"`
+	Type     AppDomainSpecType `json:"type,omitempty"`
+	Wildcard bool              `json:"wildcard,omitempty"`
+	// Optional. If the domain uses DigitalOcean DNS and you would like App Platform to automatically manage it for you, set this to the name of the domain on your account.  For example, If the domain you are adding is `app.domain.com`, the zone could be `domain.com`.
+	Zone        string `json:"zone,omitempty"`
+	Certificate string `json:"certificate,omitempty"`
+	// Optional. The minimum version of TLS a client application can use to access resources for the domain.  Must be one of the following values wrapped within quotations: `\"1.2\"` or `\"1.3\"`.
+	MinimumTLSVersion string `json:"minimum_tls_version,omitempty"`
+}
+
+// AppDomainSpecType the model 'AppDomainSpecType'
+type AppDomainSpecType string
+
+// List of AppDomainSpecType
+const (
+	AppDomainSpecType_Unspecified AppDomainSpecType = "UNSPECIFIED"
+	AppDomainSpecType_Default     AppDomainSpecType = "DEFAULT"
+	AppDomainSpecType_Primary     AppDomainSpecType = "PRIMARY"
+	AppDomainSpecType_Alias       AppDomainSpecType = "ALIAS"
+)
+
+// AppFunctionsSpec struct for AppFunctionsSpec
+type AppFunctionsSpec struct {
+	// The name. Must be unique across all components within the same app.
+	Name   string            `json:"name"`
+	Git    *GitSourceSpec    `json:"git,omitempty"`
+	GitHub *GitHubSourceSpec `json:"github,omitempty"`
+	GitLab *GitLabSourceSpec `json:"gitlab,omitempty"`
+	// An optional path to the working directory to use for the build. Must be relative to the root of the repo.
+	SourceDir string `json:"source_dir,omitempty"`
+	// A list of environment variables made available to the component.
+	Envs []*AppVariableDefinition `json:"envs,omitempty"`
+	// A list of HTTP routes that should be routed to this component.
+	Routes []*AppRouteSpec `json:"routes,omitempty"`
+	// A list of configured alerts the user has enabled.
+	Alerts []*AppAlertSpec `json:"alerts,omitempty"`
+	// A list of configured log forwarding destinations.
+	LogDestinations []*AppLogDestinationSpec `json:"log_destinations,omitempty"`
+	CORS            *AppCORSPolicy           `json:"cors,omitempty"`
+}
+
+// AppIngressSpec Specification for app ingress configurations.
+type AppIngressSpec struct {
+	LoadBalancer     AppIngressSpecLoadBalancer `json:"load_balancer,omitempty"`
+	LoadBalancerSize int64                      `json:"load_balancer_size,omitempty"`
+	// Rules for configuring HTTP ingress for component routes, CORS, rewrites, and redirects.
+	Rules []*AppIngressSpecRule `json:"rules,omitempty"`
+}
+
+// AppIngressSpecLoadBalancer the model 'AppIngressSpecLoadBalancer'
+type AppIngressSpecLoadBalancer string
+
+// List of AppIngressSpecLoadBalancer
+const (
+	AppIngressSpecLoadBalancer_Unknown      AppIngressSpecLoadBalancer = "UNKNOWN"
+	AppIngressSpecLoadBalancer_DigitalOcean AppIngressSpecLoadBalancer = "DIGITALOCEAN"
+)
+
+// AppIngressSpecRule A rule that configures component routes, rewrites, redirects and cors.
+type AppIngressSpecRule struct {
+	Match     *AppIngressSpecRuleMatch            `json:"match,omitempty"`
+	Component *AppIngressSpecRuleRoutingComponent `json:"component,omitempty"`
+	Redirect  *AppIngressSpecRuleRoutingRedirect  `json:"redirect,omitempty"`
+	CORS      *AppCORSPolicy                      `json:"cors,omitempty"`
+}
+
+// AppIngressSpecRuleMatch The match configuration for a rule.
+type AppIngressSpecRuleMatch struct {
+	Path *AppIngressSpecRuleStringMatch `json:"path,omitempty"`
+}
+
+// AppIngressSpecRuleRoutingComponent The component routing configuration.
+type AppIngressSpecRuleRoutingComponent struct {
+	// The name of the component to route to.
+	Name string `json:"name,omitempty"`
+	// An optional flag to preserve the path that is forwarded to the backend service. By default, the HTTP request path will be trimmed from the left when forwarded to the component. For example, a component with `path=/api` will have requests to `/api/list` trimmed to `/list`. If this value is `true`, the path will remain `/api/list`. Note: this is not applicable for Functions Components and is mutually exclusive with `rewrite`.
+	PreservePathPrefix bool `json:"preserve_path_prefix,omitempty"`
+	// An optional field that will rewrite the path of the component to be what is specified here. By default, the HTTP request path will be trimmed from the left when forwarded to the component. For example, a component with `path=/api` will have requests to `/api/list` trimmed to `/list`. If you specified the rewrite to be `/v1/`, requests to `/api/list` would be rewritten to `/v1/list`. Note: this is mutually exclusive with `preserve_path_prefix`.
+	Rewrite string `json:"rewrite,omitempty"`
+}
+
+// AppIngressSpecRuleRoutingRedirect The redirect routing configuration.
+type AppIngressSpecRuleRoutingRedirect struct {
+	// An optional URI path to redirect to. Note: if this is specified the whole URI of the original request will be overwritten to this value, irrespective of the original request URI being matched.
+	Uri string `json:"uri,omitempty"`
+	// The authority/host to redirect to. This can be a hostname or IP address. Note: use `port` to set the port.
+	Authority string `json:"authority,omitempty"`
+	// The port to redirect to.
+	Port int64 `json:"port,omitempty"`
+	// The scheme to redirect to. Supported values are `http` or `https`. Default: `https`.
+	Scheme string `json:"scheme,omitempty"`
+	// The redirect code to use. Defaults to `302`. Supported values are 300, 301, 302, 303, 304, 305, 307, 308.
+	RedirectCode int64 `json:"redirect_code,omitempty"`
+}
+
+// AppIngressSpecRuleStringMatch The string match configuration.
+type AppIngressSpecRuleStringMatch struct {
+	// Prefix-based match. For example, `/api` will match `/api`, `/api/`, and any nested paths such as `/api/v1/endpoint`.
+	Prefix string `json:"prefix,omitempty"`
+}
+
+// AppJobSpec struct for AppJobSpec
+type AppJobSpec struct {
+	// The name. Must be unique across all components within the same app.
+	Name   string            `json:"name"`
+	Git    *GitSourceSpec    `json:"git,omitempty"`
+	GitHub *GitHubSourceSpec `json:"github,omitempty"`
+	Image  *ImageSourceSpec  `json:"image,omitempty"`
+	GitLab *GitLabSourceSpec `json:"gitlab,omitempty"`
+	// The path to the Dockerfile relative to the root of the repo. If set, it will be used to build this component. Otherwise, App Platform will attempt to build it using buildpacks.
+	DockerfilePath string `json:"dockerfile_path,omitempty"`
+	// An optional build command to run while building this component from source.
+	BuildCommand string `json:"build_command,omitempty"`
+	// An optional run command to override the component's default.
+	RunCommand string `json:"run_command,omitempty"`
+	// An optional path to the working directory to use for the build. For Dockerfile builds, this will be used as the build context. Must be relative to the root of the repo.
+	SourceDir string `json:"source_dir,omitempty"`
+	// An environment slug describing the type of this app. For a full list, please refer to [the product documentation](https://www.digitalocean.com/docs/app-platform/).
+	EnvironmentSlug string `json:"environment_slug,omitempty"`
+	// A list of environment variables made available to the component.
+	Envs []*AppVariableDefinition `json:"envs,omitempty"`
+	// The instance size to use for this component.
+	InstanceSizeSlug string         `json:"instance_size_slug,omitempty"`
+	InstanceCount    int64          `json:"instance_count,omitempty"`
+	Kind             AppJobSpecKind `json:"kind,omitempty"`
+	// A list of configured alerts which apply to the component.
+	Alerts []*AppAlertSpec `json:"alerts,omitempty"`
+	// A list of configured log forwarding destinations.
+	LogDestinations []*AppLogDestinationSpec `json:"log_destinations,omitempty"`
+}
+
+// AppJobSpecKind  - UNSPECIFIED: Default job type, will auto-complete to POST_DEPLOY kind.  - PRE_DEPLOY: Indicates a job that runs before an app deployment.  - POST_DEPLOY: Indicates a job that runs after an app deployment.  - FAILED_DEPLOY: Indicates a job that runs after a component fails to deploy.
+type AppJobSpecKind string
+
+// List of AppJobSpecKind
+const (
+	AppJobSpecKind_Unspecified  AppJobSpecKind = "UNSPECIFIED"
+	AppJobSpecKind_PreDeploy    AppJobSpecKind = "PRE_DEPLOY"
+	AppJobSpecKind_PostDeploy   AppJobSpecKind = "POST_DEPLOY"
+	AppJobSpecKind_FailedDeploy AppJobSpecKind = "FAILED_DEPLOY"
+)
+
+// AppLogDestinationSpec struct for AppLogDestinationSpec
+type AppLogDestinationSpec struct {
+	// Name of the log destination.
+	Name        string                           `json:"name"`
+	Papertrail  *AppLogDestinationSpecPapertrail `json:"papertrail,omitempty"`
+	Datadog     *AppLogDestinationSpecDataDog    `json:"datadog,omitempty"`
+	Logtail     *AppLogDestinationSpecLogtail    `json:"logtail,omitempty"`
+	Endpoint    string                           `json:"endpoint,omitempty"`
+	TLSInsecure bool                             `json:"tls_insecure,omitempty"`
+	Headers     []*AppLogDestinationSpecHeader   `json:"headers,omitempty"`
+}
+
+// AppLogDestinationSpecDataDog DataDog configuration.
+type AppLogDestinationSpecDataDog struct {
+	// Datadog HTTP log intake endpoint.
+	Endpoint string `json:"endpoint,omitempty"`
+	// Datadog API key.
+	ApiKey string `json:"api_key"`
+}
+
+// AppLogDestinationSpecHeader struct for AppLogDestinationSpecHeader
+type AppLogDestinationSpecHeader struct {
+	// The name
+	Key string `json:"key"`
+	// The header value.
+	Value string `json:"value,omitempty"`
+}
+
+// AppLogDestinationSpecLogtail Logtail configuration.
+type AppLogDestinationSpecLogtail struct {
+	// Logtail token.
+	Token string `json:"token"`
+}
+
+// AppLogDestinationSpecPapertrail Papertrail configuration.
+type AppLogDestinationSpecPapertrail struct {
+	// Papertrail syslog endpoint.
+	Endpoint string `json:"endpoint"`
+}
+
+// AppRouteSpec struct for AppRouteSpec
+type AppRouteSpec struct {
+	// An HTTP path prefix. Paths must start with / and must be unique across all components within an app.
+	Path string `json:"path,omitempty"`
+	// An optional flag to preserve the path that is forwarded to the backend service. By default, the HTTP request path will be trimmed from the left when forwarded to the component. For example, a component with `path=/api` will have requests to `/api/list` trimmed to `/list`. If this value is `true`, the path will remain `/api/list`. Note: this is not applicable for Functions Components.
+	PreservePathPrefix bool `json:"preserve_path_prefix,omitempty"`
+}
+
+// AppServiceSpec struct for AppServiceSpec
+type AppServiceSpec struct {
+	// The name. Must be unique across all components within the same app.
+	Name   string            `json:"name"`
+	Git    *GitSourceSpec    `json:"git,omitempty"`
+	GitHub *GitHubSourceSpec `json:"github,omitempty"`
+	Image  *ImageSourceSpec  `json:"image,omitempty"`
+	GitLab *GitLabSourceSpec `json:"gitlab,omitempty"`
+	// The path to the Dockerfile relative to the root of the repo. If set, it will be used to build this component. Otherwise, App Platform will attempt to build it using buildpacks.
+	DockerfilePath string `json:"dockerfile_path,omitempty"`
+	// An optional build command to run while building this component from source.
+	BuildCommand string `json:"build_command,omitempty"`
+	// An optional run command to override the component's default.
+	RunCommand string `json:"run_command,omitempty"`
+	// An optional path to the working directory to use for the build. For Dockerfile builds, this will be used as the build context. Must be relative to the root of the repo.
+	SourceDir string `json:"source_dir,omitempty"`
+	// An environment slug describing the type of this app. For a full list, please refer to [the product documentation](https://www.digitalocean.com/docs/app-platform/).
+	EnvironmentSlug string `json:"environment_slug,omitempty"`
+	// A list of environment variables made available to the component.
+	Envs             []*AppVariableDefinition `json:"envs,omitempty"`
+	InstanceSizeSlug string                   `json:"instance_size_slug,omitempty"`
+	InstanceCount    int64                    `json:"instance_count,omitempty"`
+	// The internal port on which this service's run command will listen. Default: 8080 If there is not an environment variable with the name `PORT`, one will be automatically added with its value set to the value of this field.
+	HTTPPort int64 `json:"http_port,omitempty"`
+	// A list of HTTP routes that should be routed to this component.
+	Routes      []*AppRouteSpec            `json:"routes,omitempty"`
+	HealthCheck *AppServiceSpecHealthCheck `json:"health_check,omitempty"`
+	CORS        *AppCORSPolicy             `json:"cors,omitempty"`
+	// The ports on which this service will listen for internal traffic.
+	InternalPorts []int64 `json:"internal_ports,omitempty"`
+	// A list of configured alerts which apply to the component.
+	Alerts []*AppAlertSpec `json:"alerts,omitempty"`
+	// A list of configured log forwarding destinations.
+	LogDestinations []*AppLogDestinationSpec `json:"log_destinations,omitempty"`
+}
+
+// AppServiceSpecHealthCheck struct for AppServiceSpecHealthCheck
+type AppServiceSpecHealthCheck struct {
+	// Deprecated. Use http_path instead.
+	Path string `json:"path,omitempty"`
+	// The number of seconds to wait before beginning health checks. Default: 0 seconds; start health checks as soon as the service starts.
+	InitialDelaySeconds int32 `json:"initial_delay_seconds,omitempty"`
+	// The number of seconds to wait between health checks. Default: 10 seconds.
+	PeriodSeconds int32 `json:"period_seconds,omitempty"`
+	// The number of seconds after which the check times out. Default: 1 second.
+	TimeoutSeconds int32 `json:"timeout_seconds,omitempty"`
+	// The number of successful health checks before considered healthy. Default: 1.
+	SuccessThreshold int32 `json:"success_threshold,omitempty"`
+	// The number of failed health checks before considered unhealthy. Default: 9.
+	FailureThreshold int32 `json:"failure_threshold,omitempty"`
+	// The route path used for the HTTP health check ping. If not set, the HTTP health check will be disabled and a TCP health check used instead.
+	HTTPPath string `json:"http_path,omitempty"`
+	// The port on which the health check will be performed. If not set, the health check will be performed on the component's http_port.
+	Port int64 `json:"port,omitempty"`
+}
+
+// AppSpec The desired configuration of an application.
+type AppSpec struct {
+	// The name of the app. Must be unique across all apps in the same account.
+	Name string `json:"name"`
+	// Workloads which expose publicly-accessible HTTP services.
+	Services []*AppServiceSpec `json:"services,omitempty"`
+	// Content which can be rendered to static web assets.
+	StaticSites []*AppStaticSiteSpec `json:"static_sites,omitempty"`
+	// Workloads which do not expose publicly-accessible HTTP services.
+	Workers []*AppWorkerSpec `json:"workers,omitempty"`
+	// Pre and post deployment workloads which do not expose publicly-accessible HTTP routes.
+	Jobs []*AppJobSpec `json:"jobs,omitempty"`
+	// Workloads which expose publicly-accessible HTTP services via Functions Components.
+	Functions []*AppFunctionsSpec `json:"functions,omitempty"`
+	// Database instances which can provide persistence to workloads within the application.
+	Databases []*AppDatabaseSpec `json:"databases,omitempty"`
+	// A set of hostnames where the application will be available.
+	Domains []*AppDomainSpec `json:"domains,omitempty"`
+	Region  string           `json:"region,omitempty"`
+	// A list of environment variables made available to all components in the app.
+	Envs []*AppVariableDefinition `json:"envs,omitempty"`
+	// A list of alerts which apply to the app.
+	Alerts   []*AppAlertSpec `json:"alerts,omitempty"`
+	Ingress  *AppIngressSpec `json:"ingress,omitempty"`
+	Features []string        `json:"features,omitempty"`
+}
+
+// AppStaticSiteSpec struct for AppStaticSiteSpec
+type AppStaticSiteSpec struct {
+	// The name. Must be unique across all components within the same app.
+	Name   string            `json:"name"`
+	Git    *GitSourceSpec    `json:"git,omitempty"`
+	GitHub *GitHubSourceSpec `json:"github,omitempty"`
+	GitLab *GitLabSourceSpec `json:"gitlab,omitempty"`
+	// The path to the Dockerfile relative to the root of the repo. If set, it will be used to build this component. Otherwise, App Platform will attempt to build it using buildpacks.
+	DockerfilePath string `json:"dockerfile_path,omitempty"`
+	// An optional build command to run while building this component from source.
+	BuildCommand string `json:"build_command,omitempty"`
+	// An optional path to the working directory to use for the build. For Dockerfile builds, this will be used as the build context. Must be relative to the root of the repo.
+	SourceDir string `json:"source_dir,omitempty"`
+	// An environment slug describing the type of this app. For a full list, please refer to [the product documentation](https://www.digitalocean.com/docs/app-platform/).
+	EnvironmentSlug string `json:"environment_slug,omitempty"`
+	// An optional path to where the built assets will be located, relative to the build context. If not set, App Platform will automatically scan for these directory names: `_static`, `dist`, `public`, `build`.
+	OutputDir     string `json:"output_dir,omitempty"`
+	IndexDocument string `json:"index_document,omitempty"`
+	// The name of the error document to use when serving this static site. Default: 404.html. If no such file exists within the built assets, App Platform will supply one.
+	ErrorDocument string `json:"error_document,omitempty"`
+	// A list of environment variables made available to the component.
+	Envs []*AppVariableDefinition `json:"envs,omitempty"`
+	// A list of HTTP routes that should be routed to this component.
+	Routes []*AppRouteSpec `json:"routes,omitempty"`
+	CORS   *AppCORSPolicy  `json:"cors,omitempty"`
+	// The name of the document to use as the fallback for any requests to documents that are not found when serving this static site. Only 1 of `catchall_document` or `error_document` can be set.
+	CatchallDocument string `json:"catchall_document,omitempty"`
+}
+
+// AppVariableDefinition struct for AppVariableDefinition
+type AppVariableDefinition struct {
+	// The name
+	Key string `json:"key"`
+	// The value. If the type is `SECRET`, the value will be encrypted on first submission. On following submissions, the encrypted value should be used.
+	Value string           `json:"value,omitempty"`
+	Scope AppVariableScope `json:"scope,omitempty"`
+	Type  AppVariableType  `json:"type,omitempty"`
+}
+
+// AppWorkerSpec struct for AppWorkerSpec
+type AppWorkerSpec struct {
+	// The name. Must be unique across all components within the same app.
+	Name   string            `json:"name"`
+	Git    *GitSourceSpec    `json:"git,omitempty"`
+	GitHub *GitHubSourceSpec `json:"github,omitempty"`
+	Image  *ImageSourceSpec  `json:"image,omitempty"`
+	GitLab *GitLabSourceSpec `json:"gitlab,omitempty"`
+	// The path to the Dockerfile relative to the root of the repo. If set, it will be used to build this component. Otherwise, App Platform will attempt to build it using buildpacks.
+	DockerfilePath string `json:"dockerfile_path,omitempty"`
+	// An optional build command to run while building this component from source.
+	BuildCommand string `json:"build_command,omitempty"`
+	// An optional run command to override the component's default.
+	RunCommand string `json:"run_command,omitempty"`
+	// An optional path to the working directory to use for the build. For Dockerfile builds, this will be used as the build context. Must be relative to the root of the repo.
+	SourceDir string `json:"source_dir,omitempty"`
+	// An environment slug describing the type of this app. For a full list, please refer to [the product documentation](https://www.digitalocean.com/docs/app-platform/).
+	EnvironmentSlug string `json:"environment_slug,omitempty"`
+	// A list of environment variables made available to the component.
+	Envs []*AppVariableDefinition `json:"envs,omitempty"`
+	// The instance size to use for this component.
+	InstanceSizeSlug string `json:"instance_size_slug,omitempty"`
+	InstanceCount    int64  `json:"instance_count,omitempty"`
+	// A list of configured alerts which apply to the component.
+	Alerts []*AppAlertSpec `json:"alerts,omitempty"`
+	// A list of configured log forwarding destinations.
+	LogDestinations []*AppLogDestinationSpec `json:"log_destinations,omitempty"`
+}
+
+// Buildpack struct for Buildpack
+type Buildpack struct {
+	// The ID of the buildpack.
+	ID string `json:"id,omitempty"`
+	// Full semver version string.
+	Version string `json:"version,omitempty"`
+	// The major version line that the buildpack is pinned to. Example: a value of `1` indicates that the buildpack is pinned to versions `>=1.0.0 and <2.0.0`.
+	MajorVersion int32 `json:"major_version,omitempty"`
+	// Indicates whether the buildpack is on the latest major version line available.
+	Latest bool `json:"latest,omitempty"`
+	// A human friendly name.
+	Name string `json:"name,omitempty"`
+	// A description of the buildpack's purpose and steps performed at build time.
+	Description []string `json:"description,omitempty"`
+	// A link to the buildpack's documentation.
+	DocsLink string `json:"docs_link,omitempty"`
+}
+
+// DeploymentCauseDetailsDigitalOceanUser struct for DeploymentCauseDetailsDigitalOceanUser
+type DeploymentCauseDetailsDigitalOceanUser struct {
+	UUID     string `json:"uuid,omitempty"`
+	Email    string `json:"email,omitempty"`
+	FullName string `json:"full_name,omitempty"`
+}
+
+// DeploymentCauseDetailsDigitalOceanUserAction struct for DeploymentCauseDetailsDigitalOceanUserAction
+type DeploymentCauseDetailsDigitalOceanUserAction struct {
+	User *DeploymentCauseDetailsDigitalOceanUser          `json:"user,omitempty"`
+	Name DeploymentCauseDetailsDigitalOceanUserActionName `json:"name,omitempty"`
+}
+
+// DeploymentCauseDetailsDOCRPush struct for DeploymentCauseDetailsDOCRPush
+type DeploymentCauseDetailsDOCRPush struct {
+	// The registry name.
+	Registry string `json:"registry,omitempty"`
+	// The repository name.
+	Repository string `json:"repository,omitempty"`
+	// The repository tag.
+	Tag string `json:"tag,omitempty"`
+	// OCI Image digest.
+	ImageDigest string `json:"image_digest,omitempty"`
+}
+
+// DeploymentCauseDetailsGitPush struct for DeploymentCauseDetailsGitPush
+type DeploymentCauseDetailsGitPush struct {
+	GitHub        *GitHubSourceSpec `json:"github,omitempty"`
+	GitLab        *GitLabSourceSpec `json:"gitlab,omitempty"`
+	Username      string            `json:"username,omitempty"`
+	CommitAuthor  string            `json:"commit_author,omitempty"`
+	CommitSHA     string            `json:"commit_sha,omitempty"`
+	CommitMessage string            `json:"commit_message,omitempty"`
+}
+
+// AppCORSPolicy struct for AppCORSPolicy
+type AppCORSPolicy struct {
+	// The set of allowed CORS origins. This configures the Access-Control-Allow-Origin header.
+	AllowOrigins []*AppStringMatch `json:"allow_origins,omitempty"`
+	// The set of allowed HTTP methods. This configures the Access-Control-Allow-Methods header.
+	AllowMethods []string `json:"allow_methods,omitempty"`
+	// The set of allowed HTTP request headers. This configures the Access-Control-Allow-Headers header.
+	AllowHeaders []string `json:"allow_headers,omitempty"`
+	// The set of HTTP response headers that browsers are allowed to access. This configures the Access-Control-Expose-Headers header.
+	ExposeHeaders []string `json:"expose_headers,omitempty"`
+	// An optional duration specifying how long browsers can cache the results of a preflight request. This configures the Access-Control-Max-Age header. Example: `5h30m`.
+	MaxAge string `json:"max_age,omitempty"`
+	// Whether browsers should expose the response to the client-side JavaScript code when the request's credentials mode is `include`. This configures the Access-Control-Allow-Credentials header.
+	AllowCredentials bool `json:"allow_credentials,omitempty"`
+}
+
+// AppCreateRequest struct for AppCreateRequest
+type AppCreateRequest struct {
+	Spec *AppSpec `json:"spec"`
+	// Optional. The UUID of the project the app should be assigned.
+	ProjectID string `json:"project_id,omitempty"`
+}
+
+// DeployTemplate struct for DeployTemplate
+type DeployTemplate struct {
+	Spec *AppSpec `json:"spec,omitempty"`
+}
+
+// Deployment struct for Deployment
+type Deployment struct {
+	ID                   string                  `json:"id,omitempty"`
+	Spec                 *AppSpec                `json:"spec,omitempty"`
+	Services             []*DeploymentService    `json:"services,omitempty"`
+	StaticSites          []*DeploymentStaticSite `json:"static_sites,omitempty"`
+	Workers              []*DeploymentWorker     `json:"workers,omitempty"`
+	Jobs                 []*DeploymentJob        `json:"jobs,omitempty"`
+	Functions            []*DeploymentFunctions  `json:"functions,omitempty"`
+	PhaseLastUpdatedAt   time.Time               `json:"phase_last_updated_at,omitempty"`
+	CreatedAt            time.Time               `json:"created_at,omitempty"`
+	UpdatedAt            time.Time               `json:"updated_at,omitempty"`
+	Cause                string                  `json:"cause,omitempty"`
+	ClonedFrom           string                  `json:"cloned_from,omitempty"`
+	Progress             *DeploymentProgress     `json:"progress,omitempty"`
+	Phase                DeploymentPhase         `json:"phase,omitempty"`
+	TierSlug             string                  `json:"tier_slug,omitempty"`
+	PreviousDeploymentID string                  `json:"previous_deployment_id,omitempty"`
+	CauseDetails         *DeploymentCauseDetails `json:"cause_details,omitempty"`
+	LoadBalancerID       string                  `json:"load_balancer_id,omitempty"`
+	Timing               *DeploymentTiming       `json:"timing,omitempty"`
+}
+
+// DeploymentCauseDetails struct for DeploymentCauseDetails
+type DeploymentCauseDetails struct {
+	DigitalOceanUserAction *DeploymentCauseDetailsDigitalOceanUserAction `json:"digitalocean_user_action,omitempty"`
+	GitPush                *DeploymentCauseDetailsGitPush                `json:"git_push,omitempty"`
+	DOCRPush               *DeploymentCauseDetailsDOCRPush               `json:"docr_push,omitempty"`
+	Internal               bool                                          `json:"internal,omitempty"`
+	Type                   DeploymentCauseDetailsType                    `json:"type,omitempty"`
+}
+
+// DeploymentCauseDetailsType - MANUAL: A deployment that was manually created  - DEPLOY_ON_PUSH: A deployment that was automatically created by a Deploy on Push hook  - MAINTENANCE: A deployment created for App Platform maintenance  - MANUAL_ROLLBACK: A rollback deployment that was manually created  - AUTO_ROLLBACK: An automatic rollback deployment created as a result of a previous deployment failing  - UPDATE_DATABASE_TRUSTED_SOURCES: A deployment that was created due to an update in database trusted sources.
+type DeploymentCauseDetailsType string
+
+// List of DeploymentCauseDetailsType
+const (
+	DeploymentCauseDetailsType_Unknown                      DeploymentCauseDetailsType = "UNKNOWN"
+	DeploymentCauseDetailsType_Manual                       DeploymentCauseDetailsType = "MANUAL"
+	DeploymentCauseDetailsType_DeployOnPush                 DeploymentCauseDetailsType = "DEPLOY_ON_PUSH"
+	DeploymentCauseDetailsType_Maintenance                  DeploymentCauseDetailsType = "MAINTENANCE"
+	DeploymentCauseDetailsType_ManualRollback               DeploymentCauseDetailsType = "MANUAL_ROLLBACK"
+	DeploymentCauseDetailsType_AutoRollback                 DeploymentCauseDetailsType = "AUTO_ROLLBACK"
+	DeploymentCauseDetailsType_UpdateDatabaseTrustedSources DeploymentCauseDetailsType = "UPDATE_DATABASE_TRUSTED_SOURCES"
+)
+
+// DeploymentFunctions struct for DeploymentFunctions
+type DeploymentFunctions struct {
+	Name string `json:"name,omitempty"`
+	// The commit hash of the repository that was used to build this functions component.
+	SourceCommitHash string `json:"source_commit_hash,omitempty"`
+	// The namespace where the functions are deployed.
+	Namespace string `json:"namespace,omitempty"`
+}
+
+// DeploymentJob struct for DeploymentJob
+type DeploymentJob struct {
+	Name             string `json:"name,omitempty"`
+	SourceCommitHash string `json:"source_commit_hash,omitempty"`
+	// The list of resolved buildpacks used for a given deployment component.
+	Buildpacks []*Buildpack `json:"buildpacks,omitempty"`
+}
+
+// DeploymentPhase the model 'DeploymentPhase'
+type DeploymentPhase string
+
+// List of DeploymentPhase
+const (
+	DeploymentPhase_Unknown       DeploymentPhase = "UNKNOWN"
+	DeploymentPhase_PendingBuild  DeploymentPhase = "PENDING_BUILD"
+	DeploymentPhase_Building      DeploymentPhase = "BUILDING"
+	DeploymentPhase_PendingDeploy DeploymentPhase = "PENDING_DEPLOY"
+	DeploymentPhase_Deploying     DeploymentPhase = "DEPLOYING"
+	DeploymentPhase_Active        DeploymentPhase = "ACTIVE"
+	DeploymentPhase_Superseded    DeploymentPhase = "SUPERSEDED"
+	DeploymentPhase_Error         DeploymentPhase = "ERROR"
+	DeploymentPhase_Canceled      DeploymentPhase = "CANCELED"
+)
+
+// DeploymentProgress struct for DeploymentProgress
+type DeploymentProgress struct {
+	PendingSteps int32                     `json:"pending_steps,omitempty"`
+	RunningSteps int32                     `json:"running_steps,omitempty"`
+	SuccessSteps int32                     `json:"success_steps,omitempty"`
+	ErrorSteps   int32                     `json:"error_steps,omitempty"`
+	TotalSteps   int32                     `json:"total_steps,omitempty"`
+	Steps        []*DeploymentProgressStep `json:"steps,omitempty"`
+	SummarySteps []*DeploymentProgressStep `json:"summary_steps,omitempty"`
+}
+
+// DeploymentProgressStep struct for DeploymentProgressStep
+type DeploymentProgressStep struct {
+	Name          string                        `json:"name,omitempty"`
+	Status        DeploymentProgressStepStatus  `json:"status,omitempty"`
+	Steps         []*DeploymentProgressStep     `json:"steps,omitempty"`
+	StartedAt     time.Time                     `json:"started_at,omitempty"`
+	EndedAt       time.Time                     `json:"ended_at,omitempty"`
+	Reason        *DeploymentProgressStepReason `json:"reason,omitempty"`
+	ComponentName string                        `json:"component_name,omitempty"`
+	// The base of a human-readable description of the step intended to be combined with the component name for presentation. For example:  `message_base` = \"Building service\" `component_name` = \"api\"
+	MessageBase string `json:"message_base,omitempty"`
+}
+
+// DeploymentProgressStepReason struct for DeploymentProgressStepReason
+type DeploymentProgressStepReason struct {
+	Code    string `json:"code,omitempty"`
+	Message string `json:"message,omitempty"`
+}
+
+// DeploymentProgressStepStatus the model 'DeploymentProgressStepStatus'
+type DeploymentProgressStepStatus string
+
+// List of DeploymentProgressStepStatus
+const (
+	DeploymentProgressStepStatus_Unknown DeploymentProgressStepStatus = "UNKNOWN"
+	DeploymentProgressStepStatus_Pending DeploymentProgressStepStatus = "PENDING"
+	DeploymentProgressStepStatus_Running DeploymentProgressStepStatus = "RUNNING"
+	DeploymentProgressStepStatus_Error   DeploymentProgressStepStatus = "ERROR"
+	DeploymentProgressStepStatus_Success DeploymentProgressStepStatus = "SUCCESS"
+)
+
+// DeploymentService struct for DeploymentService
+type DeploymentService struct {
+	Name             string `json:"name,omitempty"`
+	SourceCommitHash string `json:"source_commit_hash,omitempty"`
+	// The list of resolved buildpacks used for a given deployment component.
+	Buildpacks []*Buildpack `json:"buildpacks,omitempty"`
+}
+
+// DeploymentStaticSite struct for DeploymentStaticSite
+type DeploymentStaticSite struct {
+	Name             string `json:"name,omitempty"`
+	SourceCommitHash string `json:"source_commit_hash,omitempty"`
+	// The list of resolved buildpacks used for a given deployment component.
+	Buildpacks []*Buildpack `json:"buildpacks,omitempty"`
+}
+
+// DeploymentTiming struct for DeploymentTiming
+type DeploymentTiming struct {
+	// Pending describes the time spent waiting for the build to begin. This may include delays related to build concurrency limits.
+	Pending string `json:"pending,omitempty"`
+	// BuildTotal describes total time between the start of the build and its completion.
+	BuildTotal string `json:"build_total,omitempty"`
+	// BuildBillable describes the time spent executing the build. As builds may run concurrently, this may be greater than the build total.
+	BuildBillable string `json:"build_billable,omitempty"`
+	// Components breaks down billable build time by component.
+	Components []*DeploymentTimingComponent `json:"components,omitempty"`
+	// DatabaseProvision describes the time spent creating databases.
+	DatabaseProvision string `json:"database_provision,omitempty"`
+	// Deploying is time spent starting containers and waiting for health checks to pass.
+	Deploying string `json:"deploying,omitempty"`
+}
+
+// DeploymentTimingComponent struct for DeploymentTimingComponent
+type DeploymentTimingComponent struct {
+	// Name of the component.
+	Name string `json:"name,omitempty"`
+	// BuildBillable is the billable build time for this component.
+	BuildBillable string `json:"build_billable,omitempty"`
+}
+
+// DeploymentWorker struct for DeploymentWorker
+type DeploymentWorker struct {
+	Name             string `json:"name,omitempty"`
+	SourceCommitHash string `json:"source_commit_hash,omitempty"`
+	// The list of resolved buildpacks used for a given deployment component.
+	Buildpacks []*Buildpack `json:"buildpacks,omitempty"`
+}
+
+// DetectRequest struct for DetectRequest
+type DetectRequest struct {
+	Git    *GitSourceSpec    `json:"git,omitempty"`
+	GitHub *GitHubSourceSpec `json:"github,omitempty"`
+	GitLab *GitLabSourceSpec `json:"gitlab,omitempty"`
+	// An optional commit hash to use instead of the branch specified in the source spec.
+	CommitSHA string `json:"commit_sha,omitempty"`
+	// An optional path to the working directory for the detection process.
+	SourceDir string `json:"source_dir,omitempty"`
+}
+
+// DetectResponse struct for DetectResponse
+type DetectResponse struct {
+	Components    []*DetectResponseComponent `json:"components,omitempty"`
+	Template      *DeployTemplate            `json:"template,omitempty"`
+	TemplateFound bool                       `json:"template_found,omitempty"`
+	TemplateValid bool                       `json:"template_valid,omitempty"`
+	TemplateError string                     `json:"template_error,omitempty"`
+}
+
+// DetectResponseComponent struct for DetectResponseComponent
+type DetectResponseComponent struct {
+	Strategy DetectResponseType `json:"strategy,omitempty"`
+	Types    []string           `json:"types,omitempty"`
+	// A list of Dockerfiles that were found for this component. The recommendation is to use the first Dockerfile.
+	Dockerfiles     []string `json:"dockerfiles,omitempty"`
+	BuildCommand    string   `json:"build_command,omitempty"`
+	RunCommand      string   `json:"run_command,omitempty"`
+	EnvironmentSlug string   `json:"environment_slug,omitempty"`
+	// A list of HTTP ports that this component may listen on. The recommendation is to use the last port in the list.
+	HTTPPorts []int64                  `json:"http_ports,omitempty"`
+	EnvVars   []*AppVariableDefinition `json:"env_vars,omitempty"`
+	// List of serverless packages detected.
+	ServerlessPackages []*DetectResponseServerlessPackage `json:"serverless_packages,omitempty"`
+	SourceDir          string                             `json:"source_dir,omitempty"`
+	// The list of detected buildpacks that will be used for the component build.
+	Buildpacks []*Buildpack `json:"buildpacks,omitempty"`
+}
+
+// DetectResponseServerlessFunction struct for DetectResponseServerlessFunction
+type DetectResponseServerlessFunction struct {
+	// Name of the function.
+	Name string `json:"name,omitempty"`
+	// Package that the function belongs to.
+	Package string `json:"package,omitempty"`
+	// Runtime detected for the function.
+	Runtime string                                  `json:"runtime,omitempty"`
+	Limits  *DetectResponseServerlessFunctionLimits `json:"limits,omitempty"`
+}
+
+// DetectResponseServerlessFunctionLimits struct for DetectResponseServerlessFunctionLimits
+type DetectResponseServerlessFunctionLimits struct {
+	// Timeout for function invocation in milliseconds.
+	Timeout string `json:"timeout,omitempty"`
+	// Max memory allocation for function invocation in megabytes.
+	Memory string `json:"memory,omitempty"`
+	// Max log size usage for function invocation in kilobytes.
+	Logs string `json:"logs,omitempty"`
+}
+
+// DetectResponseServerlessPackage struct for DetectResponseServerlessPackage
+type DetectResponseServerlessPackage struct {
+	// Name of the serverless package.
+	Name string `json:"name,omitempty"`
+	// List of functions detected in the serverless package.
+	Functions []*DetectResponseServerlessFunction `json:"functions,omitempty"`
+}
+
+// DetectResponseType the model 'DetectResponseType'
+type DetectResponseType string
+
+// List of DetectResponseType
+const (
+	DetectResponseType_Unspecified DetectResponseType = "UNSPECIFIED"
+	DetectResponseType_Dockerfile  DetectResponseType = "DOCKERFILE"
+	DetectResponseType_Buildpack   DetectResponseType = "BUILDPACK"
+	DetectResponseType_HTML        DetectResponseType = "HTML"
+	DetectResponseType_Serverless  DetectResponseType = "SERVERLESS"
+)
+
+// DeploymentCauseDetailsDigitalOceanUserActionName the model 'CauseDetailsDigitalOceanUserActionName'
+type DeploymentCauseDetailsDigitalOceanUserActionName string
+
+// List of DeploymentCauseDetailsDigitalOceanUserActionName
+const (
+	DeploymentCauseDetailsDigitalOceanUserActionName_Unknown               DeploymentCauseDetailsDigitalOceanUserActionName = "UNKNOWN"
+	DeploymentCauseDetailsDigitalOceanUserActionName_CreateDeployment      DeploymentCauseDetailsDigitalOceanUserActionName = "CREATE_DEPLOYMENT"
+	DeploymentCauseDetailsDigitalOceanUserActionName_UpdateSpec            DeploymentCauseDetailsDigitalOceanUserActionName = "UPDATE_SPEC"
+	DeploymentCauseDetailsDigitalOceanUserActionName_ResetDatabasePassword DeploymentCauseDetailsDigitalOceanUserActionName = "RESET_DATABASE_PASSWORD"
+	DeploymentCauseDetailsDigitalOceanUserActionName_RollbackApp           DeploymentCauseDetailsDigitalOceanUserActionName = "ROLLBACK_APP"
+	DeploymentCauseDetailsDigitalOceanUserActionName_RevertAppRollback     DeploymentCauseDetailsDigitalOceanUserActionName = "REVERT_APP_ROLLBACK"
+	DeploymentCauseDetailsDigitalOceanUserActionName_UpgradeBuildpack      DeploymentCauseDetailsDigitalOceanUserActionName = "UPGRADE_BUILDPACK"
+)
+
+// AppDomain struct for AppDomain
+type AppDomain struct {
+	ID                      string                 `json:"id,omitempty"`
+	Spec                    *AppDomainSpec         `json:"spec,omitempty"`
+	Phase                   AppDomainPhase         `json:"phase,omitempty"`
+	Progress                *AppDomainProgress     `json:"progress,omitempty"`
+	Validation              *AppDomainValidation   `json:"validation,omitempty"`
+	Validations             []*AppDomainValidation `json:"validations,omitempty"`
+	RotateValidationRecords bool                   `json:"rotate_validation_records,omitempty"`
+	CertificateExpiresAt    time.Time              `json:"certificate_expires_at,omitempty"`
+}
+
+// AppDomainPhase the model 'AppDomainPhase'
+type AppDomainPhase string
+
+// List of AppDomainPhase
+const (
+	AppJobSpecKindPHASE_Unknown     AppDomainPhase = "UNKNOWN"
+	AppJobSpecKindPHASE_Pending     AppDomainPhase = "PENDING"
+	AppJobSpecKindPHASE_Configuring AppDomainPhase = "CONFIGURING"
+	AppJobSpecKindPHASE_Active      AppDomainPhase = "ACTIVE"
+	AppJobSpecKindPHASE_Error       AppDomainPhase = "ERROR"
+)
+
+// AppDomainProgress struct for AppDomainProgress
+type AppDomainProgress struct {
+	Steps []*AppDomainProgressStep `json:"steps,omitempty"`
+}
+
+// AppDomainProgressStep struct for AppDomainProgressStep
+type AppDomainProgressStep struct {
+	Name      string                       `json:"name,omitempty"`
+	Status    AppDomainProgressStepStatus  `json:"status,omitempty"`
+	Steps     []*AppDomainProgressStep     `json:"steps,omitempty"`
+	StartedAt time.Time                    `json:"started_at,omitempty"`
+	EndedAt   time.Time                    `json:"ended_at,omitempty"`
+	Reason    *AppDomainProgressStepReason `json:"reason,omitempty"`
+}
+
+// AppDomainProgressStepReason struct for AppDomainProgressStepReason
+type AppDomainProgressStepReason struct {
+	Code    string `json:"code,omitempty"`
+	Message string `json:"message,omitempty"`
+}
+
+// AppDomainProgressStepStatus the model 'AppDomainProgressStepStatus'
+type AppDomainProgressStepStatus string
+
+// List of AppDomainProgressStepStatus
+const (
+	AppJobSpecKindProgressStepStatus_Unknown AppDomainProgressStepStatus = "UNKNOWN"
+	AppJobSpecKindProgressStepStatus_Pending AppDomainProgressStepStatus = "PENDING"
+	AppJobSpecKindProgressStepStatus_Running AppDomainProgressStepStatus = "RUNNING"
+	AppJobSpecKindProgressStepStatus_Error   AppDomainProgressStepStatus = "ERROR"
+	AppJobSpecKindProgressStepStatus_Success AppDomainProgressStepStatus = "SUCCESS"
+)
+
+// AppDomainValidation struct for AppDomainValidation
+type AppDomainValidation struct {
+	TXTName  string `json:"txt_name,omitempty"`
+	TXTValue string `json:"txt_value,omitempty"`
+}
+
+// GitHubSourceSpec struct for GitHubSourceSpec
+type GitHubSourceSpec struct {
+	Repo         string `json:"repo,omitempty"`
+	Branch       string `json:"branch,omitempty"`
+	DeployOnPush bool   `json:"deploy_on_push,omitempty"`
+}
+
+// GitLabSourceSpec struct for GitLabSourceSpec
+type GitLabSourceSpec struct {
+	Repo         string `json:"repo,omitempty"`
+	Branch       string `json:"branch,omitempty"`
+	DeployOnPush bool   `json:"deploy_on_push,omitempty"`
+}
+
+// GitSourceSpec struct for GitSourceSpec
+type GitSourceSpec struct {
+	RepoCloneURL string `json:"repo_clone_url,omitempty"`
+	Branch       string `json:"branch,omitempty"`
+}
+
+// ImageSourceSpec struct for ImageSourceSpec
+type ImageSourceSpec struct {
+	RegistryType ImageSourceSpecRegistryType `json:"registry_type,omitempty"`
+	// The registry name. Must be left empty for the `DOCR` registry type. Required for the `DOCKER_HUB` registry type.
+	Registry string `json:"registry,omitempty"`
+	// The repository name.
+	Repository string `json:"repository,omitempty"`
+	// The repository tag. Defaults to `latest` if not provided.
+	Tag          string                       `json:"tag,omitempty"`
+	DeployOnPush *ImageSourceSpecDeployOnPush `json:"deploy_on_push,omitempty"`
+}
+
+// ImageSourceSpecDeployOnPush struct for ImageSourceSpecDeployOnPush
+type ImageSourceSpecDeployOnPush struct {
+	// Automatically deploy new images. Only for DOCR images.
+	Enabled bool `json:"enabled,omitempty"`
+}
+
+// ImageSourceSpecRegistryType  - DOCR: The DigitalOcean container registry type.  - DOCKER_HUB: The DockerHub container registry type.
+type ImageSourceSpecRegistryType string
+
+// List of ImageSourceSpecRegistryType
+const (
+	ImageSourceSpecRegistryType_Unspecified ImageSourceSpecRegistryType = "UNSPECIFIED"
+	ImageSourceSpecRegistryType_DOCR        ImageSourceSpecRegistryType = "DOCR"
+	ImageSourceSpecRegistryType_DockerHub   ImageSourceSpecRegistryType = "DOCKER_HUB"
+)
+
+// AppInstanceSize struct for AppInstanceSize
+type AppInstanceSize struct {
+	Name            string                 `json:"name,omitempty"`
+	Slug            string                 `json:"slug,omitempty"`
+	CPUType         AppInstanceSizeCPUType `json:"cpu_type,omitempty"`
+	CPUs            string                 `json:"cpus,omitempty"`
+	MemoryBytes     string                 `json:"memory_bytes,omitempty"`
+	USDPerMonth     string                 `json:"usd_per_month,omitempty"`
+	USDPerSecond    string                 `json:"usd_per_second,omitempty"`
+	TierSlug        string                 `json:"tier_slug,omitempty"`
+	TierUpgradeTo   string                 `json:"tier_upgrade_to,omitempty"`
+	TierDowngradeTo string                 `json:"tier_downgrade_to,omitempty"`
+}
+
+// AppInstanceSizeCPUType the model 'AppInstanceSizeCPUType'
+type AppInstanceSizeCPUType string
+
+// List of AppInstanceSizeCPUType
+const (
+	AppInstanceSizeCPUType_Unspecified AppInstanceSizeCPUType = "UNSPECIFIED"
+	AppInstanceSizeCPUType_Shared      AppInstanceSizeCPUType = "SHARED"
+	AppInstanceSizeCPUType_Dedicated   AppInstanceSizeCPUType = "DEDICATED"
+)
+
+// ListBuildpacksResponse struct for ListBuildpacksResponse
+type ListBuildpacksResponse struct {
+	// List of the available buildpacks on App Platform.
+	Buildpacks []*Buildpack `json:"buildpacks,omitempty"`
+}
+
+// AppProposeRequest struct for AppProposeRequest
+type AppProposeRequest struct {
+	Spec *AppSpec `json:"spec"`
+	// An optional ID of an existing app. If set, the spec will be treated as a proposed update to the specified app. The existing app is not modified using this method.
+	AppID string `json:"app_id,omitempty"`
+}
+
+// AppProposeResponse struct for AppProposeResponse
+type AppProposeResponse struct {
+	// Deprecated. Please use app_is_starter instead.
+	AppIsStatic bool `json:"app_is_static,omitempty"`
+	// Indicates whether the app name is available.
+	AppNameAvailable bool `json:"app_name_available,omitempty"`
+	// If the app name is unavailable, this will be set to a suggested available name.
+	AppNameSuggestion string `json:"app_name_suggestion,omitempty"`
+	// Deprecated. Please use existing_starter_apps instead.
+	ExistingStaticApps string `json:"existing_static_apps,omitempty"`
+	// Deprecated. Please use max_free_starter_apps instead.
+	MaxFreeStaticApps string   `json:"max_free_static_apps,omitempty"`
+	Spec              *AppSpec `json:"spec,omitempty"`
+	// The monthly cost of the proposed app in USD.
+	AppCost float32 `json:"app_cost,omitempty"`
+	// The monthly cost of the proposed app in USD using the next pricing plan tier. For example, if you propose an app that uses the Basic tier, the `app_tier_upgrade_cost` field displays the monthly cost of the app if it were to use the Professional tier. If the proposed app already uses the most expensive tier, the field is empty.
+	AppTierUpgradeCost float32 `json:"app_tier_upgrade_cost,omitempty"`
+	// The monthly cost of the proposed app in USD using the previous pricing plan tier. For example, if you propose an app that uses the Professional tier, the `app_tier_downgrade_cost` field displays the monthly cost of the app if it were to use the Basic tier. If the proposed app already uses the least expensive tier, the field is empty.
+	AppTierDowngradeCost float32 `json:"app_tier_downgrade_cost,omitempty"`
+	// The number of existing starter tier apps the account has.
+	ExistingStarterApps string `json:"existing_starter_apps,omitempty"`
+	// The maximum number of free starter apps the account can have. Any additional starter apps will be charged for. These include apps with only static sites, functions, and databases.
+	MaxFreeStarterApps string `json:"max_free_starter_apps,omitempty"`
+	// Indicates whether the app is a starter tier app.
+	AppIsStarter bool `json:"app_is_starter,omitempty"`
+}
+
+// AppRegion struct for AppRegion
+type AppRegion struct {
+	Slug        string   `json:"slug,omitempty"`
+	Label       string   `json:"label,omitempty"`
+	Flag        string   `json:"flag,omitempty"`
+	Continent   string   `json:"continent,omitempty"`
+	Disabled    bool     `json:"disabled,omitempty"`
+	DataCenters []string `json:"data_centers,omitempty"`
+	Reason      string   `json:"reason,omitempty"`
+	// Whether or not the region is presented as the default.
+	Default bool `json:"default,omitempty"`
+}
+
+// AppStringMatch struct for AppStringMatch
+type AppStringMatch struct {
+	// Exact string match. Only 1 of `exact`, `prefix`, or `regex` must be set.
+	Exact string `json:"exact,omitempty"`
+	// Prefix-based match. Only 1 of `exact`, `prefix`, or `regex` must be set.
+	Prefix string `json:"prefix,omitempty"`
+	Regex  string `json:"regex,omitempty"`
+}
+
+// AppTier struct for AppTier
+type AppTier struct {
+	Name                 string `json:"name,omitempty"`
+	Slug                 string `json:"slug,omitempty"`
+	EgressBandwidthBytes string `json:"egress_bandwidth_bytes,omitempty"`
+	BuildSeconds         string `json:"build_seconds,omitempty"`
+}
+
+// UpgradeBuildpackResponse struct for UpgradeBuildpackResponse
+type UpgradeBuildpackResponse struct {
+	// The components that were affected by the upgrade.
+	AffectedComponents []string    `json:"affected_components,omitempty"`
+	Deployment         *Deployment `json:"deployment,omitempty"`
+}
+
+// AppVariableScope the model 'AppVariableScope'
+type AppVariableScope string
+
+// List of AppVariableScope
+const (
+	AppVariableScope_Unset           AppVariableScope = "UNSET"
+	AppVariableScope_RunTime         AppVariableScope = "RUN_TIME"
+	AppVariableScope_BuildTime       AppVariableScope = "BUILD_TIME"
+	AppVariableScope_RunAndBuildTime AppVariableScope = "RUN_AND_BUILD_TIME"
+)
+
+// AppVariableType the model 'AppVariableType'
+type AppVariableType string
+
+// List of AppVariableType
+const (
+	AppVariableType_General AppVariableType = "GENERAL"
+	AppVariableType_Secret  AppVariableType = "SECRET"
+)
diff --git a/apps.go b/apps.go
new file mode 100644
index 0000000..422b48a
--- /dev/null
+++ b/apps.go
@@ -0,0 +1,725 @@
+package godo
+
import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"net/url"
	"strconv"
)
+
const (
	// appsBasePath is the base URL path for all App Platform endpoints.
	appsBasePath = "/v2/apps"
)

// AppLogType is the type of app logs.
type AppLogType string

const (
	// AppLogTypeBuild represents build logs.
	AppLogTypeBuild AppLogType = "BUILD"
	// AppLogTypeDeploy represents deploy logs.
	AppLogTypeDeploy AppLogType = "DEPLOY"
	// AppLogTypeRun represents run logs.
	AppLogTypeRun AppLogType = "RUN"
)
+
// AppsService is an interface for interfacing with the App Platform endpoints
// of the DigitalOcean API.
type AppsService interface {
	// App CRUD and spec validation.
	Create(ctx context.Context, create *AppCreateRequest) (*App, *Response, error)
	Get(ctx context.Context, appID string) (*App, *Response, error)
	List(ctx context.Context, opts *ListOptions) ([]*App, *Response, error)
	Update(ctx context.Context, appID string, update *AppUpdateRequest) (*App, *Response, error)
	Delete(ctx context.Context, appID string) (*Response, error)
	Propose(ctx context.Context, propose *AppProposeRequest) (*AppProposeResponse, *Response, error)

	// Deployments.
	GetDeployment(ctx context.Context, appID, deploymentID string) (*Deployment, *Response, error)
	ListDeployments(ctx context.Context, appID string, opts *ListOptions) ([]*Deployment, *Response, error)
	CreateDeployment(ctx context.Context, appID string, create ...*DeploymentCreateRequest) (*Deployment, *Response, error)

	// Logs.
	GetLogs(ctx context.Context, appID, deploymentID, component string, logType AppLogType, follow bool, tailLines int) (*AppLogs, *Response, error)

	// Platform metadata: regions, tiers, instance sizes.
	ListRegions(ctx context.Context) ([]*AppRegion, *Response, error)

	ListTiers(ctx context.Context) ([]*AppTier, *Response, error)
	GetTier(ctx context.Context, slug string) (*AppTier, *Response, error)

	ListInstanceSizes(ctx context.Context) ([]*AppInstanceSize, *Response, error)
	GetInstanceSize(ctx context.Context, slug string) (*AppInstanceSize, *Response, error)

	// Alerts.
	ListAlerts(ctx context.Context, appID string) ([]*AppAlert, *Response, error)
	UpdateAlertDestinations(ctx context.Context, appID, alertID string, update *AlertDestinationUpdateRequest) (*AppAlert, *Response, error)

	// Source detection and buildpacks.
	Detect(ctx context.Context, detect *DetectRequest) (*DetectResponse, *Response, error)

	ListBuildpacks(ctx context.Context) ([]*Buildpack, *Response, error)
	UpgradeBuildpack(ctx context.Context, appID string, opts UpgradeBuildpackOptions) (*UpgradeBuildpackResponse, *Response, error)
}
+
// AppLogs represent app logs.
type AppLogs struct {
	// LiveURL is a URL for following log output in real time
	// (presumably populated when follow was requested — confirm against the API reference).
	LiveURL string `json:"live_url"`
	// HistoricURLs are URLs to previously captured log output.
	HistoricURLs []string `json:"historic_urls"`
}

// AppUpdateRequest represents a request to update an app.
type AppUpdateRequest struct {
	// Spec is the new app spec, sent as the JSON body of the update.
	Spec *AppSpec `json:"spec"`
}

// DeploymentCreateRequest represents a request to create a deployment.
type DeploymentCreateRequest struct {
	// ForceBuild, when true, asks the platform to rebuild rather than reuse a
	// prior build. NOTE(review): semantics inferred from the field name — confirm.
	ForceBuild bool `json:"force_build"`
}

// AlertDestinationUpdateRequest represents a request to update alert destinations.
type AlertDestinationUpdateRequest struct {
	Emails        []string                `json:"emails"`
	SlackWebhooks []*AppAlertSlackWebhook `json:"slack_webhooks"`
}

// UpgradeBuildpackOptions struct for UpgradeBuildpackOptions; the request body
// for AppsService.UpgradeBuildpack.
type UpgradeBuildpackOptions struct {
	// The ID of the buildpack to upgrade.
	BuildpackID string `json:"buildpack_id,omitempty"`
	// The Major Version to upgrade the buildpack to. If omitted, the latest available major version will be used.
	MajorVersion int32 `json:"major_version,omitempty"`
	// Whether or not to trigger a deployment for the app after upgrading the buildpack.
	TriggerDeployment bool `json:"trigger_deployment,omitempty"`
}
+
// The API wraps each resource in a JSON envelope object. The unexported
// *Root/*root types below mirror those envelopes for decoding responses.

type appRoot struct {
	App *App `json:"app"`
}

// appsRoot also carries pagination links and metadata for list responses.
type appsRoot struct {
	Apps  []*App `json:"apps"`
	Links *Links `json:"links"`
	Meta  *Meta  `json:"meta"`
}

type deploymentRoot struct {
	Deployment *Deployment `json:"deployment"`
}

type deploymentsRoot struct {
	Deployments []*Deployment `json:"deployments"`
	Links       *Links        `json:"links"`
	Meta        *Meta         `json:"meta"`
}

type appTierRoot struct {
	Tier *AppTier `json:"tier"`
}

type appTiersRoot struct {
	Tiers []*AppTier `json:"tiers"`
}

type instanceSizeRoot struct {
	InstanceSize *AppInstanceSize `json:"instance_size"`
}

type instanceSizesRoot struct {
	InstanceSizes []*AppInstanceSize `json:"instance_sizes"`
}

type appRegionsRoot struct {
	Regions []*AppRegion `json:"regions"`
}

type appAlertsRoot struct {
	Alerts []*AppAlert `json:"alerts"`
}

type appAlertRoot struct {
	Alert *AppAlert `json:"alert"`
}

type buildpacksRoot struct {
	Buildpacks []*Buildpack `json:"buildpacks,omitempty"`
}

// AppsServiceOp handles communication with Apps methods of the DigitalOcean API.
// It implements the AppsService interface on top of the shared Client.
type AppsServiceOp struct {
	client *Client
}
+
+// URN returns a URN identifier for the app
+func (a App) URN() string {
+	return ToURN("app", a.ID)
+}
+
+// Create an app.
+func (s *AppsServiceOp) Create(ctx context.Context, create *AppCreateRequest) (*App, *Response, error) {
+	path := appsBasePath
+	req, err := s.client.NewRequest(ctx, http.MethodPost, path, create)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(appRoot)
+	resp, err := s.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	return root.App, resp, nil
+}
+
+// Get an app.
+func (s *AppsServiceOp) Get(ctx context.Context, appID string) (*App, *Response, error) {
+	path := fmt.Sprintf("%s/%s", appsBasePath, appID)
+	req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	root := new(appRoot)
+	resp, err := s.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	return root.App, resp, nil
+}
+
+// List apps.
+func (s *AppsServiceOp) List(ctx context.Context, opts *ListOptions) ([]*App, *Response, error) {
+	path := appsBasePath
+	path, err := addOptions(path, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	root := new(appsRoot)
+	resp, err := s.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	if l := root.Links; l != nil {
+		resp.Links = l
+	}
+
+	if m := root.Meta; m != nil {
+		resp.Meta = m
+	}
+
+	return root.Apps, resp, nil
+}
+
+// Update an app.
+func (s *AppsServiceOp) Update(ctx context.Context, appID string, update *AppUpdateRequest) (*App, *Response, error) {
+	path := fmt.Sprintf("%s/%s", appsBasePath, appID)
+	req, err := s.client.NewRequest(ctx, http.MethodPut, path, update)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(appRoot)
+	resp, err := s.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	return root.App, resp, nil
+}
+
+// Delete an app.
+func (s *AppsServiceOp) Delete(ctx context.Context, appID string) (*Response, error) {
+	path := fmt.Sprintf("%s/%s", appsBasePath, appID)
+	req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := s.client.Do(ctx, req, nil)
+	if err != nil {
+		return resp, err
+	}
+	return resp, nil
+}
+
+// Propose an app.
+func (s *AppsServiceOp) Propose(ctx context.Context, propose *AppProposeRequest) (*AppProposeResponse, *Response, error) {
+	path := fmt.Sprintf("%s/propose", appsBasePath)
+	req, err := s.client.NewRequest(ctx, http.MethodPost, path, propose)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	res := &AppProposeResponse{}
+	resp, err := s.client.Do(ctx, req, res)
+	if err != nil {
+		return nil, resp, err
+	}
+	return res, resp, nil
+}
+
+// GetDeployment gets an app deployment.
+func (s *AppsServiceOp) GetDeployment(ctx context.Context, appID, deploymentID string) (*Deployment, *Response, error) {
+	path := fmt.Sprintf("%s/%s/deployments/%s", appsBasePath, appID, deploymentID)
+	req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	root := new(deploymentRoot)
+	resp, err := s.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	return root.Deployment, resp, nil
+}
+
+// ListDeployments lists an app deployments.
+func (s *AppsServiceOp) ListDeployments(ctx context.Context, appID string, opts *ListOptions) ([]*Deployment, *Response, error) {
+	path := fmt.Sprintf("%s/%s/deployments", appsBasePath, appID)
+	path, err := addOptions(path, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	root := new(deploymentsRoot)
+	resp, err := s.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	if l := root.Links; l != nil {
+		resp.Links = l
+	}
+
+	if m := root.Meta; m != nil {
+		resp.Meta = m
+	}
+
+	return root.Deployments, resp, nil
+}
+
+// CreateDeployment creates an app deployment.
+func (s *AppsServiceOp) CreateDeployment(ctx context.Context, appID string, create ...*DeploymentCreateRequest) (*Deployment, *Response, error) {
+	path := fmt.Sprintf("%s/%s/deployments", appsBasePath, appID)
+
+	var createReq *DeploymentCreateRequest
+	for _, c := range create {
+		createReq = c
+	}
+
+	req, err := s.client.NewRequest(ctx, http.MethodPost, path, createReq)
+	if err != nil {
+		return nil, nil, err
+	}
+	root := new(deploymentRoot)
+	resp, err := s.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	return root.Deployment, resp, nil
+}
+
+// GetLogs retrieves app logs.
+func (s *AppsServiceOp) GetLogs(ctx context.Context, appID, deploymentID, component string, logType AppLogType, follow bool, tailLines int) (*AppLogs, *Response, error) {
+	var url string
+	if deploymentID == "" {
+		url = fmt.Sprintf("%s/%s/logs?type=%s&follow=%t&tail_lines=%d", appsBasePath, appID, logType, follow, tailLines)
+	} else {
+		url = fmt.Sprintf("%s/%s/deployments/%s/logs?type=%s&follow=%t&tail_lines=%d", appsBasePath, appID, deploymentID, logType, follow, tailLines)
+	}
+	if component != "" {
+		url = fmt.Sprintf("%s&component_name=%s", url, component)
+	}
+
+	req, err := s.client.NewRequest(ctx, http.MethodGet, url, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	logs := new(AppLogs)
+	resp, err := s.client.Do(ctx, req, logs)
+	if err != nil {
+		return nil, resp, err
+	}
+	return logs, resp, nil
+}
+
+// ListRegions lists all regions supported by App Platform.
+func (s *AppsServiceOp) ListRegions(ctx context.Context) ([]*AppRegion, *Response, error) {
+	path := fmt.Sprintf("%s/regions", appsBasePath)
+	req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	root := new(appRegionsRoot)
+	resp, err := s.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	return root.Regions, resp, nil
+}
+
+// ListTiers lists available app tiers.
+func (s *AppsServiceOp) ListTiers(ctx context.Context) ([]*AppTier, *Response, error) {
+	path := fmt.Sprintf("%s/tiers", appsBasePath)
+	req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	root := new(appTiersRoot)
+	resp, err := s.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	return root.Tiers, resp, nil
+}
+
+// GetTier retrieves information about a specific app tier.
+func (s *AppsServiceOp) GetTier(ctx context.Context, slug string) (*AppTier, *Response, error) {
+	path := fmt.Sprintf("%s/tiers/%s", appsBasePath, slug)
+	req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	root := new(appTierRoot)
+	resp, err := s.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	return root.Tier, resp, nil
+}
+
+// ListInstanceSizes lists available instance sizes for service, worker, and job components.
+func (s *AppsServiceOp) ListInstanceSizes(ctx context.Context) ([]*AppInstanceSize, *Response, error) {
+	path := fmt.Sprintf("%s/tiers/instance_sizes", appsBasePath)
+	req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	root := new(instanceSizesRoot)
+	resp, err := s.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	return root.InstanceSizes, resp, nil
+}
+
+// GetInstanceSize retrieves information about a specific instance size for service, worker, and job components.
+func (s *AppsServiceOp) GetInstanceSize(ctx context.Context, slug string) (*AppInstanceSize, *Response, error) {
+	path := fmt.Sprintf("%s/tiers/instance_sizes/%s", appsBasePath, slug)
+	req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	root := new(instanceSizeRoot)
+	resp, err := s.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	return root.InstanceSize, resp, nil
+}
+
+// ListAlerts retrieves a list of alerts on an app
+func (s *AppsServiceOp) ListAlerts(ctx context.Context, appID string) ([]*AppAlert, *Response, error) {
+	path := fmt.Sprintf("%s/%s/alerts", appsBasePath, appID)
+	req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	root := new(appAlertsRoot)
+	resp, err := s.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	return root.Alerts, resp, nil
+}
+
+// UpdateAlertDestinations updates the alert destinations of an app's alert
+func (s *AppsServiceOp) UpdateAlertDestinations(ctx context.Context, appID, alertID string, update *AlertDestinationUpdateRequest) (*AppAlert, *Response, error) {
+	path := fmt.Sprintf("%s/%s/alerts/%s/destinations", appsBasePath, appID, alertID)
+	req, err := s.client.NewRequest(ctx, http.MethodPost, path, update)
+	if err != nil {
+		return nil, nil, err
+	}
+	root := new(appAlertRoot)
+	resp, err := s.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	return root.Alert, resp, nil
+}
+
+// Detect an app.
+func (s *AppsServiceOp) Detect(ctx context.Context, detect *DetectRequest) (*DetectResponse, *Response, error) {
+	path := fmt.Sprintf("%s/detect", appsBasePath)
+	req, err := s.client.NewRequest(ctx, http.MethodPost, path, detect)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	res := &DetectResponse{}
+	resp, err := s.client.Do(ctx, req, res)
+	if err != nil {
+		return nil, resp, err
+	}
+	return res, resp, nil
+}
+
+// ListBuildpacks lists the available buildpacks on App Platform.
+func (s *AppsServiceOp) ListBuildpacks(ctx context.Context) ([]*Buildpack, *Response, error) {
+	path := fmt.Sprintf("%s/buildpacks", appsBasePath)
+	req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	root := new(buildpacksRoot)
+	resp, err := s.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	return root.Buildpacks, resp, nil
+}
+
+// UpgradeBuildpack upgrades a buildpack for an app.
+func (s *AppsServiceOp) UpgradeBuildpack(ctx context.Context, appID string, opts UpgradeBuildpackOptions) (*UpgradeBuildpackResponse, *Response, error) {
+	path := fmt.Sprintf("%s/%s/upgrade_buildpack", appsBasePath, appID)
+	req, err := s.client.NewRequest(ctx, http.MethodPost, path, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+	root := new(UpgradeBuildpackResponse)
+	resp, err := s.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	return root, resp, nil
+}
+
// AppComponentType is an app component type.
type AppComponentType string

const (
	// AppComponentTypeService is the type for a service component.
	AppComponentTypeService AppComponentType = "service"
	// AppComponentTypeWorker is the type for a worker component.
	AppComponentTypeWorker AppComponentType = "worker"
	// AppComponentTypeJob is the type for a job component.
	AppComponentTypeJob AppComponentType = "job"
	// AppComponentTypeStaticSite is the type for a static site component.
	AppComponentTypeStaticSite AppComponentType = "static_site"
	// AppComponentTypeDatabase is the type for a database component.
	AppComponentTypeDatabase AppComponentType = "database"
	// AppComponentTypeFunctions is the type for a functions component.
	AppComponentTypeFunctions AppComponentType = "functions"
)

// The GetType implementations below let each component spec type satisfy the
// AppComponentSpec interface.

// GetType returns the Service component type.
func (s *AppServiceSpec) GetType() AppComponentType {
	return AppComponentTypeService
}

// GetType returns the Worker component type.
func (s *AppWorkerSpec) GetType() AppComponentType {
	return AppComponentTypeWorker
}

// GetType returns the Job component type.
func (s *AppJobSpec) GetType() AppComponentType {
	return AppComponentTypeJob
}

// GetType returns the StaticSite component type.
func (s *AppStaticSiteSpec) GetType() AppComponentType {
	return AppComponentTypeStaticSite
}

// GetType returns the Database component type.
func (s *AppDatabaseSpec) GetType() AppComponentType {
	return AppComponentTypeDatabase
}

// GetType returns the Functions component type.
func (s *AppFunctionsSpec) GetType() AppComponentType {
	return AppComponentTypeFunctions
}
+
// AppComponentSpec represents a component's spec: anything with a name and a
// component type. It is the common denominator of all component spec types.
type AppComponentSpec interface {
	GetName() string
	GetType() AppComponentType
}

// AppBuildableComponentSpec is a component that is buildable from source
// (git, GitHub, or GitLab), with a source directory and environment variables.
type AppBuildableComponentSpec interface {
	AppComponentSpec

	GetGit() *GitSourceSpec
	GetGitHub() *GitHubSourceSpec
	GetGitLab() *GitLabSourceSpec

	GetSourceDir() string

	GetEnvs() []*AppVariableDefinition
}

// AppDockerBuildableComponentSpec is a component that is buildable from source using Docker.
type AppDockerBuildableComponentSpec interface {
	AppBuildableComponentSpec

	GetDockerfilePath() string
}

// AppCNBBuildableComponentSpec is a component that is buildable from source using cloud native buildpacks.
type AppCNBBuildableComponentSpec interface {
	AppBuildableComponentSpec

	GetBuildCommand() string
}

// AppContainerComponentSpec is a component that runs in a cluster: it has an
// image source, a run command, and instance sizing/count.
type AppContainerComponentSpec interface {
	AppBuildableComponentSpec

	GetImage() *ImageSourceSpec
	GetRunCommand() string
	GetInstanceSizeSlug() string
	GetInstanceCount() int64
}

// AppRoutableComponentSpec is a component that defines HTTP routes and an
// optional CORS policy.
type AppRoutableComponentSpec interface {
	AppComponentSpec

	GetRoutes() []*AppRouteSpec
	GetCORS() *AppCORSPolicy
}
+
// AppSourceType is an app source type.
type AppSourceType string

const (
	AppSourceTypeGitHub AppSourceType = "github" // GitHub-hosted repository
	AppSourceTypeGitLab AppSourceType = "gitlab" // GitLab-hosted repository
	AppSourceTypeGit    AppSourceType = "git"    // plain git remote
	AppSourceTypeImage  AppSourceType = "image"  // container image
)

// SourceSpec represents a source: anything that can report its source type.
type SourceSpec interface {
	GetType() AppSourceType
}

// GetType returns the GitHub source type.
func (s *GitHubSourceSpec) GetType() AppSourceType {
	return AppSourceTypeGitHub
}

// GetType returns the GitLab source type.
func (s *GitLabSourceSpec) GetType() AppSourceType {
	return AppSourceTypeGitLab
}

// GetType returns the Git source type.
func (s *GitSourceSpec) GetType() AppSourceType {
	return AppSourceTypeGit
}

// GetType returns the Image source type.
func (s *ImageSourceSpec) GetType() AppSourceType {
	return AppSourceTypeImage
}
+
// VCSSourceSpec represents a VCS source: a SourceSpec that also exposes a
// repository identifier and branch.
type VCSSourceSpec interface {
	SourceSpec
	GetRepo() string
	GetBranch() string
}

// GetRepo allows GitSourceSpec to implement the VCSSourceSpec interface
// (the original comment said SourceSpec, but GetRepo is required by
// VCSSourceSpec); the clone URL serves as the repo identifier.
func (s *GitSourceSpec) GetRepo() string {
	return s.RepoCloneURL
}
+
+// ForEachAppComponentSpec iterates over each component spec in an app.
+func (s *AppSpec) ForEachAppComponentSpec(fn func(component AppComponentSpec) error) error {
+	if s == nil {
+		return nil
+	}
+	for _, c := range s.Services {
+		if err := fn(c); err != nil {
+			return err
+		}
+	}
+	for _, c := range s.Workers {
+		if err := fn(c); err != nil {
+			return err
+		}
+	}
+	for _, c := range s.Jobs {
+		if err := fn(c); err != nil {
+			return err
+		}
+	}
+	for _, c := range s.StaticSites {
+		if err := fn(c); err != nil {
+			return err
+		}
+	}
+	for _, c := range s.Databases {
+		if err := fn(c); err != nil {
+			return err
+		}
+	}
+	for _, c := range s.Functions {
+		if err := fn(c); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// ForEachAppSpecComponent loops over each component spec that matches the provided interface type.
+// The type constraint is intentionally set to `any` to allow use of arbitrary interfaces to match the desired component types.
+//
+// Examples:
+//   - interface constraint
+//     godo.ForEachAppSpecComponent(spec, func(component godo.AppBuildableComponentSpec) error { ... })
+//   - struct type constraint
+//     godo.ForEachAppSpecComponent(spec, func(component *godo.AppStaticSiteSpec) error { ... })
+func ForEachAppSpecComponent[T any](s *AppSpec, fn func(component T) error) error {
+	return s.ForEachAppComponentSpec(func(component AppComponentSpec) error {
+		if c, ok := component.(T); ok {
+			if err := fn(c); err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+}
+
+// GetAppSpecComponent returns an app spec component by type and name.
+//
+// Examples:
+//   - interface constraint
+//     godo.GetAppSpecComponent[godo.AppBuildableComponentSpec](spec, "component-name")
+//   - struct type constraint
+//     godo.GetAppSpecComponent[*godo.AppServiceSpec](spec, "component-name")
+func GetAppSpecComponent[T interface {
+	GetName() string
+}](s *AppSpec, name string) (T, error) {
+	var c T
+	errStop := errors.New("stop")
+	err := ForEachAppSpecComponent(s, func(component T) error {
+		if component.GetName() == name {
+			c = component
+			return errStop
+		}
+		return nil
+	})
+	if err == errStop {
+		return c, nil
+	}
+	return c, fmt.Errorf("component %s not found", name)
+}
diff --git a/apps_accessors.go b/apps_accessors.go
new file mode 100644
index 0000000..82bba43
--- /dev/null
+++ b/apps_accessors.go
@@ -0,0 +1,3159 @@
+// Code generated automatically. DO NOT EDIT.
+
+package godo
+
+import (
+	"time"
+)
+
+// GetActiveDeployment returns the ActiveDeployment field.
+func (a *App) GetActiveDeployment() *Deployment {
+	if a == nil {
+		return nil
+	}
+	return a.ActiveDeployment
+}
+
+// GetBuildConfig returns the BuildConfig field.
+func (a *App) GetBuildConfig() *AppBuildConfig {
+	if a == nil {
+		return nil
+	}
+	return a.BuildConfig
+}
+
+// GetCreatedAt returns the CreatedAt field.
+func (a *App) GetCreatedAt() time.Time {
+	if a == nil {
+		return time.Time{}
+	}
+	return a.CreatedAt
+}
+
+// GetDefaultIngress returns the DefaultIngress field.
+func (a *App) GetDefaultIngress() string {
+	if a == nil {
+		return ""
+	}
+	return a.DefaultIngress
+}
+
+// GetDomains returns the Domains field.
+func (a *App) GetDomains() []*AppDomain {
+	if a == nil {
+		return nil
+	}
+	return a.Domains
+}
+
+// GetID returns the ID field.
+func (a *App) GetID() string {
+	if a == nil {
+		return ""
+	}
+	return a.ID
+}
+
+// GetInProgressDeployment returns the InProgressDeployment field.
+func (a *App) GetInProgressDeployment() *Deployment {
+	if a == nil {
+		return nil
+	}
+	return a.InProgressDeployment
+}
+
+// GetLastDeploymentActiveAt returns the LastDeploymentActiveAt field.
+func (a *App) GetLastDeploymentActiveAt() time.Time {
+	if a == nil {
+		return time.Time{}
+	}
+	return a.LastDeploymentActiveAt
+}
+
+// GetLastDeploymentCreatedAt returns the LastDeploymentCreatedAt field.
+func (a *App) GetLastDeploymentCreatedAt() time.Time {
+	if a == nil {
+		return time.Time{}
+	}
+	return a.LastDeploymentCreatedAt
+}
+
+// GetLiveDomain returns the LiveDomain field.
+func (a *App) GetLiveDomain() string {
+	if a == nil {
+		return ""
+	}
+	return a.LiveDomain
+}
+
+// GetLiveURL returns the LiveURL field.
+func (a *App) GetLiveURL() string {
+	if a == nil {
+		return ""
+	}
+	return a.LiveURL
+}
+
+// GetLiveURLBase returns the LiveURLBase field.
+func (a *App) GetLiveURLBase() string {
+	if a == nil {
+		return ""
+	}
+	return a.LiveURLBase
+}
+
+// GetOwnerUUID returns the OwnerUUID field.
+func (a *App) GetOwnerUUID() string {
+	if a == nil {
+		return ""
+	}
+	return a.OwnerUUID
+}
+
+// GetPendingDeployment returns the PendingDeployment field.
+func (a *App) GetPendingDeployment() *Deployment {
+	if a == nil {
+		return nil
+	}
+	return a.PendingDeployment
+}
+
+// GetPinnedDeployment returns the PinnedDeployment field.
+func (a *App) GetPinnedDeployment() *Deployment {
+	if a == nil {
+		return nil
+	}
+	return a.PinnedDeployment
+}
+
+// GetProjectID returns the ProjectID field.
+func (a *App) GetProjectID() string {
+	if a == nil {
+		return ""
+	}
+	return a.ProjectID
+}
+
+// GetRegion returns the Region field.
+func (a *App) GetRegion() *AppRegion {
+	if a == nil {
+		return nil
+	}
+	return a.Region
+}
+
+// GetSpec returns the Spec field.
+func (a *App) GetSpec() *AppSpec {
+	if a == nil {
+		return nil
+	}
+	return a.Spec
+}
+
+// GetTierSlug returns the TierSlug field.
+func (a *App) GetTierSlug() string {
+	if a == nil {
+		return ""
+	}
+	return a.TierSlug
+}
+
+// GetUpdatedAt returns the UpdatedAt field.
+func (a *App) GetUpdatedAt() time.Time {
+	if a == nil {
+		return time.Time{}
+	}
+	return a.UpdatedAt
+}
+
+// GetComponentName returns the ComponentName field.
+func (a *AppAlert) GetComponentName() string {
+	if a == nil {
+		return ""
+	}
+	return a.ComponentName
+}
+
+// GetEmails returns the Emails field.
+func (a *AppAlert) GetEmails() []string {
+	if a == nil {
+		return nil
+	}
+	return a.Emails
+}
+
+// GetID returns the ID field.
+func (a *AppAlert) GetID() string {
+	if a == nil {
+		return ""
+	}
+	return a.ID
+}
+
+// GetPhase returns the Phase field.
+func (a *AppAlert) GetPhase() AppAlertPhase {
+	if a == nil {
+		return ""
+	}
+	return a.Phase
+}
+
+// GetProgress returns the Progress field.
+func (a *AppAlert) GetProgress() *AppAlertProgress {
+	if a == nil {
+		return nil
+	}
+	return a.Progress
+}
+
+// GetSlackWebhooks returns the SlackWebhooks field.
+func (a *AppAlert) GetSlackWebhooks() []*AppAlertSlackWebhook {
+	if a == nil {
+		return nil
+	}
+	return a.SlackWebhooks
+}
+
+// GetSpec returns the Spec field.
+func (a *AppAlert) GetSpec() *AppAlertSpec {
+	if a == nil {
+		return nil
+	}
+	return a.Spec
+}
+
+// GetSteps returns the Steps field.
+func (a *AppAlertProgress) GetSteps() []*AppAlertProgressStep {
+	if a == nil {
+		return nil
+	}
+	return a.Steps
+}
+
+// GetEndedAt returns the EndedAt field.
+func (a *AppAlertProgressStep) GetEndedAt() time.Time {
+	if a == nil {
+		return time.Time{}
+	}
+	return a.EndedAt
+}
+
+// GetName returns the Name field.
+func (a *AppAlertProgressStep) GetName() string {
+	if a == nil {
+		return ""
+	}
+	return a.Name
+}
+
+// GetReason returns the Reason field.
+func (a *AppAlertProgressStep) GetReason() *AppAlertProgressStepReason {
+	if a == nil {
+		return nil
+	}
+	return a.Reason
+}
+
+// GetStartedAt returns the StartedAt field.
+func (a *AppAlertProgressStep) GetStartedAt() time.Time {
+	if a == nil {
+		return time.Time{}
+	}
+	return a.StartedAt
+}
+
+// GetStatus returns the Status field.
+func (a *AppAlertProgressStep) GetStatus() AppAlertProgressStepStatus {
+	if a == nil {
+		return ""
+	}
+	return a.Status
+}
+
+// GetSteps returns the Steps field.
+func (a *AppAlertProgressStep) GetSteps() []*AppAlertProgressStep {
+	if a == nil {
+		return nil
+	}
+	return a.Steps
+}
+
+// GetCode returns the Code field.
+func (a *AppAlertProgressStepReason) GetCode() string {
+	if a == nil {
+		return ""
+	}
+	return a.Code
+}
+
+// GetMessage returns the Message field.
+func (a *AppAlertProgressStepReason) GetMessage() string {
+	if a == nil {
+		return ""
+	}
+	return a.Message
+}
+
+// GetChannel returns the Channel field.
+func (a *AppAlertSlackWebhook) GetChannel() string {
+	if a == nil {
+		return ""
+	}
+	return a.Channel
+}
+
+// GetURL returns the URL field.
+func (a *AppAlertSlackWebhook) GetURL() string {
+	if a == nil {
+		return ""
+	}
+	return a.URL
+}
+
+// GetDisabled returns the Disabled field.
+func (a *AppAlertSpec) GetDisabled() bool {
+	if a == nil {
+		return false
+	}
+	return a.Disabled
+}
+
+// GetOperator returns the Operator field.
+func (a *AppAlertSpec) GetOperator() AppAlertSpecOperator {
+	if a == nil {
+		return ""
+	}
+	return a.Operator
+}
+
+// GetRule returns the Rule field.
+func (a *AppAlertSpec) GetRule() AppAlertSpecRule {
+	if a == nil {
+		return ""
+	}
+	return a.Rule
+}
+
+// GetValue returns the Value field.
+func (a *AppAlertSpec) GetValue() float32 {
+	if a == nil {
+		return 0
+	}
+	return a.Value
+}
+
+// GetWindow returns the Window field.
+func (a *AppAlertSpec) GetWindow() AppAlertSpecWindow {
+	if a == nil {
+		return ""
+	}
+	return a.Window
+}
+
+// GetCNBVersioning returns the CNBVersioning field.
+func (a *AppBuildConfig) GetCNBVersioning() *AppBuildConfigCNBVersioning {
+	if a == nil {
+		return nil
+	}
+	return a.CNBVersioning
+}
+
+// GetBuildpacks returns the Buildpacks field.
+func (a *AppBuildConfigCNBVersioning) GetBuildpacks() []*Buildpack {
+	if a == nil {
+		return nil
+	}
+	return a.Buildpacks
+}
+
+// GetStackID returns the StackID field.
+func (a *AppBuildConfigCNBVersioning) GetStackID() string {
+	if a == nil {
+		return ""
+	}
+	return a.StackID
+}
+
+// GetAllowCredentials returns the AllowCredentials field.
+func (a *AppCORSPolicy) GetAllowCredentials() bool {
+	if a == nil {
+		return false
+	}
+	return a.AllowCredentials
+}
+
+// GetAllowHeaders returns the AllowHeaders field.
+func (a *AppCORSPolicy) GetAllowHeaders() []string {
+	if a == nil {
+		return nil
+	}
+	return a.AllowHeaders
+}
+
+// GetAllowMethods returns the AllowMethods field.
+func (a *AppCORSPolicy) GetAllowMethods() []string {
+	if a == nil {
+		return nil
+	}
+	return a.AllowMethods
+}
+
+// GetAllowOrigins returns the AllowOrigins field.
+func (a *AppCORSPolicy) GetAllowOrigins() []*AppStringMatch {
+	if a == nil {
+		return nil
+	}
+	return a.AllowOrigins
+}
+
+// GetExposeHeaders returns the ExposeHeaders field.
+func (a *AppCORSPolicy) GetExposeHeaders() []string {
+	if a == nil {
+		return nil
+	}
+	return a.ExposeHeaders
+}
+
+// GetMaxAge returns the MaxAge field.
+func (a *AppCORSPolicy) GetMaxAge() string {
+	if a == nil {
+		return ""
+	}
+	return a.MaxAge
+}
+
+// GetProjectID returns the ProjectID field.
+func (a *AppCreateRequest) GetProjectID() string {
+	if a == nil {
+		return ""
+	}
+	return a.ProjectID
+}
+
+// GetSpec returns the Spec field.
+func (a *AppCreateRequest) GetSpec() *AppSpec {
+	if a == nil {
+		return nil
+	}
+	return a.Spec
+}
+
+// GetClusterName returns the ClusterName field.
+func (a *AppDatabaseSpec) GetClusterName() string {
+	if a == nil {
+		return ""
+	}
+	return a.ClusterName
+}
+
+// GetDBName returns the DBName field.
+func (a *AppDatabaseSpec) GetDBName() string {
+	if a == nil {
+		return ""
+	}
+	return a.DBName
+}
+
+// GetDBUser returns the DBUser field.
+func (a *AppDatabaseSpec) GetDBUser() string {
+	if a == nil {
+		return ""
+	}
+	return a.DBUser
+}
+
+// GetEngine returns the Engine field.
+func (a *AppDatabaseSpec) GetEngine() AppDatabaseSpecEngine {
+	if a == nil {
+		return ""
+	}
+	return a.Engine
+}
+
+// GetName returns the Name field.
+func (a *AppDatabaseSpec) GetName() string {
+	if a == nil {
+		return ""
+	}
+	return a.Name
+}
+
+// GetNumNodes returns the NumNodes field.
+func (a *AppDatabaseSpec) GetNumNodes() int64 {
+	if a == nil {
+		return 0
+	}
+	return a.NumNodes
+}
+
+// GetProduction returns the Production field.
+func (a *AppDatabaseSpec) GetProduction() bool {
+	if a == nil {
+		return false
+	}
+	return a.Production
+}
+
+// GetSize returns the Size field.
+func (a *AppDatabaseSpec) GetSize() string {
+	if a == nil {
+		return ""
+	}
+	return a.Size
+}
+
+// GetVersion returns the Version field.
+func (a *AppDatabaseSpec) GetVersion() string {
+	if a == nil {
+		return ""
+	}
+	return a.Version
+}
+
+// GetCertificateExpiresAt returns the CertificateExpiresAt field.
+func (a *AppDomain) GetCertificateExpiresAt() time.Time {
+	if a == nil {
+		return time.Time{}
+	}
+	return a.CertificateExpiresAt
+}
+
+// GetID returns the ID field.
+func (a *AppDomain) GetID() string {
+	if a == nil {
+		return ""
+	}
+	return a.ID
+}
+
+// GetPhase returns the Phase field.
+func (a *AppDomain) GetPhase() AppDomainPhase {
+	if a == nil {
+		return ""
+	}
+	return a.Phase
+}
+
+// GetProgress returns the Progress field.
+func (a *AppDomain) GetProgress() *AppDomainProgress {
+	if a == nil {
+		return nil
+	}
+	return a.Progress
+}
+
+// GetRotateValidationRecords returns the RotateValidationRecords field.
+func (a *AppDomain) GetRotateValidationRecords() bool {
+	if a == nil {
+		return false
+	}
+	return a.RotateValidationRecords
+}
+
+// GetSpec returns the Spec field.
+func (a *AppDomain) GetSpec() *AppDomainSpec {
+	if a == nil {
+		return nil
+	}
+	return a.Spec
+}
+
+// GetValidation returns the Validation field.
+func (a *AppDomain) GetValidation() *AppDomainValidation {
+	if a == nil {
+		return nil
+	}
+	return a.Validation
+}
+
+// GetValidations returns the Validations field.
+func (a *AppDomain) GetValidations() []*AppDomainValidation {
+	if a == nil {
+		return nil
+	}
+	return a.Validations
+}
+
+// GetSteps returns the Steps field.
+func (a *AppDomainProgress) GetSteps() []*AppDomainProgressStep {
+	if a == nil {
+		return nil
+	}
+	return a.Steps
+}
+
+// GetEndedAt returns the EndedAt field.
+func (a *AppDomainProgressStep) GetEndedAt() time.Time {
+	if a == nil {
+		return time.Time{}
+	}
+	return a.EndedAt
+}
+
+// GetName returns the Name field.
+func (a *AppDomainProgressStep) GetName() string {
+	if a == nil {
+		return ""
+	}
+	return a.Name
+}
+
+// GetReason returns the Reason field.
+func (a *AppDomainProgressStep) GetReason() *AppDomainProgressStepReason {
+	if a == nil {
+		return nil
+	}
+	return a.Reason
+}
+
+// GetStartedAt returns the StartedAt field.
+func (a *AppDomainProgressStep) GetStartedAt() time.Time {
+	if a == nil {
+		return time.Time{}
+	}
+	return a.StartedAt
+}
+
+// GetStatus returns the Status field.
+func (a *AppDomainProgressStep) GetStatus() AppDomainProgressStepStatus {
+	if a == nil {
+		return ""
+	}
+	return a.Status
+}
+
+// GetSteps returns the Steps field.
+func (a *AppDomainProgressStep) GetSteps() []*AppDomainProgressStep {
+	if a == nil {
+		return nil
+	}
+	return a.Steps
+}
+
+// GetCode returns the Code field.
+func (a *AppDomainProgressStepReason) GetCode() string {
+	if a == nil {
+		return ""
+	}
+	return a.Code
+}
+
+// GetMessage returns the Message field.
+func (a *AppDomainProgressStepReason) GetMessage() string {
+	if a == nil {
+		return ""
+	}
+	return a.Message
+}
+
+// GetCertificate returns the Certificate field.
+func (a *AppDomainSpec) GetCertificate() string {
+	if a == nil {
+		return ""
+	}
+	return a.Certificate
+}
+
+// GetDomain returns the Domain field.
+func (a *AppDomainSpec) GetDomain() string {
+	if a == nil {
+		return ""
+	}
+	return a.Domain
+}
+
+// GetMinimumTLSVersion returns the MinimumTLSVersion field.
+func (a *AppDomainSpec) GetMinimumTLSVersion() string {
+	if a == nil {
+		return ""
+	}
+	return a.MinimumTLSVersion
+}
+
+// GetType returns the Type field.
+func (a *AppDomainSpec) GetType() AppDomainSpecType {
+	if a == nil {
+		return ""
+	}
+	return a.Type
+}
+
+// GetWildcard returns the Wildcard field.
+func (a *AppDomainSpec) GetWildcard() bool {
+	if a == nil {
+		return false
+	}
+	return a.Wildcard
+}
+
+// GetZone returns the Zone field.
+func (a *AppDomainSpec) GetZone() string {
+	if a == nil {
+		return ""
+	}
+	return a.Zone
+}
+
+// GetTXTName returns the TXTName field.
+func (a *AppDomainValidation) GetTXTName() string {
+	if a == nil {
+		return ""
+	}
+	return a.TXTName
+}
+
+// GetTXTValue returns the TXTValue field.
+func (a *AppDomainValidation) GetTXTValue() string {
+	if a == nil {
+		return ""
+	}
+	return a.TXTValue
+}
+
+// GetAlerts returns the Alerts field.
+func (a *AppFunctionsSpec) GetAlerts() []*AppAlertSpec {
+	if a == nil {
+		return nil
+	}
+	return a.Alerts
+}
+
+// GetCORS returns the CORS field.
+func (a *AppFunctionsSpec) GetCORS() *AppCORSPolicy {
+	if a == nil {
+		return nil
+	}
+	return a.CORS
+}
+
+// GetEnvs returns the Envs field.
+func (a *AppFunctionsSpec) GetEnvs() []*AppVariableDefinition {
+	if a == nil {
+		return nil
+	}
+	return a.Envs
+}
+
+// GetGit returns the Git field.
+func (a *AppFunctionsSpec) GetGit() *GitSourceSpec {
+	if a == nil {
+		return nil
+	}
+	return a.Git
+}
+
+// GetGitHub returns the GitHub field.
+func (a *AppFunctionsSpec) GetGitHub() *GitHubSourceSpec {
+	if a == nil {
+		return nil
+	}
+	return a.GitHub
+}
+
+// GetGitLab returns the GitLab field.
+func (a *AppFunctionsSpec) GetGitLab() *GitLabSourceSpec {
+	if a == nil {
+		return nil
+	}
+	return a.GitLab
+}
+
+// GetLogDestinations returns the LogDestinations field.
+func (a *AppFunctionsSpec) GetLogDestinations() []*AppLogDestinationSpec {
+	if a == nil {
+		return nil
+	}
+	return a.LogDestinations
+}
+
+// GetName returns the Name field.
+func (a *AppFunctionsSpec) GetName() string {
+	if a == nil {
+		return ""
+	}
+	return a.Name
+}
+
+// GetRoutes returns the Routes field.
+func (a *AppFunctionsSpec) GetRoutes() []*AppRouteSpec {
+	if a == nil {
+		return nil
+	}
+	return a.Routes
+}
+
+// GetSourceDir returns the SourceDir field.
+func (a *AppFunctionsSpec) GetSourceDir() string {
+	if a == nil {
+		return ""
+	}
+	return a.SourceDir
+}
+
+// GetLoadBalancer returns the LoadBalancer field.
+func (a *AppIngressSpec) GetLoadBalancer() AppIngressSpecLoadBalancer {
+	if a == nil {
+		return ""
+	}
+	return a.LoadBalancer
+}
+
+// GetLoadBalancerSize returns the LoadBalancerSize field.
+func (a *AppIngressSpec) GetLoadBalancerSize() int64 {
+	if a == nil {
+		return 0
+	}
+	return a.LoadBalancerSize
+}
+
+// GetRules returns the Rules field.
+func (a *AppIngressSpec) GetRules() []*AppIngressSpecRule {
+	if a == nil {
+		return nil
+	}
+	return a.Rules
+}
+
+// GetComponent returns the Component field.
+func (a *AppIngressSpecRule) GetComponent() *AppIngressSpecRuleRoutingComponent {
+	if a == nil {
+		return nil
+	}
+	return a.Component
+}
+
+// GetCORS returns the CORS field.
+func (a *AppIngressSpecRule) GetCORS() *AppCORSPolicy {
+	if a == nil {
+		return nil
+	}
+	return a.CORS
+}
+
+// GetMatch returns the Match field.
+func (a *AppIngressSpecRule) GetMatch() *AppIngressSpecRuleMatch {
+	if a == nil {
+		return nil
+	}
+	return a.Match
+}
+
+// GetRedirect returns the Redirect field.
+func (a *AppIngressSpecRule) GetRedirect() *AppIngressSpecRuleRoutingRedirect {
+	if a == nil {
+		return nil
+	}
+	return a.Redirect
+}
+
+// GetPath returns the Path field.
+func (a *AppIngressSpecRuleMatch) GetPath() *AppIngressSpecRuleStringMatch {
+	if a == nil {
+		return nil
+	}
+	return a.Path
+}
+
+// GetName returns the Name field.
+func (a *AppIngressSpecRuleRoutingComponent) GetName() string {
+	if a == nil {
+		return ""
+	}
+	return a.Name
+}
+
+// GetPreservePathPrefix returns the PreservePathPrefix field.
+func (a *AppIngressSpecRuleRoutingComponent) GetPreservePathPrefix() bool {
+	if a == nil {
+		return false
+	}
+	return a.PreservePathPrefix
+}
+
+// GetRewrite returns the Rewrite field.
+func (a *AppIngressSpecRuleRoutingComponent) GetRewrite() string {
+	if a == nil {
+		return ""
+	}
+	return a.Rewrite
+}
+
+// GetAuthority returns the Authority field.
+func (a *AppIngressSpecRuleRoutingRedirect) GetAuthority() string {
+	if a == nil {
+		return ""
+	}
+	return a.Authority
+}
+
+// GetPort returns the Port field.
+func (a *AppIngressSpecRuleRoutingRedirect) GetPort() int64 {
+	if a == nil {
+		return 0
+	}
+	return a.Port
+}
+
+// GetRedirectCode returns the RedirectCode field.
+func (a *AppIngressSpecRuleRoutingRedirect) GetRedirectCode() int64 {
+	if a == nil {
+		return 0
+	}
+	return a.RedirectCode
+}
+
+// GetScheme returns the Scheme field.
+func (a *AppIngressSpecRuleRoutingRedirect) GetScheme() string {
+	if a == nil {
+		return ""
+	}
+	return a.Scheme
+}
+
+// GetUri returns the Uri field.
+func (a *AppIngressSpecRuleRoutingRedirect) GetUri() string {
+	if a == nil {
+		return ""
+	}
+	return a.Uri
+}
+
+// GetPrefix returns the Prefix field.
+func (a *AppIngressSpecRuleStringMatch) GetPrefix() string {
+	if a == nil {
+		return ""
+	}
+	return a.Prefix
+}
+
+// GetCPUs returns the CPUs field.
+func (a *AppInstanceSize) GetCPUs() string {
+	if a == nil {
+		return ""
+	}
+	return a.CPUs
+}
+
+// GetCPUType returns the CPUType field.
+func (a *AppInstanceSize) GetCPUType() AppInstanceSizeCPUType {
+	if a == nil {
+		return ""
+	}
+	return a.CPUType
+}
+
+// GetMemoryBytes returns the MemoryBytes field.
+func (a *AppInstanceSize) GetMemoryBytes() string {
+	if a == nil {
+		return ""
+	}
+	return a.MemoryBytes
+}
+
+// GetName returns the Name field.
+func (a *AppInstanceSize) GetName() string {
+	if a == nil {
+		return ""
+	}
+	return a.Name
+}
+
+// GetSlug returns the Slug field.
+func (a *AppInstanceSize) GetSlug() string {
+	if a == nil {
+		return ""
+	}
+	return a.Slug
+}
+
+// GetTierDowngradeTo returns the TierDowngradeTo field.
+func (a *AppInstanceSize) GetTierDowngradeTo() string {
+	if a == nil {
+		return ""
+	}
+	return a.TierDowngradeTo
+}
+
+// GetTierSlug returns the TierSlug field.
+func (a *AppInstanceSize) GetTierSlug() string {
+	if a == nil {
+		return ""
+	}
+	return a.TierSlug
+}
+
+// GetTierUpgradeTo returns the TierUpgradeTo field.
+func (a *AppInstanceSize) GetTierUpgradeTo() string {
+	if a == nil {
+		return ""
+	}
+	return a.TierUpgradeTo
+}
+
+// GetUSDPerMonth returns the USDPerMonth field.
+func (a *AppInstanceSize) GetUSDPerMonth() string {
+	if a == nil {
+		return ""
+	}
+	return a.USDPerMonth
+}
+
+// GetUSDPerSecond returns the USDPerSecond field.
+func (a *AppInstanceSize) GetUSDPerSecond() string {
+	if a == nil {
+		return ""
+	}
+	return a.USDPerSecond
+}
+
+// GetAlerts returns the Alerts field.
+func (a *AppJobSpec) GetAlerts() []*AppAlertSpec {
+	if a == nil {
+		return nil
+	}
+	return a.Alerts
+}
+
+// GetBuildCommand returns the BuildCommand field.
+func (a *AppJobSpec) GetBuildCommand() string {
+	if a == nil {
+		return ""
+	}
+	return a.BuildCommand
+}
+
+// GetDockerfilePath returns the DockerfilePath field.
+func (a *AppJobSpec) GetDockerfilePath() string {
+	if a == nil {
+		return ""
+	}
+	return a.DockerfilePath
+}
+
+// GetEnvironmentSlug returns the EnvironmentSlug field.
+func (a *AppJobSpec) GetEnvironmentSlug() string {
+	if a == nil {
+		return ""
+	}
+	return a.EnvironmentSlug
+}
+
+// GetEnvs returns the Envs field.
+func (a *AppJobSpec) GetEnvs() []*AppVariableDefinition {
+	if a == nil {
+		return nil
+	}
+	return a.Envs
+}
+
+// GetGit returns the Git field.
+func (a *AppJobSpec) GetGit() *GitSourceSpec {
+	if a == nil {
+		return nil
+	}
+	return a.Git
+}
+
+// GetGitHub returns the GitHub field.
+func (a *AppJobSpec) GetGitHub() *GitHubSourceSpec {
+	if a == nil {
+		return nil
+	}
+	return a.GitHub
+}
+
+// GetGitLab returns the GitLab field.
+func (a *AppJobSpec) GetGitLab() *GitLabSourceSpec {
+	if a == nil {
+		return nil
+	}
+	return a.GitLab
+}
+
+// GetImage returns the Image field.
+func (a *AppJobSpec) GetImage() *ImageSourceSpec {
+	if a == nil {
+		return nil
+	}
+	return a.Image
+}
+
+// GetInstanceCount returns the InstanceCount field.
+func (a *AppJobSpec) GetInstanceCount() int64 {
+	if a == nil {
+		return 0
+	}
+	return a.InstanceCount
+}
+
+// GetInstanceSizeSlug returns the InstanceSizeSlug field.
+func (a *AppJobSpec) GetInstanceSizeSlug() string {
+	if a == nil {
+		return ""
+	}
+	return a.InstanceSizeSlug
+}
+
+// GetKind returns the Kind field.
+func (a *AppJobSpec) GetKind() AppJobSpecKind {
+	if a == nil {
+		return ""
+	}
+	return a.Kind
+}
+
+// GetLogDestinations returns the LogDestinations field.
+func (a *AppJobSpec) GetLogDestinations() []*AppLogDestinationSpec {
+	if a == nil {
+		return nil
+	}
+	return a.LogDestinations
+}
+
+// GetName returns the Name field.
+func (a *AppJobSpec) GetName() string {
+	if a == nil {
+		return ""
+	}
+	return a.Name
+}
+
+// GetRunCommand returns the RunCommand field.
+func (a *AppJobSpec) GetRunCommand() string {
+	if a == nil {
+		return ""
+	}
+	return a.RunCommand
+}
+
+// GetSourceDir returns the SourceDir field.
+func (a *AppJobSpec) GetSourceDir() string {
+	if a == nil {
+		return ""
+	}
+	return a.SourceDir
+}
+
+// GetDatadog returns the Datadog field.
+func (a *AppLogDestinationSpec) GetDatadog() *AppLogDestinationSpecDataDog {
+	if a == nil {
+		return nil
+	}
+	return a.Datadog
+}
+
+// GetEndpoint returns the Endpoint field.
+func (a *AppLogDestinationSpec) GetEndpoint() string {
+	if a == nil {
+		return ""
+	}
+	return a.Endpoint
+}
+
+// GetHeaders returns the Headers field.
+func (a *AppLogDestinationSpec) GetHeaders() []*AppLogDestinationSpecHeader {
+	if a == nil {
+		return nil
+	}
+	return a.Headers
+}
+
+// GetLogtail returns the Logtail field.
+func (a *AppLogDestinationSpec) GetLogtail() *AppLogDestinationSpecLogtail {
+	if a == nil {
+		return nil
+	}
+	return a.Logtail
+}
+
+// GetName returns the Name field.
+func (a *AppLogDestinationSpec) GetName() string {
+	if a == nil {
+		return ""
+	}
+	return a.Name
+}
+
+// GetPapertrail returns the Papertrail field.
+func (a *AppLogDestinationSpec) GetPapertrail() *AppLogDestinationSpecPapertrail {
+	if a == nil {
+		return nil
+	}
+	return a.Papertrail
+}
+
+// GetTLSInsecure returns the TLSInsecure field.
+func (a *AppLogDestinationSpec) GetTLSInsecure() bool {
+	if a == nil {
+		return false
+	}
+	return a.TLSInsecure
+}
+
+// GetApiKey returns the ApiKey field.
+func (a *AppLogDestinationSpecDataDog) GetApiKey() string {
+	if a == nil {
+		return ""
+	}
+	return a.ApiKey
+}
+
+// GetEndpoint returns the Endpoint field.
+func (a *AppLogDestinationSpecDataDog) GetEndpoint() string {
+	if a == nil {
+		return ""
+	}
+	return a.Endpoint
+}
+
+// GetKey returns the Key field.
+func (a *AppLogDestinationSpecHeader) GetKey() string {
+	if a == nil {
+		return ""
+	}
+	return a.Key
+}
+
+// GetValue returns the Value field.
+func (a *AppLogDestinationSpecHeader) GetValue() string {
+	if a == nil {
+		return ""
+	}
+	return a.Value
+}
+
+// GetToken returns the Token field.
+func (a *AppLogDestinationSpecLogtail) GetToken() string {
+	if a == nil {
+		return ""
+	}
+	return a.Token
+}
+
+// GetEndpoint returns the Endpoint field.
+func (a *AppLogDestinationSpecPapertrail) GetEndpoint() string {
+	if a == nil {
+		return ""
+	}
+	return a.Endpoint
+}
+
+// GetAppID returns the AppID field.
+func (a *AppProposeRequest) GetAppID() string {
+	if a == nil {
+		return ""
+	}
+	return a.AppID
+}
+
+// GetSpec returns the Spec field.
+func (a *AppProposeRequest) GetSpec() *AppSpec {
+	if a == nil {
+		return nil
+	}
+	return a.Spec
+}
+
+// GetAppCost returns the AppCost field.
+func (a *AppProposeResponse) GetAppCost() float32 {
+	if a == nil {
+		return 0
+	}
+	return a.AppCost
+}
+
+// GetAppIsStarter returns the AppIsStarter field.
+func (a *AppProposeResponse) GetAppIsStarter() bool {
+	if a == nil {
+		return false
+	}
+	return a.AppIsStarter
+}
+
+// GetAppIsStatic returns the AppIsStatic field.
+func (a *AppProposeResponse) GetAppIsStatic() bool {
+	if a == nil {
+		return false
+	}
+	return a.AppIsStatic
+}
+
+// GetAppNameAvailable returns the AppNameAvailable field.
+func (a *AppProposeResponse) GetAppNameAvailable() bool {
+	if a == nil {
+		return false
+	}
+	return a.AppNameAvailable
+}
+
+// GetAppNameSuggestion returns the AppNameSuggestion field.
+func (a *AppProposeResponse) GetAppNameSuggestion() string {
+	if a == nil {
+		return ""
+	}
+	return a.AppNameSuggestion
+}
+
+// GetAppTierDowngradeCost returns the AppTierDowngradeCost field.
+func (a *AppProposeResponse) GetAppTierDowngradeCost() float32 {
+	if a == nil {
+		return 0
+	}
+	return a.AppTierDowngradeCost
+}
+
+// GetAppTierUpgradeCost returns the AppTierUpgradeCost field.
+func (a *AppProposeResponse) GetAppTierUpgradeCost() float32 {
+	if a == nil {
+		return 0
+	}
+	return a.AppTierUpgradeCost
+}
+
+// GetExistingStarterApps returns the ExistingStarterApps field.
+func (a *AppProposeResponse) GetExistingStarterApps() string {
+	if a == nil {
+		return ""
+	}
+	return a.ExistingStarterApps
+}
+
+// GetExistingStaticApps returns the ExistingStaticApps field.
+func (a *AppProposeResponse) GetExistingStaticApps() string {
+	if a == nil {
+		return ""
+	}
+	return a.ExistingStaticApps
+}
+
+// GetMaxFreeStarterApps returns the MaxFreeStarterApps field.
+func (a *AppProposeResponse) GetMaxFreeStarterApps() string {
+	if a == nil {
+		return ""
+	}
+	return a.MaxFreeStarterApps
+}
+
+// GetMaxFreeStaticApps returns the MaxFreeStaticApps field.
+func (a *AppProposeResponse) GetMaxFreeStaticApps() string {
+	if a == nil {
+		return ""
+	}
+	return a.MaxFreeStaticApps
+}
+
+// GetSpec returns the Spec field.
+func (a *AppProposeResponse) GetSpec() *AppSpec {
+	if a == nil {
+		return nil
+	}
+	return a.Spec
+}
+
+// GetContinent returns the Continent field.
+func (a *AppRegion) GetContinent() string {
+	if a == nil {
+		return ""
+	}
+	return a.Continent
+}
+
+// GetDataCenters returns the DataCenters field.
+func (a *AppRegion) GetDataCenters() []string {
+	if a == nil {
+		return nil
+	}
+	return a.DataCenters
+}
+
+// GetDefault returns the Default field.
+func (a *AppRegion) GetDefault() bool {
+	if a == nil {
+		return false
+	}
+	return a.Default
+}
+
+// GetDisabled returns the Disabled field.
+func (a *AppRegion) GetDisabled() bool {
+	if a == nil {
+		return false
+	}
+	return a.Disabled
+}
+
+// GetFlag returns the Flag field.
+func (a *AppRegion) GetFlag() string {
+	if a == nil {
+		return ""
+	}
+	return a.Flag
+}
+
+// GetLabel returns the Label field.
+func (a *AppRegion) GetLabel() string {
+	if a == nil {
+		return ""
+	}
+	return a.Label
+}
+
+// GetReason returns the Reason field.
+func (a *AppRegion) GetReason() string {
+	if a == nil {
+		return ""
+	}
+	return a.Reason
+}
+
+// GetSlug returns the Slug field.
+func (a *AppRegion) GetSlug() string {
+	if a == nil {
+		return ""
+	}
+	return a.Slug
+}
+
+// GetPath returns the Path field.
+func (a *AppRouteSpec) GetPath() string {
+	if a == nil {
+		return ""
+	}
+	return a.Path
+}
+
+// GetPreservePathPrefix returns the PreservePathPrefix field.
+func (a *AppRouteSpec) GetPreservePathPrefix() bool {
+	if a == nil {
+		return false
+	}
+	return a.PreservePathPrefix
+}
+
+// GetAlerts returns the Alerts field.
+func (a *AppServiceSpec) GetAlerts() []*AppAlertSpec {
+	if a == nil {
+		return nil
+	}
+	return a.Alerts
+}
+
+// GetBuildCommand returns the BuildCommand field.
+func (a *AppServiceSpec) GetBuildCommand() string {
+	if a == nil {
+		return ""
+	}
+	return a.BuildCommand
+}
+
+// GetCORS returns the CORS field.
+func (a *AppServiceSpec) GetCORS() *AppCORSPolicy {
+	if a == nil {
+		return nil
+	}
+	return a.CORS
+}
+
+// GetDockerfilePath returns the DockerfilePath field.
+func (a *AppServiceSpec) GetDockerfilePath() string {
+	if a == nil {
+		return ""
+	}
+	return a.DockerfilePath
+}
+
+// GetEnvironmentSlug returns the EnvironmentSlug field.
+func (a *AppServiceSpec) GetEnvironmentSlug() string {
+	if a == nil {
+		return ""
+	}
+	return a.EnvironmentSlug
+}
+
+// GetEnvs returns the Envs field.
+func (a *AppServiceSpec) GetEnvs() []*AppVariableDefinition {
+	if a == nil {
+		return nil
+	}
+	return a.Envs
+}
+
+// GetGit returns the Git field.
+func (a *AppServiceSpec) GetGit() *GitSourceSpec {
+	if a == nil {
+		return nil
+	}
+	return a.Git
+}
+
+// GetGitHub returns the GitHub field.
+func (a *AppServiceSpec) GetGitHub() *GitHubSourceSpec {
+	if a == nil {
+		return nil
+	}
+	return a.GitHub
+}
+
+// GetGitLab returns the GitLab field.
+func (a *AppServiceSpec) GetGitLab() *GitLabSourceSpec {
+	if a == nil {
+		return nil
+	}
+	return a.GitLab
+}
+
+// GetHealthCheck returns the HealthCheck field.
+func (a *AppServiceSpec) GetHealthCheck() *AppServiceSpecHealthCheck {
+	if a == nil {
+		return nil
+	}
+	return a.HealthCheck
+}
+
+// GetHTTPPort returns the HTTPPort field.
+func (a *AppServiceSpec) GetHTTPPort() int64 {
+	if a == nil {
+		return 0
+	}
+	return a.HTTPPort
+}
+
+// GetImage returns the Image field.
+func (a *AppServiceSpec) GetImage() *ImageSourceSpec {
+	if a == nil {
+		return nil
+	}
+	return a.Image
+}
+
+// GetInstanceCount returns the InstanceCount field.
+func (a *AppServiceSpec) GetInstanceCount() int64 {
+	if a == nil {
+		return 0
+	}
+	return a.InstanceCount
+}
+
+// GetInstanceSizeSlug returns the InstanceSizeSlug field.
+func (a *AppServiceSpec) GetInstanceSizeSlug() string {
+	if a == nil {
+		return ""
+	}
+	return a.InstanceSizeSlug
+}
+
+// GetInternalPorts returns the InternalPorts field.
+func (a *AppServiceSpec) GetInternalPorts() []int64 {
+	if a == nil {
+		return nil
+	}
+	return a.InternalPorts
+}
+
+// GetLogDestinations returns the LogDestinations field.
+func (a *AppServiceSpec) GetLogDestinations() []*AppLogDestinationSpec {
+	if a == nil {
+		return nil
+	}
+	return a.LogDestinations
+}
+
+// GetName returns the Name field.
+func (a *AppServiceSpec) GetName() string {
+	if a == nil {
+		return ""
+	}
+	return a.Name
+}
+
+// GetRoutes returns the Routes field.
+func (a *AppServiceSpec) GetRoutes() []*AppRouteSpec {
+	if a == nil {
+		return nil
+	}
+	return a.Routes
+}
+
+// GetRunCommand returns the RunCommand field.
+func (a *AppServiceSpec) GetRunCommand() string {
+	if a == nil {
+		return ""
+	}
+	return a.RunCommand
+}
+
+// GetSourceDir returns the SourceDir field.
+func (a *AppServiceSpec) GetSourceDir() string {
+	if a == nil {
+		return ""
+	}
+	return a.SourceDir
+}
+
+// GetFailureThreshold returns the FailureThreshold field.
+func (a *AppServiceSpecHealthCheck) GetFailureThreshold() int32 {
+	if a == nil {
+		return 0
+	}
+	return a.FailureThreshold
+}
+
+// GetHTTPPath returns the HTTPPath field.
+func (a *AppServiceSpecHealthCheck) GetHTTPPath() string {
+	if a == nil {
+		return ""
+	}
+	return a.HTTPPath
+}
+
+// GetInitialDelaySeconds returns the InitialDelaySeconds field.
+func (a *AppServiceSpecHealthCheck) GetInitialDelaySeconds() int32 {
+	if a == nil {
+		return 0
+	}
+	return a.InitialDelaySeconds
+}
+
+// GetPath returns the Path field.
+func (a *AppServiceSpecHealthCheck) GetPath() string {
+	if a == nil {
+		return ""
+	}
+	return a.Path
+}
+
+// GetPeriodSeconds returns the PeriodSeconds field.
+func (a *AppServiceSpecHealthCheck) GetPeriodSeconds() int32 {
+	if a == nil {
+		return 0
+	}
+	return a.PeriodSeconds
+}
+
+// GetPort returns the Port field.
+func (a *AppServiceSpecHealthCheck) GetPort() int64 {
+	if a == nil {
+		return 0
+	}
+	return a.Port
+}
+
+// GetSuccessThreshold returns the SuccessThreshold field.
+func (a *AppServiceSpecHealthCheck) GetSuccessThreshold() int32 {
+	if a == nil {
+		return 0
+	}
+	return a.SuccessThreshold
+}
+
+// GetTimeoutSeconds returns the TimeoutSeconds field.
+func (a *AppServiceSpecHealthCheck) GetTimeoutSeconds() int32 {
+	if a == nil {
+		return 0
+	}
+	return a.TimeoutSeconds
+}
+
+// GetAlerts returns the Alerts field.
+func (a *AppSpec) GetAlerts() []*AppAlertSpec {
+	if a == nil {
+		return nil
+	}
+	return a.Alerts
+}
+
+// GetDatabases returns the Databases field.
+func (a *AppSpec) GetDatabases() []*AppDatabaseSpec {
+	if a == nil {
+		return nil
+	}
+	return a.Databases
+}
+
+// GetDomains returns the Domains field.
+func (a *AppSpec) GetDomains() []*AppDomainSpec {
+	if a == nil {
+		return nil
+	}
+	return a.Domains
+}
+
+// GetEnvs returns the Envs field.
+func (a *AppSpec) GetEnvs() []*AppVariableDefinition {
+	if a == nil {
+		return nil
+	}
+	return a.Envs
+}
+
+// GetFeatures returns the Features field.
+func (a *AppSpec) GetFeatures() []string {
+	if a == nil {
+		return nil
+	}
+	return a.Features
+}
+
+// GetFunctions returns the Functions field.
+func (a *AppSpec) GetFunctions() []*AppFunctionsSpec {
+	if a == nil {
+		return nil
+	}
+	return a.Functions
+}
+
+// GetIngress returns the Ingress field.
+func (a *AppSpec) GetIngress() *AppIngressSpec {
+	if a == nil {
+		return nil
+	}
+	return a.Ingress
+}
+
+// GetJobs returns the Jobs field.
+func (a *AppSpec) GetJobs() []*AppJobSpec {
+	if a == nil {
+		return nil
+	}
+	return a.Jobs
+}
+
+// GetName returns the Name field.
+func (a *AppSpec) GetName() string {
+	if a == nil {
+		return ""
+	}
+	return a.Name
+}
+
+// GetRegion returns the Region field.
+func (a *AppSpec) GetRegion() string {
+	if a == nil {
+		return ""
+	}
+	return a.Region
+}
+
+// GetServices returns the Services field.
+func (a *AppSpec) GetServices() []*AppServiceSpec {
+	if a == nil {
+		return nil
+	}
+	return a.Services
+}
+
+// GetStaticSites returns the StaticSites field.
+func (a *AppSpec) GetStaticSites() []*AppStaticSiteSpec {
+	if a == nil {
+		return nil
+	}
+	return a.StaticSites
+}
+
+// GetWorkers returns the Workers field.
+func (a *AppSpec) GetWorkers() []*AppWorkerSpec {
+	if a == nil {
+		return nil
+	}
+	return a.Workers
+}
+
+// GetBuildCommand returns the BuildCommand field.
+func (a *AppStaticSiteSpec) GetBuildCommand() string {
+	if a == nil {
+		return ""
+	}
+	return a.BuildCommand
+}
+
+// GetCatchallDocument returns the CatchallDocument field.
+func (a *AppStaticSiteSpec) GetCatchallDocument() string {
+	if a == nil {
+		return ""
+	}
+	return a.CatchallDocument
+}
+
+// GetCORS returns the CORS field.
+func (a *AppStaticSiteSpec) GetCORS() *AppCORSPolicy {
+	if a == nil {
+		return nil
+	}
+	return a.CORS
+}
+
+// GetDockerfilePath returns the DockerfilePath field.
+func (a *AppStaticSiteSpec) GetDockerfilePath() string {
+	if a == nil {
+		return ""
+	}
+	return a.DockerfilePath
+}
+
+// GetEnvironmentSlug returns the EnvironmentSlug field.
+func (a *AppStaticSiteSpec) GetEnvironmentSlug() string {
+	if a == nil {
+		return ""
+	}
+	return a.EnvironmentSlug
+}
+
+// GetEnvs returns the Envs field.
+func (a *AppStaticSiteSpec) GetEnvs() []*AppVariableDefinition {
+	if a == nil {
+		return nil
+	}
+	return a.Envs
+}
+
+// GetErrorDocument returns the ErrorDocument field.
+func (a *AppStaticSiteSpec) GetErrorDocument() string {
+	if a == nil {
+		return ""
+	}
+	return a.ErrorDocument
+}
+
+// GetGit returns the Git field.
+func (a *AppStaticSiteSpec) GetGit() *GitSourceSpec {
+	if a == nil {
+		return nil
+	}
+	return a.Git
+}
+
+// GetGitHub returns the GitHub field.
+func (a *AppStaticSiteSpec) GetGitHub() *GitHubSourceSpec {
+	if a == nil {
+		return nil
+	}
+	return a.GitHub
+}
+
+// GetGitLab returns the GitLab field.
+func (a *AppStaticSiteSpec) GetGitLab() *GitLabSourceSpec {
+	if a == nil {
+		return nil
+	}
+	return a.GitLab
+}
+
+// GetIndexDocument returns the IndexDocument field.
+func (a *AppStaticSiteSpec) GetIndexDocument() string {
+	if a == nil {
+		return ""
+	}
+	return a.IndexDocument
+}
+
+// GetName returns the Name field.
+func (a *AppStaticSiteSpec) GetName() string {
+	if a == nil {
+		return ""
+	}
+	return a.Name
+}
+
+// GetOutputDir returns the OutputDir field.
+func (a *AppStaticSiteSpec) GetOutputDir() string {
+	if a == nil {
+		return ""
+	}
+	return a.OutputDir
+}
+
+// GetRoutes returns the Routes field.
+func (a *AppStaticSiteSpec) GetRoutes() []*AppRouteSpec {
+	if a == nil {
+		return nil
+	}
+	return a.Routes
+}
+
+// GetSourceDir returns the SourceDir field.
+func (a *AppStaticSiteSpec) GetSourceDir() string {
+	if a == nil {
+		return ""
+	}
+	return a.SourceDir
+}
+
+// GetExact returns the Exact field.
+func (a *AppStringMatch) GetExact() string {
+	if a == nil {
+		return ""
+	}
+	return a.Exact
+}
+
+// GetPrefix returns the Prefix field.
+func (a *AppStringMatch) GetPrefix() string {
+	if a == nil {
+		return ""
+	}
+	return a.Prefix
+}
+
+// GetRegex returns the Regex field.
+func (a *AppStringMatch) GetRegex() string {
+	if a == nil {
+		return ""
+	}
+	return a.Regex
+}
+
+// GetBuildSeconds returns the BuildSeconds field.
+func (a *AppTier) GetBuildSeconds() string {
+	if a == nil {
+		return ""
+	}
+	return a.BuildSeconds
+}
+
+// GetEgressBandwidthBytes returns the EgressBandwidthBytes field.
+func (a *AppTier) GetEgressBandwidthBytes() string {
+	if a == nil {
+		return ""
+	}
+	return a.EgressBandwidthBytes
+}
+
+// GetName returns the Name field.
+func (a *AppTier) GetName() string {
+	if a == nil {
+		return ""
+	}
+	return a.Name
+}
+
+// GetSlug returns the Slug field.
+func (a *AppTier) GetSlug() string {
+	if a == nil {
+		return ""
+	}
+	return a.Slug
+}
+
+// GetKey returns the Key field.
+func (a *AppVariableDefinition) GetKey() string {
+	if a == nil {
+		return ""
+	}
+	return a.Key
+}
+
+// GetScope returns the Scope field.
+func (a *AppVariableDefinition) GetScope() AppVariableScope {
+	if a == nil {
+		return ""
+	}
+	return a.Scope
+}
+
+// GetType returns the Type field.
+func (a *AppVariableDefinition) GetType() AppVariableType {
+	if a == nil {
+		return ""
+	}
+	return a.Type
+}
+
+// GetValue returns the Value field.
+func (a *AppVariableDefinition) GetValue() string {
+	if a == nil {
+		return ""
+	}
+	return a.Value
+}
+
+// GetAlerts returns the Alerts field.
+func (a *AppWorkerSpec) GetAlerts() []*AppAlertSpec {
+	if a == nil {
+		return nil
+	}
+	return a.Alerts
+}
+
+// GetBuildCommand returns the BuildCommand field.
+func (a *AppWorkerSpec) GetBuildCommand() string {
+	if a == nil {
+		return ""
+	}
+	return a.BuildCommand
+}
+
+// GetDockerfilePath returns the DockerfilePath field.
+func (a *AppWorkerSpec) GetDockerfilePath() string {
+	if a == nil {
+		return ""
+	}
+	return a.DockerfilePath
+}
+
+// GetEnvironmentSlug returns the EnvironmentSlug field.
+func (a *AppWorkerSpec) GetEnvironmentSlug() string {
+	if a == nil {
+		return ""
+	}
+	return a.EnvironmentSlug
+}
+
+// GetEnvs returns the Envs field.
+func (a *AppWorkerSpec) GetEnvs() []*AppVariableDefinition {
+	if a == nil {
+		return nil
+	}
+	return a.Envs
+}
+
+// GetGit returns the Git field.
+func (a *AppWorkerSpec) GetGit() *GitSourceSpec {
+	if a == nil {
+		return nil
+	}
+	return a.Git
+}
+
+// GetGitHub returns the GitHub field.
+func (a *AppWorkerSpec) GetGitHub() *GitHubSourceSpec {
+	if a == nil {
+		return nil
+	}
+	return a.GitHub
+}
+
+// GetGitLab returns the GitLab field.
+func (a *AppWorkerSpec) GetGitLab() *GitLabSourceSpec {
+	if a == nil {
+		return nil
+	}
+	return a.GitLab
+}
+
+// GetImage returns the Image field.
+func (a *AppWorkerSpec) GetImage() *ImageSourceSpec {
+	if a == nil {
+		return nil
+	}
+	return a.Image
+}
+
+// GetInstanceCount returns the InstanceCount field.
+func (a *AppWorkerSpec) GetInstanceCount() int64 {
+	if a == nil {
+		return 0
+	}
+	return a.InstanceCount
+}
+
+// GetInstanceSizeSlug returns the InstanceSizeSlug field.
+func (a *AppWorkerSpec) GetInstanceSizeSlug() string {
+	if a == nil {
+		return ""
+	}
+	return a.InstanceSizeSlug
+}
+
+// GetLogDestinations returns the LogDestinations field.
+func (a *AppWorkerSpec) GetLogDestinations() []*AppLogDestinationSpec {
+	if a == nil {
+		return nil
+	}
+	return a.LogDestinations
+}
+
+// GetName returns the Name field.
+func (a *AppWorkerSpec) GetName() string {
+	if a == nil {
+		return ""
+	}
+	return a.Name
+}
+
+// GetRunCommand returns the RunCommand field.
+func (a *AppWorkerSpec) GetRunCommand() string {
+	if a == nil {
+		return ""
+	}
+	return a.RunCommand
+}
+
+// GetSourceDir returns the SourceDir field.
+func (a *AppWorkerSpec) GetSourceDir() string {
+	if a == nil {
+		return ""
+	}
+	return a.SourceDir
+}
+
+// GetDescription returns the Description field.
+func (b *Buildpack) GetDescription() []string {
+	if b == nil {
+		return nil
+	}
+	return b.Description
+}
+
+// GetDocsLink returns the DocsLink field.
+func (b *Buildpack) GetDocsLink() string {
+	if b == nil {
+		return ""
+	}
+	return b.DocsLink
+}
+
+// GetID returns the ID field.
+func (b *Buildpack) GetID() string {
+	if b == nil {
+		return ""
+	}
+	return b.ID
+}
+
+// GetLatest returns the Latest field.
+func (b *Buildpack) GetLatest() bool {
+	if b == nil {
+		return false
+	}
+	return b.Latest
+}
+
+// GetMajorVersion returns the MajorVersion field.
+func (b *Buildpack) GetMajorVersion() int32 {
+	if b == nil {
+		return 0
+	}
+	return b.MajorVersion
+}
+
+// GetName returns the Name field.
+func (b *Buildpack) GetName() string {
+	if b == nil {
+		return ""
+	}
+	return b.Name
+}
+
+// GetVersion returns the Version field.
+func (b *Buildpack) GetVersion() string {
+	if b == nil {
+		return ""
+	}
+	return b.Version
+}
+
+// GetCause returns the Cause field.
+func (d *Deployment) GetCause() string {
+	if d == nil {
+		return ""
+	}
+	return d.Cause
+}
+
+// GetCauseDetails returns the CauseDetails field.
+func (d *Deployment) GetCauseDetails() *DeploymentCauseDetails {
+	if d == nil {
+		return nil
+	}
+	return d.CauseDetails
+}
+
+// GetClonedFrom returns the ClonedFrom field.
+func (d *Deployment) GetClonedFrom() string {
+	if d == nil {
+		return ""
+	}
+	return d.ClonedFrom
+}
+
+// GetCreatedAt returns the CreatedAt field.
+func (d *Deployment) GetCreatedAt() time.Time {
+	if d == nil {
+		return time.Time{}
+	}
+	return d.CreatedAt
+}
+
+// GetFunctions returns the Functions field.
+func (d *Deployment) GetFunctions() []*DeploymentFunctions {
+	if d == nil {
+		return nil
+	}
+	return d.Functions
+}
+
+// GetID returns the ID field.
+func (d *Deployment) GetID() string {
+	if d == nil {
+		return ""
+	}
+	return d.ID
+}
+
+// GetJobs returns the Jobs field.
+func (d *Deployment) GetJobs() []*DeploymentJob {
+	if d == nil {
+		return nil
+	}
+	return d.Jobs
+}
+
+// GetLoadBalancerID returns the LoadBalancerID field.
+func (d *Deployment) GetLoadBalancerID() string {
+	if d == nil {
+		return ""
+	}
+	return d.LoadBalancerID
+}
+
+// GetPhase returns the Phase field.
+func (d *Deployment) GetPhase() DeploymentPhase {
+	if d == nil {
+		return ""
+	}
+	return d.Phase
+}
+
+// GetPhaseLastUpdatedAt returns the PhaseLastUpdatedAt field.
+func (d *Deployment) GetPhaseLastUpdatedAt() time.Time {
+	if d == nil {
+		return time.Time{}
+	}
+	return d.PhaseLastUpdatedAt
+}
+
+// GetPreviousDeploymentID returns the PreviousDeploymentID field.
+func (d *Deployment) GetPreviousDeploymentID() string {
+	if d == nil {
+		return ""
+	}
+	return d.PreviousDeploymentID
+}
+
+// GetProgress returns the Progress field.
+func (d *Deployment) GetProgress() *DeploymentProgress {
+	if d == nil {
+		return nil
+	}
+	return d.Progress
+}
+
+// GetServices returns the Services field.
+func (d *Deployment) GetServices() []*DeploymentService {
+	if d == nil {
+		return nil
+	}
+	return d.Services
+}
+
+// GetSpec returns the Spec field.
+func (d *Deployment) GetSpec() *AppSpec {
+	if d == nil {
+		return nil
+	}
+	return d.Spec
+}
+
+// GetStaticSites returns the StaticSites field.
+func (d *Deployment) GetStaticSites() []*DeploymentStaticSite {
+	if d == nil {
+		return nil
+	}
+	return d.StaticSites
+}
+
+// GetTierSlug returns the TierSlug field.
+func (d *Deployment) GetTierSlug() string {
+	if d == nil {
+		return ""
+	}
+	return d.TierSlug
+}
+
+// GetTiming returns the Timing field.
+func (d *Deployment) GetTiming() *DeploymentTiming {
+	if d == nil {
+		return nil
+	}
+	return d.Timing
+}
+
+// GetUpdatedAt returns the UpdatedAt field.
+func (d *Deployment) GetUpdatedAt() time.Time {
+	if d == nil {
+		return time.Time{}
+	}
+	return d.UpdatedAt
+}
+
+// GetWorkers returns the Workers field.
+func (d *Deployment) GetWorkers() []*DeploymentWorker {
+	if d == nil {
+		return nil
+	}
+	return d.Workers
+}
+
+// GetDigitalOceanUserAction returns the DigitalOceanUserAction field.
+func (d *DeploymentCauseDetails) GetDigitalOceanUserAction() *DeploymentCauseDetailsDigitalOceanUserAction {
+	if d == nil {
+		return nil
+	}
+	return d.DigitalOceanUserAction
+}
+
+// GetDOCRPush returns the DOCRPush field.
+func (d *DeploymentCauseDetails) GetDOCRPush() *DeploymentCauseDetailsDOCRPush {
+	if d == nil {
+		return nil
+	}
+	return d.DOCRPush
+}
+
+// GetGitPush returns the GitPush field.
+func (d *DeploymentCauseDetails) GetGitPush() *DeploymentCauseDetailsGitPush {
+	if d == nil {
+		return nil
+	}
+	return d.GitPush
+}
+
+// GetInternal returns the Internal field.
+func (d *DeploymentCauseDetails) GetInternal() bool {
+	if d == nil {
+		return false
+	}
+	return d.Internal
+}
+
+// GetType returns the Type field.
+func (d *DeploymentCauseDetails) GetType() DeploymentCauseDetailsType {
+	if d == nil {
+		return ""
+	}
+	return d.Type
+}
+
+// GetEmail returns the Email field.
+func (d *DeploymentCauseDetailsDigitalOceanUser) GetEmail() string {
+	if d == nil {
+		return ""
+	}
+	return d.Email
+}
+
+// GetFullName returns the FullName field.
+func (d *DeploymentCauseDetailsDigitalOceanUser) GetFullName() string {
+	if d == nil {
+		return ""
+	}
+	return d.FullName
+}
+
+// GetUUID returns the UUID field.
+func (d *DeploymentCauseDetailsDigitalOceanUser) GetUUID() string {
+	if d == nil {
+		return ""
+	}
+	return d.UUID
+}
+
+// GetName returns the Name field.
+func (d *DeploymentCauseDetailsDigitalOceanUserAction) GetName() DeploymentCauseDetailsDigitalOceanUserActionName {
+	if d == nil {
+		return ""
+	}
+	return d.Name
+}
+
+// GetUser returns the User field.
+func (d *DeploymentCauseDetailsDigitalOceanUserAction) GetUser() *DeploymentCauseDetailsDigitalOceanUser {
+	if d == nil {
+		return nil
+	}
+	return d.User
+}
+
+// GetImageDigest returns the ImageDigest field.
+func (d *DeploymentCauseDetailsDOCRPush) GetImageDigest() string {
+	if d == nil {
+		return ""
+	}
+	return d.ImageDigest
+}
+
+// GetRegistry returns the Registry field.
+func (d *DeploymentCauseDetailsDOCRPush) GetRegistry() string {
+	if d == nil {
+		return ""
+	}
+	return d.Registry
+}
+
+// GetRepository returns the Repository field.
+func (d *DeploymentCauseDetailsDOCRPush) GetRepository() string {
+	if d == nil {
+		return ""
+	}
+	return d.Repository
+}
+
+// GetTag returns the Tag field.
+func (d *DeploymentCauseDetailsDOCRPush) GetTag() string {
+	if d == nil {
+		return ""
+	}
+	return d.Tag
+}
+
+// GetCommitAuthor returns the CommitAuthor field.
+func (d *DeploymentCauseDetailsGitPush) GetCommitAuthor() string {
+	if d == nil {
+		return ""
+	}
+	return d.CommitAuthor
+}
+
+// GetCommitMessage returns the CommitMessage field.
+func (d *DeploymentCauseDetailsGitPush) GetCommitMessage() string {
+	if d == nil {
+		return ""
+	}
+	return d.CommitMessage
+}
+
+// GetCommitSHA returns the CommitSHA field.
+func (d *DeploymentCauseDetailsGitPush) GetCommitSHA() string {
+	if d == nil {
+		return ""
+	}
+	return d.CommitSHA
+}
+
+// GetGitHub returns the GitHub field.
+func (d *DeploymentCauseDetailsGitPush) GetGitHub() *GitHubSourceSpec {
+	if d == nil {
+		return nil
+	}
+	return d.GitHub
+}
+
+// GetGitLab returns the GitLab field.
+func (d *DeploymentCauseDetailsGitPush) GetGitLab() *GitLabSourceSpec {
+	if d == nil {
+		return nil
+	}
+	return d.GitLab
+}
+
+// GetUsername returns the Username field.
+func (d *DeploymentCauseDetailsGitPush) GetUsername() string {
+	if d == nil {
+		return ""
+	}
+	return d.Username
+}
+
+// GetName returns the Name field.
+func (d *DeploymentFunctions) GetName() string {
+	if d == nil {
+		return ""
+	}
+	return d.Name
+}
+
+// GetNamespace returns the Namespace field.
+func (d *DeploymentFunctions) GetNamespace() string {
+	if d == nil {
+		return ""
+	}
+	return d.Namespace
+}
+
+// GetSourceCommitHash returns the SourceCommitHash field.
+func (d *DeploymentFunctions) GetSourceCommitHash() string {
+	if d == nil {
+		return ""
+	}
+	return d.SourceCommitHash
+}
+
+// GetBuildpacks returns the Buildpacks field.
+func (d *DeploymentJob) GetBuildpacks() []*Buildpack {
+	if d == nil {
+		return nil
+	}
+	return d.Buildpacks
+}
+
+// GetName returns the Name field.
+func (d *DeploymentJob) GetName() string {
+	if d == nil {
+		return ""
+	}
+	return d.Name
+}
+
+// GetSourceCommitHash returns the SourceCommitHash field.
+func (d *DeploymentJob) GetSourceCommitHash() string {
+	if d == nil {
+		return ""
+	}
+	return d.SourceCommitHash
+}
+
+// GetErrorSteps returns the ErrorSteps field.
+func (d *DeploymentProgress) GetErrorSteps() int32 {
+	if d == nil {
+		return 0
+	}
+	return d.ErrorSteps
+}
+
+// GetPendingSteps returns the PendingSteps field.
+func (d *DeploymentProgress) GetPendingSteps() int32 {
+	if d == nil {
+		return 0
+	}
+	return d.PendingSteps
+}
+
+// GetRunningSteps returns the RunningSteps field.
+func (d *DeploymentProgress) GetRunningSteps() int32 {
+	if d == nil {
+		return 0
+	}
+	return d.RunningSteps
+}
+
+// GetSteps returns the Steps field.
+func (d *DeploymentProgress) GetSteps() []*DeploymentProgressStep {
+	if d == nil {
+		return nil
+	}
+	return d.Steps
+}
+
+// GetSuccessSteps returns the SuccessSteps field.
+func (d *DeploymentProgress) GetSuccessSteps() int32 {
+	if d == nil {
+		return 0
+	}
+	return d.SuccessSteps
+}
+
+// GetSummarySteps returns the SummarySteps field.
+func (d *DeploymentProgress) GetSummarySteps() []*DeploymentProgressStep {
+	if d == nil {
+		return nil
+	}
+	return d.SummarySteps
+}
+
+// GetTotalSteps returns the TotalSteps field.
+func (d *DeploymentProgress) GetTotalSteps() int32 {
+	if d == nil {
+		return 0
+	}
+	return d.TotalSteps
+}
+
+// GetComponentName returns the ComponentName field.
+func (d *DeploymentProgressStep) GetComponentName() string {
+	if d == nil {
+		return ""
+	}
+	return d.ComponentName
+}
+
+// GetEndedAt returns the EndedAt field.
+func (d *DeploymentProgressStep) GetEndedAt() time.Time {
+	if d == nil {
+		return time.Time{}
+	}
+	return d.EndedAt
+}
+
+// GetMessageBase returns the MessageBase field.
+func (d *DeploymentProgressStep) GetMessageBase() string {
+	if d == nil {
+		return ""
+	}
+	return d.MessageBase
+}
+
+// GetName returns the Name field.
+func (d *DeploymentProgressStep) GetName() string {
+	if d == nil {
+		return ""
+	}
+	return d.Name
+}
+
+// GetReason returns the Reason field.
+func (d *DeploymentProgressStep) GetReason() *DeploymentProgressStepReason {
+	if d == nil {
+		return nil
+	}
+	return d.Reason
+}
+
+// GetStartedAt returns the StartedAt field.
+func (d *DeploymentProgressStep) GetStartedAt() time.Time {
+	if d == nil {
+		return time.Time{}
+	}
+	return d.StartedAt
+}
+
+// GetStatus returns the Status field.
+func (d *DeploymentProgressStep) GetStatus() DeploymentProgressStepStatus {
+	if d == nil {
+		return ""
+	}
+	return d.Status
+}
+
+// GetSteps returns the Steps field.
+func (d *DeploymentProgressStep) GetSteps() []*DeploymentProgressStep {
+	if d == nil {
+		return nil
+	}
+	return d.Steps
+}
+
+// GetCode returns the Code field.
+func (d *DeploymentProgressStepReason) GetCode() string {
+	if d == nil {
+		return ""
+	}
+	return d.Code
+}
+
+// GetMessage returns the Message field.
+func (d *DeploymentProgressStepReason) GetMessage() string {
+	if d == nil {
+		return ""
+	}
+	return d.Message
+}
+
+// GetBuildpacks returns the Buildpacks field.
+func (d *DeploymentService) GetBuildpacks() []*Buildpack {
+	if d == nil {
+		return nil
+	}
+	return d.Buildpacks
+}
+
+// GetName returns the Name field.
+func (d *DeploymentService) GetName() string {
+	if d == nil {
+		return ""
+	}
+	return d.Name
+}
+
+// GetSourceCommitHash returns the SourceCommitHash field.
+func (d *DeploymentService) GetSourceCommitHash() string {
+	if d == nil {
+		return ""
+	}
+	return d.SourceCommitHash
+}
+
+// GetBuildpacks returns the Buildpacks field.
+func (d *DeploymentStaticSite) GetBuildpacks() []*Buildpack {
+	if d == nil {
+		return nil
+	}
+	return d.Buildpacks
+}
+
+// GetName returns the Name field.
+func (d *DeploymentStaticSite) GetName() string {
+	if d == nil {
+		return ""
+	}
+	return d.Name
+}
+
+// GetSourceCommitHash returns the SourceCommitHash field.
+func (d *DeploymentStaticSite) GetSourceCommitHash() string {
+	if d == nil {
+		return ""
+	}
+	return d.SourceCommitHash
+}
+
+// GetBuildBillable returns the BuildBillable field.
+func (d *DeploymentTiming) GetBuildBillable() string {
+	if d == nil {
+		return ""
+	}
+	return d.BuildBillable
+}
+
+// GetBuildTotal returns the BuildTotal field.
+func (d *DeploymentTiming) GetBuildTotal() string {
+	if d == nil {
+		return ""
+	}
+	return d.BuildTotal
+}
+
+// GetComponents returns the Components field.
+func (d *DeploymentTiming) GetComponents() []*DeploymentTimingComponent {
+	if d == nil {
+		return nil
+	}
+	return d.Components
+}
+
+// GetDatabaseProvision returns the DatabaseProvision field.
+func (d *DeploymentTiming) GetDatabaseProvision() string {
+	if d == nil {
+		return ""
+	}
+	return d.DatabaseProvision
+}
+
+// GetDeploying returns the Deploying field.
+func (d *DeploymentTiming) GetDeploying() string {
+	if d == nil {
+		return ""
+	}
+	return d.Deploying
+}
+
+// GetPending returns the Pending field.
+func (d *DeploymentTiming) GetPending() string {
+	if d == nil {
+		return ""
+	}
+	return d.Pending
+}
+
+// GetBuildBillable returns the BuildBillable field.
+func (d *DeploymentTimingComponent) GetBuildBillable() string {
+	if d == nil {
+		return ""
+	}
+	return d.BuildBillable
+}
+
+// GetName returns the Name field.
+func (d *DeploymentTimingComponent) GetName() string {
+	if d == nil {
+		return ""
+	}
+	return d.Name
+}
+
+// GetBuildpacks returns the Buildpacks field.
+func (d *DeploymentWorker) GetBuildpacks() []*Buildpack {
+	if d == nil {
+		return nil
+	}
+	return d.Buildpacks
+}
+
+// GetName returns the Name field.
+func (d *DeploymentWorker) GetName() string {
+	if d == nil {
+		return ""
+	}
+	return d.Name
+}
+
+// GetSourceCommitHash returns the SourceCommitHash field.
+func (d *DeploymentWorker) GetSourceCommitHash() string {
+	if d == nil {
+		return ""
+	}
+	return d.SourceCommitHash
+}
+
+// GetSpec returns the Spec field.
+func (d *DeployTemplate) GetSpec() *AppSpec {
+	if d == nil {
+		return nil
+	}
+	return d.Spec
+}
+
+// GetCommitSHA returns the CommitSHA field.
+func (d *DetectRequest) GetCommitSHA() string {
+	if d == nil {
+		return ""
+	}
+	return d.CommitSHA
+}
+
+// GetGit returns the Git field.
+func (d *DetectRequest) GetGit() *GitSourceSpec {
+	if d == nil {
+		return nil
+	}
+	return d.Git
+}
+
+// GetGitHub returns the GitHub field.
+func (d *DetectRequest) GetGitHub() *GitHubSourceSpec {
+	if d == nil {
+		return nil
+	}
+	return d.GitHub
+}
+
+// GetGitLab returns the GitLab field.
+func (d *DetectRequest) GetGitLab() *GitLabSourceSpec {
+	if d == nil {
+		return nil
+	}
+	return d.GitLab
+}
+
+// GetSourceDir returns the SourceDir field.
+func (d *DetectRequest) GetSourceDir() string {
+	if d == nil {
+		return ""
+	}
+	return d.SourceDir
+}
+
+// GetComponents returns the Components field.
+func (d *DetectResponse) GetComponents() []*DetectResponseComponent {
+	if d == nil {
+		return nil
+	}
+	return d.Components
+}
+
+// GetTemplate returns the Template field.
+func (d *DetectResponse) GetTemplate() *DeployTemplate {
+	if d == nil {
+		return nil
+	}
+	return d.Template
+}
+
+// GetTemplateError returns the TemplateError field.
+func (d *DetectResponse) GetTemplateError() string {
+	if d == nil {
+		return ""
+	}
+	return d.TemplateError
+}
+
+// GetTemplateFound returns the TemplateFound field.
+func (d *DetectResponse) GetTemplateFound() bool {
+	if d == nil {
+		return false
+	}
+	return d.TemplateFound
+}
+
+// GetTemplateValid returns the TemplateValid field.
+func (d *DetectResponse) GetTemplateValid() bool {
+	if d == nil {
+		return false
+	}
+	return d.TemplateValid
+}
+
+// GetBuildCommand returns the BuildCommand field.
+func (d *DetectResponseComponent) GetBuildCommand() string {
+	if d == nil {
+		return ""
+	}
+	return d.BuildCommand
+}
+
+// GetBuildpacks returns the Buildpacks field.
+func (d *DetectResponseComponent) GetBuildpacks() []*Buildpack {
+	if d == nil {
+		return nil
+	}
+	return d.Buildpacks
+}
+
+// GetDockerfiles returns the Dockerfiles field.
+func (d *DetectResponseComponent) GetDockerfiles() []string {
+	if d == nil {
+		return nil
+	}
+	return d.Dockerfiles
+}
+
+// GetEnvironmentSlug returns the EnvironmentSlug field.
+func (d *DetectResponseComponent) GetEnvironmentSlug() string {
+	if d == nil {
+		return ""
+	}
+	return d.EnvironmentSlug
+}
+
+// GetEnvVars returns the EnvVars field.
+func (d *DetectResponseComponent) GetEnvVars() []*AppVariableDefinition {
+	if d == nil {
+		return nil
+	}
+	return d.EnvVars
+}
+
+// GetHTTPPorts returns the HTTPPorts field.
+func (d *DetectResponseComponent) GetHTTPPorts() []int64 {
+	if d == nil {
+		return nil
+	}
+	return d.HTTPPorts
+}
+
+// GetRunCommand returns the RunCommand field.
+func (d *DetectResponseComponent) GetRunCommand() string {
+	if d == nil {
+		return ""
+	}
+	return d.RunCommand
+}
+
+// GetServerlessPackages returns the ServerlessPackages field.
+func (d *DetectResponseComponent) GetServerlessPackages() []*DetectResponseServerlessPackage {
+	if d == nil {
+		return nil
+	}
+	return d.ServerlessPackages
+}
+
+// GetSourceDir returns the SourceDir field.
+func (d *DetectResponseComponent) GetSourceDir() string {
+	if d == nil {
+		return ""
+	}
+	return d.SourceDir
+}
+
+// GetStrategy returns the Strategy field.
+func (d *DetectResponseComponent) GetStrategy() DetectResponseType {
+	if d == nil {
+		return ""
+	}
+	return d.Strategy
+}
+
+// GetTypes returns the Types field.
+func (d *DetectResponseComponent) GetTypes() []string {
+	if d == nil {
+		return nil
+	}
+	return d.Types
+}
+
+// GetLimits returns the Limits field.
+func (d *DetectResponseServerlessFunction) GetLimits() *DetectResponseServerlessFunctionLimits {
+	if d == nil {
+		return nil
+	}
+	return d.Limits
+}
+
+// GetName returns the Name field.
+func (d *DetectResponseServerlessFunction) GetName() string {
+	if d == nil {
+		return ""
+	}
+	return d.Name
+}
+
+// GetPackage returns the Package field.
+func (d *DetectResponseServerlessFunction) GetPackage() string {
+	if d == nil {
+		return ""
+	}
+	return d.Package
+}
+
+// GetRuntime returns the Runtime field.
+func (d *DetectResponseServerlessFunction) GetRuntime() string {
+	if d == nil {
+		return ""
+	}
+	return d.Runtime
+}
+
+// GetLogs returns the Logs field.
+func (d *DetectResponseServerlessFunctionLimits) GetLogs() string {
+	if d == nil {
+		return ""
+	}
+	return d.Logs
+}
+
+// GetMemory returns the Memory field.
+func (d *DetectResponseServerlessFunctionLimits) GetMemory() string {
+	if d == nil {
+		return ""
+	}
+	return d.Memory
+}
+
+// GetTimeout returns the Timeout field.
+func (d *DetectResponseServerlessFunctionLimits) GetTimeout() string {
+	if d == nil {
+		return ""
+	}
+	return d.Timeout
+}
+
+// GetFunctions returns the Functions field.
+func (d *DetectResponseServerlessPackage) GetFunctions() []*DetectResponseServerlessFunction {
+	if d == nil {
+		return nil
+	}
+	return d.Functions
+}
+
+// GetName returns the Name field.
+func (d *DetectResponseServerlessPackage) GetName() string {
+	if d == nil {
+		return ""
+	}
+	return d.Name
+}
+
+// GetBranch returns the Branch field.
+func (g *GitHubSourceSpec) GetBranch() string {
+	if g == nil {
+		return ""
+	}
+	return g.Branch
+}
+
+// GetDeployOnPush returns the DeployOnPush field.
+func (g *GitHubSourceSpec) GetDeployOnPush() bool {
+	if g == nil {
+		return false
+	}
+	return g.DeployOnPush
+}
+
+// GetRepo returns the Repo field.
+func (g *GitHubSourceSpec) GetRepo() string {
+	if g == nil {
+		return ""
+	}
+	return g.Repo
+}
+
+// GetBranch returns the Branch field.
+func (g *GitLabSourceSpec) GetBranch() string {
+	if g == nil {
+		return ""
+	}
+	return g.Branch
+}
+
+// GetDeployOnPush returns the DeployOnPush field.
+func (g *GitLabSourceSpec) GetDeployOnPush() bool {
+	if g == nil {
+		return false
+	}
+	return g.DeployOnPush
+}
+
+// GetRepo returns the Repo field.
+func (g *GitLabSourceSpec) GetRepo() string {
+	if g == nil {
+		return ""
+	}
+	return g.Repo
+}
+
+// GetBranch returns the Branch field.
+func (g *GitSourceSpec) GetBranch() string {
+	if g == nil {
+		return ""
+	}
+	return g.Branch
+}
+
+// GetRepoCloneURL returns the RepoCloneURL field.
+func (g *GitSourceSpec) GetRepoCloneURL() string {
+	if g == nil {
+		return ""
+	}
+	return g.RepoCloneURL
+}
+
+// GetDeployOnPush returns the DeployOnPush field.
+func (i *ImageSourceSpec) GetDeployOnPush() *ImageSourceSpecDeployOnPush {
+	if i == nil {
+		return nil
+	}
+	return i.DeployOnPush
+}
+
+// GetRegistry returns the Registry field.
+func (i *ImageSourceSpec) GetRegistry() string {
+	if i == nil {
+		return ""
+	}
+	return i.Registry
+}
+
+// GetRegistryType returns the RegistryType field.
+func (i *ImageSourceSpec) GetRegistryType() ImageSourceSpecRegistryType {
+	if i == nil {
+		return ""
+	}
+	return i.RegistryType
+}
+
+// GetRepository returns the Repository field.
+func (i *ImageSourceSpec) GetRepository() string {
+	if i == nil {
+		return ""
+	}
+	return i.Repository
+}
+
+// GetTag returns the Tag field.
+func (i *ImageSourceSpec) GetTag() string {
+	if i == nil {
+		return ""
+	}
+	return i.Tag
+}
+
+// GetEnabled returns the Enabled field.
+func (i *ImageSourceSpecDeployOnPush) GetEnabled() bool {
+	if i == nil {
+		return false
+	}
+	return i.Enabled
+}
+
+// GetBuildpacks returns the Buildpacks field.
+func (l *ListBuildpacksResponse) GetBuildpacks() []*Buildpack {
+	if l == nil {
+		return nil
+	}
+	return l.Buildpacks
+}
+
+// GetAffectedComponents returns the AffectedComponents field.
+func (u *UpgradeBuildpackResponse) GetAffectedComponents() []string {
+	if u == nil {
+		return nil
+	}
+	return u.AffectedComponents
+}
+
+// GetDeployment returns the Deployment field.
+func (u *UpgradeBuildpackResponse) GetDeployment() *Deployment {
+	if u == nil {
+		return nil
+	}
+	return u.Deployment
+}
diff --git a/apps_accessors_test.go b/apps_accessors_test.go
new file mode 100644
index 0000000..3ad5d8d
--- /dev/null
+++ b/apps_accessors_test.go
@@ -0,0 +1,2772 @@
+// Copyright 2017 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by gen-accessors; DO NOT EDIT.
+// Instead, please run "go generate ./..." as described here:
+// https://github.com/google/go-github/blob/master/CONTRIBUTING.md#submitting-a-patch
+
+package godo
+
+import (
+	"testing"
+)
+
+func TestApp_GetActiveDeployment(tt *testing.T) {
+	a := &App{}
+	a.GetActiveDeployment()
+	a = nil
+	a.GetActiveDeployment()
+}
+
+func TestApp_GetBuildConfig(tt *testing.T) {
+	a := &App{}
+	a.GetBuildConfig()
+	a = nil
+	a.GetBuildConfig()
+}
+
+func TestApp_GetCreatedAt(tt *testing.T) {
+	a := &App{}
+	a.GetCreatedAt()
+	a = nil
+	a.GetCreatedAt()
+}
+
+func TestApp_GetDefaultIngress(tt *testing.T) {
+	a := &App{}
+	a.GetDefaultIngress()
+	a = nil
+	a.GetDefaultIngress()
+}
+
+func TestApp_GetDomains(tt *testing.T) {
+	a := &App{}
+	a.GetDomains()
+	a = nil
+	a.GetDomains()
+}
+
+func TestApp_GetID(tt *testing.T) {
+	a := &App{}
+	a.GetID()
+	a = nil
+	a.GetID()
+}
+
+func TestApp_GetInProgressDeployment(tt *testing.T) {
+	a := &App{}
+	a.GetInProgressDeployment()
+	a = nil
+	a.GetInProgressDeployment()
+}
+
+func TestApp_GetLastDeploymentActiveAt(tt *testing.T) {
+	a := &App{}
+	a.GetLastDeploymentActiveAt()
+	a = nil
+	a.GetLastDeploymentActiveAt()
+}
+
+func TestApp_GetLastDeploymentCreatedAt(tt *testing.T) {
+	a := &App{}
+	a.GetLastDeploymentCreatedAt()
+	a = nil
+	a.GetLastDeploymentCreatedAt()
+}
+
+func TestApp_GetLiveDomain(tt *testing.T) {
+	a := &App{}
+	a.GetLiveDomain()
+	a = nil
+	a.GetLiveDomain()
+}
+
+func TestApp_GetLiveURL(tt *testing.T) {
+	a := &App{}
+	a.GetLiveURL()
+	a = nil
+	a.GetLiveURL()
+}
+
+func TestApp_GetLiveURLBase(tt *testing.T) {
+	a := &App{}
+	a.GetLiveURLBase()
+	a = nil
+	a.GetLiveURLBase()
+}
+
+func TestApp_GetOwnerUUID(tt *testing.T) {
+	a := &App{}
+	a.GetOwnerUUID()
+	a = nil
+	a.GetOwnerUUID()
+}
+
+func TestApp_GetPendingDeployment(tt *testing.T) {
+	a := &App{}
+	a.GetPendingDeployment()
+	a = nil
+	a.GetPendingDeployment()
+}
+
+func TestApp_GetPinnedDeployment(tt *testing.T) {
+	a := &App{}
+	a.GetPinnedDeployment()
+	a = nil
+	a.GetPinnedDeployment()
+}
+
+func TestApp_GetProjectID(tt *testing.T) {
+	a := &App{}
+	a.GetProjectID()
+	a = nil
+	a.GetProjectID()
+}
+
+func TestApp_GetRegion(tt *testing.T) {
+	a := &App{}
+	a.GetRegion()
+	a = nil
+	a.GetRegion()
+}
+
+func TestApp_GetSpec(tt *testing.T) {
+	a := &App{}
+	a.GetSpec()
+	a = nil
+	a.GetSpec()
+}
+
+func TestApp_GetTierSlug(tt *testing.T) {
+	a := &App{}
+	a.GetTierSlug()
+	a = nil
+	a.GetTierSlug()
+}
+
+func TestApp_GetUpdatedAt(tt *testing.T) {
+	a := &App{}
+	a.GetUpdatedAt()
+	a = nil
+	a.GetUpdatedAt()
+}
+
+func TestAppAlert_GetComponentName(tt *testing.T) {
+	a := &AppAlert{}
+	a.GetComponentName()
+	a = nil
+	a.GetComponentName()
+}
+
+func TestAppAlert_GetEmails(tt *testing.T) {
+	a := &AppAlert{}
+	a.GetEmails()
+	a = nil
+	a.GetEmails()
+}
+
+func TestAppAlert_GetID(tt *testing.T) {
+	a := &AppAlert{}
+	a.GetID()
+	a = nil
+	a.GetID()
+}
+
+func TestAppAlert_GetPhase(tt *testing.T) {
+	a := &AppAlert{}
+	a.GetPhase()
+	a = nil
+	a.GetPhase()
+}
+
+func TestAppAlert_GetProgress(tt *testing.T) {
+	a := &AppAlert{}
+	a.GetProgress()
+	a = nil
+	a.GetProgress()
+}
+
+func TestAppAlert_GetSlackWebhooks(tt *testing.T) {
+	a := &AppAlert{}
+	a.GetSlackWebhooks()
+	a = nil
+	a.GetSlackWebhooks()
+}
+
+func TestAppAlert_GetSpec(tt *testing.T) {
+	a := &AppAlert{}
+	a.GetSpec()
+	a = nil
+	a.GetSpec()
+}
+
+func TestAppAlertProgress_GetSteps(tt *testing.T) {
+	a := &AppAlertProgress{}
+	a.GetSteps()
+	a = nil
+	a.GetSteps()
+}
+
+func TestAppAlertProgressStep_GetEndedAt(tt *testing.T) {
+	a := &AppAlertProgressStep{}
+	a.GetEndedAt()
+	a = nil
+	a.GetEndedAt()
+}
+
+func TestAppAlertProgressStep_GetName(tt *testing.T) {
+	a := &AppAlertProgressStep{}
+	a.GetName()
+	a = nil
+	a.GetName()
+}
+
+func TestAppAlertProgressStep_GetReason(tt *testing.T) {
+	a := &AppAlertProgressStep{}
+	a.GetReason()
+	a = nil
+	a.GetReason()
+}
+
+func TestAppAlertProgressStep_GetStartedAt(tt *testing.T) {
+	a := &AppAlertProgressStep{}
+	a.GetStartedAt()
+	a = nil
+	a.GetStartedAt()
+}
+
+func TestAppAlertProgressStep_GetStatus(tt *testing.T) {
+	a := &AppAlertProgressStep{}
+	a.GetStatus()
+	a = nil
+	a.GetStatus()
+}
+
+func TestAppAlertProgressStep_GetSteps(tt *testing.T) {
+	a := &AppAlertProgressStep{}
+	a.GetSteps()
+	a = nil
+	a.GetSteps()
+}
+
+func TestAppAlertProgressStepReason_GetCode(tt *testing.T) {
+	a := &AppAlertProgressStepReason{}
+	a.GetCode()
+	a = nil
+	a.GetCode()
+}
+
+func TestAppAlertProgressStepReason_GetMessage(tt *testing.T) {
+	a := &AppAlertProgressStepReason{}
+	a.GetMessage()
+	a = nil
+	a.GetMessage()
+}
+
+func TestAppAlertSlackWebhook_GetChannel(tt *testing.T) {
+	a := &AppAlertSlackWebhook{}
+	a.GetChannel()
+	a = nil
+	a.GetChannel()
+}
+
+func TestAppAlertSlackWebhook_GetURL(tt *testing.T) {
+	a := &AppAlertSlackWebhook{}
+	a.GetURL()
+	a = nil
+	a.GetURL()
+}
+
+func TestAppAlertSpec_GetDisabled(tt *testing.T) {
+	a := &AppAlertSpec{}
+	a.GetDisabled()
+	a = nil
+	a.GetDisabled()
+}
+
+func TestAppAlertSpec_GetOperator(tt *testing.T) {
+	a := &AppAlertSpec{}
+	a.GetOperator()
+	a = nil
+	a.GetOperator()
+}
+
+func TestAppAlertSpec_GetRule(tt *testing.T) {
+	a := &AppAlertSpec{}
+	a.GetRule()
+	a = nil
+	a.GetRule()
+}
+
+func TestAppAlertSpec_GetValue(tt *testing.T) {
+	a := &AppAlertSpec{}
+	a.GetValue()
+	a = nil
+	a.GetValue()
+}
+
+func TestAppAlertSpec_GetWindow(tt *testing.T) {
+	a := &AppAlertSpec{}
+	a.GetWindow()
+	a = nil
+	a.GetWindow()
+}
+
+func TestAppBuildConfig_GetCNBVersioning(tt *testing.T) {
+	a := &AppBuildConfig{}
+	a.GetCNBVersioning()
+	a = nil
+	a.GetCNBVersioning()
+}
+
+func TestAppBuildConfigCNBVersioning_GetBuildpacks(tt *testing.T) {
+	a := &AppBuildConfigCNBVersioning{}
+	a.GetBuildpacks()
+	a = nil
+	a.GetBuildpacks()
+}
+
+func TestAppBuildConfigCNBVersioning_GetStackID(tt *testing.T) {
+	a := &AppBuildConfigCNBVersioning{}
+	a.GetStackID()
+	a = nil
+	a.GetStackID()
+}
+
+func TestAppCORSPolicy_GetAllowCredentials(tt *testing.T) {
+	a := &AppCORSPolicy{}
+	a.GetAllowCredentials()
+	a = nil
+	a.GetAllowCredentials()
+}
+
+func TestAppCORSPolicy_GetAllowHeaders(tt *testing.T) {
+	a := &AppCORSPolicy{}
+	a.GetAllowHeaders()
+	a = nil
+	a.GetAllowHeaders()
+}
+
+func TestAppCORSPolicy_GetAllowMethods(tt *testing.T) {
+	a := &AppCORSPolicy{}
+	a.GetAllowMethods()
+	a = nil
+	a.GetAllowMethods()
+}
+
+func TestAppCORSPolicy_GetAllowOrigins(tt *testing.T) {
+	a := &AppCORSPolicy{}
+	a.GetAllowOrigins()
+	a = nil
+	a.GetAllowOrigins()
+}
+
+func TestAppCORSPolicy_GetExposeHeaders(tt *testing.T) {
+	a := &AppCORSPolicy{}
+	a.GetExposeHeaders()
+	a = nil
+	a.GetExposeHeaders()
+}
+
+func TestAppCORSPolicy_GetMaxAge(tt *testing.T) {
+	a := &AppCORSPolicy{}
+	a.GetMaxAge()
+	a = nil
+	a.GetMaxAge()
+}
+
+func TestAppCreateRequest_GetProjectID(tt *testing.T) {
+	a := &AppCreateRequest{}
+	a.GetProjectID()
+	a = nil
+	a.GetProjectID()
+}
+
+func TestAppCreateRequest_GetSpec(tt *testing.T) {
+	a := &AppCreateRequest{}
+	a.GetSpec()
+	a = nil
+	a.GetSpec()
+}
+
+func TestAppDatabaseSpec_GetClusterName(tt *testing.T) {
+	a := &AppDatabaseSpec{}
+	a.GetClusterName()
+	a = nil
+	a.GetClusterName()
+}
+
+func TestAppDatabaseSpec_GetDBName(tt *testing.T) {
+	a := &AppDatabaseSpec{}
+	a.GetDBName()
+	a = nil
+	a.GetDBName()
+}
+
+func TestAppDatabaseSpec_GetDBUser(tt *testing.T) {
+	a := &AppDatabaseSpec{}
+	a.GetDBUser()
+	a = nil
+	a.GetDBUser()
+}
+
+func TestAppDatabaseSpec_GetEngine(tt *testing.T) {
+	a := &AppDatabaseSpec{}
+	a.GetEngine()
+	a = nil
+	a.GetEngine()
+}
+
+func TestAppDatabaseSpec_GetName(tt *testing.T) {
+	a := &AppDatabaseSpec{}
+	a.GetName()
+	a = nil
+	a.GetName()
+}
+
+func TestAppDatabaseSpec_GetNumNodes(tt *testing.T) {
+	a := &AppDatabaseSpec{}
+	a.GetNumNodes()
+	a = nil
+	a.GetNumNodes()
+}
+
+func TestAppDatabaseSpec_GetProduction(tt *testing.T) {
+	a := &AppDatabaseSpec{}
+	a.GetProduction()
+	a = nil
+	a.GetProduction()
+}
+
+func TestAppDatabaseSpec_GetSize(tt *testing.T) {
+	a := &AppDatabaseSpec{}
+	a.GetSize()
+	a = nil
+	a.GetSize()
+}
+
+func TestAppDatabaseSpec_GetVersion(tt *testing.T) {
+	a := &AppDatabaseSpec{}
+	a.GetVersion()
+	a = nil
+	a.GetVersion()
+}
+
+func TestAppDomain_GetCertificateExpiresAt(tt *testing.T) {
+	a := &AppDomain{}
+	a.GetCertificateExpiresAt()
+	a = nil
+	a.GetCertificateExpiresAt()
+}
+
+func TestAppDomain_GetID(tt *testing.T) {
+	a := &AppDomain{}
+	a.GetID()
+	a = nil
+	a.GetID()
+}
+
+func TestAppDomain_GetPhase(tt *testing.T) {
+	a := &AppDomain{}
+	a.GetPhase()
+	a = nil
+	a.GetPhase()
+}
+
+func TestAppDomain_GetProgress(tt *testing.T) {
+	a := &AppDomain{}
+	a.GetProgress()
+	a = nil
+	a.GetProgress()
+}
+
+func TestAppDomain_GetRotateValidationRecords(tt *testing.T) {
+	a := &AppDomain{}
+	a.GetRotateValidationRecords()
+	a = nil
+	a.GetRotateValidationRecords()
+}
+
+func TestAppDomain_GetSpec(tt *testing.T) {
+	a := &AppDomain{}
+	a.GetSpec()
+	a = nil
+	a.GetSpec()
+}
+
+func TestAppDomain_GetValidation(tt *testing.T) {
+	a := &AppDomain{}
+	a.GetValidation()
+	a = nil
+	a.GetValidation()
+}
+
+func TestAppDomain_GetValidations(tt *testing.T) {
+	a := &AppDomain{}
+	a.GetValidations()
+	a = nil
+	a.GetValidations()
+}
+
+func TestAppDomainProgress_GetSteps(tt *testing.T) {
+	a := &AppDomainProgress{}
+	a.GetSteps()
+	a = nil
+	a.GetSteps()
+}
+
+func TestAppDomainProgressStep_GetEndedAt(tt *testing.T) {
+	a := &AppDomainProgressStep{}
+	a.GetEndedAt()
+	a = nil
+	a.GetEndedAt()
+}
+
+func TestAppDomainProgressStep_GetName(tt *testing.T) {
+	a := &AppDomainProgressStep{}
+	a.GetName()
+	a = nil
+	a.GetName()
+}
+
+func TestAppDomainProgressStep_GetReason(tt *testing.T) {
+	a := &AppDomainProgressStep{}
+	a.GetReason()
+	a = nil
+	a.GetReason()
+}
+
+func TestAppDomainProgressStep_GetStartedAt(tt *testing.T) {
+	a := &AppDomainProgressStep{}
+	a.GetStartedAt()
+	a = nil
+	a.GetStartedAt()
+}
+
+func TestAppDomainProgressStep_GetStatus(tt *testing.T) {
+	a := &AppDomainProgressStep{}
+	a.GetStatus()
+	a = nil
+	a.GetStatus()
+}
+
+func TestAppDomainProgressStep_GetSteps(tt *testing.T) {
+	a := &AppDomainProgressStep{}
+	a.GetSteps()
+	a = nil
+	a.GetSteps()
+}
+
+func TestAppDomainProgressStepReason_GetCode(tt *testing.T) {
+	a := &AppDomainProgressStepReason{}
+	a.GetCode()
+	a = nil
+	a.GetCode()
+}
+
+func TestAppDomainProgressStepReason_GetMessage(tt *testing.T) {
+	a := &AppDomainProgressStepReason{}
+	a.GetMessage()
+	a = nil
+	a.GetMessage()
+}
+
+func TestAppDomainSpec_GetCertificate(tt *testing.T) {
+	a := &AppDomainSpec{}
+	a.GetCertificate()
+	a = nil
+	a.GetCertificate()
+}
+
+func TestAppDomainSpec_GetDomain(tt *testing.T) {
+	a := &AppDomainSpec{}
+	a.GetDomain()
+	a = nil
+	a.GetDomain()
+}
+
+func TestAppDomainSpec_GetMinimumTLSVersion(tt *testing.T) {
+	a := &AppDomainSpec{}
+	a.GetMinimumTLSVersion()
+	a = nil
+	a.GetMinimumTLSVersion()
+}
+
+func TestAppDomainSpec_GetType(tt *testing.T) {
+	a := &AppDomainSpec{}
+	a.GetType()
+	a = nil
+	a.GetType()
+}
+
+func TestAppDomainSpec_GetWildcard(tt *testing.T) {
+	a := &AppDomainSpec{}
+	a.GetWildcard()
+	a = nil
+	a.GetWildcard()
+}
+
+func TestAppDomainSpec_GetZone(tt *testing.T) {
+	a := &AppDomainSpec{}
+	a.GetZone()
+	a = nil
+	a.GetZone()
+}
+
+func TestAppDomainValidation_GetTXTName(tt *testing.T) {
+	a := &AppDomainValidation{}
+	a.GetTXTName()
+	a = nil
+	a.GetTXTName()
+}
+
+func TestAppDomainValidation_GetTXTValue(tt *testing.T) {
+	a := &AppDomainValidation{}
+	a.GetTXTValue()
+	a = nil
+	a.GetTXTValue()
+}
+
+func TestAppFunctionsSpec_GetAlerts(tt *testing.T) {
+	a := &AppFunctionsSpec{}
+	a.GetAlerts()
+	a = nil
+	a.GetAlerts()
+}
+
+func TestAppFunctionsSpec_GetCORS(tt *testing.T) {
+	a := &AppFunctionsSpec{}
+	a.GetCORS()
+	a = nil
+	a.GetCORS()
+}
+
+func TestAppFunctionsSpec_GetEnvs(tt *testing.T) {
+	a := &AppFunctionsSpec{}
+	a.GetEnvs()
+	a = nil
+	a.GetEnvs()
+}
+
+func TestAppFunctionsSpec_GetGit(tt *testing.T) {
+	a := &AppFunctionsSpec{}
+	a.GetGit()
+	a = nil
+	a.GetGit()
+}
+
+func TestAppFunctionsSpec_GetGitHub(tt *testing.T) {
+	a := &AppFunctionsSpec{}
+	a.GetGitHub()
+	a = nil
+	a.GetGitHub()
+}
+
+func TestAppFunctionsSpec_GetGitLab(tt *testing.T) {
+	a := &AppFunctionsSpec{}
+	a.GetGitLab()
+	a = nil
+	a.GetGitLab()
+}
+
+func TestAppFunctionsSpec_GetLogDestinations(tt *testing.T) {
+	a := &AppFunctionsSpec{}
+	a.GetLogDestinations()
+	a = nil
+	a.GetLogDestinations()
+}
+
+func TestAppFunctionsSpec_GetName(tt *testing.T) {
+	a := &AppFunctionsSpec{}
+	a.GetName()
+	a = nil
+	a.GetName()
+}
+
+func TestAppFunctionsSpec_GetRoutes(tt *testing.T) {
+	a := &AppFunctionsSpec{}
+	a.GetRoutes()
+	a = nil
+	a.GetRoutes()
+}
+
+func TestAppFunctionsSpec_GetSourceDir(tt *testing.T) {
+	a := &AppFunctionsSpec{}
+	a.GetSourceDir()
+	a = nil
+	a.GetSourceDir()
+}
+
+func TestAppIngressSpec_GetLoadBalancer(tt *testing.T) {
+	a := &AppIngressSpec{}
+	a.GetLoadBalancer()
+	a = nil
+	a.GetLoadBalancer()
+}
+
+func TestAppIngressSpec_GetLoadBalancerSize(tt *testing.T) {
+	a := &AppIngressSpec{}
+	a.GetLoadBalancerSize()
+	a = nil
+	a.GetLoadBalancerSize()
+}
+
+func TestAppIngressSpec_GetRules(tt *testing.T) {
+	a := &AppIngressSpec{}
+	a.GetRules()
+	a = nil
+	a.GetRules()
+}
+
+func TestAppIngressSpecRule_GetComponent(tt *testing.T) {
+	a := &AppIngressSpecRule{}
+	a.GetComponent()
+	a = nil
+	a.GetComponent()
+}
+
+func TestAppIngressSpecRule_GetCORS(tt *testing.T) {
+	a := &AppIngressSpecRule{}
+	a.GetCORS()
+	a = nil
+	a.GetCORS()
+}
+
+func TestAppIngressSpecRule_GetMatch(tt *testing.T) {
+	a := &AppIngressSpecRule{}
+	a.GetMatch()
+	a = nil
+	a.GetMatch()
+}
+
+func TestAppIngressSpecRule_GetRedirect(tt *testing.T) {
+	a := &AppIngressSpecRule{}
+	a.GetRedirect()
+	a = nil
+	a.GetRedirect()
+}
+
+func TestAppIngressSpecRuleMatch_GetPath(tt *testing.T) {
+	a := &AppIngressSpecRuleMatch{}
+	a.GetPath()
+	a = nil
+	a.GetPath()
+}
+
+func TestAppIngressSpecRuleRoutingComponent_GetName(tt *testing.T) {
+	a := &AppIngressSpecRuleRoutingComponent{}
+	a.GetName()
+	a = nil
+	a.GetName()
+}
+
+func TestAppIngressSpecRuleRoutingComponent_GetPreservePathPrefix(tt *testing.T) {
+	a := &AppIngressSpecRuleRoutingComponent{}
+	a.GetPreservePathPrefix()
+	a = nil
+	a.GetPreservePathPrefix()
+}
+
+func TestAppIngressSpecRuleRoutingComponent_GetRewrite(tt *testing.T) {
+	a := &AppIngressSpecRuleRoutingComponent{}
+	a.GetRewrite()
+	a = nil
+	a.GetRewrite()
+}
+
+func TestAppIngressSpecRuleRoutingRedirect_GetAuthority(tt *testing.T) {
+	a := &AppIngressSpecRuleRoutingRedirect{}
+	a.GetAuthority()
+	a = nil
+	a.GetAuthority()
+}
+
+func TestAppIngressSpecRuleRoutingRedirect_GetPort(tt *testing.T) {
+	a := &AppIngressSpecRuleRoutingRedirect{}
+	a.GetPort()
+	a = nil
+	a.GetPort()
+}
+
+func TestAppIngressSpecRuleRoutingRedirect_GetRedirectCode(tt *testing.T) {
+	a := &AppIngressSpecRuleRoutingRedirect{}
+	a.GetRedirectCode()
+	a = nil
+	a.GetRedirectCode()
+}
+
+func TestAppIngressSpecRuleRoutingRedirect_GetScheme(tt *testing.T) {
+	a := &AppIngressSpecRuleRoutingRedirect{}
+	a.GetScheme()
+	a = nil
+	a.GetScheme()
+}
+
+func TestAppIngressSpecRuleRoutingRedirect_GetUri(tt *testing.T) {
+	a := &AppIngressSpecRuleRoutingRedirect{}
+	a.GetUri()
+	a = nil
+	a.GetUri()
+}
+
+func TestAppIngressSpecRuleStringMatch_GetPrefix(tt *testing.T) {
+	a := &AppIngressSpecRuleStringMatch{}
+	a.GetPrefix()
+	a = nil
+	a.GetPrefix()
+}
+
+func TestAppInstanceSize_GetCPUs(tt *testing.T) {
+	a := &AppInstanceSize{}
+	a.GetCPUs()
+	a = nil
+	a.GetCPUs()
+}
+
+func TestAppInstanceSize_GetCPUType(tt *testing.T) {
+	a := &AppInstanceSize{}
+	a.GetCPUType()
+	a = nil
+	a.GetCPUType()
+}
+
+func TestAppInstanceSize_GetMemoryBytes(tt *testing.T) {
+	a := &AppInstanceSize{}
+	a.GetMemoryBytes()
+	a = nil
+	a.GetMemoryBytes()
+}
+
+func TestAppInstanceSize_GetName(tt *testing.T) {
+	a := &AppInstanceSize{}
+	a.GetName()
+	a = nil
+	a.GetName()
+}
+
+func TestAppInstanceSize_GetSlug(tt *testing.T) {
+	a := &AppInstanceSize{}
+	a.GetSlug()
+	a = nil
+	a.GetSlug()
+}
+
+func TestAppInstanceSize_GetTierDowngradeTo(tt *testing.T) {
+	a := &AppInstanceSize{}
+	a.GetTierDowngradeTo()
+	a = nil
+	a.GetTierDowngradeTo()
+}
+
+func TestAppInstanceSize_GetTierSlug(tt *testing.T) {
+	a := &AppInstanceSize{}
+	a.GetTierSlug()
+	a = nil
+	a.GetTierSlug()
+}
+
+func TestAppInstanceSize_GetTierUpgradeTo(tt *testing.T) {
+	a := &AppInstanceSize{}
+	a.GetTierUpgradeTo()
+	a = nil
+	a.GetTierUpgradeTo()
+}
+
+func TestAppInstanceSize_GetUSDPerMonth(tt *testing.T) {
+	a := &AppInstanceSize{}
+	a.GetUSDPerMonth()
+	a = nil
+	a.GetUSDPerMonth()
+}
+
+func TestAppInstanceSize_GetUSDPerSecond(tt *testing.T) {
+	a := &AppInstanceSize{}
+	a.GetUSDPerSecond()
+	a = nil
+	a.GetUSDPerSecond()
+}
+
+func TestAppJobSpec_GetAlerts(tt *testing.T) {
+	a := &AppJobSpec{}
+	a.GetAlerts()
+	a = nil
+	a.GetAlerts()
+}
+
+func TestAppJobSpec_GetBuildCommand(tt *testing.T) {
+	a := &AppJobSpec{}
+	a.GetBuildCommand()
+	a = nil
+	a.GetBuildCommand()
+}
+
+func TestAppJobSpec_GetDockerfilePath(tt *testing.T) {
+	a := &AppJobSpec{}
+	a.GetDockerfilePath()
+	a = nil
+	a.GetDockerfilePath()
+}
+
+func TestAppJobSpec_GetEnvironmentSlug(tt *testing.T) {
+	a := &AppJobSpec{}
+	a.GetEnvironmentSlug()
+	a = nil
+	a.GetEnvironmentSlug()
+}
+
+func TestAppJobSpec_GetEnvs(tt *testing.T) {
+	a := &AppJobSpec{}
+	a.GetEnvs()
+	a = nil
+	a.GetEnvs()
+}
+
+func TestAppJobSpec_GetGit(tt *testing.T) {
+	a := &AppJobSpec{}
+	a.GetGit()
+	a = nil
+	a.GetGit()
+}
+
+func TestAppJobSpec_GetGitHub(tt *testing.T) {
+	a := &AppJobSpec{}
+	a.GetGitHub()
+	a = nil
+	a.GetGitHub()
+}
+
+func TestAppJobSpec_GetGitLab(tt *testing.T) {
+	a := &AppJobSpec{}
+	a.GetGitLab()
+	a = nil
+	a.GetGitLab()
+}
+
+func TestAppJobSpec_GetImage(tt *testing.T) {
+	a := &AppJobSpec{}
+	a.GetImage()
+	a = nil
+	a.GetImage()
+}
+
+func TestAppJobSpec_GetInstanceCount(tt *testing.T) {
+	a := &AppJobSpec{}
+	a.GetInstanceCount()
+	a = nil
+	a.GetInstanceCount()
+}
+
+func TestAppJobSpec_GetInstanceSizeSlug(tt *testing.T) {
+	a := &AppJobSpec{}
+	a.GetInstanceSizeSlug()
+	a = nil
+	a.GetInstanceSizeSlug()
+}
+
+func TestAppJobSpec_GetKind(tt *testing.T) {
+	a := &AppJobSpec{}
+	a.GetKind()
+	a = nil
+	a.GetKind()
+}
+
+func TestAppJobSpec_GetLogDestinations(tt *testing.T) {
+	a := &AppJobSpec{}
+	a.GetLogDestinations()
+	a = nil
+	a.GetLogDestinations()
+}
+
+func TestAppJobSpec_GetName(tt *testing.T) {
+	a := &AppJobSpec{}
+	a.GetName()
+	a = nil
+	a.GetName()
+}
+
+func TestAppJobSpec_GetRunCommand(tt *testing.T) {
+	a := &AppJobSpec{}
+	a.GetRunCommand()
+	a = nil
+	a.GetRunCommand()
+}
+
+func TestAppJobSpec_GetSourceDir(tt *testing.T) {
+	a := &AppJobSpec{}
+	a.GetSourceDir()
+	a = nil
+	a.GetSourceDir()
+}
+
+func TestAppLogDestinationSpec_GetDatadog(tt *testing.T) {
+	a := &AppLogDestinationSpec{}
+	a.GetDatadog()
+	a = nil
+	a.GetDatadog()
+}
+
+func TestAppLogDestinationSpec_GetEndpoint(tt *testing.T) {
+	a := &AppLogDestinationSpec{}
+	a.GetEndpoint()
+	a = nil
+	a.GetEndpoint()
+}
+
+func TestAppLogDestinationSpec_GetHeaders(tt *testing.T) {
+	a := &AppLogDestinationSpec{}
+	a.GetHeaders()
+	a = nil
+	a.GetHeaders()
+}
+
+func TestAppLogDestinationSpec_GetLogtail(tt *testing.T) {
+	a := &AppLogDestinationSpec{}
+	a.GetLogtail()
+	a = nil
+	a.GetLogtail()
+}
+
+func TestAppLogDestinationSpec_GetName(tt *testing.T) {
+	a := &AppLogDestinationSpec{}
+	a.GetName()
+	a = nil
+	a.GetName()
+}
+
+func TestAppLogDestinationSpec_GetPapertrail(tt *testing.T) {
+	a := &AppLogDestinationSpec{}
+	a.GetPapertrail()
+	a = nil
+	a.GetPapertrail()
+}
+
+func TestAppLogDestinationSpec_GetTLSInsecure(tt *testing.T) {
+	a := &AppLogDestinationSpec{}
+	a.GetTLSInsecure()
+	a = nil
+	a.GetTLSInsecure()
+}
+
+func TestAppLogDestinationSpecDataDog_GetApiKey(tt *testing.T) {
+	a := &AppLogDestinationSpecDataDog{}
+	a.GetApiKey()
+	a = nil
+	a.GetApiKey()
+}
+
+func TestAppLogDestinationSpecDataDog_GetEndpoint(tt *testing.T) {
+	a := &AppLogDestinationSpecDataDog{}
+	a.GetEndpoint()
+	a = nil
+	a.GetEndpoint()
+}
+
+func TestAppLogDestinationSpecHeader_GetKey(tt *testing.T) {
+	a := &AppLogDestinationSpecHeader{}
+	a.GetKey()
+	a = nil
+	a.GetKey()
+}
+
+func TestAppLogDestinationSpecHeader_GetValue(tt *testing.T) {
+	a := &AppLogDestinationSpecHeader{}
+	a.GetValue()
+	a = nil
+	a.GetValue()
+}
+
+func TestAppLogDestinationSpecLogtail_GetToken(tt *testing.T) {
+	a := &AppLogDestinationSpecLogtail{}
+	a.GetToken()
+	a = nil
+	a.GetToken()
+}
+
+func TestAppLogDestinationSpecPapertrail_GetEndpoint(tt *testing.T) {
+	a := &AppLogDestinationSpecPapertrail{}
+	a.GetEndpoint()
+	a = nil
+	a.GetEndpoint()
+}
+
+func TestAppProposeRequest_GetAppID(tt *testing.T) {
+	a := &AppProposeRequest{}
+	a.GetAppID()
+	a = nil
+	a.GetAppID()
+}
+
+func TestAppProposeRequest_GetSpec(tt *testing.T) {
+	a := &AppProposeRequest{}
+	a.GetSpec()
+	a = nil
+	a.GetSpec()
+}
+
+func TestAppProposeResponse_GetAppCost(tt *testing.T) {
+	a := &AppProposeResponse{}
+	a.GetAppCost()
+	a = nil
+	a.GetAppCost()
+}
+
+func TestAppProposeResponse_GetAppIsStarter(tt *testing.T) {
+	a := &AppProposeResponse{}
+	a.GetAppIsStarter()
+	a = nil
+	a.GetAppIsStarter()
+}
+
+func TestAppProposeResponse_GetAppIsStatic(tt *testing.T) {
+	a := &AppProposeResponse{}
+	a.GetAppIsStatic()
+	a = nil
+	a.GetAppIsStatic()
+}
+
+func TestAppProposeResponse_GetAppNameAvailable(tt *testing.T) {
+	a := &AppProposeResponse{}
+	a.GetAppNameAvailable()
+	a = nil
+	a.GetAppNameAvailable()
+}
+
+func TestAppProposeResponse_GetAppNameSuggestion(tt *testing.T) {
+	a := &AppProposeResponse{}
+	a.GetAppNameSuggestion()
+	a = nil
+	a.GetAppNameSuggestion()
+}
+
+func TestAppProposeResponse_GetAppTierDowngradeCost(tt *testing.T) {
+	a := &AppProposeResponse{}
+	a.GetAppTierDowngradeCost()
+	a = nil
+	a.GetAppTierDowngradeCost()
+}
+
+func TestAppProposeResponse_GetAppTierUpgradeCost(tt *testing.T) {
+	a := &AppProposeResponse{}
+	a.GetAppTierUpgradeCost()
+	a = nil
+	a.GetAppTierUpgradeCost()
+}
+
+func TestAppProposeResponse_GetExistingStarterApps(tt *testing.T) {
+	a := &AppProposeResponse{}
+	a.GetExistingStarterApps()
+	a = nil
+	a.GetExistingStarterApps()
+}
+
+func TestAppProposeResponse_GetExistingStaticApps(tt *testing.T) {
+	a := &AppProposeResponse{}
+	a.GetExistingStaticApps()
+	a = nil
+	a.GetExistingStaticApps()
+}
+
+func TestAppProposeResponse_GetMaxFreeStarterApps(tt *testing.T) {
+	a := &AppProposeResponse{}
+	a.GetMaxFreeStarterApps()
+	a = nil
+	a.GetMaxFreeStarterApps()
+}
+
+func TestAppProposeResponse_GetMaxFreeStaticApps(tt *testing.T) {
+	a := &AppProposeResponse{}
+	a.GetMaxFreeStaticApps()
+	a = nil
+	a.GetMaxFreeStaticApps()
+}
+
+func TestAppProposeResponse_GetSpec(tt *testing.T) {
+	a := &AppProposeResponse{}
+	a.GetSpec()
+	a = nil
+	a.GetSpec()
+}
+
+func TestAppRegion_GetContinent(tt *testing.T) {
+	a := &AppRegion{}
+	a.GetContinent()
+	a = nil
+	a.GetContinent()
+}
+
+func TestAppRegion_GetDataCenters(tt *testing.T) {
+	a := &AppRegion{}
+	a.GetDataCenters()
+	a = nil
+	a.GetDataCenters()
+}
+
+func TestAppRegion_GetDefault(tt *testing.T) {
+	a := &AppRegion{}
+	a.GetDefault()
+	a = nil
+	a.GetDefault()
+}
+
+func TestAppRegion_GetDisabled(tt *testing.T) {
+	a := &AppRegion{}
+	a.GetDisabled()
+	a = nil
+	a.GetDisabled()
+}
+
+func TestAppRegion_GetFlag(tt *testing.T) {
+	a := &AppRegion{}
+	a.GetFlag()
+	a = nil
+	a.GetFlag()
+}
+
+func TestAppRegion_GetLabel(tt *testing.T) {
+	a := &AppRegion{}
+	a.GetLabel()
+	a = nil
+	a.GetLabel()
+}
+
+func TestAppRegion_GetReason(tt *testing.T) {
+	a := &AppRegion{}
+	a.GetReason()
+	a = nil
+	a.GetReason()
+}
+
+func TestAppRegion_GetSlug(tt *testing.T) {
+	a := &AppRegion{}
+	a.GetSlug()
+	a = nil
+	a.GetSlug()
+}
+
+func TestAppRouteSpec_GetPath(tt *testing.T) {
+	a := &AppRouteSpec{}
+	a.GetPath()
+	a = nil
+	a.GetPath()
+}
+
+func TestAppRouteSpec_GetPreservePathPrefix(tt *testing.T) {
+	a := &AppRouteSpec{}
+	a.GetPreservePathPrefix()
+	a = nil
+	a.GetPreservePathPrefix()
+}
+
+func TestAppServiceSpec_GetAlerts(tt *testing.T) {
+	a := &AppServiceSpec{}
+	a.GetAlerts()
+	a = nil
+	a.GetAlerts()
+}
+
+func TestAppServiceSpec_GetBuildCommand(tt *testing.T) {
+	a := &AppServiceSpec{}
+	a.GetBuildCommand()
+	a = nil
+	a.GetBuildCommand()
+}
+
+func TestAppServiceSpec_GetCORS(tt *testing.T) {
+	a := &AppServiceSpec{}
+	a.GetCORS()
+	a = nil
+	a.GetCORS()
+}
+
+func TestAppServiceSpec_GetDockerfilePath(tt *testing.T) {
+	a := &AppServiceSpec{}
+	a.GetDockerfilePath()
+	a = nil
+	a.GetDockerfilePath()
+}
+
+func TestAppServiceSpec_GetEnvironmentSlug(tt *testing.T) {
+	a := &AppServiceSpec{}
+	a.GetEnvironmentSlug()
+	a = nil
+	a.GetEnvironmentSlug()
+}
+
+func TestAppServiceSpec_GetEnvs(tt *testing.T) {
+	a := &AppServiceSpec{}
+	a.GetEnvs()
+	a = nil
+	a.GetEnvs()
+}
+
+func TestAppServiceSpec_GetGit(tt *testing.T) {
+	a := &AppServiceSpec{}
+	a.GetGit()
+	a = nil
+	a.GetGit()
+}
+
+func TestAppServiceSpec_GetGitHub(tt *testing.T) {
+	a := &AppServiceSpec{}
+	a.GetGitHub()
+	a = nil
+	a.GetGitHub()
+}
+
+func TestAppServiceSpec_GetGitLab(tt *testing.T) {
+	a := &AppServiceSpec{}
+	a.GetGitLab()
+	a = nil
+	a.GetGitLab()
+}
+
+func TestAppServiceSpec_GetHealthCheck(tt *testing.T) {
+	a := &AppServiceSpec{}
+	a.GetHealthCheck()
+	a = nil
+	a.GetHealthCheck()
+}
+
+func TestAppServiceSpec_GetHTTPPort(tt *testing.T) {
+	a := &AppServiceSpec{}
+	a.GetHTTPPort()
+	a = nil
+	a.GetHTTPPort()
+}
+
+func TestAppServiceSpec_GetImage(tt *testing.T) {
+	a := &AppServiceSpec{}
+	a.GetImage()
+	a = nil
+	a.GetImage()
+}
+
+func TestAppServiceSpec_GetInstanceCount(tt *testing.T) {
+	a := &AppServiceSpec{}
+	a.GetInstanceCount()
+	a = nil
+	a.GetInstanceCount()
+}
+
+func TestAppServiceSpec_GetInstanceSizeSlug(tt *testing.T) {
+	a := &AppServiceSpec{}
+	a.GetInstanceSizeSlug()
+	a = nil
+	a.GetInstanceSizeSlug()
+}
+
+func TestAppServiceSpec_GetInternalPorts(tt *testing.T) {
+	a := &AppServiceSpec{}
+	a.GetInternalPorts()
+	a = nil
+	a.GetInternalPorts()
+}
+
+func TestAppServiceSpec_GetLogDestinations(tt *testing.T) {
+	a := &AppServiceSpec{}
+	a.GetLogDestinations()
+	a = nil
+	a.GetLogDestinations()
+}
+
+func TestAppServiceSpec_GetName(tt *testing.T) {
+	a := &AppServiceSpec{}
+	a.GetName()
+	a = nil
+	a.GetName()
+}
+
+func TestAppServiceSpec_GetRoutes(tt *testing.T) {
+	a := &AppServiceSpec{}
+	a.GetRoutes()
+	a = nil
+	a.GetRoutes()
+}
+
+func TestAppServiceSpec_GetRunCommand(tt *testing.T) {
+	a := &AppServiceSpec{}
+	a.GetRunCommand()
+	a = nil
+	a.GetRunCommand()
+}
+
+func TestAppServiceSpec_GetSourceDir(tt *testing.T) {
+	a := &AppServiceSpec{}
+	a.GetSourceDir()
+	a = nil
+	a.GetSourceDir()
+}
+
+func TestAppServiceSpecHealthCheck_GetFailureThreshold(tt *testing.T) {
+	a := &AppServiceSpecHealthCheck{}
+	a.GetFailureThreshold()
+	a = nil
+	a.GetFailureThreshold()
+}
+
+func TestAppServiceSpecHealthCheck_GetHTTPPath(tt *testing.T) {
+	a := &AppServiceSpecHealthCheck{}
+	a.GetHTTPPath()
+	a = nil
+	a.GetHTTPPath()
+}
+
+func TestAppServiceSpecHealthCheck_GetInitialDelaySeconds(tt *testing.T) {
+	a := &AppServiceSpecHealthCheck{}
+	a.GetInitialDelaySeconds()
+	a = nil
+	a.GetInitialDelaySeconds()
+}
+
+func TestAppServiceSpecHealthCheck_GetPath(tt *testing.T) {
+	a := &AppServiceSpecHealthCheck{}
+	a.GetPath()
+	a = nil
+	a.GetPath()
+}
+
+func TestAppServiceSpecHealthCheck_GetPeriodSeconds(tt *testing.T) {
+	a := &AppServiceSpecHealthCheck{}
+	a.GetPeriodSeconds()
+	a = nil
+	a.GetPeriodSeconds()
+}
+
+func TestAppServiceSpecHealthCheck_GetPort(tt *testing.T) {
+	a := &AppServiceSpecHealthCheck{}
+	a.GetPort()
+	a = nil
+	a.GetPort()
+}
+
+func TestAppServiceSpecHealthCheck_GetSuccessThreshold(tt *testing.T) {
+	a := &AppServiceSpecHealthCheck{}
+	a.GetSuccessThreshold()
+	a = nil
+	a.GetSuccessThreshold()
+}
+
+func TestAppServiceSpecHealthCheck_GetTimeoutSeconds(tt *testing.T) {
+	a := &AppServiceSpecHealthCheck{}
+	a.GetTimeoutSeconds()
+	a = nil
+	a.GetTimeoutSeconds()
+}
+
+func TestAppSpec_GetAlerts(tt *testing.T) {
+	a := &AppSpec{}
+	a.GetAlerts()
+	a = nil
+	a.GetAlerts()
+}
+
+func TestAppSpec_GetDatabases(tt *testing.T) {
+	a := &AppSpec{}
+	a.GetDatabases()
+	a = nil
+	a.GetDatabases()
+}
+
+func TestAppSpec_GetDomains(tt *testing.T) {
+	a := &AppSpec{}
+	a.GetDomains()
+	a = nil
+	a.GetDomains()
+}
+
+func TestAppSpec_GetEnvs(tt *testing.T) {
+	a := &AppSpec{}
+	a.GetEnvs()
+	a = nil
+	a.GetEnvs()
+}
+
+func TestAppSpec_GetFeatures(tt *testing.T) {
+	a := &AppSpec{}
+	a.GetFeatures()
+	a = nil
+	a.GetFeatures()
+}
+
+func TestAppSpec_GetFunctions(tt *testing.T) {
+	a := &AppSpec{}
+	a.GetFunctions()
+	a = nil
+	a.GetFunctions()
+}
+
+func TestAppSpec_GetIngress(tt *testing.T) {
+	a := &AppSpec{}
+	a.GetIngress()
+	a = nil
+	a.GetIngress()
+}
+
+func TestAppSpec_GetJobs(tt *testing.T) {
+	a := &AppSpec{}
+	a.GetJobs()
+	a = nil
+	a.GetJobs()
+}
+
+func TestAppSpec_GetName(tt *testing.T) {
+	a := &AppSpec{}
+	a.GetName()
+	a = nil
+	a.GetName()
+}
+
+func TestAppSpec_GetRegion(tt *testing.T) {
+	a := &AppSpec{}
+	a.GetRegion()
+	a = nil
+	a.GetRegion()
+}
+
+func TestAppSpec_GetServices(tt *testing.T) {
+	a := &AppSpec{}
+	a.GetServices()
+	a = nil
+	a.GetServices()
+}
+
+func TestAppSpec_GetStaticSites(tt *testing.T) {
+	a := &AppSpec{}
+	a.GetStaticSites()
+	a = nil
+	a.GetStaticSites()
+}
+
+func TestAppSpec_GetWorkers(tt *testing.T) {
+	a := &AppSpec{}
+	a.GetWorkers()
+	a = nil
+	a.GetWorkers()
+}
+
+func TestAppStaticSiteSpec_GetBuildCommand(tt *testing.T) {
+	a := &AppStaticSiteSpec{}
+	a.GetBuildCommand()
+	a = nil
+	a.GetBuildCommand()
+}
+
+func TestAppStaticSiteSpec_GetCatchallDocument(tt *testing.T) {
+	a := &AppStaticSiteSpec{}
+	a.GetCatchallDocument()
+	a = nil
+	a.GetCatchallDocument()
+}
+
+func TestAppStaticSiteSpec_GetCORS(tt *testing.T) {
+	a := &AppStaticSiteSpec{}
+	a.GetCORS()
+	a = nil
+	a.GetCORS()
+}
+
+func TestAppStaticSiteSpec_GetDockerfilePath(tt *testing.T) {
+	a := &AppStaticSiteSpec{}
+	a.GetDockerfilePath()
+	a = nil
+	a.GetDockerfilePath()
+}
+
+func TestAppStaticSiteSpec_GetEnvironmentSlug(tt *testing.T) {
+	a := &AppStaticSiteSpec{}
+	a.GetEnvironmentSlug()
+	a = nil
+	a.GetEnvironmentSlug()
+}
+
+func TestAppStaticSiteSpec_GetEnvs(tt *testing.T) {
+	a := &AppStaticSiteSpec{}
+	a.GetEnvs()
+	a = nil
+	a.GetEnvs()
+}
+
+func TestAppStaticSiteSpec_GetErrorDocument(tt *testing.T) {
+	a := &AppStaticSiteSpec{}
+	a.GetErrorDocument()
+	a = nil
+	a.GetErrorDocument()
+}
+
+func TestAppStaticSiteSpec_GetGit(tt *testing.T) {
+	a := &AppStaticSiteSpec{}
+	a.GetGit()
+	a = nil
+	a.GetGit()
+}
+
+func TestAppStaticSiteSpec_GetGitHub(tt *testing.T) {
+	a := &AppStaticSiteSpec{}
+	a.GetGitHub()
+	a = nil
+	a.GetGitHub()
+}
+
+func TestAppStaticSiteSpec_GetGitLab(tt *testing.T) {
+	a := &AppStaticSiteSpec{}
+	a.GetGitLab()
+	a = nil
+	a.GetGitLab()
+}
+
+func TestAppStaticSiteSpec_GetIndexDocument(tt *testing.T) {
+	a := &AppStaticSiteSpec{}
+	a.GetIndexDocument()
+	a = nil
+	a.GetIndexDocument()
+}
+
+func TestAppStaticSiteSpec_GetName(tt *testing.T) {
+	a := &AppStaticSiteSpec{}
+	a.GetName()
+	a = nil
+	a.GetName()
+}
+
+func TestAppStaticSiteSpec_GetOutputDir(tt *testing.T) {
+	a := &AppStaticSiteSpec{}
+	a.GetOutputDir()
+	a = nil
+	a.GetOutputDir()
+}
+
+func TestAppStaticSiteSpec_GetRoutes(tt *testing.T) {
+	a := &AppStaticSiteSpec{}
+	a.GetRoutes()
+	a = nil
+	a.GetRoutes()
+}
+
+func TestAppStaticSiteSpec_GetSourceDir(tt *testing.T) {
+	a := &AppStaticSiteSpec{}
+	a.GetSourceDir()
+	a = nil
+	a.GetSourceDir()
+}
+
+func TestAppStringMatch_GetExact(tt *testing.T) {
+	a := &AppStringMatch{}
+	a.GetExact()
+	a = nil
+	a.GetExact()
+}
+
+func TestAppStringMatch_GetPrefix(tt *testing.T) {
+	a := &AppStringMatch{}
+	a.GetPrefix()
+	a = nil
+	a.GetPrefix()
+}
+
+func TestAppStringMatch_GetRegex(tt *testing.T) {
+	a := &AppStringMatch{}
+	a.GetRegex()
+	a = nil
+	a.GetRegex()
+}
+
+func TestAppTier_GetBuildSeconds(tt *testing.T) {
+	a := &AppTier{}
+	a.GetBuildSeconds()
+	a = nil
+	a.GetBuildSeconds()
+}
+
+func TestAppTier_GetEgressBandwidthBytes(tt *testing.T) {
+	a := &AppTier{}
+	a.GetEgressBandwidthBytes()
+	a = nil
+	a.GetEgressBandwidthBytes()
+}
+
+func TestAppTier_GetName(tt *testing.T) {
+	a := &AppTier{}
+	a.GetName()
+	a = nil
+	a.GetName()
+}
+
+func TestAppTier_GetSlug(tt *testing.T) {
+	a := &AppTier{}
+	a.GetSlug()
+	a = nil
+	a.GetSlug()
+}
+
+func TestAppVariableDefinition_GetKey(tt *testing.T) {
+	a := &AppVariableDefinition{}
+	a.GetKey()
+	a = nil
+	a.GetKey()
+}
+
+func TestAppVariableDefinition_GetScope(tt *testing.T) {
+	a := &AppVariableDefinition{}
+	a.GetScope()
+	a = nil
+	a.GetScope()
+}
+
+func TestAppVariableDefinition_GetType(tt *testing.T) {
+	a := &AppVariableDefinition{}
+	a.GetType()
+	a = nil
+	a.GetType()
+}
+
+func TestAppVariableDefinition_GetValue(tt *testing.T) {
+	a := &AppVariableDefinition{}
+	a.GetValue()
+	a = nil
+	a.GetValue()
+}
+
+func TestAppWorkerSpec_GetAlerts(tt *testing.T) {
+	a := &AppWorkerSpec{}
+	a.GetAlerts()
+	a = nil
+	a.GetAlerts()
+}
+
+func TestAppWorkerSpec_GetBuildCommand(tt *testing.T) {
+	a := &AppWorkerSpec{}
+	a.GetBuildCommand()
+	a = nil
+	a.GetBuildCommand()
+}
+
+func TestAppWorkerSpec_GetDockerfilePath(tt *testing.T) {
+	a := &AppWorkerSpec{}
+	a.GetDockerfilePath()
+	a = nil
+	a.GetDockerfilePath()
+}
+
+func TestAppWorkerSpec_GetEnvironmentSlug(tt *testing.T) {
+	a := &AppWorkerSpec{}
+	a.GetEnvironmentSlug()
+	a = nil
+	a.GetEnvironmentSlug()
+}
+
+func TestAppWorkerSpec_GetEnvs(tt *testing.T) {
+	a := &AppWorkerSpec{}
+	a.GetEnvs()
+	a = nil
+	a.GetEnvs()
+}
+
+func TestAppWorkerSpec_GetGit(tt *testing.T) {
+	a := &AppWorkerSpec{}
+	a.GetGit()
+	a = nil
+	a.GetGit()
+}
+
+func TestAppWorkerSpec_GetGitHub(tt *testing.T) {
+	a := &AppWorkerSpec{}
+	a.GetGitHub()
+	a = nil
+	a.GetGitHub()
+}
+
+func TestAppWorkerSpec_GetGitLab(tt *testing.T) {
+	a := &AppWorkerSpec{}
+	a.GetGitLab()
+	a = nil
+	a.GetGitLab()
+}
+
+func TestAppWorkerSpec_GetImage(tt *testing.T) {
+	a := &AppWorkerSpec{}
+	a.GetImage()
+	a = nil
+	a.GetImage()
+}
+
+func TestAppWorkerSpec_GetInstanceCount(tt *testing.T) {
+	a := &AppWorkerSpec{}
+	a.GetInstanceCount()
+	a = nil
+	a.GetInstanceCount()
+}
+
+func TestAppWorkerSpec_GetInstanceSizeSlug(tt *testing.T) {
+	a := &AppWorkerSpec{}
+	a.GetInstanceSizeSlug()
+	a = nil
+	a.GetInstanceSizeSlug()
+}
+
+func TestAppWorkerSpec_GetLogDestinations(tt *testing.T) {
+	a := &AppWorkerSpec{}
+	a.GetLogDestinations()
+	a = nil
+	a.GetLogDestinations()
+}
+
+func TestAppWorkerSpec_GetName(tt *testing.T) {
+	a := &AppWorkerSpec{}
+	a.GetName()
+	a = nil
+	a.GetName()
+}
+
+func TestAppWorkerSpec_GetRunCommand(tt *testing.T) {
+	a := &AppWorkerSpec{}
+	a.GetRunCommand()
+	a = nil
+	a.GetRunCommand()
+}
+
+func TestAppWorkerSpec_GetSourceDir(tt *testing.T) {
+	a := &AppWorkerSpec{}
+	a.GetSourceDir()
+	a = nil
+	a.GetSourceDir()
+}
+
+func TestBuildpack_GetDescription(tt *testing.T) {
+	b := &Buildpack{}
+	b.GetDescription()
+	b = nil
+	b.GetDescription()
+}
+
+func TestBuildpack_GetDocsLink(tt *testing.T) {
+	b := &Buildpack{}
+	b.GetDocsLink()
+	b = nil
+	b.GetDocsLink()
+}
+
+func TestBuildpack_GetID(tt *testing.T) {
+	b := &Buildpack{}
+	b.GetID()
+	b = nil
+	b.GetID()
+}
+
+func TestBuildpack_GetLatest(tt *testing.T) {
+	b := &Buildpack{}
+	b.GetLatest()
+	b = nil
+	b.GetLatest()
+}
+
+func TestBuildpack_GetMajorVersion(tt *testing.T) {
+	b := &Buildpack{}
+	b.GetMajorVersion()
+	b = nil
+	b.GetMajorVersion()
+}
+
+func TestBuildpack_GetName(tt *testing.T) {
+	b := &Buildpack{}
+	b.GetName()
+	b = nil
+	b.GetName()
+}
+
+func TestBuildpack_GetVersion(tt *testing.T) {
+	b := &Buildpack{}
+	b.GetVersion()
+	b = nil
+	b.GetVersion()
+}
+
+func TestDeployment_GetCause(tt *testing.T) {
+	d := &Deployment{}
+	d.GetCause()
+	d = nil
+	d.GetCause()
+}
+
+func TestDeployment_GetCauseDetails(tt *testing.T) {
+	d := &Deployment{}
+	d.GetCauseDetails()
+	d = nil
+	d.GetCauseDetails()
+}
+
+func TestDeployment_GetClonedFrom(tt *testing.T) {
+	d := &Deployment{}
+	d.GetClonedFrom()
+	d = nil
+	d.GetClonedFrom()
+}
+
+func TestDeployment_GetCreatedAt(tt *testing.T) {
+	d := &Deployment{}
+	d.GetCreatedAt()
+	d = nil
+	d.GetCreatedAt()
+}
+
+func TestDeployment_GetFunctions(tt *testing.T) {
+	d := &Deployment{}
+	d.GetFunctions()
+	d = nil
+	d.GetFunctions()
+}
+
+func TestDeployment_GetID(tt *testing.T) {
+	d := &Deployment{}
+	d.GetID()
+	d = nil
+	d.GetID()
+}
+
+func TestDeployment_GetJobs(tt *testing.T) {
+	d := &Deployment{}
+	d.GetJobs()
+	d = nil
+	d.GetJobs()
+}
+
+func TestDeployment_GetLoadBalancerID(tt *testing.T) {
+	d := &Deployment{}
+	d.GetLoadBalancerID()
+	d = nil
+	d.GetLoadBalancerID()
+}
+
+func TestDeployment_GetPhase(tt *testing.T) {
+	d := &Deployment{}
+	d.GetPhase()
+	d = nil
+	d.GetPhase()
+}
+
+func TestDeployment_GetPhaseLastUpdatedAt(tt *testing.T) {
+	d := &Deployment{}
+	d.GetPhaseLastUpdatedAt()
+	d = nil
+	d.GetPhaseLastUpdatedAt()
+}
+
+func TestDeployment_GetPreviousDeploymentID(tt *testing.T) {
+	d := &Deployment{}
+	d.GetPreviousDeploymentID()
+	d = nil
+	d.GetPreviousDeploymentID()
+}
+
+func TestDeployment_GetProgress(tt *testing.T) {
+	d := &Deployment{}
+	d.GetProgress()
+	d = nil
+	d.GetProgress()
+}
+
+func TestDeployment_GetServices(tt *testing.T) {
+	d := &Deployment{}
+	d.GetServices()
+	d = nil
+	d.GetServices()
+}
+
+func TestDeployment_GetSpec(tt *testing.T) {
+	d := &Deployment{}
+	d.GetSpec()
+	d = nil
+	d.GetSpec()
+}
+
+func TestDeployment_GetStaticSites(tt *testing.T) {
+	d := &Deployment{}
+	d.GetStaticSites()
+	d = nil
+	d.GetStaticSites()
+}
+
+func TestDeployment_GetTierSlug(tt *testing.T) {
+	d := &Deployment{}
+	d.GetTierSlug()
+	d = nil
+	d.GetTierSlug()
+}
+
+func TestDeployment_GetTiming(tt *testing.T) {
+	d := &Deployment{}
+	d.GetTiming()
+	d = nil
+	d.GetTiming()
+}
+
+func TestDeployment_GetUpdatedAt(tt *testing.T) {
+	d := &Deployment{}
+	d.GetUpdatedAt()
+	d = nil
+	d.GetUpdatedAt()
+}
+
+func TestDeployment_GetWorkers(tt *testing.T) {
+	d := &Deployment{}
+	d.GetWorkers()
+	d = nil
+	d.GetWorkers()
+}
+
+func TestDeploymentCauseDetails_GetDigitalOceanUserAction(tt *testing.T) {
+	d := &DeploymentCauseDetails{}
+	d.GetDigitalOceanUserAction()
+	d = nil
+	d.GetDigitalOceanUserAction()
+}
+
+func TestDeploymentCauseDetails_GetDOCRPush(tt *testing.T) {
+	d := &DeploymentCauseDetails{}
+	d.GetDOCRPush()
+	d = nil
+	d.GetDOCRPush()
+}
+
+func TestDeploymentCauseDetails_GetGitPush(tt *testing.T) {
+	d := &DeploymentCauseDetails{}
+	d.GetGitPush()
+	d = nil
+	d.GetGitPush()
+}
+
+func TestDeploymentCauseDetails_GetInternal(tt *testing.T) {
+	d := &DeploymentCauseDetails{}
+	d.GetInternal()
+	d = nil
+	d.GetInternal()
+}
+
+func TestDeploymentCauseDetails_GetType(tt *testing.T) {
+	d := &DeploymentCauseDetails{}
+	d.GetType()
+	d = nil
+	d.GetType()
+}
+
+func TestDeploymentCauseDetailsDigitalOceanUser_GetEmail(tt *testing.T) {
+	d := &DeploymentCauseDetailsDigitalOceanUser{}
+	d.GetEmail()
+	d = nil
+	d.GetEmail()
+}
+
+func TestDeploymentCauseDetailsDigitalOceanUser_GetFullName(tt *testing.T) {
+	d := &DeploymentCauseDetailsDigitalOceanUser{}
+	d.GetFullName()
+	d = nil
+	d.GetFullName()
+}
+
+func TestDeploymentCauseDetailsDigitalOceanUser_GetUUID(tt *testing.T) {
+	d := &DeploymentCauseDetailsDigitalOceanUser{}
+	d.GetUUID()
+	d = nil
+	d.GetUUID()
+}
+
+func TestDeploymentCauseDetailsDigitalOceanUserAction_GetName(tt *testing.T) {
+	d := &DeploymentCauseDetailsDigitalOceanUserAction{}
+	d.GetName()
+	d = nil
+	d.GetName()
+}
+
+func TestDeploymentCauseDetailsDigitalOceanUserAction_GetUser(tt *testing.T) {
+	d := &DeploymentCauseDetailsDigitalOceanUserAction{}
+	d.GetUser()
+	d = nil
+	d.GetUser()
+}
+
+func TestDeploymentCauseDetailsDOCRPush_GetImageDigest(tt *testing.T) {
+	d := &DeploymentCauseDetailsDOCRPush{}
+	d.GetImageDigest()
+	d = nil
+	d.GetImageDigest()
+}
+
+func TestDeploymentCauseDetailsDOCRPush_GetRegistry(tt *testing.T) {
+	d := &DeploymentCauseDetailsDOCRPush{}
+	d.GetRegistry()
+	d = nil
+	d.GetRegistry()
+}
+
+func TestDeploymentCauseDetailsDOCRPush_GetRepository(tt *testing.T) {
+	d := &DeploymentCauseDetailsDOCRPush{}
+	d.GetRepository()
+	d = nil
+	d.GetRepository()
+}
+
+func TestDeploymentCauseDetailsDOCRPush_GetTag(tt *testing.T) {
+	d := &DeploymentCauseDetailsDOCRPush{}
+	d.GetTag()
+	d = nil
+	d.GetTag()
+}
+
+func TestDeploymentCauseDetailsGitPush_GetCommitAuthor(tt *testing.T) {
+	d := &DeploymentCauseDetailsGitPush{}
+	d.GetCommitAuthor()
+	d = nil
+	d.GetCommitAuthor()
+}
+
+func TestDeploymentCauseDetailsGitPush_GetCommitMessage(tt *testing.T) {
+	d := &DeploymentCauseDetailsGitPush{}
+	d.GetCommitMessage()
+	d = nil
+	d.GetCommitMessage()
+}
+
+func TestDeploymentCauseDetailsGitPush_GetCommitSHA(tt *testing.T) {
+	d := &DeploymentCauseDetailsGitPush{}
+	d.GetCommitSHA()
+	d = nil
+	d.GetCommitSHA()
+}
+
+func TestDeploymentCauseDetailsGitPush_GetGitHub(tt *testing.T) {
+	d := &DeploymentCauseDetailsGitPush{}
+	d.GetGitHub()
+	d = nil
+	d.GetGitHub()
+}
+
+func TestDeploymentCauseDetailsGitPush_GetGitLab(tt *testing.T) {
+	d := &DeploymentCauseDetailsGitPush{}
+	d.GetGitLab()
+	d = nil
+	d.GetGitLab()
+}
+
+func TestDeploymentCauseDetailsGitPush_GetUsername(tt *testing.T) {
+	d := &DeploymentCauseDetailsGitPush{}
+	d.GetUsername()
+	d = nil
+	d.GetUsername()
+}
+
+func TestDeploymentFunctions_GetName(tt *testing.T) {
+	d := &DeploymentFunctions{}
+	d.GetName()
+	d = nil
+	d.GetName()
+}
+
+func TestDeploymentFunctions_GetNamespace(tt *testing.T) {
+	d := &DeploymentFunctions{}
+	d.GetNamespace()
+	d = nil
+	d.GetNamespace()
+}
+
+func TestDeploymentFunctions_GetSourceCommitHash(tt *testing.T) {
+	d := &DeploymentFunctions{}
+	d.GetSourceCommitHash()
+	d = nil
+	d.GetSourceCommitHash()
+}
+
+func TestDeploymentJob_GetBuildpacks(tt *testing.T) {
+	d := &DeploymentJob{}
+	d.GetBuildpacks()
+	d = nil
+	d.GetBuildpacks()
+}
+
+func TestDeploymentJob_GetName(tt *testing.T) {
+	d := &DeploymentJob{}
+	d.GetName()
+	d = nil
+	d.GetName()
+}
+
+func TestDeploymentJob_GetSourceCommitHash(tt *testing.T) {
+	d := &DeploymentJob{}
+	d.GetSourceCommitHash()
+	d = nil
+	d.GetSourceCommitHash()
+}
+
+func TestDeploymentProgress_GetErrorSteps(tt *testing.T) {
+	d := &DeploymentProgress{}
+	d.GetErrorSteps()
+	d = nil
+	d.GetErrorSteps()
+}
+
+func TestDeploymentProgress_GetPendingSteps(tt *testing.T) {
+	d := &DeploymentProgress{}
+	d.GetPendingSteps()
+	d = nil
+	d.GetPendingSteps()
+}
+
+func TestDeploymentProgress_GetRunningSteps(tt *testing.T) {
+	d := &DeploymentProgress{}
+	d.GetRunningSteps()
+	d = nil
+	d.GetRunningSteps()
+}
+
+func TestDeploymentProgress_GetSteps(tt *testing.T) {
+	d := &DeploymentProgress{}
+	d.GetSteps()
+	d = nil
+	d.GetSteps()
+}
+
+func TestDeploymentProgress_GetSuccessSteps(tt *testing.T) {
+	d := &DeploymentProgress{}
+	d.GetSuccessSteps()
+	d = nil
+	d.GetSuccessSteps()
+}
+
+func TestDeploymentProgress_GetSummarySteps(tt *testing.T) {
+	d := &DeploymentProgress{}
+	d.GetSummarySteps()
+	d = nil
+	d.GetSummarySteps()
+}
+
+func TestDeploymentProgress_GetTotalSteps(tt *testing.T) {
+	d := &DeploymentProgress{}
+	d.GetTotalSteps()
+	d = nil
+	d.GetTotalSteps()
+}
+
+func TestDeploymentProgressStep_GetComponentName(tt *testing.T) {
+	d := &DeploymentProgressStep{}
+	d.GetComponentName()
+	d = nil
+	d.GetComponentName()
+}
+
+func TestDeploymentProgressStep_GetEndedAt(tt *testing.T) {
+	d := &DeploymentProgressStep{}
+	d.GetEndedAt()
+	d = nil
+	d.GetEndedAt()
+}
+
+func TestDeploymentProgressStep_GetMessageBase(tt *testing.T) {
+	d := &DeploymentProgressStep{}
+	d.GetMessageBase()
+	d = nil
+	d.GetMessageBase()
+}
+
+func TestDeploymentProgressStep_GetName(tt *testing.T) {
+	d := &DeploymentProgressStep{}
+	d.GetName()
+	d = nil
+	d.GetName()
+}
+
+func TestDeploymentProgressStep_GetReason(tt *testing.T) {
+	d := &DeploymentProgressStep{}
+	d.GetReason()
+	d = nil
+	d.GetReason()
+}
+
+func TestDeploymentProgressStep_GetStartedAt(tt *testing.T) {
+	d := &DeploymentProgressStep{}
+	d.GetStartedAt()
+	d = nil
+	d.GetStartedAt()
+}
+
+func TestDeploymentProgressStep_GetStatus(tt *testing.T) {
+	d := &DeploymentProgressStep{}
+	d.GetStatus()
+	d = nil
+	d.GetStatus()
+}
+
+func TestDeploymentProgressStep_GetSteps(tt *testing.T) {
+	d := &DeploymentProgressStep{}
+	d.GetSteps()
+	d = nil
+	d.GetSteps()
+}
+
+func TestDeploymentProgressStepReason_GetCode(tt *testing.T) {
+	d := &DeploymentProgressStepReason{}
+	d.GetCode()
+	d = nil
+	d.GetCode()
+}
+
+func TestDeploymentProgressStepReason_GetMessage(tt *testing.T) {
+	d := &DeploymentProgressStepReason{}
+	d.GetMessage()
+	d = nil
+	d.GetMessage()
+}
+
+func TestDeploymentService_GetBuildpacks(tt *testing.T) {
+	d := &DeploymentService{}
+	d.GetBuildpacks()
+	d = nil
+	d.GetBuildpacks()
+}
+
+func TestDeploymentService_GetName(tt *testing.T) {
+	d := &DeploymentService{}
+	d.GetName()
+	d = nil
+	d.GetName()
+}
+
+func TestDeploymentService_GetSourceCommitHash(tt *testing.T) {
+	d := &DeploymentService{}
+	d.GetSourceCommitHash()
+	d = nil
+	d.GetSourceCommitHash()
+}
+
+func TestDeploymentStaticSite_GetBuildpacks(tt *testing.T) {
+	d := &DeploymentStaticSite{}
+	d.GetBuildpacks()
+	d = nil
+	d.GetBuildpacks()
+}
+
+func TestDeploymentStaticSite_GetName(tt *testing.T) {
+	d := &DeploymentStaticSite{}
+	d.GetName()
+	d = nil
+	d.GetName()
+}
+
+func TestDeploymentStaticSite_GetSourceCommitHash(tt *testing.T) {
+	d := &DeploymentStaticSite{}
+	d.GetSourceCommitHash()
+	d = nil
+	d.GetSourceCommitHash()
+}
+
+func TestDeploymentTiming_GetBuildBillable(tt *testing.T) {
+	d := &DeploymentTiming{}
+	d.GetBuildBillable()
+	d = nil
+	d.GetBuildBillable()
+}
+
+func TestDeploymentTiming_GetBuildTotal(tt *testing.T) {
+	d := &DeploymentTiming{}
+	d.GetBuildTotal()
+	d = nil
+	d.GetBuildTotal()
+}
+
+func TestDeploymentTiming_GetComponents(tt *testing.T) {
+	d := &DeploymentTiming{}
+	d.GetComponents()
+	d = nil
+	d.GetComponents()
+}
+
+func TestDeploymentTiming_GetDatabaseProvision(tt *testing.T) {
+	d := &DeploymentTiming{}
+	d.GetDatabaseProvision()
+	d = nil
+	d.GetDatabaseProvision()
+}
+
+func TestDeploymentTiming_GetDeploying(tt *testing.T) {
+	d := &DeploymentTiming{}
+	d.GetDeploying()
+	d = nil
+	d.GetDeploying()
+}
+
+func TestDeploymentTiming_GetPending(tt *testing.T) {
+	d := &DeploymentTiming{}
+	d.GetPending()
+	d = nil
+	d.GetPending()
+}
+
+func TestDeploymentTimingComponent_GetBuildBillable(tt *testing.T) {
+	d := &DeploymentTimingComponent{}
+	d.GetBuildBillable()
+	d = nil
+	d.GetBuildBillable()
+}
+
+func TestDeploymentTimingComponent_GetName(tt *testing.T) {
+	d := &DeploymentTimingComponent{}
+	d.GetName()
+	d = nil
+	d.GetName()
+}
+
+func TestDeploymentWorker_GetBuildpacks(tt *testing.T) {
+	d := &DeploymentWorker{}
+	d.GetBuildpacks()
+	d = nil
+	d.GetBuildpacks()
+}
+
+func TestDeploymentWorker_GetName(tt *testing.T) {
+	d := &DeploymentWorker{}
+	d.GetName()
+	d = nil
+	d.GetName()
+}
+
+func TestDeploymentWorker_GetSourceCommitHash(tt *testing.T) {
+	d := &DeploymentWorker{}
+	d.GetSourceCommitHash()
+	d = nil
+	d.GetSourceCommitHash()
+}
+
+func TestDeployTemplate_GetSpec(tt *testing.T) {
+	d := &DeployTemplate{}
+	d.GetSpec()
+	d = nil
+	d.GetSpec()
+}
+
+func TestDetectRequest_GetCommitSHA(tt *testing.T) {
+	d := &DetectRequest{}
+	d.GetCommitSHA()
+	d = nil
+	d.GetCommitSHA()
+}
+
+func TestDetectRequest_GetGit(tt *testing.T) {
+	d := &DetectRequest{}
+	d.GetGit()
+	d = nil
+	d.GetGit()
+}
+
+func TestDetectRequest_GetGitHub(tt *testing.T) {
+	d := &DetectRequest{}
+	d.GetGitHub()
+	d = nil
+	d.GetGitHub()
+}
+
+func TestDetectRequest_GetGitLab(tt *testing.T) {
+	d := &DetectRequest{}
+	d.GetGitLab()
+	d = nil
+	d.GetGitLab()
+}
+
+func TestDetectRequest_GetSourceDir(tt *testing.T) {
+	d := &DetectRequest{}
+	d.GetSourceDir()
+	d = nil
+	d.GetSourceDir()
+}
+
+func TestDetectResponse_GetComponents(tt *testing.T) {
+	d := &DetectResponse{}
+	d.GetComponents()
+	d = nil
+	d.GetComponents()
+}
+
+func TestDetectResponse_GetTemplate(tt *testing.T) {
+	d := &DetectResponse{}
+	d.GetTemplate()
+	d = nil
+	d.GetTemplate()
+}
+
+func TestDetectResponse_GetTemplateError(tt *testing.T) {
+	d := &DetectResponse{}
+	d.GetTemplateError()
+	d = nil
+	d.GetTemplateError()
+}
+
+func TestDetectResponse_GetTemplateFound(tt *testing.T) {
+	d := &DetectResponse{}
+	d.GetTemplateFound()
+	d = nil
+	d.GetTemplateFound()
+}
+
+func TestDetectResponse_GetTemplateValid(tt *testing.T) {
+	d := &DetectResponse{}
+	d.GetTemplateValid()
+	d = nil
+	d.GetTemplateValid()
+}
+
+func TestDetectResponseComponent_GetBuildCommand(tt *testing.T) {
+	d := &DetectResponseComponent{}
+	d.GetBuildCommand()
+	d = nil
+	d.GetBuildCommand()
+}
+
+func TestDetectResponseComponent_GetBuildpacks(tt *testing.T) {
+	d := &DetectResponseComponent{}
+	d.GetBuildpacks()
+	d = nil
+	d.GetBuildpacks()
+}
+
+func TestDetectResponseComponent_GetDockerfiles(tt *testing.T) {
+	d := &DetectResponseComponent{}
+	d.GetDockerfiles()
+	d = nil
+	d.GetDockerfiles()
+}
+
+func TestDetectResponseComponent_GetEnvironmentSlug(tt *testing.T) {
+	d := &DetectResponseComponent{}
+	d.GetEnvironmentSlug()
+	d = nil
+	d.GetEnvironmentSlug()
+}
+
+func TestDetectResponseComponent_GetEnvVars(tt *testing.T) {
+	d := &DetectResponseComponent{}
+	d.GetEnvVars()
+	d = nil
+	d.GetEnvVars()
+}
+
+func TestDetectResponseComponent_GetHTTPPorts(tt *testing.T) {
+	d := &DetectResponseComponent{}
+	d.GetHTTPPorts()
+	d = nil
+	d.GetHTTPPorts()
+}
+
+func TestDetectResponseComponent_GetRunCommand(tt *testing.T) {
+	d := &DetectResponseComponent{}
+	d.GetRunCommand()
+	d = nil
+	d.GetRunCommand()
+}
+
+func TestDetectResponseComponent_GetServerlessPackages(tt *testing.T) {
+	d := &DetectResponseComponent{}
+	d.GetServerlessPackages()
+	d = nil
+	d.GetServerlessPackages()
+}
+
+func TestDetectResponseComponent_GetSourceDir(tt *testing.T) {
+	d := &DetectResponseComponent{}
+	d.GetSourceDir()
+	d = nil
+	d.GetSourceDir()
+}
+
+func TestDetectResponseComponent_GetStrategy(tt *testing.T) {
+	d := &DetectResponseComponent{}
+	d.GetStrategy()
+	d = nil
+	d.GetStrategy()
+}
+
+func TestDetectResponseComponent_GetTypes(tt *testing.T) {
+	d := &DetectResponseComponent{}
+	d.GetTypes()
+	d = nil
+	d.GetTypes()
+}
+
+func TestDetectResponseServerlessFunction_GetLimits(tt *testing.T) {
+	d := &DetectResponseServerlessFunction{}
+	d.GetLimits()
+	d = nil
+	d.GetLimits()
+}
+
+func TestDetectResponseServerlessFunction_GetName(tt *testing.T) {
+	d := &DetectResponseServerlessFunction{}
+	d.GetName()
+	d = nil
+	d.GetName()
+}
+
+func TestDetectResponseServerlessFunction_GetPackage(tt *testing.T) {
+	d := &DetectResponseServerlessFunction{}
+	d.GetPackage()
+	d = nil
+	d.GetPackage()
+}
+
+func TestDetectResponseServerlessFunction_GetRuntime(tt *testing.T) {
+	d := &DetectResponseServerlessFunction{}
+	d.GetRuntime()
+	d = nil
+	d.GetRuntime()
+}
+
+func TestDetectResponseServerlessFunctionLimits_GetLogs(tt *testing.T) {
+	d := &DetectResponseServerlessFunctionLimits{}
+	d.GetLogs()
+	d = nil
+	d.GetLogs()
+}
+
+func TestDetectResponseServerlessFunctionLimits_GetMemory(tt *testing.T) {
+	d := &DetectResponseServerlessFunctionLimits{}
+	d.GetMemory()
+	d = nil
+	d.GetMemory()
+}
+
+func TestDetectResponseServerlessFunctionLimits_GetTimeout(tt *testing.T) {
+	d := &DetectResponseServerlessFunctionLimits{}
+	d.GetTimeout()
+	d = nil
+	d.GetTimeout()
+}
+
+func TestDetectResponseServerlessPackage_GetFunctions(tt *testing.T) {
+	d := &DetectResponseServerlessPackage{}
+	d.GetFunctions()
+	d = nil
+	d.GetFunctions()
+}
+
+func TestDetectResponseServerlessPackage_GetName(tt *testing.T) {
+	d := &DetectResponseServerlessPackage{}
+	d.GetName()
+	d = nil
+	d.GetName()
+}
+
+func TestGitHubSourceSpec_GetBranch(tt *testing.T) {
+	g := &GitHubSourceSpec{}
+	g.GetBranch()
+	g = nil
+	g.GetBranch()
+}
+
+func TestGitHubSourceSpec_GetDeployOnPush(tt *testing.T) {
+	g := &GitHubSourceSpec{}
+	g.GetDeployOnPush()
+	g = nil
+	g.GetDeployOnPush()
+}
+
+func TestGitHubSourceSpec_GetRepo(tt *testing.T) {
+	g := &GitHubSourceSpec{}
+	g.GetRepo()
+	g = nil
+	g.GetRepo()
+}
+
+func TestGitLabSourceSpec_GetBranch(tt *testing.T) {
+	g := &GitLabSourceSpec{}
+	g.GetBranch()
+	g = nil
+	g.GetBranch()
+}
+
+func TestGitLabSourceSpec_GetDeployOnPush(tt *testing.T) {
+	g := &GitLabSourceSpec{}
+	g.GetDeployOnPush()
+	g = nil
+	g.GetDeployOnPush()
+}
+
+func TestGitLabSourceSpec_GetRepo(tt *testing.T) {
+	g := &GitLabSourceSpec{}
+	g.GetRepo()
+	g = nil
+	g.GetRepo()
+}
+
+func TestGitSourceSpec_GetBranch(tt *testing.T) {
+	g := &GitSourceSpec{}
+	g.GetBranch()
+	g = nil
+	g.GetBranch()
+}
+
+func TestGitSourceSpec_GetRepoCloneURL(tt *testing.T) {
+	g := &GitSourceSpec{}
+	g.GetRepoCloneURL()
+	g = nil
+	g.GetRepoCloneURL()
+}
+
+func TestImageSourceSpec_GetDeployOnPush(tt *testing.T) {
+	i := &ImageSourceSpec{}
+	i.GetDeployOnPush()
+	i = nil
+	i.GetDeployOnPush()
+}
+
+func TestImageSourceSpec_GetRegistry(tt *testing.T) {
+	i := &ImageSourceSpec{}
+	i.GetRegistry()
+	i = nil
+	i.GetRegistry()
+}
+
+func TestImageSourceSpec_GetRegistryType(tt *testing.T) {
+	i := &ImageSourceSpec{}
+	i.GetRegistryType()
+	i = nil
+	i.GetRegistryType()
+}
+
+func TestImageSourceSpec_GetRepository(tt *testing.T) {
+	i := &ImageSourceSpec{}
+	i.GetRepository()
+	i = nil
+	i.GetRepository()
+}
+
+func TestImageSourceSpec_GetTag(tt *testing.T) {
+	i := &ImageSourceSpec{}
+	i.GetTag()
+	i = nil
+	i.GetTag()
+}
+
+func TestImageSourceSpecDeployOnPush_GetEnabled(tt *testing.T) {
+	i := &ImageSourceSpecDeployOnPush{}
+	i.GetEnabled()
+	i = nil
+	i.GetEnabled()
+}
+
+func TestListBuildpacksResponse_GetBuildpacks(tt *testing.T) {
+	l := &ListBuildpacksResponse{}
+	l.GetBuildpacks()
+	l = nil
+	l.GetBuildpacks()
+}
+
+func TestUpgradeBuildpackResponse_GetAffectedComponents(tt *testing.T) {
+	u := &UpgradeBuildpackResponse{}
+	u.GetAffectedComponents()
+	u = nil
+	u.GetAffectedComponents()
+}
+
+func TestUpgradeBuildpackResponse_GetDeployment(tt *testing.T) {
+	u := &UpgradeBuildpackResponse{}
+	u.GetDeployment()
+	u = nil
+	u.GetDeployment()
+}
diff --git a/apps_test.go b/apps_test.go
new file mode 100644
index 0000000..49a6506
--- /dev/null
+++ b/apps_test.go
@@ -0,0 +1,1001 @@
+package godo
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+var (
+	testAppSpec = &AppSpec{
+		Name:   "app-name",
+		Region: testAppRegion.Slug,
+		Services: []*AppServiceSpec{{
+			Name: "service-name",
+			Routes: []*AppRouteSpec{{
+				Path: "/",
+			}},
+			RunCommand:     "run-command",
+			BuildCommand:   "build-command",
+			DockerfilePath: "Dockerfile",
+			GitHub: &GitHubSourceSpec{
+				Repo:   "owner/service",
+				Branch: "branch",
+			},
+			InstanceSizeSlug: "professional-xs",
+			InstanceCount:    1,
+		}},
+		Workers: []*AppWorkerSpec{{
+			Name:           "worker-name",
+			RunCommand:     "run-command",
+			BuildCommand:   "build-command",
+			DockerfilePath: "Dockerfile",
+			GitHub: &GitHubSourceSpec{
+				Repo:   "owner/worker",
+				Branch: "branch",
+			},
+			InstanceSizeSlug: "professional-xs",
+			InstanceCount:    1,
+		}},
+		StaticSites: []*AppStaticSiteSpec{{
+			Name:         "static-name",
+			BuildCommand: "build-command",
+			Git: &GitSourceSpec{
+				RepoCloneURL: "git@githost.com/owner/static.git",
+				Branch:       "branch",
+			},
+			OutputDir: "out",
+		}},
+		Jobs: []*AppJobSpec{{
+			Name:           "job-name",
+			RunCommand:     "run-command",
+			BuildCommand:   "build-command",
+			DockerfilePath: "Dockerfile",
+			GitHub: &GitHubSourceSpec{
+				Repo:   "owner/job",
+				Branch: "branch",
+			},
+			InstanceSizeSlug: "professional-xs",
+			InstanceCount:    1,
+		}},
+		Databases: []*AppDatabaseSpec{{
+			Name:        "db",
+			Engine:      AppDatabaseSpecEngine_MySQL,
+			Version:     "8",
+			Size:        "size",
+			NumNodes:    1,
+			Production:  true,
+			ClusterName: "cluster-name",
+			DBName:      "app",
+			DBUser:      "appuser",
+		}},
+		Functions: []*AppFunctionsSpec{{
+			Name: "functions-name",
+			GitHub: &GitHubSourceSpec{
+				Repo:   "git@githost.com/owner/functions.git",
+				Branch: "branch",
+			},
+		}},
+		Domains: []*AppDomainSpec{
+			{
+				Domain: "example.com",
+				Type:   AppDomainSpecType_Primary,
+			},
+		},
+	}
+
+	testAppRegion = AppRegion{
+		Slug:        "ams",
+		Label:       "Amsterdam",
+		Flag:        "netherlands",
+		Continent:   "Europe",
+		DataCenters: []string{"ams3"},
+		Default:     true,
+	}
+
+	testDeployment = Deployment{
+		ID:   "08f10d33-94c3-4492-b9a3-1603e9ab7fe4",
+		Spec: testAppSpec,
+		Services: []*DeploymentService{{
+			Name:             "service-name",
+			SourceCommitHash: "service-hash",
+		}},
+		Workers: []*DeploymentWorker{{
+			Name:             "worker-name",
+			SourceCommitHash: "worker-hash",
+		}},
+		StaticSites: []*DeploymentStaticSite{{
+			Name:             "static-name",
+			SourceCommitHash: "static-hash",
+		}},
+		Jobs: []*DeploymentJob{{
+			Name:             "job-name",
+			SourceCommitHash: "job-hash",
+		}},
+		Functions: []*DeploymentFunctions{{
+			Name:             "functions-name",
+			SourceCommitHash: "functions-hash",
+		}},
+		CreatedAt:          time.Unix(1595959200, 0).UTC(),
+		UpdatedAt:          time.Unix(1595959200, 0).UTC(),
+		PhaseLastUpdatedAt: time.Unix(1595959200, 0).UTC(),
+		Phase:              DeploymentPhase_Active,
+		Progress: &DeploymentProgress{
+			SuccessSteps: 1,
+			TotalSteps:   1,
+			Steps: []*DeploymentProgressStep{{
+				Name:      "step",
+				Status:    DeploymentProgressStepStatus_Success,
+				StartedAt: time.Unix(1595959200, 0).UTC(),
+				EndedAt:   time.Unix(1595959200, 0).UTC(),
+				Steps: []*DeploymentProgressStep{{
+					Name:      "sub",
+					Status:    DeploymentProgressStepStatus_Success,
+					StartedAt: time.Unix(1595959200, 0).UTC(),
+					EndedAt:   time.Unix(1595959200, 0).UTC(),
+				}},
+			}},
+		},
+	}
+
+	testApp = App{
+		ID:                      "1c70f8f3-106e-428b-ae6d-bfc693c77536",
+		Spec:                    testAppSpec,
+		DefaultIngress:          "example.com",
+		LiveURL:                 "https://example.com",
+		LiveURLBase:             "https://example.com",
+		LiveDomain:              "example.com",
+		ActiveDeployment:        &testDeployment,
+		InProgressDeployment:    &testDeployment,
+		LastDeploymentCreatedAt: time.Unix(1595959200, 0).UTC(),
+		LastDeploymentActiveAt:  time.Unix(1595959200, 0).UTC(),
+		CreatedAt:               time.Unix(1595959200, 0).UTC(),
+		UpdatedAt:               time.Unix(1595959200, 0).UTC(),
+		Region:                  &testAppRegion,
+		TierSlug:                testAppTier.Slug,
+	}
+
+	testAppTier = AppTier{
+		Name:                 "Test",
+		Slug:                 "test",
+		EgressBandwidthBytes: "10240",
+		BuildSeconds:         "3000",
+	}
+
+	testInstanceSize = AppInstanceSize{
+		Name:            "Basic XXS",
+		Slug:            "basic-xxs",
+		CPUType:         AppInstanceSizeCPUType_Dedicated,
+		CPUs:            "1",
+		MemoryBytes:     "536870912",
+		USDPerMonth:     "5",
+		USDPerSecond:    "0.0000018896447",
+		TierSlug:        "basic",
+		TierUpgradeTo:   "professional-xs",
+		TierDowngradeTo: "basic-xxxs",
+	}
+
+	testAlerts = []*AppAlert{
+		{
+			ID: "c586fc0d-e8e2-4c50-9bf6-6c0a6b2ed2a7",
+			Spec: &AppAlertSpec{
+				Rule: AppAlertSpecRule_DeploymentFailed,
+			},
+			Emails: []string{"test@example.com", "test2@example.com"},
+			SlackWebhooks: []*AppAlertSlackWebhook{
+				{
+					URL:     "https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX",
+					Channel: "channel name",
+				},
+			},
+		},
+	}
+
+	testAlert = AppAlert{
+		ID: "c586fc0d-e8e2-4c50-9bf6-6c0a6b2ed2a7",
+		Spec: &AppAlertSpec{
+			Rule: AppAlertSpecRule_DeploymentFailed,
+		},
+		Emails: []string{"test@example.com", "test2@example.com"},
+		SlackWebhooks: []*AppAlertSlackWebhook{
+			{
+				URL:     "https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX",
+				Channel: "channel name",
+			},
+		},
+	}
+
+	testBuildpacks = []*Buildpack{
+		{
+			ID:           "digitalocean/node",
+			Name:         "Node.js",
+			Version:      "1.2.3",
+			MajorVersion: 1,
+		},
+		{
+			ID:           "digitalocean/php",
+			Name:         "PHP",
+			Version:      "0.3.5",
+			MajorVersion: 0,
+		},
+	}
+)
+
+func TestApps_CreateApp(t *testing.T) {
+	setup()
+	defer teardown()
+
+	ctx := context.Background()
+
+	mux.HandleFunc("/v2/apps", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodPost)
+		var req AppCreateRequest
+		err := json.NewDecoder(r.Body).Decode(&req)
+		require.NoError(t, err)
+		assert.Equal(t, testAppSpec, req.Spec)
+
+		json.NewEncoder(w).Encode(&appRoot{App: &testApp})
+	})
+
+	app, _, err := client.Apps.Create(ctx, &AppCreateRequest{Spec: testAppSpec})
+	require.NoError(t, err)
+	assert.Equal(t, &testApp, app)
+}
+
+func TestApps_GetApp(t *testing.T) {
+	setup()
+	defer teardown()
+
+	ctx := context.Background()
+
+	mux.HandleFunc(fmt.Sprintf("/v2/apps/%s", testApp.ID), func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+
+		json.NewEncoder(w).Encode(&appRoot{App: &testApp})
+	})
+
+	app, _, err := client.Apps.Get(ctx, testApp.ID)
+	require.NoError(t, err)
+	assert.Equal(t, &testApp, app)
+}
+
+func TestApps_ListApp(t *testing.T) {
+	t.Run("WithProjects false/not passed in", func(t *testing.T) {
+		setup()
+		defer teardown()
+
+		ctx := context.Background()
+
+		mux.HandleFunc("/v2/apps", func(w http.ResponseWriter, r *http.Request) {
+			testMethod(t, r, http.MethodGet)
+
+			json.NewEncoder(w).Encode(&appsRoot{Apps: []*App{&testApp}, Meta: &Meta{Total: 1}, Links: &Links{}})
+		})
+
+		apps, resp, err := client.Apps.List(ctx, nil)
+		require.NoError(t, err)
+		assert.Equal(t, []*App{&testApp}, apps)
+		assert.Equal(t, 1, resp.Meta.Total)
+		currentPage, err := resp.Links.CurrentPage()
+		require.NoError(t, err)
+		assert.Equal(t, 1, currentPage)
+	})
+
+	t.Run("WithProjects true", func(t *testing.T) {
+		setup()
+		defer teardown()
+
+		ctx := context.Background()
+
+		mux.HandleFunc("/v2/apps", func(w http.ResponseWriter, r *http.Request) {
+			testMethod(t, r, http.MethodGet)
+
+			json.NewEncoder(w).Encode(&appsRoot{Apps: []*App{{ProjectID: "something"}}, Meta: &Meta{Total: 1}, Links: &Links{}})
+		})
+
+		apps, resp, err := client.Apps.List(ctx, &ListOptions{WithProjects: true})
+		require.NoError(t, err)
+		assert.Equal(t, "something", apps[0].ProjectID)
+		assert.Equal(t, 1, resp.Meta.Total)
+		currentPage, err := resp.Links.CurrentPage()
+		require.NoError(t, err)
+		assert.Equal(t, 1, currentPage)
+	})
+}
+
+func TestApps_UpdateApp(t *testing.T) {
+	setup()
+	defer teardown()
+
+	ctx := context.Background()
+
+	updatedSpec := *testAppSpec
+	updatedSpec.Name = "new-name"
+
+	mux.HandleFunc(fmt.Sprintf("/v2/apps/%s", testApp.ID), func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodPut)
+		var req AppUpdateRequest
+		err := json.NewDecoder(r.Body).Decode(&req)
+		require.NoError(t, err)
+		assert.Equal(t, &updatedSpec, req.Spec)
+
+		json.NewEncoder(w).Encode(&appRoot{App: &testApp})
+	})
+
+	app, _, err := client.Apps.Update(ctx, testApp.ID, &AppUpdateRequest{Spec: &updatedSpec})
+	require.NoError(t, err)
+	assert.Equal(t, &testApp, app)
+}
+
+func TestApps_DeleteApp(t *testing.T) {
+	setup()
+	defer teardown()
+
+	ctx := context.Background()
+
+	mux.HandleFunc(fmt.Sprintf("/v2/apps/%s", testApp.ID), func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodDelete)
+	})
+
+	_, err := client.Apps.Delete(ctx, testApp.ID)
+	require.NoError(t, err)
+}
+
+func TestApps_ProposeApp(t *testing.T) {
+	setup()
+	defer teardown()
+
+	ctx := context.Background()
+
+	spec := &AppSpec{
+		Name: "sample-golang",
+		Services: []*AppServiceSpec{{
+			Name:            "web",
+			EnvironmentSlug: "go",
+			RunCommand:      "bin/sample-golang",
+			GitHub: &GitHubSourceSpec{
+				Repo:   "digitalocean/sample-golang",
+				Branch: "branch",
+			},
+		}},
+	}
+
+	mux.HandleFunc("/v2/apps/propose", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodPost)
+		var req AppProposeRequest
+		err := json.NewDecoder(r.Body).Decode(&req)
+		require.NoError(t, err)
+		assert.Equal(t, spec, req.Spec)
+		assert.Equal(t, testApp.ID, req.AppID)
+
+		json.NewEncoder(w).Encode(&AppProposeResponse{
+			Spec: &AppSpec{
+				Name: "sample-golang",
+				Services: []*AppServiceSpec{{
+					Name:            "web",
+					EnvironmentSlug: "go",
+					RunCommand:      "bin/sample-golang",
+					GitHub: &GitHubSourceSpec{
+						Repo:   "digitalocean/sample-golang",
+						Branch: "branch",
+					},
+					InstanceCount: 1,
+					Routes: []*AppRouteSpec{{
+						Path: "/",
+					}},
+				}},
+			},
+			AppNameAvailable: true,
+		})
+	})
+
+	res, _, err := client.Apps.Propose(ctx, &AppProposeRequest{
+		Spec:  spec,
+		AppID: testApp.ID,
+	})
+	require.NoError(t, err)
+	assert.Equal(t, int64(1), res.Spec.Services[0].InstanceCount)
+	assert.Equal(t, "/", res.Spec.Services[0].Routes[0].Path)
+	assert.True(t, res.AppNameAvailable)
+}
+
+func TestApps_CreateDeployment(t *testing.T) {
+	for _, forceBuild := range []bool{true, false} {
+		t.Run(fmt.Sprintf("ForceBuild_%t", forceBuild), func(t *testing.T) {
+			setup()
+			defer teardown()
+
+			ctx := context.Background()
+
+			mux.HandleFunc(fmt.Sprintf("/v2/apps/%s/deployments", testApp.ID), func(w http.ResponseWriter, r *http.Request) {
+				testMethod(t, r, http.MethodPost)
+
+				var req DeploymentCreateRequest
+				err := json.NewDecoder(r.Body).Decode(&req)
+				require.NoError(t, err)
+				assert.Equal(t, forceBuild, req.ForceBuild)
+
+				json.NewEncoder(w).Encode(&deploymentRoot{Deployment: &testDeployment})
+			})
+
+			deployment, _, err := client.Apps.CreateDeployment(ctx, testApp.ID, &DeploymentCreateRequest{
+				ForceBuild: forceBuild,
+			})
+			require.NoError(t, err)
+			assert.Equal(t, &testDeployment, deployment)
+		})
+	}
+}
+
+func TestApps_GetDeployment(t *testing.T) {
+	setup()
+	defer teardown()
+
+	ctx := context.Background()
+
+	mux.HandleFunc(fmt.Sprintf("/v2/apps/%s/deployments/%s", testApp.ID, testDeployment.ID), func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+
+		json.NewEncoder(w).Encode(&deploymentRoot{Deployment: &testDeployment})
+	})
+
+	deployment, _, err := client.Apps.GetDeployment(ctx, testApp.ID, testDeployment.ID)
+	require.NoError(t, err)
+	assert.Equal(t, &testDeployment, deployment)
+}
+
+func TestApps_ListDeployments(t *testing.T) {
+	setup()
+	defer teardown()
+
+	ctx := context.Background()
+
+	mux.HandleFunc(fmt.Sprintf("/v2/apps/%s/deployments", testApp.ID), func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+
+		json.NewEncoder(w).Encode(&deploymentsRoot{Deployments: []*Deployment{&testDeployment}, Meta: &Meta{Total: 1}, Links: &Links{}})
+	})
+
+	deployments, resp, err := client.Apps.ListDeployments(ctx, testApp.ID, nil)
+	require.NoError(t, err)
+	assert.Equal(t, []*Deployment{&testDeployment}, deployments)
+	assert.Equal(t, 1, resp.Meta.Total)
+	currentPage, err := resp.Links.CurrentPage()
+	require.NoError(t, err)
+	assert.Equal(t, 1, currentPage)
+}
+
+func TestApps_GetLogs(t *testing.T) {
+	setup()
+	defer teardown()
+
+	ctx := context.Background()
+
+	mux.HandleFunc(fmt.Sprintf("/v2/apps/%s/deployments/%s/logs", testApp.ID, testDeployment.ID), func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+
+		assert.Equal(t, "RUN", r.URL.Query().Get("type"))
+		assert.Equal(t, "true", r.URL.Query().Get("follow"))
+		assert.Equal(t, "1", r.URL.Query().Get("tail_lines"))
+		_, hasComponent := r.URL.Query()["component_name"]
+		assert.False(t, hasComponent)
+
+		json.NewEncoder(w).Encode(&AppLogs{LiveURL: "https://live.logs.url"})
+	})
+
+	logs, _, err := client.Apps.GetLogs(ctx, testApp.ID, testDeployment.ID, "", AppLogTypeRun, true, 1)
+	require.NoError(t, err)
+	assert.NotEmpty(t, logs.LiveURL)
+}
+
+func TestApps_GetLogs_ActiveDeployment(t *testing.T) {
+	setup()
+	defer teardown()
+
+	ctx := context.Background()
+
+	mux.HandleFunc(fmt.Sprintf("/v2/apps/%s/logs", testApp.ID), func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+
+		assert.Equal(t, "RUN", r.URL.Query().Get("type"))
+		assert.Equal(t, "true", r.URL.Query().Get("follow"))
+		assert.Equal(t, "1", r.URL.Query().Get("tail_lines"))
+		_, hasComponent := r.URL.Query()["component_name"]
+		assert.False(t, hasComponent)
+
+		json.NewEncoder(w).Encode(&AppLogs{LiveURL: "https://live.logs.url"})
+	})
+
+	logs, _, err := client.Apps.GetLogs(ctx, testApp.ID, "", "", AppLogTypeRun, true, 1)
+	require.NoError(t, err)
+	assert.NotEmpty(t, logs.LiveURL)
+}
+
+func TestApps_GetLogs_component(t *testing.T) {
+	setup()
+	defer teardown()
+
+	ctx := context.Background()
+
+	mux.HandleFunc(fmt.Sprintf("/v2/apps/%s/deployments/%s/logs", testApp.ID, testDeployment.ID), func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+
+		assert.Equal(t, "RUN", r.URL.Query().Get("type"))
+		assert.Equal(t, "true", r.URL.Query().Get("follow"))
+		assert.Equal(t, "1", r.URL.Query().Get("tail_lines"))
+		assert.Equal(t, "service-name", r.URL.Query().Get("component_name"))
+
+		json.NewEncoder(w).Encode(&AppLogs{LiveURL: "https://live.logs.url"})
+	})
+
+	logs, _, err := client.Apps.GetLogs(ctx, testApp.ID, testDeployment.ID, "service-name", AppLogTypeRun, true, 1)
+	require.NoError(t, err)
+	assert.NotEmpty(t, logs.LiveURL)
+}
+
+func TestApps_ListRegions(t *testing.T) {
+	setup()
+	defer teardown()
+
+	ctx := context.Background()
+
+	mux.HandleFunc("/v2/apps/regions", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+
+		json.NewEncoder(w).Encode(&appRegionsRoot{Regions: []*AppRegion{&testAppRegion}})
+	})
+
+	regions, _, err := client.Apps.ListRegions(ctx)
+	require.NoError(t, err)
+	assert.Equal(t, []*AppRegion{&testAppRegion}, regions)
+}
+
+func TestApps_ListTiers(t *testing.T) {
+	setup()
+	defer teardown()
+
+	ctx := context.Background()
+
+	mux.HandleFunc("/v2/apps/tiers", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+
+		json.NewEncoder(w).Encode(&appTiersRoot{Tiers: []*AppTier{&testAppTier}})
+	})
+
+	tiers, _, err := client.Apps.ListTiers(ctx)
+	require.NoError(t, err)
+	assert.Equal(t, []*AppTier{&testAppTier}, tiers)
+}
+
+func TestApps_GetTier(t *testing.T) {
+	setup()
+	defer teardown()
+
+	ctx := context.Background()
+
+	mux.HandleFunc(fmt.Sprintf("/v2/apps/tiers/%s", testAppTier.Slug), func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+
+		json.NewEncoder(w).Encode(&appTierRoot{Tier: &testAppTier})
+	})
+
+	tier, _, err := client.Apps.GetTier(ctx, testAppTier.Slug)
+	require.NoError(t, err)
+	assert.Equal(t, &testAppTier, tier)
+}
+
+func TestApps_ListInstanceSizes(t *testing.T) {
+	setup()
+	defer teardown()
+
+	ctx := context.Background()
+
+	mux.HandleFunc("/v2/apps/tiers/instance_sizes", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+
+		json.NewEncoder(w).Encode(&instanceSizesRoot{InstanceSizes: []*AppInstanceSize{&testInstanceSize}})
+	})
+
+	instanceSizes, _, err := client.Apps.ListInstanceSizes(ctx)
+	require.NoError(t, err)
+	assert.Equal(t, []*AppInstanceSize{&testInstanceSize}, instanceSizes)
+}
+
+func TestApps_GetInstanceSize(t *testing.T) {
+	setup()
+	defer teardown()
+
+	ctx := context.Background()
+
+	mux.HandleFunc(fmt.Sprintf("/v2/apps/tiers/instance_sizes/%s", testInstanceSize.Slug), func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+
+		json.NewEncoder(w).Encode(&instanceSizeRoot{InstanceSize: &testInstanceSize})
+	})
+
+	instancesize, _, err := client.Apps.GetInstanceSize(ctx, testInstanceSize.Slug)
+	require.NoError(t, err)
+	assert.Equal(t, &testInstanceSize, instancesize)
+}
+
+func TestApps_ListAppAlerts(t *testing.T) {
+	setup()
+	defer teardown()
+
+	ctx := context.Background()
+
+	mux.HandleFunc(fmt.Sprintf("/v2/apps/%s/alerts", testApp.ID), func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+
+		json.NewEncoder(w).Encode(&appAlertsRoot{Alerts: testAlerts})
+	})
+
+	appAlerts, _, err := client.Apps.ListAlerts(ctx, testApp.ID)
+	require.NoError(t, err)
+	assert.Equal(t, testAlerts, appAlerts)
+}
+
+func TestApps_UpdateAppAlertDestinations(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc(fmt.Sprintf("/v2/apps/%s/alerts/%s/destinations", testApp.ID, testAlert.ID), func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodPost)
+
+		json.NewEncoder(w).Encode(&appAlertRoot{Alert: &testAlert})
+	})
+
+	appAlert, _, err := client.Apps.UpdateAlertDestinations(ctx, testApp.ID, testAlert.ID, &AlertDestinationUpdateRequest{Emails: testAlert.Emails, SlackWebhooks: testAlert.SlackWebhooks})
+	require.NoError(t, err)
+	assert.Equal(t, &testAlert, appAlert)
+}
+
+func TestApps_Detect(t *testing.T) {
+	setup()
+	defer teardown()
+
+	ctx := context.Background()
+
+	gitSource := &GitSourceSpec{
+		RepoCloneURL: "https://github.com/digitalocean/sample-nodejs.git",
+		Branch:       "main",
+	}
+	component := &DetectResponseComponent{
+		Strategy: DetectResponseType_Buildpack,
+		EnvVars: []*AppVariableDefinition{{
+			Key:   "k",
+			Value: "v",
+		}},
+	}
+
+	mux.HandleFunc("/v2/apps/detect", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodPost)
+		var req DetectRequest
+		err := json.NewDecoder(r.Body).Decode(&req)
+		require.NoError(t, err)
+		assert.Equal(t, gitSource, req.Git)
+		json.NewEncoder(w).Encode(&DetectResponse{
+			Components: []*DetectResponseComponent{component},
+		})
+	})
+
+	res, _, err := client.Apps.Detect(ctx, &DetectRequest{
+		Git: gitSource,
+	})
+	require.NoError(t, err)
+	assert.Equal(t, component, res.Components[0])
+}
+
+func TestApps_ListBuildpacks(t *testing.T) {
+	setup()
+	defer teardown()
+
+	ctx := context.Background()
+
+	mux.HandleFunc("/v2/apps/buildpacks", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+
+		json.NewEncoder(w).Encode(&buildpacksRoot{Buildpacks: testBuildpacks})
+	})
+
+	bps, _, err := client.Apps.ListBuildpacks(ctx)
+	require.NoError(t, err)
+	assert.Equal(t, testBuildpacks, bps)
+}
+
+func TestApps_UpgradeBuildpack(t *testing.T) {
+	setup()
+	defer teardown()
+
+	ctx := context.Background()
+
+	response := &UpgradeBuildpackResponse{
+		AffectedComponents: []string{"api", "frontend"},
+		Deployment:         &testDeployment,
+	}
+	opts := UpgradeBuildpackOptions{
+		BuildpackID:       "digitalocean/node",
+		MajorVersion:      3,
+		TriggerDeployment: true,
+	}
+
+	mux.HandleFunc(fmt.Sprintf("/v2/apps/%s/upgrade_buildpack", testApp.ID), func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodPost)
+
+		var gotOpts UpgradeBuildpackOptions
+		err := json.NewDecoder(r.Body).Decode(&gotOpts)
+		require.NoError(t, err)
+		assert.Equal(t, opts, gotOpts)
+
+		json.NewEncoder(w).Encode(response)
+	})
+
+	gotResponse, _, err := client.Apps.UpgradeBuildpack(ctx, testApp.ID, opts)
+	require.NoError(t, err)
+	assert.Equal(t, response, gotResponse)
+}
+
+func TestApps_ToURN(t *testing.T) {
+	app := &App{
+		ID: "deadbeef-dead-4aa5-beef-deadbeef347d",
+	}
+	want := "do:app:deadbeef-dead-4aa5-beef-deadbeef347d"
+	got := app.URN()
+
+	require.Equal(t, want, got)
+}
+
+func TestApps_Interfaces(t *testing.T) {
+	t.Run("AppComponentSpec", func(t *testing.T) {
+		for _, impl := range []interface{}{
+			&AppServiceSpec{},
+			&AppWorkerSpec{},
+			&AppJobSpec{},
+			&AppStaticSiteSpec{},
+			&AppDatabaseSpec{},
+			&AppFunctionsSpec{},
+		} {
+			if _, ok := impl.(AppComponentSpec); !ok {
+				t.Fatalf("%T should match interface", impl)
+			}
+		}
+	})
+
+	t.Run("AppBuildableComponentSpec", func(t *testing.T) {
+		for impl, wantMatch := range map[any]bool{
+			&AppServiceSpec{}:    true,
+			&AppWorkerSpec{}:     true,
+			&AppJobSpec{}:        true,
+			&AppStaticSiteSpec{}: true,
+			&AppFunctionsSpec{}:  true,
+
+			&AppDatabaseSpec{}: false,
+		} {
+			_, ok := impl.(AppBuildableComponentSpec)
+			if wantMatch && !ok {
+				t.Fatalf("%T should match interface", impl)
+			} else if !wantMatch && ok {
+				t.Fatalf("%T should NOT match interface", impl)
+			}
+		}
+	})
+
+	t.Run("AppDockerBuildableComponentSpec", func(t *testing.T) {
+		for impl, wantMatch := range map[any]bool{
+			&AppServiceSpec{}:    true,
+			&AppWorkerSpec{}:     true,
+			&AppJobSpec{}:        true,
+			&AppStaticSiteSpec{}: true,
+
+			&AppFunctionsSpec{}: false,
+			&AppDatabaseSpec{}:  false,
+		} {
+			_, ok := impl.(AppDockerBuildableComponentSpec)
+			if wantMatch && !ok {
+				t.Fatalf("%T should match interface", impl)
+			} else if !wantMatch && ok {
+				t.Fatalf("%T should NOT match interface", impl)
+			}
+		}
+	})
+
+	t.Run("AppCNBBuildableComponentSpec", func(t *testing.T) {
+		for impl, wantMatch := range map[any]bool{
+			&AppServiceSpec{}:    true,
+			&AppWorkerSpec{}:     true,
+			&AppJobSpec{}:        true,
+			&AppStaticSiteSpec{}: true,
+
+			&AppFunctionsSpec{}: false,
+			&AppDatabaseSpec{}:  false,
+		} {
+			_, ok := impl.(AppCNBBuildableComponentSpec)
+			if wantMatch && !ok {
+				t.Fatalf("%T should match interface", impl)
+			} else if !wantMatch && ok {
+				t.Fatalf("%T should NOT match interface", impl)
+			}
+		}
+	})
+
+	t.Run("AppContainerComponentSpec", func(t *testing.T) {
+		for impl, wantMatch := range map[any]bool{
+			&AppServiceSpec{}: true,
+			&AppWorkerSpec{}:  true,
+			&AppJobSpec{}:     true,
+
+			&AppStaticSiteSpec{}: false,
+			&AppFunctionsSpec{}:  false,
+			&AppDatabaseSpec{}:   false,
+		} {
+			_, ok := impl.(AppContainerComponentSpec)
+			if wantMatch && !ok {
+				t.Fatalf("%T should match interface", impl)
+			} else if !wantMatch && ok {
+				t.Fatalf("%T should NOT match interface", impl)
+			}
+		}
+	})
+
+	t.Run("AppRoutableComponentSpec", func(t *testing.T) {
+		for impl, wantMatch := range map[any]bool{
+			&AppServiceSpec{}:    true,
+			&AppStaticSiteSpec{}: true,
+			&AppFunctionsSpec{}:  true,
+
+			&AppWorkerSpec{}:   false,
+			&AppJobSpec{}:      false,
+			&AppDatabaseSpec{}: false,
+		} {
+			_, ok := impl.(AppRoutableComponentSpec)
+			if wantMatch && !ok {
+				t.Fatalf("%T should match interface", impl)
+			} else if !wantMatch && ok {
+				t.Fatalf("%T should NOT match interface", impl)
+			}
+		}
+	})
+
+	t.Run("SourceSpec", func(t *testing.T) {
+		for _, impl := range []interface{}{
+			&GitSourceSpec{},
+			&GitHubSourceSpec{},
+			&GitLabSourceSpec{},
+			&ImageSourceSpec{},
+		} {
+			if _, ok := impl.(SourceSpec); !ok {
+				t.Fatalf("%T should match interface", impl)
+			}
+		}
+	})
+
+	t.Run("VCSSourceSpec", func(t *testing.T) {
+		for _, impl := range []interface{}{
+			&GitSourceSpec{},
+			&GitHubSourceSpec{},
+			&GitLabSourceSpec{},
+		} {
+			if _, ok := impl.(VCSSourceSpec); !ok {
+				t.Fatalf("%T should match interface", impl)
+			}
+		}
+		for impl, wantMatch := range map[any]bool{
+			&GitSourceSpec{}:    true,
+			&GitHubSourceSpec{}: true,
+			&GitLabSourceSpec{}: true,
+
+			&ImageSourceSpec{}: false,
+		} {
+			_, ok := impl.(VCSSourceSpec)
+			if wantMatch && !ok {
+				t.Fatalf("%T should match interface", impl)
+			} else if !wantMatch && ok {
+				t.Fatalf("%T should NOT match interface", impl)
+			}
+		}
+	})
+}
+
+func TestForEachAppSpecComponent(t *testing.T) {
+	spec := &AppSpec{
+		Services: []*AppServiceSpec{
+			{Name: "service-1"},
+			{Name: "service-2"},
+		},
+		Workers: []*AppWorkerSpec{
+			{Name: "worker-1"},
+			{Name: "worker-2"},
+		},
+		Databases: []*AppDatabaseSpec{
+			{Name: "database-1"},
+			{Name: "database-2"},
+		},
+		StaticSites: []*AppStaticSiteSpec{
+			{Name: "site-1"},
+			{Name: "site-2"},
+		},
+	}
+
+	t.Run("interface", func(t *testing.T) {
+		var components []string
+		_ = ForEachAppSpecComponent(spec, func(component AppBuildableComponentSpec) error {
+			components = append(components, component.GetName())
+			return nil
+		})
+		require.ElementsMatch(t, components, []string{
+			"service-1",
+			"service-2",
+			"worker-1",
+			"worker-2",
+			"site-1",
+			"site-2",
+		})
+	})
+
+	t.Run("struct type", func(t *testing.T) {
+		var components []string
+		_ = ForEachAppSpecComponent(spec, func(component *AppStaticSiteSpec) error {
+			components = append(components, component.GetName())
+			return nil
+		})
+		require.ElementsMatch(t, components, []string{
+			"site-1",
+			"site-2",
+		})
+	})
+}
+
+func TestGetAppSpecComponent(t *testing.T) {
+	spec := &AppSpec{
+		Services: []*AppServiceSpec{
+			{Name: "service-1"},
+			{Name: "service-2"},
+		},
+		Workers: []*AppWorkerSpec{
+			{Name: "worker-1"},
+			{Name: "worker-2"},
+		},
+		Databases: []*AppDatabaseSpec{
+			{Name: "database-1"},
+			{Name: "database-2"},
+		},
+		StaticSites: []*AppStaticSiteSpec{
+			{Name: "site-1"},
+			{Name: "site-2"},
+		},
+	}
+
+	t.Run("interface", func(t *testing.T) {
+		site, err := GetAppSpecComponent[AppBuildableComponentSpec](spec, "site-1")
+		require.NoError(t, err)
+		require.Equal(t, &AppStaticSiteSpec{Name: "site-1"}, site)
+
+		svc, err := GetAppSpecComponent[AppBuildableComponentSpec](spec, "service-2")
+		require.NoError(t, err)
+		require.Equal(t, &AppServiceSpec{Name: "service-2"}, svc)
+
+		db, err := GetAppSpecComponent[AppBuildableComponentSpec](spec, "db-123123")
+		require.EqualError(t, err, "component db-123123 not found")
+		require.Nil(t, db)
+	})
+
+	t.Run("struct type", func(t *testing.T) {
+		db, err := GetAppSpecComponent[*AppDatabaseSpec](spec, "database-1")
+		require.NoError(t, err)
+		require.Equal(t, &AppDatabaseSpec{Name: "database-1"}, db)
+
+		svc, err := GetAppSpecComponent[*AppServiceSpec](spec, "service-2")
+		require.NoError(t, err)
+		require.Equal(t, &AppServiceSpec{Name: "service-2"}, svc)
+
+		db, err = GetAppSpecComponent[*AppDatabaseSpec](spec, "404")
+		require.EqualError(t, err, "component 404 not found")
+		require.Nil(t, db)
+	})
+}
diff --git a/balance.go b/balance.go
index 4da6978..bfd0b04 100644
--- a/balance.go
+++ b/balance.go
@@ -8,7 +8,7 @@ import (
 
 // BalanceService is an interface for interfacing with the Balance
 // endpoints of the DigitalOcean API
-// See: https://developers.digitalocean.com/documentation/v2/#balance
+// See: https://docs.digitalocean.com/reference/api/api-reference/#operation/balance_get
 type BalanceService interface {
 	Get(context.Context) (*Balance, *Response, error)
 }
diff --git a/billing_history.go b/billing_history.go
index a510100..ae87c10 100644
--- a/billing_history.go
+++ b/billing_history.go
@@ -10,7 +10,7 @@ const billingHistoryBasePath = "v2/customers/my/billing_history"
 
 // BillingHistoryService is an interface for interfacing with the BillingHistory
 // endpoints of the DigitalOcean API
-// See: https://developers.digitalocean.com/documentation/v2/#billing_history
+// See: https://docs.digitalocean.com/reference/api/api-reference/#operation/billingHistory_list
 type BillingHistoryService interface {
 	List(context.Context, *ListOptions) (*BillingHistory, *Response, error)
 }
diff --git a/billing_history_test.go b/billing_history_test.go
index c1026cd..40aedda 100644
--- a/billing_history_test.go
+++ b/billing_history_test.go
@@ -46,8 +46,8 @@ func TestBillingHistory_List(t *testing.T) {
 		{
 			Description: "Invoice for May 2018",
 			Amount:      "12.34",
-			InvoiceID:   String("123"),
-			InvoiceUUID: String("example-uuid"),
+			InvoiceID:   PtrTo("123"),
+			InvoiceUUID: PtrTo("example-uuid"),
 			Date:        time.Date(2018, 6, 1, 8, 44, 38, 0, time.UTC),
 			Type:        "Invoice",
 		},
diff --git a/certificates.go b/certificates.go
index 9a6bdb2..faf26a3 100644
--- a/certificates.go
+++ b/certificates.go
@@ -9,7 +9,7 @@ import (
 const certificatesBasePath = "/v2/certificates"
 
 // CertificatesService is an interface for managing certificates with the DigitalOcean API.
-// See: https://developers.digitalocean.com/documentation/v2/#certificates
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Certificates
 type CertificatesService interface {
 	Get(context.Context, string) (*Certificate, *Response, error)
 	List(context.Context, *ListOptions) ([]Certificate, *Response, error)
diff --git a/certificates_test.go b/certificates_test.go
index d52188a..c94b127 100644
--- a/certificates_test.go
+++ b/certificates_test.go
@@ -8,6 +8,7 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 var certJSONResponse = `
@@ -75,9 +76,7 @@ func TestCertificates_Get(t *testing.T) {
 	})
 
 	certificate, _, err := client.Certificates.Get(ctx, cID)
-	if err != nil {
-		t.Errorf("Certificates.Get returned error: %v", err)
-	}
+	require.NoError(t, err)
 
 	expected := &Certificate{
 		ID:              "892071a0-bb95-49bc-8021-3afd67a210bf",
@@ -105,9 +104,7 @@ func TestCertificates_List(t *testing.T) {
 
 	certificates, resp, err := client.Certificates.List(ctx, nil)
 
-	if err != nil {
-		t.Errorf("Certificates.List returned error: %v", err)
-	}
+	require.NoError(t, err)
 
 	expectedCertificates := []Certificate{
 		{
@@ -230,10 +227,8 @@ func TestCertificates_Create(t *testing.T) {
 			})
 
 			certificate, _, err := client.Certificates.Create(ctx, test.createRequest)
-			if err != nil {
-				t.Errorf("Certificates.Create returned error: %v", err)
-			}
 
+			require.NoError(t, err)
 			assert.Equal(t, test.expectedCertificate, certificate)
 		})
 	}
@@ -252,7 +247,5 @@ func TestCertificates_Delete(t *testing.T) {
 
 	_, err := client.Certificates.Delete(ctx, cID)
 
-	if err != nil {
-		t.Errorf("Certificates.Delete returned error: %v", err)
-	}
+	assert.NoError(t, err)
 }
diff --git a/databases.go b/databases.go
index 42d83ee..a024073 100644
--- a/databases.go
+++ b/databases.go
@@ -9,24 +9,29 @@ import (
 )
 
 const (
-	databaseBasePath           = "/v2/databases"
-	databaseSinglePath         = databaseBasePath + "/%s"
-	databaseResizePath         = databaseBasePath + "/%s/resize"
-	databaseMigratePath        = databaseBasePath + "/%s/migrate"
-	databaseMaintenancePath    = databaseBasePath + "/%s/maintenance"
-	databaseBackupsPath        = databaseBasePath + "/%s/backups"
-	databaseUsersPath          = databaseBasePath + "/%s/users"
-	databaseUserPath           = databaseBasePath + "/%s/users/%s"
-	databaseResetUserAuthPath  = databaseUserPath + "/reset_auth"
-	databaseDBPath             = databaseBasePath + "/%s/dbs/%s"
-	databaseDBsPath            = databaseBasePath + "/%s/dbs"
-	databasePoolPath           = databaseBasePath + "/%s/pools/%s"
-	databasePoolsPath          = databaseBasePath + "/%s/pools"
-	databaseReplicaPath        = databaseBasePath + "/%s/replicas/%s"
-	databaseReplicasPath       = databaseBasePath + "/%s/replicas"
-	databaseEvictionPolicyPath = databaseBasePath + "/%s/eviction_policy"
-	databaseSQLModePath        = databaseBasePath + "/%s/sql_mode"
-	databaseFirewallRulesPath  = databaseBasePath + "/%s/firewall"
+	databaseBasePath                    = "/v2/databases"
+	databaseSinglePath                  = databaseBasePath + "/%s"
+	databaseCAPath                      = databaseBasePath + "/%s/ca"
+	databaseConfigPath                  = databaseBasePath + "/%s/config"
+	databaseResizePath                  = databaseBasePath + "/%s/resize"
+	databaseMigratePath                 = databaseBasePath + "/%s/migrate"
+	databaseMaintenancePath             = databaseBasePath + "/%s/maintenance"
+	databaseBackupsPath                 = databaseBasePath + "/%s/backups"
+	databaseUsersPath                   = databaseBasePath + "/%s/users"
+	databaseUserPath                    = databaseBasePath + "/%s/users/%s"
+	databaseResetUserAuthPath           = databaseUserPath + "/reset_auth"
+	databaseDBPath                      = databaseBasePath + "/%s/dbs/%s"
+	databaseDBsPath                     = databaseBasePath + "/%s/dbs"
+	databasePoolPath                    = databaseBasePath + "/%s/pools/%s"
+	databasePoolsPath                   = databaseBasePath + "/%s/pools"
+	databaseReplicaPath                 = databaseBasePath + "/%s/replicas/%s"
+	databaseReplicasPath                = databaseBasePath + "/%s/replicas"
+	databaseEvictionPolicyPath          = databaseBasePath + "/%s/eviction_policy"
+	databaseSQLModePath                 = databaseBasePath + "/%s/sql_mode"
+	databaseFirewallRulesPath           = databaseBasePath + "/%s/firewall"
+	databaseOptionsPath                 = databaseBasePath + "/options"
+	databaseUpgradeMajorVersionPath     = databaseBasePath + "/%s/upgrade"
+	databasePromoteReplicaToPrimaryPath = databaseReplicaPath + "/promote"
 )
 
 // SQL Mode constants allow for MySQL-specific SQL flavor configuration.
@@ -79,18 +84,29 @@ const (
 	EvictionPolicyVolatileTTL    = "volatile_ttl"
 )
 
+// evictionPolicyMap is used to normalize the eviction policy string in requests
+// to the advanced Redis configuration endpoint from the consts used with SetEvictionPolicy.
+var evictionPolicyMap = map[string]string{
+	EvictionPolicyAllKeysLRU:     "allkeys-lru",
+	EvictionPolicyAllKeysRandom:  "allkeys-random",
+	EvictionPolicyVolatileLRU:    "volatile-lru",
+	EvictionPolicyVolatileRandom: "volatile-random",
+	EvictionPolicyVolatileTTL:    "volatile-ttl",
+}
+
 // The DatabasesService provides access to the DigitalOcean managed database
 // suite of products through the public API. Customers can create new database
 // clusters, migrate them  between regions, create replicas and interact with
-// their configurations. Each database service is refered to as a Database. A
+// their configurations. Each database service is referred to as a Database. A
 // SQL database service can have multiple databases residing in the system. To
 // help make these entities distinct from Databases in godo, we refer to them
 // here as DatabaseDBs.
 //
-// See: https://developers.digitalocean.com/documentation/v2#databases
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Databases
 type DatabasesService interface {
 	List(context.Context, *ListOptions) ([]Database, *Response, error)
 	Get(context.Context, string) (*Database, *Response, error)
+	GetCA(context.Context, string) (*DatabaseCA, *Response, error)
 	Create(context.Context, *DatabaseCreateRequest) (*Database, *Response, error)
 	Delete(context.Context, string) (*Response, error)
 	Resize(context.Context, string, *DatabaseResizeRequest) (*Response, error)
@@ -110,16 +126,26 @@ type DatabasesService interface {
 	CreatePool(context.Context, string, *DatabaseCreatePoolRequest) (*DatabasePool, *Response, error)
 	GetPool(context.Context, string, string) (*DatabasePool, *Response, error)
 	DeletePool(context.Context, string, string) (*Response, error)
+	UpdatePool(context.Context, string, string, *DatabaseUpdatePoolRequest) (*Response, error)
 	GetReplica(context.Context, string, string) (*DatabaseReplica, *Response, error)
 	ListReplicas(context.Context, string, *ListOptions) ([]DatabaseReplica, *Response, error)
 	CreateReplica(context.Context, string, *DatabaseCreateReplicaRequest) (*DatabaseReplica, *Response, error)
 	DeleteReplica(context.Context, string, string) (*Response, error)
+	PromoteReplicaToPrimary(context.Context, string, string) (*Response, error)
 	GetEvictionPolicy(context.Context, string) (string, *Response, error)
 	SetEvictionPolicy(context.Context, string, string) (*Response, error)
 	GetSQLMode(context.Context, string) (string, *Response, error)
 	SetSQLMode(context.Context, string, ...string) (*Response, error)
 	GetFirewallRules(context.Context, string) ([]DatabaseFirewallRule, *Response, error)
 	UpdateFirewallRules(context.Context, string, *DatabaseUpdateFirewallRulesRequest) (*Response, error)
+	GetPostgreSQLConfig(context.Context, string) (*PostgreSQLConfig, *Response, error)
+	GetRedisConfig(context.Context, string) (*RedisConfig, *Response, error)
+	GetMySQLConfig(context.Context, string) (*MySQLConfig, *Response, error)
+	UpdatePostgreSQLConfig(context.Context, string, *PostgreSQLConfig) (*Response, error)
+	UpdateRedisConfig(context.Context, string, *RedisConfig) (*Response, error)
+	UpdateMySQLConfig(context.Context, string, *MySQLConfig) (*Response, error)
+	ListOptions(todo context.Context) (*DatabaseOptions, *Response, error)
+	UpgradeMajorVersion(context.Context, string, *UpgradeVersionRequest) (*Response, error)
 }
 
 // DatabasesServiceOp handles communication with the Databases related methods
@@ -152,6 +178,12 @@ type Database struct {
 	CreatedAt          time.Time                  `json:"created_at,omitempty"`
 	PrivateNetworkUUID string                     `json:"private_network_uuid,omitempty"`
 	Tags               []string                   `json:"tags,omitempty"`
+	ProjectID          string                     `json:"project_id,omitempty"`
+}
+
+// DatabaseCA represents a database ca.
+type DatabaseCA struct {
+	Certificate []byte `json:"certificate"`
 }
 
 // DatabaseConnection represents a database connection
@@ -193,16 +225,24 @@ type DatabaseBackup struct {
 	SizeGigabytes float64   `json:"size_gigabytes,omitempty"`
 }
 
+// DatabaseBackupRestore contains information needed to restore a backup.
+type DatabaseBackupRestore struct {
+	DatabaseName    string `json:"database_name,omitempty"`
+	BackupCreatedAt string `json:"backup_created_at,omitempty"`
+}
+
 // DatabaseCreateRequest represents a request to create a database cluster
 type DatabaseCreateRequest struct {
-	Name               string   `json:"name,omitempty"`
-	EngineSlug         string   `json:"engine,omitempty"`
-	Version            string   `json:"version,omitempty"`
-	SizeSlug           string   `json:"size,omitempty"`
-	Region             string   `json:"region,omitempty"`
-	NumNodes           int      `json:"num_nodes,omitempty"`
-	PrivateNetworkUUID string   `json:"private_network_uuid"`
-	Tags               []string `json:"tags,omitempty"`
+	Name               string                 `json:"name,omitempty"`
+	EngineSlug         string                 `json:"engine,omitempty"`
+	Version            string                 `json:"version,omitempty"`
+	SizeSlug           string                 `json:"size,omitempty"`
+	Region             string                 `json:"region,omitempty"`
+	NumNodes           int                    `json:"num_nodes,omitempty"`
+	PrivateNetworkUUID string                 `json:"private_network_uuid"`
+	Tags               []string               `json:"tags,omitempty"`
+	BackupRestore      *DatabaseBackupRestore `json:"backup_restore,omitempty"`
+	ProjectID          string                 `json:"project_id"`
 }
 
 // DatabaseResizeRequest can be used to initiate a database resize operation.
@@ -225,7 +265,7 @@ type DatabaseUpdateMaintenanceRequest struct {
 
 // DatabaseDB represents an engine-specific database created within a database cluster. For SQL
 // databases like PostgreSQL or MySQL, a "DB" refers to a database created on the RDBMS. For instance,
-// a PostgreSQL database server can contain many database schemas, each with it's own settings, access
+// a PostgreSQL database server can contain many database schemas, each with its own settings, access
 // permissions and data. ListDBs will return all databases present on the server.
 type DatabaseDB struct {
 	Name string `json:"name"`
@@ -233,6 +273,7 @@ type DatabaseDB struct {
 
 // DatabaseReplica represents a read-only replica of a particular database
 type DatabaseReplica struct {
+	ID                 string              `json:"id"`
 	Name               string              `json:"name"`
 	Connection         *DatabaseConnection `json:"connection"`
 	PrivateConnection  *DatabaseConnection `json:"private_connection,omitempty"`
@@ -263,13 +304,21 @@ type DatabaseCreatePoolRequest struct {
 	Mode     string `json:"mode"`
 }
 
+// DatabaseUpdatePoolRequest is used to update a database connection pool
+type DatabaseUpdatePoolRequest struct {
+	User     string `json:"user,omitempty"`
+	Size     int    `json:"size"`
+	Database string `json:"db"`
+	Mode     string `json:"mode"`
+}
+
 // DatabaseCreateUserRequest is used to create a new database user
 type DatabaseCreateUserRequest struct {
 	Name          string                     `json:"name"`
 	MySQLSettings *DatabaseMySQLUserSettings `json:"mysql_settings,omitempty"`
 }
 
-// DatabaseResetUserAuth request is used to reset a users DB auth
+// DatabaseResetUserAuthRequest is used to reset a users DB auth
 type DatabaseResetUserAuthRequest struct {
 	MySQLSettings *DatabaseMySQLUserSettings `json:"mysql_settings,omitempty"`
 }
@@ -302,6 +351,125 @@ type DatabaseFirewallRule struct {
 	CreatedAt   time.Time `json:"created_at"`
 }
 
+// PostgreSQLConfig holds advanced configurations for PostgreSQL database clusters.
+type PostgreSQLConfig struct {
+	AutovacuumFreezeMaxAge          *int                         `json:"autovacuum_freeze_max_age,omitempty"`
+	AutovacuumMaxWorkers            *int                         `json:"autovacuum_max_workers,omitempty"`
+	AutovacuumNaptime               *int                         `json:"autovacuum_naptime,omitempty"`
+	AutovacuumVacuumThreshold       *int                         `json:"autovacuum_vacuum_threshold,omitempty"`
+	AutovacuumAnalyzeThreshold      *int                         `json:"autovacuum_analyze_threshold,omitempty"`
+	AutovacuumVacuumScaleFactor     *float32                     `json:"autovacuum_vacuum_scale_factor,omitempty"`
+	AutovacuumAnalyzeScaleFactor    *float32                     `json:"autovacuum_analyze_scale_factor,omitempty"`
+	AutovacuumVacuumCostDelay       *int                         `json:"autovacuum_vacuum_cost_delay,omitempty"`
+	AutovacuumVacuumCostLimit       *int                         `json:"autovacuum_vacuum_cost_limit,omitempty"`
+	BGWriterDelay                   *int                         `json:"bgwriter_delay,omitempty"`
+	BGWriterFlushAfter              *int                         `json:"bgwriter_flush_after,omitempty"`
+	BGWriterLRUMaxpages             *int                         `json:"bgwriter_lru_maxpages,omitempty"`
+	BGWriterLRUMultiplier           *float32                     `json:"bgwriter_lru_multiplier,omitempty"`
+	DeadlockTimeoutMillis           *int                         `json:"deadlock_timeout,omitempty"`
+	DefaultToastCompression         *string                      `json:"default_toast_compression,omitempty"`
+	IdleInTransactionSessionTimeout *int                         `json:"idle_in_transaction_session_timeout,omitempty"`
+	JIT                             *bool                        `json:"jit,omitempty"`
+	LogAutovacuumMinDuration        *int                         `json:"log_autovacuum_min_duration,omitempty"`
+	LogErrorVerbosity               *string                      `json:"log_error_verbosity,omitempty"`
+	LogLinePrefix                   *string                      `json:"log_line_prefix,omitempty"`
+	LogMinDurationStatement         *int                         `json:"log_min_duration_statement,omitempty"`
+	MaxFilesPerProcess              *int                         `json:"max_files_per_process,omitempty"`
+	MaxPreparedTransactions         *int                         `json:"max_prepared_transactions,omitempty"`
+	MaxPredLocksPerTransaction      *int                         `json:"max_pred_locks_per_transaction,omitempty"`
+	MaxLocksPerTransaction          *int                         `json:"max_locks_per_transaction,omitempty"`
+	MaxStackDepth                   *int                         `json:"max_stack_depth,omitempty"`
+	MaxStandbyArchiveDelay          *int                         `json:"max_standby_archive_delay,omitempty"`
+	MaxStandbyStreamingDelay        *int                         `json:"max_standby_streaming_delay,omitempty"`
+	MaxReplicationSlots             *int                         `json:"max_replication_slots,omitempty"`
+	MaxLogicalReplicationWorkers    *int                         `json:"max_logical_replication_workers,omitempty"`
+	MaxParallelWorkers              *int                         `json:"max_parallel_workers,omitempty"`
+	MaxParallelWorkersPerGather     *int                         `json:"max_parallel_workers_per_gather,omitempty"`
+	MaxWorkerProcesses              *int                         `json:"max_worker_processes,omitempty"`
+	PGPartmanBGWRole                *string                      `json:"pg_partman_bgw.role,omitempty"`
+	PGPartmanBGWInterval            *int                         `json:"pg_partman_bgw.interval,omitempty"`
+	PGStatStatementsTrack           *string                      `json:"pg_stat_statements.track,omitempty"`
+	TempFileLimit                   *int                         `json:"temp_file_limit,omitempty"`
+	Timezone                        *string                      `json:"timezone,omitempty"`
+	TrackActivityQuerySize          *int                         `json:"track_activity_query_size,omitempty"`
+	TrackCommitTimestamp            *string                      `json:"track_commit_timestamp,omitempty"`
+	TrackFunctions                  *string                      `json:"track_functions,omitempty"`
+	TrackIOTiming                   *string                      `json:"track_io_timing,omitempty"`
+	MaxWalSenders                   *int                         `json:"max_wal_senders,omitempty"`
+	WalSenderTimeout                *int                         `json:"wal_sender_timeout,omitempty"`
+	WalWriterDelay                  *int                         `json:"wal_writer_delay,omitempty"`
+	SharedBuffersPercentage         *float32                     `json:"shared_buffers_percentage,omitempty"`
+	PgBouncer                       *PostgreSQLBouncerConfig     `json:"pgbouncer,omitempty"`
+	BackupHour                      *int                         `json:"backup_hour,omitempty"`
+	BackupMinute                    *int                         `json:"backup_minute,omitempty"`
+	WorkMem                         *int                         `json:"work_mem,omitempty"`
+	TimeScaleDB                     *PostgreSQLTimeScaleDBConfig `json:"timescaledb,omitempty"`
+}
+
+// PostgreSQLBouncerConfig configuration
+type PostgreSQLBouncerConfig struct {
+	ServerResetQueryAlways  *bool     `json:"server_reset_query_always,omitempty"`
+	IgnoreStartupParameters *[]string `json:"ignore_startup_parameters,omitempty"`
+	MinPoolSize             *int      `json:"min_pool_size,omitempty"`
+	ServerLifetime          *int      `json:"server_lifetime,omitempty"`
+	ServerIdleTimeout       *int      `json:"server_idle_timeout,omitempty"`
+	AutodbPoolSize          *int      `json:"autodb_pool_size,omitempty"`
+	AutodbPoolMode          *string   `json:"autodb_pool_mode,omitempty"`
+	AutodbMaxDbConnections  *int      `json:"autodb_max_db_connections,omitempty"`
+	AutodbIdleTimeout       *int      `json:"autodb_idle_timeout,omitempty"`
+}
+
+// PostgreSQLTimeScaleDBConfig configuration
+type PostgreSQLTimeScaleDBConfig struct {
+	MaxBackgroundWorkers *int `json:"max_background_workers,omitempty"`
+}
+
+// RedisConfig holds advanced configurations for Redis database clusters.
+type RedisConfig struct {
+	RedisMaxmemoryPolicy               *string `json:"redis_maxmemory_policy,omitempty"`
+	RedisPubsubClientOutputBufferLimit *int    `json:"redis_pubsub_client_output_buffer_limit,omitempty"`
+	RedisNumberOfDatabases             *int    `json:"redis_number_of_databases,omitempty"`
+	RedisIOThreads                     *int    `json:"redis_io_threads,omitempty"`
+	RedisLFULogFactor                  *int    `json:"redis_lfu_log_factor,omitempty"`
+	RedisLFUDecayTime                  *int    `json:"redis_lfu_decay_time,omitempty"`
+	RedisSSL                           *bool   `json:"redis_ssl,omitempty"`
+	RedisTimeout                       *int    `json:"redis_timeout,omitempty"`
+	RedisNotifyKeyspaceEvents          *string `json:"redis_notify_keyspace_events,omitempty"`
+	RedisPersistence                   *string `json:"redis_persistence,omitempty"`
+	RedisACLChannelsDefault            *string `json:"redis_acl_channels_default,omitempty"`
+}
+
+// MySQLConfig holds advanced configurations for MySQL database clusters.
+type MySQLConfig struct {
+	ConnectTimeout               *int     `json:"connect_timeout,omitempty"`
+	DefaultTimeZone              *string  `json:"default_time_zone,omitempty"`
+	InnodbLogBufferSize          *int     `json:"innodb_log_buffer_size,omitempty"`
+	InnodbOnlineAlterLogMaxSize  *int     `json:"innodb_online_alter_log_max_size,omitempty"`
+	InnodbLockWaitTimeout        *int     `json:"innodb_lock_wait_timeout,omitempty"`
+	InteractiveTimeout           *int     `json:"interactive_timeout,omitempty"`
+	MaxAllowedPacket             *int     `json:"max_allowed_packet,omitempty"`
+	NetReadTimeout               *int     `json:"net_read_timeout,omitempty"`
+	SortBufferSize               *int     `json:"sort_buffer_size,omitempty"`
+	SQLMode                      *string  `json:"sql_mode,omitempty"`
+	SQLRequirePrimaryKey         *bool    `json:"sql_require_primary_key,omitempty"`
+	WaitTimeout                  *int     `json:"wait_timeout,omitempty"`
+	NetWriteTimeout              *int     `json:"net_write_timeout,omitempty"`
+	GroupConcatMaxLen            *int     `json:"group_concat_max_len,omitempty"`
+	InformationSchemaStatsExpiry *int     `json:"information_schema_stats_expiry,omitempty"`
+	InnodbFtMinTokenSize         *int     `json:"innodb_ft_min_token_size,omitempty"`
+	InnodbFtServerStopwordTable  *string  `json:"innodb_ft_server_stopword_table,omitempty"`
+	InnodbPrintAllDeadlocks      *bool    `json:"innodb_print_all_deadlocks,omitempty"`
+	InnodbRollbackOnTimeout      *bool    `json:"innodb_rollback_on_timeout,omitempty"`
+	InternalTmpMemStorageEngine  *string  `json:"internal_tmp_mem_storage_engine,omitempty"`
+	MaxHeapTableSize             *int     `json:"max_heap_table_size,omitempty"`
+	TmpTableSize                 *int     `json:"tmp_table_size,omitempty"`
+	SlowQueryLog                 *bool    `json:"slow_query_log,omitempty"`
+	LongQueryTime                *float32 `json:"long_query_time,omitempty"`
+	BackupHour                   *int     `json:"backup_hour,omitempty"`
+	BackupMinute                 *int     `json:"backup_minute,omitempty"`
+	BinlogRetentionPeriod        *int     `json:"binlog_retention_period,omitempty"`
+}
+
 type databaseUserRoot struct {
 	User *DatabaseUser `json:"user"`
 }
@@ -326,6 +494,22 @@ type databaseRoot struct {
 	Database *Database `json:"database"`
 }
 
+type databaseCARoot struct {
+	CA *DatabaseCA `json:"ca"`
+}
+
+type databasePostgreSQLConfigRoot struct {
+	Config *PostgreSQLConfig `json:"config"`
+}
+
+type databaseRedisConfigRoot struct {
+	Config *RedisConfig `json:"config"`
+}
+
+type databaseMySQLConfigRoot struct {
+	Config *MySQLConfig `json:"config"`
+}
+
 type databaseBackupsRoot struct {
 	Backups []DatabaseBackup `json:"backups"`
 }
@@ -350,6 +534,10 @@ type evictionPolicyRoot struct {
 	EvictionPolicy string `json:"eviction_policy"`
 }
 
+type UpgradeVersionRequest struct {
+	Version string `json:"version"`
+}
+
 type sqlModeRoot struct {
 	SQLMode string `json:"sql_mode"`
 }
@@ -358,6 +546,32 @@ type databaseFirewallRuleRoot struct {
 	Rules []DatabaseFirewallRule `json:"rules"`
 }
 
+// databaseOptionsRoot represents the root of all available database options (i.e. engines, regions, versions, etc.)
+type databaseOptionsRoot struct {
+	Options *DatabaseOptions `json:"options"`
+}
+
+// DatabaseOptions represents the available configuration options (regions, versions, layouts) for each database engine
+type DatabaseOptions struct {
+	MongoDBOptions     DatabaseEngineOptions `json:"mongodb"`
+	MySQLOptions       DatabaseEngineOptions `json:"mysql"`
+	PostgresSQLOptions DatabaseEngineOptions `json:"pg"`
+	RedisOptions       DatabaseEngineOptions `json:"redis"`
+}
+
+// DatabaseEngineOptions represents the configuration options that are available for a given database engine
+type DatabaseEngineOptions struct {
+	Regions  []string         `json:"regions"`
+	Versions []string         `json:"versions"`
+	Layouts  []DatabaseLayout `json:"layouts"`
+}
+
+// DatabaseLayout represents the slugs available for a given database engine at various node counts
+type DatabaseLayout struct {
+	NodeNum int      `json:"num_nodes"`
+	Sizes   []string `json:"sizes"`
+}
+
 // URN returns a URN identifier for the database
 func (d Database) URN() string {
 	return ToURN("dbaas", d.ID)
@@ -397,6 +611,21 @@ func (svc *DatabasesServiceOp) Get(ctx context.Context, databaseID string) (*Dat
 	return root.Database, resp, nil
 }
 
+// GetCA retrieves the CA of a database cluster.
+func (svc *DatabasesServiceOp) GetCA(ctx context.Context, databaseID string) (*DatabaseCA, *Response, error) {
+	path := fmt.Sprintf(databaseCAPath, databaseID)
+	req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	root := new(databaseCARoot)
+	resp, err := svc.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	return root.CA, resp, nil
+}
+
 // Create creates a database cluster
 func (svc *DatabasesServiceOp) Create(ctx context.Context, create *DatabaseCreateRequest) (*Database, *Response, error) {
 	path := databaseBasePath
@@ -537,6 +766,7 @@ func (svc *DatabasesServiceOp) CreateUser(ctx context.Context, databaseID string
 	return root.User, resp, nil
 }
 
+// ResetUserAuth will reset user authentication
 func (svc *DatabasesServiceOp) ResetUserAuth(ctx context.Context, databaseID, userID string, resetAuth *DatabaseResetUserAuthRequest) (*DatabaseUser, *Response, error) {
 	path := fmt.Sprintf(databaseResetUserAuthPath, databaseID, userID)
 	req, err := svc.client.NewRequest(ctx, http.MethodPost, path, resetAuth)
@@ -691,6 +921,37 @@ func (svc *DatabasesServiceOp) DeletePool(ctx context.Context, databaseID, name
 	return resp, nil
 }
 
+// UpdatePool will update an existing database connection pool
+func (svc *DatabasesServiceOp) UpdatePool(ctx context.Context, databaseID, name string, updatePool *DatabaseUpdatePoolRequest) (*Response, error) {
+	path := fmt.Sprintf(databasePoolPath, databaseID, name)
+
+	if updatePool == nil {
+		return nil, NewArgError("updatePool", "cannot be nil")
+	}
+
+	if updatePool.Mode == "" {
+		return nil, NewArgError("mode", "cannot be empty")
+	}
+
+	if updatePool.Database == "" {
+		return nil, NewArgError("database", "cannot be empty")
+	}
+
+	if updatePool.Size < 1 {
+		return nil, NewArgError("size", "cannot be less than 1")
+	}
+
+	req, err := svc.client.NewRequest(ctx, http.MethodPut, path, updatePool)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := svc.client.Do(ctx, req, nil)
+	if err != nil {
+		return resp, err
+	}
+	return resp, nil
+}
+
 // GetReplica returns a single database replica
 func (svc *DatabasesServiceOp) GetReplica(ctx context.Context, databaseID, name string) (*DatabaseReplica, *Response, error) {
 	path := fmt.Sprintf(databaseReplicaPath, databaseID, name)
@@ -754,6 +1015,20 @@ func (svc *DatabasesServiceOp) DeleteReplica(ctx context.Context, databaseID, na
 	return resp, nil
 }
 
+// PromoteReplicaToPrimary will sever the read replica integration and then promote the replica cluster to be a R/W cluster
+func (svc *DatabasesServiceOp) PromoteReplicaToPrimary(ctx context.Context, databaseID, name string) (*Response, error) {
+	path := fmt.Sprintf(databasePromoteReplicaToPrimaryPath, databaseID, name)
+	req, err := svc.client.NewRequest(ctx, http.MethodPut, path, nil)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := svc.client.Do(ctx, req, nil)
+	if err != nil {
+		return resp, err
+	}
+	return resp, nil
+}
+
 // GetEvictionPolicy loads the eviction policy for a given Redis cluster.
 func (svc *DatabasesServiceOp) GetEvictionPolicy(ctx context.Context, databaseID string) (string, *Response, error) {
 	path := fmt.Sprintf(databaseEvictionPolicyPath, databaseID)
@@ -843,3 +1118,142 @@ func (svc *DatabasesServiceOp) UpdateFirewallRules(ctx context.Context, database
 	}
 	return svc.client.Do(ctx, req, nil)
 }
+
+// GetPostgreSQLConfig retrieves the config for a PostgreSQL database cluster.
+func (svc *DatabasesServiceOp) GetPostgreSQLConfig(ctx context.Context, databaseID string) (*PostgreSQLConfig, *Response, error) {
+	path := fmt.Sprintf(databaseConfigPath, databaseID)
+	req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	root := new(databasePostgreSQLConfigRoot)
+	resp, err := svc.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	return root.Config, resp, nil
+}
+
+// UpdatePostgreSQLConfig updates the config for a PostgreSQL database cluster.
+func (svc *DatabasesServiceOp) UpdatePostgreSQLConfig(ctx context.Context, databaseID string, config *PostgreSQLConfig) (*Response, error) {
+	path := fmt.Sprintf(databaseConfigPath, databaseID)
+	root := &databasePostgreSQLConfigRoot{
+		Config: config,
+	}
+	req, err := svc.client.NewRequest(ctx, http.MethodPatch, path, root)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := svc.client.Do(ctx, req, nil)
+	if err != nil {
+		return resp, err
+	}
+	return resp, nil
+}
+
+// GetRedisConfig retrieves the config for a Redis database cluster.
+func (svc *DatabasesServiceOp) GetRedisConfig(ctx context.Context, databaseID string) (*RedisConfig, *Response, error) {
+	path := fmt.Sprintf(databaseConfigPath, databaseID)
+	req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	root := new(databaseRedisConfigRoot)
+	resp, err := svc.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	return root.Config, resp, nil
+}
+
+// UpdateRedisConfig updates the config for a Redis database cluster.
+func (svc *DatabasesServiceOp) UpdateRedisConfig(ctx context.Context, databaseID string, config *RedisConfig) (*Response, error) {
+	path := fmt.Sprintf(databaseConfigPath, databaseID)
+
+	// We provide consts for use with SetEvictionPolicy method. Unfortunately, those are
+	// in a different format than what can be used for RedisConfig.RedisMaxmemoryPolicy.
+	// So we attempt to normalize them here to use dashes as separators if provided in
+	// the old format (underscores). Other values are passed through untouched.
+	if config.RedisMaxmemoryPolicy != nil {
+		if policy, ok := evictionPolicyMap[*config.RedisMaxmemoryPolicy]; ok {
+			config.RedisMaxmemoryPolicy = &policy
+		}
+	}
+
+	root := &databaseRedisConfigRoot{
+		Config: config,
+	}
+	req, err := svc.client.NewRequest(ctx, http.MethodPatch, path, root)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := svc.client.Do(ctx, req, nil)
+	if err != nil {
+		return resp, err
+	}
+	return resp, nil
+}
+
+// GetMySQLConfig retrieves the config for a MySQL database cluster.
+func (svc *DatabasesServiceOp) GetMySQLConfig(ctx context.Context, databaseID string) (*MySQLConfig, *Response, error) {
+	path := fmt.Sprintf(databaseConfigPath, databaseID)
+	req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	root := new(databaseMySQLConfigRoot)
+	resp, err := svc.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	return root.Config, resp, nil
+}
+
+// UpdateMySQLConfig updates the config for a MySQL database cluster.
+func (svc *DatabasesServiceOp) UpdateMySQLConfig(ctx context.Context, databaseID string, config *MySQLConfig) (*Response, error) {
+	path := fmt.Sprintf(databaseConfigPath, databaseID)
+	root := &databaseMySQLConfigRoot{
+		Config: config,
+	}
+	req, err := svc.client.NewRequest(ctx, http.MethodPatch, path, root)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := svc.client.Do(ctx, req, nil)
+	if err != nil {
+		return resp, err
+	}
+	return resp, nil
+}
+
+// ListOptions gets the database options available.
+func (svc *DatabasesServiceOp) ListOptions(ctx context.Context) (*DatabaseOptions, *Response, error) {
+	root := new(databaseOptionsRoot)
+	req, err := svc.client.NewRequest(ctx, http.MethodGet, databaseOptionsPath, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	resp, err := svc.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return root.Options, resp, nil
+}
+
+// UpgradeMajorVersion upgrades the major version of a cluster.
+func (svc *DatabasesServiceOp) UpgradeMajorVersion(ctx context.Context, databaseID string, upgradeReq *UpgradeVersionRequest) (*Response, error) {
+	path := fmt.Sprintf(databaseUpgradeMajorVersionPath, databaseID)
+	req, err := svc.client.NewRequest(ctx, http.MethodPut, path, upgradeReq)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := svc.client.Do(ctx, req, nil)
+	if err != nil {
+		return resp, err
+	}
+
+	return resp, nil
+}
diff --git a/databases_test.go b/databases_test.go
index f45818d..9809c05 100644
--- a/databases_test.go
+++ b/databases_test.go
@@ -7,6 +7,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
@@ -36,7 +37,7 @@ var db = Database{
 		SSL:      true,
 	},
 	Users: []DatabaseUser{
-		DatabaseUser{
+		{
 			Name:     "doadmin",
 			Role:     "primary",
 			Password: "zt91mum075ofzyww",
@@ -58,6 +59,7 @@ var db = Database{
 	SizeSlug:           "db-s-2vcpu-4gb",
 	PrivateNetworkUUID: "da4e0206-d019-41d7-b51f-deadbeefbb8f",
 	Tags:               []string{"production", "staging"},
+	ProjectID:          "6d0f9073-0a24-4f1b-9065-7dc5c8bad3e2",
 }
 
 var dbJSON = `
@@ -106,7 +108,8 @@ var dbJSON = `
 	},
 	"size": "db-s-2vcpu-4gb",
 	"private_network_uuid": "da4e0206-d019-41d7-b51f-deadbeefbb8f",
-	"tags": ["production", "staging"]
+	"tags": ["production", "staging"],
+	"project_id": "6d0f9073-0a24-4f1b-9065-7dc5c8bad3e2"
 }
 `
 
@@ -160,55 +163,172 @@ func TestDatabases_Get(t *testing.T) {
 	require.Equal(t, &db, got)
 }
 
-func TestDatabases_Create(t *testing.T) {
+func TestDatabases_GetCA(t *testing.T) {
 	setup()
 	defer teardown()
 
-	want := &Database{
-		ID:          "8d91899c-0739-4a1a-acc5-deadbeefbb8f",
-		Name:        "backend-test",
-		EngineSlug:  "pg",
-		VersionSlug: "10",
-		Connection: &DatabaseConnection{
-			URI:      "postgres://doadmin:zt91mum075ofzyww@dbtest-do-user-3342561-0.db.ondigitalocean.com:25060/defaultdb?sslmode=require",
-			Database: "defaultdb",
-			Host:     "dbtest-do-user-3342561-0.db.ondigitalocean.com",
-			Port:     25060,
-			User:     "doadmin",
-			Password: "zt91mum075ofzyww",
-			SSL:      true,
+	dbID := "da4e0206-d019-41d7-b51f-deadbeefbb8f"
+
+	body := `
+{
+  "ca": {
+    "certificate": "ZmFrZQpjYQpjZXJ0"
+  }
+}
+`
+
+	path := fmt.Sprintf("/v2/databases/%s/ca", dbID)
+
+	mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		fmt.Fprint(w, body)
+	})
+
+	got, _, err := client.Databases.GetCA(ctx, dbID)
+	require.NoError(t, err)
+	require.Equal(t, &DatabaseCA{Certificate: []byte("fake\nca\ncert")}, got)
+}
+
+func TestDatabases_Create(t *testing.T) {
+	tests := []struct {
+		title         string
+		createRequest *DatabaseCreateRequest
+		want          *Database
+		body          string
+	}{
+		{
+			title: "create",
+			createRequest: &DatabaseCreateRequest{
+				Name:       "backend-test",
+				EngineSlug: "pg",
+				Version:    "10",
+				Region:     "nyc3",
+				SizeSlug:   "db-s-2vcpu-4gb",
+				NumNodes:   2,
+				Tags:       []string{"production", "staging"},
+				ProjectID:  "05d84f74-db8c-4de5-ae72-2fd4823fb1c8",
+			},
+			want: &Database{
+				ID:          "8d91899c-0739-4a1a-acc5-deadbeefbb8f",
+				Name:        "backend-test",
+				EngineSlug:  "pg",
+				VersionSlug: "10",
+				Connection: &DatabaseConnection{
+					URI:      "postgres://doadmin:zt91mum075ofzyww@dbtest-do-user-3342561-0.db.ondigitalocean.com:25060/defaultdb?sslmode=require",
+					Database: "defaultdb",
+					Host:     "dbtest-do-user-3342561-0.db.ondigitalocean.com",
+					Port:     25060,
+					User:     "doadmin",
+					Password: "zt91mum075ofzyww",
+					SSL:      true,
+				},
+				PrivateConnection: &DatabaseConnection{
+					URI:      "postgres://doadmin:zt91mum075ofzyww@private-dbtest-do-user-3342561-0.db.ondigitalocean.com:25060/defaultdb?sslmode=require",
+					Database: "defaultdb",
+					Host:     "dbtest-do-user-3342561-0.db.ondigitalocean.com",
+					Port:     25060,
+					User:     "doadmin",
+					Password: "zt91mum075ofzyww",
+					SSL:      true,
+				},
+				Users:             nil,
+				DBNames:           nil,
+				NumNodes:          2,
+				RegionSlug:        "nyc3",
+				Status:            "creating",
+				CreatedAt:         time.Date(2019, 2, 26, 6, 12, 39, 0, time.UTC),
+				MaintenanceWindow: nil,
+				SizeSlug:          "db-s-2vcpu-4gb",
+				Tags:              []string{"production", "staging"},
+				ProjectID:         "05d84f74-db8c-4de5-ae72-2fd4823fb1c8",
+			},
+			body: `
+{
+	"database": {
+		"id": "8d91899c-0739-4a1a-acc5-deadbeefbb8f",
+		"name": "backend-test",
+		"engine": "pg",
+		"version": "10",
+		"connection": {
+			"uri": "postgres://doadmin:zt91mum075ofzyww@dbtest-do-user-3342561-0.db.ondigitalocean.com:25060/defaultdb?sslmode=require",
+			"database": "defaultdb",
+			"host": "dbtest-do-user-3342561-0.db.ondigitalocean.com",
+			"port": 25060,
+			"user": "doadmin",
+			"password": "zt91mum075ofzyww",
+			"ssl": true
 		},
-		PrivateConnection: &DatabaseConnection{
-			URI:      "postgres://doadmin:zt91mum075ofzyww@private-dbtest-do-user-3342561-0.db.ondigitalocean.com:25060/defaultdb?sslmode=require",
-			Database: "defaultdb",
-			Host:     "dbtest-do-user-3342561-0.db.ondigitalocean.com",
-			Port:     25060,
-			User:     "doadmin",
-			Password: "zt91mum075ofzyww",
-			SSL:      true,
+		"private_connection": {
+			"uri": "postgres://doadmin:zt91mum075ofzyww@private-dbtest-do-user-3342561-0.db.ondigitalocean.com:25060/defaultdb?sslmode=require",
+			"database": "defaultdb",
+			"host": "dbtest-do-user-3342561-0.db.ondigitalocean.com",
+			"port": 25060,
+			"user": "doadmin",
+			"password": "zt91mum075ofzyww",
+			"ssl": true
 		},
-		Users:             nil,
-		DBNames:           nil,
-		NumNodes:          2,
-		RegionSlug:        "nyc3",
-		Status:            "creating",
-		CreatedAt:         time.Date(2019, 2, 26, 6, 12, 39, 0, time.UTC),
-		MaintenanceWindow: nil,
-		SizeSlug:          "db-s-2vcpu-4gb",
-		Tags:              []string{"production", "staging"},
-	}
-
-	createRequest := &DatabaseCreateRequest{
-		Name:       "backend-test",
-		EngineSlug: "pg",
-		Version:    "10",
-		Region:     "nyc3",
-		SizeSlug:   "db-s-2vcpu-4gb",
-		NumNodes:   2,
-		Tags:       []string{"production", "staging"},
+		"users": null,
+		"db_names": null,
+		"num_nodes": 2,
+		"region": "nyc3",
+		"status": "creating",
+		"created_at": "2019-02-26T06:12:39Z",
+		"maintenance_window": null,
+		"size": "db-s-2vcpu-4gb",
+		"tags": ["production", "staging"],
+        "project_id": "05d84f74-db8c-4de5-ae72-2fd4823fb1c8"
 	}
-
-	body := `
+}`,
+		},
+		{
+			title: "create from backup",
+			createRequest: &DatabaseCreateRequest{
+				Name:       "backend-restored",
+				EngineSlug: "pg",
+				Version:    "10",
+				Region:     "nyc3",
+				SizeSlug:   "db-s-2vcpu-4gb",
+				NumNodes:   2,
+				Tags:       []string{"production", "staging"},
+				BackupRestore: &DatabaseBackupRestore{
+					DatabaseName:    "backend-orig",
+					BackupCreatedAt: "2019-01-31T19:25:22Z",
+				},
+			},
+			want: &Database{
+				ID:          "8d91899c-0739-4a1a-acc5-deadbeefbb8f",
+				Name:        "backend-test",
+				EngineSlug:  "pg",
+				VersionSlug: "10",
+				Connection: &DatabaseConnection{
+					URI:      "postgres://doadmin:zt91mum075ofzyww@dbtest-do-user-3342561-0.db.ondigitalocean.com:25060/defaultdb?sslmode=require",
+					Database: "defaultdb",
+					Host:     "dbtest-do-user-3342561-0.db.ondigitalocean.com",
+					Port:     25060,
+					User:     "doadmin",
+					Password: "zt91mum075ofzyww",
+					SSL:      true,
+				},
+				PrivateConnection: &DatabaseConnection{
+					URI:      "postgres://doadmin:zt91mum075ofzyww@private-dbtest-do-user-3342561-0.db.ondigitalocean.com:25060/defaultdb?sslmode=require",
+					Database: "defaultdb",
+					Host:     "dbtest-do-user-3342561-0.db.ondigitalocean.com",
+					Port:     25060,
+					User:     "doadmin",
+					Password: "zt91mum075ofzyww",
+					SSL:      true,
+				},
+				Users:             nil,
+				DBNames:           nil,
+				NumNodes:          2,
+				RegionSlug:        "nyc3",
+				Status:            "creating",
+				CreatedAt:         time.Date(2019, 2, 26, 6, 12, 39, 0, time.UTC),
+				MaintenanceWindow: nil,
+				SizeSlug:          "db-s-2vcpu-4gb",
+				Tags:              []string{"production", "staging"},
+			},
+			body: `
 {
 	"database": {
 		"id": "8d91899c-0739-4a1a-acc5-deadbeefbb8f",
@@ -243,23 +363,32 @@ func TestDatabases_Create(t *testing.T) {
 		"size": "db-s-2vcpu-4gb",
 		"tags": ["production", "staging"]
 	}
-}`
+}`,
+		},
+	}
 
-	mux.HandleFunc("/v2/databases", func(w http.ResponseWriter, r *http.Request) {
-		v := new(DatabaseCreateRequest)
-		err := json.NewDecoder(r.Body).Decode(v)
-		if err != nil {
-			t.Fatal(err)
-		}
+	for _, tt := range tests {
+		t.Run(tt.title, func(t *testing.T) {
+			setup()
+			defer teardown()
 
-		testMethod(t, r, http.MethodPost)
-		require.Equal(t, v, createRequest)
-		fmt.Fprint(w, body)
-	})
+			mux.HandleFunc("/v2/databases", func(w http.ResponseWriter, r *http.Request) {
+				v := new(DatabaseCreateRequest)
+				err := json.NewDecoder(r.Body).Decode(v)
+				if err != nil {
+					t.Fatal(err)
+				}
 
-	got, _, err := client.Databases.Create(ctx, createRequest)
-	require.NoError(t, err)
-	require.Equal(t, want, got)
+				testMethod(t, r, http.MethodPost)
+				require.Equal(t, v, tt.createRequest)
+				fmt.Fprint(w, tt.body)
+			})
+
+			got, _, err := client.Databases.Create(ctx, tt.createRequest)
+			require.NoError(t, err)
+			require.Equal(t, tt.want, got)
+		})
+	}
 }
 
 func TestDatabases_Delete(t *testing.T) {
@@ -366,11 +495,11 @@ func TestDatabases_ListBackups(t *testing.T) {
 	defer teardown()
 
 	want := []DatabaseBackup{
-		DatabaseBackup{
+		{
 			CreatedAt:     time.Date(2019, 1, 11, 18, 42, 27, 0, time.UTC),
 			SizeGigabytes: 0.03357696,
 		},
-		DatabaseBackup{
+		{
 			CreatedAt:     time.Date(2019, 1, 12, 18, 42, 29, 0, time.UTC),
 			SizeGigabytes: 0.03364864,
 		},
@@ -930,6 +1059,27 @@ func TestDatabases_DeletePool(t *testing.T) {
 	require.NoError(t, err)
 }
 
+func TestDatabases_UpdatePool(t *testing.T) {
+	setup()
+	defer teardown()
+
+	dbID := "deadbeef-dead-4aa5-beef-deadbeef347d"
+
+	path := fmt.Sprintf("/v2/databases/%s/pools/pool", dbID)
+
+	mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodPut)
+	})
+
+	_, err := client.Databases.UpdatePool(ctx, dbID, "pool", &DatabaseUpdatePoolRequest{
+		User:     "user",
+		Size:     12,
+		Database: "db",
+		Mode:     "transaction",
+	})
+	require.NoError(t, err)
+}
+
 func TestDatabases_GetReplica(t *testing.T) {
 	setup()
 	defer teardown()
@@ -939,6 +1089,7 @@ func TestDatabases_GetReplica(t *testing.T) {
 	createdAt := time.Date(2019, 01, 01, 0, 0, 0, 0, time.UTC)
 
 	want := &DatabaseReplica{
+		ID:        "326f188b-5dd1-45fc-9584-62ad553107cd",
 		Name:      "pool",
 		Region:    "nyc1",
 		Status:    "online",
@@ -968,6 +1119,7 @@ func TestDatabases_GetReplica(t *testing.T) {
 	body := `
 {
   "replica": {
+    "id": "326f188b-5dd1-45fc-9584-62ad553107cd",
     "name": "pool",
     "region": "nyc1",
     "status": "online",
@@ -1169,6 +1321,22 @@ func TestDatabases_CreateReplica(t *testing.T) {
 	require.Equal(t, want, got)
 }
 
+func TestDatabases_PromoteReplicaToPrimary(t *testing.T) {
+	setup()
+	defer teardown()
+
+	dbID := "deadbeef-dead-4aa5-beef-deadbeef347d"
+
+	path := fmt.Sprintf("/v2/databases/%s/replicas/replica/promote", dbID)
+
+	mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodPut)
+	})
+
+	_, err := client.Databases.PromoteReplicaToPrimary(ctx, dbID, "replica")
+	require.NoError(t, err)
+}
+
 func TestDatabases_DeleteReplica(t *testing.T) {
 	setup()
 	defer teardown()
@@ -1337,6 +1505,160 @@ func TestDatabases_UpdateFirewallRules(t *testing.T) {
 	require.NoError(t, err)
 }
 
+func TestDatabases_GetDatabaseOptions(t *testing.T) {
+	setup()
+	defer teardown()
+
+	path := "/v2/databases/options"
+
+	body := ` {
+		"options": {
+			"mongodb": {
+				"regions": [
+					"ams3",
+					"blr1"
+				],
+				"versions": [
+					"4.4",
+					"5.0"
+				],
+				"layouts": [
+					{
+						"num_nodes": 1,
+						"sizes": [
+							"db-s-1vcpu-1gb",
+							"db-s-1vcpu-2gb"
+						]
+					},
+					{
+						"num_nodes": 3,
+						"sizes": [
+							"so1_5-4vcpu-32gb",
+							"so1_5-32vcpu-256gb"
+						]
+					}
+				]
+			},
+			"mysql": {
+				"regions": [
+					"ams3",
+					"sgp1",
+					"tor1"
+				],
+				"versions": [
+					"8"
+				],
+				"layouts": [
+					{
+						"num_nodes": 1,
+						"sizes": [
+							"db-s-1vcpu-1gb",
+							"db-s-1vcpu-2gb"
+						]
+					},
+					{
+						"num_nodes": 2,
+						"sizes": [
+							"db-s-1vcpu-2gb",
+							"so1_5-32vcpu-256gb"
+						]
+					},
+					{
+						"num_nodes": 3,
+						"sizes": [
+							"db-s-1vcpu-2gb",
+							"so1_5-32vcpu-256gb"
+						]
+					}
+				]
+			},
+			"pg": {
+				"regions": [
+					"ams3",
+					"blr1"
+				],
+				"versions": [
+					"13",
+					"14"
+				],
+				"layouts": [
+					{
+						"num_nodes": 1,
+						"sizes": [
+							"db-s-1vcpu-1gb",
+							"db-s-1vcpu-2gb"
+						]
+					},
+					{
+						"num_nodes": 2,
+						"sizes": [
+							"db-s-1vcpu-2gb",
+							"db-s-2vcpu-4gb"
+						]
+					},
+					{
+						"num_nodes": 3,
+						"sizes": [
+							"db-s-1vcpu-2gb",
+							"db-s-2vcpu-4gb"
+						]
+					}
+				]
+			},
+			"redis": {
+				"regions": [
+					"ams3",
+					"tor1"
+				],
+				"versions": [
+					"6"
+				],
+				"layouts": [
+					{
+						"num_nodes": 1,
+						"sizes": [
+							"m-32vcpu-256gb"
+						]
+					},
+					{
+						"num_nodes": 2,
+						"sizes": [
+							"db-s-1vcpu-2gb",
+							"db-s-2vcpu-4gb",
+							"m-32vcpu-256gb"
+						]
+					}
+				]
+			}
+		}
+	} `
+
+	mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		fmt.Fprint(w, body)
+	})
+
+	options, _, err := client.Databases.ListOptions(ctx)
+	require.NoError(t, err)
+	require.NotNil(t, options)
+	require.NotNil(t, options.MongoDBOptions)
+	require.NotNil(t, options.PostgresSQLOptions)
+	require.NotNil(t, options.RedisOptions)
+	require.NotNil(t, options.MySQLOptions)
+	require.Greater(t, len(options.MongoDBOptions.Regions), 0)
+	require.Greater(t, len(options.PostgresSQLOptions.Regions), 0)
+	require.Greater(t, len(options.RedisOptions.Regions), 0)
+	require.Greater(t, len(options.MySQLOptions.Regions), 0)
+	require.Greater(t, len(options.MongoDBOptions.Versions), 0)
+	require.Greater(t, len(options.PostgresSQLOptions.Versions), 0)
+	require.Greater(t, len(options.RedisOptions.Versions), 0)
+	require.Greater(t, len(options.MySQLOptions.Versions), 0)
+	require.Greater(t, len(options.MongoDBOptions.Layouts), 0)
+	require.Greater(t, len(options.PostgresSQLOptions.Layouts), 0)
+	require.Greater(t, len(options.RedisOptions.Layouts), 0)
+	require.Greater(t, len(options.MySQLOptions.Layouts), 0)
+}
+
 func TestDatabases_CreateDatabaseUserWithMySQLSettings(t *testing.T) {
 	setup()
 	defer teardown()
@@ -1350,7 +1672,7 @@ func TestDatabases_CreateDatabaseUserWithMySQLSettings(t *testing.T) {
 			"name": "foo",
 			"mysql_settings": {
 				"auth_plugin": "%s"
-			}	
+			}
 		}
 	}`, SQLAuthPluginNative))
 	expectedUser := &DatabaseUser{
@@ -1446,3 +1768,338 @@ func TestDatabases_GetDatabaseUserWithMySQLSettings(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, expectedUser, user)
 }
+
+func TestDatabases_GetConfigPostgres(t *testing.T) {
+	setup()
+	defer teardown()
+
+	var (
+		dbSvc = client.Databases
+		dbID  = "da4e0206-d019-41d7-b51f-deadbeefbb8f"
+		path  = fmt.Sprintf("/v2/databases/%s/config", dbID)
+
+		postgresConfigJSON = `{
+  "config": {
+    "autovacuum_naptime": 60,
+    "autovacuum_vacuum_threshold": 50,
+    "autovacuum_analyze_threshold": 50,
+    "autovacuum_vacuum_scale_factor": 0.2,
+    "autovacuum_analyze_scale_factor": 0.2,
+    "autovacuum_vacuum_cost_delay": 20,
+    "autovacuum_vacuum_cost_limit": -1,
+    "bgwriter_flush_after": 512,
+    "bgwriter_lru_maxpages": 100,
+    "bgwriter_lru_multiplier": 2,
+    "idle_in_transaction_session_timeout": 0,
+    "jit": true,
+    "log_autovacuum_min_duration": -1,
+    "log_min_duration_statement": -1,
+    "max_prepared_transactions": 0,
+    "max_parallel_workers": 8,
+    "max_parallel_workers_per_gather": 2,
+    "temp_file_limit": -1,
+    "wal_sender_timeout": 60000,
+    "backup_hour": 18,
+    "backup_minute": 26
+  }
+}`
+
+		postgresConfig = PostgreSQLConfig{
+			AutovacuumNaptime:               PtrTo(60),
+			AutovacuumVacuumThreshold:       PtrTo(50),
+			AutovacuumAnalyzeThreshold:      PtrTo(50),
+			AutovacuumVacuumScaleFactor:     PtrTo(float32(0.2)),
+			AutovacuumAnalyzeScaleFactor:    PtrTo(float32(0.2)),
+			AutovacuumVacuumCostDelay:       PtrTo(20),
+			AutovacuumVacuumCostLimit:       PtrTo(-1),
+			BGWriterFlushAfter:              PtrTo(512),
+			BGWriterLRUMaxpages:             PtrTo(100),
+			BGWriterLRUMultiplier:           PtrTo(float32(2)),
+			IdleInTransactionSessionTimeout: PtrTo(0),
+			JIT:                             PtrTo(true),
+			LogAutovacuumMinDuration:        PtrTo(-1),
+			LogMinDurationStatement:         PtrTo(-1),
+			MaxPreparedTransactions:         PtrTo(0),
+			MaxParallelWorkers:              PtrTo(8),
+			MaxParallelWorkersPerGather:     PtrTo(2),
+			TempFileLimit:                   PtrTo(-1),
+			WalSenderTimeout:                PtrTo(60000),
+			BackupHour:                      PtrTo(18),
+			BackupMinute:                    PtrTo(26),
+		}
+	)
+
+	mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		fmt.Fprint(w, postgresConfigJSON)
+	})
+
+	got, _, err := dbSvc.GetPostgreSQLConfig(ctx, dbID)
+	require.NoError(t, err)
+	require.Equal(t, &postgresConfig, got)
+}
+
+func TestDatabases_UpdateConfigPostgres(t *testing.T) {
+	setup()
+	defer teardown()
+
+	var (
+		dbID           = "deadbeef-dead-4aa5-beef-deadbeef347d"
+		path           = fmt.Sprintf("/v2/databases/%s/config", dbID)
+		postgresConfig = &PostgreSQLConfig{
+			AutovacuumNaptime:          PtrTo(75),
+			AutovacuumVacuumThreshold:  PtrTo(45),
+			AutovacuumAnalyzeThreshold: PtrTo(45),
+			MaxPreparedTransactions:    PtrTo(0),
+		}
+	)
+
+	mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodPatch)
+
+		var b databasePostgreSQLConfigRoot
+		decoder := json.NewDecoder(r.Body)
+		err := decoder.Decode(&b)
+		require.NoError(t, err)
+
+		assert.Equal(t, b.Config, postgresConfig)
+		assert.Equal(t, 0, *b.Config.MaxPreparedTransactions, "pointers to zero value should be sent")
+		assert.Nil(t, b.Config.MaxParallelWorkers, "excluded value should not be sent")
+
+		w.WriteHeader(http.StatusNoContent)
+	})
+
+	_, err := client.Databases.UpdatePostgreSQLConfig(ctx, dbID, postgresConfig)
+	require.NoError(t, err)
+}
+
+func TestDatabases_GetConfigRedis(t *testing.T) {
+	setup()
+	defer teardown()
+
+	var (
+		dbSvc = client.Databases
+		dbID  = "da4e0206-d019-41d7-b51f-deadbeefbb8f"
+		path  = fmt.Sprintf("/v2/databases/%s/config", dbID)
+
+		redisConfigJSON = `{
+  "config": {
+    "redis_maxmemory_policy": "allkeys-lru",
+    "redis_lfu_log_factor": 10,
+    "redis_lfu_decay_time": 1,
+    "redis_ssl": true,
+    "redis_timeout": 300,
+    "redis_notify_keyspace_events": "",
+    "redis_persistence": "off",
+    "redis_acl_channels_default": "allchannels"
+  }
+}`
+
+		redisConfig = RedisConfig{
+			RedisMaxmemoryPolicy:      PtrTo("allkeys-lru"),
+			RedisLFULogFactor:         PtrTo(10),
+			RedisLFUDecayTime:         PtrTo(1),
+			RedisSSL:                  PtrTo(true),
+			RedisTimeout:              PtrTo(300),
+			RedisNotifyKeyspaceEvents: PtrTo(""),
+			RedisPersistence:          PtrTo("off"),
+			RedisACLChannelsDefault:   PtrTo("allchannels"),
+		}
+	)
+
+	mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		fmt.Fprint(w, redisConfigJSON)
+	})
+
+	got, _, err := dbSvc.GetRedisConfig(ctx, dbID)
+	require.NoError(t, err)
+	require.Equal(t, &redisConfig, got)
+}
+
+func TestDatabases_UpdateConfigRedis(t *testing.T) {
+	setup()
+	defer teardown()
+
+	var (
+		dbID        = "deadbeef-dead-4aa5-beef-deadbeef347d"
+		path        = fmt.Sprintf("/v2/databases/%s/config", dbID)
+		redisConfig = &RedisConfig{
+			RedisMaxmemoryPolicy:      PtrTo("allkeys-lru"),
+			RedisLFULogFactor:         PtrTo(10),
+			RedisNotifyKeyspaceEvents: PtrTo(""),
+		}
+	)
+
+	mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodPatch)
+
+		var b databaseRedisConfigRoot
+		decoder := json.NewDecoder(r.Body)
+		err := decoder.Decode(&b)
+		require.NoError(t, err)
+
+		assert.Equal(t, b.Config, redisConfig)
+		assert.Equal(t, "", *b.Config.RedisNotifyKeyspaceEvents, "pointers to zero value should be sent")
+		assert.Nil(t, b.Config.RedisPersistence, "excluded value should not be sent")
+
+		w.WriteHeader(http.StatusNoContent)
+	})
+
+	_, err := client.Databases.UpdateRedisConfig(ctx, dbID, redisConfig)
+	require.NoError(t, err)
+}
+
+func TestDatabases_UpdateConfigRedisNormalizeEvictionPolicy(t *testing.T) {
+	type test struct {
+		input string
+		want  string
+	}
+
+	tests := []test{
+		{input: EvictionPolicyAllKeysLRU, want: "allkeys-lru"},
+		{input: EvictionPolicyAllKeysRandom, want: "allkeys-random"},
+		{input: EvictionPolicyVolatileLRU, want: "volatile-lru"},
+		{input: EvictionPolicyVolatileRandom, want: "volatile-random"},
+		{input: EvictionPolicyVolatileTTL, want: "volatile-ttl"},
+		{input: "allkeys-lru", want: "allkeys-lru"},
+		{input: "allkeys-random", want: "allkeys-random"},
+		{input: "volatile-lru", want: "volatile-lru"},
+		{input: "volatile-random", want: "volatile-random"},
+		{input: "volatile-ttl", want: "volatile-ttl"},
+		{input: "some_unknown_value", want: "some_unknown_value"},
+	}
+
+	for _, tt := range tests {
+		setup()
+		defer teardown()
+
+		var (
+			dbID        = "deadbeef-dead-4aa5-beef-deadbeef347d"
+			path        = fmt.Sprintf("/v2/databases/%s/config", dbID)
+			redisConfig = &RedisConfig{
+				RedisMaxmemoryPolicy: PtrTo(tt.input),
+			}
+		)
+
+		mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
+			testMethod(t, r, http.MethodPatch)
+
+			var b databaseRedisConfigRoot
+			decoder := json.NewDecoder(r.Body)
+			err := decoder.Decode(&b)
+			require.NoError(t, err)
+			assert.Equal(t, tt.want, *b.Config.RedisMaxmemoryPolicy)
+
+			w.WriteHeader(http.StatusNoContent)
+		})
+
+		_, err := client.Databases.UpdateRedisConfig(ctx, dbID, redisConfig)
+		require.NoError(t, err)
+	}
+}
+
+func TestDatabases_GetConfigMySQL(t *testing.T) {
+	setup()
+	defer teardown()
+
+	var (
+		dbSvc = client.Databases
+		dbID  = "da4e0206-d019-41d7-b51f-deadbeefbb8f"
+		path  = fmt.Sprintf("/v2/databases/%s/config", dbID)
+
+		mySQLConfigJSON = `{
+  "config": {
+    "sql_mode": "ANSI,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION,NO_ZERO_DATE,NO_ZERO_IN_DATE,STRICT_ALL_TABLES",
+    "sql_require_primary_key": true,
+    "innodb_ft_min_token_size": 3,
+    "innodb_ft_server_stopword_table": "",
+    "innodb_print_all_deadlocks": false,
+    "innodb_rollback_on_timeout": false,
+    "slow_query_log": false,
+    "long_query_time": 10,
+    "backup_hour": 21,
+    "backup_minute": 59
+  }
+}`
+
+		mySQLConfig = MySQLConfig{
+			SQLMode:                     PtrTo("ANSI,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION,NO_ZERO_DATE,NO_ZERO_IN_DATE,STRICT_ALL_TABLES"),
+			SQLRequirePrimaryKey:        PtrTo(true),
+			InnodbFtMinTokenSize:        PtrTo(3),
+			InnodbFtServerStopwordTable: PtrTo(""),
+			InnodbPrintAllDeadlocks:     PtrTo(false),
+			InnodbRollbackOnTimeout:     PtrTo(false),
+			SlowQueryLog:                PtrTo(false),
+			LongQueryTime:               PtrTo(float32(10)),
+			BackupHour:                  PtrTo(21),
+			BackupMinute:                PtrTo(59),
+		}
+	)
+
+	mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		fmt.Fprint(w, mySQLConfigJSON)
+	})
+
+	got, _, err := dbSvc.GetMySQLConfig(ctx, dbID)
+	require.NoError(t, err)
+	require.Equal(t, &mySQLConfig, got)
+}
+
+func TestDatabases_UpdateConfigMySQL(t *testing.T) {
+	setup()
+	defer teardown()
+
+	var (
+		dbID        = "deadbeef-dead-4aa5-beef-deadbeef347d"
+		path        = fmt.Sprintf("/v2/databases/%s/config", dbID)
+		mySQLConfig = &MySQLConfig{
+			SQLRequirePrimaryKey:        PtrTo(true),
+			InnodbFtMinTokenSize:        PtrTo(3),
+			InnodbFtServerStopwordTable: PtrTo(""),
+		}
+	)
+
+	mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodPatch)
+
+		var b databaseMySQLConfigRoot
+		decoder := json.NewDecoder(r.Body)
+		err := decoder.Decode(&b)
+		require.NoError(t, err)
+
+		assert.Equal(t, b.Config, mySQLConfig)
+		assert.Equal(t, "", *b.Config.InnodbFtServerStopwordTable, "pointers to zero value should be sent")
+		assert.Nil(t, b.Config.InnodbPrintAllDeadlocks, "excluded value should not be sent")
+
+		w.WriteHeader(http.StatusNoContent)
+	})
+
+	_, err := client.Databases.UpdateMySQLConfig(ctx, dbID, mySQLConfig)
+	require.NoError(t, err)
+}
+
+func TestDatabases_UpgradeMajorVersion(t *testing.T) {
+	setup()
+	defer teardown()
+
+	var (
+		dbID              = "deadbeef-dead-4aa5-beef-deadbeef347d"
+		path              = fmt.Sprintf("/v2/databases/%s/upgrade", dbID)
+		upgradeVersionReq = &UpgradeVersionRequest{
+			Version: "14",
+		}
+	)
+	mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodPut)
+		var b UpgradeVersionRequest
+		decoder := json.NewDecoder(r.Body)
+		err := decoder.Decode(&b)
+		require.NoError(t, err)
+		assert.Equal(t, b.Version, upgradeVersionReq.Version)
+		w.WriteHeader(http.StatusNoContent)
+	})
+	_, err := client.Databases.UpgradeMajorVersion(ctx, dbID, upgradeVersionReq)
+	require.NoError(t, err)
+}
diff --git a/debian/changelog b/debian/changelog
index f6c0019..0c0f4fc 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+golang-github-digitalocean-godo (1.99.0-1) UNRELEASED; urgency=low
+
+  * New upstream release.
+
+ -- Debian Janitor <janitor@jelmer.uk>  Tue, 20 Jun 2023 01:24:02 -0000
+
 golang-github-digitalocean-godo (1.37.0-2) unstable; urgency=medium
 
   [ Debian Janitor ]
diff --git a/doc.go b/doc.go
index ac812e9..113b02a 100644
--- a/doc.go
+++ b/doc.go
@@ -1,2 +1,2 @@
-// Package godo is the DigtalOcean API v2 client for Go.
+// Package godo is the DigitalOcean API v2 client for Go.
 package godo
diff --git a/domains.go b/domains.go
index 43c0424..6a86296 100644
--- a/domains.go
+++ b/domains.go
@@ -9,8 +9,8 @@ import (
 const domainsBasePath = "v2/domains"
 
 // DomainsService is an interface for managing DNS with the DigitalOcean API.
-// See: https://developers.digitalocean.com/documentation/v2#domains and
-// https://developers.digitalocean.com/documentation/v2#domain-records
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Domains and
+// https://docs.digitalocean.com/reference/api/api-reference/#tag/Domain-Records
 type DomainsService interface {
 	List(context.Context, *ListOptions) ([]Domain, *Response, error)
 	Get(context.Context, string) (*Domain, *Response, error)
@@ -18,6 +18,9 @@ type DomainsService interface {
 	Delete(context.Context, string) (*Response, error)
 
 	Records(context.Context, string, *ListOptions) ([]DomainRecord, *Response, error)
+	RecordsByType(context.Context, string, string, *ListOptions) ([]DomainRecord, *Response, error)
+	RecordsByName(context.Context, string, string, *ListOptions) ([]DomainRecord, *Response, error)
+	RecordsByTypeAndName(context.Context, string, string, string, *ListOptions) ([]DomainRecord, *Response, error)
 	Record(context.Context, string, int) (*DomainRecord, *Response, error)
 	DeleteRecord(context.Context, string, int) (*Response, error)
 	EditRecord(context.Context, string, int, *DomainRecordEditRequest) (*DomainRecord, *Response, error)
@@ -50,7 +53,7 @@ type domainsRoot struct {
 	Meta    *Meta    `json:"meta"`
 }
 
-// DomainCreateRequest respresents a request to create a domain.
+// DomainCreateRequest represents a request to create a domain.
 type DomainCreateRequest struct {
 	Name      string `json:"name"`
 	IPAddress string `json:"ip_address,omitempty"`
@@ -69,12 +72,12 @@ type domainRecordsRoot struct {
 
 // DomainRecord represents a DigitalOcean DomainRecord
 type DomainRecord struct {
-	ID       int    `json:"id,float64,omitempty"`
+	ID       int    `json:"id,omitempty"`
 	Type     string `json:"type,omitempty"`
 	Name     string `json:"name,omitempty"`
 	Data     string `json:"data,omitempty"`
 	Priority int    `json:"priority"`
-	Port     int    `json:"port,omitempty"`
+	Port     int    `json:"port"`
 	TTL      int    `json:"ttl,omitempty"`
 	Weight   int    `json:"weight"`
 	Flags    int    `json:"flags"`
@@ -87,7 +90,7 @@ type DomainRecordEditRequest struct {
 	Name     string `json:"name,omitempty"`
 	Data     string `json:"data,omitempty"`
 	Priority int    `json:"priority"`
-	Port     int    `json:"port,omitempty"`
+	Port     int    `json:"port"`
 	TTL      int    `json:"ttl,omitempty"`
 	Weight   int    `json:"weight"`
 	Flags    int    `json:"flags"`
@@ -98,6 +101,7 @@ func (d Domain) String() string {
 	return Stringify(d)
 }
 
+// URN returns the domain name in a valid DO API URN form.
 func (d Domain) URN() string {
 	return ToURN("Domain", d.Name)
 }
@@ -201,7 +205,7 @@ func (d DomainRecordEditRequest) String() string {
 	return Stringify(d)
 }
 
-// Records returns a slice of DomainRecords for a domain
+// Records returns a slice of DomainRecord for a domain.
 func (s *DomainsServiceOp) Records(ctx context.Context, domain string, opt *ListOptions) ([]DomainRecord, *Response, error) {
 	if len(domain) < 1 {
 		return nil, nil, NewArgError("domain", "cannot be an empty string")
@@ -213,21 +217,68 @@ func (s *DomainsServiceOp) Records(ctx context.Context, domain string, opt *List
 		return nil, nil, err
 	}
 
-	req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
+	return s.records(ctx, path)
+}
+
+// RecordsByType returns a slice of DomainRecord for a domain matched by record type.
+func (s *DomainsServiceOp) RecordsByType(ctx context.Context, domain, ofType string, opt *ListOptions) ([]DomainRecord, *Response, error) {
+	if len(domain) < 1 {
+		return nil, nil, NewArgError("domain", "cannot be an empty string")
+	}
+
+	if len(ofType) < 1 {
+		return nil, nil, NewArgError("type", "cannot be an empty string")
+	}
+
+	path := fmt.Sprintf("%s/%s/records?type=%s", domainsBasePath, domain, ofType)
+	path, err := addOptions(path, opt)
 	if err != nil {
 		return nil, nil, err
 	}
 
-	root := new(domainRecordsRoot)
-	resp, err := s.client.Do(ctx, req, root)
+	return s.records(ctx, path)
+}
+
+// RecordsByName returns a slice of DomainRecord for a domain matched by record name.
+func (s *DomainsServiceOp) RecordsByName(ctx context.Context, domain, name string, opt *ListOptions) ([]DomainRecord, *Response, error) {
+	if len(domain) < 1 {
+		return nil, nil, NewArgError("domain", "cannot be an empty string")
+	}
+
+	if len(name) < 1 {
+		return nil, nil, NewArgError("name", "cannot be an empty string")
+	}
+
+	path := fmt.Sprintf("%s/%s/records?name=%s", domainsBasePath, domain, name)
+	path, err := addOptions(path, opt)
 	if err != nil {
-		return nil, resp, err
+		return nil, nil, err
 	}
-	if l := root.Links; l != nil {
-		resp.Links = l
+
+	return s.records(ctx, path)
+}
+
+// RecordsByTypeAndName returns a slice of DomainRecord for a domain matched by record type and name.
+func (s *DomainsServiceOp) RecordsByTypeAndName(ctx context.Context, domain, ofType, name string, opt *ListOptions) ([]DomainRecord, *Response, error) {
+	if len(domain) < 1 {
+		return nil, nil, NewArgError("domain", "cannot be an empty string")
 	}
 
-	return root.DomainRecords, resp, err
+	if len(ofType) < 1 {
+		return nil, nil, NewArgError("type", "cannot be an empty string")
+	}
+
+	if len(name) < 1 {
+		return nil, nil, NewArgError("name", "cannot be an empty string")
+	}
+
+	path := fmt.Sprintf("%s/%s/records?type=%s&name=%s", domainsBasePath, domain, ofType, name)
+	path, err := addOptions(path, opt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return s.records(ctx, path)
 }
 
 // Record returns the record id from a domain
@@ -339,3 +390,22 @@ func (s *DomainsServiceOp) CreateRecord(ctx context.Context,
 
 	return d.DomainRecord, resp, err
 }
+
+// records performs a domain records request given a path.
+func (s *DomainsServiceOp) records(ctx context.Context, path string) ([]DomainRecord, *Response, error) {
+	req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(domainRecordsRoot)
+	resp, err := s.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	if l := root.Links; l != nil {
+		resp.Links = l
+	}
+
+	return root.DomainRecords, resp, err
+}
diff --git a/domains_test.go b/domains_test.go
index f79e24e..8f3e3b7 100644
--- a/domains_test.go
+++ b/domains_test.go
@@ -4,8 +4,11 @@ import (
 	"encoding/json"
 	"fmt"
 	"net/http"
-	"reflect"
+	"strconv"
 	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 func TestDomains_ListDomains(t *testing.T) {
@@ -30,19 +33,13 @@ func TestDomains_ListDomains(t *testing.T) {
 	})
 
 	domains, resp, err := client.Domains.List(ctx, nil)
-	if err != nil {
-		t.Errorf("Domains.List returned error: %v", err)
-	}
+	require.NoError(t, err)
 
 	expectedDomains := []Domain{{Name: "foo.com"}, {Name: "bar.com"}}
-	if !reflect.DeepEqual(domains, expectedDomains) {
-		t.Errorf("Domains.List returned domains %+v, expected %+v", domains, expectedDomains)
-	}
+	assert.Equal(t, expectedDomains, domains)
 
 	expectedMeta := &Meta{Total: 2}
-	if !reflect.DeepEqual(resp.Meta, expectedMeta) {
-		t.Errorf("Domains.List returned meta %+v, expected %+v", resp.Meta, expectedMeta)
-	}
+	assert.Equal(t, expectedMeta, resp.Meta)
 }
 
 func TestDomains_ListDomainsMultiplePages(t *testing.T) {
@@ -106,14 +103,10 @@ func TestDomains_GetDomain(t *testing.T) {
 	})
 
 	domains, _, err := client.Domains.Get(ctx, "example.com")
-	if err != nil {
-		t.Errorf("domain.Get returned error: %v", err)
-	}
+	require.NoError(t, err)
 
 	expected := &Domain{Name: "example.com"}
-	if !reflect.DeepEqual(domains, expected) {
-		t.Errorf("domains.Get returned %+v, expected %+v", domains, expected)
-	}
+	assert.Equal(t, expected, domains)
 }
 
 func TestDomains_Create(t *testing.T) {
@@ -133,22 +126,16 @@ func TestDomains_Create(t *testing.T) {
 		}
 
 		testMethod(t, r, http.MethodPost)
-		if !reflect.DeepEqual(v, createRequest) {
-			t.Errorf("Request body = %+v, expected %+v", v, createRequest)
-		}
+		assert.Equal(t, createRequest, v)
 
 		fmt.Fprint(w, `{"domain":{"name":"example.com"}}`)
 	})
 
 	domain, _, err := client.Domains.Create(ctx, createRequest)
-	if err != nil {
-		t.Errorf("Domains.Create returned error: %v", err)
-	}
+	require.NoError(t, err)
 
 	expected := &Domain{Name: "example.com"}
-	if !reflect.DeepEqual(domain, expected) {
-		t.Errorf("Domains.Create returned %+v, expected %+v", domain, expected)
-	}
+	assert.Equal(t, expected, domain)
 }
 
 func TestDomains_Destroy(t *testing.T) {
@@ -160,9 +147,8 @@ func TestDomains_Destroy(t *testing.T) {
 	})
 
 	_, err := client.Domains.Delete(ctx, "example.com")
-	if err != nil {
-		t.Errorf("Domains.Delete returned error: %v", err)
-	}
+
+	assert.NoError(t, err)
 }
 
 func TestDomains_AllRecordsForDomainName(t *testing.T) {
@@ -175,14 +161,10 @@ func TestDomains_AllRecordsForDomainName(t *testing.T) {
 	})
 
 	records, _, err := client.Domains.Records(ctx, "example.com", nil)
-	if err != nil {
-		t.Errorf("Domains.List returned error: %v", err)
-	}
+	require.NoError(t, err)
 
 	expected := []DomainRecord{{ID: 1}, {ID: 2}}
-	if !reflect.DeepEqual(records, expected) {
-		t.Errorf("Domains.List returned %+v, expected %+v", records, expected)
-	}
+	assert.Equal(t, expected, records)
 }
 
 func TestDomains_AllRecordsForDomainName_PerPage(t *testing.T) {
@@ -200,13 +182,163 @@ func TestDomains_AllRecordsForDomainName_PerPage(t *testing.T) {
 
 	dro := &ListOptions{PerPage: 2}
 	records, _, err := client.Domains.Records(ctx, "example.com", dro)
-	if err != nil {
-		t.Errorf("Domains.List returned error: %v", err)
-	}
+	require.NoError(t, err)
 
 	expected := []DomainRecord{{ID: 1}, {ID: 2}}
-	if !reflect.DeepEqual(records, expected) {
-		t.Errorf("Domains.List returned %+v, expected %+v", records, expected)
+	assert.Equal(t, expected, records)
+}
+
+func TestDomains_RecordsByType(t *testing.T) {
+	tests := []struct {
+		name        string
+		recordType  string
+		pagination  *ListOptions
+		expectedErr *ArgError
+	}{
+		{
+			name:       "success",
+			recordType: "CNAME",
+		},
+		{
+			name:        "when record type is empty it returns argument error",
+			expectedErr: &ArgError{arg: "type", reason: "cannot be an empty string"},
+		},
+		{
+			name:       "with pagination",
+			recordType: "CNAME",
+			pagination: &ListOptions{Page: 1, PerPage: 10},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			setup()
+			defer teardown()
+
+			mux.HandleFunc("/v2/domains/example.com/records", func(w http.ResponseWriter, r *http.Request) {
+				require.Equal(t, tt.recordType, r.URL.Query().Get("type"))
+				if tt.pagination != nil {
+					require.Equal(t, strconv.Itoa(tt.pagination.Page), r.URL.Query().Get("page"))
+					require.Equal(t, strconv.Itoa(tt.pagination.PerPage), r.URL.Query().Get("per_page"))
+				}
+				testMethod(t, r, http.MethodGet)
+				fmt.Fprint(w, `{"domain_records":[{"id":1},{"id":2}]}`)
+			})
+
+			records, _, err := client.Domains.RecordsByType(ctx, "example.com", tt.recordType, tt.pagination)
+			if tt.expectedErr != nil {
+				assert.Equal(t, tt.expectedErr, err)
+			} else {
+				expected := []DomainRecord{{ID: 1}, {ID: 2}}
+				assert.Equal(t, expected, records)
+			}
+		})
+	}
+}
+
+func TestDomains_RecordsByName(t *testing.T) {
+	tests := []struct {
+		name        string
+		recordName  string
+		pagination  *ListOptions
+		expectedErr *ArgError
+	}{
+		{
+			name:       "success",
+			recordName: "foo.com",
+		},
+		{
+			name:        "when record name is empty it returns argument error",
+			expectedErr: &ArgError{arg: "name", reason: "cannot be an empty string"},
+		},
+		{
+			name:       "with pagination",
+			recordName: "foo.com",
+			pagination: &ListOptions{Page: 2, PerPage: 1},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			setup()
+			defer teardown()
+
+			mux.HandleFunc("/v2/domains/example.com/records", func(w http.ResponseWriter, r *http.Request) {
+				require.Equal(t, tt.recordName, r.URL.Query().Get("name"))
+				if tt.pagination != nil {
+					require.Equal(t, strconv.Itoa(tt.pagination.Page), r.URL.Query().Get("page"))
+					require.Equal(t, strconv.Itoa(tt.pagination.PerPage), r.URL.Query().Get("per_page"))
+				}
+				testMethod(t, r, http.MethodGet)
+				fmt.Fprint(w, `{"domain_records":[{"id":1},{"id":2}]}`)
+			})
+
+			records, _, err := client.Domains.RecordsByName(ctx, "example.com", tt.recordName, tt.pagination)
+			if tt.expectedErr != nil {
+				assert.Equal(t, tt.expectedErr, err)
+			} else {
+				expected := []DomainRecord{{ID: 1}, {ID: 2}}
+				assert.Equal(t, expected, records)
+			}
+		})
+	}
+}
+
+func TestDomains_RecordsByTypeAndName(t *testing.T) {
+	tests := []struct {
+		name        string
+		recordType  string
+		recordName  string
+		pagination  *ListOptions
+		expectedErr *ArgError
+	}{
+		{
+			name:       "success",
+			recordType: "NS",
+			recordName: "foo.com",
+		},
+		{
+			name:        "when record type is empty it returns argument error",
+			recordName:  "foo.com",
+			expectedErr: &ArgError{arg: "type", reason: "cannot be an empty string"},
+		},
+		{
+			name:        "when record name is empty it returns argument error",
+			recordType:  "NS",
+			expectedErr: &ArgError{arg: "name", reason: "cannot be an empty string"},
+		},
+		{
+			name:       "with pagination",
+			recordType: "CNAME",
+			recordName: "foo.com",
+			pagination: &ListOptions{Page: 1, PerPage: 1},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			setup()
+			defer teardown()
+
+			mux.HandleFunc("/v2/domains/example.com/records", func(w http.ResponseWriter, r *http.Request) {
+				require.Equal(t, tt.recordType, r.URL.Query().Get("type"))
+				require.Equal(t, tt.recordName, r.URL.Query().Get("name"))
+				if tt.pagination != nil {
+					require.Equal(t, strconv.Itoa(tt.pagination.Page), r.URL.Query().Get("page"))
+					require.Equal(t, strconv.Itoa(tt.pagination.PerPage), r.URL.Query().Get("per_page"))
+				}
+				testMethod(t, r, http.MethodGet)
+				fmt.Fprint(w, `{"domain_records":[{"id":1},{"id":2}]}`)
+			})
+
+			records, _, err := client.Domains.RecordsByTypeAndName(ctx, "example.com", tt.recordType, tt.recordName, tt.pagination)
+			if tt.expectedErr != nil {
+				assert.Equal(t, tt.expectedErr, err)
+			} else {
+				expected := []DomainRecord{{ID: 1}, {ID: 2}}
+				assert.Equal(t, expected, records)
+			}
+		})
 	}
 }
 
@@ -220,14 +352,10 @@ func TestDomains_GetRecordforDomainName(t *testing.T) {
 	})
 
 	record, _, err := client.Domains.Record(ctx, "example.com", 1)
-	if err != nil {
-		t.Errorf("Domains.GetRecord returned error: %v", err)
-	}
+	require.NoError(t, err)
 
 	expected := &DomainRecord{ID: 1}
-	if !reflect.DeepEqual(record, expected) {
-		t.Errorf("Domains.GetRecord returned %+v, expected %+v", record, expected)
-	}
+	assert.Equal(t, expected, record)
 }
 
 func TestDomains_DeleteRecordForDomainName(t *testing.T) {
@@ -239,9 +367,8 @@ func TestDomains_DeleteRecordForDomainName(t *testing.T) {
 	})
 
 	_, err := client.Domains.DeleteRecord(ctx, "example.com", 1)
-	if err != nil {
-		t.Errorf("Domains.RecordDelete returned error: %v", err)
-	}
+
+	assert.NoError(t, err)
 }
 
 func TestDomains_CreateRecordForDomainName(t *testing.T) {
@@ -265,27 +392,19 @@ func TestDomains_CreateRecordForDomainName(t *testing.T) {
 			v := new(DomainRecordEditRequest)
 			err := json.NewDecoder(r.Body).Decode(v)
 
-			if err != nil {
-				t.Fatalf("decode json: %v", err)
-			}
+			require.NoError(t, err)
 
 			testMethod(t, r, http.MethodPost)
-			if !reflect.DeepEqual(v, createRequest) {
-				t.Errorf("Request body = %+v, expected %+v", v, createRequest)
-			}
+			assert.Equal(t, createRequest, v)
 
 			fmt.Fprintf(w, `{"domain_record": {"id":1}}`)
 		})
 
 	record, _, err := client.Domains.CreateRecord(ctx, "example.com", createRequest)
-	if err != nil {
-		t.Errorf("Domains.CreateRecord returned error: %v", err)
-	}
+	require.NoError(t, err)
 
 	expected := &DomainRecord{ID: 1}
-	if !reflect.DeepEqual(record, expected) {
-		t.Errorf("Domains.CreateRecord returned %+v, expected %+v", record, expected)
-	}
+	assert.Equal(t, expected, record)
 }
 
 func TestDomains_EditRecordForDomainName(t *testing.T) {
@@ -312,22 +431,16 @@ func TestDomains_EditRecordForDomainName(t *testing.T) {
 		}
 
 		testMethod(t, r, http.MethodPut)
-		if !reflect.DeepEqual(v, editRequest) {
-			t.Errorf("Request body = %+v, expected %+v", v, editRequest)
-		}
+		assert.Equal(t, editRequest, v)
 
 		fmt.Fprintf(w, `{"domain_record": {"id":1, "type": "CNAME", "name": "example"}}`)
 	})
 
 	record, _, err := client.Domains.EditRecord(ctx, "example.com", 1, editRequest)
-	if err != nil {
-		t.Errorf("Domains.EditRecord returned error: %v", err)
-	}
+	require.NoError(t, err)
 
 	expected := &DomainRecord{ID: 1, Type: "CNAME", Name: "example"}
-	if !reflect.DeepEqual(record, expected) {
-		t.Errorf("Domains.EditRecord returned %+v, expected %+v", record, expected)
-	}
+	assert.Equal(t, expected, record)
 }
 
 func TestDomainRecord_String(t *testing.T) {
@@ -346,9 +459,7 @@ func TestDomainRecord_String(t *testing.T) {
 
 	stringified := record.String()
 	expected := `godo.DomainRecord{ID:1, Type:"CNAME", Name:"example", Data:"@", Priority:10, Port:10, TTL:1800, Weight:10, Flags:1, Tag:"test"}`
-	if expected != stringified {
-		t.Errorf("DomainRecord.String returned %+v, expected %+v", stringified, expected)
-	}
+	assert.Equal(t, expected, stringified)
 }
 
 func TestDomainRecordEditRequest_String(t *testing.T) {
@@ -366,7 +477,5 @@ func TestDomainRecordEditRequest_String(t *testing.T) {
 
 	stringified := record.String()
 	expected := `godo.DomainRecordEditRequest{Type:"CNAME", Name:"example", Data:"@", Priority:10, Port:10, TTL:1800, Weight:10, Flags:1, Tag:"test"}`
-	if expected != stringified {
-		t.Errorf("DomainRecordEditRequest.String returned %+v, expected %+v", stringified, expected)
-	}
+	assert.Equal(t, expected, stringified)
 }
diff --git a/droplet_actions.go b/droplet_actions.go
index ddeacfc..2e09d0c 100644
--- a/droplet_actions.go
+++ b/droplet_actions.go
@@ -7,12 +7,12 @@ import (
 	"net/url"
 )
 
-// ActionRequest reprents DigitalOcean Action Request
+// ActionRequest represents DigitalOcean Action Request
 type ActionRequest map[string]interface{}
 
 // DropletActionsService is an interface for interfacing with the Droplet actions
 // endpoints of the DigitalOcean API
-// See: https://developers.digitalocean.com/documentation/v2#droplet-actions
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Droplet-Actions
 type DropletActionsService interface {
 	Shutdown(context.Context, int) (*Action, *Response, error)
 	ShutdownByTag(context.Context, string) ([]Action, *Response, error)
@@ -293,7 +293,7 @@ func (s *DropletActionsServiceOp) Get(ctx context.Context, dropletID, actionID i
 	return s.get(ctx, path)
 }
 
-// GetByURI gets an action for a particular Droplet by id.
+// GetByURI gets an action for a particular Droplet by URI.
 func (s *DropletActionsServiceOp) GetByURI(ctx context.Context, rawurl string) (*Action, *Response, error) {
 	u, err := url.Parse(rawurl)
 	if err != nil {
diff --git a/droplets.go b/droplets.go
index 72edf2b..5f19863 100644
--- a/droplets.go
+++ b/droplets.go
@@ -14,9 +14,10 @@ var errNoNetworks = errors.New("no networks have been defined")
 
 // DropletsService is an interface for interfacing with the Droplet
 // endpoints of the DigitalOcean API
-// See: https://developers.digitalocean.com/documentation/v2#droplets
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Droplets
 type DropletsService interface {
 	List(context.Context, *ListOptions) ([]Droplet, *Response, error)
+	ListByName(context.Context, string, *ListOptions) ([]Droplet, *Response, error)
 	ListByTag(context.Context, string, *ListOptions) ([]Droplet, *Response, error)
 	Get(context.Context, int) (*Droplet, *Response, error)
 	Create(context.Context, *DropletCreateRequest) (*Droplet, *Response, error)
@@ -126,6 +127,7 @@ func (d Droplet) String() string {
 	return Stringify(d)
 }
 
+// URN returns the droplet ID in a valid DO API URN form.
 func (d Droplet) URN() string {
 	return ToURN("Droplet", d.ID)
 }
@@ -176,25 +178,25 @@ func (d DropletCreateImage) MarshalJSON() ([]byte, error) {
 	return json.Marshal(d.ID)
 }
 
-// DropletCreateVolume identifies a volume to attach for the create request. It
-// prefers Name over ID,
+// DropletCreateVolume identifies a volume to attach for the create request.
 type DropletCreateVolume struct {
-	ID   string
+	ID string
+	// Deprecated: You must pass the volume's ID when creating a Droplet.
 	Name string
 }
 
-// MarshalJSON returns an object with either the name or id of the volume. It
-// returns the id if the name is empty.
+// MarshalJSON returns an object with either the ID or name of the volume. It
+// prefers the ID over the name.
 func (d DropletCreateVolume) MarshalJSON() ([]byte, error) {
-	if d.Name != "" {
+	if d.ID != "" {
 		return json.Marshal(struct {
-			Name string `json:"name"`
-		}{Name: d.Name})
+			ID string `json:"id"`
+		}{ID: d.ID})
 	}
 
 	return json.Marshal(struct {
-		ID string `json:"id"`
-	}{ID: d.ID})
+		Name string `json:"name"`
+	}{Name: d.Name})
 }
 
 // DropletCreateSSHKey identifies a SSH Key for the create request. It prefers fingerprint over ID.
@@ -228,6 +230,7 @@ type DropletCreateRequest struct {
 	Volumes           []DropletCreateVolume `json:"volumes,omitempty"`
 	Tags              []string              `json:"tags"`
 	VPCUUID           string                `json:"vpc_uuid,omitempty"`
+	WithDropletAgent  *bool                 `json:"with_droplet_agent,omitempty"`
 }
 
 // DropletMultiCreateRequest is a request to create multiple Droplets.
@@ -244,6 +247,7 @@ type DropletMultiCreateRequest struct {
 	UserData          string                `json:"user_data,omitempty"`
 	Tags              []string              `json:"tags"`
 	VPCUUID           string                `json:"vpc_uuid,omitempty"`
+	WithDropletAgent  *bool                 `json:"with_droplet_agent,omitempty"`
 }
 
 func (d DropletCreateRequest) String() string {
@@ -317,6 +321,18 @@ func (s *DropletsServiceOp) List(ctx context.Context, opt *ListOptions) ([]Dropl
 	return s.list(ctx, path)
 }
 
+// ListByName lists all Droplets filtered by name returning only exact matches.
+// It is case-insensitive.
+func (s *DropletsServiceOp) ListByName(ctx context.Context, name string, opt *ListOptions) ([]Droplet, *Response, error) {
+	path := fmt.Sprintf("%s?name=%s", dropletBasePath, name)
+	path, err := addOptions(path, opt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return s.list(ctx, path)
+}
+
 // ListByTag lists all Droplets matched by a Tag.
 func (s *DropletsServiceOp) ListByTag(ctx context.Context, tag string, opt *ListOptions) ([]Droplet, *Response, error) {
 	path := fmt.Sprintf("%s?tag_name=%s", dropletBasePath, tag)
diff --git a/droplets_test.go b/droplets_test.go
index 875412e..17a62d7 100644
--- a/droplets_test.go
+++ b/droplets_test.go
@@ -84,6 +84,45 @@ func TestDroplets_ListDropletsByTag(t *testing.T) {
 	}
 }
 
+func TestDroplets_ListDropletsByName(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) {
+		name := "testing"
+		if r.URL.Query().Get("name") != name {
+			t.Errorf("Droplets.ListByName request did not contain the 'name=%s' query parameter", name)
+		}
+
+		testMethod(t, r, http.MethodGet)
+		fmt.Fprint(w, `{
+			"droplets": [
+				{
+					"id": 1,
+					"name": "testing"
+				},
+				{
+					"id": 2,
+					"name": "testing"
+				}
+			],
+			"meta": {
+				"total": 2
+			}
+		}`)
+	})
+
+	droplets, _, err := client.Droplets.ListByName(ctx, "testing", nil)
+	if err != nil {
+		t.Errorf("Droplets.ListByName returned error: %v", err)
+	}
+
+	expected := []Droplet{{ID: 1, Name: "testing"}, {ID: 2, Name: "testing"}}
+	if !reflect.DeepEqual(droplets, expected) {
+		t.Errorf("Droplets.ListByName returned droplets %+v, expected %+v", droplets, expected)
+	}
+}
+
 func TestDroplets_ListDropletsMultiplePages(t *testing.T) {
 	setup()
 	defer teardown()
@@ -180,9 +219,8 @@ func TestDroplets_Create(t *testing.T) {
 			ID: 1,
 		},
 		Volumes: []DropletCreateVolume{
-			{Name: "hello-im-a-volume"},
 			{ID: "hello-im-another-volume"},
-			{Name: "hello-im-still-a-volume", ID: "should be ignored due to Name"},
+		{Name: "should be ignored due to ID", ID: "aaa-111-bbb-222-ccc"},
 		},
 		Tags:    []string{"one", "two"},
 		VPCUUID: "880b7f98-f062-404d-b33c-458d545696f6",
@@ -200,9 +238,8 @@ func TestDroplets_Create(t *testing.T) {
 			"private_networking": false,
 			"monitoring":         false,
 			"volumes": []interface{}{
-				map[string]interface{}{"name": "hello-im-a-volume"},
 				map[string]interface{}{"id": "hello-im-another-volume"},
-				map[string]interface{}{"name": "hello-im-still-a-volume"},
+				map[string]interface{}{"id": "aaa-111-bbb-222-ccc"},
 			},
 			"tags":     []interface{}{"one", "two"},
 			"vpc_uuid": "880b7f98-f062-404d-b33c-458d545696f6",
@@ -257,6 +294,206 @@ func TestDroplets_Create(t *testing.T) {
 	}
 }
 
+func TestDroplets_CreateWithoutDropletAgent(t *testing.T) {
+	setup()
+	defer teardown()
+
+	boolVal := false
+	createRequest := &DropletCreateRequest{
+		Name:   "name",
+		Region: "region",
+		Size:   "size",
+		Image: DropletCreateImage{
+			ID: 1,
+		},
+		Volumes: []DropletCreateVolume{
+			{ID: "hello-im-another-volume"},
+			{Name: "should be ignored due to Name", ID: "aaa-111-bbb-222-ccc"},
+		},
+		Tags:             []string{"one", "two"},
+		VPCUUID:          "880b7f98-f062-404d-b33c-458d545696f6",
+		WithDropletAgent: &boolVal,
+	}
+
+	mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) {
+		expected := map[string]interface{}{
+			"name":               "name",
+			"region":             "region",
+			"size":               "size",
+			"image":              float64(1),
+			"ssh_keys":           nil,
+			"backups":            false,
+			"ipv6":               false,
+			"private_networking": false,
+			"monitoring":         false,
+			"volumes": []interface{}{
+				map[string]interface{}{"id": "hello-im-another-volume"},
+				map[string]interface{}{"id": "aaa-111-bbb-222-ccc"},
+			},
+			"tags":               []interface{}{"one", "two"},
+			"vpc_uuid":           "880b7f98-f062-404d-b33c-458d545696f6",
+			"with_droplet_agent": false,
+		}
+		jsonBlob := `
+{
+  "droplet": {
+    "id": 1,
+    "vpc_uuid": "880b7f98-f062-404d-b33c-458d545696f6"
+  },
+  "links": {
+    "actions": [
+      {
+        "id": 1,
+        "href": "http://example.com",
+        "rel": "create"
+      }
+    ]
+  }
+}
+`
+
+		var v map[string]interface{}
+		err := json.NewDecoder(r.Body).Decode(&v)
+		if err != nil {
+			t.Fatalf("decode json: %v", err)
+		}
+
+		if !reflect.DeepEqual(v, expected) {
+			t.Errorf("Request body\n got=%#v\nwant=%#v", v, expected)
+		}
+
+		fmt.Fprintf(w, jsonBlob)
+	})
+
+	droplet, resp, err := client.Droplets.Create(ctx, createRequest)
+	if err != nil {
+		t.Errorf("Droplets.Create returned error: %v", err)
+	}
+
+	if id := droplet.ID; id != 1 {
+		t.Errorf("expected id '%d', received '%d'", 1, id)
+	}
+
+	vpcid := "880b7f98-f062-404d-b33c-458d545696f6"
+	if id := droplet.VPCUUID; id != vpcid {
+		t.Errorf("expected VPC uuid '%s', received '%s'", vpcid, id)
+	}
+
+	if a := resp.Links.Actions[0]; a.ID != 1 {
+		t.Errorf("expected action id '%d', received '%d'", 1, a.ID)
+	}
+}
+
+func TestDroplets_WithDropletAgentJsonMarshal(t *testing.T) {
+	boolF := false
+	boolT := true
+	tests := []struct {
+		in   *DropletCreateRequest
+		want string
+	}{
+		{
+			in:   &DropletCreateRequest{Name: "foo", WithDropletAgent: &boolF},
+			want: `{"name":"foo","region":"","size":"","image":0,"ssh_keys":null,"backups":false,"ipv6":false,"private_networking":false,"monitoring":false,"tags":null,"with_droplet_agent":false}`,
+		},
+		{
+			in:   &DropletCreateRequest{Name: "foo", WithDropletAgent: &boolT},
+			want: `{"name":"foo","region":"","size":"","image":0,"ssh_keys":null,"backups":false,"ipv6":false,"private_networking":false,"monitoring":false,"tags":null,"with_droplet_agent":true}`,
+		},
+		{
+			in:   &DropletCreateRequest{Name: "foo"},
+			want: `{"name":"foo","region":"","size":"","image":0,"ssh_keys":null,"backups":false,"ipv6":false,"private_networking":false,"monitoring":false,"tags":null}`,
+		},
+	}
+
+	for _, tt := range tests {
+		got, err := json.Marshal(tt.in)
+		if err != nil {
+			t.Fatalf("error: %v", err)
+		}
+		if !reflect.DeepEqual(tt.want, string(got)) {
+			t.Errorf("expected: %v, got: %v", tt.want, string(got))
+		}
+	}
+}
+
+func TestDroplets_CreateWithDisabledPublicNetworking(t *testing.T) {
+	setup()
+	defer teardown()
+
+	createRequest := &DropletCreateRequest{
+		Name:   "name",
+		Region: "region",
+		Size:   "size",
+		Image: DropletCreateImage{
+			ID: 1,
+		},
+		Volumes: []DropletCreateVolume{
+			{ID: "hello-im-another-volume"},
+			{Name: "should be ignored due to Name", ID: "aaa-111-bbb-222-ccc"},
+		},
+		Tags:    []string{"one", "two"},
+		VPCUUID: "880b7f98-f062-404d-b33c-458d545696f6",
+	}
+
+	mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) {
+		expected := map[string]interface{}{
+			"name":               "name",
+			"region":             "region",
+			"size":               "size",
+			"image":              float64(1),
+			"ssh_keys":           nil,
+			"backups":            false,
+			"ipv6":               false,
+			"private_networking": false,
+			"monitoring":         false,
+			"volumes": []interface{}{
+				map[string]interface{}{"id": "hello-im-another-volume"},
+				map[string]interface{}{"id": "aaa-111-bbb-222-ccc"},
+			},
+			"tags":     []interface{}{"one", "two"},
+			"vpc_uuid": "880b7f98-f062-404d-b33c-458d545696f6",
+		}
+		jsonBlob := `
+{
+  "droplet": {
+    "id": 1,
+    "vpc_uuid": "880b7f98-f062-404d-b33c-458d545696f6"
+  },
+  "links": {
+    "actions": [
+      {
+        "id": 1,
+        "href": "http://example.com",
+        "rel": "create"
+      }
+    ]
+  }
+}
+`
+
+		var v map[string]interface{}
+		err := json.NewDecoder(r.Body).Decode(&v)
+		if err != nil {
+			t.Fatalf("decode json: %v", err)
+		}
+
+		if !reflect.DeepEqual(v, expected) {
+			t.Errorf("Request body\n got=%#v\nwant=%#v", v, expected)
+		}
+
+		fmt.Fprintf(w, jsonBlob)
+	})
+
+	droplet, _, err := client.Droplets.Create(ctx, createRequest)
+	if err != nil {
+		t.Errorf("Droplets.Create returned error: %v", err)
+	}
+
+	if id := droplet.ID; id != 1 {
+		t.Errorf("expected id '%d', received '%d'", 1, id)
+	}
+}
+
 func TestDroplets_CreateMultiple(t *testing.T) {
 	setup()
 	defer teardown()
diff --git a/firewalls.go b/firewalls.go
index 8453e66..d2aadb4 100644
--- a/firewalls.go
+++ b/firewalls.go
@@ -10,7 +10,7 @@ import (
 const firewallsBasePath = "/v2/firewalls"
 
 // FirewallsService is an interface for managing Firewalls with the DigitalOcean API.
-// See: https://developers.digitalocean.com/documentation/v2/#firewalls
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Firewalls
 type FirewallsService interface {
 	Get(context.Context, string) (*Firewall, *Response, error)
 	Create(context.Context, *FirewallRequest) (*Firewall, *Response, error)
@@ -49,6 +49,7 @@ func (fw Firewall) String() string {
 	return Stringify(fw)
 }
 
+// URN returns the firewall name in a valid DO API URN form.
 func (fw Firewall) URN() string {
 	return ToURN("Firewall", fw.ID)
 }
@@ -88,6 +89,7 @@ type Sources struct {
 	Tags             []string `json:"tags,omitempty"`
 	DropletIDs       []int    `json:"droplet_ids,omitempty"`
 	LoadBalancerUIDs []string `json:"load_balancer_uids,omitempty"`
+	KubernetesIDs    []string `json:"kubernetes_ids,omitempty"`
 }
 
 // PendingChange represents a DigitalOcean Firewall status details.
@@ -103,6 +105,7 @@ type Destinations struct {
 	Tags             []string `json:"tags,omitempty"`
 	DropletIDs       []int    `json:"droplet_ids,omitempty"`
 	LoadBalancerUIDs []string `json:"load_balancer_uids,omitempty"`
+	KubernetesIDs    []string `json:"kubernetes_ids,omitempty"`
 }
 
 var _ FirewallsService = &FirewallsServiceOp{}
diff --git a/firewalls_test.go b/firewalls_test.go
index 7e50b9b..e26d382 100644
--- a/firewalls_test.go
+++ b/firewalls_test.go
@@ -20,7 +20,8 @@ var (
         "addresses": ["0.0.0.0/0"],
         "tags": ["frontend"],
         "droplet_ids": [123, 456],
-        "load_balancer_uids": ["lb-uid"]
+        "load_balancer_uids": ["lb-uid"],
+        "kubernetes_ids": ["doks-01", "doks-02"]
       }
     },
     {
@@ -297,6 +298,7 @@ func TestFirewalls_Create(t *testing.T) {
 					Tags:             []string{"frontend"},
 					DropletIDs:       []int{123, 456},
 					LoadBalancerUIDs: []string{"lb-uid"},
+					KubernetesIDs:    []string{"doks-01", "doks-02"},
 				},
 			},
 			{
@@ -807,7 +809,7 @@ func TestFirewalls_RemoveRules(t *testing.T) {
 
 func makeExpectedFirewalls() []Firewall {
 	return []Firewall{
-		Firewall{
+		{
 			ID:   "fe6b88f2-b42b-4bf7-bbd3-5ae20208f0b0",
 			Name: "f-i-r-e-w-a-l-l",
 			InboundRules: []InboundRule{
diff --git a/floating_ips.go b/floating_ips.go
index 1720d76..5a29c67 100644
--- a/floating_ips.go
+++ b/floating_ips.go
@@ -10,7 +10,7 @@ const floatingBasePath = "v2/floating_ips"
 
 // FloatingIPsService is an interface for interfacing with the floating IPs
 // endpoints of the Digital Ocean API.
-// See: https://developers.digitalocean.com/documentation/v2#floating-ips
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Floating-IPs
 type FloatingIPsService interface {
 	List(context.Context, *ListOptions) ([]FloatingIP, *Response, error)
 	Get(context.Context, string) (*FloatingIP, *Response, error)
@@ -28,15 +28,18 @@ var _ FloatingIPsService = &FloatingIPsServiceOp{}
 
 // FloatingIP represents a Digital Ocean floating IP.
 type FloatingIP struct {
-	Region  *Region  `json:"region"`
-	Droplet *Droplet `json:"droplet"`
-	IP      string   `json:"ip"`
+	Region    *Region  `json:"region"`
+	Droplet   *Droplet `json:"droplet"`
+	IP        string   `json:"ip"`
+	ProjectID string   `json:"project_id"`
+	Locked    bool     `json:"locked"`
 }
 
 func (f FloatingIP) String() string {
 	return Stringify(f)
 }
 
+// URN returns the floating IP in a valid DO API URN form.
 func (f FloatingIP) URN() string {
 	return ToURN("FloatingIP", f.IP)
 }
@@ -53,11 +56,12 @@ type floatingIPRoot struct {
 }
 
 // FloatingIPCreateRequest represents a request to create a floating IP.
-// If DropletID is not empty, the floating IP will be assigned to the
-// droplet.
+// Specify DropletID to assign the floating IP to a Droplet or Region
+// to reserve it to the region.
 type FloatingIPCreateRequest struct {
-	Region    string `json:"region"`
+	Region    string `json:"region,omitempty"`
 	DropletID int    `json:"droplet_id,omitempty"`
+	ProjectID string `json:"project_id,omitempty"`
 }
 
 // List all floating IPs.
diff --git a/floating_ips_actions.go b/floating_ips_actions.go
index 74ad279..9fd6e0a 100644
--- a/floating_ips_actions.go
+++ b/floating_ips_actions.go
@@ -8,7 +8,7 @@ import (
 
 // FloatingIPActionsService is an interface for interfacing with the
 // floating IPs actions endpoints of the Digital Ocean API.
-// See: https://developers.digitalocean.com/documentation/v2#floating-ips-action
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Floating-IP-Actions
 type FloatingIPActionsService interface {
 	Assign(ctx context.Context, ip string, dropletID int) (*Action, *Response, error)
 	Unassign(ctx context.Context, ip string) (*Action, *Response, error)
diff --git a/floating_ips_actions_test.go b/floating_ips_actions_test.go
index eef0d83..ef93c2d 100644
--- a/floating_ips_actions_test.go
+++ b/floating_ips_actions_test.go
@@ -144,10 +144,10 @@ func TestFloatingIPsActions_ListPageByNumber(t *testing.T) {
 		"actions":[{"status":"in-progress"}],
 		"links":{
 			"pages":{
-				"next":"http://example.com/v2/regions/?page=3",
-				"prev":"http://example.com/v2/regions/?page=1",
-				"last":"http://example.com/v2/regions/?page=3",
-				"first":"http://example.com/v2/regions/?page=1"
+				"next":"http://example.com/v2/floating_ips/?page=3",
+				"prev":"http://example.com/v2/floating_ips/?page=1",
+				"last":"http://example.com/v2/floating_ips/?page=3",
+				"first":"http://example.com/v2/floating_ips/?page=1"
 			}
 		}
 	}`
diff --git a/floating_ips_test.go b/floating_ips_test.go
index 02312f8..ae7c2bb 100644
--- a/floating_ips_test.go
+++ b/floating_ips_test.go
@@ -14,7 +14,11 @@ func TestFloatingIPs_ListFloatingIPs(t *testing.T) {
 
 	mux.HandleFunc("/v2/floating_ips", func(w http.ResponseWriter, r *http.Request) {
 		testMethod(t, r, http.MethodGet)
-		fmt.Fprint(w, `{"floating_ips": [{"region":{"slug":"nyc3"},"droplet":{"id":1},"ip":"192.168.0.1"},{"region":{"slug":"nyc3"},"droplet":{"id":2},"ip":"192.168.0.2"}],"meta":{"total":2}}`)
+		fmt.Fprint(w, `{"floating_ips": [
+			{"region":{"slug":"nyc3"},"droplet":{"id":1},"ip":"192.168.0.1","project_id":"46d8977a-35cd-11ed-909f-43c99bbf6032", "locked":false},
+			{"region":{"slug":"nyc3"},"droplet":{"id":2},"ip":"192.168.0.2","project_id":"46d8977a-35cd-11ed-909f-43c99bbf6032", "locked":false}],
+			"meta":{"total":2}
+		}`)
 	})
 
 	floatingIPs, resp, err := client.FloatingIPs.List(ctx, nil)
@@ -23,8 +27,8 @@ func TestFloatingIPs_ListFloatingIPs(t *testing.T) {
 	}
 
 	expectedFloatingIPs := []FloatingIP{
-		{Region: &Region{Slug: "nyc3"}, Droplet: &Droplet{ID: 1}, IP: "192.168.0.1"},
-		{Region: &Region{Slug: "nyc3"}, Droplet: &Droplet{ID: 2}, IP: "192.168.0.2"},
+		{Region: &Region{Slug: "nyc3"}, Droplet: &Droplet{ID: 1}, IP: "192.168.0.1", Locked: false, ProjectID: "46d8977a-35cd-11ed-909f-43c99bbf6032"},
+		{Region: &Region{Slug: "nyc3"}, Droplet: &Droplet{ID: 2}, IP: "192.168.0.2", Locked: false, ProjectID: "46d8977a-35cd-11ed-909f-43c99bbf6032"},
 	}
 	if !reflect.DeepEqual(floatingIPs, expectedFloatingIPs) {
 		t.Errorf("FloatingIPs.List returned floating IPs %+v, expected %+v", floatingIPs, expectedFloatingIPs)
@@ -44,7 +48,11 @@ func TestFloatingIPs_ListFloatingIPsMultiplePages(t *testing.T) {
 
 	mux.HandleFunc("/v2/floating_ips", func(w http.ResponseWriter, r *http.Request) {
 		testMethod(t, r, http.MethodGet)
-		fmt.Fprint(w, `{"floating_ips": [{"region":{"slug":"nyc3"},"droplet":{"id":1},"ip":"192.168.0.1"},{"region":{"slug":"nyc3"},"droplet":{"id":2},"ip":"192.168.0.2"}], "links":{"pages":{"next":"http://example.com/v2/floating_ips/?page=2"}}}`)
+		fmt.Fprint(w, `{"floating_ips": [
+			{"region":{"slug":"nyc3"},"droplet":{"id":1},"ip":"192.168.0.1","project_id":"46d8977a-35cd-11ed-909f-43c99bbf6032", "locked":false},
+			{"region":{"slug":"nyc3"},"droplet":{"id":2},"ip":"192.168.0.2","project_id":"46d8977a-35cd-11ed-909f-43c99bbf6032", "locked":false}],
+			"links":{"pages":{"next":"http://example.com/v2/floating_ips/?page=2"}}}
+		`)
 	})
 
 	_, resp, err := client.FloatingIPs.List(ctx, nil)
@@ -61,7 +69,9 @@ func TestFloatingIPs_RetrievePageByNumber(t *testing.T) {
 
 	jBlob := `
 	{
-		"floating_ips": [{"region":{"slug":"nyc3"},"droplet":{"id":1},"ip":"192.168.0.1"},{"region":{"slug":"nyc3"},"droplet":{"id":2},"ip":"192.168.0.2"}],
+		"floating_ips": [
+			{"region":{"slug":"nyc3"},"droplet":{"id":1},"ip":"192.168.0.1","project_id":"46d8977a-35cd-11ed-909f-43c99bbf6032", "locked":false},
+			{"region":{"slug":"nyc3"},"droplet":{"id":2},"ip":"192.168.0.2","project_id":"46d8977a-35cd-11ed-909f-43c99bbf6032", "locked":false}],
 		"links":{
 			"pages":{
 				"next":"http://example.com/v2/floating_ips/?page=3",
@@ -92,7 +102,7 @@ func TestFloatingIPs_Get(t *testing.T) {
 
 	mux.HandleFunc("/v2/floating_ips/192.168.0.1", func(w http.ResponseWriter, r *http.Request) {
 		testMethod(t, r, http.MethodGet)
-		fmt.Fprint(w, `{"floating_ip":{"region":{"slug":"nyc3"},"droplet":{"id":1},"ip":"192.168.0.1"}}`)
+		fmt.Fprint(w, `{"floating_ip":{"region":{"slug":"nyc3"},"droplet":{"id":1},"ip":"192.168.0.1","project_id":"46d8977a-35cd-11ed-909f-43c99bbf6032", "locked":false}}`)
 	})
 
 	floatingIP, _, err := client.FloatingIPs.Get(ctx, "192.168.0.1")
@@ -100,7 +110,7 @@ func TestFloatingIPs_Get(t *testing.T) {
 		t.Errorf("domain.Get returned error: %v", err)
 	}
 
-	expected := &FloatingIP{Region: &Region{Slug: "nyc3"}, Droplet: &Droplet{ID: 1}, IP: "192.168.0.1"}
+	expected := &FloatingIP{Region: &Region{Slug: "nyc3"}, Droplet: &Droplet{ID: 1}, IP: "192.168.0.1", Locked: false, ProjectID: "46d8977a-35cd-11ed-909f-43c99bbf6032"}
 	if !reflect.DeepEqual(floatingIP, expected) {
 		t.Errorf("FloatingIPs.Get returned %+v, expected %+v", floatingIP, expected)
 	}
@@ -113,6 +123,7 @@ func TestFloatingIPs_Create(t *testing.T) {
 	createRequest := &FloatingIPCreateRequest{
 		Region:    "nyc3",
 		DropletID: 1,
+		ProjectID: "46d8977a-35cd-11ed-909f-43c99bbf6032",
 	}
 
 	mux.HandleFunc("/v2/floating_ips", func(w http.ResponseWriter, r *http.Request) {
@@ -127,7 +138,7 @@ func TestFloatingIPs_Create(t *testing.T) {
 			t.Errorf("Request body = %+v, expected %+v", v, createRequest)
 		}
 
-		fmt.Fprint(w, `{"floating_ip":{"region":{"slug":"nyc3"},"droplet":{"id":1},"ip":"192.168.0.1"}}`)
+		fmt.Fprint(w, `{"floating_ip":{"region":{"slug":"nyc3"},"droplet":{"id":1},"ip":"192.168.0.1","project_id":"46d8977a-35cd-11ed-909f-43c99bbf6032", "locked":false}}`)
 	})
 
 	floatingIP, _, err := client.FloatingIPs.Create(ctx, createRequest)
@@ -135,7 +146,7 @@ func TestFloatingIPs_Create(t *testing.T) {
 		t.Errorf("FloatingIPs.Create returned error: %v", err)
 	}
 
-	expected := &FloatingIP{Region: &Region{Slug: "nyc3"}, Droplet: &Droplet{ID: 1}, IP: "192.168.0.1"}
+	expected := &FloatingIP{Region: &Region{Slug: "nyc3"}, Droplet: &Droplet{ID: 1}, IP: "192.168.0.1", Locked: false, ProjectID: "46d8977a-35cd-11ed-909f-43c99bbf6032"}
 	if !reflect.DeepEqual(floatingIP, expected) {
 		t.Errorf("FloatingIPs.Create returned %+v, expected %+v", floatingIP, expected)
 	}
diff --git a/functions.go b/functions.go
new file mode 100644
index 0000000..61c8077
--- /dev/null
+++ b/functions.go
@@ -0,0 +1,236 @@
+package godo
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"time"
+)
+
+const (
+	functionsBasePath        = "/v2/functions/namespaces"
+	functionsNamespacePath   = functionsBasePath + "/%s"
+	functionsTriggerBasePath = functionsNamespacePath + "/triggers"
+)
+
+type FunctionsService interface {
+	ListNamespaces(context.Context) ([]FunctionsNamespace, *Response, error)
+	GetNamespace(context.Context, string) (*FunctionsNamespace, *Response, error)
+	CreateNamespace(context.Context, *FunctionsNamespaceCreateRequest) (*FunctionsNamespace, *Response, error)
+	DeleteNamespace(context.Context, string) (*Response, error)
+
+	ListTriggers(context.Context, string) ([]FunctionsTrigger, *Response, error)
+	GetTrigger(context.Context, string, string) (*FunctionsTrigger, *Response, error)
+	CreateTrigger(context.Context, string, *FunctionsTriggerCreateRequest) (*FunctionsTrigger, *Response, error)
+	UpdateTrigger(context.Context, string, string, *FunctionsTriggerUpdateRequest) (*FunctionsTrigger, *Response, error)
+	DeleteTrigger(context.Context, string, string) (*Response, error)
+}
+
+type FunctionsServiceOp struct {
+	client *Client
+}
+
+var _ FunctionsService = &FunctionsServiceOp{}
+
+type namespacesRoot struct {
+	Namespaces []FunctionsNamespace `json:"namespaces,omitempty"`
+}
+
+type namespaceRoot struct {
+	Namespace *FunctionsNamespace `json:"namespace,omitempty"`
+}
+
+type FunctionsNamespace struct {
+	ApiHost   string    `json:"api_host,omitempty"`
+	Namespace string    `json:"namespace,omitempty"`
+	CreatedAt time.Time `json:"created_at,omitempty"`
+	UpdatedAt time.Time `json:"updated_at,omitempty"`
+	Label     string    `json:"label,omitempty"`
+	Region    string    `json:"region,omitempty"`
+	UUID      string    `json:"uuid,omitempty"`
+	Key       string    `json:"key,omitempty"`
+}
+
+type FunctionsNamespaceCreateRequest struct {
+	Label  string `json:"label"`
+	Region string `json:"region"`
+}
+
+type triggersRoot struct {
+	Triggers []FunctionsTrigger `json:"triggers,omitempty"`
+}
+
+type triggerRoot struct {
+	Trigger *FunctionsTrigger `json:"trigger,omitempty"`
+}
+
+type FunctionsTrigger struct {
+	Namespace        string                   `json:"namespace,omitempty"`
+	Function         string                   `json:"function,omitempty"`
+	Type             string                   `json:"type,omitempty"`
+	Name             string                   `json:"name,omitempty"`
+	IsEnabled        bool                     `json:"is_enabled"`
+	CreatedAt        time.Time                `json:"created_at,omitempty"`
+	UpdatedAt        time.Time                `json:"updated_at,omitempty"`
+	ScheduledDetails *TriggerScheduledDetails `json:"scheduled_details,omitempty"`
+	ScheduledRuns    *TriggerScheduledRuns    `json:"scheduled_runs,omitempty"`
+}
+
+type TriggerScheduledDetails struct {
+	Cron string                 `json:"cron,omitempty"`
+	Body map[string]interface{} `json:"body,omitempty"`
+}
+
+type TriggerScheduledRuns struct {
+	LastRunAt time.Time `json:"last_run_at,omitempty"`
+	NextRunAt time.Time `json:"next_run_at,omitempty"`
+}
+
+type FunctionsTriggerCreateRequest struct {
+	Name             string                   `json:"name"`
+	Type             string                   `json:"type"`
+	Function         string                   `json:"function"`
+	IsEnabled        bool                     `json:"is_enabled"`
+	ScheduledDetails *TriggerScheduledDetails `json:"scheduled_details,omitempty"`
+}
+
+type FunctionsTriggerUpdateRequest struct {
+	IsEnabled        *bool                    `json:"is_enabled,omitempty"`
+	ScheduledDetails *TriggerScheduledDetails `json:"scheduled_details,omitempty"`
+}
+
+// Gets a list of namespaces
+func (s *FunctionsServiceOp) ListNamespaces(ctx context.Context) ([]FunctionsNamespace, *Response, error) {
+	req, err := s.client.NewRequest(ctx, http.MethodGet, functionsBasePath, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	nsRoot := new(namespacesRoot)
+	resp, err := s.client.Do(ctx, req, nsRoot)
+	if err != nil {
+		return nil, resp, err
+	}
+	return nsRoot.Namespaces, resp, nil
+}
+
+// Gets a single namespace
+func (s *FunctionsServiceOp) GetNamespace(ctx context.Context, namespace string) (*FunctionsNamespace, *Response, error) {
+	path := fmt.Sprintf(functionsNamespacePath, namespace)
+
+	req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	nsRoot := new(namespaceRoot)
+	resp, err := s.client.Do(ctx, req, nsRoot)
+	if err != nil {
+		return nil, resp, err
+	}
+	return nsRoot.Namespace, resp, nil
+}
+
+// Creates a namespace
+func (s *FunctionsServiceOp) CreateNamespace(ctx context.Context, opts *FunctionsNamespaceCreateRequest) (*FunctionsNamespace, *Response, error) {
+	req, err := s.client.NewRequest(ctx, http.MethodPost, functionsBasePath, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+	nsRoot := new(namespaceRoot)
+	resp, err := s.client.Do(ctx, req, nsRoot)
+	if err != nil {
+		return nil, resp, err
+	}
+	return nsRoot.Namespace, resp, nil
+}
+
+// Delete a namespace
+func (s *FunctionsServiceOp) DeleteNamespace(ctx context.Context, namespace string) (*Response, error) {
+	path := fmt.Sprintf(functionsNamespacePath, namespace)
+
+	req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := s.client.Do(ctx, req, nil)
+	if err != nil {
+		return resp, err
+	}
+	return resp, nil
+}
+
+// ListTriggers gets a list of triggers
+func (s *FunctionsServiceOp) ListTriggers(ctx context.Context, namespace string) ([]FunctionsTrigger, *Response, error) {
+	path := fmt.Sprintf(functionsTriggerBasePath, namespace)
+	req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	root := new(triggersRoot)
+	resp, err := s.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	return root.Triggers, resp, nil
+}
+
+// GetTrigger gets a single trigger
+func (s *FunctionsServiceOp) GetTrigger(ctx context.Context, namespace string, trigger string) (*FunctionsTrigger, *Response, error) {
+	path := fmt.Sprintf(functionsTriggerBasePath+"/%s", namespace, trigger)
+	req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	root := new(triggerRoot)
+	resp, err := s.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	return root.Trigger, resp, nil
+}
+
+// CreateTrigger creates a trigger
+func (s *FunctionsServiceOp) CreateTrigger(ctx context.Context, namespace string, opts *FunctionsTriggerCreateRequest) (*FunctionsTrigger, *Response, error) {
+	path := fmt.Sprintf(functionsTriggerBasePath, namespace)
+	req, err := s.client.NewRequest(ctx, http.MethodPost, path, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+	root := new(triggerRoot)
+	resp, err := s.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	return root.Trigger, resp, nil
+}
+
+// UpdateTrigger updates a trigger
+func (s *FunctionsServiceOp) UpdateTrigger(ctx context.Context, namespace string, trigger string, opts *FunctionsTriggerUpdateRequest) (*FunctionsTrigger, *Response, error) {
+	path := fmt.Sprintf(functionsTriggerBasePath+"/%s", namespace, trigger)
+	req, err := s.client.NewRequest(ctx, http.MethodPut, path, opts)
+
+	if err != nil {
+		return nil, nil, err
+	}
+	root := new(triggerRoot)
+	resp, err := s.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	return root.Trigger, resp, nil
+}
+
+// DeleteTrigger deletes a trigger
+func (s *FunctionsServiceOp) DeleteTrigger(ctx context.Context, namespace string, trigger string) (*Response, error) {
+	path := fmt.Sprintf(functionsTriggerBasePath+"/%s", namespace, trigger)
+	req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil)
+
+	if err != nil {
+		return nil, err
+	}
+	resp, err := s.client.Do(ctx, req, nil)
+	if err != nil {
+		return resp, err
+	}
+	return resp, nil
+}
diff --git a/functions_test.go b/functions_test.go
new file mode 100644
index 0000000..32e408d
--- /dev/null
+++ b/functions_test.go
@@ -0,0 +1,429 @@
+package godo
+
+import (
+	"fmt"
+	"net/http"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestFunctions_ListNamespaces(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/v2/functions/namespaces", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		fmt.Fprint(w, `{
+			"namespaces": [
+				{
+					"api_host": "https://faas.do.com",
+					"namespace": "123-abc",
+					"created_at": "2022-06-16T12:09:13Z",
+					"updated_at": "2022-06-16T12:09:13Z",
+					"label": "my-namespace-1",
+					"region": "nyc1",
+					"uuid": "",
+					"key": ""
+				},
+				{
+					"api_host": "https://faas.do.com",
+					"namespace": "456-abc",
+					"created_at": "2022-11-02T18:33:44Z",
+					"updated_at": "2022-11-02T18:33:44Z",
+					"label": "my-namespace-2",
+					"region": "nyc3",
+					"uuid": "",
+					"key": ""
+				}
+			]
+		}`)
+	})
+
+	namespaces, _, err := client.Functions.ListNamespaces(ctx)
+	require.NoError(t, err)
+
+	expectedNamespaces := []FunctionsNamespace{
+		{
+			ApiHost:   "https://faas.do.com",
+			Namespace: "123-abc",
+			CreatedAt: time.Date(2022, 6, 16, 12, 9, 13, 0, time.UTC),
+			UpdatedAt: time.Date(2022, 6, 16, 12, 9, 13, 0, time.UTC),
+			Label:     "my-namespace-1",
+			Region:    "nyc1",
+			UUID:      "",
+			Key:       "",
+		},
+		{
+			ApiHost:   "https://faas.do.com",
+			Namespace: "456-abc",
+			CreatedAt: time.Date(2022, 11, 2, 18, 33, 44, 0, time.UTC),
+			UpdatedAt: time.Date(2022, 11, 2, 18, 33, 44, 0, time.UTC),
+			Label:     "my-namespace-2",
+			Region:    "nyc3",
+			UUID:      "",
+			Key:       "",
+		},
+	}
+	assert.Equal(t, expectedNamespaces, namespaces)
+}
+
+func TestFunctions_GetNamespace(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/v2/functions/namespaces/123-abc", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		fmt.Fprint(w, `{
+			"namespace": {
+				"api_host": "https://faas.do.com",
+				"namespace": "123-abc",
+				"created_at": "2022-06-16T12:09:13Z",
+				"updated_at": "2022-06-16T12:09:13Z",
+				"label": "my-namespace-1",
+				"region": "nyc1",
+				"uuid": "123-456",
+				"key": "abc-123"
+			}
+		}`)
+	})
+
+	namespace, _, err := client.Functions.GetNamespace(ctx, "123-abc")
+	require.NoError(t, err)
+
+	expectedNamespace := &FunctionsNamespace{
+		ApiHost:   "https://faas.do.com",
+		Namespace: "123-abc",
+		CreatedAt: time.Date(2022, 6, 16, 12, 9, 13, 0, time.UTC),
+		UpdatedAt: time.Date(2022, 6, 16, 12, 9, 13, 0, time.UTC),
+		Label:     "my-namespace-1",
+		Region:    "nyc1",
+		UUID:      "123-456",
+		Key:       "abc-123",
+	}
+	assert.Equal(t, expectedNamespace, namespace)
+}
+
+func TestFunctions_CreateNamespace(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/v2/functions/namespaces", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodPost)
+		fmt.Fprint(w, `{
+			"namespace": {
+				"api_host": "https://faas.do.com",
+				"namespace": "123-abc",
+				"created_at": "2022-06-16T12:09:13Z",
+				"updated_at": "2022-06-16T12:09:13Z",
+				"label": "my-namespace-1",
+				"region": "nyc1",
+				"uuid": "123-456",
+				"key": "abc-123"
+			}
+		}`)
+	})
+
+	opts := FunctionsNamespaceCreateRequest{Label: "my-namespace-1", Region: "nyc1"}
+	namespace, _, err := client.Functions.CreateNamespace(ctx, &opts)
+	require.NoError(t, err)
+
+	expectedNamespace := &FunctionsNamespace{
+		ApiHost:   "https://faas.do.com",
+		Namespace: "123-abc",
+		CreatedAt: time.Date(2022, 6, 16, 12, 9, 13, 0, time.UTC),
+		UpdatedAt: time.Date(2022, 6, 16, 12, 9, 13, 0, time.UTC),
+		Label:     "my-namespace-1",
+		Region:    "nyc1",
+		UUID:      "123-456",
+		Key:       "abc-123",
+	}
+	assert.Equal(t, expectedNamespace, namespace)
+}
+
+func TestFunctions_DeleteNamespace(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/v2/functions/namespaces/123-abc", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodDelete)
+	})
+
+	_, err := client.Functions.DeleteNamespace(ctx, "123-abc")
+
+	assert.NoError(t, err)
+}
+
+func TestFunctions_ListTriggers(t *testing.T) {
+	setup()
+	defer teardown()
+	mux.HandleFunc("/v2/functions/namespaces/123-456/triggers", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		fmt.Fprint(w, `{
+			"triggers": [
+				{
+					"name": "trigger",
+					"namespace": "123-456",
+					"function": "my_func",
+					"type": "SCHEDULED",
+					"is_enabled": true,
+					"created_at": "2022-10-05T13:46:59Z",
+					"updated_at": "2022-10-17T18:41:30Z",
+					"scheduled_details": {
+						"cron": "* * * * *",
+						"body": {
+						"foo": "bar"
+						}
+					},
+					"scheduled_runs": {
+						"next_run_at": "2022-11-03T17:03:02Z"
+					}
+				},			
+				{
+					"name": "trigger1",
+					"namespace": "123-456",
+					"function": "sample/hello",
+					"type": "SCHEDULED",
+					"is_enabled": true,
+					"created_at": "2022-10-14T20:29:43Z",
+					"updated_at": "2022-10-14T20:29:43Z",
+					"scheduled_details": {
+						"cron": "* * * * *",
+						"body": {}
+					},
+					"scheduled_runs": {
+						"last_run_at": "2022-11-03T17:02:43Z",
+						"next_run_at": "2022-11-03T17:02:47Z"
+					}
+				}	
+			]
+		}`)
+	})
+
+	triggers, _, err := client.Functions.ListTriggers(ctx, "123-456")
+	require.NoError(t, err)
+
+	expectedTriggers := []FunctionsTrigger{
+		{
+			Name:      "trigger",
+			Namespace: "123-456",
+			Function:  "my_func",
+			Type:      "SCHEDULED",
+			IsEnabled: true,
+			CreatedAt: time.Date(2022, 10, 5, 13, 46, 59, 0, time.UTC),
+			UpdatedAt: time.Date(2022, 10, 17, 18, 41, 30, 0, time.UTC),
+			ScheduledDetails: &TriggerScheduledDetails{
+				Cron: "* * * * *",
+				Body: map[string]interface{}{
+					"foo": "bar",
+				},
+			},
+			ScheduledRuns: &TriggerScheduledRuns{
+				NextRunAt: time.Date(2022, 11, 3, 17, 3, 2, 0, time.UTC),
+			},
+		},
+		{
+			Name:      "trigger1",
+			Namespace: "123-456",
+			Function:  "sample/hello",
+			Type:      "SCHEDULED",
+			IsEnabled: true,
+			CreatedAt: time.Date(2022, 10, 14, 20, 29, 43, 0, time.UTC),
+			UpdatedAt: time.Date(2022, 10, 14, 20, 29, 43, 0, time.UTC),
+			ScheduledDetails: &TriggerScheduledDetails{
+				Cron: "* * * * *",
+				Body: map[string]interface{}{},
+			},
+			ScheduledRuns: &TriggerScheduledRuns{
+				LastRunAt: time.Date(2022, 11, 03, 17, 02, 43, 0, time.UTC),
+				NextRunAt: time.Date(2022, 11, 03, 17, 02, 47, 0, time.UTC),
+			},
+		},
+	}
+	assert.Equal(t, expectedTriggers, triggers)
+}
+
+func TestFunctions_GetTrigger(t *testing.T) {
+	setup()
+	defer teardown()
+	mux.HandleFunc("/v2/functions/namespaces/123-456/triggers/my-trigger", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		fmt.Fprint(w, `{
+			"trigger": {
+				"name": "my-trigger",
+				"namespace": "123-456",
+				"function": "my_func",
+				"type": "SCHEDULED",
+				"is_enabled": true,
+				"created_at": "2022-10-05T13:46:59Z",
+				"updated_at": "2022-10-17T18:41:30Z",
+				"scheduled_details": {
+					"cron": "* * * * *",
+					"body": {
+						"foo": "bar"
+					}
+				},
+				"scheduled_runs": {
+					"next_run_at": "2022-11-03T17:03:02Z"
+				}
+			}	
+		}`)
+
+	})
+
+	trigger, _, err := client.Functions.GetTrigger(ctx, "123-456", "my-trigger")
+	require.NoError(t, err)
+
+	expectedTrigger := &FunctionsTrigger{
+		Name:      "my-trigger",
+		Namespace: "123-456",
+		Function:  "my_func",
+		Type:      "SCHEDULED",
+		IsEnabled: true,
+		CreatedAt: time.Date(2022, 10, 5, 13, 46, 59, 0, time.UTC),
+		UpdatedAt: time.Date(2022, 10, 17, 18, 41, 30, 0, time.UTC),
+		ScheduledDetails: &TriggerScheduledDetails{
+			Cron: "* * * * *",
+			Body: map[string]interface{}{
+				"foo": "bar",
+			},
+		},
+		ScheduledRuns: &TriggerScheduledRuns{
+			NextRunAt: time.Date(2022, 11, 3, 17, 3, 2, 0, time.UTC),
+		},
+	}
+	assert.Equal(t, expectedTrigger, trigger)
+
+}
+
+func TestFunctions_CreateTrigger(t *testing.T) {
+	setup()
+	defer teardown()
+	mux.HandleFunc("/v2/functions/namespaces/123-456/triggers", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodPost)
+		fmt.Fprint(w, `{
+			"trigger": {
+				"name": "my-new-trigger",
+				"namespace": "123-456",
+				"function": "my_func",
+				"type": "SCHEDULED",
+				"is_enabled": true,
+				"created_at": "2022-10-05T13:46:59Z",
+				"updated_at": "2022-10-17T18:41:30Z",
+				"scheduled_details": {
+					"cron": "* * * * *",
+					"body": {
+						"foo": "bar"
+					}
+				},
+				"scheduled_runs": {
+					"next_run_at": "2022-11-03T17:03:02Z"
+				}
+			}
+		}`)
+	})
+
+	opts := FunctionsTriggerCreateRequest{
+		Name:      "my-new-trigger",
+		Function:  "my_func",
+		Type:      "SCHEDULED",
+		IsEnabled: true,
+		ScheduledDetails: &TriggerScheduledDetails{
+			Cron: "* * * * *",
+			Body: map[string]interface{}{
+				"foo": "bar",
+			},
+		},
+	}
+	trigger, _, err := client.Functions.CreateTrigger(ctx, "123-456", &opts)
+	require.NoError(t, err)
+	expectedTrigger := &FunctionsTrigger{
+		Name:      "my-new-trigger",
+		Namespace: "123-456",
+		Function:  "my_func",
+		Type:      "SCHEDULED",
+		IsEnabled: true,
+		CreatedAt: time.Date(2022, 10, 5, 13, 46, 59, 0, time.UTC),
+		UpdatedAt: time.Date(2022, 10, 17, 18, 41, 30, 0, time.UTC),
+		ScheduledDetails: &TriggerScheduledDetails{
+			Cron: "* * * * *",
+			Body: map[string]interface{}{
+				"foo": "bar",
+			},
+		},
+		ScheduledRuns: &TriggerScheduledRuns{
+			NextRunAt: time.Date(2022, 11, 3, 17, 3, 2, 0, time.UTC),
+		},
+	}
+
+	assert.Equal(t, expectedTrigger, trigger)
+}
+
+func TestFunctions_UpdateTrigger(t *testing.T) {
+	setup()
+	defer teardown()
+	mux.HandleFunc("/v2/functions/namespaces/123-456/triggers/my-trigger", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodPut)
+		fmt.Fprint(w, `{
+			"trigger": {
+			"name": "my-trigger",
+			"namespace": "123-456",
+			"function": "my_func",
+			"type": "SCHEDULED",
+			"is_enabled": false,
+			"created_at": "2022-10-05T13:46:59Z",
+			"updated_at": "2022-10-17T18:41:30Z",
+			"scheduled_details": {
+				"cron": "* * * * *",
+				"body": {
+					"foo": "bar"
+				}
+			},
+			"scheduled_runs": {
+				"next_run_at": "2022-11-03T17:03:02Z"
+			}
+		}
+	}`)
+	})
+
+	isEnabled := false
+	opts := FunctionsTriggerUpdateRequest{
+		IsEnabled: &isEnabled,
+	}
+
+	trigger, _, err := client.Functions.UpdateTrigger(ctx, "123-456", "my-trigger", &opts)
+	require.NoError(t, err)
+
+	expectedTrigger := &FunctionsTrigger{
+		Name:      "my-trigger",
+		Namespace: "123-456",
+		Function:  "my_func",
+		Type:      "SCHEDULED",
+		IsEnabled: false,
+		CreatedAt: time.Date(2022, 10, 5, 13, 46, 59, 0, time.UTC),
+		UpdatedAt: time.Date(2022, 10, 17, 18, 41, 30, 0, time.UTC),
+		ScheduledDetails: &TriggerScheduledDetails{
+			Cron: "* * * * *",
+			Body: map[string]interface{}{
+				"foo": "bar",
+			},
+		},
+		ScheduledRuns: &TriggerScheduledRuns{
+			NextRunAt: time.Date(2022, 11, 3, 17, 3, 2, 0, time.UTC),
+		},
+	}
+	assert.Equal(t, expectedTrigger, trigger)
+}
+
+func TestFunctions_DeleteTrigger(t *testing.T) {
+	setup()
+	defer teardown()
+	mux.HandleFunc("/v2/functions/namespaces/123-abc/triggers/my-trigger", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodDelete)
+	})
+
+	_, err := client.Functions.DeleteTrigger(ctx, "123-abc", "my-trigger")
+	assert.NoError(t, err)
+}
diff --git a/go.mod b/go.mod
index 36753b1..aae2a89 100644
--- a/go.mod
+++ b/go.mod
@@ -1,14 +1,22 @@
 module github.com/digitalocean/godo
 
-go 1.14
+go 1.18
 
 require (
-	github.com/golang/protobuf v1.3.5 // indirect
-	github.com/google/go-querystring v1.0.0
+	github.com/google/go-querystring v1.1.0
 	github.com/stretchr/testify v1.4.0
-	golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e // indirect
-	golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
-	google.golang.org/appengine v1.6.5 // indirect
+	golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5
+	golang.org/x/time v0.0.0-20220922220347-f3bd1da661af
+)
+
+require (
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/golang/protobuf v1.5.2 // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	golang.org/x/net v0.7.0 // indirect
+	google.golang.org/appengine v1.6.7 // indirect
+	google.golang.org/protobuf v1.28.0 // indirect
+	gopkg.in/yaml.v2 v2.2.2 // indirect
 )
 
 replace github.com/stretchr/objx => github.com/stretchr/objx v0.2.0
diff --git a/go.sum b/go.sum
index ccd0f08..7bbdf5b 100644
--- a/go.sum
+++ b/go.sum
@@ -1,42 +1,385 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
 github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
-github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
-github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
+github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 golang.org/x/crypto v0.0.0-20200420201142-3c4aac89819a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e h1:bRhVy7zSSasaqNksaRZiA5EEI+Ei4I1nO5Jh72wfHlg=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
+golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 h1:OSnWWcOd/CtWQC2cYSBgbTSJv3ciqd8r54ySIW2y3RE=
+golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20220922220347-f3bd1da661af h1:Yx9k8YCG3dvF87UAn2tu2HQLf2dt/eR1bXxpLMWeH+Y=
+golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
 google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
 gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/godo.go b/godo.go
index c6dde8e..c48a5f7 100644
--- a/godo.go
+++ b/godo.go
@@ -11,14 +11,17 @@ import (
 	"net/url"
 	"reflect"
 	"strconv"
+	"strings"
+	"sync"
 	"time"
 
 	"github.com/google/go-querystring/query"
 	"golang.org/x/oauth2"
+	"golang.org/x/time/rate"
 )
 
 const (
-	libraryVersion = "1.37.0"
+	libraryVersion = "1.99.0"
 	defaultBaseURL = "https://api.digitalocean.com/"
 	userAgent      = "godo/" + libraryVersion
 	mediaType      = "application/json"
@@ -40,42 +43,55 @@ type Client struct {
 	UserAgent string
 
 	// Rate contains the current rate limit for the client as determined by the most recent
-	// API call.
-	Rate Rate
+	// API call. It is not thread-safe. Please consider using GetRate() instead.
+	Rate    Rate
+	ratemtx sync.Mutex
 
 	// Services used for communicating with the API
 	Account           AccountService
 	Actions           ActionsService
+	Apps              AppsService
 	Balance           BalanceService
 	BillingHistory    BillingHistoryService
 	CDNs              CDNService
+	Certificates      CertificatesService
+	Databases         DatabasesService
 	Domains           DomainsService
 	Droplets          DropletsService
 	DropletActions    DropletActionsService
+	Firewalls         FirewallsService
+	FloatingIPs       FloatingIPsService
+	FloatingIPActions FloatingIPActionsService
+	Functions         FunctionsService
 	Images            ImagesService
 	ImageActions      ImageActionsService
 	Invoices          InvoicesService
 	Keys              KeysService
+	Kubernetes        KubernetesService
+	LoadBalancers     LoadBalancersService
+	Monitoring        MonitoringService
+	OneClick          OneClickService
+	Projects          ProjectsService
 	Regions           RegionsService
+	Registry          RegistryService
+	ReservedIPs       ReservedIPsService
+	ReservedIPActions ReservedIPActionsService
 	Sizes             SizesService
-	FloatingIPs       FloatingIPsService
-	FloatingIPActions FloatingIPActionsService
 	Snapshots         SnapshotsService
 	Storage           StorageService
 	StorageActions    StorageActionsService
 	Tags              TagsService
-	LoadBalancers     LoadBalancersService
-	Certificates      CertificatesService
-	Firewalls         FirewallsService
-	Projects          ProjectsService
-	Kubernetes        KubernetesService
-	Registry          RegistryService
-	Databases         DatabasesService
+	UptimeChecks      UptimeChecksService
 	VPCs              VPCsService
-	OneClick          OneClickService
 
 	// Optional function called after every successful request made to the DO APIs
 	onRequestCompleted RequestCompletionCallback
+
+	// Optional extra HTTP headers to set on every request to the API.
+	headers map[string]string
+
+	// Optional rate limiter to ensure QoS.
+	rateLimiter *rate.Limiter
 }
 
 // RequestCompletionCallback defines the type of the request callback function
@@ -89,6 +105,23 @@ type ListOptions struct {
 
 	// For paginated result sets, the number of results to include per page.
 	PerPage int `url:"per_page,omitempty"`
+
+	// Whether App responses should include project_id fields. The field will be empty if false or if omitted. (ListApps)
+	WithProjects bool `url:"with_projects,omitempty"`
+}
+
+// TokenListOptions specifies the optional parameters to various List methods that support token pagination.
+type TokenListOptions struct {
+	// For paginated result sets, page of results to retrieve.
+	Page int `url:"page,omitempty"`
+
+	// For paginated result sets, the number of results to include per page.
+	PerPage int `url:"per_page,omitempty"`
+
+	// For paginated result sets which support tokens, the token provided by the last set
+	// of results in order to retrieve the next set of results. This is expected to be faster
+	// than incrementing or decrementing the page number.
+	Token string `url:"page_token,omitempty"`
 }
 
 // Response is a DigitalOcean response. This wraps the standard http.Response returned from DigitalOcean.
@@ -103,6 +136,8 @@ type Response struct {
 	Meta *Meta
 
 	// Monitoring URI
+	// Deprecated: This field is not populated. To poll for the status of a
+	// newly created Droplet, use Links.Actions[0].HREF
 	Monitor string
 
 	Rate
@@ -162,11 +197,9 @@ func addOptions(s string, opt interface{}) (string, error) {
 // NewFromToken returns a new DigitalOcean API client with the given API
 // token.
 func NewFromToken(token string) *Client {
+	cleanToken := strings.Trim(strings.TrimSpace(token), "'")
 	ctx := context.Background()
-
-	config := &oauth2.Config{}
-	ts := config.TokenSource(ctx, &oauth2.Token{AccessToken: token})
-
+	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: cleanToken})
 	return NewClient(oauth2.NewClient(ctx, ts))
 }
 
@@ -184,35 +217,44 @@ func NewClient(httpClient *http.Client) *Client {
 	baseURL, _ := url.Parse(defaultBaseURL)
 
 	c := &Client{client: httpClient, BaseURL: baseURL, UserAgent: userAgent}
+
 	c.Account = &AccountServiceOp{client: c}
 	c.Actions = &ActionsServiceOp{client: c}
+	c.Apps = &AppsServiceOp{client: c}
 	c.Balance = &BalanceServiceOp{client: c}
 	c.BillingHistory = &BillingHistoryServiceOp{client: c}
 	c.CDNs = &CDNServiceOp{client: c}
 	c.Certificates = &CertificatesServiceOp{client: c}
+	c.Databases = &DatabasesServiceOp{client: c}
 	c.Domains = &DomainsServiceOp{client: c}
 	c.Droplets = &DropletsServiceOp{client: c}
 	c.DropletActions = &DropletActionsServiceOp{client: c}
 	c.Firewalls = &FirewallsServiceOp{client: c}
 	c.FloatingIPs = &FloatingIPsServiceOp{client: c}
 	c.FloatingIPActions = &FloatingIPActionsServiceOp{client: c}
+	c.Functions = &FunctionsServiceOp{client: c}
 	c.Images = &ImagesServiceOp{client: c}
 	c.ImageActions = &ImageActionsServiceOp{client: c}
 	c.Invoices = &InvoicesServiceOp{client: c}
 	c.Keys = &KeysServiceOp{client: c}
+	c.Kubernetes = &KubernetesServiceOp{client: c}
 	c.LoadBalancers = &LoadBalancersServiceOp{client: c}
+	c.Monitoring = &MonitoringServiceOp{client: c}
+	c.OneClick = &OneClickServiceOp{client: c}
 	c.Projects = &ProjectsServiceOp{client: c}
 	c.Regions = &RegionsServiceOp{client: c}
+	c.Registry = &RegistryServiceOp{client: c}
+	c.ReservedIPs = &ReservedIPsServiceOp{client: c}
+	c.ReservedIPActions = &ReservedIPActionsServiceOp{client: c}
 	c.Sizes = &SizesServiceOp{client: c}
 	c.Snapshots = &SnapshotsServiceOp{client: c}
 	c.Storage = &StorageServiceOp{client: c}
 	c.StorageActions = &StorageActionsServiceOp{client: c}
 	c.Tags = &TagsServiceOp{client: c}
-	c.Kubernetes = &KubernetesServiceOp{client: c}
-	c.Registry = &RegistryServiceOp{client: c}
-	c.Databases = &DatabasesServiceOp{client: c}
+	c.UptimeChecks = &UptimeChecksServiceOp{client: c}
 	c.VPCs = &VPCsServiceOp{client: c}
-	c.OneClick = &OneClickServiceOp{client: c}
+
+	c.headers = make(map[string]string)
 
 	return c
 }
@@ -253,6 +295,26 @@ func SetUserAgent(ua string) ClientOpt {
 	}
 }
 
+// SetRequestHeaders sets optional HTTP headers on the client that are
+// sent on each HTTP request.
+func SetRequestHeaders(headers map[string]string) ClientOpt {
+	return func(c *Client) error {
+		for k, v := range headers {
+			c.headers[k] = v
+		}
+		return nil
+	}
+}
+
+// SetStaticRateLimit sets an optional client-side rate limiter that restricts
+// the number of queries per second that the client can send to enforce QoS.
+func SetStaticRateLimit(rps float64) ClientOpt {
+	return func(c *Client) error {
+		c.rateLimiter = rate.NewLimiter(rate.Limit(rps), 1)
+		return nil
+	}
+}
+
 // NewRequest creates an API request. A relative URL can be provided in urlStr, which will be resolved to the
 // BaseURL of the Client. Relative URLS should always be specified without a preceding slash. If specified, the
 // value pointed to by body is JSON encoded and included in as the request body.
@@ -262,22 +324,37 @@ func (c *Client) NewRequest(ctx context.Context, method, urlStr string, body int
 		return nil, err
 	}
 
-	buf := new(bytes.Buffer)
-	if body != nil {
-		err = json.NewEncoder(buf).Encode(body)
+	var req *http.Request
+	switch method {
+	case http.MethodGet, http.MethodHead, http.MethodOptions:
+		req, err = http.NewRequest(method, u.String(), nil)
 		if err != nil {
 			return nil, err
 		}
+
+	default:
+		buf := new(bytes.Buffer)
+		if body != nil {
+			err = json.NewEncoder(buf).Encode(body)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		req, err = http.NewRequest(method, u.String(), buf)
+		if err != nil {
+			return nil, err
+		}
+		req.Header.Set("Content-Type", mediaType)
 	}
 
-	req, err := http.NewRequest(method, u.String(), buf)
-	if err != nil {
-		return nil, err
+	for k, v := range c.headers {
+		req.Header.Add(k, v)
 	}
 
-	req.Header.Add("Content-Type", mediaType)
-	req.Header.Add("Accept", mediaType)
-	req.Header.Add("User-Agent", c.UserAgent)
+	req.Header.Set("Accept", mediaType)
+	req.Header.Set("User-Agent", c.UserAgent)
+
 	return req, nil
 }
 
@@ -286,6 +363,14 @@ func (c *Client) OnRequestCompleted(rc RequestCompletionCallback) {
 	c.onRequestCompleted = rc
 }
 
+// GetRate returns the current rate limit for the client as determined by the most recent
+// API call. It is thread-safe.
+func (c *Client) GetRate() Rate {
+	c.ratemtx.Lock()
+	defer c.ratemtx.Unlock()
+	return c.Rate
+}
+
 // newResponse creates a new Response for the provided http.Response
 func newResponse(r *http.Response) *Response {
 	response := Response{Response: r}
@@ -313,6 +398,13 @@ func (r *Response) populateRate() {
 // pointed to by v, or returned as an error if an API error has occurred. If v implements the io.Writer interface,
 // the raw response will be written to v, without attempting to decode it.
 func (c *Client) Do(ctx context.Context, req *http.Request, v interface{}) (*Response, error) {
+	if c.rateLimiter != nil {
+		err := c.rateLimiter.Wait(ctx)
+		if err != nil {
+			return nil, err
+		}
+	}
+
 	resp, err := DoRequestWithClient(ctx, c.client, req)
 	if err != nil {
 		return nil, err
@@ -322,20 +414,33 @@ func (c *Client) Do(ctx context.Context, req *http.Request, v interface{}) (*Res
 	}
 
 	defer func() {
+		// Ensure the response body is fully read and closed
+		// before we reconnect, so that we reuse the same TCPConnection.
+		// Close the previous response's body. But read at least some of
+		// the body so if it's small the underlying TCP connection will be
+		// re-used. No need to check for errors: if it fails, the Transport
+		// won't reuse it anyway.
+		const maxBodySlurpSize = 2 << 10
+		if resp.ContentLength == -1 || resp.ContentLength <= maxBodySlurpSize {
+			io.CopyN(ioutil.Discard, resp.Body, maxBodySlurpSize)
+		}
+
 		if rerr := resp.Body.Close(); err == nil {
 			err = rerr
 		}
 	}()
 
 	response := newResponse(resp)
+	c.ratemtx.Lock()
 	c.Rate = response.Rate
+	c.ratemtx.Unlock()
 
 	err = CheckResponse(resp)
 	if err != nil {
 		return response, err
 	}
 
-	if v != nil {
+	if resp.StatusCode != http.StatusNoContent && v != nil {
 		if w, ok := v.(io.Writer); ok {
 			_, err = io.Copy(w, resp.Body)
 			if err != nil {
@@ -378,6 +483,7 @@ func (r *ErrorResponse) Error() string {
 // CheckResponse checks the API response for errors, and returns them if present. A response is considered an
 // error if it has a status code outside the 200 range. API error responses are expected to have either no response
 // body, or a JSON response body that maps to ErrorResponse. Any other response body will be silently ignored.
+// If the API error response does not include the request ID in its body, the one from its header will be used.
 func CheckResponse(r *http.Response) error {
 	if c := r.StatusCode; c >= 200 && c <= 299 {
 		return nil
@@ -392,6 +498,10 @@ func CheckResponse(r *http.Response) error {
 		}
 	}
 
+	if errorResponse.RequestID == "" {
+		errorResponse.RequestID = r.Header.Get("x-request-id")
+	}
+
 	return errorResponse
 }
 
@@ -399,8 +509,15 @@ func (r Rate) String() string {
 	return Stringify(r)
 }
 
+// PtrTo returns a pointer to the provided input.
+func PtrTo[T any](v T) *T {
+	return &v
+}
+
 // String is a helper routine that allocates a new string value
 // to store v and returns a pointer to it.
+//
+// Deprecated: Use PtrTo instead.
 func String(v string) *string {
 	p := new(string)
 	*p = v
@@ -410,6 +527,8 @@ func String(v string) *string {
 // Int is a helper routine that allocates a new int32 value
 // to store v and returns a pointer to it, but unlike Int32
 // its argument value is an int.
+//
+// Deprecated: Use PtrTo instead.
 func Int(v int) *int {
 	p := new(int)
 	*p = v
@@ -418,6 +537,8 @@ func Int(v int) *int {
 
 // Bool is a helper routine that allocates a new bool value
 // to store v and returns a pointer to it.
+//
+// Deprecated: Use PtrTo instead.
 func Bool(v bool) *bool {
 	p := new(bool)
 	*p = v
diff --git a/godo_test.go b/godo_test.go
index b82fe42..62f4979 100644
--- a/godo_test.go
+++ b/godo_test.go
@@ -10,8 +10,11 @@ import (
 	"net/url"
 	"reflect"
 	"strings"
+	"sync"
 	"testing"
 	"time"
+
+	"golang.org/x/time/rate"
 )
 
 var (
@@ -84,10 +87,13 @@ func testClientServices(t *testing.T, c *Client) {
 		"ImageActions",
 		"Invoices",
 		"Keys",
+		"Monitoring",
 		"Regions",
 		"Sizes",
 		"FloatingIPs",
 		"FloatingIPActions",
+		"ReservedIPs",
+		"ReservedIPActions",
 		"Tags",
 	}
 
@@ -125,10 +131,38 @@ func TestNewClient(t *testing.T) {
 }
 
 func TestNewFromToken(t *testing.T) {
-	c := NewFromToken("my-token")
+	c := NewFromToken("myToken")
 	testClientDefaults(t, c)
 }
 
+func TestNewFromToken_cleaned(t *testing.T) {
+	testTokens := []string{"myToken ", " myToken", " myToken ", "'myToken'", " 'myToken' "}
+	expected := "Bearer myToken"
+
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/foo", func(w http.ResponseWriter, r *http.Request) {
+		w.WriteHeader(http.StatusOK)
+	})
+
+	for _, tt := range testTokens {
+		t.Run(tt, func(t *testing.T) {
+			c := NewFromToken(tt)
+			req, _ := c.NewRequest(ctx, http.MethodGet, server.URL+"/foo", nil)
+			resp, err := c.Do(ctx, req, nil)
+			if err != nil {
+				t.Fatalf("Do(): %v", err)
+			}
+
+			authHeader := resp.Request.Header.Get("Authorization")
+			if authHeader != expected {
+				t.Errorf("Authorization header = %v, expected %v", authHeader, expected)
+			}
+		})
+	}
+}
+
 func TestNew(t *testing.T) {
 	c, err := New(nil)
 
@@ -146,7 +180,7 @@ func TestNewRequest(t *testing.T) {
 		`{"name":"l","region":"","size":"","image":0,`+
 			`"ssh_keys":null,"backups":false,"ipv6":false,`+
 			`"private_networking":false,"monitoring":false,"tags":null}`+"\n"
-	req, _ := c.NewRequest(ctx, http.MethodGet, inURL, inBody)
+	req, _ := c.NewRequest(ctx, http.MethodPost, inURL, inBody)
 
 	// test relative URL was expanded
 	if req.URL.String() != outURL {
@@ -166,6 +200,29 @@ func TestNewRequest(t *testing.T) {
 	}
 }
 
+func TestNewRequest_get(t *testing.T) {
+	c := NewClient(nil)
+
+	inURL, outURL := "/foo", defaultBaseURL+"foo"
+	req, _ := c.NewRequest(ctx, http.MethodGet, inURL, nil)
+
+	// test relative URL was expanded
+	if req.URL.String() != outURL {
+		t.Errorf("NewRequest(%v) URL = %v, expected %v", inURL, req.URL, outURL)
+	}
+
+	// test the content-type header is not set
+	if contentType := req.Header.Get("Content-Type"); contentType != "" {
+		t.Errorf("NewRequest() Content-Type = %v, expected empty string", contentType)
+	}
+
+	// test default user-agent is attached to the request
+	userAgent := req.Header.Get("User-Agent")
+	if c.UserAgent != userAgent {
+		t.Errorf("NewRequest() User-Agent = %v, expected %v", userAgent, c.UserAgent)
+	}
+}
+
 func TestNewRequest_withUserData(t *testing.T) {
 	c := NewClient(nil)
 
@@ -174,7 +231,36 @@ func TestNewRequest_withUserData(t *testing.T) {
 		`{"name":"l","region":"","size":"","image":0,`+
 			`"ssh_keys":null,"backups":false,"ipv6":false,`+
 			`"private_networking":false,"monitoring":false,"user_data":"u","tags":null}`+"\n"
-	req, _ := c.NewRequest(ctx, http.MethodGet, inURL, inBody)
+	req, _ := c.NewRequest(ctx, http.MethodPost, inURL, inBody)
+
+	// test relative URL was expanded
+	if req.URL.String() != outURL {
+		t.Errorf("NewRequest(%v) URL = %v, expected %v", inURL, req.URL, outURL)
+	}
+
+	// test body was JSON encoded
+	body, _ := ioutil.ReadAll(req.Body)
+	if string(body) != outBody {
+		t.Errorf("NewRequest(%v)Body = %v, expected %v", inBody, string(body), outBody)
+	}
+
+	// test default user-agent is attached to the request
+	userAgent := req.Header.Get("User-Agent")
+	if c.UserAgent != userAgent {
+		t.Errorf("NewRequest() User-Agent = %v, expected %v", userAgent, c.UserAgent)
+	}
+}
+
+func TestNewRequest_withDropletAgent(t *testing.T) {
+	c := NewClient(nil)
+
+	boolVal := true
+	inURL, outURL := "/foo", defaultBaseURL+"foo"
+	inBody, outBody := &DropletCreateRequest{Name: "l", WithDropletAgent: &boolVal},
+		`{"name":"l","region":"","size":"","image":0,`+
+			`"ssh_keys":null,"backups":false,"ipv6":false,`+
+			`"private_networking":false,"monitoring":false,"tags":null,"with_droplet_agent":true}`+"\n"
+	req, _ := c.NewRequest(ctx, http.MethodPost, inURL, inBody)
 
 	// test relative URL was expanded
 	if req.URL.String() != outURL {
@@ -216,6 +302,28 @@ func TestNewRequest_withCustomUserAgent(t *testing.T) {
 	}
 }
 
+func TestNewRequest_withCustomHeaders(t *testing.T) {
+	expectedIdentity := "identity"
+	expectedCustom := "x_test_header"
+
+	c, err := New(nil, SetRequestHeaders(map[string]string{
+		"Accept-Encoding": expectedIdentity,
+		"X-Test-Header":   expectedCustom,
+	}))
+	if err != nil {
+		t.Fatalf("New() unexpected error: %v", err)
+	}
+
+	req, _ := c.NewRequest(ctx, http.MethodGet, "/foo", nil)
+
+	if got := req.Header.Get("Accept-Encoding"); got != expectedIdentity {
+		t.Errorf("New() Custom Accept Encoding Header = %s; expected %s", got, expectedIdentity)
+	}
+	if got := req.Header.Get("X-Test-Header"); got != expectedCustom {
+		t.Errorf("New() Custom Accept Encoding Header = %s; expected %s", got, expectedCustom)
+	}
+}
+
 func TestDo(t *testing.T) {
 	setup()
 	defer teardown()
@@ -282,46 +390,94 @@ func TestDo_redirectLoop(t *testing.T) {
 }
 
 func TestCheckResponse(t *testing.T) {
-	res := &http.Response{
-		Request:    &http.Request{},
-		StatusCode: http.StatusBadRequest,
-		Body: ioutil.NopCloser(strings.NewReader(`{"message":"m",
-			"errors": [{"resource": "r", "field": "f", "code": "c"}]}`)),
-	}
-	err := CheckResponse(res).(*ErrorResponse)
+	testHeaders := make(http.Header, 1)
+	testHeaders.Set("x-request-id", "dead-beef")
 
-	if err == nil {
-		t.Fatalf("Expected error response.")
-	}
-
-	expected := &ErrorResponse{
-		Response: res,
-		Message:  "m",
-	}
-	if !reflect.DeepEqual(err, expected) {
-		t.Errorf("Error = %#v, expected %#v", err, expected)
-	}
-}
-
-// ensure that we properly handle API errors that do not contain a response
-// body
-func TestCheckResponse_noBody(t *testing.T) {
-	res := &http.Response{
-		Request:    &http.Request{},
-		StatusCode: http.StatusBadRequest,
-		Body:       ioutil.NopCloser(strings.NewReader("")),
+	tests := []struct {
+		title    string
+		input    *http.Response
+		expected *ErrorResponse
+	}{
+		{
+			title: "default (no request_id)",
+			input: &http.Response{
+				Request:    &http.Request{},
+				StatusCode: http.StatusBadRequest,
+				Body: ioutil.NopCloser(strings.NewReader(`{"message":"m",
+			"errors": [{"resource": "r", "field": "f", "code": "c"}]}`)),
+			},
+			expected: &ErrorResponse{
+				Message: "m",
+			},
+		},
+		{
+			title: "request_id in body",
+			input: &http.Response{
+				Request:    &http.Request{},
+				StatusCode: http.StatusBadRequest,
+				Body: ioutil.NopCloser(strings.NewReader(`{"message":"m", "request_id": "dead-beef",
+			"errors": [{"resource": "r", "field": "f", "code": "c"}]}`)),
+			},
+			expected: &ErrorResponse{
+				Message:   "m",
+				RequestID: "dead-beef",
+			},
+		},
+		{
+			title: "request_id in header",
+			input: &http.Response{
+				Request:    &http.Request{},
+				StatusCode: http.StatusBadRequest,
+				Header:     testHeaders,
+				Body: ioutil.NopCloser(strings.NewReader(`{"message":"m",
+			"errors": [{"resource": "r", "field": "f", "code": "c"}]}`)),
+			},
+			expected: &ErrorResponse{
+				Message:   "m",
+				RequestID: "dead-beef",
+			},
+		},
+		// This tests that the ID in the body takes precedence to ensure we maintain the current
+		// behavior. In practice, the IDs in the header and body should always be the same.
+		{
+			title: "request_id in both",
+			input: &http.Response{
+				Request:    &http.Request{},
+				StatusCode: http.StatusBadRequest,
+				Header:     testHeaders,
+				Body: ioutil.NopCloser(strings.NewReader(`{"message":"m", "request_id": "dead-beef-body",
+			"errors": [{"resource": "r", "field": "f", "code": "c"}]}`)),
+			},
+			expected: &ErrorResponse{
+				Message:   "m",
+				RequestID: "dead-beef-body",
+			},
+		},
+		// ensure that we properly handle API errors that do not contain a
+		// response body
+		{
+			title: "no body",
+			input: &http.Response{
+				Request:    &http.Request{},
+				StatusCode: http.StatusBadRequest,
+				Body:       ioutil.NopCloser(strings.NewReader("")),
+			},
+			expected: &ErrorResponse{},
+		},
 	}
-	err := CheckResponse(res).(*ErrorResponse)
 
-	if err == nil {
-		t.Errorf("Expected error response.")
-	}
+	for _, tt := range tests {
+		t.Run(tt.title, func(t *testing.T) {
+			err := CheckResponse(tt.input).(*ErrorResponse)
+			if err == nil {
+				t.Fatalf("Expected error response.")
+			}
+			tt.expected.Response = tt.input
 
-	expected := &ErrorResponse{
-		Response: res,
-	}
-	if !reflect.DeepEqual(err, expected) {
-		t.Errorf("Error = %#v, expected %#v", err, expected)
+			if !reflect.DeepEqual(err, tt.expected) {
+				t.Errorf("Error = %#v, expected %#v", err, tt.expected)
+			}
+		})
 	}
 }
 
@@ -354,6 +510,9 @@ func TestDo_rateLimit(t *testing.T) {
 	if !client.Rate.Reset.IsZero() {
 		t.Errorf("Client rate reset not initialized to zero value")
 	}
+	if client.Rate != client.GetRate() {
+		t.Errorf("Client rate is not the same as client.GetRate()")
+	}
 
 	req, _ := client.NewRequest(ctx, http.MethodGet, "/", nil)
 	_, err := client.Do(context.Background(), req, nil)
@@ -371,6 +530,49 @@ func TestDo_rateLimit(t *testing.T) {
 	if client.Rate.Reset.UTC() != reset {
 		t.Errorf("Client rate reset = %v, expected %v", client.Rate.Reset, reset)
 	}
+	if client.Rate != client.GetRate() {
+		t.Errorf("Client rate is not the same as client.GetRate()")
+	}
+}
+
+func TestDo_rateLimitRace(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Add(headerRateLimit, "60")
+		w.Header().Add(headerRateRemaining, "59")
+		w.Header().Add(headerRateReset, "1372700873")
+	})
+
+	var (
+		wg    sync.WaitGroup
+		wait  = make(chan struct{})
+		count = 100
+	)
+	wg.Add(count)
+	for i := 0; i < count; i++ {
+		go func() {
+			<-wait
+			req, _ := client.NewRequest(ctx, http.MethodGet, "/", nil)
+			_, err := client.Do(context.Background(), req, nil)
+			if err != nil {
+				t.Errorf("Do(): %v", err)
+			}
+			wg.Done()
+		}()
+	}
+	wg.Add(count)
+	for i := 0; i < count; i++ {
+		go func() {
+			<-wait
+			_ = client.GetRate()
+			wg.Done()
+		}()
+	}
+
+	close(wait)
+	wg.Wait()
 }
 
 func TestDo_rateLimit_errorResponse(t *testing.T) {
@@ -413,6 +615,32 @@ func checkCurrentPage(t *testing.T, resp *Response, expectedPage int) {
 	}
 }
 
+func checkNextPageToken(t *testing.T, resp *Response, expectedNextPageToken string) {
+	t.Helper()
+	links := resp.Links
+	pageToken, err := links.NextPageToken()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if pageToken != expectedNextPageToken {
+		t.Fatalf("expected next page token to be '%s', was '%s'", expectedNextPageToken, pageToken)
+	}
+}
+
+func checkPreviousPageToken(t *testing.T, resp *Response, expectedPreviousPageToken string) {
+	t.Helper()
+	links := resp.Links
+	pageToken, err := links.PrevPageToken()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if pageToken != expectedPreviousPageToken {
+		t.Fatalf("expected previous page token to be '%s', was '%s'", expectedPreviousPageToken, pageToken)
+	}
+}
+
 func TestDo_completion_callback(t *testing.T) {
 	setup()
 	defer teardown()
@@ -541,17 +769,22 @@ func TestCustomBaseURL(t *testing.T) {
 	}
 }
 
+func TestSetStaticRateLimit(t *testing.T) {
+	rps := float64(5)
+	c, err := New(nil, SetStaticRateLimit(rps))
+	if err != nil {
+		t.Fatalf("New() unexpected error: %v", err)
+	}
+
+	expected := rate.NewLimiter(rate.Limit(rps), 1)
+	if got := c.rateLimiter; *got != *expected {
+		t.Errorf("rateLimiter = %+v; expected %+v", got, expected)
+	}
+}
+
 func TestCustomBaseURL_badURL(t *testing.T) {
 	baseURL := ":"
 	_, err := New(nil, SetBaseURL(baseURL))
 
 	testURLParseError(t, err)
 }
-
-func intPtr(val int) *int {
-	return &val
-}
-
-func boolPtr(val bool) *bool {
-	return &val
-}
diff --git a/image_actions.go b/image_actions.go
index 976f7c6..2ee508c 100644
--- a/image_actions.go
+++ b/image_actions.go
@@ -4,18 +4,20 @@ import (
 	"context"
 	"fmt"
 	"net/http"
+	"net/url"
 )
 
 // ImageActionsService is an interface for interfacing with the image actions
 // endpoints of the DigitalOcean API
-// See: https://developers.digitalocean.com/documentation/v2#image-actions
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Image-Actions
 type ImageActionsService interface {
 	Get(context.Context, int, int) (*Action, *Response, error)
+	GetByURI(context.Context, string) (*Action, *Response, error)
 	Transfer(context.Context, int, *ActionRequest) (*Action, *Response, error)
 	Convert(context.Context, int) (*Action, *Response, error)
 }
 
-// ImageActionsServiceOp handles communition with the image action related methods of the
+// ImageActionsServiceOp handles communication with the image action related methods of the
 // DigitalOcean API.
 type ImageActionsServiceOp struct {
 	client *Client
@@ -86,7 +88,20 @@ func (i *ImageActionsServiceOp) Get(ctx context.Context, imageID, actionID int)
 	}
 
 	path := fmt.Sprintf("v2/images/%d/actions/%d", imageID, actionID)
+	return i.get(ctx, path)
+}
+
+// GetByURI gets an action for a particular image by URI.
+func (i *ImageActionsServiceOp) GetByURI(ctx context.Context, rawurl string) (*Action, *Response, error) {
+	u, err := url.Parse(rawurl)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return i.get(ctx, u.Path)
+}
 
+func (i *ImageActionsServiceOp) get(ctx context.Context, path string) (*Action, *Response, error) {
 	req, err := i.client.NewRequest(ctx, http.MethodGet, path, nil)
 	if err != nil {
 		return nil, nil, err
diff --git a/images.go b/images.go
index 64e72e7..5db3747 100644
--- a/images.go
+++ b/images.go
@@ -10,7 +10,7 @@ const imageBasePath = "v2/images"
 
 // ImagesService is an interface for interfacing with the images
 // endpoints of the DigitalOcean API
-// See: https://developers.digitalocean.com/documentation/v2#images
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Images
 type ImagesService interface {
 	List(context.Context, *ListOptions) ([]Image, *Response, error)
 	ListDistribution(ctx context.Context, opt *ListOptions) ([]Image, *Response, error)
@@ -52,7 +52,9 @@ type Image struct {
 
 // ImageUpdateRequest represents a request to update an image.
 type ImageUpdateRequest struct {
-	Name string `json:"name"`
+	Name         string `json:"name,omitempty"`
+	Distribution string `json:"distribution,omitempty"`
+	Description  string `json:"description,omitempty"`
 }
 
 // CustomImageCreateRequest represents a request to create a custom image.
@@ -132,6 +134,7 @@ func (s *ImagesServiceOp) GetBySlug(ctx context.Context, slug string) (*Image, *
 	return s.get(ctx, interface{}(slug))
 }
 
+// Create a new image
 func (s *ImagesServiceOp) Create(ctx context.Context, createRequest *CustomImageCreateRequest) (*Image, *Response, error) {
 	if createRequest == nil {
 		return nil, nil, NewArgError("createRequest", "cannot be nil")
diff --git a/images_test.go b/images_test.go
index 49f49b4..9533825 100644
--- a/images_test.go
+++ b/images_test.go
@@ -353,12 +353,16 @@ func TestImages_Update(t *testing.T) {
 	defer teardown()
 
 	updateRequest := &ImageUpdateRequest{
-		Name: "name",
+		Name:         "name",
+		Distribution: "Fedora",
+		Description:  "Just testing...",
 	}
 
 	mux.HandleFunc("/v2/images/12345", func(w http.ResponseWriter, r *http.Request) {
 		expected := map[string]interface{}{
-			"name": "name",
+			"name":         "name",
+			"distribution": "Fedora",
+			"description":  "Just testing...",
 		}
 
 		var v map[string]interface{}
diff --git a/invoices.go b/invoices.go
index cc111f8..39bffbc 100644
--- a/invoices.go
+++ b/invoices.go
@@ -12,7 +12,7 @@ const invoicesBasePath = "v2/customers/my/invoices"
 
 // InvoicesService is an interface for interfacing with the Invoice
 // endpoints of the DigitalOcean API
-// See: https://developers.digitalocean.com/documentation/v2/#invoices
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Billing
 type InvoicesService interface {
 	Get(context.Context, string, *ListOptions) (*Invoice, *Response, error)
 	GetPDF(context.Context, string) ([]byte, *Response, error)
@@ -49,6 +49,7 @@ type InvoiceItem struct {
 	StartTime        time.Time `json:"start_time"`
 	EndTime          time.Time `json:"end_time"`
 	ProjectName      string    `json:"project_name"`
+	Category         string    `json:"category"`
 }
 
 // InvoiceList contains a paginated list of all of a customer's invoices.
@@ -170,7 +171,7 @@ func (s *InvoicesServiceOp) List(ctx context.Context, opt *ListOptions) (*Invoic
 	return root, resp, err
 }
 
-// Get a summary of metadata and summarized usage for an Invoice
+// GetSummary returns a summary of metadata and summarized usage for an Invoice
 func (s *InvoicesServiceOp) GetSummary(ctx context.Context, invoiceUUID string) (*InvoiceSummary, *Response, error) {
 	path := fmt.Sprintf("%s/%s/summary", invoicesBasePath, invoiceUUID)
 
@@ -188,7 +189,7 @@ func (s *InvoicesServiceOp) GetSummary(ctx context.Context, invoiceUUID string)
 	return root, resp, err
 }
 
-// Get the pdf for an Invoice
+// GetPDF returns the pdf for an Invoice
 func (s *InvoicesServiceOp) GetPDF(ctx context.Context, invoiceUUID string) ([]byte, *Response, error) {
 	path := fmt.Sprintf("%s/%s/pdf", invoicesBasePath, invoiceUUID)
 
@@ -206,7 +207,7 @@ func (s *InvoicesServiceOp) GetPDF(ctx context.Context, invoiceUUID string) ([]b
 	return root.Bytes(), resp, err
 }
 
-// Get the csv for an Invoice
+// GetCSV returns the csv for an Invoice
 func (s *InvoicesServiceOp) GetCSV(ctx context.Context, invoiceUUID string) ([]byte, *Response, error) {
 	path := fmt.Sprintf("%s/%s/csv", invoicesBasePath, invoiceUUID)
 
diff --git a/invoices_test.go b/invoices_test.go
index a3d53d5..2c5fc94 100644
--- a/invoices_test.go
+++ b/invoices_test.go
@@ -28,7 +28,8 @@ func TestInvoices_GetInvoices(t *testing.T) {
 					"duration_unit": "Hours",
 					"start_time": "2018-06-20T08:44:38Z",
 					"end_time": "2018-06-21T08:44:38Z",
-					"project_name": "My project"
+					"project_name": "My project",
+					"category": "iaas"
 				},
 				{
 					"product": "Load Balancers",
@@ -41,7 +42,8 @@ func TestInvoices_GetInvoices(t *testing.T) {
 					"duration_unit": "Hours",
 					"start_time": "2018-06-20T08:44:38Z",
 					"end_time": "2018-06-21T08:44:38Z",
-					"project_name": "My Second Project"
+					"project_name": "My Second Project",
+					"category": "paas"
 				}
 			],
 			"meta": {
@@ -68,6 +70,7 @@ func TestInvoices_GetInvoices(t *testing.T) {
 			StartTime:        time.Date(2018, 6, 20, 8, 44, 38, 0, time.UTC),
 			EndTime:          time.Date(2018, 6, 21, 8, 44, 38, 0, time.UTC),
 			ProjectName:      "My project",
+			Category:         "iaas",
 		},
 		{
 			Product:          "Load Balancers",
@@ -81,6 +84,7 @@ func TestInvoices_GetInvoices(t *testing.T) {
 			StartTime:        time.Date(2018, 6, 20, 8, 44, 38, 0, time.UTC),
 			EndTime:          time.Date(2018, 6, 21, 8, 44, 38, 0, time.UTC),
 			ProjectName:      "My Second Project",
+			Category:         "paas",
 		},
 	}
 	actualItems := invoice.InvoiceItems
diff --git a/keys.go b/keys.go
index b97554d..cd0bd29 100644
--- a/keys.go
+++ b/keys.go
@@ -8,9 +8,9 @@ import (
 
 const keysBasePath = "v2/account/keys"
 
-// KeysService is an interface for interfacing with the keys
+// KeysService is an interface for interfacing with the SSH keys
 // endpoints of the DigitalOcean API
-// See: https://developers.digitalocean.com/documentation/v2#keys
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/SSH-Keys
 type KeysService interface {
 	List(context.Context, *ListOptions) ([]Key, *Response, error)
 	GetByID(context.Context, int) (*Key, *Response, error)
@@ -22,7 +22,7 @@ type KeysService interface {
 	DeleteByFingerprint(context.Context, string) (*Response, error)
 }
 
-// KeysServiceOp handles communication with key related method of the
+// KeysServiceOp handles communication with SSH key related method of the
 // DigitalOcean API.
 type KeysServiceOp struct {
 	client *Client
@@ -38,7 +38,7 @@ type Key struct {
 	PublicKey   string `json:"public_key,omitempty"`
 }
 
-// KeyUpdateRequest represents a request to update a DigitalOcean key.
+// KeyUpdateRequest represents a request to update an SSH key stored in a DigitalOcean account.
 type KeyUpdateRequest struct {
 	Name string `json:"name"`
 }
@@ -57,13 +57,13 @@ func (s Key) String() string {
 	return Stringify(s)
 }
 
-// KeyCreateRequest represents a request to create a new key.
+// KeyCreateRequest represents a request to create a new SSH key.
 type KeyCreateRequest struct {
 	Name      string `json:"name"`
 	PublicKey string `json:"public_key"`
 }
 
-// List all keys
+// List all SSH keys
 func (s *KeysServiceOp) List(ctx context.Context, opt *ListOptions) ([]Key, *Response, error) {
 	path := keysBasePath
 	path, err := addOptions(path, opt)
@@ -107,7 +107,7 @@ func (s *KeysServiceOp) get(ctx context.Context, path string) (*Key, *Response,
 	return root.SSHKey, resp, err
 }
 
-// GetByID gets a Key by id
+// GetByID gets an SSH key by its ID
 func (s *KeysServiceOp) GetByID(ctx context.Context, keyID int) (*Key, *Response, error) {
 	if keyID < 1 {
 		return nil, nil, NewArgError("keyID", "cannot be less than 1")
@@ -117,7 +117,7 @@ func (s *KeysServiceOp) GetByID(ctx context.Context, keyID int) (*Key, *Response
 	return s.get(ctx, path)
 }
 
-// GetByFingerprint gets a Key by by fingerprint
+// GetByFingerprint gets an SSH key by its fingerprint
 func (s *KeysServiceOp) GetByFingerprint(ctx context.Context, fingerprint string) (*Key, *Response, error) {
 	if len(fingerprint) < 1 {
 		return nil, nil, NewArgError("fingerprint", "cannot not be empty")
@@ -127,7 +127,7 @@ func (s *KeysServiceOp) GetByFingerprint(ctx context.Context, fingerprint string
 	return s.get(ctx, path)
 }
 
-// Create a key using a KeyCreateRequest
+// Create an SSH key using a KeyCreateRequest
 func (s *KeysServiceOp) Create(ctx context.Context, createRequest *KeyCreateRequest) (*Key, *Response, error) {
 	if createRequest == nil {
 		return nil, nil, NewArgError("createRequest", "cannot be nil")
@@ -147,7 +147,7 @@ func (s *KeysServiceOp) Create(ctx context.Context, createRequest *KeyCreateRequ
 	return root.SSHKey, resp, err
 }
 
-// UpdateByID updates a key name by ID.
+// UpdateByID updates an SSH key name by ID.
 func (s *KeysServiceOp) UpdateByID(ctx context.Context, keyID int, updateRequest *KeyUpdateRequest) (*Key, *Response, error) {
 	if keyID < 1 {
 		return nil, nil, NewArgError("keyID", "cannot be less than 1")
@@ -172,7 +172,7 @@ func (s *KeysServiceOp) UpdateByID(ctx context.Context, keyID int, updateRequest
 	return root.SSHKey, resp, err
 }
 
-// UpdateByFingerprint updates a key name by fingerprint.
+// UpdateByFingerprint updates an SSH key name by fingerprint.
 func (s *KeysServiceOp) UpdateByFingerprint(ctx context.Context, fingerprint string, updateRequest *KeyUpdateRequest) (*Key, *Response, error) {
 	if len(fingerprint) < 1 {
 		return nil, nil, NewArgError("fingerprint", "cannot be empty")
@@ -197,7 +197,7 @@ func (s *KeysServiceOp) UpdateByFingerprint(ctx context.Context, fingerprint str
 	return root.SSHKey, resp, err
 }
 
-// Delete key using a path
+// Delete an SSH key using a path
 func (s *KeysServiceOp) delete(ctx context.Context, path string) (*Response, error) {
 	req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil)
 	if err != nil {
@@ -209,7 +209,7 @@ func (s *KeysServiceOp) delete(ctx context.Context, path string) (*Response, err
 	return resp, err
 }
 
-// DeleteByID deletes a key by its id
+// DeleteByID deletes an SSH key by its id
 func (s *KeysServiceOp) DeleteByID(ctx context.Context, keyID int) (*Response, error) {
 	if keyID < 1 {
 		return nil, NewArgError("keyID", "cannot be less than 1")
@@ -219,7 +219,7 @@ func (s *KeysServiceOp) DeleteByID(ctx context.Context, keyID int) (*Response, e
 	return s.delete(ctx, path)
 }
 
-// DeleteByFingerprint deletes a key by its fingerprint
+// DeleteByFingerprint deletes an SSH key by its fingerprint
 func (s *KeysServiceOp) DeleteByFingerprint(ctx context.Context, fingerprint string) (*Response, error) {
 	if len(fingerprint) < 1 {
 		return nil, NewArgError("fingerprint", "cannot be empty")
diff --git a/kubernetes.go b/kubernetes.go
index 9b80fef..38c380a 100644
--- a/kubernetes.go
+++ b/kubernetes.go
@@ -21,18 +21,22 @@ const (
 
 // KubernetesService is an interface for interfacing with the Kubernetes endpoints
 // of the DigitalOcean API.
-// See: https://developers.digitalocean.com/documentation/v2#kubernetes
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Kubernetes
 type KubernetesService interface {
 	Create(context.Context, *KubernetesClusterCreateRequest) (*KubernetesCluster, *Response, error)
 	Get(context.Context, string) (*KubernetesCluster, *Response, error)
 	GetUser(context.Context, string) (*KubernetesClusterUser, *Response, error)
 	GetUpgrades(context.Context, string) ([]*KubernetesVersion, *Response, error)
 	GetKubeConfig(context.Context, string) (*KubernetesClusterConfig, *Response, error)
+	GetKubeConfigWithExpiry(context.Context, string, int64) (*KubernetesClusterConfig, *Response, error)
 	GetCredentials(context.Context, string, *KubernetesClusterCredentialsGetRequest) (*KubernetesClusterCredentials, *Response, error)
 	List(context.Context, *ListOptions) ([]*KubernetesCluster, *Response, error)
 	Update(context.Context, string, *KubernetesClusterUpdateRequest) (*KubernetesCluster, *Response, error)
 	Upgrade(context.Context, string, *KubernetesClusterUpgradeRequest) (*Response, error)
 	Delete(context.Context, string) (*Response, error)
+	DeleteSelective(context.Context, string, *KubernetesClusterDeleteSelectiveRequest) (*Response, error)
+	DeleteDangerous(context.Context, string) (*Response, error)
+	ListAssociatedResourcesForDeletion(context.Context, string) (*KubernetesAssociatedResources, *Response, error)
 
 	CreateNodePool(ctx context.Context, clusterID string, req *KubernetesNodePoolCreateRequest) (*KubernetesNodePool, *Response, error)
 	GetNodePool(ctx context.Context, clusterID, poolID string) (*KubernetesNodePool, *Response, error)
@@ -45,6 +49,11 @@ type KubernetesService interface {
 	DeleteNode(ctx context.Context, clusterID, poolID, nodeID string, req *KubernetesNodeDeleteRequest) (*Response, error)
 
 	GetOptions(context.Context) (*KubernetesOptions, *Response, error)
+	AddRegistry(ctx context.Context, req *KubernetesClusterRegistryRequest) (*Response, error)
+	RemoveRegistry(ctx context.Context, req *KubernetesClusterRegistryRequest) (*Response, error)
+
+	RunClusterlint(ctx context.Context, clusterID string, req *KubernetesRunClusterlintRequest) (string, *Response, error)
+	GetClusterlintResults(ctx context.Context, clusterID string, req *KubernetesGetClusterlintRequest) ([]*ClusterlintDiagnostic, *Response, error)
 }
 
 var _ KubernetesService = &KubernetesServiceOp{}
@@ -62,10 +71,14 @@ type KubernetesClusterCreateRequest struct {
 	Tags        []string `json:"tags,omitempty"`
 	VPCUUID     string   `json:"vpc_uuid,omitempty"`
 
+	// Create cluster with highly available control plane
+	HA bool `json:"ha"`
+
 	NodePools []*KubernetesNodePoolCreateRequest `json:"node_pools,omitempty"`
 
 	MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy"`
 	AutoUpgrade       bool                         `json:"auto_upgrade"`
+	SurgeUpgrade      bool                         `json:"surge_upgrade"`
 }
 
 // KubernetesClusterUpdateRequest represents a request to update a Kubernetes cluster.
@@ -74,6 +87,17 @@ type KubernetesClusterUpdateRequest struct {
 	Tags              []string                     `json:"tags,omitempty"`
 	MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy,omitempty"`
 	AutoUpgrade       *bool                        `json:"auto_upgrade,omitempty"`
+	SurgeUpgrade      bool                         `json:"surge_upgrade,omitempty"`
+
+	// Convert cluster to run highly available control plane
+	HA *bool `json:"ha,omitempty"`
+}
+
+// KubernetesClusterDeleteSelectiveRequest represents a delete selective request to delete a cluster and it's associated resources.
+type KubernetesClusterDeleteSelectiveRequest struct {
+	Volumes         []string `json:"volumes"`
+	VolumeSnapshots []string `json:"volume_snapshots"`
+	LoadBalancers   []string `json:"load_balancers"`
 }
 
 // KubernetesClusterUpgradeRequest represents a request to upgrade a Kubernetes cluster.
@@ -81,6 +105,21 @@ type KubernetesClusterUpgradeRequest struct {
 	VersionSlug string `json:"version,omitempty"`
 }
 
+// Taint represents a Kubernetes taint that can be associated with a node pool
+// (and, transitively, with all nodes of that pool).
+type Taint struct {
+	Key    string
+	Value  string
+	Effect string
+}
+
+func (t Taint) String() string {
+	if t.Value == "" {
+		return fmt.Sprintf("%s:%s", t.Key, t.Effect)
+	}
+	return fmt.Sprintf("%s=%s:%s", t.Key, t.Value, t.Effect)
+}
+
 // KubernetesNodePoolCreateRequest represents a request to create a node pool for a
 // Kubernetes cluster.
 type KubernetesNodePoolCreateRequest struct {
@@ -89,6 +128,7 @@ type KubernetesNodePoolCreateRequest struct {
 	Count     int               `json:"count,omitempty"`
 	Tags      []string          `json:"tags,omitempty"`
 	Labels    map[string]string `json:"labels,omitempty"`
+	Taints    []Taint           `json:"taints,omitempty"`
 	AutoScale bool              `json:"auto_scale,omitempty"`
 	MinNodes  int               `json:"min_nodes,omitempty"`
 	MaxNodes  int               `json:"max_nodes,omitempty"`
@@ -101,6 +141,7 @@ type KubernetesNodePoolUpdateRequest struct {
 	Count     *int              `json:"count,omitempty"`
 	Tags      []string          `json:"tags,omitempty"`
 	Labels    map[string]string `json:"labels,omitempty"`
+	Taints    *[]Taint          `json:"taints,omitempty"`
 	AutoScale *bool             `json:"auto_scale,omitempty"`
 	MinNodes  *int              `json:"min_nodes,omitempty"`
 	MaxNodes  *int              `json:"max_nodes,omitempty"`
@@ -126,6 +167,22 @@ type KubernetesClusterCredentialsGetRequest struct {
 	ExpirySeconds *int `json:"expiry_seconds,omitempty"`
 }
 
+// KubernetesClusterRegistryRequest represents clusters to integrate with docr registry
+type KubernetesClusterRegistryRequest struct {
+	ClusterUUIDs []string `json:"cluster_uuids,omitempty"`
+}
+
+type KubernetesRunClusterlintRequest struct {
+	IncludeGroups []string `json:"include_groups"`
+	ExcludeGroups []string `json:"exclude_groups"`
+	IncludeChecks []string `json:"include_checks"`
+	ExcludeChecks []string `json:"exclude_checks"`
+}
+
+type KubernetesGetClusterlintRequest struct {
+	RunId string `json:"run_id"`
+}
+
 // KubernetesCluster represents a Kubernetes cluster.
 type KubernetesCluster struct {
 	ID            string   `json:"id,omitempty"`
@@ -139,16 +196,26 @@ type KubernetesCluster struct {
 	Tags          []string `json:"tags,omitempty"`
 	VPCUUID       string   `json:"vpc_uuid,omitempty"`
 
+	// Cluster runs a highly available control plane
+	HA bool `json:"ha,omitempty"`
+
 	NodePools []*KubernetesNodePool `json:"node_pools,omitempty"`
 
 	MaintenancePolicy *KubernetesMaintenancePolicy `json:"maintenance_policy,omitempty"`
 	AutoUpgrade       bool                         `json:"auto_upgrade,omitempty"`
+	SurgeUpgrade      bool                         `json:"surge_upgrade,omitempty"`
+	RegistryEnabled   bool                         `json:"registry_enabled,omitempty"`
 
 	Status    *KubernetesClusterStatus `json:"status,omitempty"`
 	CreatedAt time.Time                `json:"created_at,omitempty"`
 	UpdatedAt time.Time                `json:"updated_at,omitempty"`
 }
 
+// URN returns the Kubernetes cluster's ID in the format of DigitalOcean URN.
+func (kc KubernetesCluster) URN() string {
+	return ToURN("Kubernetes", kc.ID)
+}
+
 // KubernetesClusterUser represents a Kubernetes cluster user.
 type KubernetesClusterUser struct {
 	Username string   `json:"username,omitempty"`
@@ -178,13 +245,36 @@ type KubernetesMaintenancePolicy struct {
 type KubernetesMaintenancePolicyDay int
 
 const (
+	// KubernetesMaintenanceDayAny sets the KubernetesMaintenancePolicyDay to any
+	// day of the week
 	KubernetesMaintenanceDayAny KubernetesMaintenancePolicyDay = iota
+
+	// KubernetesMaintenanceDayMonday sets the KubernetesMaintenancePolicyDay to
+	// Monday
 	KubernetesMaintenanceDayMonday
+
+	// KubernetesMaintenanceDayTuesday sets the KubernetesMaintenancePolicyDay to
+	// Tuesday
 	KubernetesMaintenanceDayTuesday
+
+	// KubernetesMaintenanceDayWednesday sets the KubernetesMaintenancePolicyDay to
+	// Wednesday
 	KubernetesMaintenanceDayWednesday
+
+	// KubernetesMaintenanceDayThursday sets the KubernetesMaintenancePolicyDay to
+	// Thursday
 	KubernetesMaintenanceDayThursday
+
+	// KubernetesMaintenanceDayFriday sets the KubernetesMaintenancePolicyDay to
+	// Friday
 	KubernetesMaintenanceDayFriday
+
+	// KubernetesMaintenanceDaySaturday sets the KubernetesMaintenancePolicyDay to
+	// Saturday
 	KubernetesMaintenanceDaySaturday
+
+	// KubernetesMaintenanceDaySunday sets the KubernetesMaintenancePolicyDay to
+	// Sunday
 	KubernetesMaintenanceDaySunday
 )
 
@@ -230,6 +320,7 @@ func (k KubernetesMaintenancePolicyDay) String() string {
 
 }
 
+// UnmarshalJSON parses the JSON string into KubernetesMaintenancePolicyDay
 func (k *KubernetesMaintenancePolicyDay) UnmarshalJSON(data []byte) error {
 	var val string
 	if err := json.Unmarshal(data, &val); err != nil {
@@ -244,6 +335,7 @@ func (k *KubernetesMaintenancePolicyDay) UnmarshalJSON(data []byte) error {
 	return nil
 }
 
+// MarshalJSON returns the JSON string for KubernetesMaintenancePolicyDay
 func (k KubernetesMaintenancePolicyDay) MarshalJSON() ([]byte, error) {
 	if KubernetesMaintenanceDayAny <= k && k <= KubernetesMaintenanceDaySunday {
 		return json.Marshal(days[k])
@@ -305,6 +397,7 @@ type KubernetesNodePool struct {
 	Count     int               `json:"count,omitempty"`
 	Tags      []string          `json:"tags,omitempty"`
 	Labels    map[string]string `json:"labels,omitempty"`
+	Taints    []Taint           `json:"taints,omitempty"`
 	AutoScale bool              `json:"auto_scale,omitempty"`
 	MinNodes  int               `json:"min_nodes,omitempty"`
 	MaxNodes  int               `json:"max_nodes,omitempty"`
@@ -338,8 +431,9 @@ type KubernetesOptions struct {
 
 // KubernetesVersion is a DigitalOcean Kubernetes release.
 type KubernetesVersion struct {
-	Slug              string `json:"slug,omitempty"`
-	KubernetesVersion string `json:"kubernetes_version,omitempty"`
+	Slug              string   `json:"slug,omitempty"`
+	KubernetesVersion string   `json:"kubernetes_version,omitempty"`
+	SupportedFeatures []string `json:"supported_features,omitempty"`
 }
 
 // KubernetesNodeSize is a node sizes supported for Kubernetes clusters.
@@ -354,6 +448,41 @@ type KubernetesRegion struct {
 	Slug string `json:"slug"`
 }
 
+// ClusterlintDiagnostic is a diagnostic returned from clusterlint.
+type ClusterlintDiagnostic struct {
+	CheckName string             `json:"check_name"`
+	Severity  string             `json:"severity"`
+	Message   string             `json:"message"`
+	Object    *ClusterlintObject `json:"object"`
+}
+
+// ClusterlintObject is the object a clusterlint diagnostic refers to.
+type ClusterlintObject struct {
+	Kind      string              `json:"kind"`
+	Name      string              `json:"name"`
+	Namespace string              `json:"namespace"`
+	Owners    []*ClusterlintOwner `json:"owners,omitempty"`
+}
+
+// ClusterlintOwner indicates the resource that owns the offending object.
+type ClusterlintOwner struct {
+	Kind string `json:"kind"`
+	Name string `json:"name"`
+}
+
+// KubernetesAssociatedResources represents a cluster's associated resources
+type KubernetesAssociatedResources struct {
+	Volumes         []*AssociatedResource `json:"volumes"`
+	VolumeSnapshots []*AssociatedResource `json:"volume_snapshots"`
+	LoadBalancers   []*AssociatedResource `json:"load_balancers"`
+}
+
+// AssociatedResource is the object to represent a Kubernetes cluster associated resource's ID and Name.
+type AssociatedResource struct {
+	ID   string `json:"id"`
+	Name string `json:"name"`
+}
+
 type kubernetesClustersRoot struct {
 	Clusters []*KubernetesCluster `json:"kubernetes_clusters,omitempty"`
 	Links    *Links               `json:"links,omitempty"`
@@ -457,6 +586,54 @@ func (svc *KubernetesServiceOp) Delete(ctx context.Context, clusterID string) (*
 	return resp, nil
 }
 
+// DeleteSelective deletes a Kubernetes cluster and the specified associated resources.
+// Users can choose to delete specific volumes, volume snapshots or load balancers along with the cluster
+// There is no way to recover a cluster or the specified resources once destroyed.
+func (svc *KubernetesServiceOp) DeleteSelective(ctx context.Context, clusterID string, request *KubernetesClusterDeleteSelectiveRequest) (*Response, error) {
+	path := fmt.Sprintf("%s/%s/destroy_with_associated_resources/selective", kubernetesClustersPath, clusterID)
+	req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, request)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := svc.client.Do(ctx, req, nil)
+	if err != nil {
+		return resp, err
+	}
+	return resp, nil
+}
+
+// DeleteDangerous deletes a Kubernetes cluster and all its associated resources. There is no way to recover a cluster
+// or its associated resources once destroyed.
+func (svc *KubernetesServiceOp) DeleteDangerous(ctx context.Context, clusterID string) (*Response, error) {
+	path := fmt.Sprintf("%s/%s/destroy_with_associated_resources/dangerous", kubernetesClustersPath, clusterID)
+	req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := svc.client.Do(ctx, req, nil)
+	if err != nil {
+		return resp, err
+	}
+	return resp, nil
+}
+
+// ListAssociatedResourcesForDeletion lists a Kubernetes cluster's resources that can be selected
+// for deletion along with the cluster. See DeleteSelective
+// Associated resources include volumes, volume snapshots and load balancers.
+func (svc *KubernetesServiceOp) ListAssociatedResourcesForDeletion(ctx context.Context, clusterID string) (*KubernetesAssociatedResources, *Response, error) {
+	path := fmt.Sprintf("%s/%s/destroy_with_associated_resources", kubernetesClustersPath, clusterID)
+	req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	root := new(KubernetesAssociatedResources)
+	resp, err := svc.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	return root, resp, nil
+}
+
 // List returns a list of the Kubernetes clusters visible with the caller's API token.
 func (svc *KubernetesServiceOp) List(ctx context.Context, opts *ListOptions) ([]*KubernetesCluster, *Response, error) {
 	path := kubernetesClustersPath
@@ -509,6 +686,27 @@ func (svc *KubernetesServiceOp) GetKubeConfig(ctx context.Context, clusterID str
 	return res, resp, nil
 }
 
+// GetKubeConfigWithExpiry returns a Kubernetes config file for the specified cluster with expiry_seconds.
+func (svc *KubernetesServiceOp) GetKubeConfigWithExpiry(ctx context.Context, clusterID string, expirySeconds int64) (*KubernetesClusterConfig, *Response, error) {
+	path := fmt.Sprintf("%s/%s/kubeconfig", kubernetesClustersPath, clusterID)
+	req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	q := req.URL.Query()
+	q.Add("expiry_seconds", fmt.Sprintf("%d", expirySeconds))
+	req.URL.RawQuery = q.Encode()
+	configBytes := bytes.NewBuffer(nil)
+	resp, err := svc.client.Do(ctx, req, configBytes)
+	if err != nil {
+		return nil, resp, err
+	}
+	res := &KubernetesClusterConfig{
+		KubeconfigYAML: configBytes.Bytes(),
+	}
+	return res, resp, nil
+}
+
 // GetCredentials returns a Kubernetes API server credentials for the specified cluster.
 func (svc *KubernetesServiceOp) GetCredentials(ctx context.Context, clusterID string, get *KubernetesClusterCredentialsGetRequest) (*KubernetesClusterCredentials, *Response, error) {
 	path := fmt.Sprintf("%s/%s/credentials", kubernetesClustersPath, clusterID)
@@ -695,3 +893,79 @@ func (svc *KubernetesServiceOp) GetOptions(ctx context.Context) (*KubernetesOpti
 	}
 	return root.Options, resp, nil
 }
+
+// AddRegistry integrates docr registry with all the specified clusters
+func (svc *KubernetesServiceOp) AddRegistry(ctx context.Context, req *KubernetesClusterRegistryRequest) (*Response, error) {
+	path := fmt.Sprintf("%s/registry", kubernetesBasePath)
+	request, err := svc.client.NewRequest(ctx, http.MethodPost, path, req)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := svc.client.Do(ctx, request, nil)
+	if err != nil {
+		return resp, err
+	}
+	return resp, nil
+}
+
+// RemoveRegistry removes docr registry support for all the specified clusters
+func (svc *KubernetesServiceOp) RemoveRegistry(ctx context.Context, req *KubernetesClusterRegistryRequest) (*Response, error) {
+	path := fmt.Sprintf("%s/registry", kubernetesBasePath)
+	request, err := svc.client.NewRequest(ctx, http.MethodDelete, path, req)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := svc.client.Do(ctx, request, nil)
+	if err != nil {
+		return resp, err
+	}
+	return resp, nil
+}
+
+type runClusterlintRoot struct {
+	RunID string `json:"run_id"`
+}
+
+// RunClusterlint schedules a clusterlint run for the specified cluster
+func (svc *KubernetesServiceOp) RunClusterlint(ctx context.Context, clusterID string, req *KubernetesRunClusterlintRequest) (string, *Response, error) {
+	path := fmt.Sprintf("%s/%s/clusterlint", kubernetesClustersPath, clusterID)
+	request, err := svc.client.NewRequest(ctx, http.MethodPost, path, req)
+	if err != nil {
+		return "", nil, err
+	}
+	root := new(runClusterlintRoot)
+	resp, err := svc.client.Do(ctx, request, root)
+	if err != nil {
+		return "", resp, err
+	}
+	return root.RunID, resp, nil
+}
+
+type clusterlintDiagnosticsRoot struct {
+	Diagnostics []*ClusterlintDiagnostic
+}
+
+// GetClusterlintResults fetches the diagnostics after clusterlint run completes
+func (svc *KubernetesServiceOp) GetClusterlintResults(ctx context.Context, clusterID string, req *KubernetesGetClusterlintRequest) ([]*ClusterlintDiagnostic, *Response, error) {
+	path := fmt.Sprintf("%s/%s/clusterlint", kubernetesClustersPath, clusterID)
+	if req != nil {
+		v := make(url.Values)
+		if req.RunId != "" {
+			v.Set("run_id", req.RunId)
+		}
+		if query := v.Encode(); query != "" {
+			path = path + "?" + query
+		}
+	}
+
+	request, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	root := new(clusterlintDiagnosticsRoot)
+	resp, err := svc.client.Do(ctx, request, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	return root.Diagnostics, resp, nil
+}
diff --git a/kubernetes_test.go b/kubernetes_test.go
index 4c9adbd..2a54281 100644
--- a/kubernetes_test.go
+++ b/kubernetes_test.go
@@ -19,7 +19,7 @@ func TestKubernetesClusters_ListClusters(t *testing.T) {
 	kubeSvc := client.Kubernetes
 
 	wantClusters := []*KubernetesCluster{
-		&KubernetesCluster{
+		{
 			ID:            "8d91899c-0739-4a1a-acc5-deadbeefbb8f",
 			Name:          "blablabla",
 			RegionSlug:    "nyc1",
@@ -61,7 +61,7 @@ func TestKubernetesClusters_ListClusters(t *testing.T) {
 			CreatedAt: time.Date(2018, 6, 21, 8, 44, 38, 0, time.UTC),
 			UpdatedAt: time.Date(2018, 6, 21, 8, 44, 38, 0, time.UTC),
 		},
-		&KubernetesCluster{
+		{
 			ID:            "deadbeef-dead-4aa5-beef-deadbeef347d",
 			Name:          "antoine",
 			RegionSlug:    "nyc1",
@@ -367,6 +367,16 @@ func TestKubernetesClusters_Get(t *testing.T) {
 	require.Equal(t, want, got)
 }
 
+func TestKubernetesCluster_ToURN(t *testing.T) {
+	cluster := &KubernetesCluster{
+		ID: "deadbeef-dead-4aa5-beef-deadbeef347d",
+	}
+	want := "do:kubernetes:deadbeef-dead-4aa5-beef-deadbeef347d"
+	got := cluster.URN()
+
+	require.Equal(t, want, got)
+}
+
 func TestKubernetesClusters_GetUser(t *testing.T) {
 	setup()
 	defer teardown()
@@ -411,6 +421,26 @@ func TestKubernetesClusters_GetKubeConfig(t *testing.T) {
 	require.Equal(t, blob, got.KubeconfigYAML)
 }
 
+func TestKubernetesClusters_GetKubeConfigWithExpiry(t *testing.T) {
+	setup()
+	defer teardown()
+
+	kubeSvc := client.Kubernetes
+	want := "some YAML"
+	blob := []byte(want)
+	mux.HandleFunc("/v2/kubernetes/clusters/deadbeef-dead-4aa5-beef-deadbeef347d/kubeconfig", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		expirySeconds, ok := r.URL.Query()["expiry_seconds"]
+		assert.True(t, ok)
+		assert.Len(t, expirySeconds, 1)
+		assert.Contains(t, expirySeconds, "3600")
+		fmt.Fprint(w, want)
+	})
+	got, _, err := kubeSvc.GetKubeConfigWithExpiry(ctx, "deadbeef-dead-4aa5-beef-deadbeef347d", 3600)
+	require.NoError(t, err)
+	require.Equal(t, blob, got.KubeconfigYAML)
+}
+
 func TestKubernetesClusters_GetCredentials(t *testing.T) {
 	setup()
 	defer teardown()
@@ -462,7 +492,7 @@ func TestKubernetesClusters_GetCredentials_WithExpirySeconds(t *testing.T) {
 		fmt.Fprint(w, jBlob)
 	})
 	got, _, err := kubeSvc.GetCredentials(ctx, "deadbeef-dead-4aa5-beef-deadbeef347d", &KubernetesClusterCredentialsGetRequest{
-		ExpirySeconds: intPtr(60 * 60),
+		ExpirySeconds: PtrTo(60 * 60),
 	})
 	require.NoError(t, err)
 	require.Equal(t, want, got)
@@ -522,8 +552,10 @@ func TestKubernetesClusters_Create(t *testing.T) {
 		ServiceSubnet: "10.245.0.0/16",
 		Tags:          []string{"cluster-tag-1", "cluster-tag-2"},
 		VPCUUID:       "880b7f98-f062-404d-b33c-458d545696f6",
+		HA:            true,
+		SurgeUpgrade:  true,
 		NodePools: []*KubernetesNodePool{
-			&KubernetesNodePool{
+			{
 				ID:     "8d91899c-0739-4a1a-acc5-deadbeefbb8a",
 				Size:   "s-1vcpu-1gb",
 				Count:  2,
@@ -538,13 +570,15 @@ func TestKubernetesClusters_Create(t *testing.T) {
 		},
 	}
 	createRequest := &KubernetesClusterCreateRequest{
-		Name:        want.Name,
-		RegionSlug:  want.RegionSlug,
-		VersionSlug: want.VersionSlug,
-		Tags:        want.Tags,
-		VPCUUID:     want.VPCUUID,
+		Name:         want.Name,
+		RegionSlug:   want.RegionSlug,
+		VersionSlug:  want.VersionSlug,
+		Tags:         want.Tags,
+		VPCUUID:      want.VPCUUID,
+		SurgeUpgrade: true,
+		HA:           true,
 		NodePools: []*KubernetesNodePoolCreateRequest{
-			&KubernetesNodePoolCreateRequest{
+			{
 				Size:      want.NodePools[0].Size,
 				Count:     want.NodePools[0].Count,
 				Name:      want.NodePools[0].Name,
@@ -572,6 +606,8 @@ func TestKubernetesClusters_Create(t *testing.T) {
 			"cluster-tag-2"
 		],
 		"vpc_uuid": "880b7f98-f062-404d-b33c-458d545696f6",
+		"ha": true,
+		"surge_upgrade": true,
 		"node_pools": [
 			{
 				"id": "8d91899c-0739-4a1a-acc5-deadbeefbb8a",
@@ -626,7 +662,7 @@ func TestKubernetesClusters_Create_AutoScalePool(t *testing.T) {
 		Tags:          []string{"cluster-tag-1", "cluster-tag-2"},
 		VPCUUID:       "880b7f98-f062-404d-b33c-458d545696f6",
 		NodePools: []*KubernetesNodePool{
-			&KubernetesNodePool{
+			{
 				ID:        "8d91899c-0739-4a1a-acc5-deadbeefbb8a",
 				Size:      "s-1vcpu-1gb",
 				Count:     2,
@@ -649,7 +685,7 @@ func TestKubernetesClusters_Create_AutoScalePool(t *testing.T) {
 		Tags:        want.Tags,
 		VPCUUID:     want.VPCUUID,
 		NodePools: []*KubernetesNodePoolCreateRequest{
-			&KubernetesNodePoolCreateRequest{
+			{
 				Size:      want.NodePools[0].Size,
 				Count:     want.NodePools[0].Count,
 				Name:      want.NodePools[0].Name,
@@ -729,8 +765,10 @@ func TestKubernetesClusters_Update(t *testing.T) {
 		ServiceSubnet: "10.245.0.0/16",
 		Tags:          []string{"cluster-tag-1", "cluster-tag-2"},
 		VPCUUID:       "880b7f98-f062-404d-b33c-458d545696f6",
+		SurgeUpgrade:  true,
+		HA:            true,
 		NodePools: []*KubernetesNodePool{
-			&KubernetesNodePool{
+			{
 				ID:    "8d91899c-0739-4a1a-acc5-deadbeefbb8a",
 				Size:  "s-1vcpu-1gb",
 				Count: 2,
@@ -750,6 +788,7 @@ func TestKubernetesClusters_Update(t *testing.T) {
 		Name:              want.Name,
 		Tags:              want.Tags,
 		MaintenancePolicy: want.MaintenancePolicy,
+		SurgeUpgrade:      true,
 	}
 
 	jBlob := `
@@ -766,6 +805,8 @@ func TestKubernetesClusters_Update(t *testing.T) {
 			"cluster-tag-2"
 		],
 		"vpc_uuid": "880b7f98-f062-404d-b33c-458d545696f6",
+		"ha": true,
+		"surge_upgrade": true,
 		"node_pools": [
 			{
 				"id": "8d91899c-0739-4a1a-acc5-deadbeefbb8a",
@@ -787,7 +828,7 @@ func TestKubernetesClusters_Update(t *testing.T) {
 	}
 }`
 
-	expectedReqJSON := `{"name":"antoine-test-cluster","tags":["cluster-tag-1","cluster-tag-2"],"maintenance_policy":{"start_time":"00:00","duration":"","day":"monday"}}
+	expectedReqJSON := `{"name":"antoine-test-cluster","tags":["cluster-tag-1","cluster-tag-2"],"maintenance_policy":{"start_time":"00:00","duration":"","day":"monday"},"surge_upgrade":true}
 `
 
 	mux.HandleFunc("/v2/kubernetes/clusters/8d91899c-0739-4a1a-acc5-deadbeefbb8f", func(w http.ResponseWriter, r *http.Request) {
@@ -825,7 +866,7 @@ func TestKubernetesClusters_Update_FalseAutoUpgrade(t *testing.T) {
 		Tags:          []string{"cluster-tag-1", "cluster-tag-2"},
 		VPCUUID:       "880b7f98-f062-404d-b33c-458d545696f6",
 		NodePools: []*KubernetesNodePool{
-			&KubernetesNodePool{
+			{
 				ID:    "8d91899c-0739-4a1a-acc5-deadbeefbb8a",
 				Size:  "s-1vcpu-1gb",
 				Count: 2,
@@ -839,7 +880,7 @@ func TestKubernetesClusters_Update_FalseAutoUpgrade(t *testing.T) {
 		},
 	}
 	updateRequest := &KubernetesClusterUpdateRequest{
-		AutoUpgrade: boolPtr(false),
+		AutoUpgrade: PtrTo(false),
 	}
 
 	jBlob := `
@@ -935,6 +976,114 @@ func TestKubernetesClusters_Destroy(t *testing.T) {
 	require.NoError(t, err)
 }
 
+func TestKubernetesClusters_DeleteDangerous(t *testing.T) {
+	setup()
+	defer teardown()
+
+	kubeSvc := client.Kubernetes
+
+	mux.HandleFunc("/v2/kubernetes/clusters/deadbeef-dead-4aa5-beef-deadbeef347d/destroy_with_associated_resources/dangerous", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodDelete)
+	})
+
+	_, err := kubeSvc.DeleteDangerous(ctx, "deadbeef-dead-4aa5-beef-deadbeef347d")
+	require.NoError(t, err)
+}
+
+func TestKubernetesClusters_DeleteSelective(t *testing.T) {
+	setup()
+	defer teardown()
+
+	kubeSvc := client.Kubernetes
+
+	deleteRequest := &KubernetesClusterDeleteSelectiveRequest{
+		Volumes:         []string{"2241"},
+		VolumeSnapshots: []string{"7258"},
+		LoadBalancers:   []string{"9873"},
+	}
+
+	expectedReqJSON := `{"volumes":["2241"],"volume_snapshots":["7258"],"load_balancers":["9873"]}
+`
+
+	mux.HandleFunc("/v2/kubernetes/clusters/deadbeef-dead-4aa5-beef-deadbeef347d/destroy_with_associated_resources/selective", func(w http.ResponseWriter, r *http.Request) {
+		buf := new(bytes.Buffer)
+		buf.ReadFrom(r.Body)
+		require.Equal(t, expectedReqJSON, buf.String())
+
+		v := new(KubernetesClusterDeleteSelectiveRequest)
+		err := json.NewDecoder(buf).Decode(v)
+		require.NoError(t, err)
+
+		testMethod(t, r, http.MethodDelete)
+		require.Equal(t, v, deleteRequest)
+	})
+
+	_, err := kubeSvc.DeleteSelective(ctx, "deadbeef-dead-4aa5-beef-deadbeef347d", deleteRequest)
+	require.NoError(t, err)
+}
+
+func TestKubernetesClusters_ListAssociatedResourcesForDeletion(t *testing.T) {
+	setup()
+	defer teardown()
+
+	kubeSvc := client.Kubernetes
+	expectedRes := &KubernetesAssociatedResources{
+		Volumes: []*AssociatedResource{
+			{
+				ID:   "2241",
+				Name: "test-volume-1",
+			},
+		},
+		VolumeSnapshots: []*AssociatedResource{
+			{
+				ID:   "2425",
+				Name: "test-volume-snapshot-1",
+			},
+		},
+		LoadBalancers: []*AssociatedResource{
+			{
+				ID:   "4235",
+				Name: "test-load-balancer-1",
+			},
+		},
+	}
+	jBlob := `
+{
+	"volumes":
+	[
+		{
+		  "id": "2241",
+		  "name":"test-volume-1"
+		}
+	],
+	"volume_snapshots":
+	[
+		{
+		  "id":"2425",
+		  "name":"test-volume-snapshot-1"
+		}
+	],
+	"load_balancers":
+	[
+		{
+		  "id":"4235",
+		  "name":"test-load-balancer-1"
+		}
+	]
+}
+`
+
+	mux.HandleFunc("/v2/kubernetes/clusters/deadbeef-dead-4aa5-beef-deadbeef347d/destroy_with_associated_resources", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		fmt.Fprint(w, jBlob)
+	})
+
+	ar, _, err := kubeSvc.ListAssociatedResourcesForDeletion(ctx, "deadbeef-dead-4aa5-beef-deadbeef347d")
+	require.NoError(t, err)
+	require.Equal(t, expectedRes, ar)
+
+}
+
 func TestKubernetesClusters_CreateNodePool(t *testing.T) {
 	setup()
 	defer teardown()
@@ -1181,7 +1330,7 @@ func TestKubernetesClusters_UpdateNodePool(t *testing.T) {
 	}
 	updateRequest := &KubernetesNodePoolUpdateRequest{
 		Name:  "a better name",
-		Count: intPtr(4),
+		Count: PtrTo(4),
 		Tags:  []string{"tag-1", "tag-2"},
 	}
 
@@ -1235,7 +1384,7 @@ func TestKubernetesClusters_UpdateNodePool_ZeroCount(t *testing.T) {
 		MaxNodes:  0,
 	}
 	updateRequest := &KubernetesNodePoolUpdateRequest{
-		Count: intPtr(0),
+		Count: PtrTo(0),
 	}
 
 	jBlob := `
@@ -1290,9 +1439,9 @@ func TestKubernetesClusters_UpdateNodePool_AutoScale(t *testing.T) {
 		MaxNodes:  10,
 	}
 	updateRequest := &KubernetesNodePoolUpdateRequest{
-		AutoScale: boolPtr(true),
-		MinNodes:  intPtr(0),
-		MaxNodes:  intPtr(10),
+		AutoScale: PtrTo(true),
+		MinNodes:  PtrTo(0),
+		MaxNodes:  PtrTo(10),
 	}
 
 	jBlob := `
@@ -1428,7 +1577,16 @@ func TestKubernetesVersions_List(t *testing.T) {
 
 	want := &KubernetesOptions{
 		Versions: []*KubernetesVersion{
-			{Slug: "1.10.0-gen0", KubernetesVersion: "1.10.0"},
+			{
+				Slug:              "1.10.0-gen0",
+				KubernetesVersion: "1.10.0",
+				SupportedFeatures: []string{
+					"cluster-autoscaler",
+					"docr-integration",
+					"ha-control-plane",
+					"token-authentication",
+				},
+			},
 		},
 		Regions: []*KubernetesRegion{
 			{Name: "New York 3", Slug: "nyc3"},
@@ -1443,7 +1601,13 @@ func TestKubernetesVersions_List(t *testing.T) {
 		"versions": [
 			{
 				"slug": "1.10.0-gen0",
-				"kubernetes_version": "1.10.0"
+				"kubernetes_version": "1.10.0",
+				"supported_features": [
+					"cluster-autoscaler",
+					"docr-integration",
+					"ha-control-plane",
+					"token-authentication"
+				]
 			}
 		],
 		"regions": [
@@ -1471,6 +1635,216 @@ func TestKubernetesVersions_List(t *testing.T) {
 	require.Equal(t, want, got)
 }
 
+func TestKubernetesClusterRegistry_Add(t *testing.T) {
+	setup()
+	defer teardown()
+
+	kubeSvc := client.Kubernetes
+
+	addRequest := &KubernetesClusterRegistryRequest{
+		ClusterUUIDs: []string{"8d91899c-0739-4a1a-acc5-deadbeefbb8f"},
+	}
+
+	mux.HandleFunc("/v2/kubernetes/registry", func(w http.ResponseWriter, r *http.Request) {
+		v := new(KubernetesClusterRegistryRequest)
+		err := json.NewDecoder(r.Body).Decode(v)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		testMethod(t, r, http.MethodPost)
+		require.Equal(t, v, addRequest)
+	})
+
+	_, err := kubeSvc.AddRegistry(ctx, addRequest)
+	require.NoError(t, err)
+}
+
+func TestKubernetesClusterRegistry_Remove(t *testing.T) {
+	setup()
+	defer teardown()
+
+	kubeSvc := client.Kubernetes
+
+	remove := &KubernetesClusterRegistryRequest{
+		ClusterUUIDs: []string{"8d91899c-0739-4a1a-acc5-deadbeefbb8f"},
+	}
+
+	mux.HandleFunc("/v2/kubernetes/registry", func(w http.ResponseWriter, r *http.Request) {
+		v := new(KubernetesClusterRegistryRequest)
+		err := json.NewDecoder(r.Body).Decode(v)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		testMethod(t, r, http.MethodDelete)
+		require.Equal(t, v, remove)
+	})
+
+	_, err := kubeSvc.RemoveRegistry(ctx, remove)
+	require.NoError(t, err)
+}
+
+func TestKubernetesRunClusterlint_WithRequestBody(t *testing.T) {
+	setup()
+	defer teardown()
+
+	kubeSvc := client.Kubernetes
+	request := &KubernetesRunClusterlintRequest{IncludeGroups: []string{"doks"}}
+	want := "1234"
+	jBlob := `
+{
+	"run_id": "1234"
+}`
+
+	mux.HandleFunc("/v2/kubernetes/clusters/8d91899c-0739-4a1a-acc5-deadbeefbb8f/clusterlint", func(w http.ResponseWriter, r *http.Request) {
+		v := new(KubernetesRunClusterlintRequest)
+		err := json.NewDecoder(r.Body).Decode(v)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		testMethod(t, r, http.MethodPost)
+		require.Equal(t, v, request)
+		fmt.Fprint(w, jBlob)
+	})
+
+	runID, _, err := kubeSvc.RunClusterlint(ctx, "8d91899c-0739-4a1a-acc5-deadbeefbb8f", request)
+	require.NoError(t, err)
+	assert.Equal(t, want, runID)
+
+}
+
+func TestKubernetesRunClusterlint_WithoutRequestBody(t *testing.T) {
+	setup()
+	defer teardown()
+
+	kubeSvc := client.Kubernetes
+	want := "1234"
+	jBlob := `
+{
+	"run_id": "1234"
+}`
+
+	mux.HandleFunc("/v2/kubernetes/clusters/8d91899c-0739-4a1a-acc5-deadbeefbb8f/clusterlint", func(w http.ResponseWriter, r *http.Request) {
+		v := new(KubernetesRunClusterlintRequest)
+		err := json.NewDecoder(r.Body).Decode(v)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		testMethod(t, r, http.MethodPost)
+		require.Equal(t, v, &KubernetesRunClusterlintRequest{})
+		fmt.Fprint(w, jBlob)
+	})
+
+	runID, _, err := kubeSvc.RunClusterlint(ctx, "8d91899c-0739-4a1a-acc5-deadbeefbb8f", &KubernetesRunClusterlintRequest{})
+	require.NoError(t, err)
+	assert.Equal(t, want, runID)
+
+}
+
+func TestKubernetesGetClusterlint_WithRunID(t *testing.T) {
+	setup()
+	defer teardown()
+
+	kubeSvc := client.Kubernetes
+	r := &KubernetesGetClusterlintRequest{RunId: "1234"}
+	jBlob := `
+{
+	"run_id": "1234",
+  	"requested_at": "2019-10-30T05:34:07Z",
+  	"completed_at": "2019-10-30T05:34:11Z",
+  	"diagnostics": [
+		{
+      		"check_name": "unused-config-map",
+      		"severity": "warning",
+      		"message": "Unused config map",
+      		"object": {
+        		"kind": "config map",
+        		"name": "foo",
+        		"namespace": "kube-system"
+      		}
+    	}
+  	]
+}`
+
+	expected := []*ClusterlintDiagnostic{
+		{
+			CheckName: "unused-config-map",
+			Severity:  "warning",
+			Message:   "Unused config map",
+			Object: &ClusterlintObject{
+				Kind:      "config map",
+				Name:      "foo",
+				Namespace: "kube-system",
+				Owners:    nil,
+			},
+		},
+	}
+	mux.HandleFunc("/v2/kubernetes/clusters/8d91899c-0739-4a1a-acc5-deadbeefbb8f/clusterlint", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		require.Equal(t, "run_id=1234", r.URL.Query().Encode())
+		fmt.Fprint(w, jBlob)
+	})
+
+	diagnostics, _, err := kubeSvc.GetClusterlintResults(ctx, "8d91899c-0739-4a1a-acc5-deadbeefbb8f", r)
+	require.NoError(t, err)
+	assert.Equal(t, expected, diagnostics)
+
+}
+
+func TestKubernetesGetClusterlint_WithoutRunID(t *testing.T) {
+	setup()
+	defer teardown()
+
+	kubeSvc := client.Kubernetes
+	r := &KubernetesGetClusterlintRequest{}
+	jBlob := `
+{
+	"run_id": "1234",
+  	"requested_at": "2019-10-30T05:34:07Z",
+  	"completed_at": "2019-10-30T05:34:11Z",
+  	"diagnostics": [
+		{
+      		"check_name": "unused-config-map",
+      		"severity": "warning",
+      		"message": "Unused config map",
+      		"object": {
+        		"kind": "config map",
+        		"name": "foo",
+        		"namespace": "kube-system"
+      		}
+    	}
+  	]
+}`
+
+	expected := []*ClusterlintDiagnostic{
+		{
+			CheckName: "unused-config-map",
+			Severity:  "warning",
+			Message:   "Unused config map",
+			Object: &ClusterlintObject{
+				Kind:      "config map",
+				Name:      "foo",
+				Namespace: "kube-system",
+				Owners:    nil,
+			},
+		},
+	}
+
+	mux.HandleFunc("/v2/kubernetes/clusters/8d91899c-0739-4a1a-acc5-deadbeefbb8f/clusterlint", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		require.Equal(t, "", r.URL.Query().Encode())
+		fmt.Fprint(w, jBlob)
+	})
+
+	diagnostics, _, err := kubeSvc.GetClusterlintResults(ctx, "8d91899c-0739-4a1a-acc5-deadbeefbb8f", r)
+	require.NoError(t, err)
+	assert.Equal(t, expected, diagnostics)
+
+}
+
 var maintenancePolicyDayTests = []struct {
 	name  string
 	json  string
@@ -1505,13 +1879,9 @@ func TestWeekday_UnmarshalJSON(t *testing.T) {
 			var got KubernetesMaintenancePolicyDay
 			err := json.Unmarshal([]byte(ts.json), &got)
 			valid := err == nil
-			if valid != ts.valid {
-				t.Errorf("valid unmarshal case\n\tgot: %+v\n\twant : %+v", valid, ts.valid)
-			}
-
-			if valid && got != ts.day {
-				t.Errorf("\ninput: %s\ngot : %+v\nwant  : %+v\n",
-					ts.day, got, ts.day)
+			assert.Equal(t, ts.valid, valid)
+			if valid {
+				assert.Equal(t, ts.day, got)
 			}
 		})
 	}
@@ -1522,13 +1892,9 @@ func TestWeekday_MarshalJSON(t *testing.T) {
 		t.Run(ts.name, func(t *testing.T) {
 			out, err := json.Marshal(ts.day)
 			valid := err == nil
-			if valid != ts.valid {
-				t.Errorf("valid marshal case\n\tgot: %+v\n\twant : %+v", valid, ts.valid)
-			}
-
-			if valid && ts.json != string(out) {
-				t.Errorf("\ninput: %s\ngot : %+v\nwant  : %+v\n",
-					ts.day, string(out), ts.json)
+			assert.Equal(t, ts.valid, valid)
+			if valid {
+				assert.Equal(t, ts.json, string(out))
 			}
 		})
 	}
diff --git a/links.go b/links.go
index 6f350bf..4b5db97 100644
--- a/links.go
+++ b/links.go
@@ -32,6 +32,16 @@ func (l *Links) CurrentPage() (int, error) {
 	return l.Pages.current()
 }
 
+// NextPageToken is the page token to request the next page of the list.
+func (l *Links) NextPageToken() (string, error) {
+	return l.Pages.nextPageToken()
+}
+
+// PrevPageToken is the page token to request the previous page of the list.
+func (l *Links) PrevPageToken() (string, error) {
+	return l.Pages.prevPageToken()
+}
+
 func (p *Pages) current() (int, error) {
 	switch {
 	case p == nil:
@@ -50,6 +60,28 @@ func (p *Pages) current() (int, error) {
 	return 0, nil
 }
 
+func (p *Pages) nextPageToken() (string, error) {
+	if p == nil || p.Next == "" {
+		return "", nil
+	}
+	token, err := pageTokenFromURL(p.Next)
+	if err != nil {
+		return "", err
+	}
+	return token, nil
+}
+
+func (p *Pages) prevPageToken() (string, error) {
+	if p == nil || p.Prev == "" {
+		return "", nil
+	}
+	token, err := pageTokenFromURL(p.Prev)
+	if err != nil {
+		return "", err
+	}
+	return token, nil
+}
+
 // IsLastPage returns true if the current page is the last
 func (l *Links) IsLastPage() bool {
 	if l.Pages == nil {
@@ -77,6 +109,14 @@ func pageForURL(urlText string) (int, error) {
 	return page, nil
 }
 
+func pageTokenFromURL(urlText string) (string, error) {
+	u, err := url.ParseRequestURI(urlText)
+	if err != nil {
+		return "", err
+	}
+	return u.Query().Get("page_token"), nil
+}
+
 // Get a link action by id.
 func (la *LinkAction) Get(ctx context.Context, client *Client) (*Action, *Response, error) {
 	return client.Actions.Get(ctx, la.ID)
diff --git a/links_test.go b/links_test.go
index 2c7753d..303b5e6 100644
--- a/links_test.go
+++ b/links_test.go
@@ -130,25 +130,32 @@ func TestLinks_ParseMissing(t *testing.T) {
 
 func TestLinks_ParseURL(t *testing.T) {
 	type linkTest struct {
-		name, url string
-		expected  int
+		name, url         string
+		expectedPage      int
+		expectedPageToken string
 	}
 
 	linkTests := []linkTest{
 		{
-			name:     "prev",
-			url:      "https://api.digitalocean.com/v2/droplets/?page=1",
-			expected: 1,
+			name:         "prev",
+			url:          "https://api.digitalocean.com/v2/droplets/?page=1",
+			expectedPage: 1,
 		},
 		{
-			name:     "last",
-			url:      "https://api.digitalocean.com/v2/droplets/?page=5",
-			expected: 5,
+			name:         "last",
+			url:          "https://api.digitalocean.com/v2/droplets/?page=5",
+			expectedPage: 5,
 		},
 		{
-			name:     "nexta",
-			url:      "https://api.digitalocean.com/v2/droplets/?page=2",
-			expected: 2,
+			name:         "next",
+			url:          "https://api.digitalocean.com/v2/droplets/?page=2",
+			expectedPage: 2,
+		},
+		{
+			name:              "page token",
+			url:               "https://api.digitalocean.com/v2/droplets/?page=2&page_token=aaa",
+			expectedPage:      2,
+			expectedPageToken: "aaa",
 		},
 	}
 
@@ -158,9 +165,15 @@ func TestLinks_ParseURL(t *testing.T) {
 			t.Fatal(err)
 		}
 
-		if p != lT.expected {
+		if p != lT.expectedPage {
 			t.Errorf("expected page for '%s' to be '%d', was '%d'",
-				lT.url, lT.expected, p)
+				lT.url, lT.expectedPage, p)
+		}
+
+		pageToken, err := pageTokenFromURL(lT.url)
+		if pageToken != lT.expectedPageToken {
+			t.Errorf("expected pageToken for '%s' to be '%s', was '%s'",
+				lT.url, lT.expectedPageToken, pageToken)
 		}
 	}
 
@@ -197,3 +210,47 @@ func TestLinks_ParseEmptyString(t *testing.T) {
 		}
 	}
 }
+
+func TestLinks_NextPageToken(t *testing.T) {
+	t.Run("happy token", func(t *testing.T) {
+		checkNextPageToken(t, &Response{Links: &Links{
+			Pages: &Pages{
+				Next: "https://api.digitalocean.com/v2/droplets/?page_token=aaa",
+			},
+		}}, "aaa")
+	})
+	t.Run("empty token", func(t *testing.T) {
+		checkNextPageToken(t, &Response{Links: &Links{
+			Pages: &Pages{
+				Next: "https://api.digitalocean.com/v2/droplets/",
+			},
+		}}, "")
+	})
+	t.Run("no next page", func(t *testing.T) {
+		checkNextPageToken(t, &Response{Links: &Links{
+			Pages: &Pages{},
+		}}, "")
+	})
+}
+
+func TestLinks_ParseNextPageToken(t *testing.T) {
+	t.Run("happy token", func(t *testing.T) {
+		checkPreviousPageToken(t, &Response{Links: &Links{
+			Pages: &Pages{
+				Prev: "https://api.digitalocean.com/v2/droplets/?page_token=aaa",
+			},
+		}}, "aaa")
+	})
+	t.Run("empty token", func(t *testing.T) {
+		checkPreviousPageToken(t, &Response{Links: &Links{
+			Pages: &Pages{
+				Prev: "https://api.digitalocean.com/v2/droplets/",
+			},
+		}}, "")
+	})
+	t.Run("no next page", func(t *testing.T) {
+		checkPreviousPageToken(t, &Response{Links: &Links{
+			Pages: &Pages{},
+		}}, "")
+	})
+}
diff --git a/load_balancers.go b/load_balancers.go
index 86e9776..6a9a70e 100644
--- a/load_balancers.go
+++ b/load_balancers.go
@@ -12,7 +12,7 @@ const forwardingRulesPath = "forwarding_rules"
 const dropletsPath = "droplets"
 
 // LoadBalancersService is an interface for managing load balancers with the DigitalOcean API.
-// See: https://developers.digitalocean.com/documentation/v2#load-balancers
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Load-Balancers
 type LoadBalancersService interface {
 	Get(context.Context, string) (*LoadBalancer, *Response, error)
 	List(context.Context, *ListOptions) ([]LoadBalancer, *Response, error)
@@ -28,23 +28,32 @@ type LoadBalancersService interface {
 // LoadBalancer represents a DigitalOcean load balancer configuration.
 // Tags can only be provided upon the creation of a Load Balancer.
 type LoadBalancer struct {
-	ID                     string           `json:"id,omitempty"`
-	Name                   string           `json:"name,omitempty"`
-	IP                     string           `json:"ip,omitempty"`
-	Algorithm              string           `json:"algorithm,omitempty"`
-	Status                 string           `json:"status,omitempty"`
-	Created                string           `json:"created_at,omitempty"`
-	ForwardingRules        []ForwardingRule `json:"forwarding_rules,omitempty"`
-	HealthCheck            *HealthCheck     `json:"health_check,omitempty"`
-	StickySessions         *StickySessions  `json:"sticky_sessions,omitempty"`
-	Region                 *Region          `json:"region,omitempty"`
-	DropletIDs             []int            `json:"droplet_ids,omitempty"`
-	Tag                    string           `json:"tag,omitempty"`
-	Tags                   []string         `json:"tags,omitempty"`
-	RedirectHttpToHttps    bool             `json:"redirect_http_to_https,omitempty"`
-	EnableProxyProtocol    bool             `json:"enable_proxy_protocol,omitempty"`
-	EnableBackendKeepalive bool             `json:"enable_backend_keepalive,omitempty"`
-	VPCUUID                string           `json:"vpc_uuid,omitempty"`
+	ID   string `json:"id,omitempty"`
+	Name string `json:"name,omitempty"`
+	IP   string `json:"ip,omitempty"`
+	// SizeSlug is mutually exclusive with SizeUnit. Only one should be specified
+	SizeSlug string `json:"size,omitempty"`
+	// SizeUnit is mutually exclusive with SizeSlug. Only one should be specified
+	SizeUnit                     uint32           `json:"size_unit,omitempty"`
+	Algorithm                    string           `json:"algorithm,omitempty"`
+	Status                       string           `json:"status,omitempty"`
+	Created                      string           `json:"created_at,omitempty"`
+	ForwardingRules              []ForwardingRule `json:"forwarding_rules,omitempty"`
+	HealthCheck                  *HealthCheck     `json:"health_check,omitempty"`
+	StickySessions               *StickySessions  `json:"sticky_sessions,omitempty"`
+	Region                       *Region          `json:"region,omitempty"`
+	DropletIDs                   []int            `json:"droplet_ids,omitempty"`
+	Tag                          string           `json:"tag,omitempty"`
+	Tags                         []string         `json:"tags,omitempty"`
+	RedirectHttpToHttps          bool             `json:"redirect_http_to_https,omitempty"`
+	EnableProxyProtocol          bool             `json:"enable_proxy_protocol,omitempty"`
+	EnableBackendKeepalive       bool             `json:"enable_backend_keepalive,omitempty"`
+	VPCUUID                      string           `json:"vpc_uuid,omitempty"`
+	DisableLetsEncryptDNSRecords *bool            `json:"disable_lets_encrypt_dns_records,omitempty"`
+	ValidateOnly                 bool             `json:"validate_only,omitempty"`
+	ProjectID                    string           `json:"project_id,omitempty"`
+	HTTPIdleTimeoutSeconds       *uint64          `json:"http_idle_timeout_seconds,omitempty"`
+	Firewall                     *LBFirewall      `json:"firewall,omitempty"`
 }
 
 // String creates a human-readable description of a LoadBalancer.
@@ -52,6 +61,7 @@ func (l LoadBalancer) String() string {
 	return Stringify(l)
 }
 
+// URN returns the load balancer ID in a valid DO API URN form.
 func (l LoadBalancer) URN() string {
 	return ToURN("LoadBalancer", l.ID)
 }
@@ -60,16 +70,26 @@ func (l LoadBalancer) URN() string {
 // Modifying the returned LoadBalancerRequest will not modify the original LoadBalancer.
 func (l LoadBalancer) AsRequest() *LoadBalancerRequest {
 	r := LoadBalancerRequest{
-		Name:                   l.Name,
-		Algorithm:              l.Algorithm,
-		ForwardingRules:        append([]ForwardingRule(nil), l.ForwardingRules...),
-		DropletIDs:             append([]int(nil), l.DropletIDs...),
-		Tag:                    l.Tag,
-		RedirectHttpToHttps:    l.RedirectHttpToHttps,
-		EnableProxyProtocol:    l.EnableProxyProtocol,
-		EnableBackendKeepalive: l.EnableBackendKeepalive,
-		HealthCheck:            l.HealthCheck,
-		VPCUUID:                l.VPCUUID,
+		Name:                         l.Name,
+		Algorithm:                    l.Algorithm,
+		SizeSlug:                     l.SizeSlug,
+		SizeUnit:                     l.SizeUnit,
+		ForwardingRules:              append([]ForwardingRule(nil), l.ForwardingRules...),
+		DropletIDs:                   append([]int(nil), l.DropletIDs...),
+		Tag:                          l.Tag,
+		RedirectHttpToHttps:          l.RedirectHttpToHttps,
+		EnableProxyProtocol:          l.EnableProxyProtocol,
+		EnableBackendKeepalive:       l.EnableBackendKeepalive,
+		HealthCheck:                  l.HealthCheck,
+		VPCUUID:                      l.VPCUUID,
+		DisableLetsEncryptDNSRecords: l.DisableLetsEncryptDNSRecords,
+		ValidateOnly:                 l.ValidateOnly,
+		ProjectID:                    l.ProjectID,
+		HTTPIdleTimeoutSeconds:       l.HTTPIdleTimeoutSeconds,
+	}
+
+	if l.DisableLetsEncryptDNSRecords != nil {
+		*r.DisableLetsEncryptDNSRecords = *l.DisableLetsEncryptDNSRecords
 	}
 
 	if l.HealthCheck != nil {
@@ -83,6 +103,11 @@ func (l LoadBalancer) AsRequest() *LoadBalancerRequest {
 	if l.Region != nil {
 		r.Region = l.Region.Slug
 	}
+
+	if l.Firewall != nil {
+		r.Firewall = l.Firewall.deepCopy()
+	}
+
 	return &r
 }
 
@@ -129,21 +154,57 @@ func (s StickySessions) String() string {
 	return Stringify(s)
 }
 
+// LBFirewall holds the allow and deny rules for a loadbalancer's firewall.
+// Currently, allow and deny rules support cidrs and ips.
+// Please use the helper methods (IPSourceFirewall/CIDRSourceFirewall) to format the allow/deny rules.
+type LBFirewall struct {
+	Allow []string `json:"allow,omitempty"`
+	Deny  []string `json:"deny,omitempty"`
+}
+
+func (lbf *LBFirewall) deepCopy() *LBFirewall {
+	return &LBFirewall{
+		Allow: append([]string(nil), lbf.Allow...),
+		Deny:  append([]string(nil), lbf.Deny...),
+	}
+}
+
+// IPSourceFirewall takes an IP (string) and returns a formatted ip source firewall rule
+func IPSourceFirewall(ip string) string { return fmt.Sprintf("ip:%s", ip) }
+
+// CIDRSourceFirewall takes a CIDR notation IP address and prefix length string
+// like "192.0.2.0/24" and returns a formatted cidr source firewall rule
+func CIDRSourceFirewall(cidr string) string { return fmt.Sprintf("cidr:%s", cidr) }
+
+// String creates a human-readable description of an LBFirewall instance.
+func (f LBFirewall) String() string {
+	return Stringify(f)
+}
+
 // LoadBalancerRequest represents the configuration to be applied to an existing or a new load balancer.
 type LoadBalancerRequest struct {
-	Name                   string           `json:"name,omitempty"`
-	Algorithm              string           `json:"algorithm,omitempty"`
-	Region                 string           `json:"region,omitempty"`
-	ForwardingRules        []ForwardingRule `json:"forwarding_rules,omitempty"`
-	HealthCheck            *HealthCheck     `json:"health_check,omitempty"`
-	StickySessions         *StickySessions  `json:"sticky_sessions,omitempty"`
-	DropletIDs             []int            `json:"droplet_ids,omitempty"`
-	Tag                    string           `json:"tag,omitempty"`
-	Tags                   []string         `json:"tags,omitempty"`
-	RedirectHttpToHttps    bool             `json:"redirect_http_to_https,omitempty"`
-	EnableProxyProtocol    bool             `json:"enable_proxy_protocol,omitempty"`
-	EnableBackendKeepalive bool             `json:"enable_backend_keepalive,omitempty"`
-	VPCUUID                string           `json:"vpc_uuid,omitempty"`
+	Name      string `json:"name,omitempty"`
+	Algorithm string `json:"algorithm,omitempty"`
+	Region    string `json:"region,omitempty"`
+	// SizeSlug is mutually exclusive with SizeUnit. Only one should be specified
+	SizeSlug string `json:"size,omitempty"`
+	// SizeUnit is mutually exclusive with SizeSlug. Only one should be specified
+	SizeUnit                     uint32           `json:"size_unit,omitempty"`
+	ForwardingRules              []ForwardingRule `json:"forwarding_rules,omitempty"`
+	HealthCheck                  *HealthCheck     `json:"health_check,omitempty"`
+	StickySessions               *StickySessions  `json:"sticky_sessions,omitempty"`
+	DropletIDs                   []int            `json:"droplet_ids,omitempty"`
+	Tag                          string           `json:"tag,omitempty"`
+	Tags                         []string         `json:"tags,omitempty"`
+	RedirectHttpToHttps          bool             `json:"redirect_http_to_https,omitempty"`
+	EnableProxyProtocol          bool             `json:"enable_proxy_protocol,omitempty"`
+	EnableBackendKeepalive       bool             `json:"enable_backend_keepalive,omitempty"`
+	VPCUUID                      string           `json:"vpc_uuid,omitempty"`
+	DisableLetsEncryptDNSRecords *bool            `json:"disable_lets_encrypt_dns_records,omitempty"`
+	ValidateOnly                 bool             `json:"validate_only,omitempty"`
+	ProjectID                    string           `json:"project_id,omitempty"`
+	HTTPIdleTimeoutSeconds       *uint64          `json:"http_idle_timeout_seconds,omitempty"`
+	Firewall                     *LBFirewall      `json:"firewall,omitempty"`
 }
 
 // String creates a human-readable description of a LoadBalancerRequest.
diff --git a/load_balancers_test.go b/load_balancers_test.go
index b31de3e..6eae9c2 100644
--- a/load_balancers_test.go
+++ b/load_balancers_test.go
@@ -7,6 +7,7 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 var lbListJSONResponse = `
@@ -65,7 +66,14 @@ var lbListJSONResponse = `
             "droplet_ids":[
                 2,
                 21
-            ]
+            ],
+            "disable_lets_encrypt_dns_records": true,
+            "project_id": "6929eef6-4e45-11ed-bdc3-0242ac120002",
+            "http_idle_timeout_seconds": 60,
+            "firewall": {
+                "deny": ["cidr:1.2.0.0/16"],
+                "allow": ["ip:1.2.3.4"]
+            }
         }
     ],
     "links":{
@@ -145,7 +153,14 @@ var lbCreateJSONResponse = `
             21
         ],
         "redirect_http_to_https":true,
-        "vpc_uuid":"880b7f98-f062-404d-b33c-458d545696f6"
+        "vpc_uuid":"880b7f98-f062-404d-b33c-458d545696f6",
+        "disable_lets_encrypt_dns_records": true,
+        "project_id": "6929eef6-4e45-11ed-bdc3-0242ac120002",
+        "http_idle_timeout_seconds": 60,
+        "firewall": {
+            "deny": ["cidr:1.2.0.0/16"],
+            "allow": ["ip:1.2.3.4"]
+        }
     }
 }
 `
@@ -205,7 +220,14 @@ var lbGetJSONResponse = `
         "droplet_ids":[
             2,
             21
-        ]
+        ],
+        "disable_lets_encrypt_dns_records": false,
+        "project_id": "6929eef6-4e45-11ed-bdc3-0242ac120002",
+        "http_idle_timeout_seconds": 60,
+        "firewall": {
+            "deny": ["cidr:1.2.0.0/16"],
+            "allow": ["ip:1.2.3.4"]
+        }
     }
 }
 `
@@ -218,6 +240,7 @@ var lbUpdateJSONResponse = `
         "ip":"12.34.56.78",
         "algorithm":"least_connections",
         "status":"active",
+        "size_unit":2,
         "created_at":"2016-12-15T14:19:09Z",
         "forwarding_rules":[
             {
@@ -269,7 +292,13 @@ var lbUpdateJSONResponse = `
         "droplet_ids":[
             2,
             21
-        ]
+        ],
+        "project_id": "6929eef6-4e45-11ed-bdc3-0242ac120002",
+        "http_idle_timeout_seconds": 60,
+        "firewall": {
+            "deny": ["cidr:1.3.0.0/16"],
+            "allow": ["ip:1.2.3.5"]
+        }
     }
 }
 `
@@ -279,17 +308,16 @@ func TestLoadBalancers_Get(t *testing.T) {
 	defer teardown()
 
 	path := "/v2/load_balancers"
-	loadBalancerId := "37e6be88-01ec-4ec7-9bc6-a514d4719057"
-	path = fmt.Sprintf("%s/%s", path, loadBalancerId)
+	loadBalancerID := "37e6be88-01ec-4ec7-9bc6-a514d4719057"
+	path = fmt.Sprintf("%s/%s", path, loadBalancerID)
 	mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
 		testMethod(t, r, http.MethodGet)
 		fmt.Fprint(w, lbGetJSONResponse)
 	})
 
-	loadBalancer, _, err := client.LoadBalancers.Get(ctx, loadBalancerId)
-	if err != nil {
-		t.Errorf("LoadBalancers.Get returned error: %v", err)
-	}
+	loadBalancer, _, err := client.LoadBalancers.Get(ctx, loadBalancerID)
+	require.NoError(t, err)
+	expectedTimeout := uint64(60)
 
 	expected := &LoadBalancer{
 		ID:        "37e6be88-01ec-4ec7-9bc6-a514d4719057",
@@ -329,9 +357,18 @@ func TestLoadBalancers_Get(t *testing.T) {
 			Available: true,
 			Features:  []string{"private_networking", "backups", "ipv6", "metadata", "storage"},
 		},
-		DropletIDs: []int{2, 21},
+		DropletIDs:             []int{2, 21},
+		ProjectID:              "6929eef6-4e45-11ed-bdc3-0242ac120002",
+		HTTPIdleTimeoutSeconds: &expectedTimeout,
+		Firewall: &LBFirewall{
+			Allow: []string{"ip:1.2.3.4"},
+			Deny:  []string{"cidr:1.2.0.0/16"},
+		},
 	}
 
+	disableLetsEncryptDNSRecords := false
+	expected.DisableLetsEncryptDNSRecords = &disableLetsEncryptDNSRecords
+
 	assert.Equal(t, expected, loadBalancer)
 }
 
@@ -371,6 +408,11 @@ func TestLoadBalancers_Create(t *testing.T) {
 		DropletIDs:          []int{2, 21},
 		RedirectHttpToHttps: true,
 		VPCUUID:             "880b7f98-f062-404d-b33c-458d545696f6",
+		ProjectID:           "6929eef6-4e45-11ed-bdc3-0242ac120002",
+		Firewall: &LBFirewall{
+			Allow: []string{"ip:1.2.3.4"},
+			Deny:  []string{"cidr:1.2.0.0/16"},
+		},
 	}
 
 	path := "/v2/load_balancers"
@@ -388,10 +430,9 @@ func TestLoadBalancers_Create(t *testing.T) {
 	})
 
 	loadBalancer, _, err := client.LoadBalancers.Create(ctx, createRequest)
-	if err != nil {
-		t.Errorf("LoadBalancers.Create returned error: %v", err)
-	}
+	require.NoError(t, err)
 
+	expectedTimeout := uint64(60)
 	expected := &LoadBalancer{
 		ID:        "8268a81c-fcf5-423e-a337-bbfe95817f23",
 		Name:      "example-lb-01",
@@ -437,13 +478,140 @@ func TestLoadBalancers_Create(t *testing.T) {
 			Available: true,
 			Features:  []string{"private_networking", "backups", "ipv6", "metadata", "storage"},
 		},
+		Tags:                   []string{"my-tag"},
+		DropletIDs:             []int{2, 21},
+		RedirectHttpToHttps:    true,
+		VPCUUID:                "880b7f98-f062-404d-b33c-458d545696f6",
+		ProjectID:              "6929eef6-4e45-11ed-bdc3-0242ac120002",
+		HTTPIdleTimeoutSeconds: &expectedTimeout,
+		Firewall: &LBFirewall{
+			Allow: []string{"ip:1.2.3.4"},
+			Deny:  []string{"cidr:1.2.0.0/16"},
+		},
+	}
+
+	disableLetsEncryptDNSRecords := true
+	expected.DisableLetsEncryptDNSRecords = &disableLetsEncryptDNSRecords
+
+	assert.Equal(t, expected, loadBalancer)
+}
+
+func TestLoadBalancers_CreateValidateSucceeds(t *testing.T) {
+	setup()
+	defer teardown()
+
+	createRequest := &LoadBalancerRequest{
+		Name:      "example-lb-01",
+		Algorithm: "round_robin",
+		Region:    "nyc1",
+		ForwardingRules: []ForwardingRule{
+			{
+				EntryProtocol:  "https",
+				EntryPort:      443,
+				TargetProtocol: "http",
+				TargetPort:     80,
+				CertificateID:  "a-b-c",
+			},
+		},
+		HealthCheck: &HealthCheck{
+			Protocol:               "http",
+			Port:                   80,
+			Path:                   "/index.html",
+			CheckIntervalSeconds:   10,
+			ResponseTimeoutSeconds: 5,
+			UnhealthyThreshold:     3,
+			HealthyThreshold:       5,
+		},
+		StickySessions: &StickySessions{
+			Type:             "cookies",
+			CookieName:       "DO-LB",
+			CookieTtlSeconds: 5,
+		},
+		Tag:                 "my-tag",
 		Tags:                []string{"my-tag"},
 		DropletIDs:          []int{2, 21},
 		RedirectHttpToHttps: true,
 		VPCUUID:             "880b7f98-f062-404d-b33c-458d545696f6",
+		ValidateOnly:        true,
 	}
 
-	assert.Equal(t, expected, loadBalancer)
+	path := "/v2/load_balancers"
+	mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
+		v := new(LoadBalancerRequest)
+		err := json.NewDecoder(r.Body).Decode(v)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		w.WriteHeader(http.StatusNoContent)
+
+		testMethod(t, r, http.MethodPost)
+		assert.Equal(t, createRequest, v)
+	})
+
+	loadBalancer, resp, err := client.LoadBalancers.Create(ctx, createRequest)
+	require.NoError(t, err)
+	assert.Equal(t, http.StatusNoContent, resp.StatusCode)
+	assert.NotNil(t, resp)
+	assert.Nil(t, loadBalancer)
+}
+
+func TestLoadBalancers_CreateValidateFails(t *testing.T) {
+	setup()
+	defer teardown()
+
+	createRequest := &LoadBalancerRequest{
+		Name:      "example-lb-01",
+		Algorithm: "round_robin",
+		Region:    "nyc1",
+		ForwardingRules: []ForwardingRule{
+			{
+				EntryProtocol:  "https",
+				EntryPort:      443,
+				TargetProtocol: "http",
+				TargetPort:     80,
+				CertificateID:  "a-b-c",
+			},
+		},
+		HealthCheck: &HealthCheck{
+			Protocol:               "http",
+			Port:                   80,
+			Path:                   "/index.html",
+			CheckIntervalSeconds:   10,
+			ResponseTimeoutSeconds: 5,
+			UnhealthyThreshold:     3,
+			HealthyThreshold:       5,
+		},
+		StickySessions: &StickySessions{
+			Type:             "cookies",
+			CookieName:       "DO-LB",
+			CookieTtlSeconds: 5,
+		},
+		Tag:                 "my-tag",
+		Tags:                []string{"my-tag"},
+		DropletIDs:          []int{2, 21},
+		RedirectHttpToHttps: true,
+		VPCUUID:             "880b7f98-f062-404d-b33c-458d545696f6",
+		ValidateOnly:        true,
+	}
+
+	path := "/v2/load_balancers"
+	mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
+		v := new(LoadBalancerRequest)
+		err := json.NewDecoder(r.Body).Decode(v)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		w.WriteHeader(http.StatusUnprocessableEntity)
+		testMethod(t, r, http.MethodPost)
+		assert.Equal(t, createRequest, v)
+	})
+
+	loadBalancer, resp, err := client.LoadBalancers.Create(ctx, createRequest)
+	require.Error(t, err)
+	assert.Equal(t, http.StatusUnprocessableEntity, resp.StatusCode)
+	assert.Nil(t, loadBalancer)
 }
 
 func TestLoadBalancers_Update(t *testing.T) {
@@ -454,6 +622,7 @@ func TestLoadBalancers_Update(t *testing.T) {
 		Name:      "example-lb-01",
 		Algorithm: "least_connections",
 		Region:    "nyc1",
+		SizeUnit:  2,
 		ForwardingRules: []ForwardingRule{
 			{
 				EntryProtocol:  "http",
@@ -482,11 +651,16 @@ func TestLoadBalancers_Update(t *testing.T) {
 			Type: "none",
 		},
 		DropletIDs: []int{2, 21},
+		ProjectID:  "6929eef6-4e45-11ed-bdc3-0242ac120002",
+		Firewall: &LBFirewall{
+			Allow: []string{"ip:1.2.3.5"},
+			Deny:  []string{"cidr:1.3.0.0/16"},
+		},
 	}
 
 	path := "/v2/load_balancers"
-	loadBalancerId := "8268a81c-fcf5-423e-a337-bbfe95817f23"
-	path = fmt.Sprintf("%s/%s", path, loadBalancerId)
+	loadBalancerID := "8268a81c-fcf5-423e-a337-bbfe95817f23"
+	path = fmt.Sprintf("%s/%s", path, loadBalancerID)
 
 	mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
 		v := new(LoadBalancerRequest)
@@ -501,17 +675,17 @@ func TestLoadBalancers_Update(t *testing.T) {
 		fmt.Fprint(w, lbUpdateJSONResponse)
 	})
 
-	loadBalancer, _, err := client.LoadBalancers.Update(ctx, loadBalancerId, updateRequest)
-	if err != nil {
-		t.Errorf("LoadBalancers.Update returned error: %v", err)
-	}
+	loadBalancer, _, err := client.LoadBalancers.Update(ctx, loadBalancerID, updateRequest)
+	require.NoError(t, err)
 
+	expectedTimeout := uint64(60)
 	expected := &LoadBalancer{
 		ID:        "8268a81c-fcf5-423e-a337-bbfe95817f23",
 		Name:      "example-lb-01",
 		IP:        "12.34.56.78",
 		Algorithm: "least_connections",
 		Status:    "active",
+		SizeUnit:  2,
 		Created:   "2016-12-15T14:19:09Z",
 		ForwardingRules: []ForwardingRule{
 			{
@@ -547,7 +721,14 @@ func TestLoadBalancers_Update(t *testing.T) {
 			Available: true,
 			Features:  []string{"private_networking", "backups", "ipv6", "metadata", "storage"},
 		},
-		DropletIDs: []int{2, 21},
+		DropletIDs:                   []int{2, 21},
+		DisableLetsEncryptDNSRecords: nil,
+		ProjectID:                    "6929eef6-4e45-11ed-bdc3-0242ac120002",
+		HTTPIdleTimeoutSeconds:       &expectedTimeout,
+		Firewall: &LBFirewall{
+			Allow: []string{"ip:1.2.3.5"},
+			Deny:  []string{"cidr:1.3.0.0/16"},
+		},
 	}
 
 	assert.Equal(t, expected, loadBalancer)
@@ -565,10 +746,9 @@ func TestLoadBalancers_List(t *testing.T) {
 
 	loadBalancers, resp, err := client.LoadBalancers.List(ctx, nil)
 
-	if err != nil {
-		t.Errorf("LoadBalancers.List returned error: %v", err)
-	}
+	require.NoError(t, err)
 
+	expectedTimeout := uint64(60)
 	expectedLBs := []LoadBalancer{
 		{
 			ID:        "37e6be88-01ec-4ec7-9bc6-a514d4719057",
@@ -607,9 +787,17 @@ func TestLoadBalancers_List(t *testing.T) {
 				Available: true,
 				Features:  []string{"private_networking", "backups", "ipv6", "metadata", "storage"},
 			},
-			DropletIDs: []int{2, 21},
+			DropletIDs:             []int{2, 21},
+			ProjectID:              "6929eef6-4e45-11ed-bdc3-0242ac120002",
+			HTTPIdleTimeoutSeconds: &expectedTimeout,
+			Firewall: &LBFirewall{
+				Allow: []string{"ip:1.2.3.4"},
+				Deny:  []string{"cidr:1.2.0.0/16"},
+			},
 		},
 	}
+	disableLetsEncryptDNSRecords := true
+	expectedLBs[0].DisableLetsEncryptDNSRecords = &disableLetsEncryptDNSRecords
 
 	assert.Equal(t, expectedLBs, loadBalancers)
 
@@ -631,9 +819,7 @@ func TestLoadBalancers_List_Pagination(t *testing.T) {
 	opts := &ListOptions{Page: 2}
 	_, resp, err := client.LoadBalancers.List(ctx, opts)
 
-	if err != nil {
-		t.Errorf("LoadBalancers.List returned error: %v", err)
-	}
+	require.NoError(t, err)
 
 	assert.Equal(t, "http://localhost:3001/v2/load_balancers?page=2&per_page=1", resp.Links.Pages.Next)
 	assert.Equal(t, "http://localhost:3001/v2/load_balancers?page=3&per_page=1", resp.Links.Pages.Last)
@@ -652,9 +838,7 @@ func TestLoadBalancers_Delete(t *testing.T) {
 
 	_, err := client.LoadBalancers.Delete(ctx, lbID)
 
-	if err != nil {
-		t.Errorf("LoadBalancers.Delete returned error: %v", err)
-	}
+	assert.NoError(t, err)
 }
 
 func TestLoadBalancers_AddDroplets(t *testing.T) {
@@ -682,9 +866,7 @@ func TestLoadBalancers_AddDroplets(t *testing.T) {
 
 	_, err := client.LoadBalancers.AddDroplets(ctx, lbID, dropletIdsRequest.IDs...)
 
-	if err != nil {
-		t.Errorf("LoadBalancers.AddDroplets returned error: %v", err)
-	}
+	assert.NoError(t, err)
 }
 
 func TestLoadBalancers_RemoveDroplets(t *testing.T) {
@@ -712,9 +894,7 @@ func TestLoadBalancers_RemoveDroplets(t *testing.T) {
 
 	_, err := client.LoadBalancers.RemoveDroplets(ctx, lbID, dropletIdsRequest.IDs...)
 
-	if err != nil {
-		t.Errorf("LoadBalancers.RemoveDroplets returned error: %v", err)
-	}
+	assert.NoError(t, err)
 }
 
 func TestLoadBalancers_AddForwardingRules(t *testing.T) {
@@ -756,9 +936,7 @@ func TestLoadBalancers_AddForwardingRules(t *testing.T) {
 
 	_, err := client.LoadBalancers.AddForwardingRules(ctx, lbID, frr.Rules...)
 
-	if err != nil {
-		t.Errorf("LoadBalancers.AddForwardingRules returned error: %v", err)
-	}
+	assert.NoError(t, err)
 }
 
 func TestLoadBalancers_RemoveForwardingRules(t *testing.T) {
@@ -799,16 +977,16 @@ func TestLoadBalancers_RemoveForwardingRules(t *testing.T) {
 
 	_, err := client.LoadBalancers.RemoveForwardingRules(ctx, lbID, frr.Rules...)
 
-	if err != nil {
-		t.Errorf("LoadBalancers.RemoveForwardingRules returned error: %v", err)
-	}
+	assert.NoError(t, err)
 }
 
 func TestLoadBalancers_AsRequest(t *testing.T) {
+	lbIdleTimeout := uint64(60)
 	lb := &LoadBalancer{
 		ID:        "37e6be88-01ec-4ec7-9bc6-a514d4719057",
 		Name:      "test-loadbalancer",
 		IP:        "10.0.0.1",
+		SizeSlug:  "lb-small",
 		Algorithm: "least_connections",
 		Status:    "active",
 		Created:   "2011-06-24T12:00:00Z",
@@ -833,7 +1011,15 @@ func TestLoadBalancers_AsRequest(t *testing.T) {
 		EnableProxyProtocol:    true,
 		EnableBackendKeepalive: true,
 		VPCUUID:                "880b7f98-f062-404d-b33c-458d545696f6",
+		ProjectID:              "6929eef6-4e45-11ed-bdc3-0242ac120002",
+		ValidateOnly:           true,
+		HTTPIdleTimeoutSeconds: &lbIdleTimeout,
+		Firewall: &LBFirewall{
+			Allow: []string{"ip:1.2.3.5"},
+			Deny:  []string{"cidr:1.3.0.0/16"},
+		},
 	}
+
 	lb.DropletIDs = make([]int, 1, 2)
 	lb.DropletIDs[0] = 12345
 	lb.ForwardingRules = make([]ForwardingRule, 1, 2)
@@ -848,7 +1034,8 @@ func TestLoadBalancers_AsRequest(t *testing.T) {
 		Name:      "test-loadbalancer",
 		Algorithm: "least_connections",
 		Region:    "lon1",
-		ForwardingRules: []ForwardingRule{ForwardingRule{
+		SizeSlug:  "lb-small",
+		ForwardingRules: []ForwardingRule{{
 			EntryProtocol:  "http",
 			EntryPort:      80,
 			TargetProtocol: "http",
@@ -873,6 +1060,13 @@ func TestLoadBalancers_AsRequest(t *testing.T) {
 		EnableProxyProtocol:    true,
 		EnableBackendKeepalive: true,
 		VPCUUID:                "880b7f98-f062-404d-b33c-458d545696f6",
+		ProjectID:              "6929eef6-4e45-11ed-bdc3-0242ac120002",
+		HTTPIdleTimeoutSeconds: &lbIdleTimeout,
+		ValidateOnly:           true,
+		Firewall: &LBFirewall{
+			Allow: []string{"ip:1.2.3.5"},
+			Deny:  []string{"cidr:1.3.0.0/16"},
+		},
 	}
 
 	r := lb.AsRequest()
@@ -899,13 +1093,13 @@ func TestLoadBalancers_AsRequest(t *testing.T) {
 	})
 	assert.Equal(t, []int{12345, 54321}, r.DropletIDs)
 	assert.Equal(t, []ForwardingRule{
-		ForwardingRule{
+		{
 			EntryProtocol:  "http",
 			EntryPort:      80,
 			TargetProtocol: "http",
 			TargetPort:     80,
 		},
-		ForwardingRule{
+		{
 			EntryProtocol:  "https",
 			EntryPort:      443,
 			TargetProtocol: "https",
diff --git a/metrics/metrics.go b/metrics/metrics.go
new file mode 100644
index 0000000..cc2ac6a
--- /dev/null
+++ b/metrics/metrics.go
@@ -0,0 +1,81 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package metrics is a minimal copy of github.com/prometheus/common/model
+// providing types to work with the Prometheus-style results in a DigitalOcean
+// Monitoring metrics response.
+package metrics
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+const (
+	// MetricNameLabel is the label name indicating the metric name of a
+	// timeseries.
+	MetricNameLabel = "__name__"
+)
+
+// A LabelSet is a collection of LabelName and LabelValue pairs.  The LabelSet
+// may be fully-qualified down to the point where it may resolve to a single
+// Metric in the data store or not.  All operations that occur within the realm
+// of a LabelSet can emit a vector of Metric entities to which the LabelSet may
+// match.
+type LabelSet map[LabelName]LabelValue
+
+func (l LabelSet) String() string {
+	lstrs := make([]string, 0, len(l))
+	for l, v := range l {
+		lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v))
+	}
+
+	sort.Strings(lstrs)
+	return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
+}
+
+// A LabelValue is an associated value for a MetricLabelName.
+type LabelValue string
+
+// A LabelName is a key for a Metric.
+type LabelName string
+
+// A Metric is similar to a LabelSet, but the key difference is that a Metric is
+// a singleton and refers to one and only one stream of samples.
+type Metric LabelSet
+
+func (m Metric) String() string {
+	metricName, hasName := m[MetricNameLabel]
+	numLabels := len(m) - 1
+	if !hasName {
+		numLabels = len(m)
+	}
+	labelStrings := make([]string, 0, numLabels)
+	for label, value := range m {
+		if label != MetricNameLabel {
+			labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value))
+		}
+	}
+
+	switch numLabels {
+	case 0:
+		if hasName {
+			return string(metricName)
+		}
+		return "{}"
+	default:
+		sort.Strings(labelStrings)
+		return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
+	}
+}
diff --git a/metrics/metrics_test.go b/metrics/metrics_test.go
new file mode 100644
index 0000000..1e03752
--- /dev/null
+++ b/metrics/metrics_test.go
@@ -0,0 +1,66 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+	"testing"
+)
+
+func TestMetricToString(t *testing.T) {
+	scenarios := []struct {
+		name     string
+		input    Metric
+		expected string
+	}{
+		{
+			name: "valid metric without __name__ label",
+			input: Metric{
+				"first_name":   "electro",
+				"occupation":   "robot",
+				"manufacturer": "westinghouse",
+			},
+			expected: `{first_name="electro", manufacturer="westinghouse", occupation="robot"}`,
+		},
+		{
+			name: "valid metric with __name__ label",
+			input: Metric{
+				"__name__":     "electro",
+				"occupation":   "robot",
+				"manufacturer": "westinghouse",
+			},
+			expected: `electro{manufacturer="westinghouse", occupation="robot"}`,
+		},
+		{
+			name: "empty metric with __name__ label",
+			input: Metric{
+				"__name__": "fooname",
+			},
+			expected: "fooname",
+		},
+		{
+			name:     "empty metric",
+			input:    Metric{},
+			expected: "{}",
+		},
+	}
+
+	for _, scenario := range scenarios {
+		t.Run(scenario.name, func(t *testing.T) {
+			actual := scenario.input.String()
+			if actual != scenario.expected {
+				t.Errorf("expected string output %s but got %s", scenario.expected, actual)
+			}
+		})
+	}
+}
diff --git a/metrics/time.go b/metrics/time.go
new file mode 100644
index 0000000..2d50795
--- /dev/null
+++ b/metrics/time.go
@@ -0,0 +1,164 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+	"time"
+)
+
+const (
+	// MinimumTick is the minimum supported time resolution. This has to be
+	// at least time.Second in order for the code below to work.
+	minimumTick = time.Millisecond
+	// second is the Time duration equivalent to one second.
+	second = int64(time.Second / minimumTick)
+	// The number of nanoseconds per minimum tick.
+	nanosPerTick = int64(minimumTick / time.Nanosecond)
+
+	// Earliest is the earliest Time representable. Handy for
+	// initializing a high watermark.
+	Earliest = Time(math.MinInt64)
+	// Latest is the latest Time representable. Handy for initializing
+	// a low watermark.
+	Latest = Time(math.MaxInt64)
+)
+
+// Time is the number of milliseconds since the epoch
+// (1970-01-01 00:00 UTC) excluding leap seconds.
+type Time int64
+
+// Interval describes an interval between two timestamps.
+type Interval struct {
+	Start, End Time
+}
+
+// Now returns the current time as a Time.
+func Now() Time {
+	return TimeFromUnixNano(time.Now().UnixNano())
+}
+
+// TimeFromUnix returns the Time equivalent to the Unix Time t
+// provided in seconds.
+func TimeFromUnix(t int64) Time {
+	return Time(t * second)
+}
+
+// TimeFromUnixNano returns the Time equivalent to the Unix Time
+// t provided in nanoseconds.
+func TimeFromUnixNano(t int64) Time {
+	return Time(t / nanosPerTick)
+}
+
+// Equal reports whether two Times represent the same instant.
+func (t Time) Equal(o Time) bool {
+	return t == o
+}
+
+// Before reports whether the Time t is before o.
+func (t Time) Before(o Time) bool {
+	return t < o
+}
+
+// After reports whether the Time t is after o.
+func (t Time) After(o Time) bool {
+	return t > o
+}
+
+// Add returns the Time t + d.
+func (t Time) Add(d time.Duration) Time {
+	return t + Time(d/minimumTick)
+}
+
+// Sub returns the Duration t - o.
+func (t Time) Sub(o Time) time.Duration {
+	return time.Duration(t-o) * minimumTick
+}
+
+// Time returns the time.Time representation of t.
+func (t Time) Time() time.Time {
+	return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)
+}
+
+// Unix returns t as a Unix time, the number of seconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) Unix() int64 {
+	return int64(t) / second
+}
+
+// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) UnixNano() int64 {
+	return int64(t) * nanosPerTick
+}
+
+// The number of digits after the dot.
+var dotPrecision = int(math.Log10(float64(second)))
+
+// String returns a string representation of the Time.
+func (t Time) String() string {
+	return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (t Time) MarshalJSON() ([]byte, error) {
+	return []byte(t.String()), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (t *Time) UnmarshalJSON(b []byte) error {
+	p := strings.Split(string(b), ".")
+	switch len(p) {
+	case 1:
+		v, err := strconv.ParseInt(string(p[0]), 10, 64)
+		if err != nil {
+			return err
+		}
+		*t = Time(v * second)
+
+	case 2:
+		v, err := strconv.ParseInt(string(p[0]), 10, 64)
+		if err != nil {
+			return err
+		}
+		v *= second
+
+		prec := dotPrecision - len(p[1])
+		if prec < 0 {
+			p[1] = p[1][:dotPrecision]
+		} else if prec > 0 {
+			p[1] = p[1] + strings.Repeat("0", prec)
+		}
+
+		va, err := strconv.ParseInt(p[1], 10, 32)
+		if err != nil {
+			return err
+		}
+
+		// If the value was something like -0.1 the negative is lost in the
+		// parsing because of the leading zero, this ensures that we capture it.
+		if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 {
+			*t = Time(v+va) * -1
+		} else {
+			*t = Time(v + va)
+		}
+
+	default:
+		return fmt.Errorf("invalid time %q", string(b))
+	}
+	return nil
+}
diff --git a/metrics/time_test.go b/metrics/time_test.go
new file mode 100644
index 0000000..6fc7702
--- /dev/null
+++ b/metrics/time_test.go
@@ -0,0 +1,121 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+	"strconv"
+	"testing"
+	"time"
+)
+
+func TestComparators(t *testing.T) {
+	t1a := TimeFromUnix(0)
+	t1b := TimeFromUnix(0)
+	t2 := TimeFromUnix(2*second - 1)
+
+	if !t1a.Equal(t1b) {
+		t.Fatalf("Expected %s to be equal to %s", t1a, t1b)
+	}
+	if t1a.Equal(t2) {
+		t.Fatalf("Expected %s to not be equal to %s", t1a, t2)
+	}
+
+	if !t1a.Before(t2) {
+		t.Fatalf("Expected %s to be before %s", t1a, t2)
+	}
+	if t1a.Before(t1b) {
+		t.Fatalf("Expected %s to not be before %s", t1a, t1b)
+	}
+
+	if !t2.After(t1a) {
+		t.Fatalf("Expected %s to be after %s", t2, t1a)
+	}
+	if t1b.After(t1a) {
+		t.Fatalf("Expected %s to not be after %s", t1b, t1a)
+	}
+}
+
+func TestTimeConversions(t *testing.T) {
+	unixSecs := int64(1136239445)
+	unixNsecs := int64(123456789)
+	unixNano := unixSecs*1e9 + unixNsecs
+
+	t1 := time.Unix(unixSecs, unixNsecs-unixNsecs%nanosPerTick)
+	t2 := time.Unix(unixSecs, unixNsecs)
+
+	ts := TimeFromUnixNano(unixNano)
+	if !ts.Time().Equal(t1) {
+		t.Fatalf("Expected %s, got %s", t1, ts.Time())
+	}
+
+	// Test available precision.
+	ts = TimeFromUnixNano(t2.UnixNano())
+	if !ts.Time().Equal(t1) {
+		t.Fatalf("Expected %s, got %s", t1, ts.Time())
+	}
+
+	if ts.UnixNano() != unixNano-unixNano%nanosPerTick {
+		t.Fatalf("Expected %d, got %d", unixNano, ts.UnixNano())
+	}
+}
+
+func TestDuration(t *testing.T) {
+	duration := time.Second + time.Minute + time.Hour
+	goTime := time.Unix(1136239445, 0)
+
+	ts := TimeFromUnix(goTime.Unix())
+	if !goTime.Add(duration).Equal(ts.Add(duration).Time()) {
+		t.Fatalf("Expected %s to be equal to %s", goTime.Add(duration), ts.Add(duration))
+	}
+
+	earlier := ts.Add(-duration)
+	delta := ts.Sub(earlier)
+	if delta != duration {
+		t.Fatalf("Expected %s to be equal to %s", delta, duration)
+	}
+}
+
+func TestTimeJSON(t *testing.T) {
+	tests := []struct {
+		in  Time
+		out string
+	}{
+		{Time(1), `0.001`},
+		{Time(-1), `-0.001`},
+	}
+
+	for i, test := range tests {
+		t.Run(strconv.Itoa(i), func(t *testing.T) {
+			b, err := test.in.MarshalJSON()
+			if err != nil {
+				t.Fatalf("Error marshaling time: %v", err)
+			}
+
+			if string(b) != test.out {
+				t.Errorf("Mismatch in marshal expected=%s actual=%s", test.out, b)
+			}
+
+			var tm Time
+			if err := tm.UnmarshalJSON(b); err != nil {
+				t.Fatalf("Error Unmarshaling time: %v", err)
+			}
+
+			if !test.in.Equal(tm) {
+				t.Fatalf("Mismatch after Unmarshal expected=%v actual=%v", test.in, tm)
+			}
+
+		})
+	}
+
+}
diff --git a/metrics/values.go b/metrics/values.go
new file mode 100644
index 0000000..ae39ef2
--- /dev/null
+++ b/metrics/values.go
@@ -0,0 +1,100 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+	"encoding/json"
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+)
+
+// A SampleValue is a representation of a value for a given sample at a given time.
+type SampleValue float64
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (v *SampleValue) UnmarshalJSON(b []byte) error {
+	if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+		return fmt.Errorf("sample value must be a quoted string")
+	}
+	f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+	if err != nil {
+		return err
+	}
+	*v = SampleValue(f)
+	return nil
+}
+
+// MarshalJSON implements json.Marshaler.
+func (v SampleValue) MarshalJSON() ([]byte, error) {
+	return json.Marshal(v.String())
+}
+
+func (v SampleValue) String() string {
+	return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+// Equal returns true if the value of v and o is equal or if both are NaN. Note
+// that v==o is false if both are NaN. If you want the conventional float
+// behavior, use == to compare two SampleValues.
+func (v SampleValue) Equal(o SampleValue) bool {
+	if v == o {
+		return true
+	}
+	return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
+}
+
+// SamplePair pairs a SampleValue with a Timestamp.
+type SamplePair struct {
+	Timestamp Time
+	Value     SampleValue
+}
+
+func (s SamplePair) String() string {
+	return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *SamplePair) UnmarshalJSON(b []byte) error {
+	v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
+	return json.Unmarshal(b, &v)
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s SamplePair) MarshalJSON() ([]byte, error) {
+	t, err := json.Marshal(s.Timestamp)
+	if err != nil {
+		return nil, err
+	}
+	v, err := json.Marshal(s.Value)
+	if err != nil {
+		return nil, err
+	}
+	return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+// SampleStream is a stream of Values belonging to an attached Metric.
+type SampleStream struct {
+	Metric Metric       `json:"metric"`
+	Values []SamplePair `json:"values"`
+}
+
+func (ss SampleStream) String() string {
+	vals := make([]string, len(ss.Values))
+	for i, v := range ss.Values {
+		vals[i] = v.String()
+	}
+	return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
+}
diff --git a/metrics/values_test.go b/metrics/values_test.go
new file mode 100644
index 0000000..c3a36b1
--- /dev/null
+++ b/metrics/values_test.go
@@ -0,0 +1,114 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+	"encoding/json"
+	"math"
+	"testing"
+)
+
+func TestEqualValues(t *testing.T) {
+	tests := map[string]struct {
+		in1, in2 SampleValue
+		want     bool
+	}{
+		"equal floats": {
+			in1:  3.14,
+			in2:  3.14,
+			want: true,
+		},
+		"unequal floats": {
+			in1:  3.14,
+			in2:  3.1415,
+			want: false,
+		},
+		"positive infinities": {
+			in1:  SampleValue(math.Inf(+1)),
+			in2:  SampleValue(math.Inf(+1)),
+			want: true,
+		},
+		"negative infinities": {
+			in1:  SampleValue(math.Inf(-1)),
+			in2:  SampleValue(math.Inf(-1)),
+			want: true,
+		},
+		"different infinities": {
+			in1:  SampleValue(math.Inf(+1)),
+			in2:  SampleValue(math.Inf(-1)),
+			want: false,
+		},
+		"number and infinity": {
+			in1:  42,
+			in2:  SampleValue(math.Inf(+1)),
+			want: false,
+		},
+		"number and NaN": {
+			in1:  42,
+			in2:  SampleValue(math.NaN()),
+			want: false,
+		},
+		"NaNs": {
+			in1:  SampleValue(math.NaN()),
+			in2:  SampleValue(math.NaN()),
+			want: true, // !!!
+		},
+	}
+
+	for name, test := range tests {
+		got := test.in1.Equal(test.in2)
+		if got != test.want {
+			t.Errorf("Comparing %s, %f and %f: got %t, want %t", name, test.in1, test.in2, got, test.want)
+		}
+	}
+}
+
+func TestSamplePairJSON(t *testing.T) {
+	input := []struct {
+		plain string
+		value SamplePair
+	}{
+		{
+			plain: `[1234.567,"123.1"]`,
+			value: SamplePair{
+				Value:     123.1,
+				Timestamp: 1234567,
+			},
+		},
+	}
+
+	for _, test := range input {
+		b, err := json.Marshal(test.value)
+		if err != nil {
+			t.Error(err)
+			continue
+		}
+
+		if string(b) != test.plain {
+			t.Errorf("encoding error: expected %q, got %q", test.plain, b)
+			continue
+		}
+
+		var sp SamplePair
+		err = json.Unmarshal(b, &sp)
+		if err != nil {
+			t.Error(err)
+			continue
+		}
+
+		if sp != test.value {
+			t.Errorf("decoding error: expected %v, got %v", test.value, sp)
+		}
+	}
+}
diff --git a/monitoring.go b/monitoring.go
new file mode 100644
index 0000000..937bb8d
--- /dev/null
+++ b/monitoring.go
@@ -0,0 +1,374 @@
+package godo
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/digitalocean/godo/metrics"
+)
+
+const (
+	monitoringBasePath     = "v2/monitoring"
+	alertPolicyBasePath    = monitoringBasePath + "/alerts"
+	dropletMetricsBasePath = monitoringBasePath + "/metrics/droplet"
+
+	DropletCPUUtilizationPercent        = "v1/insights/droplet/cpu"
+	DropletMemoryUtilizationPercent     = "v1/insights/droplet/memory_utilization_percent"
+	DropletDiskUtilizationPercent       = "v1/insights/droplet/disk_utilization_percent"
+	DropletPublicOutboundBandwidthRate  = "v1/insights/droplet/public_outbound_bandwidth"
+	DropletPublicInboundBandwidthRate   = "v1/insights/droplet/public_inbound_bandwidth"
+	DropletPrivateOutboundBandwidthRate = "v1/insights/droplet/private_outbound_bandwidth"
+	DropletPrivateInboundBandwidthRate  = "v1/insights/droplet/private_inbound_bandwidth"
+	DropletDiskReadRate                 = "v1/insights/droplet/disk_read"
+	DropletDiskWriteRate                = "v1/insights/droplet/disk_write"
+	DropletOneMinuteLoadAverage         = "v1/insights/droplet/load_1"
+	DropletFiveMinuteLoadAverage        = "v1/insights/droplet/load_5"
+	DropletFifteenMinuteLoadAverage     = "v1/insights/droplet/load_15"
+
+	LoadBalancerCPUUtilizationPercent                = "v1/insights/lbaas/avg_cpu_utilization_percent"
+	LoadBalancerConnectionUtilizationPercent         = "v1/insights/lbaas/connection_utilization_percent"
+	LoadBalancerDropletHealth                        = "v1/insights/lbaas/droplet_health"
+	LoadBalancerTLSUtilizationPercent                = "v1/insights/lbaas/tls_connections_per_second_utilization_percent"
+	LoadBalancerIncreaseInHTTPErrorRatePercentage5xx = "v1/insights/lbaas/increase_in_http_error_rate_percentage_5xx"
+	LoadBalancerIncreaseInHTTPErrorRatePercentage4xx = "v1/insights/lbaas/increase_in_http_error_rate_percentage_4xx"
+	LoadBalancerIncreaseInHTTPErrorRateCount5xx      = "v1/insights/lbaas/increase_in_http_error_rate_count_5xx"
+	LoadBalancerIncreaseInHTTPErrorRateCount4xx      = "v1/insights/lbaas/increase_in_http_error_rate_count_4xx"
+	LoadBalancerHighHttpResponseTime                 = "v1/insights/lbaas/high_http_request_response_time"
+	LoadBalancerHighHttpResponseTime50P              = "v1/insights/lbaas/high_http_request_response_time_50p"
+	LoadBalancerHighHttpResponseTime95P              = "v1/insights/lbaas/high_http_request_response_time_95p"
+	LoadBalancerHighHttpResponseTime99P              = "v1/insights/lbaas/high_http_request_response_time_99p"
+
+	DbaasFifteenMinuteLoadAverage = "v1/dbaas/alerts/load_15_alerts"
+	DbaasMemoryUtilizationPercent = "v1/dbaas/alerts/memory_utilization_alerts"
+	DbaasDiskUtilizationPercent   = "v1/dbaas/alerts/disk_utilization_alerts"
+	DbaasCPUUtilizationPercent    = "v1/dbaas/alerts/cpu_alerts"
+)
+
+// MonitoringService is an interface for interfacing with the
+// monitoring endpoints of the DigitalOcean API
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Monitoring
+type MonitoringService interface {
+	ListAlertPolicies(context.Context, *ListOptions) ([]AlertPolicy, *Response, error)
+	GetAlertPolicy(context.Context, string) (*AlertPolicy, *Response, error)
+	CreateAlertPolicy(context.Context, *AlertPolicyCreateRequest) (*AlertPolicy, *Response, error)
+	UpdateAlertPolicy(context.Context, string, *AlertPolicyUpdateRequest) (*AlertPolicy, *Response, error)
+	DeleteAlertPolicy(context.Context, string) (*Response, error)
+
+	GetDropletBandwidth(context.Context, *DropletBandwidthMetricsRequest) (*MetricsResponse, *Response, error)
+	GetDropletAvailableMemory(context.Context, *DropletMetricsRequest) (*MetricsResponse, *Response, error)
+	GetDropletCPU(context.Context, *DropletMetricsRequest) (*MetricsResponse, *Response, error)
+	GetDropletFilesystemFree(context.Context, *DropletMetricsRequest) (*MetricsResponse, *Response, error)
+	GetDropletFilesystemSize(context.Context, *DropletMetricsRequest) (*MetricsResponse, *Response, error)
+	GetDropletLoad1(context.Context, *DropletMetricsRequest) (*MetricsResponse, *Response, error)
+	GetDropletLoad5(context.Context, *DropletMetricsRequest) (*MetricsResponse, *Response, error)
+	GetDropletLoad15(context.Context, *DropletMetricsRequest) (*MetricsResponse, *Response, error)
+	GetDropletCachedMemory(context.Context, *DropletMetricsRequest) (*MetricsResponse, *Response, error)
+	GetDropletFreeMemory(context.Context, *DropletMetricsRequest) (*MetricsResponse, *Response, error)
+	GetDropletTotalMemory(context.Context, *DropletMetricsRequest) (*MetricsResponse, *Response, error)
+}
+
+// MonitoringServiceOp handles communication with monitoring related methods of the
+// DigitalOcean API.
+type MonitoringServiceOp struct {
+	client *Client
+}
+
+var _ MonitoringService = &MonitoringServiceOp{}
+
+// AlertPolicy represents a DigitalOcean alert policy
+type AlertPolicy struct {
+	UUID        string          `json:"uuid"`
+	Type        string          `json:"type"`
+	Description string          `json:"description"`
+	Compare     AlertPolicyComp `json:"compare"`
+	Value       float32         `json:"value"`
+	Window      string          `json:"window"`
+	Entities    []string        `json:"entities"`
+	Tags        []string        `json:"tags"`
+	Alerts      Alerts          `json:"alerts"`
+	Enabled     bool            `json:"enabled"`
+}
+
+// Alerts represents the alerts section of an alert policy
+type Alerts struct {
+	Slack []SlackDetails `json:"slack"`
+	Email []string       `json:"email"`
+}
+
+// SlackDetails represents the details required to send a slack alert
+type SlackDetails struct {
+	URL     string `json:"url"`
+	Channel string `json:"channel"`
+}
+
+// AlertPolicyComp represents an alert policy comparison operation
+type AlertPolicyComp string
+
+const (
+	// GreaterThan is the comparison >
+	GreaterThan AlertPolicyComp = "GreaterThan"
+	// LessThan is the comparison <
+	LessThan AlertPolicyComp = "LessThan"
+)
+
+// AlertPolicyCreateRequest holds the info for creating a new alert policy
+type AlertPolicyCreateRequest struct {
+	Type        string          `json:"type"`
+	Description string          `json:"description"`
+	Compare     AlertPolicyComp `json:"compare"`
+	Value       float32         `json:"value"`
+	Window      string          `json:"window"`
+	Entities    []string        `json:"entities"`
+	Tags        []string        `json:"tags"`
+	Alerts      Alerts          `json:"alerts"`
+	Enabled     *bool           `json:"enabled"`
+}
+
+// AlertPolicyUpdateRequest holds the info for updating an existing alert policy
+type AlertPolicyUpdateRequest struct {
+	Type        string          `json:"type"`
+	Description string          `json:"description"`
+	Compare     AlertPolicyComp `json:"compare"`
+	Value       float32         `json:"value"`
+	Window      string          `json:"window"`
+	Entities    []string        `json:"entities"`
+	Tags        []string        `json:"tags"`
+	Alerts      Alerts          `json:"alerts"`
+	Enabled     *bool           `json:"enabled"`
+}
+
+type alertPoliciesRoot struct {
+	AlertPolicies []AlertPolicy `json:"policies"`
+	Links         *Links        `json:"links"`
+	Meta          *Meta         `json:"meta"`
+}
+
+type alertPolicyRoot struct {
+	AlertPolicy *AlertPolicy `json:"policy,omitempty"`
+}
+
+// DropletMetricsRequest holds the information needed to retrieve various Droplet metrics.
+type DropletMetricsRequest struct {
+	HostID string
+	Start  time.Time
+	End    time.Time
+}
+
+// DropletBandwidthMetricsRequest holds the information needed to retrieve Droplet bandwidth metrics.
+type DropletBandwidthMetricsRequest struct {
+	DropletMetricsRequest
+	Interface string
+	Direction string
+}
+
+// MetricsResponse holds a Metrics query response.
+type MetricsResponse struct {
+	Status string      `json:"status"`
+	Data   MetricsData `json:"data"`
+}
+
+// MetricsData holds the data portion of a Metrics response.
+type MetricsData struct {
+	ResultType string                 `json:"resultType"`
+	Result     []metrics.SampleStream `json:"result"`
+}
+
+// ListAlertPolicies lists all alert policies.
+func (s *MonitoringServiceOp) ListAlertPolicies(ctx context.Context, opt *ListOptions) ([]AlertPolicy, *Response, error) {
+	path := alertPolicyBasePath
+	path, err := addOptions(path, opt)
+
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(alertPoliciesRoot)
+	resp, err := s.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	if l := root.Links; l != nil {
+		resp.Links = l
+	}
+	if m := root.Meta; m != nil {
+		resp.Meta = m
+	}
+	return root.AlertPolicies, resp, err
+}
+
+// GetAlertPolicy gets a single alert policy
+func (s *MonitoringServiceOp) GetAlertPolicy(ctx context.Context, uuid string) (*AlertPolicy, *Response, error) {
+	path := fmt.Sprintf("%s/%s", alertPolicyBasePath, uuid)
+
+	req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(alertPolicyRoot)
+	resp, err := s.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return root.AlertPolicy, resp, err
+}
+
+// CreateAlertPolicy creates a new alert policy
+func (s *MonitoringServiceOp) CreateAlertPolicy(ctx context.Context, createRequest *AlertPolicyCreateRequest) (*AlertPolicy, *Response, error) {
+	if createRequest == nil {
+		return nil, nil, NewArgError("createRequest", "cannot be nil")
+	}
+
+	req, err := s.client.NewRequest(ctx, http.MethodPost, alertPolicyBasePath, createRequest)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(alertPolicyRoot)
+	resp, err := s.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return root.AlertPolicy, resp, err
+}
+
+// UpdateAlertPolicy updates an existing alert policy
+func (s *MonitoringServiceOp) UpdateAlertPolicy(ctx context.Context, uuid string, updateRequest *AlertPolicyUpdateRequest) (*AlertPolicy, *Response, error) {
+	if uuid == "" {
+		return nil, nil, NewArgError("uuid", "cannot be empty")
+	}
+	if updateRequest == nil {
+		return nil, nil, NewArgError("updateRequest", "cannot be nil")
+	}
+
+	path := fmt.Sprintf("%s/%s", alertPolicyBasePath, uuid)
+	req, err := s.client.NewRequest(ctx, http.MethodPut, path, updateRequest)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(alertPolicyRoot)
+	resp, err := s.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return root.AlertPolicy, resp, err
+}
+
+// DeleteAlertPolicy deletes an existing alert policy
+func (s *MonitoringServiceOp) DeleteAlertPolicy(ctx context.Context, uuid string) (*Response, error) {
+	if uuid == "" {
+		return nil, NewArgError("uuid", "cannot be empty")
+	}
+
+	path := fmt.Sprintf("%s/%s", alertPolicyBasePath, uuid)
+	req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := s.client.Do(ctx, req, nil)
+
+	return resp, err
+}
+
+// GetDropletBandwidth retrieves Droplet bandwidth metrics.
+func (s *MonitoringServiceOp) GetDropletBandwidth(ctx context.Context, args *DropletBandwidthMetricsRequest) (*MetricsResponse, *Response, error) {
+	path := dropletMetricsBasePath + "/bandwidth"
+	req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	q := req.URL.Query()
+	q.Add("host_id", args.HostID)
+	q.Add("interface", args.Interface)
+	q.Add("direction", args.Direction)
+	q.Add("start", fmt.Sprintf("%d", args.Start.Unix()))
+	q.Add("end", fmt.Sprintf("%d", args.End.Unix()))
+	req.URL.RawQuery = q.Encode()
+
+	root := new(MetricsResponse)
+	resp, err := s.client.Do(ctx, req, root)
+
+	return root, resp, err
+}
+
+// GetDropletCPU retrieves Droplet CPU metrics.
+func (s *MonitoringServiceOp) GetDropletCPU(ctx context.Context, args *DropletMetricsRequest) (*MetricsResponse, *Response, error) {
+	return s.getDropletMetrics(ctx, "/cpu", args)
+}
+
+// GetDropletFilesystemFree retrieves Droplet filesystem free metrics.
+func (s *MonitoringServiceOp) GetDropletFilesystemFree(ctx context.Context, args *DropletMetricsRequest) (*MetricsResponse, *Response, error) {
+	return s.getDropletMetrics(ctx, "/filesystem_free", args)
+}
+
+// GetDropletFilesystemSize retrieves Droplet filesystem size metrics.
+func (s *MonitoringServiceOp) GetDropletFilesystemSize(ctx context.Context, args *DropletMetricsRequest) (*MetricsResponse, *Response, error) {
+	return s.getDropletMetrics(ctx, "/filesystem_size", args)
+}
+
+// GetDropletLoad1 retrieves Droplet load 1 metrics.
+func (s *MonitoringServiceOp) GetDropletLoad1(ctx context.Context, args *DropletMetricsRequest) (*MetricsResponse, *Response, error) {
+	return s.getDropletMetrics(ctx, "/load_1", args)
+}
+
+// GetDropletLoad5 retrieves Droplet load 5 metrics.
+func (s *MonitoringServiceOp) GetDropletLoad5(ctx context.Context, args *DropletMetricsRequest) (*MetricsResponse, *Response, error) {
+	return s.getDropletMetrics(ctx, "/load_5", args)
+}
+
+// GetDropletLoad15 retrieves Droplet load 15 metrics.
+func (s *MonitoringServiceOp) GetDropletLoad15(ctx context.Context, args *DropletMetricsRequest) (*MetricsResponse, *Response, error) {
+	return s.getDropletMetrics(ctx, "/load_15", args)
+}
+
+// GetDropletCachedMemory retrieves Droplet cached memory metrics.
+func (s *MonitoringServiceOp) GetDropletCachedMemory(ctx context.Context, args *DropletMetricsRequest) (*MetricsResponse, *Response, error) {
+	return s.getDropletMetrics(ctx, "/memory_cached", args)
+}
+
+// GetDropletFreeMemory retrieves Droplet free memory metrics.
+func (s *MonitoringServiceOp) GetDropletFreeMemory(ctx context.Context, args *DropletMetricsRequest) (*MetricsResponse, *Response, error) {
+	return s.getDropletMetrics(ctx, "/memory_free", args)
+}
+
+// GetDropletTotalMemory retrieves Droplet total memory metrics.
+func (s *MonitoringServiceOp) GetDropletTotalMemory(ctx context.Context, args *DropletMetricsRequest) (*MetricsResponse, *Response, error) {
+	return s.getDropletMetrics(ctx, "/memory_total", args)
+}
+
+// GetDropletAvailableMemory retrieves Droplet available memory metrics.
+func (s *MonitoringServiceOp) GetDropletAvailableMemory(ctx context.Context, args *DropletMetricsRequest) (*MetricsResponse, *Response, error) {
+	return s.getDropletMetrics(ctx, "/memory_available", args)
+}
+
+func (s *MonitoringServiceOp) getDropletMetrics(ctx context.Context, path string, args *DropletMetricsRequest) (*MetricsResponse, *Response, error) {
+	fullPath := dropletMetricsBasePath + path
+	req, err := s.client.NewRequest(ctx, http.MethodGet, fullPath, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	q := req.URL.Query()
+	q.Add("host_id", args.HostID)
+	q.Add("start", fmt.Sprintf("%d", args.Start.Unix()))
+	q.Add("end", fmt.Sprintf("%d", args.End.Unix()))
+	req.URL.RawQuery = q.Encode()
+
+	root := new(MetricsResponse)
+	resp, err := s.client.Do(ctx, req, root)
+
+	return root, resp, err
+}
diff --git a/monitoring_test.go b/monitoring_test.go
new file mode 100644
index 0000000..408fb6b
--- /dev/null
+++ b/monitoring_test.go
@@ -0,0 +1,1307 @@
+package godo
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"reflect"
+	"testing"
+	"time"
+
+	"github.com/digitalocean/godo/metrics"
+	"github.com/stretchr/testify/assert"
+)
+
+var (
+	listEmptyPoliciesJSON = `
+	{
+		"policies": [
+		],
+		"meta": {
+			"total": 0
+		}
+	}
+	`
+
+	listPoliciesJSON = `
+	{
+		"policies": [
+		{
+		  "uuid": "669befc9-3cbc-45fc-85f0-2c966f133730",
+		  "type": "v1/insights/droplet/cpu",
+		  "description": "description of policy",
+		  "compare": "LessThan",
+		  "value": 75,
+		  "window": "5m",
+		  "entities": [],
+		  "tags": [
+			"test-tag"
+		  ],
+		  "alerts": {
+			"slack": [
+			  {
+				"url": "https://hooks.slack.com/services/T1234567/AAAAAAAAA/ZZZZZZ",
+				"channel": "#alerts-test"
+			  }
+			],
+			"email": ["bob@example.com"]
+		  },
+		  "enabled": true
+		},
+		{
+		  "uuid": "777befc9-3cbc-45fc-85f0-2c966f133737",
+		  "type": "v1/insights/droplet/cpu",
+		  "description": "description of policy #2",
+		  "compare": "LessThan",
+		  "value": 90,
+		  "window": "5m",
+		  "entities": [],
+		  "tags": [
+			"test-tag-2"
+		  ],
+		  "alerts": {
+			"slack": [
+			  {
+				"url": "https://hooks.slack.com/services/T1234567/AAAAAAAAA/ZZZZZZ",
+				"channel": "#alerts-test"
+			  }
+			],
+			"email": ["bob@example.com", "alice@example.com"]
+		  },
+		  "enabled": false
+		}
+		],
+		"links": {
+			"pages":{
+				"next":"http://example.com/v2/monitoring/alerts/?page=3",
+				"prev":"http://example.com/v2/monitoring/alerts/?page=1",
+				"last":"http://example.com/v2/monitoring/alerts/?page=3",
+				"first":"http://example.com/v2/monitoring/alerts/?page=1"
+			}
+		},
+		"meta": {
+			"total": 2
+		}
+	}
+	`
+
+	createAlertPolicyJSON = `
+	{
+		"policy": {
+          "uuid": "669befc9-3cbc-45fc-85f0-2c966f133730",
+		  "alerts": {
+			"email": [
+			  "bob@example.com"
+			],
+			"slack": [
+			  {
+				"channel": "#alerts-test",
+				"url": "https://hooks.slack.com/services/T1234567/AAAAAAAA/ZZZZZZ"
+			  }
+			]
+		  },
+		  "compare": "LessThan",
+		  "description": "description of policy",
+		  "enabled": true,
+		  "entities": [
+		  ],
+		  "tags": [
+			"test-tag"
+		  ],
+		  "type": "v1/insights/droplet/cpu",
+		  "value": 75,
+		  "window": "5m"
+		}
+	}
+	`
+
+	updateAlertPolicyJSON = `
+	{
+		"policy": {
+          "uuid": "769befc9-3cbc-45fc-85f0-2c966f133730",
+		  "alerts": {
+			"email": [
+			  "bob@example.com"
+			],
+			"slack": [
+			  {
+				"channel": "#alerts-test",
+				"url": "https://hooks.slack.com/services/T1234567/AAAAAAAA/ZZZZZZ"
+			  }
+			]
+		  },
+		  "compare": "GreaterThan",
+		  "description": "description of updated policy",
+		  "enabled": true,
+		  "entities": [
+		  ],
+		  "tags": [
+			"test-tag"
+		  ],
+		  "type": "v1/insights/droplet/cpu",
+		  "value": 75,
+		  "window": "5m"
+		}
+	}
+	`
+
+	getPolicyJSON = `
+	{
+		"policy": {
+          "uuid": "669befc9-3cbc-45fc-85f0-2c966f133730",
+		  "alerts": {
+			"email": [
+			  "bob@example.com"
+			],
+			"slack": [
+			  {
+				"channel": "#alerts-test",
+				"url": "https://hooks.slack.com/services/T1234567/AAAAAAAA/ZZZZZZ"
+			  }
+			]
+		  },
+		  "compare": "LessThan",
+		  "description": "description of policy",
+		  "enabled": true,
+		  "entities": [
+		  ],
+		  "tags": [
+			"test-tag"
+		  ],
+		  "type": "v1/insights/droplet/cpu",
+		  "value": 75,
+		  "window": "5m"
+		}
+	}
+	`
+
+	bandwidthRespJSON = `
+	{
+		"status": "success",
+		"data": {
+			"resultType": "matrix",
+			"result": [
+				{
+					"metric": {
+						"direction": "inbound",
+						"host_id": "222651441",
+						"interface": "private"
+					},
+					"values": [
+						[
+							1634052360,
+							"0.016600450090265357"
+						],
+						[
+							1634052480,
+							"0.015085955677299055"
+						],
+						[
+							1634052600,
+							"0.014941163855322308"
+						],
+						[
+							1634052720,
+							"0.016214285714285712"
+						]
+					]
+				}
+			]
+		}
+	}`
+
+	memoryRespJSON = `
+	{
+		"status": "success",
+		"data": {
+			"resultType": "matrix",
+			"result": [
+			{
+				"metric": {
+				"host_id": "123"
+				},
+				"values": [
+				[
+					1635386880,
+					"1028956160"
+				],
+				[
+					1635387000,
+					"1028956160"
+				],
+				[
+					1635387120,
+					"1028956160"
+				]
+				]
+			}
+			]
+		}
+	}`
+
+	filesystemRespJSON = `
+			{
+		"status": "success",
+		"data": {
+			"resultType": "matrix",
+			"result": [
+			{
+				"metric": {
+					"device": "/dev/vda1",
+					"fstype": "ext4",
+					"host_id": "123",
+					"mountpoint": "/"
+				},
+				"values": [
+					[
+						1635386880,
+						"25832407040"
+					],
+					[
+						1635387000,
+						"25832407040"
+					],
+					[
+						1635387120,
+						"25832407040"
+					]
+				]
+			}
+			]
+		}
+	}`
+
+	loadRespJSON = `
+	{
+		"status": "success",
+		"data": {
+			"resultType": "matrix",
+			"result": [
+			{
+				"metric": {
+				"host_id": "123"
+				},
+				"values": [
+				[
+					1635386880,
+					"0.04"
+				],
+				[
+					1635387000,
+					"0.03"
+				],
+				[
+					1635387120,
+					"0.01"
+				]
+				]
+			}
+			]
+		}
+	}`
+
+	cpuRespJSON = `
+	{
+		"status": "success",
+		"data": {
+			"resultType": "matrix",
+			"result": [
+			{
+				"metric": {
+				"host_id": "123",
+				"mode": "idle"
+				},
+				"values": [
+				[
+					1635386880,
+					"122901.18"
+				],
+				[
+					1635387000,
+					"123020.92"
+				],
+				[
+					1635387120,
+					"123140.8"
+				]
+				]
+			},
+			{
+				"metric": {
+				"host_id": "123",
+				"mode": "iowait"
+				},
+				"values": [
+				[
+					1635386880,
+					"14.99"
+				],
+				[
+					1635387000,
+					"15.01"
+				],
+				[
+					1635387120,
+					"15.01"
+				]
+				]
+			},
+			{
+				"metric": {
+				"host_id": "123",
+				"mode": "irq"
+				},
+				"values": [
+				[
+					1635386880,
+					"0"
+				],
+				[
+					1635387000,
+					"0"
+				],
+				[
+					1635387120,
+					"0"
+				]
+				]
+			},
+			{
+				"metric": {
+				"host_id": "123",
+				"mode": "nice"
+				},
+				"values": [
+				[
+					1635386880,
+					"66.35"
+				],
+				[
+					1635387000,
+					"66.35"
+				],
+				[
+					1635387120,
+					"66.35"
+				]
+				]
+			},
+			{
+				"metric": {
+				"host_id": "123",
+				"mode": "softirq"
+				},
+				"values": [
+				[
+					1635386880,
+					"2.13"
+				],
+				[
+					1635387000,
+					"2.13"
+				],
+				[
+					1635387120,
+					"2.13"
+				]
+				]
+			},
+			{
+				"metric": {
+				"host_id": "123",
+				"mode": "steal"
+				},
+				"values": [
+				[
+					1635386880,
+					"7.89"
+				],
+				[
+					1635387000,
+					"7.9"
+				],
+				[
+					1635387120,
+					"7.91"
+				]
+				]
+			},
+			{
+				"metric": {
+				"host_id": "123",
+				"mode": "system"
+				},
+				"values": [
+				[
+					1635386880,
+					"140.09"
+				],
+				[
+					1635387000,
+					"140.2"
+				],
+				[
+					1635387120,
+					"140.23"
+				]
+				]
+			},
+			{
+				"metric": {
+				"host_id": "123",
+				"mode": "user"
+				},
+				"values": [
+				[
+					1635386880,
+					"278.57"
+				],
+				[
+					1635387000,
+					"278.65"
+				],
+				[
+					1635387120,
+					"278.69"
+				]
+				]
+			}
+			]
+		}
+	}`
+
+	testCPUResponse = &MetricsResponse{
+		Status: "success",
+		Data: MetricsData{
+			ResultType: "matrix",
+			Result: []metrics.SampleStream{
+				{
+					Metric: metrics.Metric{
+						"host_id": "123",
+						"mode":    "idle",
+					},
+					Values: []metrics.SamplePair{
+						{
+							Timestamp: 1635386880000,
+							Value:     122901.18,
+						},
+						{
+							Timestamp: 1635387000000,
+							Value:     123020.92,
+						},
+						{
+							Timestamp: 1635387120000,
+							Value:     123140.8,
+						},
+					},
+				},
+				{
+					Metric: metrics.Metric{
+						"host_id": "123",
+						"mode":    "iowait",
+					},
+					Values: []metrics.SamplePair{
+						{
+							Timestamp: 1635386880000,
+							Value:     14.99,
+						},
+						{
+							Timestamp: 1635387000000,
+							Value:     15.01,
+						},
+						{
+							Timestamp: 1635387120000,
+							Value:     15.01,
+						},
+					},
+				},
+				{
+					Metric: metrics.Metric{
+						"host_id": "123",
+						"mode":    "irq",
+					},
+					Values: []metrics.SamplePair{
+						{
+							Timestamp: 1635386880000,
+							Value:     0,
+						},
+						{
+							Timestamp: 1635387000000,
+							Value:     0,
+						},
+						{
+							Timestamp: 1635387120000,
+							Value:     0,
+						},
+					},
+				},
+				{
+					Metric: metrics.Metric{
+						"host_id": "123",
+						"mode":    "nice",
+					},
+					Values: []metrics.SamplePair{
+						{
+							Timestamp: 1635386880000,
+							Value:     66.35,
+						},
+						{
+							Timestamp: 1635387000000,
+							Value:     66.35,
+						},
+						{
+							Timestamp: 1635387120000,
+							Value:     66.35,
+						},
+					},
+				},
+				{
+					Metric: metrics.Metric{
+						"host_id": "123",
+						"mode":    "softirq",
+					},
+					Values: []metrics.SamplePair{
+						{
+							Timestamp: 1635386880000,
+							Value:     2.13,
+						},
+						{
+							Timestamp: 1635387000000,
+							Value:     2.13,
+						},
+						{
+							Timestamp: 1635387120000,
+							Value:     2.13,
+						},
+					},
+				},
+				{
+					Metric: metrics.Metric{
+						"host_id": "123",
+						"mode":    "steal",
+					},
+					Values: []metrics.SamplePair{
+						{
+							Timestamp: 1635386880000,
+							Value:     7.89,
+						},
+						{
+							Timestamp: 1635387000000,
+							Value:     7.9,
+						},
+						{
+							Timestamp: 1635387120000,
+							Value:     7.91,
+						},
+					},
+				},
+				{
+					Metric: metrics.Metric{
+						"host_id": "123",
+						"mode":    "system",
+					},
+					Values: []metrics.SamplePair{
+						{
+							Timestamp: 1635386880000,
+							Value:     140.09,
+						},
+						{
+							Timestamp: 1635387000000,
+							Value:     140.2,
+						},
+						{
+							Timestamp: 1635387120000,
+							Value:     140.23,
+						},
+					},
+				},
+				{
+					Metric: metrics.Metric{
+						"host_id": "123",
+						"mode":    "user",
+					},
+					Values: []metrics.SamplePair{
+						{
+							Timestamp: 1635386880000,
+							Value:     278.57,
+						},
+						{
+							Timestamp: 1635387000000,
+							Value:     278.65,
+						},
+						{
+							Timestamp: 1635387120000,
+							Value:     278.69,
+						},
+					},
+				},
+			},
+		},
+	}
+
+	testLoadResponse = &MetricsResponse{
+		Status: "success",
+		Data: MetricsData{
+			ResultType: "matrix",
+			Result: []metrics.SampleStream{
+				{
+					Metric: metrics.Metric{
+						"host_id": "123",
+					},
+					Values: []metrics.SamplePair{
+						{
+							Timestamp: 1635386880000,
+							Value:     0.04,
+						},
+						{
+							Timestamp: 1635387000000,
+							Value:     0.03,
+						},
+						{
+							Timestamp: 1635387120000,
+							Value:     0.01,
+						},
+					},
+				},
+			},
+		},
+	}
+
+	testFilesystemResponse = &MetricsResponse{
+		Status: "success",
+		Data: MetricsData{
+			ResultType: "matrix",
+			Result: []metrics.SampleStream{
+				{
+					Metric: metrics.Metric{
+						"device":     "/dev/vda1",
+						"fstype":     "ext4",
+						"host_id":    "123",
+						"mountpoint": "/",
+					},
+					Values: []metrics.SamplePair{
+						{
+							Timestamp: 1635386880000,
+							Value:     25832407040,
+						},
+						{
+							Timestamp: 1635387000000,
+							Value:     25832407040,
+						},
+						{
+							Timestamp: 1635387120000,
+							Value:     25832407040,
+						},
+					},
+				},
+			},
+		},
+	}
+
+	testMemoryResponse = &MetricsResponse{
+		Status: "success",
+		Data: MetricsData{
+			ResultType: "matrix",
+			Result: []metrics.SampleStream{
+				{
+					Metric: metrics.Metric{
+						"host_id": "123",
+					},
+					Values: []metrics.SamplePair{
+						{
+							Timestamp: 1635386880000,
+							Value:     1.02895616e+09,
+						},
+						{
+							Timestamp: 1635387000000,
+							Value:     1.02895616e+09,
+						},
+						{
+							Timestamp: 1635387120000,
+							Value:     1.02895616e+09,
+						},
+					},
+				},
+			},
+		},
+	}
+)
+
+func TestAlertPolicies_List(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/v2/monitoring/alerts", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		fmt.Fprint(w, listPoliciesJSON)
+	})
+
+	policies, resp, err := client.Monitoring.ListAlertPolicies(ctx, nil)
+	if err != nil {
+		t.Errorf("Monitoring.ListAlertPolicies returned error: %v", err)
+	}
+
+	expectedPolicies := []AlertPolicy{
+		{UUID: "669befc9-3cbc-45fc-85f0-2c966f133730", Type: DropletCPUUtilizationPercent, Description: "description of policy", Compare: "LessThan", Value: 75, Window: "5m", Entities: []string{}, Tags: []string{"test-tag"}, Alerts: Alerts{Slack: []SlackDetails{{URL: "https://hooks.slack.com/services/T1234567/AAAAAAAAA/ZZZZZZ", Channel: "#alerts-test"}}, Email: []string{"bob@example.com"}}, Enabled: true},
+		{UUID: "777befc9-3cbc-45fc-85f0-2c966f133737", Type: DropletCPUUtilizationPercent, Description: "description of policy #2", Compare: "LessThan", Value: 90, Window: "5m", Entities: []string{}, Tags: []string{"test-tag-2"}, Alerts: Alerts{Slack: []SlackDetails{{URL: "https://hooks.slack.com/services/T1234567/AAAAAAAAA/ZZZZZZ", Channel: "#alerts-test"}}, Email: []string{"bob@example.com", "alice@example.com"}}, Enabled: false},
+	}
+	if !reflect.DeepEqual(policies, expectedPolicies) {
+		t.Errorf("Monitoring.ListAlertPolicies returned policies %+v, expected %+v", policies, expectedPolicies)
+	}
+
+	expectedMeta := &Meta{Total: 2}
+	if !reflect.DeepEqual(resp.Meta, expectedMeta) {
+		t.Errorf("Monitoring.ListAlertPolicies returned meta %+v, expected %+v", resp.Meta, expectedMeta)
+	}
+}
+
+func TestAlertPolicies_ListEmpty(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/v2/monitoring/alerts", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		fmt.Fprint(w, listEmptyPoliciesJSON)
+	})
+
+	policies, _, err := client.Monitoring.ListAlertPolicies(ctx, nil)
+	if err != nil {
+		t.Errorf("Monitoring.ListAlertPolicies returned error: %v", err)
+	}
+
+	expected := []AlertPolicy{}
+	if !reflect.DeepEqual(policies, expected) {
+		t.Errorf("Monitoring.ListAlertPolicies returned %+v, expected %+v", policies, expected)
+	}
+}
+
+func TestAlertPolicies_ListPaging(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/v2/monitoring/alerts", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		fmt.Fprint(w, listPoliciesJSON)
+	})
+
+	_, resp, err := client.Monitoring.ListAlertPolicies(ctx, nil)
+	if err != nil {
+		t.Errorf("Monitoring.ListAlertPolicies returned error: %v", err)
+	}
+	checkCurrentPage(t, resp, 2)
+}
+
+func TestAlertPolicy_Get(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/v2/monitoring/alerts/669befc9-3cbc-45fc-85f0-2c966f133730", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		fmt.Fprint(w, getPolicyJSON)
+	})
+
+	policy, _, err := client.Monitoring.GetAlertPolicy(ctx, "669befc9-3cbc-45fc-85f0-2c966f133730")
+	if err != nil {
+		t.Errorf("Monitoring.GetAlertPolicy returned error: %v", err)
+	}
+	expected := &AlertPolicy{UUID: "669befc9-3cbc-45fc-85f0-2c966f133730", Type: DropletCPUUtilizationPercent, Description: "description of policy", Compare: "LessThan", Value: 75, Window: "5m", Entities: []string{}, Tags: []string{"test-tag"}, Alerts: Alerts{Slack: []SlackDetails{{URL: "https://hooks.slack.com/services/T1234567/AAAAAAAA/ZZZZZZ", Channel: "#alerts-test"}}, Email: []string{"bob@example.com"}}, Enabled: true}
+	if !reflect.DeepEqual(policy, expected) {
+		t.Errorf("Monitoring.CreateAlertPolicy returned %+v, expected %+v", policy, expected)
+	}
+}
+
+func TestAlertPolicy_Create(t *testing.T) {
+	setup()
+	defer teardown()
+
+	createRequest := &AlertPolicyCreateRequest{
+		Type:        DropletCPUUtilizationPercent,
+		Description: "description of policy",
+		Compare:     "LessThan",
+		Value:       75,
+		Window:      "5m",
+		Entities:    []string{},
+		Tags:        []string{"test-tag"},
+		Alerts: Alerts{
+			Email: []string{"bob@example.com"},
+			Slack: []SlackDetails{
+				{
+					Channel: "#alerts-test",
+					URL:     "https://hooks.slack.com/services/T1234567/AAAAAAAAA/ZZZZZZ",
+				},
+			},
+		},
+	}
+
+	mux.HandleFunc("/v2/monitoring/alerts", func(w http.ResponseWriter, r *http.Request) {
+		v := new(AlertPolicyCreateRequest)
+		err := json.NewDecoder(r.Body).Decode(v)
+		if err != nil {
+			t.Fatalf("decode json: %v", err)
+		}
+
+		testMethod(t, r, http.MethodPost)
+		if !reflect.DeepEqual(v, createRequest) {
+			t.Errorf("Request body = %+v, expected %+v", v, createRequest)
+		}
+
+		fmt.Fprintf(w, createAlertPolicyJSON)
+	})
+
+	policy, _, err := client.Monitoring.CreateAlertPolicy(ctx, createRequest)
+	if err != nil {
+		t.Errorf("Monitoring.CreateAlertPolicy returned error: %v", err)
+	}
+
+	expected := &AlertPolicy{UUID: "669befc9-3cbc-45fc-85f0-2c966f133730", Type: DropletCPUUtilizationPercent, Description: "description of policy", Compare: "LessThan", Value: 75, Window: "5m", Entities: []string{}, Tags: []string{"test-tag"}, Alerts: Alerts{Slack: []SlackDetails{{URL: "https://hooks.slack.com/services/T1234567/AAAAAAAA/ZZZZZZ", Channel: "#alerts-test"}}, Email: []string{"bob@example.com"}}, Enabled: true}
+
+	if !reflect.DeepEqual(policy, expected) {
+		t.Errorf("Monitoring.CreateAlertPolicy returned %+v, expected %+v", policy, expected)
+	}
+}
+
+func TestAlertPolicy_Delete(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/v2/monitoring/alerts/669befc9-3cbc-45fc-85f0-2c966f133730", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodDelete)
+	})
+
+	_, err := client.Monitoring.DeleteAlertPolicy(ctx, "669befc9-3cbc-45fc-85f0-2c966f133730")
+	if err != nil {
+		t.Errorf("Monitoring.DeleteAlertPolicy returned error: %v", err)
+	}
+}
+
+func TestAlertPolicy_Update(t *testing.T) {
+	setup()
+	defer teardown()
+
+	updateRequest := &AlertPolicyUpdateRequest{
+		Type:        DropletCPUUtilizationPercent,
+		Description: "description of updated policy",
+		Compare:     "GreaterThan",
+		Value:       75,
+		Window:      "5m",
+		Entities:    []string{},
+		Tags:        []string{"test-tag"},
+		Alerts: Alerts{
+			Email: []string{"bob@example.com"},
+			Slack: []SlackDetails{
+				{
+					Channel: "#alerts-test",
+					URL:     "https://hooks.slack.com/services/T1234567/AAAAAAAAA/ZZZZZZ",
+				},
+			},
+		},
+	}
+
+	mux.HandleFunc("/v2/monitoring/alerts/769befc9-3cbc-45fc-85f0-2c966f133730", func(w http.ResponseWriter, r *http.Request) {
+		v := new(AlertPolicyUpdateRequest)
+		err := json.NewDecoder(r.Body).Decode(v)
+		if err != nil {
+			t.Fatalf("decode json: %v", err)
+		}
+
+		testMethod(t, r, http.MethodPut)
+		if !reflect.DeepEqual(v, updateRequest) {
+			t.Errorf("Request body = %+v, expected %+v", v, updateRequest)
+		}
+
+		fmt.Fprintf(w, updateAlertPolicyJSON)
+	})
+
+	policy, _, err := client.Monitoring.UpdateAlertPolicy(ctx, "769befc9-3cbc-45fc-85f0-2c966f133730", updateRequest)
+	if err != nil {
+		t.Errorf("Monitoring.UpdateAlertPolicy returned error: %v", err)
+	}
+
+	expected := &AlertPolicy{UUID: "769befc9-3cbc-45fc-85f0-2c966f133730", Type: DropletCPUUtilizationPercent, Description: "description of updated policy", Compare: "GreaterThan", Value: 75, Window: "5m", Entities: []string{}, Tags: []string{"test-tag"}, Alerts: Alerts{Slack: []SlackDetails{{URL: "https://hooks.slack.com/services/T1234567/AAAAAAAA/ZZZZZZ", Channel: "#alerts-test"}}, Email: []string{"bob@example.com"}}, Enabled: true}
+
+	if !reflect.DeepEqual(policy, expected) {
+		t.Errorf("Monitoring.UpdateAlertPolicy returned %+v, expected %+v", policy, expected)
+	}
+}
+
+func TestGetDropletBandwidth(t *testing.T) {
+	setup()
+	defer teardown()
+	now := time.Now()
+	metricReq := &DropletBandwidthMetricsRequest{
+		DropletMetricsRequest: DropletMetricsRequest{HostID: "123",
+			Start: now.Add(-300 * time.Second),
+			End:   now,
+		},
+		Interface: "private",
+		Direction: "inbound",
+	}
+
+	mux.HandleFunc("/v2/monitoring/metrics/droplet/bandwidth", func(w http.ResponseWriter, r *http.Request) {
+		hostID := r.URL.Query().Get("host_id")
+		inter := r.URL.Query().Get("interface")
+		direction := r.URL.Query().Get("direction")
+		start := r.URL.Query().Get("start")
+		end := r.URL.Query().Get("end")
+
+		assert.Equal(t, metricReq.HostID, hostID)
+		assert.Equal(t, metricReq.Interface, inter)
+		assert.Equal(t, metricReq.Direction, direction)
+		assert.Equal(t, fmt.Sprintf("%d", metricReq.Start.Unix()), start)
+		assert.Equal(t, fmt.Sprintf("%d", metricReq.End.Unix()), end)
+		testMethod(t, r, http.MethodGet)
+
+		fmt.Fprintf(w, bandwidthRespJSON)
+	})
+
+	metricsResp, _, err := client.Monitoring.GetDropletBandwidth(ctx, metricReq)
+	if err != nil {
+		t.Errorf("Monitoring.GetDropletBandwidthMetrics returned error: %v", err)
+	}
+
+	expected := &MetricsResponse{
+		Status: "success",
+		Data: MetricsData{
+			ResultType: "matrix",
+			Result: []metrics.SampleStream{
+				{
+					Metric: metrics.Metric{
+						"host_id":   "222651441",
+						"direction": "inbound",
+						"interface": "private",
+					},
+					Values: []metrics.SamplePair{
+						{
+							Timestamp: 1634052360000,
+							Value:     0.016600450090265357,
+						},
+						{
+							Timestamp: 1634052480000,
+							Value:     0.015085955677299055,
+						},
+						{
+							Timestamp: 1634052600000,
+							Value:     0.014941163855322308,
+						},
+						{
+							Timestamp: 1634052720000,
+							Value:     0.016214285714285712,
+						},
+					},
+				},
+			},
+		},
+	}
+
+	assert.Equal(t, expected, metricsResp)
+}
+
+func TestGetDropletTotalMemory(t *testing.T) {
+	setup()
+	defer teardown()
+	now := time.Now()
+	metricReq := &DropletMetricsRequest{
+		HostID: "123",
+		Start:  now.Add(-300 * time.Second),
+		End:    now,
+	}
+
+	mux.HandleFunc("/v2/monitoring/metrics/droplet/memory_total", func(w http.ResponseWriter, r *http.Request) {
+		hostID := r.URL.Query().Get("host_id")
+		start := r.URL.Query().Get("start")
+		end := r.URL.Query().Get("end")
+
+		assert.Equal(t, metricReq.HostID, hostID)
+		assert.Equal(t, fmt.Sprintf("%d", metricReq.Start.Unix()), start)
+		assert.Equal(t, fmt.Sprintf("%d", metricReq.End.Unix()), end)
+		testMethod(t, r, http.MethodGet)
+
+		fmt.Fprintf(w, memoryRespJSON)
+	})
+
+	metricsResp, _, err := client.Monitoring.GetDropletTotalMemory(ctx, metricReq)
+	if err != nil {
+		t.Errorf("Monitoring.GetDropletTotalMemory returned error: %v", err)
+	}
+
+	assert.Equal(t, testMemoryResponse, metricsResp)
+}
+
+func TestGetDropletFreeMemory(t *testing.T) {
+	setup()
+	defer teardown()
+	now := time.Now()
+	metricReq := &DropletMetricsRequest{
+		HostID: "123",
+		Start:  now.Add(-300 * time.Second),
+		End:    now,
+	}
+
+	mux.HandleFunc("/v2/monitoring/metrics/droplet/memory_free", func(w http.ResponseWriter, r *http.Request) {
+		hostID := r.URL.Query().Get("host_id")
+		start := r.URL.Query().Get("start")
+		end := r.URL.Query().Get("end")
+
+		assert.Equal(t, metricReq.HostID, hostID)
+		assert.Equal(t, fmt.Sprintf("%d", metricReq.Start.Unix()), start)
+		assert.Equal(t, fmt.Sprintf("%d", metricReq.End.Unix()), end)
+		testMethod(t, r, http.MethodGet)
+
+		fmt.Fprintf(w, memoryRespJSON)
+	})
+
+	metricsResp, _, err := client.Monitoring.GetDropletFreeMemory(ctx, metricReq)
+	if err != nil {
+		t.Errorf("Monitoring.GetDropletFreeMemory returned error: %v", err)
+	}
+
+	assert.Equal(t, testMemoryResponse, metricsResp)
+}
+
+func TestGetDropletAvailableMemory(t *testing.T) {
+	setup()
+	defer teardown()
+	now := time.Now()
+	metricReq := &DropletMetricsRequest{
+		HostID: "123",
+		Start:  now.Add(-300 * time.Second),
+		End:    now,
+	}
+
+	mux.HandleFunc("/v2/monitoring/metrics/droplet/memory_available", func(w http.ResponseWriter, r *http.Request) {
+		hostID := r.URL.Query().Get("host_id")
+		start := r.URL.Query().Get("start")
+		end := r.URL.Query().Get("end")
+
+		assert.Equal(t, metricReq.HostID, hostID)
+		assert.Equal(t, fmt.Sprintf("%d", metricReq.Start.Unix()), start)
+		assert.Equal(t, fmt.Sprintf("%d", metricReq.End.Unix()), end)
+		testMethod(t, r, http.MethodGet)
+
+		fmt.Fprintf(w, memoryRespJSON)
+	})
+
+	metricsResp, _, err := client.Monitoring.GetDropletAvailableMemory(ctx, metricReq)
+	if err != nil {
+		t.Errorf("Monitoring.GetDropletAvailableMemory returned error: %v", err)
+	}
+
+	assert.Equal(t, testMemoryResponse, metricsResp)
+}
+
+func TestGetDropletCachedMemory(t *testing.T) {
+	setup()
+	defer teardown()
+	now := time.Now()
+	metricReq := &DropletMetricsRequest{
+		HostID: "123",
+		Start:  now.Add(-300 * time.Second),
+		End:    now,
+	}
+
+	mux.HandleFunc("/v2/monitoring/metrics/droplet/memory_cached", func(w http.ResponseWriter, r *http.Request) {
+		hostID := r.URL.Query().Get("host_id")
+		start := r.URL.Query().Get("start")
+		end := r.URL.Query().Get("end")
+
+		assert.Equal(t, metricReq.HostID, hostID)
+		assert.Equal(t, fmt.Sprintf("%d", metricReq.Start.Unix()), start)
+		assert.Equal(t, fmt.Sprintf("%d", metricReq.End.Unix()), end)
+		testMethod(t, r, http.MethodGet)
+
+		fmt.Fprintf(w, memoryRespJSON)
+	})
+
+	metricsResp, _, err := client.Monitoring.GetDropletCachedMemory(ctx, metricReq)
+	if err != nil {
+		t.Errorf("Monitoring.GetDropletCachedMemory returned error: %v", err)
+	}
+
+	assert.Equal(t, testMemoryResponse, metricsResp)
+}
+
+func TestGetDropletFilesystemFree(t *testing.T) {
+	setup()
+	defer teardown()
+	now := time.Now()
+	metricReq := &DropletMetricsRequest{
+		HostID: "123",
+		Start:  now.Add(-300 * time.Second),
+		End:    now,
+	}
+
+	mux.HandleFunc("/v2/monitoring/metrics/droplet/filesystem_free", func(w http.ResponseWriter, r *http.Request) {
+		hostID := r.URL.Query().Get("host_id")
+		start := r.URL.Query().Get("start")
+		end := r.URL.Query().Get("end")
+
+		assert.Equal(t, metricReq.HostID, hostID)
+		assert.Equal(t, fmt.Sprintf("%d", metricReq.Start.Unix()), start)
+		assert.Equal(t, fmt.Sprintf("%d", metricReq.End.Unix()), end)
+		testMethod(t, r, http.MethodGet)
+
+		fmt.Fprintf(w, filesystemRespJSON)
+	})
+
+	metricsResp, _, err := client.Monitoring.GetDropletFilesystemFree(ctx, metricReq)
+	if err != nil {
+		t.Errorf("Monitoring.GetDropletFilesystemFree returned error: %v", err)
+	}
+
+	assert.Equal(t, testFilesystemResponse, metricsResp)
+}
+
+func TestGetDropletFilesystemSize(t *testing.T) {
+	setup()
+	defer teardown()
+	now := time.Now()
+	metricReq := &DropletMetricsRequest{
+		HostID: "123",
+		Start:  now.Add(-300 * time.Second),
+		End:    now,
+	}
+
+	mux.HandleFunc("/v2/monitoring/metrics/droplet/filesystem_size", func(w http.ResponseWriter, r *http.Request) {
+		hostID := r.URL.Query().Get("host_id")
+		start := r.URL.Query().Get("start")
+		end := r.URL.Query().Get("end")
+
+		assert.Equal(t, metricReq.HostID, hostID)
+		assert.Equal(t, fmt.Sprintf("%d", metricReq.Start.Unix()), start)
+		assert.Equal(t, fmt.Sprintf("%d", metricReq.End.Unix()), end)
+		testMethod(t, r, http.MethodGet)
+
+		fmt.Fprintf(w, filesystemRespJSON)
+	})
+
+	metricsResp, _, err := client.Monitoring.GetDropletFilesystemSize(ctx, metricReq)
+	if err != nil {
+		t.Errorf("Monitoring.GetDropletFilesystemSize returned error: %v", err)
+	}
+
+	assert.Equal(t, testFilesystemResponse, metricsResp)
+}
+
+func TestGetDropletLoad1(t *testing.T) {
+	setup()
+	defer teardown()
+	now := time.Now()
+	metricReq := &DropletMetricsRequest{
+		HostID: "123",
+		Start:  now.Add(-300 * time.Second),
+		End:    now,
+	}
+
+	mux.HandleFunc("/v2/monitoring/metrics/droplet/load_1", func(w http.ResponseWriter, r *http.Request) {
+		hostID := r.URL.Query().Get("host_id")
+		start := r.URL.Query().Get("start")
+		end := r.URL.Query().Get("end")
+
+		assert.Equal(t, metricReq.HostID, hostID)
+		assert.Equal(t, fmt.Sprintf("%d", metricReq.Start.Unix()), start)
+		assert.Equal(t, fmt.Sprintf("%d", metricReq.End.Unix()), end)
+		testMethod(t, r, http.MethodGet)
+
+		fmt.Fprintf(w, loadRespJSON)
+	})
+
+	metricsResp, _, err := client.Monitoring.GetDropletLoad1(ctx, metricReq)
+	if err != nil {
+		t.Errorf("Monitoring.GetDropletLoad1 returned error: %v", err)
+	}
+
+	assert.Equal(t, testLoadResponse, metricsResp)
+}
+
+func TestGetDropletLoad5(t *testing.T) {
+	setup()
+	defer teardown()
+	now := time.Now()
+	metricReq := &DropletMetricsRequest{
+		HostID: "123",
+		Start:  now.Add(-300 * time.Second),
+		End:    now,
+	}
+
+	mux.HandleFunc("/v2/monitoring/metrics/droplet/load_5", func(w http.ResponseWriter, r *http.Request) {
+		hostID := r.URL.Query().Get("host_id")
+		start := r.URL.Query().Get("start")
+		end := r.URL.Query().Get("end")
+
+		assert.Equal(t, metricReq.HostID, hostID)
+		assert.Equal(t, fmt.Sprintf("%d", metricReq.Start.Unix()), start)
+		assert.Equal(t, fmt.Sprintf("%d", metricReq.End.Unix()), end)
+		testMethod(t, r, http.MethodGet)
+
+		fmt.Fprintf(w, loadRespJSON)
+	})
+
+	metricsResp, _, err := client.Monitoring.GetDropletLoad5(ctx, metricReq)
+	if err != nil {
+		t.Errorf("Monitoring.GetDropletLoad5 returned error: %v", err)
+	}
+
+	assert.Equal(t, testLoadResponse, metricsResp)
+}
+
+func TestGetDropletLoad15(t *testing.T) {
+	setup()
+	defer teardown()
+	now := time.Now()
+	metricReq := &DropletMetricsRequest{
+		HostID: "123",
+		Start:  now.Add(-300 * time.Second),
+		End:    now,
+	}
+
+	mux.HandleFunc("/v2/monitoring/metrics/droplet/load_15", func(w http.ResponseWriter, r *http.Request) {
+		hostID := r.URL.Query().Get("host_id")
+		start := r.URL.Query().Get("start")
+		end := r.URL.Query().Get("end")
+
+		assert.Equal(t, metricReq.HostID, hostID)
+		assert.Equal(t, fmt.Sprintf("%d", metricReq.Start.Unix()), start)
+		assert.Equal(t, fmt.Sprintf("%d", metricReq.End.Unix()), end)
+		testMethod(t, r, http.MethodGet)
+
+		fmt.Fprintf(w, loadRespJSON)
+	})
+
+	metricsResp, _, err := client.Monitoring.GetDropletLoad15(ctx, metricReq)
+	if err != nil {
+		t.Errorf("Monitoring.GetDropletLoad15 returned error: %v", err)
+	}
+
+	assert.Equal(t, testLoadResponse, metricsResp)
+}
+
+func TestGetDropletCPU(t *testing.T) {
+	setup()
+	defer teardown()
+	now := time.Now()
+	metricReq := &DropletMetricsRequest{
+		HostID: "123",
+		Start:  now.Add(-300 * time.Second),
+		End:    now,
+	}
+
+	mux.HandleFunc("/v2/monitoring/metrics/droplet/cpu", func(w http.ResponseWriter, r *http.Request) {
+		hostID := r.URL.Query().Get("host_id")
+		start := r.URL.Query().Get("start")
+		end := r.URL.Query().Get("end")
+
+		assert.Equal(t, metricReq.HostID, hostID)
+		assert.Equal(t, fmt.Sprintf("%d", metricReq.Start.Unix()), start)
+		assert.Equal(t, fmt.Sprintf("%d", metricReq.End.Unix()), end)
+		testMethod(t, r, http.MethodGet)
+
+		fmt.Fprintf(w, cpuRespJSON)
+	})
+
+	metricsResp, _, err := client.Monitoring.GetDropletCPU(ctx, metricReq)
+	if err != nil {
+		t.Errorf("Monitoring.GetDropletCPU returned error: %v", err)
+	}
+
+	assert.Equal(t, testCPUResponse, metricsResp)
+}
diff --git a/projects.go b/projects.go
index 172c2c9..b59134b 100644
--- a/projects.go
+++ b/projects.go
@@ -17,7 +17,7 @@ const (
 )
 
 // ProjectsService is an interface for creating and managing Projects with the DigitalOcean API.
-// See: https://developers.digitalocean.com/documentation/v2/#projects
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Projects
 type ProjectsService interface {
 	List(context.Context, *ListOptions) ([]Project, *Response, error)
 	GetDefault(context.Context) (*Project, *Response, error)
@@ -117,7 +117,7 @@ type ProjectResource struct {
 	Status     string                `json:"status,omitempty"`
 }
 
-// ProjetResourceLinks specify the link for more information about the resource.
+// ProjectResourceLinks specify the link for more information about the resource.
 type ProjectResourceLinks struct {
 	Self string `json:"self"`
 }
@@ -252,7 +252,6 @@ func (p *ProjectsServiceOp) ListResources(ctx context.Context, projectID string,
 
 // AssignResources assigns one or more resources to a project. AssignResources
 // accepts resources in two possible formats:
-
 //  1. The resource type, like `&Droplet{ID: 1}` or `&FloatingIP{IP: "1.2.3.4"}`
 //  2. A valid DO URN as a string, like "do:droplet:1234"
 //
diff --git a/projects_test.go b/projects_test.go
index 62c0818..ae6574d 100644
--- a/projects_test.go
+++ b/projects_test.go
@@ -335,6 +335,13 @@ func TestProjects_ListResources(t *testing.T) {
 				Self: "http://example.com/v2/floating_ips/1.2.3.4",
 			},
 		},
+		{
+			URN:        "do:reservedip:1.2.3.4",
+			AssignedAt: "2018-09-27 00:00:00",
+			Links: &ProjectResourceLinks{
+				Self: "http://example.com/v2/reserved_ips/1.2.3.4",
+			},
+		},
 	}
 
 	mux.HandleFunc("/v2/projects/project-1/resources", func(w http.ResponseWriter, r *http.Request) {
@@ -378,6 +385,13 @@ func TestProjects_ListResourcesWithMultiplePages(t *testing.T) {
 				"links": {
 					"self": "http://example.com/v2/floating_ips/1.2.3.4"
 				}
+			},
+			{
+				"urn": "do:reservedip:1.2.3.4",
+				"assigned_at": "2018-09-27 00:00:00",
+				"links": {
+					"self": "http://example.com/v2/reserved_ips/1.2.3.4"
+				}
 			}
 		],
 		"links": {
@@ -420,6 +434,13 @@ func TestProjects_ListResourcesWithPageNumber(t *testing.T) {
 				"links": {
 					"self": "http://example.com/v2/floating_ips/1.2.3.4"
 				}
+			},
+			{
+				"urn": "do:reservedip:1.2.3.4",
+				"assigned_at": "2018-09-27 00:00:00",
+				"links": {
+					"self": "http://example.com/v2/reserved_ips/1.2.3.4"
+				}
 			}
 		],
 		"links": {
@@ -452,6 +473,7 @@ func TestProjects_AssignFleetResourcesWithTypes(t *testing.T) {
 	assignableResources := []interface{}{
 		&Droplet{ID: 1234},
 		&FloatingIP{IP: "1.2.3.4"},
+		&ReservedIP{IP: "1.2.3.4"},
 	}
 
 	mockResp := `
@@ -470,6 +492,13 @@ func TestProjects_AssignFleetResourcesWithTypes(t *testing.T) {
 				"links": {
 					"self": "http://example.com/v2/floating_ips/1.2.3.4"
 				}
+			},
+			{
+				"urn": "do:reservedip:1.2.3.4",
+				"assigned_at": "2018-09-27 00:00:00",
+				"links": {
+					"self": "http://example.com/v2/reserved_ips/1.2.3.4"
+				}
 			}
 		]
 	}`
@@ -482,7 +511,7 @@ func TestProjects_AssignFleetResourcesWithTypes(t *testing.T) {
 		}
 
 		req := strings.TrimSuffix(string(reqBytes), "\n")
-		expectedReq := `{"resources":["do:droplet:1234","do:floatingip:1.2.3.4"]}`
+		expectedReq := `{"resources":["do:droplet:1234","do:floatingip:1.2.3.4","do:reservedip:1.2.3.4"]}`
 		if req != expectedReq {
 			t.Errorf("projects assign req didn't match up:\n expected %+v\n got %+v\n", expectedReq, req)
 		}
@@ -503,6 +532,7 @@ func TestProjects_AssignFleetResourcesWithStrings(t *testing.T) {
 	assignableResources := []interface{}{
 		"do:droplet:1234",
 		"do:floatingip:1.2.3.4",
+		"do:reservedip:1.2.3.4",
 	}
 
 	mockResp := `
@@ -521,6 +551,13 @@ func TestProjects_AssignFleetResourcesWithStrings(t *testing.T) {
 				"links": {
 					"self": "http://example.com/v2/floating_ips/1.2.3.4"
 				}
+			},
+			{
+				"urn": "do:reservedip:1.2.3.4",
+				"assigned_at": "2018-09-27 00:00:00",
+				"links": {
+					"self": "http://example.com/v2/reserved_ips/1.2.3.4"
+				}
 			}
 		]
 	}`
@@ -533,7 +570,7 @@ func TestProjects_AssignFleetResourcesWithStrings(t *testing.T) {
 		}
 
 		req := strings.TrimSuffix(string(reqBytes), "\n")
-		expectedReq := `{"resources":["do:droplet:1234","do:floatingip:1.2.3.4"]}`
+		expectedReq := `{"resources":["do:droplet:1234","do:floatingip:1.2.3.4","do:reservedip:1.2.3.4"]}`
 		if req != expectedReq {
 			t.Errorf("projects assign req didn't match up:\n expected %+v\n got %+v\n", expectedReq, req)
 		}
@@ -554,6 +591,7 @@ func TestProjects_AssignFleetResourcesWithStringsAndTypes(t *testing.T) {
 	assignableResources := []interface{}{
 		"do:droplet:1234",
 		&FloatingIP{IP: "1.2.3.4"},
+		&ReservedIP{IP: "1.2.3.4"},
 	}
 
 	mockResp := `
@@ -572,6 +610,13 @@ func TestProjects_AssignFleetResourcesWithStringsAndTypes(t *testing.T) {
 				"links": {
 					"self": "http://example.com/v2/floating_ips/1.2.3.4"
 				}
+			},
+			{
+				"urn": "do:reservedip:1.2.3.4",
+				"assigned_at": "2018-09-27 00:00:00",
+				"links": {
+					"self": "http://example.com/v2/reserved_ips/1.2.3.4"
+				}
 			}
 		]
 	}`
@@ -584,7 +629,7 @@ func TestProjects_AssignFleetResourcesWithStringsAndTypes(t *testing.T) {
 		}
 
 		req := strings.TrimSuffix(string(reqBytes), "\n")
-		expectedReq := `{"resources":["do:droplet:1234","do:floatingip:1.2.3.4"]}`
+		expectedReq := `{"resources":["do:droplet:1234","do:floatingip:1.2.3.4","do:reservedip:1.2.3.4"]}`
 		if req != expectedReq {
 			t.Errorf("projects assign req didn't match up:\n expected %+v\n got %+v\n", expectedReq, req)
 		}
diff --git a/regions.go b/regions.go
index b07175e..ea82f2f 100644
--- a/regions.go
+++ b/regions.go
@@ -7,7 +7,7 @@ import (
 
 // RegionsService is an interface for interfacing with the regions
 // endpoints of the DigitalOcean API
-// See: https://developers.digitalocean.com/documentation/v2#regions
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Regions
 type RegionsService interface {
 	List(context.Context, *ListOptions) ([]Region, *Response, error)
 }
diff --git a/registry.go b/registry.go
index 1b5c40b..b0c2432 100644
--- a/registry.go
+++ b/registry.go
@@ -18,16 +18,26 @@ const (
 
 // RegistryService is an interface for interfacing with the Registry endpoints
 // of the DigitalOcean API.
-// See: https://developers.digitalocean.com/documentation/v2#registry
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Container-Registry
 type RegistryService interface {
 	Create(context.Context, *RegistryCreateRequest) (*Registry, *Response, error)
 	Get(context.Context) (*Registry, *Response, error)
 	Delete(context.Context) (*Response, error)
 	DockerCredentials(context.Context, *RegistryDockerCredentialsRequest) (*DockerCredentials, *Response, error)
 	ListRepositories(context.Context, string, *ListOptions) ([]*Repository, *Response, error)
+	ListRepositoriesV2(context.Context, string, *TokenListOptions) ([]*RepositoryV2, *Response, error)
 	ListRepositoryTags(context.Context, string, string, *ListOptions) ([]*RepositoryTag, *Response, error)
 	DeleteTag(context.Context, string, string, string) (*Response, error)
+	ListRepositoryManifests(context.Context, string, string, *ListOptions) ([]*RepositoryManifest, *Response, error)
 	DeleteManifest(context.Context, string, string, string) (*Response, error)
+	StartGarbageCollection(context.Context, string, ...*StartGarbageCollectionRequest) (*GarbageCollection, *Response, error)
+	GetGarbageCollection(context.Context, string) (*GarbageCollection, *Response, error)
+	ListGarbageCollections(context.Context, string, *ListOptions) ([]*GarbageCollection, *Response, error)
+	UpdateGarbageCollection(context.Context, string, string, *UpdateGarbageCollectionRequest) (*GarbageCollection, *Response, error)
+	GetOptions(context.Context) (*RegistryOptions, *Response, error)
+	GetSubscription(context.Context) (*RegistrySubscription, *Response, error)
+	UpdateSubscription(context.Context, *RegistrySubscriptionUpdateRequest) (*RegistrySubscription, *Response, error)
+	ValidateName(context.Context, *RegistryValidateNameRequest) (*Response, error)
 }
 
 var _ RegistryService = &RegistryServiceOp{}
@@ -39,7 +49,9 @@ type RegistryServiceOp struct {
 
 // RegistryCreateRequest represents a request to create a registry.
 type RegistryCreateRequest struct {
-	Name string `json:"name,omitempty"`
+	Name                 string `json:"name,omitempty"`
+	SubscriptionTierSlug string `json:"subscription_tier_slug,omitempty"`
+	Region               string `json:"region,omitempty"`
 }
 
 // RegistryDockerCredentialsRequest represents a request to retrieve docker
@@ -51,8 +63,11 @@ type RegistryDockerCredentialsRequest struct {
 
 // Registry represents a registry.
 type Registry struct {
-	Name      string    `json:"name,omitempty"`
-	CreatedAt time.Time `json:"created_at,omitempty"`
+	Name                       string    `json:"name,omitempty"`
+	StorageUsageBytes          uint64    `json:"storage_usage_bytes,omitempty"`
+	StorageUsageBytesUpdatedAt time.Time `json:"storage_usage_bytes_updated_at,omitempty"`
+	CreatedAt                  time.Time `json:"created_at,omitempty"`
+	Region                     string    `json:"region,omitempty"`
 }
 
 // Repository represents a repository
@@ -63,6 +78,15 @@ type Repository struct {
 	TagCount     uint64         `json:"tag_count,omitempty"`
 }
 
+// RepositoryV2 represents a repository in the V2 format
+type RepositoryV2 struct {
+	RegistryName   string              `json:"registry_name,omitempty"`
+	Name           string              `json:"name,omitempty"`
+	TagCount       uint64              `json:"tag_count,omitempty"`
+	ManifestCount  uint64              `json:"manifest_count,omitempty"`
+	LatestManifest *RepositoryManifest `json:"latest_manifest,omitempty"`
+}
+
 // RepositoryTag represents a repository tag
 type RepositoryTag struct {
 	RegistryName        string    `json:"registry_name,omitempty"`
@@ -74,6 +98,24 @@ type RepositoryTag struct {
 	UpdatedAt           time.Time `json:"updated_at,omitempty"`
 }
 
+// RepositoryManifest represents a repository manifest
+type RepositoryManifest struct {
+	RegistryName        string    `json:"registry_name,omitempty"`
+	Repository          string    `json:"repository,omitempty"`
+	Digest              string    `json:"digest,omitempty"`
+	CompressedSizeBytes uint64    `json:"compressed_size_bytes,omitempty"`
+	SizeBytes           uint64    `json:"size_bytes,omitempty"`
+	UpdatedAt           time.Time `json:"updated_at,omitempty"`
+	Tags                []string  `json:"tags,omitempty"`
+	Blobs               []*Blob   `json:"blobs,omitempty"`
+}
+
+// Blob represents a registry blob
+type Blob struct {
+	Digest              string `json:"digest,omitempty"`
+	CompressedSizeBytes uint64 `json:"compressed_size_bytes,omitempty"`
+}
+
 type registryRoot struct {
 	Registry *Registry `json:"registry,omitempty"`
 }
@@ -84,12 +126,120 @@ type repositoriesRoot struct {
 	Meta         *Meta         `json:"meta"`
 }
 
+type repositoriesV2Root struct {
+	Repositories []*RepositoryV2 `json:"repositories,omitempty"`
+	Links        *Links          `json:"links,omitempty"`
+	Meta         *Meta           `json:"meta"`
+}
+
 type repositoryTagsRoot struct {
 	Tags  []*RepositoryTag `json:"tags,omitempty"`
 	Links *Links           `json:"links,omitempty"`
 	Meta  *Meta            `json:"meta"`
 }
 
+type repositoryManifestsRoot struct {
+	Manifests []*RepositoryManifest `json:"manifests,omitempty"`
+	Links     *Links                `json:"links,omitempty"`
+	Meta      *Meta                 `json:"meta"`
+}
+
+// GarbageCollection represents a garbage collection.
+type GarbageCollection struct {
+	UUID         string                `json:"uuid"`
+	RegistryName string                `json:"registry_name"`
+	Status       string                `json:"status"`
+	Type         GarbageCollectionType `json:"type"`
+	CreatedAt    time.Time             `json:"created_at"`
+	UpdatedAt    time.Time             `json:"updated_at"`
+	BlobsDeleted uint64                `json:"blobs_deleted"`
+	FreedBytes   uint64                `json:"freed_bytes"`
+}
+
+type garbageCollectionRoot struct {
+	GarbageCollection *GarbageCollection `json:"garbage_collection,omitempty"`
+}
+
+type garbageCollectionsRoot struct {
+	GarbageCollections []*GarbageCollection `json:"garbage_collections,omitempty"`
+	Links              *Links               `json:"links,omitempty"`
+	Meta               *Meta                `json:"meta"`
+}
+
+type GarbageCollectionType string
+
+const (
+	// GCTypeUntaggedManifestsOnly indicates that a garbage collection should
+	// only delete untagged manifests.
+	GCTypeUntaggedManifestsOnly = GarbageCollectionType("untagged manifests only")
+	// GCTypeUnreferencedBlobsOnly indicates that a garbage collection should
+	// only delete unreferenced blobs.
+	GCTypeUnreferencedBlobsOnly = GarbageCollectionType("unreferenced blobs only")
+	// GCTypeUntaggedManifestsAndUnreferencedBlobs indicates that a garbage
+	// collection should delete both untagged manifests and unreferenced blobs.
+	GCTypeUntaggedManifestsAndUnreferencedBlobs = GarbageCollectionType("untagged manifests and unreferenced blobs")
+)
+
+// StartGarbageCollectionRequest represents options to a garbage collection
+// start request.
+type StartGarbageCollectionRequest struct {
+	Type GarbageCollectionType `json:"type"`
+}
+
+// UpdateGarbageCollectionRequest represents a request to update a garbage
+// collection.
+type UpdateGarbageCollectionRequest struct {
+	Cancel bool `json:"cancel"`
+}
+
+// RegistryOptions are options for users when creating or updating a registry.
+type RegistryOptions struct {
+	SubscriptionTiers []*RegistrySubscriptionTier `json:"subscription_tiers,omitempty"`
+	AvailableRegions  []string                    `json:"available_regions"`
+}
+
+type registryOptionsRoot struct {
+	Options *RegistryOptions `json:"options"`
+}
+
+// RegistrySubscriptionTier is a subscription tier for container registry.
+type RegistrySubscriptionTier struct {
+	Name                   string `json:"name"`
+	Slug                   string `json:"slug"`
+	IncludedRepositories   uint64 `json:"included_repositories"`
+	IncludedStorageBytes   uint64 `json:"included_storage_bytes"`
+	AllowStorageOverage    bool   `json:"allow_storage_overage"`
+	IncludedBandwidthBytes uint64 `json:"included_bandwidth_bytes"`
+	MonthlyPriceInCents    uint64 `json:"monthly_price_in_cents"`
+	Eligible               bool   `json:"eligible,omitempty"`
+	// EligibilityReasons is included when Eligible is false, and indicates the
+	// reasons why this tier is not available to the user.
+	EligibilityReasons []string `json:"eligibility_reasons,omitempty"`
+}
+
+// RegistrySubscription is a user's subscription.
+type RegistrySubscription struct {
+	Tier      *RegistrySubscriptionTier `json:"tier"`
+	CreatedAt time.Time                 `json:"created_at"`
+	UpdatedAt time.Time                 `json:"updated_at"`
+}
+
+type registrySubscriptionRoot struct {
+	Subscription *RegistrySubscription `json:"subscription"`
+}
+
+// RegistrySubscriptionUpdateRequest represents a request to update the
+// subscription plan for a registry.
+type RegistrySubscriptionUpdateRequest struct {
+	TierSlug string `json:"tier_slug"`
+}
+
+// RegistryValidateNameRequest represents a request to validate that a
+// container registry name is available for use.
+type RegistryValidateNameRequest struct {
+	Name string `json:"name"`
+}
+
 // Get retrieves the details of a Registry.
 func (svc *RegistryServiceOp) Get(ctx context.Context) (*Registry, *Response, error) {
 	req, err := svc.client.NewRequest(ctx, http.MethodGet, registryPath, nil)
@@ -194,6 +344,30 @@ func (svc *RegistryServiceOp) ListRepositories(ctx context.Context, registry str
 	return root.Repositories, resp, nil
 }
 
+// ListRepositoriesV2 returns a list of the Repositories in a registry.
+func (svc *RegistryServiceOp) ListRepositoriesV2(ctx context.Context, registry string, opts *TokenListOptions) ([]*RepositoryV2, *Response, error) {
+	path := fmt.Sprintf("%s/%s/repositoriesV2", registryPath, registry)
+	path, err := addOptions(path, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+	req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	root := new(repositoriesV2Root)
+
+	resp, err := svc.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	resp.Links = root.Links
+	resp.Meta = root.Meta
+
+	return root.Repositories, resp, nil
+}
+
 // ListRepositoryTags returns a list of the RepositoryTags available within the given repository.
 func (svc *RegistryServiceOp) ListRepositoryTags(ctx context.Context, registry, repository string, opts *ListOptions) ([]*RepositoryTag, *Response, error) {
 	path := fmt.Sprintf("%s/%s/repositories/%s/tags", registryPath, registry, url.PathEscape(repository))
@@ -237,6 +411,30 @@ func (svc *RegistryServiceOp) DeleteTag(ctx context.Context, registry, repositor
 	return resp, nil
 }
 
+// ListRepositoryManifests returns a list of the RepositoryManifests available within the given repository.
+func (svc *RegistryServiceOp) ListRepositoryManifests(ctx context.Context, registry, repository string, opts *ListOptions) ([]*RepositoryManifest, *Response, error) {
+	path := fmt.Sprintf("%s/%s/repositories/%s/digests", registryPath, registry, url.PathEscape(repository))
+	path, err := addOptions(path, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+	req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	root := new(repositoryManifestsRoot)
+
+	resp, err := svc.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	resp.Links = root.Links
+	resp.Meta = root.Meta
+
+	return root.Manifests, resp, nil
+}
+
 // DeleteManifest deletes a manifest by its digest within a given repository.
 func (svc *RegistryServiceOp) DeleteManifest(ctx context.Context, registry, repository, digest string) (*Response, error) {
 	path := fmt.Sprintf("%s/%s/repositories/%s/digests/%s", registryPath, registry, url.PathEscape(repository), digest)
@@ -251,3 +449,164 @@ func (svc *RegistryServiceOp) DeleteManifest(ctx context.Context, registry, repo
 
 	return resp, nil
 }
+
+// StartGarbageCollection requests a garbage collection for the specified
+// registry.
+func (svc *RegistryServiceOp) StartGarbageCollection(ctx context.Context, registry string, request ...*StartGarbageCollectionRequest) (*GarbageCollection, *Response, error) {
+	path := fmt.Sprintf("%s/%s/garbage-collection", registryPath, registry)
+	var requestParams interface{}
+	if len(request) < 1 {
+		// default to only garbage collecting unreferenced blobs for backwards
+		// compatibility
+		requestParams = &StartGarbageCollectionRequest{
+			Type: GCTypeUnreferencedBlobsOnly,
+		}
+	} else {
+		requestParams = request[0]
+	}
+	req, err := svc.client.NewRequest(ctx, http.MethodPost, path, requestParams)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(garbageCollectionRoot)
+	resp, err := svc.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return root.GarbageCollection, resp, err
+}
+
+// GetGarbageCollection retrieves the currently-active garbage collection for
+// the specified registry; if there are no active garbage collections, then
+// return a 404/NotFound error. There can only be one active garbage
+// collection on a registry.
+func (svc *RegistryServiceOp) GetGarbageCollection(ctx context.Context, registry string) (*GarbageCollection, *Response, error) {
+	path := fmt.Sprintf("%s/%s/garbage-collection", registryPath, registry)
+	req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(garbageCollectionRoot)
+	resp, err := svc.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return root.GarbageCollection, resp, nil
+}
+
+// ListGarbageCollections retrieves all garbage collections (active and
+// inactive) for the specified registry.
+func (svc *RegistryServiceOp) ListGarbageCollections(ctx context.Context, registry string, opts *ListOptions) ([]*GarbageCollection, *Response, error) {
+	path := fmt.Sprintf("%s/%s/garbage-collections", registryPath, registry)
+	path, err := addOptions(path, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+	req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(garbageCollectionsRoot)
+	resp, err := svc.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	if root.Links != nil {
+		resp.Links = root.Links
+	}
+	if root.Meta != nil {
+		resp.Meta = root.Meta
+	}
+
+	return root.GarbageCollections, resp, nil
+}
+
+// UpdateGarbageCollection updates the specified garbage collection for the
+// specified registry. While only the currently-active garbage collection can
+// be updated we still require the exact garbage collection to be specified to
+// avoid race conditions that might arise from issuing an update to the
+// implicit "currently-active" garbage collection. Returns the updated garbage
+// collection.
+func (svc *RegistryServiceOp) UpdateGarbageCollection(ctx context.Context, registry, gcUUID string, request *UpdateGarbageCollectionRequest) (*GarbageCollection, *Response, error) {
+	path := fmt.Sprintf("%s/%s/garbage-collection/%s", registryPath, registry, gcUUID)
+	req, err := svc.client.NewRequest(ctx, http.MethodPut, path, request)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(garbageCollectionRoot)
+	resp, err := svc.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return root.GarbageCollection, resp, nil
+}
+
+// GetOptions returns options the user can use when creating or updating a
+// registry.
+func (svc *RegistryServiceOp) GetOptions(ctx context.Context) (*RegistryOptions, *Response, error) {
+	path := fmt.Sprintf("%s/options", registryPath)
+	req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(registryOptionsRoot)
+	resp, err := svc.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return root.Options, resp, nil
+}
+
+// GetSubscription retrieves the user's subscription.
+func (svc *RegistryServiceOp) GetSubscription(ctx context.Context) (*RegistrySubscription, *Response, error) {
+	path := fmt.Sprintf("%s/subscription", registryPath)
+	req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	root := new(registrySubscriptionRoot)
+	resp, err := svc.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	return root.Subscription, resp, nil
+}
+
+// UpdateSubscription updates the user's registry subscription.
+func (svc *RegistryServiceOp) UpdateSubscription(ctx context.Context, request *RegistrySubscriptionUpdateRequest) (*RegistrySubscription, *Response, error) {
+	path := fmt.Sprintf("%s/subscription", registryPath)
+	req, err := svc.client.NewRequest(ctx, http.MethodPost, path, request)
+	if err != nil {
+		return nil, nil, err
+	}
+	root := new(registrySubscriptionRoot)
+	resp, err := svc.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	return root.Subscription, resp, nil
+}
+
+// ValidateName validates that a container registry name is available for use.
+func (svc *RegistryServiceOp) ValidateName(ctx context.Context, request *RegistryValidateNameRequest) (*Response, error) {
+	path := fmt.Sprintf("%s/validate-name", registryPath)
+	req, err := svc.client.NewRequest(ctx, http.MethodPost, path, request)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := svc.client.Do(ctx, req, nil)
+	if err != nil {
+		return resp, err
+	}
+	return resp, nil
+}
diff --git a/registry_test.go b/registry_test.go
index 898d614..d234615 100644
--- a/registry_test.go
+++ b/registry_test.go
@@ -3,7 +3,9 @@ package godo
 import (
 	"encoding/json"
 	"fmt"
+	"html/template"
 	"net/http"
+	"strings"
 	"testing"
 	"time"
 
@@ -13,17 +15,33 @@ import (
 
 const (
 	testRegistry          = "test-registry"
+	testRegion            = "r1"
 	testRepository        = "test/repository"
 	testEncodedRepository = "test%2Frepository"
 	testTag               = "test-tag"
 	testDigest            = "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"
 	testCompressedSize    = 2789669
 	testSize              = 5843968
+	testGCBlobsDeleted    = 42
+	testGCFreedBytes      = 666
+	testGCStatus          = "requested"
+	testGCUUID            = "mew-mew-id"
+	testGCType            = GCTypeUnreferencedBlobsOnly
 )
 
 var (
-	testTime       = time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC)
-	testTimeString = testTime.Format(time.RFC3339)
+	testTime              = time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC)
+	testTimeString        = testTime.Format(time.RFC3339)
+	testGarbageCollection = &GarbageCollection{
+		UUID:         testGCUUID,
+		RegistryName: testRegistry,
+		Status:       testGCStatus,
+		CreatedAt:    testTime,
+		UpdatedAt:    testTime,
+		BlobsDeleted: testGCBlobsDeleted,
+		FreedBytes:   testGCFreedBytes,
+		Type:         testGCType,
+	}
 )
 
 func TestRegistry_Create(t *testing.T) {
@@ -31,20 +49,41 @@ func TestRegistry_Create(t *testing.T) {
 	defer teardown()
 
 	want := &Registry{
-		Name:      testRegistry,
-		CreatedAt: testTime,
+		Name:                       testRegistry,
+		StorageUsageBytes:          0,
+		StorageUsageBytesUpdatedAt: testTime,
+		CreatedAt:                  testTime,
+		Region:                     testRegion,
 	}
 
 	createRequest := &RegistryCreateRequest{
-		Name: want.Name,
+		Name:                 want.Name,
+		SubscriptionTierSlug: "basic",
+		Region:               testRegion,
 	}
 
 	createResponseJSON := `
 {
 	"registry": {
 		"name": "` + testRegistry + `",
-        "created_at": "` + testTimeString + `"
-	}
+		"storage_usage_bytes": 0,
+        "storage_usage_bytes_updated_at": "` + testTimeString + `",
+        "created_at": "` + testTimeString + `",
+		"region": "` + testRegion + `"
+	},
+    "subscription": {
+      "tier": {
+        "name": "Basic",
+        "slug": "basic",
+        "included_repositories": 5,
+        "included_storage_bytes": 5368709120,
+        "allow_storage_overage": true,
+        "included_bandwidth_bytes": 5368709120,
+        "monthly_price_in_cents": 500
+      },
+      "created_at": "` + testTimeString + `",
+      "updated_at": "` + testTimeString + `"
+    }
 }`
 
 	mux.HandleFunc("/v2/registry", func(w http.ResponseWriter, r *http.Request) {
@@ -69,13 +108,21 @@ func TestRegistry_Get(t *testing.T) {
 	defer teardown()
 
 	want := &Registry{
-		Name: testRegistry,
+		Name:                       testRegistry,
+		StorageUsageBytes:          0,
+		StorageUsageBytesUpdatedAt: testTime,
+		CreatedAt:                  testTime,
+		Region:                     testRegion,
 	}
 
 	getResponseJSON := `
 {
 	"registry": {
-		"name": "` + testRegistry + `"
+		"name": "` + testRegistry + `",
+		"storage_usage_bytes": 0,
+        "storage_usage_bytes_updated_at": "` + testTimeString + `",
+        "created_at": "` + testTimeString + `",
+		"region": "` + testRegion + `"
 	}
 }`
 
@@ -120,13 +167,13 @@ func TestRegistry_DockerCredentials(t *testing.T) {
 		},
 		{
 			name:                  "read-only + custom expiry",
-			params:                &RegistryDockerCredentialsRequest{ExpirySeconds: intPtr(60 * 60)},
+			params:                &RegistryDockerCredentialsRequest{ExpirySeconds: PtrTo(60 * 60)},
 			expectedReadWrite:     "false",
 			expectedExpirySeconds: "3600",
 		},
 		{
 			name:                  "read/write + custom expiry",
-			params:                &RegistryDockerCredentialsRequest{ReadWrite: true, ExpirySeconds: intPtr(60 * 60)},
+			params:                &RegistryDockerCredentialsRequest{ReadWrite: true, ExpirySeconds: PtrTo(60 * 60)},
 			expectedReadWrite:     "true",
 			expectedExpirySeconds: "3600",
 		},
@@ -224,6 +271,109 @@ func TestRepository_List(t *testing.T) {
 	assert.Equal(t, wantRespMeta, gotRespMeta)
 }
 
+func TestRepository_ListV2(t *testing.T) {
+	setup()
+	defer teardown()
+
+	wantRepositories := []*RepositoryV2{
+		{
+			RegistryName:  testRegistry,
+			Name:          testRepository,
+			TagCount:      2,
+			ManifestCount: 1,
+			LatestManifest: &RepositoryManifest{
+				Digest:              "sha256:abc",
+				RegistryName:        testRegistry,
+				Repository:          testRepository,
+				CompressedSizeBytes: testCompressedSize,
+				SizeBytes:           testSize,
+				UpdatedAt:           testTime,
+				Tags:                []string{"v1", "v2"},
+				Blobs: []*Blob{
+					{
+						Digest:              "sha256:blob1",
+						CompressedSizeBytes: 100,
+					},
+					{
+						Digest:              "sha256:blob2",
+						CompressedSizeBytes: 200,
+					},
+				},
+			},
+		},
+	}
+	baseLinkPage := fmt.Sprintf("https://api.digitalocean.com/v2/registry/%s/repositoriesV2", testRegistry)
+	getResponseJSON := `{
+	"repositories": [
+		{
+			"registry_name": "` + testRegistry + `",
+			"name": "` + testRepository + `",
+			"tag_count": 2,
+			"manifest_count": 1,
+			"latest_manifest": {
+				"digest": "sha256:abc",
+				"registry_name": "` + testRegistry + `",
+				"repository": "` + testRepository + `",
+				"compressed_size_bytes": ` + fmt.Sprintf("%d", testCompressedSize) + `,
+				"size_bytes": ` + fmt.Sprintf("%d", testSize) + `,
+				"updated_at": "` + testTimeString + `",
+				"tags": [
+					"v1",
+					"v2"
+				],
+				"blobs": [
+					{
+						"digest": "sha256:blob1",
+						"compressed_size_bytes": 100
+					},
+					{
+						"digest": "sha256:blob2",
+						"compressed_size_bytes": 200
+					}
+				]
+			}
+		}
+	],
+	"links": {
+	    "pages": {
+			"first":    "` + baseLinkPage + `?page=1&page_size=1",
+			"prev":     "` + baseLinkPage + `?page=2&page_size=1&page_token=aaa",
+			"next":     "` + baseLinkPage + `?page=4&page_size=1&page_token=ccc",
+			"last":     "` + baseLinkPage + `?page=5&page_size=1"
+		}
+	},
+	"meta": {
+	    "total": 5
+	}
+}`
+
+	mux.HandleFunc(fmt.Sprintf("/v2/registry/%s/repositoriesV2", testRegistry), func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		testFormValues(t, r, map[string]string{"page": "3", "per_page": "1", "page_token": "bbb"})
+		fmt.Fprint(w, getResponseJSON)
+	})
+	got, response, err := client.Registry.ListRepositoriesV2(ctx, testRegistry, &TokenListOptions{Page: 3, PerPage: 1, Token: "bbb"})
+	require.NoError(t, err)
+	require.Equal(t, wantRepositories, got)
+
+	gotRespLinks := response.Links
+	wantRespLinks := &Links{
+		Pages: &Pages{
+			First: fmt.Sprintf("https://api.digitalocean.com/v2/registry/%s/repositoriesV2?page=1&page_size=1", testRegistry),
+			Prev:  fmt.Sprintf("https://api.digitalocean.com/v2/registry/%s/repositoriesV2?page=2&page_size=1&page_token=aaa", testRegistry),
+			Next:  fmt.Sprintf("https://api.digitalocean.com/v2/registry/%s/repositoriesV2?page=4&page_size=1&page_token=ccc", testRegistry),
+			Last:  fmt.Sprintf("https://api.digitalocean.com/v2/registry/%s/repositoriesV2?page=5&page_size=1", testRegistry),
+		},
+	}
+	assert.Equal(t, wantRespLinks, gotRespLinks)
+
+	gotRespMeta := response.Meta
+	wantRespMeta := &Meta{
+		Total: 5,
+	}
+	assert.Equal(t, wantRespMeta, gotRespMeta)
+}
+
 func TestRepository_ListTags(t *testing.T) {
 	setup()
 	defer teardown()
@@ -299,6 +449,94 @@ func TestRegistry_DeleteTag(t *testing.T) {
 	require.NoError(t, err)
 }
 
+func TestRegistry_ListManifests(t *testing.T) {
+	setup()
+	defer teardown()
+
+	wantTags := []*RepositoryManifest{
+		{
+			RegistryName:        testRegistry,
+			Repository:          testRepository,
+			Digest:              testDigest,
+			CompressedSizeBytes: testCompressedSize,
+			SizeBytes:           testSize,
+			UpdatedAt:           testTime,
+			Tags:                []string{"latest", "v1", "v2"},
+			Blobs: []*Blob{
+				{
+					Digest:              "sha256:blob1",
+					CompressedSizeBytes: 998,
+				},
+				{
+					Digest:              "sha256:blob2",
+					CompressedSizeBytes: 1,
+				},
+			},
+		},
+	}
+	getResponseJSON := `{
+	"manifests": [
+		{
+			"registry_name": "` + testRegistry + `",
+			"repository": "` + testRepository + `",
+			"digest": "` + testDigest + `",
+			"compressed_size_bytes": ` + fmt.Sprintf("%d", testCompressedSize) + `,
+			"size_bytes": ` + fmt.Sprintf("%d", testSize) + `,
+			"updated_at": "` + testTimeString + `",
+			"tags": [ "latest", "v1", "v2" ],
+			"blobs": [
+				{
+					"digest": "sha256:blob1",
+					"compressed_size_bytes": 998
+				},
+				{
+
+					"digest": "sha256:blob2",
+					"compressed_size_bytes": 1
+				}
+			]
+		}
+	],
+	"links": {
+	    "pages": {
+			"first": "https://api.digitalocean.com/v2/registry/` + testRegistry + `/repositories/` + testEncodedRepository + `/digests?page=1&page_size=1",
+			"prev": "https://api.digitalocean.com/v2/registry/` + testRegistry + `/repositories/` + testEncodedRepository + `/digests?page=2&page_size=1",
+			"next": "https://api.digitalocean.com/v2/registry/` + testRegistry + `/repositories/` + testEncodedRepository + `/digests?page=4&page_size=1",
+			"last": "https://api.digitalocean.com/v2/registry/` + testRegistry + `/repositories/` + testEncodedRepository + `/digests?page=5&page_size=1"
+		}
+	},
+	"meta": {
+	    "total": 5
+	}
+}`
+
+	mux.HandleFunc(fmt.Sprintf("/v2/registry/%s/repositories/%s/digests", testRegistry, testRepository), func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		testFormValues(t, r, map[string]string{"page": "3", "per_page": "1"})
+		fmt.Fprint(w, getResponseJSON)
+	})
+	got, response, err := client.Registry.ListRepositoryManifests(ctx, testRegistry, testRepository, &ListOptions{Page: 3, PerPage: 1})
+	require.NoError(t, err)
+	require.Equal(t, wantTags, got)
+
+	gotRespLinks := response.Links
+	wantRespLinks := &Links{
+		Pages: &Pages{
+			First: fmt.Sprintf("https://api.digitalocean.com/v2/registry/%s/repositories/%s/digests?page=1&page_size=1", testRegistry, testEncodedRepository),
+			Prev:  fmt.Sprintf("https://api.digitalocean.com/v2/registry/%s/repositories/%s/digests?page=2&page_size=1", testRegistry, testEncodedRepository),
+			Next:  fmt.Sprintf("https://api.digitalocean.com/v2/registry/%s/repositories/%s/digests?page=4&page_size=1", testRegistry, testEncodedRepository),
+			Last:  fmt.Sprintf("https://api.digitalocean.com/v2/registry/%s/repositories/%s/digests?page=5&page_size=1", testRegistry, testEncodedRepository),
+		},
+	}
+	assert.Equal(t, wantRespLinks, gotRespLinks)
+
+	gotRespMeta := response.Meta
+	wantRespMeta := &Meta{
+		Total: 5,
+	}
+	assert.Equal(t, wantRespMeta, gotRespMeta)
+}
+
 func TestRegistry_DeleteManifest(t *testing.T) {
 	setup()
 	defer teardown()
@@ -310,3 +548,420 @@ func TestRegistry_DeleteManifest(t *testing.T) {
 	_, err := client.Registry.DeleteManifest(ctx, testRegistry, testRepository, testDigest)
 	require.NoError(t, err)
 }
+
+func reifyTemplateStr(t *testing.T, tmplStr string, v interface{}) string {
+	tmpl, err := template.New("meow").Parse(tmplStr)
+	require.NoError(t, err)
+
+	s := &strings.Builder{}
+	err = tmpl.Execute(s, v)
+	require.NoError(t, err)
+
+	return s.String()
+}
+
+func TestGarbageCollection_Start(t *testing.T) {
+	setup()
+	defer teardown()
+
+	want := testGarbageCollection
+	requestResponseJSONTmpl := `
+{
+  "garbage_collection": {
+    "uuid": "{{.UUID}}",
+    "registry_name": "{{.RegistryName}}",
+    "status": "{{.Status}}",
+    "type": "{{.Type}}",
+    "created_at": "{{.CreatedAt.Format "2006-01-02T15:04:05Z07:00"}}",
+    "updated_at": "{{.UpdatedAt.Format "2006-01-02T15:04:05Z07:00"}}",
+    "blobs_deleted": {{.BlobsDeleted}},
+    "freed_bytes": {{.FreedBytes}}
+  }
+}`
+	requestResponseJSON := reifyTemplateStr(t, requestResponseJSONTmpl, want)
+
+	createRequest := &StartGarbageCollectionRequest{
+		Type: GCTypeUnreferencedBlobsOnly,
+	}
+	mux.HandleFunc("/v2/registry/"+testRegistry+"/garbage-collection",
+		func(w http.ResponseWriter, r *http.Request) {
+			v := new(StartGarbageCollectionRequest)
+			err := json.NewDecoder(r.Body).Decode(v)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			testMethod(t, r, http.MethodPost)
+			require.Equal(t, v, createRequest)
+			fmt.Fprint(w, requestResponseJSON)
+		})
+
+	got, _, err := client.Registry.StartGarbageCollection(ctx, testRegistry)
+	require.NoError(t, err)
+	require.Equal(t, want, got)
+}
+
+func TestGarbageCollection_Get(t *testing.T) {
+	setup()
+	defer teardown()
+
+	want := testGarbageCollection
+	requestResponseJSONTmpl := `
+{
+  "garbage_collection": {
+    "uuid": "{{.UUID}}",
+    "registry_name": "{{.RegistryName}}",
+    "status": "{{.Status}}",
+    "type": "{{.Type}}",
+    "created_at": "{{.CreatedAt.Format "2006-01-02T15:04:05Z07:00"}}",
+    "updated_at": "{{.UpdatedAt.Format "2006-01-02T15:04:05Z07:00"}}",
+    "blobs_deleted": {{.BlobsDeleted}},
+    "freed_bytes": {{.FreedBytes}}
+  }
+}`
+	requestResponseJSON := reifyTemplateStr(t, requestResponseJSONTmpl, want)
+
+	mux.HandleFunc("/v2/registry/"+testRegistry+"/garbage-collection",
+		func(w http.ResponseWriter, r *http.Request) {
+			testMethod(t, r, http.MethodGet)
+			fmt.Fprint(w, requestResponseJSON)
+		})
+
+	got, _, err := client.Registry.GetGarbageCollection(ctx, testRegistry)
+	require.NoError(t, err)
+	require.Equal(t, want, got)
+}
+
+func TestGarbageCollection_List(t *testing.T) {
+	setup()
+	defer teardown()
+
+	want := []*GarbageCollection{testGarbageCollection}
+	requestResponseJSONTmpl := `
+{
+  "garbage_collections": [
+    {
+      "uuid": "{{.UUID}}",
+      "registry_name": "{{.RegistryName}}",
+      "status": "{{.Status}}",
+      "type": "{{.Type}}",
+      "created_at": "{{.CreatedAt.Format "2006-01-02T15:04:05Z07:00"}}",
+      "updated_at": "{{.UpdatedAt.Format "2006-01-02T15:04:05Z07:00"}}",
+      "blobs_deleted": {{.BlobsDeleted}},
+      "freed_bytes": {{.FreedBytes}}
+    }
+  ],
+	"links": {
+	    "pages": {
+			"next": "https://api.digitalocean.com/v2/registry/` + testRegistry + `/garbage-collections?page=2",
+			"last": "https://api.digitalocean.com/v2/registry/` + testRegistry + `/garbage-collections?page=2"
+		}
+	},
+	"meta": {
+	    "total": 2
+	}
+}`
+	requestResponseJSON := reifyTemplateStr(t, requestResponseJSONTmpl, testGarbageCollection)
+
+	mux.HandleFunc("/v2/registry/"+testRegistry+"/garbage-collections",
+		func(w http.ResponseWriter, r *http.Request) {
+			testMethod(t, r, http.MethodGet)
+			testFormValues(t, r, map[string]string{"page": "1", "per_page": "1"})
+			fmt.Fprint(w, requestResponseJSON)
+		})
+
+	got, resp, err := client.Registry.ListGarbageCollections(ctx, testRegistry, &ListOptions{Page: 1, PerPage: 1})
+	require.NoError(t, err)
+	require.Equal(t, want, got)
+
+	gotRespLinks := resp.Links
+	wantRespLinks := &Links{
+		Pages: &Pages{
+			Next: fmt.Sprintf("https://api.digitalocean.com/v2/registry/%s/garbage-collections?page=2", testRegistry),
+			Last: fmt.Sprintf("https://api.digitalocean.com/v2/registry/%s/garbage-collections?page=2", testRegistry),
+		},
+	}
+	assert.Equal(t, wantRespLinks, gotRespLinks)
+
+	gotRespMeta := resp.Meta
+	wantRespMeta := &Meta{
+		Total: 2,
+	}
+	assert.Equal(t, wantRespMeta, gotRespMeta)
+}
+
+func TestGarbageCollection_Update(t *testing.T) {
+	setup()
+	defer teardown()
+
+	updateRequest := &UpdateGarbageCollectionRequest{
+		Cancel: true,
+	}
+
+	want := testGarbageCollection
+	requestResponseJSONTmpl := `
+{
+  "garbage_collection": {
+    "uuid": "{{.UUID}}",
+    "registry_name": "{{.RegistryName}}",
+    "status": "{{.Status}}",
+    "type": "{{.Type}}",
+    "created_at": "{{.CreatedAt.Format "2006-01-02T15:04:05Z07:00"}}",
+    "updated_at": "{{.UpdatedAt.Format "2006-01-02T15:04:05Z07:00"}}",
+    "blobs_deleted": {{.BlobsDeleted}},
+    "freed_bytes": {{.FreedBytes}}
+  }
+}`
+	requestResponseJSON := reifyTemplateStr(t, requestResponseJSONTmpl, want)
+
+	mux.HandleFunc("/v2/registry/"+testRegistry+"/garbage-collection/"+testGCUUID,
+		func(w http.ResponseWriter, r *http.Request) {
+			v := new(UpdateGarbageCollectionRequest)
+			err := json.NewDecoder(r.Body).Decode(v)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			testMethod(t, r, http.MethodPut)
+			require.Equal(t, v, updateRequest)
+			fmt.Fprint(w, requestResponseJSON)
+		})
+
+	got, _, err := client.Registry.UpdateGarbageCollection(ctx, testRegistry, testGCUUID, updateRequest)
+	require.NoError(t, err)
+	require.Equal(t, want, got)
+}
+
+func TestRegistry_GetOptions(t *testing.T) {
+	responseJSON := `
+{
+  "options": {
+	"available_regions": [
+		"r1",
+		"r2"
+	],
+    "subscription_tiers": [
+      {
+        "name": "Starter",
+        "slug": "starter",
+        "included_repositories": 1,
+        "included_storage_bytes": 524288000,
+        "allow_storage_overage": false,
+        "included_bandwidth_bytes": 524288000,
+        "monthly_price_in_cents": 0,
+        "eligible": false,
+        "eligibility_reasons": [
+          "OverStorageLimit",
+          "OverRepositoryLimit"
+        ]
+      },
+      {
+        "name": "Basic",
+        "slug": "basic",
+        "included_repositories": 5,
+        "included_storage_bytes": 5368709120,
+        "allow_storage_overage": true,
+        "included_bandwidth_bytes": 5368709120,
+        "monthly_price_in_cents": 500,
+        "eligible": false,
+        "eligibility_reasons": [
+          "OverRepositoryLimit"
+        ]
+      },
+      {
+        "name": "Professional",
+        "slug": "professional",
+        "included_repositories": 0,
+        "included_storage_bytes": 107374182400,
+        "allow_storage_overage": true,
+        "included_bandwidth_bytes": 107374182400,
+        "monthly_price_in_cents": 2000,
+        "eligible": true
+      }
+    ]
+  }
+}`
+	want := &RegistryOptions{
+		AvailableRegions: []string{
+			"r1",
+			"r2",
+		},
+		SubscriptionTiers: []*RegistrySubscriptionTier{
+			{
+				Name:                   "Starter",
+				Slug:                   "starter",
+				IncludedRepositories:   1,
+				IncludedStorageBytes:   524288000,
+				AllowStorageOverage:    false,
+				IncludedBandwidthBytes: 524288000,
+				MonthlyPriceInCents:    0,
+				Eligible:               false,
+				EligibilityReasons: []string{
+					"OverStorageLimit",
+					"OverRepositoryLimit",
+				},
+			},
+			{
+				Name:                   "Basic",
+				Slug:                   "basic",
+				IncludedRepositories:   5,
+				IncludedStorageBytes:   5368709120,
+				AllowStorageOverage:    true,
+				IncludedBandwidthBytes: 5368709120,
+				MonthlyPriceInCents:    500,
+				Eligible:               false,
+				EligibilityReasons: []string{
+					"OverRepositoryLimit",
+				},
+			},
+			{
+				Name:                   "Professional",
+				Slug:                   "professional",
+				IncludedRepositories:   0,
+				IncludedStorageBytes:   107374182400,
+				AllowStorageOverage:    true,
+				IncludedBandwidthBytes: 107374182400,
+				MonthlyPriceInCents:    2000,
+				Eligible:               true,
+			},
+		},
+	}
+
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/v2/registry/options", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		fmt.Fprint(w, responseJSON)
+	})
+
+	got, _, err := client.Registry.GetOptions(ctx)
+	require.NoError(t, err)
+	require.Equal(t, want, got)
+}
+
+func TestRegistry_GetSubscription(t *testing.T) {
+	setup()
+	defer teardown()
+
+	want := &RegistrySubscription{
+		Tier: &RegistrySubscriptionTier{
+			Name:                   "Basic",
+			Slug:                   "basic",
+			IncludedRepositories:   5,
+			IncludedStorageBytes:   5368709120,
+			AllowStorageOverage:    true,
+			IncludedBandwidthBytes: 5368709120,
+			MonthlyPriceInCents:    500,
+		},
+		CreatedAt: testTime,
+		UpdatedAt: testTime,
+	}
+
+	getResponseJSON := `
+{
+  "subscription": {
+    "tier": {
+      "name": "Basic",
+      "slug": "basic",
+      "included_repositories": 5,
+      "included_storage_bytes": 5368709120,
+      "allow_storage_overage": true,
+      "included_bandwidth_bytes": 5368709120,
+      "monthly_price_in_cents": 500
+    },
+    "created_at": "` + testTimeString + `",
+    "updated_at": "` + testTimeString + `"
+  }
+}
+`
+
+	mux.HandleFunc("/v2/registry/subscription", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		fmt.Fprint(w, getResponseJSON)
+	})
+	got, _, err := client.Registry.GetSubscription(ctx)
+	require.NoError(t, err)
+	require.Equal(t, want, got)
+}
+
+func TestRegistry_UpdateSubscription(t *testing.T) {
+	setup()
+	defer teardown()
+
+	updateRequest := &RegistrySubscriptionUpdateRequest{
+		TierSlug: "professional",
+	}
+
+	want := &RegistrySubscription{
+		Tier: &RegistrySubscriptionTier{
+			Name:                   "Professional",
+			Slug:                   "professional",
+			IncludedRepositories:   0,
+			IncludedStorageBytes:   107374182400,
+			AllowStorageOverage:    true,
+			IncludedBandwidthBytes: 107374182400,
+			MonthlyPriceInCents:    2000,
+			Eligible:               true,
+		},
+		CreatedAt: testTime,
+		UpdatedAt: testTime,
+	}
+
+	updateResponseJSON := `{
+  "subscription": {
+    "tier": {
+        "name": "Professional",
+        "slug": "professional",
+        "included_repositories": 0,
+        "included_storage_bytes": 107374182400,
+        "allow_storage_overage": true,
+        "included_bandwidth_bytes": 107374182400,
+        "monthly_price_in_cents": 2000,
+        "eligible": true
+      },
+    "created_at": "` + testTimeString + `",
+    "updated_at": "` + testTimeString + `"
+  }
+}`
+
+	mux.HandleFunc("/v2/registry/subscription",
+		func(w http.ResponseWriter, r *http.Request) {
+			v := new(RegistrySubscriptionUpdateRequest)
+			err := json.NewDecoder(r.Body).Decode(v)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			testMethod(t, r, http.MethodPost)
+			require.Equal(t, v, updateRequest)
+			fmt.Fprint(w, updateResponseJSON)
+		})
+
+	got, _, err := client.Registry.UpdateSubscription(ctx, updateRequest)
+	require.NoError(t, err)
+	require.Equal(t, want, got)
+}
+
+func TestRegistry_ValidateName(t *testing.T) {
+	setup()
+	defer teardown()
+
+	validateNameRequest := &RegistryValidateNameRequest{
+		Name: testRegistry,
+	}
+
+	mux.HandleFunc("/v2/registry/validate-name", func(w http.ResponseWriter, r *http.Request) {
+		v := new(RegistryValidateNameRequest)
+		err := json.NewDecoder(r.Body).Decode(v)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		testMethod(t, r, http.MethodPost)
+		require.Equal(t, v, validateNameRequest)
+	})
+
+	_, err := client.Registry.ValidateName(ctx, validateNameRequest)
+	require.NoError(t, err)
+}
diff --git a/reserved_ips.go b/reserved_ips.go
new file mode 100644
index 0000000..5370c14
--- /dev/null
+++ b/reserved_ips.go
@@ -0,0 +1,148 @@
+package godo
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+)
+
+const resourceType = "ReservedIP"
+const reservedIPsBasePath = "v2/reserved_ips"
+
+// ReservedIPsService is an interface for interfacing with the reserved IPs
+// endpoints of the Digital Ocean API.
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Reserved-IPs
+type ReservedIPsService interface {
+	List(context.Context, *ListOptions) ([]ReservedIP, *Response, error)
+	Get(context.Context, string) (*ReservedIP, *Response, error)
+	Create(context.Context, *ReservedIPCreateRequest) (*ReservedIP, *Response, error)
+	Delete(context.Context, string) (*Response, error)
+}
+
+// ReservedIPsServiceOp handles communication with the reserved IPs related methods of the
+// DigitalOcean API.
+type ReservedIPsServiceOp struct {
+	client *Client
+}
+
+var _ ReservedIPsService = &ReservedIPsServiceOp{}
+
+// ReservedIP represents a Digital Ocean reserved IP.
+type ReservedIP struct {
+	Region    *Region  `json:"region"`
+	Droplet   *Droplet `json:"droplet"`
+	IP        string   `json:"ip"`
+	ProjectID string   `json:"project_id"`
+	Locked    bool     `json:"locked"`
+}
+
+func (f ReservedIP) String() string {
+	return Stringify(f)
+}
+
+// URN returns the reserved IP in a valid DO API URN form.
+func (f ReservedIP) URN() string {
+	return ToURN(resourceType, f.IP)
+}
+
+type reservedIPsRoot struct {
+	ReservedIPs []ReservedIP `json:"reserved_ips"`
+	Links       *Links       `json:"links"`
+	Meta        *Meta        `json:"meta"`
+}
+
+type reservedIPRoot struct {
+	ReservedIP *ReservedIP `json:"reserved_ip"`
+	Links      *Links      `json:"links,omitempty"`
+}
+
+// ReservedIPCreateRequest represents a request to create a reserved IP.
+// Specify DropletID to assign the reserved IP to a Droplet or Region
+// to reserve it to the region.
+type ReservedIPCreateRequest struct {
+	Region    string `json:"region,omitempty"`
+	DropletID int    `json:"droplet_id,omitempty"`
+	ProjectID string `json:"project_id,omitempty"`
+}
+
+// List all reserved IPs.
+func (r *ReservedIPsServiceOp) List(ctx context.Context, opt *ListOptions) ([]ReservedIP, *Response, error) {
+	path := reservedIPsBasePath
+	path, err := addOptions(path, opt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := r.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(reservedIPsRoot)
+	resp, err := r.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	if l := root.Links; l != nil {
+		resp.Links = l
+	}
+	if m := root.Meta; m != nil {
+		resp.Meta = m
+	}
+
+	return root.ReservedIPs, resp, err
+}
+
+// Get an individual reserved IP.
+func (r *ReservedIPsServiceOp) Get(ctx context.Context, ip string) (*ReservedIP, *Response, error) {
+	path := fmt.Sprintf("%s/%s", reservedIPsBasePath, ip)
+
+	req, err := r.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(reservedIPRoot)
+	resp, err := r.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return root.ReservedIP, resp, err
+}
+
+// Create a reserved IP. If the DropletID field of the request is not empty,
+// the reserved IP will also be assigned to the droplet.
+func (r *ReservedIPsServiceOp) Create(ctx context.Context, createRequest *ReservedIPCreateRequest) (*ReservedIP, *Response, error) {
+	path := reservedIPsBasePath
+
+	req, err := r.client.NewRequest(ctx, http.MethodPost, path, createRequest)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(reservedIPRoot)
+	resp, err := r.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	if l := root.Links; l != nil {
+		resp.Links = l
+	}
+
+	return root.ReservedIP, resp, err
+}
+
+// Delete a reserved IP.
+func (r *ReservedIPsServiceOp) Delete(ctx context.Context, ip string) (*Response, error) {
+	path := fmt.Sprintf("%s/%s", reservedIPsBasePath, ip)
+
+	req, err := r.client.NewRequest(ctx, http.MethodDelete, path, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := r.client.Do(ctx, req, nil)
+
+	return resp, err
+}
diff --git a/reserved_ips_actions.go b/reserved_ips_actions.go
new file mode 100644
index 0000000..8a9e240
--- /dev/null
+++ b/reserved_ips_actions.go
@@ -0,0 +1,109 @@
+package godo
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+)
+
+// ReservedIPActionsService is an interface for interfacing with the
+// reserved IPs actions endpoints of the Digital Ocean API.
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Reserved-IP-Actions
+type ReservedIPActionsService interface {
+	Assign(ctx context.Context, ip string, dropletID int) (*Action, *Response, error)
+	Unassign(ctx context.Context, ip string) (*Action, *Response, error)
+	Get(ctx context.Context, ip string, actionID int) (*Action, *Response, error)
+	List(ctx context.Context, ip string, opt *ListOptions) ([]Action, *Response, error)
+}
+
+// ReservedIPActionsServiceOp handles communication with the reserved IPs
+// action related methods of the DigitalOcean API.
+type ReservedIPActionsServiceOp struct {
+	client *Client
+}
+
+// Assign a reserved IP to a droplet.
+func (s *ReservedIPActionsServiceOp) Assign(ctx context.Context, ip string, dropletID int) (*Action, *Response, error) {
+	request := &ActionRequest{
+		"type":       "assign",
+		"droplet_id": dropletID,
+	}
+	return s.doAction(ctx, ip, request)
+}
+
+// Unassign a reserved IP from the droplet it is currently assigned to.
+func (s *ReservedIPActionsServiceOp) Unassign(ctx context.Context, ip string) (*Action, *Response, error) {
+	request := &ActionRequest{"type": "unassign"}
+	return s.doAction(ctx, ip, request)
+}
+
+// Get an action for a particular reserved IP by id.
+func (s *ReservedIPActionsServiceOp) Get(ctx context.Context, ip string, actionID int) (*Action, *Response, error) {
+	path := fmt.Sprintf("%s/%d", reservedIPActionPath(ip), actionID)
+	return s.get(ctx, path)
+}
+
+// List the actions for a particular reserved IP.
+func (s *ReservedIPActionsServiceOp) List(ctx context.Context, ip string, opt *ListOptions) ([]Action, *Response, error) {
+	path := reservedIPActionPath(ip)
+	path, err := addOptions(path, opt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return s.list(ctx, path)
+}
+
+func (s *ReservedIPActionsServiceOp) doAction(ctx context.Context, ip string, request *ActionRequest) (*Action, *Response, error) {
+	path := reservedIPActionPath(ip)
+
+	req, err := s.client.NewRequest(ctx, http.MethodPost, path, request)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(actionRoot)
+	resp, err := s.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return root.Event, resp, err
+}
+
+func (s *ReservedIPActionsServiceOp) get(ctx context.Context, path string) (*Action, *Response, error) {
+	req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(actionRoot)
+	resp, err := s.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return root.Event, resp, err
+}
+
+func (s *ReservedIPActionsServiceOp) list(ctx context.Context, path string) ([]Action, *Response, error) {
+	req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(actionsRoot)
+	resp, err := s.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	if l := root.Links; l != nil {
+		resp.Links = l
+	}
+
+	return root.Actions, resp, err
+}
+
+func reservedIPActionPath(ip string) string {
+	return fmt.Sprintf("%s/%s/actions", reservedIPsBasePath, ip)
+}
diff --git a/reserved_ips_actions_test.go b/reserved_ips_actions_test.go
new file mode 100644
index 0000000..f329f29
--- /dev/null
+++ b/reserved_ips_actions_test.go
@@ -0,0 +1,167 @@
+package godo
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"reflect"
+	"testing"
+)
+
+func TestReservedIPsActions_Assign(t *testing.T) {
+	setup()
+	defer teardown()
+	dropletID := 12345
+	assignRequest := &ActionRequest{
+		"droplet_id": float64(dropletID), // encoding/json decodes numbers as floats
+		"type":       "assign",
+	}
+
+	mux.HandleFunc("/v2/reserved_ips/192.168.0.1/actions", func(w http.ResponseWriter, r *http.Request) {
+		v := new(ActionRequest)
+		err := json.NewDecoder(r.Body).Decode(v)
+		if err != nil {
+			t.Fatalf("decode json: %v", err)
+		}
+
+		testMethod(t, r, http.MethodPost)
+		if !reflect.DeepEqual(v, assignRequest) {
+			t.Errorf("Request body = %#v, expected %#v", v, assignRequest)
+		}
+
+		fmt.Fprintf(w, `{"action":{"status":"in-progress"}}`)
+
+	})
+
+	assign, _, err := client.ReservedIPActions.Assign(ctx, "192.168.0.1", 12345)
+	if err != nil {
+		t.Errorf("ReservedIPsActions.Assign returned error: %v", err)
+	}
+
+	expected := &Action{Status: "in-progress"}
+	if !reflect.DeepEqual(assign, expected) {
+		t.Errorf("ReservedIPsActions.Assign returned %+v, expected %+v", assign, expected)
+	}
+}
+
+func TestReservedIPsActions_Unassign(t *testing.T) {
+	setup()
+	defer teardown()
+
+	unassignRequest := &ActionRequest{
+		"type": "unassign",
+	}
+
+	mux.HandleFunc("/v2/reserved_ips/192.168.0.1/actions", func(w http.ResponseWriter, r *http.Request) {
+		v := new(ActionRequest)
+		err := json.NewDecoder(r.Body).Decode(v)
+		if err != nil {
+			t.Fatalf("decode json: %v", err)
+		}
+
+		testMethod(t, r, http.MethodPost)
+		if !reflect.DeepEqual(v, unassignRequest) {
+			t.Errorf("Request body = %+v, expected %+v", v, unassignRequest)
+		}
+
+		fmt.Fprintf(w, `{"action":{"status":"in-progress"}}`)
+	})
+
+	action, _, err := client.ReservedIPActions.Unassign(ctx, "192.168.0.1")
+	if err != nil {
+		t.Errorf("ReservedIPsActions.Get returned error: %v", err)
+	}
+
+	expected := &Action{Status: "in-progress"}
+	if !reflect.DeepEqual(action, expected) {
+		t.Errorf("ReservedIPsActions.Get returned %+v, expected %+v", action, expected)
+	}
+}
+
+func TestReservedIPsActions_Get(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/v2/reserved_ips/192.168.0.1/actions/456", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		fmt.Fprintf(w, `{"action":{"status":"in-progress"}}`)
+	})
+
+	action, _, err := client.ReservedIPActions.Get(ctx, "192.168.0.1", 456)
+	if err != nil {
+		t.Errorf("ReservedIPsActions.Get returned error: %v", err)
+	}
+
+	expected := &Action{Status: "in-progress"}
+	if !reflect.DeepEqual(action, expected) {
+		t.Errorf("ReservedIPsActions.Get returned %+v, expected %+v", action, expected)
+	}
+}
+
+func TestReservedIPsActions_List(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/v2/reserved_ips/192.168.0.1/actions", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		fmt.Fprintf(w, `{"actions":[{"status":"in-progress"}]}`)
+	})
+
+	actions, _, err := client.ReservedIPActions.List(ctx, "192.168.0.1", nil)
+	if err != nil {
+		t.Errorf("ReservedIPsActions.List returned error: %v", err)
+	}
+
+	expected := []Action{{Status: "in-progress"}}
+	if !reflect.DeepEqual(actions, expected) {
+		t.Errorf("ReservedIPsActions.List returned %+v, expected %+v", actions, expected)
+	}
+}
+
+func TestReservedIPsActions_ListMultiplePages(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/v2/reserved_ips/192.168.0.1/actions", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		fmt.Fprint(w, `{"actions":[{"status":"in-progress"}], "links":{"pages":{"next":"http://example.com/v2/reserved_ips/192.168.0.1/actions?page=2"}}}`)
+	})
+
+	_, resp, err := client.ReservedIPActions.List(ctx, "192.168.0.1", nil)
+	if err != nil {
+		t.Errorf("ReservedIPsActions.List returned error: %v", err)
+	}
+
+	checkCurrentPage(t, resp, 1)
+}
+
+func TestReservedIPsActions_ListPageByNumber(t *testing.T) {
+	setup()
+	defer teardown()
+
+	jBlob := `
+	{
+		"actions":[{"status":"in-progress"}],
+		"links":{
+			"pages":{
+				"next":"http://example.com/v2/reserved_ips/?page=3",
+				"prev":"http://example.com/v2/reserved_ips/?page=1",
+				"last":"http://example.com/v2/reserved_ips/?page=3",
+				"first":"http://example.com/v2/reserved_ips/?page=1"
+			}
+		}
+	}`
+
+	mux.HandleFunc("/v2/reserved_ips/192.168.0.1/actions", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		fmt.Fprint(w, jBlob)
+	})
+
+	opt := &ListOptions{Page: 2}
+	_, resp, err := client.ReservedIPActions.List(ctx, "192.168.0.1", opt)
+	if err != nil {
+		t.Errorf("ReservedIPsActions.List returned error: %v", err)
+	}
+
+	checkCurrentPage(t, resp, 2)
+}
diff --git a/reserved_ips_test.go b/reserved_ips_test.go
new file mode 100644
index 0000000..585a14e
--- /dev/null
+++ b/reserved_ips_test.go
@@ -0,0 +1,167 @@
+package godo
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"reflect"
+	"testing"
+)
+
+func TestReservedIPs_ListReservedIPs(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/v2/reserved_ips", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		fmt.Fprint(w, `{"reserved_ips": [
+			{"region":{"slug":"nyc3"},"droplet":{"id":1},"ip":"192.168.0.1","project_id":"46d8977a-35cd-11ed-909f-43c99bbf6032", "locked":false},
+			{"region":{"slug":"nyc3"},"droplet":{"id":2},"ip":"192.168.0.2","project_id":"46d8977a-35cd-11ed-909f-43c99bbf6032", "locked":false}],
+			"meta":{"total":2}
+		}`)
+	})
+
+	reservedIPs, resp, err := client.ReservedIPs.List(ctx, nil)
+	if err != nil {
+		t.Errorf("ReservedIPs.List returned error: %v", err)
+	}
+
+	expectedReservedIPs := []ReservedIP{
+		{Region: &Region{Slug: "nyc3"}, Droplet: &Droplet{ID: 1}, IP: "192.168.0.1", Locked: false, ProjectID: "46d8977a-35cd-11ed-909f-43c99bbf6032"},
+		{Region: &Region{Slug: "nyc3"}, Droplet: &Droplet{ID: 2}, IP: "192.168.0.2", Locked: false, ProjectID: "46d8977a-35cd-11ed-909f-43c99bbf6032"},
+	}
+	if !reflect.DeepEqual(reservedIPs, expectedReservedIPs) {
+		t.Errorf("ReservedIPs.List returned reserved IPs %+v, expected %+v", reservedIPs, expectedReservedIPs)
+	}
+
+	expectedMeta := &Meta{
+		Total: 2,
+	}
+	if !reflect.DeepEqual(resp.Meta, expectedMeta) {
+		t.Errorf("ReservedIPs.List returned meta %+v, expected %+v", resp.Meta, expectedMeta)
+	}
+}
+
+func TestReservedIPs_ListReservedIPsMultiplePages(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/v2/reserved_ips", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		fmt.Fprint(w, `{"reserved_ips": [
+			{"region":{"slug":"nyc3"},"droplet":{"id":1},"ip":"192.168.0.1","project_id":"46d8977a-35cd-11ed-909f-43c99bbf6032", "locked":false},
+			{"region":{"slug":"nyc3"},"droplet":{"id":2},"ip":"192.168.0.2","project_id":"46d8977a-35cd-11ed-909f-43c99bbf6032", "locked":false}],
+			"links":{"pages":{"next":"http://example.com/v2/reserved_ips/?page=2"}}}
+		`)
+	})
+
+	_, resp, err := client.ReservedIPs.List(ctx, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	checkCurrentPage(t, resp, 1)
+}
+
+func TestReservedIPs_RetrievePageByNumber(t *testing.T) {
+	setup()
+	defer teardown()
+
+	jBlob := `
+	{
+		"reserved_ips": [
+			{"region":{"slug":"nyc3"},"droplet":{"id":1},"ip":"192.168.0.1","project_id":"46d8977a-35cd-11ed-909f-43c99bbf6032", "locked":false},
+			{"region":{"slug":"nyc3"},"droplet":{"id":2},"ip":"192.168.0.2","project_id":"46d8977a-35cd-11ed-909f-43c99bbf6032", "locked":false}],
+		"links":{
+			"pages":{
+				"next":"http://example.com/v2/reserved_ips/?page=3",
+				"prev":"http://example.com/v2/reserved_ips/?page=1",
+				"last":"http://example.com/v2/reserved_ips/?page=3",
+				"first":"http://example.com/v2/reserved_ips/?page=1"
+			}
+		}
+	}`
+
+	mux.HandleFunc("/v2/reserved_ips", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		fmt.Fprint(w, jBlob)
+	})
+
+	opt := &ListOptions{Page: 2}
+	_, resp, err := client.ReservedIPs.List(ctx, opt)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	checkCurrentPage(t, resp, 2)
+}
+
+func TestReservedIPs_Get(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/v2/reserved_ips/192.168.0.1", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		fmt.Fprint(w, `{"reserved_ip":{"region":{"slug":"nyc3"},"droplet":{"id":1},"ip":"192.168.0.1","project_id":"46d8977a-35cd-11ed-909f-43c99bbf6032", "locked":false}}`)
+	})
+
+	reservedIP, _, err := client.ReservedIPs.Get(ctx, "192.168.0.1")
+	if err != nil {
+		t.Errorf("domain.Get returned error: %v", err)
+	}
+
+	expected := &ReservedIP{Region: &Region{Slug: "nyc3"}, Droplet: &Droplet{ID: 1}, IP: "192.168.0.1", Locked: false, ProjectID: "46d8977a-35cd-11ed-909f-43c99bbf6032"}
+	if !reflect.DeepEqual(reservedIP, expected) {
+		t.Errorf("ReservedIPs.Get returned %+v, expected %+v", reservedIP, expected)
+	}
+}
+
+func TestReservedIPs_Create(t *testing.T) {
+	setup()
+	defer teardown()
+
+	createRequest := &ReservedIPCreateRequest{
+		Region:    "nyc3",
+		DropletID: 1,
+		ProjectID: "46d8977a-35cd-11ed-909f-43c99bbf6032",
+	}
+
+	mux.HandleFunc("/v2/reserved_ips", func(w http.ResponseWriter, r *http.Request) {
+		v := new(ReservedIPCreateRequest)
+		err := json.NewDecoder(r.Body).Decode(v)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		testMethod(t, r, http.MethodPost)
+		if !reflect.DeepEqual(v, createRequest) {
+			t.Errorf("Request body = %+v, expected %+v", v, createRequest)
+		}
+
+		fmt.Fprint(w, `{"reserved_ip":{"region":{"slug":"nyc3"},"droplet":{"id":1},"ip":"192.168.0.1","project_id":"46d8977a-35cd-11ed-909f-43c99bbf6032", "locked":false}}`)
+	})
+
+	reservedIP, _, err := client.ReservedIPs.Create(ctx, createRequest)
+	if err != nil {
+		t.Errorf("ReservedIPs.Create returned error: %v", err)
+	}
+
+	expected := &ReservedIP{Region: &Region{Slug: "nyc3"}, Droplet: &Droplet{ID: 1}, IP: "192.168.0.1", Locked: false, ProjectID: "46d8977a-35cd-11ed-909f-43c99bbf6032"}
+	if !reflect.DeepEqual(reservedIP, expected) {
+		t.Errorf("ReservedIPs.Create returned %+v, expected %+v", reservedIP, expected)
+	}
+}
+
+func TestReservedIPs_Destroy(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/v2/reserved_ips/192.168.0.1", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodDelete)
+	})
+
+	_, err := client.ReservedIPs.Delete(ctx, "192.168.0.1")
+	if err != nil {
+		t.Errorf("ReservedIPs.Delete returned error: %v", err)
+	}
+}
diff --git a/sizes.go b/sizes.go
index d2b93ea..a3cb745 100644
--- a/sizes.go
+++ b/sizes.go
@@ -7,7 +7,7 @@ import (
 
 // SizesService is an interface for interfacing with the size
 // endpoints of the DigitalOcean API
-// See: https://developers.digitalocean.com/documentation/v2#sizes
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Sizes
 type SizesService interface {
 	List(context.Context, *ListOptions) ([]Size, *Response, error)
 }
@@ -31,6 +31,7 @@ type Size struct {
 	Regions      []string `json:"regions,omitempty"`
 	Available    bool     `json:"available,omitempty"`
 	Transfer     float64  `json:"transfer,omitempty"`
+	Description  string   `json:"description,omitempty"`
 }
 
 func (s Size) String() string {
diff --git a/sizes_test.go b/sizes_test.go
index 5c6bdd3..784a88f 100644
--- a/sizes_test.go
+++ b/sizes_test.go
@@ -11,15 +11,66 @@ func TestSizes_List(t *testing.T) {
 	setup()
 	defer teardown()
 
+	expectedSizes := []Size{
+		{
+			Slug:         "s-1vcpu-1gb",
+			Memory:       1024,
+			Vcpus:        1,
+			Disk:         25,
+			PriceMonthly: 5,
+			PriceHourly:  0.00744,
+			Regions:      []string{"nyc1", "nyc2"},
+			Available:    true,
+			Transfer:     1,
+			Description:  "Basic",
+		},
+		{
+			Slug:         "512mb",
+			Memory:       512,
+			Vcpus:        1,
+			Disk:         20,
+			PriceMonthly: 5,
+			PriceHourly:  0.00744,
+			Regions:      []string{"nyc1", "nyc2"},
+			Available:    true,
+			Transfer:     1,
+			Description:  "Legacy Basic",
+		},
+	}
+
 	mux.HandleFunc("/v2/sizes", func(w http.ResponseWriter, r *http.Request) {
 		testMethod(t, r, http.MethodGet)
 		fmt.Fprint(w, `{
 			"sizes": [
 				{
-					"slug": "1"
+					"slug": "s-1vcpu-1gb",
+					"memory": 1024,
+					"vcpus": 1,
+					"disk": 25,
+					"transfer": 1,
+					"price_monthly": 5,
+					"price_hourly": 0.00744,
+					"regions": [
+						"nyc1",
+						"nyc2"
+					],
+					"available": true,
+					"description": "Basic"
 				},
 				{
-					"slug": "2"
+					"slug": "512mb",
+					"memory": 512,
+					"vcpus": 1,
+					"disk": 20,
+					"transfer": 1,
+					"price_monthly": 5,
+					"price_hourly": 0.00744,
+					"regions": [
+						"nyc1",
+						"nyc2"
+					],
+					"available": true,
+					"description": "Legacy Basic"
 				}
 			],
 			"meta": {
@@ -33,7 +84,6 @@ func TestSizes_List(t *testing.T) {
 		t.Errorf("Sizes.List returned error: %v", err)
 	}
 
-	expectedSizes := []Size{{Slug: "1"}, {Slug: "2"}}
 	if !reflect.DeepEqual(sizes, expectedSizes) {
 		t.Errorf("Sizes.List returned sizes %+v, expected %+v", sizes, expectedSizes)
 	}
@@ -103,10 +153,11 @@ func TestSize_String(t *testing.T) {
 		Regions:      []string{"1", "2"},
 		Available:    true,
 		Transfer:     789,
+		Description:  "Basic",
 	}
 
 	stringified := size.String()
-	expected := `godo.Size{Slug:"slize", Memory:123, Vcpus:456, Disk:789, PriceMonthly:123, PriceHourly:456, Regions:["1" "2"], Available:true, Transfer:789}`
+	expected := `godo.Size{Slug:"slize", Memory:123, Vcpus:456, Disk:789, PriceMonthly:123, PriceHourly:456, Regions:["1" "2"], Available:true, Transfer:789, Description:"Basic"}`
 	if expected != stringified {
 		t.Errorf("Size.String returned %+v, expected %+v", stringified, expected)
 	}
diff --git a/snapshots.go b/snapshots.go
index cf95ccc..13a06ca 100644
--- a/snapshots.go
+++ b/snapshots.go
@@ -10,7 +10,7 @@ const snapshotBasePath = "v2/snapshots"
 
 // SnapshotsService is an interface for interfacing with the snapshots
 // endpoints of the DigitalOcean API
-// See: https://developers.digitalocean.com/documentation/v2#snapshots
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Snapshots
 type SnapshotsService interface {
 	List(context.Context, *ListOptions) ([]Snapshot, *Response, error)
 	ListVolume(context.Context, *ListOptions) ([]Snapshot, *Response, error)
@@ -75,7 +75,7 @@ func (s *SnapshotsServiceOp) ListVolume(ctx context.Context, opt *ListOptions) (
 	return s.list(ctx, opt, &listOpt)
 }
 
-// Get retrieves an snapshot by id.
+// Get retrieves a snapshot by id.
 func (s *SnapshotsServiceOp) Get(ctx context.Context, snapshotID string) (*Snapshot, *Response, error) {
 	return s.get(ctx, snapshotID)
 }
diff --git a/storage.go b/storage.go
index e1dda59..7700ffa 100644
--- a/storage.go
+++ b/storage.go
@@ -15,7 +15,7 @@ const (
 
 // StorageService is an interface for interfacing with the storage
 // endpoints of the Digital Ocean API.
-// See: https://developers.digitalocean.com/documentation/v2/#block-storage
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Block-Storage
 type StorageService interface {
 	ListVolumes(context.Context, *ListVolumeParams) ([]Volume, *Response, error)
 	GetVolume(context.Context, string) (*Volume, *Response, error)
@@ -60,6 +60,7 @@ func (f Volume) String() string {
 	return Stringify(f)
 }
 
+// URN returns the volume ID as a valid DO API URN
 func (f Volume) URN() string {
 	return ToURN("Volume", f.ID)
 }
diff --git a/storage_actions.go b/storage_actions.go
index 234aba9..49e30cf 100644
--- a/storage_actions.go
+++ b/storage_actions.go
@@ -8,7 +8,7 @@ import (
 
 // StorageActionsService is an interface for interfacing with the
 // storage actions endpoints of the Digital Ocean API.
-// See: https://developers.digitalocean.com/documentation/v2#storage-actions
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Block-Storage-Actions
 type StorageActionsService interface {
 	Attach(ctx context.Context, volumeID string, dropletID int) (*Action, *Response, error)
 	DetachByDropletID(ctx context.Context, volumeID string, dropletID int) (*Action, *Response, error)
@@ -23,7 +23,7 @@ type StorageActionsServiceOp struct {
 	client *Client
 }
 
-// StorageAttachment represents the attachement of a block storage
+// StorageAttachment represents the attachment of a block storage
 // volume to a specific Droplet under the device name.
 type StorageAttachment struct {
 	DropletID int `json:"droplet_id"`
diff --git a/strings.go b/strings.go
index 4d5c0ad..f92893e 100644
--- a/strings.go
+++ b/strings.go
@@ -10,6 +10,8 @@ import (
 
 var timestampType = reflect.TypeOf(Timestamp{})
 
+// ResourceWithURN is an interface for interfacing with the types
+// that implement the URN method.
 type ResourceWithURN interface {
 	URN() string
 }
diff --git a/tags.go b/tags.go
index 6301e15..a19a4b0 100644
--- a/tags.go
+++ b/tags.go
@@ -10,7 +10,7 @@ const tagsBasePath = "v2/tags"
 
 // TagsService is an interface for interfacing with the tags
 // endpoints of the DigitalOcean API
-// See: https://developers.digitalocean.com/documentation/v2#tags
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Tags
 type TagsService interface {
 	List(context.Context, *ListOptions) ([]Tag, *Response, error)
 	Get(context.Context, string) (*Tag, *Response, error)
@@ -95,7 +95,7 @@ type Tag struct {
 	Resources *TaggedResources `json:"resources,omitempty"`
 }
 
-//TagCreateRequest represents the JSON structure of a request of that type.
+// TagCreateRequest represents the JSON structure of a request of that type.
 type TagCreateRequest struct {
 	Name string `json:"name"`
 }
diff --git a/tags_test.go b/tags_test.go
index 1a75799..b491ae7 100644
--- a/tags_test.go
+++ b/tags_test.go
@@ -286,6 +286,18 @@ func TestTags_ListPaging(t *testing.T) {
 	checkCurrentPage(t, resp, 2)
 }
 
+func assertStringTagEqual(t *testing.T, tag string, compareTag, errorMessage string) {
+	if tag != compareTag {
+		t.Errorf(errorMessage)
+	}
+}
+
+func assertIntTagEqual(t *testing.T, tag, compareTag int, errorMessage string) {
+	if tag != compareTag {
+		t.Errorf(errorMessage)
+	}
+}
+
 func TestTags_Get(t *testing.T) {
 	setup()
 	defer teardown()
@@ -300,61 +312,21 @@ func TestTags_Get(t *testing.T) {
 		t.Errorf("Tags.Get returned error: %v", err)
 	}
 
-	if tag.Name != "testing-1" {
-		t.Errorf("Tags.Get return an incorrect name, got %+v, expected %+v", tag.Name, "testing-1")
-	}
-
-	if tag.Resources.Count != 5 {
-		t.Errorf("Tags.Get return an incorrect resource count, got %+v, expected %+v", tag.Resources.Count, 5)
-	}
-
-	if tag.Resources.LastTaggedURI != "https://api.digitalocean.com/v2/droplets/1" {
-		t.Errorf("Tags.Get return an incorrect last tagged uri %+v, expected %+v", tag.Resources.LastTaggedURI, "https://api.digitalocean.com/v2/droplets/1")
-	}
-
-	if tag.Resources.Droplets.Count != 1 {
-		t.Errorf("Tags.Get return an incorrect droplet resource count, got %+v, expected %+v", tag.Resources.Droplets.Count, 1)
-	}
-
-	if tag.Resources.Droplets.LastTagged.ID != 1 {
-		t.Errorf("Tags.Get return an incorrect last tagged droplet %+v, expected %+v", tag.Resources.Droplets.LastTagged.ID, 1)
-	}
-
-	if tag.Resources.Droplets.LastTaggedURI != "https://api.digitalocean.com/v2/droplets/1" {
-		t.Errorf("Tags.Get return an incorrect last tagged droplet uri %+v, expected %+v", tag.Resources.Droplets.LastTaggedURI, "https://api.digitalocean.com/v2/droplets/1")
-	}
-
-	if tag.Resources.Images.Count != 1 {
-		t.Errorf("Tags.Get return an incorrect image resource count, got %+v, expected %+v", tag.Resources.Images.Count, 1)
-	}
-
-	if tag.Resources.Images.LastTaggedURI != "https://api.digitalocean.com/v2/images/1" {
-		t.Errorf("Tags.Get return an incorrect last tagged droplet uri %+v, expected %+v", tag.Resources.Images.LastTaggedURI, "https://api.digitalocean.com/v2/images/1")
-	}
-
-	if tag.Resources.Volumes.Count != 1 {
-		t.Errorf("Tags.Get return an incorrect volume resource count, got %+v, expected %+v", tag.Resources.Volumes.Count, 1)
-	}
-
-	if tag.Resources.Volumes.LastTaggedURI != "https://api.digitalocean.com/v2/volumes/abc" {
-		t.Errorf("Tags.Get return an incorrect last tagged volume uri %+v, expected %+v", tag.Resources.Volumes.LastTaggedURI, "https://api.digitalocean.com/v2/volumes/abc")
-	}
-
-	if tag.Resources.VolumeSnapshots.Count != 1 {
-		t.Errorf("Tags.Get return an incorrect volume snapshot resource count, got %+v, expected %+v", tag.Resources.VolumeSnapshots.Count, 1)
-	}
-
-	if tag.Resources.VolumeSnapshots.LastTaggedURI != "https://api.digitalocean.com/v2/snapshots/1" {
-		t.Errorf("Tags.Get return an incorrect last tagged volume snapshot uri %+v, expected %+v", tag.Resources.VolumeSnapshots.LastTaggedURI, "https://api.digitalocean.com/v2/snapshots/1")
-	}
-
-	if tag.Resources.Databases.Count != 1 {
-		t.Errorf("Tags.Get return an incorrect database resource count, got %+v, expected %+v", tag.Resources.Databases.Count, 1)
-	}
+	assertStringTagEqual(t, tag.Name, "testing-1", fmt.Sprintf("Tags.Get return an incorrect name, got %+v, expected %+v", tag.Name, "testing-1"))
+	assertIntTagEqual(t, tag.Resources.Count, 5, fmt.Sprintf("Tags.Get return an incorrect resource count, got %+v, expected %+v", tag.Resources.Count, 5))
+	assertStringTagEqual(t, tag.Resources.LastTaggedURI, "https://api.digitalocean.com/v2/droplets/1", fmt.Sprintf("Tags.Get return an incorrect last tagged uri %+v, expected %+v", tag.Resources.LastTaggedURI, "https://api.digitalocean.com/v2/droplets/1"))
+	assertIntTagEqual(t, tag.Resources.Droplets.Count, 1, fmt.Sprintf("Tags.Get return an incorrect droplet resource count, got %+v, expected %+v", tag.Resources.Droplets.Count, 1))
+	assertIntTagEqual(t, tag.Resources.Droplets.LastTagged.ID, 1, fmt.Sprintf("Tags.Get return an incorrect last tagged droplet %+v, expected %+v", tag.Resources.Droplets.LastTagged.ID, 1))
+	assertStringTagEqual(t, tag.Resources.Droplets.LastTaggedURI, "https://api.digitalocean.com/v2/droplets/1", fmt.Sprintf("Tags.Get return an incorrect last tagged droplet uri %+v, expected %+v", tag.Resources.Droplets.LastTaggedURI, "https://api.digitalocean.com/v2/droplets/1"))
+	assertIntTagEqual(t, tag.Resources.Images.Count, 1, fmt.Sprintf("Tags.Get return an incorrect image resource count, got %+v, expected %+v", tag.Resources.Images.Count, 1))
+	assertStringTagEqual(t, tag.Resources.Images.LastTaggedURI, "https://api.digitalocean.com/v2/images/1", fmt.Sprintf("Tags.Get return an incorrect last tagged droplet uri %+v, expected %+v", tag.Resources.Images.LastTaggedURI, "https://api.digitalocean.com/v2/images/1"))
+	assertIntTagEqual(t, tag.Resources.Volumes.Count, 1, fmt.Sprintf("Tags.Get return an incorrect volume resource count, got %+v, expected %+v", tag.Resources.Volumes.Count, 1))
+	assertStringTagEqual(t, tag.Resources.Volumes.LastTaggedURI, "https://api.digitalocean.com/v2/volumes/abc", fmt.Sprintf("Tags.Get return an incorrect last tagged volume uri %+v, expected %+v", tag.Resources.Volumes.LastTaggedURI, "https://api.digitalocean.com/v2/volumes/abc"))
+	assertIntTagEqual(t, tag.Resources.VolumeSnapshots.Count, 1, fmt.Sprintf("Tags.Get return an incorrect volume snapshot resource count, got %+v, expected %+v", tag.Resources.VolumeSnapshots.Count, 1))
+	assertStringTagEqual(t, tag.Resources.VolumeSnapshots.LastTaggedURI, "https://api.digitalocean.com/v2/snapshots/1", fmt.Sprintf("Tags.Get return an incorrect last tagged volume snapshot uri %+v, expected %+v", tag.Resources.VolumeSnapshots.LastTaggedURI, "https://api.digitalocean.com/v2/snapshots/1"))
+	assertIntTagEqual(t, tag.Resources.Databases.Count, 1, fmt.Sprintf("Tags.Get return an incorrect database resource count, got %+v, expected %+v", tag.Resources.Databases.Count, 1))
+	assertStringTagEqual(t, tag.Resources.Databases.LastTaggedURI, "https://api.digitalocean.com/v2/databases/1", fmt.Sprintf("Tags.Get return an incorrect last tagged database uri %+v, expected %+v", tag.Resources.Databases.LastTaggedURI, "https://api.digitalocean.com/v2/databases/1"))
 
-	if tag.Resources.Databases.LastTaggedURI != "https://api.digitalocean.com/v2/databases/1" {
-		t.Errorf("Tags.Get return an incorrect last tagged database uri %+v, expected %+v", tag.Resources.Databases.LastTaggedURI, "https://api.digitalocean.com/v2/databases/1")
-	}
 }
 
 func TestTags_Create(t *testing.T) {
diff --git a/uptime.go b/uptime.go
new file mode 100644
index 0000000..915d6c7
--- /dev/null
+++ b/uptime.go
@@ -0,0 +1,342 @@
+package godo
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"path"
+)
+
+const uptimeChecksBasePath = "/v2/uptime/checks"
+
+// UptimeChecksService is an interface for creating and managing Uptime checks with the DigitalOcean API.
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Uptime
+type UptimeChecksService interface {
+	List(context.Context, *ListOptions) ([]UptimeCheck, *Response, error)
+	Get(context.Context, string) (*UptimeCheck, *Response, error)
+	GetState(context.Context, string) (*UptimeCheckState, *Response, error)
+	Create(context.Context, *CreateUptimeCheckRequest) (*UptimeCheck, *Response, error)
+	Update(context.Context, string, *UpdateUptimeCheckRequest) (*UptimeCheck, *Response, error)
+	Delete(context.Context, string) (*Response, error)
+	GetAlert(context.Context, string, string) (*UptimeAlert, *Response, error)
+	ListAlerts(context.Context, string, *ListOptions) ([]UptimeAlert, *Response, error)
+	CreateAlert(context.Context, string, *CreateUptimeAlertRequest) (*UptimeAlert, *Response, error)
+	UpdateAlert(context.Context, string, string, *UpdateUptimeAlertRequest) (*UptimeAlert, *Response, error)
+	DeleteAlert(context.Context, string, string) (*Response, error)
+}
+
+// UptimeChecksServiceOp handles communication with Uptime Check methods of the DigitalOcean API.
+type UptimeChecksServiceOp struct {
+	client *Client
+}
+
+// UptimeCheck represents a DigitalOcean UptimeCheck configuration.
+type UptimeCheck struct {
+	ID      string   `json:"id"`
+	Name    string   `json:"name"`
+	Type    string   `json:"type"`
+	Target  string   `json:"target"`
+	Regions []string `json:"regions"`
+	Enabled bool     `json:"enabled"`
+}
+
+// UptimeAlert represents a DigitalOcean Uptime Alert configuration.
+type UptimeAlert struct {
+	ID            string         `json:"id"`
+	Name          string         `json:"name"`
+	Type          string         `json:"type"`
+	Threshold     int            `json:"threshold"`
+	Comparison    string         `json:"comparison"`
+	Notifications *Notifications `json:"notifications"`
+	Period        string         `json:"period"`
+}
+
+// Notifications represents a DigitalOcean Notifications configuration.
+type Notifications struct {
+	Email []string       `json:"email"`
+	Slack []SlackDetails `json:"slack"`
+}
+
+// UptimeCheckState represents a DigitalOcean Uptime Check's state configuration.
+type UptimeCheckState struct {
+	Regions        map[string]UptimeRegion `json:"regions"`
+	PreviousOutage UptimePreviousOutage    `json:"previous_outage"`
+}
+
+type UptimeRegion struct {
+	Status                    string  `json:"status"`
+	StatusChangedAt           string  `json:"status_changed_at"`
+	ThirtyDayUptimePercentage float32 `json:"thirty_day_uptime_percentage"`
+}
+
+// UptimePreviousOutage represents a DigitalOcean Uptime Check's previous outage configuration.
+type UptimePreviousOutage struct {
+	Region          string `json:"region"`
+	StartedAt       string `json:"started_at"`
+	EndedAt         string `json:"ended_at"`
+	DurationSeconds int    `json:"duration_seconds"`
+}
+
+// CreateUptimeCheckRequest represents the request to create a new uptime check.
+type CreateUptimeCheckRequest struct {
+	Name    string   `json:"name"`
+	Type    string   `json:"type"`
+	Target  string   `json:"target"`
+	Regions []string `json:"regions"`
+	Enabled bool     `json:"enabled"`
+}
+
+// UpdateUptimeCheckRequest represents the request to update uptime check information.
+type UpdateUptimeCheckRequest struct {
+	Name    string   `json:"name"`
+	Type    string   `json:"type"`
+	Target  string   `json:"target"`
+	Regions []string `json:"regions"`
+	Enabled bool     `json:"enabled"`
+}
+
+// CreateUptimeUptimeAlertRequest represents the request to create a new Uptime Alert.
+type CreateUptimeAlertRequest struct {
+	Name          string         `json:"name"`
+	Type          string         `json:"type"`
+	Threshold     int            `json:"threshold"`
+	Comparison    string         `json:"comparison"`
+	Notifications *Notifications `json:"notifications"`
+	Period        string         `json:"period"`
+}
+
+// UpdateUptimeAlertRequest represents the request to create a new alert.
+type UpdateUptimeAlertRequest struct {
+	Name          string         `json:"name"`
+	Type          string         `json:"type"`
+	Threshold     int            `json:"threshold"`
+	Comparison    string         `json:"comparison"`
+	Notifications *Notifications `json:"notifications"`
+	Period        string         `json:"period"`
+}
+
+type uptimeChecksRoot struct {
+	UptimeChecks []UptimeCheck `json:"checks"`
+	Links        *Links        `json:"links"`
+	Meta         *Meta         `json:"meta"`
+}
+
+type uptimeCheckStateRoot struct {
+	UptimeCheckState UptimeCheckState `json:"state"`
+}
+
+type uptimeAlertsRoot struct {
+	UptimeAlerts []UptimeAlert `json:"alerts"`
+	Links        *Links        `json:"links"`
+	Meta         *Meta         `json:"meta"`
+}
+
+type uptimeCheckRoot struct {
+	UptimeCheck *UptimeCheck `json:"check"`
+}
+
+type uptimeAlertRoot struct {
+	UptimeAlert *UptimeAlert `json:"alert"`
+}
+
+var _ UptimeChecksService = &UptimeChecksServiceOp{}
+
+// List Checks.
+func (p *UptimeChecksServiceOp) List(ctx context.Context, opts *ListOptions) ([]UptimeCheck, *Response, error) {
+	path, err := addOptions(uptimeChecksBasePath, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := p.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(uptimeChecksRoot)
+	resp, err := p.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	if l := root.Links; l != nil {
+		resp.Links = l
+	}
+	if m := root.Meta; m != nil {
+		resp.Meta = m
+	}
+
+	return root.UptimeChecks, resp, err
+}
+
+// GetState of uptime check.
+func (p *UptimeChecksServiceOp) GetState(ctx context.Context, uptimeCheckID string) (*UptimeCheckState, *Response, error) {
+	path := path.Join(uptimeChecksBasePath, uptimeCheckID, "/state")
+
+	req, err := p.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(uptimeCheckStateRoot)
+	resp, err := p.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return &root.UptimeCheckState, resp, err
+}
+
+// Get retrieves a single uptime check by its ID.
+func (p *UptimeChecksServiceOp) Get(ctx context.Context, uptimeCheckID string) (*UptimeCheck, *Response, error) {
+	path := path.Join(uptimeChecksBasePath, uptimeCheckID)
+
+	req, err := p.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(uptimeCheckRoot)
+	resp, err := p.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return root.UptimeCheck, resp, err
+}
+
+// Create a new uptime check.
+func (p *UptimeChecksServiceOp) Create(ctx context.Context, cr *CreateUptimeCheckRequest) (*UptimeCheck, *Response, error) {
+	req, err := p.client.NewRequest(ctx, http.MethodPost, uptimeChecksBasePath, cr)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(uptimeCheckRoot)
+	resp, err := p.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return root.UptimeCheck, resp, err
+}
+
+// Update an uptime check.
+func (p *UptimeChecksServiceOp) Update(ctx context.Context, uptimeCheckID string, ur *UpdateUptimeCheckRequest) (*UptimeCheck, *Response, error) {
+	path := path.Join(uptimeChecksBasePath, uptimeCheckID)
+	req, err := p.client.NewRequest(ctx, http.MethodPut, path, ur)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(uptimeCheckRoot)
+	resp, err := p.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return root.UptimeCheck, resp, err
+}
+
+// Delete an existing uptime check.
+func (p *UptimeChecksServiceOp) Delete(ctx context.Context, uptimeCheckID string) (*Response, error) {
+	path := path.Join(uptimeChecksBasePath, uptimeCheckID)
+	req, err := p.client.NewRequest(ctx, http.MethodDelete, path, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return p.client.Do(ctx, req, nil)
+}
+
+// alerts
+
+// ListAlerts lists alerts for a check.
+func (p *UptimeChecksServiceOp) ListAlerts(ctx context.Context, uptimeCheckID string, opts *ListOptions) ([]UptimeAlert, *Response, error) {
+	fullPath := path.Join(uptimeChecksBasePath, uptimeCheckID, "/alerts")
+	path, err := addOptions(fullPath, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := p.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(uptimeAlertsRoot)
+	resp, err := p.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	if l := root.Links; l != nil {
+		resp.Links = l
+	}
+	if m := root.Meta; m != nil {
+		resp.Meta = m
+	}
+
+	return root.UptimeAlerts, resp, err
+}
+
+// CreateAlert creates a new check alert.
+func (p *UptimeChecksServiceOp) CreateAlert(ctx context.Context, uptimeCheckID string, cr *CreateUptimeAlertRequest) (*UptimeAlert, *Response, error) {
+	fullPath := path.Join(uptimeChecksBasePath, uptimeCheckID, "/alerts")
+	req, err := p.client.NewRequest(ctx, http.MethodPost, fullPath, cr)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(uptimeAlertRoot)
+	resp, err := p.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return root.UptimeAlert, resp, err
+}
+
+// GetAlert retrieves a single uptime check alert by its ID.
+func (p *UptimeChecksServiceOp) GetAlert(ctx context.Context, uptimeCheckID string, alertID string) (*UptimeAlert, *Response, error) {
+	path := fmt.Sprintf("v2/uptime/checks/%s/alerts/%s", uptimeCheckID, alertID)
+
+	req, err := p.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(uptimeAlertRoot)
+	resp, err := p.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return root.UptimeAlert, resp, err
+}
+
+// UpdateAlert updates an check's alert.
+func (p *UptimeChecksServiceOp) UpdateAlert(ctx context.Context, uptimeCheckID string, alertID string, ur *UpdateUptimeAlertRequest) (*UptimeAlert, *Response, error) {
+	path := path.Join(uptimeChecksBasePath, uptimeCheckID, "/alerts/", alertID)
+	req, err := p.client.NewRequest(ctx, http.MethodPut, path, ur)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(uptimeAlertRoot)
+	resp, err := p.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return root.UptimeAlert, resp, err
+}
+
+// DeleteAlert deletes an existing check's alert.
+func (p *UptimeChecksServiceOp) DeleteAlert(ctx context.Context, uptimeCheckID string, alertID string) (*Response, error) {
+	path := path.Join(uptimeChecksBasePath, uptimeCheckID, "/alerts/", alertID)
+	req, err := p.client.NewRequest(ctx, http.MethodDelete, path, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return p.client.Do(ctx, req, nil)
+}
diff --git a/uptime_test.go b/uptime_test.go
new file mode 100644
index 0000000..e3797e4
--- /dev/null
+++ b/uptime_test.go
@@ -0,0 +1,596 @@
+package godo
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"reflect"
+	"strings"
+	"testing"
+)
+
+func TestUptimeChecks_List(t *testing.T) {
+	setup()
+	defer teardown()
+
+	expectedUptimeChecks := []UptimeCheck{
+		{
+			ID:   "uptimecheck-1",
+			Name: "uptimecheck-1",
+		},
+		{
+			ID:   "uptimecheck-2",
+			Name: "uptimecheck-2",
+		},
+	}
+
+	mux.HandleFunc("/v2/uptime/checks", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		resp, _ := json.Marshal(expectedUptimeChecks)
+		fmt.Fprint(w, fmt.Sprintf(`{"checks":%s, "meta": {"total": 2}}`, string(resp)))
+	})
+
+	uptimeChecks, resp, err := client.UptimeChecks.List(ctx, nil)
+	if err != nil {
+		t.Errorf("UptimeChecks.List returned error: %v", err)
+	}
+
+	if !reflect.DeepEqual(uptimeChecks, expectedUptimeChecks) {
+		t.Errorf("UptimeChecks.List returned uptime checks %+v, expected %+v", uptimeChecks, expectedUptimeChecks)
+	}
+
+	expectedMeta := &Meta{Total: 2}
+	if !reflect.DeepEqual(resp.Meta, expectedMeta) {
+		t.Errorf("UptimeChecks.List returned meta %+v, expected %+v", resp.Meta, expectedMeta)
+	}
+}
+
+func TestUptimeChecks_ListWithMultiplePages(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mockResp := `
+	{
+		"checks": [
+			{
+				"uuid": "check-1",
+				"name": "check-1"
+			},
+			{
+				"uuid": "check-2",
+				"name": "check-2"
+			}
+		],
+		"links": {
+			"pages": {
+				"next": "http://example.com/v2/uptime/checks?page=2"
+			}
+		}
+	}`
+
+	mux.HandleFunc("/v2/uptime/checks", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		fmt.Fprint(w, mockResp)
+	})
+
+	_, resp, err := client.UptimeChecks.List(ctx, nil)
+	if err != nil {
+		t.Errorf("UptimeChecks.List returned error: %v", err)
+	}
+
+	checkCurrentPage(t, resp, 1)
+}
+
+func TestUptimeChecks_ListWithPageNumber(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mockResp := `
+	{
+		"checks": [
+			{
+				"uuid": "check-1",
+				"name": "check-1"
+			},
+			{
+				"uuid": "check-2",
+				"name": "check-2"
+			}
+		],
+		"links": {
+			"pages": {
+				"next": "http://example.com/v2/uptime/checks?page=3",
+				"prev": "http://example.com/v2/uptime/checks?page=1",
+				"last": "http://example.com/v2/uptime/checks?page=3",
+				"first": "http://example.com/v2/uptime/checks?page=1"
+			}
+		}
+	}`
+
+	mux.HandleFunc("/v2/uptime/checks", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		fmt.Fprint(w, mockResp)
+	})
+
+	_, resp, err := client.UptimeChecks.List(ctx, &ListOptions{Page: 2})
+	if err != nil {
+		t.Errorf("UptimeChecks.List returned error: %v", err)
+	}
+
+	checkCurrentPage(t, resp, 2)
+}
+
+func TestUptimeChecks_GetState(t *testing.T) {
+	setup()
+	defer teardown()
+
+	uptimeCheckState := &UptimeCheckState{
+		Regions: map[string]UptimeRegion{
+			"us_east": {
+				Status:                    "UP",
+				StatusChangedAt:           "2022-03-17T22:28:51Z",
+				ThirtyDayUptimePercentage: 97.99,
+			},
+			"eu_west": {
+				Status:                    "UP",
+				StatusChangedAt:           "2022-03-17T22:28:51Z",
+				ThirtyDayUptimePercentage: 97.99,
+			},
+		},
+		PreviousOutage: UptimePreviousOutage{
+			Region:          "us_east",
+			StartedAt:       "2022-03-17T18:04:55Z",
+			EndedAt:         "2022-03-17T18:06:55Z",
+			DurationSeconds: 120,
+		},
+	}
+
+	mux.HandleFunc("/v2/uptime/checks/check-1/state", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		resp, _ := json.Marshal(uptimeCheckState)
+		fmt.Fprint(w, fmt.Sprintf(`{"state":%s}`, string(resp)))
+	})
+
+	resp, _, err := client.UptimeChecks.GetState(ctx, "check-1")
+	if err != nil {
+		t.Errorf("UptimeChecks.GetState returned error: %v", err)
+	}
+
+	if !reflect.DeepEqual(resp, uptimeCheckState) {
+		t.Errorf("UptimeChecks.GetUptimeCheckState returned %+v, expected %+v", resp, uptimeCheckState)
+	}
+}
+
+func TestUptimeChecks_GetWithID(t *testing.T) {
+	setup()
+	defer teardown()
+
+	uptimeCheck := &UptimeCheck{
+		ID:   "check-1",
+		Name: "check-1",
+	}
+
+	mux.HandleFunc("/v2/uptime/checks/check-1", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		resp, _ := json.Marshal(uptimeCheck)
+		fmt.Fprint(w, fmt.Sprintf(`{"check":%s}`, string(resp)))
+	})
+
+	resp, _, err := client.UptimeChecks.Get(ctx, "check-1")
+	if err != nil {
+		t.Errorf("UptimeChecks.Get returned error: %v", err)
+	}
+
+	if !reflect.DeepEqual(resp, uptimeCheck) {
+		t.Errorf("UptimeChecks.Get returned %+v, expected %+v", resp, uptimeCheck)
+	}
+}
+
+func TestUptimeChecks_Create(t *testing.T) {
+	setup()
+	defer teardown()
+
+	createRequest := &CreateUptimeCheckRequest{
+		Name:    "my check",
+		Type:    "https",
+		Target:  "https://www.landingpage.com",
+		Enabled: true,
+	}
+
+	createResp := &UptimeCheck{
+		ID:      "check-id",
+		Name:    createRequest.Name,
+		Type:    createRequest.Type,
+		Target:  createRequest.Target,
+		Enabled: createRequest.Enabled,
+	}
+
+	mux.HandleFunc("/v2/uptime/checks", func(w http.ResponseWriter, r *http.Request) {
+		v := new(CreateUptimeCheckRequest)
+		err := json.NewDecoder(r.Body).Decode(v)
+		if err != nil {
+			t.Fatalf("decode json: %v", err)
+		}
+
+		testMethod(t, r, http.MethodPost)
+		if !reflect.DeepEqual(v, createRequest) {
+			t.Errorf("Request body = %+v, expected %+v", v, createRequest)
+		}
+
+		resp, _ := json.Marshal(createResp)
+		fmt.Fprintf(w, fmt.Sprintf(`{"check":%s}`, string(resp)))
+	})
+
+	uptimeCheck, _, err := client.UptimeChecks.Create(ctx, createRequest)
+	if err != nil {
+		t.Errorf("UptimeChecks.Create returned error: %v", err)
+	}
+
+	if !reflect.DeepEqual(uptimeCheck, createResp) {
+		t.Errorf("UptimeChecks.Create returned %+v, expected %+v", uptimeCheck, createResp)
+	}
+}
+
+func TestUptimeChecks_Update(t *testing.T) {
+	setup()
+	defer teardown()
+
+	updateRequest := &UpdateUptimeCheckRequest{
+		Name:    "my check",
+		Type:    "https",
+		Target:  "https://www.landingpage.com",
+		Enabled: true,
+	}
+	updateResp := &UptimeCheck{
+		ID:      "check-id",
+		Name:    updateRequest.Name,
+		Type:    updateRequest.Type,
+		Target:  updateRequest.Target,
+		Enabled: updateRequest.Enabled,
+	}
+
+	mux.HandleFunc("/v2/uptime/checks/check-id", func(w http.ResponseWriter, r *http.Request) {
+		reqBytes, respErr := ioutil.ReadAll(r.Body)
+		if respErr != nil {
+			t.Error("uptime checks mock didn't work")
+		}
+
+		req := strings.TrimSuffix(string(reqBytes), "\n")
+		expectedReq := `{"name":"my check","type":"https","target":"https://www.landingpage.com","regions":null,"enabled":true}`
+		if req != expectedReq {
+			t.Errorf("check req didn't match up:\n expected %+v\n got %+v\n", expectedReq, req)
+		}
+
+		resp, _ := json.Marshal(updateResp)
+		fmt.Fprintf(w, fmt.Sprintf(`{"check":%s}`, string(resp)))
+	})
+
+	uptimeCheck, _, err := client.UptimeChecks.Update(ctx, "check-id", updateRequest)
+	if err != nil {
+		t.Errorf("UptimeChecks.Update returned error: %v", err)
+	}
+	if !reflect.DeepEqual(uptimeCheck, updateResp) {
+		t.Errorf("UptimeChecks.Update returned %+v, expected %+v", uptimeCheck, updateResp)
+	}
+}
+
+func TestUptimeChecks_Delete(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/v2/uptime/checks/check-1", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodDelete)
+	})
+
+	_, err := client.UptimeChecks.Delete(ctx, "check-1")
+	if err != nil {
+		t.Errorf("UptimeChecks.Delete returned error: %v", err)
+	}
+}
+
+func TestUptimeAlert_Delete(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mux.HandleFunc("/v2/uptime/checks/check-1/alerts/alert-1", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodDelete)
+	})
+
+	_, err := client.UptimeChecks.DeleteAlert(ctx, "check-1", "alert-1")
+	if err != nil {
+		t.Errorf("UptimeChecks.Delete returned error: %v", err)
+	}
+}
+
+func TestUptimeAlert_Update(t *testing.T) {
+	setup()
+	defer teardown()
+
+	updateRequest := &UpdateUptimeAlertRequest{
+		Name:       "my alert",
+		Type:       "latency",
+		Threshold:  300,
+		Comparison: "greater_than",
+		Period:     "2m",
+		Notifications: &Notifications{
+			Email: []string{
+				"email",
+			},
+			Slack: []SlackDetails{},
+		},
+	}
+	updateResp := &UptimeAlert{
+		ID:            "alert-id",
+		Name:          updateRequest.Name,
+		Type:          updateRequest.Type,
+		Threshold:     updateRequest.Threshold,
+		Comparison:    updateRequest.Comparison,
+		Period:        updateRequest.Period,
+		Notifications: updateRequest.Notifications,
+	}
+
+	mux.HandleFunc("/v2/uptime/checks/check-id/alerts/alert-id", func(w http.ResponseWriter, r *http.Request) {
+		reqBytes, respErr := ioutil.ReadAll(r.Body)
+		if respErr != nil {
+			t.Error("alerts mock didn't work")
+		}
+
+		req := strings.TrimSuffix(string(reqBytes), "\n")
+		expectedReq := `{"name":"my alert","type":"latency","threshold":300,"comparison":"greater_than","notifications":{"email":["email"],"slack":[]},"period":"2m"}`
+		if req != expectedReq {
+			t.Errorf("check req didn't match up:\n expected %+v\n got %+v\n", expectedReq, req)
+		}
+
+		resp, _ := json.Marshal(updateResp)
+		fmt.Fprintf(w, fmt.Sprintf(`{"alert":%s}`, string(resp)))
+	})
+
+	alert, _, err := client.UptimeChecks.UpdateAlert(ctx, "check-id", "alert-id", updateRequest)
+	if err != nil {
+		t.Errorf("UptimeChecks.UpdateAlertreturned error: %v", err)
+	}
+	if !reflect.DeepEqual(alert, updateResp) {
+		t.Errorf("UptimeChecks.UpdateAlert returned %+v, expected %+v", alert, updateResp)
+	}
+}
+
+func TestUptimeAlert_Create(t *testing.T) {
+	setup()
+	defer teardown()
+
+	createRequest := &CreateUptimeAlertRequest{
+		Name:       "my alert",
+		Type:       "latency",
+		Threshold:  300,
+		Comparison: "greater_than",
+		Period:     "2m",
+		Notifications: &Notifications{
+			Email: []string{
+				"email",
+			},
+			Slack: []SlackDetails{},
+		},
+	}
+
+	createResp := &UptimeAlert{
+		ID:            "alert-id",
+		Name:          createRequest.Name,
+		Type:          createRequest.Type,
+		Threshold:     createRequest.Threshold,
+		Comparison:    createRequest.Comparison,
+		Period:        createRequest.Period,
+		Notifications: createRequest.Notifications,
+	}
+	mux.HandleFunc("/v2/uptime/checks/check-id/alerts", func(w http.ResponseWriter, r *http.Request) {
+		v := new(CreateUptimeAlertRequest)
+		err := json.NewDecoder(r.Body).Decode(v)
+		if err != nil {
+			t.Fatalf("decode json: %v", err)
+		}
+
+		testMethod(t, r, http.MethodPost)
+		if !reflect.DeepEqual(v, createRequest) {
+			t.Errorf("Request body = %+v, expected %+v", v, createRequest)
+		}
+
+		resp, _ := json.Marshal(createResp)
+		fmt.Fprintf(w, fmt.Sprintf(`{"alert":%s}`, string(resp)))
+	})
+
+	uptimeCheck, _, err := client.UptimeChecks.CreateAlert(ctx, "check-id", createRequest)
+	if err != nil {
+		t.Errorf("UptimeChecks.CreateAlert returned error: %v", err)
+	}
+
+	if !reflect.DeepEqual(uptimeCheck, createResp) {
+		t.Errorf("UptimeChecks.CreateAlert returned %+v, expected %+v", uptimeCheck, createResp)
+	}
+}
+
+func TestUptimeAlert_GetWithID(t *testing.T) {
+	setup()
+	defer teardown()
+
+	alert := &UptimeAlert{
+		ID:         "alert-1",
+		Name:       "my alert",
+		Type:       "latency",
+		Threshold:  300,
+		Comparison: "greater_than",
+		Period:     "2m",
+		Notifications: &Notifications{
+			Email: []string{
+				"email",
+			},
+			Slack: []SlackDetails{},
+		},
+	}
+
+	mux.HandleFunc("/v2/uptime/checks/check-1/alerts/alert-1", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		resp, _ := json.Marshal(alert)
+		fmt.Fprint(w, fmt.Sprintf(`{"alert":%s}`, string(resp)))
+	})
+
+	resp, _, err := client.UptimeChecks.GetAlert(ctx, "check-1", "alert-1")
+	if err != nil {
+		t.Errorf("UptimeChecks.GetAlert returned error: %v", err)
+	}
+
+	if !reflect.DeepEqual(resp, alert) {
+		t.Errorf("UptimeChecks.GetAlert returned %+v, expected %+v", resp, alert)
+	}
+}
+
+func TestUptimeAlerts_List(t *testing.T) {
+	setup()
+	defer teardown()
+
+	expectedAlerts := []UptimeAlert{
+		{
+			ID:         "alert-1",
+			Name:       "my alert",
+			Type:       "latency",
+			Threshold:  300,
+			Comparison: "greater_than",
+			Period:     "2m",
+			Notifications: &Notifications{
+				Email: []string{
+					"email",
+				},
+				Slack: []SlackDetails{},
+			},
+		},
+		{
+			ID:         "alert-2",
+			Name:       "my alert",
+			Type:       "latency",
+			Threshold:  300,
+			Comparison: "greater_than",
+			Period:     "2m",
+			Notifications: &Notifications{
+				Email: []string{
+					"email2",
+				},
+				Slack: []SlackDetails{},
+			},
+		},
+	}
+
+	mux.HandleFunc("/v2/uptime/checks/check-1/alerts", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		resp, _ := json.Marshal(expectedAlerts)
+		fmt.Fprint(w, fmt.Sprintf(`{"alerts":%s, "meta": {"total": 2}}`, string(resp)))
+	})
+
+	alerts, resp, err := client.UptimeChecks.ListAlerts(ctx, "check-1", nil)
+	if err != nil {
+		t.Errorf("UptimeChecks.ListAlerts returned error: %v", err)
+	}
+
+	if !reflect.DeepEqual(alerts, expectedAlerts) {
+		t.Errorf("UptimeChecks.ListAlerts returned uptime checks %+v, expected %+v", alerts, expectedAlerts)
+	}
+
+	expectedMeta := &Meta{Total: 2}
+	if !reflect.DeepEqual(resp.Meta, expectedMeta) {
+		t.Errorf("UptimeChecks.List returned meta %+v, expected %+v", resp.Meta, expectedMeta)
+	}
+}
+
+func TestUptimeAlerts_ListWithMultiplePages(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mockResp := `
+	{
+		"alerts": [{
+			"id": "alert-1",
+			"name": "Landing page degraded performance",
+			"type": "latency",
+			"threshold": 300,
+			"comparison": "greater_than",
+			"notifications": {
+				"email": [
+					"bob@example.com"
+				],
+				"slack": [{
+					"channel": "Production Alerts",
+					"url": "https://hooks.slack.com/services/T1234567/AAAAAAAA/ZZZZZZ"
+				}]
+			},
+			"period": "2m"
+		}],
+		"links": {
+			"pages": {
+				"next": "http://example.com/v2/uptime/checks/check-1/alerts?page=2"
+			}
+		},
+		"meta": {
+			"total": 1
+		}
+	}`
+
+	mux.HandleFunc("/v2/uptime/checks/check-1/alerts", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		fmt.Fprint(w, mockResp)
+	})
+
+	_, resp, err := client.UptimeChecks.ListAlerts(ctx, "check-1", nil)
+	if err != nil {
+		t.Errorf("UptimeChecks.ListAlerts returned error: %v", err)
+	}
+
+	checkCurrentPage(t, resp, 1)
+}
+
+func TestUptimeAlerts_ListWithPageNumber(t *testing.T) {
+	setup()
+	defer teardown()
+
+	mockResp := `
+	{
+		"alerts": [
+		  {
+			"id": "alert-1",
+			"name": "Landing page degraded performance",
+			"type": "latency",
+			"threshold": 300,
+			"comparison": "greater_than",
+			"notifications": {
+			  "email": [
+				"bob@example.com"
+			  ],
+			  "slack": [
+				{
+				  "channel": "Production Alerts",
+				  "url": "https://hooks.slack.com/services/T1234567/AAAAAAAA/ZZZZZZ"
+				}
+			  ]
+			},
+			"period": "2m"
+		  }
+		],
+		"links": {
+		  "pages": {
+			"next": "http://example.com/v2/uptime/checks?page=3",
+			"prev": "http://example.com/v2/uptime/checks?page=1",
+			"last": "http://example.com/v2/uptime/checks?page=3",
+			"first": "http://example.com/v2/uptime/checks?page=1"
+		  }
+		}
+	  }`
+
+	mux.HandleFunc("/v2/uptime/checks/check-1/alerts", func(w http.ResponseWriter, r *http.Request) {
+		testMethod(t, r, http.MethodGet)
+		fmt.Fprint(w, mockResp)
+	})
+
+	_, resp, err := client.UptimeChecks.ListAlerts(ctx, "check-1", &ListOptions{Page: 2})
+	if err != nil {
+		t.Errorf("UptimeChecks.ListAlerts returned error: %v", err)
+	}
+
+	checkCurrentPage(t, resp, 2)
+}
diff --git a/util/droplet_test.go b/util/droplet_test.go
index 823d312..9c30cc3 100644
--- a/util/droplet_test.go
+++ b/util/droplet_test.go
@@ -2,28 +2,38 @@ package util
 
 import (
 	"context"
-
-	"golang.org/x/oauth2"
+	"fmt"
+	"log"
 
 	"github.com/digitalocean/godo"
 )
 
 func ExampleWaitForActive() {
-	// build client
-	pat := "mytoken"
-	token := &oauth2.Token{AccessToken: pat}
-	t := oauth2.StaticTokenSource(token)
-
-	ctx := context.TODO()
-	oauthClient := oauth2.NewClient(ctx, t)
-	client := godo.NewClient(oauthClient)
-
-	// create your droplet and retrieve the create action uri
-	uri := "https://api.digitalocean.com/v2/actions/xxxxxxxx"
+	// Create a godo client.
+	client := godo.NewFromToken("dop_v1_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")
 
-	// block until until the action is complete
-	err := WaitForActive(ctx, client, uri)
+	// Create a Droplet.
+	droplet, resp, err := client.Droplets.Create(context.Background(), &godo.DropletCreateRequest{
+		Name:   "test-droplet",
+		Region: "nyc3",
+		Size:   "s-1vcpu-1gb",
+		Image: godo.DropletCreateImage{
+			Slug: "ubuntu-20-04-x64",
+		},
+	})
 	if err != nil {
-		panic(err)
+		log.Fatalf("failed to create droplet: %v\n", err)
 	}
+
+	// Find the Droplet create action, then wait for it to complete.
+	for _, action := range resp.Links.Actions {
+		if action.Rel == "create" {
+			// Block until the action is complete.
+			if err := WaitForActive(context.Background(), client, action.HREF); err != nil {
+				log.Fatalf("error waiting for droplet to become active: %v\n", err)
+			}
+		}
+	}
+
+	fmt.Println(droplet.Name)
 }
diff --git a/util/image.go b/util/image.go
new file mode 100644
index 0000000..85dde3a
--- /dev/null
+++ b/util/image.go
@@ -0,0 +1,57 @@
+package util
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/digitalocean/godo"
+)
+
+const (
+	// availableFailure is the number of times we can fail before deciding
+	// the check for available is a total failure. This can help account
+	// for servers randomly not answering.
+	availableFailure = 3
+)
+
+// WaitForAvailable waits for an image to become available
+func WaitForAvailable(ctx context.Context, client *godo.Client, monitorURI string) error {
+	if len(monitorURI) == 0 {
+		return fmt.Errorf("create had no monitor uri")
+	}
+
+	completed := false
+	failCount := 0
+	for !completed {
+		action, _, err := client.ImageActions.GetByURI(ctx, monitorURI)
+
+		if err != nil {
+			select {
+			case <-ctx.Done():
+				return err
+			default:
+			}
+			if failCount <= availableFailure {
+				failCount++
+				continue
+			}
+			return err
+		}
+
+		switch action.Status {
+		case godo.ActionInProgress:
+			select {
+			case <-time.After(5 * time.Second):
+			case <-ctx.Done():
+				return err
+			}
+		case godo.ActionCompleted:
+			completed = true
+		default:
+			return fmt.Errorf("unknown status: [%s]", action.Status)
+		}
+	}
+
+	return nil
+}
diff --git a/util/image_test.go b/util/image_test.go
new file mode 100644
index 0000000..a5e6b5d
--- /dev/null
+++ b/util/image_test.go
@@ -0,0 +1,36 @@
+package util
+
+import (
+	"context"
+	"fmt"
+	"log"
+
+	"github.com/digitalocean/godo"
+)
+
+func ExampleWaitForAvailable() {
+	// Create a godo client.
+	client := godo.NewFromToken("dop_v1_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")
+
+	// Create an Image.
+	image, resp, err := client.Images.Create(context.Background(), &godo.CustomImageCreateRequest{
+		Name:   "test-image",
+		Url:    "https://cloud-images.ubuntu.com/releases/focal/release/ubuntu-20.04-server-cloudimg-amd64.vmdk",
+		Region: "nyc3",
+	})
+	if err != nil {
+		log.Fatalf("failed to create image: %v\n", err)
+	}
+
+	// Find the Image create action, then wait for it to complete.
+	for _, action := range resp.Links.Actions {
+		if action.Rel == "create" {
+			// Block until the action is complete.
+			if err := WaitForAvailable(context.Background(), client, action.HREF); err != nil {
+				log.Fatalf("error waiting for image to become active: %v\n", err)
+			}
+		}
+	}
+
+	fmt.Println(image.Name)
+}
diff --git a/vpcs.go b/vpcs.go
index 7fbeaf8..f4f22e1 100644
--- a/vpcs.go
+++ b/vpcs.go
@@ -10,11 +10,12 @@ const vpcsBasePath = "/v2/vpcs"
 
 // VPCsService is an interface for managing Virtual Private Cloud configurations with the
 // DigitalOcean API.
-// See: https://developers.digitalocean.com/documentation/v2#vpcs
+// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/VPCs
 type VPCsService interface {
 	Create(context.Context, *VPCCreateRequest) (*VPC, *Response, error)
 	Get(context.Context, string) (*VPC, *Response, error)
 	List(context.Context, *ListOptions) ([]*VPC, *Response, error)
+	ListMembers(context.Context, string, *VPCListMembersRequest, *ListOptions) ([]*VPCMember, *Response, error)
 	Update(context.Context, string, *VPCUpdateRequest) (*VPC, *Response, error)
 	Set(context.Context, string, ...VPCSetField) (*VPC, *Response, error)
 	Delete(context.Context, string) (*Response, error)
@@ -39,6 +40,7 @@ type VPCCreateRequest struct {
 type VPCUpdateRequest struct {
 	Name        string `json:"name,omitempty"`
 	Description string `json:"description,omitempty"`
+	Default     *bool  `json:"default,omitempty"`
 }
 
 // VPCSetField allows one to set individual fields within a VPC configuration.
@@ -54,6 +56,16 @@ type VPCSetName string
 // Ex.: VPCs.Set(..., VPCSetDescription("vpc description"))
 type VPCSetDescription string
 
+// VPCSetDefault is used when one wants to enable the `default` field of a VPC, to
+// set a VPC as the default one in the region
+// Ex.: VPCs.Set(..., VPCSetDefault())
+func VPCSetDefault() VPCSetField {
+	return &vpcSetDefault{}
+}
+
+// vpcSetDefault satisfies the VPCSetField interface
+type vpcSetDefault struct{}
+
 // VPC represents a DigitalOcean Virtual Private Cloud configuration.
 type VPC struct {
 	ID          string    `json:"id,omitempty"`
@@ -66,6 +78,16 @@ type VPC struct {
 	Default     bool      `json:"default,omitempty"`
 }
 
+type VPCListMembersRequest struct {
+	ResourceType string `url:"resource_type,omitempty"`
+}
+
+type VPCMember struct {
+	URN       string    `json:"urn,omitempty"`
+	Name      string    `json:"name,omitempty"`
+	CreatedAt time.Time `json:"created_at,omitempty"`
+}
+
 type vpcRoot struct {
 	VPC *VPC `json:"vpc"`
 }
@@ -76,6 +98,12 @@ type vpcsRoot struct {
 	Meta  *Meta  `json:"meta"`
 }
 
+type vpcMembersRoot struct {
+	Members []*VPCMember `json:"members"`
+	Links   *Links       `json:"links"`
+	Meta    *Meta        `json:"meta"`
+}
+
 // Get returns the details of a Virtual Private Cloud.
 func (v *VPCsServiceOp) Get(ctx context.Context, id string) (*VPC, *Response, error) {
 	path := vpcsBasePath + "/" + id
@@ -161,6 +189,10 @@ func (n VPCSetDescription) vpcSetField(in map[string]interface{}) {
 	in["description"] = n
 }
 
+func (*vpcSetDefault) vpcSetField(in map[string]interface{}) {
+	in["default"] = true
+}
+
 // Set updates specific properties of a Virtual Private Cloud.
 func (v *VPCsServiceOp) Set(ctx context.Context, id string, fields ...VPCSetField) (*VPC, *Response, error) {
 	path := vpcsBasePath + "/" + id
@@ -199,3 +231,35 @@ func (v *VPCsServiceOp) Delete(ctx context.Context, id string) (*Response, error
 
 	return resp, nil
 }
+
+func (v *VPCsServiceOp) ListMembers(ctx context.Context, id string, request *VPCListMembersRequest, opt *ListOptions) ([]*VPCMember, *Response, error) {
+	path := vpcsBasePath + "/" + id + "/members"
+	pathWithResourceType, err := addOptions(path, request)
+	if err != nil {
+		return nil, nil, err
+	}
+	pathWithOpts, err := addOptions(pathWithResourceType, opt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := v.client.NewRequest(ctx, http.MethodGet, pathWithOpts, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(vpcMembersRoot)
+	resp, err := v.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	if l := root.Links; l != nil {
+		resp.Links = l
+	}
+	if m := root.Meta; m != nil {
+		resp.Meta = m
+	}
+
+	return root.Members, resp, nil
+
+}
diff --git a/vpcs_test.go b/vpcs_test.go
index ab7d680..71ca81f 100644
--- a/vpcs_test.go
+++ b/vpcs_test.go
@@ -1,9 +1,11 @@
 package godo
 
 import (
+	"bytes"
 	"encoding/json"
 	"fmt"
 	"net/http"
+	"strings"
 	"testing"
 	"time"
 
@@ -140,82 +142,167 @@ func TestVPCs_Create(t *testing.T) {
 }
 
 func TestVPCs_Update(t *testing.T) {
-	setup()
-	defer teardown()
 
-	svc := client.VPCs
-	path := "/v2/vpcs"
-	want := vTestObj
-	id := "880b7f98-f062-404d-b33c-458d545696f6"
-	req := &VPCUpdateRequest{
-		Name:        "my-new-vpc",
-		Description: "vpc description",
+	tests := []struct {
+		desc                string
+		id                  string
+		req                 *VPCUpdateRequest
+		mockResponse        string
+		expectedRequestBody string
+		expectedUpdatedVPC  *VPC
+	}{
+		{
+			desc: "setting name and description without default argument",
+			id:   "880b7f98-f062-404d-b33c-458d545696f6",
+			req: &VPCUpdateRequest{
+				Name:        "my-new-vpc",
+				Description: "vpc description",
+			},
+			mockResponse: `
+			{
+			  "vpc":
+			` + vTestJSON + `
+			}
+			`,
+			expectedRequestBody: `{"name":"my-new-vpc","description":"vpc description"}`,
+			expectedUpdatedVPC:  vTestObj,
+		},
+
+		{
+			desc: "setting the default vpc option",
+			id:   "880b7f98-f062-404d-b33c-458d545696f6",
+			req: &VPCUpdateRequest{
+				Name:        "my-new-vpc",
+				Description: "vpc description",
+				Default:     PtrTo(false),
+			},
+			mockResponse: `
+			{
+			  "vpc":
+			` + vTestJSON + `
+			}
+			`,
+			expectedRequestBody: `{"name":"my-new-vpc","description":"vpc description","default":false}`,
+			expectedUpdatedVPC:  vTestObj,
+		},
+
+		{
+			desc: "setting the default vpc option",
+			id:   "880b7f98-f062-404d-b33c-458d545696f6",
+			req: &VPCUpdateRequest{
+				Name:        "my-new-vpc",
+				Description: "vpc description",
+				Default:     PtrTo(true),
+			},
+			mockResponse: `
+			{
+			  "vpc":
+			` + vTestJSON + `
+			}
+			`,
+			expectedRequestBody: `{"name":"my-new-vpc","description":"vpc description","default":true}`,
+			expectedUpdatedVPC:  vTestObj,
+		},
 	}
-	jsonBlob := `
-{
-  "vpc":
-` + vTestJSON + `
-}
-`
 
-	mux.HandleFunc(path+"/"+id, func(w http.ResponseWriter, r *http.Request) {
-		c := new(VPCUpdateRequest)
-		err := json.NewDecoder(r.Body).Decode(c)
-		if err != nil {
-			t.Fatal(err)
-		}
+	for _, tt := range tests {
+		setup()
 
-		testMethod(t, r, http.MethodPut)
-		require.Equal(t, c, req)
-		fmt.Fprint(w, jsonBlob)
-	})
+		mux.HandleFunc("/v2/vpcs/"+tt.id, func(w http.ResponseWriter, r *http.Request) {
+			buf := new(bytes.Buffer)
+			buf.ReadFrom(r.Body)
+			require.Equal(t, tt.expectedRequestBody, strings.TrimSpace(buf.String()))
 
-	got, _, err := svc.Update(ctx, id, req)
-	require.NoError(t, err)
-	require.Equal(t, want, got)
+			v := new(VPCUpdateRequest)
+			err := json.NewDecoder(buf).Decode(v)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			testMethod(t, r, http.MethodPut)
+			fmt.Fprint(w, tt.mockResponse)
+		})
+
+		got, _, err := client.VPCs.Update(ctx, tt.id, tt.req)
+
+		teardown()
+
+		require.NoError(t, err)
+		require.Equal(t, tt.expectedUpdatedVPC, got)
+	}
 }
 
 func TestVPCs_Set(t *testing.T) {
-	setup()
-	defer teardown()
 
-	type setRequest struct {
-		Name        string `json:"name"`
-		Description string `json:"description"`
-	}
+	tests := []struct {
+		desc                string
+		id                  string
+		updateFields        []VPCSetField
+		mockResponse        string
+		expectedRequestBody string
+		expectedUpdatedVPC  *VPC
+	}{
+		{
+			desc: "setting name and description",
+			id:   "880b7f98-f062-404d-b33c-458d545696f6",
+			updateFields: []VPCSetField{
+				VPCSetName("my-new-vpc"),
+				VPCSetDescription("vpc description"),
+			},
+			mockResponse: `
+			{
+			  "vpc":
+			` + vTestJSON + `
+			}
+			`,
+			expectedRequestBody: `{"description":"vpc description","name":"my-new-vpc"}`,
+			expectedUpdatedVPC:  vTestObj,
+		},
 
-	svc := client.VPCs
-	path := "/v2/vpcs"
-	want := vTestObj
-	id := "880b7f98-f062-404d-b33c-458d545696f6"
-	name := "my-new-vpc"
-	desc := "vpc description"
-	req := &setRequest{
-		Name:        name,
-		Description: desc,
+		{
+			desc: "setting the default vpc option",
+			id:   "880b7f98-f062-404d-b33c-458d545696f6",
+			updateFields: []VPCSetField{
+				VPCSetName("my-new-vpc"),
+				VPCSetDescription("vpc description"),
+				VPCSetDefault(),
+			},
+			mockResponse: `
+			{
+			  "vpc":
+			` + vTestJSON + `
+			}
+			`,
+			expectedRequestBody: `{"default":true,"description":"vpc description","name":"my-new-vpc"}`,
+			expectedUpdatedVPC:  vTestObj,
+		},
 	}
-	jsonBlob := `
-{
-  "vpc":
-` + vTestJSON + `
-}
-`
 
-	mux.HandleFunc(path+"/"+id, func(w http.ResponseWriter, r *http.Request) {
-		c := new(setRequest)
-		err := json.NewDecoder(r.Body).Decode(c)
-		if err != nil {
-			t.Fatal(err)
-		}
+	for _, tt := range tests {
+		setup()
 
-		testMethod(t, r, http.MethodPatch)
-		require.Equal(t, c, req)
-		fmt.Fprint(w, jsonBlob)
-	})
+		mux.HandleFunc("/v2/vpcs/"+tt.id, func(w http.ResponseWriter, r *http.Request) {
+			buf := new(bytes.Buffer)
+			buf.ReadFrom(r.Body)
+			require.Equal(t, tt.expectedRequestBody, strings.TrimSpace(buf.String()))
 
-	got, _, err := svc.Set(ctx, id, VPCSetName(name), VPCSetDescription(desc))
-	require.NoError(t, err)
-	require.Equal(t, want, got)
+			v := new(VPCUpdateRequest)
+			err := json.NewDecoder(buf).Decode(v)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			testMethod(t, r, http.MethodPatch)
+			fmt.Fprint(w, tt.mockResponse)
+		})
+
+		got, _, err := client.VPCs.Set(ctx, tt.id, tt.updateFields...)
+
+		teardown()
+
+		require.NoError(t, err)
+		require.Equal(t, tt.expectedUpdatedVPC, got)
+	}
 }
 
 func TestVPCs_Delete(t *testing.T) {
@@ -233,3 +320,117 @@ func TestVPCs_Delete(t *testing.T) {
 	_, err := svc.Delete(ctx, id)
 	require.NoError(t, err)
 }
+
+func TestVPCs_ListMembers(t *testing.T) {
+	tests := []struct {
+		desc          string
+		expectedQuery string
+		query         *VPCListMembersRequest
+		resp          string
+		want          []*VPCMember
+	}{
+		{
+			desc:          "list all members",
+			expectedQuery: "",
+			query:         nil,
+			resp: `{
+		"members": [
+			{
+				"urn": "do:loadbalancer:fb294d78-d193-4cb2-8737-ea620993591b",
+				"name": "nyc1-load-balancer-01",
+				"created_at": "2020-03-16T19:30:48Z"
+			},
+			{
+				"urn": "do:dbaas:13f7a2f6-43df-4c4a-8129-8733267ddeea",
+				"name": "db-postgresql-nyc1-55986",
+				"created_at": "2020-03-15T19:30:48Z"
+			},
+			{
+				"urn": "do:kubernetes:da39d893-96e1-4e4d-971d-1fdda33a46b1",
+				"name": "k8s-nyc1-1584127772221",
+				"created_at": "2020-03-14T19:30:48Z"
+			},
+			{
+				"urn": "do:droplet:86e29982-03a7-4946-8a07-a0114dff8754",
+				"name": "ubuntu-s-1vcpu-1gb-nyc1-01",
+				"created_at": "2020-03-13T19:30:48Z"
+			}
+		],
+		"links": {
+		},
+		"meta": {
+			"total": 4
+		}
+		}`,
+			want: []*VPCMember{
+				{
+					URN:       "do:loadbalancer:fb294d78-d193-4cb2-8737-ea620993591b",
+					Name:      "nyc1-load-balancer-01",
+					CreatedAt: time.Date(2020, 3, 16, 19, 30, 48, 0, time.UTC),
+				},
+				{
+					URN:       "do:dbaas:13f7a2f6-43df-4c4a-8129-8733267ddeea",
+					Name:      "db-postgresql-nyc1-55986",
+					CreatedAt: time.Date(2020, 3, 15, 19, 30, 48, 0, time.UTC),
+				},
+				{
+					URN:       "do:kubernetes:da39d893-96e1-4e4d-971d-1fdda33a46b1",
+					Name:      "k8s-nyc1-1584127772221",
+					CreatedAt: time.Date(2020, 3, 14, 19, 30, 48, 0, time.UTC),
+				},
+				{
+					URN:       "do:droplet:86e29982-03a7-4946-8a07-a0114dff8754",
+					Name:      "ubuntu-s-1vcpu-1gb-nyc1-01",
+					CreatedAt: time.Date(2020, 3, 13, 19, 30, 48, 0, time.UTC),
+				},
+			},
+		},
+		{
+			desc:          "list droplet members",
+			expectedQuery: "droplet",
+			query:         &VPCListMembersRequest{ResourceType: "droplet"},
+			resp: `{
+		"members": [
+			{
+				"urn": "do:droplet:86e29982-03a7-4946-8a07-a0114dff8754",
+				"name": "ubuntu-s-1vcpu-1gb-nyc1-01",
+				"created_at": "2020-03-13T19:30:48Z"
+			}
+		],
+		"links": {
+		},
+		"meta": {
+			"total": 1
+		}
+		}`,
+			want: []*VPCMember{
+				{
+					URN:       "do:droplet:86e29982-03a7-4946-8a07-a0114dff8754",
+					Name:      "ubuntu-s-1vcpu-1gb-nyc1-01",
+					CreatedAt: time.Date(2020, 3, 13, 19, 30, 48, 0, time.UTC),
+				},
+			},
+		},
+	}
+
+	id := "880b7f98-f062-404d-b33c-458d545696f6"
+	path := "/v2/vpcs/" + id + "/members"
+
+	for _, tt := range tests {
+		t.Run(tt.desc, func(t *testing.T) {
+			setup()
+			defer teardown()
+
+			mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
+				testMethod(t, r, http.MethodGet)
+				require.Equal(t, tt.expectedQuery, r.URL.Query().Get("resource_type"))
+				fmt.Fprint(w, tt.resp)
+			})
+
+			got, _, err := client.VPCs.ListMembers(ctx, id, tt.query, nil)
+
+			require.NoError(t, err)
+			require.Equal(t, tt.want, got)
+		})
+	}
+}

More details

Full run details

Historical runs