diff --git a/.gitignore b/.gitignore
index 6c36532e9..e3c7bb441 100644
--- a/.gitignore
+++ b/.gitignore
@@ -18,4 +18,5 @@ wrangler.jsonc
!.husky/pre-commit
.smoke
-.svelte-kit
\ No newline at end of file
+.svelte-kit
+
diff --git a/CHANGELOG.md b/CHANGELOG.md
index faf447a8a..a9b0bdfdd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,182 @@
+## v0.41.2
+
+### 🚀 Features
+
+- **cloudflare**: Cloudflare Advanced Certificate Pack resource - by **John Royal** in https://github.com/sam-goodwin/alchemy/issues/487 [(b3a2f)](https://github.com/sam-goodwin/alchemy/commit/b3a2f425)
+
+##### [View changes on GitHub](https://github.com/sam-goodwin/alchemy/compare/v0.41.1...v0.41.2)
+
+---
+
+## v0.41.1
+
+### 🐞 Bug Fixes
+
+- **cloudflare**: Consistent ports for miniflare dev server - by **John Royal** in https://github.com/sam-goodwin/alchemy/issues/496 [(5564c)](https://github.com/sam-goodwin/alchemy/commit/5564cb91)
+
+##### [View changes on GitHub](https://github.com/sam-goodwin/alchemy/compare/v0.41.0...v0.41.1)
+
+---
+
+## v0.41.0
+
+### 🚀 Features
+
+- **cloudflare**: Containers - by **Sam Goodwin** in https://github.com/sam-goodwin/alchemy/issues/476 [(43e88)](https://github.com/sam-goodwin/alchemy/commit/43e88e95)
+- **github**: Add repo webhook resource - by **Justin Bennett** in https://github.com/sam-goodwin/alchemy/issues/477 [(2c997)](https://github.com/sam-goodwin/alchemy/commit/2c997d6f)
+
+##### [View changes on GitHub](https://github.com/sam-goodwin/alchemy/compare/v0.40.1...v0.41.0)
+
+---
+
+## v0.40.1
+
+*No significant changes*
+
+##### [View changes on GitHub](https://github.com/sam-goodwin/alchemy/compare/v0.40.0...v0.40.1)
+
+---
+
+## v0.40.0
+
+### 🚀 Features
+
+- **cloudflare**: Miniflare dev server - by **John Royal** in https://github.com/sam-goodwin/alchemy/issues/396 [(3d219)](https://github.com/sam-goodwin/alchemy/commit/3d21941c)
+
+### 🐞 Bug Fixes
+
+- **cloudflare**: DOStateStore undefined fix - by **John Royal** in https://github.com/sam-goodwin/alchemy/issues/480 [(7d909)](https://github.com/sam-goodwin/alchemy/commit/7d9095e0)
+
+##### [View changes on GitHub](https://github.com/sam-goodwin/alchemy/compare/v0.39.1...v0.40.0)
+
+---
+
+## v0.39.1
+
+### 🐞 Bug Fixes
+
+- **core**: Stage scope not being adopted - by **Michael K** in https://github.com/sam-goodwin/alchemy/issues/469 [(b949a)](https://github.com/sam-goodwin/alchemy/commit/b949aade)
+
+##### [View changes on GitHub](https://github.com/sam-goodwin/alchemy/compare/v0.39.0...v0.39.1)
+
+---
+
+## v0.39.0
+
+### 🚀 Features
+
+- **cloudflare**: Support Worker.domains for custom domains - by **Sam Goodwin** in https://github.com/sam-goodwin/alchemy/issues/468 [(7a357)](https://github.com/sam-goodwin/alchemy/commit/7a357763)
+
+##### [View changes on GitHub](https://github.com/sam-goodwin/alchemy/compare/v0.38.1...v0.39.0)
+
+---
+
+## v0.38.1
+
+### 🐞 Bug Fixes
+
+- **cloudflare**: Do state store fails to upload - by **John Royal** in https://github.com/sam-goodwin/alchemy/issues/465 [(ca966)](https://github.com/sam-goodwin/alchemy/commit/ca966235)
+
+##### [View changes on GitHub](https://github.com/sam-goodwin/alchemy/compare/v0.38.0...v0.38.1)
+
+---
+
+## v0.38.0
+
+### 🚀 Features
+
+- **cli**:
+ - Complete cli overhaul with trpc-cli, zod, and clack/prompts - by **Aman Varshney** in https://github.com/sam-goodwin/alchemy/issues/405 [(dea9e)](https://github.com/sam-goodwin/alchemy/commit/dea9ed1e)
+- **cloudflare**:
+ - Pin default worker compatibility date to build time - by **Sam Goodwin** in https://github.com/sam-goodwin/alchemy/issues/460 [(10035)](https://github.com/sam-goodwin/alchemy/commit/100355b0)
+ - Add URL support to WorkerStub - by **Sam Goodwin** in https://github.com/sam-goodwin/alchemy/issues/464 [(4fda9)](https://github.com/sam-goodwin/alchemy/commit/4fda99da)
+- **core**:
+ - Replace Resource - by **Michael K** in https://github.com/sam-goodwin/alchemy/issues/417 [(27133)](https://github.com/sam-goodwin/alchemy/commit/271331e1)
+
+##### [View changes on GitHub](https://github.com/sam-goodwin/alchemy/compare/v0.37.2...v0.38.0)
+
+---
+
+## v0.37.2
+
+### 🐞 Bug Fixes
+
+- **cloudflare**: Defensively resolve __dirname and worker.ts > worker.js in DOStateStore - by **Sam Goodwin** in https://github.com/sam-goodwin/alchemy/issues/452 [(c63fd)](https://github.com/sam-goodwin/alchemy/commit/c63fdd60)
+
+##### [View changes on GitHub](https://github.com/sam-goodwin/alchemy/compare/v0.37.1...v0.37.2)
+
+---
+
+## v0.37.1
+
+### 🚀 Features
+
+- **cloudflare**: Relax Durable Object RPC type constraint - by **Sam Goodwin** in https://github.com/sam-goodwin/alchemy/issues/445 [(107e7)](https://github.com/sam-goodwin/alchemy/commit/107e79de)
+
+### 🐞 Bug Fixes
+
+- **cloudflare**: DOStateStore init uploads a worker and not a version - by **Sam Goodwin** in https://github.com/sam-goodwin/alchemy/issues/447 [(30cc6)](https://github.com/sam-goodwin/alchemy/commit/30cc6424)
+
+##### [View changes on GitHub](https://github.com/sam-goodwin/alchemy/compare/v0.37.0...v0.37.1)
+
+---
+
+## v0.37.0
+
+### 🚀 Features
+
+- **cloudflare**: Add `run_worker_first: string[]` option - by **Rahul Mishra** in https://github.com/sam-goodwin/alchemy/issues/440 [(d4b0d)](https://github.com/sam-goodwin/alchemy/commit/d4b0de34)
+- **stripe**: Price meter support - by **Nick Balestra-Foster** in https://github.com/sam-goodwin/alchemy/issues/410 [(9315d)](https://github.com/sam-goodwin/alchemy/commit/9315d742)
+
+### 🐞 Bug Fixes
+
+- **cloudflare**:
+ - Adopt DO that have migration tags - by **Sam Goodwin** in https://github.com/sam-goodwin/alchemy/issues/437 [(bcbd7)](https://github.com/sam-goodwin/alchemy/commit/bcbd7fdb)
+ - Website resource respects cwd prop for wrangler.jsonc placement - by **John Royal** in https://github.com/sam-goodwin/alchemy/issues/443 [(bef17)](https://github.com/sam-goodwin/alchemy/commit/bef17985)
+
+##### [View changes on GitHub](https://github.com/sam-goodwin/alchemy/compare/v0.36.0...v0.37.0)
+
+---
+
+## v0.36.0
+
+### 🚀 Features
+
+- **docker**: Docker provider - by **Pavitra Golchha** in https://github.com/sam-goodwin/alchemy/issues/189 [(6f973)](https://github.com/sam-goodwin/alchemy/commit/6f973983)
+
+### 🐞 Bug Fixes
+
+- **cli**: Improve package manager handling in create-alchemy - by **Nico Baier** in https://github.com/sam-goodwin/alchemy/issues/423 [(d0c7c)](https://github.com/sam-goodwin/alchemy/commit/d0c7ce83)
+- **core**: Allow colors in CI environments, only disable for NO_COLOR - by **Sam Goodwin** in https://github.com/sam-goodwin/alchemy/issues/429 [(a194a)](https://github.com/sam-goodwin/alchemy/commit/a194ab5a)
+
+##### [View changes on GitHub](https://github.com/sam-goodwin/alchemy/compare/v0.35.1...v0.36.0)
+
+---
+
+## v0.35.1
+
+### 🐞 Bug Fixes
+
+- **fs**: Better support for windows file system - by **Michael K** in https://github.com/sam-goodwin/alchemy/issues/430 [(8dd9f)](https://github.com/sam-goodwin/alchemy/commit/8dd9f196)
+
+##### [View changes on GitHub](https://github.com/sam-goodwin/alchemy/compare/v0.35.0...v0.35.1)
+
+---
+
+## v0.35.0
+
+### 🐞 Bug Fixes
+
+- **cloudflare**:
+ - Set force=true when deleting a Worker - by **Sam Goodwin** in https://github.com/sam-goodwin/alchemy/issues/432 [(2b21d)](https://github.com/sam-goodwin/alchemy/commit/2b21d41e)
+ - Call wfp endpoint when deleting workers - by **Sam Goodwin** in https://github.com/sam-goodwin/alchemy/issues/434 [(d109d)](https://github.com/sam-goodwin/alchemy/commit/d109d984)
+- **core**:
+ - Ensure alchemy providers are globally registered - by **Sam Goodwin** in https://github.com/sam-goodwin/alchemy/issues/433 [(b0528)](https://github.com/sam-goodwin/alchemy/commit/b05284f6)
+
+##### [View changes on GitHub](https://github.com/sam-goodwin/alchemy/compare/v0.34.3...v0.35.0)
+
+---
+
## v0.34.3
### 🚀 Features
diff --git a/alchemy-web/docs/concepts/adoption.md b/alchemy-web/docs/concepts/adoption.md
index 3aebb3146..c3d611d14 100644
--- a/alchemy-web/docs/concepts/adoption.md
+++ b/alchemy-web/docs/concepts/adoption.md
@@ -1,5 +1,5 @@
---
-order: 10
+order: 11
title: Adoption
description: Learn how to adopt existing infrastructure with Alchemy resources instead of failing when resources already exist.
---
diff --git a/alchemy-web/docs/concepts/destroy.md b/alchemy-web/docs/concepts/destroy.md
index e91594d5c..2c423637a 100644
--- a/alchemy-web/docs/concepts/destroy.md
+++ b/alchemy-web/docs/concepts/destroy.md
@@ -68,3 +68,8 @@ if (this.phase === "delete") {
return this.destroy();
}
```
+
+## Related Concepts
+
+- **[Replace](./replace.md)** - How to replace resources that cannot be updated
+
diff --git a/alchemy-web/docs/concepts/replace.md b/alchemy-web/docs/concepts/replace.md
new file mode 100644
index 000000000..4d0916ca1
--- /dev/null
+++ b/alchemy-web/docs/concepts/replace.md
@@ -0,0 +1,57 @@
+---
+order: 10
+title: Replace
+description: Learn how to safely replace infrastructure resources with Alchemy. Understand the risks and best practices for resource replacement.
+---
+
+# Replace
+
+It is sometimes impossible to update a resource in place; for example, you cannot rename an R2 Bucket.
+In these cases, you need to perform a replace operation to:
+
+1. create a new version of the Resource and update references
+2. delete the old version of the Resource (or leave it orphaned)
+
+## Trigger Replacement
+
+During the **update phase**, you can trigger a replacement by calling `this.replace()`:
+
+```typescript
+// Implementation pattern
+if (this.phase === "update") {
+ if (this.output.name !== props.name) {
+ // trigger replace and terminate this `"update"` phase
+ this.replace();
+ // (unreachable code)
+ } else {
+ return updateResource();
+ }
+}
+```
+
+## Create new
+
+After you call `this.replace()`, the `"update"` phase will terminate and be re-invoked with `"create"` (to create the new resource).
+
+```ts
+if (this.phase === "create") {
+ return createNewResource();
+}
+```
+
+## Delete old
+
+After all downstream dependencies have been updated and you finally call `app.finalize()`, Alchemy will then invoke the `"delete"` phase on the old resource.
+
+```ts
+const app = await alchemy("app");
+
+// ... create resources
+
+await app.finalize(); // finalize scopes by deleting "orphaned" and "replaced" resources
+```
+
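+On the provider side, the replaced resource is deleted through the normal `"delete"` phase; a sketch mirroring the handler shown in [Destroy](./destroy.md) (the cleanup helper here is hypothetical):
+
+```ts
+if (this.phase === "delete") {
+  await deleteOldResource(); // hypothetical helper that removes the old version
+  return this.destroy();
+}
+```
+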
+## Related Concepts
+
+- **[Destroy](./destroy.md)** - How to destroy resources
+- **[Scope](./scope.md)** - Scope lifecycle
diff --git a/alchemy-web/docs/guides/cloudflare-worker.md b/alchemy-web/docs/guides/cloudflare-worker.md
index f328eedfd..c5716eeb3 100644
--- a/alchemy-web/docs/guides/cloudflare-worker.md
+++ b/alchemy-web/docs/guides/cloudflare-worker.md
@@ -175,6 +175,32 @@ const frontend = await Worker("frontend", {
});
```
+## Self-Binding
+
+A worker can bind to itself using `Self` or `WorkerRef`:
+
+```ts
+import { Worker, Self, WorkerRef } from "alchemy/cloudflare";
+
+// Using Self
+const workerWithSelf = await Worker("my-worker", {
+ name: "my-worker",
+ entrypoint: "./src/worker.ts",
+ bindings: {
+ SELF: Self,
+ },
+});
+
+// Using WorkerRef with the worker's own ID
+const workerWithRef = await Worker("my-worker", {
+ name: "my-worker",
+ entrypoint: "./src/worker.ts",
+ bindings: {
+ SELF: WorkerRef("my-worker"),
+ },
+});
+```
+
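+Inside your Worker code, the self-binding behaves like a service binding back to the same Worker. A minimal sketch, assuming the `SELF` binding above is exposed as a `Fetcher` (types from `@cloudflare/workers-types`):
+
+```ts
+// src/worker.ts (sketch)
+export default {
+  async fetch(request: Request, env: { SELF: Fetcher }): Promise<Response> {
+    const url = new URL(request.url);
+    if (url.pathname === "/outer") {
+      // call back into this same worker via the self binding
+      return env.SELF.fetch(new URL("/inner", url).toString());
+    }
+    return new Response(`handled ${url.pathname}`);
+  },
+};
+```
+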
## Circular Worker Bindings
When workers need to bind to each other (circular dependency), use `WorkerStub` to break the cycle:
@@ -331,6 +357,26 @@ const worker = await Worker("api", {
> [!TIP]
> See the [Route](../providers/cloudflare/route.md) for more information.
+## Custom Domains
+
+Bind custom domains directly to your worker for a simpler routing setup:
+
+```ts
+import { Worker } from "alchemy/cloudflare";
+
+const worker = await Worker("api", {
+ name: "api-worker",
+ entrypoint: "./src/api.ts",
+ domains: ["api.example.com", "admin.example.com"],
+});
+
+// Access the created domains
+console.log(worker.domains); // Array of created CustomDomain resources
+```
+
+> [!TIP]
+> See the [Routes and Domains](https://developers.cloudflare.com/workers/configuration/routing/#what-is-best-for-me) Cloudflare docs to help decide when to use a Route vs. a Domain.
+
## Workers for Platforms
Deploy workers to dispatch namespaces for multi-tenant architectures using Cloudflare's Workers for Platforms:
diff --git a/alchemy-web/docs/guides/dev.md b/alchemy-web/docs/guides/dev.md
new file mode 100644
index 000000000..d7ac3aab9
--- /dev/null
+++ b/alchemy-web/docs/guides/dev.md
@@ -0,0 +1,174 @@
+---
+order: 7
+title: Development Mode
+description: Learn how to use Alchemy's development mode to run your application locally.
+---
+
+# Development Mode (Beta)
+
+Alchemy's development mode provides a powerful local development experience for Cloudflare Workers, featuring hot reloading, local resource emulation, and seamless integration with remote Cloudflare services.
+
+> **Note:** Development mode is currently in beta. Some features may not work as expected.
+
+## Overview
+
+To run Alchemy in development mode, use the `--dev` flag when running your `alchemy.run.ts` script:
+
+```bash
+bun run alchemy.run.ts --dev
+npx tsx alchemy.run.ts --dev
+```
+
+This starts Alchemy in development mode, which will:
+
+- Emulate Cloudflare Workers and associated resources locally using Miniflare
+- Hot reload Workers when you make changes to your code
+
+### Watching Your Alchemy Configuration
+
+Alchemy does not watch your `alchemy.run.ts` file for changes. To automatically apply changes to your configuration, you can use the watch mode associated with your runtime. For example:
+
+```bash
+# Using bun's watch mode
+bun run --watch alchemy.run.ts
+
+# Using Node.js watch mode
+npx tsx --watch alchemy.run.ts
+```
+
+Development mode is enabled automatically when the `--watch` flag is detected.
+
+### Programmatic Configuration
+
+You can also enable dev mode programmatically by setting the `dev` option:
+
+```typescript
+const app = await alchemy("my-app", {
+ dev: true
+});
+```
+
+## Configuration
+
+When running in dev mode, Alchemy runs your Cloudflare Workers locally using Miniflare; each Worker is served on a randomly selected port. You can specify the port by setting the `port` property on the `Worker` resource:
+
+```typescript
+const worker = await Worker("my-worker", {
+ entrypoint: "worker.ts",
+ dev: {
+ port: 3000
+ }
+});
+
+console.log(worker.url); // http://localhost:3000
+```
+
+## Website Development
+
+When using the `Website` resource in development mode, you can specify a custom development command that Alchemy will run locally:
+
+```typescript
+const website = await Website("my-website", {
+ dev: {
+ command: "npm run dev",
+ url: "http://localhost:5173",
+ }
+});
+```
+
+If no command is specified, Alchemy will automatically detect and run the appropriate dev command based on your project's package manager:
+
+- **bun**: `bun vite dev`
+- **npm**: `npx vite dev`
+- **pnpm**: `pnpm vite dev`
+- **yarn**: `yarn vite dev`
+
+### Vite Integration
+
+For projects using Vite, Alchemy integrates with the [Cloudflare Vite plugin](https://developers.cloudflare.com/workers/development-testing/vite/) to provide enhanced local development capabilities. This integration enables better support for certain binding types when running locally.
+
+To enable Vite integration, configure your `vite.config.ts` with the Cloudflare plugin:
+
+```typescript
+import { cloudflare } from "@cloudflare/vite-plugin";
+import { defineConfig } from "vite";
+
+export default defineConfig({
+ plugins: [
+ cloudflare({
+ persistState: process.env.ALCHEMY_CLOUDFLARE_PERSIST_PATH
+ ? {
+ path: process.env.ALCHEMY_CLOUDFLARE_PERSIST_PATH,
+ }
+ : undefined,
+ }),
+ ],
+});
+```
+
+The Vite integration provides improved support for the following binding types (marked with ✅ in the "Vite" column of the supported resources table below).
+
+## Bindings
+
+By default, Alchemy emulates resources such as [D1 Databases](../providers/cloudflare/d1-database.md), [KV Namespaces](../providers/cloudflare/kv-namespace.md), and [R2 Buckets](../providers/cloudflare/bucket.md) locally.
+
+Alchemy also supports [remote bindings](https://developers.cloudflare.com/workers/development-testing/#remote-bindings) for select resources. For resources that allow either local or remote execution, you can set the `dev` property on the resource to `{ remote: true }`:
+
+```typescript
+const db = await D1Database("my-db", {
+ dev: { remote: true }
+});
+
+const kv = await KVNamespace("my-kv", {
+ dev: { remote: true }
+});
+
+const r2 = await R2Bucket("my-r2", {
+ dev: { remote: true }
+});
+```
+
+Some resources only support remote execution, such as [AI Gateways](../providers/cloudflare/ai-gateway.md). These resources will automatically be run remotely, so usage will be billed the same as if you were running them in production.
+
+### Supported Resources
+
+The following bindings are supported in dev mode:
+
+| Resource | Local | Remote | Vite |
+|----------|-------|--------|------|
+| AI | ❌ | ✅ | ❌ |
+| Analytics Engine | ✅ | ❌ | ❌ |
+| Assets | ✅ | ❌ | ❌ |
+| Browser Rendering | ❌ | ✅ | ❌ |
+| D1 Database | ✅ | ✅ | ✅ |
+| Dispatch Namespace | ❌ | ✅ | ❌ |
+| Durable Object Namespace | ✅ | ❌ | ❌ |
+| Hyperdrive | ✅ | ❌ | ❌ |
+| Images | ✅ | ✅ | ❌ |
+| JSON | ✅ | ❌ | ❌ |
+| KV Namespace | ✅ | ✅ | ✅ |
+| Pipeline | ✅ | ❌ | ❌ |
+| Queue | ✅ | ✅ | ❌ |
+| R2 Bucket | ✅ | ✅ | ✅ |
+| Secret | ✅ | ❌ | ❌ |
+| Secret Key | ❌ | ❌ | ❌ |
+| Service | ✅ | ✅ | ❌ |
+| Vectorize Index | ❌ | ✅ | ❌ |
+| Version Metadata | ✅ | ❌ | ❌ |
+| Workflow | ✅ | ❌ | ❌ |
+| Text | ✅ | ❌ | ❌ |
+
+## Limitations
+
+- Hot reloading for Workers is only supported when the `entrypoint` property is set. To hot reload an inline script, you must use an external watcher to monitor your `alchemy.run.ts` file.
+- Local Workers can push to remote queues, but cannot consume from them.
+- Hyperdrive support is experimental. Hyperdrive configurations that use Cloudflare Access are not supported, and only configurations provisioned in the same `alchemy.run.ts` file will work. This is a [limitation from Cloudflare that is actively being worked on](https://developers.cloudflare.com/workers/development-testing/#unsupported-remote-bindings).
+
+## Best Practices
+
+1. **Use local resources for development** - Faster iteration and no API costs
+2. **Test with remote resources** - Validate integration before deployment
+3. **Leverage hot reloading** - Use entrypoint files for automatic rebuilds
+4. **Monitor build output** - Watch for compilation errors and warnings
+5. **Configure Worker ports explicitly** - Avoid conflicts in multi-worker setups
+6. **Use external watchers** - For automatic restarts when configuration changes
diff --git a/alchemy-web/docs/providers/cloudflare/certificate-pack.md b/alchemy-web/docs/providers/cloudflare/certificate-pack.md
new file mode 100644
index 000000000..7de91adbd
--- /dev/null
+++ b/alchemy-web/docs/providers/cloudflare/certificate-pack.md
@@ -0,0 +1,205 @@
+---
+title: Cloudflare Certificate Pack
+description: Learn how to create and manage Cloudflare Advanced Certificate Packs for flexible SSL/TLS certificates with multiple Certificate Authorities and custom configurations.
+---
+
+# Certificate Pack
+
+The Certificate Pack resource lets you create and manage [Cloudflare Advanced Certificate Packs](https://developers.cloudflare.com/api/resources/ssl/subresources/certificate_packs/) for flexible SSL/TLS certificates with multiple Certificate Authority options.
+
+**Important Requirements:**
+- **Advanced Certificate Manager (ACM) must be activated:** Before using Certificate Packs, you must activate ACM in your Cloudflare dashboard. Navigate to your domain > **SSL/TLS** > **Edge Certificates** and click **Activate** for Advanced Certificate Manager. This requires a $10/month subscription per domain.
+- Requires a paid Cloudflare plan (not available on Free plans)
+- Certificate provisioning can take up to 10 minutes
+- Most properties are immutable after creation (only `cloudflareBranding` can be updated)
+
+## Basic Example
+
+Create a basic certificate pack with Let's Encrypt for your domain.
+
+```ts
+import { Zone, CertificatePack } from "alchemy/cloudflare";
+
+const zone = await Zone("my-zone", {
+ name: "example.com",
+});
+
+const basicCert = await CertificatePack("my-cert", {
+ zone: zone,
+ certificateAuthority: "lets_encrypt",
+ hosts: ["example.com", "www.example.com"],
+ validationMethod: "txt",
+ validityDays: 90,
+});
+```
+
+## Enterprise Certificate with Google Trust Services
+
+Create an enterprise-grade certificate with Google Trust Services and Cloudflare branding.
+
+```ts
+const enterpriseCert = await CertificatePack("enterprise-cert", {
+ zone: "example.com", // Can use zone ID string or Zone resource
+ certificateAuthority: "google",
+ hosts: ["example.com", "*.example.com", "api.example.com"],
+ validationMethod: "txt",
+ validityDays: 365,
+ cloudflareBranding: true,
+});
+```
+
+## Wildcard Certificate with SSL.com
+
+Create a wildcard certificate using SSL.com with email validation.
+
+```ts
+const wildcardCert = await CertificatePack("wildcard-cert", {
+ zone: myZone,
+ certificateAuthority: "ssl_com",
+ hosts: ["example.com", "*.example.com"],
+ validationMethod: "email",
+ validityDays: 365,
+});
+```
+
+## Multi-Domain Certificate
+
+Create a certificate covering multiple subdomains with Let's Encrypt.
+
+```ts
+const multiDomainCert = await CertificatePack("multi-cert", {
+ zone: "example.com",
+ certificateAuthority: "lets_encrypt",
+ hosts: [
+ "example.com",
+ "www.example.com",
+ "api.example.com",
+ "admin.example.com",
+ "blog.example.com"
+ ],
+ validationMethod: "http",
+ validityDays: 90,
+});
+```
+
+## Properties
+
+| Property | Type | Required | Description |
+|----------|------|----------|-------------|
+| `zone` | `string \| Zone` | Yes | Zone resource or zone ID where the certificate will be created |
+| `certificateAuthority` | `"google" \| "lets_encrypt" \| "ssl_com"` | Yes | Certificate Authority to use for issuing the certificate |
+| `hosts` | `string[]` | Yes | List of hostnames (max 50, must include zone apex) |
+| `validationMethod` | `"txt" \| "http" \| "email"` | Yes | Domain ownership validation method |
+| `validityDays` | `14 \| 30 \| 90 \| 365` | Yes | Certificate validity period in days |
+| `cloudflareBranding` | `boolean` | No | Add Cloudflare branding subdomain as Common Name (default: false) |
+| `type` | `"advanced"` | No | Certificate type (only "advanced" supported, default: "advanced") |
+| `delete` | `boolean` | No | Whether to delete the certificate pack on destroy (default: true) |
+
+## Certificate Authorities
+
+### Let's Encrypt (`lets_encrypt`)
+- **Cost:** Free
+- **Best for:** Basic SSL needs, development environments
+- **Validity:** Shorter periods (14, 30, 90 days)
+- **Features:** Standard domain validation
+
+### Google Trust Services (`google`)
+- **Cost:** Paid
+- **Best for:** Enterprise applications, production environments
+- **Validity:** Up to 365 days
+- **Features:** Enhanced validation, enterprise support
+
+### SSL.com (`ssl_com`)
+- **Cost:** Commercial
+- **Best for:** Commercial applications requiring extended validation
+- **Validity:** Up to 365 days
+- **Features:** Extended validation options, commercial support
+
+## Validation Methods
+
+### TXT Record (`txt`)
+- Add DNS TXT record to prove domain ownership
+- Most reliable method for automation
+- Works with all domain configurations
+
+### HTTP File (`http`)
+- Upload verification file to domain's web server
+- Requires web server access
+- Good for domains with existing websites
+
+### Email (`email`)
+- Receive validation email at admin addresses
+- Requires access to domain admin email
+- Manual validation process
+
+## Important Notes
+
+### Immutable Properties
+Most certificate pack properties cannot be changed after creation. To modify these properties, you must delete and recreate the certificate pack:
+
+- Certificate Authority (`certificateAuthority`)
+- Hostnames (`hosts`)
+- Validation Method (`validationMethod`)
+- Validity Period (`validityDays`)
+- Type (`type`)
+
+### Updateable Properties
+Only `cloudflareBranding` can be updated after creation:
+
+```ts
+// Update to enable Cloudflare branding
+const updatedCert = await CertificatePack("my-cert", {
+ zone: zone,
+ certificateAuthority: "lets_encrypt", // Must match original
+ hosts: ["example.com", "www.example.com"], // Must match original
+ validationMethod: "txt", // Must match original
+ validityDays: 90, // Must match original
+ cloudflareBranding: true, // Only this can change
+});
+```
+
+### Host Requirements
+- Maximum 50 hostnames per certificate pack
+- Must include the zone apex (root domain)
+- Supports wildcards (e.g., `*.example.com`)
+- Cannot be empty
+
+### Provisioning Time
+- Certificate packs take time to provision and become active
+- Full deployment can take up to 10 minutes
+- Monitor the `status` property to track progress (see the sketch below)
+
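+For example, the resource's `status` output can be inspected after creation (a sketch reusing the basic example above; possible values are listed under "Status Values" below):
+
+```ts
+const cert = await CertificatePack("my-cert", {
+  zone: "example.com",
+  certificateAuthority: "lets_encrypt",
+  hosts: ["example.com", "www.example.com"],
+  validationMethod: "txt",
+  validityDays: 90,
+});
+
+console.log(cert.status); // e.g. "initializing" or "pending_validation"
+```
+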
+### Subscription Requirements
+Advanced Certificate Packs require a paid Cloudflare plan. Free plans cannot create certificate packs and will receive subscription-related errors.
+
+## Status Values
+
+Certificate packs progress through various status values during their lifecycle:
+
+- `initializing` - Certificate pack creation in progress
+- `pending_validation` - Waiting for domain validation
+- `pending_issuance` - Certificate being issued by CA
+- `pending_deployment` - Certificate being deployed to edge
+- `active` - Certificate is live and serving traffic
+- `expired` - Certificate has expired
+- `deleted` - Certificate pack has been deleted
+
+Error states include `*_timed_out` variants when operations exceed time limits.
+
+## Helper Functions
+
+### Wait for Certificate to Become Active
+
+```ts
+import { waitForCertificatePackActive } from "alchemy/cloudflare/certificate-pack";
+
+// Wait for certificate to become active (up to 10 minutes)
+const finalStatus = await waitForCertificatePackActive(
+ api,
+ zone.id,
+ certificatePack.id,
+ 10 * 60 * 1000 // 10 minutes timeout
+);
+
+console.log(`Certificate pack is now: ${finalStatus}`);
+```
\ No newline at end of file
diff --git a/alchemy-web/docs/providers/cloudflare/container.md b/alchemy-web/docs/providers/cloudflare/container.md
new file mode 100644
index 000000000..b0abb405a
--- /dev/null
+++ b/alchemy-web/docs/providers/cloudflare/container.md
@@ -0,0 +1,142 @@
+---
+title: Container
+description: Deploy Docker containers on Cloudflare's global network
+---
+
+# Container
+
+A Container is a Docker image running in Cloudflare's global network, managed by a Cloudflare Durable Object.
+
+> [!CAUTION]
+> Cloudflare Containers is still in [Beta](https://blog.cloudflare.com/containers-are-available-in-public-beta-for-simple-global-and-programmable/).
+
+You'll need:
+
+1. a `Dockerfile` for your Container
+2. an `alchemy.run.ts` to deploy to Cloudflare
+3. a `MyContainer` class to own a running Container Instance
+4. a `worker.ts` that exports `fetch` and routes requests to Container Instances
+
+## Container Class
+
+A Container's lifecycle is managed by a Durable Object class that you define.
+
+We recommend using the `Container` class from `@cloudflare/containers` since it takes care of the basic container lifecycle for you:
+
+```ts
+import { Container } from "@cloudflare/containers";
+import type { worker } from "../alchemy.run.ts";
+
+export class MyContainer extends Container {
+ declare env: typeof worker.Env;
+
+ defaultPort = 8080; // The default port for the container to listen on
+ sleepAfter = "3m"; // Sleep the container if no requests are made in this timeframe
+
+ envVars = {
+ MESSAGE: "I was passed in via the container class!",
+ };
+
+ override onStart() {
+ console.log("Container successfully started");
+ }
+
+ override onStop() {
+ console.log("Container successfully shut down");
+ }
+
+ override onError(error: unknown) {
+ console.log("Container error:", error);
+ }
+}
+```
+
+## Container Resource
+
+Now, create a `Container` Resource in your `alchemy.run.ts` file and connect it to your `MyContainer` class:
+
+```ts
+import { Container, Worker } from "alchemy/cloudflare";
+import { Image } from "alchemy/docker";
+// import the type of your Container's implementation
+import type { MyContainer } from "./src/container.ts";
+
+const container = await Container("my-container", {
+  className: "MyContainer", // <- must match the exported class name imported above
+});
+```
+
+This will build your Dockerfile and prepare it for publishing to Cloudflare's Image Registry.
+
+> [!TIP]
+> The default behavior is effectively `docker build . -t my-container` but you can customize the configuration:
+>
+> ```ts
+> const container = await Container("my-container", {
+> className: "MyContainer",
+> name: "your-container",
+> tag: "some-tag",
+> build: {
+> context: import.meta.dir,
+> dockerfile: "Dockerfile.dev",
+> },
+> });
+> ```
+
+## Bind to Worker
+
+To deploy the `Container` to Cloudflare, you need to bind it to a `Worker`:
+
+```ts
+export const worker = await Worker("my-worker", {
+ name: "my-worker",
+ entrypoint: "./src/worker.ts",
+ bindings: {
+ MY_CONTAINER: container,
+ },
+});
+```
+
+> [!NOTE]
+> Binding a Container to a Worker will also bind a Durable Object Namespace to the Worker.
+
+## Route Requests
+
+To route requests, have your Worker's `fetch` handler resolve a Durable Object instance and proxy the `request` to it:
+
+```ts
+import { getContainer } from "@cloudflare/containers";
+import type { worker } from "../alchemy.run.ts";
+
+// the class must be exported for Cloudflare
+export { MyContainer } from "./container.ts";
+
+export default {
+  async fetch(request: Request, env: typeof worker.Env): Promise<Response> {
+    const container = getContainer(env.MY_CONTAINER, "container");
+ return container.fetch(request);
+ },
+};
+```
+
+> [!TIP]
+> Notice how the type of our Worker environment is inferred with `typeof worker.Env`; see the [Type-safe Bindings](../../concepts/bindings.md#type-safe-bindings) documentation for more information.
+
+## Complex Routing
+
+Cloudflare's unique design allows you to implement your own routing strategies in pure JavaScript.
+
+### Round-Robin
+
+For example, you can distribute requests across a fixed pool of instances by generating a random instance ID between 0 and the number of instances:
+
+```ts
+export async function loadBalance(
+  request: Request,
+  binding: DurableObjectNamespace,
+  instances = 3
+): Promise<Response> {
+  // pick a random instance between 0 and `instances`
+  const containerId = binding.idFromName(
+    `instance-${Math.floor(Math.random() * instances)}`
+  );
+  const container = binding.get(containerId);
+  return container.fetch(request);
+}
+```
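+
+A sketch of calling this helper from the Worker's `fetch` handler, assuming the `MY_CONTAINER` binding above (which, per the note earlier, also exposes a Durable Object namespace):
+
+```ts
+import type { worker } from "../alchemy.run.ts";
+// assumes loadBalance (above) is defined in or imported into this module
+
+export default {
+  async fetch(request: Request, env: typeof worker.Env): Promise<Response> {
+    return loadBalance(request, env.MY_CONTAINER, 3);
+  },
+};
+```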
diff --git a/alchemy-web/docs/providers/cloudflare/custom-domain.md b/alchemy-web/docs/providers/cloudflare/custom-domain.md
index 92b037773..a003713b7 100644
--- a/alchemy-web/docs/providers/cloudflare/custom-domain.md
+++ b/alchemy-web/docs/providers/cloudflare/custom-domain.md
@@ -7,9 +7,43 @@ description: Learn how to configure and manage Custom Domains for your Cloudflar
The CustomDomain resource lets you attach a [custom domain](https://developers.cloudflare.com/workers/configuration/routing/custom-domains/) to a Cloudflare Worker.
-## Minimal Example
+## Worker Domains
-Bind a domain to a worker:
+The simplest way to bind custom domains is directly on the Worker:
+
+```ts
+import { Worker } from "alchemy/cloudflare";
+
+const worker = await Worker("api", {
+ name: "api-worker",
+ entrypoint: "./src/api.ts",
+ domains: ["api.example.com", "admin.example.com"],
+});
+
+// Access the created domains
+console.log(worker.domains); // Array of created CustomDomain resources
+```
+
+With additional options:
+
+```ts
+const worker = await Worker("api", {
+ name: "api-worker",
+ entrypoint: "./src/api.ts",
+ domains: [
+ {
+ domainName: "api.example.com",
+ zoneId: "YOUR_ZONE_ID", // Optional - will be inferred if not provided
+ adopt: true, // Adopt existing domain if it exists
+ },
+ "admin.example.com", // Zone ID will be inferred
+ ],
+});
+```
+
+## CustomDomain Resource
+
+You can also create custom domains independently:
```ts
import { Worker, CustomDomain } from "alchemy/cloudflare";
@@ -26,7 +60,7 @@ const domain = await CustomDomain("api-domain", {
});
```
-## With Environment
+### With Environment
Bind a domain to a specific worker environment:
@@ -40,3 +74,6 @@ const domain = await CustomDomain("staging-domain", {
environment: "staging",
});
```
+
+> [!TIP]
+> See the [Routes and Domains](https://developers.cloudflare.com/workers/configuration/routing/#what-is-best-for-me) Cloudflare docs to help decide when to use a Route vs. a Domain.
diff --git a/alchemy-web/docs/providers/cloudflare/worker.md b/alchemy-web/docs/providers/cloudflare/worker.md
index 8f812fd4f..d36ae4185 100644
--- a/alchemy-web/docs/providers/cloudflare/worker.md
+++ b/alchemy-web/docs/providers/cloudflare/worker.md
@@ -174,6 +174,32 @@ const frontend = await Worker("frontend", {
});
```
+## Self-Binding
+
+A worker can bind to itself using `Self` or `WorkerRef`:
+
+```ts
+import { Worker, Self, WorkerRef } from "alchemy/cloudflare";
+
+// Using Self
+const workerWithSelf = await Worker("my-worker", {
+ name: "my-worker",
+ entrypoint: "./src/worker.ts",
+ bindings: {
+ SELF: Self,
+ },
+});
+
+// Using WorkerRef with the worker's own ID
+const workerWithRef = await Worker("my-worker", {
+ name: "my-worker",
+ entrypoint: "./src/worker.ts",
+ bindings: {
+ SELF: WorkerRef("my-worker"),
+ },
+});
+```
+
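+Inside your Worker code, the self-binding behaves like a service binding back to the same Worker. A minimal sketch, assuming the `SELF` binding above is exposed as a `Fetcher` (types from `@cloudflare/workers-types`):
+
+```ts
+// src/worker.ts (sketch)
+export default {
+  async fetch(request: Request, env: { SELF: Fetcher }): Promise<Response> {
+    const url = new URL(request.url);
+    if (url.pathname === "/outer") {
+      // call back into this same worker via the self binding
+      return env.SELF.fetch(new URL("/inner", url).toString());
+    }
+    return new Response(`handled ${url.pathname}`);
+  },
+};
+```
+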
## Circular Worker Bindings
When workers need to bind to each other (circular dependency), use `WorkerStub` to break the cycle:
@@ -330,6 +356,26 @@ const worker = await Worker("api", {
> [!TIP]
> See the [Route](./route.md) for more information.
+## Custom Domains
+
+Bind custom domains directly to your worker for a simpler routing setup:
+
+```ts
+import { Worker } from "alchemy/cloudflare";
+
+const worker = await Worker("api", {
+ name: "api-worker",
+ entrypoint: "./src/api.ts",
+ domains: ["api.example.com", "admin.example.com"],
+});
+
+// Access the created domains
+console.log(worker.domains); // Array of created CustomDomain resources
+```
+
+> [!TIP]
+> See the [Routes and Domains](https://developers.cloudflare.com/workers/configuration/routing/#what-is-best-for-me) Cloudflare docs to help decide when to use a Route vs. a Domain.
+
## Workers for Platforms
Deploy workers to dispatch namespaces for multi-tenant architectures using Cloudflare's Workers for Platforms:
diff --git a/alchemy-web/docs/providers/docker/container.md b/alchemy-web/docs/providers/docker/container.md
new file mode 100644
index 000000000..68c63caa5
--- /dev/null
+++ b/alchemy-web/docs/providers/docker/container.md
@@ -0,0 +1,86 @@
+---
+title: Container
+description: Deploy and manage Docker containers with Alchemy
+---
+
+# Container
+
+The `Container` resource allows you to create and manage Docker containers using Alchemy.
+
+## Usage
+
+```typescript
+import * as docker from "alchemy/docker";
+
+const myContainer = await docker.Container("my-container", {
+ image: "nginx:latest",
+ name: "web-server",
+ ports: [{ external: 80, internal: 80 }],
+ start: true
+});
+```
+
+## Properties
+
+| Name | Type | Required | Description |
+|------|------|----------|--------------|
+| `image` | `RemoteImage \| string` | Yes | Docker image to use for the container |
+| `name` | `string` | No | Name for the container |
+| `command` | `string[]` | No | Command to run in the container |
+| `environment` | `Record<string, string>` | No | Environment variables for the container |
+| `ports` | `{ external: number \| string, internal: number \| string, protocol?: "tcp" \| "udp" }[]` | No | Port mappings from host to container |
+| `volumes` | `{ hostPath: string, containerPath: string, readOnly?: boolean }[]` | No | Volume mappings from host paths to container paths |
+| `networks` | `{ name: string, aliases?: string[] }[]` | No | Networks to connect to |
+| `restart` | `"no" \| "always" \| "on-failure" \| "unless-stopped"` | No | Restart policy |
+| `removeOnExit` | `boolean` | No | Whether to remove the container when it exits |
+| `start` | `boolean` | No | Start the container after creation |
+
+## Outputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| `id` | `string` | The ID of the container |
+| `state` | `"created" \| "running" \| "paused" \| "stopped" \| "exited"` | The current state of the container |
+| `createdAt` | `number` | Time when the container was created |
+
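+These outputs can be read directly from the resource; for example, using the container from the usage snippet above:
+
+```typescript
+console.log(myContainer.id);    // Docker container ID
+console.log(myContainer.state); // e.g. "running" when `start: true`
+```
+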
+## Example
+
+```typescript
+import * as docker from "alchemy/docker";
+
+// Create a Docker network
+const network = await docker.Network("app-network", {
+ name: "microservices-network"
+});
+
+// Pull the Redis image
+const redisImage = await docker.RemoteImage("redis-image", {
+ name: "redis",
+ tag: "alpine"
+});
+
+// Run Redis container
+const redis = await docker.Container("redis", {
+ image: redisImage.imageRef,
+ name: "redis",
+ networks: [{ name: network.name }],
+ start: true
+});
+
+// Run the application container
+const app = await docker.Container("app", {
+ image: "my-node-app:latest",
+ name: "web-app",
+ ports: [{ external: 3000, internal: 3000 }],
+ networks: [{ name: network.name }],
+ environment: {
+ REDIS_HOST: "redis",
+ NODE_ENV: "production"
+ },
+ volumes: [
+ { hostPath: "./logs", containerPath: "/app/logs" }
+ ],
+ restart: "always",
+ start: true
+});
+```
diff --git a/alchemy-web/docs/providers/docker/image.md b/alchemy-web/docs/providers/docker/image.md
new file mode 100644
index 000000000..eaa36f538
--- /dev/null
+++ b/alchemy-web/docs/providers/docker/image.md
@@ -0,0 +1,82 @@
+---
+title: Image
+description: Build and manage Docker images with Alchemy
+---
+
+# Image
+
+The `Image` resource allows you to build and manage Docker images from local Dockerfiles using Alchemy.
+
+## Usage
+
+```typescript
+import * as docker from "alchemy/docker";
+
+const myImage = await docker.Image("app-image", {
+ name: "my-app",
+ tag: "v1.0",
+ build: {
+ context: "./app"
+ }
+});
+```
+
+## Properties
+
+| Name | Type | Required | Description |
+|------|------|----------|-------------|
+| `name` | `string` | Yes | Docker image name |
+| `tag` | `string` | No | Tag for the image (defaults to "latest") |
+| `build` | `{ context: string, dockerfile?: string, target?: string, buildArgs?: Record<string, string> }` | Yes | Build configuration |
+| `build.context` | `string` | Yes | Path to the build context (directory containing Dockerfile) |
+| `build.dockerfile` | `string` | No | Path to the Dockerfile (relative to context, defaults to "Dockerfile") |
+| `build.target` | `string` | No | Target stage to build in multi-stage Dockerfiles |
+| `build.buildArgs` | `Record<string, string>` | No | Build arguments to pass to Docker build |
+| `skipPush` | `boolean` | No | Skip pushing the image to a registry (default: true) |
+
+## Outputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| `imageRef` | `string` | Full image reference (name:tag) |
+| `imageId` | `string` | Docker image ID |
+| `createdAt` | `number` | Time when the image was built |
+
+## Example
+
+```typescript
+import * as docker from "alchemy/docker";
+
+// Build a Docker image from a Dockerfile
+const appImage = await docker.Image("app-image", {
+ name: "my-node-app",
+ tag: "1.0",
+ build: {
+ context: "./app",
+ dockerfile: "Dockerfile.prod",
+ buildArgs: {
+ NODE_ENV: "production",
+ API_VERSION: "v2"
+ }
+ }
+});
+
+// Use the built image in a container
+const appContainer = await docker.Container("app", {
+ image: appImage,
+ ports: [{ external: 3000, internal: 3000 }],
+ restart: "always",
+ start: true
+});
+
+// For multi-stage builds, you can target a specific stage
+const builderImage = await docker.Image("builder", {
+ name: "app-builder",
+ tag: "latest",
+ build: {
+ context: "./app",
+ target: "builder" // Target the 'builder' stage in a multi-stage Dockerfile
+ },
+ skipPush: true
+});
+```
diff --git a/alchemy-web/docs/providers/docker/index.md b/alchemy-web/docs/providers/docker/index.md
new file mode 100644
index 000000000..c8f193122
--- /dev/null
+++ b/alchemy-web/docs/providers/docker/index.md
@@ -0,0 +1,108 @@
+---
+title: Docker Provider
+description: Deploy and manage Docker resources using Alchemy
+---
+
+# Docker Provider
+
+The Docker provider allows you to create, manage, and orchestrate Docker resources directly from your Alchemy applications. With this provider, you can pull images, run containers, create networks, and more, all using the familiar Alchemy Resource syntax.
+
+## Resources
+
+The Docker provider includes the following resources:
+
+- [RemoteImage](./remote-image.md) - Pull and manage Docker images
+- [Image](./image.md) - Build Docker images from local Dockerfiles
+- [Container](./container.md) - Run and manage Docker containers
+- [Network](./network.md) - Create and manage Docker networks
+- [Volume](./volume.md) - Create and manage persistent Docker volumes
+
+## Example
+
+Here's a complete example of using the Docker provider to create a web application with Redis, custom images, and persistent volumes:
+
+```typescript
+import * as docker from "alchemy/docker";
+
+// Create a Docker network
+const network = await docker.Network("app-network", {
+ name: "my-application-network"
+});
+
+// Create a persistent volume for Redis data
+const redisVolume = await docker.Volume("redis-data", {
+ name: "redis-data",
+ labels: [
+ { name: "app", value: "my-application" },
+ { name: "service", value: "redis" }
+ ]
+});
+
+// Pull Redis image
+const redisImage = await docker.RemoteImage("redis-image", {
+ name: "redis",
+ tag: "alpine"
+});
+
+// Run Redis container with persistent volume
+const redis = await docker.Container("redis", {
+ image: redisImage.imageRef,
+ name: "redis",
+ networks: [{ name: network.name }],
+ volumes: [
+ {
+ hostPath: redisVolume.name,
+ containerPath: "/data"
+ }
+ ],
+ start: true
+});
+
+// Build a custom application image from local Dockerfile
+const appImage = await docker.Image("app-image", {
+ name: "my-web-app",
+ tag: "latest",
+ build: {
+ context: "./app",
+ buildArgs: {
+ NODE_ENV: "production"
+ }
+ }
+});
+
+// Create a volume for application logs
+const logsVolume = await docker.Volume("logs-volume", {
+ name: "app-logs",
+ labels: {
+ "com.example.environment": "production",
+ "com.example.backup": "daily"
+ }
+});
+
+// Run the application container
+const app = await docker.Container("app", {
+ image: appImage, // Using the custom built image
+ name: "web-app",
+ ports: [{ external: 3000, internal: 3000 }],
+ networks: [{ name: network.name }],
+ volumes: [
+ {
+ hostPath: logsVolume.name,
+ containerPath: "/app/logs"
+ }
+ ],
+ environment: {
+ REDIS_HOST: "redis",
+ NODE_ENV: "production"
+ },
+ restart: "always",
+ start: true
+});
+
+// Output the URL
+export const url = `http://localhost:3000`;
+```
+
+## Additional Resources
+
+For more complex examples, see the [Docker Example](https://github.com/sam-goodwin/alchemy/tree/main/examples/docker) in the Alchemy repository.
diff --git a/alchemy-web/docs/providers/docker/network.md b/alchemy-web/docs/providers/docker/network.md
new file mode 100644
index 000000000..7121c0bee
--- /dev/null
+++ b/alchemy-web/docs/providers/docker/network.md
@@ -0,0 +1,120 @@
+---
+title: Network
+description: Create and manage Docker networks with Alchemy
+---
+
+# Network
+
+The `Network` resource allows you to create and manage Docker networks using Alchemy, enabling container-to-container communication.
+
+## Usage
+
+```typescript
+import * as docker from "alchemy/docker";
+
+const network = await docker.Network("app-network", {
+ name: "app-network"
+});
+```
+
+## Properties
+
+| Name | Type | Required | Description |
+|------|------|----------|--------------|
+| `name` | `string` | Yes | Network name |
+| `driver` | `"bridge" \| "host" \| "none" \| "overlay" \| "macvlan" \| string` | No | Network driver to use |
+| `enableIPv6` | `boolean` | No | Enable IPv6 on the network |
+| `labels` | `Record<string, string>` | No | Custom metadata labels for the network |
+
+## Outputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| `id` | `string` | Network ID |
+| `createdAt` | `number` | Time when the network was created |
+
+## Example
+
+```typescript
+import * as docker from "alchemy/docker";
+
+// Create a simple bridge network
+const appNetwork = await docker.Network("app-network", {
+ name: "app-network"
+});
+
+// Create a custom network with driver
+const overlayNetwork = await docker.Network("overlay-network", {
+ name: "overlay-network",
+ driver: "overlay",
+ enableIPv6: true,
+ labels: {
+ "com.example.description": "Network for application services"
+ }
+});
+
+// Create containers connected to the network
+const service1 = await docker.Container("service1", {
+ image: "service1:latest",
+ name: "service1",
+ networks: [{ name: appNetwork.name }],
+ start: true
+});
+
+const service2 = await docker.Container("service2", {
+ image: "service2:latest",
+ name: "service2",
+ networks: [{ name: appNetwork.name }],
+ environment: {
+ // Service discovery using container names
+ SERVICE1_URL: `http://service1:8080`
+ },
+ start: true
+});
+```
+
+## Network Communication
+
+When containers are connected to the same Docker network, they can communicate with each other using the container names as hostnames. This built-in DNS resolution simplifies service discovery in multi-container applications.
+
+```typescript
+const service1 = await docker.Container("service1", {
+ image: "service1:latest",
+ name: "service1",
+ networks: [{ name: appNetwork.name }],
+ start: true
+});
+
+const service2 = await docker.Container("service2", {
+ image: "service2:latest",
+ name: "service2",
+ networks: [{ name: appNetwork.name }],
+ environment: {
+ // Service discovery using container names
+ SERVICE1_URL: `http://service1:8080`
+ },
+ start: true
+});
+```
+
+Or, you can set aliases for the container to make it accessible by multiple names:
+
+```typescript
+const service1 = await docker.Container("service1", {
+ image: "service1:latest",
+ name: "service1",
+ networks: [{ name: appNetwork.name, aliases: ["api"] }],
+ start: true
+});
+
+const service2 = await docker.Container("service2", {
+ image: "service2:latest",
+ name: "service2",
+ networks: [{ name: appNetwork.name }],
+ environment: {
+ // Service discovery using container names
+ SERVICE1_URL: `http://api:8080`
+ },
+ start: true
+});
+```
diff --git a/alchemy-web/docs/providers/docker/remote-image.md b/alchemy-web/docs/providers/docker/remote-image.md
new file mode 100644
index 000000000..b924ebb41
--- /dev/null
+++ b/alchemy-web/docs/providers/docker/remote-image.md
@@ -0,0 +1,56 @@
+---
+title: RemoteImage
+description: Pull and manage Docker images with Alchemy
+---
+
+# RemoteImage
+
+The `RemoteImage` resource allows you to pull and manage Docker images using Alchemy.
+
+## Usage
+
+```typescript
+import * as docker from "alchemy/docker";
+
+const myImage = await docker.RemoteImage("nginx", {
+ name: "nginx",
+ tag: "latest",
+});
+```
+
+## Properties
+
+| Name | Type | Required | Description |
+|------|------|----------|--------------|
+| `name` | `string` | Yes | Docker image name (e.g., "nginx") |
+| `tag` | `string` | No | Tag for the image (e.g., "latest" or "1.19-alpine") |
+| `alwaysPull` | `boolean` | No | Always attempt to pull the image, even if it exists locally |
+
+## Outputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| `imageRef` | `string` | Full image reference (name:tag) |
+| `createdAt` | `number` | Time when the image was created or pulled |
+
+## Example
+
+```typescript
+import * as docker from "alchemy/docker";
+
+// Pull the nginx image
+const nginxImage = await docker.RemoteImage("nginx", {
+ name: "nginx",
+ tag: "latest"
+});
+
+// Pull a specific version of Node.js
+const nodeImage = await docker.RemoteImage("node-app", {
+ name: "node",
+ tag: "16-alpine",
+ alwaysPull: true
+});
+
+// The full image reference can be used when creating containers
+console.log(`Pulled image: ${nginxImage.imageRef}`);
+```
diff --git a/alchemy-web/docs/providers/docker/volume.md b/alchemy-web/docs/providers/docker/volume.md
new file mode 100644
index 000000000..94e35508c
--- /dev/null
+++ b/alchemy-web/docs/providers/docker/volume.md
@@ -0,0 +1,121 @@
+---
+title: Volume
+description: Create and manage Docker volumes with Alchemy
+---
+
+# Volume
+
+The `Volume` resource allows you to create and manage persistent Docker volumes using Alchemy.
+
+## Usage
+
+```typescript
+import * as docker from "alchemy/docker";
+
+const myVolume = await docker.Volume("data-volume", {
+ name: "app-data",
+ driver: "local"
+});
+```
+
+## Properties
+
+| Name | Type | Required | Description |
+|------|------|----------|-------------|
+| `name` | `string` | Yes | Docker volume name |
+| `driver` | `string` | No | Volume driver to use (defaults to "local") |
+| `driverOpts` | `Record<string, string>` | No | Driver-specific options |
+| `labels` | `VolumeLabel[] \| Record<string, string>` | No | Custom metadata labels for the volume |
+
+The `VolumeLabel` interface has the following structure:
+```typescript
+interface VolumeLabel {
+ name: string; // Label name
+ value: string; // Label value
+}
+```
+
+## Outputs
+
+| Name | Type | Description |
+|------|------|-------------|
+| `id` | `string` | Volume ID (same as name for Docker volumes) |
+| `mountpoint` | `string` | Volume mountpoint path on the host |
+| `createdAt` | `number` | Time when the volume was created |
+
+## Example
+
+```typescript
+import * as docker from "alchemy/docker";
+
+// Create a simple Docker volume for persistent data
+const dataVolume = await docker.Volume("data-volume", {
+ name: "postgres-data"
+});
+
+// Create a Docker volume with custom driver options
+const dbVolume = await docker.Volume("db-data", {
+ name: "mysql-data",
+ driver: "local",
+ driverOpts: {
+ "type": "nfs",
+ "o": "addr=10.0.0.1,rw",
+ "device": ":/path/to/dir"
+ }
+});
+
+// Create a volume with labels (array format)
+const logsVolume = await docker.Volume("logs-volume", {
+ name: "app-logs",
+ labels: [
+ { name: "com.example.environment", value: "production" },
+ { name: "com.example.created-by", value: "alchemy" }
+ ]
+});
+
+// Create a volume with labels (record format)
+const configVolume = await docker.Volume("config-volume", {
+ name: "app-config",
+ labels: {
+ "com.example.environment": "staging",
+ "com.example.created-by": "alchemy"
+ }
+});
+
+// Use volumes with a container
+const dbContainer = await docker.Container("database", {
+ image: "postgres:14",
+ name: "postgres",
+ volumes: [
+ {
+ hostPath: dataVolume.name, // Reference the volume by name
+ containerPath: "/var/lib/postgresql/data"
+ },
+ {
+ hostPath: logsVolume.name,
+ containerPath: "/var/log/postgresql",
+ readOnly: false
+ }
+ ],
+ environment: {
+ POSTGRES_PASSWORD: "secret"
+ },
+ restart: "always",
+ start: true
+});
+```
+
+## Using Docker Volumes for Persistence
+
+Docker volumes are the preferred mechanism for persisting data generated by and used by Docker containers. Their benefits include:
+
+1. **Data Persistence**: Data stored in volumes persists even when containers are stopped or removed
+2. **Performance**: Better performance than bind mounts, especially on Windows and macOS
+3. **Portability**: Volumes can be easily backed up, restored, and migrated
+4. **Driver Support**: Support for various storage backends through volume drivers
+
+When using Docker volumes with Alchemy, it's a common pattern to:
+1. Create volumes with meaningful names
+2. Assign metadata using labels
+3. Reference volumes in containers by name
+4. Configure volume permissions with the `readOnly` flag when mounting
diff --git a/alchemy-web/docs/providers/github/index.md b/alchemy-web/docs/providers/github/index.md
index b4fdf3169..faf7e022c 100644
--- a/alchemy-web/docs/providers/github/index.md
+++ b/alchemy-web/docs/providers/github/index.md
@@ -8,12 +8,13 @@ GitHub is a web-based version control and collaboration platform that provides G
- [Comment](./comment.md) - Create and manage comments on issues and pull requests
- [RepositoryEnvironment](./repository-environment.md) - Create and manage deployment environments with protection rules
+- [RepositoryWebhook](./repository-webhook.md) - Create and manage repository webhooks for event notifications
- [Secret](./secret.md) - Create and manage GitHub Actions and Dependabot secrets
## Example Usage
```ts
-import { Comment, RepositoryEnvironment, GitHubSecret } from "alchemy/github";
+import { Comment, RepositoryEnvironment, RepositoryWebhook, GitHubSecret } from "alchemy/github";
// Create a repository environment
const prodEnv = await RepositoryEnvironment("production", {
@@ -32,6 +33,15 @@ const prodEnv = await RepositoryEnvironment("production", {
},
});
+// Create a webhook for CI/CD notifications
+const webhook = await RepositoryWebhook("ci-webhook", {
+ owner: "my-org",
+ repository: "my-repo",
+ url: "https://ci.example.com/webhook",
+ secret: alchemy.secret("GITHUB_WEBHOOK_SECRET"),
+ events: ["push", "pull_request", "release"],
+});
+
// Create a secret for the environment
const secret = await GitHubSecret("deploy-key", {
owner: "my-org",
diff --git a/alchemy-web/docs/providers/github/repository-webhook.md b/alchemy-web/docs/providers/github/repository-webhook.md
new file mode 100644
index 000000000..325d89daa
--- /dev/null
+++ b/alchemy-web/docs/providers/github/repository-webhook.md
@@ -0,0 +1,186 @@
+# RepositoryWebhook
+
+Manage GitHub repository webhooks with automatic lifecycle management.
+
+Webhooks allow external services to be notified when certain events happen in a repository. This resource manages the full lifecycle of repository webhooks including creation, updates, and deletion.
+
+## Basic Usage
+
+Create a simple webhook for push events:
+
+```ts
+import { RepositoryWebhook } from "alchemy/github";
+
+const webhook = await RepositoryWebhook("my-webhook", {
+ owner: "my-org",
+ repository: "my-repo",
+ url: "https://my-service.com/github-webhook",
+ events: ["push"]
+});
+```
+
+## With Secret Validation
+
+Add webhook secret for payload validation:
+
+```ts
+import { RepositoryWebhook } from "alchemy/github";
+import { alchemy } from "alchemy";
+
+const webhook = await RepositoryWebhook("secure-webhook", {
+ owner: "my-org",
+ repository: "my-repo",
+ url: "https://ci.example.com/webhook",
+ secret: alchemy.secret("GITHUB_WEBHOOK_SECRET"),
+ events: ["push", "pull_request", "release"],
+ contentType: "application/json"
+});
+```
+
+## Multiple Events
+
+Listen to multiple GitHub events:
+
+```ts
+import { RepositoryWebhook } from "alchemy/github";
+
+const ciWebhook = await RepositoryWebhook("ci-webhook", {
+ owner: "my-org",
+ repository: "my-repo",
+ url: "https://ci.example.com/webhook",
+ events: [
+ "push",
+ "pull_request",
+ "release",
+ "issues",
+ "issue_comment"
+ ]
+});
+```
+
+## All Events
+
+Create a webhook that listens to all repository events:
+
+```ts
+import { RepositoryWebhook } from "alchemy/github";
+
+const monitoringWebhook = await RepositoryWebhook("monitoring-webhook", {
+ owner: "my-org",
+ repository: "my-repo",
+ url: "https://monitoring.internal.com/github",
+ events: ["*"], // Listen to all events
+ insecureSsl: true, // For internal services with self-signed certs
+ contentType: "application/x-www-form-urlencoded"
+});
+```
+
+## Custom SSL Configuration
+
+For internal services or development environments:
+
+```ts
+import { RepositoryWebhook } from "alchemy/github";
+
+const devWebhook = await RepositoryWebhook("dev-webhook", {
+ owner: "my-org",
+ repository: "my-repo",
+ url: "https://localhost:3000/webhook",
+ insecureSsl: true, // Skip SSL verification
+ active: false, // Create inactive webhook
+ events: ["push", "pull_request"]
+});
+```
+
+## Properties
+
+| Property | Type | Required | Description |
+|----------|------|----------|-------------|
+| `owner` | string | ✅ | Repository owner (user or organization) |
+| `repository` | string | ✅ | Repository name |
+| `url` | string | ✅ | The URL to which the payloads will be delivered |
+| `secret` | string | | Webhook secret for payload validation |
+| `contentType` | `"application/json"` \| `"application/x-www-form-urlencoded"` | | The media type used to serialize the payloads (default: `"application/json"`) |
+| `insecureSsl` | boolean | | When `true`, skip SSL certificate verification of the webhook URL (default: `false`) |
+| `active` | boolean | | Determines if notifications are sent when the webhook is triggered (default: `true`) |
+| `events` | string[] | | Determines what events the hook is triggered for (default: `["push"]`) |
+| `token` | string | | Optional GitHub API token (overrides environment variable) |
+
+## Returns
+
+| Property | Type | Description |
+|----------|------|-------------|
+| `id` | string | Resource identifier |
+| `webhookId` | number | The numeric ID of the webhook in GitHub |
+| `url` | string | The webhook URL that was configured |
+| `createdAt` | string | Time at which the webhook was created |
+| `updatedAt` | string | Time at which the webhook was last updated |
+| `pingUrl` | string | The ping URL for testing the webhook |
+| `testUrl` | string | The test URL for the webhook |
+
+## GitHub Events
+
+Common GitHub webhook events you can listen to:
+
+- `push` - Any Git push to the repository
+- `pull_request` - Pull request activity
+- `issues` - Issue activity
+- `issue_comment` - Issue comment activity
+- `release` - Release activity
+- `create` - Branch or tag created
+- `delete` - Branch or tag deleted
+- `fork` - Repository forked
+- `star` - Repository starred
+- `watch` - Repository watched
+- `workflow_run` - GitHub Actions workflow run
+- `*` - All events
+
+For a complete list of available events, see the [GitHub Webhooks documentation](https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads).
+
+## Authentication
+
+The resource requires a GitHub token with appropriate permissions:
+
+```bash
+export GITHUB_TOKEN="ghp_your_token_here"
+```
+
+The token must have:
+- `repo` scope for private repositories
+- `public_repo` scope for public repositories
+- Admin access to the repository to manage webhooks
+
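+If you prefer not to rely on the environment variable, the `token` property from the table above can be passed explicitly. A minimal sketch, assuming the token is provided to the deployment process under a variable name of your choosing:
+
+```ts
+import { RepositoryWebhook } from "alchemy/github";
+
+const webhook = await RepositoryWebhook("explicit-token-webhook", {
+  owner: "my-org",
+  repository: "my-repo",
+  url: "https://ci.example.com/webhook",
+  events: ["push"],
+  // MY_GITHUB_TOKEN is an example variable name; any source of a token string works.
+  // This overrides the GITHUB_TOKEN environment variable for this resource only.
+  token: process.env.MY_GITHUB_TOKEN!,
+});
+```
+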
+## Error Handling
+
+The resource handles common GitHub API errors:
+
+- **403 Forbidden**: Token lacks admin rights to the repository
+- **422 Unprocessable Entity**: Invalid webhook configuration (bad URL, invalid events, etc.)
+- **404 Not Found**: Repository doesn't exist or token lacks access
+
+## Updates
+
+When updating a webhook, you can change:
+- Webhook URL
+- Events list
+- Secret
+- Content type
+- SSL verification settings
+- Active status
+
+The webhook ID remains the same during updates.
+
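+For example, re-running the same resource ID with changed properties updates the existing webhook in place (a sketch reusing the webhook from the basic usage example above):
+
+```ts
+import { RepositoryWebhook } from "alchemy/github";
+
+// Same resource ID ("my-webhook"), so the existing webhook is updated rather than recreated
+const webhook = await RepositoryWebhook("my-webhook", {
+  owner: "my-org",
+  repository: "my-repo",
+  url: "https://my-service.com/github-webhook",
+  events: ["push", "pull_request"], // previously ["push"]
+});
+
+console.log(webhook.webhookId); // the numeric ID in GitHub stays the same
+```
+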
+## Security
+
+- Always use HTTPS URLs for webhook endpoints
+- Use webhook secrets to validate payload authenticity (see the verification sketch below)
+- Keep `insecureSsl: false` (the default) in production
+- Regularly rotate webhook secrets
+
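+GitHub signs each delivery with the configured secret using HMAC SHA-256 and sends the result in the `X-Hub-Signature-256` header. A minimal verification sketch for a Node.js receiver (the receiving endpoint itself is outside the scope of this resource):
+
+```ts
+import { createHmac, timingSafeEqual } from "node:crypto";
+
+// Returns true when `signatureHeader` matches the HMAC of the raw request body
+function verifySignature(
+  secret: string,
+  rawBody: string,
+  signatureHeader: string,
+): boolean {
+  const expected =
+    "sha256=" + createHmac("sha256", secret).update(rawBody).digest("hex");
+  const a = Buffer.from(expected);
+  const b = Buffer.from(signatureHeader);
+  return a.length === b.length && timingSafeEqual(a, b);
+}
+```
+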
+## Best Practices
+
+1. **Use secrets**: Always configure webhook secrets for payload validation
+2. **Specific events**: Only listen to events your application needs
+3. **Error handling**: Implement proper error handling in your webhook receiver
+4. **Testing**: Use the `pingUrl` to test webhook connectivity
+5. **Monitoring**: Monitor webhook delivery success rates in GitHub
\ No newline at end of file
diff --git a/alchemy-web/docs/providers/stripe/price.md b/alchemy-web/docs/providers/stripe/price.md
index 380b2ef3c..3689e9857 100644
--- a/alchemy-web/docs/providers/stripe/price.md
+++ b/alchemy-web/docs/providers/stripe/price.md
@@ -153,3 +153,33 @@ const cappedUsagePrice = await Price("api-calls-capped", {
],
});
```
+
+## Billing Meters
+
+For advanced usage tracking, you can associate a price with a Stripe Billing Meter:
+
+```ts
+import { Price } from "alchemy/stripe";
+
+// First create a meter (not shown - requires Meter resource)
+// const meter = await Meter("api-usage-meter", { ... });
+
+const meteredPrice = await Price("api-usage-with-meter", {
+ product: "prod_xyz",
+ currency: "usd",
+ billingScheme: "tiered",
+ tiersMode: "graduated",
+ recurring: {
+ interval: "month",
+ usageType: "metered", // Required for meter association
+ meter: "meter_123abc" // Associate with billing meter
+ },
+ tiers: [
+ { upTo: 10000, unitAmountDecimal: "0" },
+ { upTo: 25000, unitAmountDecimal: "0.002" },
+ { upTo: "inf", flatAmountDecimal: "3000" }
+ ],
+});
+```
+
+**Note**: Meters can only be associated with prices that have `recurring.usageType = 'metered'`.
diff --git a/alchemy/.gitignore b/alchemy/.gitignore
index 16675cf6c..fee38e27d 100644
--- a/alchemy/.gitignore
+++ b/alchemy/.gitignore
@@ -1,2 +1,4 @@
-bin/*
-!bin/*.ts
\ No newline at end of file
+bin/alchemy.mjs
+!templates/**/wrangler.jsonc
+.nuxt
+workers/*.js
diff --git a/alchemy/bin/alchemy.ts b/alchemy/bin/alchemy.ts
index 0659da664..6f9180cad 100644
--- a/alchemy/bin/alchemy.ts
+++ b/alchemy/bin/alchemy.ts
@@ -1,93 +1,88 @@
#!/usr/bin/env node
-import { parseArgs } from "node:util";
-import { createAlchemy } from "./create-alchemy.ts";
-import { bootstrapS3 } from "./bootstrap-s3.ts";
-// Parse command-line arguments. We allow unknown flags because different
-// sub-commands may accept different sets.
-const { values, positionals } = parseArgs({
- allowPositionals: true,
- // We keep the option list flat – sub-commands will decide which ones they care about.
- options: {
- template: { type: "string" },
- yes: { type: "boolean", short: "y" },
- overwrite: { type: "boolean" },
- help: { type: "boolean", short: "h" },
- version: { type: "boolean", short: "v" },
- region: { type: "string" },
- prefix: { type: "string" },
- },
-});
-
-// First positional is the sub-command (e.g. `create`)
-const command = positionals.shift();
-
-const usage = `Usage: alchemy <command> [options]
-
-Available commands:
- create Scaffold a new project
- bootstrap Bootstrap cloud resources for alchemy
-
-Bootstrap options:
- --region AWS region (defaults to AWS profile default)
- --prefix S3 bucket name prefix (default: alchemy-state)
-`;
+import { createCli, trpcServer, zod as z } from "trpc-cli";
+import { createAlchemy } from "./commands/create.ts";
+import { getPackageVersion } from "./services/get-package-version.ts";
+import {
+ PackageManagerSchema,
+ ProjectNameSchema,
+ TemplateSchema,
+ type CreateInput,
+} from "./types.ts";
-if (!command) {
- console.error(usage);
- process.exit(1);
-}
+const t = trpcServer.initTRPC.create();
-switch (command) {
- case "create": {
- // The first remaining positional is treated as the project name.
- const name = positionals.shift();
-
- // If the user explicitly requested help or version, forward the flags even
- // when no project name is provided so the underlying handler can display
- // the appropriate information.
- if (values.help || values.version) {
- await createAlchemy({
+const router = t.router({
+ create: t.procedure
+ .meta({
+ description: "Create a new Alchemy project",
+ })
+ .input(
+ z.tuple([
+ ProjectNameSchema.optional(),
+ z
+ .object({
+ template: TemplateSchema.optional(),
+ packageManager: PackageManagerSchema.optional(),
+ yes: z
+ .boolean()
+ .optional()
+ .default(false)
+ .describe("Skip prompts and use defaults"),
+ overwrite: z
+ .boolean()
+ .optional()
+ .default(false)
+ .describe("Overwrite existing directory"),
+ bun: z
+ .boolean()
+ .optional()
+ .default(false)
+ .describe("Use Bun as the package manager"),
+ npm: z
+ .boolean()
+ .optional()
+ .default(false)
+ .describe("Use npm as the package manager"),
+ pnpm: z
+ .boolean()
+ .optional()
+ .default(false)
+ .describe("Use pnpm as the package manager"),
+ yarn: z
+ .boolean()
+ .optional()
+ .default(false)
+ .describe("Use Yarn as the package manager"),
+ install: z
+ .boolean()
+ .optional()
+ .describe("Install dependencies after scaffolding"),
+ })
+ .optional()
+ .default({}),
+ ]),
+ )
+ .mutation(async ({ input }) => {
+ const [name, options] = input;
+ const isTest = process.env.NODE_ENV === "test";
+ const combinedInput: CreateInput = {
name,
- template: values.template as string | undefined,
- yes: values.yes as boolean | undefined,
- overwrite: values.overwrite as boolean | undefined,
- help: values.help as boolean | undefined,
- version: values.version as boolean | undefined,
- });
- break;
- }
-
- await createAlchemy({
- name,
- template: values.template as string | undefined,
- yes: values.yes as boolean | undefined,
- overwrite: values.overwrite as boolean | undefined,
- help: values.help as boolean | undefined,
- version: values.version as boolean | undefined,
- });
- break;
- }
+ ...options,
+ yes: isTest || options.yes,
+ };
+ await createAlchemy(combinedInput);
+ }),
+});
- case "bootstrap": {
- const subcommand = positionals.shift();
+export type AppRouter = typeof router;
- if (subcommand === "s3") {
- await bootstrapS3({
- region: values.region as string | undefined,
- prefix: values.prefix as string | undefined,
- help: values.help as boolean | undefined,
- });
- } else {
- console.error(`Unknown bootstrap subcommand: ${subcommand || "(none)"}`);
- console.error("Available bootstrap subcommands:");
- console.error(" s3 Create S3 bucket for state storage");
- process.exit(1);
- }
- break;
- }
+const cli = createCli({
+ router,
+ name: "alchemy",
+ version: getPackageVersion(),
+ description:
+ "🧪 Welcome to Alchemy! Creating infrastructure as code with JavaScript and TypeScript.",
+});
- default:
- console.error(`Unknown command: ${command}`);
- process.exit(1);
-}
+cli.run();
diff --git a/alchemy/bin/commands/create.ts b/alchemy/bin/commands/create.ts
new file mode 100644
index 000000000..95989fe81
--- /dev/null
+++ b/alchemy/bin/commands/create.ts
@@ -0,0 +1,263 @@
+import {
+ cancel,
+ confirm,
+ intro,
+ isCancel,
+ log,
+ note,
+ outro,
+ select,
+ spinner,
+ text,
+} from "@clack/prompts";
+import * as fs from "fs-extra";
+import { existsSync } from "node:fs";
+import { join, resolve } from "node:path";
+import pc from "picocolors";
+
+import { throwWithContext } from "../errors.ts";
+import { detectPackageManager } from "../services/package-manager.ts";
+import { copyTemplate } from "../services/template-manager.ts";
+import type { CreateInput, ProjectContext, TemplateType } from "../types.ts";
+import { ProjectNameSchema, TEMPLATE_DEFINITIONS } from "../types.ts";
+
+const isTest = process.env.NODE_ENV === "test";
+
+async function createProjectContext(
+ cliOptions: CreateInput,
+): Promise<ProjectContext> {
+ const detectedPm = detectPackageManager();
+ const options = { yes: isTest, ...cliOptions };
+
+ let name: string;
+ if (options.name) {
+ const result = ProjectNameSchema.safeParse(options.name);
+ if (!result.success) {
+ throw new Error(
+ `Invalid project name: ${result.error.errors[0]?.message}`,
+ );
+ }
+ name = options.name;
+ log.info(`Using project name: ${pc.yellow(name)}`);
+ } else {
+ const nameResult = await text({
+ message: "What is your project name?",
+ placeholder: "my-alchemy-app",
+ validate: (value) => {
+ const result = ProjectNameSchema.safeParse(value);
+ return result.success ? undefined : result.error.errors[0]?.message;
+ },
+ });
+
+ if (isCancel(nameResult)) {
+ cancel(pc.red("Operation cancelled."));
+ process.exit(0);
+ }
+
+ name = nameResult;
+ }
+
+ let selectedTemplate: TemplateType;
+ if (options.template) {
+ selectedTemplate = options.template;
+ log.info(`Using template: ${pc.yellow(selectedTemplate)}`);
+ } else {
+ const templateResult = await select({
+ message: "Which template would you like to use?",
+ options: TEMPLATE_DEFINITIONS.map((t) => ({
+ label: t.description,
+ value: t.name as TemplateType,
+ })),
+ });
+
+ if (isCancel(templateResult)) {
+ cancel(pc.red("Operation cancelled."));
+ process.exit(0);
+ }
+
+ selectedTemplate = templateResult;
+ }
+
+ const templateDefinition = TEMPLATE_DEFINITIONS.find(
+ (t) => t.name === selectedTemplate,
+ );
+ if (!templateDefinition) {
+ throw new Error(
+ `Template '${pc.yellow(selectedTemplate)}' not found. Available templates: ${TEMPLATE_DEFINITIONS.map((t) => pc.cyan(t.name)).join(", ")}`,
+ );
+ }
+
+ const path = resolve(process.cwd(), name);
+ let packageManager = options.packageManager || detectedPm;
+
+ // Override package manager if specific flags are provided
+ if (options.bun) packageManager = "bun";
+ else if (options.npm) packageManager = "npm";
+ else if (options.pnpm) packageManager = "pnpm";
+ else if (options.yarn) packageManager = "yarn";
+
+ let shouldInstall = true;
+ if (options.install !== undefined) {
+ shouldInstall = options.install;
+ log.info(
+ `Dependencies installation: ${pc.yellow(shouldInstall ? "enabled" : "disabled")}`,
+ );
+ } else if (!options.yes) {
+ const installResult = await confirm({
+ message: "Install dependencies?",
+ initialValue: true,
+ });
+
+ if (isCancel(installResult)) {
+ cancel(pc.red("Operation cancelled."));
+ process.exit(0);
+ }
+
+ shouldInstall = installResult;
+ }
+
+ return {
+ name,
+ path,
+ template: selectedTemplate,
+ packageManager,
+ isTest,
+ options: {
+ ...options,
+ install: shouldInstall,
+ },
+ };
+}
+
+async function handleDirectoryOverwrite(
+ context: ProjectContext,
+): Promise<void> {
+ if (!existsSync(context.path)) {
+ return;
+ }
+
+ let shouldOverwrite = false;
+
+ if (context.options.overwrite) {
+ shouldOverwrite = true;
+ log.warn(
+ `Directory ${pc.yellow(context.name)} already exists. Overwriting due to ${pc.cyan("--overwrite")} flag.`,
+ );
+ } else {
+ const overwriteResult = await confirm({
+ message: `Directory ${pc.yellow(context.name)} already exists. Overwrite?`,
+ initialValue: false,
+ });
+
+ if (isCancel(overwriteResult)) {
+ cancel(pc.red("Operation cancelled."));
+ process.exit(0);
+ }
+
+ shouldOverwrite = overwriteResult;
+ }
+
+ if (!shouldOverwrite) {
+ cancel(pc.red("Operation cancelled."));
+ process.exit(0);
+ }
+
+ const s = spinner();
+ s.start(`Removing existing directory: ${pc.yellow(context.path)}`);
+ try {
+ await fs.rm(context.path, { recursive: true, force: true });
+ s.stop(`Directory ${pc.yellow(context.path)} removed.`);
+ } catch (error) {
+ s.stop(pc.red(`Failed to remove directory ${pc.yellow(context.path)}.`));
+ throwWithContext(error, "Directory removal failed");
+ }
+}
+
+async function initializeTemplate(context: ProjectContext): Promise<void> {
+ const templateDefinition = TEMPLATE_DEFINITIONS.find(
+ (t) => t.name === context.template,
+ );
+ if (!templateDefinition) {
+ throw new Error(`Template definition not found for: ${context.template}`);
+ }
+
+ try {
+ await copyTemplate(context.template, context);
+ } catch (error) {
+ throwWithContext(
+ error,
+ `Template initialization failed for '${context.template}'`,
+ );
+ }
+
+ // Create .gitignore if it doesn't exist
+ const gitignorePath = join(context.path, ".gitignore");
+ if (!existsSync(gitignorePath)) {
+ try {
+ await fs.writeFile(
+ gitignorePath,
+ "node_modules/\n.env\n.env.local\ndist/\nlib/\n.wrangler/\nwrangler.jsonc\n*.tsbuildinfo\n",
+ );
+ } catch (error) {
+ const errorMsg = error instanceof Error ? error.message : String(error);
+ log.warn(`Failed to create .gitignore: ${errorMsg}`);
+ }
+ }
+}
+
+export async function createAlchemy(cliOptions: CreateInput): Promise<void> {
+ try {
+ intro(pc.cyan("🧪 Welcome to Alchemy!"));
+ log.info("Creating a new Alchemy project...");
+
+ const context = await createProjectContext(cliOptions);
+
+ log.info(`Detected package manager: ${pc.green(context.packageManager)}`);
+
+ await handleDirectoryOverwrite(context);
+
+ await initializeTemplate(context);
+
+ const installInstructions =
+ context.options.install === false
+ ? `
+${pc.cyan("📦 Install dependencies:")}
+ cd ${context.name}
+ ${context.packageManager} install
+
+`
+ : "";
+
+ note(
+ `
+${pc.cyan("📁 Navigate to your project:")}
+ cd ${context.name}
+
+${installInstructions}${pc.cyan("🚀 Deploy your project:")}
+ ${context.packageManager} run deploy
+
+${pc.cyan("🧹 Destroy your project:")}
+ ${context.packageManager} run destroy
+
+${pc.cyan("📚 Learn more:")}
+ https://alchemy.run
+`,
+ "Next Steps:",
+ );
+
+ outro(
+ pc.green(`✅ Project ${pc.yellow(context.name)} created successfully!`),
+ );
+ } catch (error) {
+ log.error("An unexpected error occurred:");
+ if (error instanceof Error) {
+ log.error(`${pc.red("Error:")} ${error.message}`);
+ if (error.stack && process.env.DEBUG) {
+ log.error(`${pc.gray("Stack trace:")}\n${error.stack}`);
+ }
+ } else {
+ log.error(pc.red(String(error)));
+ }
+ process.exit(1);
+ }
+}
diff --git a/alchemy/bin/constants.ts b/alchemy/bin/constants.ts
new file mode 100644
index 000000000..73b73f4f7
--- /dev/null
+++ b/alchemy/bin/constants.ts
@@ -0,0 +1,13 @@
+import path from "node:path";
+import { fileURLToPath } from "node:url";
+import { getPackageVersion } from "./services/get-package-version.ts";
+
+const __filename = fileURLToPath(import.meta.url);
+const distPath = path.dirname(__filename);
+export const PKG_ROOT = path.join(distPath, "../");
+
+export const dependencyVersionMap = {
+ alchemy: getPackageVersion(),
+} as const;
+
+export type DependencyVersionMap = keyof typeof dependencyVersionMap;
diff --git a/alchemy/bin/create-alchemy.ts b/alchemy/bin/create-alchemy.ts
deleted file mode 100644
index ee1d883f9..000000000
--- a/alchemy/bin/create-alchemy.ts
+++ /dev/null
@@ -1,1214 +0,0 @@
-#!/usr/bin/env node
-
-import { confirm, input, select } from "@inquirer/prompts";
-import { applyEdits, modify } from "jsonc-parser";
-import { execSync } from "node:child_process";
-import { existsSync } from "node:fs";
-import * as fs from "node:fs/promises";
-import { join, resolve } from "node:path";
-
-const isTest = process.env.NODE_ENV === "test";
-
-// Package manager detection
-type PackageManager = "bun" | "npm" | "pnpm" | "yarn";
-
-// CLI options interface
-interface CliOptions {
- name?: string;
- template?: string;
- yes?: boolean;
- overwrite?: boolean;
- help?: boolean;
- version?: boolean;
-}
-
-// Mutable state that will be initialised inside `createAlchemy()` and
-// reused by the helper utilities defined later in the file. Keeping these
-// at module scope preserves the existing references inside helper
-// functions without requiring any further changes.
-let options: CliOptions = { yes: isTest };
-let pm: PackageManager;
-let alchemyVersion: string;
-let projectPath: string;
-let projectName: string;
-let template: string;
-
-// Define templates
-const templates: Template[] = [
- {
- name: "typescript",
- description: "Basic TypeScript Worker project",
- init: initTypescriptProject,
- },
- {
- name: "vite",
- description: "React Vite.js application",
- init: initViteProject,
- },
- {
- name: "astro",
- description: "Astro application with SSR",
- init: initAstroProject,
- },
- {
- name: "react-router",
- description: "React Router application",
- init: initReactRouterProject,
- },
- {
- name: "sveltekit",
- description: "SvelteKit application",
- init: initSvelteKitProject,
- },
- {
- name: "tanstack-start",
- description: "TanStack Start application",
- init: initTanstackStartProject,
- },
- {
- name: "rwsdk",
- description: "Redwood SDK application",
- init: initRedwoodProject,
- },
- {
- name: "nuxt",
- description: "Nuxt.js application",
- init: initNuxtProject,
- },
-];
-
-// -------------------------------------------------------------------------------------------------
-// Public API – this is invoked by the parent `alchemy` CLI wrapper. It reproduces the original
-// behaviour but relies on the caller to provide the already-parsed command-line options.
-// -------------------------------------------------------------------------------------------------
-
-export async function createAlchemy(
- cliOptions: Partial<CliOptions> = {},
-): Promise<void> {
- // Merge with defaults so helper functions can keep referencing the global `options`
- options = { yes: isTest, ...cliOptions };
-
- // Handle help / version flags early
- if (options.help) {
- console.log(`
-Usage: alchemy create [options]
-
-Options:
--h, --help Show help
--v, --version Show version
-<name> Project name (positional / non-interactive)
---template=<template> Template name (non-interactive)
--y, --yes Skip confirmations (non-interactive)
---overwrite Overwrite existing directory
-
-Templates:
-${templates.map((t) => ` ${t.name.padEnd(15)} ${t.description}`).join("\n")}
-`);
- return;
- }
-
- if (options.version) {
- console.log("0.28.0");
- return;
- }
-
- console.log("🧪 Welcome to Alchemy!");
- console.log("Creating a new Alchemy project...\n");
-
- pm = detectPackageManager();
- console.log(`Detected package manager: ${pm}\n`);
-
- // Acquire project name – prompt if not provided
- if (options.name) {
- projectName = options.name;
- console.log(`Using project name: ${projectName}`);
- } else {
- projectName = await input({
- message: "What is your project name?",
- default: "my-alchemy-app",
- validate: (input) => {
- if (!input.trim()) return "Project name is required";
- if (!/^[a-z0-9-_]+$/i.test(input))
- return "Project name can only contain letters, numbers, hyphens, and underscores";
- return true;
- },
- });
- }
-
- // Validate project name (even if provided non-interactively)
- if (!projectName.trim()) {
- throw new Error("Project name is required");
- }
- if (!/^[a-z0-9-_]+$/i.test(projectName)) {
- throw new Error(
- "Project name can only contain letters, numbers, hyphens, and underscores",
- );
- }
-
- // Select template – prompt if not provided
- if (options.template) {
- template = options.template;
- console.log(`Using template: ${template}`);
-
- if (!templates.find((t) => t.name === template)) {
- throw new Error(
- `Template '${template}' not found. Available templates: ${templates.map((t) => t.name).join(", ")}`,
- );
- }
- } else {
- template = await select({
- message: "Which template would you like to use?",
- choices: templates.map((t) => ({
- name: t.description,
- value: t.name,
- })),
- });
- }
-
- const selectedTemplate = templates.find((t) => t.name === template)!;
-
- // Prepare working directory
- projectPath = resolve(process.cwd(), projectName);
-
- if (existsSync(projectPath)) {
- let overwriteConfirmed: boolean;
- if (options.overwrite || options.yes) {
- overwriteConfirmed = true;
- console.log(
- `Directory ${projectName} already exists. Overwriting due to CLI flag.`,
- );
- } else {
- overwriteConfirmed = await confirm({
- message: `Directory ${projectName} already exists. Overwrite?`,
- default: false,
- });
- }
-
- if (!overwriteConfirmed) {
- console.log("Cancelled.");
- return;
- }
- }
-
- console.log(`\n🔨 Creating ${template} project in ${projectPath}...`);
-
- alchemyVersion = `alchemy${isTest ? "@file:../../alchemy" : ""}`;
-
- // Execute the template initialisation
- await selectedTemplate.init(projectName, projectPath);
-
- // Ensure a .gitignore exists
- const gitignorePath = join(projectPath, ".gitignore");
- if (!existsSync(gitignorePath)) {
- await fs.writeFile(
- gitignorePath,
- "node_modules/\n.env\n.env.local\ndist/\nlib/\n.wrangler/\nwrangler.jsonc\n*.tsbuildinfo\n",
- );
- }
-
- console.log(`\n✅ Project ${projectName} created successfully!`);
- console.log("\n📁 Navigate to your project:");
- console.log(` cd ${projectName}`);
- console.log("\n🚀 Deploy your project:");
- console.log(` ${pm} run deploy`);
- console.log("\n🧹 Destroy your project:");
- console.log(` ${pm} run destroy`);
- console.log("\n📚 Learn more: https://alchemy.run");
-}
-
-// Template definitions
-interface Template {
- name: string;
- description: string;
- init: (projectName: string, projectPath: string) => Promise<void>;
-}
-
-async function initTypescriptProject(
- projectName: string,
- projectPath: string,
-): Promise<void> {
- await mkdir(projectPath);
-
- const commands = getPackageManagerCommands(pm);
-
- // Initialize project
- execCommand(commands.init, projectPath);
-
- await createEnvTs(projectPath);
- await initWranglerRunTs(projectPath, {
- entrypoint: "src/worker.ts",
- });
- await appendGitignore(projectPath);
-
- // Create basic project structure
- await mkdir(projectPath, "src");
-
- // Create worker.ts
- await fs.writeFile(
- join(projectPath, "src", "worker.ts"),
- `import type { worker } from "../alchemy.run.ts";
-
-export default {
- async fetch(request: Request, env: typeof worker.Env, ctx: ExecutionContext): Promise<Response> {
- return new Response("Hello World from ${projectName}!");
- },
-};
-`,
- );
-
- // Create tsconfig.json
- await writeJsonFile(join(projectPath, "tsconfig.json"), {
- compilerOptions: {
- target: "ESNext",
- module: "ESNext",
- moduleResolution: "Bundler",
- strict: true,
- esModuleInterop: true,
- skipLibCheck: true,
- allowImportingTsExtensions: true,
- rewriteRelativeImportExtensions: true,
- types: ["@cloudflare/workers-types", "@types/node"],
- },
- include: ["src/**/*", "types/**/*", "alchemy.run.ts"],
- });
-
- await writeJsonFile(join(projectPath, "package.json"), {
- name: projectName,
- version: "0.0.0",
- description: "Alchemy Typescript Project",
- type: "module",
- scripts: {
- build: "tsc -b",
- deploy: "tsx ./alchemy.run.ts",
- destroy: "tsx ./alchemy.run.ts --destroy",
- },
- devDependencies: {
- "@cloudflare/workers-types": "latest",
- "@types/node": "^24.0.1",
- alchemy: "^0.28.0",
- typescript: "^5.8.3",
- },
- });
-
- // Install dependencies
- install({
- devDependencies: [
- alchemyVersion,
- "@cloudflare/workers-types",
- "@types/node",
- "typescript",
- ],
- });
-}
-
-async function initViteProject(
- projectName: string,
- projectPath: string,
-): Promise<void> {
- npx(`create-vite@6.5.0 ${projectName} --template react-ts`);
- const root = projectPath;
- await rm(join(root, "tsconfig.app.json"));
- await rm(join(root, "tsconfig.node.json"));
-
- await initWebsiteProject(projectPath, {
- entrypoint: "worker/index.ts",
- devDependencies: ["@cloudflare/vite-plugin"],
- });
-
- await fs.writeFile(
- join(root, "vite.config.ts"),
- `import { defineConfig } from 'vite'
-import react from '@vitejs/plugin-react'
-
-import { cloudflare } from "@cloudflare/vite-plugin";
-
-// https://vite.dev/config/
-export default defineConfig({
- plugins: [react(), cloudflare()],
-});
-`,
- );
- await writeJsonFile(join(root, "tsconfig.json"), {
- exclude: ["test"],
- include: ["types/**/*.ts", "src/**/*.ts", "alchemy.run.ts"],
- compilerOptions: {
- target: "es2021",
- lib: ["es2021"],
- jsx: "react-jsx",
- module: "es2022",
- moduleResolution: "Bundler",
- resolveJsonModule: true,
- allowJs: true,
- checkJs: false,
- noEmit: true,
- isolatedModules: true,
- allowSyntheticDefaultImports: true,
- forceConsistentCasingInFileNames: true,
- allowImportingTsExtensions: true,
- rewriteRelativeImportExtensions: true,
- strict: true,
- skipLibCheck: true,
- types: ["@cloudflare/workers-types", "./types/env.d.ts"],
- },
- });
- await mkdir(root, "worker");
- await fs.writeFile(
- join(root, "worker", "index.ts"),
- `export default {
- fetch(request) {
- const url = new URL(request.url);
-
- if (url.pathname.startsWith("/api/")) {
- return Response.json({
- name: "Cloudflare",
- });
- }
- return new Response(null, { status: 404 });
- },
-} satisfies ExportedHandler;
-`,
- );
-}
-
-async function initAstroProject(
- projectName: string,
- projectPath: string,
-): Promise<void> {
- create(
- `astro@latest ${projectName} -- --no-git --no-deploy --install ${options.yes ? "--yes" : ""}`,
- );
-
- await initWebsiteProject(projectPath, {
- scripts: {
- dev: "astro dev",
- build: "astro check && astro build",
- },
- devDependencies: ["@astrojs/cloudflare"],
- });
-
- // Update astro.config.mjs
- await fs.writeFile(
- join(projectPath, "astro.config.mjs"),
- `import { defineConfig } from 'astro/config';
-import cloudflare from '@astrojs/cloudflare';
-
-// https://astro.build/config
-export default defineConfig({
- output: 'server',
- adapter: cloudflare(),
-});
-`,
- );
-
- // Create API route example
- await mkdir(projectPath, "src", "pages", "api");
- await fs.writeFile(
- join(projectPath, "src", "pages", "api", "hello.ts"),
- `import type { APIRoute } from 'astro';
-
-export const GET: APIRoute = async ({ request }) => {
- // Access Cloudflare runtime context
- const runtime = request.cf;
-
- return new Response(JSON.stringify({
- message: "Hello from Astro API on Cloudflare!",
- timestamp: new Date().toISOString(),
- colo: runtime?.colo || "unknown",
- country: runtime?.country || "unknown",
- city: runtime?.city || "unknown",
- }), {
- status: 200,
- headers: {
- 'Content-Type': 'application/json',
- },
- });
-};
-`,
- );
-}
-
-async function initReactRouterProject(
- projectName: string,
- projectPath: string,
-): Promise<void> {
- create(
- `cloudflare@2.49.3 ${projectName} -- --framework=react-router --no-git --no-deploy ${options.yes ? "--yes" : ""}`,
- );
-
- await initWebsiteProject(projectPath, {
- entrypoint: "workers/app.ts",
- devDependencies: ["@cloudflare/vite-plugin"],
- tsconfig: "tsconfig.node.json",
- });
-
- await modifyTsConfig(projectPath, {
- tsconfig: "tsconfig.node.json",
- });
-
- await modifyJsoncFile(join(projectPath, "tsconfig.json"), {
- "compilerOptions.types": undefined,
- "compilerOptions.noEmit": undefined,
- });
-
- await fs.writeFile(
- join(projectPath, "vite.config.ts"),
- `import { reactRouter } from "@react-router/dev/vite";
-import { cloudflare } from "@cloudflare/vite-plugin";
-import tailwindcss from "@tailwindcss/vite";
-import { defineConfig } from "vite";
-import tsconfigPaths from "vite-tsconfig-paths";
-
-export default defineConfig({
- plugins: [
- cloudflare({ viteEnvironment: { name: "ssr" } }),
- tailwindcss(),
- reactRouter(),
- tsconfigPaths(),
- ],
-});
-`,
- );
-}
-
-async function initSvelteKitProject(
- projectName: string,
- projectPath: string,
-): Promise<void> {
- npx(
- `-y sv@latest create --install=${pm} --types=ts ${options.yes ? "--template minimal --no-add-ons" : ""} ${projectName}`,
- );
-
- await initWebsiteProject(projectPath, {
- // entrypoint: "src/routes/index.svelte",
- });
-
- install({
- devDependencies: [
- "@sveltejs/adapter-cloudflare",
- "@sveltejs/vite-plugin-svelte",
- ],
- });
-
- // Update svelte.config.js
- await fs.writeFile(
- join(projectPath, "svelte.config.js"),
- `import adapter from '@sveltejs/adapter-cloudflare';
-import { vitePreprocess } from '@sveltejs/vite-plugin-svelte';
-
-/** @type {import('@sveltejs/kit').Config} */
-const config = {
- preprocess: vitePreprocess(),
- kit: {
- adapter: adapter()
- }
-};
-
-export default config;
-`,
- );
-
- // Create vite.config.ts
- await fs.writeFile(
- join(projectPath, "vite.config.ts"),
- `import { sveltekit } from '@sveltejs/kit/vite';
-import { defineConfig } from 'vite';
-
-export default defineConfig({
-\tplugins: [sveltekit()],
-});
-`,
- );
-}
-
-async function initRedwoodProject(): Promise<void> {
- npx(`-y create-rwsdk@latest ${projectName}`);
- install();
- execCommand(`${getPackageManagerCommands(pm).run} dev:init`, projectPath);
- await initWebsiteProject(projectPath, {
- scripts: {
- deploy: "tsx --env-file .env ./alchemy.run.ts",
- destroy: "tsx --env-file .env ./alchemy.run.ts --destroy",
- },
- });
- await modifyJsoncFile(join(projectPath, "tsconfig.json"), {
- include: undefined,
- "compilerOptions.types": ["@cloudflare/workers-types", "types/**/*.ts"],
- });
-}
-
-async function initTanstackStartProject(
- projectName: string,
- projectPath: string,
-): Promise<void> {
- npx(
- `gitpick TanStack/router/tree/main/examples/react/start-basic ${projectName}`,
- );
-
- await initWebsiteProject(projectPath);
-
- await Promise.all([
- rm(join(projectPath, "postcss.config.mjs")),
- rm(join(projectPath, "tailwind.config.mjs")),
- ]);
-
- install({
- dependencies: ["tailwind-merge@3"],
- devDependencies: ["tailwindcss@4", "@tailwindcss/vite@latest", "postcss"],
- });
-
- await Promise.all([
- fs.writeFile(
- join(projectPath, "vite.config.ts"),
- `import tailwindcss from "@tailwindcss/vite";
-import { tanstackStart } from "@tanstack/react-start/plugin/vite";
-import { cloudflareWorkersDevEnvironmentShim } from "alchemy/cloudflare";
-import { defineConfig, PluginOption } from "vite";
-import tsConfigPaths from "vite-tsconfig-paths";
-
-export default defineConfig({
- server: {
- port: 3000,
- },
- build: {
- target: "esnext",
- rollupOptions: {
- external: ["node:async_hooks", "cloudflare:workers"],
- },
- },
- plugins: [
- tailwindcss() as PluginOption,
- cloudflareWorkersDevEnvironmentShim(),
- tsConfigPaths({
- projects: ["./tsconfig.json"],
- }),
- tanstackStart({
- target: "cloudflare-module",
- tsr: {
- routeTreeFileHeader: [
- "/** biome-ignore-all lint/suspicious/noExplicitAny: code generated by @tanstack/react-start */",
- ],
- quoteStyle: "double",
- },
- }),
- ],
-});
-`,
- ),
-
- fs.writeFile(
- join(projectPath, "src", "styles", "app.css"),
- `@import "tailwindcss";
-
-:root {
- --border: var(--color-zinc-200);
- --popover: var(--color-white);
- --popover-foreground: var(--color-zinc-950);
-}
-
-@theme {
- --font-sans: var(--font-sans, Inter), ui-sans-serif, system-ui, sans-serif,
- "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji";
-}
-`,
- ),
- ]);
-}
-
-async function initNuxtProject(): Promise<void> {
- // create(
- // `cloudflare@latest -- ${projectName} --framework=nuxt --no-git --no-deploy`,
- // );
- npx(
- `nuxi@3.25.1 init nuxt --packageManager npm --no-install --no-gitInit ${isTest ? "--template=ui -M @nuxt/ui" : ""}`,
- );
- // npx nuxi@3.25.1 init nuxt --packageManager npm --no-install --no-gitInit
- /*
--M, --modules Nuxt modules to install (comma separated without spaces)
-
- ◻ @nuxt/content – The file-based CMS with support for Markdown, YAML, JSON
-◻ @nuxt/eslint – Project-aware, easy-to-use, extensible and future-proof ESLint integration
-◻ @nuxt/fonts – Add custom web fonts with performance in mind
-◻ @nuxt/icon – Icon module for Nuxt with 200,000+ ready to use icons from Iconify
-◻ @nuxt/image – Add images with progressive processing, lazy-loading, resizing and providers
-support
-◻ @nuxt/scripts – Add 3rd-party scripts without sacrificing performance
-◻ @nuxt/test-utils – Test utilities for Nuxt
-◻ @nuxt/ui – The Intuitive UI Library powered by Reka UI and Tailwind CSS
-*/
-
- await initWebsiteProject(projectPath, {
- scripts: {
- build: "nuxt build",
- },
- include: ["server/**/*.ts"],
- });
-
- await fs.writeFile(
- join(projectPath, "nuxt.config.ts"),
- `// https://nuxt.com/docs/api/configuration/nuxt-config
-export default defineNuxtConfig({
- compatibilityDate: '2025-05-15',
- devtools: { enabled: true },
- nitro: {
- preset: "cloudflare_module",
- cloudflare: {
- deployConfig: true,
- nodeCompat: true
- }
- },
- modules: ["nitro-cloudflare-dev"],
-});
-`,
- );
-
- install({
- devDependencies: ["nitro-cloudflare-dev"],
- });
-
- install();
-
- await mkdir(projectPath, "server", "api");
- await fs.writeFile(
- join(projectPath, "server", "api", "hello.ts"),
- `// see: https://nuxt.com/docs/guide/directory-structure/server
-
-export default defineEventHandler((event) => {
- return {
- hello: "world",
- };
-});
-`,
- );
-
- await mkdir(projectPath, "server", "middleware");
- await fs.writeFile(
- join(projectPath, "server", "middleware", "hello.ts"),
- `// see: https://nuxt.com/docs/guide/directory-structure/server#server-middleware
-
-export default defineEventHandler((event) => {
- console.log('New request: ' + getRequestURL(event))
-})
-`,
- );
- await fs.writeFile(
- join(projectPath, "server", "middleware", "auth.ts"),
- `// see: https://nuxt.com/docs/guide/directory-structure/server#server-middleware
-
-export default defineEventHandler((event) => {
- event.context.auth = { user: 123 }
-});
-`,
- );
-}
-
-interface WebsiteOptions {
- entrypoint?: string;
- tsconfig?: string;
- scripts?: Record<string, string>;
- include?: string[];
- types?: string[];
- devDependencies?: string[];
- dependencies?: string[];
-}
-
-/**
- * Unified initialization function for website projects that use create-cloudflare
- */
-async function initWebsiteProject(
- projectPath: string,
- options: WebsiteOptions = {},
-): Promise<void> {
- await createEnvTs(projectPath);
- await cleanupWrangler(projectPath);
- await modifyTsConfig(projectPath, options);
- await modifyPackageJson(projectPath, options?.scripts);
-
- // Create alchemy.run.ts
- await initWranglerRunTs(projectPath, options);
-
- await appendGitignore(projectPath);
- await appendEnv(projectPath);
-
- install({
- dependencies: options.dependencies,
- devDependencies: [
- "@cloudflare/workers-types",
- alchemyVersion,
- ...(pm === "bun" ? [] : ["tsx"]),
- "typescript",
- ...(options.devDependencies ?? []),
- ],
- });
-}
-
-async function appendGitignore(projectPath: string): Promise<void> {
- try {
- await fs.writeFile(
- join(projectPath, ".gitignore"),
- [
- await fs.readFile(join(projectPath, ".gitignore"), "utf-8"),
- ".alchemy/",
- ".env",
- ].join("\n"),
- );
- } catch {
- await fs.writeFile(join(projectPath, ".gitignore"), ".alchemy/");
- }
-}
-
-async function initWranglerRunTs(
- projectPath: string,
- options?: {
- entrypoint?: string;
- },
-): Promise<void> {
- // Create alchemy.run.ts
- await fs.writeFile(
- join(projectPath, "alchemy.run.ts"),
- createAlchemyRunTs(projectName, options),
- );
-}
-
-function createAlchemyRunTs(
- projectName: string,
- options?: {
- entrypoint?: string;
- },
-): string {
- const adopt = isTest ? "\n adopt: true," : "";
- if (template === "typescript") {
- return `///
-
-import alchemy from "alchemy";
-import { Worker } from "alchemy/cloudflare";
-
-const app = await alchemy("${projectName}");
-
-export const worker = await Worker("worker", {
- name: "${projectName}",${adopt}
- entrypoint: "${options?.entrypoint || "./src/worker.ts"}",
-});
-
-console.log(worker.url);
-
-await app.finalize();
-`;
- } else if (template === "rwsdk") {
- return `///
-import alchemy from "alchemy";
-import { D1Database, DurableObjectNamespace, Redwood } from "alchemy/cloudflare";
-
-const app = await alchemy("${projectName}");
-
-const database = await D1Database("database", {
- name: "${projectName}-db",${adopt}
- migrationsDir: "migrations",
-});
-
-export const worker = await Redwood("website", {
- name: "${projectName}-website",
- command: "${detectPackageManager()} run build",${adopt}
- bindings: {
- AUTH_SECRET_KEY: alchemy.secret(process.env.AUTH_SECRET_KEY),
- DB: database,
- SESSION_DURABLE_OBJECT: new DurableObjectNamespace("session", {
- className: "SessionDurableObject",
- }),
- },
-});
-
-console.log({
- url: worker.url,
-});
-
-await app.finalize();
- `;
- }
-
- // Map template names to their corresponding resource names
- const resourceMap: Record<string, string> = {
- vite: "Vite",
- astro: "Astro",
- "react-router": "ReactRouter",
- sveltekit: "SvelteKit",
- "tanstack-start": "TanStackStart",
- rwsdk: "Redwood",
- nuxt: "Nuxt",
- };
-
- const resourceName = resourceMap[template];
- if (!resourceName) {
- throw new Error(`Unknown template: ${template}`);
- }
-
- // Special configuration for Vite template
- const config =
- options?.entrypoint !== undefined
- ? `{
- main: "${options?.entrypoint || "./src/index.ts"}",
- command: "${detectPackageManager()} run build",${adopt}
-}`
- : `{
- command: "${detectPackageManager()} run build",${adopt}
-}`;
-
- return `///
-
-import alchemy from "alchemy";
-import { ${resourceName} } from "alchemy/cloudflare";
-
-const app = await alchemy("${projectName}");
-
-export const worker = await ${resourceName}("website", ${config});
-
-console.log({
- url: worker.url,
-});
-
-await app.finalize();
-`;
-}
-
-async function tryReadFile(path: string): Promise<string | undefined> {
- try {
- return await fs.readFile(path, "utf-8");
- } catch {
- return undefined;
- }
-}
-
-async function appendFile(path: string, content: string): Promise<void> {
- const existingContent = await tryReadFile(path);
- await fs.writeFile(
- path,
- `${existingContent ? `${existingContent}\n` : ""}${content}`,
- );
-}
-
-async function appendEnv(projectPath: string): Promise<void> {
- await appendFile(join(projectPath, ".env"), "ALCHEMY_PASSWORD=change-me");
- await appendFile(
- join(projectPath, ".env.example"),
- "ALCHEMY_PASSWORD=change-me",
- );
-}
-
-async function createEnvTs(
- projectPath: string,
- identifier = "worker",
-): Promise<void> {
- // Create env.d.ts for proper typing
- await mkdir(projectPath, "types");
- await fs.writeFile(
- join(projectPath, "types", "env.d.ts"),
- `// This file infers types for the cloudflare:workers environment from your Alchemy Worker.
-// @see https://alchemy.run/docs/concepts/bindings.html#type-safe-bindings
-
-import type { ${identifier} } from "../alchemy.run.ts";
-
-export type CloudflareEnv = typeof ${identifier}.Env;
-
-declare global {
- type Env = CloudflareEnv;
-}
-
-declare module "cloudflare:workers" {
- namespace Cloudflare {
- export interface Env extends CloudflareEnv {}
- }
-}
-`,
- );
-}
-
-async function writeJsonFile(file: string, content: any): Promise<void> {
- await fs.writeFile(file, JSON.stringify(content, null, 2));
-}
-
-async function cleanupWrangler(projectPath: string): Promise<void> {
- if (existsSync(join(projectPath, "worker-configuration.d.ts"))) {
- await fs.unlink(join(projectPath, "worker-configuration.d.ts"));
- }
- if (existsSync(join(projectPath, "wrangler.jsonc"))) {
- await fs.unlink(join(projectPath, "wrangler.jsonc"));
- }
-}
-
-/**
- * Modifies a JSON/JSONC file with the given modifications
- */
-async function modifyJsoncFile(
- file: string,
- modifications: Record<string, any>,
-): Promise<void> {
- if (!existsSync(file)) {
- return; // No file to modify
- }
-
- const content = await fs.readFile(file, "utf-8");
- let modifiedContent = content;
-
- for (const [path, value] of Object.entries(modifications)) {
- const pathArray = path.split(".");
- const edits = modify(modifiedContent, pathArray, value, {
- formattingOptions: {
- tabSize: 2,
- insertSpaces: true,
- eol: "\n",
- },
- });
- modifiedContent = applyEdits(modifiedContent, edits);
- }
-
- await fs.writeFile(file, modifiedContent);
-}
-
-/**
- * Modifies tsconfig.json to set proper Cloudflare Workers types and remove worker-configuration.d.ts
- */
-async function modifyTsConfig(
- projectPath: string,
- options: WebsiteOptions = {},
-): Promise<void> {
- const tsconfigPath = join(projectPath, options.tsconfig ?? "tsconfig.json");
-
- if (!existsSync(tsconfigPath)) {
- return; // No tsconfig.json to modify
- }
-
- const tsconfigContent = await fs.readFile(tsconfigPath, "utf-8");
-
- // Set compilerOptions.types to ["@cloudflare/workers-types"]
- const typesEdit = modify(
- tsconfigContent,
- ["compilerOptions", "types"],
- ["@cloudflare/workers-types", "./types/env.d.ts", ...(options.types ?? [])],
- {
- formattingOptions: {
- tabSize: 2,
- insertSpaces: true,
- eol: "\n",
- },
- },
- );
-
- let modifiedContent = applyEdits(tsconfigContent, typesEdit);
-
- // Parse the JSON to get the current includes array
- const { parseTree, getNodeValue, findNodeAtLocation } = await import(
- "jsonc-parser"
- );
- const tree = parseTree(modifiedContent);
- const includeNode = tree ? findNodeAtLocation(tree, ["include"]) : undefined;
- const currentIncludes = includeNode ? getNodeValue(includeNode) : [];
-
- // Filter out worker-configuration.d.ts and ensure required files are included
- let newIncludes = Array.isArray(currentIncludes) ? [...currentIncludes] : [];
-
- // Remove worker-configuration.d.ts if it exists
- newIncludes = newIncludes.filter(
- (include) =>
- include !== "worker-configuration.d.ts" &&
- include !== "./worker-configuration.d.ts",
- );
-
- await fs.writeFile(
- tsconfigPath,
- applyEdits(
- modifiedContent,
- modify(
- modifiedContent,
- ["include"],
- Array.from(
- new Set([
- "alchemy.run.ts",
- "types/**/*.ts",
- ...newIncludes.filter(
- (include) =>
- include !== "worker-configuration.d.ts" &&
- include !== "./worker-configuration.d.ts",
- ),
- ...(options.include ?? []),
- ]),
- ),
- {
- formattingOptions: {
- tabSize: 2,
- insertSpaces: true,
- eol: "\n",
- },
- },
- ),
- ),
- );
-}
-
-/**
- * Modifies package.json for website projects to add proper scripts and type: "module"
- */
-async function modifyPackageJson(
- projectPath: string,
- scripts?: Record<string, string>,
-): Promise<void> {
- const packageJsonPath = join(projectPath, "package.json");
-
- if (!existsSync(packageJsonPath)) {
- return; // No package.json to modify
- }
-
- const packageJson = {
- type: "module",
- ...JSON.parse(await fs.readFile(packageJsonPath, "utf-8")),
- };
-
- // Determine deploy command based on package manager
- const deployCommand =
- pm === "bun"
- ? "bun --env-file=./.env ./alchemy.run.ts"
- : "tsx --env-file=./.env ./alchemy.run.ts";
-
- // Add/update scripts
- if (!packageJson.scripts) {
- packageJson.scripts = {};
- }
-
- packageJson.scripts.build = scripts?.build || "vite build";
- packageJson.scripts.deploy = scripts?.deploy || deployCommand;
- packageJson.scripts.destroy =
- scripts?.destroy || `${deployCommand} --destroy`;
-
- packageJson.scripts = {
- ...Object.fromEntries(
- Object.entries(packageJson.scripts).sort(([a], [b]) =>
- a.localeCompare(b),
- ),
- ),
- };
-
- // Write back to file with proper formatting
- await fs.writeFile(packageJsonPath, JSON.stringify(packageJson, null, 2));
-}
-
-function detectPackageManager(): PackageManager {
- // Check npm_execpath for bun
- if (process.env.npm_execpath?.includes("bun")) {
- return "bun";
- }
-
- // Check npm_config_user_agent
- const userAgent = process.env.npm_config_user_agent;
- if (userAgent) {
- if (userAgent.startsWith("bun")) return "bun";
- if (userAgent.startsWith("pnpm")) return "pnpm";
- if (userAgent.startsWith("yarn")) return "yarn";
- if (userAgent.startsWith("npm")) return "npm";
- }
-
- // Default fallback
- return "npm";
-}
-
-function getPackageManagerCommands(pm: PackageManager) {
- const commands = {
- bun: {
- init: "bun init -y",
- install: "bun install",
- add: "bun add",
- addDev: "bun add -D",
- run: "bun run",
- create: "bun create",
- x: "bunx",
- },
- npm: {
- init: "npm init -y",
- install: "npm install",
- add: "npm install",
- addDev: "npm install --save-dev",
- run: "npm run",
- create: "npm create",
- x: "npx",
- },
- pnpm: {
- init: "pnpm init",
- install: "pnpm install",
- add: "pnpm add",
- addDev: "pnpm add -D",
- run: "pnpm run",
- create: "pnpm create",
- x: "pnpm dlx",
- },
- yarn: {
- init: "yarn init -y",
- install: "yarn install",
- add: "yarn add",
- addDev: "yarn add -D",
- run: "yarn",
- create: "yarn create",
- x: "yarn dlx",
- },
- };
-
- return commands[pm];
-}
-
-async function rm(path: string): Promise<void> {
- if (existsSync(path)) {
- await fs.rm(path, { recursive: true });
- }
-}
-
-async function mkdir(...path: string[]): Promise<void> {
- await fs.mkdir(join(...path), {
- recursive: true,
- });
-}
-
-function execCommand(command: string, cwd: string = process.cwd()): void {
- console.log(command);
- try {
- execSync(command, { stdio: "inherit", cwd });
- } catch {
- console.error(`Failed to execute: ${command}`);
- process.exit(1);
- }
-}
-
-function install({
- dependencies,
- devDependencies,
- cwd = projectPath,
-}: {
- dependencies?: string[];
- devDependencies?: string[];
- cwd?: string;
-} = {}) {
- if (!dependencies && !devDependencies) {
- execCommand(getPackageManagerCommands(pm).install, cwd);
- }
- if (dependencies) {
- execCommand(
- `${getPackageManagerCommands(pm).add} ${dependencies.join(" ")}`,
- cwd,
- );
- }
- if (devDependencies) {
- execCommand(
- `${getPackageManagerCommands(pm).addDev} ${devDependencies.join(" ")}`,
- cwd,
- );
- }
-}
-
-function npx(command: string, cwd: string = process.cwd()): void {
- execCommand(
- `${getPackageManagerCommands(pm).x} ${options.yes ? "--yes" : ""} ${command}`,
- cwd,
- );
-}
-
-function create(command: string, cwd: string = process.cwd()): void {
- execCommand(
- `${getPackageManagerCommands(pm).create} ${options.yes ? "-y" : ""} ${command}`,
- cwd,
- );
-}
diff --git a/alchemy/bin/errors.ts b/alchemy/bin/errors.ts
new file mode 100644
index 000000000..e38b44f17
--- /dev/null
+++ b/alchemy/bin/errors.ts
@@ -0,0 +1,11 @@
+import { log } from "@clack/prompts";
+import pc from "picocolors";
+
+export function throwWithContext(error: unknown, context: string): never {
+ const errorMsg = error instanceof Error ? error.message : String(error);
+ log.error(pc.red(`❌ ${context}`));
+ log.error(pc.gray(` ${errorMsg}`));
+ throw new Error(`${context}: ${errorMsg}`, {
+ cause: error instanceof Error ? error : new Error(String(error)),
+ });
+}
diff --git a/alchemy/bin/services/dependencies.ts b/alchemy/bin/services/dependencies.ts
new file mode 100644
index 000000000..fdaff1629
--- /dev/null
+++ b/alchemy/bin/services/dependencies.ts
@@ -0,0 +1,47 @@
+import { log } from "@clack/prompts";
+import fs from "fs-extra";
+import path from "node:path";
+
+import {
+ type DependencyVersionMap,
+ dependencyVersionMap,
+} from "../constants.ts";
+
+export const addPackageDependencies = async (opts: {
+ dependencies?: DependencyVersionMap[];
+ devDependencies?: DependencyVersionMap[];
+ projectDir: string;
+}): Promise<void> => {
+ const { dependencies = [], devDependencies = [], projectDir } = opts;
+
+ const pkgJsonPath = path.join(projectDir, "package.json");
+
+ const pkgJson = await fs.readJson(pkgJsonPath);
+
+ if (!pkgJson.dependencies) pkgJson.dependencies = {};
+ if (!pkgJson.devDependencies) pkgJson.devDependencies = {};
+
+ for (const pkgName of dependencies) {
+ const version = dependencyVersionMap[pkgName];
+ if (version) {
+ pkgJson.dependencies[pkgName] = version;
+ } else {
+ log.warn(`Warning: Dependency ${pkgName} not found in version map.`);
+ }
+ }
+
+ for (const pkgName of devDependencies) {
+ const version = dependencyVersionMap[pkgName];
+ if (version) {
+ pkgJson.devDependencies[pkgName] = version;
+ } else {
+ log.warn(`Warning: Dev dependency ${pkgName} not found in version map.`);
+ }
+ }
+
+ await fs.writeJson(pkgJsonPath, pkgJson, {
+ spaces: 2,
+ });
+};
+
+export const addPackageDependency = addPackageDependencies;
diff --git a/alchemy/bin/services/get-package-version.ts b/alchemy/bin/services/get-package-version.ts
new file mode 100644
index 000000000..89ce896bf
--- /dev/null
+++ b/alchemy/bin/services/get-package-version.ts
@@ -0,0 +1,11 @@
+import fs from "fs-extra";
+import path from "node:path";
+import { PKG_ROOT } from "../constants.ts";
+
+export const getPackageVersion = () => {
+ const packageJsonPath = path.join(PKG_ROOT, "package.json");
+
+ const packageJsonContent = fs.readJSONSync(packageJsonPath);
+
+ return packageJsonContent.version ?? "1.0.0";
+};
diff --git a/alchemy/bin/services/package-manager.ts b/alchemy/bin/services/package-manager.ts
new file mode 100644
index 000000000..1bbd41fa3
--- /dev/null
+++ b/alchemy/bin/services/package-manager.ts
@@ -0,0 +1,113 @@
+import { log } from "@clack/prompts";
+import { execa } from "execa";
+import * as fs from "fs-extra";
+import type { PackageManager, ProjectContext } from "../types.ts";
+
+export function detectPackageManager(): PackageManager {
+ if (fs.pathExistsSync("bun.lockb")) return "bun";
+ if (fs.pathExistsSync("pnpm-lock.yaml")) return "pnpm";
+ if (fs.pathExistsSync("yarn.lock")) return "yarn";
+
+ if (process.env.npm_execpath?.includes("bun")) {
+ return "bun";
+ }
+
+ const userAgent = process.env.npm_config_user_agent;
+ if (userAgent) {
+ if (userAgent.startsWith("bun")) return "bun";
+ if (userAgent.startsWith("pnpm")) return "pnpm";
+ if (userAgent.startsWith("yarn")) return "yarn";
+ if (userAgent.startsWith("npm")) return "npm";
+ }
+
+ return "npm";
+}
+
+export function getPackageManagerCommands(pm: PackageManager) {
+ const commands = {
+ bun: {
+ init: "bun init -y",
+ install: "bun install",
+ add: "bun add",
+ addDev: "bun add -D",
+ run: "bun run",
+ create: "bun create",
+ x: "bunx",
+ },
+ npm: {
+ init: "npm init -y",
+ install: "npm install",
+ add: "npm install",
+ addDev: "npm install --save-dev",
+ run: "npm run",
+ create: "npm create",
+ x: "npx",
+ },
+ pnpm: {
+ init: "pnpm init",
+ install: "pnpm install",
+ add: "pnpm add",
+ addDev: "pnpm add -D",
+ run: "pnpm run",
+ create: "pnpm create",
+ x: "pnpm dlx",
+ },
+ yarn: {
+ init: "yarn init -y",
+ install: "yarn install",
+ add: "yarn add",
+ addDev: "yarn add -D",
+ run: "yarn",
+ create: "yarn create",
+ x: "yarn dlx",
+ },
+ };
+
+ return commands[pm];
+}
+
+export async function installDependencies(
+ context: ProjectContext,
+ {
+ dependencies,
+ devDependencies,
+ cwd,
+ }: {
+ dependencies?: string[];
+ devDependencies?: string[];
+ cwd?: string;
+ } = {},
+): Promise<void> {
+ const targetCwd = cwd || context.path;
+ const pm = context.packageManager;
+ const commands = getPackageManagerCommands(pm);
+
+ try {
+ if (!dependencies && !devDependencies) {
+ await execa(commands.install, {
+ cwd: targetCwd,
+ shell: true,
+ stdio: "pipe",
+ });
+ }
+
+ if (dependencies) {
+ await execa(`${commands.add} ${dependencies.join(" ")}`, {
+ cwd: targetCwd,
+ shell: true,
+ stdio: "pipe",
+ });
+ }
+
+ if (devDependencies) {
+ await execa(`${commands.addDev} ${devDependencies.join(" ")}`, {
+ cwd: targetCwd,
+ shell: true,
+ stdio: "pipe",
+ });
+ }
+ } catch (error) {
+ log.error("Failed to install dependencies");
+ throw error;
+ }
+}
diff --git a/alchemy/bin/services/template-manager.ts b/alchemy/bin/services/template-manager.ts
new file mode 100644
index 000000000..524ac96b8
--- /dev/null
+++ b/alchemy/bin/services/template-manager.ts
@@ -0,0 +1,137 @@
+import { log, spinner } from "@clack/prompts";
+import { execa } from "execa";
+import * as fs from "fs-extra";
+import { globby } from "globby";
+import { existsSync } from "node:fs";
+import * as path from "node:path";
+import { join } from "node:path";
+
+import { PKG_ROOT } from "../constants.ts";
+import { throwWithContext } from "../errors.ts";
+import type { ProjectContext } from "../types.ts";
+import { addPackageDependencies } from "./dependencies.ts";
+import {
+ getPackageManagerCommands,
+ installDependencies,
+} from "./package-manager.ts";
+
+export async function copyTemplate(
+ templateName: string,
+ context: ProjectContext,
+): Promise<void> {
+ const templatePath = path.join(PKG_ROOT, "templates", templateName);
+
+ if (!existsSync(templatePath)) {
+ throw new Error(`Template '${templateName}' not found at ${templatePath}`);
+ }
+
+ const filesToRename = ["_gitignore", "_npmrc", "_env", "_env.example"];
+
+ try {
+ const copySpinner = spinner();
+ copySpinner.start("Copying template files...");
+
+ const files = await globby("**/*", {
+ cwd: templatePath,
+ dot: true,
+ });
+
+ for (const file of files) {
+ const srcPath = join(templatePath, file);
+ let destFile = file;
+
+ const basename = path.basename(file);
+ if (filesToRename.includes(basename)) {
+ const newBasename = `.${basename.slice(1)}`;
+ destFile = path.join(path.dirname(file), newBasename);
+ }
+
+ const destPath = join(context.path, destFile);
+
+ await fs.ensureDir(path.dirname(destPath));
+ await fs.copy(srcPath, destPath);
+ }
+
+ copySpinner.stop("Template files copied successfully");
+
+ await updateTemplatePackageJson(context);
+
+ await addPackageDependencies({
+ devDependencies: ["alchemy"],
+ projectDir: context.path,
+ });
+
+ if (context.options.install !== false) {
+ const installSpinner = spinner();
+ installSpinner.start("Installing dependencies...");
+ try {
+ await installDependencies(context);
+ installSpinner.stop("Dependencies installed successfully");
+ } catch (error) {
+ installSpinner.stop("Failed to install dependencies");
+ throw error;
+ }
+ } else {
+ log.info("Skipping dependency installation");
+ }
+
+ if (templateName === "rwsdk") {
+ await handleRwsdkPostInstall(context);
+ }
+
+ log.success("Project setup complete!");
+ } catch (error) {
+ throwWithContext(error, `Failed to copy template '${templateName}'`);
+ }
+}
+
+async function updateTemplatePackageJson(
+ context: ProjectContext,
+): Promise<void> {
+ const packageJsonPath = join(context.path, "package.json");
+
+ if (!existsSync(packageJsonPath)) {
+ return;
+ }
+
+ const packageJson = await fs.readJson(packageJsonPath);
+
+ packageJson.name = context.name;
+
+ const deployCommand =
+ context.packageManager === "bun"
+ ? "bun --env-file=./.env ./alchemy.run.ts"
+ : "tsx --env-file=./.env ./alchemy.run.ts";
+
+ if (packageJson.scripts) {
+ packageJson.scripts.deploy = deployCommand;
+ packageJson.scripts.destroy = `${deployCommand} --destroy`;
+ }
+
+ await fs.writeJson(packageJsonPath, packageJson, { spaces: 2 });
+}
+
+async function handleRwsdkPostInstall(context: ProjectContext): Promise<void> {
+ try {
+ const migrationsDir = join(context.path, "migrations");
+ await fs.ensureDir(migrationsDir);
+
+ const commands = getPackageManagerCommands(context.packageManager);
+ const devInitCommand = `${commands.run} dev:init`;
+
+ if (context.options.install !== false) {
+ await execa(devInitCommand, {
+ cwd: context.path,
+ shell: true,
+ });
+ } else {
+ log.info(
+ `To complete rwsdk setup, run: cd ${context.name} && ${devInitCommand}`,
+ );
+ }
+ } catch (_error) {
+ log.warn(
+ "Failed to complete rwsdk setup. You may need to run 'dev:init' manually.",
+ );
+ }
+}
diff --git a/alchemy/bin/types.ts b/alchemy/bin/types.ts
new file mode 100644
index 000000000..200f8fc62
--- /dev/null
+++ b/alchemy/bin/types.ts
@@ -0,0 +1,83 @@
+import { z } from "zod";
+
+export const TEMPLATE_DEFINITIONS = [
+ { name: "typescript", description: "TypeScript Worker" },
+ { name: "vite", description: "React Vite" },
+ { name: "astro", description: "Astro SSR" },
+ { name: "react-router", description: "React Router" },
+ { name: "sveltekit", description: "SvelteKit" },
+ { name: "tanstack-start", description: "TanStack Start" },
+ { name: "rwsdk", description: "Redwood SDK" },
+ { name: "nuxt", description: "Nuxt.js" },
+] as const;
+
+const templateNames = TEMPLATE_DEFINITIONS.map((t) => t.name);
+
+export const TemplateSchema = z
+ .enum(templateNames as [string, ...string[]])
+ .describe("Project template type");
+export type TemplateType = z.infer<typeof TemplateSchema>;
+
+export const PackageManagerSchema = z
+ .enum(["bun", "npm", "pnpm", "yarn"])
+ .describe("Package manager");
+export type PackageManager = z.infer<typeof PackageManagerSchema>;
+
+export const ProjectNameSchema = z
+ .string()
+ .min(1, "Project name cannot be empty")
+ .max(255, "Project name must be less than 255 characters")
+ .refine(
+ (name) => name === "." || !name.startsWith("."),
+ "Project name cannot start with a dot (except for '.')",
+ )
+ .refine(
+ (name) => name === "." || !name.startsWith("-"),
+ "Project name cannot start with a dash",
+ )
+ .refine((name) => {
+ const invalidChars = ["<", ">", ":", '"', "|", "?", "*"];
+ return !invalidChars.some((char) => name.includes(char));
+ }, "Project name contains invalid characters")
+ .refine(
+ (name) => name.toLowerCase() !== "node_modules",
+ "Project name is reserved",
+ )
+ .describe("Project name or path");
+export type ProjectName = z.infer<typeof ProjectNameSchema>;
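+
+// Minimal usage sketch (sample values are illustrative):
+//   ProjectNameSchema.safeParse("my-app").success        // true
+//   ProjectNameSchema.safeParse("node_modules").success  // false, reserved name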
+
+export interface ProjectContext {
+ name: string;
+ path: string;
+ template: TemplateType;
+ packageManager: PackageManager;
+ isTest: boolean;
+ options: CreateInput;
+}
+
+export interface WebsiteOptions {
+ entrypoint?: string;
+ tsconfig?: string;
+ scripts?: Record<string, string>;
+ include?: string[];
+ types?: string[];
+ devDependencies?: string[];
+ dependencies?: string[];
+}
+
+export type CreateInput = {
+ name?: string;
+ template?: TemplateType;
+ packageManager?: PackageManager;
+ bun?: boolean;
+ npm?: boolean;
+ pnpm?: boolean;
+ yarn?: boolean;
+ yes?: boolean;
+ overwrite?: boolean;
+ install?: boolean;
+};
+
+export type CLIInput = CreateInput & {
+ projectDirectory?: string;
+};
diff --git a/alchemy/package.json b/alchemy/package.json
index ef6ed553b..db76893f3 100644
--- a/alchemy/package.json
+++ b/alchemy/package.json
@@ -1,6 +1,6 @@
{
"name": "alchemy",
- "version": "0.34.3",
+ "version": "0.41.2",
"type": "module",
"module": "./lib/index.js",
"license": "Apache-2.0",
@@ -11,7 +11,9 @@
},
"scripts": {
"build:cli": "./scripts/bundle-cli.sh",
- "build": "rm -rf ./*.tsbuildinfo && rm -rf ./lib && tsc -b && bun run build:cli",
+ "build:workers": "bun ./scripts/build-workers.ts",
+ "build": "bun ./scripts/generate-build-date.ts && rm -rf ./*.tsbuildinfo && rm -rf ./lib && tsc -b && bun run build:cli && bun run build:workers",
+ "dev:cli": "./scripts/bundle-cli.sh --watch",
"docs:gen": "rm -rf ./docs && typedoc",
"postbuild": "cpx 'src/**/*/types.d.ts' lib",
"publish:npm": "cp ../README.md . && bun run build && bun run build:cli && npm publish && rm README.md"
@@ -24,7 +26,9 @@
"files": [
"bin",
"lib",
- "src"
+ "src",
+ "templates",
+ "workers"
],
"exports": {
".": {
@@ -55,6 +59,10 @@
"bun": "./src/dns/index.ts",
"import": "./lib/dns/index.js"
},
+ "./docker": {
+ "bun": "./src/docker/index.ts",
+ "import": "./lib/docker/index.js"
+ },
"./esbuild": {
"bun": "./src/esbuild/index.ts",
"import": "./lib/esbuild/index.js"
@@ -148,7 +156,6 @@
"ai": "^4.0.0",
"arktype": "^2.0.0",
"cloudflare": "^4.0.0",
- "dofs": "^0.0.1",
"hono": "^4.0.0",
"prettier": "^3.0.0",
"stripe": "^17.0.0",
@@ -161,30 +168,40 @@
"@aws-sdk/client-resource-groups-tagging-api": "^3.830.0",
"@aws-sdk/client-s3": "3.726.1",
"@biomejs/biome": "^1.9.4",
+ "@clack/prompts": "^0.11.0",
+ "@cloudflare/containers": "^0.0.13",
"@cloudflare/puppeteer": "^1.0.2",
- "@cloudflare/workers-types": "^4.20250303.0",
+ "@cloudflare/workers-types": "^4.20250620.0",
"@iarna/toml": "^2.2.5",
- "@inquirer/prompts": "^7.0.0",
"@octokit/rest": "^21.1.1",
"@types/bun": "latest",
"@types/diff": "^5.0.0",
+ "@types/fs-extra": "^11.0.4",
"@types/libsodium-wrappers": "^0.7.14",
"@types/node": "latest",
"@types/turndown": "^5.0.5",
"ai": "^4.1.16",
+ "alchemy": "^0.37.1",
"arktype": "^2.1.16",
"braintrust": "^0.0.201",
"change-case": "^5.4.4",
"cloudflare": "^4.2.0",
"cpx": "^1.5.0",
+ "dofs": "^0.1.0",
+ "execa": "^9.6.0",
+ "fs-extra": "^11.3.0",
+ "globby": "^14.1.0",
"libsodium-wrappers": "^0.7.15",
- "miniflare": "^4.0.0",
+ "miniflare": "^4.20250617.4",
+ "nitro-cloudflare-dev": "^0.2.2",
"openpgp": "^6.1.0",
+ "picocolors": "^1.1.1",
"prettier": "^3.5.3",
+ "trpc-cli": "^0.9.2",
"turndown": "^7.2.0",
"typedoc": "^0.28.1",
"typedoc-plugin-markdown": "^4.6.0",
- "typescript": "latest",
+ "typescript": "^5.8.3",
"vite": "^6.0.7",
"vitepress": "^1.6.3",
"wrangler": "^3.114.0",
diff --git a/alchemy/scripts/build-workers.ts b/alchemy/scripts/build-workers.ts
new file mode 100644
index 000000000..0a960b774
--- /dev/null
+++ b/alchemy/scripts/build-workers.ts
@@ -0,0 +1,21 @@
+import { readdir } from "node:fs/promises";
+import path from "node:path";
+import { bundle } from "../src/esbuild/bundle";
+
+const WORKERS_DIR = path.join(__dirname, "..", "workers");
+
+const workers = await readdir(WORKERS_DIR);
+
+for (const worker of workers) {
+ if (!worker.endsWith(".ts")) {
+ continue;
+ }
+ await bundle({
+ entryPoint: path.join(WORKERS_DIR, worker),
+ outdir: WORKERS_DIR,
+ bundle: true,
+ format: "esm",
+ target: "es2022",
+ external: ["cloudflare:*", "node:crypto"],
+ });
+}
\ No newline at end of file
diff --git a/alchemy/scripts/bundle-cli.sh b/alchemy/scripts/bundle-cli.sh
index d87340589..054851c67 100755
--- a/alchemy/scripts/bundle-cli.sh
+++ b/alchemy/scripts/bundle-cli.sh
@@ -1,5 +1,13 @@
#!/usr/bin/env sh
+WATCH_FLAG=""
+for arg in "$@"; do
+ if [ "$arg" = "--watch" ]; then
+ WATCH_FLAG="--watch"
+ break
+ fi
+done
+
esbuild bin/alchemy.ts \
--bundle \
--platform=node \
@@ -8,4 +16,5 @@ esbuild bin/alchemy.ts \
--format=esm \
--external:node:* \
--main-fields=module,main \
- --banner:js="import { createRequire as __createRequire } from 'node:module'; const require = __createRequire(import.meta.url);"
\ No newline at end of file
+ --banner:js="import { createRequire as __createRequire } from 'node:module'; const require = __createRequire(import.meta.url);" \
+ $WATCH_FLAG
diff --git a/alchemy/scripts/generate-build-date.ts b/alchemy/scripts/generate-build-date.ts
new file mode 100644
index 000000000..1f9e277e7
--- /dev/null
+++ b/alchemy/scripts/generate-build-date.ts
@@ -0,0 +1,31 @@
+#!/usr/bin/env bun
+
+/**
+ * This script generates a TypeScript file containing the build date
+ * for use as the default worker compatibility date.
+ */
+
+import { writeFileSync } from 'node:fs';
+import { resolve } from 'node:path';
+
+// Generate today's date in YYYY-MM-DD format
+const buildDate = new Date().toISOString().split('T')[0];
+
+// TypeScript content to generate
+const content = `// This file is auto-generated during build
+// Do not edit manually
+
+/**
+ * The build date used as the default worker compatibility date.
+ * This is set to the date when the package was built.
+ */
+export const BUILD_DATE = "${buildDate}";
+`;
+
+// Write to src directory so it gets included in the package
+const outputPath = resolve(import.meta.dirname, '..', 'src', 'build-date.ts');
+
+writeFileSync(outputPath, content, 'utf8');
+
+console.log(`Generated build date: ${buildDate}`);
+console.log(`Written to: ${outputPath}`);
\ No newline at end of file
diff --git a/alchemy/src/alchemy.ts b/alchemy/src/alchemy.ts
index 244f8b38a..4ad3bc10f 100644
--- a/alchemy/src/alchemy.ts
+++ b/alchemy/src/alchemy.ts
@@ -1,6 +1,7 @@
import fs from "node:fs/promises";
import path from "node:path";
+import { ReplacedSignal } from "./apply.ts";
import { DestroyedSignal, destroy } from "./destroy.ts";
import { env } from "./env.ts";
import {
@@ -15,9 +16,9 @@ import { isRuntime } from "./runtime/global.ts";
import { Scope } from "./scope.ts";
import { secret } from "./secret.ts";
import type { StateStoreType } from "./state.ts";
+import type { LoggerApi } from "./util/cli.ts";
import { logger } from "./util/logger.ts";
import { TelemetryClient } from "./util/telemetry/client.ts";
-import type { LoggerApi } from "./util/cli.ts";
/**
* Parses CLI arguments to extract alchemy options
@@ -33,6 +34,14 @@ function parseCliArgs(): Partial<AlchemyOptions> {
options.phase = "read";
}
+ if (
+ args.includes("--dev") ||
+ args.includes("--watch") ||
+ process.execArgv.includes("--watch")
+ ) {
+ options.dev = true;
+ }
+
// Parse quiet flag
if (args.includes("--quiet")) {
options.quiet = true;
@@ -181,21 +190,30 @@ async function _alchemy(
const root = new Scope({
...mergedOptions,
appName,
- stage:
- mergedOptions?.stage ?? process.env.ALCHEMY_STAGE ?? process.env.USER,
phase,
password: mergedOptions?.password ?? process.env.ALCHEMY_PASSWORD,
telemetryClient,
});
+ const stage = new Scope({
+ ...mergedOptions,
+ scopeName:
+ mergedOptions?.stage ?? process.env.ALCHEMY_STAGE ?? process.env.USER,
+ parent: root,
+ appName,
+ stage:
+ mergedOptions?.stage ?? process.env.ALCHEMY_STAGE ?? process.env.USER,
+ });
try {
Scope.storage.enterWith(root);
+ Scope.storage.enterWith(stage);
} catch {
// we are in Cloudflare Workers, we will emulate the enterWith behavior
// see Scope.finalize for where we pop the global scope
Scope.globals.push(root);
+ Scope.globals.push(stage);
}
if (mergedOptions?.phase === "destroy") {
- await destroy(root);
+ await destroy(stage);
return process.exit(0);
}
return root;
@@ -327,6 +345,12 @@ export interface AlchemyOptions {
* @default "up"
*/
phase?: Phase;
+ /**
+ * Determines whether Alchemy will run in dev mode.
+ *
+ * @default - `true` if `--dev` or `--watch` is passed as a CLI argument, `false` otherwise
+ */
+ dev?: boolean;
/**
* Name to scope the resource state under (e.g. `.alchemy/{stage}/..`).
*
@@ -464,7 +488,9 @@ async function run(
}
return await _scope.run(async () => fn.bind(_scope)(_scope));
} catch (error) {
- if (!(error instanceof DestroyedSignal)) {
+ if (
+ !(error instanceof DestroyedSignal || error instanceof ReplacedSignal)
+ ) {
_scope.fail();
}
throw error;
diff --git a/alchemy/src/apply.ts b/alchemy/src/apply.ts
index 0625b9938..869627942 100644
--- a/alchemy/src/apply.ts
+++ b/alchemy/src/apply.ts
@@ -12,7 +12,7 @@ import {
type Resource,
type ResourceProps,
} from "./resource.ts";
-import { Scope } from "./scope.ts";
+import { Scope, type PendingDeletions } from "./scope.ts";
import { serialize } from "./serde.ts";
import type { State } from "./state.ts";
import { formatFQN } from "./util/cli.ts";
@@ -33,6 +33,8 @@ export function apply(
return _apply(resource, props, options);
}
+export class ReplacedSignal extends Error {}
+
async function _apply(
resource: PendingResource,
props: ResourceProps | undefined,
@@ -88,6 +90,7 @@ async function _apply(
};
await scope.state.set(resource[ResourceID], state);
}
+ const oldOutput = state.output;
const alwaysUpdate =
options?.alwaysUpdate ?? provider.options?.alwaysUpdate ?? false;
@@ -162,33 +165,84 @@ async function _apply(
props: state.oldProps,
state,
replace: () => {
+ if (phase === "create") {
+ throw new Error(
+ `Resource ${resource[ResourceKind]} ${resource[ResourceFQN]} cannot be replaced in create phase.`,
+ );
+ }
if (isReplaced) {
logger.warn(
`Resource ${resource[ResourceKind]} ${resource[ResourceFQN]} is already marked as REPLACE`,
);
- return;
}
+
isReplaced = true;
+ throw new ReplacedSignal();
},
});
- const output = await alchemy.run(
- resource[ResourceID],
- {
- isResource: true,
- parent: scope,
- },
- async (scope) => {
- options?.resolveInnerScope?.(scope);
- return provider.handler.bind(ctx)(resource[ResourceID], props);
- },
- );
+ let output: Resource;
+ try {
+ output = await alchemy.run(
+ resource[ResourceID],
+ {
+ isResource: true,
+ parent: scope,
+ },
+ async (scope) => {
+ options?.resolveInnerScope?.(scope);
+ return provider.handler.bind(ctx)(resource[ResourceID], props);
+ },
+ );
+ } catch (error) {
+ if (error instanceof ReplacedSignal) {
+ if (scope.children.get(resource[ResourceID])?.children.size! > 0) {
+ throw new Error(
+ `Resource ${resource[ResourceFQN]} has children and cannot be replaced.`,
+ );
+ }
+
+ output = await alchemy.run(
+ resource[ResourceID],
+ { isResource: true, parent: scope },
+ async () =>
+ provider.handler.bind(
+ context({
+ scope,
+ phase: "create",
+ kind: resource[ResourceKind],
+ id: resource[ResourceID],
+ fqn: resource[ResourceFQN],
+ seq: resource[ResourceSeq],
+ props: state.props,
+ state,
+ replace: () => {
+ throw new Error(
+ `Resource ${resource[ResourceKind]} ${resource[ResourceFQN]} cannot be replaced in create phase.`,
+ );
+ },
+ }),
+ )(resource[ResourceID], props),
+ );
+
+ const pendingDeletions =
+ (await scope.get<PendingDeletions>("pendingDeletions")) ?? [];
+ pendingDeletions.push({
+ resource: oldOutput,
+ oldProps: state.oldProps,
+ });
+ await scope.set("pendingDeletions", pendingDeletions);
+ } else {
+ throw error;
+ }
+ }
if (!quiet) {
logger.task(resource[ResourceFQN], {
- prefix: phase === "create" ? "created" : "updated",
+ prefix:
+ phase === "create" ? "created" : isReplaced ? "replaced" : "updated",
prefixColor: "greenBright",
resource: formatFQN(resource[ResourceFQN]),
- message: `${phase === "create" ? "Created" : "Updated"} Resource`,
+ message: `${phase === "create" ? "Created" : isReplaced ? "Replaced" : "Updated"} Resource`,
status: "success",
});
}
@@ -213,9 +267,6 @@ async function _apply(
props,
// deps: [...deps],
});
- // if (output !== undefined) {
- // resource[Provide](output as Out);
- // }
return output as any;
} catch (error) {
scope.telemetryClient.record({
diff --git a/alchemy/src/build-date.ts b/alchemy/src/build-date.ts
new file mode 100644
index 000000000..cb59e7bdb
--- /dev/null
+++ b/alchemy/src/build-date.ts
@@ -0,0 +1,8 @@
+// This file is auto-generated during build
+// Do not edit manually
+
+/**
+ * The build date used as the default worker compatibility date.
+ * This is set to the date when the package was built.
+ */
+export const BUILD_DATE = "2025-06-26";
diff --git a/alchemy/src/cloudflare/api.ts b/alchemy/src/cloudflare/api.ts
index cfddb1207..810cf7e69 100644
--- a/alchemy/src/cloudflare/api.ts
+++ b/alchemy/src/cloudflare/api.ts
@@ -196,7 +196,8 @@ export class CloudflareApi {
(error) =>
error instanceof InternalError ||
error instanceof TooManyRequestsError ||
- error instanceof ForbiddenError,
+ error instanceof ForbiddenError ||
+ error.code === "ECONNRESET",
10, // Maximum 10 attempts (1 initial + 9 retries)
1000, // Start with 1s delay, will exponentially increase
);
diff --git a/alchemy/src/cloudflare/bindings.ts b/alchemy/src/cloudflare/bindings.ts
index f1b7bb43e..3cf6d6cdd 100644
--- a/alchemy/src/cloudflare/bindings.ts
+++ b/alchemy/src/cloudflare/bindings.ts
@@ -11,6 +11,7 @@ import type { Assets } from "./assets.ts";
import type { Bound } from "./bound.ts";
import type { BrowserRendering } from "./browser-rendering.ts";
import type { R2BucketResource } from "./bucket.ts";
+import type { Container } from "./container.ts";
import type { D1DatabaseResource } from "./d1-database.ts";
import type { DispatchNamespaceResource } from "./dispatch-namespace.ts";
import type { DurableObjectNamespace } from "./durable-object-namespace.ts";
@@ -44,11 +45,12 @@ export type Binding =
| Ai
| AiGatewayResource
| Assets
+ | Container
| CloudflareSecret
| D1DatabaseResource
| DispatchNamespaceResource
| AnalyticsEngineDataset
- | DurableObjectNamespace
+ | DurableObjectNamespace
| HyperdriveResource
| Images
| KVNamespaceResource
diff --git a/alchemy/src/cloudflare/bound.ts b/alchemy/src/cloudflare/bound.ts
index 1bf5a3cf1..5c5b86e76 100644
--- a/alchemy/src/cloudflare/bound.ts
+++ b/alchemy/src/cloudflare/bound.ts
@@ -7,6 +7,7 @@ import type { Assets } from "./assets.ts";
import type { Binding, Json, Self } from "./bindings.ts";
import type { BrowserRendering } from "./browser-rendering.ts";
import type { R2BucketResource as _R2Bucket } from "./bucket.ts";
+import type { Container as _Container } from "./container.ts";
import type { D1DatabaseResource } from "./d1-database.ts";
import type { DispatchNamespaceResource } from "./dispatch-namespace.ts";
import type { DurableObjectNamespace as _DurableObjectNamespace } from "./durable-object-namespace.ts";
@@ -36,7 +37,7 @@ type BoundWorker<
export type Bound = T extends _DurableObjectNamespace<
infer O
>
- ? DurableObjectNamespace
+ ? DurableObjectNamespace
: T extends { type: "kv_namespace" }
? KVNamespace
: T extends WorkerStub
@@ -87,4 +88,11 @@ export type Bound = T extends _DurableObjectNamespace<
? Service
: T extends Json
? T
- : Service;
+ : T extends _Container<
+ infer Obj
+ >
+ ? DurableObjectNamespace<
+ Obj &
+ Rpc.DurableObjectBranded
+ >
+ : Service;
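+
+// For illustration: a binding typed `Container<MyDO>` (MyDO being a hypothetical
+// Durable Object class) resolves through the branch above to
+// `DurableObjectNamespace<MyDO & Rpc.DurableObjectBranded>`.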
diff --git a/alchemy/src/cloudflare/bucket.ts b/alchemy/src/cloudflare/bucket.ts
index 98b8347b5..95749f375 100644
--- a/alchemy/src/cloudflare/bucket.ts
+++ b/alchemy/src/cloudflare/bucket.ts
@@ -87,6 +87,17 @@ export interface BucketProps {
* Whether to adopt an existing bucket
*/
adopt?: boolean;
+
+ /**
+ * Whether to emulate the bucket locally when Alchemy is running in watch mode.
+ */
+ dev?: {
+ /**
+ * Whether to run the bucket remotely instead of locally
+ * @default false
+ */
+ remote?: boolean;
+ };
}
/**
@@ -248,6 +259,7 @@ const R2BucketResource = Resource(
jurisdiction: props.jurisdiction || "default",
type: "r2_bucket",
accountId: api.accountId,
+ dev: props.dev,
});
},
);
diff --git a/alchemy/src/cloudflare/bundle/bundle-worker-dev.ts b/alchemy/src/cloudflare/bundle/bundle-worker-dev.ts
new file mode 100644
index 000000000..4c36d38bc
--- /dev/null
+++ b/alchemy/src/cloudflare/bundle/bundle-worker-dev.ts
@@ -0,0 +1,127 @@
+import type esbuild from "esbuild";
+import type { Bindings } from "../bindings.ts";
+import type { WorkerProps } from "../worker.ts";
+import { createAliasPlugin } from "./alias-plugin.ts";
+import { external, external_als } from "./external.ts";
+import { getNodeJSCompatMode } from "./nodejs-compat-mode.ts";
+import { nodeJsCompatPlugin } from "./nodejs-compat.ts";
+import { wasmPlugin } from "./wasm-plugin.ts";
+
+interface DevWorkerContext {
+ context: esbuild.BuildContext;
+ dispose: () => Promise<void>;
+}
+
+declare global {
+ var _ALCHEMY_DEV_WORKER_CONTEXTS: Map<string, DevWorkerContext> | undefined;
+}
+
+const activeContexts = () =>
+ (globalThis._ALCHEMY_DEV_WORKER_CONTEXTS ??= new Map());
+
+/**
+ * Creates an esbuild context for watching and hot-reloading a worker
+ */
+export async function createWorkerDevContext(
+ workerName: string,
+ props: WorkerProps & {
+ entrypoint: string;
+ compatibilityDate: string;
+ compatibilityFlags: string[];
+ },
+ hooks: HotReloadHooks,
+) {
+ // Clean up any existing context for this worker
+ const existing = activeContexts().get(workerName);
+ if (existing) {
+ await existing.dispose();
+ }
+
+ if (!props.entrypoint) {
+ throw new Error(
+ "A worker dev context was created, but no entry point was provided.",
+ );
+ }
+
+ const esbuild = await import("esbuild");
+ const nodeJsCompatMode = await getNodeJSCompatMode(
+ props.compatibilityDate,
+ props.compatibilityFlags,
+ );
+
+ const projectRoot = props.projectRoot ?? process.cwd();
+
+ const context = await esbuild.context({
+ entryPoints: [props.entrypoint],
+ format: props.format === "cjs" ? "cjs" : "esm",
+ target: "esnext",
+ platform: "node",
+ minify: false,
+ bundle: true,
+ ...props.bundle,
+ write: false, // We want the result in memory for hot reloading
+ conditions: ["workerd", "worker", "import", "module", "browser"],
+ mainFields: ["module", "main"],
+ absWorkingDir: projectRoot,
+ keepNames: true,
+ loader: {
+ ".sql": "text",
+ ".json": "json",
+ ...props.bundle?.loader,
+ },
+ plugins: [
+ wasmPlugin,
+ ...(props.bundle?.plugins ?? []),
+ ...(nodeJsCompatMode === "v2" ? [await nodeJsCompatPlugin()] : []),
+ ...(props.bundle?.alias
+ ? [
+ createAliasPlugin({
+ alias: props.bundle?.alias,
+ projectRoot,
+ }),
+ ]
+ : []),
+ hotReloadPlugin(hooks),
+ ],
+ external: [
+ ...(nodeJsCompatMode === "als" ? external_als : external),
+ ...(props.bundle?.external ?? []),
+ ],
+ });
+
+ await context.watch();
+
+ activeContexts().set(workerName, {
+ context,
+ dispose: async () => {
+ await context.dispose();
+ activeContexts().delete(workerName);
+ },
+ });
+}
+
+interface HotReloadHooks {
+ onBuildStart: () => void | Promise<void>;
+ onBuildEnd: (script: string) => void | Promise<void>;
+ onBuildError: (errors: esbuild.Message[]) => void | Promise<void>;
+}
+
+function hotReloadPlugin(hooks: HotReloadHooks): esbuild.Plugin {
+ return {
+ name: "alchemy-hot-reload",
+ setup(build) {
+ build.onStart(hooks.onBuildStart);
+ build.onEnd(async (result) => {
+ if (result.errors.length > 0) {
+ await hooks.onBuildError(result.errors);
+ return;
+ }
+
+ if (result.outputFiles && result.outputFiles.length > 0) {
+ const newScript = result.outputFiles[0].text;
+ await hooks.onBuildEnd(newScript);
+ }
+ });
+ },
+ };
+}
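+
+/*
+ * Minimal caller sketch (illustrative; `workerProps` and the hook bodies are
+ * assumptions, not the dev server's actual wiring):
+ *
+ *   await createWorkerDevContext("my-worker", workerProps, {
+ *     onBuildStart: () => console.log("rebuilding worker..."),
+ *     onBuildEnd: async (script) => {
+ *       // push the freshly bundled script to the local runtime
+ *     },
+ *     onBuildError: (errors) => console.error(errors),
+ *   });
+ */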
diff --git a/alchemy/src/cloudflare/bundle/bundle-worker.ts b/alchemy/src/cloudflare/bundle/bundle-worker.ts
index 5ced5053c..20a9e41db 100644
--- a/alchemy/src/cloudflare/bundle/bundle-worker.ts
+++ b/alchemy/src/cloudflare/bundle/bundle-worker.ts
@@ -34,11 +34,6 @@ export async function bundleWorkerScript(
props.compatibilityFlags,
);
- if (nodeJsCompatMode === "v1") {
- throw new Error(
- "You must set your compatibilty date >= 2024-09-23 when using 'nodejs_compat' compatibility flag",
- );
- }
const main = props.entrypoint;
if (props.noBundle) {
@@ -71,7 +66,7 @@ export async function bundleWorkerScript(
).flat(),
),
);
- const useColor = !(process.env.CI || process.env.NO_COLOR);
+ const useColor = !process.env.NO_COLOR;
logger.log(
`${useColor ? kleur.gray("worker:") : "worker:"} ${useColor ? kleur.blue(props.name) : props.name}`,
);
@@ -102,7 +97,8 @@ export async function bundleWorkerScript(
platform: "node",
minify: false,
...(props.bundle || {}),
- conditions: ["workerd", "worker", "browser"],
+ conditions: ["workerd", "worker", "import", "module", "browser"],
+ mainFields: ["module", "main"],
absWorkingDir: projectRoot,
keepNames: true, // Important for Durable Object classes
loader: {
diff --git a/alchemy/src/cloudflare/bundle/nodejs-compat-mode.ts b/alchemy/src/cloudflare/bundle/nodejs-compat-mode.ts
index f120c26bc..c14b5fb54 100644
--- a/alchemy/src/cloudflare/bundle/nodejs-compat-mode.ts
+++ b/alchemy/src/cloudflare/bundle/nodejs-compat-mode.ts
@@ -1,4 +1,3 @@
-import type { NodeJSCompatMode } from "miniflare";
import { logger } from "../../util/logger.ts";
/**
@@ -19,8 +18,12 @@ export async function getNodeJSCompatMode(
props?: {
noBundle?: boolean;
},
-): Promise<NodeJSCompatMode> {
- const { getNodeCompat } = await import("miniflare");
+) {
+ const { getNodeCompat } = await import("miniflare").catch(() => {
+ throw new Error(
+ "Miniflare is not installed, but is required to determine the Node.js compatibility mode for Workers. Please run `npm install miniflare`.",
+ );
+ });
const {
mode,
hasNodejsCompatFlag,
@@ -46,5 +49,11 @@ export async function getNodeJSCompatMode(
);
}
+ if (mode === "v1") {
+ throw new Error(
+ "You must set your compatibilty date >= 2024-09-23 when using 'nodejs_compat' compatibility flag",
+ );
+ }
+
return mode;
}
diff --git a/alchemy/src/cloudflare/certificate-pack.ts b/alchemy/src/cloudflare/certificate-pack.ts
new file mode 100644
index 000000000..84a55a4f1
--- /dev/null
+++ b/alchemy/src/cloudflare/certificate-pack.ts
@@ -0,0 +1,698 @@
+import type { Context } from "../context.ts";
+import { Resource } from "../resource.ts";
+import { logger } from "../util/logger.ts";
+import { handleApiError } from "./api-error.ts";
+import {
+ createCloudflareApi,
+ type CloudflareApi,
+ type CloudflareApiOptions,
+} from "./api.ts";
+import type { Zone } from "./zone.ts";
+
+/**
+ * Certificate Authority options for Advanced Certificate Packs
+ */
+export type CertificateAuthority = "google" | "lets_encrypt" | "ssl_com";
+
+/**
+ * Validation method for certificate verification
+ */
+export type ValidationMethod = "txt" | "http" | "email";
+
+/**
+ * Validity period options for certificates
+ */
+export type ValidityDays = 14 | 30 | 90 | 365;
+
+/**
+ * Certificate pack status values during lifecycle
+ */
+export type CertificatePackStatus =
+ | "initializing"
+ | "pending_validation"
+ | "deleted"
+ | "pending_issuance"
+ | "pending_deployment"
+ | "pending_deletion"
+ | "pending_expiration"
+ | "expired"
+ | "active"
+ | "initializing_timed_out"
+ | "validation_timed_out"
+ | "issuance_timed_out"
+ | "deployment_timed_out"
+ | "deletion_timed_out"
+ | "pending_cleanup"
+ | "staging_deployment"
+ | "staging_active"
+ | "deactivating"
+ | "inactive"
+ | "backup_issued"
+ | "holding_deployment";
+
+/**
+ * Properties for creating a Certificate Pack
+ */
+export interface CertificatePackProps extends CloudflareApiOptions {
+ /**
+ * The zone to create the certificate pack for
+ * Can be a Zone resource, zone ID string, or omitted to auto-infer from hosts
+ */
+ zone?: string | Zone;
+
+ /**
+ * Certificate Authority to use for issuing the certificate
+ * - google: Google Trust Services (Enterprise features)
+ * - lets_encrypt: Let's Encrypt (Free, shorter validity periods)
+ * - ssl_com: SSL.com (Commercial certificates with extended validation)
+ *
+ * **Note:** This property is immutable after creation. To change the CA,
+ * you must delete and recreate the certificate pack.
+ */
+ certificateAuthority: CertificateAuthority;
+
+ /**
+ * List of hostnames to include in the certificate
+ * Maximum 50 hosts, must include the zone apex (root domain)
+ * Supports wildcards (e.g., "*.example.com")
+ *
+ * **Note:** This property is immutable after creation.
+ */
+ hosts: string[];
+
+ /**
+ * Certificate type - only "advanced" is supported
+ *
+ * **Note:** This property is immutable after creation.
+ * @default "advanced"
+ */
+ type?: "advanced";
+
+ /**
+ * Method used to validate domain ownership
+ * - txt: DNS TXT record validation
+ * - http: HTTP file validation
+ * - email: Email validation
+ *
+ * **Note:** This property is immutable after creation.
+ */
+ validationMethod: ValidationMethod;
+
+ /**
+ * Certificate validity period in days
+ * Available options: 14, 30, 90, or 365 days
+ *
+ * **Note:** This property is immutable after creation.
+ */
+ validityDays: ValidityDays;
+
+ /**
+ * Whether to add Cloudflare branding subdomain as Common Name
+ * Adds sni.cloudflaressl.com subdomain when enabled
+ *
+ * **Note:** This is the only property that can be updated after creation.
+ * @default false
+ */
+ cloudflareBranding?: boolean;
+
+ /**
+ * Whether to delete the certificate pack
+ * If set to false, the pack will remain but the resource will be removed from state
+ *
+ * @default true
+ */
+ delete?: boolean;
+}
+
+/**
+ * Output returned after Certificate Pack creation/update
+ */
+export interface CertificatePack
+ extends Resource<"cloudflare::CertificatePack"> {
+ /**
+ * The unique ID of the certificate pack
+ */
+ id: string;
+
+ /**
+ * Certificate Authority used for the certificate
+ */
+ certificateAuthority: CertificateAuthority;
+
+ /**
+ * Whether Cloudflare branding is enabled
+ */
+ cloudflareBranding: boolean;
+
+ /**
+ * List of hostnames included in the certificate
+ */
+ hosts: string[];
+
+ /**
+ * Current status of the certificate pack
+ */
+ status: CertificatePackStatus;
+
+ /**
+ * Certificate type
+ */
+ type: "advanced";
+
+ /**
+ * Validation method used for domain verification
+ */
+ validationMethod: ValidationMethod;
+
+ /**
+ * Certificate validity period in days
+ */
+ validityDays: ValidityDays;
+
+ /**
+ * Zone ID the certificate pack belongs to
+ */
+ zoneId: string;
+
+ /**
+ * Zone name (domain)
+ */
+ zoneName: string;
+}
+
+/**
+ * Creates and manages Cloudflare Advanced Certificate Packs.
+ *
+ * Advanced Certificate Packs provide flexible SSL/TLS certificates with
+ * multiple Certificate Authority options, custom validity periods, and
+ * support for up to 50 hostnames per certificate.
+ *
+ * **Important Notes:**
+ * - Requires a paid Cloudflare plan (not available on Free plans)
+ * - Certificate provisioning can take up to 10 minutes
+ * - Most properties are immutable after creation (only cloudflareBranding can be updated)
+ * - To change immutable properties, you must delete and recreate the certificate pack
+ *
+ * @example
+ * // Create a basic certificate pack with Let's Encrypt
+ * const basicCert = await CertificatePack("my-cert", {
+ * zone: myZone,
+ * certificateAuthority: "lets_encrypt",
+ * hosts: ["example.com", "www.example.com"],
+ * validationMethod: "txt",
+ * validityDays: 90
+ * });
+ *
+ * @example
+ * // Create an enterprise certificate with Google Trust Services
+ * const enterpriseCert = await CertificatePack("enterprise-cert", {
+ * zone: "example.com",
+ * certificateAuthority: "google",
+ * hosts: ["example.com", "*.example.com", "api.example.com"],
+ * validationMethod: "txt",
+ * validityDays: 365,
+ * cloudflareBranding: true
+ * });
+ *
+ * @example
+ * // Create a wildcard certificate with SSL.com
+ * const wildcardCert = await CertificatePack("wildcard-cert", {
+ * zone: myZone,
+ * certificateAuthority: "ssl_com",
+ * hosts: ["example.com", "*.example.com"],
+ * validationMethod: "email",
+ * validityDays: 365
+ * });
+ *
+ * @example
+ * // Create a certificate for multiple subdomains
+ * const multiDomainCert = await CertificatePack("multi-cert", {
+ * zone: "example.com",
+ * certificateAuthority: "lets_encrypt",
+ * hosts: [
+ * "example.com",
+ * "www.example.com",
+ * "api.example.com",
+ * "admin.example.com",
+ * "blog.example.com"
+ * ],
+ * validationMethod: "http",
+ * validityDays: 90
+ * });
+ *
+ * @see https://developers.cloudflare.com/api/resources/ssl/subresources/certificate_packs/
+ */
+export const CertificatePack = Resource(
+ "cloudflare::CertificatePack",
+ async function (
+ this: Context<CertificatePack>,
+ _id: string,
+ props: CertificatePackProps,
+ ): Promise<CertificatePack> {
+ // Create Cloudflare API client with automatic account discovery
+ const api = await createCloudflareApi(props);
+
+ // Resolve zone ID and zone name
+ let zoneId: string;
+ let zoneName: string;
+
+ if (props.zone) {
+ // Zone provided - use it
+ if (typeof props.zone === "string") {
+ zoneId = props.zone;
+ // Try to get zone name from API for better error messages
+ try {
+ const zoneResponse = await api.get(`/zones/${zoneId}`);
+ if (zoneResponse.ok) {
+ const zoneData = (await zoneResponse.json()) as {
+ result: { name: string };
+ };
+ zoneName = zoneData.result.name;
+ } else {
+ zoneName = zoneId; // Fallback to ID
+ }
+ } catch {
+ zoneName = zoneId; // Fallback to ID
+ }
+ } else {
+ zoneId = props.zone.id;
+ zoneName = props.zone.name || props.zone.id;
+ }
+ } else {
+ // Auto-infer zone from the first host
+ if (props.hosts.length === 0) {
+ throw new Error(
+ "At least one host must be specified when zone is not provided",
+ );
+ }
+
+ logger.log(`Auto-inferring zone from hostname: ${props.hosts[0]}`);
+ const zoneInfo = await findZoneForHostname(api, props.hosts[0]);
+ zoneId = zoneInfo.zoneId;
+ zoneName = zoneInfo.zoneName;
+ logger.log(`Auto-inferred zone: ${zoneName} (${zoneId})`);
+ }
+
+ if (this.phase === "delete") {
+ if (this.output?.id && props.delete !== false) {
+ const deleteResponse = await api.delete(
+ `/zones/${zoneId}/ssl/certificate_packs/${this.output.id}`,
+ );
+
+ if (!deleteResponse.ok && deleteResponse.status !== 404) {
+ await handleApiError(
+ deleteResponse,
+ "delete",
+ "certificate pack",
+ this.output.id,
+ );
+ }
+ } else {
+ logger.warn("Certificate pack not found, skipping delete");
+ }
+ return this.destroy();
+ }
+
+ if (this.phase === "update" && this.output?.id) {
+ // Validate immutable properties
+ const currentPack = this.output;
+
+ if (props.certificateAuthority !== currentPack.certificateAuthority) {
+ throw new Error(
+ `Cannot change certificateAuthority from '${currentPack.certificateAuthority}' to '${props.certificateAuthority}'. Certificate Authority is immutable after creation. You must delete and recreate the certificate pack.`,
+ );
+ }
+
+ if (
+ JSON.stringify(props.hosts.sort()) !==
+ JSON.stringify(currentPack.hosts.sort())
+ ) {
+ throw new Error(
+ `Cannot change hosts from [${currentPack.hosts.join(", ")}] to [${props.hosts.join(", ")}]. Hosts are immutable after creation. You must delete and recreate the certificate pack.`,
+ );
+ }
+
+ if (props.validationMethod !== currentPack.validationMethod) {
+ throw new Error(
+ `Cannot change validationMethod from '${currentPack.validationMethod}' to '${props.validationMethod}'. Validation method is immutable after creation. You must delete and recreate the certificate pack.`,
+ );
+ }
+
+ if (props.validityDays !== currentPack.validityDays) {
+ throw new Error(
+ `Cannot change validityDays from ${currentPack.validityDays} to ${props.validityDays}. Validity period is immutable after creation. You must delete and recreate the certificate pack.`,
+ );
+ }
+
+ const type = props.type || "advanced";
+ if (type !== currentPack.type) {
+ throw new Error(
+ `Cannot change type from '${currentPack.type}' to '${type}'. Type is immutable after creation. You must delete and recreate the certificate pack.`,
+ );
+ }
+
+ // Only cloudflareBranding can be updated
+ if (props.cloudflareBranding !== currentPack.cloudflareBranding) {
+ logger.log(
+ `Updating certificate pack cloudflare branding from ${currentPack.cloudflareBranding} to ${props.cloudflareBranding}`,
+ );
+
+ const updateResponse = await api.patch(
+ `/zones/${zoneId}/ssl/certificate_packs/${this.output.id}`,
+ {
+ cloudflare_branding: props.cloudflareBranding || false,
+ },
+ );
+
+ if (!updateResponse.ok) {
+ await handleApiError(
+ updateResponse,
+ "update",
+ "certificate pack",
+ this.output.id,
+ );
+ }
+ }
+
+ // Get updated certificate pack details
+ const response = await api.get(
+ `/zones/${zoneId}/ssl/certificate_packs/${this.output.id}`,
+ );
+
+ if (!response.ok) {
+ await handleApiError(
+ response,
+ "get",
+ "certificate pack",
+ this.output.id,
+ );
+ }
+
+ const updatedPack = (
+ (await response.json()) as { result: CloudflareCertificatePack }
+ ).result;
+
+ return this({
+ id: updatedPack.id,
+ certificateAuthority: updatedPack.certificate_authority,
+ cloudflareBranding: updatedPack.cloudflare_branding,
+ hosts: updatedPack.hosts,
+ status: updatedPack.status,
+ type: updatedPack.type,
+ validationMethod: updatedPack.validation_method,
+ validityDays: updatedPack.validity_days,
+ zoneId: updatedPack.zone_id,
+ zoneName: zoneName,
+ });
+ }
+
+ // Create new certificate pack
+ if (props.hosts.length === 0) {
+ throw new Error("At least one host must be specified");
+ }
+
+ if (props.hosts.length > 50) {
+ throw new Error("Maximum 50 hosts are allowed per certificate pack");
+ }
+
+ // Validate that zone apex is included
+ const hasZoneApex = props.hosts.some(
+ (host) => host === zoneName || (zoneName && host === zoneName),
+ );
+
+ if (!hasZoneApex && zoneName) {
+ logger.warn(
+ `Zone apex '${zoneName}' is not included in hosts. This may cause certificate validation issues.`,
+ );
+ }
+
+ // Check for existing certificate pack that matches our configuration
+ const existingPack = await findMatchingCertificatePack(api, zoneId, props);
+
+ if (existingPack) {
+ // Adopt the existing certificate pack
+ logger.log(
+ `Adopting existing certificate pack ${existingPack.id} instead of creating a new one`,
+ );
+
+ return this({
+ id: existingPack.id,
+ certificateAuthority: existingPack.certificate_authority,
+ cloudflareBranding: existingPack.cloudflare_branding,
+ hosts: existingPack.hosts,
+ status: existingPack.status,
+ type: existingPack.type,
+ validationMethod: existingPack.validation_method,
+ validityDays: existingPack.validity_days,
+ zoneId: existingPack.zone_id,
+ zoneName: zoneName,
+ });
+ }
+
+ logger.log(
+ `Creating certificate pack with ${props.hosts.length} hosts using ${props.certificateAuthority}`,
+ );
+
+ const createResponse = await api.post(
+ `/zones/${zoneId}/ssl/certificate_packs/order`,
+ {
+ certificate_authority: props.certificateAuthority,
+ cloudflare_branding: props.cloudflareBranding || false,
+ hosts: props.hosts,
+ type: props.type || "advanced",
+ validation_method: props.validationMethod,
+ validity_days: props.validityDays,
+ },
+ );
+
+ if (!createResponse.ok) {
+ const errorText = await createResponse.text();
+
+ // Provide helpful error messages for common issues
+ if (errorText.includes("subscription")) {
+ throw new Error(
+ `Failed to create certificate pack: Advanced Certificate Packs require a paid Cloudflare plan. Please upgrade your subscription to use this feature.\n\nOriginal error: ${errorText}`,
+ );
+ }
+
+ if (errorText.includes("quota")) {
+ throw new Error(
+ `Failed to create certificate pack: Certificate pack quota exceeded. Please check your account limits.\n\nOriginal error: ${errorText}`,
+ );
+ }
+
+ // Throw generic error for other cases
+ throw new Error(
+ `Failed to create certificate pack: ${createResponse.statusText}\n\n${errorText}`,
+ );
+ }
+
+ const createdPack = (
+ (await createResponse.json()) as { result: CloudflareCertificatePack }
+ ).result;
+
+ logger.log(
+ `Certificate pack created with ID ${createdPack.id}. Status: ${createdPack.status}. Note: Certificate provisioning can take up to 10 minutes.`,
+ );
+
+ return this({
+ id: createdPack.id,
+ certificateAuthority: createdPack.certificate_authority,
+ cloudflareBranding: createdPack.cloudflare_branding,
+ hosts: createdPack.hosts,
+ status: createdPack.status,
+ type: createdPack.type,
+ validationMethod: createdPack.validation_method,
+ validityDays: createdPack.validity_days,
+ zoneId: createdPack.zone_id,
+ zoneName: zoneName,
+ });
+ },
+);
+
+/**
+ * Cloudflare Certificate Pack API response format
+ */
+interface CloudflareCertificatePack {
+ id: string;
+ certificate_authority: CertificateAuthority;
+ cloudflare_branding: boolean;
+ hosts: string[];
+ status: CertificatePackStatus;
+ type: "advanced";
+ validation_method: ValidationMethod;
+ validity_days: ValidityDays;
+ zone_id: string;
+}
+
+/**
+ * Helper function to wait for certificate pack to reach active status
+ * Useful for testing or when you need to ensure the certificate is ready
+ *
+ * @param api CloudflareApi instance
+ * @param zoneId Zone ID
+ * @param certificatePackId Certificate pack ID
+ * @param timeoutMs Maximum time to wait in milliseconds (default: 15 minutes)
+ * @returns Promise resolving to the final certificate pack status
+ *
+ * @example
+ * // Wait for certificate to become active
+ * const finalStatus = await waitForCertificatePackActive(
+ * api,
+ * zoneId,
+ * certificatePack.id,
+ * 10 * 60 * 1000 // 10 minutes
+ * );
+ * console.log(`Certificate pack is now: ${finalStatus}`);
+ */
+export async function waitForCertificatePackActive(
+ api: CloudflareApi,
+ zoneId: string,
+ certificatePackId: string,
+ timeoutMs: number = 15 * 60 * 1000, // 15 minutes default
+): Promise<CertificatePackStatus> {
+ const startTime = Date.now();
+ const pollInterval = 30 * 1000; // Poll every 30 seconds
+
+ while (Date.now() - startTime < timeoutMs) {
+ const response = await api.get(
+ `/zones/${zoneId}/ssl/certificate_packs/${certificatePackId}`,
+ );
+
+ if (!response.ok) {
+ throw new Error(
+ `Failed to check certificate pack status: ${response.statusText}`,
+ );
+ }
+
+ const pack = (
+ (await response.json()) as { result: CloudflareCertificatePack }
+ ).result;
+
+ // Return immediately if active or in a final error state
+ if (pack.status === "active") {
+ return pack.status;
+ }
+
+ if (
+ pack.status.includes("timed_out") ||
+ pack.status === "expired" ||
+ pack.status === "deleted"
+ ) {
+ return pack.status;
+ }
+
+ // Wait before next poll
+ await new Promise((resolve) => setTimeout(resolve, pollInterval));
+ }
+
+ throw new Error(
+ `Certificate pack did not become active within ${timeoutMs / 1000 / 60} minutes`,
+ );
+}
+
+/**
+ * Helper function to find zone ID from a hostname
+ * Searches for the zone that matches the hostname or its parent domains
+ *
+ * @param api CloudflareApi instance
+ * @param hostname The hostname to find the zone for
+ * @returns Promise resolving to the zone ID and zone name
+ */
+async function findZoneForHostname(
+ api: CloudflareApi,
+ hostname: string,
+): Promise<{ zoneId: string; zoneName: string }> {
+ // Remove wildcard prefix if present
+ const cleanHostname = hostname.replace(/^\*\./, "");
+
+ // Get all zones and find the best match
+ const response = await api.get("/zones");
+
+ if (!response.ok) {
+ throw new Error(`Failed to list zones: ${response.statusText}`);
+ }
+
+ const zonesData = (await response.json()) as {
+ result: Array<{ id: string; name: string }>;
+ };
+
+ // Find the zone that best matches the hostname
+ // We look for the longest matching zone name (most specific)
+ let bestMatch: { zoneId: string; zoneName: string } | null = null;
+ let longestMatch = 0;
+
+ for (const zone of zonesData.result) {
+ if (
+ cleanHostname === zone.name ||
+ cleanHostname.endsWith(`.${zone.name}`)
+ ) {
+ if (zone.name.length > longestMatch) {
+ longestMatch = zone.name.length;
+ bestMatch = { zoneId: zone.id, zoneName: zone.name };
+ }
+ }
+ }
+
+ if (!bestMatch) {
+ throw new Error(
+ `Could not find zone for hostname '${hostname}'. Available zones: ${zonesData.result.map((z) => z.name).join(", ")}`,
+ );
+ }
+
+ return bestMatch;
+}
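+
+// Example of the longest-match rule above: with zones "example.com" and
+// "api.example.com", the hostname "*.api.example.com" resolves to
+// "api.example.com" (the wildcard prefix is stripped and the longer zone wins).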
+
+/**
+ * Helper function to find existing certificate packs that match the given configuration
+ * Used to adopt existing certificates instead of creating duplicates
+ *
+ * @param api CloudflareApi instance
+ * @param zoneId Zone ID to search in
+ * @param props Certificate pack properties to match
+ * @returns Promise resolving to matching certificate pack or null if none found
+ */
+async function findMatchingCertificatePack(
+ api: CloudflareApi,
+ zoneId: string,
+ props: CertificatePackProps,
+): Promise<CloudflareCertificatePack | null> {
+ const response = await api.get(`/zones/${zoneId}/ssl/certificate_packs`);
+
+ if (!response.ok) {
+ throw new Error(`Failed to list certificate packs: ${response.statusText}`);
+ }
+
+ const packsData = (await response.json()) as {
+ result: CloudflareCertificatePack[];
+ };
+
+ // Find a certificate pack that matches our configuration
+ for (const pack of packsData.result) {
+ // Skip deleted or expired packs
+ if (pack.status === "deleted" || pack.status === "expired") {
+ continue;
+ }
+
+ // Check if the configuration matches
+ if (
+ pack.certificate_authority === props.certificateAuthority &&
+ pack.validation_method === props.validationMethod &&
+ pack.validity_days === props.validityDays &&
+ pack.type === (props.type || "advanced")
+ ) {
+ // Check if all requested hosts are covered by this certificate pack
+ const packHosts = new Set(pack.hosts);
+ const allHostsCovered = props.hosts.every((host) => packHosts.has(host));
+
+ if (allHostsCovered) {
+ logger.log(
+ `Found existing certificate pack ${pack.id} that covers all requested hosts`,
+ );
+ return pack;
+ }
+ }
+ }
+
+ return null;
+}
diff --git a/alchemy/src/cloudflare/container.ts b/alchemy/src/cloudflare/container.ts
new file mode 100644
index 000000000..8d18c0a42
--- /dev/null
+++ b/alchemy/src/cloudflare/container.ts
@@ -0,0 +1,843 @@
+import type { Context } from "../context.ts";
+import { Image, type ImageProps, type ImageRegistry } from "../docker/image.ts";
+import { Resource } from "../resource.ts";
+import { secret } from "../secret.ts";
+import {
+ type CloudflareApi,
+ type CloudflareApiOptions,
+ createCloudflareApi,
+} from "./api.ts";
+
+export interface ContainerProps
+ extends Omit,
+ Partial {
+ className: string;
+ maxInstances?: number;
+ scriptName?: string;
+ instanceType?: InstanceType;
+ observability?: DeploymentObservability;
+ schedulingPolicy?: SchedulingPolicy;
+}
+
+/**
+ * @see https://developers.cloudflare.com/containers/pricing/
+ */
+export type InstanceType = "dev" | "basic" | "standard" | (string & {});
+
+export function isContainer(binding: any): binding is Container {
+ return binding.type === "container";
+}
+
+export type Container<T = unknown> = {
+ type: "container";
+ id: string;
+ name?: string;
+ className: string;
+ image: Image;
+ maxInstances?: number;
+ scriptName?: string;
+ sqlite?: true;
+ instanceType?: InstanceType;
+ observability?: DeploymentObservability;
+ schedulingPolicy?: SchedulingPolicy;
+ /**
+ * @internal
+ */
+ __phantom?: T;
+};
+
+export async function Container<T>(
+ id: string,
+ props: ContainerProps,
+): Promise<Container<T>> {
+ // Otherwise, obtain Cloudflare registry credentials automatically
+ const api = await createCloudflareApi(props);
+ const creds = await getContainerCredentials(api);
+
+ const registry: ImageRegistry = {
+ server: "registry.cloudflare.com",
+ username: creds.username || creds.user!,
+ password: secret(creds.password),
+ };
+
+ // Ensure repository name is namespaced with accountId
+ const repoBase = props.name ?? id;
+ const repoName = repoBase.includes("/")
+ ? repoBase
+ : `${api.accountId}/${repoBase}`;
+
+ // Replace disallowed "latest" tag with timestamp
+ const finalTag =
+ props.tag === undefined || props.tag === "latest"
+ ? `latest-${Date.now()}`
+ : props.tag;
+
+ const image = await Image(id, {
+ build: props.build,
+ name: repoName,
+ tag: finalTag,
+ skipPush: false,
+ registry,
+ });
+
+ return {
+ type: "container",
+ id,
+ name: props.name ?? id,
+ className: props.className,
+ image,
+ maxInstances: props.maxInstances,
+ scriptName: props.scriptName,
+ instanceType: props.instanceType,
+ observability: props.observability,
+ schedulingPolicy: props.schedulingPolicy,
+ sqlite: true,
+ };
+}
+
+export interface ContainerApplicationRollout {
+ strategy: "rolling";
+ kind?: "full_auto";
+ stepPercentage: number;
+ targetConfiguration: {
+ image: string;
+ instance_type?: InstanceType;
+ observability: {
+ logs: {
+ enabled: boolean;
+ };
+ };
+ };
+}
+
+export interface ContainerApplicationProps extends CloudflareApiOptions {
+ name: string;
+ schedulingPolicy?: SchedulingPolicy;
+ instances?: number;
+ /**
+ * The instance type to be used for the deployment.
+ *
+ * @default "dev"
+ */
+ instanceType?: InstanceType;
+ /**
+ * The observability configuration for the deployment.
+ */
+ observability?: DeploymentObservability;
+ /**
+ * The maximum number of instances to be used for the deployment.
+ */
+ maxInstances?: number;
+ image: Image;
+ registryId?: string;
+ durableObjects?: {
+ namespaceId: string;
+ };
+ rollout?: ContainerApplicationRollout;
+}
+
+export type SchedulingPolicy =
+ | "moon"
+ | "gpu"
+ | "regional"
+ | "fill_metals"
+ | "default";
+
+export interface ContainerApplication
+ extends Resource<"cloudflare::ContainerApplication"> {
+ id: string;
+ name: string;
+}
+
+/**
+ * Deploy and manage container applications on Cloudflare's global network.
+ *
+ * ContainerApplication creates a managed container deployment that runs your Docker images
+ * with automatic scaling, scheduling, and integration with Cloudflare's services.
+ *
+ * @example
+ * // Deploy a simple web application container
+ * const webApp = await ContainerApplication("my-web-app", {
+ * name: "my-web-app",
+ * image: await Image("web-app", {
+ * name: "web-app",
+ * build: {
+ * context: "./docker/web-app"
+ * }
+ * }),
+ * instances: 1,
+ * maxInstances: 3
+ * });
+ *
+ * @example
+ * // Deploy a container with GPU support for AI workloads
+ * const aiApp = await ContainerApplication("ai-inference", {
+ * name: "ai-inference",
+ * image: await Image("ai-model", {
+ * name: "ai-model",
+ * build: {
+ * context: "./docker/ai"
+ * }
+ * }),
+ * schedulingPolicy: "gpu",
+ * instances: 2,
+ * maxInstances: 5
+ * });
+ *
+ * @example
+ * // Deploy a container integrated with Durable Objects
+ * const doApp = await ContainerApplication("stateful-app", {
+ * name: "stateful-app",
+ * image: await Image("do-app", {
+ * name: "do-app",
+ * build: {
+ * context: "./container"
+ * }
+ * }),
+ * durableObjects: {
+ * namespaceId: myDurableObjectNamespace.id
+ * },
+ * instances: 1,
+ * maxInstances: 10
+ * });
+ *
+ * @example
+ * // Create a Container binding for use in a Worker
+ * const worker = await Worker("my-worker", {
+ * name: "my-worker",
+ * entrypoint: "./src/worker.ts",
+ * bindings: {
+ * MY_CONTAINER: await Container("my-container", {
+ * className: "MyContainerClass",
+ * image: await Image("container-do", {
+ * name: "container-do",
+ * context: "./docker/durable-object"
+ * }),
+ * maxInstances: 100,
+ * name: "my-container-do"
+ * })
+ * }
+ * });
+ */
+export const ContainerApplication = Resource(
+ "cloudflare::ContainerApplication",
+ async function (
+ this: Context<ContainerApplication>,
+ _id: string,
+ props: ContainerApplicationProps,
+ ): Promise<ContainerApplication> {
+ const api = await createCloudflareApi(props);
+ if (this.phase === "delete") {
+ if (this.output?.id) {
+ // Delete the container application
+ await deleteContainerApplication(api, this.output.id);
+ }
+ return this.destroy();
+ }
+ // Prefer the immutable repo digest if present. Falls back to the tag reference.
+ const imageReference = props.image.repoDigest ?? props.image.imageRef;
+
+ const configuration = {
+ image: imageReference,
+ instance_type: props.instanceType ?? "dev",
+ observability: {
+ logs: {
+ enabled: true,
+ },
+ logging: {
+ enabled: true,
+ },
+ },
+ };
+ if (this.phase === "update") {
+ const application = await updateContainerApplication(
+ api,
+ this.output.id,
+ {
+ instances: props.instances ?? 1,
+ max_instances: props.maxInstances ?? 10,
+ scheduling_policy: props.schedulingPolicy ?? "default",
+ configuration,
+ },
+ );
+ // TODO(sam): should we wait for the rollout to complete?
+ await createContainerApplicationRollout(api, application.id, {
+ description: "Progressive update",
+ strategy: "rolling",
+ kind: props.rollout?.kind ?? "full_auto",
+ step_percentage: props.rollout?.stepPercentage ?? 25,
+ target_configuration: configuration,
+ });
+ return this({
+ id: application.id,
+ name: application.name,
+ });
+ } else {
+ const application = await createContainerApplication(api, {
+ name: props.name,
+ scheduling_policy: props.schedulingPolicy ?? "default",
+ instances: props.instances ?? 1,
+ max_instances: props.maxInstances ?? 1,
+ durable_objects: props.durableObjects
+ ? {
+ namespace_id: props.durableObjects.namespaceId,
+ }
+ : undefined,
+ constraints: {
+ tier: 1,
+ },
+ configuration: {
+ image: imageReference,
+ instance_type: props.instanceType ?? "dev",
+ observability: {
+ logs: {
+ enabled: true,
+ },
+ },
+ },
+ });
+
+ return this({
+ id: application.id,
+ name: application.name,
+ });
+ }
+ },
+);
+
+export interface ContainerApplicationData {
+ name: string;
+ scheduling_policy: string;
+ instances: number;
+ max_instances: number;
+ constraints: {
+ tier: number;
+ [key: string]: any;
+ };
+ configuration: {
+ image: string;
+ location: string;
+ vcpu: number;
+ memory_mib: number;
+ disk: any;
+ network: any;
+ command: string[];
+ entrypoint: string[];
+ runtime: string;
+ deployment_type: string;
+ observability: any;
+ memory: string;
+ [key: string]: any;
+ };
+ durable_objects: {
+ namespace_id: string;
+ [key: string]: any;
+ };
+ id: string;
+ account_id: string;
+ created_at: string;
+ version: number;
+ durable_object_namespace_id: string;
+ health: {
+ instances: any;
+ [key: string]: any;
+ };
+ [key: string]: any;
+}
+
+export async function listContainerApplications(
+ api: CloudflareApi,
+): Promise<ContainerApplicationData[]> {
+ const deployments = await api.get(
+ `/accounts/${api.accountId}/containers/applications`,
+ );
+ const response = (await deployments.json()) as any as {
+ result: ContainerApplicationData[];
+ errors: { message: string }[];
+ };
+ if (deployments.ok) {
+ return response.result;
+ }
+ throw Error(
+ `Failed to list container applications: ${response.errors.map((e) => e.message).join(", ")}`,
+ );
+}
+
+export interface CreateContainerApplicationBody {
+ name: string;
+ max_instances: number;
+ configuration: DeploymentConfiguration;
+ durable_objects?: {
+ namespace_id: string;
+ };
+ instances?: number;
+ scheduling_policy?: string;
+ constraints?: { tier: number };
+ [key: string]: any;
+}
+
+export async function createContainerApplication(
+ api: CloudflareApi,
+ body: CreateContainerApplicationBody,
+) {
+ const response = await api.post(
+ `/accounts/${api.accountId}/containers/applications`,
+ body,
+ );
+ const result = (await response.json()) as {
+ result: ContainerApplicationData;
+ errors: { message: string }[];
+ };
+ if (response.ok) {
+ return result.result;
+ }
+
+ throw Error(
+ `Failed to create container application: ${result.errors?.map((e: { message: string }) => e.message).join(", ") ?? "Unknown error"}`,
+ );
+}
+
+type Region =
+ | "AFR"
+ | "APAC"
+ | "EEUR"
+ | "ENAM"
+ | "WNAM"
+ | "ME"
+ | "OC"
+ | "SAM"
+ | "WEUR"
+ | (string & {});
+
+type City =
+ | "AFR"
+ | "APAC"
+ | "EEUR"
+ | "ENAM"
+ | "WNAM"
+ | "ME"
+ | "OC"
+ | "SAM"
+ | "WEUR"
+ | (string & {});
+
+export type UpdateApplicationRequestBody = {
+ /**
+ * Number of deployments to maintain within this application. This can be used to scale the application up/down.
+ */
+ instances?: number;
+ max_instances?: number;
+ affinities?: {
+ colocation?: "datacenter";
+ };
+ scheduling_policy?: SchedulingPolicy;
+ constraints?: {
+ region?: Region;
+ tier?: number;
+ regions?: Array<Region>;
+ cities?: Array<City>;
+ };
+ /**
+ * The deployment configuration of all deployments created by this application.
+ * Right now, if you modify the application configuration, only new deployments
+ * created will have the new configuration. You can delete old deployments to
+ * release new instances.
+ *
+ * TODO(sam): should this trigger a replacement?
+ */
+ configuration?: DeploymentConfiguration;
+};
+
+export async function updateContainerApplication(
+ api: CloudflareApi,
+ applicationId: string,
+ body: UpdateApplicationRequestBody,
+) {
+ const response = await api.patch(
+ `/accounts/${api.accountId}/containers/applications/${applicationId}`,
+ body,
+ );
+ const result = (await response.json()) as {
+ result: ContainerApplicationData;
+ errors: { message: string }[];
+ };
+ if (response.ok) {
+ return result.result;
+ }
+
+ throw Error(
+ `Failed to update container application: ${result.errors?.map((e: { message: string }) => e.message).join(", ") ?? "Unknown error"}`,
+ );
+}
+
+export async function deleteContainerApplication(
+ api: CloudflareApi,
+ applicationId: string,
+) {
+ const response = await api.delete(
+ `/accounts/${api.accountId}/containers/applications/${applicationId}`,
+ );
+ const result = (await response.json()) as any;
+ if (response.ok) {
+ return result.result;
+ }
+ throw Error(
+ `Failed to delete container application: ${result.errors?.map((e: { message: string }) => e.message).join(", ") ?? "Unknown error"}`,
+ );
+}
+
+interface CreateRolloutApplicationRequest {
+ description: string;
+ strategy: "rolling";
+ kind?: "full_auto";
+ step_percentage: number;
+ target_configuration: DeploymentConfiguration;
+}
+
+interface CreateRolloutApplicationResponse {
+ id: string;
+ created_at: string;
+ last_updated_at: string;
+ description: string;
+ status: "progressing" | "completed" | "failed" | (string & {});
+ health: {
+ instances: {
+ healthy: number;
+ failed: number;
+ starting: number;
+ scheduling: number;
+ };
+ };
+ kind: "full_auto" | (string & {});
+ strategy: "rolling" | (string & {});
+ current_configuration: {
+ image: string;
+ observability?: {
+ logs?: {
+ enabled: boolean;
+ };
+ logging?: {
+ enabled: boolean;
+ };
+ };
+ };
+ target_configuration: DeploymentConfiguration;
+ current_version: number;
+ target_version: number;
+ steps: Array<{
+ id: number;
+ status: "progressing" | "pending" | "completed" | "failed" | (string & {});
+ step_size: {
+ percentage: number;
+ };
+ description: string;
+ started_at?: string;
+ }>;
+ progress: {
+ total_steps: number;
+ current_step: number;
+ updated_instances: number;
+ total_instances: number;
+ };
+}
+
+export async function createContainerApplicationRollout(
+ api: CloudflareApi,
+ applicationId: string,
+ body: CreateRolloutApplicationRequest,
+) {
+ const response = await api.post(
+ `/accounts/${api.accountId}/containers/applications/${applicationId}/rollouts`,
+ body,
+ );
+ const result = (await response.json()) as {
+ result: CreateRolloutApplicationResponse;
+ errors: { message: string }[];
+ };
+ if (response.ok) {
+ return result.result;
+ }
+ throw Error(
+ `Failed to create container application rollout: ${result.errors.map((e: { message: string }) => e.message).join(", ")}`,
+ );
+}
+
+export type ImageRegistryCredentialsConfiguration = {
+ permissions: Array<"pull" | "push">;
+ expiration_minutes: number;
+};
+
+export async function getContainerCredentials(
+ api: CloudflareApi,
+ registryId = "registry.cloudflare.com",
+) {
+ const credentials = await api.post(
+ `/accounts/${api.accountId}/containers/registries/${registryId}/credentials`,
+ {
+ permissions: ["pull", "push"],
+ expiration_minutes: 60,
+ } satisfies ImageRegistryCredentialsConfiguration,
+ );
+ const result = (await credentials.json()) as {
+ result: {
+ user?: string;
+ username?: string;
+ password: string;
+ };
+ errors: { message: string }[];
+ };
+ if (credentials.ok) {
+ return result.result;
+ }
+ throw Error(
+ `Failed to get container credentials: ${result.errors.map((e: { message: string }) => e.message).join(", ")}`,
+ );
+}
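+
+// Illustrative sketch (editorial): exchanging API credentials for a registry login against
+// the managed registry. Assumes `api` is an authenticated CloudflareApi; the shell line is a sketch.
+//
+//   const creds = await getContainerCredentials(api);
+//   const user = creds.username ?? creds.user;
+//   // echo "<creds.password>" | docker login registry.cloudflare.com -u <user> --password-stdin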
+
+// The Cloudflare managed registry is special in that repo namespaces must always
+// start with the Cloudflare account tag.
+// This helper generates an image tag with the correct namespace attached to the Cloudflare registry host.
+export const getCloudflareRegistryWithAccountNamespace = (
+ accountID: string,
+ tag: string,
+): string => {
+ return `${getCloudflareContainerRegistry()}/${accountID}/${tag}`;
+};
+
+// default Cloudflare managed registry; can be overridden with the CLOUDFLARE_CONTAINER_REGISTRY env var
+export const getCloudflareContainerRegistry = () => {
+ // previously defaulted to registry.cloudchamber.cfdata.org
+ return process.env.CLOUDFLARE_CONTAINER_REGISTRY ?? "registry.cloudflare.com";
+};
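+
+// Illustrative sketch (editorial): composing a fully-qualified image tag for the managed
+// registry. The account ID and tag below are hypothetical.
+//
+//   getCloudflareRegistryWithAccountNamespace("023e105f4ecef8ad9ca31a8372d0c353", "my-app:v1");
+//   // => "registry.cloudflare.com/023e105f4ecef8ad9ca31a8372d0c353/my-app:v1"
+//   // (or "<CLOUDFLARE_CONTAINER_REGISTRY>/..." when the env var override is set)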
+
+/**
+ * Given a container image that is a registry link, this function
+ * returns true if the link points to the Cloudflare container registry
+ * (defined as per `getCloudflareContainerRegistry` above)
+ */
+export function isCloudflareRegistryLink(image: string) {
+ const cfRegistry = getCloudflareContainerRegistry();
+ return image.includes(cfRegistry);
+}
+
+/** Prefixes with the cloudflare-dev namespace. The name should be the container's DO classname, and the tag a build uuid. */
+export const getDevContainerImageName = (name: string, tag: string) => {
+ return `${MF_DEV_CONTAINER_PREFIX}/${name.toLowerCase()}:${tag}`;
+};
+
+export const MF_DEV_CONTAINER_PREFIX = "cloudflare-dev";
+
+export interface ContainerIdentity {
+ account_id: string;
+ external_account_id: string;
+ legacy_identity: string;
+ capabilities: string[];
+ limits: {
+ account_id: string;
+ vcpu_per_deployment: number;
+ memory_mib_per_deployment: number;
+ memory_per_deployment: string;
+ disk_per_deployment: string;
+ disk_mb_per_deployment: number;
+ total_vcpu: number;
+ total_memory_mib: number;
+ node_group: string;
+ ipv4s: number;
+ network_modes: string[];
+ total_disk_mb: number;
+ total_memory: string;
+ };
+ locations: any[];
+ defaults: {
+ vcpus: number;
+ memory_mib: number;
+ memory: string;
+ disk_mb: number;
+ };
+}
+
+export async function getContainerIdentity(api: CloudflareApi) {
+ const metrics = await api.get(`/accounts/${api.accountId}/containers/me`);
+ const result = (await metrics.json()) as {
+ result: ContainerIdentity;
+ errors: { message: string }[];
+ };
+ if (metrics.ok) {
+ return result.result;
+ }
+ throw Error(
+ `Failed to get container me: ${result.errors.map((e: { message: string }) => e.message).join(", ")}`,
+ );
+}
+
+/**
+ * Duration string. From Go documentation:
+ * A string representing the duration in the form "3d1h3m". Leading zero units are omitted.
+ * As a special case, durations less than one second format use a smaller unit (milli-, micro-, or nanoseconds)
+ * to ensure that the leading digit is non-zero.
+ */
+export type Duration = string;
+
+interface DeploymentObservability {
+ logs?: {
+ enabled: boolean;
+ };
+}
+
+export type DeploymentConfiguration = {
+ /**
+ * The image to be used for the deployment.
+ */
+ image: string;
+ /**
+ * The instance type to be used for the deployment.
+ */
+ instance_type?: InstanceType;
+ /**
+ * The observability configuration for the deployment.
+ */
+ observability?: DeploymentObservability;
+ /**
+ * A list of SSH public key IDs from the account
+ */
+  ssh_public_key_ids?: Array<string>;
+ /**
+   * A list of objects with secret names and their access types from the account
+ */
+ secrets?: Array<{
+ /**
+ * The name of the secret within the container
+ */
+ name: string;
+ type: "env";
+ /**
+ * Corresponding secret name from the account
+ */
+ secret: string;
+ }>;
+ /**
+ * Specify the vcpu to be used for the deployment. The default will be the one configured for the account.
+ */
+ vcpu?: number;
+ /**
+ * Specify the memory to be used for the deployment. The default will be the one configured for the account.
+ */
+ memory?: string;
+ /**
+ * The disk configuration for this deployment
+ */
+ disk?: {
+ size: string;
+ };
+ /**
+ * Container environment variables
+ */
+ environment_variables?: Array<{
+ name: string;
+ value: string;
+ }>;
+ /**
+ * Deployment labels
+ */
+ labels?: Array<{
+ name: string;
+ value: string;
+ }>;
+ network?: {
+ /**
+ * Assign an IPv4 address to the deployment. One of 'none' (default), 'predefined' (allocate one from a set of IPv4 addresses in the global pool), 'account' (allocate one from a set of IPv4 addresses preassigned in the account pool). Only applicable to "public" mode.
+ *
+ */
+ assign_ipv4?: "none" | "predefined" | "account";
+ /**
+     * Assign an IPv6 address to the deployment. One of 'predefined' (allocate one from a set of IPv6 addresses in the global pool), 'account' (allocate one from a set of IPv6 addresses preassigned in the account pool). The container will always be assigned an IPv6 address if the networking mode is "public".
+ *
+ */
+ assign_ipv6?: "none" | "predefined" | "account";
+ mode?: "public" | "private";
+ };
+ command?: string[];
+ entrypoint?: string[];
+ dns?: {
+ /**
+ * List of DNS servers that the deployment will use to resolve domain names. You can only specify a maximum of 3.
+ */
+    servers?: Array<string>;
+ /**
+ * The container resolver will append these domains to every resolve query. For example, if you have 'google.com',
+ * and your deployment queries 'web', it will append 'google.com' to 'web' in the search query before trying 'web'.
+ * Limited to 6 domains.
+ */
+    searches?: Array<string>;
+ };
+ ports?: Array<{
+ /**
+ * The name of the port. The port name should be unique for each deployment. Minimum length of 1 and maximum length of 15. No consecutive dashes. If the name is 'web-ui', the container will receive an environment variable as follows:
+ * - CLOUDFLARE_PORT_WEB_UI: Port inside the container
+ * - CLOUDFLARE_HOST_PORT_WEB_UI: Port outside the container
+ * - CLOUDFLARE_HOST_IP_WEB_UI: Address of the external network interface the port is allocated on
+ * - CLOUDFLARE_HOST_ADDR_WEB_UI: CLOUDFLARE_HOST_ADDR_WEB_UI ':' CLOUDFLARE_HOST_PORT_WEB_UI
+ *
+ */
+ name: string;
+ /**
+     * Optional port number; it is assigned only if the user specifies it. If it's not specified, the datacenter scheduler will choose one.
+ */
+ port?: number;
+ }>;
+ /**
+ * Health and readiness checks for this deployment.
+ */
+ checks?: Array<{
+ /**
+ * Optional name for the check. If omitted, a name will be generated automatically.
+ */
+ name?: string;
+ /**
+ * The type of check to perform. A TCP check succeeds if it can connect to the provided port. An HTTP check succeeds if it receives a successful HTTP response (2XX)
+ */
+ type: "http" | "tcp";
+ /**
+ * Connect to the port using TLS
+ */
+ tls?: boolean;
+ /**
+ * The name of the port defined in the "ports" property of the deployment
+ */
+ port: string;
+ /**
+ * Configuration for HTTP checks. Only valid when "type" is "http"
+ */
+ http?: {
+ method?: "GET" | "POST" | "PUT" | "PATCH" | "DELETE" | "OPTIONS" | "HEAD";
+ /**
+ * If the method is one of POST, PATCH or PUT, this is required. It's the body that will be passed to the HTTP healthcheck request.
+ */
+ body?: string;
+ /**
+ * Path that will be used to perform the healthcheck.
+ */
+ path?: string;
+ /**
+ * HTTP headers to include in the request.
+ */
+      headers?: Record<string, string>;
+ };
+ /**
+ * How often the check should be performed
+ */
+ interval: Duration;
+ /**
+ * The amount of time to wait for the check to complete before considering the check to have failed
+ */
+ timeout: Duration;
+ /**
+ * Number of times to attempt the check before considering it to have failed
+ */
+ retries?: number;
+ /**
+ * The kind of check. A failed "healthy" check affects a deployment's "healthy" status, while a failed "ready" check affects a deployment's "ready" status.
+ */
+ kind: "health" | "ready";
+ }>;
+};
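+
+// Illustrative sketch (editorial): a minimal DeploymentConfiguration with a named port and an
+// HTTP readiness check. All concrete values are hypothetical; per the port-name docs above,
+// a port named "web-ui" surfaces CLOUDFLARE_PORT_WEB_UI / CLOUDFLARE_HOST_PORT_WEB_UI inside
+// the container.
+//
+//   const config: DeploymentConfiguration = {
+//     image: "registry.cloudflare.com/<ACCOUNT_ID>/my-app:v1",
+//     observability: { logs: { enabled: true } },
+//     ports: [{ name: "web-ui" }],
+//     checks: [
+//       { type: "http", port: "web-ui", http: { path: "/healthz" },
+//         interval: "10s", timeout: "5s", kind: "ready" },
+//     ],
+//   };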
diff --git a/alchemy/src/cloudflare/custom-domain.ts b/alchemy/src/cloudflare/custom-domain.ts
index 3e0c66a85..61c058dd8 100644
--- a/alchemy/src/cloudflare/custom-domain.ts
+++ b/alchemy/src/cloudflare/custom-domain.ts
@@ -7,6 +7,7 @@ import {
type CloudflareApi,
type CloudflareApiOptions,
} from "./api.ts";
+import { inferZoneIdFromPattern } from "./route.ts";
/**
* Properties for creating or updating a CustomDomain
@@ -19,8 +20,10 @@ export interface CustomDomainProps extends CloudflareApiOptions {
/**
* Cloudflare Zone ID for the domain
+ *
+ * @default - inferred from the domain name
*/
- zoneId: string;
+ zoneId?: string;
/**
* Name of the worker to bind to the domain
@@ -32,6 +35,14 @@ export interface CustomDomainProps extends CloudflareApiOptions {
* @default "production"
*/
environment?: string;
+
+ /**
+ * If true, adopt an existing custom domain binding during creation.
+ * If false and the domain already exists, creation will fail.
+ * This only applies during the create phase.
+ * @default false
+ */
+ adopt?: boolean;
}
/**
@@ -85,6 +96,15 @@ export interface CustomDomain
* workerName: apiWorker.name // Use the name from the Worker resource
* });
*
+ * @example
+ * // Adopt an existing domain binding during creation
+ * const existingDomain = await CustomDomain("api-domain", {
+ * name: "api.example.com",
+ * zoneId: "YOUR_ZONE_ID",
+ * workerName: "my-api-worker",
+ * adopt: true // If domain already exists, adopt it instead of failing
+ * });
+ *
* @see https://developers.cloudflare.com/api/resources/workers/subresources/domains/
*/
export const CustomDomain = Resource(
@@ -97,23 +117,15 @@ export const CustomDomain = Resource(
// Create Cloudflare API client with automatic account discovery
const api = await createCloudflareApi(props);
- // Validate required properties
- if (!props.name) {
- throw new Error("Domain name (props.name) is required");
- }
- if (!props.zoneId) {
- throw new Error("Zone ID (props.zoneId) is required");
- }
- if (!props.workerName) {
- throw new Error("Worker name (props.workerName) is required");
- }
-
if (this.phase === "delete") {
await deleteCustomDomain(this, api, logicalId, props);
return this.destroy();
}
// Create or Update phase
- return await ensureCustomDomain(this, api, logicalId, props);
+ return await ensureCustomDomain(this, api, logicalId, {
+ ...props,
+ zoneId: props.zoneId ?? (await inferZoneIdFromPattern(api, props.name)),
+ });
},
);
@@ -134,19 +146,10 @@ async function deleteCustomDomain(
return; // Exit early if no ID
}
- logger.log(
- `Deleting CustomDomain binding ${domainIdToDelete} for ${domainHostname}`,
- );
const response = await api.delete(
`/accounts/${api.accountId}/workers/domains/${domainIdToDelete}`,
);
- logger.log(
- `Delete result for ${domainIdToDelete} (${domainHostname}):`,
- response.status,
- response.statusText,
- );
-
// 404 is acceptable during deletion for idempotency
if (!response.ok && response.status !== 404) {
await handleApiError(
@@ -167,13 +170,14 @@ async function ensureCustomDomain(
  context: Context<CustomDomain>,
api: CloudflareApi,
_logicalId: string,
- props: CustomDomainProps,
+ props: CustomDomainProps & {
+ zoneId: string;
+ },
): Promise<CustomDomain> {
const environment = props.environment || "production";
const domainHostname = props.name;
// Check if domain binding already exists for this account
- logger.log(`Checking existing domain bindings for account ${api.accountId}`);
const listResponse = await api.get(
`/accounts/${api.accountId}/workers/domains`,
);
@@ -211,12 +215,15 @@ async function ensureCustomDomain(
let currentDomainId = existingBinding?.id;
const bindingExists = !!existingBinding;
- logger.log(
- `Domain binding status for ${domainHostname} (Zone: ${props.zoneId}):`,
- bindingExists
- ? `Found (ID: ${currentDomainId}, Worker: ${existingBinding.service}, Env: ${existingBinding.environment})`
- : "Not found",
- );
+ // Handle the case where domain already exists during create phase
+ if (context.phase === "create" && bindingExists) {
+ if (!props.adopt) {
+ throw new Error(
+ `CustomDomain for ${domainHostname} already exists in zone ${props.zoneId}. ` +
+ "Set adopt: true to take control of the existing domain binding.",
+ );
+ }
+ }
// Determine if we need to update (binding exists but has different service or environment)
const needsUpdate =
@@ -231,9 +238,6 @@ async function ensureCustomDomain(
// Cloudflare's PUT /accounts/{account_id}/workers/domains acts as an upsert
if (!bindingExists || needsUpdate) {
operationPerformed = bindingExists ? "update" : "create";
- logger.log(
- `${operationPerformed === "update" ? "Updating" : "Creating"} domain binding: ${domainHostname} (Zone: ${props.zoneId}) → ${props.workerName}:${environment}`,
- );
const putPayload = {
zone_id: props.zoneId,
@@ -273,13 +277,6 @@ async function ensureCustomDomain(
resultantBinding = putResult.result;
currentDomainId = resultantBinding.id; // Update ID from the PUT response
- logger.log(
- `Successfully ${operationPerformed}d binding, new ID: ${currentDomainId}`,
- );
- } else {
- logger.log(
- `Domain binding already exists and is up to date: ${domainHostname} (ID: ${currentDomainId}) → ${props.workerName}:${environment}`,
- );
}
// Ensure we have the final binding details
diff --git a/alchemy/src/cloudflare/d1-database.ts b/alchemy/src/cloudflare/d1-database.ts
index a27020c5a..ab5bb9ca4 100644
--- a/alchemy/src/cloudflare/d1-database.ts
+++ b/alchemy/src/cloudflare/d1-database.ts
@@ -93,6 +93,16 @@ export interface D1DatabaseProps extends CloudflareApiOptions {
* This is analogous to wrangler's `migrations_dir`.
*/
migrationsDir?: string;
+ /**
+ * Whether to emulate the database locally when Alchemy is running in watch mode.
+ */
+ dev?: {
+ /**
+ * Whether to run the database remotely instead of locally
+ * @default false
+ */
+ remote?: boolean;
+ };
}
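+
+// Illustrative sketch (editorial): opting a single database out of local emulation while
+// Alchemy runs in watch/dev mode. Resource id and name are hypothetical.
+//
+//   const db = await D1Database("db", {
+//     name: "my-app-db",
+//     dev: { remote: true },
+//   });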
export function isD1Database(
@@ -241,13 +251,9 @@ const D1DatabaseResource = Resource(
const databaseName = props.name ?? id;
if (this.phase === "delete") {
- logger.log("Deleting D1 database:", databaseName);
if (props.delete !== false) {
- // Delete D1 database
- logger.log("Deleting D1 database:", databaseName);
await deleteDatabase(api, this.output?.id);
}
-
// Return void (a deleted database has no content)
return this.destroy();
}
@@ -352,6 +358,7 @@ const D1DatabaseResource = Resource(
type: "d1",
id: dbData.result.uuid || "",
name: databaseName,
+ dev: props.dev,
fileSize: dbData.result.file_size,
numTables: dbData.result.num_tables,
version: dbData.result.version,
diff --git a/alchemy/src/cloudflare/do-state-store/internal.ts b/alchemy/src/cloudflare/do-state-store/internal.ts
index 1efc2523d..37a8837b2 100644
--- a/alchemy/src/cloudflare/do-state-store/internal.ts
+++ b/alchemy/src/cloudflare/do-state-store/internal.ts
@@ -1,9 +1,9 @@
-import path from "node:path";
-import { bundle } from "../../esbuild/index.ts";
+import { logger } from "../../util/logger.ts";
import { withExponentialBackoff } from "../../util/retry.ts";
import { handleApiError } from "../api-error.ts";
import type { CloudflareApi } from "../api.ts";
-import { putWorker } from "../worker.ts";
+import type { WorkerMetadata } from "../worker-metadata.ts";
+import { getWorkerTemplate } from "../worker/get-worker-template.ts";
import type { DOStateStoreAPI } from "./types.ts";
interface DOStateStoreClientOptions {
@@ -82,6 +82,30 @@ export class DOStateStoreClient {
});
}
+  async waitUntilReady(): Promise<void> {
+ // This ensures the token is correct and the worker is ready to use.
+ let last: Response | undefined;
+ let delay = 1000;
+ for (let i = 0; i < 20; i++) {
+ const res = await this.validate();
+ if (res.ok) {
+ return;
+ }
+ if (!last) {
+ logger.log("Waiting for state store deployment...");
+ }
+ last = res;
+ // Exponential backoff with jitter
+ const jitter = Math.random() * 0.1 * delay;
+ await new Promise((resolve) => setTimeout(resolve, delay + jitter));
+ delay *= 1.5; // Increase the delay for next attempt
+ delay = Math.min(delay, 10000); // Cap at 10 seconds
+ }
+ throw new Error(
+ `Failed to access state store: ${last?.status} ${last?.statusText}`,
+ );
+ }
+
+  async fetch(path: string, init: RequestInit = {}): Promise<Response> {
const url = new URL(path, this.options.url);
url.searchParams.set("app", this.options.app);
@@ -96,7 +120,7 @@ export class DOStateStoreClient {
}
}
-const TAG = "alchemy-state-store:2025-06-03";
+const TAG = "alchemy-state-store:2025-06-23";
const cache = new Map();
@@ -116,33 +140,50 @@ export async function upsertStateStoreWorker(
cache.set(key, TAG);
return;
}
- const script = await bundleWorkerScript();
- await putWorker(api, workerName, script, {
- main_module: "worker.js",
- compatibility_date: "2025-06-01",
- compatibility_flags: ["nodejs_compat"],
- bindings: [
- {
- name: "DOFS_STATE_STORE",
- type: "durable_object_namespace",
- class_name: "DOFSStateStore",
- },
- {
- name: "DOFS_TOKEN",
- type: "secret_text",
- text: token,
- },
- ],
- migrations: !found
- ? {
- new_sqlite_classes: ["DOFSStateStore"],
- }
- : undefined,
- tags: [TAG],
- observability: {
- enabled: true,
- },
- });
+ const formData = new FormData();
+ const worker = await getWorkerTemplate("do-state-store");
+ formData.append(worker.name, worker);
+ formData.append(
+ "metadata",
+ new Blob([
+ JSON.stringify({
+ main_module: worker.name,
+ compatibility_date: "2025-06-01",
+ compatibility_flags: ["nodejs_compat"],
+ bindings: [
+ {
+ name: "DOFS_STATE_STORE",
+ type: "durable_object_namespace",
+ class_name: "DOFSStateStore",
+ },
+ {
+ name: "DOFS_TOKEN",
+ type: "secret_text",
+ text: token,
+ },
+ ],
+ migrations: !found
+ ? {
+ new_sqlite_classes: ["DOFSStateStore"],
+ }
+ : undefined,
+ tags: [TAG],
+ observability: {
+ enabled: true,
+ },
+ } satisfies WorkerMetadata),
+ ]),
+ );
+
+ // Put the worker with migration tag v1
+ const response = await api.put(
+ `/accounts/${api.accountId}/workers/scripts/${workerName}`,
+ formData,
+ );
+ if (!response.ok) {
+ throw await handleApiError(response, "upload", "worker", workerName);
+ }
+
const subdomainRes = await api.post(
`/accounts/${api.accountId}/workers/scripts/${workerName}/subdomain`,
{ enabled: true, preview_enabled: false },
@@ -151,7 +192,7 @@ export async function upsertStateStoreWorker(
},
);
if (!subdomainRes.ok) {
- await handleApiError(
+ throw await handleApiError(
subdomainRes,
"creating worker subdomain",
"worker",
@@ -191,7 +232,7 @@ export async function getAccountSubdomain(api: CloudflareApi) {
const res = await api.get(`/accounts/${api.accountId}/workers/subdomain`);
if (!res.ok) {
throw new Error(
- `Failed to get account subdomain: ${res.status} ${res.statusText}`,
+ `Failed to get account subdomain: ${res.status} ${res.statusText}: ${await res.text().catch(() => "unknown error")}`,
);
}
const json: { result: { subdomain: string } } = await res.json();
@@ -199,18 +240,3 @@ export async function getAccountSubdomain(api: CloudflareApi) {
cache.set(key, subdomain);
return subdomain;
}
-
-async function bundleWorkerScript() {
- const result = await bundle({
- entryPoint: path.join(__dirname, "worker.ts"),
- bundle: true,
- format: "esm",
- target: "es2022",
- external: ["cloudflare:*", "node:crypto"],
- write: false,
- });
- if (!result.outputFiles?.[0]) {
- throw new Error("Failed to bundle worker.ts");
- }
- return result.outputFiles[0].text;
-}
diff --git a/alchemy/src/cloudflare/do-state-store/store.ts b/alchemy/src/cloudflare/do-state-store/store.ts
index ed2c114a9..601034815 100644
--- a/alchemy/src/cloudflare/do-state-store/store.ts
+++ b/alchemy/src/cloudflare/do-state-store/store.ts
@@ -4,11 +4,8 @@ import type { Scope } from "../../scope.ts";
import type { State, StateStore } from "../../state.ts";
import { deserializeState } from "../../state.ts";
import { createCloudflareApi, type CloudflareApiOptions } from "../api.ts";
-import {
- DOStateStoreClient,
- getAccountSubdomain,
- upsertStateStoreWorker,
-} from "./internal.ts";
+import { getAccountSubdomain } from "../worker/subdomain.ts";
+import { DOStateStoreClient, upsertStateStoreWorker } from "./internal.ts";
export interface DOStateStoreOptions extends CloudflareApiOptions {
/**
@@ -95,33 +92,19 @@ export class DOStateStore implements StateStore {
this.options.worker?.force ?? false,
),
]);
+ if (!subdomain) {
+ throw new Error(
+ "Failed to access state store worker because the workers.dev subdomain is not available.",
+ );
+ }
const client = new DOStateStoreClient({
app: this.scope.appName ?? "alchemy",
stage: this.scope.stage,
url: `https://${workerName}.${subdomain}.workers.dev`,
token,
});
- // This ensures the token is correct and the worker is ready to use.
- let last: Response | undefined;
- let delay = 1000;
- for (let i = 0; i < 20; i++) {
- const res = await client.validate();
- if (res.ok) {
- return client;
- }
- if (!last) {
- console.log("Waiting for state store deployment...");
- }
- last = res;
- // Exponential backoff with jitter
- const jitter = Math.random() * 0.1 * delay;
- await new Promise((resolve) => setTimeout(resolve, delay + jitter));
- delay *= 1.5; // Increase the delay for next attempt
- delay = Math.min(delay, 10000); // Cap at 10 seconds
- }
- throw new Error(
- `Failed to access state store: ${last?.status} ${last?.statusText}`,
- );
+ await client.waitUntilReady();
+ return client;
}
private async getClient() {
diff --git a/alchemy/src/cloudflare/durable-object-namespace.ts b/alchemy/src/cloudflare/durable-object-namespace.ts
index 951ca686d..d36502e12 100644
--- a/alchemy/src/cloudflare/durable-object-namespace.ts
+++ b/alchemy/src/cloudflare/durable-object-namespace.ts
@@ -41,9 +41,8 @@ export function isDurableObjectNamespace(
* environment: "production"
* });
*/
-export class DurableObjectNamespace<
- T extends Rpc.DurableObjectBranded | undefined = undefined,
-> implements DurableObjectNamespaceInput
+export class DurableObjectNamespace
+ implements DurableObjectNamespaceInput
{
public readonly type = "durable_object_namespace" as const;
// alias for bindingName to be consistent with other bindings
diff --git a/alchemy/src/cloudflare/images.ts b/alchemy/src/cloudflare/images.ts
index 5c03ab23f..343a089cb 100644
--- a/alchemy/src/cloudflare/images.ts
+++ b/alchemy/src/cloudflare/images.ts
@@ -56,4 +56,18 @@
*/
export class Images {
public readonly type = "images";
+ public readonly dev?: {
+ remote?: boolean;
+ };
+ constructor(props?: {
+ dev?: {
+ /**
+ * Whether to run the images binding remotely instead of locally
+ * @default false
+ */
+ remote?: boolean;
+ };
+ }) {
+ this.dev = props?.dev;
+ }
}
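+
+// Illustrative sketch (editorial): constructing the Images binding so that it talks to the
+// real service during local development. The binding name and worker are hypothetical.
+//
+//   const images = new Images({ dev: { remote: true } });
+//   // await Worker("my-worker", { bindings: { IMAGES: images }, ... });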
diff --git a/alchemy/src/cloudflare/index.ts b/alchemy/src/cloudflare/index.ts
index 4c5d3a990..eefc4cbfa 100644
--- a/alchemy/src/cloudflare/index.ts
+++ b/alchemy/src/cloudflare/index.ts
@@ -13,6 +13,8 @@ export * from "./browser-rendering.ts";
export * from "./bucket.ts";
export * from "./bundle/external.ts";
export * from "./bundle/local-dev-cloudflare-shim.ts";
+export * from "./certificate-pack.ts";
+export * from "./container.ts";
export * from "./custom-domain.ts";
export * from "./d1-clone.ts";
export * from "./d1-database.ts";
diff --git a/alchemy/src/cloudflare/kv-namespace.ts b/alchemy/src/cloudflare/kv-namespace.ts
index 64207488e..d5268624a 100644
--- a/alchemy/src/cloudflare/kv-namespace.ts
+++ b/alchemy/src/cloudflare/kv-namespace.ts
@@ -41,6 +41,17 @@ export interface KVNamespaceProps extends CloudflareApiOptions {
* @default true
*/
delete?: boolean;
+
+ /**
+ * Whether to emulate the KV namespace locally when Alchemy is running in watch mode.
+ */
+ dev?: {
+ /**
+ * Whether to run the KV namespace remotely instead of locally
+ * @default false
+ */
+ remote?: boolean;
+ };
}
/**
@@ -251,6 +262,7 @@ const _KVNamespace = Resource(
namespaceId,
title: props.title,
values: props.values,
+ dev: props.dev,
createdAt: createdAt,
modifiedAt: Date.now(),
});
diff --git a/alchemy/src/cloudflare/pipeline.ts b/alchemy/src/cloudflare/pipeline.ts
index 8b7b4c1a1..f64ec39aa 100644
--- a/alchemy/src/cloudflare/pipeline.ts
+++ b/alchemy/src/cloudflare/pipeline.ts
@@ -522,6 +522,7 @@ export async function createPipeline(
api: CloudflareApi,
pipelineName: string,
props: PipelineProps,
+ attempt = 0,
): Promise {
// Prepare the create payload
const createPayload = preparePipelinePayload(api, pipelineName, props);
@@ -532,6 +533,11 @@ export async function createPipeline(
);
if (!createResponse.ok) {
+ if (createResponse.status === 404 && attempt < 3) {
+      // bucket does not exist; this might be transient, so retry
+ await new Promise((resolve) => setTimeout(resolve, 1000 * (1 + attempt)));
+ return await createPipeline(api, pipelineName, props, attempt + 1);
+ }
return await handleApiError(
createResponse,
"creating",
diff --git a/alchemy/src/cloudflare/queue.ts b/alchemy/src/cloudflare/queue.ts
index c1caf5a8e..3bee0dbb5 100644
--- a/alchemy/src/cloudflare/queue.ts
+++ b/alchemy/src/cloudflare/queue.ts
@@ -72,6 +72,17 @@ export interface QueueProps extends CloudflareApiOptions {
* @default false
*/
adopt?: boolean;
+
+ /**
+ * Whether to emulate the queue locally when Alchemy is running in watch mode.
+ */
+ dev?: {
+ /**
+ * Whether to run the queue remotely instead of locally
+ * @default false
+ */
+ remote?: boolean;
+ };
}
export function isQueue(eventSource: any): eventSource is Queue {
@@ -289,6 +300,7 @@ const QueueResource = Resource("cloudflare::Queue", async function <
createdOn: queueData.result.created_on || new Date().toISOString(),
modifiedOn: queueData.result.modified_on || new Date().toISOString(),
accountId: api.accountId,
+ dev: props.dev,
// phantom properties
Body: undefined as T,
Batch: undefined! as MessageBatch,
diff --git a/alchemy/src/cloudflare/route.ts b/alchemy/src/cloudflare/route.ts
index 25cf75ee9..f4d371523 100644
--- a/alchemy/src/cloudflare/route.ts
+++ b/alchemy/src/cloudflare/route.ts
@@ -153,13 +153,7 @@ export const Route = Resource(
// Get or infer zone ID (only needed for create/update phases)
let zoneId = props.zoneId;
if (!zoneId) {
- const inferredZoneId = await inferZoneIdFromPattern(props.pattern, {
- accountId: props.accountId,
- apiKey: props.apiKey,
- apiToken: props.apiToken,
- baseUrl: props.baseUrl,
- email: props.email,
- });
+ const inferredZoneId = await inferZoneIdFromPattern(api, props.pattern);
if (!inferredZoneId) {
throw new Error(
@@ -423,17 +417,17 @@ function extractDomainFromPattern(pattern: string): string {
* @param apiOptions API options for Cloudflare API calls
* @returns Promise resolving to zone ID or null if not found
*/
-async function inferZoneIdFromPattern(
+export async function inferZoneIdFromPattern(
+ api: CloudflareApi,
pattern: string,
- apiOptions: Partial,
-): Promise {
+): Promise {
const domain = extractDomainFromPattern(pattern);
// Handle wildcard domains by removing the wildcard part
const cleanDomain = domain.replace(/^\*\./, "");
// Try to find zone for the exact domain first
- let zone = await getZoneByDomain(cleanDomain, apiOptions);
+ let zone = await getZoneByDomain(api, cleanDomain);
if (zone) {
return zone.id;
}
@@ -442,11 +436,14 @@ async function inferZoneIdFromPattern(
const domainParts = cleanDomain.split(".");
for (let i = 1; i < domainParts.length - 1; i++) {
const parentDomain = domainParts.slice(i).join(".");
- zone = await getZoneByDomain(parentDomain, apiOptions);
+ zone = await getZoneByDomain(api, parentDomain);
if (zone) {
return zone.id;
}
}
- return null;
+ throw new Error(
+ `Could not infer zone ID for route pattern "${pattern}". ` +
+ "Please ensure the domain is managed by Cloudflare or specify an explicit zoneId.",
+ );
}
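+
+// Illustrative sketch (editorial): the now-exported helper resolves a zone from a route
+// pattern, walking up parent domains, and throws when nothing matches. Values are hypothetical.
+//
+//   const zoneId = await inferZoneIdFromPattern(api, "api.sub.example.com/*");
+//   // tries "api.sub.example.com", then "sub.example.com", then "example.com"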
diff --git a/alchemy/src/cloudflare/vite.ts b/alchemy/src/cloudflare/vite.ts
index a8eaa3038..b65fe319e 100644
--- a/alchemy/src/cloudflare/vite.ts
+++ b/alchemy/src/cloudflare/vite.ts
@@ -1,4 +1,5 @@
import path from "node:path";
+import { detectPackageManager } from "../util/detect-package-manager.ts";
import type { Assets } from "./assets.ts";
import type { Bindings } from "./bindings.ts";
import { Website, type WebsiteProps } from "./website.ts";
@@ -17,6 +18,13 @@ export async function Vite(
props: ViteProps,
): Promise<Website<B>> {
const defaultAssets = path.join("dist", "client");
+ const packageManager = detectPackageManager();
+ const devCommand = {
+ npm: "npx vite dev",
+ bun: "bun vite dev",
+ pnpm: "pnpm vite dev",
+ yarn: "yarn vite dev",
+ }[packageManager];
return Website(id, {
...props,
spa: true,
@@ -27,5 +35,9 @@ export async function Vite(
dist: props.assets.dist ?? defaultAssets,
}
: (props.assets ?? defaultAssets),
+ dev: props.dev ?? {
+ command: devCommand,
+ url: "http://localhost:5173",
+ },
});
}
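+
+// Illustrative sketch (editorial): with no explicit `dev` prop, the dev command is derived
+// from the detected package manager and Vite's default port is assumed.
+//
+//   await Vite("web", { main: "src/worker.ts" });
+//   // dev: { command: "pnpm vite dev", url: "http://localhost:5173" } under pnpm, for example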
diff --git a/alchemy/src/cloudflare/website.ts b/alchemy/src/cloudflare/website.ts
index d597183a3..359035cd4 100644
--- a/alchemy/src/cloudflare/website.ts
+++ b/alchemy/src/cloudflare/website.ts
@@ -94,6 +94,14 @@ export interface WebsiteProps
* @default false
*/
spa?: boolean;
+
+ /**
+ * Configure the command to use in development mode
+ */
+ dev?: {
+ command: string;
+ url: string;
+ };
}
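+
+// Illustrative sketch (editorial): pointing Alchemy's dev mode at a framework dev server
+// instead of building and serving static assets. Command and URL are hypothetical.
+//
+//   await Website("site", {
+//     command: "astro build",
+//     dev: { command: "astro dev", url: "http://localhost:4321" },
+//   });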
export type Website<B extends Bindings> = B extends { ASSETS: any }
@@ -114,34 +122,53 @@ export async function Website(
{
parent: Scope.current,
},
- async () => {
+ async (scope) => {
+ // directory from which all relative paths are resolved
const cwd = path.resolve(props.cwd || process.cwd());
- const fileName =
+
+      function resolveAbsPath<S extends string | undefined>(f: S): S {
+ return (
+ f ? (path.isAbsolute(f) ? f : path.resolve(cwd, f)) : undefined
+ ) as S;
+ }
+
+ // absolute path to the wrangler.jsonc file
+ const wranglerJsonPath = resolveAbsPath(
typeof wrangler === "boolean"
? "wrangler.jsonc"
: typeof wrangler === "string"
? wrangler
- : (wrangler?.path ?? "wrangler.jsonc");
- const wranglerPath =
- fileName && path.relative(cwd, path.join(cwd, fileName));
- const wranglerMain =
- typeof wrangler === "object"
- ? (wrangler.main ?? props.main)
- : props.main;
+ : (wrangler?.path ?? "wrangler.jsonc"),
+ );
- const workerName = props.name ?? id;
+ // absolute path to the worker entrypoint
+ const mainPath = resolveAbsPath(props.main);
+
+ // absolute path to the worker entrypoint (if different in wrangler.jsonc)
+ const wranglerMainPath = resolveAbsPath(
+ typeof wrangler === "object"
+ ? (wrangler.main ?? props.main!)
+ : props.main,
+ );
- const assetDir =
+ // absolute path to the assets directory
+ const assetsDirPath = resolveAbsPath(
typeof props.assets === "string"
? props.assets
- : (props.assets?.dist ?? "dist");
+ : (props.assets?.dist ?? "dist"),
+ );
+
+ const workerName = props.name ?? id;
const workerProps = {
...props,
compatibilityDate:
props.compatibilityDate ?? DEFAULT_COMPATIBILITY_DATE,
name: workerName,
- entrypoint: props.main,
+ entrypoint: mainPath
+ ? // this path should be relative to the process.cwd() because it is used by esbuild
+ path.relative(process.cwd(), mainPath)
+ : undefined,
assets: {
html_handling: "auto-trailing-slash",
not_found_handling: props.spa ? "single-page-application" : "none",
@@ -158,22 +185,35 @@ export default {
};`,
url: true,
adopt: true,
+ dev: props.dev
+ ? {
+ command: props.dev.command,
+ url: props.dev.url,
+ cwd,
+ }
+ : undefined,
} as WorkerProps & { name: string };
if (wrangler) {
+ // paths in wrangler.jsonc must be relative to it
+      const relativeToWrangler = <S extends string | undefined>(f: S): S =>
+ (f ? path.relative(path.dirname(wranglerJsonPath), f) : f) as S;
await WranglerJson("wrangler.jsonc", {
- path: wranglerPath,
+ path: wranglerJsonPath,
worker: workerProps,
- main: wranglerMain,
+ main: relativeToWrangler(wranglerMainPath),
// hard-code the assets directory because we haven't yet included the assets binding
assets: {
binding: "ASSETS",
- directory: assetDir,
+ // path must be relative to the wrangler.jsonc file
+ directory: relativeToWrangler(assetsDirPath),
},
});
}
- if (props.command) {
+ const isDev = scope.dev && !!props.dev;
+
+ if (props.command && !isDev) {
await Exec("build", {
cwd,
command: props.command,
@@ -188,9 +228,12 @@ export default {
...workerProps.bindings,
// we don't include the Assets binding until after build to make sure the asset manifest is correct
// we generate the wrangler.json using all the bind
- ASSETS: await Assets("assets", {
- path: assetDir,
- }),
+ ASSETS: isDev
+ ? undefined
+ : await Assets("assets", {
+              // Assets are discovered from process.cwd(), not Website.cwd or wrangler.jsonc
+ path: path.relative(process.cwd(), assetsDirPath),
+ }),
},
} as WorkerProps & { name: string })) as Website;
},
diff --git a/alchemy/src/cloudflare/worker-metadata.ts b/alchemy/src/cloudflare/worker-metadata.ts
index 1528830df..d90126bf6 100644
--- a/alchemy/src/cloudflare/worker-metadata.ts
+++ b/alchemy/src/cloudflare/worker-metadata.ts
@@ -1,20 +1,24 @@
import path from "node:path";
-import type { Context } from "../context.ts";
+import { assertNever } from "../util/assert-never.ts";
import { logger } from "../util/logger.ts";
-import { slugify } from "../util/slugify.ts";
+import type { CloudflareApi } from "./api.ts";
import {
Self,
type Bindings,
type WorkerBindingDurableObjectNamespace,
type WorkerBindingSpec,
} from "./bindings.ts";
+import { isContainer, type Container } from "./container.ts";
import {
isDurableObjectNamespace,
type DurableObjectNamespace,
} from "./durable-object-namespace.ts";
import { createAssetConfig, type AssetUploadResult } from "./worker-assets.ts";
-import type { SingleStepMigration } from "./worker-migration.ts";
-import type { AssetsConfig, Worker, WorkerProps } from "./worker.ts";
+import type {
+ MultiStepMigration,
+ SingleStepMigration,
+} from "./worker-migration.ts";
+import type { AssetsConfig, WorkerProps } from "./worker.ts";
/**
* Metadata returned by Cloudflare API for a worker script
@@ -191,19 +195,27 @@ export interface WorkerMetadata {
cron: string;
suspended: boolean;
}[];
+ containers?: { class_name: string }[];
}
-export async function prepareWorkerMetadata<B extends Bindings>(
-  ctx: Context<Worker<B>>,
- oldBindings: WorkerBindingSpec[] | undefined,
- oldTags: string[] | undefined,
+export async function prepareWorkerMetadata(
+ api: CloudflareApi,
props: WorkerProps & {
compatibilityDate: string;
compatibilityFlags: string[];
workerName: string;
+ migrationTag?: string;
+ assetUploadResult?: AssetUploadResult;
},
- assetUploadResult?: AssetUploadResult,
): Promise<WorkerMetadata> {
+ const oldSettings = await getWorkerSettings(api, props.workerName);
+ const oldTags: string[] | undefined = Array.from(
+ new Set([
+ ...(oldSettings?.default_environment?.script?.tags ?? []),
+ ...(oldSettings?.tags ?? []),
+ ]),
+ );
+ const oldBindings = oldSettings?.bindings;
// we use Cloudflare Worker tags to store a mapping between Alchemy's stable identifier and the binding name
// e.g.
// {
@@ -212,7 +224,7 @@ export async function prepareWorkerMetadata(
// will be stored as alchemy:do:stable-id:BINDING_NAME
// TODO(sam): should we base64 encode to ensure no `:` collision risk?
const bindingNameToStableId = Object.fromEntries(
- oldTags?.flatMap((tag) => {
+ oldTags?.flatMap((tag: string) => {
// alchemy:do:{stableId}:{bindingName}
if (tag.startsWith("alchemy:do:")) {
const [, , stableId, bindingName] = tag.split(":");
@@ -222,6 +234,14 @@ export async function prepareWorkerMetadata(
}) ?? [],
);
+ const oldMigrationTag = oldTags?.flatMap((tag: string) => {
+ if (tag.startsWith("alchemy:migration-tag:")) {
+ return [tag.slice("alchemy:migration-tag:".length)];
+ }
+ return [];
+ })[0];
+ const newMigrationTag = bumpMigrationTagVersion(oldMigrationTag);
+
const deletedClasses = oldBindings?.flatMap((oldBinding) => {
if (
oldBinding.type === "durable_object_namespace" &&
@@ -240,8 +260,9 @@ export async function prepareWorkerMetadata(
// try and find the DO binding by stable id
const object = Object.values(props.bindings).find(
- (binding): binding is DurableObjectNamespace =>
- isDurableObjectNamespace(binding) && binding.id === stableId,
+ (binding): binding is DurableObjectNamespace | Container =>
+ (isDurableObjectNamespace(binding) || isContainer(binding)) &&
+ binding.id === stableId,
);
if (object) {
// we found the corresponding object, it should not be deleted
@@ -284,29 +305,30 @@ export async function prepareWorkerMetadata(
},
// TODO(sam): base64 encode instead? 0 collision risk vs readability.
tags: [
- `alchemy:id:${slugify(ctx.fqn)}`,
// encode a mapping table of Durable Object stable ID -> binding name
// we use this to reliably compute class migrations based on server-side state
...Object.entries(props.bindings ?? {}).flatMap(
([bindingName, binding]) =>
- isDurableObjectNamespace(binding)
+ isDurableObjectNamespace(binding) || isContainer(binding)
? // TODO(sam): base64 encode if contains `:`?
[`alchemy:do:${binding.id}:${bindingName}`]
: [],
),
+      // encode the migration tag if there is one so we can avoid a failed PutWorker after adoption
+ ...(newMigrationTag ? [`alchemy:migration-tag:${newMigrationTag}`] : []),
],
migrations: {
- new_classes: props.migrations?.new_classes ?? [],
- deleted_classes: [
- ...(deletedClasses ?? []),
- ...(props.migrations?.deleted_classes ?? []),
- ],
- renamed_classes: props.migrations?.renamed_classes ?? [],
- transferred_classes: props.migrations?.transferred_classes ?? [],
- new_sqlite_classes: props.migrations?.new_sqlite_classes ?? [],
+ old_tag: oldMigrationTag,
+ new_tag: newMigrationTag,
+ new_classes: [],
+ deleted_classes: [...(deletedClasses ?? [])],
+ renamed_classes: [],
+ transferred_classes: [],
+ new_sqlite_classes: [],
},
};
+ const assetUploadResult = props.assetUploadResult;
// If we have asset upload results, add them to the metadata
if (assetUploadResult) {
meta.assets = {
@@ -369,7 +391,10 @@ export async function prepareWorkerMetadata(
type: "durable_object_namespace",
name: bindingName,
class_name: binding.className,
- script_name: binding.scriptName,
+ script_name:
+ binding.scriptName === props.workerName
+ ? undefined
+ : binding.scriptName,
environment: binding.environment,
namespace_id: binding.namespaceId,
});
@@ -494,15 +519,35 @@ export async function prepareWorkerMetadata(
key_base64: binding.key_base64?.unencrypted,
key_jwk: binding.key_jwk?.unencrypted,
});
+ } else if (binding.type === "container") {
+ meta.bindings.push({
+ type: "durable_object_namespace",
+ class_name: binding.className,
+ name: bindingName,
+ });
+ if (
+ binding.scriptName === undefined ||
+ // TODO(sam): not sure if Cloudflare Api would like us using `this` worker name here
+ binding.scriptName === props.workerName
+ ) {
+        // we do not need to configure class migrations for cross-script bindings
+ configureClassMigration(bindingName, binding);
+ }
+ (meta.containers ??= []).push({ class_name: binding.className });
} else {
- // @ts-expect-error - we should never reach here
- throw new Error(`Unsupported binding type: ${binding.type}`);
+ assertNever(
+ binding,
+ `Unsupported binding type: ${
+ // @ts-expect-error - types think it's never
+ binding.type
+ }`,
+ );
}
}
function configureClassMigration(
bindingName: string,
- newBinding: DurableObjectNamespace,
+ newBinding: DurableObjectNamespace | Container,
) {
let prevBinding: WorkerBindingDurableObjectNamespace | undefined;
if (oldBindings) {
@@ -577,3 +622,70 @@ export async function prepareWorkerMetadata(
}
return meta;
}
+
+export function bumpMigrationTagVersion(tag?: string) {
+ if (tag) {
+ if (!tag.match(/^v\d+$/)) {
+      throw new Error(`Invalid tag format: ${tag}. Expected format: v<number>`);
+ }
+ return `v${Number.parseInt(tag.slice(1)) + 1}`;
+ }
+ return undefined;
+}
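+
+// Illustrative sketch (editorial): the tag is a simple monotonically increasing "v<number>".
+//
+//   bumpMigrationTagVersion("v1");      // => "v2"
+//   bumpMigrationTagVersion(undefined); // => undefined (no prior migrations)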
+
+interface WorkerSettings {
+ bindings: WorkerBindingSpec[];
+ compatibility_date: string;
+ compatibility_flags: string[];
+ migrations: SingleStepMigration | MultiStepMigration;
+ [key: string]: any;
+}
+
+async function getWorkerSettings(
+ api: CloudflareApi,
+ workerName: string,
+): Promise {
+ // Fetch the bindings for a worker by calling the Cloudflare API endpoint:
+ // GET /accounts/:account_id/workers/scripts/:script_name/bindings
+ // See: https://developers.cloudflare.com/api/resources/workers/subresources/scripts/subresources/script_and_version_settings/methods/get/
+ const response = await api.get(
+ `/accounts/${api.accountId}/workers/scripts/${workerName}/settings`,
+ );
+ if (response.status === 404) {
+ return undefined;
+ }
+ if (!response.ok) {
+ throw new Error(
+ `Error getting worker bindings: ${response.status} ${response.statusText}`,
+ );
+ }
+ // The result is an object with a "result" property containing the bindings array
+ const { result, success, errors } = (await response.json()) as {
+ result: {
+ bindings: WorkerBindingSpec[];
+ compatibility_date: string;
+ compatibility_flags: string[];
+ migrations: SingleStepMigration | MultiStepMigration;
+ [key: string]: any;
+ };
+ success: boolean;
+ errors: Array<{
+ code: number;
+ message: string;
+ documentation_url: string;
+ [key: string]: any;
+ }>;
+ messages: Array<{
+ code: number;
+ message: string;
+ documentation_url: string;
+ [key: string]: any;
+ }>;
+ };
+ if (!success) {
+ throw new Error(
+ `Error getting worker bindings: ${response.status} ${response.statusText}\nErrors:\n${errors.map((e) => `- [${e.code}] ${e.message} (${e.documentation_url})`).join("\n")}`,
+ );
+ }
+ return result;
+}
diff --git a/alchemy/src/cloudflare/worker-stub.ts b/alchemy/src/cloudflare/worker-stub.ts
index 81687d7c8..6aaa3c55e 100644
--- a/alchemy/src/cloudflare/worker-stub.ts
+++ b/alchemy/src/cloudflare/worker-stub.ts
@@ -7,6 +7,7 @@ import {
type CloudflareApi,
type CloudflareApiOptions,
} from "./api.ts";
+import { configureURL } from "./worker.ts";
/**
* Properties for creating a Worker stub
@@ -19,6 +20,14 @@ export interface WorkerStubProps<
*/
name: string;
+ /**
+ * Whether to enable a workers.dev URL for this worker
+ *
+ * If true, the worker will be available at {name}.{subdomain}.workers.dev
+ * @default true
+ */
+ url?: boolean;
+
/**
* The RPC class to use for the worker.
*
@@ -39,6 +48,12 @@ export interface WorkerStub<
*/
name: string;
+ /**
+ * The worker's URL if enabled
+ * Format: {name}.{subdomain}.workers.dev
+ */
+ url?: string;
+
/**
* Optional type branding for the worker's RPC entrypoint.
*
@@ -59,12 +74,21 @@ export function isWorkerStub(resource: Resource): resource is WorkerStub {
* exists and creates an empty one if needed.
*
* @example
- * // Reserve a worker name without deploying code
+ * // Reserve a worker name without deploying code, with URL enabled (default)
* const workerStub = await WorkerStub("my-worker", {
* name: "my-reserved-worker"
* });
*
- * console.log(`Worker ${workerStub.name} exists: ${!workerStub.created}`);
+ * console.log(`Worker ${workerStub.name} is available at: ${workerStub.url}`);
+ *
+ * @example
+ * // Reserve a worker name without enabling URL
+ * const workerStub = await WorkerStub("my-worker", {
+ * name: "my-reserved-worker",
+ * url: false
+ * });
+ *
+ * console.log(`Worker ${workerStub.name} created without URL`);
*/
export const WorkerStub = Resource("cloudflare::WorkerStub", async function <
RPC extends Rpc.WorkerEntrypointBranded = Rpc.WorkerEntrypointBranded,
@@ -84,11 +108,16 @@ export const WorkerStub = Resource("cloudflare::WorkerStub", async function <
await createEmptyWorker(api, props.name);
}
+ // Configure URL if requested (defaults to true)
+ const enableUrl = props.url ?? true;
+ const workerUrl = await configureURL(this, api, props.name, enableUrl);
+
// Return the worker stub info
return this({
type: "service",
__rpc__: props.rpc as unknown as RPC,
...props,
+ url: workerUrl,
}) as WorkerStub;
});
diff --git a/alchemy/src/cloudflare/worker.ts b/alchemy/src/cloudflare/worker.ts
index 6c3f60b26..3bc56a87f 100644
--- a/alchemy/src/cloudflare/worker.ts
+++ b/alchemy/src/cloudflare/worker.ts
@@ -1,4 +1,13 @@
+import { spawn } from "node:child_process";
+import {
+ existsSync,
+ mkdirSync,
+ readFileSync,
+ unlinkSync,
+ writeFileSync,
+} from "node:fs";
import path from "node:path";
+import { BUILD_DATE } from "../build-date.ts";
import type { Context } from "../context.ts";
import type { BundleProps } from "../esbuild/bundle.ts";
import { InnerResourceScope, Resource, ResourceKind } from "../resource.ts";
@@ -10,9 +19,9 @@ import { Secret, secret } from "../secret.ts";
import { serializeScope } from "../serde.ts";
import type { type } from "../type.ts";
import { getContentType } from "../util/content-type.ts";
+import { DeferredPromise } from "../util/deferred-promise.ts";
import { logger } from "../util/logger.ts";
import { withExponentialBackoff } from "../util/retry.ts";
-import { slugify } from "../util/slugify.ts";
import { CloudflareApiError, handleApiError } from "./api-error.ts";
import {
type CloudflareApi,
@@ -24,15 +33,19 @@ import {
type Binding,
type Bindings,
Json,
+ type WorkerBindingDurableObjectNamespace,
type WorkerBindingService,
type WorkerBindingSpec,
} from "./bindings.ts";
import type { Bound } from "./bound.ts";
import { isBucket } from "./bucket.ts";
+import { createWorkerDevContext } from "./bundle/bundle-worker-dev.ts";
import {
type NoBundleResult,
bundleWorkerScript,
} from "./bundle/bundle-worker.ts";
+import { type Container, ContainerApplication } from "./container.ts";
+import { CustomDomain } from "./custom-domain.ts";
import { isD1Database } from "./d1-database.ts";
import type { DispatchNamespaceResource } from "./dispatch-namespace.ts";
import {
@@ -56,12 +69,14 @@ import { Route } from "./route.ts";
import { isVectorizeIndex } from "./vectorize-index.ts";
import { type AssetUploadResult, uploadAssets } from "./worker-assets.ts";
import {
- type WorkerMetadata,
type WorkerScriptMetadata,
+ bumpMigrationTagVersion,
prepareWorkerMetadata,
} from "./worker-metadata.ts";
-import type { SingleStepMigration } from "./worker-migration.ts";
import { WorkerStub, isWorkerStub } from "./worker-stub.ts";
+import type { MiniflareWorkerOptions } from "./worker/miniflare-worker-options.ts";
+import { miniflareServer } from "./worker/miniflare.ts";
+import { getAccountSubdomain } from "./worker/subdomain.ts";
import { Workflow, isWorkflow, upsertWorkflow } from "./workflow.ts";
/**
@@ -98,11 +113,12 @@ export interface AssetsConfig {
/**
* When true, requests will always invoke the Worker script.
+ * If an array is passed, the worker will be invoked for matching requests.
* Otherwise, attempt to serve an asset matching the request, falling back to the Worker script.
*
* @default false
*/
- run_worker_first?: boolean;
+ run_worker_first?: boolean | string[];
}
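+
+// Illustrative sketch (editorial): route only API paths through the Worker and let everything
+// else be served from static assets. Paths are hypothetical.
+//
+//   assets: { run_worker_first: ["/api/*", "/oauth/callback"] }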
export interface BaseWorkerProps<
@@ -172,11 +188,6 @@ export interface BaseWorkerProps<
enabled?: boolean;
};
- /**
- * Migrations to apply to the worker
- */
- migrations?: SingleStepMigration;
-
/**
* Whether to adopt the Worker if it already exists when creating
*/
@@ -184,7 +195,7 @@ export interface BaseWorkerProps<
/**
* The compatibility date for the worker
- * @default "2025-04-26"
+ * @default BUILD_DATE - automatically pinned to the package build date
*/
compatibilityDate?: string;
@@ -220,27 +231,66 @@ export interface BaseWorkerProps<
* Routes to create for this worker.
*
* Each route maps a URL pattern to this worker script.
+ *
+ * @example
+ * await Worker("my-worker", {
+ * routes: [
+ * "sub.example.com/*",
+ * { pattern: "sub.example.com/*", zoneId: "1234567890" },
+ * ],
+ * });
*/
- routes?: Array<{
- /**
- * URL pattern for the route
- * @example "sub.example.com/*"
- */
- pattern: string;
- /**
- * Zone ID for the route. If not provided, will be automatically inferred from the route pattern.
- */
- zoneId?: string;
- /**
- * Whether this is a custom domain route
- */
- customDomain?: boolean;
- /**
- * Whether to adopt an existing route with the same pattern if it exists
- * @default false
- */
- adopt?: boolean;
- }>;
+ routes?: Array<
+ | string
+ | {
+ /**
+ * URL pattern for the route
+ * @example "sub.example.com/*"
+ */
+ pattern: string;
+ /**
+ * Zone ID for the route. If not provided, will be automatically inferred from the route pattern.
+ */
+ zoneId?: string;
+ /**
+ * Whether to adopt an existing route with the same pattern if it exists
+ * @default false
+ */
+ adopt?: boolean;
+ }
+ >;
+
+ /**
+ * Custom domains to bind to the worker
+ *
+ * @example
+ * await Worker("my-worker", {
+ * domains: [
+ * "example.com",
+ * { name: "example.com", zoneId: "1234567890" },
+ * ],
+ * });
+ */
+ domains?: (
+ | string
+ | {
+ /**
+ * The domain name to bind to the worker
+ */
+ domainName: string;
+ /**
+ * Zone ID for the domain.
+ *
+ * @default - If not provided, will be automatically inferred from the domain name.
+ */
+ zoneId?: string;
+ /**
+ * Whether to adopt an existing domain if it exists
+ * @default false
+ */
+ adopt?: boolean;
+ }
+ )[];
/**
* The RPC class to use for the worker.
@@ -266,6 +316,32 @@ export interface BaseWorkerProps<
* @example "pr-123"
*/
version?: string;
+
+ /**
+ * Configuration for local development. By default, when Alchemy is running in development mode,
+ * the worker will be emulated locally and available at a randomly selected port.
+ */
+ dev?:
+ | boolean
+ | {
+ /**
+ * Port to use for local development
+ */
+ port?: number;
+ /**
+ * EXPERIMENTAL: Whether to run the worker remotely instead of locally.
+ *
+ * When this is enabled, hot reloading will not work.
+ *
+ * @default false
+ */
+ remote?: boolean;
+ }
+ | {
+ command: string;
+ url: string;
+ cwd?: string;
+ };
}
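+
+// Illustrative sketch (editorial): the accepted shapes of `dev`. Port, command, and URL
+// values are hypothetical.
+//
+//   dev: false                                                    // opt out of local emulation
+//   dev: { port: 8787 }                                           // emulate locally on a fixed port
+//   dev: { command: "astro dev", url: "http://localhost:4321" }   // delegate to a framework dev server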
export interface InlineWorkerProps<
@@ -373,7 +449,7 @@ export type Worker<
B extends Bindings | undefined = Bindings | undefined,
RPC extends Rpc.WorkerEntrypointBranded = Rpc.WorkerEntrypointBranded,
> = Resource<"cloudflare::Worker"> &
- Omit, "url" | "script" | "routes"> &
+ Omit, "url" | "script" | "routes" | "domains"> &
globalThis.Service & {
/** @internal phantom property */
__rpc__?: RPC;
@@ -423,6 +499,11 @@ export type Worker<
*/
routes?: Route[];
+ /**
+ * The custom domains that were created for this worker
+ */
+ domains?: CustomDomain[];
+
// phantom property (for typeof myWorker.Env)
Env: B extends Bindings
? {
@@ -867,7 +948,7 @@ export function Worker(
});
}
-export const DEFAULT_COMPATIBILITY_DATE = "2025-04-20";
+export const DEFAULT_COMPATIBILITY_DATE = BUILD_DATE;
export const _Worker = Resource(
"cloudflare::Worker",
@@ -883,9 +964,6 @@ export const _Worker = Resource(
throw new Error("entrypoint must be provided when noBundle is true");
}
- // Create Cloudflare API client with automatic account discovery
- const api = await createCloudflareApi(props);
-
// Use the provided name
const workerName = props.name ?? id;
@@ -893,13 +971,201 @@ export const _Worker = Resource(
props.compatibilityDate ?? DEFAULT_COMPATIBILITY_DATE;
const compatibilityFlags = props.compatibilityFlags ?? [];
- const uploadWorkerScript = async (props: WorkerProps) => {
- const [oldBindings, oldMetadata] = await Promise.all([
- getWorkerBindings(api, workerName),
- getWorkerScriptMetadata(api, workerName),
- ]);
- const oldTags = oldMetadata?.default_environment?.script?.tags;
+ if (this.scope.dev && this.phase !== "delete" && props.dev !== false) {
+ // Get current timestamp
+ const now = Date.now();
+ if (typeof props.dev === "object" && "command" in props.dev) {
+ upsertDevCommand({
+ id,
+ command: props.dev.command,
+ cwd: props.dev.cwd ?? process.cwd(),
+ env: props.env ?? {},
+ });
+ return this({
+ type: "service",
+ id,
+ entrypoint: props.entrypoint,
+ name: workerName,
+ compatibilityDate,
+ compatibilityFlags,
+ format: props.format || "esm", // Include format in the output
+ bindings: props.bindings ?? ({} as B),
+ env: props.env,
+ observability: props.observability,
+ createdAt: now,
+ updatedAt: now,
+ eventSources: props.eventSources,
+ url: props.dev.url,
+ dev: props.dev,
+ // Include assets configuration in the output
+ assets: props.assets,
+ // Include cron triggers in the output
+ crons: props.crons,
+ // phantom property
+ Env: undefined!,
+ } as unknown as Worker);
+ }
+
+      const sharedOptions: Omit<MiniflareWorkerOptions, "script"> = {
+ name: workerName,
+ compatibilityDate,
+ compatibilityFlags,
+ bindings: props.bindings ?? ({} as B),
+ port: typeof props.dev === "object" ? props.dev.port : undefined,
+ };
+
+ let url: string;
+
+ // If entrypoint is provided, set up hot reloading with esbuild context
+ if (props.entrypoint) {
+        const startPromise = new DeferredPromise<string>();
+
+ await createWorkerDevContext(
+ workerName,
+ {
+ ...props,
+ entrypoint: props.entrypoint,
+ compatibilityDate,
+ compatibilityFlags,
+ },
+ {
+ onBuildStart: () => {
+ logger.task("miniflare-server", {
+ message: `${startPromise.status === "pending" ? "Building" : "Rebuilding"}`,
+ status: "pending",
+ resource: id,
+ prefix: "build",
+ prefixColor: "cyanBright",
+ });
+ },
+ onBuildEnd: async (newScript) => {
+ try {
+ // Hot reload callback - update the miniflare worker
+ const server = await miniflareServer.push({
+ ...sharedOptions,
+ script: newScript,
+ });
+ if (startPromise.status === "pending") {
+ logger.task("miniflare-server", {
+ message: `Started Miniflare server at ${server.url}`,
+ status: "success",
+ resource: id,
+ prefix: "ready",
+ prefixColor: "cyanBright",
+ });
+ startPromise.resolve(server.url);
+ } else {
+ logger.task("miniflare-server", {
+ message: `Updated Miniflare server at ${server.url}`,
+ status: "success",
+ resource: id,
+ prefix: "reload",
+ prefixColor: "cyanBright",
+ });
+ }
+ } catch (error) {
+ if (startPromise.status === "pending") {
+ logger.error(error);
+ logger.task("miniflare-server", {
+ message: "Failed to start Miniflare server",
+ status: "failure",
+ resource: id,
+ prefix: "error",
+ prefixColor: "redBright",
+ });
+ startPromise.reject(
+ new Error(
+ `Failed to start Miniflare server for worker "${workerName}"`,
+ { cause: error },
+ ),
+ );
+ } else {
+ logger.task("miniflare-server", {
+ message: "Failed to update Miniflare server",
+ status: "failure",
+ resource: id,
+ prefix: "error",
+ prefixColor: "redBright",
+ });
+ logger.error(error);
+ }
+ }
+ },
+ onBuildError: (errors) => {
+ if (startPromise.status === "pending") {
+ logger.task("miniflare-server", {
+ message: "Failed to build worker",
+ status: "failure",
+ resource: id,
+ prefix: "error",
+ prefixColor: "redBright",
+ });
+ startPromise.reject(errors);
+ } else {
+ logger.task("miniflare-server", {
+ message: "Failed to rebuild worker",
+ status: "failure",
+ resource: id,
+ prefix: "error",
+ prefixColor: "redBright",
+ });
+ logger.error(errors);
+ }
+ },
+ },
+ );
+
+ url = await startPromise.value;
+ } else {
+ // Fallback to one-time bundling for inline scripts
+ const scriptContent =
+ props.script ??
+ (await bundleWorkerScript({
+ name: workerName,
+ ...props,
+ compatibilityDate,
+ compatibilityFlags,
+ }));
+ const server = await miniflareServer.push({
+ ...sharedOptions,
+ script:
+ typeof scriptContent === "string"
+ ? scriptContent
+ : scriptContent[props.entrypoint!].toString(),
+ });
+ url = server.url;
+ }
+
+ return this({
+ type: "service",
+ id,
+ entrypoint: props.entrypoint,
+ name: workerName,
+ compatibilityDate,
+ compatibilityFlags,
+ format: props.format || "esm", // Include format in the output
+ bindings: props.bindings ?? ({} as B),
+ env: props.env,
+ observability: props.observability,
+ createdAt: now,
+ updatedAt: now,
+ eventSources: props.eventSources,
+ url,
+ dev: props.dev,
+ // Include assets configuration in the output
+ assets: props.assets,
+ // Include cron triggers in the output
+ crons: props.crons,
+ // phantom property
+ Env: undefined!,
+ } as unknown as Worker);
+ }
+
+ // Create Cloudflare API client with automatic account discovery
+ const api = await createCloudflareApi(props);
+
+ const uploadWorkerScript = async (props: WorkerProps) => {
// Get the script content - either from props.script, or by bundling
const scriptBundle =
props.script ??
@@ -913,6 +1179,7 @@ export const _Worker = Resource(
// Find any assets bindings
const assetsBindings: { name: string; assets: Assets }[] = [];
const workflowsBindings: Workflow[] = [];
+ const containersBindings: Container[] = [];
if (props.bindings) {
for (const [bindingName, binding] of Object.entries(props.bindings)) {
@@ -921,6 +1188,8 @@ export const _Worker = Resource(
assetsBindings.push({ name: bindingName, assets: binding });
} else if (binding.type === "workflow") {
workflowsBindings.push(binding);
+ } else if (binding.type === "container") {
+ containersBindings.push(binding);
}
}
}
@@ -949,34 +1218,18 @@ export const _Worker = Resource(
});
}
- // Prepare metadata with bindings
- const scriptMetadata = await prepareWorkerMetadata(
- this,
- oldBindings,
- oldTags,
- {
- ...props,
- compatibilityDate,
- compatibilityFlags,
- workerName,
- },
- assetUploadResult,
- );
-
// Deploy worker (either as version or live worker)
- const versionResult = await putWorker(
- api,
+ const versionResult = await putWorker(api, {
+ ...props,
workerName,
scriptBundle,
- scriptMetadata,
dispatchNamespace,
- props.version
- ? {
- versionLabel: props.version,
- message: `Version ${props.version}`,
- }
- : undefined,
- );
+ version: props.version,
+ compatibilityDate,
+ compatibilityFlags,
+ assetUploadResult,
+ });
for (const workflow of workflowsBindings) {
if (
@@ -991,6 +1244,47 @@ export const _Worker = Resource(
}
}
+ for (const container of containersBindings) {
+ async function findNamespaceId() {
+ const response = await api.get(
+ `/accounts/${api.accountId}/workers/scripts/${workerName}/versions/${versionResult.deploymentId}`,
+ );
+ const result = (await response.json()) as {
+ result: {
+ resources: {
+ bindings: WorkerBindingSpec[];
+ };
+ };
+ };
+ const namespaceId = result.result.resources.bindings.find(
+ (binding): binding is WorkerBindingDurableObjectNamespace =>
+ binding.type === "durable_object_namespace" &&
+ binding.class_name === container.className,
+ )?.namespace_id;
+ if (!namespaceId) {
+ throw new Error(
+ `Namespace ID not found for container ${container.id}`,
+ );
+ }
+ return namespaceId;
+ }
+ await ContainerApplication(container.id, {
+ accountId: props.accountId,
+ apiKey: props.apiKey,
+ apiToken: props.apiToken,
+ baseUrl: props.baseUrl,
+ email: props.email,
+ image: container.image,
+ name: container.id,
+ instanceType: container.instanceType,
+ observability: container.observability,
+ durableObjects: {
+ namespaceId: await findNamespaceId(),
+ },
+ schedulingPolicy: container.schedulingPolicy,
+ });
+ }
+
await Promise.all(
props.eventSources?.map((eventSource) => {
if (isQueue(eventSource) || isQueueEventSource(eventSource)) {
@@ -1018,7 +1312,9 @@ export const _Worker = Resource(
if (props.version) {
// For versions, use the preview URL if available
workerUrl = versionResult?.previewUrl;
- } else {
+ } else if (!props.namespace) {
+ // Workers deployed to a dispatch namespace don't get a workers.dev URL
// For regular workers, use the normal URL configuration
workerUrl = await configureURL(
this,
@@ -1048,35 +1344,22 @@ export const _Worker = Resource(
}
}
- return { scriptBundle, scriptMetadata, workerUrl, now, versionResult };
+ return { scriptBundle, workerUrl, now, versionResult };
};
if (this.phase === "delete") {
// Delete any queue consumers attached to this worker first
await deleteQueueConsumers(api, workerName);
- // @ts-ignore
- await uploadWorkerScript({
- ...props,
- entrypoint: undefined,
- noBundle: false,
- script:
- props.format === "cjs"
- ? "addEventListener('fetch', event => { event.respondWith(new Response('hello world')); });"
- : "export default { fetch(request) { return new Response('hello world'); }, queue: () => {} }",
- bindings: {} as B,
- // we are writing a stub worker (to remove binding/event source dependencies)
- // queue consumers will no longer exist by this point
- eventSources: undefined,
- // stub worker doesn't need dispatch namespace
- namespace: undefined,
- });
-
await withExponentialBackoff(
() =>
- deleteWorker(this, api, {
- ...props,
+ deleteWorker(api, {
workerName,
+ namespace:
+ typeof props.namespace === "string"
+ ? props.namespace
+ : props.namespace?.namespaceId,
+ url: this.output.url,
}),
(err) =>
(err.status === 400 &&
@@ -1091,10 +1374,6 @@ export const _Worker = Resource(
return this.destroy();
}
- // Validate input - we need either script, entryPoint, or bundle
- if (!props.script && !props.entrypoint) {
- throw new Error("One of script or entrypoint must be provided");
- }
if (this.phase === "create") {
if (props.version) {
@@ -1107,34 +1386,76 @@ export const _Worker = Resource(
}
// We always "adopt" when publishing versions
} else if (!props.adopt) {
- await assertWorkerDoesNotExist(this, api, workerName);
+ await assertWorkerDoesNotExist(api, workerName);
}
}
- const { scriptMetadata, workerUrl, now } = await uploadWorkerScript(props);
+ const { workerUrl, now } = await uploadWorkerScript(props);
- // Create routes if provided and capture their outputs
- let createdRoutes: Route[] = [];
- if (props.routes && props.routes.length > 0) {
- // Validate for duplicate patterns
- const patterns = props.routes.map((route) => route.pattern);
- const duplicates = patterns.filter(
- (pattern, index) => patterns.indexOf(pattern) !== index,
+ function ensureNoDuplicates(name: string, items: string[]) {
+ const duplicates = items.filter(
+ (pattern, index) => items.indexOf(pattern) !== index,
);
if (duplicates.length > 0) {
- throw new Error(
- `Duplicate route patterns found: ${duplicates.join(", ")}`,
- );
+ throw new Error(`Duplicate ${name} found: ${duplicates.join(", ")}`);
}
+ }
+
+ let createdDomains: CustomDomain[] | undefined;
+ if (props.domains?.length) {
+ ensureNoDuplicates(
+ "Custom Domain",
+ props.domains.map((domain) =>
+ typeof domain === "string" ? domain : domain.domainName,
+ ),
+ );
+
+ createdDomains = await Promise.all(
+ props.domains.map(async (customDomain) => {
+ const domainName =
+ typeof customDomain === "string"
+ ? customDomain
+ : customDomain.domainName;
+ return await CustomDomain(domainName, {
+ workerName,
+ name: domainName,
+ zoneId:
+ typeof customDomain === "string"
+ ? undefined
+ : customDomain.zoneId,
+ adopt:
+ typeof customDomain === "string"
+ ? false
+ : (customDomain.adopt ?? props.adopt),
+ });
+ }),
+ );
+ }
+
+ // Create routes if provided and capture their outputs
+ let createdRoutes: Route[] | undefined;
+ if (props.routes && props.routes.length > 0) {
+ ensureNoDuplicates(
+ "Route",
+ props.routes.map((route) =>
+ typeof route === "string" ? route : route.pattern,
+ ),
+ );
// Create Route resources for each route and capture their outputs
createdRoutes = await Promise.all(
props.routes.map(async (routeConfig) => {
- return await Route(routeConfig.pattern, {
- pattern: routeConfig.pattern,
+ const pattern =
+ typeof routeConfig === "string" ? routeConfig : routeConfig.pattern;
+ return await Route(pattern, {
+ pattern,
script: workerName,
- zoneId: routeConfig.zoneId, // Route resource will handle inference if not provided
- adopt: routeConfig.adopt ?? false,
+ zoneId:
+ typeof routeConfig === "string" ? undefined : routeConfig.zoneId, // Route resource will handle inference if not provided
+ adopt:
+ typeof routeConfig === "string"
+ ? false
+ : (routeConfig.adopt ?? props.adopt),
accountId: props.accountId,
apiKey: props.apiKey,
apiToken: props.apiToken,
@@ -1181,17 +1502,20 @@ export const _Worker = Resource(
format: props.format || "esm", // Include format in the output
bindings: exportBindings(),
env: props.env,
- observability: scriptMetadata.observability,
+ observability: props.observability,
createdAt: now,
updatedAt: now,
eventSources: props.eventSources,
url: workerUrl,
+ dev: props.dev,
// Include assets configuration in the output
assets: props.assets,
// Include cron triggers in the output
crons: props.crons,
// Include the created routes in the output
- routes: createdRoutes.length > 0 ? createdRoutes : undefined,
+ routes: createdRoutes,
+ // Include the created domains in the output
+ domains: createdDomains,
// Include the dispatch namespace in the output
namespace: props.namespace,
// Include version information in the output
@@ -1202,16 +1526,21 @@ export const _Worker = Resource(
},
);
-export async function deleteWorker(
- ctx: Context>,
+export async function deleteWorker(
api: CloudflareApi,
- props: WorkerProps & { workerName: string },
+ props: {
+ workerName: string;
+ namespace?: string | DispatchNamespaceResource;
+ url?: string;
+ },
) {
const workerName = props.workerName;
// Delete worker
const deleteResponse = await api.delete(
- `/accounts/${api.accountId}/workers/scripts/${workerName}`,
+ props.namespace
+ ? `/accounts/${api.accountId}/workers/dispatch/namespaces/${props.namespace}/scripts/${workerName}?force=true`
+ : `/accounts/${api.accountId}/workers/scripts/${workerName}?force=true`,
);
// Check for success (2xx status code)
@@ -1220,7 +1549,7 @@ export async function deleteWorker(
}
// Disable the URL if it was enabled
- if (ctx.output?.url) {
+ if (props.url) {
try {
await api.post(
`/accounts/${api.accountId}/workers/scripts/${workerName}/subdomain`,
@@ -1238,22 +1567,82 @@ export async function deleteWorker(
return;
}
-interface PutWorkerOptions {
- versionLabel?: string;
- message?: string;
- dispatchNamespace?: string;
+function upsertDevCommand(props: {
+ id: string;
+ command: string;
+ cwd: string;
+ env: Record<string, string>;
+}) {
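+ // Kill any previously spawned dev command (tracked via a pid file under .alchemy) before starting a new one and recording its pid.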
+ const persistFile = path.join(process.cwd(), ".alchemy", `${props.id}.pid`);
+ if (existsSync(persistFile)) {
+ const pid = Number.parseInt(readFileSync(persistFile, "utf8"));
+ try {
+ // Actually kill the process if it's alive
+ process.kill(pid, "SIGTERM");
+ } catch {
+ // ignore
+ }
+ try {
+ unlinkSync(persistFile);
+ } catch {
+ // ignore
+ }
+ }
+ const command = props.command.split(" ");
+ const proc = spawn(command[0], command.slice(1), {
+ env: {
+ ...process.env,
+ ...props.env,
+ ALCHEMY_CLOUDFLARE_PERSIST_PATH: path.join(
+ process.cwd(),
+ ".alchemy",
+ "miniflare",
+ ),
+ },
+ stdio: ["inherit", "inherit", "inherit"],
+ });
+ process.on("SIGINT", () => {
+ try {
+ unlinkSync(persistFile);
+ } catch {
+ // ignore
+ }
+ proc.kill("SIGINT");
+ process.exit(0);
+ });
+ if (proc.pid) {
+ mkdirSync(path.dirname(persistFile), { recursive: true });
+ writeFileSync(persistFile, proc.pid.toString());
+ }
}
-async function putWorkerInternal(
+type PutWorkerOptions = WorkerProps & {
+ dispatchNamespace?: string;
+ migrationTag?: string;
+ workerName: string;
+ scriptBundle: string | NoBundleResult;
+ version: string | undefined;
+ compatibilityDate: string;
+ compatibilityFlags: string[];
+ assetUploadResult: AssetUploadResult | undefined;
+};
+
+export async function putWorker(
api: CloudflareApi,
- workerName: string,
- scriptBundle: string | NoBundleResult,
- scriptMetadata: WorkerMetadata,
- options: PutWorkerOptions = {},
-): Promise<{ versionId?: string; previewUrl?: string }> {
+ props: PutWorkerOptions,
+): Promise<{ versionId?: string; previewUrl?: string; deploymentId: string }> {
+ const {
+ //
+ dispatchNamespace,
+ migrationTag,
+ workerName,
+ scriptBundle,
+ version,
+ } = props;
+ const scriptMetadata = await prepareWorkerMetadata(api, props);
+
return withExponentialBackoff(
async () => {
- const { versionLabel, message, dispatchNamespace } = options;
const scriptName =
scriptMetadata.main_module ?? scriptMetadata.body_part!;
@@ -1284,17 +1673,30 @@ async function putWorkerInternal(
}
// Prepare metadata - add version annotations if this is a version
- const finalMetadata = versionLabel
+ const finalMetadata = version
? {
...scriptMetadata,
// Exclude migrations for worker versions - they're not allowed
migrations: undefined,
annotations: {
- "workers/tag": versionLabel,
- ...(message && { "workers/message": message.substring(0, 100) }),
+ "workers/tag": version,
+ "workers/message": `Version ${version}`,
},
}
- : scriptMetadata;
+ : {
+ ...scriptMetadata,
+ migrations: scriptMetadata.migrations
+ ? {
+ ...scriptMetadata.migrations,
+ old_tag: migrationTag,
+ new_tag: bumpMigrationTagVersion(migrationTag),
+ }
+ : undefined,
+ };
+
+ if (process.env.DEBUG) {
+ console.log(`metadata(${scriptName}):`, finalMetadata);
+ }
// Add metadata as JSON
formData.append(
@@ -1307,8 +1709,12 @@ async function putWorkerInternal(
// Determine endpoint and HTTP method
let endpoint: string;
let method: "PUT" | "POST";
-
- if (versionLabel) {
+ if (version) {
+ if (dispatchNamespace) {
+ throw new Error(
+ "Worker Preview Versions are not supported in Workers for Platforms",
+ );
+ }
// Upload worker version using the versions API
endpoint = `/accounts/${api.accountId}/workers/scripts/${workerName}/versions`;
method = "POST";
@@ -1335,30 +1741,57 @@ async function putWorkerInternal(
// Check if the upload was successful
if (!uploadResponse.ok) {
- await handleApiError(
- uploadResponse,
- versionLabel ? "uploading worker version" : "uploading worker script",
- "worker",
- workerName,
- );
+ try {
+ return await handleApiError(
+ uploadResponse,
+ version ? "uploading worker version" : "uploading worker script",
+ "worker",
+ workerName,
+ );
+ } catch (error) {
+ if (error instanceof CloudflareApiError && error.status === 412) {
+ // This happens when adopting a Worker managed with Wrangler,
+ // because wrangler includes a migration tag and we do not.
+ // Currently, the only way to discover the old_tag is through the error message;
+ // "Get Worker Script Settings" is supposed to return it (according to the docs),
+ // but it doesn't work at runtime.
+ //
+ // So, we catch the error, parse out the tag, and retry.
+ if (error.message.includes("when expected tag is")) {
+ const newTag = error.message.match(
+ /when expected tag is ['"]?(v\d+)['"]?/,
+ )?.[1];
+ if (newTag) {
+ return await putWorker(api, {
+ ...props,
+ migrationTag: newTag,
+ });
+ }
+ } else {
+ throw error;
+ }
+ } else {
+ throw error;
+ }
+ }
}
-
- // Handle version response
- if (versionLabel) {
- const responseData = (await uploadResponse.json()) as {
- result: {
- id: string;
- number: number;
- metadata: {
- has_preview: boolean;
- };
- annotations?: {
- "workers/tag"?: string;
- };
+ const responseData = (await uploadResponse.json()) as {
+ result: {
+ id: string;
+ number: number;
+ metadata: {
+ has_preview: boolean;
+ };
+ annotations?: {
+ "workers/tag"?: string;
};
+ deployment_id: string;
};
- const result = responseData.result;
+ };
+ const result = responseData.result;
+ // Handle version response
+ if (props.version) {
// Get the account's workers.dev subdomain to construct preview URL
let previewUrl: string | undefined;
if (result.metadata?.has_preview) {
@@ -1385,10 +1818,13 @@ async function putWorkerInternal(
return {
versionId: result.id,
previewUrl,
+ deploymentId: result.deployment_id,
};
}
- return {};
+ return {
+ deploymentId: result.deployment_id,
+ };
},
(err) =>
err.status === 404 ||
@@ -1403,27 +1839,6 @@ async function putWorkerInternal(
);
}
-export async function putWorker(
- api: CloudflareApi,
- workerName: string,
- scriptBundle: string | NoBundleResult,
- scriptMetadata: WorkerMetadata,
- dispatchNamespace?: string,
- version?: { versionLabel: string; message?: string },
-): Promise<{ versionId?: string; previewUrl?: string }> {
- return await putWorkerInternal(
- api,
- workerName,
- scriptBundle,
- scriptMetadata,
- {
- dispatchNamespace,
- versionLabel: version?.versionLabel,
- message: version?.message,
- },
- );
-}
-
export async function checkWorkerExists(
api: CloudflareApi,
workerName: string,
@@ -1434,8 +1849,7 @@ export async function checkWorkerExists(
return response.status === 200;
}
-export async function assertWorkerDoesNotExist(
- ctx: Context>,
+export async function assertWorkerDoesNotExist(
api: CloudflareApi,
workerName: string,
) {
@@ -1454,14 +1868,6 @@ export async function assertWorkerDoesNotExist(
);
}
- if (
- metadata.default_environment?.script.tags.includes(
- `alchemy:id:${slugify(ctx.fqn)}`,
- )
- ) {
- return true;
- }
-
throw new Error(
`Worker with name '${workerName}' already exists. Please use a unique name.`,
);
@@ -1472,7 +1878,7 @@ export async function assertWorkerDoesNotExist(
}
export async function configureURL(
- ctx: Context>,
+ ctx: Context> | Context,
api: CloudflareApi,
workerName: string,
url: boolean,
@@ -1489,31 +1895,10 @@ export async function configureURL(
);
// Get the account's workers.dev subdomain
- const subdomainResponse = await api.get(
- `/accounts/${api.accountId}/workers/subdomain`,
- );
-
- if (!subdomainResponse.ok) {
- throw new Error(
- `Could not fetch workers.dev subdomain: ${subdomainResponse.status} ${subdomainResponse.statusText}`,
- );
- }
- const subdomainData: {
- result: {
- subdomain: string;
- };
- } = await subdomainResponse.json();
- const subdomain = subdomainData.result?.subdomain;
+ const subdomain = await getAccountSubdomain(api);
if (subdomain) {
workerUrl = `https://${workerName}.${subdomain}.workers.dev`;
-
- // Add a delay when the subdomain is first created.
- // This is to prevent an issue where a negative cache-hit
- // causes the subdomain to be unavailable for 30 seconds.
- if (ctx.phase === "create" || !ctx.output?.url) {
- await new Promise((resolve) => setTimeout(resolve, 3000));
- }
}
} else if (url === false && ctx.output?.url) {
// Explicitly disable URL if it was previously enabled
@@ -1551,51 +1936,6 @@ export async function getWorkerScriptMetadata(
return ((await response.json()) as any).result as WorkerScriptMetadata;
}
-async function getWorkerBindings(api: CloudflareApi, workerName: string) {
- // Fetch the bindings for a worker by calling the Cloudflare API endpoint:
- // GET /accounts/:account_id/workers/scripts/:script_name/bindings
- // See: https://developers.cloudflare.com/api/resources/workers/subresources/scripts/subresources/script_and_version_settings/methods/get/
- const response = await api.get(
- `/accounts/${api.accountId}/workers/scripts/${workerName}/settings`,
- );
- if (response.status === 404) {
- return undefined;
- }
- if (!response.ok) {
- throw new Error(
- `Error getting worker bindings: ${response.status} ${response.statusText}`,
- );
- }
- // The result is an object with a "result" property containing the bindings array
- const { result, success, errors } = (await response.json()) as {
- result: {
- bindings: WorkerBindingSpec[];
- compatibility_date: string;
- compatibility_flags: string[];
- [key: string]: any;
- };
- success: boolean;
- errors: Array<{
- code: number;
- message: string;
- documentation_url: string;
- [key: string]: any;
- }>;
- messages: Array<{
- code: number;
- message: string;
- documentation_url: string;
- [key: string]: any;
- }>;
- };
- if (!success) {
- throw new Error(
- `Error getting worker bindings: ${response.status} ${response.statusText}\nErrors:\n${errors.map((e) => `- [${e.code}] ${e.message} (${e.documentation_url})`).join("\n")}`,
- );
- }
- return result.bindings;
-}
-
/**
* Lists and deletes all queue consumers for a specific worker
* @param ctx Worker context containing eventSources
diff --git a/alchemy/src/cloudflare/worker/get-worker-template.ts b/alchemy/src/cloudflare/worker/get-worker-template.ts
new file mode 100644
index 000000000..d9062ce2c
--- /dev/null
+++ b/alchemy/src/cloudflare/worker/get-worker-template.ts
@@ -0,0 +1,14 @@
+import { readFile } from "node:fs/promises";
+import { dirname, join } from "node:path";
+import { fileURLToPath } from "node:url";
+
+export async function getWorkerTemplate(
+ name: "do-state-store" | "mixed-mode-proxy-worker",
+) {
+ const dir = dirname(fileURLToPath(import.meta.url));
+ const path = join(dir, "..", "..", "..", "workers", `${name}.js`);
+ const template = await readFile(path, "utf8");
+ return new File([template], `${name}.js`, {
+ type: "application/javascript+module",
+ });
+}
diff --git a/alchemy/src/cloudflare/worker/http-server.ts b/alchemy/src/cloudflare/worker/http-server.ts
new file mode 100644
index 000000000..8f2c74f59
--- /dev/null
+++ b/alchemy/src/cloudflare/worker/http-server.ts
@@ -0,0 +1,80 @@
+import http from "node:http";
+import type { AddressInfo } from "node:net";
+import { Readable } from "node:stream";
+
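+// Minimal Node HTTP server that bridges incoming requests to a web-standard fetch handler (Request in, Response out).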
+export class HTTPServer {
+ server: http.Server;
+ ready: Promise<void>;
+
+ constructor(options: {
+ port?: number;
+ fetch: (request: Request) => Promise<Response>;
+ }) {
+ const { promise, resolve } = Promise.withResolvers<void>();
+ this.ready = promise;
+ this.server = http
+ .createServer(async (req, res) => {
+ const response = await options.fetch(toWebRequest(req));
+ await writeNodeResponse(res, response);
+ })
+ .listen(options.port, resolve);
+ }
+
+ get port() {
+ return (this.server.address() as AddressInfo).port;
+ }
+
+ get hostname() {
+ const address = (this.server.address() as AddressInfo)?.address;
+ if (address === "::") {
+ return "localhost";
+ }
+ return address;
+ }
+
+ get url() {
+ return `http://${this.hostname}:${this.port}`;
+ }
+
+ stop() {
+ return new Promise((resolve, reject) => {
+ this.server.close((err) => {
+ if (err) {
+ reject(err);
+ } else {
+ resolve(undefined);
+ }
+ });
+ });
+ }
+}
+
+function toWebRequest(request: http.IncomingMessage): Request {
+ const method = request.method ?? "GET";
+ return new Request(`http://${request.headers.host}${request.url ?? "/"}`, {
+ method,
+ headers: request.headers as Record<string, string>,
+ body:
+ ["GET", "HEAD", "OPTIONS"].includes(method) || !request.readable
+ ? undefined
+ : (Readable.toWeb(request) as unknown as BodyInit),
+ });
+}
+
+async function writeNodeResponse(res: http.ServerResponse, response: Response) {
+ res.statusCode = response.status;
+ response.headers.forEach((value, key) => {
+ res.setHeader(key, value);
+ });
+ await response.body?.pipeTo(
+ new WritableStream({
+ write(chunk) {
+ res.write(chunk);
+ },
+ close() {
+ res.end();
+ },
+ }),
+ );
+ res.end();
+}
diff --git a/alchemy/src/cloudflare/worker/miniflare-worker-options.ts b/alchemy/src/cloudflare/worker/miniflare-worker-options.ts
new file mode 100644
index 000000000..af41d1e7b
--- /dev/null
+++ b/alchemy/src/cloudflare/worker/miniflare-worker-options.ts
@@ -0,0 +1,559 @@
+import {
+ kCurrentWorker,
+ type RemoteProxyConnectionString,
+ type WorkerOptions,
+} from "miniflare";
+import assert from "node:assert";
+import { assertNever } from "../../util/assert-never.ts";
+import { logger } from "../../util/logger.ts";
+import { Self, type Binding, type WorkerBindingSpec } from "../bindings.ts";
+import type { WorkerProps } from "../worker.ts";
+
+export type MiniflareWorkerOptions = Pick<
+ WorkerProps,
+ | "bindings"
+ | "eventSources"
+ | "compatibilityDate"
+ | "compatibilityFlags"
+ | "format"
+> & {
+ name: string;
+ script: string;
+ port?: number;
+};
+
+type BindingType = Exclude<Binding, string | Self>["type"];
+
+const REMOTE_ONLY_BINDING_TYPES = [
+ "ai",
+ "ai_gateway",
+ "browser",
+ "dispatch_namespace",
+ "vectorize",
+] satisfies BindingType[];
+const REMOTE_OPTIONAL_BINDING_TYPES = [
+ "d1",
+ // "durable_object_namespace", This is supported in Miniflare but needs some wrangling to make it work with a remote proxy.
+ "images",
+ "kv_namespace",
+ "r2_bucket",
+ "queue",
+ "service",
+ // "workflow", same thing
+] satisfies BindingType[];
+
+type RemoteBindingType =
+ | (typeof REMOTE_ONLY_BINDING_TYPES)[number]
+ | (typeof REMOTE_OPTIONAL_BINDING_TYPES)[number];
+
+type RemoteBinding = Extract<Binding, { type: RemoteBindingType }>;
+
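+// Collects bindings that must run remotely (remote-only types) or are explicitly configured with dev.remote, so they can be served through the mixed-mode proxy worker.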
+export function buildRemoteBindings(
+ input: Pick,
+) {
+ const bindings: WorkerBindingSpec[] = [];
+ for (const [name, binding] of Object.entries(input.bindings ?? {})) {
+ if (isRemoteOnlyBinding(binding)) {
+ bindings.push(buildRemoteBinding(name, binding));
+ } else if (isRemoteOptionalBinding(binding) && isRemoteEnabled(binding)) {
+ bindings.push(buildRemoteBinding(name, binding));
+ }
+ }
+ return bindings;
+}
+
+function isRemoteOptionalBinding(binding: Binding): binding is RemoteBinding {
+ return (
+ typeof binding !== "string" &&
+ binding !== Self &&
+ typeof binding === "object" &&
+ "type" in binding &&
+ REMOTE_OPTIONAL_BINDING_TYPES.includes(binding.type as any)
+ );
+}
+
+function isRemoteOnlyBinding(binding: Binding): binding is RemoteBinding {
+ return (
+ typeof binding !== "string" &&
+ binding !== Self &&
+ typeof binding === "object" &&
+ "type" in binding &&
+ REMOTE_ONLY_BINDING_TYPES.includes(binding.type as any)
+ );
+}
+
+function isRemoteEnabled(binding: RemoteBinding): boolean {
+ return (
+ "dev" in binding &&
+ typeof binding.dev === "object" &&
+ "remote" in binding.dev &&
+ !!binding.dev.remote
+ );
+}
+
+function buildRemoteBinding(
+ name: string,
+ binding: RemoteBinding,
+): WorkerBindingSpec & { raw?: true } {
+ switch (binding.type) {
+ case "ai": {
+ return {
+ type: "ai",
+ name,
+ raw: true,
+ };
+ }
+ case "ai_gateway": {
+ return {
+ type: "ai",
+ name,
+ raw: true,
+ };
+ }
+ case "browser": {
+ return {
+ type: "browser",
+ name,
+ raw: true,
+ };
+ }
+ case "d1": {
+ return {
+ type: "d1",
+ name,
+ id: binding.id,
+ raw: true,
+ };
+ }
+ case "dispatch_namespace": {
+ return {
+ type: "dispatch_namespace",
+ name,
+ namespace: binding.namespace,
+ raw: true,
+ };
+ }
+ // case "durable_object_namespace": {
+ // return {
+ // type: "durable_object_namespace",
+ // name,
+ // class_name: binding.className,
+ // script_name: binding.scriptName,
+ // raw: true,
+ // };
+ // }
+ case "images": {
+ return {
+ type: "images",
+ name,
+ raw: true,
+ };
+ }
+ case "kv_namespace": {
+ return {
+ type: "kv_namespace",
+ name,
+ namespace_id:
+ "namespaceId" in binding ? binding.namespaceId : binding.id,
+ raw: true,
+ };
+ }
+ case "queue": {
+ return {
+ type: "queue",
+ name,
+ queue_name: binding.name,
+ raw: true,
+ };
+ }
+ case "r2_bucket": {
+ return {
+ type: "r2_bucket",
+ name,
+ bucket_name: binding.name,
+ raw: true,
+ };
+ }
+ case "service": {
+ return {
+ type: "service",
+ name,
+ service: "service" in binding ? binding.service : binding.name,
+ environment: "environment" in binding ? binding.environment : undefined,
+ };
+ }
+ case "vectorize": {
+ return {
+ type: "vectorize",
+ name,
+ index_name: binding.name,
+ raw: true,
+ };
+ }
+ // case "workflow": {
+ // return {
+ // type: "workflow",
+ // name,
+ // workflow_name: binding.workflowName,
+ // class_name: binding.className,
+ // script_name: binding.scriptName,
+ // raw: true,
+ // };
+ // }
+ default: {
+ assertNever(binding);
+ }
+ }
+}
+
+export function buildMiniflareWorkerOptions({
+ name: workerName,
+ script,
+ bindings,
+ format,
+ eventSources,
+ compatibilityDate,
+ compatibilityFlags,
+ remoteProxyConnectionString,
+}: MiniflareWorkerOptions & {
+ remoteProxyConnectionString: RemoteProxyConnectionString | undefined;
+}): WorkerOptions {
+ const options: WorkerOptions = {
+ name: workerName,
+ script,
+ modules: format !== "cjs",
+ compatibilityDate,
+ compatibilityFlags,
+ unsafeDirectSockets: [{ entrypoint: undefined, proxy: true }],
+ // containerEngine: {
+ // localDocker: {
+ // socketPath: "/var/run/docker.sock",
+ // }
+ // }
+ };
+ for (const [name, binding] of Object.entries(bindings ?? {})) {
+ if (typeof binding === "string") {
+ options.bindings = {
+ ...options.bindings,
+ [name]: binding,
+ };
+ continue;
+ }
+ if (binding === Self) {
+ options.serviceBindings = {
+ ...((options.serviceBindings as Record<string, unknown> | undefined) ??
+ {}),
+ [name]: kCurrentWorker,
+ };
+ continue;
+ }
+ switch (binding.type) {
+ case "ai": {
+ assert(
+ remoteProxyConnectionString,
+ `Binding "${name}" of type "${binding.type}" requires a remoteProxyConnectionString, but none was provided.`,
+ );
+ options.ai = {
+ binding: name,
+ remoteProxyConnectionString,
+ };
+ break;
+ }
+ case "ai_gateway": {
+ assert(
+ remoteProxyConnectionString,
+ `Binding "${name}" of type "${binding.type}" requires a remoteProxyConnectionString, but none was provided.`,
+ );
+ options.ai = {
+ binding: name,
+ remoteProxyConnectionString,
+ };
+ break;
+ }
+ case "analytics_engine": {
+ (options.analyticsEngineDatasets ??= {})[name] = {
+ dataset: binding.dataset,
+ };
+ break;
+ }
+ case "assets": {
+ options.assets = {
+ binding: name,
+ directory: binding.path,
+ };
+ break;
+ }
+ case "browser": {
+ assert(
+ remoteProxyConnectionString,
+ `Binding "${name}" of type "${binding.type}" requires a remoteProxyConnectionString, but none was provided.`,
+ );
+ options.browserRendering = {
+ binding: name,
+ remoteProxyConnectionString,
+ };
+ break;
+ }
+ case "d1": {
+ (
+ (options.d1Databases ??= {}) as Record<
+ string,
+ {
+ id: string;
+ remoteProxyConnectionString?: RemoteProxyConnectionString;
+ }
+ >
+ )[name] = {
+ id: binding.id,
+ remoteProxyConnectionString: binding.dev?.remote
+ ? remoteProxyConnectionString
+ : undefined,
+ };
+ break;
+ }
+ case "dispatch_namespace": {
+ assert(
+ remoteProxyConnectionString,
+ `Binding "${name}" of type "${binding.type}" requires a remoteProxyConnectionString, but none was provided.`,
+ );
+ (options.dispatchNamespaces ??= {})[name] = {
+ namespace: binding.namespace,
+ remoteProxyConnectionString,
+ };
+ break;
+ }
+ case "durable_object_namespace": {
+ (options.durableObjects ??= {})[name] = {
+ className: binding.className,
+ scriptName: binding.scriptName,
+ useSQLite: binding.sqlite,
+ // namespaceId: binding.namespaceId,
+ // unsafeUniqueKey?: string | typeof kUnsafeEphemeralUniqueKey | undefined;
+ // unsafePreventEviction?: boolean | undefined;
+ // remoteProxyConnectionString: binding.local
+ // ? undefined
+ // : remoteProxyConnectionString,
+ };
+ if (!binding.scriptName || binding.scriptName === workerName) {
+ options.unsafeDirectSockets!.push({
+ entrypoint: binding.className,
+ proxy: true,
+ });
+ }
+ break;
+ }
+ case "hyperdrive": {
+ if ("access_client_id" in binding.origin) {
+ throw new Error(
+ `Hyperdrive with Cloudflare Access is not supported for locally emulated workers. Worker "${workerName}" is locally emulated but is bound to Hyperdrive "${name}", which has Cloudflare Access enabled.`,
+ );
+ }
+ logger.warnOnce(
+ `Hyperdrive bindings in locally emulated workers are experimental and may not work as expected. Worker "${workerName}" is locally emulated and bound to Hyperdrive "${name}".`,
+ );
+ const {
+ scheme = "postgres",
+ port = 5432,
+ password,
+ database,
+ host,
+ user,
+ } = binding.origin;
+ const connectionString = new URL(
+ `${scheme}://${user}:${password.unencrypted}@${host}:${port}/${database}?sslmode=${binding.mtls?.sslmode ?? "verify-full"}`,
+ );
+ (options.bindings ??= {})[name] = {
+ connectionString: connectionString.toString(),
+ database,
+ host,
+ password: password.unencrypted,
+ port,
+ scheme,
+ user,
+ };
+ break;
+ }
+ case "images": {
+ options.images = {
+ binding: name,
+ remoteProxyConnectionString: binding.dev?.remote
+ ? remoteProxyConnectionString
+ : undefined,
+ };
+ break;
+ }
+ case "json": {
+ (options.bindings ??= {})[name] = binding.json;
+ break;
+ }
+ case "kv_namespace": {
+ const normalized =
+ "id" in binding
+ ? { id: binding.id }
+ : { id: binding.namespaceId, dev: binding.dev };
+ (
+ (options.kvNamespaces ??= {}) as Record<
+ string,
+ {
+ id: string;
+ remoteProxyConnectionString?: RemoteProxyConnectionString;
+ }
+ >
+ )[name] = {
+ id: normalized.id,
+ remoteProxyConnectionString: normalized.dev?.remote
+ ? remoteProxyConnectionString
+ : undefined,
+ };
+ break;
+ }
+ case "pipeline": {
+ ((options.pipelines ??= {}) as Record<string, string>)[name] =
+ binding.id;
+ break;
+ }
+ case "queue": {
+ (
+ (options.queueProducers ??= {}) as Record<
+ string,
+ {
+ queueName: string;
+ deliveryDelay?: number;
+ remoteProxyConnectionString?: RemoteProxyConnectionString;
+ }
+ >
+ )[name] = {
+ queueName: binding.name,
+ deliveryDelay: binding.settings?.deliveryDelay,
+ remoteProxyConnectionString: binding.dev?.remote
+ ? remoteProxyConnectionString
+ : undefined,
+ };
+ break;
+ }
+ case "r2_bucket": {
+ (
+ (options.r2Buckets ??= {}) as Record<
+ string,
+ {
+ id: string;
+ remoteProxyConnectionString?: RemoteProxyConnectionString;
+ }
+ >
+ )[name] = {
+ id: binding.name,
+ remoteProxyConnectionString: binding.dev?.remote
+ ? remoteProxyConnectionString
+ : undefined,
+ };
+ break;
+ }
+ case "secret": {
+ (options.bindings ??= {})[name] = binding.unencrypted;
+ break;
+ }
+ case "secrets_store_secret": {
+ options.secretsStoreSecrets = {
+ ...((options.secretsStoreSecrets as
+ | Record<string, { store_id: string; secret_name: string }>
+ | undefined) ?? {}),
+ [name]: {
+ store_id: binding.storeId,
+ secret_name: binding.name,
+ },
+ };
+ break;
+ }
+ case "secret_key": {
+ throw new Error(
+ `Secret key bindings are not supported for locally emulated workers. Worker "${workerName}" is locally emulated but is bound to secret key "${name}".`,
+ );
+ }
+ case "service": {
+ if (!("id" in binding)) {
+ throw new Error(
+ `Service bindings must have an id. Worker "${workerName}" is bound to service "${name}" but does not have an id.`,
+ );
+ }
+ if (isRemoteEnabled(binding)) {
+ (options.serviceBindings ??= {})[name] = {
+ name: binding.name,
+ remoteProxyConnectionString,
+ };
+ } else {
+ (options.serviceBindings ??= {})[name] = binding.name;
+ }
+ break;
+ }
+ case "vectorize": {
+ assert(
+ remoteProxyConnectionString,
+ `Binding "${name}" of type "${binding.type}" requires a remoteProxyConnectionString, but none was provided.`,
+ );
+ (options.vectorize ??= {})[name] = {
+ index_name: binding.name,
+ remoteProxyConnectionString,
+ };
+ break;
+ }
+ case "version_metadata": {
+ // This is how Wrangler does it:
+ // https://github.com/cloudflare/workers-sdk/blob/70ba9fbf905a9ba5fe158d0bc8d48f6bf31712a2/packages/wrangler/src/dev/miniflare.ts#L881
+ (options.bindings ??= {})[name] = {
+ id: crypto.randomUUID(),
+ tag: "",
+ timestamp: "0",
+ };
+ break;
+ }
+ case "workflow": {
+ (options.workflows ??= {})[name] = {
+ name: binding.workflowName,
+ className: binding.className,
+ scriptName: binding.scriptName,
+ // remoteProxyConnectionString:
+ // "local" in binding && binding.local
+ // ? undefined
+ // : remoteProxyConnectionString,
+ };
+ break;
+ }
+ case "container": {
+ (options.durableObjects ??= {})[name] = {
+ className: binding.className,
+ scriptName: binding.scriptName,
+ useSQLite: binding.sqlite,
+ container: {
+ imageName: binding.image.name,
+ },
+ // namespaceId: binding.namespaceId,
+ // unsafeUniqueKey?: string | typeof kUnsafeEphemeralUniqueKey | undefined;
+ // unsafePreventEviction?: boolean | undefined;
+ // remoteProxyConnectionString: binding.local
+ // ? undefined
+ // : remoteProxyConnectionString,
+ };
+ if (!binding.scriptName || binding.scriptName === workerName) {
+ options.unsafeDirectSockets!.push({
+ entrypoint: binding.className,
+ proxy: true,
+ });
+ }
+ break;
+ }
+ default: {
+ assertNever(binding);
+ }
+ }
+ }
+ for (const eventSource of eventSources ?? []) {
+ const queue = "queue" in eventSource ? eventSource.queue : eventSource;
+ if (queue.dev?.remote !== false) {
+ throw new Error(
+ `Locally emulated workers cannot consume remote queues. Worker "${workerName}" is locally emulated but is consuming remote queue "${queue.name}".`,
+ );
+ }
+ ((options.queueConsumers ??= []) as string[]).push(queue.name);
+ }
+ return options;
+}
diff --git a/alchemy/src/cloudflare/worker/miniflare.ts b/alchemy/src/cloudflare/worker/miniflare.ts
new file mode 100644
index 000000000..f068e17f3
--- /dev/null
+++ b/alchemy/src/cloudflare/worker/miniflare.ts
@@ -0,0 +1,199 @@
+import type {
+ Miniflare,
+ MiniflareOptions,
+ RemoteProxyConnectionString,
+ WorkerOptions,
+} from "miniflare";
+import path from "node:path";
+import { findOpenPort } from "../../util/find-open-port.ts";
+import { logger } from "../../util/logger.ts";
+import { HTTPServer } from "./http-server.ts";
+import {
+ buildMiniflareWorkerOptions,
+ buildRemoteBindings,
+ type MiniflareWorkerOptions,
+} from "./miniflare-worker-options.ts";
+import { createMixedModeProxy, type MixedModeProxy } from "./mixed-mode.ts";
+
+class MiniflareServer {
+ miniflare?: Miniflare;
+ workers = new Map<string, WorkerOptions>();
+ servers = new Map<string, HTTPServer>();
+ mixedModeProxies = new Map<string, MixedModeProxy>();
+
+ stream = new WritableStream<{
+ worker: MiniflareWorkerOptions;
+ promise: PromiseWithResolvers<HTTPServer>;
+ }>({
+ write: async ({ worker, promise }) => {
+ try {
+ const server = await this.set(worker);
+ promise.resolve(server);
+ } catch (error) {
+ promise.reject(error);
+ }
+ },
+ close: async () => {
+ await this.dispose();
+ },
+ });
+ writer = this.stream.getWriter();
+
+ async push(worker: MiniflareWorkerOptions) {
+ const promise = Promise.withResolvers<HTTPServer>();
+ const [, server] = await Promise.all([
+ this.writer.write({ worker, promise }),
+ promise.promise,
+ ]);
+ return server;
+ }
+
+ async close() {
+ await this.writer.close();
+ }
+
+ private async set(worker: MiniflareWorkerOptions) {
+ this.workers.set(
+ worker.name as string,
+ buildMiniflareWorkerOptions({
+ ...worker,
+ remoteProxyConnectionString:
+ await this.maybeCreateMixedModeProxy(worker),
+ }),
+ );
+ if (this.miniflare) {
+ await this.miniflare.setOptions(await this.miniflareOptions());
+ } else {
+ const { Miniflare } = await import("miniflare").catch(() => {
+ throw new Error(
+ "Miniflare is not installed, but is required in local mode for Workers. Please run `npm install miniflare`.",
+ );
+ });
+
+ // Miniflare intercepts SIGINT and exits with 130, which is not a failure.
+ // No one likes to see a non-zero exit code when they Ctrl+C, so here's our workaround.
+ process.on("exit", (code) => {
+ if (code === 130) {
+ process.exit(0);
+ }
+ });
+
+ this.miniflare = new Miniflare(await this.miniflareOptions());
+ await this.miniflare.ready;
+ }
+ const existing = this.servers.get(worker.name);
+ if (existing) {
+ return existing;
+ }
+ const server = new HTTPServer({
+ port: worker.port ?? (await findOpenPort()),
+ fetch: this.createRequestHandler(worker.name as string),
+ });
+ this.servers.set(worker.name, server);
+ await server.ready;
+ return server;
+ }
+
+ private async dispose() {
+ await Promise.all([
+ this.miniflare?.dispose(),
+ ...Array.from(this.servers.values()).map((server) => server.stop()),
+ ...Array.from(this.mixedModeProxies.values()).map((proxy) =>
+ proxy.server.stop(),
+ ),
+ ]);
+ this.miniflare = undefined;
+ this.workers.clear();
+ this.servers.clear();
+ }
+
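+ // Reuses this worker's existing mixed-mode proxy when its bindings are still required; otherwise provisions a new preview proxy worker.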
+ private async maybeCreateMixedModeProxy(
+ worker: MiniflareWorkerOptions,
+ ): Promise<RemoteProxyConnectionString | undefined> {
+ const bindings = buildRemoteBindings(worker);
+ if (bindings.length === 0) {
+ return undefined;
+ }
+ const existing = this.mixedModeProxies.get(worker.name);
+ if (
+ existing?.bindings.every((b) =>
+ bindings.find((b2) => b2.name === b.name && b2.type === b.type),
+ )
+ ) {
+ return existing.connectionString;
+ }
+ const proxy = await createMixedModeProxy({
+ name: `mixed-mode-proxy-${crypto.randomUUID()}`,
+ bindings,
+ });
+ this.mixedModeProxies.set(worker.name, proxy);
+ return proxy.connectionString;
+ }
+
+ private createRequestHandler(name: string) {
+ return async (req: Request) => {
+ try {
+ if (!this.miniflare) {
+ return new Response(
+ "[Alchemy] Miniflare is not initialized. Please try again.",
+ {
+ status: 503,
+ },
+ );
+ }
+ const miniflare = await this.miniflare?.getWorker(name);
+ if (!miniflare) {
+ return new Response(
+ `[Alchemy] Cannot find worker "${name}". Please try again.`,
+ {
+ status: 503,
+ },
+ );
+ }
+ const res = await miniflare.fetch(req.url, {
+ method: req.method,
+ headers: req.headers as any,
+ body: req.body as any,
+ redirect: "manual",
+ });
+ return res as unknown as Response;
+ } catch (error) {
+ logger.error(error);
+ return new Response(
+ `[Alchemy] Internal server error: ${String(error)}`,
+ {
+ status: 500,
+ },
+ );
+ }
+ };
+ }
+
+ private async miniflareOptions(): Promise<MiniflareOptions> {
+ const { getDefaultDevRegistryPath } = await import("miniflare");
+ return {
+ workers: Array.from(this.workers.values()),
+ defaultPersistRoot: path.join(process.cwd(), ".alchemy/miniflare"),
+ unsafeDevRegistryPath: getDefaultDevRegistryPath(),
+ analyticsEngineDatasetsPersist: true,
+ cachePersist: true,
+ d1Persist: true,
+ durableObjectsPersist: true,
+ kvPersist: true,
+ r2Persist: true,
+ secretsStorePersist: true,
+ workflowsPersist: true,
+ };
+ }
+}
+
+declare global {
+ var _ALCHEMY_MINIFLARE_SERVER: MiniflareServer | undefined;
+}
+
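+// Lazily creates a single MiniflareServer and stores it on globalThis so every module instance shares the same server.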
+export const miniflareServer = new Proxy({} as MiniflareServer, {
+ get: (_, prop: keyof MiniflareServer) => {
+ globalThis._ALCHEMY_MINIFLARE_SERVER ??= new MiniflareServer();
+ return globalThis._ALCHEMY_MINIFLARE_SERVER[prop];
+ },
+});
diff --git a/alchemy/src/cloudflare/worker/mixed-mode.ts b/alchemy/src/cloudflare/worker/mixed-mode.ts
new file mode 100644
index 000000000..79544cef7
--- /dev/null
+++ b/alchemy/src/cloudflare/worker/mixed-mode.ts
@@ -0,0 +1,220 @@
+import type { RemoteProxyConnectionString } from "miniflare";
+import { createCloudflareApi, type CloudflareApi } from "../api.ts";
+import type { WorkerBindingSpec } from "../bindings.ts";
+import type { CloudflareApiResponse } from "../types.ts";
+import type { WorkerMetadata } from "../worker-metadata.ts";
+import { getWorkerTemplate } from "./get-worker-template.ts";
+import { HTTPServer } from "./http-server.ts";
+import { getAccountSubdomain } from "./subdomain.ts";
+
+type WranglerSessionConfig =
+ | {
+ workers_dev: boolean;
+ minimal_mode: boolean;
+ }
+ | {
+ routes: string[];
+ minimal_mode: boolean;
+ };
+
+interface WorkersPreviewSession {
+ inspector_websocket: string;
+ prewarm: string;
+ token: string;
+}
+
+export async function createMixedModeProxy(input: {
+ name: string;
+ bindings: WorkerBindingSpec[];
+}) {
+ const api = await createCloudflareApi();
+ const script = await getWorkerTemplate("mixed-mode-proxy-worker");
+ const [token, subdomain] = await Promise.all([
+ createWorkersPreviewToken(api, {
+ name: input.name,
+ metadata: {
+ main_module: script.name,
+ compatibility_date: "2025-06-16",
+ bindings: input.bindings,
+ observability: {
+ enabled: false,
+ },
+ },
+ files: [script],
+ session: {
+ workers_dev: true,
+ minimal_mode: true,
+ },
+ }),
+ getAccountSubdomain(api),
+ ]);
+ return new MixedModeProxy(
+ `https://${input.name}.${subdomain}.workers.dev`,
+ token,
+ input.bindings,
+ );
+}
+
+const DEBUG: boolean = false;
+
+export class MixedModeProxy {
+ server: HTTPServer;
+
+ constructor(
+ readonly url: string,
+ readonly token: string,
+ readonly bindings: WorkerBindingSpec[],
+ ) {
+ this.server = new HTTPServer({
+ fetch: this.fetch.bind(this),
+ });
+ }
+
+ get connectionString() {
+ const hostname =
+ this.server.hostname === "::" ? "localhost" : this.server.hostname;
+ return new URL(
+ `http://${hostname}:${this.server.port}`,
+ ) as RemoteProxyConnectionString;
+ }
+
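+ // Forwards a local binding request to the deployed preview worker, attaching the preview token and rewriting the host header.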
+ async fetch(req: Request) {
+ const origin = new URL(req.url);
+ const url = new URL(origin.pathname, this.url);
+ url.search = origin.search;
+ url.hash = origin.hash;
+
+ const headers = new Headers(req.headers);
+ headers.set("cf-workers-preview-token", this.token);
+ headers.set("host", new URL(this.url).hostname);
+ headers.delete("cf-connecting-ip");
+
+ const res = await fetch(url, {
+ method: req.method,
+ headers,
+ body: req.body,
+ redirect: "manual",
+ });
+
+ // Remove headers that are not supported by miniflare
+ const responseHeaders = new Headers(res.headers);
+ responseHeaders.delete("transfer-encoding");
+ responseHeaders.delete("content-encoding");
+
+ if (DEBUG) {
+ const clone = res.clone();
+ console.log({
+ request: {
+ url: url.toString(),
+ method: req.method,
+ headers,
+ body: req.body,
+ },
+ response: {
+ status: res.status,
+ headers: res.headers,
+ body: await res.text(),
+ },
+ });
+ return new Response(clone.body, {
+ status: clone.status,
+ headers: responseHeaders,
+ });
+ }
+
+ return new Response(res.body, {
+ status: res.status,
+ headers: responseHeaders,
+ });
+ }
+}
+
+async function createWorkersPreviewToken(
+ api: CloudflareApi,
+ input: {
+ name: string;
+ metadata: WorkerMetadata;
+ files: File[];
+ session: WranglerSessionConfig;
+ },
+) {
+ const session = await createWorkersPreviewSession(api);
+ const formData = new FormData();
+ formData.append("metadata", JSON.stringify(input.metadata));
+ for (const file of input.files) {
+ formData.append(file.name, file);
+ }
+ formData.append("wrangler-session-config", JSON.stringify(input.session));
+ const res = await api
+ .post(
+ `/accounts/${api.accountId}/workers/scripts/${input.name}/edge-preview`,
+ formData,
+ {
+ headers: {
+ "cf-preview-upload-config-token": session.token,
+ },
+ },
+ )
+ .then((res) =>
+ parseCloudflareResponse<{ preview_token: string }>(
+ res,
+ "Failed to create workers preview token",
+ ),
+ );
+ // Fire and forget prewarm call
+ // (see https://github.com/cloudflare/workers-sdk/blob/6c6afbd6072b96e78e67d3a863ed849c6aa49472/packages/wrangler/src/dev/create-worker-preview.ts#L338)
+ void prewarm(session.prewarm, res.preview_token);
+ return res.preview_token;
+}
+
+async function prewarm(url: string, previewToken: string) {
+ const res = await fetch(url, {
+ headers: {
+ "cf-workers-preview-token": previewToken,
+ },
+ });
+ if (!res.ok) {
+ console.error(`Failed to prewarm worker: ${res.status} ${res.statusText}`);
+ }
+}
+
+async function createWorkersPreviewSession(api: CloudflareApi) {
+ const { exchange_url } = await api
+ .get(`/accounts/${api.accountId}/workers/subdomain/edge-preview`)
+ .then((res) =>
+ parseCloudflareResponse<{
+ exchange_url: string;
+ token: string;
+ }>(res, "Failed to create workers preview session"),
+ );
+ return await fetch(exchange_url).then((res) =>
+ parseResponse<WorkersPreviewSession>(
+ res,
+ "Failed to create workers preview session",
+ ),
+ );
+}
+
+async function parseResponse<T>(res: Response, message: string): Promise<T> {
+ if (!res.ok) {
+ throw new Error(`${message} (${res.status} ${res.statusText})`);
+ }
+ const json: T = await res.json();
+ return json;
+}
+
+async function parseCloudflareResponse<T>(
+ res: Response,
+ message: string,
+): Promise<T> {
+ const json: CloudflareApiResponse<T> = await res.json();
+ if (!json.success) {
+ throw new Error(
+ `${message} (${res.status} ${res.statusText} - ${json.errors.map((e) => `${e.code}: ${e.message}`).join(", ")})`,
+ );
+ }
+ if (!json.result) {
+ throw new Error(`${message} (${res.status} ${res.statusText})`);
+ }
+ return json.result;
+}
diff --git a/alchemy/src/cloudflare/worker/subdomain.ts b/alchemy/src/cloudflare/worker/subdomain.ts
new file mode 100644
index 000000000..18cab925f
--- /dev/null
+++ b/alchemy/src/cloudflare/worker/subdomain.ts
@@ -0,0 +1,16 @@
+import { memoize } from "../../util/memoize.ts";
+import type { CloudflareApi } from "../api.ts";
+
+export const getAccountSubdomain = memoize(
+ async (api: CloudflareApi) => {
+ const res = await api.get(`/accounts/${api.accountId}/workers/subdomain`);
+ if (!res.ok) {
+ throw new Error(
+ `Failed to get account subdomain: ${res.status} ${res.statusText}`,
+ );
+ }
+ const json: { result?: { subdomain: string } } = await res.json();
+ return json.result?.subdomain;
+ },
+ (api) => api.accountId,
+);
diff --git a/alchemy/src/cloudflare/workflow.ts b/alchemy/src/cloudflare/workflow.ts
index 9e9bc5cb2..806c55b1e 100644
--- a/alchemy/src/cloudflare/workflow.ts
+++ b/alchemy/src/cloudflare/workflow.ts
@@ -25,6 +25,13 @@ export interface WorkflowProps {
* @default - bound worker script
*/
scriptName?: string;
+ dev?: {
+ /**
+ * Whether to run the workflow remotely instead of locally
+ * @default false
+ */
+ remote?: boolean;
+ };
}
export function isWorkflow(binding: Binding): binding is Workflow {
diff --git a/alchemy/src/cloudflare/wrangler.json.ts b/alchemy/src/cloudflare/wrangler.json.ts
index 04cb57262..8b37bb904 100644
--- a/alchemy/src/cloudflare/wrangler.json.ts
+++ b/alchemy/src/cloudflare/wrangler.json.ts
@@ -447,6 +447,9 @@ function processBindings(
binding: string;
namespace: string;
}[] = [];
+ const containers: {
+ class_name: string;
+ }[] = [];
for (const eventSource of eventSources ?? []) {
if (isQueueEventSource(eventSource)) {
@@ -619,6 +622,15 @@ function processBindings(
});
} else if (binding.type === "secret_key") {
// no-op
+ } else if (binding.type === "container") {
+ durableObjects.push({
+ name: bindingName,
+ class_name: binding.className,
+ script_name: binding.scriptName,
+ });
+ containers.push({
+ class_name: binding.className,
+ });
} else {
// biome-ignore lint/correctness/noVoidTypeReturn: it returns never
return assertNever(binding);
diff --git a/alchemy/src/cloudflare/zone.ts b/alchemy/src/cloudflare/zone.ts
index 869835fd7..19662839f 100644
--- a/alchemy/src/cloudflare/zone.ts
+++ b/alchemy/src/cloudflare/zone.ts
@@ -2,7 +2,11 @@ import type { Context } from "../context.ts";
import { Resource } from "../resource.ts";
import { logger } from "../util/logger.ts";
import { handleApiError } from "./api-error.ts";
-import { createCloudflareApi, type CloudflareApiOptions } from "./api.ts";
+import {
+ createCloudflareApi,
+ type CloudflareApi,
+ type CloudflareApiOptions,
+} from "./api.ts";
import type {
AlwaysUseHTTPSValue,
AutomaticHTTPSRewritesValue,
@@ -554,7 +558,7 @@ async function getZoneSettings(
*
* @example
* // Look up a zone by domain name
- * const zone = await getZoneByDomain("example.com");
+ * const zone = await getZoneByDomain(api, "example.com");
* if (zone) {
* console.log(`Zone ID: ${zone.id}`);
* console.log(`Nameservers: ${zone.nameservers.join(", ")}`);
@@ -562,17 +566,12 @@ async function getZoneSettings(
*
* @example
* // Look up a zone with custom API options
- * const zone = await getZoneByDomain("example.com", {
- * apiToken: myApiToken,
- * accountId: "my-account-id"
- * });
+ * const zone = await getZoneByDomain(api, "example.com");
*/
export async function getZoneByDomain(
+ api: CloudflareApi,
domainName: string,
- options: Partial = {},
): Promise {
- const api = await createCloudflareApi(options);
-
const response = await api.get(
`/zones?name=${encodeURIComponent(domainName)}`,
);
diff --git a/alchemy/src/context.ts b/alchemy/src/context.ts
index bf62d28a2..043a6981a 100644
--- a/alchemy/src/context.ts
+++ b/alchemy/src/context.ts
@@ -53,7 +53,7 @@ export interface BaseContext {
* Indicate that this resource is being replaced.
* This will cause the resource to be deleted at the end of the stack's CREATE phase.
*/
- replace(): void;
+ replace(): never;
/**
* Terminate the resource lifecycle handler and destroy the resource.
*
@@ -97,7 +97,7 @@ export function context<
seq: number;
props: Props;
state: State;
- replace: () => void;
+ replace: () => never;
}): Context {
type InternalSymbols =
| typeof ResourceID
diff --git a/alchemy/src/destroy.ts b/alchemy/src/destroy.ts
index 1fef30765..753fc01d1 100644
--- a/alchemy/src/destroy.ts
+++ b/alchemy/src/destroy.ts
@@ -6,10 +6,11 @@ import {
ResourceFQN,
ResourceID,
ResourceKind,
+ type ResourceProps,
ResourceScope,
ResourceSeq,
} from "./resource.ts";
-import { isScope, Scope } from "./scope.ts";
+import { isScope, type PendingDeletions, Scope } from "./scope.ts";
import { formatFQN } from "./util/cli.ts";
import { logger } from "./util/logger.ts";
@@ -18,6 +19,10 @@ export class DestroyedSignal extends Error {}
export interface DestroyOptions {
quiet?: boolean;
strategy?: "sequential" | "parallel";
+ replace?: {
+ props?: ResourceProps | undefined;
+ output?: Resource;
+ };
}
function isScopeArgs(a: any): a is [scope: Scope, options?: DestroyOptions] {
@@ -40,7 +45,8 @@ export async function destroy(
} satisfies DestroyOptions;
await scope.run(async () => {
- // destroy all active resources
+ // destroy all active and pending resources
+ await scope.destroyPendingDeletions();
await destroyAll(Array.from(scope.resources.values()), options);
// then detect orphans and destroy them
@@ -89,10 +95,12 @@ export async function destroy(
try {
if (!quiet) {
logger.task(instance[ResourceFQN], {
- prefix: "deleting",
- prefixColor: "redBright",
+ prefix: options?.replace ? "cleanup" : "deleting",
+ prefixColor: options?.replace ? "magenta" : "redBright",
resource: formatFQN(instance[ResourceFQN]),
- message: "Deleting Resource...",
+ message: options?.replace
+ ? "Cleaning Up Old Resource..."
+ : "Deleting Resource...",
});
}
@@ -109,7 +117,7 @@ export async function destroy(
id: instance[ResourceID],
fqn: instance[ResourceFQN],
seq: instance[ResourceSeq],
- props: state.props,
+ props: options?.replace?.props ?? state.props,
state,
replace: () => {
throw new Error("Cannot replace a resource that is being deleted");
@@ -127,10 +135,10 @@ export async function destroy(
parent: scope,
},
async (scope) => {
- nestedScope = scope;
+ nestedScope = options?.replace?.props == null ? scope : undefined;
return await Provider.handler.bind(ctx)(
instance[ResourceID],
- state.props!,
+ options?.replace?.props ?? state.props!,
);
},
);
@@ -146,14 +154,30 @@ export async function destroy(
await destroy(nestedScope, options);
}
- await scope.delete(instance[ResourceID]);
+ if (options?.replace == null) {
+ if (nestedScope) {
+ await destroy(nestedScope, options);
+ }
+ await scope.deleteResource(instance[ResourceID]);
+ } else {
+ let pendingDeletions =
+ await state.output[ResourceScope].get<PendingDeletions>(
+ "pendingDeletions",
+ );
+ pendingDeletions = pendingDeletions?.filter(
+ (deletion) => deletion.resource[ResourceID] !== instance[ResourceID],
+ );
+ await scope.set("pendingDeletions", pendingDeletions);
+ }
if (!quiet) {
logger.task(instance[ResourceFQN], {
- prefix: "deleted",
+ prefix: options?.replace ? "cleaned" : "deleted",
prefixColor: "greenBright",
resource: formatFQN(instance[ResourceFQN]),
- message: "Deleted Resource",
+ message: options?.replace
+ ? "Old Resource Cleanup Complete"
+ : "Deleted Resource",
status: "success",
});
}
@@ -165,11 +189,14 @@ export async function destroy(
export async function destroyAll(
resources: Resource[],
- options?: DestroyOptions,
+ options?: DestroyOptions & { force?: boolean },
) {
if (options?.strategy !== "parallel") {
const sorted = resources.sort((a, b) => b[ResourceSeq] - a[ResourceSeq]);
for (const resource of sorted) {
+ if (isScope(resource)) {
+ await resource.destroyPendingDeletions();
+ }
await destroy(resource, options);
}
} else {
diff --git a/alchemy/src/docker/api.ts b/alchemy/src/docker/api.ts
new file mode 100644
index 000000000..c7f3586aa
--- /dev/null
+++ b/alchemy/src/docker/api.ts
@@ -0,0 +1,439 @@
+import { spawn } from "node:child_process";
+import { exec } from "../os/exec.ts";
+
+/**
+ * Options for Docker API requests
+ */
+export interface DockerApiOptions {
+ /**
+ * Custom path to Docker binary
+ */
+ dockerPath?: string;
+}
+
+type VolumeInfo = {
+ CreatedAt: string;
+ Driver: string;
+ Labels: Record<string, string>;
+ Mountpoint: string;
+ Name: string;
+ Options: Record<string, string>;
+ Scope: string;
+};
+
+/**
+ * Docker API client that wraps Docker CLI commands
+ */
+export class DockerApi {
+ /** Path to Docker CLI */
+ readonly dockerPath: string;
+
+ /**
+ * Create a new Docker API client
+ *
+ * @param options Docker API options
+ */
+ constructor(options: DockerApiOptions = {}) {
+ this.dockerPath = options.dockerPath || "docker";
+ }
+
+ /**
+ * Run a Docker CLI command
+ *
+ * @param args Command arguments to pass to Docker CLI
+ * @returns Result of the command
+ */
+ async exec(args: string[]): Promise<{ stdout: string; stderr: string }> {
+ const command = `${this.dockerPath} ${args.join(" ")}`;
+ const result = (await exec(command, {
+ captureOutput: true,
+ shell: true,
+ env: process.env,
+ })) as { stdout: string; stderr: string };
+
+ return result;
+ }
+
+ /**
+ * Check if Docker daemon is running
+ *
+ * @returns True if Docker daemon is running
+ */
+ async isRunning(): Promise<boolean> {
+ try {
+ // Use a quick, lightweight command to test if Docker is running
+ await this.exec(["version", "--format", "{{.Server.Version}}"]);
+ return true;
+ } catch (error) {
+ console.log(
+ `Docker daemon not running: ${error instanceof Error ? error.message : String(error)}`,
+ );
+ return false;
+ }
+ }
+
+ /**
+ * Pull Docker image
+ *
+ * @param image Image name and tag
+ * @returns Result of the pull command
+ */
+ async pullImage(image: string): Promise<{ stdout: string; stderr: string }> {
+ return this.exec(["pull", image]);
+ }
+
+ /**
+ * Build Docker image
+ *
+ * @param path Path to Dockerfile directory
+ * @param tag Tag for the image
+ * @param buildArgs Build arguments
+ * @returns Result of the build command
+ */
+ async buildImage(
+ path: string,
+ tag: string,
+ buildArgs: Record<string, string> = {},
+ ): Promise<{ stdout: string; stderr: string }> {
+ const args = ["build", "-t", tag, path];
+
+ for (const [key, value] of Object.entries(buildArgs)) {
+ args.push("--build-arg", `${key}=${value}`);
+ }
+
+ return this.exec(args);
+ }
+
+ /**
+ * List Docker images
+ *
+ * @returns JSON string containing image list
+ */
+ async listImages(): Promise<string> {
+ const { stdout } = await this.exec(["images", "--format", "{{json .}}"]);
+ return stdout;
+ }
+
+ /**
+ * Create Docker container
+ *
+ * @param image Image name
+ * @param name Container name
+ * @param options Container options
+ * @returns Container ID
+ */
+ async createContainer(
+ image: string,
+ name: string,
+ options: {
+ ports?: Record<string, string>;
+ env?: Record<string, string>;
+ volumes?: Record<string, string>;
+ cmd?: string[];
+ } = {},
+ ): Promise<string> {
+ const args = ["create", "--name", name];
+
+ // Add port mappings
+ if (options.ports) {
+ for (const [hostPort, containerPort] of Object.entries(options.ports)) {
+ args.push("-p", `${hostPort}:${containerPort}`);
+ }
+ }
+
+ // Add environment variables
+ if (options.env) {
+ for (const [key, value] of Object.entries(options.env)) {
+ args.push("-e", `${key}=${value}`);
+ }
+ }
+
+ // Add volume mappings
+ if (options.volumes) {
+ for (const [hostPath, containerPath] of Object.entries(options.volumes)) {
+ args.push("-v", `${hostPath}:${containerPath}`);
+ }
+ }
+
+ args.push(image);
+
+ // Add command if specified
+ if (options.cmd && options.cmd.length > 0) {
+ args.push(...options.cmd);
+ }
+
+ const { stdout } = await this.exec(args);
+ return stdout.trim();
+ }
+
+ /**
+ * Start Docker container
+ *
+ * @param containerId Container ID or name
+ */
+ async startContainer(containerId: string): Promise<void> {
+ await this.exec(["start", containerId]);
+ }
+
+ /**
+ * Stop Docker container
+ *
+ * @param containerId Container ID or name
+ */
+ async stopContainer(containerId: string): Promise<void> {
+ await this.exec(["stop", containerId]);
+ }
+
+ /**
+ * Remove Docker container
+ *
+ * @param containerId Container ID or name
+ * @param force Force removal
+ */
+ async removeContainer(containerId: string, force = false): Promise<void> {
+ const args = ["rm"];
+ if (force) {
+ args.push("-f");
+ }
+ args.push(containerId);
+ await this.exec(args);
+ }
+
+ /**
+ * Get container logs
+ *
+ * @param containerId Container ID or name
+ * @returns Container logs
+ */
+ async getContainerLogs(containerId: string): Promise<string> {
+ const { stdout } = await this.exec(["logs", containerId]);
+ return stdout;
+ }
+
+ /**
+ * Check if a container exists
+ *
+ * @param containerId Container ID or name
+ * @returns True if container exists
+ */
+ async containerExists(containerId: string): Promise<boolean> {
+ try {
+ await this.exec(["inspect", containerId]);
+ return true;
+ } catch (_error) {
+ return false;
+ }
+ }
+
+ /**
+ * Create Docker network
+ *
+ * @param name Network name
+ * @param driver Network driver
+ * @returns Network ID
+ */
+ async createNetwork(name: string, driver = "bridge"): Promise<string> {
+ const { stdout } = await this.exec([
+ "network",
+ "create",
+ "--driver",
+ driver,
+ name,
+ ]);
+ return stdout.trim();
+ }
+
+ /**
+ * Remove Docker network
+ *
+ * @param networkId Network ID or name
+ */
+ async removeNetwork(networkId: string): Promise<void> {
+ await this.exec(["network", "rm", networkId]);
+ }
+
+ /**
+ * Connect container to network
+ *
+ * @param containerId Container ID or name
+ * @param networkId Network ID or name
+ */
+ async connectNetwork(
+ containerId: string,
+ networkId: string,
+ options: {
+ aliases?: string[];
+ } = {},
+ ): Promise<void> {
+ const args = ["network", "connect"];
+ if (options.aliases) {
+ for (const alias of options.aliases) {
+ args.push("--alias", alias);
+ }
+ }
+ args.push(networkId, containerId);
+ await this.exec(args);
+ }
+
+ /**
+ * Disconnect container from network
+ *
+ * @param containerId Container ID or name
+ * @param networkId Network ID or name
+ */
+ async disconnectNetwork(
+ containerId: string,
+ networkId: string,
+ ): Promise<void> {
+ await this.exec(["network", "disconnect", networkId, containerId]);
+ }
+
+ /**
+ * Create Docker volume
+ *
+ * @param name Volume name
+ * @param driver Volume driver
+ * @param driverOpts Driver options
+ * @param labels Volume labels
+ * @returns Volume name
+ */
+ async createVolume(
+ name: string,
+ driver = "local",
+ driverOpts: Record<string, string> = {},
+ labels: Record<string, string> = {},
+ ): Promise<string> {
+ const args = ["volume", "create", "--name", name, "--driver", driver];
+
+ // Add driver options
+ for (const [key, value] of Object.entries(driverOpts)) {
+ args.push("--opt", `${key}=${value}`);
+ }
+
+ // Add labels
+ for (const [key, value] of Object.entries(labels)) {
+ args.push("--label", `${key}=${value}`);
+ }
+
+ const { stdout } = await this.exec(args);
+ return stdout.trim();
+ }
+
+ /**
+ * Remove Docker volume
+ *
+ * @param volumeName Volume name
+ * @param force Force removal of the volume
+ */
+ async removeVolume(volumeName: string, force = false): Promise<void> {
+ const args = ["volume", "rm"];
+ if (force) {
+ args.push("--force");
+ }
+ args.push(volumeName);
+ await this.exec(args);
+ }
+
+ /**
+ * Get Docker volume information
+ *
+ * @param volumeName Volume name
+ * @returns Volume details in JSON format
+ */
+ async inspectVolume(volumeName: string): Promise<VolumeInfo[]> {
+ const { stdout } = await this.exec(["volume", "inspect", volumeName]);
+ try {
+ return JSON.parse(stdout.trim()) as VolumeInfo[];
+ } catch (_error) {
+ return [];
+ }
+ }
+
+ /**
+ * Check if a volume exists
+ *
+ * @param volumeName Volume name
+ * @returns True if volume exists
+ */
+ async volumeExists(volumeName: string): Promise<boolean> {
+ try {
+ await this.inspectVolume(volumeName);
+ return true;
+ } catch (_error) {
+ return false;
+ }
+ }
+
+ /**
+ * Login to a Docker registry
+ *
+ * @param registry Registry URL
+ * @param username Username for authentication
+ * @param password Password for authentication
+ * @returns Promise that resolves when login is successful
+ */
+ async login(
+ registry: string,
+ username: string,
+ password: string,
+ ): Promise<void> {
+ return new Promise((resolve, reject) => {
+ const args = [
+ "login",
+ registry,
+ "--username",
+ username,
+ "--password-stdin",
+ ];
+
+ const child = spawn(this.dockerPath, args, {
+ stdio: ["pipe", "pipe", "pipe"],
+ });
+
+ let stdout = "";
+ let stderr = "";
+
+ child.stdout.on("data", (data) => {
+ stdout += data.toString();
+ });
+
+ child.stderr.on("data", (data) => {
+ stderr += data.toString();
+ });
+
+ child.on("close", (code) => {
+ if (code === 0) {
+ resolve();
+ } else {
+ reject(
+ new Error(
+ `Docker login failed with exit code ${code}: ${stderr || stdout}`,
+ ),
+ );
+ }
+ });
+
+ child.on("error", (err) => {
+ reject(new Error(`Docker login failed: ${err.message}`));
+ });
+
+ // Write password to stdin and close the stream
+ child.stdin.write(password);
+ child.stdin.end();
+ });
+ }
+
+ /**
+ * Logout from a Docker registry
+ *
+ * @param registry Registry URL
+ */
+ async logout(registry: string): Promise<void> {
+ try {
+ await this.exec(["logout", registry]);
+ } catch (error) {
+ // Ignore logout errors as they're not critical
+ console.warn(`Docker logout failed: ${error}`);
+ }
+ }
+}
diff --git a/alchemy/src/docker/container.ts b/alchemy/src/docker/container.ts
new file mode 100644
index 000000000..da948066f
--- /dev/null
+++ b/alchemy/src/docker/container.ts
@@ -0,0 +1,267 @@
+import type { Context } from "../context.ts";
+import { Resource } from "../resource.ts";
+import { DockerApi } from "./api.ts";
+import type { Image } from "./image.ts";
+import type { RemoteImage } from "./remote-image.ts";
+
+/**
+ * Port mapping configuration
+ */
+export interface PortMapping {
+ /**
+ * External port on the host
+ */
+ external: number | string;
+
+ /**
+ * Internal port inside the container
+ */
+ internal: number | string;
+
+ /**
+ * Protocol (tcp or udp)
+ */
+ protocol?: "tcp" | "udp";
+}
+
+/**
+ * Volume mapping configuration
+ */
+export interface VolumeMapping {
+ /**
+ * Host path
+ */
+ hostPath: string;
+
+ /**
+ * Container path
+ */
+ containerPath: string;
+
+ /**
+ * Read-only flag
+ */
+ readOnly?: boolean;
+}
+
+/**
+ * Network mapping configuration
+ */
+export interface NetworkMapping {
+ /**
+ * Network name or ID
+ */
+ name: string;
+
+ /**
+ * Aliases for the container in the network
+ */
+ aliases?: string[];
+}
+
+/**
+ * Properties for creating a Docker container
+ */
+export interface ContainerProps {
+ /**
+ * Image to use for the container
+ * Can be an Alchemy Image or RemoteImage resource or a string image reference
+ */
+ image: Image | RemoteImage | string;
+
+ /**
+ * Container name
+ */
+ name?: string;
+
+ /**
+ * Command to run in the container
+ */
+ command?: string[];
+
+ /**
+ * Environment variables
+ */
+ environment?: Record<string, string>;
+
+ /**
+ * Port mappings
+ */
+ ports?: PortMapping[];
+
+ /**
+ * Volume mappings
+ */
+ volumes?: VolumeMapping[];
+
+ /**
+ * Restart policy
+ */
+ restart?: "no" | "always" | "on-failure" | "unless-stopped";
+
+ /**
+ * Networks to connect to
+ */
+ networks?: NetworkMapping[];
+
+ /**
+ * Whether to remove the container when it exits
+ */
+ removeOnExit?: boolean;
+
+ /**
+ * Start the container after creation
+ */
+ start?: boolean;
+}
+
+/**
+ * Docker Container resource
+ */
+export interface Container
+ extends Resource<"docker::Container">,
+ ContainerProps {
+ /**
+ * Container ID
+ */
+ id: string;
+
+ /**
+ * Container state
+ */
+ state?: "created" | "running" | "paused" | "stopped" | "exited";
+
+ /**
+ * Time when the container was created
+ */
+ createdAt: number;
+}
+
+/**
+ * Create and manage a Docker Container
+ *
+ * @example
+ * // Create a simple Nginx container
+ * const webContainer = await Container("web", {
+ * image: "nginx:latest",
+ * ports: [
+ * { external: 8080, internal: 80 }
+ * ],
+ * start: true
+ * });
+ *
+ * @example
+ * // Create a container with environment variables and volume mounts
+ * const appContainer = await Container("app", {
+ * image: customImage, // Using an Alchemy RemoteImage resource
+ * environment: {
+ * NODE_ENV: "production",
+ * API_KEY: "secret-key"
+ * },
+ * volumes: [
+ * { hostPath: "./data", containerPath: "/app/data" }
+ * ],
+ * ports: [
+ * { external: 3000, internal: 3000 }
+ * ],
+ * restart: "always",
+ * start: true
+ * });
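+ *
+ * @example
+ * // Sketch: attach a container to a Docker network with an alias
+ * // (assumes `appNetwork` is a Network resource created elsewhere)
+ * const apiContainer = await Container("api", {
+ * image: "node:20-alpine",
+ * networks: [
+ * { name: appNetwork.name, aliases: ["api"] }
+ * ],
+ * ports: [
+ * { external: 3000, internal: 3000, protocol: "tcp" }
+ * ],
+ * start: true
+ * });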
+ */
+export const Container = Resource(
+ "docker::Container",
+ async function (
+ this: Context<Container>,
+ id: string,
+ props: ContainerProps,
+ ): Promise<Container> {
+ // Initialize Docker API client
+ const api = new DockerApi();
+
+ // Get image reference
+ const imageRef =
+ typeof props.image === "string" ? props.image : props.image.imageRef;
+
+ // Use provided name or generate one based on resource ID
+ const containerName =
+ props.name || `alchemy-${id.replace(/[^a-zA-Z0-9_.-]/g, "-")}`;
+
+ // Handle delete phase
+ if (this.phase === "delete") {
+ if (this.output?.id) {
+ // Stop container if running
+ await api.stopContainer(this.output.id);
+
+ // Remove container
+ await api.removeContainer(this.output.id, true);
+ }
+
+ // Return destroyed state
+ return this.destroy();
+ } else {
+ let containerState: NonNullable<Container["state"]> = "created";
+
+ if (this.phase === "update") {
+ // Check if container already exists (for update)
+ const containerExists = await api.containerExists(containerName);
+
+ if (containerExists) {
+ // Remove existing container for update
+ await api.removeContainer(containerName, true);
+ }
+ }
+
+ // Prepare port mappings
+ const portMappings: Record<string, string> = {};
+ if (props.ports) {
+ for (const port of props.ports) {
+ const protocol = port.protocol || "tcp";
+ portMappings[`${port.external}`] = `${port.internal}/${protocol}`;
+ }
+ }
+
+ // Prepare volume mappings
+ const volumeMappings: Record<string, string> = {};
+ if (props.volumes) {
+ for (const volume of props.volumes) {
+ const readOnlyFlag = volume.readOnly ? ":ro" : "";
+ volumeMappings[volume.hostPath] =
+ `${volume.containerPath}${readOnlyFlag}`;
+ }
+ }
+
+ // Create new container
+ const containerId = await api.createContainer(imageRef, containerName, {
+ ports: portMappings,
+ env: props.environment,
+ volumes: volumeMappings,
+ cmd: props.command,
+ });
+
+ // Connect to networks if specified
+ if (props.networks) {
+ for (const network of props.networks) {
+ const networkId =
+ typeof network === "string" ? network : network.name;
+ await api.connectNetwork(containerId, networkId, {
+ aliases: network.aliases,
+ });
+ }
+ }
+
+ // Start container if requested
+ if (props.start) {
+ await api.startContainer(containerId);
+ containerState = "running";
+ }
+
+ // Return the resource using this() to construct output
+ return this({
+ ...props,
+ id: containerId,
+ state: containerState,
+ createdAt: Date.now(),
+ });
+ }
+ },
+);
diff --git a/alchemy/src/docker/image.ts b/alchemy/src/docker/image.ts
new file mode 100644
index 000000000..20939f2c3
--- /dev/null
+++ b/alchemy/src/docker/image.ts
@@ -0,0 +1,270 @@
+import fs from "node:fs/promises";
+import path from "node:path";
+import type { Context } from "../context.ts";
+import { Resource } from "../resource.ts";
+import type { Secret } from "../secret.ts";
+import { DockerApi } from "./api.ts";
+
+/**
+ * Options for building a Docker image
+ */
+export interface DockerBuildOptions {
+ /**
+ * Path to the build context directory
+ *
+ * @default - the `dirname(dockerfile)` if provided or otherwise `process.cwd()`
+ */
+ context?: string;
+
+ /**
+ * Path to the Dockerfile, relative to context
+ *
+ * @default - `Dockerfile`
+ */
+ dockerfile?: string;
+
+ /**
+ * Target build platform (e.g., linux/amd64)
+ */
+ platform?: string;
+
+ /**
+ * Build arguments as key-value pairs
+ */
+ buildArgs?: Record<string, string>;
+
+ /**
+ * Target build stage in multi-stage builds
+ */
+ target?: string;
+
+ /**
+ * List of images to use for cache
+ */
+ cacheFrom?: string[];
+}
+
+export interface ImageRegistry {
+ username: string;
+ password: Secret;
+ server: string;
+}
+
+/**
+ * Properties for creating a Docker image
+ */
+export interface ImageProps {
+ /**
+ * Repository name for the image (e.g., "username/image")
+ */
+ name?: string;
+
+ /**
+ * Tag for the image (e.g., "latest")
+ */
+ tag?: string;
+
+ /**
+ * Build configuration
+ */
+ build?: DockerBuildOptions;
+
+ /**
+ * Registry credentials
+ */
+ registry?: ImageRegistry;
+
+ /**
+ * Whether to skip pushing the image to registry
+ */
+ skipPush?: boolean;
+}
+
+/**
+ * Docker Image resource
+ */
+export interface Image extends Resource<"docker::Image">, ImageProps {
+ /**
+ * Image name
+ */
+ name: string;
+
+ /**
+ * Full image reference (name:tag)
+ */
+ imageRef: string;
+
+ /**
+ * Image ID
+ */
+ imageId?: string;
+
+ /**
+ * Repository digest if pushed
+ */
+ repoDigest?: string;
+
+ /**
+ * Time when the image was built
+ */
+ builtAt: number;
+}
+
+/**
+ * Build and manage a Docker image from a Dockerfile
+ *
+ * @example
+ * // Build a Docker image from a Dockerfile
+ * const appImage = await Image("app-image", {
+ * name: "myapp",
+ * tag: "latest",
+ * build: {
+ * context: "./app",
+ * dockerfile: "Dockerfile",
+ * buildArgs: {
+ * NODE_ENV: "production"
+ * }
+ * }
+ * });
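+ *
+ * @example
+ * // Sketch: build and push to a registry; `registryPassword` is assumed to be a
+ * // Secret value provided elsewhere (e.g. via alchemy.secret())
+ * const apiImage = await Image("api-image", {
+ * name: "my-org/api",
+ * tag: "v1.0.0",
+ * build: {
+ * context: "./api"
+ * },
+ * registry: {
+ * server: "ghcr.io",
+ * username: "my-org",
+ * password: registryPassword
+ * }
+ * });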
+ */
+export const Image = Resource(
+ "docker::Image",
+ async function (
+ this: Context<Image>,
+ id: string,
+ props: ImageProps,
+ ): Promise<Image> {
+ // Initialize Docker API client
+ const api = new DockerApi();
+
+ if (this.phase === "delete") {
+ // No action needed for delete as Docker images aren't automatically removed
+ // This is intentional as other resources might depend on the same image
+ return this.destroy();
+ } else {
+ // Normalize properties
+ const tag = props.tag || "latest";
+ const name = props.name || id;
+ const imageRef = `${name}:${tag}`;
+
+ let context: string;
+ let dockerfile: string;
+ if (props.build?.dockerfile && props.build?.context) {
+ context = path.resolve(props.build.context);
+ dockerfile = path.resolve(context, props.build.dockerfile);
+ } else if (props.build?.dockerfile) {
+ context = process.cwd();
+ dockerfile = path.resolve(context, props.build.dockerfile);
+ } else if (props.build?.context) {
+ context = path.resolve(props.build.context);
+ dockerfile = path.resolve(context, "Dockerfile");
+ } else {
+ context = process.cwd();
+ dockerfile = path.resolve(context, "Dockerfile");
+ }
+ await fs.access(context);
+ await fs.access(dockerfile);
+
+ // Prepare build options
+ const buildOptions: Record<string, string> = props.build?.buildArgs || {};
+
+ // Add platform if specified
+ let buildArgs = ["build", "-t", imageRef];
+
+ if (props.build?.platform) {
+ buildArgs.push("--platform", props.build.platform);
+ }
+
+ // Add target if specified
+ if (props.build?.target) {
+ buildArgs.push("--target", props.build.target);
+ }
+
+ // Add cache sources if specified
+ if (props.build?.cacheFrom && props.build.cacheFrom.length > 0) {
+ for (const cacheSource of props.build.cacheFrom) {
+ buildArgs.push("--cache-from", cacheSource);
+ }
+ }
+
+ // Add build arguments
+ for (const [key, value] of Object.entries(buildOptions)) {
+ buildArgs.push("--build-arg", `${key}="${value}"`);
+ }
+
+ buildArgs.push("-f", dockerfile);
+
+ // Add context path
+ buildArgs.push(context);
+
+ // Execute build command
+ const { stdout } = await api.exec(buildArgs);
+
+ // Extract image ID from build output if available
+ const imageIdMatch = /Successfully built ([a-f0-9]+)/.exec(stdout);
+ const imageId = imageIdMatch ? imageIdMatch[1] : undefined;
+
+ // Handle push if required
+ let repoDigest: string | undefined;
+ let finalImageRef = imageRef;
+ if (props.registry && !props.skipPush) {
+ const { server, username, password } = props.registry;
+
+ // Ensure the registry server does not have trailing slash
+ const registryHost = server.replace(/\/$/, "");
+
+ // Determine if the built image already includes a registry host (e.g. ghcr.io/user/repo)
+ const firstSegment = imageRef.split("/")[0];
+ const hasRegistryPrefix = firstSegment.includes(".");
+
+ // Compose the target image reference that will be pushed
+ const targetImage = hasRegistryPrefix
+ ? imageRef // already fully-qualified
+ : `${registryHost}/${imageRef}`;
+
+ try {
+ // Authenticate to registry
+ await api.login(registryHost, username, password.unencrypted);
+
+ // Tag local image with fully qualified name if necessary
+ if (targetImage !== imageRef) {
+ await api.exec(["tag", imageRef, targetImage]);
+ }
+
+ // Push the image
+ const { stdout: pushOut } = await api.exec(["push", targetImage]);
+
+ // Attempt to extract the repo digest from push output
+ const digestMatch = /digest:\s+([a-z0-9]+:[a-f0-9]{64})/.exec(
+ pushOut,
+ );
+ if (digestMatch) {
+ const digestHash = digestMatch[1];
+ // Strip tag (anything after last :) to build image@digest reference
+ const [repoWithoutTag] =
+ targetImage.split(":").length > 2
+ ? [targetImage] // unlikely but safety
+ : [targetImage.substring(0, targetImage.lastIndexOf(":"))];
+ repoDigest = `${repoWithoutTag}@${digestHash}`;
+ }
+
+ // Update the final image reference to point at the pushed image
+ finalImageRef = targetImage;
+ } finally {
+ // Always try to logout – failures are non-fatal
+ await api.logout(registryHost);
+ }
+ }
+
+ // Return the resource using this() to construct output
+ return this({
+ ...props,
+ name,
+ imageRef: finalImageRef,
+ imageId,
+ repoDigest,
+ builtAt: Date.now(),
+ });
+ }
+ },
+);
diff --git a/alchemy/src/docker/index.ts b/alchemy/src/docker/index.ts
new file mode 100644
index 000000000..25ce51b82
--- /dev/null
+++ b/alchemy/src/docker/index.ts
@@ -0,0 +1,6 @@
+export * from "./api.ts";
+export * from "./remote-image.ts";
+export * from "./container.ts";
+export * from "./network.ts";
+export * from "./volume.ts";
+export * from "./image.ts";
diff --git a/alchemy/src/docker/network.ts b/alchemy/src/docker/network.ts
new file mode 100644
index 000000000..eeeae735c
--- /dev/null
+++ b/alchemy/src/docker/network.ts
@@ -0,0 +1,101 @@
+import type { Context } from "../context.ts";
+import { Resource } from "../resource.ts";
+import { DockerApi } from "./api.ts";
+
+/**
+ * Properties for creating a Docker network
+ */
+export interface NetworkProps {
+ /**
+ * Network name
+ */
+ name: string;
+
+ /**
+ * Network driver to use
+ * @default "bridge"
+ */
+ driver?: "bridge" | "host" | "none" | "overlay" | "macvlan" | (string & {});
+
+ /**
+ * Enable IPv6 on the network
+ * @default false
+ */
+ enableIPv6?: boolean;
+
+ /**
+ * Custom metadata labels for the network
+ */
+ labels?: Record<string, string>;
+}
+
+/**
+ * Docker Network resource
+ */
+export interface Network extends Resource<"docker::Network">, NetworkProps {
+ /**
+ * Network ID
+ */
+ id: string;
+
+ /**
+ * Time when the network was created
+ */
+ createdAt: number;
+}
+
+/**
+ * Create and manage a Docker Network
+ *
+ * @see https://docs.docker.com/engine/network/
+ *
+ * @example
+ * // Create a simple bridge network
+ * const appNetwork = await Network("app-network", {
+ * name: "app-network"
+ * });
+ *
+ * @example
+ * // Create a custom network with driver
+ * const overlayNetwork = await Network("overlay-network", {
+ * name: "overlay-network",
+ * driver: "overlay",
+ * enableIPv6: true,
+ * labels: {
+ * "com.example.description": "Network for application services"
+ * }
+ * });
+ */
+export const Network = Resource(
+ "docker::Network",
+ async function (
+ this: Context<Network>,
+ _id: string,
+ props: NetworkProps,
+ ): Promise<Network> {
+ // Initialize Docker API client
+ const api = new DockerApi();
+
+ // Handle delete phase
+ if (this.phase === "delete") {
+ if (this.output?.id) {
+ // Remove network
+ await api.removeNetwork(this.output.id);
+ }
+
+ // Return destroyed state
+ return this.destroy();
+ } else {
+ // Create the network
+ props.driver = props.driver || "bridge";
+ const networkId = await api.createNetwork(props.name, props.driver);
+
+ // Return the resource using this() to construct output
+ return this({
+ ...props,
+ id: networkId,
+ createdAt: Date.now(),
+ });
+ }
+ },
+);
diff --git a/alchemy/src/docker/remote-image.ts b/alchemy/src/docker/remote-image.ts
new file mode 100644
index 000000000..a548d2b51
--- /dev/null
+++ b/alchemy/src/docker/remote-image.ts
@@ -0,0 +1,83 @@
+import type { Context } from "../context.ts";
+import { Resource } from "../resource.ts";
+import { DockerApi } from "./api.ts";
+
+/**
+ * Properties for pulling a remote Docker image
+ */
+export interface RemoteImageProps {
+ /**
+ * Docker image name (e.g., "nginx")
+ */
+ name: string;
+
+ /**
+ * Tag for the image (e.g., "latest" or "1.19-alpine")
+ */
+ tag?: string;
+
+ /**
+ * Always attempt to pull the image, even if it exists locally
+ */
+ alwaysPull?: boolean;
+}
+
+/**
+ * Docker Remote Image resource
+ */
+export interface RemoteImage
+ extends Resource<"docker::RemoteImage">,
+ RemoteImageProps {
+ /**
+ * Full image reference (name:tag)
+ */
+ imageRef: string;
+
+ /**
+ * Time when the image was created or pulled
+ */
+ createdAt: number;
+}
+
+/**
+ * Create or reference a Docker Remote Image
+ *
+ * @example
+ * // Pull the nginx image
+ * const nginxImage = await RemoteImage("nginx", {
+ * name: "nginx",
+ * tag: "latest"
+ * });
+ *
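+ * @example
+ * // Sketch: pin a specific tag and always re-pull the image
+ * const redisImage = await RemoteImage("redis", {
+ * name: "redis",
+ * tag: "7-alpine",
+ * alwaysPull: true
+ * });
+ *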
+ */
+export const RemoteImage = Resource(
+ "docker::RemoteImage",
+ async function (
+ this: Context<RemoteImage>,
+ _id: string,
+ props: RemoteImageProps,
+ ): Promise<RemoteImage> {
+ // Initialize Docker API client
+ const api = new DockerApi();
+
+ if (this.phase === "delete") {
+ // No action needed for delete as Docker images aren't automatically removed
+ // This is intentional as other resources might depend on the same image
+ return this.destroy();
+ } else {
+ // Normalize properties
+ const tag = props.tag || "latest";
+ const imageRef = `${props.name}:${tag}`;
+
+ // Pull image
+ await api.pullImage(imageRef);
+
+ // Return the resource using this() to construct output
+ return this({
+ ...props,
+ imageRef,
+ createdAt: Date.now(),
+ });
+ }
+ },
+);
diff --git a/alchemy/src/docker/volume.ts b/alchemy/src/docker/volume.ts
new file mode 100644
index 000000000..6271ee429
--- /dev/null
+++ b/alchemy/src/docker/volume.ts
@@ -0,0 +1,154 @@
+import type { Context } from "../context.ts";
+import { Resource } from "../resource.ts";
+import { DockerApi } from "./api.ts";
+
+/**
+ * Interface for volume label
+ */
+export interface VolumeLabel {
+ /**
+ * Label name
+ */
+ name: string;
+
+ /**
+ * Label value
+ */
+ value: string;
+}
+
+/**
+ * Properties for creating a Docker volume
+ */
+export interface VolumeProps {
+ /**
+ * Volume name
+ */
+ name: string;
+
+ /**
+ * Volume driver to use
+ * @default "local"
+ */
+ driver?: string;
+
+ /**
+ * Driver-specific options
+ */
+ driverOpts?: Record<string, string>;
+
+ /**
+ * Custom metadata labels for the volume
+ */
+ labels?: VolumeLabel[] | Record<string, string>;
+}
+
+/**
+ * Docker Volume resource
+ */
+export interface Volume extends Resource<"docker::Volume">, VolumeProps {
+ /**
+ * Volume ID (same as name for Docker volumes)
+ */
+ id: string;
+
+ /**
+ * Volume mountpoint path on the host
+ */
+ mountpoint?: string;
+
+ /**
+ * Time when the volume was created
+ */
+ createdAt: number;
+}
+
+/**
+ * Create and manage a Docker Volume
+ *
+ * @see https://docs.docker.com/engine/reference/commandline/volume/
+ *
+ * @example
+ * // Create a simple Docker volume
+ * const dataVolume = await Volume("data-volume", {
+ * name: "data-volume"
+ * });
+ *
+ * @example
+ * // Create a Docker volume with custom driver and options
+ * const dbVolume = await Volume("db-data", {
+ * name: "db-data",
+ * driver: "local",
+ * driverOpts: {
+ * "type": "nfs",
+ * "o": "addr=10.0.0.1,rw",
+ * "device": ":/path/to/dir"
+ * },
+ * labels: [
+ * { name: "com.example.usage", value: "database-storage" },
+ * { name: "com.example.backup", value: "weekly" }
+ * ]
+ * });
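+ *
+ * @example
+ * // Sketch: labels can also be provided as a plain record
+ * const cacheVolume = await Volume("cache-volume", {
+ * name: "cache-volume",
+ * labels: {
+ * "com.example.usage": "cache"
+ * }
+ * });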
+ */
+export const Volume = Resource(
+ "docker::Volume",
+ async function (
+ this: Context<Volume>,
+ _id: string,
+ props: VolumeProps,
+ ): Promise<Volume> {
+ // Initialize Docker API client
+ const api = new DockerApi();
+
+ // Process labels to ensure consistent format
+ const processedLabels: Record<string, string> = {};
+ if (props.labels) {
+ if (Array.isArray(props.labels)) {
+ // Convert array of label objects to Record
+ for (const label of props.labels) {
+ processedLabels[label.name] = label.value;
+ }
+ } else {
+ // Use Record directly
+ Object.assign(processedLabels, props.labels);
+ }
+ }
+
+ // Handle delete phase
+ if (this.phase === "delete") {
+ if (this.output?.name) {
+ // Remove volume
+ await api.removeVolume(this.output.name);
+ }
+
+ // Return destroyed state
+ return this.destroy();
+ } else {
+ // Set default driver if not provided
+ props.driver = props.driver || "local";
+ const driverOpts = props.driverOpts || {};
+
+ // Create the volume
+ const volumeName = await api.createVolume(
+ props.name,
+ props.driver,
+ driverOpts,
+ processedLabels,
+ );
+
+ // Get volume details to retrieve mountpoint
+ const volumeInfos = await api.inspectVolume(volumeName);
+ const mountpoint = volumeInfos[0]?.Mountpoint;
+
+ // Return the resource using this() to construct output
+ return this({
+ ...props,
+ id: volumeName,
+ mountpoint,
+ createdAt: Date.now(),
+ labels: Array.isArray(props.labels) ? props.labels : undefined,
+ driverOpts: props.driverOpts,
+ });
+ }
+ },
+);
diff --git a/alchemy/src/fs/file-system-state-store.ts b/alchemy/src/fs/file-system-state-store.ts
index 2ae05bb10..69c0d9fa1 100644
--- a/alchemy/src/fs/file-system-state-store.ts
+++ b/alchemy/src/fs/file-system-state-store.ts
@@ -7,6 +7,7 @@ import { deserializeState, type State, type StateStore } from "../state.ts";
import { ignore } from "../util/ignore.ts";
const stateRootDir = path.join(process.cwd(), ".alchemy");
+const ALCHEMY_SEPERATOR_CHAR = process.platform === "win32" ? "-" : ":";
export class FileSystemStateStore implements StateStore {
public readonly dir: string;
@@ -128,7 +129,10 @@ export class FileSystemStateStore implements StateStore {
throw new Error(`ID cannot include colons: ${key}`);
}
if (key.includes("/")) {
- key = key.replaceAll("/", ":");
+ //todo(michael): remove this next time we do a breaking change
+ //* windows doesn't support ":" in file paths, but we already use ":"
+ //* so now we use both to prevent breaking changes
+ key = key.replaceAll("/", ALCHEMY_SEPERATOR_CHAR);
}
return path.join(this.dir, `${key}.json`);
}
diff --git a/alchemy/src/fs/file.ts b/alchemy/src/fs/file.ts
index cd011273e..eb73d3f80 100644
--- a/alchemy/src/fs/file.ts
+++ b/alchemy/src/fs/file.ts
@@ -184,9 +184,12 @@ export const File = Resource(
}
// Create directory and write file
- await fs.promises.mkdir(path.dirname(filePath), {
- recursive: true,
- });
+ const dirName = path.dirname(filePath);
+ if (dirName !== ".") {
+ await fs.promises.mkdir(dirName, {
+ recursive: true,
+ });
+ }
await fs.promises.writeFile(filePath, props.content);
diff --git a/alchemy/src/github/index.ts b/alchemy/src/github/index.ts
index d03e663eb..63db3dc00 100644
--- a/alchemy/src/github/index.ts
+++ b/alchemy/src/github/index.ts
@@ -1,3 +1,4 @@
export * from "./comment.ts";
export * from "./repository-environment.ts";
+export * from "./repository-webhook.ts";
export * from "./secret.ts";
diff --git a/alchemy/src/github/repository-webhook.ts b/alchemy/src/github/repository-webhook.ts
new file mode 100644
index 000000000..490435d99
--- /dev/null
+++ b/alchemy/src/github/repository-webhook.ts
@@ -0,0 +1,269 @@
+import type { Context } from "../context.ts";
+import { Resource } from "../resource.ts";
+import { logger } from "../util/logger.ts";
+import { createGitHubClient, verifyGitHubAuth } from "./client.ts";
+
+/**
+ * Properties for creating or updating a GitHub Repository Webhook
+ */
+export interface RepositoryWebhookProps {
+ /**
+ * Repository owner (user or organization)
+ */
+ owner: string;
+
+ /**
+ * Repository name
+ */
+ repository: string;
+
+ /**
+ * The URL to which the payloads will be delivered
+ */
+ url: string;
+
+ /**
+ * Webhook secret for payload validation
+ * @default undefined
+ */
+ secret?: string;
+
+ /**
+ * The media type used to serialize the payloads
+ * @default "application/json"
+ */
+ contentType?: "application/json" | "application/x-www-form-urlencoded";
+
+ /**
+ * Determines whether the SSL certificate of the host for url will be verified
+ * @default false
+ */
+ insecureSsl?: boolean;
+
+ /**
+ * Determines if notifications are sent when the webhook is triggered
+ * @default true
+ */
+ active?: boolean;
+
+ /**
+ * Determines what events the hook is triggered for
+ * @default ["push"]
+ */
+ events?: string[];
+
+ /**
+ * Optional GitHub API token (overrides environment variable)
+ * If not provided, will use GITHUB_TOKEN environment variable
+ * @default process.env.GITHUB_TOKEN
+ */
+ token?: string;
+}
+
+/**
+ * Output returned after Repository Webhook creation/update
+ */
+export interface RepositoryWebhook
+ extends Resource<"github::RepositoryWebhook">,
+ RepositoryWebhookProps {
+ /**
+ * The ID of the resource
+ */
+ id: string;
+
+ /**
+ * The numeric ID of the webhook in GitHub
+ */
+ webhookId: number;
+
+ /**
+ * The webhook URL that was configured
+ */
+ url: string;
+
+ /**
+ * Time at which the object was created
+ */
+ createdAt: string;
+
+ /**
+ * Time at which the object was last updated
+ */
+ updatedAt: string;
+
+ /**
+ * The ping URL for the webhook
+ */
+ pingUrl: string;
+
+ /**
+ * The test URL for the webhook
+ */
+ testUrl: string;
+}
+
+/**
+ * Resource for managing GitHub repository webhooks
+ *
+ * Webhooks allow external services to be notified when certain events happen in a repository.
+ * This resource manages the full lifecycle of repository webhooks including creation, updates, and deletion.
+ *
+ * @example
+ * // Create a basic webhook for push events
+ * const pushWebhook = await RepositoryWebhook("push-webhook", {
+ * owner: "my-org",
+ * repository: "my-repo",
+ * url: "https://my-service.com/github-webhook",
+ * events: ["push"]
+ * });
+ *
+ * @example
+ * // Create a webhook with secret validation for multiple events
+ * const ciWebhook = await RepositoryWebhook("ci-webhook", {
+ * owner: "my-org",
+ * repository: "my-repo",
+ * url: "https://ci.example.com/webhook",
+ * secret: "my-webhook-secret",
+ * events: ["push", "pull_request", "release"],
+ * contentType: "application/json"
+ * });
+ *
+ * @example
+ * // Create a webhook for all events with custom SSL settings
+ * const monitoringWebhook = await RepositoryWebhook("monitoring-webhook", {
+ * owner: "my-org",
+ * repository: "my-repo",
+ * url: "https://monitoring.internal.com/github",
+ * secret: "super-secret-key",
+ * events: ["*"], // All events
+ * insecureSsl: true, // For internal services with self-signed certs
+ * contentType: "application/x-www-form-urlencoded"
+ * });
+ */
+export const RepositoryWebhook = Resource(
+ "github::RepositoryWebhook",
+ async function (
+ this: Context<RepositoryWebhook>,
+ _id: string,
+ props: RepositoryWebhookProps,
+ ): Promise<RepositoryWebhook> {
+ // Create authenticated Octokit client
+ const octokit = await createGitHubClient({
+ token: props.token,
+ });
+
+ // Verify authentication and permissions
+ if (!this.quiet) {
+ await verifyGitHubAuth(octokit, props.owner, props.repository);
+ }
+
+ if (this.phase === "delete") {
+ if (this.output?.webhookId) {
+ try {
+ // Delete the webhook
+ await octokit.rest.repos.deleteWebhook({
+ owner: props.owner,
+ repo: props.repository,
+ hook_id: this.output.webhookId,
+ });
+ } catch (error: any) {
+ // Ignore 404 errors (webhook already deleted)
+ if (error.status === 404) {
+ logger.log("Webhook doesn't exist, ignoring");
+ } else {
+ throw error;
+ }
+ }
+ }
+
+ // Return void (a deleted resource has no content)
+ return this.destroy();
+ }
+
+ try {
+ const webhookConfig = {
+ url: props.url,
+ content_type: props.contentType || "application/json",
+ insecure_ssl: props.insecureSsl ? "1" : "0",
+ ...(props.secret && { secret: props.secret }),
+ };
+
+ const events = props.events || ["push"];
+ const active = props.active !== false; // Default to true
+
+ let webhookData;
+ let webhookId: number;
+
+ if (this.phase === "update" && this.output?.webhookId) {
+ // Update existing webhook
+ const { data: updatedWebhook } = await octokit.rest.repos.updateWebhook(
+ {
+ owner: props.owner,
+ repo: props.repository,
+ hook_id: this.output.webhookId,
+ config: webhookConfig,
+ events,
+ active,
+ },
+ );
+
+ webhookData = updatedWebhook;
+ webhookId = this.output.webhookId;
+ } else {
+ // Create new webhook
+ const { data: createdWebhook } = await octokit.rest.repos.createWebhook(
+ {
+ owner: props.owner,
+ repo: props.repository,
+ name: "web", // GitHub webhook type
+ config: webhookConfig,
+ events,
+ active,
+ },
+ );
+
+ webhookData = createdWebhook;
+ webhookId = createdWebhook.id;
+ }
+
+ // Return webhook details
+ return this({
+ id: `${props.owner}/${props.repository}/webhook/${webhookId}`,
+ webhookId,
+ owner: props.owner,
+ repository: props.repository,
+ url: props.url,
+ secret: props.secret,
+ contentType: props.contentType || "application/json",
+ insecureSsl: props.insecureSsl,
+ active: props.active,
+ events: props.events || ["push"],
+ token: props.token,
+ createdAt: webhookData.created_at,
+ updatedAt: webhookData.updated_at,
+ pingUrl: webhookData.ping_url,
+ testUrl: webhookData.test_url,
+ });
+ } catch (error: any) {
+ if (
+ error.status === 403 &&
+ error.message?.includes("Must have admin rights")
+ ) {
+ logger.error(
+ "\n⚠️ Error creating/updating GitHub webhook: You must have admin rights to the repository.",
+ );
+ logger.error(
+ "Make sure your GitHub token has the required permissions (repo scope for private repos).\n",
+ );
+ } else if (error.status === 422) {
+ logger.error(
+ "\n⚠️ Error creating/updating GitHub webhook: Invalid webhook configuration.",
+ );
+ logger.error("Check your webhook URL and event configuration.\n");
+ } else {
+ logger.error("Error creating/updating GitHub webhook:", error.message);
+ }
+ throw error;
+ }
+ },
+);
diff --git a/alchemy/src/os/exec.ts b/alchemy/src/os/exec.ts
index 30968a83a..2b64e0041 100644
--- a/alchemy/src/os/exec.ts
+++ b/alchemy/src/os/exec.ts
@@ -266,30 +266,72 @@ const defaultOptions: SpawnOptions = {
shell: true,
};
+/**
+ * Options for exec function
+ */
+export interface ExecOptions extends Partial