commit 177705cfb1bcf61f93d674c2240b97b6a5b43ec4 Author: Gib Date: Fri Jun 20 17:01:22 2025 -0500 Going from down to up, we are stopping at prettierrc as far as making sure we have everything configured. diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..8176828 --- /dev/null +++ b/.env.example @@ -0,0 +1,34 @@ +# When adding additional environment variables, the schema in "/src/env.js" +# should be updated accordingly. + +# Example: +# SERVERVAR="foo" +# NEXT_PUBLIC_CLIENTVAR="bar" + +### Server Variables ### +# Next Variables # Default Values: +NODE_ENV= # development +SKIP_ENV_VALIDATION= # false +# Sentry Variables # Default Values: +SENTRY_AUTH_TOKEN= +CI= # true + +### Client Variables ### +# Next Variables # Default Values: +NEXT_PUBLIC_SITE_URL= # http://localhost:3000 +# Supabase Variables +NEXT_PUBLIC_SUPABASE_URL= +NEXT_PUBLIC_SUPABASE_ANON_KEY= +# Sentry Variables # Default Values +NEXT_PUBLIC_SENTRY_DSN= +NEXT_PUBLIC_SENTRY_URL= # https://sentry.gbrown.org +NEXT_PUBLIC_SENTRY_ORG= # gib +NEXT_PUBLIC_SENTRY_PROJECT_NAME= + +# Drizzle & Supabase CLI Variables + # Default Values: +DB_USER= # postgres +DB_PASSWORD= +DB_HOST= # localhost +DB_PORT= # 5432 +DB_NAME= # postgres diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..c24a835 --- /dev/null +++ b/.gitignore @@ -0,0 +1,46 @@ +# See https://help.github.com/articles/ignoring-files/ for more about ignoring files. + +# dependencies +/node_modules +/.pnp +.pnp.js + +# testing +/coverage + +# database +/prisma/db.sqlite +/prisma/db.sqlite-journal +db.sqlite + +# next.js +/.next/ +/out/ +next-env.d.ts + +# production +/build + +# misc +.DS_Store +*.pem + +# debug +npm-debug.log* +yarn-debug.log* +yarn-error.log* +.pnpm-debug.log* + +# local env files +# do not commit any .env files to git, except for the .env.example file. https://create.t3.gg/en/usage/env-variables#using-environment-variables +.env +.env*.local + +# vercel +.vercel + +# typescript +*.tsbuildinfo + +# idea files +.idea \ No newline at end of file diff --git a/.prettierrc b/.prettierrc new file mode 100644 index 0000000..75ebcfc --- /dev/null +++ b/.prettierrc @@ -0,0 +1,6 @@ +{ + "singleQuote": true, + "jsxSingleQuote": true, + "trailingComma": "all", + "tabWidth": 2 +} diff --git a/README.md b/README.md new file mode 100644 index 0000000..9f8c2c9 --- /dev/null +++ b/README.md @@ -0,0 +1,124 @@ +

+<div align="center">
+  <h1>Next Template</h1>
+</div>
+
+How to run:
+
+### Clone the Repository & Install Dependencies
+
+```bash
+git clone https://git.gbrown.org/gib/next-template.git
+```
+
+```bash
+cd next-template
+```
+
+I would recommend using [bun](https://bun.sh/) to install dependencies.
+
+```bash
+bun install
+```
+
+You will also need Docker installed on whatever host you plan to run the Supabase instance from, whether that is your local machine, a home server, or a VPS. Or you can just use the hosted Supabase SaaS, which is probably much easier. I wouldn't know!
+
+### Add your environment variables
+
+Copy each example environment file to a file named `.env` in the same directory.
+
+```bash
+cp ./.env.example ./.env
+```
+
+```bash
+cp ./scripts/supabase/docker/.env.example ./scripts/supabase/docker/.env
+```
+
+Add your secrets to the `.env` files you just copied.
+
+### Host Supabase Locally
+
+- Follow the instructions [here](https://supabase.com/docs/guides/self-hosting/docker) to host Supabase with Docker.
+- You will need some way to connect to the Postgres database from the host. I had to remove the database port from supabase-pooler and add it to supabase-db in order to connect to it directly. This matters for generating your types. It is not strictly necessary, though, and the docker compose in this repo may already be set up this way, since I can't see why you would want the pooler port open on the host anyway.
+
+### Create your database schema & generate your types
+
+- Copy the contents of the schema file located at `./scripts/supabase/db/schema.sql` & paste it into the SQL editor in the Web UI of your Supabase instance. Run the SQL. There should be no errors, & you should now see the profiles & statuses tables in the table editor.
+
+```bash
+cat ./scripts/supabase/db/schema.sql | wl-copy # If you are on Linux (& using Wayland & have wl-copy installed)
+```
+
+- Generate your types.
+  - This can be a bit awkward depending on your setup. If you are running Supabase locally on the same host as your dev server, it is straightforward. If you are using the Supabase SaaS, it is even more straightforward. If, like me, you develop against a self-hosted Supabase instance on a home server, you must clone this repository on that server so the CLI can generate the types from the open Postgres port on the host, which is why the docker compose is configured the way it is & why I mentioned this earlier.
+
+In my experience you will need to run the Supabase CLI with sudo. I would recommend first running:
+
+```bash
+sudo npx supabase --help
+```
+
+You will be prompted to install the Supabase CLI if it is not already installed, which it probably is not, since root is running it. After that, run the command below, replacing the password & port to match your own Supabase Postgres database.
+
+```bash
+sudo npx supabase gen types typescript \
+--db-url "postgres://postgres:password@localhost:5432/postgres" \
+--schema public \
+> ./src/utils/supabase/types.ts
+```
+
+There is also a script in the `scripts` folder called `generate_types` which *should* do this for you.
+
+```bash
+./scripts/generate_types
+```
+
+### Start your development environment
+
+Run
+
+```bash
+bun dev
+```
+
+to start your development environment with Turbopack.
+
+You can also run
+
+```bash
+bun dev:slow
+```
+
+to start your development environment with Webpack.
+
+### Start your Production Environment
+
+There are a Dockerfile & docker compose file for the Next.js site in the `./scripts/next/docker` folder. There is also a script called `update_container` in the `./scripts/next/` folder, created to quickly update the container, but the steps below give you a better idea of what it does. First, build the image with
+
+```bash
+sudo docker compose -f ./scripts/next/docker/compose.yaml build
+```
+
+then run the container with
+
+```bash
+sudo docker compose -f ./scripts/next/docker/compose.yaml up -d
+```
+
+You may end up with some build errors. The `update_container` script swaps out the Next config before running the docker build to skip any build errors, so you may want to do the same, though you are of course welcome to fix the build errors instead!
+
+### Fin
+
+I am sure I am missing a lot of stuff, so feel free to open an issue if you have any questions or feel that I should add something here!
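+### Aside: Using the Generated Types
+
+As a quick illustration of what the generated types buy you, here is a minimal sketch of a typed Supabase client. It assumes the CLI wrote its default `Database` export to `./src/utils/supabase/types.ts` (the output path used above), that the `@/` alias maps to `src/`, and the `statuses` query at the end is purely illustrative.
+
+```ts
+import { createClient } from '@supabase/supabase-js';
+import { type Database } from '@/utils/supabase/types';
+
+// Passing the generated Database type makes every .from()/.select() call
+// type-checked against the actual schema.
+export const supabase = createClient<Database>(
+  process.env.NEXT_PUBLIC_SUPABASE_URL!,
+  process.env.NEXT_PUBLIC_SUPABASE_ANON_KEY!,
+);
+
+// Row types can be derived straight from the generated schema:
+export type Profile = Database['public']['Tables']['profiles']['Row'];
+
+// Example: fetch the most recent statuses, fully typed.
+export const getRecentStatuses = async () => {
+  const { data, error } = await supabase
+    .from('statuses')
+    .select('*')
+    .order('created_at', { ascending: false })
+    .limit(20);
+  if (error) throw error;
+  return data;
+};
+```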
diff --git a/bun.lockb b/bun.lockb new file mode 100755 index 0000000..5ed3a69 Binary files /dev/null and b/bun.lockb differ diff --git a/components.json b/components.json new file mode 100644 index 0000000..a450d2f --- /dev/null +++ b/components.json @@ -0,0 +1,21 @@ +{ + "$schema": "https://ui.shadcn.com/schema.json", + "style": "new-york", + "rsc": true, + "tsx": true, + "tailwind": { + "config": "", + "css": "src/styles/globals.css", + "baseColor": "zinc", + "cssVariables": true, + "prefix": "" + }, + "aliases": { + "components": "@/components", + "utils": "@/lib/utils", + "ui": "@/components/ui", + "lib": "@/lib", + "hooks": "@/hooks" + }, + "iconLibrary": "lucide" +} \ No newline at end of file diff --git a/drizzle.config.ts b/drizzle.config.ts new file mode 100644 index 0000000..6267e6b --- /dev/null +++ b/drizzle.config.ts @@ -0,0 +1,11 @@ +import { type Config } from 'drizzle-kit'; + +import { env } from '@/env'; + +export default { + schema: './src/server/db/schema.ts', + dialect: 'postgresql', + dbCredentials: { + url: `postgresql://${env.DB_USER}:${env.DB_PASSWORD}@${env.DB_HOST}:${env.DB_PORT}/${env.DB_NAME}`, + }, +} satisfies Config; diff --git a/eslint.config.js b/eslint.config.js new file mode 100644 index 0000000..abe4ec5 --- /dev/null +++ b/eslint.config.js @@ -0,0 +1,66 @@ +import { FlatCompat } from '@eslint/eslintrc'; +import tseslint from 'typescript-eslint'; +// @ts-ignore -- no types for this plugin +import drizzle from 'eslint-plugin-drizzle'; +import eslintPluginPrettierRecommended from 'eslint-plugin-prettier/recommended'; + +const compat = new FlatCompat({ + baseDirectory: import.meta.dirname, +}); + +export default tseslint.config( + { + ignores: ['.next'], + }, + ...compat.extends('next/core-web-vitals'), + { + files: ['**/*.ts', '**/*.tsx'], + plugins: { + drizzle, + }, + extends: [ + ...tseslint.configs.recommended, + ...tseslint.configs.recommendedTypeChecked, + ...tseslint.configs.stylisticTypeChecked, + //eslintPluginPrettierRecommended, + ], + rules: { + '@typescript-eslint/array-type': 'off', + '@typescript-eslint/consistent-type-definitions': 'off', + '@typescript-eslint/consistent-type-imports': [ + 'warn', + { prefer: 'type-imports', fixStyle: 'inline-type-imports' }, + ], + '@typescript-eslint/no-unused-vars': [ + 'warn', + { argsIgnorePattern: '^_' }, + ], + '@typescript-eslint/require-await': 'off', + '@typescript-eslint/no-misused-promises': [ + 'error', + { checksVoidReturn: { attributes: false } }, + ], + 'drizzle/enforce-delete-with-where': [ + 'error', + { drizzleObjectName: ['db', 'ctx.db'] }, + ], + 'drizzle/enforce-update-with-where': [ + 'error', + { drizzleObjectName: ['db', 'ctx.db'] }, + ], + '@typescript-eslint/no-explicit-any': 'warn', + '@typescript-eslint/no-floating-promises': 'warn', + '@typescript-eslint/no-unsafe-argument': 'warn', + }, + }, + { + linterOptions: { + reportUnusedDisableDirectives: true, + }, + languageOptions: { + parserOptions: { + projectService: true, + }, + }, + }, +); diff --git a/next.config.js b/next.config.js new file mode 100644 index 0000000..bbf2912 --- /dev/null +++ b/next.config.js @@ -0,0 +1,70 @@ +/* Run `build` or `dev` with `SKIP_ENV_VALIDATION` to skip env validation. + * This is especially useful for Docker builds. 
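+ * (The validation itself lives in ./src/env.js, built with @t3-oss/env-nextjs
+ * and zod; SKIP_ENV_VALIDATION short-circuits those schema checks.)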
+ */ +import './src/env.js'; +import { withSentryConfig } from '@sentry/nextjs'; +import { withPlausibleProxy } from 'next-plausible'; + +/** @type {import("next").NextConfig} */ +const config = withPlausibleProxy({ + customDomain: 'https://plausible.gbrown.org', +})({ + output: 'standalone', + images: { + remotePatterns: [ + { + protocol: 'https', + hostname: '*.gbrown.org', + }, + ], + }, + serverExternalPackages: ['require-in-the-middle'], + experimental: { + serverActions: { + bodySizeLimit: '10mb', + }, + }, + turbopack: { + rules: { + '*.svg': { + loaders: [ + { + loader: '@svgr/webpack', + options: { + icon: true, + }, + }, + ], + as: '*.js', + }, + }, + }, +}); + +const sentryConfig = { + // For all available options, see: + // https://www.npmjs.com/package/@sentry/webpack-plugin#options + org: 'gib', + project: process.env.NEXT_PUBLIC_SENTRY_PROJECT_NAME, + sentryUrl: process.env.NEXT_PUBLIC_SENTRY_URL, + authToken: process.env.SENTRY_AUTH_TOKEN, + // Only print logs for uploading source maps in CI + silent: !process.env.CI, + // For all available options, see: + // https://docs.sentry.io/platforms/javascript/guides/nextjs/manual-setup/ + // Upload a larger set of source maps for prettier stack traces (increases build time) + widenClientFileUpload: true, + // Route browser requests to Sentry through a Next.js rewrite to circumvent ad-blockers. + // This can increase your server load as well as your hosting bill. + // Note: Check that the configured route will not match with your Next.js middleware, otherwise reporting of client- + // side errors will fail. + tunnelRoute: '/monitoring', + // Automatically tree-shake Sentry logger statements to reduce bundle size + disableLogger: true, + // Capture React Component Names + reactComponentAnnotation: { + enabled: true, + }, +}; + +export default withSentryConfig(config, sentryConfig); diff --git a/package.json b/package.json new file mode 100644 index 0000000..328b2cf --- /dev/null +++ b/package.json @@ -0,0 +1,87 @@ +{ + "name": "next-template", + "version": "0.1.0", + "private": true, + "type": "module", + "scripts": { + "build": "next build", + "check": "next lint && tsc --noEmit", + "db:generate": "drizzle-kit generate", + "db:migrate": "drizzle-kit migrate", + "db:push": "drizzle-kit push", + "db:studio": "drizzle-kit studio", + "dev": "next dev --turbo", + "dev:slow": "next dev", + "format:check": "prettier --check \"**/*.{ts,tsx,js,jsx,mdx}\" --cache", + "format:write": "prettier --write \"**/*.{ts,tsx,js,jsx,mdx}\" --cache", + "lint": "next lint", + "lint:fix": "next lint --fix", + "preview": "next build && next start", + "start": "next start", + "typecheck": "tsc --noEmit" + }, + "dependencies": { + "@hookform/resolvers": "^5.1.1", + "@radix-ui/react-avatar": "^1.1.10", + "@radix-ui/react-checkbox": "^1.3.2", + "@radix-ui/react-dialog": "^1.1.14", + "@radix-ui/react-dropdown-menu": "^2.1.15", + "@radix-ui/react-label": "^2.1.7", + "@radix-ui/react-popover": "^1.1.14", + "@radix-ui/react-progress": "^1.1.7", + "@radix-ui/react-slot": "^1.2.3", + "@sentry/nextjs": "^9.30.0", + "@supabase/ssr": "^0.6.1", + "@supabase/supabase-js": "^2.50.0", + "@t3-oss/env-nextjs": "^0.12.0", + "@tanstack/react-query": "^5.80.10", + "@tanstack/react-table": "^8.21.3", + "class-variance-authority": "^0.7.1", + "clsx": "^2.1.1", + "cmdk": "^1.1.1", + "date-fns": "^4.1.0", + "drizzle-orm": "^0.41.0", + "import-in-the-middle": "^1.14.2", + "lucide-react": "^0.518.0", + "next": "^15.3.4", + "next-plausible": "^3.12.4", + "postgres": "^3.4.7", + 
"react": "^19.1.0", + "react-day-picker": "^9.7.0", + "react-dom": "^19.1.0", + "react-hook-form": "^7.58.1", + "tailwind-merge": "^3.3.1", + "vaul": "^1.1.2", + "zod": "^3.25.67" + }, + "devDependencies": { + "@eslint/eslintrc": "^3.3.1", + "@tailwindcss/postcss": "^4.1.10", + "@types/cors": "^2.8.19", + "@types/express": "^5.0.3", + "@types/node": "^20.19.1", + "@types/react": "^19.1.8", + "@types/react-dom": "^19.1.6", + "drizzle-kit": "^0.30.6", + "eslint": "^9.29.0", + "eslint-config-next": "^15.3.4", + "eslint-config-prettier": "^10.1.5", + "eslint-plugin-drizzle": "^0.2.3", + "eslint-plugin-prettier": "^5.5.0", + "postcss": "^8.5.6", + "prettier": "^3.5.3", + "prettier-plugin-tailwindcss": "^0.6.13", + "tailwindcss": "^4.1.10", + "tw-animate-css": "^1.3.4", + "typescript": "^5.8.3", + "typescript-eslint": "^8.34.1" + }, + "ct3aMetadata": { + "initVersion": "7.39.3" + }, + "trustedDependencies": [ + "@sentry/cli", + "@tailwindcss/oxide", + "unrs-resolver" + ] +} diff --git a/postcss.config.js b/postcss.config.js new file mode 100644 index 0000000..a34a3d5 --- /dev/null +++ b/postcss.config.js @@ -0,0 +1,5 @@ +export default { + plugins: { + '@tailwindcss/postcss': {}, + }, +}; diff --git a/prettier.config.js b/prettier.config.js new file mode 100644 index 0000000..09f0482 --- /dev/null +++ b/prettier.config.js @@ -0,0 +1,4 @@ +/** @type {import('prettier').Config & import('prettier-plugin-tailwindcss').PluginOptions} */ +export default { + plugins: ['prettier-plugin-tailwindcss'], +}; diff --git a/public/favicon.ico b/public/favicon.ico new file mode 100644 index 0000000..b5336a4 Binary files /dev/null and b/public/favicon.ico differ diff --git a/scripts/files_to_clipboard b/scripts/files_to_clipboard new file mode 100755 index 0000000..9be0ff2 --- /dev/null +++ b/scripts/files_to_clipboard @@ -0,0 +1,99 @@ +#!/usr/bin/env python3 + +import os +import sys +import argparse +from pathlib import Path +import pyperclip +import questionary + +# List of directories to exclude +EXCLUDED_DIRS = {'node_modules', '.next', '.venv', '.git', '__pycache__', '.idea', '.vscode', 'ui'} + +def collect_files(project_path): + """ + Collects files from the project directory, excluding specified directories and filtering by extensions. + Returns a list of file paths relative to the project directory. 
+ """ + collected_files = [] + + for root, dirs, files in os.walk(project_path): + # Exclude specified directories + dirs[:] = [d for d in dirs if d not in EXCLUDED_DIRS] + + for file in files: + file_path = Path(root) / file + relative_path = file_path.relative_to(project_path) + collected_files.append(relative_path) + + return collected_files + +def main(): + # Parse command-line arguments + parser = argparse.ArgumentParser(description='Generate Markdown from selected files.') + parser.add_argument('path', nargs='?', default='.', help='Path to the project directory') + args = parser.parse_args() + + project_path = Path(args.path).resolve() + if not project_path.is_dir(): + print(f"Error: '{project_path}' is not a directory.") + sys.exit(1) + + # Collect files from the project directory + file_list = collect_files(project_path) + + if not file_list: + print("No files found in the project directory with the specified extensions.") + sys.exit(1) + + # Sort file_list for better organization + file_list.sort() + + # Interactive file selection using questionary + print("\nSelect the files you want to include:") + selected_files = questionary.checkbox( + "Press space to select files, and Enter when you're done:", + choices=[str(f) for f in file_list] + ).ask() + + if not selected_files: + print("No files selected.") + sys.exit(1) + + # Generate markdown + markdown_lines = [] + markdown_lines.append('') + + for selected_file in selected_files: + file_path = project_path / selected_file + try: + with open(file_path, 'r', encoding='utf-8') as f: + content = f.read() + # Determine the language for code block from file extension + language = file_path.suffix.lstrip('.') + markdown_lines.append(f'{selected_file}') + markdown_lines.append(f'```{language}') + markdown_lines.append(content) + markdown_lines.append('```') + markdown_lines.append('') + except Exception as e: + print(f"Error reading file {selected_file}: {e}") + + markdown_text = '\n'.join(markdown_lines) + + # Copy markdown content to clipboard + pyperclip.copy(markdown_text) + print("Markdown content has been copied to the clipboard.") + +if __name__ == "__main__": + # Check if required libraries are installed + try: + import questionary + import pyperclip + except ImportError as e: + missing_module = e.name + print(f"Error: Missing required module '{missing_module}'.") + print(f"Please install it by running: pip install {missing_module}") + sys.exit(1) + + main() diff --git a/scripts/next/config/next.config.build.js b/scripts/next/config/next.config.build.js new file mode 100644 index 0000000..6873c37 --- /dev/null +++ b/scripts/next/config/next.config.build.js @@ -0,0 +1,76 @@ +/* Run `build` or `dev` with `SKIP_ENV_VALIDATION` to skip env validation. + * This is especially useful for Docker builds. 
+ */ +import './src/env.js'; +import { withSentryConfig } from '@sentry/nextjs'; +import { withPlausibleProxy } from 'next-plausible'; + +/** @type {import("next").NextConfig} */ +const config = withPlausibleProxy({ + customDomain: 'https://plausible.gbrown.org', +})({ + output: 'standalone', + images: { + remotePatterns: [ + { + protocol: 'https', + hostname: '*.gbrown.org', + }, + ], + }, + serverExternalPackages: ['require-in-the-middle'], + experimental: { + serverActions: { + bodySizeLimit: '10mb', + }, + }, + typescript: { + ignoreBuildErrors: true, + }, + eslint: { + ignoreDuringBuilds: true, + }, + turbopack: { + rules: { + '*.svg': { + loaders: [ + { + loader: '@svgr/webpack', + options: { + icon: true, + }, + }, + ], + as: '*.js', + }, + }, + }, +}); + +const sentryConfig = { + // For all available options, see: + // https://www.npmjs.com/package/@sentry/webpack-plugin#options + org: 'gib', + project: process.env.NEXT_PUBLIC_SENTRY_PROJECT_NAME, + sentryUrl: process.env.NEXT_PUBLIC_SENTRY_URL, + authToken: process.env.SENTRY_AUTH_TOKEN, + // Only print logs for uploading source maps in CI + silent: !process.env.CI, + // For all available options, see: + // https://docs.sentry.io/platforms/javascript/guides/nextjs/manual-setup/ + // Upload a larger set of source maps for prettier stack traces (increases build time) + widenClientFileUpload: true, + // Route browser requests to Sentry through a Next.js rewrite to circumvent ad-blockers. + // This can increase your server load as well as your hosting bill. + // Note: Check that the configured route will not match with your Next.js middleware, otherwise reporting of client- + // side errors will fail. + tunnelRoute: '/monitoring', + // Automatically tree-shake Sentry logger statements to reduce bundle size + disableLogger: true, + // Capture React Component Names + reactComponentAnnotation: { + enabled: true, + }, +}; + +export default withSentryConfig(config, sentryConfig); diff --git a/scripts/next/config/next.config.default.js b/scripts/next/config/next.config.default.js new file mode 100644 index 0000000..bbf2912 --- /dev/null +++ b/scripts/next/config/next.config.default.js @@ -0,0 +1,70 @@ +/* Run `build` or `dev` with `SKIP_ENV_VALIDATION` to skip env validation. + * This is especially useful for Docker builds. 
+ */
+import './src/env.js';
+import { withSentryConfig } from '@sentry/nextjs';
+import { withPlausibleProxy } from 'next-plausible';
+
+/** @type {import("next").NextConfig} */
+const config = withPlausibleProxy({
+  customDomain: 'https://plausible.gbrown.org',
+})({
+  output: 'standalone',
+  images: {
+    remotePatterns: [
+      {
+        protocol: 'https',
+        hostname: '*.gbrown.org',
+      },
+    ],
+  },
+  serverExternalPackages: ['require-in-the-middle'],
+  experimental: {
+    serverActions: {
+      bodySizeLimit: '10mb',
+    },
+  },
+  turbopack: {
+    rules: {
+      '*.svg': {
+        loaders: [
+          {
+            loader: '@svgr/webpack',
+            options: {
+              icon: true,
+            },
+          },
+        ],
+        as: '*.js',
+      },
+    },
+  },
+});
+
+const sentryConfig = {
+  // For all available options, see:
+  // https://www.npmjs.com/package/@sentry/webpack-plugin#options
+  org: 'gib',
+  project: process.env.NEXT_PUBLIC_SENTRY_PROJECT_NAME,
+  sentryUrl: process.env.NEXT_PUBLIC_SENTRY_URL,
+  authToken: process.env.SENTRY_AUTH_TOKEN,
+  // Only print logs for uploading source maps in CI
+  silent: !process.env.CI,
+  // For all available options, see:
+  // https://docs.sentry.io/platforms/javascript/guides/nextjs/manual-setup/
+  // Upload a larger set of source maps for prettier stack traces (increases build time)
+  widenClientFileUpload: true,
+  // Route browser requests to Sentry through a Next.js rewrite to circumvent ad-blockers.
+  // This can increase your server load as well as your hosting bill.
+  // Note: Check that the configured route will not match with your Next.js middleware, otherwise reporting of client-
+  // side errors will fail.
+  tunnelRoute: '/monitoring',
+  // Automatically tree-shake Sentry logger statements to reduce bundle size
+  disableLogger: true,
+  // Capture React Component Names
+  reactComponentAnnotation: {
+    enabled: true,
+  },
+};
+
+export default withSentryConfig(config, sentryConfig);
diff --git a/scripts/next/docker/Dockerfile b/scripts/next/docker/Dockerfile
new file mode 100644
index 0000000..fb02486
--- /dev/null
+++ b/scripts/next/docker/Dockerfile
@@ -0,0 +1,60 @@
+# syntax=docker/dockerfile:1
+FROM oven/bun:latest AS base
+
+# Install dependencies only when needed
+FROM base AS deps
+# oven/bun is Debian-based, so the Alpine-only libc6-compat shim from the
+# node:alpine examples is neither available nor needed here.
+WORKDIR /app
+
+# Install dependencies with Bun
+COPY package.json bun.lockb* ./
+RUN \
+  if [ -f bun.lockb ]; then bun install --frozen-lockfile; \
+  else echo "Lockfile not found." && exit 1; \
+  fi
+
+# Rebuild the source code only when needed
+FROM base AS builder
+WORKDIR /app
+COPY --from=deps /app/node_modules ./node_modules
+COPY . .
+
+# Next.js collects completely anonymous telemetry data about general usage.
+# Learn more here: https://nextjs.org/telemetry
+# Uncomment the following line in case you want to disable telemetry during the build.
+# ENV NEXT_TELEMETRY_DISABLED=1
+
+RUN bun run build
+
+# Production image, copy all the files and run next
+FROM base AS runner
+WORKDIR /app
+
+ENV NODE_ENV=production
+# Uncomment the following line in case you want to disable telemetry during runtime.
+# ENV NEXT_TELEMETRY_DISABLED=1
+
+RUN addgroup --system --gid 1001 nodejs
+RUN adduser --system --uid 1001 nextjs
+
+COPY --from=builder /app/public ./public
+
+# Set the correct permission for prerender cache
+RUN mkdir .next
+RUN chown nextjs:nodejs .next
+
+# Automatically leverage output traces to reduce image size
+# https://nextjs.org/docs/advanced-features/output-file-tracing
+COPY --from=builder --chown=nextjs:nodejs /app/.next/standalone ./
+COPY --from=builder --chown=nextjs:nodejs /app/.next/static ./.next/static
+
+USER nextjs
+
+EXPOSE 3000
+
+ENV PORT=3000
+# server.js is created by next build from the standalone output
+# https://nextjs.org/docs/pages/api-reference/config/next-config-js/output
+ENV HOSTNAME="0.0.0.0"
+# The oven/bun image does not ship Node.js, so run the standalone server with bun.
+CMD ["bun", "server.js"]
diff --git a/scripts/next/docker/compose.yaml b/scripts/next/docker/compose.yaml
new file mode 100644
index 0000000..d5306df
--- /dev/null
+++ b/scripts/next/docker/compose.yaml
@@ -0,0 +1,16 @@
+services:
+  next-template:
+    build:
+      context: ../../..
+      dockerfile: scripts/next/docker/Dockerfile
+    image: nextjs
+    container_name: next-template
+    networks:
+      - next-template
+    ports:
+      - '3000:3000'
+    tty: true
+    restart: unless-stopped
+networks:
+  next-template:
+    external: true
diff --git a/scripts/next/update_container b/scripts/next/update_container
new file mode 100755
index 0000000..8a2cf35
--- /dev/null
+++ b/scripts/next/update_container
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+git pull
+mv ./next.config.js ./scripts/next/config/next.config.default.js
+cp ./scripts/next/config/next.config.build.js ./next.config.js
+sudo docker compose -f ./scripts/next/docker/compose.yaml down
+sudo docker compose -f ./scripts/next/docker/compose.yaml build
+sudo docker compose -f ./scripts/next/docker/compose.yaml up -d
+cp ./scripts/next/config/next.config.default.js ./next.config.js
diff --git a/scripts/supabase/db/schema.sql b/scripts/supabase/db/schema.sql
new file mode 100644
index 0000000..d67a532
--- /dev/null
+++ b/scripts/supabase/db/schema.sql
@@ -0,0 +1,126 @@
+-- Create a table for public profiles
+create table profiles (
+  id uuid references auth.users on delete cascade not null primary key,
+  updated_at timestamp with time zone,
+  email text unique,
+  full_name text,
+  avatar_url text,
+  provider text,
+
+  constraint full_name_length check (char_length(full_name) >= 3 and char_length(full_name) <= 50)
+);
+-- Set up Row Level Security (RLS)
+-- See https://supabase.com/docs/guides/auth/row-level-security for more details.
+alter table profiles
+  enable row level security;
+
+create policy "Public profiles are viewable by everyone." on profiles
+  for select using (true);
+
+create policy "Users can insert their own profile." on profiles
+  for insert with check ((select auth.uid()) = id);
+
+create policy "Users can update own profile." on profiles
+  for update using ((select auth.uid()) = id);
+
+-- This trigger automatically creates a profile entry when a new user signs up via Supabase Auth.
+-- See https://supabase.com/docs/guides/auth/managing-user-data#using-triggers for more details.
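+-- The function below runs as `security definer` with an empty search_path, so
+-- the trigger on auth.users can insert into public.profiles without relying on
+-- the signing-up user's own privileges or schema resolution.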
+create function public.handle_new_user() +returns trigger +set search_path = '' +as $$ +begin + insert into public.profiles (id, email, full_name, avatar_url, provider, updated_at) + values ( + new.id, + new.email, + new.raw_user_meta_data->>'full_name', + new.raw_user_meta_data->>'avatar_url', + new.raw_user_meta_data->>'provider', + now() + ); + return new; +end; +$$ language plpgsql security definer; +create trigger on_auth_user_created + after insert on auth.users + for each row execute procedure public.handle_new_user(); + +-- Set up Storage! +insert into storage.buckets (id, name) + values ('avatars', 'avatars'); + +-- Set up access controls for storage. +-- See https://supabase.com/docs/guides/storage#policy-examples for more details. +create policy "Avatar images are publicly accessible." on storage.objects + for select using (bucket_id = 'avatars'); + +create policy "Anyone can upload an avatar." on storage.objects + for insert with check (bucket_id = 'avatars'); + +create policy "Anyone can update an avatar." on storage.objects + for update using (bucket_id = 'avatars'); + +create policy "Anyone can delete an avatar." on storage.objects + for delete using (bucket_id = 'avatars'); + +-- Create a table for public statuses +CREATE TABLE statuses ( + id uuid DEFAULT gen_random_uuid() PRIMARY KEY, + user_id uuid REFERENCES public.profiles ON DELETE CASCADE NOT NULL, + updated_by_id uuid REFERENCES public.profiles ON DELETE SET NULL DEFAULT auth.uid(), + created_at timestamp with time zone DEFAULT now() NOT NULL, + status text NOT NULL, + CONSTRAINT status_length CHECK (char_length(status) >= 3 AND char_length(status) <= 80) +); + +-- Set up Row Level Security (RLS) +ALTER TABLE statuses + ENABLE ROW LEVEL SECURITY; + +-- Policies +CREATE POLICY "Public statuses are viewable by everyone." ON statuses + FOR SELECT USING (true); + +-- RECREATE it using the recommended sub-select form +CREATE POLICY "Authenticated users can insert statuses for any user." + ON public.statuses + FOR INSERT + WITH CHECK ( + (SELECT auth.role()) = 'authenticated' + ); + +-- ADD an UPDATE policy so anyone signed-in can update *any* status +CREATE POLICY "Authenticated users can update statuses for any user." + ON public.statuses + FOR UPDATE + USING ( + (SELECT auth.role()) = 'authenticated' + ) + WITH CHECK ( + (SELECT auth.role()) = 'authenticated' + ); + +-- Function to add first status +CREATE FUNCTION public.handle_first_status() +RETURNS TRIGGER +SET search_path = '' +AS $$ +BEGIN + INSERT INTO public.statuses (user_id, updated_by_id, status) + VALUES ( + NEW.id, + NEW.id, + 'Just joined!' 
+ ); + RETURN NEW; +END; +$$ LANGUAGE plpgsql SECURITY DEFINER; + +-- Create a separate trigger for the status +CREATE TRIGGER on_auth_user_created_add_status + AFTER INSERT ON auth.users + FOR EACH ROW EXECUTE PROCEDURE public.handle_first_status(); + +alter publication supabase_realtime add table profiles; +alter publication supabase_realtime add table statuses; diff --git a/scripts/supabase/docker/.env.example b/scripts/supabase/docker/.env.example new file mode 100644 index 0000000..96e8034 --- /dev/null +++ b/scripts/supabase/docker/.env.example @@ -0,0 +1,158 @@ +############ +# Secrets +# YOU MUST CHANGE THESE BEFORE GOING INTO PRODUCTION +############ + +POSTGRES_PASSWORD=your-super-secret-and-long-postgres-password +JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long +ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE +SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q +DASHBOARD_USERNAME=gib +DASHBOARD_PASSWORD=this_password_is_insecure_and_should_be_updated +SECRET_KEY_BASE=UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq +VAULT_ENC_KEY=your-encryption-key-32-chars-min + + +############ +# Database - You can change these to any PostgreSQL database that has logical replication enabled. +############ + +POSTGRES_HOST=db +POSTGRES_DB=postgres +POSTGRES_PORT=5432 +# default user is postgres + + +############ +# Supavisor -- Database pooler +############ +POOLER_PROXY_PORT_TRANSACTION=6543 +POOLER_DEFAULT_POOL_SIZE=20 +POOLER_MAX_CLIENT_CONN=100 +POOLER_TENANT_ID=your-tenant-id # Change me + + +############ +# API Proxy - Configuration for the Kong Reverse proxy. +############ + +KONG_HTTP_PORT=8000 +KONG_HTTPS_PORT=8443 + + +############ +# API - Configuration for PostgREST. +############ + +PGRST_DB_SCHEMAS=public,storage,graphql_public + + +############ +# Auth - Configuration for the GoTrue authentication server. +############ + +## General +SITE_URL=http://localhost:3000 # Change to URL of site used for email links/auth flows +ADDITIONAL_REDIRECT_URLS= # Change to include any redirect URIs needed +JWT_EXPIRY=3600 +DISABLE_SIGNUP=false +API_EXTERNAL_URL=http://localhost:8000 # Should be the same as the SITE URL usually. 
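+# Note: API_EXTERNAL_URL points at the Kong API gateway (KONG_HTTP_PORT, 8000 by
+# default), while SITE_URL points at the Next.js app itself.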
+ +## Mailer Config +MAILER_URLPATHS_CONFIRMATION="/auth/callback" +MAILER_URLPATHS_INVITE="/auth/callback" +MAILER_URLPATHS_RECOVERY="/auth/callback" +MAILER_URLPATHS_EMAIL_CHANGE="/auth/callback" + +## Email auth +ENABLE_EMAIL_SIGNUP=true +ENABLE_EMAIL_AUTOCONFIRM=false +SMTP_ADMIN_EMAIL=admin@example.com +SMTP_HOST=supabase-mail +SMTP_PORT=2500 +SMTP_USER=fake_mail_user +SMTP_PASS=fake_mail_password +SMTP_SENDER_NAME=fake_sender +ENABLE_ANONYMOUS_USERS=false + + +MAILER_TEMPLATES_INVITE="https://git.gbrown.org/gib/tech-tracker-next/raw/branch/main/src/server/mail_templates/invite_user.html" +MAILER_TEMPLATES_CONFIRMATION="https://git.gbrown.org/gib/tech-tracker-next/raw/branch/main/src/server/mail_templates/confirm_signup.html" +MAILER_TEMPLATES_RECOVERY="https://git.gbrown.org/gib/tech-tracker-next/raw/branch/main/src/server/mail_templates/reset_password.html" +MAILER_TEMPLATES_MAGIC_LINK="https://git.gbrown.org/gib/tech-tracker-next/raw/branch/main/src/server/mail_templates/magic_link.html" +MAILER_TEMPLATES_EMAIL_CHANGE="https://git.gbrown.org/gib/tech-tracker-next/raw/branch/main/src/server/mail_templates/change_email_address.html" + +MAILER_SUBJECTS_INVITE="You've Been Invited!" +MAILER_SUBJECTS_CONFIRMATION="Confirm Your Email" +MAILER_SUBJECTS_RECOVERY="Reset Password" +MAILER_SUBJECTS_MAGIC_LINK="Magic Sign In Link" +MAILER_SUBJECTS_EMAIL_CHANGE="Change Email Address" + + +## Phone auth +ENABLE_PHONE_SIGNUP=false +ENABLE_PHONE_AUTOCONFIRM=false + + +# Apple Auth +APPLE_ENABLED=true +APPLE_CLIENT_ID= +APPLE_SECRET= +APPLE_REDIRECT_URI= +APPLE_TEAM_ID= +APPLE_KEY_ID= + +# Azure Auth +AZURE_ENABLED=true +AZURE_CLIENT_ID= +AZURE_SECRET= +AZURE_REDIRECT_URI= +AZURE_TENANT_ID= +AZURE_TENANT_URL= + +# Gib's Auth (Trying to set up Authentik) +#SAML_ENABLED=false +#SAML_PRIVATE_KEY= + + +############ +# Studio - Configuration for the Dashboard +############ + +STUDIO_DEFAULT_ORGANIZATION=gbrown +STUDIO_DEFAULT_PROJECT=Default Project + +STUDIO_PORT=3000 +# replace if you intend to use Studio outside of localhost +SUPABASE_PUBLIC_URL=https://localhost:8000 # Change to URL for this supabase instance + +# Enable webp support +IMGPROXY_ENABLE_WEBP_DETECTION=true + +# Add your OpenAI API key to enable SQL Editor Assistant +OPENAI_API_KEY= + + +############ +# Functions - Configuration for Functions +############ +# NOTE: VERIFY_JWT applies to all functions. Per-function VERIFY_JWT is not supported yet. +FUNCTIONS_VERIFY_JWT=false + + +############ +# Logs - Configuration for Logflare +# Please refer to https://supabase.com/docs/reference/self-hosting-analytics/introduction +############ + +LOGFLARE_LOGGER_BACKEND_API_KEY=your-super-secret-and-long-logflare-key + +# Change vector.toml sinks to reflect this change +LOGFLARE_API_KEY=your-super-secret-and-long-logflare-key + +# Docker socket location - this value will differ depending on your OS +DOCKER_SOCKET_LOCATION=/var/run/docker.sock + +# Google Cloud Project details +#GOOGLE_PROJECT_ID=GOOGLE_PROJECT_ID +#GOOGLE_PROJECT_NUMBER=GOOGLE_PROJECT_NUMBER diff --git a/scripts/supabase/docker/docker-compose.dev.yml b/scripts/supabase/docker/docker-compose.dev.yml new file mode 100644 index 0000000..85cb604 --- /dev/null +++ b/scripts/supabase/docker/docker-compose.dev.yml @@ -0,0 +1,41 @@ +networks: + supabase-network: + name: supabase-network + driver: bridge + ipam: + config: + - subnet: 172.20.0.0/16 +services: + studio: + build: + context: . 
+ dockerfile: studio/Dockerfile + target: dev + networks: [supabase-network] + ports: + - 8082:8082 + mail: + container_name: supabase-mail + image: inbucket/inbucket:3.0.3 + networks: [supabase-network] + ports: + - '2500:2500' # SMTP + - '9000:9000' # web interface + - '1100:1100' # POP3 + auth: + environment: + - GOTRUE_SMTP_USER= + - GOTRUE_SMTP_PASS= + meta: + ports: + - 5555:8080 + db: + restart: 'no' + volumes: + # Always use a fresh database when developing + - /var/lib/postgresql/data + # Seed data should be inserted last (alphabetical order) + - ../db/schema.sql:/docker-entrypoint-initdb.d/seed.sql + storage: + volumes: + - /var/lib/storage diff --git a/scripts/supabase/docker/docker-compose.s3.yml b/scripts/supabase/docker/docker-compose.s3.yml new file mode 100644 index 0000000..18c7866 --- /dev/null +++ b/scripts/supabase/docker/docker-compose.s3.yml @@ -0,0 +1,105 @@ + +networks: + supabase-network: + name: supabase-network + driver: bridge + ipam: + config: + - subnet: 172.20.0.0/16 +services: + minio: + image: minio/minio + networks: [supabase-network] + ports: + - '9000:9000' + - '9001:9001' + environment: + MINIO_ROOT_USER: supa-storage + MINIO_ROOT_PASSWORD: secret1234 + command: server --console-address ":9001" /data + healthcheck: + test: [ "CMD", "curl", "-f", "http://minio:9000/minio/health/live" ] + interval: 2s + timeout: 10s + retries: 5 + volumes: + - ./volumes/storage:/data:z + + minio-createbucket: + image: minio/mc + networks: [supabase-network] + depends_on: + minio: + condition: service_healthy + entrypoint: > + /bin/sh -c " + /usr/bin/mc alias set supa-minio http://minio:9000 supa-storage secret1234; + /usr/bin/mc mb supa-minio/stub; + exit 0; + " + + storage: + container_name: supabase-storage + image: supabase/storage-api:v1.11.13 + networks: [supabase-network] + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + rest: + condition: service_started + imgproxy: + condition: service_started + minio: + condition: service_healthy + healthcheck: + test: + [ + "CMD", + "wget", + "--no-verbose", + "--tries=1", + "--spider", + "http://localhost:5000/status" + ] + timeout: 5s + interval: 5s + retries: 3 + restart: unless-stopped + environment: + ANON_KEY: ${ANON_KEY} + SERVICE_KEY: ${SERVICE_ROLE_KEY} + POSTGREST_URL: http://rest:3000 + PGRST_JWT_SECRET: ${JWT_SECRET} + DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} + FILE_SIZE_LIMIT: 52428800 + STORAGE_BACKEND: s3 + GLOBAL_S3_BUCKET: stub + GLOBAL_S3_ENDPOINT: http://minio:9000 + GLOBAL_S3_PROTOCOL: http + GLOBAL_S3_FORCE_PATH_STYLE: true + AWS_ACCESS_KEY_ID: supa-storage + AWS_SECRET_ACCESS_KEY: secret1234 + AWS_DEFAULT_REGION: stub + FILE_STORAGE_BACKEND_PATH: /var/lib/storage + TENANT_ID: stub + # TODO: https://github.com/supabase/storage-api/issues/55 + REGION: stub + ENABLE_IMAGE_TRANSFORMATION: "true" + IMGPROXY_URL: http://imgproxy:5001 + volumes: + - ./volumes/storage:/var/lib/storage:z + + imgproxy: + container_name: supabase-imgproxy + image: darthsim/imgproxy:v3.8.0 + networks: [supabase-network] + healthcheck: + test: [ "CMD", "imgproxy", "health" ] + timeout: 5s + interval: 5s + retries: 3 + environment: + IMGPROXY_BIND: ":5001" + IMGPROXY_USE_ETAG: "true" + IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION} diff --git a/scripts/supabase/docker/docker-compose.yml b/scripts/supabase/docker/docker-compose.yml new file mode 100644 index 0000000..2136295 --- 
/dev/null +++ b/scripts/supabase/docker/docker-compose.yml @@ -0,0 +1,579 @@ +# Usage +# Start: docker compose up +# With helpers: docker compose -f docker-compose.yml -f ./dev/docker-compose.dev.yml up +# Stop: docker compose down +# Destroy: docker compose -f docker-compose.yml -f ./dev/docker-compose.dev.yml down -v --remove-orphans +# Reset everything: ./reset.sh + +name: techtracker + +networks: + techtracker: + name: techtracker + driver: bridge + ipam: + config: + - subnet: 172.19.0.0/16 + +services: + + studio: + container_name: supabase-studio + image: supabase/studio:2025.05.19-sha-3487831 + networks: [techtracker] + restart: unless-stopped + healthcheck: + test: + [ + "CMD", + "node", + "-e", + "fetch('http://studio:3000/api/platform/profile').then((r) => {if (r.status !== 200) throw new Error(r.status)})" + ] + timeout: 10s + interval: 5s + retries: 3 + depends_on: + analytics: + condition: service_healthy + environment: + STUDIO_PG_META_URL: http://meta:8080 + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + + DEFAULT_ORGANIZATION_NAME: ${STUDIO_DEFAULT_ORGANIZATION} + DEFAULT_PROJECT_NAME: ${STUDIO_DEFAULT_PROJECT} + OPENAI_API_KEY: ${OPENAI_API_KEY:-} + + SUPABASE_URL: http://kong:8000 + SUPABASE_PUBLIC_URL: ${SUPABASE_PUBLIC_URL} + SUPABASE_ANON_KEY: ${ANON_KEY} + SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY} + AUTH_JWT_SECRET: ${JWT_SECRET} + + LOGFLARE_API_KEY: ${LOGFLARE_API_KEY} + LOGFLARE_URL: http://analytics:4000 + NEXT_PUBLIC_ENABLE_LOGS: true + # Comment to use Big Query backend for analytics + NEXT_ANALYTICS_BACKEND_PROVIDER: postgres + # Uncomment to use Big Query backend for analytics + # NEXT_ANALYTICS_BACKEND_PROVIDER: bigquery + + kong: + container_name: supabase-kong + image: kong:2.8.1 + networks: [techtracker] + restart: unless-stopped + ports: + - ${KONG_HTTP_PORT}:8000/tcp + - ${KONG_HTTPS_PORT}:8443/tcp + volumes: + # https://github.com/supabase/supabase/issues/12661 + - ./volumes/api/kong.yml:/home/kong/temp.yml:ro,z + depends_on: + analytics: + condition: service_healthy + environment: + KONG_DATABASE: "off" + KONG_DECLARATIVE_CONFIG: /home/kong/kong.yml + # https://github.com/supabase/cli/issues/14 + KONG_DNS_ORDER: LAST,A,CNAME + KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth + KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k + KONG_NGINX_PROXY_PROXY_BUFFERS: 64 160k + SUPABASE_ANON_KEY: ${ANON_KEY} + SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY} + DASHBOARD_USERNAME: ${DASHBOARD_USERNAME} + DASHBOARD_PASSWORD: ${DASHBOARD_PASSWORD} + # https://unix.stackexchange.com/a/294837 + entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start' + + auth: + container_name: supabase-auth + image: supabase/gotrue:v2.172.1 + networks: [techtracker] + restart: unless-stopped + healthcheck: + test: + [ + "CMD", + "wget", + "--no-verbose", + "--tries=1", + "--spider", + "http://localhost:9999/health" + ] + timeout: 5s + interval: 5s + retries: 3 + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + analytics: + condition: service_healthy + environment: + GOTRUE_API_HOST: 0.0.0.0 + GOTRUE_API_PORT: 9999 + API_EXTERNAL_URL: ${API_EXTERNAL_URL} + + GOTRUE_DB_DRIVER: postgres + GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} + + GOTRUE_SITE_URL: ${SITE_URL} + GOTRUE_URI_ALLOW_LIST: ${ADDITIONAL_REDIRECT_URLS} + GOTRUE_DISABLE_SIGNUP: ${DISABLE_SIGNUP} + + GOTRUE_JWT_ADMIN_ROLES: 
service_role + GOTRUE_JWT_AUD: authenticated + GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated + GOTRUE_JWT_EXP: ${JWT_EXPIRY} + GOTRUE_JWT_SECRET: ${JWT_SECRET} + + GOTRUE_EXTERNAL_EMAIL_ENABLED: ${ENABLE_EMAIL_SIGNUP} + GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED: ${ENABLE_ANONYMOUS_USERS} + GOTRUE_MAILER_AUTOCONFIRM: ${ENABLE_EMAIL_AUTOCONFIRM} + + # Uncomment to bypass nonce check in ID Token flow. Commonly set to true when using Google Sign In on mobile. + # GOTRUE_EXTERNAL_SKIP_NONCE_CHECK: true + + # GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED: true + # GOTRUE_SMTP_MAX_FREQUENCY: 1s + GOTRUE_SMTP_ADMIN_EMAIL: ${SMTP_ADMIN_EMAIL} + GOTRUE_SMTP_HOST: ${SMTP_HOST} + GOTRUE_SMTP_PORT: ${SMTP_PORT} + GOTRUE_SMTP_USER: ${SMTP_USER} + GOTRUE_SMTP_PASS: ${SMTP_PASS} + GOTRUE_SMTP_SENDER_NAME: ${SMTP_SENDER_NAME} + GOTRUE_MAILER_URLPATHS_INVITE: ${MAILER_URLPATHS_INVITE} + GOTRUE_MAILER_URLPATHS_CONFIRMATION: ${MAILER_URLPATHS_CONFIRMATION} + GOTRUE_MAILER_URLPATHS_RECOVERY: ${MAILER_URLPATHS_RECOVERY} + GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: ${MAILER_URLPATHS_EMAIL_CHANGE} + + GOTRUE_EXTERNAL_PHONE_ENABLED: ${ENABLE_PHONE_SIGNUP} + GOTRUE_SMS_AUTOCONFIRM: ${ENABLE_PHONE_AUTOCONFIRM} + + GOTRUE_MAILER_TEMPLATES_INVITE: ${MAILER_TEMPLATES_INVITE} + GOTRUE_MAILER_TEMPLATES_CONFIRMATION: ${MAILER_TEMPLATES_CONFIRMATION} + GOTRUE_MAILER_TEMPLATES_RECOVERY: ${MAILER_TEMPLATES_RECOVERY} + GOTRUE_MAILER_TEMPLATES_MAGIC_LINK: ${MAILER_TEMPLATES_MAGIC_LINK} + GOTRUE_MAILER_TEMPLATES_EMAIL_CHANGE: ${MAILER_TEMPLATES_EMAIL_CHANGE} + + GOTRUE_MAILER_SUBJECTS_CONFIRMATION: ${MAILER_SUBJECTS_CONFIRMATION} + GOTRUE_MAILER_SUBJECTS_RECOVERY: ${MAILER_SUBJECTS_RECOVERY} + GOTRUE_MAILER_SUBJECTS_MAGIC_LINK: ${MAILER_SUBJECTS_MAGIC_LINK} + GOTRUE_MAILER_SUBJECTS_EMAIL_CHANGE: ${MAILER_SUBJECTS_EMAIL_CHANGE} + GOTRUE_MAILER_SUBJECTS_INVITE: ${MAILER_SUBJECTS_INVITE} + + GOTRUE_EXTERNAL_APPLE_ENABLED: ${APPLE_ENABLED} + GOTRUE_EXTERNAL_APPLE_CLIENT_ID: ${APPLE_CLIENT_ID} + GOTRUE_EXTERNAL_APPLE_SECRET: ${APPLE_SECRET} + GOTRUE_EXTERNAL_APPLE_REDIRECT_URI: ${APPLE_REDIRECT_URI} + + GOTRUE_EXTERNAL_AZURE_ENABLED: ${AZURE_ENABLED} + GOTRUE_EXTERNAL_AZURE_CLIENT_ID: ${AZURE_CLIENT_ID} + GOTRUE_EXTERNAL_AZURE_SECRET: ${AZURE_SECRET} + GOTRUE_EXTERNAL_AZURE_TENANT_ID: ${AZURE_TENANT_ID} + GOTRUE_EXTERNAL_AZURE_URL: ${AZURE_TENANT_URL} + GOTRUE_EXTERNAL_AZURE_REDIRECT_URI: ${AZURE_REDIRECT_URI} + + # Uncomment to enable custom access token hook. 
Please see: https://supabase.com/docs/guides/auth/auth-hooks for full list of hooks and additional details about custom_access_token_hook + + # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_ENABLED: "true" + # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_URI: "pg-functions://postgres/public/custom_access_token_hook" + # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_SECRETS: "" + + # GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_ENABLED: "true" + # GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_URI: "pg-functions://postgres/public/mfa_verification_attempt" + + # GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_ENABLED: "true" + # GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_URI: "pg-functions://postgres/public/password_verification_attempt" + + # GOTRUE_HOOK_SEND_SMS_ENABLED: "false" + # GOTRUE_HOOK_SEND_SMS_URI: "pg-functions://postgres/public/custom_access_token_hook" + # GOTRUE_HOOK_SEND_SMS_SECRETS: "v1,whsec_VGhpcyBpcyBhbiBleGFtcGxlIG9mIGEgc2hvcnRlciBCYXNlNjQgc3RyaW5n" + + # GOTRUE_HOOK_SEND_EMAIL_ENABLED: "false" + # GOTRUE_HOOK_SEND_EMAIL_URI: "http://host.docker.internal:54321/functions/v1/email_sender" + # GOTRUE_HOOK_SEND_EMAIL_SECRETS: "v1,whsec_VGhpcyBpcyBhbiBleGFtcGxlIG9mIGEgc2hvcnRlciBCYXNlNjQgc3RyaW5n" + + rest: + container_name: supabase-rest + image: postgrest/postgrest:v12.2.12 + networks: [techtracker] + restart: unless-stopped + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + analytics: + condition: service_healthy + environment: + PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} + PGRST_DB_SCHEMAS: ${PGRST_DB_SCHEMAS} + PGRST_DB_ANON_ROLE: anon + PGRST_JWT_SECRET: ${JWT_SECRET} + PGRST_DB_USE_LEGACY_GUCS: "false" + PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET} + PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXPIRY} + command: + [ + "postgrest" + ] + + realtime: + # This container name looks inconsistent but is correct because realtime constructs tenant id by parsing the subdomain + container_name: realtime-dev.supabase-realtime + image: supabase/realtime:v2.34.47 + networks: [techtracker] + restart: unless-stopped + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + analytics: + condition: service_healthy + healthcheck: + test: + [ + "CMD", + "curl", + "-sSfL", + "--head", + "-o", + "/dev/null", + "-H", + "Authorization: Bearer ${ANON_KEY}", + "http://localhost:4000/api/tenants/realtime-dev/health" + ] + timeout: 5s + interval: 5s + retries: 3 + environment: + PORT: 4000 + DB_HOST: ${POSTGRES_HOST} + DB_PORT: ${POSTGRES_PORT} + DB_USER: supabase_admin + DB_PASSWORD: ${POSTGRES_PASSWORD} + DB_NAME: ${POSTGRES_DB} + DB_AFTER_CONNECT_QUERY: 'SET search_path TO _realtime' + DB_ENC_KEY: supabaserealtime + API_JWT_SECRET: ${JWT_SECRET} + SECRET_KEY_BASE: ${SECRET_KEY_BASE} + ERL_AFLAGS: -proto_dist inet_tcp + DNS_NODES: "''" + RLIMIT_NOFILE: "10000" + APP_NAME: realtime + SEED_SELF_HOST: true + RUN_JANITOR: true + + # To use S3 backed storage: docker compose -f docker-compose.yml -f docker-compose.s3.yml up + storage: + container_name: supabase-storage + image: supabase/storage-api:v1.22.17 + networks: [techtracker] + restart: unless-stopped + volumes: + - ./volumes/storage:/var/lib/storage:z + healthcheck: + test: + [ + "CMD", + "wget", + "--no-verbose", + "--tries=1", + "--spider", + "http://storage:5000/status" + ] + timeout: 5s + interval: 5s + retries: 3 + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + 
rest: + condition: service_started + imgproxy: + condition: service_started + environment: + ANON_KEY: ${ANON_KEY} + SERVICE_KEY: ${SERVICE_ROLE_KEY} + POSTGREST_URL: http://rest:3000 + PGRST_JWT_SECRET: ${JWT_SECRET} + DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} + FILE_SIZE_LIMIT: 52428800 + STORAGE_BACKEND: file + FILE_STORAGE_BACKEND_PATH: /var/lib/storage + TENANT_ID: stub + # TODO: https://github.com/supabase/storage-api/issues/55 + REGION: stub + GLOBAL_S3_BUCKET: stub + ENABLE_IMAGE_TRANSFORMATION: "true" + IMGPROXY_URL: http://imgproxy:5001 + + imgproxy: + container_name: supabase-imgproxy + image: darthsim/imgproxy:v3.8.0 + networks: [techtracker] + restart: unless-stopped + volumes: + - ./volumes/storage:/var/lib/storage:z + healthcheck: + test: + [ + "CMD", + "imgproxy", + "health" + ] + timeout: 5s + interval: 5s + retries: 3 + environment: + IMGPROXY_BIND: ":5001" + IMGPROXY_LOCAL_FILESYSTEM_ROOT: / + IMGPROXY_USE_ETAG: "true" + IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION} + + meta: + container_name: supabase-meta + image: supabase/postgres-meta:v0.89.0 + networks: [techtracker] + restart: unless-stopped + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + analytics: + condition: service_healthy + environment: + PG_META_PORT: 8080 + PG_META_DB_HOST: ${POSTGRES_HOST} + PG_META_DB_PORT: ${POSTGRES_PORT} + PG_META_DB_NAME: ${POSTGRES_DB} + PG_META_DB_USER: supabase_admin + PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD} + + functions: + container_name: supabase-edge-functions + image: supabase/edge-runtime:v1.67.4 + networks: [techtracker] + restart: unless-stopped + volumes: + - ./volumes/functions:/home/deno/functions:Z + depends_on: + analytics: + condition: service_healthy + environment: + JWT_SECRET: ${JWT_SECRET} + SUPABASE_URL: http://kong:8000 + SUPABASE_ANON_KEY: ${ANON_KEY} + SUPABASE_SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY} + SUPABASE_DB_URL: postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} + # TODO: Allow configuring VERIFY_JWT per function. 
This PR might help: https://github.com/supabase/cli/pull/786 + VERIFY_JWT: "${FUNCTIONS_VERIFY_JWT}" + command: + [ + "start", + "--main-service", + "/home/deno/functions/main" + ] + + analytics: + container_name: supabase-analytics + image: supabase/logflare:1.12.0 + networks: [techtracker] + restart: unless-stopped + ports: + - 4000:4000 + # Uncomment to use Big Query backend for analytics + # volumes: + # - type: bind + # source: ${PWD}/gcloud.json + # target: /opt/app/rel/logflare/bin/gcloud.json + # read_only: true + healthcheck: + test: + [ + "CMD", + "curl", + "http://localhost:4000/health" + ] + timeout: 5s + interval: 5s + retries: 10 + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + environment: + LOGFLARE_NODE_HOST: 127.0.0.1 + DB_USERNAME: supabase_admin + DB_DATABASE: _supabase + DB_HOSTNAME: ${POSTGRES_HOST} + DB_PORT: ${POSTGRES_PORT} + DB_PASSWORD: ${POSTGRES_PASSWORD} + DB_SCHEMA: _analytics + LOGFLARE_API_KEY: ${LOGFLARE_API_KEY} + LOGFLARE_SINGLE_TENANT: true + LOGFLARE_SUPABASE_MODE: true + LOGFLARE_MIN_CLUSTER_SIZE: 1 + + # Comment variables to use Big Query backend for analytics + POSTGRES_BACKEND_URL: postgresql://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/_supabase + POSTGRES_BACKEND_SCHEMA: _analytics + LOGFLARE_FEATURE_FLAG_OVERRIDE: multibackend=true + # Uncomment to use Big Query backend for analytics + # GOOGLE_PROJECT_ID: ${GOOGLE_PROJECT_ID} + # GOOGLE_PROJECT_NUMBER: ${GOOGLE_PROJECT_NUMBER} + + # Comment out everything below this point if you are using an external Postgres database + db: + container_name: supabase-db + image: supabase/postgres:15.8.1.060 + networks: [techtracker] + ports: + - ${POSTGRES_PORT}:${POSTGRES_PORT} + restart: unless-stopped + volumes: + - ./volumes/db/realtime.sql:/docker-entrypoint-initdb.d/migrations/99-realtime.sql:Z + # Must be superuser to create event trigger + - ./volumes/db/webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql:Z + # Must be superuser to alter reserved role + - ./volumes/db/roles.sql:/docker-entrypoint-initdb.d/init-scripts/99-roles.sql:Z + # Initialize the database settings with JWT_SECRET and JWT_EXP + - ./volumes/db/jwt.sql:/docker-entrypoint-initdb.d/init-scripts/99-jwt.sql:Z + # PGDATA directory is persisted between restarts + - ./volumes/db/data:/var/lib/postgresql/data:Z + # Changes required for internal supabase data such as _analytics + - ./volumes/db/_supabase.sql:/docker-entrypoint-initdb.d/migrations/97-_supabase.sql:Z + # Changes required for Analytics support + - ./volumes/db/logs.sql:/docker-entrypoint-initdb.d/migrations/99-logs.sql:Z + # Changes required for Pooler support + - ./volumes/db/pooler.sql:/docker-entrypoint-initdb.d/migrations/99-pooler.sql:Z + # Initial SQL that should run + - ../db/schema.sql:/docker-entrypoint-initdb.d/seed.sql + # Use named volume to persist pgsodium decryption key between restarts + - db-config:/etc/postgresql-custom + healthcheck: + test: + [ + "CMD", + "pg_isready", + "-U", + "postgres", + "-h", + "localhost" + ] + interval: 5s + timeout: 5s + retries: 10 + depends_on: + vector: + condition: service_healthy + environment: + POSTGRES_HOST: /var/run/postgresql + PGPORT: ${POSTGRES_PORT} + POSTGRES_PORT: ${POSTGRES_PORT} + PGPASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + PGDATABASE: ${POSTGRES_DB} + POSTGRES_DB: ${POSTGRES_DB} + JWT_SECRET: ${JWT_SECRET} + JWT_EXP: ${JWT_EXPIRY} + command: + [ + "postgres", + "-c", + 
"config_file=/etc/postgresql/postgresql.conf", + "-c", + "log_min_messages=fatal" # prevents Realtime polling queries from appearing in logs + ] + + vector: + container_name: supabase-vector + image: timberio/vector:0.28.1-alpine + networks: [techtracker] + restart: unless-stopped + volumes: + - ./volumes/logs/vector.yml:/etc/vector/vector.yml:ro,z + - ${DOCKER_SOCKET_LOCATION}:/var/run/docker.sock:ro,z + healthcheck: + test: + [ + "CMD", + "wget", + "--no-verbose", + "--tries=1", + "--spider", + "http://vector:9001/health" + ] + timeout: 5s + interval: 5s + retries: 3 + environment: + LOGFLARE_API_KEY: ${LOGFLARE_API_KEY} + command: + [ + "--config", + "/etc/vector/vector.yml" + ] + security_opt: + - "label=disable" + + # Update the DATABASE_URL if you are using an external Postgres database + supavisor: + container_name: supabase-pooler + image: supabase/supavisor:2.5.1 + networks: [techtracker] + restart: unless-stopped + ports: + #- ${POSTGRES_PORT}:5432 + - ${POOLER_PROXY_PORT_TRANSACTION}:6543 + volumes: + - ./volumes/pooler/pooler.exs:/etc/pooler/pooler.exs:ro,z + healthcheck: + test: + [ + "CMD", + "curl", + "-sSfL", + "--head", + "-o", + "/dev/null", + "http://127.0.0.1:4000/api/health" + ] + interval: 10s + timeout: 5s + retries: 5 + depends_on: + db: + condition: service_healthy + analytics: + condition: service_healthy + environment: + PORT: 4000 + POSTGRES_PORT: ${POSTGRES_PORT} + POSTGRES_DB: ${POSTGRES_DB} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + DATABASE_URL: ecto://supabase_admin:${POSTGRES_PASSWORD}@db:${POSTGRES_PORT}/_supabase + CLUSTER_POSTGRES: true + SECRET_KEY_BASE: ${SECRET_KEY_BASE} + VAULT_ENC_KEY: ${VAULT_ENC_KEY} + API_JWT_SECRET: ${JWT_SECRET} + METRICS_JWT_SECRET: ${JWT_SECRET} + REGION: local + ERL_AFLAGS: -proto_dist inet_tcp + POOLER_TENANT_ID: ${POOLER_TENANT_ID} + POOLER_DEFAULT_POOL_SIZE: ${POOLER_DEFAULT_POOL_SIZE} + POOLER_MAX_CLIENT_CONN: ${POOLER_MAX_CLIENT_CONN} + POOLER_POOL_MODE: transaction + command: + [ + "/bin/sh", + "-c", + "/app/bin/migrate && /app/bin/supavisor eval \"$$(cat /etc/pooler/pooler.exs)\" && /app/bin/server" + ] + +volumes: + db-config: + name: techtracker-config diff --git a/scripts/supabase/docker/volumes/api/kong.yml b/scripts/supabase/docker/volumes/api/kong.yml new file mode 100644 index 0000000..7abf425 --- /dev/null +++ b/scripts/supabase/docker/volumes/api/kong.yml @@ -0,0 +1,241 @@ +_format_version: '2.1' +_transform: true + +### +### Consumers / Users +### +consumers: + - username: DASHBOARD + - username: anon + keyauth_credentials: + - key: $SUPABASE_ANON_KEY + - username: service_role + keyauth_credentials: + - key: $SUPABASE_SERVICE_KEY + +### +### Access Control List +### +acls: + - consumer: anon + group: anon + - consumer: service_role + group: admin + +### +### Dashboard credentials +### +basicauth_credentials: + - consumer: DASHBOARD + username: $DASHBOARD_USERNAME + password: $DASHBOARD_PASSWORD + +### +### API Routes +### +services: + ## Open Auth routes + - name: auth-v1-open + url: http://auth:9999/verify + routes: + - name: auth-v1-open + strip_path: true + paths: + - /auth/v1/verify + plugins: + - name: cors + - name: auth-v1-open-callback + url: http://auth:9999/callback + routes: + - name: auth-v1-open-callback + strip_path: true + paths: + - /auth/v1/callback + plugins: + - name: cors + - name: auth-v1-open-authorize + url: http://auth:9999/authorize + routes: + - name: auth-v1-open-authorize + strip_path: true + paths: + - /auth/v1/authorize + plugins: + - name: cors + + ## Secure Auth routes + - 
diff --git a/scripts/supabase/docker/volumes/api/kong.yml b/scripts/supabase/docker/volumes/api/kong.yml
new file mode 100644
index 0000000..7abf425
--- /dev/null
+++ b/scripts/supabase/docker/volumes/api/kong.yml
@@ -0,0 +1,241 @@
+_format_version: '2.1'
+_transform: true
+
+###
+### Consumers / Users
+###
+consumers:
+  - username: DASHBOARD
+  - username: anon
+    keyauth_credentials:
+      - key: $SUPABASE_ANON_KEY
+  - username: service_role
+    keyauth_credentials:
+      - key: $SUPABASE_SERVICE_KEY
+
+###
+### Access Control List
+###
+acls:
+  - consumer: anon
+    group: anon
+  - consumer: service_role
+    group: admin
+
+###
+### Dashboard credentials
+###
+basicauth_credentials:
+  - consumer: DASHBOARD
+    username: $DASHBOARD_USERNAME
+    password: $DASHBOARD_PASSWORD
+
+###
+### API Routes
+###
+services:
+  ## Open Auth routes
+  - name: auth-v1-open
+    url: http://auth:9999/verify
+    routes:
+      - name: auth-v1-open
+        strip_path: true
+        paths:
+          - /auth/v1/verify
+    plugins:
+      - name: cors
+  - name: auth-v1-open-callback
+    url: http://auth:9999/callback
+    routes:
+      - name: auth-v1-open-callback
+        strip_path: true
+        paths:
+          - /auth/v1/callback
+    plugins:
+      - name: cors
+  - name: auth-v1-open-authorize
+    url: http://auth:9999/authorize
+    routes:
+      - name: auth-v1-open-authorize
+        strip_path: true
+        paths:
+          - /auth/v1/authorize
+    plugins:
+      - name: cors
+
+  ## Secure Auth routes
+  - name: auth-v1
+    _comment: 'GoTrue: /auth/v1/* -> http://auth:9999/*'
+    url: http://auth:9999/
+    routes:
+      - name: auth-v1-all
+        strip_path: true
+        paths:
+          - /auth/v1/
+    plugins:
+      - name: cors
+      - name: key-auth
+        config:
+          hide_credentials: false
+      - name: acl
+        config:
+          hide_groups_header: true
+          allow:
+            - admin
+            - anon
+
+  ## Secure REST routes
+  - name: rest-v1
+    _comment: 'PostgREST: /rest/v1/* -> http://rest:3000/*'
+    url: http://rest:3000/
+    routes:
+      - name: rest-v1-all
+        strip_path: true
+        paths:
+          - /rest/v1/
+    plugins:
+      - name: cors
+      - name: key-auth
+        config:
+          hide_credentials: true
+      - name: acl
+        config:
+          hide_groups_header: true
+          allow:
+            - admin
+            - anon
+
+  ## Secure GraphQL routes
+  - name: graphql-v1
+    _comment: 'PostgREST: /graphql/v1/* -> http://rest:3000/rpc/graphql'
+    url: http://rest:3000/rpc/graphql
+    routes:
+      - name: graphql-v1-all
+        strip_path: true
+        paths:
+          - /graphql/v1
+    plugins:
+      - name: cors
+      - name: key-auth
+        config:
+          hide_credentials: true
+      - name: request-transformer
+        config:
+          add:
+            headers:
+              - Content-Profile:graphql_public
+      - name: acl
+        config:
+          hide_groups_header: true
+          allow:
+            - admin
+            - anon
+
+  ## Secure Realtime routes
+  - name: realtime-v1-ws
+    _comment: 'Realtime: /realtime/v1/* -> ws://realtime:4000/socket/*'
+    url: http://realtime-dev.supabase-realtime:4000/socket
+    protocol: ws
+    routes:
+      - name: realtime-v1-ws
+        strip_path: true
+        paths:
+          - /realtime/v1/
+    plugins:
+      - name: cors
+      - name: key-auth
+        config:
+          hide_credentials: false
+      - name: acl
+        config:
+          hide_groups_header: true
+          allow:
+            - admin
+            - anon
+  - name: realtime-v1-rest
+    _comment: 'Realtime: /realtime/v1/* -> ws://realtime:4000/socket/*'
+    url: http://realtime-dev.supabase-realtime:4000/api
+    protocol: http
+    routes:
+      - name: realtime-v1-rest
+        strip_path: true
+        paths:
+          - /realtime/v1/api
+    plugins:
+      - name: cors
+      - name: key-auth
+        config:
+          hide_credentials: false
+      - name: acl
+        config:
+          hide_groups_header: true
+          allow:
+            - admin
+            - anon
+  ## Storage routes: the storage server manages its own auth
+  - name: storage-v1
+    _comment: 'Storage: /storage/v1/* -> http://storage:5000/*'
+    url: http://storage:5000/
+    routes:
+      - name: storage-v1-all
+        strip_path: true
+        paths:
+          - /storage/v1/
+    plugins:
+      - name: cors
+
+  ## Edge Functions routes
+  - name: functions-v1
+    _comment: 'Edge Functions: /functions/v1/* -> http://functions:9000/*'
+    url: http://functions:9000/
+    routes:
+      - name: functions-v1-all
+        strip_path: true
+        paths:
+          - /functions/v1/
+    plugins:
+      - name: cors
+
+  ## Analytics routes
+  - name: analytics-v1
+    _comment: 'Analytics: /analytics/v1/* -> http://logflare:4000/*'
+    url: http://analytics:4000/
+    routes:
+      - name: analytics-v1-all
+        strip_path: true
+        paths:
+          - /analytics/v1/
+
+  ## Secure Database routes
+  - name: meta
+    _comment: 'pg-meta: /pg/* -> http://pg-meta:8080/*'
+    url: http://meta:8080/
+    routes:
+      - name: meta-all
+        strip_path: true
+        paths:
+          - /pg/
+    plugins:
+      - name: key-auth
+        config:
+          hide_credentials: false
+      - name: acl
+        config:
+          hide_groups_header: true
+          allow:
+            - admin
+
+  ## Protected Dashboard - catch all remaining routes
+  - name: dashboard
+    _comment: 'Studio: /* -> http://studio:3000/*'
+    url: http://studio:3000/
+    routes:
+      - name: dashboard-all
+        strip_path: true
+        paths:
+          - /
+    plugins:
+      - name: cors
+      - name: basic-auth
+        config:
+          hide_credentials: true
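Every keyed route in the kong.yml above (auth, rest, graphql, realtime, pg-meta) runs the key-auth plugin against the anon/service_role consumers, then the ACL. In practice that means each client request needs the anon key in the `apikey` header, and PostgREST additionally reads the JWT from `Authorization`. A hedged fetch sketch — the `profiles` table is from this template's schema, the rest is standard Supabase convention:

```ts
// Query PostgREST through the Kong gateway (key-auth + acl, then RLS).
const res = await fetch(
  `${process.env.NEXT_PUBLIC_SUPABASE_URL}/rest/v1/profiles?select=*`,
  {
    headers: {
      // Kong's key-auth plugin matches this against the anon consumer.
      apikey: process.env.NEXT_PUBLIC_SUPABASE_ANON_KEY!,
      // PostgREST derives the database role (and RLS context) from this JWT.
      Authorization: `Bearer ${process.env.NEXT_PUBLIC_SUPABASE_ANON_KEY!}`,
    },
  },
);
if (!res.ok) throw new Error(`PostgREST error: ${res.status}`);
const profiles: unknown[] = await res.json();
```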
diff --git a/scripts/supabase/docker/volumes/db/_supabase.sql b/scripts/supabase/docker/volumes/db/_supabase.sql
new file mode 100644
index 0000000..6236ae1
--- /dev/null
+++ b/scripts/supabase/docker/volumes/db/_supabase.sql
@@ -0,0 +1,3 @@
+\set pguser `echo "$POSTGRES_USER"`
+
+CREATE DATABASE _supabase WITH OWNER :pguser;
diff --git a/scripts/supabase/docker/volumes/db/jwt.sql b/scripts/supabase/docker/volumes/db/jwt.sql
new file mode 100644
index 0000000..cfd3b16
--- /dev/null
+++ b/scripts/supabase/docker/volumes/db/jwt.sql
@@ -0,0 +1,5 @@
+\set jwt_secret `echo "$JWT_SECRET"`
+\set jwt_exp `echo "$JWT_EXP"`
+
+ALTER DATABASE postgres SET "app.settings.jwt_secret" TO :'jwt_secret';
+ALTER DATABASE postgres SET "app.settings.jwt_exp" TO :'jwt_exp';
diff --git a/scripts/supabase/docker/volumes/db/logs.sql b/scripts/supabase/docker/volumes/db/logs.sql
new file mode 100644
index 0000000..255c0f4
--- /dev/null
+++ b/scripts/supabase/docker/volumes/db/logs.sql
@@ -0,0 +1,6 @@
+\set pguser `echo "$POSTGRES_USER"`
+
+\c _supabase
+create schema if not exists _analytics;
+alter schema _analytics owner to :pguser;
+\c postgres
diff --git a/scripts/supabase/docker/volumes/db/pooler.sql b/scripts/supabase/docker/volumes/db/pooler.sql
new file mode 100644
index 0000000..162c5b9
--- /dev/null
+++ b/scripts/supabase/docker/volumes/db/pooler.sql
@@ -0,0 +1,6 @@
+\set pguser `echo "$POSTGRES_USER"`
+
+\c _supabase
+create schema if not exists _supavisor;
+alter schema _supavisor owner to :pguser;
+\c postgres
diff --git a/scripts/supabase/docker/volumes/db/realtime.sql b/scripts/supabase/docker/volumes/db/realtime.sql
new file mode 100644
index 0000000..4d4b9ff
--- /dev/null
+++ b/scripts/supabase/docker/volumes/db/realtime.sql
@@ -0,0 +1,4 @@
+\set pguser `echo "$POSTGRES_USER"`
+
+create schema if not exists _realtime;
+alter schema _realtime owner to :pguser;
diff --git a/scripts/supabase/docker/volumes/db/roles.sql b/scripts/supabase/docker/volumes/db/roles.sql
new file mode 100644
index 0000000..8f7161a
--- /dev/null
+++ b/scripts/supabase/docker/volumes/db/roles.sql
@@ -0,0 +1,8 @@
+-- NOTE: change to your own passwords for production environments
+\set pgpass `echo "$POSTGRES_PASSWORD"`
+
+ALTER USER authenticator WITH PASSWORD :'pgpass';
+ALTER USER pgbouncer WITH PASSWORD :'pgpass';
+ALTER USER supabase_auth_admin WITH PASSWORD :'pgpass';
+ALTER USER supabase_functions_admin WITH PASSWORD :'pgpass';
+ALTER USER supabase_storage_admin WITH PASSWORD :'pgpass';
diff --git a/scripts/supabase/docker/volumes/db/webhooks.sql b/scripts/supabase/docker/volumes/db/webhooks.sql
new file mode 100644
index 0000000..5837b86
--- /dev/null
+++ b/scripts/supabase/docker/volumes/db/webhooks.sql
@@ -0,0 +1,208 @@
+BEGIN;
+  -- Create pg_net extension
+  CREATE EXTENSION IF NOT EXISTS pg_net SCHEMA extensions;
+  -- Create supabase_functions schema
+  CREATE SCHEMA supabase_functions AUTHORIZATION supabase_admin;
+  GRANT USAGE ON SCHEMA supabase_functions TO postgres, anon, authenticated, service_role;
+  ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON TABLES TO postgres, anon, authenticated, service_role;
+  ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON FUNCTIONS TO postgres, anon, authenticated, service_role;
+  ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON SEQUENCES TO postgres, anon, authenticated, service_role;
+  -- supabase_functions.migrations definition
+  CREATE TABLE supabase_functions.migrations (
+    version text PRIMARY KEY,
+    inserted_at timestamptz NOT NULL DEFAULT NOW()
+  );
+  -- Initial supabase_functions migration
+  INSERT INTO supabase_functions.migrations (version) VALUES ('initial');
+  -- supabase_functions.hooks definition
+  CREATE TABLE supabase_functions.hooks (
+    id bigserial PRIMARY KEY,
+    hook_table_id integer NOT NULL,
+    hook_name text NOT NULL,
+    created_at timestamptz NOT NULL DEFAULT NOW(),
+    request_id bigint
+  );
+  CREATE INDEX supabase_functions_hooks_request_id_idx ON supabase_functions.hooks USING btree (request_id);
+  CREATE INDEX supabase_functions_hooks_h_table_id_h_name_idx ON supabase_functions.hooks USING btree (hook_table_id, hook_name);
+  COMMENT ON TABLE supabase_functions.hooks IS 'Supabase Functions Hooks: Audit trail for triggered hooks.';
+  CREATE FUNCTION supabase_functions.http_request()
+    RETURNS trigger
+    LANGUAGE plpgsql
+    AS $function$
+    DECLARE
+      request_id bigint;
+      payload jsonb;
+      url text := TG_ARGV[0]::text;
+      method text := TG_ARGV[1]::text;
+      headers jsonb DEFAULT '{}'::jsonb;
+      params jsonb DEFAULT '{}'::jsonb;
+      timeout_ms integer DEFAULT 1000;
+    BEGIN
+      IF url IS NULL OR url = 'null' THEN
+        RAISE EXCEPTION 'url argument is missing';
+      END IF;
+
+      IF method IS NULL OR method = 'null' THEN
+        RAISE EXCEPTION 'method argument is missing';
+      END IF;
+
+      IF TG_ARGV[2] IS NULL OR TG_ARGV[2] = 'null' THEN
+        headers = '{"Content-Type": "application/json"}'::jsonb;
+      ELSE
+        headers = TG_ARGV[2]::jsonb;
+      END IF;
+
+      IF TG_ARGV[3] IS NULL OR TG_ARGV[3] = 'null' THEN
+        params = '{}'::jsonb;
+      ELSE
+        params = TG_ARGV[3]::jsonb;
+      END IF;
+
+      IF TG_ARGV[4] IS NULL OR TG_ARGV[4] = 'null' THEN
+        timeout_ms = 1000;
+      ELSE
+        timeout_ms = TG_ARGV[4]::integer;
+      END IF;
+
+      CASE
+        WHEN method = 'GET' THEN
+          SELECT http_get INTO request_id FROM net.http_get(
+            url,
+            params,
+            headers,
+            timeout_ms
+          );
+        WHEN method = 'POST' THEN
+          payload = jsonb_build_object(
+            'old_record', OLD,
+            'record', NEW,
+            'type', TG_OP,
+            'table', TG_TABLE_NAME,
+            'schema', TG_TABLE_SCHEMA
+          );
+
+          SELECT http_post INTO request_id FROM net.http_post(
+            url,
+            payload,
+            params,
+            headers,
+            timeout_ms
+          );
+        ELSE
+          RAISE EXCEPTION 'method argument % is invalid', method;
+      END CASE;
+
+      INSERT INTO supabase_functions.hooks
+        (hook_table_id, hook_name, request_id)
+      VALUES
+        (TG_RELID, TG_NAME, request_id);
+
+      RETURN NEW;
+    END
+  $function$;
+  -- Supabase super admin
+  DO
+  $$
+  BEGIN
+    IF NOT EXISTS (
+      SELECT 1
+      FROM pg_roles
+      WHERE rolname = 'supabase_functions_admin'
+    )
+    THEN
+      CREATE USER supabase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION;
+    END IF;
+  END
+  $$;
+  GRANT ALL PRIVILEGES ON SCHEMA supabase_functions TO supabase_functions_admin;
+  GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA supabase_functions TO supabase_functions_admin;
+  GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA supabase_functions TO supabase_functions_admin;
+  ALTER USER supabase_functions_admin SET search_path = "supabase_functions";
+  ALTER table "supabase_functions".migrations OWNER TO supabase_functions_admin;
+  ALTER table "supabase_functions".hooks OWNER TO supabase_functions_admin;
+  ALTER function "supabase_functions".http_request() OWNER TO supabase_functions_admin;
+  GRANT supabase_functions_admin TO postgres;
+  -- Remove unused supabase_pg_net_admin role
+  DO
+  $$
+  BEGIN
+    IF EXISTS (
+      SELECT 1
+      FROM pg_roles
+      WHERE rolname = 'supabase_pg_net_admin'
+    )
+    THEN
+      REASSIGN OWNED BY supabase_pg_net_admin TO supabase_admin;
+      DROP OWNED BY supabase_pg_net_admin;
+      DROP ROLE supabase_pg_net_admin;
+    END IF;
+  END
+  $$;
+  -- pg_net grants when extension is already enabled
+  DO
+  $$
+  BEGIN
+    IF EXISTS (
+      SELECT 1
+      FROM pg_extension
+      WHERE extname = 'pg_net'
+    )
+    THEN
+      GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;
+      ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
+      ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
+      ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
+      ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
+      REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
+      REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
+      GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
+      GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
+    END IF;
+  END
+  $$;
+  -- Event trigger for pg_net
+  CREATE OR REPLACE FUNCTION extensions.grant_pg_net_access()
+    RETURNS event_trigger
+    LANGUAGE plpgsql
+    AS $$
+    BEGIN
+      IF EXISTS (
+        SELECT 1
+        FROM pg_event_trigger_ddl_commands() AS ev
+        JOIN pg_extension AS ext
+          ON ev.objid = ext.oid
+        WHERE ext.extname = 'pg_net'
+      )
+      THEN
+        GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;
+        ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
+        ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
+        ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
+        ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
+        REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
+        REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
+        GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
+        GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
+      END IF;
+    END;
+    $$;
+  COMMENT ON FUNCTION extensions.grant_pg_net_access IS 'Grants access to pg_net';
+  DO
+  $$
+  BEGIN
+    IF NOT EXISTS (
+      SELECT 1
+      FROM pg_event_trigger
+      WHERE evtname = 'issue_pg_net_access'
+    ) THEN
+      CREATE EVENT TRIGGER issue_pg_net_access ON ddl_command_end WHEN TAG IN ('CREATE EXTENSION')
+      EXECUTE PROCEDURE extensions.grant_pg_net_access();
+    END IF;
+  END
+  $$;
+  INSERT INTO supabase_functions.migrations (version) VALUES ('20210809183423_update_grants');
+  ALTER function supabase_functions.http_request() SECURITY DEFINER;
+  ALTER function supabase_functions.http_request() SET search_path = supabase_functions;
+  REVOKE ALL ON FUNCTION supabase_functions.http_request() FROM PUBLIC;
+  GRANT EXECUTE ON FUNCTION supabase_functions.http_request() TO postgres, anon, authenticated, service_role;
+COMMIT;
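The `http_request()` trigger function above POSTs a JSON body assembled by `jsonb_build_object`, so whatever endpoint a hook points at receives a predictable shape. A sketch of that shape and a receiver — the type name and the Next.js-style route handler are illustrative only, not part of this repo:

```ts
// Shape of the body supabase_functions.http_request() sends for POST hooks.
type WebhookPayload<Row = Record<string, unknown>> = {
  type: 'INSERT' | 'UPDATE' | 'DELETE'; // TG_OP
  table: string;                        // TG_TABLE_NAME
  schema: string;                       // TG_TABLE_SCHEMA
  record: Row | null;                   // NEW (null on DELETE)
  old_record: Row | null;               // OLD (null on INSERT)
};

// Minimal receiver, e.g. a hypothetical app/api/hooks/route.ts handler.
export async function POST(req: Request): Promise<Response> {
  const payload = (await req.json()) as WebhookPayload;
  console.log(`${payload.type} on ${payload.schema}.${payload.table}`);
  return new Response(null, { status: 204 });
}
```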
diff --git a/scripts/supabase/docker/volumes/functions/hello/index.ts b/scripts/supabase/docker/volumes/functions/hello/index.ts
new file mode 100644
index 0000000..7ae5cc1
--- /dev/null
+++ b/scripts/supabase/docker/volumes/functions/hello/index.ts
@@ -0,0 +1,15 @@
+// Follow this setup guide to integrate the Deno language server with your editor:
+// https://deno.land/manual/getting_started/setup_your_environment
+// This enables autocomplete, go to definition, etc.
+
+import { serve } from 'https://deno.land/std@0.177.1/http/server.ts';
+
+serve(async () => {
+  return new Response(`"Hello from Edge Functions!"`, {
+    headers: { 'Content-Type': 'application/json' },
+  });
+});
+
+// To invoke:
+// curl 'http://localhost:/functions/v1/hello' \
+//   --header 'Authorization: Bearer '
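The invoke comment above elides the port and token. Using the values this template already defines, the same call looks roughly like the sketch below — the anon key works as the Bearer token because it is signed with `JWT_SECRET`, which is what `main/index.ts` (next file) verifies against:

```ts
// Hypothetical invocation of the hello function through Kong's /functions/v1 route.
const res = await fetch(
  `${process.env.NEXT_PUBLIC_SUPABASE_URL}/functions/v1/hello`,
  {
    headers: {
      Authorization: `Bearer ${process.env.NEXT_PUBLIC_SUPABASE_ANON_KEY!}`,
    },
  },
);
console.log(await res.text()); // "Hello from Edge Functions!"
```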
diff --git a/scripts/supabase/docker/volumes/functions/main/index.ts b/scripts/supabase/docker/volumes/functions/main/index.ts
new file mode 100644
index 0000000..291c1e0
--- /dev/null
+++ b/scripts/supabase/docker/volumes/functions/main/index.ts
@@ -0,0 +1,94 @@
+import { serve } from 'https://deno.land/std@0.131.0/http/server.ts';
+import * as jose from 'https://deno.land/x/jose@v4.14.4/index.ts';
+
+console.log('main function started');
+
+const JWT_SECRET = Deno.env.get('JWT_SECRET');
+const VERIFY_JWT = Deno.env.get('VERIFY_JWT') === 'true';
+
+function getAuthToken(req: Request) {
+  const authHeader = req.headers.get('authorization');
+  if (!authHeader) {
+    throw new Error('Missing authorization header');
+  }
+  const [bearer, token] = authHeader.split(' ');
+  if (bearer !== 'Bearer') {
+    throw new Error(`Auth header is not 'Bearer {token}'`);
+  }
+  return token;
+}
+
+async function verifyJWT(jwt: string): Promise<boolean> {
+  const encoder = new TextEncoder();
+  const secretKey = encoder.encode(JWT_SECRET);
+  try {
+    await jose.jwtVerify(jwt, secretKey);
+  } catch (err) {
+    console.error(err);
+    return false;
+  }
+  return true;
+}
+
+serve(async (req: Request) => {
+  if (req.method !== 'OPTIONS' && VERIFY_JWT) {
+    try {
+      const token = getAuthToken(req);
+      const isValidJWT = await verifyJWT(token);
+
+      if (!isValidJWT) {
+        return new Response(JSON.stringify({ msg: 'Invalid JWT' }), {
+          status: 401,
+          headers: { 'Content-Type': 'application/json' },
+        });
+      }
+    } catch (e) {
+      console.error(e);
+      return new Response(JSON.stringify({ msg: e.toString() }), {
+        status: 401,
+        headers: { 'Content-Type': 'application/json' },
+      });
+    }
+  }
+
+  const url = new URL(req.url);
+  const { pathname } = url;
+  const path_parts = pathname.split('/');
+  const service_name = path_parts[1];
+
+  if (!service_name || service_name === '') {
+    const error = { msg: 'missing function name in request' };
+    return new Response(JSON.stringify(error), {
+      status: 400,
+      headers: { 'Content-Type': 'application/json' },
+    });
+  }
+
+  const servicePath = `/home/deno/functions/${service_name}`;
+  console.error(`serving the request with ${servicePath}`);
+
+  const memoryLimitMb = 150;
+  const workerTimeoutMs = 1 * 60 * 1000;
+  const noModuleCache = false;
+  const importMapPath = null;
+  const envVarsObj = Deno.env.toObject();
+  const envVars = Object.keys(envVarsObj).map((k) => [k, envVarsObj[k]]);
+
+  try {
+    const worker = await EdgeRuntime.userWorkers.create({
+      servicePath,
+      memoryLimitMb,
+      workerTimeoutMs,
+      noModuleCache,
+      importMapPath,
+      envVars,
+    });
+    return await worker.fetch(req);
+  } catch (e) {
+    const error = { msg: e.toString() };
+    return new Response(JSON.stringify(error), {
+      status: 500,
+      headers: { 'Content-Type': 'application/json' },
+    });
+  }
+});
diff --git a/scripts/supabase/docker/volumes/logs/vector.yml b/scripts/supabase/docker/volumes/logs/vector.yml
new file mode 100644
index 0000000..cce46df
--- /dev/null
+++ b/scripts/supabase/docker/volumes/logs/vector.yml
@@ -0,0 +1,232 @@
+api:
+  enabled: true
+  address: 0.0.0.0:9001
+
+sources:
+  docker_host:
+    type: docker_logs
+    exclude_containers:
+      - supabase-vector
+
+transforms:
+  project_logs:
+    type: remap
+    inputs:
+      - docker_host
+    source: |-
+      .project = "default"
+      .event_message = del(.message)
+      .appname = del(.container_name)
+      del(.container_created_at)
+      del(.container_id)
+      del(.source_type)
+      del(.stream)
+      del(.label)
+      del(.image)
+      del(.host)
+      del(.stream)
+  router:
+    type: route
+    inputs:
+      - project_logs
+    route:
+      kong: '.appname == "supabase-kong"'
+      auth: '.appname == "supabase-auth"'
+      rest: '.appname == "supabase-rest"'
+      realtime: '.appname == "supabase-realtime"'
+      storage: '.appname == "supabase-storage"'
+      functions: '.appname == "supabase-functions"'
+      db: '.appname == "supabase-db"'
+  # Ignores non nginx errors since they are related with kong booting up
+  kong_logs:
+    type: remap
+    inputs:
+      - router.kong
+    source: |-
+      req, err = parse_nginx_log(.event_message, "combined")
+      if err == null {
+        .timestamp = req.timestamp
+        .metadata.request.headers.referer = req.referer
+        .metadata.request.headers.user_agent = req.agent
+        .metadata.request.headers.cf_connecting_ip = req.client
+        .metadata.request.method = req.method
+        .metadata.request.path = req.path
+        .metadata.request.protocol = req.protocol
+        .metadata.response.status_code = req.status
+      }
+      if err != null {
+        abort
+      }
+  # Ignores non nginx errors since they are related with kong booting up
+  kong_err:
+    type: remap
+    inputs:
+      - router.kong
+    source: |-
+      .metadata.request.method = "GET"
+      .metadata.response.status_code = 200
+      parsed, err = parse_nginx_log(.event_message, "error")
+      if err == null {
+        .timestamp = parsed.timestamp
+        .severity = parsed.severity
+        .metadata.request.host = parsed.host
+        .metadata.request.headers.cf_connecting_ip = parsed.client
+        url, err = split(parsed.request, " ")
+        if err == null {
+          .metadata.request.method = url[0]
+          .metadata.request.path = url[1]
+          .metadata.request.protocol = url[2]
+        }
+      }
+      if err != null {
+        abort
+      }
+  # Gotrue logs are structured json strings which frontend parses directly. But we keep metadata for consistency.
+  auth_logs:
+    type: remap
+    inputs:
+      - router.auth
+    source: |-
+      parsed, err = parse_json(.event_message)
+      if err == null {
+        .metadata.timestamp = parsed.time
+        .metadata = merge!(.metadata, parsed)
+      }
+  # PostgREST logs are structured so we separate timestamp from message using regex
+  rest_logs:
+    type: remap
+    inputs:
+      - router.rest
+    source: |-
+      parsed, err = parse_regex(.event_message, r'^(?P