diff --git a/docusaurus.config.js b/docusaurus.config.js index 6f90a3619..b497e041a 100644 --- a/docusaurus.config.js +++ b/docusaurus.config.js @@ -1,219 +1,211 @@ -import { themes } from "prism-react-renderer"; -const path = require("path"); +// @ts-check +// `@type` JSDoc annotations allow editor autocompletion and type checking +// (when paired with `@ts-check`). +// There are various equivalent ways to declare your Docusaurus config. +// See: https://docusaurus.io/docs/api/docusaurus-config + +import {themes as prismThemes} from 'prism-react-renderer'; +import math from 'remark-math'; +import katex from 'rehype-katex'; +import remarkYamlToTable from 'remark-yaml-to-table'; +import remarkCodeImport from 'remark-code-import'; +import tabBlocks from 'docusaurus-remark-plugin-tab-blocks'; import 'dotenv/config'; -import platform_enterprise_latest_version from "./platform-enterprise_latest_version.js"; +// This runs in Node.js - Don't use client-side code here (browser APIs, JSX...) -export default async function createConfigAsync() { - return { - title: "Seqera Docs", - tagline: "Documentation for Seqera products", - favicon: "img/favicon--dynamic.svg", +/** @type {import('@docusaurus/types').Config} */ +const config = { + title: "Seqera Docs", + tagline: "Documentation for Seqera products", + favicon: "img/favicon--dynamic.svg", - // Set the production url of your site here - url: "https://docs.seqera.io", - // Set the // pathname under which your site is served - // For GitHub pages deployment, it is often '//' - baseUrl: "/", - trailingSlash: false, + // Set the production url of your site here + url: "https://docs.seqera.io", + // Set the // pathname under which your site is served + baseUrl: '/', + trailingSlash: false, - // GitHub pages deployment config. - // If you aren't using GitHub pages, you don't need these. - organizationName: "seqeralabs", // Usually your GitHub org/user name. - projectName: "docs", // Usually your repo name. + // GitHub pages deployment config. + organizationName: "seqeralabs", + projectName: "docs", - onBrokenLinks: "warn", - onBrokenMarkdownLinks: "warn", + onBrokenLinks: 'warn', + onBrokenMarkdownLinks: 'warn', - customFields: { - // Put your custom environment here - algolia: { - appId: process.env.PUBLIC_DOCUSAURUS_ALGOLIA_APP_ID, - apiKey: process.env.PUBLIC_DOCUSAURUS_ALGOLIA_API_KEY, - indexName: process.env.PUBLIC_DOCUSAURUS_ALGOLIA_INDEX_NAME, - }, + // Add Algolia search configuration + customFields: { + algolia: { + appId: process.env.PUBLIC_DOCUSAURUS_ALGOLIA_APP_ID, + apiKey: process.env.PUBLIC_DOCUSAURUS_ALGOLIA_API_KEY, + indexName: process.env.PUBLIC_DOCUSAURUS_ALGOLIA_INDEX_NAME, }, + }, + + i18n: { + defaultLocale: 'en', + locales: ['en'], + }, - // Even if you don't use internalization, you can use this field to set useful - // metadata like html lang. For example, if your site is Chinese, you may want - // to replace "en" with "zh-Hans". 
- i18n: { - defaultLocale: "en", - locales: ["en"], + stylesheets: [ + { + href: 'https://cdn.jsdelivr.net/npm/katex@0.12.0/dist/katex.min.css', + type: 'text/css', + integrity: 'sha384-AfEj0r4/OFrOo5t7NnNe46zW/tFgW6x/bCJG8FqQCEo3+Aro6EYUG4+cU+KJWu/X', + crossorigin: 'anonymous', }, + ], - presets: [ - [ - "classic", - { - blog: { - blogTitle: 'Seqera Changelog', - blogDescription: 'Blog', - blogSidebarCount: 5000, - blogSidebarTitle: 'Changelog', - path: 'changelog', - routeBasePath: '/changelog', - //processBlogPosts: () => ({}), - include: ['**/*.{md,mdx}'], - showReadingTime: false, - feedOptions: { - type: 'all', // 'rss', 'atom', or both - title: 'Seqera Changelog', - description: 'Stay updated with our blog posts!', - copyright: `Copyright © ${new Date().getFullYear()} Seqera`, - } - }, - docs: false, - theme: { - customCss: [ - require.resolve("./src/css/main.css"), - require.resolve("./src/css/misc.css"), - require.resolve("./src/css/components/checklist.css"), - require.resolve("./src/css/components/box.css"), - require.resolve("./src/css/theme-colors.css"), - require.resolve("./src/css/fonts/inter.css"), - require.resolve("./src/css/fonts/degular.css"), - ], - }, - gtag: { - trackingID: "G-NR1CNM213G", - anonymizeIP: true, - }, - googleTagManager: { - containerId: "GTM-MBCJKK4", - }, + presets: [ + [ + 'classic', + /** @type {import('@docusaurus/preset-classic').Options} */ + ({ + // Add the blog/changelog section + blog: { + blogTitle: 'Seqera Changelog', + blogDescription: 'Blog', + blogSidebarCount: 5000, + blogSidebarTitle: 'Changelog', + path: 'changelog', + routeBasePath: '/changelog', + include: ['**/*.{md,mdx}'], + showReadingTime: false, + feedOptions: { + type: 'all', + title: 'Seqera Changelog', + description: 'Stay updated with our blog posts!', + copyright: `Copyright © ${new Date().getFullYear()} Seqera`, + } }, - ], - ], - plugins: [ - [ - "@docusaurus/plugin-content-docs", - { - id: "platform-enterprise", - routeBasePath: "/platform-enterprise", - includeCurrentVersion: false, - remarkPlugins: [ - (await import("remark-code-import")).default, - (await require("remark-math")).default, - (await import("docusaurus-remark-plugin-tab-blocks")).default, - (await require("remark-yaml-to-table")).default, + docs: false, + theme: { + customCss: [ + './src/css/main.css', + // './src/css/misc.css', + // './src/css/components/checklist.css', + './src/css/components/box.css', + './src/css/theme-colors.css', + // './src/css/fonts/inter.css', + // //'./src/css/fonts/degular.css', ], - rehypePlugins: [(await require("rehype-katex")).default], - editUrl: "https://github.com/seqeralabs/docs/tree/master/", - sidebarPath: false, - versions: { - // Replace /platform-enterprise with /platform-enterprise/24.2, when no version is specified in the URL. 
- // (Applies to latest version only) - [platform_enterprise_latest_version]: { - label: platform_enterprise_latest_version, - path: platform_enterprise_latest_version, - }, - }, }, - ], - [ - "@docusaurus/plugin-content-docs", - { - id: "platform-cloud", - routeBasePath: "/platform-cloud", - path: "platform-cloud/docs", - remarkPlugins: [ - (await import("remark-code-import")).default, - (await require("remark-math")).default, - (await import("docusaurus-remark-plugin-tab-blocks")).default, - (await require("remark-yaml-to-table")).default, - ], - rehypePlugins: [(await require("rehype-katex")).default], - editUrl: "https://github.com/seqeralabs/docs/tree/master/", - sidebarPath: "./platform-cloud/cloud-sidebar.json", + gtag: { + trackingID: "G-NR1CNM213G", + anonymizeIP: true, }, - ], - [ - "@docusaurus/plugin-content-docs", - { - id: "multiqc", - routeBasePath: "/multiqc", - path: "multiqc_docs/multiqc_repo/docs/markdown", - remarkPlugins: [ - (await import("remark-code-import")).default, - (await require("remark-math")).default, - (await import("docusaurus-remark-plugin-tab-blocks")).default, - (await require("remark-yaml-to-table")).default, - ], - rehypePlugins: [(await require("rehype-katex")).default], - editUrl: ({ docPath }) => { - return `https://github.com/MultiQC/MultiQC/blob/main/docs/markdown/${docPath.replace('multiqc_docs/multiqc_repo/docs', '')}` - }, - sidebarPath: "./multiqc_docs/sidebar.js", + googleTagManager: { + containerId: "GTM-MBCJKK4", }, - ], - [ - "@docusaurus/plugin-content-docs", - { - id: "fusion", - routeBasePath: "/fusion", - path: "fusion_docs", - remarkPlugins: [ - (await import("remark-code-import")).default, - (await require("remark-math")).default, - (await import("docusaurus-remark-plugin-tab-blocks")).default, - (await require("remark-yaml-to-table")).default, - ], - rehypePlugins: [(await require("rehype-katex")).default], - editUrl: "https://github.com/seqeralabs/docs/tree/master/", - sidebarPath: "./fusion_docs/sidebar.json", + }), + ], + ], + + plugins: [ + // Platform Cloud plugin + [ + '@docusaurus/plugin-content-docs', + { + id: 'platform-cloud', + path: "platform-cloud/docs", + routeBasePath: 'platform-cloud', + sidebarPath: './platform-cloud/cloud-sidebar.json', + editUrl: 'https://github.com/seqeralabs/docs/tree/master/', + remarkPlugins: [math, remarkYamlToTable, remarkCodeImport , tabBlocks], + rehypePlugins: [katex], + }, + ], + // Wave plugin + [ + '@docusaurus/plugin-content-docs', + { + id: "wave", + routeBasePath: "/wave", + path: "wave_docs/wave_repo/docs", + sidebarPath: './wave_docs/sidebar.json', + remarkPlugins: [math, remarkYamlToTable, remarkCodeImport , tabBlocks], + rehypePlugins: [katex], + editUrl: ({ docPath }) => { + return `https://github.com/seqeralabs/wave/blob/master/docs/${docPath.replace('wave', '')}` }, - ], - [ - "@docusaurus/plugin-content-docs", - { - id: "wave", - routeBasePath: "/wave", - path: "wave_docs/wave_repo/docs", - remarkPlugins: [ - (await import("remark-code-import")).default, - (await require("remark-math")).default, - (await import("docusaurus-remark-plugin-tab-blocks")).default, - (await require("remark-yaml-to-table")).default, - ], - rehypePlugins: [(await require("rehype-katex")).default], - editUrl: ({ docPath }) => { - return `https://github.com/seqeralabs/wave/blob/master/docs/${docPath.replace('wave_docs/wave_repo/docs', '')}` - }, - sidebarPath: "./wave_docs/sidebar.json", + }, + ], + // MultiQC plugin + [ + '@docusaurus/plugin-content-docs', + { + id: 'multiqc', + path: 
"multiqc_docs/multiqc_repo/docs/markdown", + routeBasePath: 'multiqc', + sidebarPath: './multiqc_docs/sidebar.js', + remarkPlugins: [math, remarkYamlToTable, remarkCodeImport , tabBlocks], + rehypePlugins: [katex], + editUrl: ({ docPath }) => { + return `https://github.com/MultiQC/MultiQC/blob/main/docs/markdown/${docPath.replace('multiqc', '')}` }, - ], - async function tailwind() { - return { - name: "docusaurus-tailwindcss", - configurePostCss(postcssOptions) { - postcssOptions.plugins.push(require("tailwindcss")); - postcssOptions.plugins.push(require("autoprefixer")); - return postcssOptions; - }, - }; }, - function routing() { - return { - name: "latest-routing", - async contentLoaded({ actions }) { - [ - { - path: "/platform-enterprise/latest", - exact: false, - component: "@site/src/pages/platform-enterprise/latest.tsx", - }, - ].map((route) => actions.addRoute(route)); - }, - }; + ], + // Fusion plugin + [ + '@docusaurus/plugin-content-docs', + { + id: 'fusion', + path: 'fusion_docs', + routeBasePath: 'fusion', + sidebarPath: './fusion_docs/sidebar.json', + remarkPlugins: [math, remarkYamlToTable, remarkCodeImport , tabBlocks], + rehypePlugins: [katex], + editUrl: 'https://github.com/seqeralabs/docs/tree/master/', + }, + ], + // Platform Enterprise plugin + [ + '@docusaurus/plugin-content-docs', + { + id: 'platform-enterprise', + path: 'platform-enterprise', + routeBasePath: 'platform-enterprise', + sidebarPath: './platform-enterprise/sidebar.json', + includeCurrentVersion: false, + remarkPlugins: [ math, remarkYamlToTable], //remarkCodeImport tabBlocks + rehypePlugins: [katex], + editUrl: 'https://github.com/seqeralabs/docs/tree/master/', }, - // path.resolve(__dirname, "plugins_custom/seqera_jobs"), - // path.resolve(__dirname, "plugins_custom/seqera_events"), ], + //Tailwind CSS plugin + function tailwind() { + return { + name: "docusaurus-tailwindcss", + configurePostCss(postcssOptions) { + postcssOptions.plugins.push(require("tailwindcss")); + postcssOptions.plugins.push(require("autoprefixer")); + return postcssOptions; + }, + }; + }, + //Latest routing plugin + function routing() { + return { + name: "latest-routing", + async contentLoaded({ actions }) { + [ + { + path: "/platform-enterprise/latest", + exact: false, + component: "@site/src/pages/platform-enterprise/latest.tsx", + }, + ].map((route) => actions.addRoute(route)); + }, + }; + }, + ], - themeConfig: { + themeConfig: + /** @type {import('@docusaurus/preset-classic').ThemeConfig} */ + ({ image: "img/share.jpg", navbar: { + title: '', logo: { alt: "Seqera", src: "img/logo2.png", @@ -308,8 +300,8 @@ export default async function createConfigAsync() { copyright: `© ${new Date().getFullYear()} Seqera`, }, prism: { - theme: themes.oneLight, - darkTheme: themes.oneDark, + theme: prismThemes.oneLight, + darkTheme: prismThemes.oneDark, additionalLanguages: [ "bash", "docker", @@ -326,16 +318,10 @@ export default async function createConfigAsync() { "yaml" ], }, - }, - clientModules: [require.resolve("./clientside-scripts.js")], - stylesheets: [ - { - href: "https://cdn.jsdelivr.net/npm/katex@0.12.0/dist/katex.min.css", - type: "text/css", - integrity: - "sha384-AfEj0r4/OFrOo5t7NnNe46zW/tFgW6x/bCJG8FqQCEo3+Aro6EYUG4+cU+KJWu/X", - crossorigin: "anonymous", - }, - ], - }; -} + }), + + //Add client-side modules + clientModules: [require.resolve("./clientside-scripts.js")], +}; + +export default config; diff --git a/platform-enterprise/_images/nf_home_page.png b/platform-enterprise/_images/nf_home_page.png new file mode 100644 
index 000000000..fa562e50d Binary files /dev/null and b/platform-enterprise/_images/nf_home_page.png differ diff --git a/platform-enterprise/_images/overview_image.png b/platform-enterprise/_images/overview_image.png new file mode 100644 index 000000000..c50d97f8a Binary files /dev/null and b/platform-enterprise/_images/overview_image.png differ diff --git a/platform-enterprise/_images/staging_options.png b/platform-enterprise/_images/staging_options.png new file mode 100644 index 000000000..850bede43 Binary files /dev/null and b/platform-enterprise/_images/staging_options.png differ diff --git a/platform-enterprise/_images/tw_agent.png b/platform-enterprise/_images/tw_agent.png new file mode 100644 index 000000000..2f39652c2 Binary files /dev/null and b/platform-enterprise/_images/tw_agent.png differ diff --git a/platform-enterprise/_images/tw_agent_running.png b/platform-enterprise/_images/tw_agent_running.png new file mode 100644 index 000000000..2bc2bd92b Binary files /dev/null and b/platform-enterprise/_images/tw_agent_running.png differ diff --git a/platform-enterprise/_images/tw_cli.png b/platform-enterprise/_images/tw_cli.png new file mode 100644 index 000000000..d93acb717 Binary files /dev/null and b/platform-enterprise/_images/tw_cli.png differ diff --git a/platform-enterprise/_logos/nf-tower-black.svg b/platform-enterprise/_logos/nf-tower-black.svg new file mode 100644 index 000000000..4f87c127a --- /dev/null +++ b/platform-enterprise/_logos/nf-tower-black.svg @@ -0,0 +1,172 @@ + + + +image/svg+xml \ No newline at end of file diff --git a/platform-enterprise/_logos/nf-tower-icon-black.svg b/platform-enterprise/_logos/nf-tower-icon-black.svg new file mode 100644 index 000000000..d9e7e37e1 --- /dev/null +++ b/platform-enterprise/_logos/nf-tower-icon-black.svg @@ -0,0 +1,110 @@ + + + +image/svg+xml \ No newline at end of file diff --git a/platform-enterprise/_logos/seqera-logo-black.png b/platform-enterprise/_logos/seqera-logo-black.png new file mode 100644 index 000000000..2f74a445f Binary files /dev/null and b/platform-enterprise/_logos/seqera-logo-black.png differ diff --git a/platform-enterprise/_templates/aws-batch/forge-policy.json b/platform-enterprise/_templates/aws-batch/forge-policy.json new file mode 100644 index 000000000..cad01805e --- /dev/null +++ b/platform-enterprise/_templates/aws-batch/forge-policy.json @@ -0,0 +1,62 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "TowerForge0", + "Effect": "Allow", + "Action": [ + "ssm:GetParameters", + "iam:CreateInstanceProfile", + "iam:DeleteInstanceProfile", + "iam:GetRole", + "iam:RemoveRoleFromInstanceProfile", + "iam:CreateRole", + "iam:DeleteRole", + "iam:AttachRolePolicy", + "iam:PutRolePolicy", + "iam:AddRoleToInstanceProfile", + "iam:PassRole", + "iam:DetachRolePolicy", + "iam:ListAttachedRolePolicies", + "iam:DeleteRolePolicy", + "iam:ListRolePolicies", + "iam:TagRole", + "iam:TagInstanceProfile", + "batch:CreateComputeEnvironment", + "batch:DescribeComputeEnvironments", + "batch:CreateJobQueue", + "batch:DescribeJobQueues", + "batch:UpdateComputeEnvironment", + "batch:DeleteComputeEnvironment", + "batch:UpdateJobQueue", + "batch:DeleteJobQueue", + "batch:TagResource", + "fsx:DeleteFileSystem", + "fsx:DescribeFileSystems", + "fsx:CreateFileSystem", + "fsx:TagResource", + "ec2:DescribeSecurityGroups", + "ec2:DescribeAccountAttributes", + "ec2:DescribeSubnets", + "ec2:DescribeLaunchTemplates", + "ec2:DescribeLaunchTemplateVersions", + "ec2:CreateLaunchTemplate", + "ec2:DeleteLaunchTemplate", + 
"ec2:DescribeKeyPairs", + "ec2:DescribeVpcs", + "ec2:DescribeInstanceTypeOfferings", + "ec2:GetEbsEncryptionByDefault", + "elasticfilesystem:DescribeMountTargets", + "elasticfilesystem:CreateMountTarget", + "elasticfilesystem:CreateFileSystem", + "elasticfilesystem:DescribeFileSystems", + "elasticfilesystem:DeleteMountTarget", + "elasticfilesystem:DeleteFileSystem", + "elasticfilesystem:UpdateFileSystem", + "elasticfilesystem:PutLifecycleConfiguration", + "elasticfilesystem:TagResource" + ], + "Resource": "*" + } + ] +} diff --git a/platform-enterprise/_templates/aws-batch/launch-policy.json b/platform-enterprise/_templates/aws-batch/launch-policy.json new file mode 100644 index 000000000..f129defa5 --- /dev/null +++ b/platform-enterprise/_templates/aws-batch/launch-policy.json @@ -0,0 +1,36 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "TowerLaunch0", + "Effect": "Allow", + "Action": [ + "batch:DescribeJobQueues", + "batch:CancelJob", + "batch:SubmitJob", + "batch:ListJobs", + "batch:TagResource", + "batch:DescribeComputeEnvironments", + "batch:TerminateJob", + "batch:DescribeJobs", + "batch:RegisterJobDefinition", + "batch:DescribeJobDefinitions", + "ecs:DescribeTasks", + "ec2:DescribeInstances", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstanceAttribute", + "ecs:DescribeContainerInstances", + "ec2:DescribeInstanceStatus", + "logs:Describe*", + "logs:Get*", + "logs:List*", + "logs:StartQuery", + "logs:StopQuery", + "logs:TestMetricFilter", + "logs:FilterLogEvents", + "secretsmanager:ListSecrets" + ], + "Resource": "*" + } + ] +} \ No newline at end of file diff --git a/platform-enterprise/_templates/aws-batch/s3-bucket-write-policy.json b/platform-enterprise/_templates/aws-batch/s3-bucket-write-policy.json new file mode 100644 index 000000000..684f37e12 --- /dev/null +++ b/platform-enterprise/_templates/aws-batch/s3-bucket-write-policy.json @@ -0,0 +1,26 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:ListBucket" + ], + "Resource": [ + "arn:aws:s3:::YOUR-BUCKET-NAME" + ] + }, + { + "Action": [ + "s3:GetObject", + "s3:PutObject", + "s3:PutObjectTagging", + "s3:DeleteObject" + ], + "Resource": [ + "arn:aws:s3:::YOUR-BUCKET-NAME/*" + ], + "Effect": "Allow" + } + ] +} \ No newline at end of file diff --git a/platform-enterprise/_templates/aws-batch/secrets-policy-account.json b/platform-enterprise/_templates/aws-batch/secrets-policy-account.json new file mode 100644 index 000000000..238a76b68 --- /dev/null +++ b/platform-enterprise/_templates/aws-batch/secrets-policy-account.json @@ -0,0 +1,16 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "TowerLaunch0", + "Effect": "Allow", + "Action": [ + "secretsmanager:DescribeSecret", + "secretsmanager:DeleteSecret", + "secretsmanager:ListSecrets", + "secretsmanager:CreateSecret" + ], + "Resource": "*" + } + ] +} \ No newline at end of file diff --git a/platform-enterprise/_templates/aws-batch/secrets-policy-execution-role.json b/platform-enterprise/_templates/aws-batch/secrets-policy-execution-role.json new file mode 100644 index 000000000..120ca0823 --- /dev/null +++ b/platform-enterprise/_templates/aws-batch/secrets-policy-execution-role.json @@ -0,0 +1,11 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "VisualEditor0", + "Effect": "Allow", + "Action": "secretsmanager:GetSecretValue", + "Resource": "arn:aws:secretsmanager:*:*:secret:tower-*" + } + ] +} \ No newline at end of file diff --git 
a/platform-enterprise/_templates/aws-batch/secrets-policy-instance-role.json b/platform-enterprise/_templates/aws-batch/secrets-policy-instance-role.json new file mode 100644 index 000000000..352388968 --- /dev/null +++ b/platform-enterprise/_templates/aws-batch/secrets-policy-instance-role.json @@ -0,0 +1,25 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "VisualEditor1", + "Effect": "Allow", + "Action": "secretsmanager:GetSecretValue", + "Resource": "arn:aws:secretsmanager:*:*:secret:tower-*" + }, + { + "Sid": "VisualEditor2", + "Effect": "Allow", + "Action": "secretsmanager:ListSecrets", + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "iam:GetRole", + "iam:PassRole" + ], + "Resource": "arn:aws:iam:::role/" + } + ] +} \ No newline at end of file diff --git a/platform-enterprise/_templates/eks/eks-iam-policy.json b/platform-enterprise/_templates/eks/eks-iam-policy.json new file mode 100644 index 000000000..951a40011 --- /dev/null +++ b/platform-enterprise/_templates/eks/eks-iam-policy.json @@ -0,0 +1,14 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "TowerEks0", + "Effect": "Allow", + "Action": [ + "eks:ListClusters", + "eks:DescribeCluster" + ], + "Resource": "*" + } + ] + } \ No newline at end of file diff --git a/platform-enterprise/_templates/k8s/tower-launcher.yml b/platform-enterprise/_templates/k8s/tower-launcher.yml new file mode 100644 index 000000000..2d6ac8cca --- /dev/null +++ b/platform-enterprise/_templates/k8s/tower-launcher.yml @@ -0,0 +1,60 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: tower-nf + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: tower-launcher-sa + namespace: tower-nf + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: tower-launcher-role + namespace: tower-nf +rules: + - apiGroups: [""] + resources: ["pods", "pods/status", "pods/log", "pods/exec", "persistentvolumeclaims", "configmaps"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: ["apps"] + resources: ["deployments"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: ["batch"] + resources: ["jobs", "jobs/status", "jobs/log"] + verbs: ["get", "list", "watch", "create", "delete"] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: tower-launcher-rolebind + namespace: tower-nf +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: tower-launcher-role +subjects: + - kind: ServiceAccount + name: tower-launcher-sa + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: tower-launcher-userbind + namespace: tower-nf +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: tower-launcher-role +subjects: + - kind: User + name: tower-launcher-user diff --git a/platform-enterprise/_templates/k8s/tower-scratch-local.yml b/platform-enterprise/_templates/k8s/tower-scratch-local.yml new file mode 100644 index 000000000..f282e9e71 --- /dev/null +++ b/platform-enterprise/_templates/k8s/tower-scratch-local.yml @@ -0,0 +1,31 @@ +# PVC backed by local storage +# Only works for a single node cluster + +apiVersion: v1 +kind: PersistentVolume +metadata: + name: tower-storage + namespace: tower-nf +spec: + storageClassName: scratch + capacity: + storage: 10Gi + accessModes: + - ReadWriteMany + hostPath: + path: /tmp/tower + +--- + +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: tower-scratch + namespace: tower-nf +spec: + accessModes: + - ReadWriteMany + resources: + requests: + 
storage: 10Gi + storageClassName: scratch diff --git a/platform-enterprise/_templates/k8s/tower-scratch-nfs.yml b/platform-enterprise/_templates/k8s/tower-scratch-nfs.yml new file mode 100644 index 000000000..9e3e29a98 --- /dev/null +++ b/platform-enterprise/_templates/k8s/tower-scratch-nfs.yml @@ -0,0 +1,111 @@ +# PVC backed by NFS server +# +# For K8s environments other than GKE, you will need to modify the nfs-storage +# persistent volume to reference directly the IP address of the nfs-server: +# 1. Deploy the nfs-server resources +# 2. Get the IP address of the nfs-server with `kubectl get service nfs-server` +# 3. Replace `nfs-server.tower-nf.svc.cluster.local` with the IP address in the nfs-storage YAML +# 4. Deploy the nfs-storage PV and tower-scratch PVC +# +# For more information, see https://github.com/kubernetes/minikube/issues/3417#issuecomment-670005434 + +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: nfs-server + namespace: tower-nf +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi + storageClassName: standard + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nfs-server + namespace: tower-nf +spec: + replicas: 1 + selector: + matchLabels: + role: nfs-server + template: + metadata: + labels: + role: nfs-server + spec: + containers: + - name: nfs-server + image: gcr.io/google_containers/volume-nfs:0.8 + ports: + - name: nfs + containerPort: 2049 + - name: mountd + containerPort: 20048 + - name: rpcbind + containerPort: 111 + securityContext: + privileged: true + volumeMounts: + - mountPath: /exports + name: vol-1 + volumes: + - name: vol-1 + persistentVolumeClaim: + claimName: nfs-server + +--- + +apiVersion: v1 +kind: Service +metadata: + name: nfs-server + namespace: tower-nf +spec: + ports: + - name: nfs + port: 2049 + - name: mountd + port: 20048 + - name: rpcbind + port: 111 + selector: + role: nfs-server + +--- + +apiVersion: v1 +kind: PersistentVolume +metadata: + name: nfs-storage + namespace: tower-nf +spec: + capacity: + storage: 10Gi + accessModes: + - ReadWriteMany + nfs: + # For K8s environments other than GKE, the nfs-server IP address must be used + server: nfs-server.tower-nf.svc.cluster.local + path: "/" + +--- + +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: tower-scratch + namespace: tower-nf +spec: + accessModes: + - ReadWriteMany + storageClassName: "" + resources: + requests: + storage: 10Gi + diff --git a/platform-enterprise/_todo.mdx b/platform-enterprise/_todo.mdx new file mode 100644 index 000000000..7ee3be43d --- /dev/null +++ b/platform-enterprise/_todo.mdx @@ -0,0 +1,74 @@ +FAQ TO DOs: +title: "\_todo" + +# GENERAL + +- $TOWER_AGENT_WORKDIR +- What does `NXF_PLUGINS_DEFAULT` environment variable do? +- Where is the analysis running? +- What about security of my data? +- Identity via LDAP/Active Directory +- Difference between free and paid Tower? +- Can I have Service-Account-type and Agent-type credentials in the same Workspace? + - Not right now. Must choose. https://github.com/seqeralabs/nf-tower-cloud/issues/2879#issuecomment-1072646557 + +# Amazon + +- [Can I have Nextflow automatically retry Tasks that fail due to Spot instance reclamation?](#aws_spot_retry) +- **Can I have Nextflow automatically retry Tasks that fail due to Spot instance reclamation?** + + Yes. As of Tower version ?????, any Spot-based AWS Batch Compute Environment created by Batch Forge will be automatically configured to retry each process 3 times. ??? If a retry policy is not defined??? 
+ + Given that Spots can be reclaimed during the execution of a job, it is a recommended practice that pipeline authors always include retry logic in their logic. ???See HERE for examples??? + https://github.com/seqeralabs/nf-tower-cloud/pull/2820/files + + - [Why won't Secrets work with my legacy Batch Forge-created AWS Batch Compute Environment?](aws_secrets_legacy) + - **Why won't Secrets work with my legacy Batch Forge-created AWS Batch Compute Environment?** + + The Secrets feature requires new permissions to be added to existing IAM Roles: + + - The User/Role used by your Tower implementation must have the `secretsmanager:CreateSecret`. + - Your Batch EC2 Instance Role must have ??? execution role ??? + - Added details from here: https://github.com/seqeralabs/nf-tower-cloud/pull/2820 + + Add Tower Agent blurb re: Rijkzwaan as per https://git.seqera.io/rijkzwaan/nf-support/issues/15#issuecomment-8438 + + Meeting summary for 31-03-2021 + +Adding quick summary for the meeting today, please feel free to add/correct anything I might have missed. + + With Jordi's guidance, the $TW_USER_AGENT was successfully used on an agent running in Kim's account to launch a pipeline from Daniel's user on Tower UI + + The slight nuance on the RKZ cluster was that the home directories seem to be following a non-standard pattern i.e. with all-caps usernames (for example /home/DCR) and we had to append USER=DCR tw-agent ... to enable the agent. + + An upgrade to the latest version of Tower would enable the use of $TW_AGENT_WORK variable with the agent + + We also discussed the usage of pipeline reports feature + +Follow-ups + + @daniel-cruz-rijkzwaan to follow up here with an independent experiment to get up and running with tw-agent + + Abhinav to request the addition of Daniel's account in the community/showcase workspace in tower.nf + +Warmly, +Abhinav + +AZURE Batch - SSL problem as per https://git.seqera.io/eagle/nf-support/issues/10#issuecomment-8523 + +### Determine if this is relevant to Google FAQ section: + +https://github.com/nf-core/configs/blob/master/conf/google.config as per this ticket: https://git.seqera.io/pluto/nf-support/issues/6 + +TO DO: As per Ben, document profile injection behaviour into Tower docs (e.g. 
nf-core automagically injecting google profile if executing on GLS) + +Azure prereqs notes dump: + + + + + + + + + \ No newline at end of file diff --git a/platform-enterprise/administration/_images/edit_organization.png b/platform-enterprise/administration/_images/edit_organization.png new file mode 100644 index 000000000..6870c0821 Binary files /dev/null and b/platform-enterprise/administration/_images/edit_organization.png differ diff --git a/platform-enterprise/administration/_images/edit_user.png b/platform-enterprise/administration/_images/edit_user.png new file mode 100644 index 000000000..c4d8bdf0e Binary files /dev/null and b/platform-enterprise/administration/_images/edit_user.png differ diff --git a/platform-enterprise/administration/_images/manage_orgs_button.png b/platform-enterprise/administration/_images/manage_orgs_button.png new file mode 100644 index 000000000..4f50438a6 Binary files /dev/null and b/platform-enterprise/administration/_images/manage_orgs_button.png differ diff --git a/platform-enterprise/administration/_images/organization_administration.png b/platform-enterprise/administration/_images/organization_administration.png new file mode 100644 index 000000000..f030940d0 Binary files /dev/null and b/platform-enterprise/administration/_images/organization_administration.png differ diff --git a/platform-enterprise/administration/_images/organization_members.png b/platform-enterprise/administration/_images/organization_members.png new file mode 100644 index 000000000..632006216 Binary files /dev/null and b/platform-enterprise/administration/_images/organization_members.png differ diff --git a/platform-enterprise/administration/_images/users_organizations.png b/platform-enterprise/administration/_images/users_organizations.png new file mode 100644 index 000000000..f2ab65f84 Binary files /dev/null and b/platform-enterprise/administration/_images/users_organizations.png differ diff --git a/platform-enterprise/administration/overview.mdx b/platform-enterprise/administration/overview.mdx new file mode 100644 index 000000000..908aa0975 --- /dev/null +++ b/platform-enterprise/administration/overview.mdx @@ -0,0 +1,55 @@ +## Administration of users, organizations, and memberships + +title: "overview" + +As a **Root user**, you can access a comprehensive overview of the users, workspaces, and organizations in the system from the **Admin panel**. + +The Admin panel menu entry will only be accessible in the top right avatar menu if you are logged in as a Root user. This role should only be assigned to a system administrator, since it enables several high level and potentially risky operations. + +### User administration + +The User administration page lists all the users in the Tower database. From this page, you can: + +#### Search users + +The user search function allows you to find a specific user by name or email and perform various operations with that user. + +#### Create a user + +The Add user button above the table allows you to create a new user. If the new user email already exists in the system, the user creation will fail. Once the new user has been created, inform them that access has been granted. + +#### Edit a user + +By selecting a username from the table, you can edit the user's details, or delete the user. + +### Membership administration + +**Available from version 22.3.X** + +From the user list, you have an overview of all the memberships for the selected user. The Membership administration page is reached by selecting the **Edit organizations** button. 
From here, you can list and search for all the organizations the user belongs to (as a member or as an owner), change the role of the user for a given membership, remove the user from an organization, or add the user to a new organization. + +**Note:** You can only add users to an existing organization, and you cannot remove the last owner of an organization. + +### Organization administration + +The Organization administration page lists all the organizations in the Tower database. From this page, you can: + +#### Search organizations + +The organization search function allows you to find a specific organization by its name or email and perform various operations with that organization. + +#### Create an organization + +The Add organization button above the table allows you to create a new organization from scratch. + +#### Edit an organization + +By selecting an organization name from the table, you can edit the organization's details, or delete it. + +#### Membership administration + +**Available from version 22.3.X** + +From the organizations list, you have an overview of all the memberships for the selected organization. Select the **Manage users** button to access the Membership administration page. From here, you can list and search for all the users that are members or owners of the selected organization, change the role of the user for the given membership, remove the member from the organization, or add a new user to the organization. + +**Note:** You can only add existing users to an organization, and you cannot remove a membership if the user being removed is the last owner of the selected organization. To overcome this, promote another user to **Owner** before removing or demoting the last owner. diff --git a/platform-enterprise/agent.mdx b/platform-enterprise/agent.mdx new file mode 100644 index 000000000..bb674b716 --- /dev/null +++ b/platform-enterprise/agent.mdx @@ -0,0 +1,79 @@ +--- +title: "agent" +description: "Using Tower Agent" +--- + +## Overview + +Tower Agent enables Tower to launch pipelines on HPC clusters that do not allow direct access through an SSH client. + +Tower Agent is a standalone process that runs on a node that can submit jobs to the cluster (e.g. login node). It establishes an authenticated secure reverse connection with Tower, allowing Tower to submit and monitor new +jobs. The jobs are submitted on behalf of the user running the agent. + +### Installation + +Tower Agent is distributed as a single executable file to simply download and execute. + +1. Download the latest release from [Github](https://github.com/seqeralabs/tower-agent): + + ```bash + curl -fSL https://github.com/seqeralabs/tower-agent/releases/latest/download/tw-agent-linux-x86_64 > tw-agent + ``` + +2. Make it executable: + + ```bash + chmod +x ./tw-agent + ``` + +3. (Optional) Move it into a folder that is in your path. + +### Quickstart + +Before running the Agent: + +1. Create a [**personal access token**](api/overview.mdx#authentication) in Tower. + +2. Create **Tower Agent** credentials in a Tower workspace. See [here](credentials/overview.mdx) for more instructions. + +:::note +To share a single Tower Agent instance with all members of a workspace, create a Tower Agent credential with **Shared agent** enabled. +::: + +When you create the credentials you'll get an **Agent Connection ID**. You can use the default ID or enter a custom ID — the connection ID in the workspace credentials must match the ID entered when you run the agent. 
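+
+For example, when connecting the agent to a Tower Enterprise installation, you can also point it at a custom API endpoint and work directory. The connection ID, endpoint URL, and work directory below are placeholder values; replace them with your own:
+
+```bash
+# Personal access token used by the agent to authenticate with Tower
+export TOWER_ACCESS_TOKEN=<your access token>
+
+# Start the agent with the same connection ID entered in the workspace credentials
+./tw-agent <your connection ID> \
+    --url=https://tower.example.com/api \
+    --work-dir=/scratch/tower/work
+```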
+ +![credentials](./_images/tw_agent.png) + +The agent should always be running in order to accept incoming requests from Tower. We recommend that you use a terminal multiplexer like [tmux](https://github.com/tmux/tmux) or [GNU Screen](https://www.gnu.org/software/screen/), so that it keeps running even if you close your SSH session. + +```bash +export TOWER_ACCESS_TOKEN= +./tw-agent +``` + +![tw-agent](./_images/tw_agent_running.png) + +### Tips + +- If you are using the agent with Tower Enterprise (on-prem) you can set the API url using the `TOWER_API_ENDPOINT` environment variable or the `--url` option. +- By default, the Agent uses the folder `${HOME}/work` as the Nextflow work directory. You can change it using the `--work-dir` option. +- The work directory **must** exist before running the agent. +- You can also change the work directory in Tower when you create a compute environment or launch a pipeline. + +### Usage + +```bash +Usage: tw-agent [OPTIONS] AGENT_CONNECTION_ID + +Nextflow Tower Agent + +Parameters: +* AGENT_CONNECTION_ID Agent connection ID to identify this agent. + +Options: +* -t, --access-token= Tower personal access token. If not provided TOWER_ACCESS_TOKEN variable will be used. + -u, --url= Tower server API endpoint URL. If not provided TOWER_API_ENDPOINT variable will be used [default: https://api.cloud.seqera.io]. + -w, --work-dir= Default path where the pipeline scratch data is stored. It can be changed when launching a pipeline from Tower [default: ~/work]. + -h, --help Show this help message and exit. + -V, --version Print version information and exit. +``` diff --git a/platform-enterprise/api/_images/api_example_call.png b/platform-enterprise/api/_images/api_example_call.png new file mode 100644 index 000000000..0633e4ea1 Binary files /dev/null and b/platform-enterprise/api/_images/api_example_call.png differ diff --git a/platform-enterprise/api/_images/api_tokens.png b/platform-enterprise/api/_images/api_tokens.png new file mode 100644 index 000000000..2894ba5ad Binary files /dev/null and b/platform-enterprise/api/_images/api_tokens.png differ diff --git a/platform-enterprise/api/_images/personal_access_token.png b/platform-enterprise/api/_images/personal_access_token.png new file mode 100644 index 000000000..a58271237 Binary files /dev/null and b/platform-enterprise/api/_images/personal_access_token.png differ diff --git a/platform-enterprise/api/_images/token_form.png b/platform-enterprise/api/_images/token_form.png new file mode 100644 index 000000000..054f34886 Binary files /dev/null and b/platform-enterprise/api/_images/token_form.png differ diff --git a/platform-enterprise/api/_images/your_tokens.png b/platform-enterprise/api/_images/your_tokens.png new file mode 100644 index 000000000..a693e2e23 Binary files /dev/null and b/platform-enterprise/api/_images/your_tokens.png differ diff --git a/platform-enterprise/api/overview.mdx b/platform-enterprise/api/overview.mdx new file mode 100644 index 000000000..718effc03 --- /dev/null +++ b/platform-enterprise/api/overview.mdx @@ -0,0 +1,97 @@ +--- +title: "Tower API" +description: "Using the Nextflow Tower API." +--- + +Tower exposes a public API with all the necessary endpoints to manage Nextflow workflows programmatically, allowing organizations to incorporate Tower seamlessly into their existing processes. + +## Overview + +:::note +As of version 23.4, the Seqera API is live on `https://api.cloud.seqera.io`. 
The legacy API `https://api.tower.nf` remains fully operational, so existing API integrations will continue to perform as expected. Deprecation of the legacy API will be communicated well in advance to avoid any breaking changes to your integrations.
+:::
+
+The Tower API can be accessed from `https://api.cloud.seqera.io`. All API endpoints use HTTPS, and all request and response payloads use [JSON](https://www.json.org/) encoding. All timestamps use the [ISO 8601](https://www.iso.org/iso-8601-date-and-time-format.html) date-time standard format: `YYYY-MM-DDTHH:MM:SSZ`.
+
+### OpenAPI
+
+The Tower API uses the OpenAPI standard. The current OpenAPI schema can be found [here](https://cloud.seqera.io/openapi/seqera-api-latest.yml).
+
+:::tip
+For more information on the OpenAPI standard, see [OpenAPI](https://swagger.io/specification/).
+:::
+
+### Endpoints
+
+See [Seqera Platform services API](https://cloud.seqera.io/openapi/index.html) for a detailed list of all endpoints. This page also includes request and response payload examples, and the ability to test each endpoint interactively.
+
+### Programmatic API
+
+You can use tools such as [openapi-python-client](https://github.com/openapi-generators/openapi-python-client) to generate a programmatic API for a particular language (e.g. Python) based on the OpenAPI schema. However, we do not guarantee that any OpenAPI client generator will work with the Tower API; use them at your own risk.
+
+### Authentication
+
+The Tower API requires an authentication token to be specified in each API request using the [Bearer](https://swagger.io/docs/specification/authentication/bearer-authentication) HTTP header.
+
+Your personal authorization token can be found in the user top-right menu under [Your tokens](https://cloud.seqera.io/tokens).
+
+To create a new access token, provide a name for the token. This will help you identify it later.
+
+![](./_images/token_form.png)
+
+The token is only displayed once. Store your token in a safe place.
+
+Once created, use the token to authenticate your API requests via cURL, Postman, or from within your own code.
+
+### cURL example
+
+```bash
+curl -H "Authorization: Bearer eyJ...YTk0" https://api.cloud.seqera.io/workflow
+```
+
+:::tip
+Your token must be included in every API call. See [Bearer token authentication](https://swagger.io/docs/specification/authentication/bearer-authentication) for more information on bearer token authentication.
+:::
+
+### Parameters
+
+Some API `GET` methods accept standard `query` parameters, which are defined in the documentation; optional `querystring` parameters such as page size, page number (when available), and file name; and body parameters, mostly used for `POST`, `PUT`, and `DELETE` requests.
+
+Additionally, several header parameters are accepted, such as `Authorization` for the bearer access token or `Accept-Version` to indicate the desired API version to use (defaults to version 1):
+
+```bash
+curl -H "Authorization: Bearer QH..E5M=" \
+  -H "Accept-Version:1" \
+  -X POST "https://api.cloud.seqera.io/domain/{item_id}?queryString={value}" \
+  -d '{ "params": { "key": "value" } }'
+```
+
+### Client errors
+
+There are two typical error responses (non-`200` or `204` status codes) to expect from the API.
+
+### Bad Request
+
+The request payload is not properly defined or the query parameters are invalid.
+
+```json
+{
+  "message": "Oops... Unable to process request - Error ID: 54apnFENQxbvCr23JaIjLb"
+}
+```
+
+### Forbidden
+
+Your access token is invalid or expired.
This response may also imply that the entry point you are trying to access is not available; in such a case, it is recommended you check your request syntax. + +```bash +Status: 403 Forbidden +``` + +### Rate limiting + +For all API requests, there is a limit of 20 calls per second (72000 calls per hour) and access key. diff --git a/platform-enterprise/cli.mdx b/platform-enterprise/cli.mdx new file mode 100644 index 000000000..18f10eb1e --- /dev/null +++ b/platform-enterprise/cli.mdx @@ -0,0 +1,29 @@ +--- +title: Tower CLI +headline: "CLI" +description: "Using the Tower CLI." +--- + +# Nextflow Tower CLI + +`tw` is [Tower](https://tower.nf/) on the command line. It brings Tower concepts including Pipelines, Actions and Compute Environments to the terminal. + +Tower is a full-stack application for the management of data pipelines and compute resources. It enables collaborative data analysis at scale, on-premises or in any cloud. + +The Tower CLI interacts with Tower, providing an interface to launch pipelines, manage cloud resources and administer your analysis. + +![tw](./_images/tw_cli.png) + +### Key features + +- **A Nextflow-like experience**: Tower CLI provides a developer-friendly environment. Pipelines can be launched with the CLI similar to Nextflow but with the benefits of Tower such as monitoring, logging, resource provisioning, dataset management, and collaborative sharing. + +- **Infrastructure as Code**: All Tower resources including Pipelines and Compute Environments can be described in a declarative manner. This allows a complete definition of an analysis environment that can be versioned and treated as code. It greatly simplifies sharing and re-use of configuration as well as routine administration. + +- **Built on OpenAPI**: Tower CLI interacts with Tower via the [Tower API](./api/overview.mdx) which is created using the latest OpenAPI 3.0 specification. Tower CLI provides full control of the Tower application allowing users to get maximum insights into their pipeline submissions and execution environments. + +### Availability + +Tower CLI can be installed on macOS, Windows, and Linux. + +Visit the [Tower CLI](https://github.com/seqeralabs/tower-cli/) page on GitHub for installation and configuration details. 
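+
+As a quick sketch of a typical `tw` session (the workspace and pipeline shown below are placeholders, and option names may vary slightly between CLI versions):
+
+```bash
+# Authenticate the CLI with a Tower personal access token
+export TOWER_ACCESS_TOKEN=<your access token>
+
+# For Tower Enterprise, also point the CLI at your own API endpoint
+export TOWER_API_ENDPOINT=https://tower.example.com/api
+
+# Verify connectivity and credentials
+tw info
+
+# List the compute environments available in a workspace
+tw compute-envs list --workspace=my-org/my-workspace
+
+# Launch a pipeline into that workspace
+tw launch nf-core/rnaseq --workspace=my-org/my-workspace
+```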
diff --git a/platform-enterprise/compute-envs/_images/aws_keys.png b/platform-enterprise/compute-envs/_images/aws_keys.png new file mode 100644 index 000000000..e8310249a Binary files /dev/null and b/platform-enterprise/compute-envs/_images/aws_keys.png differ diff --git a/platform-enterprise/compute-envs/_images/aws_lustre_options.png b/platform-enterprise/compute-envs/_images/aws_lustre_options.png new file mode 100644 index 000000000..31261ae8e Binary files /dev/null and b/platform-enterprise/compute-envs/_images/aws_lustre_options.png differ diff --git a/platform-enterprise/compute-envs/_images/aws_new_env.png b/platform-enterprise/compute-envs/_images/aws_new_env.png new file mode 100644 index 000000000..942d4fedc Binary files /dev/null and b/platform-enterprise/compute-envs/_images/aws_new_env.png differ diff --git a/platform-enterprise/compute-envs/_images/aws_new_env_manual_config.png b/platform-enterprise/compute-envs/_images/aws_new_env_manual_config.png new file mode 100644 index 000000000..88153ddeb Binary files /dev/null and b/platform-enterprise/compute-envs/_images/aws_new_env_manual_config.png differ diff --git a/platform-enterprise/compute-envs/_images/aws_new_env_name.png b/platform-enterprise/compute-envs/_images/aws_new_env_name.png new file mode 100644 index 000000000..75a7257f1 Binary files /dev/null and b/platform-enterprise/compute-envs/_images/aws_new_env_name.png differ diff --git a/platform-enterprise/compute-envs/_images/aws_new_launch_env.png b/platform-enterprise/compute-envs/_images/aws_new_launch_env.png new file mode 100644 index 000000000..b4d26418d Binary files /dev/null and b/platform-enterprise/compute-envs/_images/aws_new_launch_env.png differ diff --git a/platform-enterprise/compute-envs/_images/azure_config_mode_forge.png b/platform-enterprise/compute-envs/_images/azure_config_mode_forge.png new file mode 100644 index 000000000..50aaee55b Binary files /dev/null and b/platform-enterprise/compute-envs/_images/azure_config_mode_forge.png differ diff --git a/platform-enterprise/compute-envs/_images/azure_keys.png b/platform-enterprise/compute-envs/_images/azure_keys.png new file mode 100644 index 000000000..ca98b4b97 Binary files /dev/null and b/platform-enterprise/compute-envs/_images/azure_keys.png differ diff --git a/platform-enterprise/compute-envs/_images/azure_manual_jobs_pool.png b/platform-enterprise/compute-envs/_images/azure_manual_jobs_pool.png new file mode 100644 index 000000000..d8d7b2b97 Binary files /dev/null and b/platform-enterprise/compute-envs/_images/azure_manual_jobs_pool.png differ diff --git a/platform-enterprise/compute-envs/_images/azure_new_env_name.png b/platform-enterprise/compute-envs/_images/azure_new_env_name.png new file mode 100644 index 000000000..20aa08e6e Binary files /dev/null and b/platform-enterprise/compute-envs/_images/azure_new_env_name.png differ diff --git a/platform-enterprise/compute-envs/_images/azure_newly_created_env.png b/platform-enterprise/compute-envs/_images/azure_newly_created_env.png new file mode 100644 index 000000000..9413c51b2 Binary files /dev/null and b/platform-enterprise/compute-envs/_images/azure_newly_created_env.png differ diff --git a/platform-enterprise/compute-envs/_images/azure_tower_forge.png b/platform-enterprise/compute-envs/_images/azure_tower_forge.png new file mode 100644 index 000000000..1e395257d Binary files /dev/null and b/platform-enterprise/compute-envs/_images/azure_tower_forge.png differ diff --git a/platform-enterprise/compute-envs/_images/azure_tower_manual.png 
b/platform-enterprise/compute-envs/_images/azure_tower_manual.png new file mode 100644 index 000000000..28473f102 Binary files /dev/null and b/platform-enterprise/compute-envs/_images/azure_tower_manual.png differ diff --git a/platform-enterprise/compute-envs/_images/container_registry_credentials_blank.png b/platform-enterprise/compute-envs/_images/container_registry_credentials_blank.png new file mode 100644 index 000000000..0669b4af7 Binary files /dev/null and b/platform-enterprise/compute-envs/_images/container_registry_credentials_blank.png differ diff --git a/platform-enterprise/compute-envs/_images/fsx_syncronization.png b/platform-enterprise/compute-envs/_images/fsx_syncronization.png new file mode 100644 index 000000000..2ec223259 Binary files /dev/null and b/platform-enterprise/compute-envs/_images/fsx_syncronization.png differ diff --git a/platform-enterprise/compute-envs/_images/gke_regions.png b/platform-enterprise/compute-envs/_images/gke_regions.png new file mode 100644 index 000000000..93f75a91d Binary files /dev/null and b/platform-enterprise/compute-envs/_images/gke_regions.png differ diff --git a/platform-enterprise/compute-envs/_images/gke_zone.png b/platform-enterprise/compute-envs/_images/gke_zone.png new file mode 100644 index 000000000..dba75d33f Binary files /dev/null and b/platform-enterprise/compute-envs/_images/gke_zone.png differ diff --git a/platform-enterprise/compute-envs/_images/google_batch_locations.png b/platform-enterprise/compute-envs/_images/google_batch_locations.png new file mode 100755 index 000000000..9ceddfc7b Binary files /dev/null and b/platform-enterprise/compute-envs/_images/google_batch_locations.png differ diff --git a/platform-enterprise/compute-envs/_images/google_batch_new_env.png b/platform-enterprise/compute-envs/_images/google_batch_new_env.png new file mode 100755 index 000000000..00097e31a Binary files /dev/null and b/platform-enterprise/compute-envs/_images/google_batch_new_env.png differ diff --git a/platform-enterprise/compute-envs/_images/google_batch_review_env.png b/platform-enterprise/compute-envs/_images/google_batch_review_env.png new file mode 100755 index 000000000..b93b56d6a Binary files /dev/null and b/platform-enterprise/compute-envs/_images/google_batch_review_env.png differ diff --git a/platform-enterprise/compute-envs/_images/google_credentials.png b/platform-enterprise/compute-envs/_images/google_credentials.png new file mode 100644 index 000000000..e8fefbecf Binary files /dev/null and b/platform-enterprise/compute-envs/_images/google_credentials.png differ diff --git a/platform-enterprise/compute-envs/_images/google_filestore.png b/platform-enterprise/compute-envs/_images/google_filestore.png new file mode 100644 index 000000000..0e3c3956f Binary files /dev/null and b/platform-enterprise/compute-envs/_images/google_filestore.png differ diff --git a/platform-enterprise/compute-envs/_images/google_new_env.png b/platform-enterprise/compute-envs/_images/google_new_env.png new file mode 100644 index 000000000..76a3bdfd7 Binary files /dev/null and b/platform-enterprise/compute-envs/_images/google_new_env.png differ diff --git a/platform-enterprise/compute-envs/_images/google_regions_and_zones.png b/platform-enterprise/compute-envs/_images/google_regions_and_zones.png new file mode 100644 index 000000000..392d109ae Binary files /dev/null and b/platform-enterprise/compute-envs/_images/google_regions_and_zones.png differ diff --git a/platform-enterprise/compute-envs/_images/google_review_env.png 
b/platform-enterprise/compute-envs/_images/google_review_env.png new file mode 100644 index 000000000..25ebb3635 Binary files /dev/null and b/platform-enterprise/compute-envs/_images/google_review_env.png differ diff --git a/platform-enterprise/compute-envs/_images/head_job_propagation.png b/platform-enterprise/compute-envs/_images/head_job_propagation.png new file mode 100644 index 000000000..9150545b0 Binary files /dev/null and b/platform-enterprise/compute-envs/_images/head_job_propagation.png differ diff --git a/platform-enterprise/compute-envs/altair-grid-engine.mdx b/platform-enterprise/compute-envs/altair-grid-engine.mdx new file mode 100644 index 000000000..ab820a091 --- /dev/null +++ b/platform-enterprise/compute-envs/altair-grid-engine.mdx @@ -0,0 +1,62 @@ +--- +title: "altair-grid-engine" +description: "Step-by-step instructions to set up Grid engine for Nextflow Tower." +--- + +## Overview + +[Altair Grid Engine](https://www.altair.com/grid-engine/) is a workload manager maintained by [Altair Engineering, Inc](https://www.altair.com). + +Tower streamlines the deployment of Nextflow pipelines into both cloud-based and on-prem Grid Engine clusters. + +### Requirements + +To launch pipelines into a **Grid Engine** cluster from Tower, the following requirements must be satisfied: + +- The cluster should allow outbound connections to the Tower web service. +- The cluster queue used to run the Nextflow head job must be able to submit cluster jobs. +- The Nextflow runtime version **21.02.0-edge** (or later) should be installed on the cluster. + +### Compute Environment + +To create a new compute environment for **Grid Engine** in Tower: + +1. In a workspace, select **Compute Environments** and then **New Environment**. + +2. Enter a descriptive name for this environment, e.g. "Grid Engine". + +3. Select **Altair Grid Engine** as the target platform. + +4. Select your credentials, or select **+** and **SSH** or **Tower Agent** to add new credentials. + +5. Enter a name for the credentials. + +6. Enter the absolute path of the **Work directory** to be used on the cluster. + +7. Enter the absolute path of the **Launch directory** to be used on the cluster. If omitted, it will be the same as the work directory. + +8. Enter the **Login hostname**, which is usually the hostname or public IP address of the cluster's login node. + +9. Enter the **Head queue name**, the cluster queue to which the Nextflow job will be submitted. + +10. Enter the **Compute queue name**, the cluster queue to which the Nextflow job will submit tasks. + + :::tip + The compute queue can be overridden by the Nextflow pipeline configuration. See the Nextflow [docs](https://www.nextflow.io/docs/latest/process.html#queue) for more details. + ::: + +11. You can use the **Environment variables** option to specify custom environment variables for the Head job and/or Compute jobs. + +12. Configure any advanced options described below, as needed. + +13. Select **Create** to finalize the creation of the compute environment. + +Jump to the documentation for [Launching Pipelines](../launch/launchpad.mdx). + +### Advanced options + +- You can use the **Nextflow queue size** to limit the number of jobs that Nextflow can submit to the scheduler at the same time. + +- You can use the **Head job submit options** to specify Grid Engine options for the head job. 
You can optionally apply these options to compute jobs as well: + + ![](./_images/head_job_propagation.png) diff --git a/platform-enterprise/compute-envs/altair-pbs-pro.mdx b/platform-enterprise/compute-envs/altair-pbs-pro.mdx new file mode 100644 index 000000000..41c9992ab --- /dev/null +++ b/platform-enterprise/compute-envs/altair-pbs-pro.mdx @@ -0,0 +1,62 @@ +--- +title: "altair-pbs-pro" +description: "Step-by-step instructions to set up Altair PBS Pro for Nextflow Tower." +--- + +## Overview + +[Altair PBS Pro](https://www.altair.com/pbs-professional/) is a workload manager and job scheduler tool provided by [Altair Engineering, Inc](https://www.altair.com). + +Tower streamlines the deployment of Nextflow pipelines into both cloud-based and on-prem PBS Pro clusters. + +### Requirements + +To launch pipelines into a **PBS Pro** cluster from Tower, the following requirements must be satisfied: + +- The cluster should allow outbound connections to the Tower web service. +- The cluster queue used to run the Nextflow head job must be able to submit cluster jobs. +- The Nextflow runtime version **21.02.0-edge** (or later) should be installed on the cluster. + +### Compute Environment + +To create a new compute environment for **PBS Pro** in Tower: + +1. In a workspace, select **Compute Environments** and then **New Environment**. + +2. Enter a descriptive name for this environment, e.g. "PBS Pro cluster". + +3. Select **Altair PBS Pro** as the target platform. + +4. Select your credentials, or select **+** and **SSH** or **Tower Agent** to add new credentials. + +5. Enter a name for the credentials. + +6. Enter the absolute path of the **Work directory** to be used on the cluster. + +7. Enter the absolute path of the **Launch directory** to be used on the cluster. If omitted, it will be the same as the work directory. + +8. Enter the **Login hostname**, which is usually the hostname or public IP address of the cluster's login node. + +9. Enter the **Head queue name**, the cluster queue to which the Nextflow job will be submitted. + +10. Enter the **Compute queue name**, the cluster queue to which the Nextflow job will submit tasks. + + :::tip + The compute queue can be overridden by the Nextflow pipeline configuration. See the Nextflow [docs](https://www.nextflow.io/docs/latest/process.html#queue) for more details. + ::: + +11. You can use the **Environment variables** option to specify custom environment variables for the Head job and/or Compute jobs. + +12. Configure any advanced options described below, as needed. + +13. Select **Create** to finalize the creation of the compute environment. + +Jump to the documentation for [Launching Pipelines](../launch/launchpad.mdx). + +### Advanced options + +- You can use the **Nextflow queue size** to limit the number of jobs that Nextflow can submit to the scheduler at the same time. + +- You can use the **Head job submit options** to specify PBS options for the head job. You can optionally apply these options to compute jobs as well: + + ![](./_images/head_job_propagation.png) diff --git a/platform-enterprise/compute-envs/aws-batch.mdx b/platform-enterprise/compute-envs/aws-batch.mdx new file mode 100644 index 000000000..01c03fec5 --- /dev/null +++ b/platform-enterprise/compute-envs/aws-batch.mdx @@ -0,0 +1,320 @@ +--- +title: "aws-batch" +description: "Step-by-step instructions to set up AWS Batch in Nextflow Tower." 
+--- + +## Overview + +:::note +This guide assumes you have an existing [Amazon Web Service (AWS)](https://aws.amazon.com/) account. +::: + +There are two ways to create a **Compute Environment** for **AWS Batch** with Tower: + +1. **Batch Forge**: This option automatically manages the AWS Batch resources in your AWS account. + +2. **Manual**: This option allows you to create a compute environment using existing AWS Batch resources. + +If you don't have an AWS Batch environment fully set up yet, follow the [Batch Forge](#tower-forge) guide. + +If you have been provided an AWS Batch queue from your account administrator, or if you have set up AWS Batch previously, please follow the [Manual](#manual) guide. + +### Batch Forge + +:::caution +Follow these instructions only if you have not pre-configured an AWS Batch environment. Note that this option will automatically create resources in your AWS account that you may be charged for by AWS. +::: + +Batch Forge automates the configuration of an [AWS Batch](https://aws.amazon.com/batch/) compute environment and the queues required for deploying Nextflow pipelines. + +### IAM + +To use the Batch Forge feature, Tower requires an Identity and Access Management (IAM) user with the permissions listed in [this policy file](https://github.com/seqeralabs/nf-tower-aws/blob/master/forge/forge-policy.json). These authorizations are more permissive than those required to only [launch](https://github.com/seqeralabs/nf-tower-aws/blob/master/launch/launch-policy.json) a pipeline, since Tower needs to manage AWS resources on your behalf. Note that launch permissions also require the S3 storage write permissions in [this policy file](https://github.com/seqeralabs/nf-tower-aws/blob/master/launch/s3-bucket-write.json). + +We recommend creating separate IAM policies for Batch Forge and Tower launch permissions using the policy files linked above. These policies can then be assigned to the Tower IAM user. + +#### Create Tower IAM policies + +1. Open the [AWS IAM console](https://console.aws.amazon.com/iam). + +2. From the left navigation menu, select **Policies** under **Access management**. + +3. Select **Create policy**. + +4. On the **Create policy** page, select the **JSON** tab. + +5. Copy the contents of your policy JSON file ([Forge](https://github.com/seqeralabs/nf-tower-aws/blob/master/forge/forge-policy.json) or [Launch](https://github.com/seqeralabs/nf-tower-aws/blob/master/launch/launch-policy.json), depending on the policy being created) and replace the default text in the policy editor area under the JSON tab. To create a Launch user, you must also create the [S3 bucket write policy](https://github.com/seqeralabs/nf-tower-aws/blob/master/launch/s3-bucket-write.json) separately to attach to your Launch user. + +6. Select **Next: Tags**. + +7. Select **Next: Review**. + +8. Enter a name and description for the policy on the Review policy page, then select **Create policy**. + +9. Repeat these steps for both the `forge-policy.json` and `launch-policy.json` files. For a Launch user, also create the `s3-bucket-write-policy.json` listed in step 5 above. + +#### Create an IAM user + +1. From the [AWS IAM console](https://console.aws.amazon.com/iam), select **Users** in the left navigation menu, then select **Add User** at the top rigt of the page. + +2. Enter a name for your user (e.g. `tower`) and select the **Programmatic access** type. + +3. Select **Next: Permissions**. + +4. Select **Next: Tags**, then **Next: Review** and **Create User**. 
+ + :::caution + For the time being, you can ignore the warning. Permissions will be applied using the **IAM Policy**. + ::: + +5. Save the **Access key ID** and **Secret access key** in a secure location as we will use these in the next section. + +6. Once you have saved the keys, select **Close**. + +7. Back in the users table, select the newly created user,then select **Add permissions** under the Permissions tab. + +8. Select **Attach existing policies**, then search for the policies created in the previous section ([Create Tower IAM policies](./aws-batch.mdx#create-tower-iam-policies)) and check each one. + +9. Select **Next: Review**. + +10. Select **Add permissions**. + +### S3 Bucket + +S3 stands for "Simple Storage Service" and is a type of **object storage**. To access files and store the results for our pipelines, we have to create an **S3 Bucket** and grant our new Tower IAM user access to it. + +1. Navigate to [S3 service](https://console.aws.amazon.com/s3/home). + +2. Select **Create New Bucket**. + +3. Enter a unique name for your Bucket and select a region. + + :::caution + The region of the bucket should be in the _same region as the compute environment that we create in the next section_. Typically users select a region closest to their physical location but Batch Forge supports creating resources in any available AWS region. + ::: + +4. Select the default options for **Configure options**. + +5. Select the default options for **Set permissions**. + +6. Review and select **Create bucket**. + + :::caution + S3 is used by Nextflow for the storage of intermediate files. For production pipelines, this can amount to a large quantity of data. To reduce costs, when configuring a bucket, users should consider using a retention policy, such as automatically deleting intermediate files after 30 days. For more information on this process, see [here](https://aws.amazon.com/premiumsupport/knowledge-center/s3-empty-bucket-lifecycle-rule/). + ::: + +### Compute Environment + +Batch Forge automates the configuration of an [AWS Batch](https://aws.amazon.com/batch/) compute environment and queues required for the deployment of Nextflow pipelines. + +Once the AWS resources are set up, we can add a new **AWS Batch** environment in Tower. To create a new compute environment: + +1. In a workspace, select **Compute Environments** and then **New Environment**. + +2. Enter a descriptive name for this environment, e.g. "AWS Batch Spot (eu-west-1)" + +3. Select **Amazon Batch** as the target platform. + +4. From the **Credentials** drop-down, select existing AWS credentials, or add new credentials by selecting the **+** button. If you select to use existing credentials, skip to step 7. + +5. Enter a name, e.g. "AWS Credentials". + +6. Add the **Access key** and **Secret key**. These are the keys you saved previously when you created the AWS [IAM user](#iam). + + :::tip + You can create multiple credentials in your Tower environment. + ::: + + :::note + From version 22.3, Tower supports the use of credentials for container registry services. These credentials can be created from the [Credentials](../credentials/overview.mdx) tab. + ::: + +7. Select a **Region**, for example "eu-west-1 - Europe (Ireland)". + +8. Enter the S3 bucket path created in the previous section to the **Pipeline work directory** field, e.g. `s3://unique-tower-bucket`. + + :::caution + The bucket should be in the same Region selected in the previous step. + ::: + +9. 
Select **Enable Wave containers** to facilitate access to private container repositories and provision containers in your pipelines using the Wave containers service. See [Wave containers](https://seqera.io/wave/) for more information. + +10. Select **Enable Fusion v2** to allow access to your S3-hosted data via the [Fusion v2](https://seqera.io/fusion/) virtual distributed file system. This speeds up most data operations. The Fusion v2 file system requires Wave containers to be enabled (see above). See [Fusion file system](../supported_software/fusion/fusion.mdx) for configuration details. + +11. Select **Enable fast instance storage** to allow the use of NVMe instance storage to speed up I/O and disk access operations. NVMe instance storage requires Fusion v2 to be enabled (see above). + + :::note + Fast instance storage requires an EC2 instance type that uses NVMe disks. Tower validates any instance types you specify (from **Advanced options > Instance types**) during compute environment creation. If you do not specify an instance type, a standard EC2 instance with NVMe disks will be used (`'c5ad', 'c5d', 'c6id', 'i3', 'i4i', 'm5ad', 'm5d', 'm6id', 'r5ad', 'r5d', 'r6id'` EC2 instance families) for fast storage. + ::: + +12. Set the **Config mode** to **Batch Forge**. + +13. Select a **Provisioning model**. In most cases this will be **Spot**. + + :::tip + You can choose to create a compute environment that launches either Spot or On-Demand instances. Spot instances can cost as little as 20% of On-Demand instances, and with Nextflow's ability to automatically relaunch failed tasks, Spot is almost always the recommended provisioning model. + + Note, however, that when choosing Spot instances, Tower will also create a dedicated queue for running the main Nextflow job using a single On-Demand instance in order to prevent any execution interruptions. + ::: + +14. Enter the **Max CPUs** e.g. `64`. This is the maximum number of combined CPUs (the sum of all instances CPUs) AWS Batch will provision at any time. + +15. Select **EBS Auto scale** to allow the EC2 virtual machines to dynamically expand the amount of available disk space during task execution. + + :::caution + When running large AWS Batch clusters (hundreds of compute nodes or more), EC2 API rate limits may cause the deletion of unattached EBS volumes to fail. Volumes that remain active after Nextflow jobs have completed will incur additional costs, and should be manually deleted. Monitor your AWS account for any orphaned EBS volumes via the EC2 console, or with a Lambda function. See [here](https://aws.amazon.com/blogs/mt/controlling-your-aws-costs-by-deleting-unused-amazon-ebs-volumes/) for more information. + ::: + +16. With the optional **Enable Fusion mounts** feature enabled, S3 buckets specified in **Pipeline work directory** and **Allowed S3 Buckets** will be mounted as file system volumes in the EC2 instances carrying out the Batch job execution. These buckets will be accessible at `/fusion/s3/`. For example, if the bucket name is `s3://imputation-gp2`, the Nextflow pipeline will access it using the file system path `/fusion/s3/imputation-gp2`. + + :::tip + You do not need to modify your pipeline or files to take advantage of this feature. Nextflow is able to recognise these buckets automatically and will replace any reference to files prefixed with `s3://` with the corresponding Fusion mount paths. + ::: + +17. Select **Enable GPUs** if you intend to run GPU-dependent workflows in the compute environment. 
See [GPU usage](./overview.mdx#aws-batch) for more information. + +18. Enter any additional **Allowed S3 buckets** that your workflows require to read input data or write output data. The **Pipeline work directory** bucket above is added by default to the list of **Allowed S3 buckets**. + +19. To use **EFS**, you can either select **Use existing EFS file system** and specify an existing EFS instance, or select **Create new EFS file system** to create one. If you intend to use the EFS file system as your work directory, you will need to specify `/work` in the **Pipeline work directory** field (step 8 of this guide). + + - To use an existing EFS file system, enter the **EFS file system id** and **EFS mount path**. This is the path where the EFS volume is accessible to the compute environment. For simplicity, we advise that you use `/mnt/efs` as the EFS mount path. + - To create a new EFS file system, enter the **EFS mount path**. We advise that you specify `/mnt/efs` as the EFS mount path. + +20. To use **FSx for Lustre**, you can either select **Use existing FSx file system** and specify an existing FSx instance, or select **Create new FSx file system** to create one. If you intend to use the FSx file system as your work directory, you will need to specify `/work` in the **Pipeline work directory** field (step 8 of this guide). + + - To use an existing FSx file system, enter the **FSx DNS name** and **FSx mount path**. The FSx mount path is the path where the FSx volume is accessible to the compute environment. For simplicity, we advise that you use `/mnt/fsx` as the FSx mount path. + - To create a new FSx file system, enter the **FSx size** (in GB) and the **FSx mount path**. We advise that you specify `/mnt/fsx` as the FSx mount path. + +21. Select **Dispose resources** if you want Tower to automatically delete these AWS resources if you delete the compute environment in Tower. + +22. You can use the **Environment variables** option to specify custom environment variables for the Head job and/or Compute jobs. + +23. Configure any advanced options described below, as needed. + +24. Select **Create** to finalize the compute environment setup. It will take a few seconds for all the resources to be created, and then you will be ready to launch pipelines. + +Jump to the documentation for [launching pipelines](../launch/launchpad.mdx). + +### Advanced options + +- You can specify the **Allocation strategy** and indicate the preferred **Instance types** to AWS Batch. + +- You can configure your custom networking setup using the **VPC ID**, **Subnets** and **Security groups** fields. + +- You can specify a custom **AMI ID**. + + :::caution + To use a custom AMI, make sure the AMI is based on an Amazon Linux-2 ECS optimized image that meets the Batch requirements. To learn more about approved versions of the Amazon ECS optimized AMI, see [this AWS guide](https://docs.aws.amazon.com/batch/latest/userguide/compute_resource_AMIs.html#batch-ami-spec) + ::: + + :::caution + If a custom AMI is specified and the **Enable GPU** option is also selected, the custom AMI will be used instead of the AWS-recommended GPU-optimized AMI. + ::: + +- If you need to debug the EC2 instance provisioned by AWS Batch, specify a **Key pair** to log in to the instance via SSH. + +- You can set **Min CPUs** to be greater than `0`, in which case some EC2 instances will remain active. An advantage of this is that pipeline executions will initialize faster. 
+ + :::caution + Setting Min CPUs to a value greater than 0 will keep the required compute instances active, even when your pipelines are not running. This will result in additional AWS charges. + ::: + +- You can use **Head Job CPUs** and **Head Job Memory** to specify the hardware resources allocated for the Head Job. + +- You can use **Head Job role** and **Compute Job role** to grant fine-grained IAM permissions to the Head Job and Compute Jobs + +- You can add an execution role ARN to the **Batch execution role** field to grant permissions to make API calls on your behalf to the ECS container used by Batch. This is required if the pipeline launched with this compute environment needs access to the secrets stored in this workspace. This field can be ignored if you are not using secrets. + +- Specify an EBS block size (in GB) in the **EBS auto-expandable block size** field to control the initial size of the EBS auto-expandable volume. New blocks of this size are added when the volume begins to run out of free space. + +- Enter the **Boot disk size** (in GB) to specify the size of the boot disk in the VMs created by this compute environment. + +- If you're using **Spot** instances, then you can also specify the **Cost percentage**, which is the maximum allowed price of a **Spot** instance as a percentage of the **On-Demand** price for that instance type. Spot instances will not be launched until the current spot price is below the specified cost percentage. + +- You can use **AWS CLI tool path** to specify the location of the `aws` CLI. + +- Specify a **CloudWatch Log group** for the `awslogs` driver to stream the logs entry to an existing Log group in Cloudwatch. + +- Specify a custom **ECS agent configuration** for the ECS agent parameters used by AWS Batch. This is appended to the /etc/ecs/ecs.config file in each cluster node. + + :::note + Altering this file may result in a malfunctioning Batch Forge compute environment. See [Amazon ECS container agent configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) to learn more about the available parameters. + ::: + +### Manual + +This section is for users with a pre-configured AWS environment. You will need a Batch queue, a Batch compute environment, an IAM user and an S3 bucket already set up. + +To enable Tower within your existing AWS configuration, you need to have an IAM user with the following IAM permissions: + +- `AmazonS3ReadOnlyAccess` +- `AmazonEC2ContainerRegistryReadOnly` +- `CloudWatchLogsReadOnlyAccess` +- A [custom policy](https://github.com/seqeralabs/nf-tower-aws/blob/master/launch/launch-policy.json) to grant the ability to submit and control Batch jobs. +- Write access to any S3 bucket used by pipelines with the following [policy template](https://github.com/seqeralabs/nf-tower-aws/blob/master/launch/s3-bucket-write.json). See [below for details](#access-to-s3-buckets) + +With these permissions set, we can add a new **AWS Batch** compute environment in Tower. + +### Access to S3 Buckets + +Tower can use S3 to store intermediate and output data generated by pipelines. You need to create a policy for your Tower IAM user that grants access to specific buckets. + +1. Go to the IAM User table in the [IAM service](https://console.aws.amazon.com/iam/home) + +2. Select the IAM user. + +3. Select **Add inline policy**. + +4. Copy the contents of [this policy](https://github.com/seqeralabs/nf-tower-aws/blob/master/launch/s3-bucket-write.json) into the **JSON** tab. 
Replace `YOUR-BUCKET-NAME` (lines 10 and 21) with your bucket name. + +5. Name your policy and select **Create policy**. + +### Compute Environment + +To create a new compute environment for AWS Batch (without Forge): + +1. In a workspace, select **Compute Environments** and then **New Environment**. + +2. Enter a descriptive name for this environment, e.g. "AWS Batch Manual (eu-west-1)". + +3. Select **Amazon Batch** as the target platform. + +4. Add new credentials by selecting the **+** button. + +5. Enter a name for the credentials, e.g. "AWS Credentials". + +6. Enter the **Access key** and **Secret key** for your IAM user. + + :::tip + You can create multiple credentials in your Tower environment. See the [Credentials](../credentials/overview.mdx) section. + ::: + +7. Select a **Region**, e.g. "eu-west-1 - Europe (Ireland)" + +8. Enter an S3 bucket path for the **Pipeline work directory**, for example `s3://tower-bucket` + +9. Set the **Config mode** to **Manual**. + +10. Enter the **Head queue**, which is the name of the AWS Batch queue that the Nextflow driver job will run. + +11. Enter the **Compute queue**, which is the name of the AWS Batch queue that tasks will be submitted to. + +12. You can use the **Environment variables** option to specify custom environment variables for the Head job and/or Compute jobs. + +13. Configure any advanced options described below, as needed. + +14. Select **Create** to finalize the compute environment setup. + +Jump to the documentation for [Launching Pipelines](../launch/launchpad.mdx). + +### Advanced options + +- You can use **Head Job CPUs** and **Head Job Memory** to specify the hardware resources allocated for the Head Job. + +- You can use **Head Job role** and **Compute Job role** to grant fine-grained IAM permissions to the Head Job and Compute Jobs + +- You can add an execution role ARN to the **Batch execution role** field to grant permissions to make API calls on your behalf to the ECS container used by Batch. This is required if the pipeline launched with this compute environment needs access to the secrets stored in this workspace. This field can be ignored if you are not using secrets. + +- You can use **AWS CLI tool path** to specify the location of the `aws` CLI. + +- Specify a **CloudWatch Log group** for the `awslogs` driver to stream the logs entry to an existing Log group in Cloudwatch. diff --git a/platform-enterprise/compute-envs/azure-batch.mdx b/platform-enterprise/compute-envs/azure-batch.mdx new file mode 100644 index 000000000..8a137c520 --- /dev/null +++ b/platform-enterprise/compute-envs/azure-batch.mdx @@ -0,0 +1,211 @@ +--- +title: "azure-batch" +description: "Step-by-step instructions to set up Azure Batch in Nextflow Tower." +--- + +## Overview + +:::caution +The Tower support for Azure Batch is currently in beta. Any feedback and suggestions are welcome. + + In order to manage capacity during the global health pandemic, Microsoft has reduced core quotas for new Batch accounts. Depending on your region and subscription type, a newly-created account may not be entitled to any VMs without first making a service request to Azure. + + Please see Azure's [Batch service quotas and limits](https://docs.microsoft.com/en-us/azure/batch/batch-quota-limit#view-batch-quotas) page for further details. + +::: + +:::note +This guide assumes you have an existing [Azure Account](https://azure.microsoft.com/en-us). Sign up for a free Azure account [here](https://azure.microsoft.com/en-us/free/). 
+::: + +There are two ways to create a **Compute Environment** for **Azure Batch** with Tower. + +1. **Batch Forge**: This option automatically manages the Azure Batch resources in your Azure account. + +2. **Manual**: This option allows you to create a compute environment using existing Azure Batch resources. + +If you don't yet have an Azure Batch environment fully set up, follow the [Batch Forge](#tower-forge) guide to do so. + +If you have been provided an Azure Batch queue from your account administrator, or if you have set up Azure Batch previously, directly follow the [Manual](#manual) guide. + +### Batch Forge + +:::caution +Follow these instructions only if you have **not** pre-configured an Azure Batch environment. Note that this option will create resources in your Azure account that you may be charged for by Azure. +::: + +### Resource group + +To create the necessary Azure Batch and Azure Storage accounts, we must first create a **resource group** in the region of your choice. + +When you open [this link](https://portal.azure.com/#create/Microsoft.ResourceGroup), you'll notice the **Create new resource group** dialogue. + +1. Enter a name for the resource group (e.g. `towerrg`). + +2. Select the preferred region for this resource group. + +3. Select **Review and Create** to proceed to the review screen. + +4. Select **Create** to create the resources. + +### Storage account + +The next step is to create the necessary Azure Storage. + +When you open [this link](https://portal.azure.com/#blade/HubsExtension/BrowseResource/resourceType/Microsoft.Storage%2FStorageAccounts), you'll notice the **Create a storage account** dialogue. + +1. Enter a name for the storage account (e.g. `towerrgstorage`). + +2. Select the preferred region for this resource group. + +3. Select **Review and Create** to proceed to the review screen. + +4. Select **Create** to create the Azure Storage account. + +5. Navigate to your new storage account and select **Container**. + +6. Create a new Blob container by selecting **+ Container**. + + A new container dialogue will open. Enter a suitable name (e.g. `towerrgstorage-container`). + +7. Once the new Blob container is created, navigate to the **Access Keys** section of the storage account (`towerrgstorage` in this example). + +8. Store the access keys for the newly created Azure Storage account. + + :::note + Blob container storage credentials are associated with the Batch pool configuration when it is created. Once your compute environment has been created with Batch Forge, these credentials should not be changed in Tower. + ::: + +### Batch account + +The next step is to create the necessary Batch account. + +When you open [this link](https://portal.azure.com/#blade/HubsExtension/BrowseResource/resourceType/Microsoft.Batch%2FbatchAccounts), you'll notice the **Create a batch account** dialogue. + +1. Enter a name for the storage account (e.g. `towerrgbatch`). + +2. Select the preferred region for this resource group. + +3. Select **Review and Create** to proceed to the review screen. + +4. Select **Create** to create the Azure Batch account. + +### Compute Environment + +Batch Forge automates the configuration of an [Azure Batch](https://azure.microsoft.com/en-us/services/batch/) compute environment and queues required for the deployment of Nextflow pipelines. + +Once the Azure resources are set up, we can add a new **Azure Batch** environment in Tower. To create a new compute environment: + +1. 
In a workspace, select **Compute Environments** and then **New Environment**.
+
+2. Enter a descriptive name for this environment, e.g. "Azure Batch (east-us)".
+
+3. Select **Azure Batch** as the target platform.
+
+    ![](./_images/azure_new_env_name.png)
+
+4. From the **Credentials** drop-down, select existing Azure credentials, or add new credentials by selecting the **+** button. If you select to use existing credentials, skip to step 7.
+
+5. Enter a name, e.g. "Azure Credentials".
+
+6. Add the **Batch account** and **Blob Storage** credentials that we created previously.
+
+    ![](./_images/azure_keys.png)
+
+    :::tip
+    You can create multiple credentials in your Tower environment.
+    :::
+
+    :::note
+    From version 22.3, Tower supports the use of credentials for container registry services. These credentials can be created from the [Credentials](../credentials/overview.mdx) tab.
+    :::
+
+7. Select a **Region**, for example "eastus (East US)".
+
+8. Enter the **Pipeline work directory** as the Azure blob container we created in the previous section, e.g. `az://towerrgstorage-container/work`.
+
+    :::caution
+    The blob container should be in the same **Region** selected in the previous step.
+    :::
+
+9. Set the **Config mode** to **Batch Forge**.
+
+    ![](./_images/azure_tower_forge.png)
+
+10. Enter the default VM type depending on your quota limits. The default is `Standard_D4_v3`.
+
+11. Enter the **VMs count**, which is the number of VMs you'd like to deploy.
+
+12. Enable **Autoscale** if you'd like to automatically scale up and down based on the number of tasks. The number of VMs will vary from **0** to **VMs count**.
+
+13. Enable **Dispose resources** if you'd like Tower to automatically delete the Batch pool once the workflow is complete.
+
+14. Configure any advanced options described below, as needed.
+
+15. Select **Create** to finalize the compute environment setup. It will take a few seconds for all the resources to be created, and then you will be ready to launch pipelines.
+
+    ![](./_images/azure_newly_created_env.png)
+
+Jump to the documentation for [Launching Pipelines](../launch/launchpad.mdx).
+
+### Advanced options
+
+- You can use the **Jobs cleanup policy** to control how jobs should be deleted on workflow completion.
+
+- You can use the **Token duration** to control the duration of the SAS token generated by Nextflow.
+
+### Manual
+
+This section is for users with a pre-configured Azure environment. You will need an Azure Batch account and Storage account already set up.
+
+To create a new compute environment for Azure Batch (without Forge):
+
+1. In a workspace, select **Compute Environments** and then **New Environment**.
+
+2. Enter a descriptive name for this environment, e.g. "Azure Batch (east-us)".
+
+3. Select **Azure Batch** as the target platform.
+
+    ![](./_images/azure_new_env_name.png)
+
+4. Select your Azure credentials or add new credentials by selecting the **+** button.
+
+5. Enter a name, e.g. "Azure Credentials".
+
+6. Add the **Batch account** and **Blob Storage** credentials that we created previously.
+
+    ![](./_images/azure_keys.png)
+
+    :::tip
+    You can create multiple credentials in your Tower environment.
+    :::
+
+7. Select a **Region**, for example "eastus (East US)".
+
+8. Enter the **Pipeline work directory** as the Azure blob container we created in the previous section, e.g. `az://towerrgstorage-container/work`.
+
+    :::caution
+    The blob container should be in the same **Region** you specified in step 7 above.
+    :::
+
+9.
Set the **Config mode** to **Manual**.
+
+10. Enter the **Compute Pool name**, the name of the Azure Batch pool provided to you by your Azure administrator.
+
+    ![](./_images/azure_tower_manual.png)
+
+11. You can use the **Environment variables** option to specify custom environment variables for the Head job and/or Compute jobs.
+
+12. Configure any advanced options described below, as needed.
+
+13. Select **Create** to finalize the compute environment setup. It will take a few seconds for all the resources to be created, and then you will be ready to launch pipelines.
+
+    ![](./_images/azure_newly_created_env.png)
+
+Jump to the documentation for [Launching Pipelines](../launch/launchpad.mdx).
+
+### Advanced options
+
+- You can use the **Jobs cleanup policy** to control how jobs should be deleted on workflow completion.
+
+- You can use the **Token duration** to control the duration of the SAS token generated by Nextflow.
diff --git a/platform-enterprise/compute-envs/eks.mdx b/platform-enterprise/compute-envs/eks.mdx
new file mode 100644
index 000000000..fadb9f3e3
--- /dev/null
+++ b/platform-enterprise/compute-envs/eks.mdx
@@ -0,0 +1,111 @@
+---
+title: "eks"
+description: "Step-by-step instructions to set up a Tower compute environment for Amazon EKS clusters"
+---
+
+## Overview
+
+[Amazon EKS](https://aws.amazon.com/eks/) is a managed Kubernetes cluster that allows the execution of containerized workloads in the AWS cloud at scale.
+
+Tower offers native support for AWS EKS clusters and streamlines the deployment of Nextflow pipelines in such environments.
+
+## Requirements
+
+You need to have an EKS cluster up and running. Make sure you have followed the [cluster preparation](../compute-envs/k8s.mdx#cluster-preparation) instructions to create the cluster resources required by Tower. In addition to the generic Kubernetes instructions, you will need to make a few modifications specific to EKS.
+
+**Assign service account role to IAM user.** You will need to assign the service role to an AWS user that will be used by Tower to access the EKS cluster.
+
+First, use the following command to modify the EKS auth configuration:
+
+```bash
+kubectl edit configmap -n kube-system aws-auth
+```
+
+Once the editor is open, add the following entry:
+
+```yaml
+mapUsers: |
+  - userarn: <AWS_USER_ARN>
+    username: tower-launcher-user
+    groups:
+      - tower-launcher-role
+```
+
+Your user ARN can be retrieved from the [AWS IAM console](https://console.aws.amazon.com/iam) or from the AWS CLI:
+
+```bash
+aws sts get-caller-identity
+```
+
+:::note
+The same user needs to be used when specifying the AWS credentials in the configuration of the Tower compute environment for EKS.
+:::
+
+The AWS user should have the following IAM policy:
+
+<details>
+  <summary>Click to view eks-iam-policy.json</summary>
+
+  ```yaml
+  "docs/_templates/eks/eks-iam-policy.json"
+  ```
+
+</details>
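+
+As a quick sanity check (optional, and shown here only as a sketch), you can confirm that the mapped IAM user can reach the cluster and operate in the Tower namespace before configuring the compute environment. The cluster name and region below are placeholders, and the `tower-nf` namespace and `tower-launcher-sa` service account are the defaults from the [cluster preparation](../compute-envs/k8s.mdx#cluster-preparation) instructions:
+
+```bash
+# Point kubectl at the EKS cluster using the Tower IAM user's credentials
+aws eks update-kubeconfig --name <YOUR_EKS_CLUSTER> --region <YOUR_REGION>
+
+# Check that the mapped user is allowed to create pods in the Tower namespace
+kubectl auth can-i create pods --namespace tower-nf
+
+# Confirm the launcher service account created during cluster preparation exists
+kubectl get serviceaccount tower-launcher-sa --namespace tower-nf
+```
+
+If the `kubectl auth can-i` check returns `no`, review the role binding created during cluster preparation before proceeding.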
+ +For more details, refer to the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html). + +### Compute Environment + +1. In a workspace, select **Compute Environments** and then **New Environment**. + +2. Enter a descriptive name for this environment, e.g. "Amazon EKS (eu-west-1)". + +3. Select **Amazon EKS** as the target platform. + +4. From the **Credentials** drop-down, select existing AWS credentials, or add new credentials by selecting the **+** button. If you select to use existing credentials, skip to step 7. + + :::note + Make sure the user has the IAM permissions required to describe and list EKS clusters as explained [here](#requirements). + ::: + + :::note + From version 22.3, Tower supports the use of credentials for container registry services. These credentials can be created from the [Credentials](../credentials/overview.mdx) tab. + ::: + +5. Select a **Region**, for example "eu-west-1 - Europe (Ireland)". + +6. Select a **Cluster name** from the list of available EKS clusters in the selected region. + +7. Specify the **Namespace** created in the [cluster preparation](#cluster-preparation) instructions, which is `tower-nf` by default. + +8. Specify the **Head service account** created in the [cluster preparation](#cluster-preparation) instructions, which is `tower-launcher-sa` by default. + +9. Specify the **Storage claim** created in the [cluster preparation](#cluster-preparation) instructions, which serves as a scratch filesystem for Nextflow pipelines. In each of the provided examples, the storage claim is called `tower-scratch`. + +10. You can use the **Environment variables** option to specify custom environment variables for the Head job and/or Compute jobs. + +11. Configure any advanced options described below, as needed. + +12. Select **Create** to finalize the compute environment setup. + +Jump to the documentation for [Launching Pipelines](../launch/launchpad.mdx). + +### Advanced options + +- The **Storage mount path** is the file system path where the Storage claim is mounted (default: `/scratch`). + +- The **Work directory** is the file system path used as a working directory by Nextflow pipelines. It must be the storage mount path (default) or a subdirectory of it. + +- The **Compute service account** is the service account used by Nextflow to submit tasks (default: the `default` account in the given namespace). + +- The **Pod cleanup policy** determines when terminated pods should be deleted. + +- You can use **Custom head pod specs** to provide custom options for the Nextflow workflow pod (`nodeSelector`, `affinity`, etc). For example: + + ```yaml + spec: + nodeSelector: + disktype: ssd + ``` + +- You can use **Custom service pod specs** to provide custom options for the compute environment pod. See above for an example. + +- You can use **Head Job CPUs** and **Head Job Memory** to specify the hardware resources allocated for the Nextflow workflow pod. diff --git a/platform-enterprise/compute-envs/gke.mdx b/platform-enterprise/compute-envs/gke.mdx new file mode 100644 index 000000000..49bb84e1d --- /dev/null +++ b/platform-enterprise/compute-envs/gke.mdx @@ -0,0 +1,110 @@ +--- +title: "gke" +description: "Step-by-step instructions to set up a Tower compute environment for Google Kubernetes Engine cluster" +--- + +## Overview + +[Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) is a managed Kubernetes cluster that allows the execution of containerized workloads in Google Cloud at scale. 
+
+Tower offers native support for GKE clusters and streamlines the deployment of Nextflow pipelines in such environments.
+
+### Requirements
+
+Refer to the [Google Cloud](./google-cloud-batch.mdx#configure-google-cloud) section for instructions on how to set up your Google Cloud account and any other services (e.g. Cloud Storage) that you intend to use.
+
+You need to have a GKE cluster up and running. Make sure you have followed the [cluster preparation](../compute-envs/k8s.mdx#cluster-preparation) instructions to create the cluster resources required by Tower. In addition to the generic Kubernetes instructions, you will need to make a few modifications specific to GKE.
+
+**Assign service account role to IAM user.** You will need to grant the cluster access to the service account used to authenticate the Tower compute environment. This can be done by updating the _role binding_ as shown below:
+
+```bash
+cat << EOF | kubectl apply -f -
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: tower-launcher-userbind
+subjects:
+  - kind: User
+    name: <SERVICE_ACCOUNT_EMAIL>
+    apiGroup: rbac.authorization.k8s.io
+roleRef:
+  kind: Role
+  name: tower-launcher-role
+  apiGroup: rbac.authorization.k8s.io
+---
+EOF
+```
+
+In the above snippet, replace `<SERVICE_ACCOUNT_EMAIL>` with the corresponding service account, e.g. `test-account@test-project-123456.iam.gserviceaccount.com`.
+
+For more details, refer to the [Google documentation](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control).
+
+### Compute Environment
+
+1. In a workspace, select **Compute Environments** and then **New Environment**.
+
+2. Enter a descriptive name for this environment, e.g. "Google Kubernetes Engine (europe-west1)".
+
+3. From the **Provider** drop-down, select **Google Kubernetes Engine**.
+
+4. From the **Credentials** drop-down, select existing GKE credentials, or add new credentials by selecting the **+** button. If you select to use existing credentials, skip to step 7.
+
+5. Enter a name for the credentials, e.g. "GKE Credentials".
+
+6. Enter the **Service account key** for your Google Service account.
+
+    :::tip
+    You can create multiple credentials in your Tower environment.
+    :::
+
+    :::note
+    From version 22.3, Tower supports the use of credentials for container registry services. These credentials can be created from the [Credentials](../credentials/overview.mdx) tab.
+    :::
+
+7. Select the **Location** of your GKE cluster.
+
+    :::caution
+    GKE clusters can be either _regional_ or _zonal_. For example, `us-west1` identifies the United States West-Coast region, which has three zones: `us-west1-a`, `us-west1-b`, and `us-west1-c`.
+
+    Tower auto-completion only shows regions. You should manually edit this field if you are using a zonal GKE cluster.
+    ![](./_images/gke_regions.png)
+    :::
+
+8. Select or enter the **Cluster name** of your GKE cluster.
+
+9. Specify the **Namespace** created in the [cluster preparation](#cluster-preparation) instructions, which is `tower-nf` by default.
+
+10. Specify the **Head service account** created in the [cluster preparation](#cluster-preparation) instructions, which is `tower-launcher-sa` by default.
+
+11. Specify the **Storage claim** created in the [cluster preparation](#cluster-preparation) instructions, which serves as a scratch filesystem for Nextflow pipelines. In each of the provided examples, the storage claim is called `tower-scratch`.
+
+12.
You can use the **Environment variables** option to specify custom environment variables for the Head job and/or Compute jobs. + +13. Configure any advanced options described below, as needed. + +14. Select **Create** to finalize the compute environment setup. + +Jump to the documentation for [Launching Pipelines](../launch/launchpad.mdx). + +### Advanced options + +- The **Storage mount path** is the file system path where the Storage claim is mounted (default: `/scratch`). + +- The **Work directory** is the file system path used as a working directory by Nextflow pipelines. It must be the storage mount path (default) or a subdirectory of it. + +- The **Compute service account** is the service account used by Nextflow to submit tasks (default: the `default` account in the given namespace). + +- The **Pod cleanup policy** determines when terminated pods should be deleted. + +- You can use **Custom head pod specs** to provide custom options for the Nextflow workflow pod (`nodeSelector`, `affinity`, etc). For example: + + ```yaml + spec: + nodeSelector: + disktype: ssd + ``` + +- You can use **Custom service pod specs** to provide custom options for the compute environment pod. See above for an example. + +- You can use **Head Job CPUs** and **Head Job Memory** to specify the hardware resources allocated for the Nextflow workflow pod. diff --git a/platform-enterprise/compute-envs/google-cloud-batch.mdx b/platform-enterprise/compute-envs/google-cloud-batch.mdx new file mode 100644 index 000000000..f5a43c229 --- /dev/null +++ b/platform-enterprise/compute-envs/google-cloud-batch.mdx @@ -0,0 +1,149 @@ +--- +title: "google-cloud-batch" +description: "Step-by-step instructions to setup Google Cloud Batch for Nextflow Tower." +--- + +## Overview + +:::caution +Tower's Google Cloud Batch support is in Beta — more features will be added as Nextflow GCB support is enhanced over time. +::: + +This guide assumes you have an existing [Google Cloud Account](https://console.cloud.google.com). Sign-up for a free account [here](https://cloud.google.com/). + +Tower provides integration to Google Cloud via the [Batch API](https://cloud.google.com/batch/docs/reference/rest). + +The guide is split into two parts: + +1. How to configure your Google Cloud account to use the Batch API. + +2. How to create a Google Cloud Batch compute environment in Tower. + +### Configure Google Cloud + +#### Create a project + +Navigate to the [Google Project Selector page](https://console.cloud.google.com/projectselector2) and either select an existing project or select **Create project**. + +Enter a name for your new project, e.g "tower-nf". + +If you are part of an organization, the location will default to your organization. + +#### Enable billing + +In the navigation menu (**≡**), select **Billing**. You can follow [these instructions](https://cloud.google.com/billing/docs/how-to/modify-project) to enable billing. + +#### Enable APIs + +Use [this link](https://console.cloud.google.com/flows/enableapi?apiid=batch.googleapis.com%2Ccompute.googleapis.com%2Cstorage-api.googleapis.com) to enable the following APIs for your project: + +- Batch API +- Compute Engine API +- Cloud Storage API + +Select your project from the dropdown menu and select **Enable**. 
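+
+If you prefer the command line, the same APIs can also be enabled with the `gcloud` CLI. This is an optional sketch only: it assumes the [Google Cloud SDK](https://cloud.google.com/sdk) is installed and authenticated, and that `<YOUR_PROJECT_ID>` is replaced with the ID of the project you created above:
+
+```bash
+# Enable the Batch, Compute Engine, and Cloud Storage APIs for your project
+gcloud services enable \
+    batch.googleapis.com \
+    compute.googleapis.com \
+    storage-api.googleapis.com \
+    --project <YOUR_PROJECT_ID>
+```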
+ +Alternatively, you can enable each API manually by selecting your project in the nav bar and visiting each API page: + +- [Batch API](https://console.cloud.google.com/marketplace/product/google/batch.googleapis.com) + +- [Compute Engine API](https://console.cloud.google.com/marketplace/product/google/compute.googleapis.com) + +- [Cloud Storage API](https://console.cloud.google.com/marketplace/product/google/storage-api.googleapis.com) + +#### Create a service account key + +1. In the navigation menu, select **IAM & Admin** and then **Service Accounts**. + +2. Select the email address of the **Compute Engine default service account**. + +3. Select **Keys**, then **Add key**, then **Create new key**. + +4. Select **JSON** as the key type. + +5. Select **Create**. + +A JSON file will be downloaded to your computer. This file contains the credential that will be used by Tower. You will need it to configure the compute environment in Tower. + +You can manage your key from the **Service Accounts** page. + +#### Create a Cloud Storage bucket + +1. In the navigation menu (**≡**), select **Cloud Storage** and then **Create bucket**. + +2. Enter a name for your bucket. You will reference this name when creating the compute environment in Tower. + + :::caution + Do not use underscores (`_`) in your bucket name. Use hyphens (`-`) instead. + ::: + +3. Select **Region** for the **Location type** and select the **Location** for your bucket. You will reference this location when creating the compute environment in Tower. + +4. Select **Standard** for the default storage class. + +5. Select **Uniform** for the **Access control**. + + :::note + The Batch API is available in a limited number of [locations](https://cloud.google.com/batch/docs/locations). However, these locations are only used to store metadata about the pipeline operations. The storage bucket and compute resources can be in any region. + ::: + +6. Select **Create**. + +7. Once the bucket is created, you will be redirected to the **Bucket details** page. + +8. Select **Permissions**, then **+ Add**. + +9. Copy the email address of the Compute Engine default service account into **New principals**. + +10. Select the following roles: + +- Storage Admin +- Storage Legacy Bucket Owner +- Storage Legacy Object Owner +- Storage Object Creator + +:::tip +You have created a project, enabled the necessary Google APIs, created a bucket, and created a JSON file with the required credentials. You are now ready to set up a new compute environment in Tower. +::: + +### Configure Tower + +:::caution +The following guide to configure Tower assumes you have (1) a service account key for a Google Cloud account and (2) the name and location of a Cloud Storage bucket. +::: + +To create a new compute environment for Google Cloud in Tower: + +1. In a workspace, select **Compute Environments** and then **New Environment**. + +2. Enter a descriptive name for this environment, e.g. "Google Cloud Batch (europe-north1)". + +3. Select **Google Cloud Batch** as the target platform. + +4. Add new credentials by selecting the **+** button. + +5. Enter a name for the credentials, e.g. "Google Cloud Credentials". + +6. Enter the **Service account key** for your Google Cloud account. This key was created in the [previous section](#create-a-service-account-key). + +7. Select the [**Location**](https://cloud.google.com/compute/docs/regions-zones#available) where you'd like to execute pipelines. + +8. Enter your bucket URL for the **Pipeline work directory**. 
The URL is the name of your bucket with the `gs://` prefix, e.g. `gs://my-bucket`. This bucket should be accessible in the region selected in the previous step. + +9. You can enable **Spot** to use Spot instances, which have significantly reduced cost compared to On-Demand instances. + +10. You can use the **Environment variables** option to specify custom environment variables for the Head job and/or Compute jobs. + +11. Configure any advanced options described below, as needed. + +12. Select **Create** to finalize the compute environment setup. + +Jump to the documentation for [Launching Pipelines](../launch/launchpad.mdx). + +### Advanced options + +- You can enable **Use Private Address** to ensure that your Google Cloud VMs aren't accessible to the public internet. + +- You can use **Boot disk size** to control the boot disk size of VMs. + +- You can use **Head Job CPUs** and **Head Job Memory** to specify the CPUs and memory allocated for head jobs. diff --git a/platform-enterprise/compute-envs/google-cloud-lifesciences.mdx b/platform-enterprise/compute-envs/google-cloud-lifesciences.mdx new file mode 100644 index 000000000..0ed3be62e --- /dev/null +++ b/platform-enterprise/compute-envs/google-cloud-lifesciences.mdx @@ -0,0 +1,151 @@ +--- +title: "google-cloud-lifesciences" +description: "Step-by-step instructions to setup Google Life Sciences for Nextflow Tower." +--- + +## Overview + +This guide assumes you have an existing [Google Cloud Account](https://console.cloud.google.com). Sign-up for a free account [here](https://cloud.google.com/). + +Tower provides integration to Google Cloud via the [Cloud Life Sciences API](https://cloud.google.com/life-sciences/docs/reference/rest). + +The guide is split into two parts: + +1. How to configure your Google Cloud account to use the Cloud Life Sciences API. + +2. How to create a Google Life Sciences compute environment in Tower. + +### Configure Google Cloud + +#### Create a project + +Navigate to the [Google Project Selector page](https://console.cloud.google.com/projectselector2) and either select an existing project or select **Create project**. + +Enter a name for your new project, e.g "tower-nf". + +If you are part of an organization, the location will default to your organization. + +#### Enable billing + +In the navigation menu (**≡**), select **Billing**. You can follow [these instructions](https://cloud.google.com/billing/docs/how-to/modify-project) to enable billing. + +#### Enable APIs + +Use [this link](https://console.cloud.google.com/flows/enableapi?apiid=lifesciences.googleapis.com%2Ccompute.googleapis.com%2Cstorage-api.googleapis.com) to enable the following APIs for your project: + +- Cloud Life Sciences API +- Compute Engine API +- Cloud Storage API + +Select your project from the dropdown menu and select **Enable**. + +Alternatively, you can enable each API manually by selecting your project in the nav bar and visiting each API page: + +- [Cloud Life Sciences API](https://console.cloud.google.com/marketplace/product/google/lifesciences.googleapis.com) + +- [Compute Engine API](https://console.cloud.google.com/marketplace/product/google/compute.googleapis.com) + +- [Cloud Storage API](https://console.cloud.google.com/marketplace/product/google/storage-api.googleapis.com) + +#### Create a service account key + +1. In the navigation menu, select **IAM & Admin** and then **Service Accounts**. + +2. Select the email address of the **Compute Engine default service account**. + +3. 
Select **Keys**, then **Add key**, then **Create new key**. + +4. Select **JSON** as the key type. + +5. Select **Create**. + +A JSON file will be downloaded to your computer. This file contains the credential that will be used by Tower. You will need it to configure the compute environment in Tower. + +You can manage your key from the **Service Accounts** page. + +#### Create a Cloud Storage bucket + +1. In the navigation menu (**≡**), select **Cloud Storage** and then **Create bucket**. + +2. Enter a name for your bucket. You will reference this name when creating the compute environment in Tower. + + :::caution + Do not use underscores (`_`) in your bucket name. Use hyphens (`-`) instead. + ::: + +3. Select **Region** for the **Location type** and select the **Location** for your bucket. You will reference this location when creating the compute environment in Tower. + +4. Select **Standard** for the default storage class. + +5. Select **Uniform** for the **Access control**. + + :::note + The Cloud Life Sciences API is available in a limited number of [locations](https://cloud.google.com/life-sciences/docs/concepts/locations). However, these locations are only used to store metadata about the pipeline operations. The storage bucket and compute resources can be in any region. + ::: + +6. Select **Create**. + +7. Once the bucket is created, you will be redirected to the **Bucket details** page. + +8. Select **Permissions**, then **+ Add**. + +9. Copy the email address of the Compute Engine default service account into **New principals**. + +10. Select the following roles: + +- Storage Admin +- Storage Legacy Bucket Owner +- Storage Legacy Object Owner +- Storage Object Creator + +### Compute Environment + +:::caution +The following guide to configure Tower assumes you have (1) a service account key for a Google Cloud account and (2) the name and location of a Cloud Storage bucket. +::: + +To create a new compute environment for Google Cloud in Tower: + +1. In a workspace, select **Compute Environments** and then **New Environment**. + +2. Enter a descriptive name for this environment, e.g. "Google Life Sciences (europe-west2)". + +3. Select **Google Life Sciences** as the target platform. + +4. From the **Credentials** drop-down, select existing Google Cloud credentials, or add new credentials by selecting the **+** button. If you select to use existing credentials, skip to step 7. + +5. Enter a name for the credentials, e.g. "Google Cloud Credentials". + +6. Enter the **Service account key** for your Google Cloud account. This key was created in the [previous section](#create-a-service-account-key). + + :::tip + You can create multiple credentials in your Tower workspace. + ::: + + :::note + From version 22.3, Tower supports the use of credentials for container registry services. These credentials can be created from the [Credentials](../credentials/overview.mdx) tab. + ::: + +7. Select the [**Region** and **Zones**](https://cloud.google.com/compute/docs/regions-zones#available) where you'd like to execute pipelines. You can leave the **Location** empty and the Cloud Life Sciences API will use the closest available location. + +8. Enter your bucket URL for the **Pipeline work directory**. The URL is the name of your bucket with the `gs://` prefix, e.g. `gs://my-bucket`. This bucket should be accessible in the region selected in the previous step. + +9. You can enable **Preemptible** to use preemptible instances, which have significantly reduced cost compared to on-demand instances. 
+ +10. You can use a **Filestore file system** to automatically mount a Google Filestore volume in your pipelines. + +11. You can use the **Environment variables** option to specify custom environment variables for the Head job and/or Compute jobs. + +12. Configure any advanced options described below, as needed. + +13. Select **Create** to finalize the compute environment setup. + +Jump to the documentation for [Launching Pipelines](../launch/launchpad.mdx). + +### Advanced options + +- You can enable **Use Private Address** to ensure that your Google Cloud VMs aren't accessible to the public internet. + +- You can use **Boot disk size** to control the boot disk size of VMs. + +- You can use **Head Job CPUs** and **Head Job Memory** to specify the CPUs and memory allocated for head jobs. diff --git a/platform-enterprise/compute-envs/k8s.mdx b/platform-enterprise/compute-envs/k8s.mdx new file mode 100644 index 000000000..eab1fc111 --- /dev/null +++ b/platform-enterprise/compute-envs/k8s.mdx @@ -0,0 +1,111 @@ +--- +title: "k8s" +description: "Step-by-step instructions to set up a Nextflow Tower compute environment for a Kubernetes cluster" +--- + +## Overview + +[Kubernetes](https://kubernetes.io/) is the leading technology for deployment and orchestration of containerized workloads in cloud-native environments. + +Tower streamlines the deployment of Nextflow pipelines into Kubernetes both for cloud-based and on-prem clusters. + +The following instructions are for a **generic Kubernetes** distribution. If you are using [Amazon EKS](eks.mdx) or [Google Kubernetes Engine](gke.mdx), see the corresponding documentation pages. + +### Cluster Preparation + +This section describes the steps required to prepare your Kubernetes cluster for the deployment of Nextflow pipelines using Tower. It is assumed the cluster itself has already been created and you have administrative privileges. + +1. Verify the connection to your Kubernetes cluster: + + ```bash + kubectl cluster-info + ``` + +2. Create the Tower launcher: + + ```bash + kubectl apply -f https://help.tower.nf/22.1/_templates/k8s/tower-launcher.yml + ``` + + This command creates a service account called `tower-launcher-sa`, and associated role bindings. Everything is contained in a namespace called `tower-nf`. The service account is used by Tower to launch Nextflow pipelines. Use this service account name when setting up the compute environment for this Kubernetes cluster in Tower. + +3. Create persistent storage. Tower requires a `ReadWriteMany` persistent volume claim (PVC) that is mounted by all nodes where workflow pods will be dispatched. + + You can use any storage solution that supports the `ReadWriteMany` [access mode](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes). The setup of this storage is beyond the scope of these instructions, because the right solution for you will depend on what is available for your infrastructure or cloud vendor (NFS, GlusterFS, CephFS, Amazon FSx, etc). Ask your cluster administrator for more information. + + - Example PVC backed by local storage: [tower-scratch-local.yml](../_templates/k8s/tower-scratch-local.yml) + + - Example PVC backed by NFS server: [tower-scratch-nfs.yml](../_templates/k8s/tower-scratch-nfs.yml) + +### Compute Environment + +1. In a workspace, select **Compute Environments** and then **New Environment**. + +2. Enter a descriptive name for this environment, e.g. "K8s cluster". + +3. Select **Kubernetes** as the target platform. + +4. 
Select your Kubernetes credentials or add new credentials by selecting the **+** button.
+
+5. Enter a name, e.g. "K8s Credentials".
+
+6. Enter the **Service account token**.
+
+    The token can be obtained with the following command:
+
+    ```bash
+    SECRET=$(kubectl get secrets | grep <SERVICE_ACCOUNT_NAME> | cut -f1 -d ' ')
+    kubectl describe secret $SECRET | grep -E '^token' | cut -f2 -d':' | tr -d '\t'
+    ```
+
+    Replace `<SERVICE_ACCOUNT_NAME>` with the name of the service account created in the [cluster preparation](#cluster-preparation) instructions, which is `tower-launcher-sa` by default.
+
+7. Enter the **Master server** URL.
+
+    The master server URL can be obtained with the following command:
+
+    ```bash
+    kubectl cluster-info
+    ```
+
+    It can also be found in your `~/.kube/config` file under the `server` field corresponding to your cluster.
+
+8. Specify the **SSL Certificate** to authenticate your connection.
+
+    The certificate data can be found in your `~/.kube/config` file. It is the `certificate-authority-data` field corresponding to your cluster.
+
+9. Specify the **Namespace** created in the [cluster preparation](#cluster-preparation) instructions, which is `tower-nf` by default.
+
+10. Specify the **Head service account** created in the [cluster preparation](#cluster-preparation) instructions, which is `tower-launcher-sa` by default.
+
+11. Specify the **Storage claim** created in the [cluster preparation](#cluster-preparation) instructions, which serves as a scratch filesystem for Nextflow pipelines. In each of the provided examples, the storage claim is called `tower-scratch`.
+
+12. You can use the **Environment variables** option to specify custom environment variables for the Head job and/or Compute jobs.
+
+13. Configure any advanced options described below, as needed.
+
+14. Select **Create** to finalize the compute environment setup.
+
+Jump to the documentation for [Launching Pipelines](../launch/launchpad.mdx).
+
+### Advanced options
+
+- The **Storage mount path** is the file system path where the Storage claim is mounted (default: `/scratch`).
+
+- The **Work directory** is the file system path used as a working directory by Nextflow pipelines. It must be the storage mount path (default) or a subdirectory of it.
+
+- The **Compute service account** is the service account used by Nextflow to submit tasks (default: the `default` account in the given namespace).
+
+- The **Pod cleanup policy** determines when terminated pods should be deleted.
+
+- You can use **Custom head pod specs** to provide custom options for the Nextflow workflow pod (`nodeSelector`, `affinity`, etc). For example:
+
+    ```yaml
+    spec:
+      nodeSelector:
+        disktype: ssd
+    ```
+
+- You can use **Custom service pod specs** to provide custom options for the compute environment pod. See above for an example.
+
+- You can use **Head Job CPUs** and **Head Job Memory** to specify the hardware resources allocated for the Nextflow workflow pod.
diff --git a/platform-enterprise/compute-envs/lsf.mdx b/platform-enterprise/compute-envs/lsf.mdx
new file mode 100644
index 000000000..b76601792
--- /dev/null
+++ b/platform-enterprise/compute-envs/lsf.mdx
@@ -0,0 +1,64 @@
+---
+title: "lsf"
+description: "Step-by-step instructions to set up IBM LSF for Nextflow Tower."
+---
+
+## Overview
+
+[IBM Spectrum LSF](https://www.ibm.com/products/hpc-workload-management/details) is an IBM workload management solution for HPC. LSF aims to enhance user and administrator experience, reliability and performance at scale.
+ +Tower streamlines the deployment of Nextflow pipelines into both cloud-based and on-prem LSF clusters. + +### Requirements + +To launch pipelines into an **LSF** cluster from Tower, the following requirements must be satisfied: + +- The cluster should allow outbound connections to the Tower web service. +- The cluster queue used to run the Nextflow head job must be able to submit cluster jobs. +- The Nextflow runtime version **21.02.0-edge** (or later) should be installed on the cluster. + +### Compute Environment + +To create a new compute environment for **LSF** in Tower: + +1. In a workspace, select **Compute Environments** and then **New Environment**. + +2. Enter a descriptive name for this environment, e.g. "LSF". + +3. Select **IBM LSF** as the target platform. + +4. Select your credentials, or select **+** and **SSH** or **Tower Agent** to add new credentials. + +5. Enter a name for the credentials. + +6. Enter the absolute path of the **Work directory** to be used on the cluster. + +7. Enter the absolute path of the **Launch directory** to be used on the cluster. If omitted, it will be the same as the work directory. + +8. Enter the **Login hostname**, which is usually the hostname or public IP address of the cluster's login node. + +9. Enter the **Head queue name**, the cluster queue to which the Nextflow job will be submitted. + +10. Enter the **Compute queue name**, the cluster queue to which the Nextflow job will submit tasks. + + :::tip + The compute queue can be overridden by the Nextflow pipeline configuration. See the Nextflow [docs](https://www.nextflow.io/docs/latest/process.html#queue) for more details. + ::: + +11. You can use the **Environment variables** option to specify custom environment variables for the Head job and/or Compute jobs. + +12. Configure any advanced options described below, as needed. + +13. Select **Create** to finalize the creation of the compute environment. + +Jump to the documentation for [Launching Pipelines](../launch/launchpad.mdx). + +### Advanced options + +- You can use the **Nextflow queue size** to limit the number of jobs that Nextflow can submit to the scheduler at the same time. + +- You can use the **Head job submit options** to specify LSF options for the head job. You can optionally apply these options to compute jobs as well: + + ![](./_images/head_job_propagation.png) + +- You can use **Unit for memory limits**, **Per job memory limits**, and **Per task reserve** to control how memory is requested for Nextflow jobs. diff --git a/platform-enterprise/compute-envs/moab.mdx b/platform-enterprise/compute-envs/moab.mdx new file mode 100644 index 000000000..df99d03ee --- /dev/null +++ b/platform-enterprise/compute-envs/moab.mdx @@ -0,0 +1,62 @@ +--- +title: "moab" +description: "Step-by-step instructions to set up Moab for Nextflow Tower." +--- + +## Overview + +[Moab](http://docs.adaptivecomputing.com/suite/8-0/basic/help.htm#topics/moabWorkloadManager/topics/intro/productOverview.htm) is a scheduling and management system designed for clusters, grids, and on-demand/utility computing systems. At a high level, Moab applies site policies and extensive optimizations to orchestrate jobs, services, and other workload across the ideal combination of network, compute, and storage resources. + +Tower streamlines the deployment of Nextflow pipelines into both cloud-based and on-prem Moab clusters. 
+ +### Requirements + +To launch pipelines into a **Moab** cluster from Tower, the following requirements must be satisfied: + +- The cluster should allow outbound connections to the Tower web service. +- The cluster queue used to run the Nextflow head job must be able to submit cluster jobs. +- The Nextflow runtime version **21.02.0-edge** (or later) should be installed on the cluster. + +### Compute Environment + +To create a new compute environment for **Moab** in Tower: + +1. In a workspace, select **Compute Environments** and then **New Environment**. + +2. Enter a descriptive name for this environment, e.g. "Moab cluster". + +3. Select **Moab Workload Manager** as the target platform. + +4. Select your credentials, or select **+** and **SSH** or **Tower Agent** to add new credentials. + +5. Enter a name for the credentials. + +6. Enter the absolute path of the **Work directory** to be used on the cluster. + +7. Enter the absolute path of the **Launch directory** to be used on the cluster. If omitted, it will be the same as the work directory. + +8. If using SSH credentials, enter the **Login hostname**, which is usually the hostname or public IP address of the cluster's login node. + +9. Enter the **Head queue name**, the cluster queue to which the Nextflow job will be submitted. + +10. Enter the **Compute queue name**, the cluster queue to which the Nextflow job will submit tasks. + + :::tip + The compute queue can be overridden by the Nextflow pipeline configuration. See the Nextflow [docs](https://www.nextflow.io/docs/latest/process.html#queue) for more details. + ::: + +11. You can use the **Environment variables** option to specify custom environment variables for the Head job and/or Compute jobs. + +12. Configure any advanced options described below, as needed. + +13. Select **Create** to finalize the creation of the compute environment. + +Jump to the documentation for [Launching Pipelines](../launch/launchpad.mdx). + +### Advanced options + +- You can use the **Nextflow queue size** to limit the number of jobs that Nextflow can submit to the scheduler at the same time. + +- You can use the **Head job submit options** to specify Moab options for the head job. You can optionally apply these options to compute jobs as well: + + ![](./_images/head_job_propagation.png) diff --git a/platform-enterprise/compute-envs/overview.mdx b/platform-enterprise/compute-envs/overview.mdx new file mode 100644 index 000000000..3d78ddaf7 --- /dev/null +++ b/platform-enterprise/compute-envs/overview.mdx @@ -0,0 +1,57 @@ +--- +title: "overview" +description: "Overview of compute environments in Nextflow Tower." +--- + +## Overview + +Tower uses the concept of **compute environments** to define the execution platform where a pipeline will run. Compute environments enable Tower users to launch pipelines on a growing number of **cloud** and **on-premise** infrastructures. + +Each compute environment must be configured to enable Tower to submit tasks. See the individual compute environment pages below for platform-specific configuration steps. 
+ +### Platforms + +- [AWS Batch](./aws-batch.mdx) +- [Azure Batch](./azure-batch.mdx) +- [Google Cloud Batch](./google-cloud-batch.mdx) +- [Google Life Sciences](./google-cloud-lifesciences.mdx) +- [Altair Grid Engine](./altair-grid-engine.mdx) +- [Altair PBS Pro](./altair-pbs-pro.mdx) +- [IBM LSF](./lsf.mdx) +- [Moab](./moab.mdx) +- [Slurm](./slurm.mdx) +- [Kubernetes](./k8s.mdx) +- [Amazon EKS](./eks.mdx) +- [Google Kubernetes Engine](./gke.mdx) + +### Select a default compute environment + +If you have more than one compute environment, you can select which one will be used by default when launching a pipeline. + +1. In a workspace, select **Compute Environments**. + +2. Select **Make primary** for a particular compute environment to make it your default. + +### GPU usage + +The process for provisioning GPU instances in your compute environment differs for each cloud provider. + +### AWS Batch + +The AWS Batch compute environment creation form in Tower includes an **Enable GPUs** option. This option makes it possible to run GPU-dependent workflows in the compute environment. Note that: + +- The **Enable GPUs** setting alone does not cause GPU instances to deploy in your compute environment. You must still specify GPU-enabled instance types in the **Advanced options > Instance types** field. + +- The **Enable GPUs** setting causes Batch Forge to specify the most current [AWS-recommended GPU-optimized ECS AMI](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) as the EC2 fleet AMI when creating the compute environment. + +- This setting can be overridden by **AMI ID** in the advanced options. + +- The NVIDIA Container Runtime uses [environment variables](https://github.com/NVIDIA/nvidia-container-runtime#environment-variables-oci-spec) in container images to specify a GPU accelerated container. These variables should be included in the [`containerOptions`](https://www.nextflow.io/docs/latest/process.html#process-containeroptions) directive for each GPU-dependent process in your Nextflow script. The `containerOptions` directive can be set inline in your process definition or via configuration. For example, to add the directive to a process named `UseGPU` via configuration: + +```groovy +process { + withName: UseGPU { + containerOptions '-e NVIDIA_DRIVER_CAPABILITIES=compute,utility -e NVIDIA_VISIBLE_DEVICES=all' + } +} +``` diff --git a/platform-enterprise/compute-envs/slurm.mdx b/platform-enterprise/compute-envs/slurm.mdx new file mode 100644 index 000000000..f85aaf75b --- /dev/null +++ b/platform-enterprise/compute-envs/slurm.mdx @@ -0,0 +1,62 @@ +--- +title: "slurm" +description: "Step-by-step instructions to set up Slurm for Nextflow Tower." +--- + +## Overview + +[Slurm](https://slurm.schedmd.com/overview.html) is an open source, fault-tolerant, and highly scalable cluster management and job scheduling system for large and small Linux clusters. + +Tower streamlines the deployment of Nextflow pipelines into both cloud-based and on-prem Slurm clusters. + +### Requirements + +To launch pipelines into a **Slurm** cluster from Tower, the following requirements must be satisfied: + +- The cluster should allow outbound connections to the Tower web service. +- The cluster queue used to run the Nextflow head job must be able to submit cluster jobs. +- The Nextflow runtime version **21.02.0-edge** (or later) should be installed on the cluster. + +### Compute Environment + +To create a new compute environment for **Slurm** in Tower: + +1. 
In a workspace, select **Compute Environments** and then **New Environment**. + +2. Enter a descriptive name for this environment, e.g. "Slurm cluster". + +3. Select **Slurm Workload Manager** as the target platform. + +4. Select your credentials, or select **+** and **SSH** or **Tower Agent** to add new credentials. + +5. Enter a name for the credentials. + +6. Enter the absolute path of the **Work directory** to be used on the cluster. + +7. Enter the absolute path of the **Launch directory** to be used on the cluster. If omitted, it will be the same as the work directory. + +8. Enter the **Login hostname**, which is usually the hostname or public IP address of the cluster's login node. + +9. Enter the **Head queue name**, the cluster queue to which the Nextflow job will be submitted. + +10. Enter the **Compute queue name**, the cluster queue to which the Nextflow job will submit tasks. + + :::tip + The compute queue can be overridden by the Nextflow pipeline configuration. See the Nextflow [docs](https://www.nextflow.io/docs/latest/process.html#queue) for more details. + ::: + +11. You can use the **Environment variables** option to specify custom environment variables for the Head job and/or Compute jobs. + +12. Configure any advanced options described below, as needed. + +13. Select **Create** to finalize the creation of the compute environment. + +Jump to the documentation for [Launching Pipelines](../launch/launchpad.mdx). + +### Advanced options + +- You can use the **Nextflow queue size** to limit the number of jobs that Nextflow can submit to the scheduler at the same time. + +- You can use the **Head job submit options** to specify Slurm options for the head job. You can optionally apply these options to compute jobs as well: + + ![](./_images/head_job_propagation.png) diff --git a/platform-enterprise/core-concepts/definitions.mdx b/platform-enterprise/core-concepts/definitions.mdx new file mode 100644 index 000000000..faf71559f --- /dev/null +++ b/platform-enterprise/core-concepts/definitions.mdx @@ -0,0 +1,67 @@ +--- +title: Core Concepts +headline: "Definitions" +description: "Core concepts and terms used in Tower." +--- + +### Pipelines + +A pipeline is a pre-configured workflow that can be used by all users in a workspace. It is composed of a workflow repository, launch parameters, and a compute environment. + +### Launchpad + +The Launchpad contains the collection of available pipelines that can be run in a workspace. From here, you can view and select pre-configured pipelines for launch. + +### Runs + +The Runs view is used to monitor and inspect the details of workflow executions in a workspace. + +### Compute environments + +A compute environment is the platform where workflows are executed. It is composed of the credentials, configuration settings, and storage options configured for that platform. + +### Credentials + +Credentials are access keys stored by Tower in an encrypted format, using AES-256 encryption. They allow the safe storage of authentication keys for compute environments, private code repositories, and external services. + +### Datasets + +Datasets are collections of versioned, structured data, usually in TSV (tab-separated values) and CSV (comma-separated values) formats. They are used to manage sample sheets and metadata, to be validated and used as inputs for workflow executions. + +### Actions + +Actions are used to automate the execution of pre-configured workflows (pipelines), based on event triggers such as code commits and webhooks. 
+ +### Pipeline secrets + +Pipeline secrets are keys used by workflow tasks to interact with external systems, such as a password to connect to an external database or an API token. They are stored in Tower using AES-256 encryption. + +There are two types of pipeline secrets: + +- Pipeline secrets defined in a workspace are available to the workflows launched within that workspace. + +- Pipeline secrets defined by a user are available to the workflows launched by that user in any workspace. + +### Workspaces + +A workspace provides the context in which a user operates, including what resources are available and who can access them. It is composed of pipelines, compute environments, credentials, runs, actions, and datasets. Access permissions are controlled through participants, collaborators, and teams. + +### Organizations + +An organization is the top-level entity where businesses, institutions, and groups can collaborate. It can contain multiple workspaces. + +### Members + +A member is a user who is internal to the organization. Members have an organization role and can operate in one or more organization workspaces. In each workspace, members can have a participant role that defines the permissions granted to them within that workspace. + +### Team + +A team is a group of members in the same organization. Teams can operate in one or more organization workspaces with a specific workspace role (one role per workspace). + +### Participant + +A user operating with an assigned role within a workspace. + +### Participant role + +The participant role defines the permissions granted to a user to perform actions or tasks within a workspace. diff --git a/platform-enterprise/credentials/_images/agent_credential.png b/platform-enterprise/credentials/_images/agent_credential.png new file mode 100644 index 000000000..d77decf85 Binary files /dev/null and b/platform-enterprise/credentials/_images/agent_credential.png differ diff --git a/platform-enterprise/credentials/_images/container_registry_credentials_blank.png b/platform-enterprise/credentials/_images/container_registry_credentials_blank.png new file mode 100644 index 000000000..0669b4af7 Binary files /dev/null and b/platform-enterprise/credentials/_images/container_registry_credentials_blank.png differ diff --git a/platform-enterprise/credentials/_images/credentials_overview.png b/platform-enterprise/credentials/_images/credentials_overview.png new file mode 100644 index 000000000..40c1ad107 Binary files /dev/null and b/platform-enterprise/credentials/_images/credentials_overview.png differ diff --git a/platform-enterprise/credentials/_images/ssh_credential.png b/platform-enterprise/credentials/_images/ssh_credential.png new file mode 100644 index 000000000..29ebb6ae4 Binary files /dev/null and b/platform-enterprise/credentials/_images/ssh_credential.png differ diff --git a/platform-enterprise/credentials/agent_credentials.mdx b/platform-enterprise/credentials/agent_credentials.mdx new file mode 100644 index 000000000..9daa5ca50 --- /dev/null +++ b/platform-enterprise/credentials/agent_credentials.mdx @@ -0,0 +1,29 @@ +--- +title: Tower Agent credentials +headline: "Tower Agent credentials" +description: "Instructions to set up Tower Agent credentials in Nextflow Tower." +--- + +[Tower Agent](../agent.mdx) enables Tower to launch pipelines on HPC clusters that do not allow direct access through an SSH client. Tower Agent authenticates a secure connection with Tower using a Tower Agent credential. 
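+
+In practice, the agent runs on a node of your cluster (for example, the login node) using the connection ID from the credential and a Tower access token. The commands below are only an illustration of the typical shape of that setup — the **Usage** field described below generates the exact download URL and options for you, and the token, connection ID, and work directory shown here are placeholders:
+
+```bash
+# Placeholder values - copy the exact snippet generated by the Usage field in the credential form
+export TOWER_ACCESS_TOKEN=<your Tower access token>
+
+# Download the Tower Agent launcher (URL shown here is illustrative)
+curl -fSL https://github.com/seqeralabs/tower-agent/releases/latest/download/tw-agent-linux-x86_64 > tw-agent
+chmod +x tw-agent
+
+# Start the agent with your connection ID and a working directory on the cluster
+./tw-agent <your connection ID> --work-dir=$HOME/work
+```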
+ +### Tower Agent sharing + +You can share a single Tower Agent instance with all members of a workspace. Create a Tower Agent credential, with **Shared agent** enabled, in the relevant workspace. All workspace members can then use this Tower Agent credential (Connection ID + Tower access token) to use the same Tower Agent instance. + +### Create a Tower Agent credential + +- From an organization workspace: navigate to the Credentials tab and select **Add Credentials**. + +- From your personal workspace: select **Your credentials** from the user top-right menu, then select **Add credentials**. + +![](./_images/agent_credential.png) + +| Property | Description | Example | +| ------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------- | +| Name | A unique name for the credentials using alphanumeric characters, dashes, or underscores. | `my-agent-creds` | +| Provider | Credential type | Tower Agent | +| Agent connection ID | The connection ID used to run your Tower Agent instance. Must match the connection ID used when running the Agent (see **Usage** below) | `5429d66d-7712-xxxx-xxxx-xxxxxxxxxxxx` | +| Shared agent | Enables Tower Agent sharing for all workspace members. | | +| Usage | Populates a code snippet for Tower Agent download with your connection ID. Replace `` with your [Tower access token](../api/overview.mdx#authentication). | | + +Once the form is complete, select **Add**. The new credential is now listed under the **Credentials** tab. diff --git a/platform-enterprise/credentials/aws_registry_credentials.mdx b/platform-enterprise/credentials/aws_registry_credentials.mdx new file mode 100644 index 000000000..d12ee72b3 --- /dev/null +++ b/platform-enterprise/credentials/aws_registry_credentials.mdx @@ -0,0 +1,49 @@ +--- +title: AWS ECR credentials +headline: "AWS ECR credentials" +description: "Step-by-step instructions to set up AWS ECR credentials in Nextflow Tower." +--- + +## Container registry credentials + +From version 22.3, Tower supports the configuration of credentials for the Nextflow Wave container service to authenticate to private and public container registries. For more information on Wave containers, see [here](https://www.nextflow.io/docs/latest/wave.html). + +:::note +Container registry credentials are only leveraged by the Wave containers service. In order for your pipeline execution to leverage Wave containers, add `wave { enabled=true }` either to the **Nextflow config** field on the launch page, or to your nextflow.config file. +::: + +### AWS ECR access + +Wave requires programmatic access to your private registry via [long-term access keys](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#create-long-term-access-keys). Create a user with registry read permissions (e.g. a subset of the AWS-managed `AmazonEC2ContainerRegistryReadOnly` policy) for this purpose. + +An IAM administrator can create and manage access keys from the AWS management console: + +1. Open the [IAM console](https://console.aws.amazon.com/iam/). +2. Select **Users** from the navigation pane. +3. Select the name of the user whose keys you want to manage, then select the **Security credentials** tab. We recommend creating an IAM user specifically for Wave authentication instead of using existing credentials with broader permissions. +4. In the **Access keys** section, select **Create access key**. 
Each IAM user can have only two access keys at a time, so if the Create option is deactivated, delete an existing access key first. +5. On the **Access key best practices & alternatives** page, select **Other** and then **Next**. +6. On the **Retrieve access key** page, you can either **Show** the user's secret access key details, or store them by selecting **Download .csv file**. +7. The newly created access key pair is active by default and can be stored as a container registry credential in Tower. + +:::note +Your credential must be stored in Tower as a **container registry** credential, even if the same access keys already exist in Tower as a workspace credential. +::: + +### Add credentials to Tower + +- From an organization workspace: navigate to the Credentials tab and select **Add Credentials**. + +- From your personal workspace: select **Your credentials** from the user top-right menu, then select **Add credentials**. + +![](./_images/container_registry_credentials_blank.png) + +| Property | Description | Example | +| --------------- | --------------------------------------------------------------------------------------- | --------------------------------------------------------- | +| Name | A unique name for the credentials using alphanumeric characters, dashes, or underscores | `my-registry-creds` | +| Provider | Credential type | Container registry | +| User name | IAM user access key ID | `AKIAIOSFODNN7EXAMPLE` | +| Password | IAM user secret access key | `wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY` | +| Registry server | The container registry server name | `https://.dkr.ecr..amazonaws.com` | + +Once the form is complete, select **Add**. The new credential is now listed under the **Credentials** tab. diff --git a/platform-enterprise/credentials/azure_registry_credentials.mdx b/platform-enterprise/credentials/azure_registry_credentials.mdx new file mode 100644 index 000000000..24eb6702b --- /dev/null +++ b/platform-enterprise/credentials/azure_registry_credentials.mdx @@ -0,0 +1,50 @@ +--- +title: Azure container registry credentials +headline: "Azure container credentials" +description: "Step-by-step instructions to set up Azure container registry credentials in Nextflow Tower." +--- + +## Container registry credentials + +From version 22.3, Tower supports the configuration of credentials for the Nextflow Wave container service to authenticate to private and public container registries. For more information on Wave containers, see [here](https://www.nextflow.io/docs/latest/wave.html). + +:::note +Container registry credentials are only leveraged by the Wave containers service. In order for your pipeline execution to leverage Wave containers, add `wave { enabled=true }` either to the **Nextflow config** field on the launch page, or to your nextflow.config file. +::: + +### Azure Container Registry access + +Azure container registry makes use of Azure RBAC (Role-Based Access Control) to grant users access — for further details, see [Azure container registry roles and permissions](https://learn.microsoft.com/en-us/azure/container-registry/container-registry-roles). + +You must use Azure credentials with long-term registry read (**content/read**) access to authenticate Tower to your registry. We recommend a [token with repository-scoped permissions](https://learn.microsoft.com/en-us/azure/container-registry/container-registry-repository-scoped-permissions) that is used only by Tower. + +1. In the Azure portal, navigate to your container registry. +2. 
Under **Repository permissions**, select **Tokens -> +Add**. +3. Enter a token name. +4. Under **Scope map**, select **Create new**. +5. In the **Create scope map** section, enter a name and description for the new scope map. +6. Select your **Repository** from the drop-down menu. +7. Select **content/read** from the **Permissions** drop-down menu, then select **Add** to create the scope map. +8. In the **Create token** section, ensure the **Status** is **Enabled** (default), then select **Create**. +9. Return to **Repository permissions -> Tokens** for your registry, then select the token you just created. +10. On the token details page, select **password1** or **password2**. +11. In the password details section, uncheck the **Set expiration date?** checkbox, then select **Generate**. +12. Copy and save the password after it is generated. The password will be displayed only once. + +### Add credentials to Tower + +- From an organization workspace: navigate to the Credentials tab and select **Add Credentials**. + +- From your personal workspace: select **Your credentials** from the user top-right menu, then select **Add credentials**. + +![](./_images/container_registry_credentials_blank.png) + +| Property | Description | Example | +| --------------- | ------------------------------------------------------------------------------------------------ | ----------------------- | +| Name | A unique name for the credentials using alphanumeric characters, dashes, or underscores | `my-registry-creds` | +| Provider | Credential type | Container registry | +| User name | Registry token name | `my-registry-token` | +| Password | Registry token password | `OuSrehzUX...ACRDO+2TX` | +| Registry server | The container registry server name (**Settings -> Access keys -> Login server** in Azure portal) | `myregistry.azurecr.io` | + +Once the form is complete, select **Add**. The new credential is now listed under the **Credentials** tab. diff --git a/platform-enterprise/credentials/docker_hub_registry_credentials.mdx b/platform-enterprise/credentials/docker_hub_registry_credentials.mdx new file mode 100644 index 000000000..848f22e70 --- /dev/null +++ b/platform-enterprise/credentials/docker_hub_registry_credentials.mdx @@ -0,0 +1,43 @@ +--- +title: Docker container registry credentials +headline: "Docker container credentials" +description: "Step-by-step instructions to set up Docker container registry credentials in Nextflow Tower." +--- + +## Container registry credentials + +From version 22.3, Tower supports the configuration of credentials for the Nextflow Wave container service to authenticate to private and public container registries. For more information on Wave containers, see [here](https://www.nextflow.io/docs/latest/wave.html). + +:::note +Container registry credentials are only leveraged by the Wave containers service. In order for your pipeline execution to leverage Wave containers, add `wave { enabled=true }` either to the **Nextflow config** field on the launch page, or to your nextflow.config file. +::: + +### Docker Hub registry access + +You must use Docker Hub credentials with **Read-only** access to authenticate Tower to your registry. Docker Hub makes use of Personal Access Tokens (PATs) for authentication. Note that we do not currently support Docker Hub authentication using 2FA (two-factor authentication). + +To create your access token in Docker Hub: + +1. Log in to [Docker Hub](https://hub.docker.com/). +2. 
Select your username in the top right corner and select **Account Settings**. +3. Select **Security -> New Access Token**. +4. Enter a token description and select **Read-only** from the Access permissions drop-down menu, then select **Generate**. +5. Copy and save the generated access token (this is only displayed once). + +### Add credentials to Tower + +- From an organization workspace: navigate to the Credentials tab and select **Add Credentials**. + +- From your personal workspace: select **Your credentials** from the user top-right menu, then select **Add credentials**. + +![](./_images/container_registry_credentials_blank.png) + +| Property | Description | Example | +| --------------- | --------------------------------------------------------------------------------------- | ---------------------- | +| Name | A unique name for the credentials using alphanumeric characters, dashes, or underscores | `my-registry-creds` | +| Provider | Credential type | Container registry | +| User name | Your Docker username | `user1` | +| Password | Your Personal Access Token | `1fcd02dc-...215bc3f3` | +| Registry server | The container registry hostname (excluding protocol) | `docker.io` | + +Once the form is complete, select **Add**. The new credential is now listed under the **Credentials** tab. diff --git a/platform-enterprise/credentials/google_registry_credentials.mdx b/platform-enterprise/credentials/google_registry_credentials.mdx new file mode 100644 index 000000000..3a4b47e10 --- /dev/null +++ b/platform-enterprise/credentials/google_registry_credentials.mdx @@ -0,0 +1,83 @@ +--- +title: Google Cloud Artifact Registry credentials +headline: "Google Cloud registry credentials" +description: "Step-by-step instructions to set up Google Cloud registry credentials in Nextflow Tower." +--- + +## Container registry credentials + +From version 22.3, Tower supports the configuration of credentials for the Nextflow Wave container service to authenticate to private and public container registries. For more information on Wave containers, see [here](https://www.nextflow.io/docs/latest/wave.html). + +:::note +Container registry credentials are only leveraged by the Wave containers service. In order for your pipeline execution to leverage Wave containers, add `wave { enabled=true }` either to the **Nextflow config** field on the launch page, or to your nextflow.config file. +::: + +### Google Cloud registry access + +Although Container Registry is still available and supported as a [Google Enterprise API](https://cloud.google.com/blog/topics/inside-google-cloud/new-api-stability-tenets-govern-google-enterprise-apis), new features will only be available in Artifact Registry. Container Registry will only receive critical security fixes. Google recommends using Artifact Registry for all new registries moving forward. + +Google Cloud Artifact Registry and Container Registry are fully integrated with Google Cloud services and support various authentication methods. Tower requires programmatic access to your private registry using [long-lived service account keys](https://cloud.google.com/artifact-registry/docs/docker/authentication#json-key) in JSON format. + +Create dedicated service account keys that are only used to interact with your repositories. Tower requires the [Artifact Registry Reader](https://cloud.google.com/artifact-registry/docs/access-control#permissions) or [Storage Object Viewer](https://cloud.google.com/container-registry/docs/access-control#permissions) role. 
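+
+If you prefer the command line to the console steps below, an equivalent service account and key can be created with the gcloud CLI. This is a sketch only — the project ID, service account name, and key file name are placeholders, and it grants the Artifact Registry Reader role (use Storage Object Viewer for Container Registry instead):
+
+```bash
+# Create a dedicated service account for registry access (names are placeholders)
+gcloud iam service-accounts create wave-registry-reader \
+    --project my-project-id \
+    --description "Read-only registry access for Tower/Wave"
+
+# Grant the Artifact Registry Reader role to the service account
+gcloud projects add-iam-policy-binding my-project-id \
+    --member "serviceAccount:wave-registry-reader@my-project-id.iam.gserviceaccount.com" \
+    --role "roles/artifactregistry.reader"
+
+# Generate a JSON key file for the service account
+gcloud iam service-accounts keys create key.json \
+    --iam-account wave-registry-reader@my-project-id.iam.gserviceaccount.com
+```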
+ +#### Create a service account + +=== "Google Cloud Artifact Registry" + + Administrators can create a service account from the Google Cloud console: + + 1. Navigate to the [Create service account](https://console.cloud.google.com/projectselector/iam-admin/serviceaccounts/create?walkthrough_id=iam--create-service-account) page. + 2. Select a Cloud project. + 3. Enter a service account name and (optional) description. + 4. Select **Create and continue**. + 5. From the **Role** drop-down menu under step 2, select **Artifact Registry -> Artifact Registry Reader**, then select Continue. + 6. (Optional) Grant other users and admins access to this service account under step 3. + 7. Select **Done**. + 8. From the project service accounts page, select the three-dot menu button under **Actions** for the service account you just created, then select **Manage keys**. + 9. On the Keys page, select **Add key**. + 10. On the Create private key popup, select **JSON** and then **Create**. This triggers a download of a JSON file containing the service account private key and service account details. + 11. Base-64 encode the contents of the JSON key file: + + ```bash + #Linux + base64 KEY-FILE-NAME > NEW-KEY-FILE-NAME + + #macOS + base64 -i KEY-FILE-NAME -o NEW-KEY-FILE-NAME + + #Windows + Base64.exe -e KEY-FILE-NAME > NEW-KEY-FILE-NAME + ``` + +=== "Google Cloud Container Registry" + + Administrators can create a service account from the Google Cloud console: + + 1. Navigate to the [Create service account](https://console.cloud.google.com/projectselector/iam-admin/serviceaccounts/create?walkthrough_id=iam--create-service-account) page. + 2. Select a Cloud project. + 3. Enter a service account name and (optional) description. + 4. Select **Create and continue**. + 5. From the **Role** drop-down menu under step 2, search for and select **Storage Object Viewer**, then select Continue. + 6. (Optional) Grant other users and admins access to this service account under step 3. + 7. Select **Done**. + 8. From the project service accounts page, select the three-dot menu button under **Actions** for the service account you just created, then select **Manage keys**. + 9. On the Keys page, select **Add key**. + 10. On the Create private key popup, select **JSON** and then **Create**. This triggers a download of a JSON file containing the service account private key and service account details. + +### Add credentials to Tower + +- From an organization workspace: navigate to the Credentials tab and select **Add Credentials**. + +- From your personal workspace: select **Your credentials** from the user top-right menu, then select **Add credentials**. + +![](./_images/container_registry_credentials_blank.png) + +| Property | Description | Example | +| --------------- | -------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------ | +| Name | A unique name for the credentials using alphanumeric characters, dashes, or underscores | `my-registry-creds` | +| Provider | Credential type | Container registry | +| User name | Service account key type | (Container Registry: `_json_key`, Artifact Registry: `_json_key_base64`) | +| Password | JSON key file content (base64-encoded for Artifact Registry — remove any line breaks or trailing spaces) | `wewogICJ02...9tIgp9Cg==` | +| Registry server | The container registry hostname (excluding protocol) | `-docker.pkg.dev` | + +Once the form is complete, select **Add**. 
The new credential is now listed under the **Credentials** tab. diff --git a/platform-enterprise/credentials/overview.mdx b/platform-enterprise/credentials/overview.mdx new file mode 100644 index 000000000..f7e223364 --- /dev/null +++ b/platform-enterprise/credentials/overview.mdx @@ -0,0 +1,19 @@ +--- +title: Credentials Overview +headline: "Credentials" +description: "Step-by-step instructions to set up credentials in Nextflow Tower." +--- + +## Credentials + +In Tower, you can configure **workspace credentials** to store the access keys and tokens for your [compute environments](../compute-envs/overview.mdx) and [Git hosting services](../git/overview.mdx). + +From Tower 22.3, you can configure **container registry credentials** to be used by the Wave containers service to authenticate to private and public container registries such as Docker Hub, Google Artifact Registry, Quay, etc. + +See the **Container registry credentials** section for registry-specific instructions. + +![](./_images/credentials_overview.png) + +:::note +All credentials are (AES-256) encrypted before secure storage and not exposed in an unencrypted way by any Tower API. +::: diff --git a/platform-enterprise/credentials/quay_registry_credentials.mdx b/platform-enterprise/credentials/quay_registry_credentials.mdx new file mode 100644 index 000000000..df2d85d57 --- /dev/null +++ b/platform-enterprise/credentials/quay_registry_credentials.mdx @@ -0,0 +1,42 @@ +--- +title: Quay container registry credentials +headline: "Quay container registry credentials" +description: "Step-by-step instructions to set up Quay container credentials in Nextflow Tower." +--- + +## Container registry credentials + +From version 22.3, Tower supports the configuration of credentials for the Nextflow Wave container service to authenticate to private and public container registries. For more information on Wave containers, see [here](https://www.nextflow.io/docs/latest/wave.html). + +:::note +Container registry credentials are only leveraged by the Wave containers service. In order for your pipeline execution to leverage Wave containers, add `wave { enabled=true }` either to the **Nextflow config** field on the launch page, or to your nextflow.config file. +::: + +### Quay repository access + +For Quay repositories, we recommend using [robot accounts](https://docs.quay.io/glossary/robot-accounts.html) with **Read** access permissions for authentication: + +1. Sign in to [quay.io](https://quay.io/). +2. From the user or organization view, select the **Robot Accounts** tab. +3. Select **Create Robot Account**. +4. Enter a robot account name. The username for robot accounts have the format `namespace+accountname`, where `namespace` is the user or organization name and `accountname` is your chosen robot account name. +5. Grant the robot account repository **Read** permissions from **Settings -> User and Robot Permissions** in the repository view. +6. Select the robot account in your admin panel to retrieve the token value. + +### Add credentials to Tower + +- From an organization workspace: navigate to the Credentials tab and select **Add Credentials**. + +- From your personal workspace: select **Your credentials** from the user top-right menu, then select **Add credentials**. 
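+
+Before completing the form shown below, you can optionally confirm that the robot account token is valid by logging in to Quay from a machine with Docker installed (the username and token are placeholders):
+
+```bash
+# Optional sanity check - replace the placeholders with your robot account name and token
+echo "PasswordFromQuayAdminPanel" | docker login quay.io --username "mycompany+myrobotaccount" --password-stdin
+```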
+ +![](./_images/container_registry_credentials_blank.png) + +| Property | Description | Example | +| --------------- | --------------------------------------------------------------------------------------- | ---------------------------- | +| Name | A unique name for the credentials using alphanumeric characters, dashes, or underscores | `my-registry-creds` | +| Provider | Credential type | Container registry | +| User name | Robot account username (`namespace+accountname`) | `mycompany+myrobotaccount` | +| Password | Robot account access token | `PasswordFromQuayAdminPanel` | +| Registry server | The container registry hostname | `quay.io` | + +Once the form is complete, select **Add**. The new credential is now listed under the **Credentials** tab. diff --git a/platform-enterprise/credentials/ssh_credentials.mdx b/platform-enterprise/credentials/ssh_credentials.mdx new file mode 100644 index 000000000..23b93c318 --- /dev/null +++ b/platform-enterprise/credentials/ssh_credentials.mdx @@ -0,0 +1,44 @@ +--- +title: SSH credentials +headline: "SSH credentials" +description: "Instructions to set up SSH credentials in Nextflow Tower." +--- + +SSH public key authentication relies on asymmetric cryptography to generate a public and private key pair. The public key remains on the target (remote) machine, while the private key (and passphrase) is stored in Tower as a credential. The key pair is used to authenticate a Tower connection with your SSH-enabled environment. + +:::note +All credentials are (AES-256) encrypted before secure storage and not exposed in an unencrypted way by any Tower API. +::: + +### Create an SSH key pair + +To use SSH public key authentication: + +- The remote system must have a version of SSH installed. This guide assumes the remote system uses OpenSSH. If you are using a different version of SSH, the key generation steps may differ. +- The SSH public key must be present on the remote system (usually in `~/.ssh/authorized_keys`). + +To generate an SSH key pair: + +1. From the target machine, open a terminal and run `ssh-keygen`. +2. Follow the prompts to: + - specify a file path and name (or keep the default) + - specify a passphrase (recommended) +3. Navigate to the target folder (default `/home/user/.ssh/id_rsa`) and open the private key file with a plain text editor. +4. Copy the private key file contents before navigating to Tower. + +### Create an SSH credential in Tower + +- From an organization workspace: navigate to the Credentials tab and select **Add Credentials**. + +- From your personal workspace: select **Your credentials** from the user top-right menu, then select **Add credentials**. + +![](./_images/ssh_credential.png) + +| Property | Description | Example | +| --------------- | -------------------------------------------------------------------------------------------------------------- | -------------------------------------------------- | +| Name | A unique name for the credentials using alphanumeric characters, dashes, or underscores. | `my-ssh-creds` | +| Provider | Credential type | SSH | +| SSH private key | The SSH private key file contents. | `-----BEGIN OPENSSH PRIVATE KEY-----b3BlbnNza....` | +| Passphrase | SSH private key passphrase (recommended). If your key pair was created without a passphrase, leave this blank. | | + +Once the form is complete, select **Add**. The new credential is now listed under the **Credentials** tab. 
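+
+For reference, the key-pair creation described above typically looks like the following on the target machine (a sketch assuming OpenSSH; the key file name and comment are illustrative):
+
+```bash
+# Generate a key pair on the target (remote) machine; set a passphrase when prompted
+ssh-keygen -t rsa -b 4096 -f ~/.ssh/tower_key -C "tower-ssh-credential"
+
+# Authorize the public key for incoming SSH connections on the same machine
+cat ~/.ssh/tower_key.pub >> ~/.ssh/authorized_keys
+chmod 600 ~/.ssh/authorized_keys
+
+# Print the private key so its contents can be pasted into the Tower credential form
+cat ~/.ssh/tower_key
+```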
diff --git a/platform-enterprise/dashboard/_images/dashboard_hero.png b/platform-enterprise/dashboard/_images/dashboard_hero.png new file mode 100644 index 000000000..73e7e0e4f Binary files /dev/null and b/platform-enterprise/dashboard/_images/dashboard_hero.png differ diff --git a/platform-enterprise/dashboard/_images/dashboard_orgs.png b/platform-enterprise/dashboard/_images/dashboard_orgs.png new file mode 100644 index 000000000..02d84de8d Binary files /dev/null and b/platform-enterprise/dashboard/_images/dashboard_orgs.png differ diff --git a/platform-enterprise/dashboard/overview.mdx b/platform-enterprise/dashboard/overview.mdx new file mode 100644 index 000000000..3ce984b26 --- /dev/null +++ b/platform-enterprise/dashboard/overview.mdx @@ -0,0 +1,35 @@ +--- +title: Dashboard Overview +headline: "Dashboard" +description: "View run status overview in Tower" +--- + +## Overview + +:::note +This feature is available from Tower v.22.3. +::: + +From version 22.3, Tower contains a **Dashboard** page that provides an overview of runs in your organizations and personal workspace at a glance. The dashboard is accessed from the user menu in the top right corner. Click your avatar, then select "Dashboard". + +![](./_images/dashboard_hero.png) + +The page is split into two main areas: + +### Filters and summary + +The drop-down lists at the top of the dashboard page filter total runs by your personal workspace, the organizations you have access to, and time range. + +Below the filters, a summary of total runs is shown by status. + +### Runs per organization + +Below the cards displaying total runs by status, run totals are filtered by each organization or your personal workspace. Filtering depends on what you selected in the drop-down options near the top of the page. + +Each card represents an organization. Total runs for the organization are arranged by workspace and status. + +![](./_images/dashboard_orgs.png) + +Click a run value in the table to navigate to a run list filtered by the status selected. + +Click a workspace name in the table to navigate to a run list filtered by the workspace selected. diff --git a/platform-enterprise/data-privacy/overview.mdx b/platform-enterprise/data-privacy/overview.mdx new file mode 100644 index 000000000..ea962e8ce --- /dev/null +++ b/platform-enterprise/data-privacy/overview.mdx @@ -0,0 +1,138 @@ +--- +title: Data Privacy +headline: Data Privacy +description: "Description of data collected by Tower" +--- + +### Your data + +Your data stays strictly within **your** infrastructure itself. When you launch a workflow through Tower, you need to connect your infrastructure (HPC/VMs/K8s) by creating the appropriate credentials and compute environment in a workspace. + +Tower then uses this configuration to trigger a Nextflow workflow within your infrastructure similar to what is done via the Nextflow CLI, therefore Tower does not manipulate any data itself and no data is transferred to the infrastructure where Tower is running. + +It may be possible to access some data within your storage from the Nextflow Tower interface - for example, viewing logs and reports generated in a pipeline run - however, this data is never stored within the Tower infrastructure. 
+ +### Metadata stored by Nextflow Tower + +Workflow execution metadata is sent by the Nextflow runtime to Nextflow Tower when: + +- Launching workflow with Tower +- Using the `-with-tower` option at the command line +- When a Nextflow Tower is specified in the Nextflow config + +The following sections describe the data structure and metadata fields collected by Tower. + +#### Workflow metadata + +The following metadata fields are collected and stored by the Tower backend during a workflow execution: + +| Name | Description | +| --------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- | +| `command_line` | The command line used to launch the workflow execution | +| `commit_id` | The workflow project commit Id at the time of the execution | +| `complete` | The workflow execution completion timestamp | +| `config_files` | The nextflow config file paths(s) involved in the workflow execution | +| `config_text` | The nextflow config content used for the workflow execution. Note: secrets, such as, AWS keys are stripped and _not_ included in this field. | +| `container` | The container image name(s) used for the pipeline execution | +| `container_engine` | The container engine name used for the pipeline execution | +| `duration` | The workflow execution overall duration (wall time) | +| `error_message` | The error message reported in the case of nextflow execution failure | +| `error_report` | The extended error message reported in case of workflow execution error. | +| `exit_status` | The workflow execution (POSIX) exit code | +| `home_dir` | The launching user home directory path | +| `launch_dir` | The workflow launching directory path | +| `manifest_author` | The workflow project author as defined in the nextflow config manifest file | +| `manifest_default_branch` | The workflow project default Git branch as defined in the nextflow config manifest file | +| `manifest_description` | The workflow project description as defined in the nextflow config manifest file | +| `manifest_gitmodules` | The workflow project Git submodule flag in the nextflow config manifest file | +| `manifest_home_page` | The workflow project Git home page as defined in the nextflow config manifest file | +| `manifest_main_script` | The workflow project main script file name as defined in the nextflow config manifest file | +| `manifest_name` | The workflow project name as defined in the nextflow config manifest file | +| `manifest_nextflow_version` | The workflow project required Nextflow version defined in the nextflow config manifest file | +| `manifest_version` | The workflow project version string as defined in the nextflow config manifest file | +| `nextflow_build` | The build number of the Nextflow runtime used to launch the workflow execution | +| `nextflow_timestamp` | The build timestamp of the Nextflow runtime used to launch the workflow execution | +| `nextflow_version` | The version string of the Nextflow runtime used to launch the workflow execution | +| `params` | The workflow params used to launch the pipeline execution | +| `profile` | The workflow config profile string used for the pipeline execution | +| `project_dir` | The directory path where the workflow scripts are stored | +| `project_name` | The workflow project name | +| `repository` | The workflow project repository | +| `resume` | The flag set when a resume execution was submitted | +| `revision` | The workflow project revision 
number | +| `run_name` | The workflow run name as given by the Nextflow runtime | +| `script_file` | The workflow script file path | +| `script_id` | The workflow script checksum number | +| `script_name` | The workflow script filename | +| `session_id` | The workflow execution unique UUID as assigned by the Nextflow runtime | +| `start` | The workflow execution start timestamp | +| `stats_cached_count` | The number of cached tasks upon completion | +| `stats_cached_duration` | The aggregate time of cached tasks upon completion | +| `stats_cached_pct` | The percentage of cached tasks upon completion | +| `stats_compute_time_fmt` | The overall compute time as a formatted string | +| `stats_failed_count` | The number of failed tasks upon completion | +| `stats_failed_count_fmt` | The number of failed tasks upon completion as a formatted string | +| `stats_failed_duration` | The aggregate time of failed tasks upon completion | +| `stats_failed_pct` | The percentage of failed tasks upon completion | +| `stats_ignored_count` | The number of ignored tasks upon completion | +| `stats_ignored_count_fmt` | The number of ignored tasks upon completion as a formatted string | +| `stats_ignored_pct` | The percentage of ignored tasks upon completion | +| `stats_succeed_count` | The number of succeeded tasks upon completion | +| `stats_succeed_count_fmt` | The number of succeeded tasks upon completion as a formatted string | +| `stats_succeed_duration` | The aggregate time of succeeded tasks upon completion | +| `stats_succeed_pct` | The percentage of succeeded tasks upon completion | +| `status` | The workflow execution status | +| `submit` | The workflow execution submission timestamp | +| `success` | The flag reporting whether the execution completed successfully | +| `user_name` | The POSIX user name launching that launched the workflow execution | +| `work_dir` | The workflow execution scratch directory path | + +#### Task Metadata + +| Name | Description | +| -------------- | ---------------------------------------------------------------------------------------------- | +| `attempt` | Number of Nextflow execution attempts of the task | +| `cloud_zone` | Cloud zone where the task execution was allocated | +| `complete` | Task execution completion timestamp | +| `container` | Container image name used to execute the task | +| `cost` | Estimated task compute cost | +| `cpus` | Number of CPUs requested | +| `disk` | Amount of disk storage requested | +| `duration` | Amount of time for the task completion | +| `env` | Task execution environment variables | +| `error_action` | Action applied on task failure | +| `executor` | Executor requested for the task execution | +| `exit_status` | Task POSIX exit code on completion | +| `hash` | Task unique hash code | +| `inv_ctxt` | Number of involuntary context switches | +| `machine_type` | Cloud virtual machine type | +| `memory` | Amount of memory requested | +| `module` | Environment Module requested | +| `name` | Task unique name | +| `native_id` | Task unique ID as assigned by the underlying execution platform | +| `pcpu` | Percentage of CPU used to compute the task | +| `peak_rss` | Peak of real memory during the task execution | +| `peak_vmem` | Peak of virtual memory during the task execution | +| `pmem` | Percentage of memory used to compute the task | +| `price_model` | The cloud price model applied for the task | +| `process` | The nextflow process name | +| `queue` | The compute queue name requested | +| `rchar` | Number of bytes the process read, 
using any read-like system call from files, pipes, tty, etc. | +| `read_bytes` | Number of bytes the process directly read from disk | +| `realtime` | The time required to compute the task | +| `rss` | Real memory (resident set) size of the process | +| `scratch` | Flag reporting the task was executed in a local scratch path | +| `script` | The task command script | +| `start` | Task execution start timestamp | +| `status` | The task execution status | +| `submit` | Task submission timestamp | +| `syscr` | Number of read-like system call invocations that the process performed | +| `syscw` | Number of write-like system call invocations that the process performed | +| `tag` | Nextflow tag associated to the task execution | +| `task_id` | Nextflow task ID | +| `time` | Task execution timeout requested | +| `vmem` | Virtual memory size used by the task execution | +| `vol_ctxt` | Number of voluntary context switches | +| `wchar` | Number of bytes the process wrote, using any write-like system call | +| `workdir` | Task execution work directory | +| `write_bytes` | Number of bytes the process written to disk | diff --git a/platform-enterprise/datasets/_images/create_dataset.png b/platform-enterprise/datasets/_images/create_dataset.png new file mode 100644 index 000000000..310b82795 Binary files /dev/null and b/platform-enterprise/datasets/_images/create_dataset.png differ diff --git a/platform-enterprise/datasets/_images/datasets_dropdown.png b/platform-enterprise/datasets/_images/datasets_dropdown.png new file mode 100644 index 000000000..ee41d3d45 Binary files /dev/null and b/platform-enterprise/datasets/_images/datasets_dropdown.png differ diff --git a/platform-enterprise/datasets/_images/datasets_listing.png b/platform-enterprise/datasets/_images/datasets_listing.png new file mode 100644 index 000000000..2639242dc Binary files /dev/null and b/platform-enterprise/datasets/_images/datasets_listing.png differ diff --git a/platform-enterprise/datasets/_images/edit_dataset.png b/platform-enterprise/datasets/_images/edit_dataset.png new file mode 100644 index 000000000..5b4506c51 Binary files /dev/null and b/platform-enterprise/datasets/_images/edit_dataset.png differ diff --git a/platform-enterprise/datasets/overview.mdx b/platform-enterprise/datasets/overview.mdx new file mode 100644 index 000000000..133c6993b --- /dev/null +++ b/platform-enterprise/datasets/overview.mdx @@ -0,0 +1,70 @@ +--- +title: Datasets overview +description: "Managing and using datasets in Nextflow Tower." +--- + +## Overview + +:::note +This feature is only available in organization workspaces. +::: + +Datasets in Nextflow Tower are CSV (comma-separated values) and TSV (tab-separated values) formatted files stored in a workspace. They are designed to be used as inputs to pipelines to simplify data management, minimize user data-input errors, and facilitate reproducible workflows. + +The combination of datasets, [pipeline secrets](../secrets/overview.mdx), and [pipeline actions](../pipeline-actions/overview.mdx) in Tower allow you to automate workflows to curate your data and maintain and launch pipelines based on specific events. See [here](https://seqera.io/blog/workflow-automation/) for an example of pipeline workflow automation using Tower. + +- Using datasets reduces errors that occur due to manual data entry when launching pipelines. + +- Datasets can be generated automatically in response to events (such as S3 storage new file notifications). 
+ +- Datasets can streamline differential data analysis when using the same pipeline to launch a run for each dataset as it becomes available. + +For your pipeline to use your dataset as input during runtime, information about the dataset and file format must be included in the relevant parameters of your [pipeline schema](../pipeline-schema/overview.mdx). + +### Dataset validation and file content requirements + +Tower does not validate your dataset file contents. While datasets can contain static file links, you are responsible for maintaining the access to that data. + +Datasets can point to files stored in various locations, such as Amazon S3 or GitHub. To stage the file paths defined in the dataset, Nextflow requires access to the infrastructure where the files reside, whether on Cloud or HPC systems. Add the access keys for data sources that require authentication to your [pipeline secrets](../secrets/overview.mdx). + +### Dataset permissions + +All Tower users have access to the datasets feature in organization workspaces. + +### Creating a new dataset + +To create a new dataset, follow these steps: + +1. Open the **Datasets** tab in your organization workspace. +2. Select **New dataset**. +3. Complete the **Name** and **Description** fields using information relevant to your dataset. +4. Add the dataset file to your workspace with drag-and-drop or the system file explorer dialog. +5. For dataset files that use the first row for column names, customize the dataset view with the **First row as header** option. + +:::caution +The size of the dataset file cannot exceed 10MB. +::: + +### Dataset versions + +Datasets in Tower can accommodate multiple versions of a dataset. To add a new version for an existing dataset, follow these steps: + +1. Select **Edit** next to the dataset you wish to update. +2. Select **Add a new version**. +3. Upload the newer version of the dataset and select **Update**. + +:::caution +All subsequent versions of a dataset must be in the same format (.csv or .tsv) as the initial version. +::: + +### Using a dataset + +To use a dataset with the saved pipelines in your workspace, follow these steps: + +1. Open any pipeline that contains a pipeline-schema from the Launchpad. +2. Select the input field for the pipeline, removing any default values. +3. Pick the dataset to use as input to your pipeline. + +:::note +The datasets shown in the drop-down menu depend on the chosen format in your `pipeline-schema.json`. If the schema specifies `"mimetype": "text/csv"`, no TSV datasets will be available, and vice versa. +::: diff --git a/platform-enterprise/faqs.mdx b/platform-enterprise/faqs.mdx new file mode 100644 index 000000000..b33f117fb --- /dev/null +++ b/platform-enterprise/faqs.mdx @@ -0,0 +1,1101 @@ +--- +title: Frequently Asked Questions +headline: "FAQ" +description: "Frequently asked questions" +--- + +## General Questions + +### Administration Console + +**

Q: How do I access the Administration Console?

** + +The Administration Console allows Tower instance administrators to interact with all users and organizations registered with the platform. Administrators must be identified in your Tower instance configuration files prior to instantiation of the application. + +1. Create a `TOWER_ROOT_USERS` environment variable (e.g. via _tower.env_ or Kubernetes ConfigMap). +2. Populate the variable with a sequence of comma-delimited email addresses (no spaces).
Example: `TOWER_ROOT_USERS=foo@foo.com,bar@bar.com` +3. If using a Tower version earlier than 21.12: + 1. Add the following configuration to _tower.yml_: + + ```yml + tower: + admin: + root-users: "${TOWER_ROOT_USERS:[]}" + ``` + +4. Restart the `cron` and `backend` containers/Deployments. +5. The console will now be available via your Profile drop-down menu. + +### API + +**

Q: I am trying to query more results than the maximum return size allows. Can I use pagination?

** + +Yes. We recommend using pagination to fetch the results in smaller chunks through multiple API calls with the help of `max` and subsequent `offset` parameters. You will receive an error like below if you run into the maximum result limit. + +`{object} length parameter cannot be greater than 100 (current value={value_sent})` + +We have laid out an example below using the workflow endpoint. + +``` +curl -X GET "https://$TOWER_SERVER_URL/workflow/$WORKFLOW_ID/tasks?workspaceId=$WORKSPACE_ID&max=100" \ + -H "Accept: application/json" \ + -H "Authorization: Bearer $TOWER_ACCESS_TOKEN" + +curl -X GET "https://$TOWER_SERVER_URL/workflow/$WORKFLOW_ID/tasks?workspaceId=$WORKSPACE_ID&max=100&offset=100" \ + -H "Accept: application/json" \ + -H "Authorization: Bearer $TOWER_ACCESS_TOKEN" +``` + +**

Q: Why am I receiving a 403 HTTP Response when trying to launch a pipeline via the `/workflow/launch` API endpoint?

** + +Launch users have more restricted permissions within a Workspace than Power users. While both can launch pipelines via API calls, Launch users must specify additional values that are optional for a Power user. + +One such value is `launch.id`; attempting to launch a pipeline without specifying a `launch.id` in the API payload is equivalent to using the "Start Quick Launch" button within a workspace (a feature only available to Power users). + +If you have encountered the 403 error as a result of being a Launch user who did not provide a `launch.id`, please try resolving the problem as follows: + +1. Provide the launch ID to the payload sent to the tower using the same endpoint. To do this; + + 1. Query the list of pipelines via the `/pipelines` endpoint. Find the `pipelineId` of the pipeline you intend to launch. + 2. Once you have the `pipelineId`, call the `/pipelines/{pipelineId}/launch` API to retrieve the pipeline's `launch.id`. + 3. Include the `launch.id` in your call to the `/workflow/launch` API endpoint (see example below). + + ``` + { + "launch": { + "id": "Q2kVavFZNVCBkC78foTvf", + "computeEnvId": "4nqF77d6N1JoJrVrrgB8pH", + "runName": "sample-run", + "pipeline": "https://github.com/sample-repo/project", + "workDir": "s3://myBucketName", + "revision": "main" + } + } + ``` + +2. If a launch id remains unavailable to you, upgrade your user role to 'Maintain' or higher. This will allow you to execute quick launch-type pipeline invocations. + +### Common Errors + +**

Q: After following the log-in link, why is my screen frozen at `/auth?success=true`?

** + +Starting with v22.1, Tower Enterprise implements stricter cookie security by default and will only send an auth cookie if the client is connected via HTTPS. The lack of an auth token will cause HTTP-only log-in attempts to fail (thereby causing the frozen screen). + +To remediate this problem, set the following environment variable `TOWER_ENABLE_UNSAFE_MODE=true`. + +**
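For reference, a minimal sketch, assuming environment variables are passed to the Tower containers through a `tower.env` file (only appropriate for test or evaluation deployments that cannot terminate HTTPS):

```bash
# tower.env (sketch): allow the auth cookie to be sent over plain HTTP.
# Production deployments should serve Tower over HTTPS instead of using this override.
TOWER_ENABLE_UNSAFE_MODE=true
```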

Q: "Unknown pipeline repository or missing credentials" error when pulling from a public Github repository?

** + +Github imposes [rate limits](https://docs.github.com/en/rest/overview/resources-in-the-rest-api#rate-limiting) on repository pulls (including public repositories), where unauthenticated requests are capped at 60 requests/hour and authenticated requests are capped at 5000/hour. Tower users tend to encounter this error due to the 60 request/hour cap. + +To resolve the problem, please try the following: + +1. Ensure there is at least one Github credential in your Workspace's Credentials tab. +2. Ensure that the **Access token** field of all Github Credential objects is populated with a [Personal Access Token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token) value, NOT a user password. (_Github PATs are typically several dozen characters long and begin with a `ghp_`prefix; example:`ghp*IqIMNOZH6zOwIEB4T9A2g4EHMy8Ji42q4HA`*) +3. Confirm that your PAT is providing the elevated threshold and transactions are being charged against it: + + `curl -H "Authorization: token ghp_LONG_ALPHANUMERIC_PAT" -H "Accept: application/vnd.github.v3+json" https://api.github.com/rate_limit` + +**

Q: "Row was updated or deleted by another transaction (or unsaved-value mapping was incorrect)" error.

** + +This error can occur if incorrect configuration values are assigned to the `backend` and `cron` containers' `MICRONAUT_ENVIRONMENTS` environment variable. You may see other unexpected system behaviour like two exact copies of the same Nextflow job be submitted to the Executor for scheduling. + +Please verify the following: + +1. The `MICRONAUT_ENVIRONMENTS` environment variable associated with the `backend` container: + - Contains `prod,redis,ha` + - Does not contain `cron` +2. The `MICRONAUT_ENVIRONMENTS` environment variable associated with the `cron` container: + - Contains `prod,redis,cron` + - Does not contain `ha` +3. You do not have another copy of the `MICRONAUT_ENVIRONMENTS` environment variable defined elsewhere in your application (e.g. a _tower.env_ file or Kubernetes ConfigMap). +4. If you are using a separate container/pod to execute _migrate-db.sh_, there is no `MICRONAUT_ENVIRONMENTS` environment variable assigned to it. + +**
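For reference, a sketch of the intended split, assuming each container reads its variables from its own env file (file names are illustrative):

```bash
# backend.env (sketch)
MICRONAUT_ENVIRONMENTS=prod,redis,ha

# cron.env (sketch)
MICRONAUT_ENVIRONMENTS=prod,redis,cron
```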

Q: Why do I get a `chmod: cannot access PATH/TO/bin/*: No such file or directory` exception?

** + +This error will be thrown if you attempt to run `chmod` against an S3/fusion-backed workdir [which contains only hidden files](https://github.com/nextflow-io/nextflow/issues/3227). + +The behaviour is patched in [Nextflow v22.09.7-edge](https://github.com/nextflow-io/nextflow/releases/tag/v22.09.7-edge). If you are unable to upgrade please see the original bug report for alternative workarounds. + +**

Q: "No such variable" error.

** + +This error can occur if you execute a DSL 1-based Nextflow workflow using [Nextflow 22.03.0-edge](https://github.com/nextflow-io/nextflow/releases/tag/v22.03.0-edge) or later. + +**

Q: Does the sleep command work the same way across my entire script?

** + +The `sleep` commands within your Nextflow workflows may differ in behaviour depending on where they are: + +- If used within an `errorStrategy` block, the Groovy sleep function will be used (which takes its value in milliseconds). +- If used within a process script block, that language's sleep binary/method will be used. **Example:** [this BASH script](https://www.nextflow.io/docs/latest/metrics.html?highlight=sleep) uses the BASH sleep binary, which takes its value in seconds. + +**

Q: Why does re-launching/resuming a run fail with `field revision is not writable`?

** + +A known issue with Tower versions prior to 22.3 caused resuming runs to fail for users with the launch role. This issue was fixed in Tower 22.3. Upgrade to the latest version of Tower to allow launch users to resume runs. + +### Compute Environments + +**

Q: Can the name of a Compute Environment created in Tower contain special characters?

** + +No. Tower version 21.12 and later do not support the inclusion of special characters in the name of Compute Environment objects. + +**

Q: How do I set NXF_OPTS values for a Compute Environment?

** + +This depends on your Tower version: + +- For v22.1.1+, specify the values via the **Environment variables** section of the "Add Compute Environment" screen. +- For versions earlier than v22.1.1, specify the values via the **Staging options > Pre-run script** textbox on the "Add Compute Environment" screen. Example: + + `export NXF_OPTS="-Xms64m -Xmx512m"` + +### Containers + +**

Q: Can I use rootless containers in my Nextflow pipelines?

** + +Most containers use the root user by default. However, some users prefer to define a non-root user in the container in order to minimize the risk of privilege escalation. Because Nextflow and its tasks use a shared work directory to manage input and output data, using rootless containers can lead to file permissions errors in some environments: + +``` +touch: cannot touch '/fsx/work/ab/27d78d2b9b17ee895b88fcee794226/.command.begin': Permission denied +``` + +As of Tower 22.1.0 or later, this issue should not occur when using AWS Batch. In other situations, you can avoid this issue by forcing all task containers to run as root. To do so, add one of the following snippets to your Nextflow configuration: + +``` +// cloud executors +process.containerOptions = "--user 0:0" + +// Kubernetes +k8s.securityContext = [ + "runAsUser": 0, + "runAsGroup": 0 +] +``` + +### Databases + +**

Q: Help! I upgraded to Tower Enterprise 22.2.0 and now my database connection is failing.

** + +Tower Enterprise 22.2.0 [introduced a breaking change](https://install.tower.nf/22.2/release_notes/22.2/#warnings) whereby the `TOWER_DB_DRIVER` is now required to be `org.mariadb.jdbc.Driver`. + +Clients who use Amazon Aurora as their database solution may encounter a `java.sql.SQLNonTransientConnectionException: ... could not load system variables` error, likely due to a [known error](https://jira.mariadb.org/browse/CONJ-824) tracked within the MariaDB project. + +Please modify Tower Enterprise configuration as follows to try resolving the problem: + +1. Ensure your `TOWER_DB_DRIVER` uses the specified MariaDB URI. +2. Modify your `TOWER_DB_URL` to: `TOWER_DB_URL=jdbc:mysql://YOUR_DOMAIN:YOUR_PORT/YOUR_TOWER_DB?usePipelineAuth=false&useBatchMultiSend=false` + +### Datasets + +**

Q: Why are uploads of Datasets via direct calls to the Tower API failing?

** + +When uploading Datasets via the Tower UI or CLI, some steps are automatically done on your behalf. To upload Datasets via the TOwer API, additional steps are required: + +1. Explicitly define the MIME type of the file being uploaded. +2. Make two calls to the API: + 1. Create a Dataset object + 2. Upload the samplesheet to the Dataset object. + +Example: + +```bash +# Step 1: Create the Dataset object +$ curl -X POST "https://api.cloud.seqera.io/workspaces/$WORKSPACE_ID/datasets/" -H "Content-Type: application/json" -H "Authorization: Bearer $TOWER_ACCESS_TOKEN" --data '{"name":"placeholder", "description":"A placeholder for the data we will submit in the next call"}' + +# Step 2: Upload the datasheet into the Dataset object +$ curl -X POST "https://api.cloud.seqera.io/workspaces/$WORKSPACE_ID/datasets/$DATASET_ID/upload" -H "Accept: application/json" -H "Authorization: Bearer $TOWER_ACCESS_TOKEN" -H "Content-Type: multipart/form-data" -F "file=@samplesheet_full.csv; type=text/csv" +``` + +:::tip +You can also use the [tower-cli](https://github.com/seqeralabs/tower-cli) to upload the dataset to a particular workspace. + + ```bash + tw datasets add --name "cli_uploaded_samplesheet" ./samplesheet_full.csv + ``` + +::: + +**

Q: Why is my uploaded Dataset not showing in the Tower Launch screen input field drop-down?

** + +When launching a Nextflow workflow from the Tower GUI, the `input` field drop-down will only show Datasets whose mimetypes match the rules specified in the associated `nextflow_schema.json` file. If your Dataset has a different mimetype than specified in the pipeline schema, Tower will not present the file. + +Note that a known issue in Tower 22.2 which caused TSV datasets to be unavailable in the drop-down has been fixed in version 22.4.1. + +**Example:** The default [nf-core RNASeq](https://github.com/nf-core/rnaseq) pipeline specifies that only files with a [`csv` mimetype](https://github.com/nf-core/rnaseq/blob/master/nextflow_schema.json#L18) should be provided as an input file. If you created a Dataset of mimetype `tsv`, it would not appear as an input filed dropdown option. + +**

Q: Can an input file mimetype restriction be added to the _nextflow_schema.json_ file generated by the nf-core pipeline schema builder tool?

** + +As of August 2022, it is possible to add a mimetype restriction to the _nextflow_schema.json_ file generated by the [nf-core schema builder tool](https://nf-co.re/pipeline_schema_builder) but this must occur manually after generation, not during. Please refer to this [RNASeq example](https://github.com/nf-core/rnaseq/blob/master/nextflow_schema.json#L18) to see how the `mimetype` key-value pair should be specified. + +**

Q: Why are my datasets converted to 'application/vnd.ms-excel' data type when uploading on a browser using Windows OS?

** + +This is a known issue when using Firefox browser with the Tower version prior to 22.2.0. You can either (a) upgrade the Tower version to 22.2.0 or higher or (b) use Chrome. + +For context, the Tower will prompt the message below if you encountered this issue. + +``` +"Given file is not a dataset file. Detected media type: 'application/vnd.ms-excel'. Allowed types: 'text/csv, text/tab-separated-values'" +``` + +**

Q: Why are TSV-formatted datasets not shown in the Tower launch screen input field drop-down menu?

** + +An issue was identified in Tower version 22.2 which caused TSV datasets to be unavailable in the input data drop-down menu on the launch screen. This has been fixed in Tower version 22.4.1. + +### Email and TLS + +**

Q: How do I solve TLS errors when attempting to send email?

** + +Nextflow and Nextflow Tower both have the ability to interact with email providers on your behalf. These providers often require TLS connections, with many now requiring at least TLSv1.2. + +TLS connection errors can occur due to variability in the [default TLS version specified by your underlying JDK distribution](https://aws.amazon.com/blogs/opensource/tls-1-0-1-1-changes-in-openjdk-and-amazon-corretto/). If you encounter any of the following errors, there is likely a mismatch between your default TLS version and what is expected by the email provider: + +- `Unexpected error sending mail ... TLS 1.0 and 1.1 are not supported. Please upgrade/update your client to support TLS 1.2" error` +- `ERROR nextflow.script.WorkflowMetadata - Failed to invoke 'workflow.onComplete' event handler ... javax.net.ssl.SSLHandshakeException: No appropriate protocol (protocol is disabled or cipher suites are inappropriate)` + +To fix the problem, you can either: + +1. Set a JDK environment variable to force Nextflow and/or the Tower containers to use TLSv1.2 by default: + +``` +export JAVA_OPTIONS="-Dmail.smtp.ssl.protocols=TLSv1.2" +``` + +2. Add the following parameter to your nextflow.config file: + +``` +mail { + smtp.ssl.protocols = 'TLSv1.2' +} +``` + +In both cases, please ensure these values are also set for Nextflow and/or Tower: + +- `mail.smtp.starttls.enable=true` +- `mail.smtp.starttls.required=true` + +### Git integration + +**

Q: Tower authentication to BitBucket fails, with the Tower backend log containing a warning: "Can't retrieve revisions for pipeline - https://my.bitbucketserver.com/path/to/pipeline/repo - Cause: Get branches operation not support by BitbucketServerRepositoryProvider provider"

** + +If you have supplied correct BitBucket credentials and URL details in your tower.yml, but experience this error, update your Tower version to at least v22.3.0. This version addresses SCM provider authentication issues and is likely to resolve the retrieval failure described here. + +### Healthcheck + +**

Q: Does Tower offer a healthcheck API endpoint?

** + +Yes. Customers wishing to implement automated healtcheck functionality should use Tower's `service-info` endpoint. + +Example: + +``` +# Run a healthcheck and extract the HTTP response code: +$ curl -o /dev/null -s -w "%{http_code}\n" --connect-timeout 2 "https://api.cloud.seqera.io/service-info" -H "Accept: application/json" +200 +``` + +### Logging + +**

Q: Can Tower enable detailed logging related to sign-in activity?

** + +Yes. For more detailed logging related to login events, set the following environment variable: `TOWER_SECURITY_LOGLEVEL=DEBUG`. + +**

Q: Can Tower enable detailed logging related to application activities?

** + +Yes. For more detailed logging related to application activities, set the following environment variable: `TOWER_LOG_LEVEL=TRACE`. + +**
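Combining the two settings above, a `tower.env` sketch for a troubleshooting session might look like this (revert the values once the investigation is complete):

```bash
# tower.env (sketch): increase log verbosity while troubleshooting.
TOWER_SECURITY_LOGLEVEL=DEBUG   # detailed login/authentication events
TOWER_LOG_LEVEL=TRACE           # detailed application activity
```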

Q: Version 22.3.1: My downloaded Nextflow log file is broken.

** + +A Tower Launcher issue has been identified which affects the Nextflow log file download in Tower version 22.3.1. A patch was released in version 22.3.2 that addresses this behavior. Update Tower to version 22.3.2 or later. + +### Login + +**

Q: Can I completely disable Tower's email login feature?

** + +The email login feature cannot be completely removed from the Tower login screen. + +**

Q: How can I restrict Tower access to only a subset of email addresses?

** + +You can restrict which emails are allowed to have automatic access to your Tower implementation via a configuration in _tower.yml_. + +Users without automatic access will receive an acknowledgment of their login request but be unable to access the platform until approved by a Tower administrator via the Administration Console. + +```yaml +# Any email address that matches a pattern here will have automatic access. +tower: + trustedEmails: + - '*@seqera.io' + - 'named_user@example.com' +``` + +**

Q: Why am I receiving login errors stating that admin approval is required when using Azure AD OIDC?

** + +The Azure AD app integrated with Tower must have user consent settings configured to "Allow user consent for apps" to ensure that admin approval is not required for each application login. See [User consent settings](https://learn.microsoft.com/en-us/azure/active-directory/manage-apps/configure-user-consent?pivots=portal#configure-user-consent-settings). + +**

Q: Why is my OIDC redirect_url set to http instead of https?

** + +This can occur for several reasons. Please verify the following: + +1. Your `TOWER_SERVER_URL` environment variable uses the `https://` prefix. +2. Your `tower.yml` has `micronaut.ssl.enabled` set to `true`. +3. Any Load Balancer instance that sends traffic to the Tower application is configured to use HTTPS as its backend protocol rather than TCP. + +**
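For the first check, a `tower.env` sketch with the expected scheme (the hostname is a placeholder):

```bash
# tower.env (sketch): the server URL must use https:// so the OIDC
# redirect_url is generated with the correct scheme.
TOWER_SERVER_URL=https://tower.example.com
```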

Q: Why isn't my OIDC callback working?

** + +Callbacks can fail for many reasons. To investigate the problem more effectively: + +1. Set the Tower environment variable `TOWER_SECURITY_LOGLEVEL=DEBUG`. +2. Ensure your `TOWER_OIDC_CLIENT`, `TOWER_OIDC_SECRET`, and `TOWER_OIDC_ISSUER` environment variables all match the values specified in your OIDC provider's corresponding application (see the sketch below). +3. Ensure your network infrastructure allows the necessary egress and ingress traffic. + +**
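A sketch of the values to cross-check against your identity provider's application registration (all values below are placeholders):

```bash
# tower.env (sketch)
TOWER_OIDC_CLIENT=<client-id-from-your-idp>
TOWER_OIDC_SECRET=<client-secret-from-your-idp>
TOWER_OIDC_ISSUER=https://idp.example.com/realms/tower   # issuer URL registered with your IdP
TOWER_SECURITY_LOGLEVEL=DEBUG                            # enable while troubleshooting
```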

Q: Why did Google SMTP start returning `Username and Password not accepted` errors?

** +Previously functioning Tower Enterprise email integration with Google SMTP are likely to encounter errors as of May 30, 2022 due to a [security posture change](https://support.google.com/accounts/answer/6010255#more-secure-apps-how&zippy=%2Cuse-more-secure-apps) implemented by Google. + +To reestablish email connectivity, please follow the instructions at [https://support.google.com/accounts/answer/3466521](https://support.google.com/accounts/answer/3466521) to provision an app password. Update your `TOWER_SMTP_PASSWORD` environment variable with the app password, and restart the application. + +### Logging + +**

Q: Can Tower enable detailed logging related to sign-in activity?

** + +Yes. For more detailed logging related to login events, set the following environment variable: `TOWER_SECURITY_LOGLEVEL=DEBUG`. + +**

Q: Can Tower enable detailed logging related to application activities?

** + +Yes. For more detailed logging related to application activities, set the following environment variable: `TOWER_LOG_LEVEL=TRACE`. + +### Miscellaneous + +**

Q: Is my data safe?

** + +Yes, your data stays strictly within **your** infrastructure itself. When you launch a workflow through Tower, you need to connect your infrastructure (HPC/VMs/K8s) by creating the appropriate credentials and compute environment in a workspace. + +Tower then uses this configuration to trigger a Nextflow workflow within your infrastructure similar to what is done via the Nextflow CLI, therefore Tower does not manipulate any data itself and no data is transferred to the infrastructure where Tower is running. + +### Monitoring + +**

Q: Can Tower integrate with 3rd party Java-based Application Performance Monitoring (APM) solutions?

** + +Yes. You can mount the APM solution's JAR file in the `backend` container and set the agent JVM option via the `JAVA_OPTS` env variable. + +**
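As an illustration only (the JAR path is an assumption; consult your APM vendor's documentation for the agent artifact and its options), mount the agent JAR into the `backend` container and pass the standard JVM `-javaagent` flag:

```bash
# Environment for the backend container (sketch).
JAVA_OPTS="-javaagent:/opt/apm/apm-agent.jar"
```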

Q: Is it possible to retrieve the trace file for a Tower-based workflow run?

** +Yes. Although it is not possible to directly download the file via Tower, you can configure your workflow to export the file to persistent storage: + +1. Set the following block in your `nextflow.config`: + +```nextflow +trace { + enabled = true +} +``` + +2. Add a copy command to your pipeline's **Advanced options > Post-run script** field: + +``` +# Example: Export the generated trace file to an S3 bucket +# Ensure that your Nextflow head job has the necessary permissions to interact with the target storage medium! +aws s3 cp ./trace.txt s3://MY_BUCKET/trace/trace.txt +``` + +**

Q: When monitoring pipeline execution via the Runs tab, why do I occasionally see Tower reporting "_Live events sync offline_"?

** + +Nextflow Tower uses [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events) to push real-time updates to your browser. The client must establish a connection to the Nextflow Tower server's `/api/live` endpoint to initiate the stream of data, and this connection can occasionally fail due to factors like network latency. + +To resolve the issue, please try reloading the UI to reinitiate the client's connection to the server. If reloading fails to resolve the problem, please contact Seqera Support for assistance with webserver timeout settings adjustments. + +### Nextflow Configuration + +**

Q: How can I specify Nextflow CLI run arguments when launching from Tower?

** + +As of Nextflow v22.09.1-edge, when invoking a pipeline from Tower, you can specify [Nextflow CLI run arguments](https://www.nextflow.io/docs/latest/cli.html?highlight=dump#run) by setting the `NXF_CLI_OPTS` environment variable via pre-run script: + +``` +# Example: +export NXF_CLI_OPTS='-dump-hashes' +``` + +**

Q: Can a repository's `nextflow_schema.json` support multiple input file mimetypes?

** + +No. As of April 2022, it is not possible to configure an input field ([example](https://github.com/nf-core/rnaseq/blob/master/nextflow_schema.json#L16-L21)) to support different mime types (e.g. a `text/csv`-type file during one execution, and a `text/tab-separated-values` file in a subsequent run). + +**

Q: Why are my `--outdir` artefacts not available when executing runs in a cloud environment?

** + +As of April 2022, Nextflow resolves relative paths against the current working directory. In a classic grid HPC, this normally corresponds to a subdirectory of the user's $HOME directory. In a cloud execution environment, however, the path will be resolved relative to the **container file system**, meaning files will be lost when the container is terminated. [See here for more details](https://github.com/nextflow-io/nextflow/issues/2661#issuecomment-1047259845). + +Tower users can avoid this problem by specifying the following configuration in the **Advanced options > Nextflow config file** configuration textbox: `params.outdir = workDir + '/results'`. This will ensure the output files are written to your stateful storage rather than ephemeral container storage. + +**

Q: Can Nextflow be configured to ignore a Singularity cache?

** + +Yes. To ignore the Singularity cache, add the following configuration item to your workflow: `process.container = 'file:///some/singularity/image.sif'`. + +**

Q: Why does Nextflow fail with a `WARN: Cannot read project manifest ... path=nextflow.config` error message?

** + +This error can occur when executing a pipeline where the source git repository's default branch is not populated with `main.nf` and `nextflow.config` files, regardless of whether the invoked pipeline is using a non-default revision/branch (e.g. `dev`). + +Current as of May 16, 2022, there is no solution for this problem other than to create blank `main.nf` and `nextflow.config` files in the default branch. This will allow the pipeline to run, using the content of the `main.nf` and `nextflow.config` in your target revision. + +**
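A sketch of the workaround, assuming the repository's default branch is called `main`:

```bash
# Create empty placeholder files on the default branch so the project manifest can be read.
git checkout main
touch main.nf nextflow.config
git add main.nf nextflow.config
git commit -m "Add placeholder manifest files for Nextflow/Tower"
git push origin main
```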

Q: Is it possible to maintain different Nextflow configuration files for different environments?

** + +Yes. The main `nextflow.config` file will always be imported by default. Instead of managing multiple `nextflow.config` files (each customized for an environment), you can create unique environment config files and import them as [their own profile](https://www.nextflow.io/docs/latest/config.html#config-profiles) in the main `nextflow.config`. + +Example: + +``` +// nextflow.config + + + +profiles { + test { includeConfig 'conf/test.config' } + prod { includeConfig 'conf/prod.config' } + uat { includeConfig 'conf/uat.config' } +} + + +``` + +**

Q: Is there a limitation to the size of the BAM files that can be uploaded to the S3 bucket?

** + +You will see this on your log file if you encountered an error related to this: +` WARN: Failed to publish file: s3://[bucket-name]` + +AWS have a limitation on the size of the object that can be uploaded to S3 when using the multipart upload feature. You may refer to this [documentation for more information.](https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) For this specific instance, it is hitting the _maximum number of parts per upload_. + +The following configuration are suggested to work with the above stated AWS limitation: + +- Head Job CPUs = 16 +- Head Job Memory = 60000 +- Pre-run script = export NXF_OPTS="-Xms20G -Xmx40G" +- Update the `nextflow.config` to increase the chunk size and slow down the number of transfers. + + ``` + aws { + batch { + maxParallelTransfers = 5 + maxTransferAttempts = 3 + delayBetweenAttempts = 30 + } + client { + uploadChunkSize = '200MB' + maxConnections = 10 + maxErrorRetry = 10 + uploadMaxThreads = 10 + uploadMaxAttempts = 10 + uploadRetrySleep = '10 sec' + } + } + ``` + +**

Q: Why is Nextflow forbidden to retrieve a params file from Nextflow Tower?

** + +Ephemeral endpoints can only be consumed once. Nextflow versions older than `22.04` may try to call the same endpoint more than once, resulting in an error similar to the following: +`Cannot parse params file: /ephemeral/example.json - Cause: Server returned HTTP response code: 403 for URL: https://api.tower.nf/ephemeral/example.json`. + +To resolve this problem, please upgrade your Nextflow version to version `22.04.x` or later. + +**
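If you cannot change the Nextflow version baked into your launcher image immediately, the version can also be pinned from the launch form's **Pre-run script** field; the exact 22.04.x patch release shown below is illustrative:

```bash
# Pre-run script (sketch): force the launcher to use a Nextflow release
# that fetches the ephemeral params file only once.
export NXF_VER=22.04.3
```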

Q: How can I prevent Nextflow from uploading intermediate files from local scratch to my S3 work directory?

** + +Nextflow will only unstage files/folders that have been explicitly defined as process outputs. If your workflow has processes that generate folder-type outputs, please ensure that the process also purges any intermediate files that reside within. Failure to do so will result in the intermediate files being copied as part of the task unstaging process, resulting in additional storage costs and lengthened pipeline execution times. + +**
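As an illustration, a process that publishes a whole folder can delete its own scratch files at the end of its script block so they are never unstaged (the file names below are hypothetical):

```bash
# Final lines of a task script whose output is the folder `results/`:
# purge temporary files so they are not copied back to the S3 work directory.
rm -f results/*.tmp results/intermediate_*.bam
```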

Q: Why do some values specified in my git repository's _nextflow.config_ change when the pipeline is launched via Tower?

** +You may notice that some values specified in your pipeline repository's _nextflow.config_ have changed when the pipeline is invoked via Tower. This occurs because Tower is configured with a set of default values that are superimposed on the pipeline configuration (with the Tower defaults winning). + +**Example:** +The following code block is specified in your _nextflow.config_: + +``` +aws { + region = 'us-east-1' + client { + uploadChunkSize = 209715200 // 200 MB + } + ... +} +``` + +When the job instantiates on the AWS Batch Compute Environment, you will see that the `uploadChunkSize` changed: + +``` +aws { + region = 'us-east-1' + client { + uploadChunkSize = 10485760 // 10 MB + } + ... +} +``` + +This change occurred because Tower superimposes its 10 MB default value rather than using the value specified in the _nextflow.config_ file. + +To force the Tower-invoked job to use your desired value, please add the configuration setting in the Tower Workspace Launch screen's [**Advanced options > Nextflow config file textbox**](https://help.tower.nf/22.2/launch/advanced/#nextflow-config-file). In the case of our example above, you would simply need to add `aws.client.uploadChunkSize = 209715200 // 200 MB` . + +Nextflow configuration values that are affected by this behaviour include: + +- aws.client.uploadChunkSize +- aws.client.storageEncryption + +**

Q: `Missing output file(s) [X] expected by process [Y]` error during task execution in an environment using Fusion v1

** + +Fusion v1 has a limitation which causes tasks that run for less than 60 seconds to fail as the output file generated by the task is not yet detected by Nextflow. This is a limitation inherited from a Goofys driver used by the Fusion v1 implementation. Fusion v2 (to be made available to Tower Enterprise users during Q1 of 2023) resolves this issue. + +If Fusion v2 is not yet available, or updating to v2 is not feasible, this issue can be addressed by instructing Nextflow to wait for 60 seconds after the task completes. + +From **Advanced options > Nextflow config file** in **Pipeline settings**, add the following line to your Nextflow configuration: + +``` +process.afterScript = 'sleep 60' +``` + +**

Q: Why are jobs in RUNNING status not terminated when my pipeline run is canceled?

** + +The behavior of Tower when canceling a run depends on the [`errorStrategy`](https://www.nextflow.io/docs/latest/process.html#errorstrategy) defined in your process script. If the process `errorStrategy` is set to `finish`, an orderly pipeline shutdown is initiated when you cancel (or otherwise interrupt) a run. This instructs Nextflow to wait for the completion of any submitted jobs. To ensure that all jobs are terminated when your run is canceled, set `errorStrategy` to `terminate` in your Nextflow config. For example: + +``` +process myTask { + errorStrategy 'terminate' + + script: + """ + # task commands here + """ +} +``` + +**

Q: Why do some cached tasks run from scratch when I re-launch a pipeline?

** + +When re-launching a pipeline, Tower relies on Nextflow's `resume` functionality for the continuation of a workflow execution. This skips previously completed tasks and uses a cached result in downstream tasks, rather than running the completed tasks again. The unique ID (hash) of the task is calculated using a composition of the task's: + +- Input values +- Input files +- Command line string +- Container ID +- Conda environment +- Environment modules +- Any executed scripts in the bin directory + +A change in any of these values results in a changed task hash. Changing the task hash value means that the task will be run again when the pipeline is re-launched. To aid debugging efforts when a re-launch behaves unexpectedly, run the pipeline twice with `dumpHashes=true` set in your Nextflow config file (from **Advanced options -> Nextflow config file** in the Pipeline settings). This will instruct Nextflow to dump the task hashes for both executions in the `nextflow.log` file. You can compare the log files to determine the point at which the hashes diverge in your pipeline when it is resumed. + +See [here](https://www.nextflow.io/blog/2019/demystifying-nextflow-resume.html) for more information on the Nextflow `resume` mechanism. + +**

Q: Why does my run fail with an "o.h.e.jdbc.spi.SqlExceptionHelper - Incorrect string value" error?

** + +```bash + + [scheduled-executor-thread-2] - WARN o.h.e.jdbc.spi.SqlExceptionHelper - SQL Error: 1366, SQLState: HY000 + [scheduled-executor-thread-2] - ERROR o.h.e.jdbc.spi.SqlExceptionHelper - (conn=34) Incorrect string value: '\xF0\x9F\x94\x8D |...' for column 'error_report' at row 1 + [scheduled-executor-thread-2] - ERROR i.s.t.service.job.JobSchedulerImpl - Oops .. unable to save status of job id=18165; name=nf-workflow-26uD5XXXXXXXX; opId=nf-workflow-26uD5XXXXXXXX; status=UNKNOWN + +``` + +Runs will fail if your Nextflow script or Nextflow config contain illegal characters (such as emojis or other non-UTF8 characters). Validate your script and config files for any illegal characters before atttempting to run again. + +### Nextflow Launcher + +**

Q: There are several nf-launcher images available in the [Seqera image registry](https://quay.io/repository/seqeralabs/nf-launcher?tab=tags). How can I tell which one is most appropriate for my implementation?

** + +Your Tower implementation knows the nf-launcher image version it needs and will specify this value automatically when launching a pipeline. + +If you are restricted from using public container registries, please see Tower Enterprise Release Note instructions ([example](https://install.tower.nf/22.1/release_notes/22.1/#nextflow-launcher-image)) for the specific image you should use and how to set this as the default when invoking pipelines. + +**

Q: The nf-launcher is pinned to a specific Nextflow version. How can I make it use a different release?

** + +Each Nextflow Tower release uses a specific nf-launcher image by default. This image is loaded with a specific Nextflow version, meaning that any workflow run in the container uses this Nextflow version by default. You can force your jobs to use a newer/older version of Nextflow with any of the following strategies: + +1. Use the **Pre-run script** advanced launch option to set the desired Nextflow version. Example: `export NXF_VER=22.08.0-edge` +2. For jobs executing in an AWS Batch compute environment, create a [custom job definition](https://install.tower.nf/22.2/advanced-topics/custom-launch-container/) which references a different nf-launcher image. + +### OIDC + +**

Q: Can I have users seamlessly log in to Tower if they already have an active session with their OpenId Connect (OIDC) Identity Provider (IDP)?

** + +Yes. If you are using OIDC as your authentication method, it is possible to implement a seamless login flow for your users. + +Rather than directing your users to `http(s)://YOUR_TOWER_HOSTNAME` or `http(s)://YOUR_TOWER_HOSTNAME/login`, point the user-initiated login URL here instead: `http(s)://YOUR_TOWER_HOSTNAME/oauth/login/oidc`. + +If your user already has an active session established with the IDP, they will be automatically logged into Tower rather than having to manually choose their authentication method. + +### Optimization + +**

Q: When using optimization, why are tasks failing with an `OutOfMemoryError: Container killed due to memory usage` error?

** + +Improvements are being made to the way Nextflow calculates the optimal memory needed for containerized tasks, which will resolve issues with underestimating memory allocation in an upcoming release. + +A temporary workaround for this issue is to implement a `retry` error strategy in the failing process that will increase the allocated memory each time the failed task is retried. Add the following `errorStrategy` block to the failing process: + +```bash +process { + errorStrategy = 'retry' + maxRetries = ​3 + memory = 1.GB * task.attempt +} +``` + +### Plugins + +**

Q: Is it possible to use the Nextflow SQL DB plugin to query AWS Athena?

** + +Yes. As of [Nextflow 22.05.0-edge](https://github.com/nextflow-io/nextflow/releases/tag/v22.05.0-edge), your Nextflow pipelines can query data from AWS Athena. +You must add the following configuration items to your `nextflow.config` (**Note:** the use of secrets is optional): + +``` +plugins { + id 'nf-sqldb@0.4.0' +} + +sql { + db { + 'athena' { + url = 'jdbc:awsathena://AwsRegion=YOUR_REGION;S3OutputLocation=s3://YOUR_S3_BUCKET' + user = secrets.ATHENA_USER + password = secrets.ATHENA_PASSWORD + } + } +} +``` + +You can then call the functionality from within your workflow. + +``` +// Example + channel.sql.fromQuery("select * from test", db: "athena", emitColumns:true).view() +} +``` + +For more information on the implementation, please see [https://github.com/nextflow-io/nf-sqldb/discussions/5](https://github.com/nextflow-io/nf-sqldb/discussions/5). + +### Repositories + +**

Q: Can Tower integrate with private docker registries like JFrog Artifactory?

** + +Yes. Tower-invoked jobs can pull container images from private docker registries. The method to do so differs depending on platform, however: + +- If using AWS Batch, modify your EC2 Launch Template as per [these directions from AWS](https://aws.amazon.com/blogs/compute/how-to-authenticate-private-container-registries-using-aws-batch/).
**Note:** + - This solution requires that your Docker Engine be [at least 17.07](https://docs.docker.com/engine/release-notes/17.07/) to use `--password-stdin`. + - You may need to add the following additional commands to your Launch Template depending on your security posture:
+ `cp /root/.docker/config.json /home/ec2-user/.docker/config.json && chmod 777 /home/ec2-user/.docker/config.json` +- If using Azure Batch, please create a **Container Registry**-type credential in your Tower Workspace and associate it with the Azure Batch object also defined in the Workspace. +- If using Kubernetes, please use an `imagePullSecret` as per [https://github.com/nextflow-io/nextflow/issues/2827](https://github.com/nextflow-io/nextflow/issues/2827). + +**

Q: Why does my Nextflow log have a `Remote resource not found` error when trying to contact the workflow repository?

** + +This error can occur if the Nextflow head job fails to retrieve the necessary repository credentials from Nextflow Tower. + +To determine if this is the case, please do the following: + +1. Check your Nextflow log for an entry like `DEBUG nextflow.scm.RepositoryProvider - Request [credentials -:-]`. +2. If the above is true, please check the protocol of the string that was assigned to your Tower instance's `TOWER_SERVER_URL` configuration value. It is possible this has been erroneously set to `http` rather than `https`. + +### Secrets + +**

Q: When using Secrets in a Tower workflow run, why does the process fail with a `Missing AWS execution role arn` error?

** + +The [ECS Agent must be empowered](https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html) to retrieve Secrets from the AWS Secrets Manager. Secrets-using pipelines that are launched from Nextflow Tower and execute in an AWS Batch Compute Environment will encounter this error if an IAM Execution Role is not provided. Please see the [Pipeline Secrets](https://help.tower.nf/22.2/secrets/overview/) for remediation steps. + +**
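As a rough sketch only (the role name is a placeholder, and a narrowly-scoped custom policy limited to the specific secrets is preferable to the broad managed policy shown), the execution role you provide must be able to read from AWS Secrets Manager:

```bash
# Attach a Secrets Manager policy to the execution role referenced by your Batch jobs.
aws iam attach-role-policy \
  --role-name my-batch-execution-role \
  --policy-arn arn:aws:iam::aws:policy/SecretsManagerReadWrite
```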

Q: Why do work tasks which use Secrets fail when running in AWS Batch?

** + +Users may encounter a few different errors when executing pipelines that use Secrets via AWS Batch: + +- If you use `nf-sqldb` version 0.4.1 or earlier and have Secrets in your `nextflow.config`, you may see the following error in your Nextflow log: `nextflow.secret.MissingSecretException: Unknown config secret {SECRET_NAME}`.
+ You can resolve this error by explicitly defining the `xpack-amzn` plugin in your configuration.
+ Example: + + ``` + plugins { + id 'xpack-amzn' + id 'nf-sqldb' + } + ``` + +- If you have two or more processes that use the same container image, but only a subset of these processes use Secrets, your Secret-using processes may fail during the initial run but succeed when resumed. This is due to an bug in how Nextflow (22.07.1-edge and earlier) registers jobs with AWS Batch. + + To resolve the issue, please upgrade your Nextflow version to 22.08.0-edge. If you cannot upgrade, you can use the following as workarounds: + + 1. Use a different container image for each process. + 2. Define the same set of Secrets in each process that uses the same container image. + +### Tower Agent + +**

Q: Tower Agent closes a session with "Unexpected Exception in WebSocket [io.seqera.tower.agent.AgentClientSocket$Intercepted@698514a]: Operation timed out java.io.IOException: Operation timed out"

** + +The reconnection logic of Tower Agent has been improved with the release of version 0.5.0. [Update your Tower Agent version](https://github.com/seqeralabs/tower-agent) before relaunching your pipeline. + +### Tower Configuration + +**

Q: Can I customize menu items on the Tower navigation menu?

** + +Yes. Using the `navbar` snippet in the tower.yml configuration file, you can specify custom navigation menu items for your Tower installation. See [here](https://install.tower.nf/22.3/configuration/overview/#logging-format) for more details. + +**

Q: Can a custom path be specified for the `tower.yml` configuration file?

** + +Yes. Provide a POSIX-compliant path to the `TOWER_CONFIG_FILE` environment variable. + +**
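For example (the path is a placeholder):

```bash
# tower.env (sketch): point the application at a tower.yml stored outside the default location.
TOWER_CONFIG_FILE=/opt/tower/config/tower.yml
```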

Q: Why do parts of `tower.yml` not seem to work when I run my Tower implementation?

** + +There are two reasons why configurations specified in `tower.yml` are not being expressed by your Tower instance: + +1. There is a typo in one of the key value pairs. +2. There is a duplicate key present in your file. + + ```yaml + # EXAMPLE + # This block will not end up being enforced because there is another `tower` key below. + tower: + trustedEmails: + - user@example.com + + # This block will end up being enforced because it is defined last. + tower: + auth: + oidc: + - "*@foo.com" + ``` + +**

Q: Do you have guidance on how to create custom Nextflow containers?

** + +Yes. Please see [https://github.com/seqeralabs/gatk4-germline-snps-indels/tree/master/containers](https://github.com/seqeralabs/gatk4-germline-snps-indels/tree/master/containers). + +**

Q: What DSL version does Nextflow Tower set as default for Nextflow head jobs?

** + +As of [Nextflow 22.03.0-edge](https://github.com/nextflow-io/nextflow/releases/tag/v22.03.0-edge), DSL2 is the default syntax. + +To minimize disruption on existing pipelines, Nextflow Tower version 22.1.x and later are configured to default Nextflow head jobs to DSL 1 for a transition period (ending TBD). + +You can force your Nextflow head job to use DSL2 syntax via any of the following techniques: + +- Adding `export NXF_DEFAULT_DSL=2` in the **Advanced Features > Pre-run script** field of Tower Launch UI. +- Specifying `nextflow.enable.dsl = 2` at the top of your Nextflow workflow file. +- Providing the `-dsl2` flag when invoking the Nextflow CLI (e.g. `nextflow run ... -dsl2`) + +**

Q: Can Tower use a Nextflow workflow stored in a local git repository?

** + +Yes. As of v22.1, Nextflow Tower Enterprise can link to workflows stored in "local" git repositories. To do so: + +1. Volume mount your repository folder into the Tower Enterprise `backend` container. +2. Update your `tower.yml` with the following configuration: + +```yml +tower: + pipeline: + allow-local-repos: + - /path/to/repo +``` + +Note: This feature is not available to Tower Cloud users. + +**

Q: Am I forced to define sensitive values in `tower.env`?

** +No. You can inject values directly into `tower.yml` or - in the case of a Kubernetes deployment - reference data from a secrets manager like Hashicorp Vault. + +Please contact Seqera Labs for more details if this is of interest. + +### Batch Forge + +**

Q: What does the `Enable GPU` option do when building an AWS Batch cluster via Batch Forge?

** + +Activating the **Enable GPU** field while creating an AWS Batch environment with Batch Forge will result in an [AWS-recommended GPU-optimized ECS AMI](https://docs.aws.amazon.com/batch/latest/userguide/batch-gpu-ami.html) being used as your Batch cluster's default image. + +Note: + +1. Activation does not cause GPU-enabled instances to automatically spawn in your Batch cluster. You must still specify these in the Forge screen's **Advanced options > Instance types** field. +2. Population of the Forge screen's **Advanced options > AMI Id** field will supersede the AWS-recommended AMI. +3. Your Nextflow script must include [accelerator directives](https://www.nextflow.io/docs/latest/process.html?highlight=accelerator) to use the provisioned GPUs. + +### tw CLI + +**

Q: Can a custom run name be specified when launching a pipeline via the `tw` CLI?

** + +Yes. As of `tw` v0.6.0, this is possible. Example: `tw launch --name CUSTOM_NAME ...` + +**

Q: Why are tw cli commands resulting in segfault errors?

** + +`tw` cli versions 0.6.1 through 0.6.4 were compiled using glibc instead of MUSL. This change was discovered to cause segfaults in certain operating systems and has been rolled back in [tw cli 0.6.5](https://github.com/seqeralabs/tower-cli/releases/tag/v0.6.5). + +To resolve this error, please try using the MUSL-based binary first. If this fails to work on your machine, an alternative Java JAR-based solution is available for download and use. + +**

Q: Can `tw cli` communicate with hosts using http?

** + +This error indicates that your Tower host accepts connections using http (insecure), rather than https. If your host cannot be configured to accept https connections, run your tw cli command with the `--insecure` flag. + +``` + ERROR: You are trying to connect to an insecure server: `http://hostname:port/api` + if you want to force the connection use '--insecure'. NOT RECOMMENDED! +``` + +To do this, add the `--insecure` flag before your cli command (see below). Note that, although this approach is available for use in deployments that do not accept `https:` connections, it is not recommended. Best practice is to use `https:` wherever possible. + +``` +$ tw --insecure info + +``` + +Note: The `${TOWER_API_ENDPOINT}` is equivalent to the `${TOWER_SERVER_URL}/api`. + +**

Q: Can a user resume/relaunch a pipeline using the tw cli?

** + +Yes, it is possible with `tw runs relaunch`. + +``` +$ tw runs relaunch -i 3adMwRdD75ah6P -w 161372824019700 + + Workflow 5fUvqUMB89zr2W submitted at [org / private] workspace. + + +$ tw runs list -w 161372824019700 + + Pipeline runs at [org / private] workspace: + + ID | Status | Project Name | Run Name | Username | Submit Date + ----------------+-----------+----------------+-----------------+-------------+------------------------------- + 5fUvqUMB89zr2W | SUBMITTED | nf/hello | magical_darwin | seqera-user | Tue, 10 Sep 2022 14:40:52 GMT + 3adMwRdD75ah6P | SUCCEEDED | nf/hello | high_hodgkin | seqera-user | Tue, 10 Sep 2022 13:10:50 GMT + + +``` + +### Workspaces + +**

Q: Why is my Tower-invoked pipeline trying to contact a different Workspace than the one it was launched from?

** + +This problem manifests as the following entry in your Nextflow log: `Unexpected response for request http://YOUR_TOWER_URL/api/trace/TRACE_ID/begin?workspaceId=WORKSPACE_ID`. + +This can occur due to the following reasons: + +1. An access token value has been hardcoded in the `tower.accessToken` block of your `nextflow.config` (either via the git repository itself or an override value in the launch form). +2. In cases where your compute environment is an HPC cluster, the credentialized user's home directory contains a stateful `nextflow.config` with a hardcoded token (e.g. `~/.nextflow/config`). + +**

Q: What privilege level is granted to a user assigned to a Workspace both as a Participant and Team member?

** + +It is possible for a user to be concurrently assigned to a Workspace both as a named Participant and member of a Team. In such cases, Tower will grant the **higher** of the two privilege sets. + +Example: + +- If the Participant role is Launch and the Team role is Admin, the user will have Admin rights. +- If the Participant role is Admin and the Team role is Launch, the user will have Admin rights. +- If the Participant role is Launch and the Team role is Launch, the user will have Launch rights. + +As a best practice, Seqera suggests using Teams as the primary vehicle for assigning rights within a Workspace and only adding named Participants when one-off privilege escalations are deemed necessary. + +## Amazon + +### EBS + +**

Q: EBS Autoscaling: Why do some EBS volumes remain active after their associated jobs have completed?

** + +The EBS autoscaling solution relies on an AWS-provided script running on each container host. This script performs AWS EC2 API requests to delete EBS volumes when the jobs using those volumes have been completed. When running large Batch clusters (hundreds of compute nodes or more), EC2 API rate limits may cause the deletion of unattached EBS volumes to fail. Volumes that remain active after Nextflow jobs have been completed will incur additional costs and should therefore be manually deleted. You can monitor your AWS account for any orphaned EBS volumes via the EC2 console or with a Lambda function. See [here](https://aws.amazon.com/blogs/mt/controlling-your-aws-costs-by-deleting-unused-amazon-ebs-volumes/) for more information. + +### EC2 Instances + +**

Q: Can I run a Nextflow head job on AWS Graviton instances?

** + +Yes, Nextflow supports Graviton architecture — use AWS Batch queues with Graviton-based instance types. + +### ECS + +**

Q: How often are Docker images pulled by the ECS Agent?

** + +As part of the AWS Batch creation process, Batch Forge will set ECS Agent parameters in the EC2 Launch Template that is created for your cluster's EC2 instances: + +- For clients using Tower Enterprise v22.01 or later: + - Any AWS Batch environment created by Batch Forge will set the ECS Agent's `ECS_IMAGE_PULL_BEHAVIOR` to `once`. +- For clients using Tower Enterprise v21.12 or earlier: + - Any AWS Batch environment created by Batch Forge will set the ECS Agent's `ECS_IMAGE_PULL_BEHAVIOR` to `default`. + +Please see the [AWS ECS documentation](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) for an in-depth explanation of this difference. +

**Note:**

This behaviour cannot be changed within the Tower Application. + +**

Q: We encountered an "unable to parse HTTP 429 response body" error.

** + +`CannotPullContainerError: Error response from daemon: error parsing HTTP 429 response body: invalid character 'T' looking for beginning of value: "Too Many Requests (HAP429)"` + +This is caused by the Docker Hub rate limit of 100 anonymous pulls per 6 hours. We suggest adding the following to your launch template to avoid this issue: + +`echo ECS_IMAGE_PULL_BEHAVIOR=once >> /etc/ecs/ecs.config` + +**

Q: Help! My job failed due to a CannotInspectContainerError error.

** + +There are multiple reasons why your pipeline could fail with an `Essential container in task exited - CannotInspectContainerError: Could not transition to inspecting; timed out after waiting 30s` error. + +Please try the following: + +1. [Upgrade your ECS Agent](https://github.com/aws/amazon-ecs-agent/releases) to [1.54.1](https://github.com/aws/amazon-ecs-agent/pull/2940) or newer ([instructions for checking your ECS Agent version](https://www.trendmicro.com/cloudoneconformity/knowledge-base/aws/ECS/latest-agent-version.html)); +2. Provision more storage space for your EC2 instance (preferably via ebs-autoscaling to ensure scalability). +3. If the error is accompanied by `command exit status: 123` and a `permissions denied` error tied to a system command, please ensure that the binary is set to be executable (i.e. `chmod u+x`). + +### Queues + +**

Q: Does Nextflow Tower support the use of multiple AWS Batch queues during a single job execution?

** + +Yes. Even though you can only create/identify a single work queue during the definition of your AWS Batch Compute Environment within Nextflow Tower, you can spread tasks across multiple queues when your job is sent to Batch for execution via your pipeline configuration. + +Adding the following snippet to either your _nextflow.config_ or the **Advanced Features > Nextflow config file** field of the Tower Launch UI will cause processes to be distributed across two AWS Batch queues, depending on the assigned name. + +``` +// nextflow.config + +process { + withName: foo { + queue = 'TowerForge-1jJRSZmHyrrCvCVEOhmL3c-work' + } + withName: bar { + queue = 'custom-second-queue' + } +} +``` + +### Security + +**

Q: Can Tower connect to an RDS instance using IAM credentials instead of username/password?

** + +No. Nextflow Tower must be supplied with a username & password to connect to its associated database. + +### Storage + +**

Q: Can I use EFS as my work directory?

** + +As of Nextflow Tower v21.12, you can specify an Amazon Elastic File System instance as your Nextflow work directory when creating your AWS Batch Compute Environment via Batch Forge. + +**

Q: Can I use FSx for Lustre as my work directory?

** + +As of Nextflow Tower v21.12, you can specify an Amazon FSx for Lustre instance as your Nextflow work directory when creating your AWS Batch Compute Environment via Batch Forge. + +**

Q: How do I configure my Tower-invoked pipeline to be able to write to an S3 bucket that enforces AES256 server-side encryption?

** + +If you need to save files to an S3 bucket protected by a [bucket policy which enforces AES256 server-side encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingServerSideEncryption.html), additional configuration settings must be provided to the [nf-launcher](https://quay.io/repository/seqeralabs/nf-launcher?tab=tags) script which invokes the Nextflow head job: + +1. Add the following configuration to the **Advanced options > Nextflow config file** textbox of the **Launch Pipeline** screen: + + ``` + aws { + client { + storageEncryption = 'AES256' + } + } + ``` + +2. Add the following configuration to the **Advanced options > Pre-run script** textbox of the **Launch Pipeline** screen: + + ```bash + export TOWER_AWS_SSE=AES256 + ``` + +**Note:** This solution requires at least Tower v21.10.4 and Nextflow [22.04.0](https://github.com/nextflow-io/nextflow/releases/tag/v22.04.0). + +## Azure + +### AKS + +**

Q: Why is Nextflow returning a "... /.git/HEAD.lock: Operation not supported" error?

** + +This problem can occur if your Nextflow pod uses an Azure Files-type (SMB) Persistent Volume as its storage medium. By default, the `jgit` library used by Nextflow attempts a filesystem link operation which [is not supported](https://docs.microsoft.com/en-us/azure/storage/files/files-smb-protocol?tabs=azure-portal#limitations) by Azure Files (SMB). + +To avoid this problem, please add the following code snippet to your pipeline's **pre-run script** field: + +```bash +cat <<EOT >> ~/.gitconfig +[core] + supportsatomicfilecreation = true +EOT +``` + +### Batch + +**

Q: Why is my Azure Batch VM quota set to 0?

** + +In order to manage capacity during the global health pandemic, Microsoft has reduced core quotas for new Batch accounts. Depending on your region and subscription type, a newly-created account may not be entitled to any VMs without first making a service request to Azure. + +Please see Azure's [Batch service quotas and limits](https://docs.microsoft.com/en-us/azure/batch/batch-quota-limit#view-batch-quotas) page for further details. + +### SSL + +**

Q: "Problem with the SSL CA cert (path? access rights?)" error

** + +This can occur if a tool/library in your task container requires SSL certificates to validate the identity of an external data source. + +You may be able to solve the issue by: + +1. Mounting host certificates into the container ([example](https://github.com/nextflow-io/nextflow/blob/v21.10.6/plugins/nf-azure/src/main/nextflow/cloud/azure/batch/AzBatchService.groovy#L348-L351)). + +**

Q: Why is my deployment using Azure SQL database returning an error about `Connections using insecure transport are prohibited while --require_secure_transport=ON.`

** + +This is due to Azure's default MySQL behavior of enforcing the SSL connections between your server and client application, as detailed [here](https://learn.microsoft.com/en-us/azure/mysql/single-server/concepts-ssl-connection-security). In order to fix this, append the following to your `TOWER_DB_URL` connection string: `useSSL=true&enabledSslProtocolSuites=TLSv1.2&trustServerCertificate=true` + +eg, `TOWER_DB_URL=jdbc:mysql://azuredatabase.com/tower?serverTimezone=UTC&useSSL=true&enabledSslProtocolSuites=TLSv1.2&trustServerCertificate=true` + +## Google + +### Retry + +**

Q: How do I make my Nextflow pipelines more resilient to VM preemption?

** + +Running your pipelines on preemptible VMs provides significant cost savings but increases the likelihood that a task will be interrupted before completion. It is a recommended best practice to implement a retry strategy when you encounter [exit codes](https://cloud.google.com/life-sciences/docs/troubleshooting#retrying_after_encountering_errors) that are commonly related to preemption. Example: + +```config +process { + errorStrategy = { task.exitStatus in [8,10,14] ? 'retry' : 'finish' } + maxRetries = 3 + maxErrors = '-1' +} +``` + +**

Q: What are the minimum Tower Service account permissions needed for GLS and GKE?

** + +The following roles must be granted to the `nextflow-service-account`: + +1. Cloud Life Sciences Workflows Runner +2. Service Account User +3. Service Usage Consumer +4. Storage Object Admin + +For detailed information, refer to this [guide](https://cloud.google.com/life-sciences/docs/tutorials/nextflow#create_a_service_account_and_add_roles). + +## Kubernetes + +**

Q: Pod failing with 'Invalid value: "xxx": must be less or equal to memory limit' error

** + +This error may be encountered when you specify a value in the **Head Job memory** field during the creation of a Kubernetes-type Compute Environment. + +If you receive an error that includes `field: spec.containers[x].resources.requests` and `message: Invalid value: "xxx": must be less than or equal to memory limit`, your Kubernetes cluster may be configured with [system resource limits](https://kubernetes.io/docs/tasks/administer-cluster/manage-resources/) which deny the Nextflow head job's resource request. To isolate which component is causing the problem, try to launch a Pod directly on your cluster via your Kubernetes administration solution. Example: + +```yaml +--- +apiVersion: v1 +kind: Pod +metadata: + name: debug + labels: + app: debug +spec: + containers: + - name: debug + image: busybox + command: ["sh", "-c", "sleep 10"] + resources: + requests: + memory: "xxxMi" # or "xxxGi" + restartPolicy: Never +``` + +## On-Prem HPC + +**

Q: "java: command not found"

** + +When submitting jobs to your on-prem HPC (regardless of whether using SSH or Tower-Agent authentication), the following error may appear in your Nextflow logs even though you have Java on your $PATH: + +``` +java: command not found +Nextflow is trying to use the Java VM defined by the following environment variables: + JAVA_CMD: java + NXF_OPTS: +``` + +Possible reasons for this error: + +1. The queue where the Nextflow head job runs is in a different environment/node than your login node userspace. +2. If your HPC cluster uses modules, the Java module may not be loaded by default. + +To troubleshoot: + +1. Open an interactive session with the head job queue. +2. Launch the Nextflow job from the interactive session. +3. If your cluster uses modules: + 1. Add the appropriate `module load` command in the **Advanced Features > Pre-run script** field when creating your HPC Compute Environment within Nextflow Tower. +4. If your cluster does not use modules: + 1. Source an environment with Java and Nextflow using the **Advanced Features > Pre-run script** field when creating your HPC Compute Environment within Nextflow Tower. diff --git a/platform-enterprise/functionality_matrix/functionality_matrix.mdx b/platform-enterprise/functionality_matrix/functionality_matrix.mdx new file mode 100644 index 000000000..5b23ec0a8 --- /dev/null +++ b/platform-enterprise/functionality_matrix/functionality_matrix.mdx @@ -0,0 +1,27 @@ +--- +title: Tower / Nextflow compatibility matrix +headline: "Tower / Nextflow compatibility matrix" +description: "Tower / nf-launcher / Nextflow version compatibility matrix" +--- + +## Tower / Nextflow version compatibility + +Each Tower version makes use of `nf-launcher` to determine the Nextflow version used as its baseline. This Nextflow version can be overridden by setting the `NXF_VER` environment variable (for example, in your pre-run script), but note that Tower may not work reliably with Nextflow versions other than the baseline version. + +We officially support the two latest Tower major releases (22.3.x, 22.4.x, etc.) at any given time. + +nf-launcher versions prefixed with j17 refer to Java version 17; j11 refers to Java 11. + +| Tower version | nf-launcher version | Nextflow version | +| ------------- | ------------------- | ---------------- | +| 22.4.1 | j17-22.10.6 | 22.10.6 | +| 22.4.0 | j17-22.10.6 | 22.10.6 | +| 22.3.1 | j17-22.10.4 | 22.10.4 | +| 22.3 | j17-22.10.1 | 22.10.1 | +| 22.2.4 | j17-22.06.1-edge | 22.06.1-edge | +| 22.2.3 | j11-22.06.1-edge | 22.06.1-edge | +| 22.2.2 | j17-22.08.0-edge | 22.08.0-edge | + +--- + +If no Nextflow version is specified in your configuration, Tower defaults to the baseline version outlined above.
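+As a minimal sketch, the override can be applied by exporting `NXF_VER` in the environment that launches Nextflow (for example, a compute environment's pre-run script); the version shown here is illustrative and should normally match a baseline from the table above:
+
+```bash
+# Pre-run script: pin the Nextflow version used by nf-launcher for this run
+export NXF_VER=22.10.6
+```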
diff --git a/platform-enterprise/getting-started/_images/starting_tower_enterprise.png b/platform-enterprise/getting-started/_images/starting_tower_enterprise.png new file mode 100644 index 000000000..817f93a37 Binary files /dev/null and b/platform-enterprise/getting-started/_images/starting_tower_enterprise.png differ diff --git a/platform-enterprise/getting-started/_images/starting_tower_nf.png b/platform-enterprise/getting-started/_images/starting_tower_nf.png new file mode 100644 index 000000000..33216eaa9 Binary files /dev/null and b/platform-enterprise/getting-started/_images/starting_tower_nf.png differ diff --git a/platform-enterprise/getting-started/_images/starting_tower_opensource.png b/platform-enterprise/getting-started/_images/starting_tower_opensource.png new file mode 100644 index 000000000..44f93fd00 Binary files /dev/null and b/platform-enterprise/getting-started/_images/starting_tower_opensource.png differ diff --git a/platform-enterprise/getting-started/_images/usage_create_token.png b/platform-enterprise/getting-started/_images/usage_create_token.png new file mode 100644 index 000000000..65da08d5a Binary files /dev/null and b/platform-enterprise/getting-started/_images/usage_create_token.png differ diff --git a/platform-enterprise/getting-started/_images/usage_name_token.png b/platform-enterprise/getting-started/_images/usage_name_token.png new file mode 100644 index 000000000..ec8ee1b4e Binary files /dev/null and b/platform-enterprise/getting-started/_images/usage_name_token.png differ diff --git a/platform-enterprise/getting-started/_images/usage_token.png b/platform-enterprise/getting-started/_images/usage_token.png new file mode 100644 index 000000000..0cd21e9f0 Binary files /dev/null and b/platform-enterprise/getting-started/_images/usage_token.png differ diff --git a/platform-enterprise/getting-started/deployment-options.mdx b/platform-enterprise/getting-started/deployment-options.mdx new file mode 100644 index 000000000..4fa725e17 --- /dev/null +++ b/platform-enterprise/getting-started/deployment-options.mdx @@ -0,0 +1,45 @@ +--- +title: "deployment-options" +description: "Choose how you want to use Tower." +--- + +Tower is available in three versions: + +- **Tower Cloud**: The hosted version of Tower is available free of charge at [tower.nf](https://tower.nf). This version is for individuals and organizations that want to get set up fast. It is the recommended way for users to become familiar with Tower. The service is hosted by Seqera Labs. + +- **Tower Community**: The Community edition of Tower is open-source and can be deployed by anyone on their own infrastructure. The community edition has basic features for the monitoring of pipelines by an individual user. + +- **Tower Enterprise**: The Enterprise edition of Tower contains the latest features and can be deployed in an organization's own cloud or on-premises infrastructure. This option includes dedicated support from Seqera Labs and is recommended for production environments. + +### Tower Cloud + +To try Tower Cloud, visit [tower.nf](https://tower.nf/login) and log in with your GitHub or Google credentials. The [Launching pipelines](../launch/launch.mdx) page provides step-by-step instructions to launch your first pipeline. Tower Cloud has a limit of five concurrent workflow runs per user. + +![](./_images/starting_tower_nf.png) + +### Tower Community + +For instructions to install the Community edition of Tower, visit the [GitHub repository](https://github.com/seqeralabs/nf-tower). 
+ +![](./_images/starting_tower_opensource.png) + +:::caution +Tower Community does not include all the features of Tower Cloud and Tower Enterprise, such as **Tower Launch**, **Organizations**, and **Workspaces**. +::: + +### Tower Enterprise + +Tower Enterprise is installed in an organization's own cloud or on-premises infrastructure. It includes: + +- Monitoring, logging, and observability +- Pipeline execution launchpad +- Cloud resource provisioning +- Pipeline actions and event-based execution +- LDAP & OpenID authentication +- Enterprise role-based access control (RBAC) +- Full-featured API +- Technical support for Nextflow and Tower + +To install Tower in your organization, contact [Seqera Labs](https://cloud.tower.nf/demo/) for a demo to discuss your requirements. + +![](./_images/starting_tower_enterprise.png) diff --git a/platform-enterprise/getting-started/usage.mdx b/platform-enterprise/getting-started/usage.mdx new file mode 100644 index 000000000..91da2c705 --- /dev/null +++ b/platform-enterprise/getting-started/usage.mdx @@ -0,0 +1,77 @@ +--- +title: "usage" +description: "Choose how you want to use Tower." +--- + +You can use Tower through the web interface, the API, the CLI, or in Nextflow directly using the `-with-tower` option. + +### Tower web interface + +1. Create an account and log in to Tower, available free of charge at [tower.nf](https://cloud.tower.nf). + +2. Create and configure a new [compute environment](../compute-envs/overview.mdx). + +3. Start [launching pipelines](../launch/launchpad.mdx). + +### Tower API + +See [API](../api/overview.mdx). + +### Tower CLI + +See [CLI](../cli.mdx). + +### Nextflow `-with-tower` + +1. Create an account and log in to Tower. + +2. Create a new token. You can access your tokens from the **Settings** drop-down menu: + + ![](./_images/usage_create_token.png) + +3. Name your token. + + ![](./_images/usage_name_token.png) + +4. Store your token securely. + + ![](./_images/usage_token.png) + +:::note +The token will only be displayed once. You must copy and save the token before closing the Personal Access Token window. +::: + +5. Open a terminal and enter the following commands: + + ```bash + export TOWER_ACCESS_TOKEN=eyxxxxxxxxxxxxxxxQ1ZTE= + export NXF_VER=22.10.6 + ``` + + Where `eyxxxxxxxxxxxxxxxQ1ZTE=` is the token you just created. + + :::note + Bearer token support requires Nextflow version 20.10.0 or later, set with the second command above. + + To submit a pipeline to a [workspace](./workspace.mdx) using Nextflow, add the workspace ID to your environment: + + ```bash + export TOWER_WORKSPACE_ID=000000000000000 + ``` + + The workspace ID can be found on the organization workspaces overview page. + ::: + +6. Run your Nextflow pipeline with the `-with-tower` flag: + + ```bash + nextflow run hello.nf -with-tower + ``` + + You can now monitor your workflow runs in Tower. + + To configure and execute Nextflow pipelines in cloud environments, see [Compute Environments](../compute-envs/overview.mdx). + +:::tip +See the [Nextflow documentation](https://www.nextflow.io/docs/latest/config.html?highlight=tower#scope-tower) for further run configuration options using Nextflow configuration files. +::: diff --git a/platform-enterprise/getting-started/workspace.mdx b/platform-enterprise/getting-started/workspace.mdx new file mode 100644 index 000000000..c5fc0e396 --- /dev/null +++ b/platform-enterprise/getting-started/workspace.mdx @@ -0,0 +1,40 @@ +--- +title: "workspace" +description: "Introduction to Workspaces." 
+--- + +Each user has a unique workspace to manage all resources, such as pipelines, compute environments, and credentials. + +:::tip +You can create multiple workspaces within an organization context and associate each of these workspaces with dedicated teams of users, while providing fine-grained access control for each of the teams. See [Orgs and teams](../orgs-and-teams/overview.mdx). +::: + +The core components of a workspace are: + +### Launchpad + +The **Launchpad** offers a streamlined UI for launching and managing pipelines and their associated compute environments and credentials. Using the Launchpad, you can create a curated set of pipelines (including variations of the same pipeline) that are ready to be executed on the associated compute environments, while allowing the user to customize the pipeline-level parameters if needed. + +### Runs + +The **Runs** section monitors a launched workflow with real-time execution metrics, such as the number of pending or completed processes. + +See [Launch](../launch/launch.mdx). + +### Actions + +You can trigger pipelines based on specific events, such as a version release on Github or a general Tower webhook. + +See [Pipeline actions](../pipeline-actions/overview.mdx). + +### Compute environments + +Tower uses the concept of a **Compute environment** to define an execution platform for pipelines. Tower supports launching pipelines into a growing number of cloud (AWS, Azure, GCP) and on-premises (Slurm, IBM LSF, Grid Engine, etc.) infrastructures. + +See [Compute environments](../compute-envs/overview.mdx). + +### Credentials + +The **Credentials** section allows users to set up the access credentials for various platforms (Github, Gitlab, BitBucket, etc.) and compute environments (cloud, Slurm, Kubernetes, etc.) See [Compute environments](../compute-envs/overview.mdx) and [Git integration](../git/overview.mdx) for information on your infrastructure. + +See [Credentials](../credentials/overview.mdx). diff --git a/platform-enterprise/git/_images/git_manage_credentials.png b/platform-enterprise/git/_images/git_manage_credentials.png new file mode 100644 index 000000000..6d3cc584e Binary files /dev/null and b/platform-enterprise/git/_images/git_manage_credentials.png differ diff --git a/platform-enterprise/git/_images/git_platforms.png b/platform-enterprise/git/_images/git_platforms.png new file mode 100644 index 000000000..fd9c43b65 Binary files /dev/null and b/platform-enterprise/git/_images/git_platforms.png differ diff --git a/platform-enterprise/git/_images/git_public_repo.png b/platform-enterprise/git/_images/git_public_repo.png new file mode 100644 index 000000000..eda911d32 Binary files /dev/null and b/platform-enterprise/git/_images/git_public_repo.png differ diff --git a/platform-enterprise/git/overview.mdx b/platform-enterprise/git/overview.mdx new file mode 100644 index 000000000..662cf34b5 --- /dev/null +++ b/platform-enterprise/git/overview.mdx @@ -0,0 +1,186 @@ +--- +title: "overview" +description: "Managing and connecting to Git repositories for Nextflow in Nextflow Tower." +--- + +## Overview + +Data pipelines can be composed of many assets (pipeline scripts, configuration files, dependency descriptors such as for Conda or Docker, documentation, etc). By managing complex data pipelines as Git repositories, all assets can be versioned and deployed with a specific tag, release or commit id. 
Version control, combined with containerization, is crucial for **enabling reproducible pipeline executions**, and it provides the ability to continuously test and validate pipelines as the code evolves over time. + +Nextflow has built-in support for [Git](https://git-scm.com) and several Git-hosting platforms. Nextflow pipelines can be pulled remotely from both public and private Git-hosting providers, including the most popular platforms: GitHub, GitLab, and BitBucket. + +### Public repositories + +You can use a publicly hosted Nextflow pipeline by specifying the Git repository URL in the **Pipeline to launch** field. + +When specifying the **Revision number**, the list of available revisions are automatically pulled using the Git provider's API. By default, the default branch (usually `main` or `master`) will be used. + +![](./_images/git_public_repo.png) + +:::tip +[nf-core](https://nf-co.re/pipelines) is a great resource for public Nextflow pipelines. +::: + +:::caution +The GitHub API imposes [rate limits](https://docs.github.com/en/developers/apps/building-github-apps/rate-limits-for-github-apps) on API requests. You can increase your rate limit by adding [GitHub credentials](#github) to your workspace as shown below. +::: + +### Private repositories + +In order to access private Nextflow pipelines, you must add credentials for your private Git hosting provider. + +:::note +All credentials are (AES-256) encrypted before secure storage and are not exposed in an unencrypted way by any Tower API. +::: + +### Multiple credential filtering + +When your Tower instance has multiple stored credentials, selection of the most relevant credential for your repository takes precedence in the following order: + +1. Tower evaluates all the stored credentials available to the current Workspace. + +2. Credentials are filtered by Git provider (GitHub, GitLab, Bitbucket, etc.) + +3. Tower selects the credential with a **Repository base URL** most similar to the target repository. + +4. If no **Repository base URL** values are specified in the Workspace credentials, the most long-lived credential is selected. + +**Example**: + +Workspace A contains 4 credentials: + +**Credential A** + + Type: GitHub + + Repository base URL: + +**Credential B** + + Type: GitHub + + Repository base URL: https://github.com/ + +**Credential C** + + Type: GitHub + + Repository base URL: https://github.com/pipeline-repo + +**Credential D** + + Type: GitLab + + Repository base URL: https://gitlab.com/repo-a + +If you launch a pipeline with a Nextflow workflow residing in https://github.com/pipeline-repo, Tower will use **Credential C**. + +To ensure automatic selection of the most appropriate credential for your repository, we recommend that you: + +- Specify **Repository base URL** values as precisely as possible for each Git credential used in the Workspace. + +- Favor the use of service account type credentials where possible (such as GitLab group access tokens). + +- Avoid the use of multiple user-based tokens with similar permissions. + +### GitHub + +To connect a private [GitHub](https://github.com/) repository, personal (classic) or fine-grained access tokens can be used. + +:::note +A personal access token (classic) can access every repository that the user it belongs to can access. GitHub recommends that you use fine-grained personal access tokens (currently in beta) instead, which you can restrict to specific repositories. 
Fine-grained personal access tokens also enable you to specify granular permissions instead of broad scopes. +::: + +For **personal (classic)** tokens, you must grant access to the private repository by selecting the main `repo` scope when the token is created. See [here](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token#creating-a-personal-access-token-classic) for instructions to create your personal access token (classic). + +For **fine-grained** tokens, the repository's organization must [opt in](https://docs.github.com/en/organizations/managing-programmatic-access-to-your-organization/setting-a-personal-access-token-policy-for-your-organization) to the use of fine-grained tokens. Tokens can be restricted by **Resource owner (organization)**, **Repository access**, and **Permissions**. See [here](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token#creating-a-fine-grained-personal-access-token) for instructions to create your fine-grained access token. + +Once you have created and copied your access token, create a new credential in Tower using these steps: + +1. Navigate to the **Credentials** tab. If you are using your personal workspace, select **Your credentials** from the user icon menu (top right). + +2. Select **Add Credentials**. + +3. Enter a **Name** for the new credentials. + +4. Select "GitHub" as the **Provider**. + +5. Enter your **Username** and **Access token**. + +6. Enter the **Repository base URL** for which the credentials should be applied (recommended). This option can be used to apply the provided credentials to a specific repository, e.g. `https://github.com/seqeralabs`. + +### GitLab + +GitLab supports [Personal](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html), [Group](https://docs.gitlab.com/ee/user/group/settings/group_access_tokens.html#group-access-tokens), and [Project](https://docs.gitlab.com/ee/user/project/settings/project_access_tokens.html) access tokens for authentication. Your access token should have the `api`, `read_api`, and `read_repository` scopes in order to work with Tower. For all three token types, the token value is used for both the **Password** and **Access token** fields in the Tower credential creation form. + +To connect Tower to a private [GitLab](https://gitlab.com/) repository: + +1. Navigate to the **Credentials** tab. If you are using your personal workspace, select **Your credentials** from the user icon menu (top right). + +2. Select **Add Credentials**. + +3. Enter a **Name** for the new credentials. + +4. Select "GitLab" as the **Provider**. + +5. Enter your **Username**. For Group and Project access tokens, the username can be any non-empty value. + +6. Enter your token value in the **Password** and **Access token** fields. + +7. Enter the **Repository base URL** (recommended). This option is used to apply the credentials to a specific repository, e.g. `https://gitlab.com/seqeralabs`. + +### Gitea + +**Available from Tower 22.4.X** + +To connect to a private [Gitea](https://gitea.io/) repository, supply your Gitea user credentials to create a new credential in Tower with these steps: + +1. Navigate to the **Credentials** tab. If you are using your personal workspace, select **Your credentials** from the user icon menu (top right). + +2. Select **Add Credentials**. + +3. Enter a **Name** for the new credentials. + +4. Select "Gitea" as the **Provider**. + +5. Enter your **Username**. 
+ +6. Enter your **Password**. + +7. Enter your **Repository base URL** (required). + +### Bitbucket + +To connect to a private BitBucket repository, refer to the [BitBucket documentation](https://support.atlassian.com/bitbucket-cloud/docs/app-passwords/) to learn how to create a BitBucket App password. Then, create a new credential in Tower using these steps: + +1. Navigate to the **Credentials** tab. If you are using your personal workspace, select **Your credentials** from the user icon menu (top right). + +2. Select **Add Credentials**. + +3. Enter a **Name** for the new credentials. + +4. Select "BitBucket" as the **Provider**. + +5. Enter your **Username** and **Password**. + +6. Enter the **Repository base URL** (recommended). This option can be used to apply the provided credentials to a specific repository, e.g. `https://bitbucket.org/seqeralabs`. + +### AWS CodeCommit + +To connect to a private AWS CodeCommit repository, refer to the [AWS documentation](https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-identity-based-access-control.html) to learn more about IAM permissions for CodeCommit. Then, supply the IAM account access key and secret key to create a credential in Tower using these steps: + +1. Navigate to the **Credentials** tab. If you are using your personal workspace, select **Your credentials** from the user icon menu (top right). + +2. Select **Add Credentials**. + +3. Enter a **Name** for the new credentials. + +4. Select "CodeCommit" as the **Provider**. + +5. Enter the **Access key** and **Secret key** of the AWS IAM account that will be used to access the desired CodeCommit repository. + +6. Enter the **Repository base URL** for which the credentials should be applied (recommended). This option can be used to apply the provided credentials to a specific region, e.g. `https://git-codecommit.eu-west-1.amazonaws.com`. + +### Self-hosted Git + +It is also possible to specify Git server endpoints for Tower Enterprise. For more information, refer to the [Tower Install Documentation](https://install.tower.nf/latest/configuration/git_integration/). diff --git a/platform-enterprise/index.mdx b/platform-enterprise/index.mdx new file mode 100644 index 000000000..03b0d3dcd --- /dev/null +++ b/platform-enterprise/index.mdx @@ -0,0 +1,31 @@ +--- +title: Introduction +headline: "Tower User Documentation - Welcome!" +description: "Nextflow Tower is a full-stack application for the development, deployment, and monitoring of Nextflow data analysis pipelines." +--- + +Nextflow Tower is the centralized command post for the management of [Nextflow](https://www.nextflow.io/) data pipelines. It brings monitoring, logging, and observability to distributed workflows and simplifies the deployment of pipelines on any cloud, cluster, or laptop. + +Users can launch pre-configured pipelines with ease, while the flexible API provides programmatic integration to meet the needs of organizations building on Nextflow Tower. Workflow developers can publish pipelines to shared workspaces and administrators can set up and manage the infrastructure required to run data analysis at scale. + +![](./_images/overview_image.png) + +### What is Nextflow? + +Nextflow is a framework for the development of data workflows. It enables engineers and data scientists to create and securely deploy custom, parallel data applications to the cloud or traditional on-premises infrastructure. 
Nextflow is characterized by its powerful dataflow programming paradigm and execution engines that allow for transparent deployment. + +Nextflow is both a programming workflow language and an execution runtime that supports a wide range of execution platforms, including popular traditional grid scheduling systems such as Slurm and IBM LSF, and cloud services such as AWS Batch and Google Cloud Life Sciences. + +![](./_images/nf_home_page.png) + +### Why Nextflow Tower? + +We created Nextflow in 2013 to deliver the most seamless experience for executing data workflows at scale. Tower is the continuation of that mission. Using the latest technologies, we have built the solution to easily execute and monitor pipelines across every stage. Tower brings the cloud closer than ever before with automated resource provisioning and role-based access control (RBAC). + +Tower is designed to be easily configurable in any environment — data and compute never leave your organization's security boundary. It has been extensively tested with over 500 million jobs, achieving 99.99% uptime. + +As mandated by healthcare industries to ensure compliance, the Tower platform is regularly submitted to penetration tests and security scanning. These tests meet the compliance standards set by ISO-27001, HIPAA, and HITRUST. + +:::tip +[**Sign up**](https://tower.nf "Nextflow Tower") to try Tower for free, or request a [**demo**](https://cloud.tower.nf/demo/ "Nextflow Tower Demo") for deployments in your own on-premises or cloud environment. +::: diff --git a/platform-enterprise/installation/_images/installation_reference_architecture.png b/platform-enterprise/installation/_images/installation_reference_architecture.png new file mode 100644 index 000000000..5bad308d9 Binary files /dev/null and b/platform-enterprise/installation/_images/installation_reference_architecture.png differ diff --git a/platform-enterprise/installation/system-deployment.mdx b/platform-enterprise/installation/system-deployment.mdx new file mode 100644 index 000000000..71454456f --- /dev/null +++ b/platform-enterprise/installation/system-deployment.mdx @@ -0,0 +1,85 @@ +--- +title: System deployment +headline: "Deployment Guide" +description: "System description and instructions for Nextflow Tower." +--- + +:::tip +It is highly recommended to first [**Sign up**](https://tower.nf "Nextflow Tower") and try the hosted version of Tower for free or request a [**demo**](https://cloud.tower.nf/demo/ "Nextflow Tower Demo") for a deployment in your own on-premises or cloud environment. +::: + +Nextflow Tower is a web application server based on a microservice oriented architecture and designed to maximize the portability, scalability and security of the application. + +The application is composed of a variety of modules that can be configured and deployed depending on organization's requirements. + +All components for the Enterprise release are packaged as Docker container images which are hosted and security validated by the Amazon ECR service. The community version can be accessed via GitHub. + +## Deployment configurations + +:::caution +To install Nextflow Tower on private infrastructure, you'd need a **license key**. Please contact us at sales@seqera.io to get your license key. +::: + +### Basic deployment + +The minimal Tower configuration only requires the front-end, backend and database modules. + +These can be executed as Docker containers or as native services running in the hosting environment. 
Such a minimal configuration is only suggested for **evaluation purposes** or for a small number of users. + +### Kubernetes deployment + +Kubernetes cluster management is emerging as the technology of choice for the deployment of applications requiring high availability, scalability and security. + +Nextflow Tower Enterprise includes configuration manifests for deployment in a Kubernetes environment. + +This diagram shows the system architecture for the reference deployment on AWS. + +![](./_images/installation_reference_architecture.png) + +### Tower Modules + +The application is composed of a number of modules that can be configured and deployed depending on user requirements. + +All components are packaged as Docker container images which are hosted and security validated by the Amazon ECR service. + +### Backend module + +The backend is implemented as a JVM-based application server built on the Micronaut framework, which provides a modern and secure backbone for the application. + +The backend module requires OpenJDK 8 or later. + +The backend layer implements the main application logic organised in a _service_ layer, which is then exposed via a REST API and defined via an OpenAPI schema. + +The backend module uses JPA/Hibernate/JDBC API industry standards to connect to the underlying relational database. + +The backend is designed to run standalone or as multiple replicas for scalability when deployed in high-availability mode. + +### Frontend module + +The frontend module is an Angular 8 application served by an Nginx web server. + +The frontend can be configured to expose the application directly to the user/DMZ via an HTTPS connection or through a load balancer. + +### Storage + +Nextflow Tower requires a relational database as its primary storage. + +MySQL 5.6 is suggested; however, any SQL database compatible with JPA/JDBC industry standards is supported. + +### Caching + +Tower provides an optional caching module for configurations requiring high availability. + +This module requires a Redis 5.0 in-memory database. + +### Authentication module + +Nextflow Tower supports enterprise authentication mechanisms such as OAuth and LDAP. + +Third-party authority providers and custom single sign-on flows can be developed depending on exact customer requirements. + +### Cron scheduler + +Tower implements a cron service which takes care of executing periodic activities, such as sending e-mail notifications and cleaning up. + +The cron service can be configured to run as an embedded backend service or an independent service.
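+As an illustration of the basic deployment described above, the storage and caching modules could be run as plain Docker containers along the following lines. This is a minimal sketch for evaluation only, not an installation guide: the container names, ports, and credentials are placeholder assumptions, and the Tower backend and frontend images themselves are distributed separately (via Amazon ECR for the Enterprise release) and configured to point at these services, for example through a JDBC connection string such as `TOWER_DB_URL=jdbc:mysql://localhost:3306/tower`.
+
+```bash
+# Relational storage: MySQL 5.6, as suggested above (placeholder credentials)
+docker run -d --name tower-db \
+  -e MYSQL_ROOT_PASSWORD=change-me \
+  -e MYSQL_DATABASE=tower \
+  -p 3306:3306 \
+  mysql:5.6
+
+# Optional caching module: Redis 5.0 in-memory database
+docker run -d --name tower-cache \
+  -p 6379:6379 \
+  redis:5.0
+```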
diff --git a/platform-enterprise/labels/_images/filter_labels.png b/platform-enterprise/labels/_images/filter_labels.png new file mode 100644 index 000000000..66b567ec9 Binary files /dev/null and b/platform-enterprise/labels/_images/filter_labels.png differ diff --git a/platform-enterprise/labels/_images/label_management.png b/platform-enterprise/labels/_images/label_management.png new file mode 100644 index 000000000..6ccfda122 Binary files /dev/null and b/platform-enterprise/labels/_images/label_management.png differ diff --git a/platform-enterprise/labels/_images/launch_labels.png b/platform-enterprise/labels/_images/launch_labels.png new file mode 100644 index 000000000..20698822f Binary files /dev/null and b/platform-enterprise/labels/_images/launch_labels.png differ diff --git a/platform-enterprise/labels/_images/launchpad_labels.png b/platform-enterprise/labels/_images/launchpad_labels.png new file mode 100644 index 000000000..7aab4cdd6 Binary files /dev/null and b/platform-enterprise/labels/_images/launchpad_labels.png differ diff --git a/platform-enterprise/labels/_images/new_label.png b/platform-enterprise/labels/_images/new_label.png new file mode 100644 index 000000000..793faff7c Binary files /dev/null and b/platform-enterprise/labels/_images/new_label.png differ diff --git a/platform-enterprise/labels/_images/pipeline_labels.png b/platform-enterprise/labels/_images/pipeline_labels.png new file mode 100644 index 000000000..435c55efa Binary files /dev/null and b/platform-enterprise/labels/_images/pipeline_labels.png differ diff --git a/platform-enterprise/labels/overview.mdx b/platform-enterprise/labels/overview.mdx new file mode 100644 index 000000000..93ae32710 --- /dev/null +++ b/platform-enterprise/labels/overview.mdx @@ -0,0 +1,72 @@ +--- +title: Labels Overview +headline: "Labels" +description: "Step-by-step instructions to set up and use Labels in Tower." +--- + +## Overview + +Use labels to organize your work and filter key information. Labels are free-text annotations that can be applied to pipelines, actions, or workflow runs either during creation or afterward. + +Labels are workspace specific (each workspace has an independent set of labels), and are not propagated to Nextflow during workflow execution. + +### Create and apply labels + +Labels can be created, applied and edited by a workspace maintainer, admin or owner. When applying a label, users can select from existing labels or add new ones on the fly. + +![](./_images/new_label.png) + +### Labels applied to a pipeline + +:::caution +Labels are applied to elements in a workspace-specific context. This means that labels applied to a shared pipeline in workspace A will not be shown when viewing the pipeline from workspace B. +::: + +Labels applied to a pipeline are displayed at the bottom of the pipeline card on the Launchpad screen. To see all labels, hover over a label with the "+" character. + +![](./_images/pipeline_labels.png) + +Apply a label to a pipeline when adding a new pipeline or editing an existing one. + +If a label was applied to a pipeline, all workflow runs of this pipeline will inherit the label. If the label applied to the pipeline changes, the change will not be reflected in previously executed workflow runs; it will only affect future workflow runs. + +![](./_images/launchpad_labels.png) + +### Labels applied to an action + +Labels applied to an action are displayed in the action card on the Actions screen. To see all labels, hover over a label with the "+" character.
+ +Apply a label to an action when adding a new action or editing an existing one. + +If a label was applied to an action, all workflow runs triggered by that action will inherit the label. If the label applied to the action changes, the change will not be reflected in previously executed workflow runs; it will only affect future workflow runs. + +### Labels applied to a workflow run + +Labels applied to a workflow run are displayed in the card on the Workflow runs list screen as well as in the Workflow run detail screen. To see all labels, hover over a label with the "+" character. +Apply a label to a workflow run at any time: when launching the run, or later from the Workflow runs list screen or the Workflow run detail screen. + +![](./_images/launch_labels.png) + +### Search and filter with labels +Search and filter pipelines and workflow runs using one or more labels. +Filtering and searching are complementary. + +![](./_images/filter_labels.png) + +### Overview of labels in a workspace + +All labels used in a workspace can be viewed, added, edited, and deleted by a maintainer, admin, or workspace owner in the workspace's Settings screen. +If a label is edited or deleted in this screen, the change is propagated to all items where the label was used. Such a change is irreversible. + +![](./_images/label_management.png) + +### Limits + +:::caution +Label names must contain a minimum of 2 and a maximum of 39 alphanumeric characters, separated by dashes or underscores, and must be unique in each workspace. +::: + +- Label names cannot begin or end with dashes `-` or underscores `_`. +- Label names cannot contain a consecutive combination of `-` or `_` characters (`--`, `__`, `-_`, etc.). +- A maximum of 25 labels can be applied to each resource. +- A maximum of 100 labels can be used in each workspace.
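+As a rough illustration of the rules above, a client-side pre-check of a label name could look like the following sketch. This is a hypothetical helper, not part of Tower: the exact server-side validation may differ, and whether separator characters count towards the 39-character limit is an assumption here.
+
+```bash
+# Hypothetical helper: succeeds if a label name appears to satisfy the rules above
+is_valid_label() {
+  local name="$1"
+  # Between 2 and 39 characters overall (assumption: separators count towards the limit)
+  [ "${#name}" -ge 2 ] && [ "${#name}" -le 39 ] || return 1
+  # Alphanumeric segments separated by single dashes or underscores:
+  # no leading/trailing separators and no consecutive -/_ characters
+  printf '%s' "$name" | grep -Eq '^[[:alnum:]]+([-_][[:alnum:]]+)*$'
+}
+
+is_valid_label "rna-seq_batch1" && echo "valid"
+is_valid_label "-rnaseq" || echo "rejected"
+```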
diff --git a/platform-enterprise/launch/_images/launch_advanced.png b/platform-enterprise/launch/_images/launch_advanced.png new file mode 100644 index 000000000..1aec7ff2b Binary files /dev/null and b/platform-enterprise/launch/_images/launch_advanced.png differ diff --git a/platform-enterprise/launch/_images/launch_manifest.png b/platform-enterprise/launch/_images/launch_manifest.png new file mode 100644 index 000000000..0a38f2fab Binary files /dev/null and b/platform-enterprise/launch/_images/launch_manifest.png differ diff --git a/platform-enterprise/launch/_images/launch_notifications.png b/platform-enterprise/launch/_images/launch_notifications.png new file mode 100644 index 000000000..472aef707 Binary files /dev/null and b/platform-enterprise/launch/_images/launch_notifications.png differ diff --git a/platform-enterprise/launch/_images/launch_pipeline_rename.png b/platform-enterprise/launch/_images/launch_pipeline_rename.png new file mode 100644 index 000000000..58ef53a27 Binary files /dev/null and b/platform-enterprise/launch/_images/launch_pipeline_rename.png differ diff --git a/platform-enterprise/launch/_images/launch_relaunch.png b/platform-enterprise/launch/_images/launch_relaunch.png new file mode 100644 index 000000000..4d85b86c2 Binary files /dev/null and b/platform-enterprise/launch/_images/launch_relaunch.png differ diff --git a/platform-enterprise/launch/_images/launch_resume.png b/platform-enterprise/launch/_images/launch_resume.png new file mode 100644 index 000000000..70020eb2c Binary files /dev/null and b/platform-enterprise/launch/_images/launch_resume.png differ diff --git a/platform-enterprise/launch/_images/launch_rnaseq_nextflow_schema.png b/platform-enterprise/launch/_images/launch_rnaseq_nextflow_schema.png new file mode 100644 index 000000000..f36d22119 Binary files /dev/null and b/platform-enterprise/launch/_images/launch_rnaseq_nextflow_schema.png differ diff --git a/platform-enterprise/launch/advanced.mdx b/platform-enterprise/launch/advanced.mdx new file mode 100644 index 000000000..ea900b471 --- /dev/null +++ b/platform-enterprise/launch/advanced.mdx @@ -0,0 +1,35 @@ +--- +title: Advanced options +headline: "Advanced launch options" +description: "Advanced guide to launch Nextflow pipelines" +--- + +Advanced launch options allow users to modify the configuration and execution of the pipeline. + +### Nextflow config file + +The _Nextflow config_ field allows the addition of settings to the Nextflow configuration file. + +This text should follow the same syntax as the [Nextflow configuration file](https://www.nextflow.io/docs/latest/config.html#config-syntax). + +In the example below, we can modify the **manifest** section to give the pipeline a name and description which will show up in the Tower monitoring section. + +![](./_images/launch_manifest.png) + +### Pre & post-run scripts + +It is possible to run custom code either before or after the execution of the Nextflow script. These fields allow users to enter shell commands. + +### Pull latest + +Enabling this option ensures Nextflow pulls the latest version from the Git repository. This is equivalent to using the `-latest` flag. + +![](./_images/launch_advanced.png) + +### Main script + +Nextflow will attempt to run the script named `main.nf` in the project repository by default. This can be changed via either the `manifest.mainScript` option or by providing the script filename to run in this field. + +### Workflow entry name + +Nextflow DSL2 provides the ability to launch specific-named workflows. 
Enter the name of the workflow to be executed in this field. diff --git a/platform-enterprise/launch/launch.mdx b/platform-enterprise/launch/launch.mdx new file mode 100644 index 000000000..5a00824d3 --- /dev/null +++ b/platform-enterprise/launch/launch.mdx @@ -0,0 +1,53 @@ +--- +title: Launch Form +headline: "Pipeline Execution" +description: "Guide to launching pipelines using Nextflow Tower." +--- + +### Pipeline launch form + +The **Launch Form** can be used for launching pipelines and for adding pipelines to the **Launchpad**. + +To launch a pipeline: + +1. Select **Start Quick Launch** in the navigation bar. The Launch Form will appear. + +2. Select a **Compute Environment** from the available options. + + Visit the [Compute Environment](../compute-envs/overview.mdx) documentation to learn how to create an environment for your preferred execution platform. + +3. Enter a repository URL for the **Pipeline to launch** (e.g. `https://github.com/nf-core/rnaseq.git`). + +:::tip +Nextflow pipelines are just Git repositories and they can reside on any public or private Git-hosting platform. See [Git Integration](../git/overview.mdx) in the Tower docs and [Pipeline Sharing](https://www.nextflow.io/docs/latest/sharing.html) in the Nextflow docs for more details. +::: + +4. You can select a **Revision number** to use a specific version of the pipeline. + + The Git default branch (e.g. `main` or `master`) or `manifest.defaultBranch` in the Nextflow configuration will be used by default. + +5. Enter the **Work directory**, which corresponds to the Nextflow work directory. + + The default work directory of the compute environment will be used by default. + +:::caution +The credentials associated with the compute environment must be able to access the work directory (e.g. an S3 bucket). +::: + +6. Select any **Config profiles** you would like to use. + + Visit the Nextflow [Config profiles](https://www.nextflow.io/docs/latest/config.html#config-profiles) documentation for more details. + +7. Enter any **Pipeline parameters** in YAML or JSON format. + + YAML example: + ```yaml + reads: 's3://nf-bucket/exome-data/ERR013140_{1,2}.fastq.bz2' + paired_end: true + ``` + +:::tip +In YAML, quotes should be used for paths but not for numbers or Booleans. +::: + +8. Select **Launch** to launch the pipeline. diff --git a/platform-enterprise/launch/launchpad.mdx b/platform-enterprise/launch/launchpad.mdx new file mode 100644 index 000000000..7748bf36e --- /dev/null +++ b/platform-enterprise/launch/launchpad.mdx @@ -0,0 +1,33 @@ +--- +title: "Launchpad" +headline: "Launchpad" +description: "Curate and launch workflows" +--- + +## Overview + +**Launchpad** makes it easy for any workspace user to launch a pre-configured pipeline. + +![](../_images/overview_image.png) + +A pipeline is a repository containing a Nextflow workflow, a compute environment, and pipeline parameters. + +### Pipeline Parameters Form + +Launchpad automatically detects the presence of a `nextflow_schema.json` in the root of the repository and dynamically creates a form where users can easily update the parameters. + +:::tip +The parameter forms view will appear if the workflow has a Nextflow schema file for the parameters. Please refer to the [**Nextflow Schema guide**](../pipeline-schema/overview.mdx) to learn more about the use cases and how to create them. +::: + +This makes it trivial for users without any expertise in Nextflow to enter their pipeline parameters and launch. 
+ +![](./_images/launch_rnaseq_nextflow_schema.png) + +### Adding a New Pipeline + +Adding a pipeline to the workspace launchpad is similar to the [Launch](../launch/launch.mdx) form, except that instead of launching the pipeline, it is added to the list of pipelines with pre-saved field values, such as the pipeline parameters and the revision number. + +:::tip +To create your own customized Nextflow Schema for your pipeline, see the `nf-core` workflows that have adopted this. [nf-core/eager](https://github.com/nf-core/eager/blob/2.3.3/nextflow_schema.json) and [nf-core/rnaseq](https://github.com/nf-core/rnaseq/blob/3.0/nextflow_schema.json) are excellent examples. +::: diff --git a/platform-enterprise/launch/notifications.mdx b/platform-enterprise/launch/notifications.mdx new file mode 100644 index 000000000..455a9a5f4 --- /dev/null +++ b/platform-enterprise/launch/notifications.mdx @@ -0,0 +1,13 @@ +--- +title: Notifications +headline: "Notifications" +description: "Guide to setting up notifications for Nextflow pipelines using Tower." +--- + +### Email Notifications + +You can receive email notifications on completion or failure of a workflow execution. + +Navigate to your [profile page](https://tower.nf/profile) using the dropdown on your avatar in the top-right of the page. Select the **Send notification email on workflow completion** toggle option at the bottom of the profile settings page. + +![](./_images/launch_notifications.png) diff --git a/platform-enterprise/launch/relaunch.mdx b/platform-enterprise/launch/relaunch.mdx new file mode 100644 index 000000000..487f4dfb3 --- /dev/null +++ b/platform-enterprise/launch/relaunch.mdx @@ -0,0 +1,19 @@ +--- +title: Re-launch +headline: "Resuming pipeline executions" +description: "Guide to re-launching Nextflow pipelines in Tower" +--- + +Re-launching pipelines is a quick way to troubleshoot, or to make use of Nextflow's resume functionality and re-launch the same pipeline with different parameters. + +The **Resume** option is selected by default when re-launching a new pipeline from the **Runs** monitoring screen. In short, this option allows for the continuation of a workflow execution using Nextflow resume. + +:::tip +For a detailed explanation of how the resume option works, see [Part 1](https://www.nextflow.io/blog/2019/demystifying-nextflow-resume.html) and [Part 2](https://www.nextflow.io/blog/2019/troubleshooting-nextflow-resume.html) of the _Demystifying Nextflow resume_ series on the [Nextflow blog](https://www.nextflow.io/blog.html). +::: + +### Change compute environment when resuming a run + +**Available from Tower 22.4.0** + +Users with appropriate permissions can change the compute environment when resuming a run. The new compute environment must have access to the original run work directory. This means that the new compute environment must have a work directory that matches the root path of the original pipeline work directory, e.g. if the original pipeline work directory is `s3://foo/work/12345`, the new compute environment must have access to `s3://foo/work`. diff --git a/platform-enterprise/limits/limits.mdx b/platform-enterprise/limits/limits.mdx new file mode 100644 index 000000000..c9d4de60f --- /dev/null +++ b/platform-enterprise/limits/limits.mdx @@ -0,0 +1,34 @@ +--- +title: Usage limits +headline: "Tower Cloud usage limits" +description: "An overview of Nextflow Tower usage limits" +--- + +[Tower Cloud](https://tower.nf) elements and features have default limits per workspace and organization.
+ +### Workspaces + +| Description | Value | +| ------------ | ----- | +| Active runs | 5 | +| Members | 50 | +| Participants | 50 | +| Pipelines | 100 | +| Datasets | 100 | +| Labels | 1000 | + +### Organizations + +| Description | Value | +| ----------- | ----- | +| Workspaces | 50 | +| Teams | 20 | + +### Datasets + +| Description | Value | +| -------------------- | ----- | +| File size | 10 MB | +| Versions per dataset | 100 | + +If you need higher limits and capabilities, [contact us](https://cloud.tower.nf/contact-us/) to discuss your application requirements. diff --git a/platform-enterprise/monitoring/_images/monitoring_aggregate_stats.png b/platform-enterprise/monitoring/_images/monitoring_aggregate_stats.png new file mode 100644 index 000000000..afedb7bc6 Binary files /dev/null and b/platform-enterprise/monitoring/_images/monitoring_aggregate_stats.png differ diff --git a/platform-enterprise/monitoring/_images/monitoring_cached.png b/platform-enterprise/monitoring/_images/monitoring_cached.png new file mode 100644 index 000000000..90fe2ffc3 Binary files /dev/null and b/platform-enterprise/monitoring/_images/monitoring_cached.png differ diff --git a/platform-enterprise/monitoring/_images/monitoring_exec_log.png b/platform-enterprise/monitoring/_images/monitoring_exec_log.png new file mode 100644 index 000000000..a646f1dd1 Binary files /dev/null and b/platform-enterprise/monitoring/_images/monitoring_exec_log.png differ diff --git a/platform-enterprise/monitoring/_images/monitoring_fastqc_processes.png b/platform-enterprise/monitoring/_images/monitoring_fastqc_processes.png new file mode 100644 index 000000000..261d50c57 Binary files /dev/null and b/platform-enterprise/monitoring/_images/monitoring_fastqc_processes.png differ diff --git a/platform-enterprise/monitoring/_images/monitoring_general.png b/platform-enterprise/monitoring/_images/monitoring_general.png new file mode 100644 index 000000000..61715cde6 Binary files /dev/null and b/platform-enterprise/monitoring/_images/monitoring_general.png differ diff --git a/platform-enterprise/monitoring/_images/monitoring_load.png b/platform-enterprise/monitoring/_images/monitoring_load.png new file mode 100644 index 000000000..51fac2bf7 Binary files /dev/null and b/platform-enterprise/monitoring/_images/monitoring_load.png differ diff --git a/platform-enterprise/monitoring/_images/monitoring_metrics.png b/platform-enterprise/monitoring/_images/monitoring_metrics.png new file mode 100644 index 000000000..2a7cbfcf0 Binary files /dev/null and b/platform-enterprise/monitoring/_images/monitoring_metrics.png differ diff --git a/platform-enterprise/monitoring/_images/monitoring_overview.png b/platform-enterprise/monitoring/_images/monitoring_overview.png new file mode 100644 index 000000000..52b2e64b6 Binary files /dev/null and b/platform-enterprise/monitoring/_images/monitoring_overview.png differ diff --git a/platform-enterprise/monitoring/_images/monitoring_reads.png b/platform-enterprise/monitoring/_images/monitoring_reads.png new file mode 100644 index 000000000..1c169bbdc Binary files /dev/null and b/platform-enterprise/monitoring/_images/monitoring_reads.png differ diff --git a/platform-enterprise/monitoring/_images/monitoring_search.png b/platform-enterprise/monitoring/_images/monitoring_search.png new file mode 100644 index 000000000..9e1b7909c Binary files /dev/null and b/platform-enterprise/monitoring/_images/monitoring_search.png differ diff --git a/platform-enterprise/monitoring/_images/monitoring_search_keyword_suggestions.png 
b/platform-enterprise/monitoring/_images/monitoring_search_keyword_suggestions.png new file mode 100644 index 000000000..889cd257f Binary files /dev/null and b/platform-enterprise/monitoring/_images/monitoring_search_keyword_suggestions.png differ diff --git a/platform-enterprise/monitoring/_images/monitoring_sharing1.png b/platform-enterprise/monitoring/_images/monitoring_sharing1.png new file mode 100644 index 000000000..5a5f7f3f8 Binary files /dev/null and b/platform-enterprise/monitoring/_images/monitoring_sharing1.png differ diff --git a/platform-enterprise/monitoring/_images/monitoring_sharing2.png b/platform-enterprise/monitoring/_images/monitoring_sharing2.png new file mode 100644 index 000000000..57d6fdbd3 Binary files /dev/null and b/platform-enterprise/monitoring/_images/monitoring_sharing2.png differ diff --git a/platform-enterprise/monitoring/_images/monitoring_sharing3.png b/platform-enterprise/monitoring/_images/monitoring_sharing3.png new file mode 100644 index 000000000..77522b735 Binary files /dev/null and b/platform-enterprise/monitoring/_images/monitoring_sharing3.png differ diff --git a/platform-enterprise/monitoring/_images/monitoring_star.png b/platform-enterprise/monitoring/_images/monitoring_star.png new file mode 100644 index 000000000..171416627 Binary files /dev/null and b/platform-enterprise/monitoring/_images/monitoring_star.png differ diff --git a/platform-enterprise/monitoring/_images/monitoring_status.png b/platform-enterprise/monitoring/_images/monitoring_status.png new file mode 100644 index 000000000..270b9bc39 Binary files /dev/null and b/platform-enterprise/monitoring/_images/monitoring_status.png differ diff --git a/platform-enterprise/monitoring/_images/monitoring_task_command.png b/platform-enterprise/monitoring/_images/monitoring_task_command.png new file mode 100644 index 000000000..f0fb61a6e Binary files /dev/null and b/platform-enterprise/monitoring/_images/monitoring_task_command.png differ diff --git a/platform-enterprise/monitoring/_images/monitoring_task_exec_log.png b/platform-enterprise/monitoring/_images/monitoring_task_exec_log.png new file mode 100644 index 000000000..be965dfbf Binary files /dev/null and b/platform-enterprise/monitoring/_images/monitoring_task_exec_log.png differ diff --git a/platform-enterprise/monitoring/_images/monitoring_task_resources.png b/platform-enterprise/monitoring/_images/monitoring_task_resources.png new file mode 100644 index 000000000..6b5705959 Binary files /dev/null and b/platform-enterprise/monitoring/_images/monitoring_task_resources.png differ diff --git a/platform-enterprise/monitoring/aggregate_stats.mdx b/platform-enterprise/monitoring/aggregate_stats.mdx new file mode 100644 index 000000000..43217f424 --- /dev/null +++ b/platform-enterprise/monitoring/aggregate_stats.mdx @@ -0,0 +1,23 @@ +--- +title: Aggregate stats & load +headline: "Aggregate stats and resources" +description: "Statistics and resource usage of Nextflow pipelines executed through Tower." +--- + +The **Aggregate stats** panel displays a real-time summary of the resources used by the workflow. These include total running time ('wall time'), aggregated CPU time (CPU hours), memory usage (GB hours), data i/o and cost. + +### Estimated cost + +Note that the cost estimate in Tower is a heuristic estimation of computation-only cost and is not intended to be a replacement for your cloud provider tooling (such as AWS Cost Explorer). Tower uses a database of costs for all cloud instances of AWS and Google Cloud in all regions and zones. 
This estimate does not currently take storage or associated network costs into account. + +The addition of [Resource labels](../resource-labels/overview.mdx) to your compute environments provides additional cost tracking through annotation of the actual cloud resources consumed by a run. + +![](./_images/monitoring_aggregate_stats.png) + +### Load and Utilization + +As processes are being submitted to the compute environment, the **Load** monitors how many cores and tasks are currently being used. For the cores gauge chart, the denominator is the maximum number of cores that have already been used at that moment during the execution of that specific pipeline. + +**Utilization** is calculated for memory and CPUs. This is the average value across all tasks and is calculated by dividing the memory (or CPUs) usage by the memory (or CPUs) requested. + +![](./_images/monitoring_load.png) diff --git a/platform-enterprise/monitoring/execution.mdx b/platform-enterprise/monitoring/execution.mdx new file mode 100644 index 000000000..c8e0961e2 --- /dev/null +++ b/platform-enterprise/monitoring/execution.mdx @@ -0,0 +1,29 @@ +--- +title: Execution details & logs +headline: "Monitoring & Logs" +description: "Monitoring a Nextflow pipeline executed through Tower." +--- + +### Run execution details + +Selecting a workflow run from the **Runs** tab will display the workflow details. This view contains: + +- [Run information](#run-information) with command-line, parameters, configuration, and execution logs in real-time. +- [Summary and status](./summary.mdx) section. +- List of pipeline [processes](./processes.mdx). +- [Aggregated stats](./aggregate_stats.mdx) and [load](./aggregate_stats.mdx#load-and-utilization). +- Detailed list of [individual tasks](./tasks.mdx#task-table) and [metrics](./tasks.mdx#resource-metrics). + +### Run information + +This section is composed of several tabs containing details about the Nextflow execution: + +- The Nextflow **Command line** that was executed. + +- The **Parameters** that were provided to the pipeline (taken from the configuration `params` scope). + +- The **Configuration** files as well as the final resolved configuration. + +- The **Execution log** from the main Nextflow process, which is updated in real time. + + ![](./_images/monitoring_exec_log.png) diff --git a/platform-enterprise/monitoring/overview.mdx b/platform-enterprise/monitoring/overview.mdx new file mode 100644 index 000000000..257d90ffe --- /dev/null +++ b/platform-enterprise/monitoring/overview.mdx @@ -0,0 +1,111 @@ +--- +title: Runs Overview +headline: "Monitoring Pipelines" +description: "Guide to monitoring Nextflow pipelines executed through Tower." +--- + +Jobs that have been submitted with Tower can be monitored wherever you have an internet connection. + +The **Runs** tab contains all previous jobs executions. Each new or resumed job will be given a random name e.g: `grave_williams`. + +![](./_images/monitoring_overview.png) + +The colors signify the completion status: + +- **Blue** are running. +- **Green** are successfully executed. +- **Red** are successfully executed where at least one task failed with a "terminate" error strategy. +- **Grey** are jobs that were forced to stop during execution. + +Selecting any particular run from the panel will display that run's execution details. 
+ +### All runs view + +**Available from version 22.4.0** + +The **All runs** page, accessed from the top right avatar menu, provides a comprehensive overview of the runs accessible to a user across the entire Tower instance. This facilitates overall status monitoring and early detection of execution issues from a single view split across organizations and workspaces. + +### Search + +Our integrated search covers all workflow runs inside a workspace, making it easy to retrieve runs with complex queries. +To search and filter the runs in a workspace, the user can write a search query in the "Search workflow" textbox. + +The search text is interpreted by identifying all substrings formatted as `keyword:value` (this only applies to the valid keywords shown below), combining all the rest into a single `Freetext` string, and then using all these search criteria to filter the runs. + +An example of a complex search query is the following: + +`rnaseq username:john_doe status:succeeded after:2022-02-20`. + +This string will retrieve all runs from the workspace that: + +- Ended successfully (`status:succeeded`) +- **AND** have been launched by user john_doe (`username:john_doe`) +- **AND** include "rnaseq" in the data fields covered by the free text search (e.g. the run name includes rnaseq) +- **AND** were submitted after February 20, 2022. + +The freetext search uses a **partial** match to find runs, meaning that it searches for "`*freetext*`" when looking for runs. +The `keyword:value` items, in contrast, use an **exact** match to filter runs, so `username:john` will not retrieve runs launched by `john_doe`. + +:::caution +The implemented logic combines all filtering elements with **AND** logic. This means that queries like `status:succeeded, status:submitted` are formally valid but will return an empty list, because a workflow can only have one status. +::: + +:::caution +The freetext fragments remaining after all `keyword:value` pairs have been identified are merged into a single string, including spaces, which may result in an empty list of results if there are typos. +::: + +:::note +Keywords corresponding to dates (e.g. `after` or `before`) automatically convert the input date to valid ISO-8601, taking into account the user's timezone. Partial dates are also supported, e.g. `before:2022-5` will automatically be converted to `before:2022-05-01T00:00:00.000Z` under the hood. +::: + +Tower automatically suggests matching keywords while you type into the search bar. Additionally, it suggests valid values for some keywords, when supported. +![](./_images/monitoring_search_keyword_suggestions.png) + +### Search keywords + +#### Free text + +- The search box allows searching for workflows by partial match with `project name`, `run name`, `session id` or `manifest name`. Wildcards, such as asterisks `*` placed before and after a keyword, can also be used to filter the desired workflows. + +#### Exact match keywords + +- `workflowId:`: search a workflow by its `id`. + +  E.g., `workflowId:3b7ToXeH9GvESr` + +- `runName:`: search with a specific `run name`. + +  E.g., `runName:happy_einstein` + +- `sessionId:`: search workflows with a specific `session id`. + +  E.g., `sessionId:85d35eae-21ea-4294-bc92-e35a60efa1a4` + +- `projectName:`: search workflows with a specific `project name`. + +  E.g., `projectName:nextflow-io/hello` + +- `userName:`: search workflows by a specific `user name`. 
+ +  E.g., `userName:john_doe` + +- `status:`: search workflows with a specific `status` (`submitted`, `running`, `succeeded`, `failed`, `cancelled`, `unknown`). + +  E.g., `status:succeeded` + +- `before:`: search workflows submitted before the given date (`YYYY-MM-DD` format). This includes the specified date. + +  E.g., `before:2022-04-07` + +- `after:`: search workflows submitted after the given date (`YYYY-MM-DD` format). This includes the specified date. + +  E.g., `after:2022-04-06` + +- `label:`: search workflows with a specific label (combine multiple label keywords in order to search workflows associated with all of those labels). + +  E.g., `label:label1 label:label2` + +- `is:starred`: search workflows that have been starred by the user. +  E.g., `is:starred` + +![](./_images/monitoring_search.png) diff --git a/platform-enterprise/monitoring/processes.mdx b/platform-enterprise/monitoring/processes.mdx new file mode 100644 index 000000000..0e80ad29f --- /dev/null +++ b/platform-enterprise/monitoring/processes.mdx @@ -0,0 +1,13 @@ +--- +title: Processes +headline: "Pipeline processes and status" +description: "Monitoring Nextflow pipeline processes executed through Tower" +--- + +In Nextflow, a **process** is the basic primitive used to execute a block of code. The **Processes** section shows all processes and the status of their tasks. + +In the example below, there are four tasks of the fastqc process. + + + +By selecting a process, the [**Tasks table**](./tasks.mdx) below is filtered accordingly. diff --git a/platform-enterprise/monitoring/summary.mdx b/platform-enterprise/monitoring/summary.mdx new file mode 100644 index 000000000..c0629f3a4 --- /dev/null +++ b/platform-enterprise/monitoring/summary.mdx @@ -0,0 +1,35 @@ +--- +title: Summary & status +headline: "Run summary & job status" +description: "Monitoring run and job status of a Nextflow pipeline executed through Tower" +--- + +## General + +The General summary displays information on the environment and the job being executed: + +- Unique workflow run ID +- Workflow run name +- Date and time of job submission +- Project revision and Git commit ID +- Nextflow session ID +- Username of the launcher +- Work directory path +- Container image +- Executor +- Compute environment details +- Nextflow version + +:::tip +Hover over the compute environment with the mouse to see its full details. +::: + +![](./_images/monitoring_general.png) + +### Task status + +The **Task status** section shows the statuses of your workflow tasks in real time. The panel uses the same colour code as the pipelines in the navigation bar. + +The exact meaning of each status is dependent on the execution platform. + +![](./_images/monitoring_status.png) diff --git a/platform-enterprise/monitoring/tasks.mdx b/platform-enterprise/monitoring/tasks.mdx new file mode 100644 index 000000000..d8a78bde4 --- /dev/null +++ b/platform-enterprise/monitoring/tasks.mdx @@ -0,0 +1,59 @@ +--- +title: Tasks & metrics +headline: "Task table and metrics" +description: "Monitoring tasks and metrics of a Nextflow pipeline executed through Tower." +--- + +## Task table + +The **Tasks** section shows all the tasks from an execution. + +You can use the `Search` bar to filter tasks by process name, tag, hash, status, etc. + +Selecting a status in the **Status** section filters the task table, for example, clicking the _CACHED_ card in the **Status** column. + +![](./_images/monitoring_cached.png) + +Selecting a `process` in the **Processes** section above will filter all tasks for that specific process. 
+ +![](./_images/monitoring_star.png) + +Selecting a task in the task table provides specific information about the task in the **Task details** dialog. + +![](./_images/monitoring_task_command.png) + +The task details dialog has a task information tab and a task **Execution log** tab. + +### Task information + +The task information tab contains the process name and task tag in the title. The tab includes: + +- Command +- Status +- Work directory +- Environment +- Execution time +- Resources requested +- Resources used + +![](./_images/monitoring_task_resources.png) + +### Execution log + +The **Execution log** provides a real-time log of the selected task in a Nextflow execution. + +This can be very helpful for troubleshooting. It is possible to download the log files, including `stdout` and `stderr`, from your compute environment. + +![](./_images/monitoring_task_exec_log.png) + +### Resource metrics + +This section displays plots with CPU, memory, task duration, and I/O usage, grouped by process. + +These metrics can be used to profile an execution to ensure that the correct amount of resources is being requested for each process. + +![](./_images/monitoring_metrics.png) + +:::tip +Hover the mouse over the box plots to display more details. +::: diff --git a/platform-enterprise/orgs-and-teams/_images/pipelines_visibility.png b/platform-enterprise/orgs-and-teams/_images/pipelines_visibility.png new file mode 100644 index 000000000..d21d6c3a6 Binary files /dev/null and b/platform-enterprise/orgs-and-teams/_images/pipelines_visibility.png differ diff --git a/platform-enterprise/orgs-and-teams/_images/shared_visibility.png b/platform-enterprise/orgs-and-teams/_images/shared_visibility.png new file mode 100644 index 000000000..ce1c10bd2 Binary files /dev/null and b/platform-enterprise/orgs-and-teams/_images/shared_visibility.png differ diff --git a/platform-enterprise/orgs-and-teams/organizations.mdx b/platform-enterprise/orgs-and-teams/organizations.mdx new file mode 100644 index 000000000..330cb2ffe --- /dev/null +++ b/platform-enterprise/orgs-and-teams/organizations.mdx @@ -0,0 +1,72 @@ +--- +title: Organizations +headline: "Organizations" +description: "Create and manage organization resources." +--- + +## Overview + +Organizations are the top-level structure and contain Workspaces, Members, Teams, and Collaborators. + +### Create an organization + +To create a new organization: + +1. Navigate to [Your organizations](https://tower.nf/orgs) and select **Add Organization**. + +2. Enter a **Name** and **Full name** for your organization. + +   :::caution +   The organization name must follow a specific pattern. Refer to the UI for guidance. +   ::: + +3. Enter any other optional fields as needed: **Description**, **Location**, **Website URL** and **Logo**. + +4. Select **Add**. + +You can view the list of all **Members**, **Teams**, and **Collaborators** in an organization on the organization's page. You can also edit any of the optional fields by selecting **Edit** from the [organizations page](https://tower.nf/orgs) or by selecting the **Settings** tab from the organization's page, provided that you are an **Owner** of the organization. + +### Members + +Once an organization is created, the user who created it becomes its default owner. Additional members can also be invited or added. + +Tower provides access control for members of an organization by classifying them either as an **Owner** or a **Member**. 
Each organization can have multiple owners and members. + +:::note +**Owners** have full read/write access to modify members, teams, collaborators, and settings within an organization. **Members** are limited in their actions. +::: + +#### Create a new member + +To add a new member to an organization: + +1. Go to the **Members** tab of the organization menu. +2. Select **Invite member**. +3. Enter the email address of the user you'd like to add to the organization. + +An email invitation will be sent, which needs to be accepted by the user. Once they accept the invitation, they can switch to the organization (or organization workspace) using their workspace dropdown. + +### Collaborators + +**Collaborators** are users who are invited to an organization's workspace but are not members of that organization. As a result, their access is limited to that workspace only. + +New collaborators to an organization's workspace can be added using **Participants**. To learn more about the available access levels for **Participants**, refer to the [participant roles](./workspace-management.mdx#participant-roles) section. + +:::note +**Collaborators** can only be added from a workspace. For more information, see [workspace management](./workspace-management.mdx#create-a-new-workspace). +::: + +### Teams + +**Teams** allow the organization **owners** to group members and collaborators together into a single unit and to manage them as a whole. + +#### Create a new team + +To create a new team within an organization: + +1. Go to the **Teams** tab of the organization menu. +2. Select **New team**. +3. Enter the **Name** of the team. +4. Optionally, add the **Description** and the team's **Avatar**. +5. For the newly created team, select **View**. +6. Select **Add team member** and enter the names of the organization members or collaborators to add. diff --git a/platform-enterprise/orgs-and-teams/overview.mdx b/platform-enterprise/orgs-and-teams/overview.mdx new file mode 100644 index 000000000..c0c19a501 --- /dev/null +++ b/platform-enterprise/orgs-and-teams/overview.mdx @@ -0,0 +1,25 @@ +--- +title: Organizations and Teams Overview +headline: "Organizations and Teams" +description: "Create and manage teams and resources for an organization." +--- + +## Overview + +Nextflow Tower simplifies the development and execution of workflows by providing a centralized interface for managing users and resources, along with ready-to-launch workflows for users. This is achieved through the context of [workspaces](../getting-started/workspace.mdx). + +### Organization resources + +Tower allows the creation of multiple organizations, each of which can contain multiple workspaces with shared users and resources. This allows any organization to customize and organize the usage of resources while maintaining an access control layer for users associated with a workspace. + +- For further information on organizations, see [Organizations](./organizations.mdx). + +- For further information on organization workspaces, see [Workspace management](./workspace-management.mdx). + +### Organization users + +Any user can be added or removed from an organization or workspace and can be allocated a specific access role within that workspace. + +Teams provide a way for organizations to group users and participants, such as `workflow-developers` or `analysts`, and apply access control for all users within a team. 
+ +For further information on user and team creation, see [User management](./organizations.mdx#Members). diff --git a/platform-enterprise/orgs-and-teams/shared-workspaces.mdx b/platform-enterprise/orgs-and-teams/shared-workspaces.mdx new file mode 100644 index 000000000..a08c4eb09 --- /dev/null +++ b/platform-enterprise/orgs-and-teams/shared-workspaces.mdx @@ -0,0 +1,53 @@ +--- +title: Shared workspaces +headline: "Shared workspace" +description: "Create and manage shared workspaces and resources in an organization." +--- + +## Overview + +Nextflow Tower introduces the concept of shared workspaces as a solution for synchronization and resource sharing within an organization. + +A shared workspace enables the creation of pipelines in a centralized location, making them accessible to all members of an organization. + +The benefits of using a shared workspace within an organization include: + +- **Define once and share everywhere**: Set up shared resources once and automatically share them across the organization. + +- **Centralize the management of key resources**: Organization administrators can ensure the correct pipeline configuration is used in all areas of an organization without needing to replicate pipelines across multiple workspaces. + +- **Immediate update adoption**: Updated parameters for a shared pipeline become immediately available across the entire organization, reducing the risk of pipeline discrepancies. + +- **Computational resource provision**: Pipelines in shared workflows can be shared along with the required computational resources. This eliminates the need to duplicate resource setup in individual workspaces across the organization. Shared workspaces in Tower centralize and simplify resource sharing within an organization. + +### Create a shared workspace + +Creating a shared workspace is similar to the creation of a private workspace, with the exception of the **Visibility** option, which must be set to **Shared**. + +![](./_images/shared_visibility.png) + +### Create a shared pipeline + +When creating a pipeline within a shared workspace, associating it with a [compute environment](../compute-envs/overview.mdx) is optional. + +If a compute environment from the shared workspace is associated with the pipeline, it will be available to users in other workspaces who can launch the shared pipeline using the provided environment by default. + +### Use shared pipelines from a private workspace + +Once a pipeline is set up in a shared workspace and associated with a compute environment within that shared workspace, any user can launch the pipeline from a private workspace using the shared workspace's compute environment. This eliminates the need for users to replicate shared compute environments in their private workspaces. + +:::note +The shared compute environment will not be available to launch other pipelines limited to that specific private workspace. +::: + +If a pipeline from a shared workspace is shared **without** an associated compute environment, users from other workspaces can run it from their local workspaces. By default, the **primary** compute environment of the local workspace will be selected. + +### Make shared pipelines visible in a private workspace + +To view pipelines from shared workspaces, set the **Filter -> Pipelines from** option to **This and shared workspaces** on the [Launchpad](../launch/launchpad.mdx). + +:::note +Currently, the pipelines from _all_ shared workspaces are visible when the visibility is set to "Shared workspaces". 
+::: + +![](./_images/pipelines_visibility.png) diff --git a/platform-enterprise/orgs-and-teams/workspace-management.mdx b/platform-enterprise/orgs-and-teams/workspace-management.mdx new file mode 100644 index 000000000..8c81ce6b3 --- /dev/null +++ b/platform-enterprise/orgs-and-teams/workspace-management.mdx @@ -0,0 +1,69 @@ +--- +title: Workspace management +headline: "Workspace management" +description: "Manage users and teams for an organization." +--- + +## Overview + +**Organization workspaces** extend the functionality of [user workspaces](../getting-started/workspace.mdx) by adding the ability to fine-tune access levels for specific members, collaborators, or teams. This is achieved by managing **participants** in the organization workspaces. + +Organizations consist of members, while workspaces consist of participants. + +:::note +A workspace participant may be a member of the workspace organization or a collaborator within that workspace only. Collaborators count toward the total number of workspace participants. See [Usage limits](../limits/limits.mdx). +::: + +### Create a new workspace + +Organization owners and admins can create a new workspace within an organization: + +1. Go to the **Workspaces** tab of the organization page. +2. Select **Add Workspace**. +3. Enter the **Name** and **Full name** for the workspace. +4. Optionally, add a **Description** for the workspace. +5. Under **Visibility**, select either **Private** or **Shared**. Private visibility means that workspace pipelines are only accessible to workspace participants. +6. Select **Add**. + +:::tip +Optional workspace fields can be modified after workspace creation, either by using the **Edit** option on the workspace listing for an organization or by accessing the **Settings** tab within the workspace page, provided that you are the **Owner** of the workspace. +::: + +Apart from the **Participants** tab, the organization workspace is similar to the **user workspace**. As such, the relation to [runs](../launch/launch.mdx), [pipeline actions](../pipeline-actions/overview.mdx), [compute environments](../compute-envs/overview.mdx) and [credentials](../credentials/overview.mdx) is the same. + +### Add a new participant + +To add a new participant to a workspace: + +1. Go to the **Participants** tab in the workspace menu. +2. Select **Add participant**. +3. Enter the **Name** of the new participant. +4. Optionally, update the participant **role**. For more information on **roles**, see [participant roles](#participant-roles). + +:::tip +A new workspace participant can be an existing organization member, team, or collaborator. +::: + +### Participant roles + +Organization owners can assign role-based access levels to any of the workspace **participants** in an organization workspace. + +:::tip +It is also possible to group **members** and **collaborators** into **teams** and apply a role to that team. Members and collaborators inherit the access role of the team. +::: + +There are five roles available for every workspace participant. + +1. **Owner**: The participant has full permissions for all resources within the workspace, including the workspace settings. + +2. **Admin**: The participant has full permissions for resources associated with the workspace. They can create, modify, and delete pipelines, compute environments, actions, and credentials. They can add or remove users from the workspace but cannot access the workspace settings. + +3. 
**Maintain**: The participant can launch pipelines and modify pipeline executions (e.g., they can change the pipeline launch compute environments, parameters, pre/post-run scripts, and Nextflow configuration) and create new pipelines in the Launchpad. Users with maintain permissions cannot modify compute environments and credentials. + +4. **Launch**: The participant can launch pipelines and modify the pipeline input/output parameters in the Launchpad. They cannot modify the launch configuration or other resources. + +5. **View**: The participant can view workspace pipelines and runs in read-only mode. + +### Workspace run monitoring + +To allow users executing pipelines from the command-line to share their runs with a given workspace, see [Getting started](../getting-started/usage.mdx). diff --git a/platform-enterprise/pipeline-actions/_images/actions_access_tokens.png b/platform-enterprise/pipeline-actions/_images/actions_access_tokens.png new file mode 100644 index 000000000..f0d6cef61 Binary files /dev/null and b/platform-enterprise/pipeline-actions/_images/actions_access_tokens.png differ diff --git a/platform-enterprise/pipeline-actions/_images/actions_created.png b/platform-enterprise/pipeline-actions/_images/actions_created.png new file mode 100644 index 000000000..247701849 Binary files /dev/null and b/platform-enterprise/pipeline-actions/_images/actions_created.png differ diff --git a/platform-enterprise/pipeline-actions/_images/actions_endpoint.png b/platform-enterprise/pipeline-actions/_images/actions_endpoint.png new file mode 100644 index 000000000..bb7c0e0f1 Binary files /dev/null and b/platform-enterprise/pipeline-actions/_images/actions_endpoint.png differ diff --git a/platform-enterprise/pipeline-actions/_images/actions_githook.png b/platform-enterprise/pipeline-actions/_images/actions_githook.png new file mode 100644 index 000000000..3b86ab05c Binary files /dev/null and b/platform-enterprise/pipeline-actions/_images/actions_githook.png differ diff --git a/platform-enterprise/pipeline-actions/_images/actions_new.png b/platform-enterprise/pipeline-actions/_images/actions_new.png new file mode 100644 index 000000000..6d7ab482d Binary files /dev/null and b/platform-enterprise/pipeline-actions/_images/actions_new.png differ diff --git a/platform-enterprise/pipeline-actions/_images/actions_new_token.png b/platform-enterprise/pipeline-actions/_images/actions_new_token.png new file mode 100644 index 000000000..d03df6652 Binary files /dev/null and b/platform-enterprise/pipeline-actions/_images/actions_new_token.png differ diff --git a/platform-enterprise/pipeline-actions/_images/actions_params.png b/platform-enterprise/pipeline-actions/_images/actions_params.png new file mode 100644 index 000000000..871f7dc81 Binary files /dev/null and b/platform-enterprise/pipeline-actions/_images/actions_params.png differ diff --git a/platform-enterprise/pipeline-actions/_images/actions_tower_hook.png b/platform-enterprise/pipeline-actions/_images/actions_tower_hook.png new file mode 100644 index 000000000..60ef64309 Binary files /dev/null and b/platform-enterprise/pipeline-actions/_images/actions_tower_hook.png differ diff --git a/platform-enterprise/pipeline-actions/_images/actions_tower_hook_params.png b/platform-enterprise/pipeline-actions/_images/actions_tower_hook_params.png new file mode 100644 index 000000000..1bbb8820f Binary files /dev/null and b/platform-enterprise/pipeline-actions/_images/actions_tower_hook_params.png differ diff --git 
a/platform-enterprise/pipeline-actions/overview.mdx b/platform-enterprise/pipeline-actions/overview.mdx new file mode 100644 index 000000000..a75db9345 --- /dev/null +++ b/platform-enterprise/pipeline-actions/overview.mdx @@ -0,0 +1,66 @@ +--- +title: "Pipeline actions overview" +description: "Automating Nextflow pipeline executions through pipeline actions and webhooks with Nextflow Tower." +--- + +## Overview + +Pipeline actions allow you to launch pipelines based on events. + +Tower currently offers support for native **GitHub webhooks** and a general **Tower webhook** that can be invoked programmatically. Support for Bitbucket and GitLab is coming soon. + +### GitHub webhooks + +A **GitHub webhook** listens for any changes made in the pipeline repository. When a change occurs, Tower triggers the launch of the pipeline automatically. + +To create a new **Pipeline action**, select the **Actions** tab and select **Add Action**. + +1. Enter a **Name** for your Action. + +2. Select **GitHub webhook** as the **Event source**. + +   ![](./_images/actions_githook.png) + +3. Select the **Compute environment** where the pipeline will be executed. + +4. Select the **Pipeline to launch** and (optionally) the **Revision number**. + +5. Enter the **Work directory**, the **Config profiles**, and the **Pipeline parameters**. + +6. Select **Add**. + +   ![](./_images/actions_params.png) + +The pipeline action is now set up. When a new commit occurs for the selected repository and revision, an event will be triggered in Tower and the pipeline will be launched. + +![](./_images/actions_created.png) + +### Tower launch hooks + +A **Tower launch hook** creates a custom endpoint URL which can be used to trigger the execution of your pipeline programmatically from a script or web service. + +To create a new **Pipeline action**, select the **Actions** tab and select **Add Action**. + +1. Enter a **Name** for your Action. + +2. Select **Tower launch hook** as the event source. + +   ![](./_images/actions_tower_hook.png) + +3. Select the **Compute environment** to execute your pipeline. + +4. Enter the **Pipeline to launch** and (optionally) the **Revision number**. + +5. Enter the **Work directory**, the **Config profiles**, and the **Pipeline parameters**. + +6. Select **Add**. + +   ![](./_images/actions_tower_hook_params.png) + +The pipeline action has been created, and the new endpoint can be used to programmatically launch the corresponding pipeline. The snippet below shows an example `curl` command with the authentication token. + +![](./_images/actions_endpoint.png) + +When you create a **Tower launch hook**, you also create an **access token** for launching pipelines through Tower. Access tokens can be managed on the [tokens page](https://tower.nf/tokens), which is also accessible from the navigation menu. 
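+For reference, a programmatic trigger from the command line generally looks like the sketch below. This is illustrative only: replace the placeholder endpoint with the URL shown in your action details and the placeholder token with one of your access tokens, and note that the exact payload accepted may vary between Tower versions.
+
+```bash
+# Illustrative sketch with placeholder values, not a copy-paste command:
+#   <LAUNCH_HOOK_ENDPOINT> - the endpoint URL displayed for your action
+#   <ACCESS_TOKEN>         - a token created on the tokens page
+# The empty JSON body is a minimal assumed payload; use the exact snippet
+# displayed in the action details as the authoritative reference.
+curl -X POST "<LAUNCH_HOOK_ENDPOINT>" \
+  -H "Content-Type: application/json" \
+  -H "Authorization: Bearer <ACCESS_TOKEN>" \
+  --data '{}'
+```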
+ +![](./_images/actions_new_token.png) diff --git a/platform-enterprise/pipeline-schema/_images/paste_pipeline_sample.png b/platform-enterprise/pipeline-schema/_images/paste_pipeline_sample.png new file mode 100644 index 000000000..b7c5ae51a Binary files /dev/null and b/platform-enterprise/pipeline-schema/_images/paste_pipeline_sample.png differ diff --git a/platform-enterprise/pipeline-schema/_images/paste_pipeline_schema.png b/platform-enterprise/pipeline-schema/_images/paste_pipeline_schema.png new file mode 100644 index 000000000..477a2a105 Binary files /dev/null and b/platform-enterprise/pipeline-schema/_images/paste_pipeline_schema.png differ diff --git a/platform-enterprise/pipeline-schema/_images/pipeline_schema_form.png b/platform-enterprise/pipeline-schema/_images/pipeline_schema_form.png new file mode 100644 index 000000000..63fbbebc2 Binary files /dev/null and b/platform-enterprise/pipeline-schema/_images/pipeline_schema_form.png differ diff --git a/platform-enterprise/pipeline-schema/_images/pipeline_schema_overview.png b/platform-enterprise/pipeline-schema/_images/pipeline_schema_overview.png new file mode 100644 index 000000000..b7c5ae51a Binary files /dev/null and b/platform-enterprise/pipeline-schema/_images/pipeline_schema_overview.png differ diff --git a/platform-enterprise/pipeline-schema/overview.mdx b/platform-enterprise/pipeline-schema/overview.mdx new file mode 100644 index 000000000..c6bde1a52 --- /dev/null +++ b/platform-enterprise/pipeline-schema/overview.mdx @@ -0,0 +1,31 @@ +--- +title: Pipeline Schema +headline: "Pipeline Schema" +description: "A brief introduction to pipeline schema." +--- + +## Overview + +Pipeline schema files describe the structure and validation constraints of your workflow parameters. They are used to validate parameters before launch to prevent software or pipelines from failing in unexpected ways at runtime. + +You can populate the parameters in the pipeline by uploading a YAML or JSON file, or in the Tower UI. Tower uses your pipeline schema to build a bespoke launchpad parameters form. + +See [nf-core/rnaseq](https://github.com/nf-core/rnaseq/blob/e049f51f0214b2aef7624b9dd496a404a7c34d14/nextflow_schema.json) as an example of the pipeline parameters that can be represented by a JSON schema file. + +### Building pipeline schema files + +The pipeline schema is based on [json-schema.org](https://json-schema.org/) syntax, with some additional conventions. While you can create your pipeline schema manually, we highly recommend the use of [nf-core tools](https://nf-co.re/tools/#pipeline-schema), a toolset for developing Nextflow pipelines built by the nf-core community. + +When you run the `nf-core schema build` command in your pipeline root directory, the tool collects your pipeline parameters and gives you interactive prompts about missing or unexpected parameters. If no existing schema file is found, the tool creates one for you. `schema build` commands include the option to validate and lint your schema file according to best practice guidelines from the nf-core community. + +### Customizing pipeline schema + +Once the skeleton pipeline schema file has been built with `nf-core schema build`, the command-line tool will prompt you to open a [graphical schema editor](https://nf-co.re/pipeline_schema_builder) on the nf-core website. + +![nf-core schema builder interface](./_images/pipeline_schema_overview.png) + +Leave the command-line tool running in the background; it checks the status of your schema on the website. 
When you select Finished on the schema editor page, your changes are saved to the schema file locally. + +:::note +The schema builder is created by the nf-core community, but can be used with any Nextflow pipeline. +::: diff --git a/platform-enterprise/reports/_images/reports_config_box.png b/platform-enterprise/reports/_images/reports_config_box.png new file mode 100644 index 000000000..d99670dea Binary files /dev/null and b/platform-enterprise/reports/_images/reports_config_box.png differ diff --git a/platform-enterprise/reports/_images/reports_download.png b/platform-enterprise/reports/_images/reports_download.png new file mode 100644 index 000000000..ace4bf33c Binary files /dev/null and b/platform-enterprise/reports/_images/reports_download.png differ diff --git a/platform-enterprise/reports/_images/reports_index.png b/platform-enterprise/reports/_images/reports_index.png new file mode 100644 index 000000000..a6287417b Binary files /dev/null and b/platform-enterprise/reports/_images/reports_index.png differ diff --git a/platform-enterprise/reports/_images/reports_rendering.png b/platform-enterprise/reports/_images/reports_rendering.png new file mode 100644 index 000000000..61914bf70 Binary files /dev/null and b/platform-enterprise/reports/_images/reports_rendering.png differ diff --git a/platform-enterprise/reports/overview.mdx b/platform-enterprise/reports/overview.mdx new file mode 100644 index 000000000..3fd009cd6 --- /dev/null +++ b/platform-enterprise/reports/overview.mdx @@ -0,0 +1,91 @@ +--- +title: Reports Overview +headline: "Reports" +description: "Overview of the Tower pipeline Reports feature." +--- + +## Overview + +Most Nextflow pipelines generate reports or output files that are useful to inspect at the end of the pipeline execution. Reports may be in various formats (e.g. HTML, PDF, TXT) and typically contain quality control (QC) metrics that are important for assessing the integrity of the results. Tower has a Reports feature that allows you to visualise supported file types directly or download them via the user interface (see [Limitations](#limitations)). This saves users the time and effort of having to retrieve and visualise output files from their local storage. + +### Visualizing Reports + +Available reports are listed in a Reports tab within the Runs page. Users can select a report from the table and open or download it (see [Limitations](#limitations) for supported file types and sizes). + +![](./_images/reports_index.png) + +To open a report preview, the file must be smaller than 10MB. + +![](./_images/reports_rendering.png) + +Users can download a report directly from Tower or retrieve it using its path. Direct download is not available for reports larger than 25 MB; downloading from the path is suggested instead. + +![](./_images/reports_download.png) + +### Providing Reports + +To render reports, users need to create a Tower config file that defines the paths to a selection of output files published by the pipeline. There are two ways users can provide the Tower config file, both of which have to be in YAML format (a minimal example is shown after this list): + +1. **Pipeline repository**: If a file called _tower.yml_ exists in the root of the pipeline repository then this will be fetched automatically before the pipeline execution. +2. **Tower UI**: Providing the YAML definition within the _Advanced options > Tower config file_ box when: +   1. Creating a Pipeline in the Launchpad +   2. Amending the Launch settings when launching a Pipeline (users with the _Maintain_ role only). 
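+For illustration, a minimal _tower.yml_ could look like the following sketch. The path patterns and display names here are hypothetical and should be replaced with files actually published by your pipeline; the full syntax is described in the Reports implementation section below.
+
+```yaml
+# Hypothetical example - adjust the path patterns and display names
+# to match the files your pipeline publishes.
+reports:
+  "**/multiqc_report.html":
+    display: "MultiQC report"
+  "**/samplesheet.csv":
+    display: "Validated sample sheet"
+    mimeType: "text/csv"
+```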
+ +:::caution +Any configuration provided in the Tower UI will completely override that which is supplied via the pipeline repository. +::: + +![](./_images/reports_config_box.png) + +### Reports implementation + +Pipeline Reports need to be specified using the following YAML syntax: + +```yaml +reports: +  <path pattern>: +    display: text to display (required) +    mimeType: file mime type (optional) +``` + +### Path pattern + +Only published files (using the Nextflow `publishDir` directive) are possible report candidate files. The _path pattern_ is used to match published files to a report entry. It can be a partial path, a glob expression, or just a file name. + +Examples of valid _path patterns_ are: + +- `multiqc.html`: This will match all the published files with this name. +- `**/multiqc.html`: This is a glob expression that matches any subfolder. It is equivalent to the previous expression. +- `results/output.txt`: This will match all the `output.txt` files inside any _results_ folder. +- `*_output.tsv`: This will match any file that ends with "\_output.tsv". + +:::caution +When you use `*`, it is important to also wrap the pattern in double quotes, otherwise it is not valid YAML. +::: + +### Display + +Display defines the title that will be shown in the Tower interface. If there are multiple files that match the same pattern, an automatic suffix will be added. +The suffix is the minimum difference between all the matching paths. For example, given this report definition: + +```yaml +reports: +  "**/out/sheet.tsv": +    display: "Data sheet" +``` + +If you have the two paths `/workdir/sample1/out/sheet.tsv` and `/workdir/sample2/out/sheet.tsv`, both of them will match the path pattern and their final display names will be _Data sheet (sample1)_ and _Data sheet (sample2)_. + +### mimeType + +By default, the MIME type is deduced from the file extension, so in general you don't need to define it explicitly. Optionally, you can define it to force a particular viewer, for example showing a `txt` file as a `tsv`. It must be a valid MIME type string, otherwise it will be ignored and the extension will be used instead. + +### Limitations + +The current reports implementation limits rendering to the following formats: HTML, CSV, TSV, PDF, and TXT. + +In-page rendering/report preview is restricted to files smaller than 10MB to reduce UI load. Larger files need to be downloaded first. + +Download is restricted to files smaller than 25 MB to reduce load. Larger files need to be downloaded from the path. 
diff --git a/platform-enterprise/resource-labels/_images/ce-resource-labels.png b/platform-enterprise/resource-labels/_images/ce-resource-labels.png new file mode 100644 index 000000000..de3423379 Binary files /dev/null and b/platform-enterprise/resource-labels/_images/ce-resource-labels.png differ diff --git a/platform-enterprise/resource-labels/_images/filter_labels.png b/platform-enterprise/resource-labels/_images/filter_labels.png new file mode 100644 index 000000000..66b567ec9 Binary files /dev/null and b/platform-enterprise/resource-labels/_images/filter_labels.png differ diff --git a/platform-enterprise/resource-labels/_images/launch-resource-labels.png b/platform-enterprise/resource-labels/_images/launch-resource-labels.png new file mode 100644 index 000000000..36c7ac18d Binary files /dev/null and b/platform-enterprise/resource-labels/_images/launch-resource-labels.png differ diff --git a/platform-enterprise/resource-labels/_images/workflow-resource-labels.png b/platform-enterprise/resource-labels/_images/workflow-resource-labels.png new file mode 100644 index 000000000..5f69a7d8c Binary files /dev/null and b/platform-enterprise/resource-labels/_images/workflow-resource-labels.png differ diff --git a/platform-enterprise/resource-labels/overview.mdx b/platform-enterprise/resource-labels/overview.mdx new file mode 100644 index 000000000..20f0264aa --- /dev/null +++ b/platform-enterprise/resource-labels/overview.mdx @@ -0,0 +1,238 @@ +--- +title: Resource labels overview +headline: "Resource labels" +description: "Step-by-step instructions to set-up and use Resource labels in Tower." +--- + +## Overview + +From version 22.3.0, Tower supports applying resource labels to compute environments and other Tower elements. This offers a flexible tagging system for annotation and tracking of the cloud services consumed by a run. +Resource labels are sent to the service provider for each cloud compute environment in `key=value` format. + +Resource labels are applied to Tower elements during: + +- compute environment creation with Forge +- submission +- and execution + +### Create and apply labels + +Resource labels can be created, applied, and edited by a workspace admin or owner. When applying a label, users can select from existing labels or add new labels on the fly. + +![](./_images/ce-resource-labels.png) + +#### Resource labels applied to a compute environment + +Admins can assign a set of resource labels when creating a compute environment. +All runs executed using the compute environment will be tagged with its resource labels. +Resource labels applied to a compute environment are displayed on the compute environment details page. + +![](./_images/launch-resource-labels.png) + +Apply a label when adding a new compute environment to the workspace. + +:::caution +Once the compute environment has been created, its resource labels cannot be edited. +::: + +If a resource label is applied to a compute environment, all runs in that compute environment will inherit it. Likewise, all cloud resources generated during the workflow execution will be tagged with the same resource label. + +#### Resource labels applied to pipelines, actions, and runs + +**Available from version 22.4.0** + +Admins can override the default resource labels inherited from the compute environment when creating and editing pipelines, actions, and runs on the fly. 
The custom resource labels associated with each Tower element will propagate to the associated resources in the cloud environment without altering the default resource labels associated with the compute environment in Tower. + +When an admin adds or edits the resource labels associated with a pipeline, action, or run, the **submission and execution time** resource labels are altered. This does not affect the resource labels for resources spawned at (compute environment) **creation time**. + +For example, the resource label `name=ce1` is set during AWS Batch compute environment creation. If you create the resource label `pipeline=pipeline1` while creating a pipeline which uses the same AWS Batch compute environment, the EC2 instances associated with that compute environment still contain only the label `name=ce1`, while the Job Definitions associated with the pipeline will inherit the `pipeline=pipeline1` resource label. + +If a maintainer changes the compute environment associated with a pipeline or run, the resource labels field is updated with the resource labels from the new compute environment. + +![](./_images/workflow-resource-labels.png) + +### Search and filter with labels + +Search and filter pipelines and runs using one or more resource labels. The resource label search uses a `label:key=value` format. + +![](./_images/filter_labels.png) + +### Overview of resource labels in a workspace + +All resource labels used in a workspace can be viewed in the workspace's Settings screen. +Resource labels can only be edited or deleted by admins and only if they are not already associated with **any** Tower resource. +This includes both compute environments and runs. +The deletion of a resource label from a workspace has no influence on the cloud environment. + +![](./_images/workflow-resource-labels.png) + +### Resource label propagation to cloud environments + +:::note +You cannot assign multiple resource labels, using the same key, to the same resource — regardless of whether this option is supported by the destination cloud provider. +::: + +Resource labels are only available for cloud environments that use a resource tagging system. Tower supports AWS, Google Life Sciences, Azure, and Kubernetes — HPC compute environments do not support resource labels. + +Note that the cloud provider credentials used by Tower must have the appropriate roles or permissions to tag resources in your environment. + +When a run is executed in a compute environment with associated resource labels, Tower propagates the labels to a set of resources (listed below), while Nextflow distributes the labels for the resources spawned at runtime. + +If the compute environment is created through Forge, the compute environment will propagate the tags to the resources generated by the Forge execution. + +:::caution +Resource label propagation is one-way and not synchronized with the cloud environment. This means that Tower attaches tags to cloud resources, but is not aware if those tags are changed or deleted directly in the cloud environment. 
+::: + +### AWS + +When the compute environment is created with Forge, the following resources will be tagged using the labels associated with the compute environment: + +**Forge creation time** + +- FSx Filesystems (does not cascade to files) +- EFS Filesystems (does not cascade to files) +- Batch Compute Environment +- Batch Queue(s) +- ComputeResource (EC2 instances, excluding EBS volumes) +- Service Role +- Spot Fleet Role +- Execution Role +- Instance Profile Role + +**Submission time** + +- Jobs and Job Definitions +- Tasks (via the propagateTags parameter on Job Definitions) + +**Execution time** + +- Work Tasks (via the propagateTags parameter on Job Definitions) + +At execution time, when the jobs are submitted to Batch, the requests are set up to propagate tags to all the instances created by the head job. + +The [`forge-policy.json`](../_templates/aws-batch/forge-policy.json) file contains the permissions needed for Batch Forge-created AWS compute environments to tag AWS resources. Specifically, the required permissions are `iam:TagRole`, `iam:TagInstanceProfile`, and `batch:TagResource`. + +To view and manage the resource labels applied to AWS resources by Tower and Nextflow, navigate to the [AWS Tag Editor](https://docs.aws.amazon.com/tag-editor/latest/userguide/find-resources-to-tag.html) (as an administrative user) and follow these steps: + +1. Under **Find resources to tag**, search for the resource label key and value in the relevant search fields under **Tags**. Your search can be further refined by AWS region and resource type. Then select **Search resources**. + +2. **Resource search results** displays all the resources tagged with your given resource label key and/or value. + +To include the cost information associated with your resource labels in your AWS billing reports, follow these steps: + +1. You need to [activate](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/activating-tags.html) the associated tags in the **AWS Billing and Cost Management console**. Note that newly-applied tags may take up to 24 hours to appear on your cost allocation tags page. + +2. Once your tags are activated and displayed on your **Cost allocation tags** page in the Billing and Cost Management console, you can apply those tags when creating [cost allocation reports](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/configurecostallocreport.html#allocation-viewing). + +#### AWS limits + +- Resource label keys and values must contain a minimum of 2 and a maximum of 39 alphanumeric characters (each), separated by dashes or underscores. + +- The key and value cannot begin or end with dashes `-` or underscores `_`. + +- The key and value cannot contain a consecutive combination of `-` or `_` characters (`--`, `__`, `-_`, etc.) + +- A maximum of 25 resource labels can be applied to each resource. + +- A maximum of 100 resource labels can be used in each workspace. + +- Keys and values cannot start with `aws` or `user`, as these are reserved prefixes appended to tags by AWS. + +- Keys and values are case-sensitive in AWS. + +See [here](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions) for more information on AWS resource tagging. 
+ +### Google Batch and Google Life Sciences + +When the compute environment is created with Forge, the following resources will be tagged using the labels associated with the compute environment: + +**Submission time** + +- Job (Batch) +- RunPipeline (Life Sciences) + +**Execution time** + +- AllocationPolicy (Batch) +- VirtualMachine (Life Sciences) +- RunPipeline (Life Sciences) + +#### GCP limits + +- Resource label keys and values must contain a minimum of 2 and a maximum of 39 alphanumeric characters (each), separated by dashes or underscores. + +- The key and value cannot begin or end with dashes `-` or underscores `_`. + +- The key and value cannot contain a consecutive combination of `-` or `_` characters (`--`, `__`, `-_`, etc.) + +- A maximum of 25 resource labels can be applied to each resource. + +- A maximum of 100 resource labels can be used in each workspace. + +- Keys and values in Google Cloud Resource Manager may contain only lowercase letters. Resource labels created with uppercase characters in Tower are changed to lowercase before propagating to Google Cloud. + +See [here](https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements) for more information on Google Cloud Resource Manager labeling. + +### Azure + +:::note +The labeling system on Azure Cloud uses the term metadata to refer to resource and other labels +::: + +When creating an Azure Compute Environment through Forge, resource labels are added to the Pool parameters — this will add a set of `key=value` metadata pairs to the Azure Batch Pool. + +#### Azure limits + +- Resource label keys and values must contain a minimum of 2 and a maximum of 39 alphanumeric characters (each), separated by dashes or underscores. + +- The key and value cannot begin or end with dashes `-` or underscores `_`. + +- The key and value cannot contain a consecutive combination of `-` or `_` characters (`--`, `__`, `-_`, etc.) + +- A maximum of 25 resource labels can be applied to each resource. + +- A maximum of 100 resource labels can be used in each workspace. + +- Keys are case-insensitive, but values are case-sensitive. + +- Microsoft advises against using a non-English language in your resource labels, as this can lead to decoding progress failure while loading your VM's metadata. + +See [here](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/tag-resources?tabs=json) for more information on Azure Resource Manager tagging. + +### Kubernetes + +Both the Head pod and Work pod specs will contain the set of labels associated with the compute environment in addition to the standard labels applied by Tower and Nextflow. + +:::caution +Currently, tagging with resource labels is not available for the files created during a workflow execution. The cloud instances are the elements being tagged. +::: + +The following resources will be tagged using the labels associated with the compute environment: + +**Forge creation time** + +- Deployment +- PodTemplate + +**Submission time** + +- Head Pod Metadata + +**Execution time** + +- Run Pod Metadata + +#### Kubernetes limits + +- Resource label keys and values must contain a minimum of 2 and a maximum of 39 alphanumeric characters (each), separated by dashes or underscores. + +- The key and value cannot begin or end with dashes `-` or underscores `_`. + +- The key and value cannot contain a consecutive combination of `-` or `_` characters (`--`, `__`, `-_`, etc.) + +- A maximum of 25 resource labels can be applied to each resource. 
+ +- A maximum of 100 resource labels can be used in each workspace. + +See [here](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set) for more information on Kubernetes object labeling. diff --git a/platform-enterprise/secrets/_images/personal_secrets_and_and_credentials.png b/platform-enterprise/secrets/_images/personal_secrets_and_and_credentials.png new file mode 100644 index 000000000..c88df79ed Binary files /dev/null and b/platform-enterprise/secrets/_images/personal_secrets_and_and_credentials.png differ diff --git a/platform-enterprise/secrets/_images/secrets_creation_form.png b/platform-enterprise/secrets/_images/secrets_creation_form.png new file mode 100644 index 000000000..d4416acb5 Binary files /dev/null and b/platform-enterprise/secrets/_images/secrets_creation_form.png differ diff --git a/platform-enterprise/secrets/_images/secrets_list.png b/platform-enterprise/secrets/_images/secrets_list.png new file mode 100644 index 000000000..7d400aa13 Binary files /dev/null and b/platform-enterprise/secrets/_images/secrets_list.png differ diff --git a/platform-enterprise/secrets/_images/workspace_secrets_and_credentials.png b/platform-enterprise/secrets/_images/workspace_secrets_and_credentials.png new file mode 100644 index 000000000..0be39ed34 Binary files /dev/null and b/platform-enterprise/secrets/_images/workspace_secrets_and_credentials.png differ diff --git a/platform-enterprise/secrets/overview.mdx b/platform-enterprise/secrets/overview.mdx new file mode 100644 index 000000000..cd0fadfee --- /dev/null +++ b/platform-enterprise/secrets/overview.mdx @@ -0,0 +1,57 @@ +--- +title: Secrets Overview +headline: "Secrets" +description: "Step-by-step instructions to set up Secrets in Tower." +--- + +## Overview + +Tower uses the concept of **Secrets** to store the keys and tokens used by workflow tasks to interact with external systems, e.g. a password to connect to an external database or an API token. Tower relies on third-party secret manager services in order to maintain security between the workflow execution context and the secret container. This means that no secure data is transmitted from Tower to the Compute Environment. + +:::note +Currently, only AWS Batch or HPC batch schedulers are supported. Read more about AWS Secrets Manager [here](https://docs.aws.amazon.com/secretsmanager/index.html). +::: + +### Pipeline Secrets + +To create a Pipeline Secret, navigate to a Workspace (private or shared) and select the **Secrets** tab in the top navigation pane to access the Secrets management interface. + +![](./_images/workspace_secrets_and_credentials.png) + +All of the available Secrets are listed here, and users with the appropriate permissions (maintainer, admin, or owner) can create or update Secret values. + +![](./_images/secrets_list.png) + +The form for creating or updating a Secret is very similar to the one used for Credentials. + +![](./_images/secrets_creation_form.png) + +### Pipeline Secrets for users + +Secrets can be defined for users by clicking on your avatar in the top right corner of the Tower interface and selecting "Your Secrets". Listing, creating, and updating Secrets for users works the same way as for Secrets in a Workspace. 
+ +![](./_images/personal_secrets_and_and_credentials.png) + +:::caution +Secrets defined by a user have higher priority and will override any Secrets defined in a Workspace with the same name. +::: + +### Using Secrets in workflows + +When a new workflow is launched, all Secrets are sent to the corresponding secret manager for the Compute Environment. Nextflow will download these Secrets internally and use them when they are referenced in the pipeline code as described in the [Nextflow Secrets documentation](https://www.nextflow.io/docs/edge/secrets.html#process-secrets). + +Secrets will be automatically deleted from the secret manager when the Pipeline completes (successful or unsuccessful). + +### AWS Secrets Manager Integration + +If you are planning to use the Pipeline Secrets feature provided by Tower with the AWS Secrets Manager, the following IAM permissions should be provided: + +1. Create the AWS Batch [IAM Execution role](https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html#create-execution-role) as specified in the AWS documentation. + +2. Add the `AmazonECSTaskExecutionRolePolicy` policy and [this custom policy](../_templates/aws-batch/secrets-policy-execution-role.json) to the execution role created above. + +3. Specify the execution role ARN in the **Batch execution role** option (under **Advanced options**) when creating your Compute Environment in Tower. + +4. Add [this custom policy](../_templates/aws-batch/secrets-policy-instance-role.json) to the ECS Instance role associated with the Batch compute environment that will be used to deploy your pipelines. Replace `YOUR-ACCOUNT` and `YOUR-EXECUTION-ROLE-NAME` with the appropriate values. See [here](https://docs.aws.amazon.com/batch/latest/userguide/instance_IAM_role.html) for more details about the Instance role. + +5. Add [this custom policy](../_templates/aws-batch/secrets-policy-account.json) to your Tower IAM user (the one specified in the Tower credentials). 
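+Once the secrets and the permissions above are in place, pipeline code consumes the stored values using the standard Nextflow secrets syntax. The following is an illustrative sketch only: the process, tool, and secret names are placeholders, and the authoritative syntax is described in the Nextflow Secrets documentation linked above.
+
+```
+// Illustrative sketch: MY_API_TOKEN is a placeholder for a Pipeline Secret
+// defined in Tower (workspace or user scope); my_tool is a hypothetical command.
+process queryExternalService {
+    secret 'MY_API_TOKEN'
+
+    script:
+    """
+    my_tool --token $MY_API_TOKEN
+    """
+}
+```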
diff --git a/platform-enterprise/sidebar.json b/platform-enterprise/sidebar.json new file mode 100644 index 000000000..a69b65556 --- /dev/null +++ b/platform-enterprise/sidebar.json @@ -0,0 +1,133 @@ +{ + "platformSidebar": [ + "index", + { + "type": "category", + "label": "Getting started", + "collapsed": true, + "items": [ + "getting-started/deployment-options", + "getting-started/workspace" + ] + }, + { + "type": "category", + "label": "User guide", + "collapsed": true, + "items": [ + { + "type": "category", + "label": "Credentials", + "items": [ + "credentials/overview", + "git/overview", + { + "type": "category", + "label": "Container registry credentials", + "items": [ + "credentials/aws_registry_credentials", + "credentials/azure_registry_credentials", + "credentials/docker_hub_registry_credentials", + "credentials/google_registry_credentials", + "credentials/quay_registry_credentials" + ] + }, + "credentials/ssh_credentials", + "credentials/agent_credentials" + ] + }, + { + "type": "category", + "label": "Compute", + "items": [ + "compute-envs/overview", + { + "type": "category", + "label": "Cloud providers", + "items": [ + "compute-envs/aws-batch", + "compute-envs/azure-batch", + "compute-envs/google-cloud-batch", + "compute-envs/google-cloud-lifesciences", + "compute-envs/k8s", + "compute-envs/eks", + "compute-envs/gke" + ] + } + ] + }, + "datasets/overview", + { + "type": "category", + "label": "Launch", + "items": [ + "launch/launchpad", + "launch/advanced" + ] + }, + { + "type": "category", + "label": "Monitoring", + "items": [ + "monitoring/overview", + "dashboard/overview", + "monitoring/execution", + "monitoring/summary", + "monitoring/processes", + "monitoring/aggregate_stats", + "monitoring/tasks" + ] + }, + { + "type": "category", + "label": "Label", + "items": [ + "labels/overview", + "resource-labels/overview" + ] + }, + "pipeline-actions/overview", + "reports/overview", + "pipeline-schema/overview", + "secrets/overview", + { + "type": "category", + "label": "Software integration", + "items": [ + "supported_software/fusion/fusion", + "supported_software/dragen/overview" + ] + }, + { + "type": "category", + "label": "Administration", + "items": [ + "orgs-and-teams/overview", + "orgs-and-teams/organizations", + "orgs-and-teams/workspace-management", + "orgs-and-teams/shared-workspaces", + "administration/overview" + ] + } + ] + }, + { + "type": "category", + "label": "Developer tools", + "collapsed": true, + "items": [ + "api/overview" + ] + }, + { + "type": "category", + "label": "Reference", + "collapsed": true, + "items": [ + "data-privacy/overview", + "limits/limits" + ] + }, + "faqs" + ] +} diff --git a/platform-enterprise/supported_software/dragen/_images/dragen_ce_ami.png b/platform-enterprise/supported_software/dragen/_images/dragen_ce_ami.png new file mode 100644 index 000000000..2c5a6b994 Binary files /dev/null and b/platform-enterprise/supported_software/dragen/_images/dragen_ce_ami.png differ diff --git a/platform-enterprise/supported_software/dragen/_images/dragen_secrets_password.png b/platform-enterprise/supported_software/dragen/_images/dragen_secrets_password.png new file mode 100644 index 000000000..70d0f45f0 Binary files /dev/null and b/platform-enterprise/supported_software/dragen/_images/dragen_secrets_password.png differ diff --git a/platform-enterprise/supported_software/dragen/_images/dragen_secrets_username.png b/platform-enterprise/supported_software/dragen/_images/dragen_secrets_username.png new file mode 100644 index 000000000..461598b70 
Binary files /dev/null and b/platform-enterprise/supported_software/dragen/_images/dragen_secrets_username.png differ diff --git a/platform-enterprise/supported_software/dragen/overview.mdx new file mode 100644 index 000000000..4aaf6a63c --- /dev/null +++ b/platform-enterprise/supported_software/dragen/overview.mdx @@ -0,0 +1,79 @@ +--- +title: DRAGEN Overview +headline: "Illumina DRAGEN" +description: "Overview of DRAGEN integration with Tower." +--- + +## Illumina DRAGEN + +DRAGEN is a platform provided by Illumina that offers accurate, comprehensive, and efficient secondary analysis of next-generation sequencing (NGS) data, with a significant speed-up over tools commonly used for such tasks. + +The improved performance offered by DRAGEN comes from Illumina proprietary algorithms running on a special type of hardware accelerator called field-programmable gate arrays (FPGAs). For example, on AWS, FPGAs are available via the [F1 instance type](https://aws.amazon.com/ec2/instance-types/f1/). + +### Running DRAGEN on Nextflow Tower + +We have extended the [Batch Forge](../../compute-envs/aws-batch.mdx?h=forge#tower-forge) feature for AWS Batch to support DRAGEN. Batch Forge ensures that all of the appropriate components and settings are automatically provisioned when creating a Compute Environment for executing pipelines. + +When deploying data analysis workflows, some tasks will need to use normal instance types (e.g., for non-DRAGEN processing of samples) while others must be executed on F1 instances. If the DRAGEN feature is enabled, Batch Forge creates an additional AWS Batch compute queue that only uses F1 instances, to which DRAGEN tasks are dispatched. + +### Getting started + +To showcase this integration, we have implemented a proof-of-concept pipeline called [nf-dragen](https://github.com/seqeralabs/nf-dragen). To run it, sign in to Tower, navigate to the [Community Showcase](https://tower.nf/orgs/community/workspaces/showcase/launchpad), and select the "nf-dragen" pipeline. + +You can run this pipeline at your convenience without any extra setup. Note, however, that it will be deployed in the Compute Environment owned by the Community Showcase. + +To deploy the pipeline on your own AWS cloud infrastructure, follow the instructions in the next section. + +### Deploy DRAGEN in your own workspace + +DRAGEN is a commercial technology provided by Illumina, so you will need to purchase a license from them. To run it on Tower, obtain the following information from Illumina: + +1. DRAGEN AWS private AMI ID +2. DRAGEN license username +3. DRAGEN license password + +Batch Forge automates most of the tasks required to set up an AWS Batch Compute Environment. Follow [our guide](../../compute-envs/aws-batch.mdx) for more details. + +To enable DRAGEN acceleration, toggle the "Enable DRAGEN" option when setting up the Compute Environment via Batch Forge. + +In the "DRAGEN AMI Id" field, enter the AWS AMI ID provided to you by Illumina. + +![](./_images/dragen_ce_ami.png) + +:::caution +Ensure that the Region you select provides F1 instances.
+::: + +### Pipeline implementation & deployment + +See the [dragen.nf](https://github.com/seqeralabs/nf-dragen/blob/master/modules/local/dragen.nf) module implemented in the [nf-dragen](https://github.com/seqeralabs/nf-dragen) pipeline for reference. Any Nextflow process that runs DRAGEN must: + +1. Define `label 'dragen'` + + The `label` directive allows you to annotate a process with mnemonic identifiers of your choice. Tower uses the `dragen` label to determine which processes need to be executed on DRAGEN F1 instances. + + ``` + process DRAGEN { + label 'dragen' + + // inputs, outputs, and the DRAGEN command are defined as usual + } + ``` + + Refer to the [Nextflow label docs](https://www.nextflow.io/docs/latest/process.html?highlight=label#label) for more information. + +2. Define Secrets + + At Seqera, we use Secrets to safely encrypt sensitive information when running licensed software via Nextflow. This enables our team to use the DRAGEN software safely via the `nf-dragen` pipeline without having to worry about the setup or safe configuration of the license key. These Secrets are provided securely to the `--lic-server` option when DRAGEN runs on the CLI, to validate the license. + + In the nf-dragen pipeline, we have defined two Secrets called `DRAGEN_USERNAME` and `DRAGEN_PASSWORD`, which you can add via the Tower UI by going to _"Secrets -> Add Pipeline Secret"_: + + ![](./_images/dragen_secrets_username.png) + + ![](./_images/dragen_secrets_password.png) + + Refer to the [Secrets documentation](../../secrets/overview.mdx) for more information about this feature. + +### Limitations + +DRAGEN integration with Tower is currently only available on AWS. However, we plan to extend this functionality to other supported platforms, such as Azure, in the future. diff --git a/platform-enterprise/supported_software/fusion/fusion.mdx new file mode 100644 index 000000000..4e72fb033 --- /dev/null +++ b/platform-enterprise/supported_software/fusion/fusion.mdx @@ -0,0 +1,24 @@ +--- +title: "fusion" +description: "Fusion file system" +--- + +## Fusion file system + +Tower 22.4 adds official support for the Fusion file system. Fusion is a lightweight client that enables containerized tasks to access data in Amazon S3 (and other object stores in the future) using POSIX file access semantics. Depending on your data handling requirements, Fusion 2.0 can improve pipeline throughput and/or reduce cloud computing costs. See [here](https://seqera.io/fusion/) for more information on Fusion's features. + +### Fusion requirements + +The Fusion file system is designed to work with containerized workloads, so it requires a container-native platform to execute your pipeline. Currently, Fusion is only available in AWS Batch compute environments in Tower. + +To enable Fusion in Tower: + +- Use Nextflow version `22.10.0` or later. The latest version of Nextflow is used in Tower by default, but a particular version can be specified using `NXF_VER` in the Nextflow config file field (**Advanced options -> Nextflow config file** under Pipeline settings on the launch page); see the sketch after this list. + +- Enable the [Wave containers service](https://www.nextflow.io/docs/latest/wave.html#wave-page) during [AWS Batch](../../compute-envs/aws-batch.mdx) compute environment creation. + +- Select **Enable Fusion v2** during compute environment creation. + +- (Optional) Select **Enable fast instance storage** to make use of NVMe instance storage to further increase performance.
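For illustration only, a hypothetical **Nextflow config file** field pinning a Fusion-compatible release could look like the following (this assumes, per the first bullet above, that the field accepts an `NXF_VER` assignment on its first line; `22.10.0` is simply the minimum version noted there):

```
NXF_VER=22.10.0
```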
+ +See the [AWS Batch](../../compute-envs/aws-batch.mdx) compute environment page for detailed instructions. diff --git a/platform-enterprise_versioned_docs/version-23.3/enterprise/configuration/reverse_proxy.mdx b/platform-enterprise_versioned_docs/version-23.3/enterprise/configuration/reverse_proxy.mdx index 66d0f15b5..e26adfc8d 100644 --- a/platform-enterprise_versioned_docs/version-23.3/enterprise/configuration/reverse_proxy.mdx +++ b/platform-enterprise_versioned_docs/version-23.3/enterprise/configuration/reverse_proxy.mdx @@ -21,23 +21,23 @@ To expose your Seqera instance behind a reverse proxy, complete the following st - If your frontend container listens on `http://tower-frontend:8080` and you're using Apache HTTP as your reverse proxy, add the following lines at the end of your configuration file (replace `/myseqera/` with the URL you defined in `TOWER_BASE_PATH`): - ``` - LoadModule proxy_module modules/mod_proxy.so - LoadModule proxy_http_module modules/mod_proxy_http.so - LoadModule rewrite_module modules/mod_rewrite.so - - RewriteEngine on - RewriteRule "^/myseqera/(.*)$" http://tower-frontend:8080/$1 [P] - ProxyPassReverse "/myseqera/" http://tower-frontend:8080/ - RewriteRule "^/api/(.*)$" http://tower-frontend:8080/api/$1 [P] - ProxyPassReverse "/api/" http://tower-frontend:8080/api/ - RewriteRule "^/oauth/(.*)$" http://tower-frontend:8080/oauth/$1 [P] - ProxyPassReverse "/oauth/" http://tower-frontend:8080/oauth/ - RewriteRule "^/openapi/(.*)$" http://tower-frontend:8080/openapi/$1 [P] - ProxyPassReverse "/openapi/" http://tower-frontend:8080/openapi/ - RewriteRule "^/content/(.*)$" http://tower-frontend:8080/content/$1 [P] - ProxyPassReverse "/content/" http://tower-frontend:8080/content/ - ``` + ```sh + LoadModule proxy_module modules/mod_proxy.so + LoadModule proxy_http_module modules/mod_proxy_http.so + LoadModule rewrite_module modules/mod_rewrite.so + + RewriteEngine on + RewriteRule "^/myseqera/(.*)$" http://tower-frontend:8080/$1 [P] + ProxyPassReverse "/myseqera/" http://tower-frontend:8080/ + RewriteRule "^/api/(.*)$" http://tower-frontend:8080/api/$1 [P] + ProxyPassReverse "/api/" http://tower-frontend:8080/api/ + RewriteRule "^/oauth/(.*)$" http://tower-frontend:8080/oauth/$1 [P] + ProxyPassReverse "/oauth/" http://tower-frontend:8080/oauth/ + RewriteRule "^/openapi/(.*)$" http://tower-frontend:8080/openapi/$1 [P] + ProxyPassReverse "/openapi/" http://tower-frontend:8080/openapi/ + RewriteRule "^/content/(.*)$" http://tower-frontend:8080/content/$1 [P] + ProxyPassReverse "/content/" http://tower-frontend:8080/content/ + ``` - A similar configuration should be applied for NGINX or other reverse proxies. Redirect visits to `/api/`, `/oauth/`, `/openapi/`, and `/content/`. 
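For the NGINX case mentioned above, a minimal sketch might look like the following. This is an assumption-laden equivalent of the Apache rules, not a tested configuration: it assumes the same `tower-frontend:8080` upstream and the `/myseqera/` base path, and should be adapted to your own `TOWER_BASE_PATH` and TLS setup:

```nginx
# Hypothetical NGINX equivalent of the Apache rules above
server {
    listen 80;

    # Forwarded headers commonly expected by the backend
    proxy_set_header Host $host;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

    # Strip the base path before forwarding to the frontend
    location /myseqera/ {
        proxy_pass http://tower-frontend:8080/;
    }

    # Proxy API and auxiliary paths unchanged
    location /api/ {
        proxy_pass http://tower-frontend:8080/api/;
    }
    location /oauth/ {
        proxy_pass http://tower-frontend:8080/oauth/;
    }
    location /openapi/ {
        proxy_pass http://tower-frontend:8080/openapi/;
    }
    location /content/ {
        proxy_pass http://tower-frontend:8080/content/;
    }
}
```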
diff --git a/platform-enterprise_versioned_docs/version-25.1/enterprise/configuration/reverse_proxy.mdx b/platform-enterprise_versioned_docs/version-25.1/enterprise/configuration/reverse_proxy.mdx index 0186587dd..ce12543af 100644 --- a/platform-enterprise_versioned_docs/version-25.1/enterprise/configuration/reverse_proxy.mdx +++ b/platform-enterprise_versioned_docs/version-25.1/enterprise/configuration/reverse_proxy.mdx @@ -21,25 +21,25 @@ To expose your Seqera instance behind a reverse proxy, complete the following st - If your frontend container listens on `http://tower-frontend:8080` and you're using Apache HTTP as your reverse proxy, add the following lines at the end of your configuration file (replace `/myseqera/` with the URL you defined in `TOWER_BASE_PATH`): - ``` - LoadModule proxy_module modules/mod_proxy.so - LoadModule proxy_http_module modules/mod_proxy_http.so - LoadModule rewrite_module modules/mod_rewrite.so - - RewriteEngine on - RewriteRule "^/myseqera/(.*)$" http://tower-frontend:8080/$1 [P] - ProxyPassReverse "/myseqera/" http://tower-frontend:8080/ - RewriteRule "^/api/(.*)$" http://tower-frontend:8080/api/$1 [P] - ProxyPassReverse "/api/" http://tower-frontend:8080/api/ - RewriteRule "^/auth/(.*)$" http://tower-frontend:8080/auth/$1 [P] - ProxyPassReverse "/auth/" http://tower-frontend:8080/auth/ - RewriteRule "^/oauth/(.*)$" http://tower-frontend:8080/oauth/$1 [P] - ProxyPassReverse "/oauth/" http://tower-frontend:8080/oauth/ - RewriteRule "^/openapi/(.*)$" http://tower-frontend:8080/openapi/$1 [P] - ProxyPassReverse "/openapi/" http://tower-frontend:8080/openapi/ - RewriteRule "^/content/(.*)$" http://tower-frontend:8080/content/$1 [P] - ProxyPassReverse "/content/" http://tower-frontend:8080/content/ - ``` + ```shell + LoadModule proxy_module modules/mod_proxy.so + LoadModule proxy_http_module modules/mod_proxy_http.so + LoadModule rewrite_module modules/mod_rewrite.so + + RewriteEngine on + RewriteRule "^/myseqera/(.*)$" http://tower-frontend:8080/$1 [P] + ProxyPassReverse "/myseqera/" http://tower-frontend:8080/ + RewriteRule "^/api/(.*)$" http://tower-frontend:8080/api/$1 [P] + ProxyPassReverse "/api/" http://tower-frontend:8080/api/ + RewriteRule "^/auth/(.*)$" http://tower-frontend:8080/auth/$1 [P] + ProxyPassReverse "/auth/" http://tower-frontend:8080/auth/ + RewriteRule "^/oauth/(.*)$" http://tower-frontend:8080/oauth/$1 [P] + ProxyPassReverse "/oauth/" http://tower-frontend:8080/oauth/ + RewriteRule "^/openapi/(.*)$" http://tower-frontend:8080/openapi/$1 [P] + ProxyPassReverse "/openapi/" http://tower-frontend:8080/openapi/ + RewriteRule "^/content/(.*)$" http://tower-frontend:8080/content/$1 [P] + ProxyPassReverse "/content/" http://tower-frontend:8080/content/ + ``` - A similar configuration should be applied for NGINX or other reverse proxies. Redirect visits to `/api/`, `/oauth/`, `/openapi/`, and `/content/`. 
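The 25.1 configuration above additionally proxies the `/auth/` path, so the hypothetical NGINX sketch shown for the previous version would gain one more location block:

```nginx
    # Additional path proxied in the 25.1 configuration
    location /auth/ {
        proxy_pass http://tower-frontend:8080/auth/;
    }
```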
diff --git a/src/components/Button/styles.module.css b/src/components/Button/styles.module.css index 38588af88..2b6453810 100644 --- a/src/components/Button/styles.module.css +++ b/src/components/Button/styles.module.css @@ -8,7 +8,7 @@ border-radius: 8px; font-weight: 600; transition: all 0.2s; - text-decoration: none !important; + text-decoration: none ; cursor: pointer; &:hover .arrow { transform: translateX(3px); @@ -109,8 +109,8 @@ } .alt { &:hover { - background-color: var(--color-blu-700) !important; - border-color: var(--color-blu-700) !important; - color: #fff !important; + background-color: var(--color-blu-700) ; + border-color: var(--color-blu-700) ; + color: #fff ; } } diff --git a/src/components/Card/styles.module.css b/src/components/Card/styles.module.css index 3d0f34654..fababd755 100644 --- a/src/components/Card/styles.module.css +++ b/src/components/Card/styles.module.css @@ -5,7 +5,7 @@ background: white; border-color: var(--ifm-toc-border-color); font-size: 14px; - color: #160f26 !important; + color: #160f26 ; transition: all 0.3s; } .card:hover { @@ -36,9 +36,9 @@ html[data-theme="dark"] { & .card { background: rgba(0, 0, 0, 0.5); - color: var(--color-brand-300) !important; + color: var(--color-brand-300) ; & svg path[fill="#160F26"] { - fill: var(--color-brand-200) !important; + fill: var(--color-brand-200) ; } } & .card.platform:hover { diff --git a/src/components/Search/Search.tsx b/src/components/Search/Search.tsx index abecca9f6..5f7b8fd7f 100644 --- a/src/components/Search/Search.tsx +++ b/src/components/Search/Search.tsx @@ -1,27 +1,27 @@ import React, { createElement, useState, useEffect, useRef } from "react"; // Import the required components -import ProductItem, { setCloseSearchModalCallback } from './ProductItem'; -import { getAlgoliaResults } from '@algolia/autocomplete-js'; +import ProductItem, { setCloseSearchModalCallback } from "./ProductItem"; +import { getAlgoliaResults } from "@algolia/autocomplete-js"; // Use direct CommonJS import pattern import Autosearch from "./AlgoliaSearch"; import AiIcon from "../../theme/Navbar/Layout/SeqeraHeader/HeaderDesktop/NavItems/images/AiIcon"; import SearchIcon from "./SearchIcon"; // Import algoliasearch -import algoliasearch from 'algoliasearch'; +import algoliasearch from "algoliasearch"; -import useDocusaurusContext from '@docusaurus/useDocusaurusContext'; +import useDocusaurusContext from "@docusaurus/useDocusaurusContext"; export default function Search() { - const {siteConfig} = useDocusaurusContext(); - const algoliaConfig = siteConfig.customFields?.algolia as any || {}; + const { siteConfig } = useDocusaurusContext(); + const algoliaConfig = (siteConfig.customFields?.algolia as any) || {}; const appId = algoliaConfig.appId; const apiKey = algoliaConfig.apiKey; const envIndexName = algoliaConfig.indexName; // Create the search client const searchClient = algoliasearch(appId, apiKey); - + // Add getRecommendations method to the client to fix the linter error (searchClient as any).getRecommendations = async () => ({ results: [] }); @@ -33,19 +33,19 @@ export default function Search() { useEffect(() => { const handleKeyDown = (event: KeyboardEvent) => { // Check for Command+K (Mac) or Ctrl+K (Windows/Linux) - if ((event.metaKey || event.ctrlKey) && event.key === 'k') { + if ((event.metaKey || event.ctrlKey) && event.key === "k") { event.preventDefault(); - setIsOpen(prevIsOpen => !prevIsOpen); + setIsOpen((prevIsOpen) => !prevIsOpen); } // Close on Escape key - if (event.key === 'Escape' && isOpen) { + if 
(event.key === "Escape" && isOpen) { setIsOpen(false); } }; - window.addEventListener('keydown', handleKeyDown); + window.addEventListener("keydown", handleKeyDown); return () => { - window.removeEventListener('keydown', handleKeyDown); + window.removeEventListener("keydown", handleKeyDown); }; }, [isOpen]); @@ -54,18 +54,18 @@ export default function Search() { if (isOpen) { // Save the current scroll position const scrollY = window.scrollY; - + // Add styles to prevent scrolling on the body - document.body.style.position = 'fixed'; + document.body.style.position = "fixed"; document.body.style.top = `-${scrollY}px`; - document.body.style.width = '100%'; - + document.body.style.width = "100%"; + return () => { // Re-enable scrolling when component unmounts or modal closes - document.body.style.position = ''; - document.body.style.top = ''; - document.body.style.width = ''; - + document.body.style.position = ""; + document.body.style.top = ""; + document.body.style.width = ""; + // Restore scroll position window.scrollTo(0, scrollY); }; @@ -78,7 +78,7 @@ export default function Search() { // Small timeout to ensure the input is in the DOM setTimeout(() => { // Direct focus to the input element inside the Autosearch container - const inputElement = containerRef.current?.querySelector('input'); + const inputElement = containerRef.current?.querySelector("input"); if (inputElement) { inputElement.focus(); } @@ -90,22 +90,24 @@ export default function Search() { useEffect(() => { const handleClickOutside = (event: MouseEvent) => { // Check if the click is inside the modal or any Algolia autocomplete elements - const isInsideModal = modalRef.current && modalRef.current.contains(event.target as Node); - + const isInsideModal = + modalRef.current && modalRef.current.contains(event.target as Node); + // Check if the click is inside any Algolia autocomplete elements - const isInsideAutocomplete = - (event.target as Element)?.closest('.aa-Panel') - + const isInsideAutocomplete = (event.target as Element)?.closest( + ".aa-Panel", + ); + if (!isInsideModal && !isInsideAutocomplete) { setIsOpen(false); } }; - + if (isOpen) { - document.addEventListener('mousedown', handleClickOutside); + document.addEventListener("mousedown", handleClickOutside); } return () => { - document.removeEventListener('mousedown', handleClickOutside); + document.removeEventListener("mousedown", handleClickOutside); }; }, [isOpen]); @@ -117,13 +119,15 @@ export default function Search() { if (!modalElement) return; const focusableElements = modalElement.querySelectorAll( - 'button, [href], input, select, textarea, [tabindex]:not([tabindex="-1"])' + 'button, [href], input, select, textarea, [tabindex]:not([tabindex="-1"])', ); const firstFocusableElement = focusableElements[0] as HTMLElement; - const lastFocusableElement = focusableElements[focusableElements.length - 1] as HTMLElement; + const lastFocusableElement = focusableElements[ + focusableElements.length - 1 + ] as HTMLElement; const handleTabKey = (e: KeyboardEvent) => { - if (e.key !== 'Tab') return; + if (e.key !== "Tab") return; if (e.shiftKey) { if (document.activeElement === firstFocusableElement) { @@ -138,9 +142,9 @@ export default function Search() { } }; - modalElement.addEventListener('keydown', handleTabKey); + modalElement.addEventListener("keydown", handleTabKey); return () => { - modalElement.removeEventListener('keydown', handleTabKey); + modalElement.removeEventListener("keydown", handleTabKey); }; }, [isOpen]); @@ -148,24 +152,24 @@ export default function 
Search() { useEffect(() => { if (isOpen) { // Apply CSS to ensure dropdowns appear above the modal - const style = document.createElement('style'); - style.id = 'search-z-index-fix'; + const style = document.createElement("style"); + style.id = "search-z-index-fix"; style.innerHTML = ` .aa-Panel { - z-index: 9999 !important; + z-index: 9999 ; } .aa-DetachedOverlay { - z-index: 9998 !important; + z-index: 9998 ; } .aa-DetachedContainer { - z-index: 9999 !important; + z-index: 9999 ; } `; document.head.appendChild(style); - + return () => { // Clean up when component unmounts or modal closes - const styleElement = document.getElementById('search-z-index-fix'); + const styleElement = document.getElementById("search-z-index-fix"); if (styleElement) { styleElement.remove(); } @@ -177,144 +181,163 @@ export default function Search() { <> {isOpen && (
-
{ const aiThreadItem = { - id: 'ai-thread', - url: query ? `https://seqera.io/ask-ai?prompt=${query}` : 'https://seqera.io/ask-ai', - title: 'Start a new thread with Seqera AI', - type: 'ai-thread' + id: "ai-thread", + url: query + ? `https://seqera.io/ask-ai?prompt=${query}` + : "https://seqera.io/ask-ai", + title: "Start a new thread with Seqera AI", + type: "ai-thread", }; if (!query) { - return [{ - sourceId: 'empty-state', + return [ + { + sourceId: "empty-state", + getItems() { + return []; + }, + templates: { + header() { + return ( + + ); + }, + noResults() { + return ( +
+ Search docs or ask with Seqera AI... +
+ ); + }, + }, + }, + ]; + } + + return [ + { + sourceId: "ai-thread", getItems() { - return []; + return [aiThreadItem]; }, templates: { - header() { + item({ item }) { return ( ); }, - noResults() { - return ( -
- Search docs or ask with Seqera AI... -
- ); - } - } - }]; - } - - return [{ - sourceId: 'ai-thread', - getItems() { - return [aiThreadItem]; + }, }, - templates: { - item({ item }) { - return ( - - ); - } - } - }, - { - sourceId: 'docs', - getItems() { - return getAlgoliaResults({ - searchClient, - queries: [ - { - indexName: envIndexName, - params: { - query, - hitsPerPage: 5, - attributesToHighlight: ['*'], + { + sourceId: "docs", + getItems() { + return getAlgoliaResults({ + searchClient, + queries: [ + { + indexName: envIndexName, + params: { + query, + hitsPerPage: 5, + attributesToHighlight: ["*"], + }, }, - }, - ], - }); - }, - templates: { - item({ item, components }) { - return ; + ], + }); }, - header() { - return ( -
Documentation
- ); + templates: { + item({ item, components }) { + return ( + + ); + }, + header() { + return ( +
+ Documentation +
+ ); + }, + noResults({ state }) { + return ( +
+

+ No results for "{`${state?.query}`}" +

+
+ ); + }, }, - noResults({ state }) { - return ( -
-

No results for "{`${state?.query}`}"

-
- ); - } - } - }]; + }, + ]; }} debug={true} /> @@ -322,22 +345,38 @@ export default function Search() {
)} - + {/* Optional: Add a button to open the search */} -
setIsOpen(true)} className="md:flex items-center px-3 py-2 rounded-md text-sm text-gray-800 cursor-pointer hover:text-gray-1000 transition-all duration-100 min-w-50 content-center" - style={{ - boxShadow: '0 0 0 1px rgba(0, 0, 0, 0.25)', - height: '44px', + style={{ + boxShadow: "0 0 0 1px rgba(0, 0, 0, 0.25)", + height: "44px", }} > - - + + - Search docs... - ⌘K + Search docs... + + ⌘K +
); -} \ No newline at end of file +} diff --git a/src/components/Search/algolia-theme.css b/src/components/Search/algolia-theme.css index 128047658..12e5105dd 100644 --- a/src/components/Search/algolia-theme.css +++ b/src/components/Search/algolia-theme.css @@ -159,8 +159,8 @@ .custom-search-panel { - position: fixed !important; + position: fixed ; } .aa-DetachedContainer .aa-Panel { - position: relative !important; -} \ No newline at end of file + position: relative ; +} diff --git a/src/css/components/box.css b/src/css/components/box.css index 074505565..5ae7fc87b 100644 --- a/src/css/components/box.css +++ b/src/css/components/box.css @@ -19,3 +19,5 @@ html[data-theme="light"] { color: var(--color-brand); } } + + diff --git a/src/css/main.css b/src/css/main.css index 2fd5808ad..c9cba6a04 100644 --- a/src/css/main.css +++ b/src/css/main.css @@ -1,26 +1,28 @@ @tailwind components; @tailwind utilities; -@layer components { - .container-xl { - @apply mx-auto; - max-width: 1440px; - } - .container-lg { - @apply mx-auto; - max-width: 1248px; - } - .container-md { - @apply mx-auto; - max-width: 1008px; - } - - .container-sm { - @apply mx-auto; - max-width: 900px; - } +@layer components { + .container-xl { + @apply mx-auto; + max-width: 1440px; + } + + .container-lg { + @apply mx-auto; + max-width: 1248px; + } + + .container-md { + @apply mx-auto; + max-width: 1008px; + } + + .container-sm { + @apply mx-auto; + max-width: 900px; + } .typo-display, .typo-display span { @@ -90,7 +92,7 @@ svg.excalidraw path[fill="#fff"] { .navbar { height: 0; width: 0; - display: none !important; + display: none ; } } @@ -151,3 +153,16 @@ nav.menu .menu__list-item--collapsed .menu__link--sublist:after, color: #98a3ff; } } + +/* Allow Code Block Expansion */ +.theme-code-block pre { + max-height: none ; + height: auto ; + overflow-y: visible ; +} + +.theme-code-block code { + max-height: none ; + height: auto ; + overflow-y: visible ; +} diff --git a/src/css/misc.css b/src/css/misc.css index c46fba54e..acab1f37f 100644 --- a/src/css/misc.css +++ b/src/css/misc.css @@ -14,10 +14,10 @@ html:not(.plugin-id-platform) .navbar .dropdown { } ol li > p:first-child { - margin-top: 0 !important; + margin-top: 0 ; } ol li > p:last-child { - margin-bottom: 0 !important; + margin-bottom: 0 ; } /** @@ -40,15 +40,16 @@ Material Design Icons (SVG) } details { - background-color: var(--details-bg) !important; - border-top-width: 0 !important; - border-bottom-width: 0 !important; - border-right-width: 0 !important; - border-left-width: 5px !important; + background-color: var(--details-bg) ; + border-top-width: 0 ; + border-bottom-width: 0 ; + border-right-width: 0 ; + border-left-width: 5px ; + } details.alert > div > div { - border-top: 1px solid #000 !important; + border-top: 1px solid #000 ; } details.alert code { diff --git a/src/css/theme-colors.css b/src/css/theme-colors.css index 7b00352c2..bf2fc5869 100644 --- a/src/css/theme-colors.css +++ b/src/css/theme-colors.css @@ -79,20 +79,20 @@ html[data-theme="light"] .menu__link--sublist-caret:after { background-image: url("data:image/svg+xml,%3Csvg width='25' height='24' viewBox='0 0 25 24' fill='none' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M12.4943 10.4038L7.7174 15.1615L7.16162 14.6057L12.4943 9.29225L17.8078 14.6057L17.252 15.1615L12.4943 10.4038Z' fill='%23160F26'/%3E%3C/svg%3E%0A"); } html[data-theme="light"] .menu__list-item-collapsible--active { - background-color: var(--color-wave) !important; + background-color: var(--color-wave) ; } html[data-theme="light"] 
.menu__list-item-collapsible > a.menu__link--active { - color: var(--color-brand-1400) !important; + color: var(--color-brand-1400) ; } html[data-theme="light"] .menu__list-item-collapsible--active > a.menu__link--active { - color: white !important; + color: white ; } /* Search modal */ html[data-theme="light"] .DocSearch-Logo svg * { - fill: var(--ifm-font-color-base) !important; + fill: var(--ifm-font-color-base) ; opacity: 0.6; } diff --git a/src/modules/Homepage/Resources/Platform.tsx b/src/modules/Homepage/Resources/Platform.tsx index 89a2ab778..bc129930b 100644 --- a/src/modules/Homepage/Resources/Platform.tsx +++ b/src/modules/Homepage/Resources/Platform.tsx @@ -1,8 +1,6 @@ import React from "react"; import Link from "@docusaurus/Link"; -import platform_enterprise_latest_version from "@site/platform-enterprise_latest_version"; - type Props = {}; const Platform: React.FC = () => { @@ -33,12 +31,8 @@ const Platform: React.FC = () => {
  • For installation and configuration, the Seqera Platform{" "} - - deployment guide - {" "} - provides docs, scripts and detailed instructions. + deployment guide provides + docs, scripts and detailed instructions.
  • A{" "} diff --git a/src/modules/Homepage/index.tsx b/src/modules/Homepage/index.tsx index 03f1f8aa1..a39b7f8ba 100644 --- a/src/modules/Homepage/index.tsx +++ b/src/modules/Homepage/index.tsx @@ -7,26 +7,25 @@ import Sidebar from "../../theme/DocSidebar/Desktop"; import styles from "./styles.module.css"; -import Link from '@docusaurus/Link'; +import Link from "@docusaurus/Link"; -import CodeBlock from "@theme-original/CodeBlock"; +// import CodeBlock from "@theme-original/CodeBlock"; -import Fusion from "./images/fusion.inline.svg"; -import Nextflow from "./images/nextflow.inline.svg"; -import MultiQC from "./images/multiqc.inline.svg"; -import Platform from "./images/platform2.inline.svg"; -import Wave from "./images/wave.inline.svg"; +// import Fusion from "./images/fusion.inline.svg"; +// import Nextflow from "./images/nextflow.inline.svg"; +// import MultiQC from "./images/multiqc.inline.svg"; +// import Platform from "./images/platform2.inline.svg"; +// import Wave from "./images/wave.inline.svg"; -import Card from "../../components/Card"; -import Grid from "../../components/Grid"; +// import Card from "../../components/Card"; +// import Grid from "../../components/Grid"; -import Resources from "./Resources"; -import SearchBar from "@theme/SearchBar"; +// import Resources from "./Resources"; +// import SearchBar from "@theme/SearchBar"; -import { themes } from 'prism-react-renderer'; +// import { themes } from "prism-react-renderer"; import Button from "../../components/Button"; - const useCases = [ { title: "Basic pipeline", @@ -112,13 +111,27 @@ export default function Home(): JSX.Element {
[The body of the Home() component diff is not reproduced here; the JSX markup was lost in extraction. The recoverable text shows the homepage hero ("Documentation", "Explore our guides, documentation, and examples to build with Seqera.", "Recommended articles") and the link sections rewritten as explicit Link items: Platform (Add pipelines, Monitor runs, Automation); Studios (Studios overview, Enterprise deployment, Data Explorer); Developer tools (Platform API, Platform CLI, Nextflow CLI); MultiQC (Get started, Run MultiQC, Modules); Open source (Nextflow, Wave, MultiQC); Platform (Seqera Cloud, Seqera Enterprise, Fusion); Cloud (AWS, GCP, Azure); Support (Help center: contact support for help with your Pro or Enterprise account; Community forum: discuss topics with other users; Service status: check the status of Seqera services).]
    diff --git a/src/modules/Homepage/styles.module.css b/src/modules/Homepage/styles.module.css index b05df02dc..a813133f7 100644 --- a/src/modules/Homepage/styles.module.css +++ b/src/modules/Homepage/styles.module.css @@ -19,7 +19,7 @@ a { pre, code { - max-height: 300px !important; + max-height: 300px ; } .page { diff --git a/src/pages/platform-enterprise/index.tsx b/src/pages/platform-enterprise/index.tsx deleted file mode 100644 index 394771849..000000000 --- a/src/pages/platform-enterprise/index.tsx +++ /dev/null @@ -1,12 +0,0 @@ -import React from "react"; -import { Redirect } from "react-router-dom"; - -import platform_enterprise_latest_version from "@site/platform-enterprise_latest_version"; - -export default function Platform(): JSX.Element { - return ( - - ); -} diff --git a/src/pages/platform-enterprise/latest.tsx b/src/pages/platform-enterprise/latest.tsx index 4f0be5856..c6dbfc73c 100644 --- a/src/pages/platform-enterprise/latest.tsx +++ b/src/pages/platform-enterprise/latest.tsx @@ -4,18 +4,22 @@ import { useLocation, useHistory } from "@docusaurus/router"; import platform_enterprise_latest_version from "@site/platform-enterprise_latest_version"; export default function Platform(): JSX.Element { - const match = useLocation(); + const location = useLocation(); const history = useHistory(); - const { pathname } = match; - const actualPath = pathname.replace( - "latest", - platform_enterprise_latest_version, - ); + const { pathname } = location; + + // Only perform the replacement if the path actually contains "latest" + const containsLatest = pathname.includes("latest"); + const actualPath = containsLatest ? pathname.replace("latest", "") : pathname; - useEffect(function redirectToActualPath() { - if (typeof window === "undefined") return; - history.push(actualPath); - }, []); + useEffect( + function redirectToActualPath() { + // Only redirect if we actually changed the path + if (typeof window === "undefined" || pathname === actualPath) return; + history.push(actualPath); + }, + [pathname, actualPath, history], + ); return
    ; } diff --git a/src/theme/BlogLayout/styles.module.css b/src/theme/BlogLayout/styles.module.css index cc293fcd0..6a4079b5b 100644 --- a/src/theme/BlogLayout/styles.module.css +++ b/src/theme/BlogLayout/styles.module.css @@ -27,6 +27,6 @@ flex-wrap: nowrap; } .blogContent { - max-width: 75% !important; + max-width: 75% ; } } diff --git a/src/theme/BlogSidebar/Desktop/index.js b/src/theme/BlogSidebar/Desktop/index.js index 858dd5692..8e9744c41 100644 --- a/src/theme/BlogSidebar/Desktop/index.js +++ b/src/theme/BlogSidebar/Desktop/index.js @@ -39,7 +39,7 @@ function BlogSidebarDesktop({sidebar}) { return (