diff --git a/openmetadata-ui/src/main/resources/ui/playwright/e2e/Flow/ServiceDocPanel.spec.ts b/openmetadata-ui/src/main/resources/ui/playwright/e2e/Flow/ServiceDocPanel.spec.ts new file mode 100644 index 000000000000..b231aa39fb6c --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/playwright/e2e/Flow/ServiceDocPanel.spec.ts @@ -0,0 +1,230 @@ +/* + * Copyright 2025 Collate. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { expect, Page, test } from '@playwright/test'; +import { redirectToHomePage } from '../../utils/common'; +import { waitForAllLoadersToDisappear } from '../../utils/entity'; + +test.use({ storageState: 'playwright/.auth/admin.json' }); + +/** + * Navigates to MySQL service creation step 3 (configure connection), + * where the ServiceDocPanel is visible with code blocks and sections. 
+ */ +const goToMysqlConnectionStep = async (page: Page, serviceName: string) => { + await page.goto('/databaseServices/add-service', { + waitUntil: 'domcontentloaded', + }); + await waitForAllLoadersToDisappear(page); + await page.getByTestId('Mysql').click(); + await page.getByTestId('next-button').click(); + await page.getByTestId('service-name').fill(serviceName); + await page.getByTestId('next-button').click(); + await page.getByTestId('service-requirements').waitFor({ state: 'visible' }); +}; + +test.describe('ServiceDocPanel', () => { + test.beforeEach(async ({ page }) => { + await redirectToHomePage(page); + }); + + test.describe('Content rendering', () => { + test('should render headings not raw markdown', async ({ page }) => { + await goToMysqlConnectionStep(page, 'pw-doc-panel-headings'); + + const docPanel = page.getByTestId('service-requirements'); + + // Requirements h2 heading should render as an element, not raw "## Requirements" + await expect(docPanel.locator('h2').first()).toBeVisible(); + await expect(docPanel).not.toContainText('## Requirements'); + }); + + test('should render admonition blocks with correct class', async ({ + page, + }) => { + await goToMysqlConnectionStep(page, 'pw-doc-panel-admonition'); + + const docPanel = page.getByTestId('service-requirements'); + + // Mysql.md has $$note blocks — should render as .admonition.admonition-note + const admonition = docPanel.locator('.admonition-note').first(); + + await expect(admonition).toBeVisible(); + // Should contain actual note content, not raw "$$note" syntax + await expect(docPanel).not.toContainText('$$note'); + }); + + test('should render code blocks inside pre > code, not as raw text', async ({ + page, + }) => { + await goToMysqlConnectionStep(page, 'pw-doc-panel-codeblock'); + + const docPanel = page.getByTestId('service-requirements'); + + await expect(docPanel.locator('pre code').first()).toBeVisible(); + // Raw fence markers should not appear + await 
expect(docPanel).not.toContainText('```'); + }); + + test('should render links that open in a new tab', async ({ page }) => { + await goToMysqlConnectionStep(page, 'pw-doc-panel-links'); + + const docPanel = page.getByTestId('service-requirements'); + const externalLink = docPanel.locator('a[target="_blank"]').first(); + + await expect(externalLink).toBeVisible(); + await expect(externalLink).toHaveAttribute('href', /^https?:\/\//); + }); + + test('should render image in Mssql doc panel', async ({ page }) => { + await page.goto('/databaseServices/add-service', { + waitUntil: 'domcontentloaded', + }); + await waitForAllLoadersToDisappear(page); + await page.getByTestId('Mssql').click(); + await page.getByTestId('next-button').click(); + await page.getByTestId('service-name').fill('pw-doc-panel-mssql-img'); + await page.getByTestId('next-button').click(); + await page.getByTestId('service-requirements').waitFor({ + state: 'visible', + }); + + const docPanel = page.getByTestId('service-requirements'); + const image = docPanel.locator('img').first(); + + await expect(image).toBeVisible(); + // Verify the image loaded successfully (no broken image) + const naturalWidth = await image.evaluate( + (img: HTMLImageElement) => img.naturalWidth + ); + + expect(naturalWidth).toBeGreaterThan(0); + }); + }); + + test.describe('Section highlighting', () => { + test('should highlight section when the corresponding form field is focused', async ({ + page, + }) => { + await goToMysqlConnectionStep(page, 'pw-doc-panel-highlight'); + + const docPanel = page.getByTestId('service-requirements'); + + // No section should be highlighted initially + await expect( + docPanel.locator('section[data-highlighted="true"]') + ).toHaveCount(0); + + // Focus the username field — activeField becomes "username" + await page.locator(String.raw`#root\/username`).focus(); + + // The username section should now be highlighted + const usernameSection = docPanel.locator( + 
'section[data-id="username"][data-highlighted="true"]' + ); + + await expect(usernameSection).toBeVisible(); + }); + + test('should remove highlight from previous section when a new field is focused', async ({ + page, + }) => { + await goToMysqlConnectionStep(page, 'pw-doc-panel-highlight-switch'); + + const docPanel = page.getByTestId('service-requirements'); + + // Focus username first + await page.locator(String.raw`#root\/username`).focus(); + + await expect( + docPanel.locator('section[data-id="username"][data-highlighted="true"]') + ).toBeVisible(); + + // Focus hostPort — username section should lose highlight + await page.locator(String.raw`#root\/hostPort`).focus(); + + await expect( + docPanel.locator('section[data-id="username"][data-highlighted="true"]') + ).toHaveCount(0); + + // hostPort section should now be highlighted + await expect( + docPanel.locator('section[data-id="hostPort"][data-highlighted="true"]') + ).toBeVisible(); + }); + + test('should only ever have one section highlighted at a time', async ({ + page, + }) => { + await goToMysqlConnectionStep(page, 'pw-doc-panel-single-highlight'); + + const docPanel = page.getByTestId('service-requirements'); + + await page.locator(String.raw`#root\/username`).focus(); + await page.locator(String.raw`#root\/hostPort`).focus(); + + await expect( + docPanel.locator('section[data-highlighted="true"]') + ).toHaveCount(1); + }); + + test('should load the correct doc file for the selected service type', async ({ + page, + }) => { + await goToMysqlConnectionStep(page, 'pw-doc-panel-correct-doc'); + + const docPanel = page.getByTestId('service-requirements'); + + // MySQL doc starts with "# MySQL" + await expect(docPanel.locator('h1').first()).toContainText('MySQL'); + }); + }); + + test.describe('Code block copy button', () => { + test('should copy code block content to clipboard and show copied tooltip', async ({ + page, + context, + }) => { + await context.grantPermissions(['clipboard-read', 
'clipboard-write']); + await goToMysqlConnectionStep(page, 'pw-doc-panel-copy'); + + const docPanel = page.getByTestId('service-requirements'); + const codeBlock = docPanel.locator('pre').first(); + const copyButtonWrapper = docPanel.locator('.code-copy-button').first(); + const copyButton = docPanel.getByTestId('code-block-copy-icon').first(); + + // Hover code block to reveal the button + await codeBlock.hover(); + await expect(copyButton).toBeVisible(); + + // Verify initial state + await expect(copyButtonWrapper).toHaveAttribute('data-copied', 'false'); + + // Click and verify copied state + tooltip + await copyButton.click(); + + await expect(copyButtonWrapper).toHaveAttribute('data-copied', 'true'); + await expect(page.getByRole('tooltip')).toBeVisible(); + + // Verify clipboard is non-empty + const clipboardText = await page.evaluate(() => + navigator.clipboard.readText() + ); + + expect(clipboardText.length).toBeGreaterThan(0); + + // Verify state resets after 2s timer + await expect(copyButtonWrapper).toHaveAttribute('data-copied', 'false'); + }); + }); +}); diff --git a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Dashboard/CustomDashboard.md b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Dashboard/CustomDashboard.md index b64c868dc0a2..dc925a28c7cb 100644 --- a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Dashboard/CustomDashboard.md +++ b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Dashboard/CustomDashboard.md @@ -14,6 +14,7 @@ $$section Source Python Class Name to instantiated by the ingestion workflow. Note that it should implement the `next_record` method so that the Workflow can keep reading and sending records to the OpenMetadata API. 
+$$ $$section ### Connection Options $(id="connectionOptions") diff --git a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Dashboard/Looker.md b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Dashboard/Looker.md index 3354ffacbafb..d164ed56e428 100644 --- a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Dashboard/Looker.md +++ b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Dashboard/Looker.md @@ -47,14 +47,21 @@ $$ If we choose to inform the GitHub credentials to ingest LookML Views: +$$section #### Repository Owner $(id="repositoryOwner") The owner (user or organization) of a GitHub repository. For example, in https://github.com/open-metadata/OpenMetadata, the owner is `open-metadata`. +$$ + +$$section #### Repository Name $(id="repositoryName") The name of a GitHub repository. For example, in https://github.com/open-metadata/OpenMetadata, the name is `OpenMetadata`. +$$ + +$$section #### API Token $(id="token") Token to use the API. This is required for private repositories and to ensure we don't hit API limits. @@ -72,3 +79,5 @@ If your GitHub organization has SAML Single Sign-On (SSO) enabled, you must auth Follow these steps to authorize your token for use with SAML SSO. $$ + +$$ diff --git a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Athena.md b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Athena.md index 6475ddded029..a0896e0b6832 100644 --- a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Athena.md +++ b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Athena.md @@ -48,11 +48,9 @@ And is defined as: ``` -{% note %} - +$$note If you have external services other than glue and facing permission issues, add the permissions to the list above. - -{% /note %} +$$ You can find further information on the Athena connector in the docs. 
diff --git a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/CustomDatabase.md b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/CustomDatabase.md index cdab34ac4200..fbcdc76030bb 100644 --- a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/CustomDatabase.md +++ b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/CustomDatabase.md @@ -14,6 +14,7 @@ $$section Source Python Class Name to instantiated by the ingestion workflow. Note that it should implement the `next_record` method so that the Workflow can keep reading and sending records to the OpenMetadata API. +$$ $$section ### Connection Options $(id="connectionOptions") diff --git a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Databricks.md b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Databricks.md index 62b1229f856c..121362dd696a 100644 --- a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Databricks.md +++ b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Databricks.md @@ -16,13 +16,13 @@ To extract basic metadata (catalogs, schemas, tables, views) from Databricks, th ```sql -- Grant USE CATALOG on catalog -GRANT USE CATALOG ON CATALOG TO ``; +GRANT USE CATALOG ON CATALOG TO ''; -- Grant USE SCHEMA on schemas -GRANT USE SCHEMA ON SCHEMA TO ``; +GRANT USE SCHEMA ON SCHEMA TO ''; -- Grant SELECT on tables and views -GRANT SELECT ON TABLE TO ``; +GRANT SELECT ON TABLE TO ''; ``` ### View Definitions (Optional) @@ -31,7 +31,7 @@ To extract view definitions from `INFORMATION_SCHEMA.VIEWS`, ensure the user has ```sql -- Grant SELECT on INFORMATION_SCHEMA.VIEWS -GRANT SELECT ON VIEW information_schema.views TO ``; +GRANT SELECT ON VIEW information_schema.views TO ''; ``` ### Unity Catalog Tags (Optional) @@ -40,16 +40,16 @@ To extract tags at different levels (catalog, schema, table, column), the user n ```sql -- For catalog-level tags -GRANT SELECT ON 
TABLE system.information_schema.catalog_tags TO ``; +GRANT SELECT ON TABLE system.information_schema.catalog_tags TO ''; -- For schema-level tags -GRANT SELECT ON TABLE system.information_schema.schema_tags TO ``; +GRANT SELECT ON TABLE system.information_schema.schema_tags TO ''; -- For table-level tags -GRANT SELECT ON TABLE system.information_schema.table_tags TO ``; +GRANT SELECT ON TABLE system.information_schema.table_tags TO ''; -- For column-level tags -GRANT SELECT ON TABLE system.information_schema.column_tags TO ``; +GRANT SELECT ON TABLE system.information_schema.column_tags TO ''; ``` $$note @@ -62,10 +62,10 @@ To extract table and column-level lineage from Unity Catalog system tables, the ```sql -- For table lineage -GRANT SELECT ON TABLE system.access.table_lineage TO ``; +GRANT SELECT ON TABLE system.access.table_lineage TO ''; -- For column lineage -GRANT SELECT ON TABLE system.access.column_lineage TO ``; +GRANT SELECT ON TABLE system.access.column_lineage TO ''; ``` $$note diff --git a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/DeltaLake.md b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/DeltaLake.md index 5a9cdab981f9..3ea7869b4cdc 100644 --- a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/DeltaLake.md +++ b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/DeltaLake.md @@ -70,28 +70,28 @@ $$section In this configuration we will be pointing to the Hive Metastore database directly. -#### Hive Metastore Database ($id="metastoreDb") +### Hive Metastore Database JDBC connection to the metastore database. It should be a properly formatted database URL, which will be used in the Spark Configuration under `spark.hadoop.javax.jdo.option.ConnectionURL`. -#### Connection UserName ($id="username") +#### Connection UserName Username to use against the metastore database. 
The value will be used in the Spark Configuration under `spark.hadoop.javax.jdo.option.ConnectionUserName`. -#### Connection Password ($id="password") +#### Connection Password Password to use against metastore database. The value will be used in the Spark Configuration under `spark.hadoop.javax.jdo.option.ConnectionPassword`. -#### Connection Driver Name ($id="driverName") +#### Connection Driver Name Driver class name for JDBC metastore. The value will be used in the Spark Configuration under `spark.hadoop.javax.jdo.option.ConnectionDriverName`, e.g., `org.mariadb.jdbc.Driver`. You will need to provide the driver to the ingestion image, and pass the Class path as explained below. -#### JDBC Driver Class Path ($id="jdbcDriverClassPath") +#### JDBC Driver Class Path Class path to JDBC driver required for the JDBC connection. The value will be used in the Spark Configuration under `spark.driver.extraClassPath`. diff --git a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Doris.md b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Doris.md index 61c1c8279166..8e24f8f5cae8 100644 --- a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Doris.md +++ b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Doris.md @@ -74,3 +74,4 @@ $$ $$section ### Connection Arguments $(id="connectionArguments") Additional connection arguments such as security or protocol configs that can be sent to the service during connection. 
+$$ diff --git a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Exasol.md b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Exasol.md index 87d741994f88..a0d68e302d15 100644 --- a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Exasol.md +++ b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Exasol.md @@ -47,4 +47,5 @@ Uses Transport Layer Security (TLS) but disables the validation of the server ce #### disable-tls Does not use any Transport Layer Security (TLS). Data will be sent in plain text (no encryption). While this may be helpful in rare cases of debugging, make sure you do not use this in production. +$$ diff --git a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Hive.md b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Hive.md index f35b9dd7cf4a..c1a7e7a4965e 100644 --- a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Hive.md +++ b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Hive.md @@ -33,7 +33,7 @@ This parameter specifies the host and port of the Hive instance. This should be If you are running the OpenMetadata ingestion in a docker and your services are hosted on the `localhost`, then use `host.docker.internal:10000` as the value. $$ -$$section +$$section ### Authentication Mode $(id="auth") The auth parameter specifies the authentication method to use when connecting to the Hive server. Possible values are `LDAP`, `NONE`, `CUSTOM`, or `KERBEROS`. If you are using Kerberos authentication, you should set auth to `KERBEROS`. If you are using custom authentication, you should set auth to `CUSTOM` and provide additional options in the `authOptions` parameter. 
$$ @@ -50,6 +50,7 @@ $$ $$section ### Database Name $(id="databaseName") + In OpenMetadata, the Database Service hierarchy works as follows: ``` Database Service > Database > Schema > Table @@ -86,10 +87,11 @@ $$ ## Basic Auth +$$section ### Password $(id="password") Password to connect to Postgres/MySQL. - +$$ ## IAM Auth Config @@ -266,35 +268,38 @@ If ticked, the workflow will be able to ingest all database in the cluster. If n ## Hive Mysql Metastore Connection Details - +$$section ### Scheme $(id="scheme") SQLAlchemy driver scheme options. If you are unsure about this setting, you can use the default value. +$$ - - +$$section ### Username $(id="username") Username to connect to MySQL. This user should have access to the `INFORMATION_SCHEMA` to extract metadata. Other workflows may require different permissions -- refer to the section above for more information. +$$ - +$$section ### Host Port $(id="hostPort") This parameter specifies the host and port of the MySQL instance. This should be specified as a string in the format `hostname:port`. For example, you might set the hostPort parameter to `localhost:3306`. If you are running the OpenMetadata ingestion in a docker and your services are hosted on the `localhost`, then use `host.docker.internal:3306` as the value. +$$ - - +$$section ### Database Name $(id="databaseName") + In OpenMetadata, the Database Service hierarchy works as follows: ``` Database Service > Database > Schema > Table ``` In the case of MySQL, we won't have a Database as such. If you'd like to see your data in a database named something other than `default`, you can specify the name in this field. +$$ - - +$$section ### Database Schema $(id="databaseSchema") This is an optional parameter. When set, the value will be used to restrict the metadata reading to a single database (corresponding to the value passed in this field). When left blank, OpenMetadata will scan all the databases. 
+$$ $$section diff --git a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Mssql.md b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Mssql.md index dbcec1118a5a..839f80782f31 100644 --- a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Mssql.md +++ b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Mssql.md @@ -30,7 +30,7 @@ Make sure the SQL server that you are trying to connect is in running state. This step allow the sql server to accept remote connection request. -![remote-connection](/doc-images/Database/Mssql/remote-connection.png) +![remote-connection](/locales/doc-images/Database/Mssql/remote-connection.png) #### 3. Configure Windows Firewall diff --git a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Mysql.md b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Mysql.md index 85b90d48913c..7e69d9789028 100644 --- a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Mysql.md +++ b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Mysql.md @@ -73,7 +73,7 @@ $$ ## IAM Auth Config -$$note +$$note If you are using IAM auth, add
`"ssl": {"ssl-mode": "allow"}` under Connection Arguments $$ diff --git a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/StarRocks.md b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/StarRocks.md index 3fc76683405e..97b1376c151a 100644 --- a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/StarRocks.md +++ b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/StarRocks.md @@ -74,3 +74,4 @@ $$ $$section ### Connection Arguments $(id="connectionArguments") Additional connection arguments such as security or protocol configs that can be sent to the service during connection. +$$ diff --git a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Timescale.md b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Timescale.md index 641be4ab7171..c7eb608d7ba5 100644 --- a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Timescale.md +++ b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Timescale.md @@ -35,7 +35,7 @@ $$ ## IAM Auth Config -$$note +$$note If you are using IAM auth, add
`"ssl": {"ssl-mode": "allow"}` under Connection Arguments $$ diff --git a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Trino.md b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Trino.md index 1a2390dabc0a..d254a9395a3c 100644 --- a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Trino.md +++ b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/Trino.md @@ -29,13 +29,14 @@ $$section Username to connect to Trino. This user should have `SELECT` permission on the `SYSTEM.METADATA` and `INFORMATION_SCHEMA` - see the section above for more details. $$ +$$section ### Auth Config $(id="authType") There are 2 types of auth configs: - Basic Auth. - JWT Auth. User can authenticate the Trino Instance with auth type as `Basic Authentication` i.e. Password **or** by using `JWT Authentication`. - +$$ ## Basic Auth diff --git a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/UnityCatalog.md b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/UnityCatalog.md index 0d657db76def..219096580dac 100644 --- a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/UnityCatalog.md +++ b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Database/UnityCatalog.md @@ -16,13 +16,13 @@ To extract basic metadata (catalogs, schemas, tables, views) from Databricks, th ```sql -- Grant USE CATALOG on catalog -GRANT USE CATALOG ON CATALOG TO ``; +GRANT USE CATALOG ON CATALOG TO ''; -- Grant USE SCHEMA on schemas -GRANT USE SCHEMA ON SCHEMA TO ``; +GRANT USE SCHEMA ON SCHEMA TO ''; -- Grant SELECT on tables and views -GRANT SELECT ON TABLE TO ``; +GRANT SELECT ON TABLE TO ''; ``` ### View Definitions (Optional) @@ -31,7 +31,7 @@ To extract view definitions from `INFORMATION_SCHEMA.VIEWS`, ensure the user has ```sql -- Grant SELECT on INFORMATION_SCHEMA.VIEWS -GRANT SELECT ON VIEW information_schema.views TO ``; +GRANT SELECT ON VIEW information_schema.views TO ''; 
``` ### Unity Catalog Tags (Optional) @@ -40,16 +40,16 @@ To extract tags at different levels (catalog, schema, table, column), the user n ```sql -- For catalog-level tags -GRANT SELECT ON TABLE system.information_schema.catalog_tags TO ``; +GRANT SELECT ON TABLE system.information_schema.catalog_tags TO ''; -- For schema-level tags -GRANT SELECT ON TABLE system.information_schema.schema_tags TO ``; +GRANT SELECT ON TABLE system.information_schema.schema_tags TO ''; -- For table-level tags -GRANT SELECT ON TABLE system.information_schema.table_tags TO ``; +GRANT SELECT ON TABLE system.information_schema.table_tags TO ''; -- For column-level tags -GRANT SELECT ON TABLE system.information_schema.column_tags TO ``; +GRANT SELECT ON TABLE system.information_schema.column_tags TO ''; ``` $$note @@ -62,10 +62,10 @@ To extract table and column-level lineage from Unity Catalog system tables, the ```sql -- For table lineage -GRANT SELECT ON TABLE system.access.table_lineage TO ``; +GRANT SELECT ON TABLE system.access.table_lineage TO ''; -- For column lineage -GRANT SELECT ON TABLE system.access.column_lineage TO ``; +GRANT SELECT ON TABLE system.access.column_lineage TO ''; ``` $$note diff --git a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Messaging/CustomMessaging.md b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Messaging/CustomMessaging.md index a843adf4752f..81663ebac79a 100644 --- a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Messaging/CustomMessaging.md +++ b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Messaging/CustomMessaging.md @@ -14,6 +14,7 @@ $$section Source Python Class Name to instantiated by the ingestion workflow. Note that it should implement the `next_record` method so that the Workflow can keep reading and sending records to the OpenMetadata API. 
+$$ $$section ### Connection Options $(id="connectionOptions") diff --git a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/MlModel/CustomMlModel.md b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/MlModel/CustomMlModel.md index 73c2c3c54fdb..cf03b94f24ca 100644 --- a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/MlModel/CustomMlModel.md +++ b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/MlModel/CustomMlModel.md @@ -14,6 +14,7 @@ $$section Source Python Class Name to instantiated by the ingestion workflow. Note that it should implement the `next_record` method so that the Workflow can keep reading and sending records to the OpenMetadata API. +$$ $$section ### Connection Options $(id="connectionOptions") diff --git a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Pipeline/Airflow.md b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Pipeline/Airflow.md index 542ed5bb5fbe..ba57180c462a 100644 --- a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Pipeline/Airflow.md +++ b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Pipeline/Airflow.md @@ -213,14 +213,20 @@ Host and port of the MySQL service. This should be specified as a string in the MySQL schema that contains the Airflow tables. +$$section ### SSL CA $(id="sslCA") Provide the path to SSL CA file, which needs to be local in the ingestion process. +$$ +$$section ### SSL Certificate $(id="sslCert") Provide the path to SSL client certificate file (`ssl_cert`) +$$ +$$section ### SSL Key $(id="sslKey") Provide the path to SSL key file (`ssl_key`) +$$ ## Postgres Connection @@ -239,11 +245,13 @@ Host and port of the Postgres service. E.g., `localhost:5432` or `host.docker.in Postgres database that contains the Airflow tables. +$$section ### SSL Mode $(id="sslMode") SSL Mode to connect to postgres database. E.g, `prefer`, `verify-ca` etc. 
You can ignore the rest of the properties, since we won't ingest any database not policy tags. +$$ ## MSSQL Connection @@ -271,12 +279,14 @@ $$ ## Basic Auth +$$section ### Password $(id="password") Password to connect to MySQL. +$$ ## IAM Auth Config -$$note +$$note If you are using IAM auth, add
`"ssl": {"ssl-mode": "allow"}` under Connection Arguments $$ @@ -432,6 +442,7 @@ $$ $$section ### Database Name $(id="databaseName") + In OpenMetadata, the Database Service hierarchy works as follows: ``` Database Service > Database > Schema > Table @@ -467,16 +478,18 @@ $$ $$section ### Connection Arguments $(id="connectionArguments") Additional connection arguments such as security or protocol configs that can be sent to the service during connection. +$$ ## Postgres Connection - +$$section ### Username $(id="username") Username to connect to Postgres. This user should have privileges to read all the metadata in Postgres. +$$ - +$$section ### Auth Config $(id="authType") There are 2 types of auth configs: - Basic Auth. @@ -484,18 +497,21 @@ There are 2 types of auth configs: - Azure Based Auth. User can authenticate the Postgres Instance with auth type as `Basic Authentication` i.e. Password **or** by using `IAM based Authentication` to connect to AWS related services **or** by using `Azure Baed Authentication` to connecto to Azure releated services. +$$ ## Basic Auth +$$section ### Password $(id="password") Password to connect to Postgres. +$$ ## IAM Auth Config - +$$section ### AWS Access Key ID $(id="awsAccessKeyId") When you interact with AWS, you specify your AWS security credentials to verify who you are and whether you have permission to access the resources that you are requesting. AWS uses the security credentials to authenticate and authorize your requests (docs). @@ -513,9 +529,9 @@ $$section ### AWS Secret Access Key $(id="awsSecretAccessKey") Secret access key (for example, `wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY`). +$$ - - +$$section ### AWS Region $(id="awsRegion") Each AWS Region is a separate geographic area in which AWS clusters data centers (docs). 
@@ -523,25 +539,25 @@ Each AWS Region is a separate geographic area in which AWS clusters data centers As AWS can have instances in multiple regions, we need to know the region the service you want to reach belongs to. Note that the AWS Region is the only required parameter when configuring a connection. When connecting to the services programmatically, there are different ways in which we can extract and use the rest of AWS configurations. You can find further information about configuring your credentials here. +$$ - - +$$section ### AWS Session Token $(id="awsSessionToken") If you are using temporary credentials to access your services, you will need to inform the AWS Access Key ID and AWS Secret Access Key. Also, these will include an AWS Session Token. You can find more information on Using temporary credentials with AWS resources. +$$ - - +$$section ### Endpoint URL $(id="endPointURL") To connect programmatically to an AWS service, you use an endpoint. An *endpoint* is the URL of the entry point for an AWS web service. The AWS SDKs and the AWS Command Line Interface (AWS CLI) automatically use the default endpoint for each service in an AWS Region. But you can specify an alternate endpoint for your API requests. Find more information on AWS service endpoints. +$$ - - +$$section ### Profile Name $(id="profileName") A named profile is a collection of settings and credentials that you can apply to an AWS CLI command. When you specify a profile to run a command, the settings and credentials are used to run that command. Multiple named profiles can be stored in the config and credentials files. @@ -549,9 +565,9 @@ A named profile is a collection of settings and credentials that you can apply t You can inform this field if you'd like to use a profile other than `default`. Find here more information about Named profiles for the AWS CLI. 
+$$ - - +$$section ### Assume Role ARN $(id="assumeRoleArn") Typically, you use `AssumeRole` within your account or for cross-account access. In this field you'll set the `ARN` (Amazon Resource Name) of the policy of the other account. @@ -561,9 +577,9 @@ A user who wants to access a role in a different account must also have permissi This is a required field if you'd like to `AssumeRole`. Find more information on AssumeRole. +$$ - - +$$section ### Assume Role Session Name $(id="assumeRoleSessionName") An identifier for the assumed role session. Use the role session name to uniquely identify a session when the same role is assumed by different principals or for different reasons. @@ -571,25 +587,26 @@ An identifier for the assumed role session. Use the role session name to uniquel By default, we'll use the name `OpenMetadataSession`. Find more information about the Role Session Name. +$$ - - +$$section ### Assume Role Source Identity $(id="assumeRoleSourceIdentity") The source identity specified by the principal that is calling the `AssumeRole` operation. You can use source identity information in AWS CloudTrail logs to determine who took actions with a role. Find more information about Source Identity. +$$ ## Azure Auth Config - +$$section ### Client ID $(id="clientId") This is a unique identifier for the service account. To fetch this key, look for the value associated with the `client_id` key in the service account key file. +$$ - - +$$section ### Client Secret $(id="clientSecret") To get the client secret, follow these steps: @@ -601,8 +618,9 @@ To get the client secret, follow these steps: 6. In the `Add a client secret` pop-up window, provide a description for your application secret. Choose when the application should expire, and select `Add`. 7. From the `Client secrets` section, copy the string in the `Value` column of the newly created application secret. 
+$$ - +$$section ### Tenant ID $(id="tenantId") To get the tenant ID, follow these steps: @@ -611,21 +629,21 @@ To get the tenant ID, follow these steps: 2. Search for `App registrations` and select the `App registrations link`. 3. Select the `Azure AD` app you're using for Power BI. 4. From the `Overview` section, copy the `Directory (tenant) ID`. +$$ - - +$$section ### Storage Account Name $(id="accountName") Account Name of your storage account +$$ - - +$$section ### Key Vault Name $(id="vaultName") Key Vault Name +$$ - - +$$section ### Scopes $(id="scopes") To let OM use the Trino Auth APIs using your Azure AD app, you'll need to add the scope @@ -634,23 +652,23 @@ To let OM use the Trino Auth APIs using your Azure AD app, you'll need to add th 3. Select the `Azure AD` app you're using for Trino. 4. From the `Expose an API` section, copy the `Application ID URI` 5. Make sure the URI ends with `/.default` in case it does not, you can append the same manually +$$ - - +$$section ### Host and Port $(id="hostPort") This parameter specifies the host and port of the Postgres instance. This should be specified as a string in the format `hostname:port`. For example, you might set the hostPort parameter to `localhost:5432`. If you are running the OpenMetadata ingestion in a docker and your services are hosted on the `localhost`, then use `host.docker.internal:5432` as the value. +$$ - - +$$section ### Database $(id="database") Initial Postgres database to connect to. If you want to ingest all databases, set `ingestAllDatabases` to true. +$$ - - +$$section ### SSL Mode $(id="sslMode") SSL Mode to connect to postgres database. E.g, `prefer`, `verify-ca`, `allow` etc. @@ -659,14 +677,18 @@ $$note if you are using `IAM auth`, select either `allow` (recommended) or other option based on your use case. $$ +$$ + +$$section ### SSL CA $(id="caCertificate") The CA certificate used for SSL validation (`sslrootcert`). 
- $$note Postgres only needs CA Certificate $$ +$$ + $$section ### Classification Name $(id="classificationName") @@ -679,43 +701,47 @@ $$section If ticked, the workflow will be able to ingest all database in the cluster. If not ticked, the workflow will only ingest tables from the database set above. $$ - +$$section ### Connection Arguments $(id="connectionArguments") Additional connection arguments such as security or protocol configs that can be sent to service during connection. +$$ - - +$$section ### Connection Options $(id="connectionOptions") Additional connection options to build the URL that can be sent to service during the connection. +$$ ## SQLite Connection +$$section ### Username $(id="username") Username to connect to SQLite. Blank for in-memory database. +$$ - +$$section ### Password $(id="password") Password to connect to SQLite. Blank for in-memory database. +$$ - - +$$section ### Host Port $(id="hostPort") This parameter specifies the host and port of the SQLite instance. This should be specified as a string in the format `hostname:port`. For example, you might set the hostPort parameter to `localhost:3306`. If you are running the OpenMetadata ingestion in a docker and your services are hosted on the `localhost`, then use `host.docker.internal:3306` as the value. Keep it blank for in-memory databases. +$$ - +$$section ### Database $(id="database") Database of the data source. This is an optional parameter, if you would like to restrict the metadata reading to a single database. When left blank, the OpenMetadata Ingestion attempts to scan all the databases. - +$$ $$section ### Database Mode $(id="databaseMode") @@ -723,13 +749,14 @@ $$section How to run the SQLite database. :memory: by default. $$ - +$$section ### Connection Options $(id="connectionOptions") Additional connection options to build the URL that can be sent to service during the connection. 
+$$ - - +$$section ### Connection Arguments $(id="connectionArguments") -Additional connection arguments such as security or protocol configs that can be sent to service during connection. \ No newline at end of file +Additional connection arguments such as security or protocol configs that can be sent to service during connection. +$$ \ No newline at end of file diff --git a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Pipeline/CustomPipeline.md b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Pipeline/CustomPipeline.md index 0630c8ecc69d..2729e567c870 100644 --- a/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Pipeline/CustomPipeline.md +++ b/openmetadata-ui/src/main/resources/ui/public/locales/en-US/Pipeline/CustomPipeline.md @@ -14,6 +14,7 @@ $$section Source Python Class Name to instantiated by the ingestion workflow. Note that it should implement the `next_record` method so that the Workflow can keep reading and sending records to the OpenMetadata API. +$$ $$section ### Connection Options $(id="connectionOptions") diff --git a/openmetadata-ui/src/main/resources/ui/src/components/BlockEditor/BlockEditor.interface.ts b/openmetadata-ui/src/main/resources/ui/src/components/BlockEditor/BlockEditor.interface.ts index e57e3cb56609..c1209e15be8d 100644 --- a/openmetadata-ui/src/main/resources/ui/src/components/BlockEditor/BlockEditor.interface.ts +++ b/openmetadata-ui/src/main/resources/ui/src/components/BlockEditor/BlockEditor.interface.ts @@ -58,6 +58,17 @@ export interface ExtensionOptions { * @default false */ enableSectionNode?: boolean; + /** + * Enable admonition node extension to render
blocks. + * Required when rendering connector documentation with $$note/$$warning/etc. blocks. + * @default false + */ + enableAdmonitionNode?: boolean; + /** + * Replace the default code block with one that includes a copy-to-clipboard button. + * @default false + */ + enableCodeBlockCopy?: boolean; } export interface BlockEditorProps { diff --git a/openmetadata-ui/src/main/resources/ui/src/components/BlockEditor/Extensions/AdmonitionNode.ts b/openmetadata-ui/src/main/resources/ui/src/components/BlockEditor/Extensions/AdmonitionNode.ts new file mode 100644 index 000000000000..e25fbc70b0ba --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/components/BlockEditor/Extensions/AdmonitionNode.ts @@ -0,0 +1,55 @@ +/* + * Copyright 2025 Collate. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { mergeAttributes, Node } from '@tiptap/core'; +import { ADMONITION_TYPES } from '../../../constants/BlockEditor.constants'; + +const AdmonitionNode = Node.create({ + name: 'admonition', + + group: 'block', + content: 'block+', + + addAttributes() { + return { + type: { + default: 'note', + parseHTML: (element) => { + const type = element.dataset.admonition ?? 'note'; + + return (ADMONITION_TYPES as readonly string[]).includes(type) + ? 
type + : 'note'; + }, + renderHTML: (attributes) => ({ + 'data-admonition': attributes.type, + }), + }, + }; + }, + + parseHTML() { + return [{ tag: 'div[data-admonition]' }]; + }, + + renderHTML({ HTMLAttributes, node }) { + return [ + 'div', + mergeAttributes(HTMLAttributes, { + class: `admonition admonition-${node.attrs.type}`, + }), + 0, + ]; + }, +}); + +export default AdmonitionNode; diff --git a/openmetadata-ui/src/main/resources/ui/src/components/BlockEditor/Extensions/CodeBlock/CodeBlockComponent.tsx b/openmetadata-ui/src/main/resources/ui/src/components/BlockEditor/Extensions/CodeBlock/CodeBlockComponent.tsx new file mode 100644 index 000000000000..3819beedce6f --- /dev/null +++ b/openmetadata-ui/src/main/resources/ui/src/components/BlockEditor/Extensions/CodeBlock/CodeBlockComponent.tsx @@ -0,0 +1,64 @@ +/* + * Copyright 2025 Collate. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +import { NodeViewContent, NodeViewProps, NodeViewWrapper } from '@tiptap/react'; +import { Button, Tooltip } from 'antd'; +import { FC, useCallback, useEffect, useRef, useState } from 'react'; +import { useTranslation } from 'react-i18next'; +import { ReactComponent as CopyIcon } from '../../../../assets/svg/icon-copy.svg'; + +const CodeBlockComponent: FC = ({ node }) => { + const { t } = useTranslation(); + const [copied, setCopied] = useState(false); + const timerRef = useRef>(); + + useEffect(() => { + return () => { + if (timerRef.current) { + clearTimeout(timerRef.current); + } + }; + }, []); + + const handleCopy = useCallback(async () => { + try { + await navigator.clipboard.writeText(node.textContent); + setCopied(true); + if (timerRef.current) { + clearTimeout(timerRef.current); + } + timerRef.current = setTimeout(() => setCopied(false), 2000); + } catch { + // clipboard write failed silently + } + }, [node]); + + return ( + + + + +