diff --git a/content/assets/images/how-to/change-compatibility-mode.png b/content/assets/images/how-to/change-compatibility-mode.png
new file mode 100644
index 000000000..f014da2b2
Binary files /dev/null and b/content/assets/images/how-to/change-compatibility-mode.png differ
diff --git a/content/features/CSharpScripts/Advanced/script-add-databricks-metadata-descriptions.md b/content/features/CSharpScripts/Advanced/script-add-databricks-metadata-descriptions.md
index 583e9730c..9f2f9a595 100644
--- a/content/features/CSharpScripts/Advanced/script-add-databricks-metadata-descriptions.md
+++ b/content/features/CSharpScripts/Advanced/script-add-databricks-metadata-descriptions.md
@@ -2,7 +2,7 @@
uid: script-add-databricks-metadata-descriptions
title: Add Databricks Metadata Descriptions
author: Johnny Winter
-updated: 2025-09-04
+updated: 2026-04-08
applies_to:
products:
- product: Tabular Editor 2
@@ -16,7 +16,8 @@ applies_to:
This script was created as part of the Tabular Editor x Databricks series. In Unity Catalog it is possible to provide descriptive comments for tables and columns. This script can re-use this information to automatically populate table and column descriptions in your semantic model.
> [!NOTE]
-> This script requires the Simba Spark ODBC Driver to be installed (download from https://www.databricks.com/spark/odbc-drivers-download)
+> This script requires a Databricks ODBC driver. We recommend the new [Databricks ODBC Driver](https://www.databricks.com/spark/odbc-drivers-download), which replaces the legacy Simba Spark ODBC Driver. The script auto-detects which driver is installed and uses it accordingly.
+
Each run of the script will prompt the user for a Databricks Personal Access Token. This is required to authenticate to Databricks.
The script utilises the information_schema tables in Unity Catalog to retrieve description information, so you may need to double check with your Databricks administrator to make sure you have permission to query these tables.
@@ -37,7 +38,8 @@ The script utilises the information_schema tables in Unity Catalog to retrieve r
* For each table processed, a message box will display the number of descriptions updated.
* Click OK to continue to the next table.
* Notes:
- * - This script requires the Simba Spark ODBC Driver to be installed (download from https://www.databricks.com/spark/odbc-drivers-download)
+ * - This script requires the Databricks ODBC Driver (recommended) or legacy Simba Spark ODBC Driver to be installed (download from https://www.databricks.com/spark/odbc-drivers-download)
+ * - The script auto-detects which driver is installed
* - Each run of the script will prompt the user for a Databricks Personal Access Token
*/
#r "Microsoft.VisualBasic"
@@ -376,6 +378,37 @@ do
// toggle the 'Running Macro' spinbox
ScriptHelper.WaitFormVisible = true;
+// auto-detect Databricks ODBC driver
+string driverPath;
+string newDriverPath = @"C:\Program Files\Databricks ODBC Driver";
+string legacyDriverPath = @"C:\Program Files\Simba Spark ODBC Driver";
+
+if (System.IO.Directory.Exists(newDriverPath))
+{
+ driverPath = newDriverPath;
+}
+else if (System.IO.Directory.Exists(legacyDriverPath))
+{
+ driverPath = legacyDriverPath;
+}
+else
+{
+ ScriptHelper.WaitFormVisible = false;
+ Interaction.MsgBox(
+ @"No Databricks ODBC driver found.
+
+Please install the Databricks ODBC Driver from:
+https://www.databricks.com/spark/odbc-drivers-download
+
+Expected installation paths:
+ " + newDriverPath + @"
+ " + legacyDriverPath,
+ MsgBoxStyle.Critical,
+ "ODBC Driver Not Found"
+ );
+ return;
+}
+
//for each selected table, get the Databricks connection info from the partition info
foreach (var t in Selected.Tables)
{
@@ -391,11 +424,11 @@ foreach (var t in Selected.Tables)
string tableName = connectionInfo.TableName;
//set DBX connection string
var odbcConnStr =
- @"DSN=Simba Spark;driver=C:\Program Files\Simba Spark ODBC Driver;host="
+ @"Driver=" + driverPath + ";Host="
+ serverHostname
- + ";port=443;httppath="
+ + ";Port=443;HTTPPath="
+ httpPath
- + ";thrifttransport=2;ssl=1;authmech=3;uid=token;pwd="
+ + ";SSL=1;ThriftTransport=2;AuthMech=3;UID=token;PWD="
+ dbxPAT;
//test connection
@@ -409,15 +442,13 @@ foreach (var t in Selected.Tables)
// toggle the 'Running Macro' spinbox
ScriptHelper.WaitFormVisible = false;
Interaction.MsgBox(
- @"Connection failed
+ @"Connection failed (using driver: " + driverPath + @")
-Please check the following prequisites:
+Please check the following prerequisites:
-- you must have the Simba Spark ODBC Driver installed
+- you must have the Databricks ODBC Driver installed
(download from https://www.databricks.com/spark/odbc-drivers-download)
-- the ODBC driver must be installed in the path C:\Program Files\Simba Spark ODBC Driver
-
- check that the Databricks server name "
+ serverHostname
+ @" is correct
@@ -557,7 +588,7 @@ Either:
}
```
### Explanation
-The script uses WinForms to prompt for a Databricks personal access token, used to authenticate to Databricks. For each selected table, the script retrieves the Databricks connection string information and schema and table name from the M query in the selected table's partition. Using the Spark ODBC driver it then sends a SQL query to Databricks that queries the information_schema tables to return the table description that is defined in Unity Catalog. This is then updated on the table description in the semantic model. A second SQL Query using the DESCRIBE command is also sent to the selected table to get column descriptions. The results of this are looped through, with descriptions added in the model. Once the script has run on each selected table, a dialogue box is displayed to show the number of descriptions updated.
+The script uses WinForms to prompt for a Databricks personal access token, used to authenticate to Databricks. It auto-detects whether the new Databricks ODBC Driver or the legacy Simba Spark ODBC Driver is installed. For each selected table, the script retrieves the Databricks connection string information and schema and table name from the M query in the selected table's partition. Using the detected ODBC driver it then sends a SQL query to Databricks that queries the information_schema tables to return the table description that is defined in Unity Catalog. This is then updated on the table description in the semantic model. A second SQL Query using the DESCRIBE command is also sent to the selected table to get column descriptions. The results of this are looped through, with descriptions added in the model. Once the script has run on each selected table, a dialogue box is displayed to show the number of descriptions updated.
## Example Output
diff --git a/content/features/CSharpScripts/Advanced/script-create-databricks-relationships.md b/content/features/CSharpScripts/Advanced/script-create-databricks-relationships.md
index ae444fad5..73730477e 100644
--- a/content/features/CSharpScripts/Advanced/script-create-databricks-relationships.md
+++ b/content/features/CSharpScripts/Advanced/script-create-databricks-relationships.md
@@ -2,7 +2,7 @@
uid: script-create-databricks-relationships
title: Create Databricks Relationships
author: Johnny Winter
-updated: 2025-09-04
+updated: 2026-04-08
applies_to:
products:
- product: Tabular Editor 2
@@ -16,7 +16,8 @@ applies_to:
This script was created as part of the Tabular Editor x Databricks series. In Unity Catalog it is possible to define primary and foreign key relationships between tables. This script can re-use this information to automatically detect and create relationships in Tabular Editor. Whilst importing the relationships, the script will also hide primary and foreign keys and set IsAvailableInMDX to false (with the exception of DateTime type primary keys). Primary keys are also marked as IsKey = TRUE in the semantic model.
> [!NOTE]
-> This script requires the Simba Spark ODBC Driver to be installed (download from https://www.databricks.com/spark/odbc-drivers-download)
+> This script requires a Databricks ODBC driver. We recommend the new [Databricks ODBC Driver](https://www.databricks.com/spark/odbc-drivers-download), which replaces the legacy Simba Spark ODBC Driver. The script auto-detects which driver is installed and uses it accordingly.
+
Each run of the script will prompt the user for a Databricks Personal Access Token. This is required to authenticate to Databricks.
The script utilises the information_schema tables in Unity Catalog to retrieve relationship information, so you may need to double check with your Databricks administrator to make sure you have permission to query these tables.
@@ -41,7 +42,8 @@ The script utilises the information_schema tables in Unity Catalog to retrieve r
 * For each table processed, a message box will display the number of relationships created.
* Click OK to continue to the next table.
* Notes:
- * - This script requires the Simba Spark ODBC Driver to be installed (download from https://www.databricks.com/spark/odbc-drivers-download)
+ * - This script requires the Databricks ODBC Driver (recommended) or legacy Simba Spark ODBC Driver to be installed (download from https://www.databricks.com/spark/odbc-drivers-download)
+ * - The script auto-detects which driver is installed
* - Each run of the script will prompt the user for a Databricks Personal Access Token
*/
#r "Microsoft.VisualBasic"
@@ -380,6 +382,37 @@ do
// toggle the 'Running Macro' spinbox
ScriptHelper.WaitFormVisible = true;
+// auto-detect Databricks ODBC driver
+string driverPath;
+string newDriverPath = @"C:\Program Files\Databricks ODBC Driver";
+string legacyDriverPath = @"C:\Program Files\Simba Spark ODBC Driver";
+
+if (System.IO.Directory.Exists(newDriverPath))
+{
+ driverPath = newDriverPath;
+}
+else if (System.IO.Directory.Exists(legacyDriverPath))
+{
+ driverPath = legacyDriverPath;
+}
+else
+{
+ ScriptHelper.WaitFormVisible = false;
+ Interaction.MsgBox(
+ @"No Databricks ODBC driver found.
+
+Please install the Databricks ODBC Driver from:
+https://www.databricks.com/spark/odbc-drivers-download
+
+Expected installation paths:
+ " + newDriverPath + @"
+ " + legacyDriverPath,
+ MsgBoxStyle.Critical,
+ "ODBC Driver Not Found"
+ );
+ return;
+}
+
//for each selected table, get the Databricks connection info from the partition info
foreach (var t in Selected.Tables)
{
@@ -433,11 +466,11 @@ foreach (var t in Selected.Tables)
//set DBX connection string
var odbcConnStr =
- @"DSN=Simba Spark;driver=C:\Program Files\Simba Spark ODBC Driver;host="
+ @"Driver=" + driverPath + ";Host="
+ serverHostname
- + ";port=443;httppath="
+ + ";Port=443;HTTPPath="
+ httpPath
- + ";thrifttransport=2;ssl=1;authmech=3;uid=token;pwd="
+ + ";SSL=1;ThriftTransport=2;AuthMech=3;UID=token;PWD="
+ dbxPAT;
//test connection
@@ -451,15 +484,13 @@ foreach (var t in Selected.Tables)
// toggle the 'Running Macro' spinbox
ScriptHelper.WaitFormVisible = false;
Interaction.MsgBox(
- @"Connection failed
+ @"Connection failed (using driver: " + driverPath + @")
-Please check the following prequisites:
+Please check the following prerequisites:
-- you must have the Simba Spark ODBC Driver installed
+- you must have the Databricks ODBC Driver installed
(download from https://www.databricks.com/spark/odbc-drivers-download)
-- the ODBC driver must be installed in the path C:\Program Files\Simba Spark ODBC Driver
-
- check that the Databricks server name "
+ serverHostname
+ @" is correct
@@ -585,7 +616,7 @@ Please check the following prequisites:
}
```
### Explanation
-The script uses WinForms to prompt for a Databricks personal access token, used to authenticate to Databricks. For each selected table, the script retrieves the Databricks connection string information and schema and table name from the M query in the selected table's partition. Using the Spark ODBC driver it then sends a SQL query to Databricks that queries the information_schema tables to find any foreign key relationships for the table that are defined in Unity Catalog. For each row returned in the SQL query, the script looks for matching table and column names in the model and where a relationship does not already exist, a new one is created. For role playing dimensions, where the same table might have multiple foreign keys relating to a single table, the first relationship detected will be the active one, and all other subsequent relationships are created as inactive. The script will also hide primary and foreign keys and set IsAvailableInMDX to false (with the exception of DateTime type primary keys). Primary keys are also marked as IsKey = TRUE in the semantic model. After the script has run for each selected table, a dialogue box will appear showing how many new relationships were created.
+The script uses WinForms to prompt for a Databricks personal access token, used to authenticate to Databricks. It auto-detects whether the new Databricks ODBC Driver or the legacy Simba Spark ODBC Driver is installed. For each selected table, the script retrieves the Databricks connection string information and schema and table name from the M query in the selected table's partition. Using the detected ODBC driver it then sends a SQL query to Databricks that queries the information_schema tables to find any foreign key relationships for the table that are defined in Unity Catalog. For each row returned in the SQL query, the script looks for matching table and column names in the model and where a relationship does not already exist, a new one is created. For role playing dimensions, where the same table might have multiple foreign keys relating to a single table, the first relationship detected will be the active one, and all other subsequent relationships are created as inactive. The script will also hide primary and foreign keys and set IsAvailableInMDX to false (with the exception of DateTime type primary keys). Primary keys are also marked as IsKey = TRUE in the semantic model. After the script has run for each selected table, a dialogue box will appear showing how many new relationships were created.
## Example Output
diff --git a/content/features/Semantic-Model/direct-lake-sql-model.md b/content/features/Semantic-Model/direct-lake-sql-model.md
index a1d5af2dc..d76ba5741 100644
--- a/content/features/Semantic-Model/direct-lake-sql-model.md
+++ b/content/features/Semantic-Model/direct-lake-sql-model.md
@@ -2,7 +2,7 @@
uid: direct-lake-sql-model
title: Direct Lake on SQL Semantic Models
author: Morten Lønskov
-updated: 2024-08-22
+updated: 2026-03-27
applies_to:
products:
- product: Tabular Editor 2
@@ -24,14 +24,13 @@ Direct Lake on SQL semantic models connect directly to data sources stored in [O
> As of [Tabular Editor 3.22.0](../../references/release-notes/3_22_0.md), Tabular Editor 3 supports Direct Lake on OneLake, which is recommended in most scenarios. See our [Direct Lake guidance](xref:direct-lake-guidance) article for more information.
Tabular Editor 3 can create and connect to this type of model. For a tutorial on this please refer to our blog article: [Direct Lake semantic models: How to use them with Tabular Editor](https://blog.tabulareditor.com/2023/09/26/fabric-direct-lake-with-tabular-editor-part-2-creation/).
-Tabular Editor 3 can create direct lake semantic models with both the Lakehouse and Datawarehouse SQL Endpoint.
+Tabular Editor 3 can create Direct Lake semantic models with both the Lakehouse and Datawarehouse SQL Endpoint.
-Tabular Editor 2 can connect to Direct Lake semantic models, but does not have any built in functionality to create new tables or direct lake semantic models. This needs to be done manually or with a C# script.
+Tabular Editor 2 can connect to Direct Lake semantic models, but does not have any built-in functionality to create new tables or Direct Lake semantic models. This needs to be done manually or with a C# script.
-
+> [!NOTE]
+> **Direct Lake limitations**
+> There are several limitations to the changes that can be made to a Direct Lake model. See [Direct Lake Considerations and Limitations](https://learn.microsoft.com/en-us/fabric/fundamentals/direct-lake-overview#considerations-and-limitations) for the full list. See also [this article by SQLBI](https://www.sqlbi.com/blog/marco/2024/04/06/direct-lake-vs-import-mode-in-power-bi/) for an overview of choosing between Direct Lake and Import mode.
## Creating a Direct Lake on SQL model in Tabular Editor 3
@@ -43,7 +42,7 @@ Using the checkbox ensures that Direct Lake specific properties and annotations
> [!NOTE]
> Direct Lake on SQL models currently use a collation that is different from regular Power BI import semantic models. This may lead to different results when querying the model, or when referencing object names in DAX code.
- For more information please see this blog post by Kurt Buhler: [Case-sensitive models in Power BI: consequences & considerations](https://data-goblins.com/power-bi/case-specific)
+> For more information, see this blog post by Kurt Buhler: [Case-sensitive models in Power BI: consequences & considerations](https://data-goblins.com/power-bi/case-specific).
> [!IMPORTANT]
> As of [Tabular Editor 3.22.0](../../references/release-notes/3_22_0.md), the Direct Lake checkbox has been removed from the New Model dialog. You must [manually set the collation on your model to match that of your Fabric Warehouse](xref:direct-lake-guidance#collation) if using Direct Lake on SQL.
@@ -62,7 +61,7 @@ The top title bar of Tabular Editor shows which type of model is open in that in
## Converting a Direct Lake model to Import Mode
-The below C# script converts and existing model into 'Import Mode'. This can be useful if the data latency requirements of your model does not require Direct Lake or you want to avoid the limitations of a Direct Lake model but have already started building one inside Fabric.
+The below C# script converts an existing model into Import mode. This can be useful if the data latency requirements of your model do not require Direct Lake or you want to avoid the limitations of a Direct Lake model but have already started building one inside Fabric.
Running the script is possible when Tabular Editor is connected to a semantic model through the XMLA endpoint. However, saving changes directly back to the Power BI/Fabric workspace is not supported by Microsoft. To circumvent this, the recommended approach is to use the "Model > Deploy..." option. This allows for the deployment of the newly converted model as a new entity in a workspace.
diff --git a/content/features/Semantic-Model/semantic-model-types.md b/content/features/Semantic-Model/semantic-model-types.md
index a9ad7db7f..c88aee94b 100644
--- a/content/features/Semantic-Model/semantic-model-types.md
+++ b/content/features/Semantic-Model/semantic-model-types.md
@@ -2,7 +2,7 @@
uid: semantic-model-types
title: Power BI Semantic model Types
author: Morten Lønskov
-updated: 2025-06-19
+updated: 2026-03-27
applies_to:
products:
- product: Tabular Editor 2
@@ -24,54 +24,55 @@ Tabular Editor can work with several different model types. Below is an overview
|Model Type|Import|Direct Query|Direct Lake on OneLake|Direct Lake on SQL|.pbix|.pbip|
-|---|---|---|---|---|
-|Connect in Tabular Editor|✔️|✔️|✔️|✔️|✔️|
+|---|---|---|---|---|---|---|
+|Connect in Tabular Editor|✔️|✔️|✔️|✔️|✔️| |
|Create new model|✔️|✔️|✔️|✔️|✔️|✔️|
|Write Measures|✔️|✔️|✔️|✔️|✔️|✔️|
|Create & Edit Tables|✔️|✔️|✔️[1](#DirectLake)|✔️[1](#DirectLake)|✔️|✔️|
|Create & Edit Partitions|✔️|✔️|✔️[1](#DirectLake)|✔️[1](#DirectLake)|✔️|✔️|
|Create & Edit Columns|✔️|✔️|✔️[1](#DirectLake)|✔️[1](#DirectLake)|✔️|✔️|
-|Create & Edit Calculated Tables|✔️|✔️|✔️[2](#DirectLakeCalculated)|✔️|✔️|✔️|
-|Create & Edit Calculated Columns|✔️|✔️|✔️[2](#DirectLakeCalculated)|✔️|✔️|✔️|
-|Create & Edit Calculation Groups|✔️|✔️|✔️|✔️|✔️|
-|Create & Edit Relationships|✔️|✔️|✔️|✔️|✔️|
+|Create & Edit Calculated Tables|✔️|✔️|✔️[2](#DirectLakeCalculated)|✔️[4](#DirectLakeSQLCalculated)|✔️|✔️|
+|Create & Edit Calculated Columns|✔️|✔️|❌|❌|✔️|✔️|
+|Create & Edit Calculation Groups|✔️|✔️|✔️|✔️|✔️| |
+|Create & Edit Relationships|✔️|✔️|✔️|✔️|✔️| |
|Create & Edit Roles|✔️|✔️|✔️|✔️|✔️|✔️|
|Create & Edit Perspectives|✔️|✔️|✔️|✔️|✔️|✔️|
|Create & Edit Translations|✔️|✔️|✔️|✔️|✔️|✔️|
-|Use Best Practice Analyzer|✔️|✔️|✔️|✔️|✔️|
+|Use Best Practice Analyzer|✔️|✔️|✔️|✔️|✔️| |
|Edit All TOM properties|✔️|✔️|✔️|✔️|✔️|✔️|
|Create Diagrams[3](#TE3Prem)|✔️|✔️|✔️|✔️|✔️|✔️|
|Use Preview Data[3](#TE3Prem)|✔️|✔️|✔️|✔️|✔️|✔️|
|Use Pivot Grids[3](#TE3Prem)|✔️|✔️|✔️|✔️|✔️|✔️|
|Use DAX Queries[3](#TE3Prem)|✔️|✔️|✔️|✔️|✔️|✔️|
|Use DAX Debugger[3](#TE3Prem)|✔️|✔️|✔️|✔️|✔️|✔️|
-|Use Vertipac Analyzer[3](#TE3Prem)|✔️|✔️|✔️|✔️|✔️|✔️|
+|Use VertiPaq Analyzer[3](#TE3Prem)|✔️|✔️|✔️|✔️|✔️|✔️|
|Process Model and Tables[3](#TE3Prem)|✔️|✔️|✔️|✔️|✔️|✔️|
-|Delete Objects|✔️|✔️|✔️|✔️|
+|Delete Objects|✔️|✔️|✔️|✔️| | |
**Legend:**
- ✔️: Supported
- ❌: Unsupported
-1 - The table partition must be an Entity Partition to work correctly and Direct Lake models can only have one partition.
-2 - Calculated Tables and Columns cannot refer to Direct Lake on OneLake tables or columns.
+1 - The table partition must be an Entity Partition to work correctly. Direct Lake models can only have one partition per table.
+2 - Calculated Tables cannot refer to Direct Lake on OneLake tables or columns. Calculation groups, what-if parameters and field parameters are supported.
3 - Tabular Editor 3 features only. Operations performed through the XMLA endpoint requires a Business or Enterprise license. [More information](xref:editions).
+4 - Direct Lake on SQL only supports calculation groups, what-if parameters and field parameters, which implicitly create calculated tables. General calculated tables are not supported.
>[!NOTE]
-> The June 2025 Release of Power BI Desktop all modeling limitations for third party tools where lifted. Prior to that various modeling operations where not supported. See [Power BI Desktop Limitations](xref:desktop-limitations)
+> The June 2025 release of Power BI Desktop lifted all modeling limitations for third-party tools. Prior to that, various modeling operations were not supported. See [Power BI Desktop Limitations](xref:desktop-limitations).
>[!TIP]
> For further details on restrictions on Direct Lake models refer to Microsoft's [Direct Lake documentation](https://learn.microsoft.com/en-us/fabric/fundamentals/direct-lake-overview)
## Unsupported Semantic Model types
-The following semantic model types are unsupported, as they don't support XMLA write operations.
+The following semantic model types are unsupported, as they do not support XMLA write operations.
- Reports based on a live connection to an Azure Analysis Services or SQL Server Analysis Services model.
- Reports based on a live connection to a Power BI dataset.
- Models with Push data.
- Models stored in Power BI My Workspace.
- Models stored in Power BI Pro Workspace.
-- Direct Lake Default Semantic Models. (It is possible to connect to a default dataset, but it is not possible to change it through the XMLA endpoint)
+- Direct Lake Default Semantic Models. As of September 2025, Power BI no longer automatically creates default semantic models when a warehouse, lakehouse or mirrored item is created. By November 2025, all existing default semantic models were disconnected from their items and became independent semantic models. It is possible to connect to a default semantic model, but it is not possible to change it through the XMLA endpoint.
- Excel workbook Semantic Models.
\ No newline at end of file
diff --git a/content/features/ai-assistant.md b/content/features/ai-assistant.md
index e4bdfd9ae..d7b9e3cc5 100644
--- a/content/features/ai-assistant.md
+++ b/content/features/ai-assistant.md
@@ -2,7 +2,7 @@
uid: ai-assistant
title: AI Assistant
author: Morten Lønskov
-updated: 2026-03-19
+updated: 2026-04-15
applies_to:
products:
- product: Tabular Editor 2
@@ -72,7 +72,31 @@ Select **Anthropic** as the provider and enter your API key. The default model i
### Azure OpenAI
-Select **Azure OpenAI** as the provider. Enter your API key and the service endpoint URL for your Azure OpenAI resource. Set the model name to match your deployment name.
+Select **Azure OpenAI** as the provider and configure three fields:
+
+- **API key** — the access key for your Azure OpenAI resource
+- **Service endpoint** — the endpoint URL for your resource, for example `https://your-resource.openai.azure.com`. Use the resource URL, not the `privatelink` alias; the SSL certificate is issued for `*.openai.azure.com` and connecting directly to `*.privatelink.openai.azure.com` fails certificate validation
+- **Model name** — the **deployment name**, not the underlying model name and not the resource name
+
+Azure OpenAI requires the deployment name in every API call. A deployment name is chosen when the deployment is created, so it can be any string. Deployments are often named after the model they serve (for example `gpt-4o`), but that is a convention, not a requirement. If you enter the resource name or a raw model name that does not exist as a deployment, the request fails.
+
+#### Finding your deployment name
+
+In the [Azure AI Foundry portal](https://ai.azure.com):
+
+1. Sign in and select your Azure OpenAI resource
+2. Open **Deployments** (or **Models + endpoints** if the resource has been upgraded to Foundry)
+3. Copy the value from the **Name** column
+
+Deployments created before your organization adopted Azure AI Foundry may not appear in the portal. List them from the Azure CLI:
+
+```bash
+az cognitiveservices account deployment list --name "<resource-name>" --resource-group "<resource-group>" --output table
+```
+
+See [Create and deploy an Azure OpenAI resource](https://learn.microsoft.com/azure/ai-foundry/openai/how-to/create-resource#deploy-a-model) for more details.
+
+For 403 errors, SSL failures or "DeploymentNotFound" responses, see @azure-openai-connection-errors.
### Custom (OpenAI-compatible)
@@ -298,7 +322,7 @@ Select specific objects in the **TOM Explorer** before asking your question. Whe
Other ways to reduce token usage:
-- Ask focused questions about specific tables, measures or columns rather than broad questions about the entire model
+- Ask focused questions about specific tables, measures or columns rather than broad questions about the entire model. A vague prompt such as *"Set display folders on all measures"* forces the assistant to retrieve metadata for the entire model. A specific prompt such as *"Set display folders on the measures I have selected"* limits the context to the current selection and uses far fewer tokens
- Start new conversations when switching topics to avoid accumulating long conversation histories
- Use a smaller or less expensive model for exploratory questions
diff --git a/content/features/tmdl.md b/content/features/tmdl.md
index 6f837c8ca..1dc66f885 100644
--- a/content/features/tmdl.md
+++ b/content/features/tmdl.md
@@ -1,4 +1,4 @@
----
+---
uid: tmdl
title: Tabular Model Definition Language (TMDL)
author: Daniel Otykier
diff --git a/content/features/views/bpa-view.md b/content/features/views/bpa-view.md
index db9c00ec5..f13334c63 100644
--- a/content/features/views/bpa-view.md
+++ b/content/features/views/bpa-view.md
@@ -1,4 +1,4 @@
----
+---
uid: bpa-view
title: Best Practice Analyzer view
author: Daniel Otykier
diff --git a/content/getting-started/editions.md b/content/getting-started/editions.md
index 4856928cc..af0d3ebba 100644
--- a/content/getting-started/editions.md
+++ b/content/getting-started/editions.md
@@ -35,7 +35,7 @@ Please refer to the matrix below for the full overview of supported scenarios:
|Scenario / Edition|Desktop|Business|Enterprise|
|---|---|---|---|
-|External Tool for Power BI Desktop|✔|✔|✔|
+|External Tool for Power BI Desktop|✔|✔|✔|
|Load/save model metadata to disk**|❌|✔*|✔|
|Workspace Mode***|❌|✔*|✔|
|Power BI Premium Per User|❌|✔|✔|
@@ -72,13 +72,15 @@ Similarly, [Power BI Premium-Per-User workspaces do not support Direct Lake data
|Azure AS / SSAS|Multiple partitions|❌|✔|
|Azure AS / SSAS|DirectQuery*|✔|✔|
|Azure AS / SSAS|Direct Lake|N/A|N/A|
-|Power BI|Perspectives|✔|✔|
-|Power BI|Multiple partitions|✔|✔|
+|Power BI|Perspectives**|✔|✔|
+|Power BI|Multiple partitions**|✔|✔|
|Power BI|DirectQuery|✔|✔|
|Power BI|Direct Lake|❌|✔|
\***Note:** Analysis Services on SQL Server Standard Edition pre-2019 does not support DirectQuery. Nor does Azure AS Basic Tier. [Learn more](https://learn.microsoft.com/en-us/analysis-services/analysis-services-features-by-edition?view=asallproducts-allversions#tabular-models).
+\*\***Note:** Perspectives and multiple partitions are available in Business Edition for Power BI models, but the model's `CompatibilityMode` must be set to `PowerBI`. See [Change compatibility mode](xref:change-compatibility-mode) for instructions.
+
If you attempt to open a model that uses one or more of the modeling restrictions listed above, while on a TE3 Business Edition license, you will see the error message below:

diff --git a/content/getting-started/views/tom-explorer-view-reference.md b/content/getting-started/views/tom-explorer-view-reference.md
index 0bee8abd8..664b9f649 100644
--- a/content/getting-started/views/tom-explorer-view-reference.md
+++ b/content/getting-started/views/tom-explorer-view-reference.md
@@ -1,4 +1,4 @@
----
+---
uid: tom-explorer-view-reference
title: TOM Explorer view
author: Morten Lønskov
diff --git a/content/how-tos/change-compatibility-mode.md b/content/how-tos/change-compatibility-mode.md
new file mode 100644
index 000000000..82a38c6be
--- /dev/null
+++ b/content/how-tos/change-compatibility-mode.md
@@ -0,0 +1,67 @@
+---
+uid: change-compatibility-mode
+title: Change compatibility mode
+author: Morten Lønskov
+updated: 2026-04-08
+applies_to:
+ products:
+ - product: Tabular Editor 2
+ full: true
+ - product: Tabular Editor 3
+ editions:
+ - edition: Desktop
+ full: true
+ - edition: Business
+ full: true
+ - edition: Enterprise
+ full: true
+---
+
+# Change compatibility mode
+
+A model's **Compatibility Mode** controls which platform the model targets. This property determines:
+
+- Which Tabular Object Model (TOM) objects and properties are available
+- Which edition restrictions Tabular Editor applies
+
+Compatibility Mode is separate from the [Compatibility Level](xref:update-compatibility-level), which gates features behind version numbers.
+
+## Compatibility mode values
+
+The `Database.CompatibilityMode` property accepts the following values, defined by the [Microsoft.AnalysisServices.CompatibilityMode](https://learn.microsoft.com/dotnet/api/microsoft.analysisservices.compatibilitymode?view=analysisservices-dotnet) enum:
+
+| Value | Meaning |
+|---|---|
+| `Unknown` | No specific mode. Default when the mode has not been explicitly set. The AS client library automatically detects the actual mode based on which TOM features are used (for example, if any Power BI-specific features are present). |
+| `AnalysisServices` | Model targets SQL Server Analysis Services or Azure Analysis Services. |
+| `PowerBI` | Model targets Power BI (Desktop, Premium Per User, Premium Capacity, Fabric). Certain TOM properties are only available in this mode. See the Remarks section of each property in the [Microsoft.AnalysisServices.Tabular namespace reference](https://learn.microsoft.com/dotnet/api/microsoft.analysisservices.tabular?view=analysisservices-dotnet) for details. |
+| `Excel` | Model originates from an Excel Power Pivot data model. Tabular Editor does not support Power Pivot models. |
+
+Azure Analysis Services and SQL Server Analysis Services only support `AnalysisServices` mode. Power BI and Fabric support both `AnalysisServices` and `PowerBI` modes.
+
+> [!IMPORTANT]
+> Tabular Editor uses Compatibility Mode to determine edition restrictions. A model set to `AnalysisServices` mode triggers Enterprise-only restrictions for features like perspectives and multiple partitions, even if you deploy to Power BI.
+
+## When to change compatibility mode
+
+Change the compatibility mode to `PowerBI` when all of the following are true:
+
+- The model is deployed to Power BI (Premium Per User, Premium Capacity, or Fabric)
+- The model will **not** be deployed to SSAS or Azure Analysis Services
+- The `.bim` file was originally created in Visual Studio, SSDT, or another tool that defaults to `AnalysisServices` mode
+- You receive an edition error about Enterprise-tier features (such as perspectives) that should be available in your edition for Power BI models
+
+## Change the compatibility mode
+
+1. Open your model in Tabular Editor.
+2. In the **TOM Explorer**, select the top-level **Model** node.
+3. In the **Properties** panel, expand **Database**.
+4. Locate `CompatibilityMode`.
+5. Change the value from `AnalysisServices` to `PowerBI`.
+6. Save the model (**Ctrl+S**).
+
+
+
+> [!NOTE]
+> Changing the compatibility mode affects which TOM properties are available and how the model is validated. Verify that your deployment target matches the selected mode before saving.
+
diff --git a/content/how-tos/powerbi-xmla-pbix-workaround.md b/content/how-tos/powerbi-xmla-pbix-workaround.md
index 5acb1b93f..0552f101d 100644
--- a/content/how-tos/powerbi-xmla-pbix-workaround.md
+++ b/content/how-tos/powerbi-xmla-pbix-workaround.md
@@ -6,7 +6,7 @@ updated: 2023-10-18
applies_to:
products:
- product: Tabular Editor 2
- none: true
+ full: true
- product: Tabular Editor 3
editions:
- edition: Desktop
diff --git a/content/how-tos/toc.md b/content/how-tos/toc.md
index 28fafa4ef..35c1bf854 100644
--- a/content/how-tos/toc.md
+++ b/content/how-tos/toc.md
@@ -9,6 +9,7 @@
## [Folder Serialization](folder-serialization.md)
## [Master Model Pattern](Master-model-pattern.md)
## [Update compatibility level](update-compatibility-level.md)
+## [Change compatibility mode](change-compatibility-mode.md)
# Data Import and Tables
## [Importing Tables (TE2)](Importing-Tables.md)
diff --git a/content/how-tos/update-compatibility-level.md b/content/how-tos/update-compatibility-level.md
index 64cd53b0d..382dca9b2 100644
--- a/content/how-tos/update-compatibility-level.md
+++ b/content/how-tos/update-compatibility-level.md
@@ -24,6 +24,17 @@ A model's **Compatibility Level** controls which Tabular Object Model (TOM) feat
> [!WARNING]
> Compatibility upgrades are one-way. You can upgrade but can't reliably downgrade. Treat this like a schema upgrade and validate your deployment targets first.
+## Compatibility level vs. compatibility mode
+
+Compatibility Level and Compatibility Mode are separate properties that serve different purposes:
+
+| Property | Controls | Values |
+|---|---|---|
+| `Database.CompatibilityLevel` | Which TOM features are available (e.g., custom calendars, DAX UDFs) | `1200`, `1500`, `1600`, `1701`, `1702`, etc. |
+| `Database.CompatibilityMode` | Which platform the model targets, which TOM objects and properties are available, and which edition restrictions apply | `Unknown`, `AnalysisServices`, `PowerBI`, `Excel` |
+
+If you need to change the platform target rather than unlock new TOM features, see [Change compatibility mode](xref:change-compatibility-mode).
+
## When to upgrade
Upgrade when:
diff --git a/content/references/shortcuts3.md b/content/references/shortcuts3.md
index 756ee8306..eee8315f5 100644
--- a/content/references/shortcuts3.md
+++ b/content/references/shortcuts3.md
@@ -1,171 +1,171 @@
----
-uid: shortcuts3
-title: Keyboard shortcuts Tabular Editor 3
-author: Daniel Otykier
-updated: 2021-09-08
-applies_to:
- products:
- - product: Tabular Editor 2
- none: true
- - product: Tabular Editor 3
- editions:
- - edition: Desktop
- full: true
- - edition: Business
- full: true
- - edition: Enterprise
- full: true
----
-# Keyboard shortcuts
-
-## General
-
-|Command|Shortcut|
-|---|---|
-|New model|Ctrl+N|
-|Open file|Ctrl+O|
-|Load model from a database|Ctrl+Shift+O|
-|Save current item|Ctrl+S|
-|Save all|Ctrl+Shift+S|
-|Close Document|Ctrl+W|
-|Exit|Alt+F4|
-|Deployment wizard|Ctrl+Shift+D|
-|Switch In-Focus Window|Ctrl+Tab|
-
-## Edit
-
-|Command|Shortcut|
-|---|---|
-|Select All|Ctrl+A|
-|Copy|Ctrl+C|
-|Cut|Ctrl+X|
-|Paste|Ctrl+V|
-|Undo|Ctrl+Z|
-|Redo|Ctrl+Y|
-|Find|Ctrl+F|
-|Replace|Ctrl+H|
-
-## Data modelling
-
-|Command|Shortcut|
-|---|---|
-|Properties|F4|
-|Edit object name / batch rename|F2|
-|Batch rename children|Shift+F2|
-|View dependencies|Shift+F12|
-|Make invisible|Ctrl+I|
-|Make visible|Ctrl+U|
-|Create measure|Alt+1|
-|Create calculated column|Alt+2|
-|Create hierarchy|Alt+3|
-|Create data column|Alt+4|
-|Create table|Alt+5|
-|Create calculated table|Alt+6|
-|Create calculation group|Alt+7|
-|Accept expression change|F5|
-
-## TOM Explorer
-
-|Command|Shortcut|
-|---|---|
-|Navigate up or down|Up / Down arrow|
-|Expand / collapse current node|Right / Left arrow|
-|Expand / collapse current node and all subnodes|Ctrl+Right / Left arrow|
-|Expand / collapse entire tree|Ctrl+Shift+Right / Left arrow|
-|Toggle measures|Ctrl+1|
-|Toggle columns|Ctrl+2|
-|Toggle hierarchies|Ctrl+3|
-|Toggle partitions|Ctrl+4|
-|Toggle display folders|Ctrl+5|
-|Toggle hidden objects|Ctrl+6|
-|Toggle info columns|Ctrl+7|
-|Navigate back|Alt+Left arrow|
-|Navigate forward|Alt+Right arrow|
-
-## Text/code editing (general)
-
-|Command|Shortcut|
-|---|---|
-|Cut line|Ctrl+L|
-|Delete line|Ctrl+Shift+L|
-|Copy line|Ctrl+Shift+T|
-|Transpose lines|Ctrl+T|
-|Duplicate line|Ctrl+D|
-|Lowercase line|Ctrl+U|
-|Uppercase line|Ctrl+Shift+U|
-|Move lines up|Alt+Up arrow|
-|Move lines down|Alt+Down arrow|
-
-## DAX code
-
-|Command|Shortcut|
-|---|---|
-|Go to definition|F12|
-|Peek definition|Alt+F12|
-|Refactor|Ctrl+R|
-|Show auto-complete|Ctrl+Space|
-|Show calltip|Ctrl+Shift+Space|
-|Format DAX|F6|
-|Format DAX (Short lines)|Shift+F6|
-|Comment lines|Ctrl+K|
-|Uncomment lines|Ctrl+U|
-|Toggle comments|Ctrl+/|
-|Collapse all foldable regions|Ctrl+Alt+[|
-|Expand all foldable regions|Ctrl+Alt+]|
-|Toggle all foldable regions state|Ctrl+Alt+;|
-|Collapse foldable region|Ctrl+Shift+[|
-|Expand foldable region|Ctrl+Shift+]|
-|Toggle foldable region state|Ctrl+Shift+;|
-|Delete reference or words|Ctrl+Backspace or Ctrl+Delete|
-|Expand Selection|Ctrl+Shift+E|
-
-## DAX Query
-
-|Command|Shortcut|
-|---|---|
-|Execute query|F5|
-|Execute selection|Shift+F5|
-|Apply|F7|
-|Apply & Sync|Shift+F7|
-|Apply Selection|F8|
-|Apply Selection & Sync|Shift+F8|
-|Show Code Actions|Ctrl+.|
-
-## DAX Script
-
-|Command|Shortcut|
-|---|---|
-|Apply script|F5|
-|Apply selection|F8|
-|Apply script and save model|Shift+F5|
-|Apply selection and save model|Shift+F8|
-
-## DAX Debugger
-
-|Command|Shortcut|
-|---|---|
-|Step over|F10|
-|Step back|Shift+F10|
-|Step in|F11|
-|Step out|Shift+F11|
-|Next row (innermost row context)|F9|
-|Previous row (innermost row context)|Shift+F9|
-
-## C# Script
-
-|Command|Shortcut|
-|---|---|
-|Run script|F5|
-
-# Customizing Shortcuts
-
-Tabular Editor 3 allows for the customization of shortcuts by rebinding existing or adding new shortcuts.
-
-Setting shortcuts can be done through **Tools -> Preferences -> Keyboard** and locating the command that should have a shortcut binding and setting the binding in the menu.
-Shortcuts can be set for many different parts of Tabular Editor 3 including [Macros](xref:creating-macros) to have C# scripts available at the fingertips.
-
-
-
-1. Keyboard Menu in Preferences
-2. Find command that should have a shortcut
-3. Set shortcut by holding desired shortcuts key and use "Assign Shortcut"
\ No newline at end of file
+---
+uid: shortcuts3
+title: Keyboard shortcuts Tabular Editor 3
+author: Daniel Otykier
+updated: 2021-09-08
+applies_to:
+ products:
+ - product: Tabular Editor 2
+ none: true
+ - product: Tabular Editor 3
+ editions:
+ - edition: Desktop
+ full: true
+ - edition: Business
+ full: true
+ - edition: Enterprise
+ full: true
+---
+# Keyboard shortcuts
+
+## General
+
+|Command|Shortcut|
+|---|---|
+|New model|Ctrl+N|
+|Open file|Ctrl+O|
+|Load model from a database|Ctrl+Shift+O|
+|Save current item|Ctrl+S|
+|Save all|Ctrl+Shift+S|
+|Close Document|Ctrl+W|
+|Exit|Alt+F4|
+|Deployment wizard|Ctrl+Shift+D|
+|Switch In-Focus Window|Ctrl+Tab|
+
+## Edit
+
+|Command|Shortcut|
+|---|---|
+|Select All|Ctrl+A|
+|Copy|Ctrl+C|
+|Cut|Ctrl+X|
+|Paste|Ctrl+V|
+|Undo|Ctrl+Z|
+|Redo|Ctrl+Y|
+|Find|Ctrl+F|
+|Replace|Ctrl+H|
+
+## Data modelling
+
+|Command|Shortcut|
+|---|---|
+|Properties|F4|
+|Edit object name / batch rename|F2|
+|Batch rename children|Shift+F2|
+|View dependencies|Shift+F12|
+|Make invisible|Ctrl+I|
+|Make visible|Ctrl+U|
+|Create measure|Alt+1|
+|Create calculated column|Alt+2|
+|Create hierarchy|Alt+3|
+|Create data column|Alt+4|
+|Create table|Alt+5|
+|Create calculated table|Alt+6|
+|Create calculation group|Alt+7|
+|Accept expression change|F5|
+
+## TOM Explorer
+
+|Command|Shortcut|
+|---|---|
+|Navigate up or down|Up / Down arrow|
+|Expand / collapse current node|Right / Left arrow|
+|Expand / collapse current node and all subnodes|Ctrl+Right / Left arrow|
+|Expand / collapse entire tree|Ctrl+Shift+Right / Left arrow|
+|Toggle measures|Ctrl+1|
+|Toggle columns|Ctrl+2|
+|Toggle hierarchies|Ctrl+3|
+|Toggle partitions|Ctrl+4|
+|Toggle display folders|Ctrl+5|
+|Toggle hidden objects|Ctrl+6|
+|Toggle info columns|Ctrl+7|
+|Navigate back|Alt+Left arrow|
+|Navigate forward|Alt+Right arrow|
+
+## Text/code editing (general)
+
+|Command|Shortcut|
+|---|---|
+|Cut line|Ctrl+L|
+|Delete line|Ctrl+Shift+L|
+|Copy line|Ctrl+Shift+T|
+|Transpose lines|Ctrl+T|
+|Duplicate line|Ctrl+D|
+|Lowercase line|Ctrl+U|
+|Uppercase line|Ctrl+Shift+U|
+|Move lines up|Alt+Up arrow|
+|Move lines down|Alt+Down arrow|
+
+## DAX code
+
+|Command|Shortcut|
+|---|---|
+|Go to definition|F12|
+|Peek definition|Alt+F12|
+|Refactor|Ctrl+R|
+|Show auto-complete|Ctrl+Space|
+|Show calltip|Ctrl+Shift+Space|
+|Format DAX|F6|
+|Format DAX (Short lines)|Shift+F6|
+|Comment lines|Ctrl+K|
+|Uncomment lines|Ctrl+U|
+|Toggle comments|Ctrl+/|
+|Collapse all foldable regions|Ctrl+Alt+[|
+|Expand all foldable regions|Ctrl+Alt+]|
+|Toggle all foldable regions state|Ctrl+Alt+;|
+|Collapse foldable region|Ctrl+Shift+[|
+|Expand foldable region|Ctrl+Shift+]|
+|Toggle foldable region state|Ctrl+Shift+;|
+|Delete reference or words|Ctrl+Backspace or Ctrl+Delete|
+|Expand Selection|Ctrl+Shift+E|
+
+## DAX Query
+
+|Command|Shortcut|
+|---|---|
+|Execute query|F5|
+|Execute selection|Shift+F5|
+|Apply|F7|
+|Apply & Sync|Shift+F7|
+|Apply Selection|F8|
+|Apply Selection & Sync|Shift+F8|
+|Show Code Actions|Ctrl+.|
+
+## DAX Script
+
+|Command|Shortcut|
+|---|---|
+|Apply script|F5|
+|Apply selection|F8|
+|Apply script and save model|Shift+F5|
+|Apply selection and save model|Shift+F8|
+
+## DAX Debugger
+
+|Command|Shortcut|
+|---|---|
+|Step over|F10|
+|Step back|Shift+F10|
+|Step in|F11|
+|Step out|Shift+F11|
+|Next row (innermost row context)|F9|
+|Previous row (innermost row context)|Shift+F9|
+
+## C# Script
+
+|Command|Shortcut|
+|---|---|
+|Run script|F5|
+
+# Customizing Shortcuts
+
+Tabular Editor 3 allows for the customization of shortcuts by rebinding existing or adding new shortcuts.
+
+Setting shortcuts can be done through **Tools -> Preferences -> Keyboard** and locating the command that should have a shortcut binding and setting the binding in the menu.
+Shortcuts can be set for many different parts of Tabular Editor 3 including [Macros](xref:creating-macros) to have C# scripts available at your fingertips.
+
+
+
+1. Keyboard Menu in Preferences
+2. Find command that should have a shortcut
+3. Set the shortcut by pressing the desired key combination and using "Assign Shortcut"
\ No newline at end of file
diff --git a/content/troubleshooting/azure-openai-connection-errors.md b/content/troubleshooting/azure-openai-connection-errors.md
new file mode 100644
index 000000000..62dc40b0b
--- /dev/null
+++ b/content/troubleshooting/azure-openai-connection-errors.md
@@ -0,0 +1,73 @@
+---
+uid: azure-openai-connection-errors
+title: Azure OpenAI connection errors
+author: Morten Lønskov
+updated: 2026-04-15
+applies_to:
+ products:
+ - product: Tabular Editor 2
+ none: true
+ - product: Tabular Editor 3
+ since: 3.26.0
+ editions:
+ - edition: Desktop
+ full: true
+ - edition: Business
+ full: true
+ - edition: Enterprise
+ full: true
+---
+
+# Azure OpenAI connection errors
+
+This page covers common connection failures when using Azure OpenAI as the provider for the @ai-assistant. See the [Azure OpenAI configuration section](xref:ai-assistant#azure-openai) for setup details.
+
+## 403 "Public access is disabled. Please configure private endpoint"
+
+This 403 comes from Azure OpenAI itself, which means the HTTP request reached the public endpoint rather than your private endpoint. Azure rejects it because public access is disabled on the resource.
+
+The typical cause is a system proxy that resolves DNS outside your VPN tunnel. Your workstation resolves the Azure OpenAI hostname to the private IP via Azure Private DNS, but the proxy server uses its own resolver, reaches the public IP, and gets rejected.
+
+To confirm DNS on your workstation is resolving correctly:
+
+```text
+nslookup yourresource.openai.azure.com
+```
+
+A result pointing to a private IP range (for example `10.x.x.x`) confirms that your workstation side is correct and that the issue is on the proxy path.
+
+Options to resolve:
+
+- Add your Azure OpenAI hostname to the bypass list under **Tools > Preferences > Proxy Settings** so the request skips the proxy and goes directly through the VPN tunnel. Separate multiple hostnames with semicolons
+- Ask your network team to update the proxy PAC file to bypass `*.openai.azure.com`, or to configure the proxy server to resolve Azure Private DNS zones
+- Use split tunneling so Azure private endpoint IP ranges route directly rather than via the proxy
+
+See @proxy-settings for general Tabular Editor proxy configuration.
+
+## SSL connection errors
+
+If the **Service endpoint** uses the `privatelink` alias (for example `https://your-resource.privatelink.openai.azure.com`), SSL validation fails because the Azure OpenAI certificate is issued for `*.openai.azure.com`, not `*.privatelink.openai.azure.com`.
+
+Use the standard resource hostname in the service endpoint field and let DNS resolve it to the private IP:
+
+```text
+https://your-resource.openai.azure.com
+```
+
+## "Error getting AI response" with 404 or DeploymentNotFound
+
+The value in the **Model name** field does not match a deployment in your Azure OpenAI resource. Azure OpenAI requires the **deployment name** in every API call, not the underlying model name and not the resource name.
+
+Verify the deployment name:
+
+1. Sign in to the [Azure AI Foundry portal](https://ai.azure.com) and select your resource
+2. Open **Deployments** (or **Models + endpoints** if the resource has been upgraded to Foundry)
+3. Copy the value from the **Name** column
+
+Deployments created before your organization adopted Azure AI Foundry may not appear in the portal. List them from the Azure CLI:
+
+```bash
+az cognitiveservices account deployment list --name "<resource-name>" --resource-group "<resource-group>" --output table
+```
+
+The model name field is case-sensitive.
diff --git a/content/troubleshooting/databricks-column-comments-length.md b/content/troubleshooting/databricks-column-comments-length.md
index 7303d1fb3..8384ed411 100644
--- a/content/troubleshooting/databricks-column-comments-length.md
+++ b/content/troubleshooting/databricks-column-comments-length.md
@@ -2,7 +2,7 @@
uid: databricks-column-comments-length
title: Databricks Column Comment Length Error
author: Support Team
-updated: 2026-02-06
+updated: 2026-04-08
applies_to:
products:
- product: Tabular Editor 2
@@ -19,6 +19,9 @@ applies_to:
# Databricks Column Comment Length Error
+> [!TIP]
+> Databricks has released a new ODBC driver that replaces the legacy Simba Spark ODBC Driver. The new [Databricks ODBC Driver](https://www.databricks.com/spark/odbc-drivers-download) may not have the `MaxCommentLen` limitation described below. If you experience this issue, consider switching to the new driver, which Tabular Editor 3.26.0 and later supports.
+
When using the Import Table Wizard to import tables from Databricks, you may encounter a connection error if column comments (descriptions) exceed 512 characters. This limitation exists in the Simba Spark ODBC Driver, even though Databricks Unity Catalog allows longer column comments.
A typical error message looks like:
diff --git a/content/troubleshooting/index.md b/content/troubleshooting/index.md
index ab996285f..4d74a6018 100644
--- a/content/troubleshooting/index.md
+++ b/content/troubleshooting/index.md
@@ -9,6 +9,7 @@ This section contains troubleshooting guides and solutions for common issues.
- @locale-not-supported - Locale Not Supported
- @calendar-blank-value - Calendar function blank date error
- @direct-lake-entity-updates-reverting - Entity Name Changes Revert in Direct Lake Models
+- @azure-openai-connection-errors - AI Assistant connection failures when using Azure OpenAI
---
diff --git a/content/troubleshooting/toc.md b/content/troubleshooting/toc.md
index c4e7b7572..666a3e9ea 100644
--- a/content/troubleshooting/toc.md
+++ b/content/troubleshooting/toc.md
@@ -6,4 +6,6 @@
# @calendar-blank-value
-# @direct-lake-entity-updates-reverting
\ No newline at end of file
+# @direct-lake-entity-updates-reverting
+
+# @azure-openai-connection-errors
\ No newline at end of file
diff --git a/content/tutorials/connecting-to-azure-databricks.md b/content/tutorials/connecting-to-azure-databricks.md
index baddeba71..e7d89fd8b 100644
--- a/content/tutorials/connecting-to-azure-databricks.md
+++ b/content/tutorials/connecting-to-azure-databricks.md
@@ -2,7 +2,7 @@
uid: connecting-to-azure-databricks
title: Connecting to Azure Databricks
author: David Bojsen
-updated: 2025-08-05
+updated: 2026-04-08
applies_to:
products:
- product: Tabular Editor 2
@@ -28,6 +28,10 @@ Before you begin, ensure you have the following:
- A valid Azure Databricks workspace
- Appropriate permissions to access the Databricks data
- Tabular Editor 3 (Desktop, Business, or Enterprise edition)
+- The [Databricks ODBC Driver](https://www.databricks.com/spark/odbc-drivers-download) installed on your machine
+
+> [!IMPORTANT]
+> Databricks has released a new ODBC driver that replaces the legacy Simba Spark ODBC Driver. We recommend installing the new [Databricks ODBC Driver](https://www.databricks.com/spark/odbc-drivers-download). Tabular Editor 3.26.0 and later supports both drivers, but the new driver is the recommended option going forward. The legacy Simba driver is available from the [Databricks ODBC driver archive](https://www.databricks.com/spark/odbc-drivers-archive#simba_odbc).
## Authentication Methods
diff --git a/content/tutorials/direct-lake-guidance.md b/content/tutorials/direct-lake-guidance.md
index 7165f3ad4..db8c41484 100644
--- a/content/tutorials/direct-lake-guidance.md
+++ b/content/tutorials/direct-lake-guidance.md
@@ -2,7 +2,7 @@
uid: direct-lake-guidance
title: Direct Lake Guidance
author: Daniel Otykier
-updated: 2024-06-18
+updated: 2026-03-27
applies_to:
products:
- product: Tabular Editor 2
@@ -30,7 +30,7 @@ The following table summarizes the storage modes available in Power BI semantic
| Import | Data is imported into the semantic model and stored in the model's in-memory cache (VertiPaq). | When you need fast query performance and can afford to refresh the data periodically. |
| DirectQuery | Data is queried directly from the source at query time, without being imported into the model. Supports various sources, such as SQL, KQL and even other semantic models. | When you need real-time data access or when the data volume is too large to fit in memory. |
| Dual | A hybrid mode where the engine can choose between returning the imported data or delegating to DirectQuery, depending on the query context. | When your model contains a mix of DirectQuery and Import tables (for example when using aggregations), and you have tables that are related to both. |
-| Direct Lake on OneLake | Utilizes the Delta Parquet story format to quickly swap the data into semantic model memory when needed. | When your data is already available as tables or materialized views in a Fabric Warehouse or Lakehouse. |
+| Direct Lake on OneLake | Utilizes the Delta Parquet storage format to quickly swap the data into semantic model memory when needed. | When your data is already available as tables or materialized views in a Fabric Warehouse or Lakehouse. |
| Direct Lake on SQL | Older version of Direct Lake which utilizes the SQL Analytics Endpoint of Fabric Warehouses or Lakehouses. | Not recommended for new development (use Direct Lake on OneLake instead). |
> [!NOTE]
@@ -40,14 +40,26 @@ The following table summarizes the storage modes available in Power BI semantic
[Direct Lake on OneLake](https://learn.microsoft.com/en-us/fabric/fundamentals/direct-lake-overview#key-concepts-and-terminology) was introduced in March 2025 as an alternative to Direct Lake on SQL. With Direct Lake on OneLake, there is no dependency on the SQL endpoint and no fallback to DirectQuery mode. This also means that the [usual restrictions that apply to DirectQuery models](https://learn.microsoft.com/en-us/power-bi/connect-data/desktop-directquery-about#modeling-limitations) do not apply to Direct Lake on OneLake models.
-However, as with Direct Lake on SQL, there are still some [limitations that *do* apply](https://learn.microsoft.com/en-us/fabric/fundamentals/direct-lake-overview#considerations-and-limitations). The most important limitations are listed below. See the link for a full list of limitations:
+> [!NOTE]
+> Direct Lake on OneLake is currently in public preview. You must enable the tenant setting **User can create Direct Lake on OneLake semantic models (preview)** in the Fabric admin portal before you can create semantic models with this table storage mode.
+
+However, as with Direct Lake on SQL, there are still some [limitations that *do* apply](https://learn.microsoft.com/en-us/fabric/fundamentals/direct-lake-overview#considerations-and-limitations). Key limitations include:
+
+- Calculated columns are not supported in either Direct Lake mode.
+- Calculated tables cannot reference columns or tables in Direct Lake storage mode. Calculation groups, what-if parameters and field parameters are supported because they create implicit calculated tables that do not reference Direct Lake columns.
+- Non-materialized SQL views are not supported as data sources for Direct Lake on OneLake tables. Use materialized views or ensure the source Delta table contains the columns you need.
+- Shortcuts in a lakehouse are not supported as data sources during the public preview of Direct Lake on OneLake.
-- Calculated columns on Direct Lake tables cannot reference columns that are sourced from OneLake.
-- Calculated tables on Direct Lake models cannot refer columns on Direct Lake tables that are sourced from OneLake.
+For a full and up-to-date list of limitations, see the [Microsoft documentation on Direct Lake considerations and limitations](https://learn.microsoft.com/en-us/fabric/fundamentals/direct-lake-overview#considerations-and-limitations).
-One possible workaround for the above limitation, is to create a **composite model** by combining Direct Lake tables with Import tables. This is allowed with Direct Lake on OneLake, but not with Direct Lake on SQL. In this case, you would typically use Import mode for smaller dimension tables, where you may need to add custom groupings, which calculated columns are ideal for, while keeping the larger fact tables in Direct Lake mode.
+### Composite models
-Alternatively, ensure that your source contains the columns it needs. If you add columns through a view, please note that the view must be materialized in the Fabric Warehouse or Lakehouse, as Direct Lake on OneLake does not support non-materialized views.
+One workaround for the calculated column limitation is to create a **composite model** by combining Direct Lake tables with Import tables. This is supported with Direct Lake on OneLake, but not with Direct Lake on SQL. In a composite model, you typically keep larger fact tables in Direct Lake mode while using Import mode for smaller dimension tables where you need calculated columns or custom groupings.
+
+Direct Lake on OneLake also supports combining with DirectQuery tables through XMLA-based tools such as Tabular Editor. Import tables can be added through Power BI web modeling, Power BI Desktop (live editing) or through XMLA tools.
+
+> [!NOTE]
+> Direct Lake on SQL does not support composite models. You cannot combine Direct Lake on SQL tables with Import, DirectQuery or Dual storage mode tables in the same semantic model. However, you can use Power BI Desktop to create a composite model *on top of* a Direct Lake on SQL semantic model and extend it with new tables. See [Build a composite model on a semantic model](https://learn.microsoft.com/en-us/power-bi/transform-model/desktop-composite-models#building-a-composite-model-on-a-semantic-model-or-model) for more information.
## Collation
@@ -57,7 +69,7 @@ When using **Direct Lake on OneLake**, the collation of the model is the same as
For a **Direct Lake on SQL** model, the collation is case-insensitive for queries that do not fallback to DirectQuery. If the query does fallback, the collation depends on the collation of the source. For a Fabric Warehouse, the collation might be case-sensitive, in which case you should specify a [case-sensitive collation on the model](https://data-goblins.com/power-bi/case-specific).
> [!NOTE]
-> You cannot change the collation of a model once the metadata has been deployed to Analysis Services / Power BI. As such, if you plan to use Direct Lake on SQL with a case-sensitive Fabric Warehouse, you must set the collation on the model metadata before it's deployed:
+> You cannot change the collation of a model once the metadata has been deployed to Analysis Services / Power BI. As such, if you plan to use Direct Lake on SQL with a case-sensitive Fabric Warehouse, you must set the collation on the model metadata before it is deployed:
>
> 1. Create a new model in Tabular Editor 3 (File > New > Model...)
> 2. Uncheck "Use workspace database"
@@ -106,11 +118,11 @@ This section contains a more technical description on how the TOM objects and pr
To manually set up a table for **Direct Lake on OneLake** mode, you need to do the following:
-1. **Create Shared Expression**: Direct Lake tables use "Entity" partitions, which much reference a Shared Expression in the model. Start out by creating this shared expression, if you don't have it already. Name it `DatabaseQuery`:
+1. **Create Shared Expression**: Direct Lake tables use "Entity" partitions, which must reference a Shared Expression in the model. Start by creating this shared expression, if you do not have it already. Name it `DatabaseQuery`:

-2. **Configure Shared Expression**: Set the **Kind** property of the expression you created in step 1 to "M", and set the *Expression** property to the following M query, replacing the IDs in the URL for your Fabric workspace and Lakehouse/Warehouse:
+2. **Configure Shared Expression**: Set the **Kind** property of the expression you created in step 1 to "M", and set the **Expression** property to the following M query, replacing the IDs in the URL for your Fabric workspace and Lakehouse/Warehouse:
```m
let