diff --git a/tests/integration/data_sources_and_expectations/test_canonical_expectations.py b/tests/integration/data_sources_and_expectations/test_canonical_expectations.py
index 3bbbf429329d..9daa0b5f5a37 100644
--- a/tests/integration/data_sources_and_expectations/test_canonical_expectations.py
+++ b/tests/integration/data_sources_and_expectations/test_canonical_expectations.py
@@ -8,6 +8,7 @@
 from great_expectations.datasource.fluent.interfaces import Batch
 from tests.integration.conftest import parameterize_batch_for_data_sources
 from tests.integration.test_utils.data_source_config import (
+    MSSQLDatasourceTestConfig,
     MySQLDatasourceTestConfig,
     PandasDataFrameDatasourceTestConfig,
     PandasFilesystemCsvDatasourceTestConfig,
@@ -18,6 +19,7 @@
 
 @parameterize_batch_for_data_sources(
     data_source_configs=[
+        MSSQLDatasourceTestConfig(),
         MySQLDatasourceTestConfig(),
         PandasDataFrameDatasourceTestConfig(),
         PandasFilesystemCsvDatasourceTestConfig(),
@@ -34,6 +36,7 @@ def test_expect_column_min_to_be_between(batch_for_datasource) -> None:
 
 @parameterize_batch_for_data_sources(
     data_source_configs=[
+        MSSQLDatasourceTestConfig(column_types={"date": sqltypes.DATE}),
         MySQLDatasourceTestConfig(column_types={"date": sqltypes.DATE}),
         PandasDataFrameDatasourceTestConfig(),
         PandasFilesystemCsvDatasourceTestConfig(),
@@ -62,6 +65,7 @@ def test_expect_column_min_to_be_between__date(batch_for_datasource) -> None:
 
 @parameterize_batch_for_data_sources(
     data_source_configs=[
+        MSSQLDatasourceTestConfig(column_types={"date": sqltypes.DATE}),
         MySQLDatasourceTestConfig(column_types={"date": sqltypes.DATE}),
         PandasDataFrameDatasourceTestConfig(),
         PandasFilesystemCsvDatasourceTestConfig(),
@@ -90,6 +94,7 @@ def test_expect_column_max_to_be_between__date(batch_for_datasource) -> None:
 
 @parameterize_batch_for_data_sources(
     data_source_configs=[
+        MSSQLDatasourceTestConfig(),
         MySQLDatasourceTestConfig(),
         PandasDataFrameDatasourceTestConfig(),
         PandasFilesystemCsvDatasourceTestConfig(),
@@ -106,6 +111,7 @@ def test_expect_column_max_to_be_between(batch_for_datasource) -> None:
 
 @parameterize_batch_for_data_sources(
     data_source_configs=[
+        MSSQLDatasourceTestConfig(),
         MySQLDatasourceTestConfig(),
         PandasDataFrameDatasourceTestConfig(),
         PandasFilesystemCsvDatasourceTestConfig(),
@@ -122,6 +128,7 @@ def test_expect_column_to_exist(batch_for_datasource):
 
 @parameterize_batch_for_data_sources(
     data_source_configs=[
+        MSSQLDatasourceTestConfig(),
         MySQLDatasourceTestConfig(),
         PandasDataFrameDatasourceTestConfig(),
         PandasFilesystemCsvDatasourceTestConfig(),
@@ -155,6 +162,7 @@ def test_expect_column_mean_to_be_between(batch_for_datasource):
 class TestExpectTableRowCountToEqualOtherTable:
     @parameterize_batch_for_data_sources(
         data_source_configs=[
+            MSSQLDatasourceTestConfig(),
             MySQLDatasourceTestConfig(),
             PostgreSQLDatasourceTestConfig(),
             SnowflakeDatasourceTestConfig(),
@@ -174,6 +182,7 @@ def test_success(
 
     @parameterize_batch_for_data_sources(
         data_source_configs=[
+            MSSQLDatasourceTestConfig(),
             MySQLDatasourceTestConfig(),
             PostgreSQLDatasourceTestConfig(
                 extra_column_types={"other_table": {"col_b": sqltypes.VARCHAR}}
@@ -199,6 +208,7 @@ def test_different_counts(
 
     @parameterize_batch_for_data_sources(
         data_source_configs=[
+            MSSQLDatasourceTestConfig(),
             MySQLDatasourceTestConfig(),
             PostgreSQLDatasourceTestConfig(),
             SnowflakeDatasourceTestConfig(),
diff --git a/tests/integration/test_utils/data_source_config/__init__.py b/tests/integration/test_utils/data_source_config/__init__.py
index 1381f09dabd6..6e3c786292ec 100644
--- a/tests/integration/test_utils/data_source_config/__init__.py
+++ b/tests/integration/test_utils/data_source_config/__init__.py
@@ -1,4 +1,5 @@
 from .base import DataSourceTestConfig
+from .mssql import MSSQLDatasourceTestConfig
 from .mysql import MySQLDatasourceTestConfig
 from .pandas_data_frame import PandasDataFrameDatasourceTestConfig
 from .pandas_filesystem_csv import PandasFilesystemCsvDatasourceTestConfig
diff --git a/tests/integration/test_utils/data_source_config/mssql.py b/tests/integration/test_utils/data_source_config/mssql.py
new file mode 100644
index 000000000000..7faaf4a66a58
--- /dev/null
+++ b/tests/integration/test_utils/data_source_config/mssql.py
@@ -0,0 +1,63 @@
+from typing import Mapping, Union
+
+import pandas as pd
+import pytest
+
+from great_expectations.compatibility.typing_extensions import override
+from great_expectations.datasource.fluent.interfaces import Batch
+from tests.integration.test_utils.data_source_config.base import (
+    BatchTestSetup,
+    DataSourceTestConfig,
+)
+from tests.integration.test_utils.data_source_config.sql import SQLBatchTestSetup
+
+
+class MSSQLDatasourceTestConfig(DataSourceTestConfig):
+    @property
+    @override
+    def label(self) -> str:
+        return "mssql"
+
+    @property
+    @override
+    def pytest_mark(self) -> pytest.MarkDecorator:
+        return pytest.mark.mssql
+
+    @override
+    def create_batch_setup(
+        self,
+        request: pytest.FixtureRequest,
+        data: pd.DataFrame,
+        extra_data: Mapping[str, pd.DataFrame],
+    ) -> BatchTestSetup:
+        return MSSQLBatchTestSetup(
+            data=data,
+            config=self,
+            extra_data=extra_data,
+        )
+
+
+class MSSQLBatchTestSetup(SQLBatchTestSetup[MSSQLDatasourceTestConfig]):
+    @property
+    @override
+    def connection_string(self) -> str:
+        return "mssql+pyodbc://sa:ReallyStrongPwd1234%^&*@localhost:1433/test_ci?driver=ODBC Driver 17 for SQL Server&charset=utf8&autocommit=true"  # noqa: E501 # it's okay
+
+    @property
+    @override
+    def schema(self) -> Union[str, None]:
+        return None
+
+    @override
+    def make_batch(self) -> Batch:
+        name = self._random_resource_name()
+        return (
+            self.context.data_sources.add_sql(name=name, connection_string=self.connection_string)
+            .add_table_asset(
+                name=name,
+                table_name=self.table_name,
+                schema_name=self.schema,
+            )
+            .add_batch_definition_whole_table(name=name)
+            .get_batch()
+        )
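
For orientation, here is a minimal sketch of how a canonical-expectation test picks up the new backend: adding MSSQLDatasourceTestConfig() to a data_source_configs list is all that is needed, and the generated parametrization carries pytest.mark.mssql and connects through the hard-coded pyodbc connection string in mssql.py above. The specific expectation, the sample DataFrame, and the data= keyword shown here are illustrative assumptions, not part of this patch.

```python
# Sketch only; the column data, bounds, and the `data=` keyword are assumed.
import pandas as pd

import great_expectations.expectations as gxe
from tests.integration.conftest import parameterize_batch_for_data_sources
from tests.integration.test_utils.data_source_config import (
    MSSQLDatasourceTestConfig,
    PandasDataFrameDatasourceTestConfig,
)


@parameterize_batch_for_data_sources(
    data_source_configs=[
        MSSQLDatasourceTestConfig(),  # new: this parametrization runs under pytest.mark.mssql
        PandasDataFrameDatasourceTestConfig(),  # existing in-memory baseline
    ],
    data=pd.DataFrame({"a": [1, 2, 3]}),
)
def test_expect_column_max_to_be_between(batch_for_datasource) -> None:
    # For the MSSQL case, batch_for_datasource is the Batch that
    # MSSQLBatchTestSetup.make_batch() builds from a whole-table batch definition.
    result = batch_for_datasource.validate(
        gxe.ExpectColumnMaxToBeBetween(column="a", min_value=3, max_value=3)
    )
    assert result.success
```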