From e0042afa80fd7d25fed282c320c1bc1f9b093ad5 Mon Sep 17 00:00:00 2001
From: Eduard van Valkenburg
Date: Tue, 3 Dec 2024 18:40:01 +0100
Subject: [PATCH] Python: graduate filters, add exception during addition and
 some cleanup (#9856)

### Motivation and Context

This PR graduates the filters out of their experimental state. It also
updates the docstrings and adds a dedicated exception for errors that occur
while adding or removing filters.

Closes #9838
Fixes #9641

### Description
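
All filter contexts and the `FilterTypes` enum are now exported directly from
`semantic_kernel.filters`, and failures while adding or removing a filter now
raise the new `FilterManagementException`. A minimal sketch (the logging
filter itself is illustrative):

```python
from semantic_kernel import Kernel
from semantic_kernel.exceptions import FilterManagementException
from semantic_kernel.filters import FilterTypes, FunctionInvocationContext

kernel = Kernel()


@kernel.filter(FilterTypes.FUNCTION_INVOCATION)
async def log_filter(context: FunctionInvocationContext, next):
    # Runs around every function invocation on this kernel.
    print(f"Invoking: {context.function.name}")
    await next(context)


# Registering with an unknown filter type now raises the new exception:
try:
    kernel.add_filter("unknown_type", log_filter)
except FilterManagementException as exc:
    print(exc)
```
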
### Contribution Checklist

- [x] The code builds clean without any errors or warnings
- [x] The PR follows the [SK Contribution Guidelines](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md) and the [pre-submission formatting script](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md#development-scripts) raises no violations
- [x] All unit tests pass, and I have added new tests where possible
- [x] I didn't break anyone :smile:
---
 python/pyproject.toml                         |  1 +
 .../filtering/auto_function_invoke_filters.py | 15 +++-------
 .../filtering/function_invocation_filters.py  | 11 ++++----
 .../function_invocation_filters_stream.py     | 23 +++++++++-------
 .../semantic_kernel/connectors/ai/__init__.py |  3 ++-
 .../contents/chat_message_content.py          |  2 +-
 python/semantic_kernel/exceptions/__init__.py |  1 +
 .../exceptions/filter_exceptions.py           | 20 ++++++++++++++
 python/semantic_kernel/filters/__init__.py    | 15 +++++++++++
 .../auto_function_invocation_context.py       | 22 +++++++++++++++-
 .../filters/filter_context_base.py            |  2 --
 .../semantic_kernel/filters/filter_types.py   |  3 ---
 .../functions/function_invocation_context.py  | 14 +++++++++-
 .../filters/kernel_filters_extension.py       | 21 ++++++++-------
 .../filters/prompts/prompt_render_context.py  | 14 +++++++++-
 .../audio_to_text/audio_to_text_test_base.py  |  6 +++--
 .../audio_to_text/test_audio_to_text.py       |  2 ++
 .../completions/chat_completion_test_base.py  | 26 +++++++++++--------
 .../completions/test_text_completion.py       | 25 +++++++++---------
 .../embeddings/test_embedding_service_base.py | 14 ++++++----
 .../unit/agents/test_termination_strategy.py  |  8 +++---
 .../open_ai/test_openai_request_settings.py   |  8 +++---
 .../kernel/test_kernel_filter_extension.py    |  7 ++---
 .../tests/unit/schema/test_schema_builder.py  |  6 ++---
 24 files changed, 179 insertions(+), 90 deletions(-)
 create mode 100644 python/semantic_kernel/exceptions/filter_exceptions.py

diff --git a/python/pyproject.toml b/python/pyproject.toml
index e6c46fd2d1b5..27f5eff41e07 100644
--- a/python/pyproject.toml
+++ b/python/pyproject.toml
@@ -146,6 +146,7 @@ environments = [
 ]
 
 [tool.pytest.ini_options]
+testpaths = 'tests'
 addopts = "-ra -q -r fEX"
 asyncio_default_fixture_loop_scope = "function"
 filterwarnings = [
diff --git a/python/samples/concepts/filtering/auto_function_invoke_filters.py b/python/samples/concepts/filtering/auto_function_invoke_filters.py
index bf5f7e358716..b1e055e9397d 100644
--- a/python/samples/concepts/filtering/auto_function_invoke_filters.py
+++ b/python/samples/concepts/filtering/auto_function_invoke_filters.py
@@ -4,19 +4,12 @@
 import os
 
 from semantic_kernel import Kernel
-from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior
+from semantic_kernel.connectors.ai import FunctionChoiceBehavior
 from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion, OpenAIChatPromptExecutionSettings
-from semantic_kernel.contents import ChatHistory
-from semantic_kernel.contents.chat_message_content import ChatMessageContent
-from semantic_kernel.contents.function_call_content import FunctionCallContent
-from semantic_kernel.contents.function_result_content import FunctionResultContent
+from semantic_kernel.contents import ChatHistory, ChatMessageContent, FunctionCallContent, FunctionResultContent
 from semantic_kernel.core_plugins import MathPlugin, TimePlugin
-from semantic_kernel.filters.auto_function_invocation.auto_function_invocation_context import (
-    AutoFunctionInvocationContext,
-)
-from semantic_kernel.filters.filter_types import FilterTypes
-from semantic_kernel.functions import KernelArguments
-from semantic_kernel.functions.function_result import FunctionResult
+from semantic_kernel.filters import AutoFunctionInvocationContext, FilterTypes
+from semantic_kernel.functions import FunctionResult, KernelArguments
 
 system_message = """
 You are a chat bot. Your name is Mosscap and
diff --git a/python/samples/concepts/filtering/function_invocation_filters.py b/python/samples/concepts/filtering/function_invocation_filters.py
index bf6e19785954..92b9132ef290 100644
--- a/python/samples/concepts/filtering/function_invocation_filters.py
+++ b/python/samples/concepts/filtering/function_invocation_filters.py
@@ -6,12 +6,11 @@
 from collections.abc import Callable, Coroutine
 from typing import Any
 
-from semantic_kernel.connectors.ai.open_ai.services.azure_chat_completion import AzureChatCompletion
-from semantic_kernel.contents.chat_history import ChatHistory
-from semantic_kernel.exceptions.kernel_exceptions import OperationCancelledException
-from semantic_kernel.filters.filter_types import FilterTypes
-from semantic_kernel.filters.functions.function_invocation_context import FunctionInvocationContext
-from semantic_kernel.kernel import Kernel
+from semantic_kernel import Kernel
+from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion
+from semantic_kernel.contents import ChatHistory
+from semantic_kernel.exceptions import OperationCancelledException
+from semantic_kernel.filters import FilterTypes, FunctionInvocationContext
 
 logger = logging.getLogger(__name__)
 
diff --git a/python/samples/concepts/filtering/function_invocation_filters_stream.py b/python/samples/concepts/filtering/function_invocation_filters_stream.py
index 17bca2cbaf24..f1dbb85b1601 100644
--- a/python/samples/concepts/filtering/function_invocation_filters_stream.py
+++ b/python/samples/concepts/filtering/function_invocation_filters_stream.py
@@ -3,15 +3,15 @@
 import asyncio
 import logging
 import os
+from collections.abc import Callable, Coroutine
 from functools import reduce
+from typing import Any
 
-from semantic_kernel.connectors.ai.open_ai.services.open_ai_chat_completion import OpenAIChatCompletion
-from semantic_kernel.contents import AuthorRole
-from semantic_kernel.contents.chat_history import ChatHistory
-from semantic_kernel.contents.streaming_chat_message_content import StreamingChatMessageContent
-from semantic_kernel.filters.filter_types import FilterTypes
-from semantic_kernel.functions.function_result import FunctionResult
-from semantic_kernel.kernel import Kernel
+from semantic_kernel import Kernel
+from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion
+from semantic_kernel.contents import AuthorRole, ChatHistory, StreamingChatMessageContent
+from semantic_kernel.filters import FilterTypes, FunctionInvocationContext
+from semantic_kernel.functions import FunctionResult
 
 logger = logging.getLogger(__name__)
 
@@ -32,7 +32,10 @@
 # in the specific case of a filter for streaming functions, you need to override
the generator # that is present in the function_result.value as seen below. @kernel.filter(FilterTypes.FUNCTION_INVOCATION) -async def streaming_exception_handling(context, next): +async def streaming_exception_handling( + context: FunctionInvocationContext, + next: Callable[[FunctionInvocationContext], Coroutine[Any, Any, None]], +): await next(context) async def override_stream(stream): @@ -40,7 +43,9 @@ async def override_stream(stream): async for partial in stream: yield partial except Exception as e: - yield [StreamingChatMessageContent(role=AuthorRole.ASSISTANT, content=f"Exception caught: {e}")] + yield [ + StreamingChatMessageContent(role=AuthorRole.ASSISTANT, content=f"Exception caught: {e}", choice_index=0) + ] stream = context.result.value context.result = FunctionResult(function=context.result.function, value=override_stream(stream)) diff --git a/python/semantic_kernel/connectors/ai/__init__.py b/python/semantic_kernel/connectors/ai/__init__.py index e325b6540244..f4b6682a9eb8 100644 --- a/python/semantic_kernel/connectors/ai/__init__.py +++ b/python/semantic_kernel/connectors/ai/__init__.py @@ -1,5 +1,6 @@ # Copyright (c) Microsoft. All rights reserved. +from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings -__all__ = ["PromptExecutionSettings"] +__all__ = ["FunctionChoiceBehavior", "PromptExecutionSettings"] diff --git a/python/semantic_kernel/contents/chat_message_content.py b/python/semantic_kernel/contents/chat_message_content.py index da9005626de9..b369038cdceb 100644 --- a/python/semantic_kernel/contents/chat_message_content.py +++ b/python/semantic_kernel/contents/chat_message_content.py @@ -84,7 +84,7 @@ class ChatMessageContent(KernelContent): tag: ClassVar[str] = CHAT_MESSAGE_CONTENT_TAG role: AuthorRole name: str | None = None - items: list[Annotated[ITEM_TYPES, Field(..., discriminator=DISCRIMINATOR_FIELD)]] = Field(default_factory=list) + items: list[Annotated[ITEM_TYPES, Field(discriminator=DISCRIMINATOR_FIELD)]] = Field(default_factory=list) encoding: str | None = None finish_reason: FinishReason | None = None diff --git a/python/semantic_kernel/exceptions/__init__.py b/python/semantic_kernel/exceptions/__init__.py index f5d88ee71e9c..9ed131971525 100644 --- a/python/semantic_kernel/exceptions/__init__.py +++ b/python/semantic_kernel/exceptions/__init__.py @@ -2,6 +2,7 @@ from semantic_kernel.exceptions.agent_exceptions import * # noqa: F403 from semantic_kernel.exceptions.content_exceptions import * # noqa: F403 +from semantic_kernel.exceptions.filter_exceptions import * # noqa: F403 from semantic_kernel.exceptions.function_exceptions import * # noqa: F403 from semantic_kernel.exceptions.kernel_exceptions import * # noqa: F403 from semantic_kernel.exceptions.memory_connector_exceptions import * # noqa: F403 diff --git a/python/semantic_kernel/exceptions/filter_exceptions.py b/python/semantic_kernel/exceptions/filter_exceptions.py new file mode 100644 index 000000000000..69f80d70c5d9 --- /dev/null +++ b/python/semantic_kernel/exceptions/filter_exceptions.py @@ -0,0 +1,20 @@ +# Copyright (c) Microsoft. All rights reserved. 
+from semantic_kernel.exceptions.kernel_exceptions import KernelException
+
+
+class FilterException(KernelException):
+    """Base class for all filter exceptions."""
+
+    pass
+
+
+class FilterManagementException(FilterException):
+    """An error occurred while adding or removing the filter to/from the kernel."""
+
+    pass
+
+
+__all__ = [
+    "FilterException",
+    "FilterManagementException",
+]
diff --git a/python/semantic_kernel/filters/__init__.py b/python/semantic_kernel/filters/__init__.py
index e69de29bb2d1..b0b8b16e35c9 100644
--- a/python/semantic_kernel/filters/__init__.py
+++ b/python/semantic_kernel/filters/__init__.py
@@ -0,0 +1,15 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+from semantic_kernel.filters.auto_function_invocation.auto_function_invocation_context import (
+    AutoFunctionInvocationContext,
+)
+from semantic_kernel.filters.filter_types import FilterTypes
+from semantic_kernel.filters.functions.function_invocation_context import FunctionInvocationContext
+from semantic_kernel.filters.prompts.prompt_render_context import PromptRenderContext
+
+__all__ = [
+    "AutoFunctionInvocationContext",
+    "FilterTypes",
+    "FunctionInvocationContext",
+    "PromptRenderContext",
+]
diff --git a/python/semantic_kernel/filters/auto_function_invocation/auto_function_invocation_context.py b/python/semantic_kernel/filters/auto_function_invocation/auto_function_invocation_context.py
index 3dedbefb2a59..5a0f7c300b46 100644
--- a/python/semantic_kernel/filters/auto_function_invocation/auto_function_invocation_context.py
+++ b/python/semantic_kernel/filters/auto_function_invocation/auto_function_invocation_context.py
@@ -10,7 +10,27 @@
 
 
 class AutoFunctionInvocationContext(FilterContextBase):
-    """Class for auto function invocation context."""
+    """Class for auto function invocation context.
+
+    This is the context supplied to the auto function invocation filters.
+
+    A common use case is to alter the function_result, for instance filling it with a
+    pre-computed value in order to skip a step, such as when caching results.
+
+    Another option is to terminate the invocation loop early; this can be done by
+    setting terminate to True.
+
+    Attributes:
+        function: The function invoked.
+        kernel: The kernel used.
+        arguments: The arguments used to call the function.
+        chat_history: The chat history or None.
+        function_result: The function result or None.
+        request_sequence_index: The request sequence index.
+        function_sequence_index: The function sequence index.
+        function_count: The function count.
+        terminate: The flag to terminate.
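+
+    A minimal caching sketch built on this context (the `cache` mapping is
+    illustrative, not part of the kernel):
+
+        @kernel.filter(FilterTypes.AUTO_FUNCTION_INVOCATION)
+        async def cache_results(context, next):
+            # Serve a pre-computed result and stop the auto-invoke loop.
+            if context.function.name in cache:
+                context.function_result = cache[context.function.name]
+                context.terminate = True
+                return
+            await next(context)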
+
+    """
 
     chat_history: "ChatHistory | None" = None
     function_result: "FunctionResult | None" = None
diff --git a/python/semantic_kernel/filters/filter_context_base.py b/python/semantic_kernel/filters/filter_context_base.py
index d991378131ba..b7f1b8da82c8 100644
--- a/python/semantic_kernel/filters/filter_context_base.py
+++ b/python/semantic_kernel/filters/filter_context_base.py
@@ -3,7 +3,6 @@
 from typing import TYPE_CHECKING
 
 from semantic_kernel.kernel_pydantic import KernelBaseModel
-from semantic_kernel.utils.experimental_decorator import experimental_class
 
 if TYPE_CHECKING:
     from semantic_kernel.functions.kernel_arguments import KernelArguments
@@ -11,7 +10,6 @@
     from semantic_kernel.kernel import Kernel
 
 
-@experimental_class
 class FilterContextBase(KernelBaseModel):
     """Base class for Kernel Filter Contexts."""
 
diff --git a/python/semantic_kernel/filters/filter_types.py b/python/semantic_kernel/filters/filter_types.py
index 7dbee2b2cbe0..8928438d04c7 100644
--- a/python/semantic_kernel/filters/filter_types.py
+++ b/python/semantic_kernel/filters/filter_types.py
@@ -2,10 +2,7 @@
 
 from enum import Enum
 
-from semantic_kernel.utils.experimental_decorator import experimental_class
-
 
-@experimental_class
 class FilterTypes(str, Enum):
     """Enum for the filter types."""
 
diff --git a/python/semantic_kernel/filters/functions/function_invocation_context.py b/python/semantic_kernel/filters/functions/function_invocation_context.py
index 7ee5aabeb27a..5c9dedce50cb 100644
--- a/python/semantic_kernel/filters/functions/function_invocation_context.py
+++ b/python/semantic_kernel/filters/functions/function_invocation_context.py
@@ -9,6 +9,18 @@
 
 
 class FunctionInvocationContext(FilterContextBase):
-    """Class for function invocation context."""
+    """Class for function invocation context.
+
+    This filter context can be used to monitor which functions are called, and to
+    log each function call with its parameters and output. It can also be used for
+    caching, by setting the result value directly.
+
+    Attributes:
+        function: The function invoked.
+        kernel: The kernel used.
+        arguments: The arguments used to call the function.
+        result: The result of the function, or None.
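+
+    A minimal caching sketch (the `cache` mapping is illustrative, not part of
+    the kernel):
+
+        @kernel.filter(FilterTypes.FUNCTION_INVOCATION)
+        async def cached_invoke(context, next):
+            # Skip the actual invocation when a result is already known.
+            if context.function.name in cache:
+                context.result = cache[context.function.name]
+                return
+            await next(context)
+            cache[context.function.name] = context.result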
+
+    """
 
     result: "FunctionResult | None" = None
diff --git a/python/semantic_kernel/filters/kernel_filters_extension.py b/python/semantic_kernel/filters/kernel_filters_extension.py
index da37a3b791b2..82bf489f8c5f 100644
--- a/python/semantic_kernel/filters/kernel_filters_extension.py
+++ b/python/semantic_kernel/filters/kernel_filters_extension.py
@@ -7,10 +7,10 @@
 
 from pydantic import Field
 
+from semantic_kernel.exceptions.filter_exceptions import FilterManagementException
 from semantic_kernel.filters.filter_context_base import FilterContextBase
 from semantic_kernel.filters.filter_types import FilterTypes
 from semantic_kernel.kernel_pydantic import KernelBaseModel
-from semantic_kernel.utils.experimental_decorator import experimental_function
 
 FILTER_CONTEXT_TYPE = TypeVar("FILTER_CONTEXT_TYPE", bound=FilterContextBase)
 CALLABLE_FILTER_TYPE = Callable[[FILTER_CONTEXT_TYPE, Callable[[FILTER_CONTEXT_TYPE], None]], None]
@@ -32,7 +32,6 @@ class KernelFilterExtension(KernelBaseModel, ABC):
     prompt_rendering_filters: list[tuple[int, CALLABLE_FILTER_TYPE]] = Field(default_factory=list)
     auto_function_invocation_filters: list[tuple[int, CALLABLE_FILTER_TYPE]] = Field(default_factory=list)
 
-    @experimental_function
     def add_filter(self, filter_type: ALLOWED_FILTERS_LITERAL | FilterTypes, filter: CALLABLE_FILTER_TYPE) -> None:
         """Add a filter to the Kernel.
@@ -45,12 +44,17 @@ def add_filter(self, filter_type: ALLOWED_FILTERS_LITERAL | FilterTypes, filter:
             filter_type (str): The type of the filter to add (function_invocation, prompt_rendering)
             filter (object): The filter to add
 
+        Raises:
+            FilterManagementException: If an error occurs while adding the filter to the kernel
+
         """
-        if not isinstance(filter_type, FilterTypes):
-            filter_type = FilterTypes(filter_type)
-        getattr(self, FILTER_MAPPING[filter_type.value]).insert(0, (id(filter), filter))
+        try:
+            if not isinstance(filter_type, FilterTypes):
+                filter_type = FilterTypes(filter_type)
+            getattr(self, FILTER_MAPPING[filter_type.value]).insert(0, (id(filter), filter))
+        except Exception as exc:
+            raise FilterManagementException(f"Error adding filter {filter} to {filter_type}") from exc
 
-    @experimental_function
     def filter(
         self, filter_type: ALLOWED_FILTERS_LITERAL | FilterTypes
     ) -> Callable[[CALLABLE_FILTER_TYPE], CALLABLE_FILTER_TYPE]:
@@ -64,7 +68,6 @@ def decorator(
 
         return decorator
 
-    @experimental_function
     def remove_filter(
         self,
         filter_type: ALLOWED_FILTERS_LITERAL | FilterTypes | None = None,
@@ -83,10 +86,10 @@ def remove_filter(
         if filter_type and not isinstance(filter_type, FilterTypes):
             filter_type = FilterTypes(filter_type)
         if filter_id is None and position is None:
-            raise ValueError("Either hook_id or position should be provided.")
+            raise FilterManagementException("Either filter_id or position should be provided.")
         if position is not None:
             if filter_type is None:
-                raise ValueError("Please specify the type of filter when using position.")
+                raise FilterManagementException("Please specify the type of filter when using position.")
             getattr(self, FILTER_MAPPING[filter_type]).pop(position)
             return
         if filter_type:
diff --git a/python/semantic_kernel/filters/prompts/prompt_render_context.py b/python/semantic_kernel/filters/prompts/prompt_render_context.py
index c5b439e69914..dde178ad42d9 100644
--- a/python/semantic_kernel/filters/prompts/prompt_render_context.py
+++ b/python/semantic_kernel/filters/prompts/prompt_render_context.py
@@ -9,7 +9,19 @@
 
 
 class PromptRenderContext(FilterContextBase):
-    """Context for prompt rendering filters."""
+    """Context for prompt rendering filters.
+
+    When prompt rendering is expensive (for instance, when expensive functions are
+    called during rendering), this filter can be used to set the rendered_prompt
+    directly and return without calling next, skipping the render step.
+
+    Attributes:
+        function: The function invoked.
+        kernel: The kernel used.
+        arguments: The arguments used to call the function.
+        rendered_prompt: The result of the prompt rendering.
+        function_result: The result of the function that used the prompt.
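+
+    A minimal sketch that short-circuits an expensive render with a precomputed
+    prompt (the `prompt_cache` mapping is illustrative, not part of the kernel):
+
+        @kernel.filter(FilterTypes.PROMPT_RENDERING)
+        async def use_cached_prompt(context, next):
+            # Supply the prompt directly and skip the render step.
+            if context.function.name in prompt_cache:
+                context.rendered_prompt = prompt_cache[context.function.name]
+                return
+            await next(context)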
+
+    """
 
     rendered_prompt: str | None = None
     function_result: "FunctionResult | None" = None
diff --git a/python/tests/integration/audio_to_text/audio_to_text_test_base.py b/python/tests/integration/audio_to_text/audio_to_text_test_base.py
index 78b5ab78cf1c..9278b865836c 100644
--- a/python/tests/integration/audio_to_text/audio_to_text_test_base.py
+++ b/python/tests/integration/audio_to_text/audio_to_text_test_base.py
@@ -11,7 +11,7 @@
 # There is only the whisper model available on Azure OpenAI for audio to text. And that model is
 # only available in the North Switzerland region. Therefore, the endpoint is different from the one
 # we use for other services.
-is_service_setup_for_testing(["AZURE_OPENAI_AUDIO_TO_TEXT_ENDPOINT"])
+azure_setup = is_service_setup_for_testing(["AZURE_OPENAI_AUDIO_TO_TEXT_ENDPOINT"], raise_if_not_set=False)
 
 
 class AudioToTextTestBase:
@@ -22,5 +22,7 @@ def services(self) -> dict[str, AudioToTextClientBase]:
         """Return audio-to-text services."""
         return {
             "openai": OpenAIAudioToText(),
-            "azure_openai": AzureAudioToText(endpoint=os.environ["AZURE_OPENAI_AUDIO_TO_TEXT_ENDPOINT"]),
+            "azure_openai": AzureAudioToText(endpoint=os.environ["AZURE_OPENAI_AUDIO_TO_TEXT_ENDPOINT"])
+            if azure_setup
+            else None,
         }
diff --git a/python/tests/integration/audio_to_text/test_audio_to_text.py b/python/tests/integration/audio_to_text/test_audio_to_text.py
index 50c105710d10..ea051f1e577e 100644
--- a/python/tests/integration/audio_to_text/test_audio_to_text.py
+++ b/python/tests/integration/audio_to_text/test_audio_to_text.py
@@ -49,6 +49,8 @@ async def test_audio_to_text(
     """
     service = services[service_id]
+    if not service:
+        pytest.xfail("Azure Audio to Text not setup.")
     result = await service.get_text_content(audio_content)
 
     for word in expected_text:
diff --git a/python/tests/integration/completions/chat_completion_test_base.py b/python/tests/integration/completions/chat_completion_test_base.py
index 1fe87415e865..d83182151efc 100644
--- a/python/tests/integration/completions/chat_completion_test_base.py
+++ b/python/tests/integration/completions/chat_completion_test_base.py
@@ -56,17 +56,21 @@
 # There is no single model in Ollama that supports both image and tool call in chat completion
 # We are splitting the Ollama test into three services: chat, image, and tool call. The chat model
 # can be any model that supports chat completion. Also, Ollama is only available on Linux runners in our pipeline.
-ollama_setup: bool = is_service_setup_for_testing(["OLLAMA_CHAT_MODEL_ID"]) and is_test_running_on_supported_platforms([ - "Linux" -]) -ollama_image_setup: bool = is_service_setup_for_testing([ - "OLLAMA_CHAT_MODEL_ID_IMAGE" -]) and is_test_running_on_supported_platforms(["Linux"]) -ollama_tool_call_setup: bool = is_service_setup_for_testing([ - "OLLAMA_CHAT_MODEL_ID_TOOL_CALL" -]) and is_test_running_on_supported_platforms(["Linux"]) -google_ai_setup: bool = is_service_setup_for_testing(["GOOGLE_AI_API_KEY", "GOOGLE_AI_GEMINI_MODEL_ID"]) -vertex_ai_setup: bool = is_service_setup_for_testing(["VERTEX_AI_PROJECT_ID", "VERTEX_AI_GEMINI_MODEL_ID"]) +ollama_setup: bool = is_service_setup_for_testing( + ["OLLAMA_CHAT_MODEL_ID"], raise_if_not_set=False +) and is_test_running_on_supported_platforms(["Linux"]) +ollama_image_setup: bool = is_service_setup_for_testing( + ["OLLAMA_CHAT_MODEL_ID_IMAGE"], raise_if_not_set=False +) and is_test_running_on_supported_platforms(["Linux"]) +ollama_tool_call_setup: bool = is_service_setup_for_testing( + ["OLLAMA_CHAT_MODEL_ID_TOOL_CALL"], raise_if_not_set=False +) and is_test_running_on_supported_platforms(["Linux"]) +google_ai_setup: bool = is_service_setup_for_testing( + ["GOOGLE_AI_API_KEY", "GOOGLE_AI_GEMINI_MODEL_ID"], raise_if_not_set=False +) +vertex_ai_setup: bool = is_service_setup_for_testing( + ["VERTEX_AI_PROJECT_ID", "VERTEX_AI_GEMINI_MODEL_ID"], raise_if_not_set=False +) onnx_setup: bool = is_service_setup_for_testing( ["ONNX_GEN_AI_CHAT_MODEL_FOLDER"], raise_if_not_set=False ) # Tests are optional for ONNX diff --git a/python/tests/integration/completions/test_text_completion.py b/python/tests/integration/completions/test_text_completion.py index c4c8058a9ab0..1225f1b22aca 100644 --- a/python/tests/integration/completions/test_text_completion.py +++ b/python/tests/integration/completions/test_text_completion.py @@ -5,9 +5,15 @@ from functools import partial, reduce from typing import Any +if sys.version_info >= (3, 12): + from typing import override # pragma: no cover +else: + from typing_extensions import override # pragma: no cover + import pytest from openai import AsyncAzureOpenAI +from semantic_kernel import Kernel from semantic_kernel.connectors.ai.bedrock import BedrockTextCompletion, BedrockTextPromptExecutionSettings from semantic_kernel.connectors.ai.google.google_ai import GoogleAITextCompletion, GoogleAITextPromptExecutionSettings from semantic_kernel.connectors.ai.google.vertex_ai import VertexAITextCompletion, VertexAITextPromptExecutionSettings @@ -19,26 +25,19 @@ OpenAITextCompletion, OpenAITextPromptExecutionSettings, ) +from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings from semantic_kernel.connectors.ai.text_completion_client_base import TextCompletionClientBase from semantic_kernel.contents.chat_message_content import ChatMessageContent from semantic_kernel.contents.text_content import TextContent from semantic_kernel.utils.authentication.entra_id_authentication import get_entra_auth_token - -if sys.version_info >= (3, 12): - from typing import override # pragma: no cover -else: - from typing_extensions import override # pragma: no cover - -from semantic_kernel import Kernel -from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings from tests.integration.completions.completion_test_base import CompletionTestBase, ServiceType from tests.utils import is_service_setup_for_testing, is_test_running_on_supported_platforms, retry -ollama_setup: bool = 
is_service_setup_for_testing(["OLLAMA_TEXT_MODEL_ID"]) and is_test_running_on_supported_platforms([ - "Linux" -]) -google_ai_setup: bool = is_service_setup_for_testing(["GOOGLE_AI_API_KEY"]) -vertex_ai_setup: bool = is_service_setup_for_testing(["VERTEX_AI_PROJECT_ID"]) +ollama_setup: bool = is_service_setup_for_testing( + ["OLLAMA_TEXT_MODEL_ID"], raise_if_not_set=False +) and is_test_running_on_supported_platforms(["Linux"]) +google_ai_setup: bool = is_service_setup_for_testing(["GOOGLE_AI_API_KEY"], raise_if_not_set=False) +vertex_ai_setup: bool = is_service_setup_for_testing(["VERTEX_AI_PROJECT_ID"], raise_if_not_set=False) onnx_setup: bool = is_service_setup_for_testing( ["ONNX_GEN_AI_TEXT_MODEL_FOLDER"], raise_if_not_set=False ) # Tests are optional for ONNX diff --git a/python/tests/integration/embeddings/test_embedding_service_base.py b/python/tests/integration/embeddings/test_embedding_service_base.py index 4bb68b8729fd..eb4e4e38b152 100644 --- a/python/tests/integration/embeddings/test_embedding_service_base.py +++ b/python/tests/integration/embeddings/test_embedding_service_base.py @@ -41,11 +41,15 @@ mistral_ai_setup: bool = is_service_setup_for_testing( ["MISTRALAI_API_KEY", "MISTRALAI_EMBEDDING_MODEL_ID"], raise_if_not_set=False ) # We don't have a MistralAI deployment -google_ai_setup: bool = is_service_setup_for_testing(["GOOGLE_AI_API_KEY", "GOOGLE_AI_EMBEDDING_MODEL_ID"]) -vertex_ai_setup: bool = is_service_setup_for_testing(["VERTEX_AI_PROJECT_ID", "VERTEX_AI_EMBEDDING_MODEL_ID"]) -ollama_setup: bool = is_service_setup_for_testing([ - "OLLAMA_EMBEDDING_MODEL_ID" -]) and is_test_running_on_supported_platforms(["Linux"]) +google_ai_setup: bool = is_service_setup_for_testing( + ["GOOGLE_AI_API_KEY", "GOOGLE_AI_EMBEDDING_MODEL_ID"], raise_if_not_set=False +) +vertex_ai_setup: bool = is_service_setup_for_testing( + ["VERTEX_AI_PROJECT_ID", "VERTEX_AI_EMBEDDING_MODEL_ID"], raise_if_not_set=False +) +ollama_setup: bool = is_service_setup_for_testing( + ["OLLAMA_EMBEDDING_MODEL_ID"], raise_if_not_set=False +) and is_test_running_on_supported_platforms(["Linux"]) class EmbeddingServiceTestBase: diff --git a/python/tests/unit/agents/test_termination_strategy.py b/python/tests/unit/agents/test_termination_strategy.py index 0d2b34308f3b..d876b57e9859 100644 --- a/python/tests/unit/agents/test_termination_strategy.py +++ b/python/tests/unit/agents/test_termination_strategy.py @@ -29,7 +29,7 @@ async def create_channel(self) -> AgentChannel: return AsyncMock(spec=AgentChannel) -class TestTerminationStrategy(TerminationStrategy): +class TerminationStrategyTest(TerminationStrategy): """A test implementation of TerminationStrategy for testing purposes.""" async def should_agent_terminate(self, agent: "Agent", history: list[ChatMessageContent]) -> bool: @@ -40,7 +40,7 @@ async def should_agent_terminate(self, agent: "Agent", history: list[ChatMessage @pytest.mark.asyncio async def test_should_terminate_with_matching_agent(): agent = MockAgent(id="test-agent-id") - strategy = TestTerminationStrategy(agents=[agent]) + strategy = TerminationStrategyTest(agents=[agent]) # Assuming history is a list of ChatMessageContent; can be mocked or made minimal history = [MagicMock(spec=ChatMessageContent)] @@ -53,7 +53,7 @@ async def test_should_terminate_with_matching_agent(): async def test_should_terminate_with_non_matching_agent(): agent = MockAgent(id="test-agent-id") non_matching_agent = MockAgent(id="non-matching-agent-id") - strategy = TestTerminationStrategy(agents=[non_matching_agent]) + 
strategy = TerminationStrategyTest(agents=[non_matching_agent]) # Assuming history is a list of ChatMessageContent; can be mocked or made minimal history = [MagicMock(spec=ChatMessageContent)] @@ -65,7 +65,7 @@ async def test_should_terminate_with_non_matching_agent(): @pytest.mark.asyncio async def test_should_terminate_no_agents_in_strategy(): agent = MockAgent(id="test-agent-id") - strategy = TestTerminationStrategy() + strategy = TerminationStrategyTest() # Assuming history is a list of ChatMessageContent; can be mocked or made minimal history = [MagicMock(spec=ChatMessageContent)] diff --git a/python/tests/unit/connectors/ai/open_ai/test_openai_request_settings.py b/python/tests/unit/connectors/ai/open_ai/test_openai_request_settings.py index db39285f39a6..25cb379bff12 100644 --- a/python/tests/unit/connectors/ai/open_ai/test_openai_request_settings.py +++ b/python/tests/unit/connectors/ai/open_ai/test_openai_request_settings.py @@ -21,11 +21,11 @@ ############################################ # Test classes for structured output -class TestClass: +class ClassTest: attribute: str -class TestClassPydantic(KernelBaseModel): +class ClassTestPydantic(KernelBaseModel): attribute: str @@ -354,13 +354,13 @@ def test_openai_chat_prompt_execution_settings_with_json_structured_output(): def test_openai_chat_prompt_execution_settings_with_nonpydantic_type_structured_output(): settings = OpenAIChatPromptExecutionSettings() - settings.response_format = TestClass + settings.response_format = ClassTest assert isinstance(settings.response_format, type) def test_openai_chat_prompt_execution_settings_with_pydantic_type_structured_output(): settings = OpenAIChatPromptExecutionSettings() - settings.response_format = TestClassPydantic + settings.response_format = ClassTestPydantic assert issubclass(settings.response_format, BaseModel) diff --git a/python/tests/unit/kernel/test_kernel_filter_extension.py b/python/tests/unit/kernel/test_kernel_filter_extension.py index 18ecad6420c0..a2610912d8e2 100644 --- a/python/tests/unit/kernel/test_kernel_filter_extension.py +++ b/python/tests/unit/kernel/test_kernel_filter_extension.py @@ -2,6 +2,7 @@ from pytest import fixture, mark, raises from semantic_kernel import Kernel +from semantic_kernel.exceptions.filter_exceptions import FilterManagementException @fixture @@ -63,15 +64,15 @@ def test_remove_filter_without_type(self, kernel: Kernel, custom_filter, filter_ def test_unknown_filter_type(kernel: Kernel, custom_filter): - with raises(ValueError): + with raises(FilterManagementException): kernel.add_filter("unknown", custom_filter) def test_remove_filter_fail(kernel: Kernel): - with raises(ValueError): + with raises(FilterManagementException): kernel.remove_filter() def test_remove_filter_fail_position(kernel: Kernel): - with raises(ValueError): + with raises(FilterManagementException): kernel.remove_filter(position=0) diff --git a/python/tests/unit/schema/test_schema_builder.py b/python/tests/unit/schema/test_schema_builder.py index f6f2b4071983..5d24a599c96c 100644 --- a/python/tests/unit/schema/test_schema_builder.py +++ b/python/tests/unit/schema/test_schema_builder.py @@ -35,7 +35,7 @@ class ModelWithUnionPrimitives: item: int | str -class TestEnum(Enum): +class EnumTest(Enum): OPTION_A = "OptionA" OPTION_B = "OptionB" OPTION_C = "OptionC" @@ -370,7 +370,7 @@ def test_build_complex_type_list(): def test_enum_schema(): - schema = KernelJsonSchemaBuilder.build(TestEnum, "Test Enum Description") + schema = KernelJsonSchemaBuilder.build(EnumTest, "Test Enum 
Description") expected_schema = { "type": "string", "enum": ["OptionA", "OptionB", "OptionC"], @@ -380,7 +380,7 @@ def test_enum_schema(): def test_enum_schema_without_description(): - schema = KernelJsonSchemaBuilder.build(TestEnum) + schema = KernelJsonSchemaBuilder.build(EnumTest) expected_schema = {"type": "string", "enum": ["OptionA", "OptionB", "OptionC"]} assert schema == expected_schema