repo_id | file_path | content | __index_level_0__
---|---|---|---|
promptflow_repo/promptflow/src/promptflow/tests/executor | promptflow_repo/promptflow/src/promptflow/tests/executor/e2etests/test_csharp_executor_proxy.py | import json
import multiprocessing
import threading
from pathlib import Path
from tempfile import mkdtemp
from typing import Optional, Tuple, Union
import pytest
from promptflow._constants import FlowLanguage
from promptflow._utils.exception_utils import ExceptionPresenter
from promptflow.batch._batch_engine import BatchEngine
from promptflow.batch._csharp_executor_proxy import CSharpExecutorProxy
from promptflow.batch._result import BatchResult
from promptflow.contracts.run_info import Status
from promptflow.exceptions import ErrorTarget, ValidationException
from promptflow.executor._errors import ConnectionNotFound
from promptflow.storage._run_storage import AbstractRunStorage
from ..mock_execution_server import run_executor_server
from ..utils import MemoryRunStorage, get_flow_folder, get_flow_inputs_file, get_yaml_file
@pytest.mark.unittest
class TestCSharpExecutorProxy:
def setup_method(self):
BatchEngine.register_executor(FlowLanguage.CSharp, MockCSharpExecutorProxy)
def test_batch(self):
# submit a batch run
_, batch_result = self._submit_batch_run()
assert batch_result.status == Status.Completed
assert batch_result.completed_lines == batch_result.total_lines
assert batch_result.system_metrics.duration > 0
assert batch_result.completed_lines > 0
def test_batch_execution_error(self):
# submit a batch run
_, batch_result = self._submit_batch_run(has_error=True)
assert batch_result.status == Status.Completed
assert batch_result.total_lines == 3
assert batch_result.failed_lines == 1
assert batch_result.system_metrics.duration > 0
def test_batch_validation_error(self):
# prepare the init error file to mock the validation error
error_message = "'test_connection' not found."
test_exception = ConnectionNotFound(message=error_message)
error_dict = ExceptionPresenter.create(test_exception).to_dict()
init_error_file = Path(mkdtemp()) / "init_error.json"
with open(init_error_file, "w") as file:
json.dump(error_dict, file)
# submit a batch run
with pytest.raises(ValidationException) as e:
self._submit_batch_run(init_error_file=init_error_file)
assert error_message in e.value.message
assert e.value.error_codes == ["UserError", "ValidationError"]
assert e.value.target == ErrorTarget.BATCH
def test_batch_cancel(self):
# use a thread to submit a batch run
batch_engine, batch_run_thread = self._submit_batch_run(run_in_thread=True)
assert batch_engine._is_canceled is False
batch_run_thread.start()
# cancel the batch run
batch_engine.cancel()
batch_run_thread.join()
assert batch_engine._is_canceled is True
assert batch_result_global.status == Status.Canceled
assert batch_result_global.system_metrics.duration > 0
def _submit_batch_run(
self, run_in_thread=False, has_error=False, init_error_file=None
) -> Union[Tuple[BatchEngine, threading.Thread], Tuple[BatchEngine, BatchResult]]:
flow_folder = "csharp_flow"
mem_run_storage = MemoryRunStorage()
# init the batch engine
batch_engine = BatchEngine(
get_yaml_file(flow_folder),
get_flow_folder(flow_folder),
storage=mem_run_storage,
has_error=has_error,
init_error_file=init_error_file,
)
# prepare the inputs
input_dirs = {"data": get_flow_inputs_file(flow_folder)}
inputs_mapping = {"question": "${data.question}"}
output_dir = Path(mkdtemp())
if run_in_thread:
return batch_engine, threading.Thread(
target=self._batch_run_in_thread, args=(batch_engine, input_dirs, inputs_mapping, output_dir)
)
else:
return batch_engine, batch_engine.run(input_dirs, inputs_mapping, output_dir)
def _batch_run_in_thread(self, batch_engine: BatchEngine, input_dirs, inputs_mapping, output_dir):
global batch_result_global
batch_result_global = batch_engine.run(input_dirs, inputs_mapping, output_dir)
class MockCSharpExecutorProxy(CSharpExecutorProxy):
def __init__(self, process: multiprocessing.Process, port: str):
self._process = process
self._port = port
@classmethod
async def create(
cls,
flow_file: Path,
working_dir: Optional[Path] = None,
*,
connections: Optional[dict] = None,
storage: Optional[AbstractRunStorage] = None,
**kwargs,
) -> "MockCSharpExecutorProxy":
"""Create a new executor"""
has_error = kwargs.get("has_error", False)
init_error_file = kwargs.get("init_error_file", None)
port = cls.find_available_port()
process = multiprocessing.Process(
target=run_executor_server,
args=(
int(port),
has_error,
init_error_file,
),
)
process.start()
executor_proxy = cls(process, port)
await executor_proxy.ensure_executor_startup(init_error_file)
return executor_proxy
async def destroy(self):
"""Destroy the executor"""
if self._process and self._process.is_alive():
self._process.terminate()
self._process.join(timeout=5)
# Process.join() returns on timeout without raising; force kill if still alive
if self._process.is_alive():
self._process.kill()
def _is_executor_active(self):
return self._process and self._process.is_alive()
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor | promptflow_repo/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py | from types import GeneratorType
import pytest
from promptflow._utils.dataclass_serializer import serialize
from promptflow.contracts.run_info import Status
from promptflow.executor import FlowExecutor
from ..utils import get_yaml_file
@pytest.mark.usefixtures("dev_connections")
@pytest.mark.e2etest
class TestExecutorTraces:
def validate_openai_apicall(self, apicall: dict):
"""Validates an apicall dict.
Ensure that the trace output of openai api is a list of dicts.
Args:
apicall (dict): A dictionary representing apicall.
Raises:
AssertionError: If the API call is invalid.
"""
get_trace = False
if apicall.get("name", "") in (
"openai.api_resources.chat_completion.ChatCompletion.create",
"openai.api_resources.completion.Completion.create",
"openai.api_resources.embedding.Embedding.create",
"openai.resources.completions.Completions.create", # openai>=1.0.0
"openai.resources.chat.completions.Completions.create", # openai>=1.0.0
):
get_trace = True
output = apicall.get("output")
assert not isinstance(output, str)
assert isinstance(output, (list, dict))
if isinstance(output, list):
assert all(isinstance(item, dict) for item in output)
children = apicall.get("children", [])
if children is not None:
for child in children:
get_trace = get_trace or self.validate_openai_apicall(child)
return get_trace
def get_chat_input(stream):
return {
"question": "What is the capital of the United States of America?",
"chat_history": [],
"stream": stream,
}
def get_completion_input(stream):
return {"prompt": "What is the capital of the United States of America?", "stream": stream}
@pytest.mark.parametrize(
"flow_folder, inputs",
[
("openai_chat_api_flow", get_chat_input(False)),
("openai_chat_api_flow", get_chat_input(True)),
("openai_completion_api_flow", get_comletion_input(False)),
("openai_completion_api_flow", get_comletion_input(True)),
("llm_tool", {"topic": "Hello", "stream": False}),
("llm_tool", {"topic": "Hello", "stream": True}),
],
)
def test_executor_openai_api_flow(self, flow_folder, inputs, dev_connections):
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
flow_result = executor.exec_line(inputs)
assert isinstance(flow_result.output, dict)
assert flow_result.run_info.status == Status.Completed
assert flow_result.run_info.api_calls is not None
assert "total_tokens" in flow_result.run_info.system_metrics
assert flow_result.run_info.system_metrics["total_tokens"] > 0
get_traced = False
for api_call in flow_result.run_info.api_calls:
get_traced = get_traced or self.validate_openai_apicall(serialize(api_call))
assert get_traced is True
def test_executor_generator_tools(self, dev_connections):
executor = FlowExecutor.create(get_yaml_file("generator_tools"), dev_connections)
inputs = {"text": "This is a test"}
flow_result = executor.exec_line(inputs)
assert isinstance(flow_result.output, dict)
assert flow_result.run_info.status == Status.Completed
assert flow_result.run_info.api_calls is not None
tool_trace = flow_result.run_info.api_calls[0]["children"][0]
generator_trace = tool_trace.get("children")[0]
assert generator_trace is not None
output = generator_trace.get("output")
assert isinstance(output, list)
@pytest.mark.parametrize("allow_generator_output", [False, True])
def test_trace_behavior_with_generator_node(self, dev_connections, allow_generator_output):
"""Test to verify the trace output list behavior for a flow with a generator node.
This test checks the trace output list in two scenarios based on the 'allow_generator_output' flag:
- When 'allow_generator_output' is True, the output list should initially be empty until the generator is
consumed.
- When 'allow_generator_output' is False, the output list should contain items produced by the generator node.
The test ensures that the trace accurately reflects the generator's consumption status and helps in monitoring
and debugging flow execution.
"""
# Set up executor with a flow that contains a generator node
executor = FlowExecutor.create(get_yaml_file("generator_nodes"), dev_connections)
inputs = {"text": "This is a test"}
# Execute the flow with the given inputs and 'allow_generator_output' setting
flow_result = executor.exec_line(inputs, allow_generator_output=allow_generator_output)
# Verify that the flow execution result is a dictionary and the flow has completed successfully
assert isinstance(flow_result.output, dict)
assert flow_result.run_info.status == Status.Completed
assert flow_result.run_info.api_calls is not None
# Extract the trace for the generator node
tool_trace = flow_result.run_info.api_calls[0]["children"][0]
generator_output_trace = tool_trace.get("output")
# Verify that the trace output is a list
assert isinstance(generator_output_trace, list)
if allow_generator_output:
# If generator output is allowed, the trace list should be empty before consumption
assert not generator_output_trace
# Obtain the generator from the flow result
answer_gen = flow_result.output.get("answer")
assert isinstance(answer_gen, GeneratorType)
# Consume the generator and check that it yields text
try:
generated_text = next(answer_gen)
assert isinstance(generated_text, str)
# Verify the trace list contains the most recently generated item
assert generator_output_trace[-1] == generated_text
except StopIteration:
assert False, "Generator did not generate any text"
else:
# If generator output is not allowed, the trace list should contain generated items
assert generator_output_trace
assert all(isinstance(item, str) for item in generator_output_trace)
@pytest.mark.parametrize("flow_file", ["flow_with_trace", "flow_with_trace_async"])
def test_flow_with_trace(self, flow_file, dev_connections):
"""Tests to verify the flows that contains @trace marks.
They should generate traces with "Function" type and nested in the "Tool" traces.
This test case is to verify a flow like following structure, both sync and async mode:
.. code-block::
flow (Flow, 1.5s)
greetings (Tool, 1.5s)
get_user_name (Function, 1.0s)
is_valid_name (Function, 0.5s)
format_greeting (Function, 0.5s)
"""
executor = FlowExecutor.create(get_yaml_file(flow_file), dev_connections)
inputs = {"user_id": 1}
flow_result = executor.exec_line(inputs)
# Assert the run status is completed
assert flow_result.output == {"output": "Hello, User 1!"}
assert flow_result.run_info.status == Status.Completed
assert flow_result.run_info.api_calls is not None
# Verify the traces are as expected
api_calls = flow_result.run_info.api_calls
assert len(api_calls) == 1
# Assert the "flow" root level trace
flow_trace = api_calls[0]
assert flow_trace["name"] == "flow"
assert flow_trace["type"] == "Flow"
assert flow_trace["end_time"] - flow_trace["start_time"] == pytest.approx(1.5, abs=0.3)
assert len(flow_trace["children"]) == 1
assert flow_trace["system_metrics"]["duration"] == pytest.approx(1.5, abs=0.3)
assert flow_trace["system_metrics"]["prompt_tokens"] == 0
assert flow_trace["system_metrics"]["completion_tokens"] == 0
assert flow_trace["system_metrics"]["total_tokens"] == 0
# TODO: These assertions should be fixed after adding these fields to the top-level trace
assert "inputs" not in flow_trace
assert "output" not in flow_trace
assert "error" not in flow_trace
# Assert the "greetings" tool
greetings_trace = flow_trace["children"][0]
assert greetings_trace["name"] == "greetings"
assert greetings_trace["type"] == "Tool"
assert greetings_trace["inputs"] == inputs
assert greetings_trace["output"] == {"greeting": "Hello, User 1!"}
assert greetings_trace["error"] is None
assert greetings_trace["children"] is not None
assert greetings_trace["end_time"] - greetings_trace["start_time"] == pytest.approx(1.5, abs=0.3)
assert len(greetings_trace["children"]) == 2
# TODO: verify the system metrics. This might need to be fixed.
assert greetings_trace["system_metrics"] == {}
# Assert the "get_user_name" function
get_user_name_trace = greetings_trace["children"][0]
assert get_user_name_trace["name"] == "get_user_name"
assert get_user_name_trace["type"] == "Function"
assert get_user_name_trace["inputs"] == {"user_id": 1}
assert get_user_name_trace["output"] == "User 1"
assert get_user_name_trace["error"] is None
assert get_user_name_trace["end_time"] - get_user_name_trace["start_time"] == pytest.approx(1.0, abs=0.2)
assert len(get_user_name_trace["children"]) == 1
# TODO: verify the system metrics. This might need to be fixed.
assert get_user_name_trace["system_metrics"] == {}
# Assert the "get_user_name/is_valid_name" function
is_valid_name_trace = get_user_name_trace["children"][0]
assert is_valid_name_trace["name"] == "is_valid_name"
assert is_valid_name_trace["type"] == "Function"
assert is_valid_name_trace["inputs"] == {"name": "User 1"}
assert is_valid_name_trace["output"] is True
assert is_valid_name_trace["error"] is None
# When running tests on macOS, execution takes longer, so we relax abs to 0.15 and will extend it if needed.
assert is_valid_name_trace["end_time"] - is_valid_name_trace["start_time"] == pytest.approx(0.5, abs=0.15)
assert is_valid_name_trace["children"] == []
# TODO: verify the system metrics. This might need to be fixed.
assert is_valid_name_trace["system_metrics"] == {}
# Assert the "format_greeting" function
format_greeting_trace = greetings_trace["children"][1]
assert format_greeting_trace["name"] == "format_greeting"
assert format_greeting_trace["type"] == "Function"
assert format_greeting_trace["inputs"] == {"user_name": "User 1"}
assert format_greeting_trace["output"] == "Hello, User 1!"
assert format_greeting_trace["error"] is None
# When running tests on macOS, execution takes longer, so we relax abs to 0.15 and will extend it if needed.
assert format_greeting_trace["end_time"] - format_greeting_trace["start_time"] == pytest.approx(0.5, abs=0.15)
assert format_greeting_trace["children"] == []
# TODO: verify the system metrics. This might need to be fixed.
assert format_greeting_trace["system_metrics"] == {}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor | promptflow_repo/promptflow/src/promptflow/tests/executor/e2etests/test_executor_happypath.py | import logging
import multiprocessing
import os
import re
import shutil
import sys
from pathlib import Path
from types import GeneratorType
import pytest
from promptflow.contracts.run_info import Status
from promptflow.exceptions import UserErrorException
from promptflow.executor import FlowExecutor
from promptflow.executor._errors import ConnectionNotFound, InputTypeError, ResolveToolError
from promptflow.executor.flow_executor import execute_flow
from promptflow.storage._run_storage import DefaultRunStorage
from ..utils import FLOW_ROOT, get_flow_folder, get_flow_sample_inputs, get_yaml_file, is_image_file
SAMPLE_FLOW = "web_classification_no_variants"
@pytest.mark.usefixtures("use_secrets_config_file", "dev_connections")
@pytest.mark.e2etest
class TestExecutor:
def get_line_inputs(self, flow_folder=""):
if flow_folder:
inputs = self.get_bulk_inputs(flow_folder=flow_folder)
return inputs[0]
return {
"url": "https://www.microsoft.com/en-us/windows/",
"text": "some_text",
}
def get_bulk_inputs(self, nlines=4, flow_folder="", sample_inputs_file="", return_dict=False):
if flow_folder:
if not sample_inputs_file:
sample_inputs_file = "samples.json"
inputs = get_flow_sample_inputs(flow_folder, sample_inputs_file=sample_inputs_file)
if isinstance(inputs, list) and len(inputs) > 0:
return inputs
elif isinstance(inputs, dict):
if return_dict:
return inputs
return [inputs]
else:
raise Exception(f"Invalid type of bulk input: {inputs}")
return [self.get_line_inputs() for _ in range(nlines)]
def skip_serp(self, flow_folder, dev_connections):
serp_required_flows = ["package_tools"]
# Real key is usually more than 32 chars
serp_key = dev_connections.get("serp_connection", {"value": {"api_key": ""}})["value"]["api_key"]
if flow_folder in serp_required_flows and len(serp_key) < 32:
pytest.skip("serp_connection is not prepared")
@pytest.mark.parametrize(
"flow_folder",
[
SAMPLE_FLOW,
"prompt_tools",
"script_with___file__",
"script_with_import",
"package_tools",
"connection_as_input",
"async_tools",
"async_tools_with_sync_tools",
"tool_with_assistant_definition",
],
)
def test_executor_exec_line(self, flow_folder, dev_connections):
self.skip_serp(flow_folder, dev_connections)
os.chdir(get_flow_folder(flow_folder))
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
flow_result = executor.exec_line(self.get_line_inputs())
assert not executor._run_tracker._flow_runs, "Flow runs in run tracker should be empty."
assert not executor._run_tracker._node_runs, "Node runs in run tracker should be empty."
assert isinstance(flow_result.output, dict)
assert flow_result.run_info.status == Status.Completed
node_count = len(executor._flow.nodes)
assert isinstance(flow_result.run_info.api_calls, list) and len(flow_result.run_info.api_calls) == 1
assert (
isinstance(flow_result.run_info.api_calls[0]["children"], list)
and len(flow_result.run_info.api_calls[0]["children"]) == node_count
)
assert len(flow_result.node_run_infos) == node_count
for node, node_run_info in flow_result.node_run_infos.items():
assert node_run_info.status == Status.Completed
assert node_run_info.node == node
assert isinstance(node_run_info.api_calls, list) # api calls is set
def test_long_running_log(self, dev_connections, capsys):
# TODO: investigate why flow_logger does not output to stdout in test case
from promptflow._utils.logger_utils import flow_logger
flow_logger.addHandler(logging.StreamHandler(sys.stdout))
os.environ["PF_TASK_PEEKING_INTERVAL"] = "1"
executor = FlowExecutor.create(get_yaml_file("async_tools"), dev_connections)
executor.exec_line(self.get_line_inputs())
captured = capsys.readouterr()
expected_long_running_str_1 = r".*.*Task async_passthrough has been running for 1 seconds, stacktrace:\n.*async_passthrough\.py.*in passthrough_str_and_wait\n.*await asyncio.sleep\(1\).*tasks\.py.*" # noqa E501
assert re.match(
expected_long_running_str_1, captured.out, re.DOTALL
), "flow_logger should contain long running async tool log"
expected_long_running_str_2 = r".*.*Task async_passthrough has been running for 2 seconds, stacktrace:\n.*async_passthrough\.py.*in passthrough_str_and_wait\n.*await asyncio.sleep\(1\).*tasks\.py.*" # noqa E501
assert re.match(
expected_long_running_str_2, captured.out, re.DOTALL
), "flow_logger should contain long running async tool log"
flow_logger.handlers.pop()
os.environ.pop("PF_TASK_PEEKING_INTERVAL")
@pytest.mark.parametrize(
"flow_folder, node_name, flow_inputs, dependency_nodes_outputs",
[
("web_classification_no_variants", "summarize_text_content", {}, {"fetch_text_content_from_url": "Hello"}),
("prompt_tools", "summarize_text_content_prompt", {"text": "text"}, {}),
("script_with___file__", "node1", {"text": "text"}, None),
("script_with___file__", "node2", None, {"node1": "text"}),
("script_with___file__", "node3", None, None),
("package_tools", "search_by_text", {"text": "elon mask"}, None), # Skip since no api key in CI
("connection_as_input", "conn_node", None, None),
("simple_aggregation", "accuracy", {"text": "A"}, {"passthrough": "B"}),
("script_with_import", "node1", {"text": "text"}, None),
],
)
def test_executor_exec_node(self, flow_folder, node_name, flow_inputs, dependency_nodes_outputs, dev_connections):
self.skip_serp(flow_folder, dev_connections)
yaml_file = get_yaml_file(flow_folder)
run_info = FlowExecutor.load_and_exec_node(
yaml_file,
node_name,
flow_inputs=flow_inputs,
dependency_nodes_outputs=dependency_nodes_outputs,
connections=dev_connections,
raise_ex=True,
)
assert run_info.output is not None
assert run_info.status == Status.Completed
assert isinstance(run_info.api_calls, list)
assert run_info.node == node_name
assert run_info.system_metrics["duration"] >= 0
def test_executor_exec_node_with_llm_node(self, dev_connections):
# Run the test in a new process to ensure the openai api is injected correctly for the single node run
context = multiprocessing.get_context("spawn")
queue = context.Queue()
process = context.Process(
target=exec_node_within_process,
args=(queue, "llm_tool", "joke", {"topic": "fruit"}, {}, dev_connections, True),
)
process.start()
process.join()
if not queue.empty():
raise queue.get()
def test_executor_node_overrides(self, dev_connections):
inputs = self.get_line_inputs()
executor = FlowExecutor.create(
get_yaml_file(SAMPLE_FLOW),
dev_connections,
node_override={"classify_with_llm.deployment_name": "dummy_deployment"},
raise_ex=True,
)
with pytest.raises(UserErrorException) as e:
executor.exec_line(inputs)
assert type(e.value).__name__ == "WrappedOpenAIError"
assert "The API deployment for this resource does not exist." in str(e.value)
with pytest.raises(ResolveToolError) as e:
executor = FlowExecutor.create(
get_yaml_file(SAMPLE_FLOW),
dev_connections,
node_override={"classify_with_llm.connection": "dummy_connection"},
raise_ex=True,
)
assert isinstance(e.value.inner_exception, ConnectionNotFound)
assert "Connection 'dummy_connection' not found" in str(e.value)
@pytest.mark.parametrize(
"flow_folder",
[
"no_inputs_outputs",
],
)
def test_flow_with_no_inputs_and_output(self, flow_folder, dev_connections):
executor = FlowExecutor.create(get_yaml_file(flow_folder, FLOW_ROOT), dev_connections)
flow_result = executor.exec_line({})
assert flow_result.output == {}
assert flow_result.run_info.status == Status.Completed
node_count = len(executor._flow.nodes)
assert isinstance(flow_result.run_info.api_calls, list) and len(flow_result.run_info.api_calls) == node_count
assert len(flow_result.node_run_infos) == node_count
for node, node_run_info in flow_result.node_run_infos.items():
assert node_run_info.status == Status.Completed
assert node_run_info.node == node
assert isinstance(node_run_info.api_calls, list) # api calls is set
@pytest.mark.parametrize(
"flow_folder",
[
"simple_flow_with_python_tool",
],
)
def test_convert_flow_input_types(self, flow_folder, dev_connections) -> None:
executor = FlowExecutor.create(get_yaml_file(flow_folder, FLOW_ROOT), dev_connections)
ret = executor.convert_flow_input_types(inputs={"num": "11"})
assert ret == {"num": 11}
ret = executor.convert_flow_input_types(inputs={"text": "12", "num": "11"})
assert ret == {"text": "12", "num": 11}
with pytest.raises(InputTypeError):
executor.convert_flow_input_types(inputs={"num": "hello"})
def test_chat_flow_stream_mode(self, dev_connections) -> None:
executor = FlowExecutor.create(get_yaml_file("python_stream_tools", FLOW_ROOT), dev_connections)
# To run a flow with stream output, we need to pass this flag to the run tracker.
# TODO: refine the interface
inputs = {"text": "hello", "chat_history": []}
line_result = executor.exec_line(inputs, allow_generator_output=True)
# Assert there's only one output
assert len(line_result.output) == 1
assert set(line_result.output.keys()) == {"output_echo"}
# Assert the only output is a generator
output_echo = line_result.output["output_echo"]
assert isinstance(output_echo, GeneratorType)
assert list(output_echo) == ["Echo: ", "hello "]
# Assert the flow is completed and no errors are raised
flow_run_info = line_result.run_info
assert flow_run_info.status == Status.Completed
assert flow_run_info.error is None
@pytest.mark.parametrize(
"flow_folder",
[
"web_classification",
],
)
def test_executor_creation_with_default_variants(self, flow_folder, dev_connections):
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
flow_result = executor.exec_line(self.get_line_inputs())
assert flow_result.run_info.status == Status.Completed
def test_executor_creation_with_default_input(self):
# Assert for single node run.
default_input_value = "input value from default"
yaml_file = get_yaml_file("default_input")
executor = FlowExecutor.create(yaml_file, {})
node_result = executor.load_and_exec_node(yaml_file, "test_print_input")
assert node_result.status == Status.Completed
assert node_result.output == default_input_value
# Assert for flow run.
flow_result = executor.exec_line({})
assert flow_result.run_info.status == Status.Completed
assert flow_result.output["output"] == default_input_value
aggr_results = executor.exec_aggregation({}, aggregation_inputs={})
flow_aggregate_node = aggr_results.node_run_infos["aggregate_node"]
assert flow_aggregate_node.status == Status.Completed
assert flow_aggregate_node.output == [default_input_value]
# Assert for exec
exec_result = executor.exec({})
assert exec_result["output"] == default_input_value
def test_executor_for_script_tool_with_init(self, dev_connections):
executor = FlowExecutor.create(get_yaml_file("script_tool_with_init"), dev_connections)
flow_result = executor.exec_line({"input": "World"})
assert flow_result.run_info.status == Status.Completed
assert flow_result.output["output"] == "Hello World"
@pytest.mark.parametrize(
"output_dir_name, intermediate_dir_name, run_aggregation, expected_node_counts",
[
("output", "intermediate", True, 2),
("output_1", "intermediate_1", False, 1),
],
)
def test_execute_flow(
self, output_dir_name: str, intermediate_dir_name: str, run_aggregation: bool, expected_node_counts: int
):
flow_folder = get_flow_folder("eval_flow_with_simple_image")
# prepare output folder
output_dir = flow_folder / output_dir_name
intermediate_dir = flow_folder / intermediate_dir_name
output_dir.mkdir(exist_ok=True)
intermediate_dir.mkdir(exist_ok=True)
storage = DefaultRunStorage(base_dir=flow_folder, sub_dir=Path(intermediate_dir_name))
line_result = execute_flow(
flow_file=get_yaml_file(flow_folder),
working_dir=flow_folder,
output_dir=Path(output_dir_name),
inputs={},
connections={},
run_aggregation=run_aggregation,
storage=storage,
)
assert line_result.run_info.status == Status.Completed
assert len(line_result.node_run_infos) == expected_node_counts
assert all(is_image_file(output_file) for output_file in output_dir.iterdir())
assert all(is_image_file(output_file) for output_file in intermediate_dir.iterdir())
# clean up output folder
shutil.rmtree(output_dir)
shutil.rmtree(intermediate_dir)
def exec_node_within_process(queue, flow_file, node_name, flow_inputs, dependency_nodes_outputs, connections, raise_ex):
try:
result = FlowExecutor.load_and_exec_node(
flow_file=get_yaml_file(flow_file),
node_name=node_name,
flow_inputs=flow_inputs,
dependency_nodes_outputs=dependency_nodes_outputs,
connections=connections,
raise_ex=raise_ex,
)
# Assert llm single node run contains openai traces
# And the traces contains system metrics
OPENAI_AGGREGATE_METRICS = ["prompt_tokens", "completion_tokens", "total_tokens"]
assert len(result.api_calls) == 1
assert len(result.api_calls[0]["children"]) == 1
assert isinstance(result.api_calls[0]["children"][0]["system_metrics"], dict)
for key in OPENAI_AGGREGATE_METRICS:
assert key in result.api_calls[0]["children"][0]["system_metrics"]
for key in OPENAI_AGGREGATE_METRICS:
assert (
result.api_calls[0]["system_metrics"][key] == result.api_calls[0]["children"][0]["system_metrics"][key]
)
except Exception as ex:
queue.put(ex)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor | promptflow_repo/promptflow/src/promptflow/tests/executor/e2etests/test_concurent_execution.py | import re
from pathlib import Path
from tempfile import mkdtemp
import pytest
from promptflow._utils.exception_utils import ErrorResponse
from promptflow._utils.logger_utils import LogContext
from promptflow.contracts.run_info import Status
from promptflow.contracts.run_mode import RunMode
from promptflow.executor._flow_nodes_scheduler import RUN_FLOW_NODES_LINEARLY
from promptflow.executor._result import LineResult
from promptflow.executor.flow_executor import FlowExecutor
from ..utils import get_flow_inputs, get_yaml_file, load_content
TEST_ROOT = Path(__file__).parent.parent.parent
FLOWS_ROOT = TEST_ROOT / "test_configs/flows"
FLOW_FOLDER = "concurrent_execution_flow"
@pytest.mark.e2etest
class TestConcurrentExecution:
def test_concurrent_run(self):
logs_directory = Path(mkdtemp())
executor = FlowExecutor.create(get_yaml_file(FLOW_FOLDER), {})
flow_run_log_path = str(logs_directory / "test_flow_run.log")
# flow run: test exec_line
with LogContext(flow_run_log_path, run_mode=RunMode.Test):
results = executor.exec_line(get_flow_inputs(FLOW_FOLDER))
log_content = load_content(flow_run_log_path)
pattern = r"\[wait_(\d+) in line None.*Thread (\d+)"
matches = re.findall(pattern, log_content)
wait_thread_mapping = {}
for wait, thread in matches:
if wait in wait_thread_mapping:
if wait_thread_mapping[wait] != thread:
raise Exception(f"wait_{wait} corresponds to more than one thread number")
else:
wait_thread_mapping[wait] = thread
self.assert_run_result(results)
assert (
results.run_info.system_metrics["duration"] < 10
), "run nodes concurrently should decrease the total run time."
def test_concurrent_run_with_exception(self):
executor = FlowExecutor.create(get_yaml_file(FLOW_FOLDER), {}, raise_ex=False)
flow_result = executor.exec_line({"input1": "True", "input2": "False", "input3": "False", "input4": "False"})
assert 2 < flow_result.run_info.system_metrics["duration"] < 4, "Should at least finish the running job."
error_response = ErrorResponse.from_error_dict(flow_result.run_info.error)
assert error_response.error_code_hierarchy == "UserError/ToolExecutionError"
def test_linear_run(self):
executor = FlowExecutor.create(get_yaml_file(FLOW_FOLDER), {})
# flow run: test exec_line run linearly
results = executor.exec_line(get_flow_inputs(FLOW_FOLDER), node_concurrency=RUN_FLOW_NODES_LINEARLY)
self.assert_run_result(results)
assert 15 > results.run_info.system_metrics["duration"] > 10, "running nodes linearly will consume more time."
def assert_run_result(self, result: LineResult):
# Validate the flow status
assert result.run_info.status == Status.Completed
# Validate the flow output
assert isinstance(result.output, dict)
# Validate the flow node run infos
assert len(result.node_run_infos) == 5
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor | promptflow_repo/promptflow/src/promptflow/tests/executor/e2etests/test_logs.py | from pathlib import Path
from tempfile import mkdtemp
import pytest
from promptflow._utils.logger_utils import LogContext
from promptflow.batch import BatchEngine
from promptflow.batch._result import BatchResult
from promptflow.contracts.run_info import Status
from promptflow.contracts.run_mode import RunMode
from promptflow.executor import FlowExecutor
from ..utils import (
get_flow_folder,
get_flow_inputs_file,
get_flow_sample_inputs,
get_yaml_file,
load_content,
load_jsonl,
)
TEST_LOGS_FLOW = ["print_input_flow"]
SAMPLE_FLOW_WITH_TEN_INPUTS = "simple_flow_with_ten_inputs"
OUTPUT_FILE_NAME = "output.jsonl"
def submit_batch_run(
flow_folder,
inputs_mapping,
*,
input_dirs={},
input_file_name="samples.json",
run_id=None,
connections={},
storage=None,
return_output_dir=False,
):
batch_engine = BatchEngine(
get_yaml_file(flow_folder), get_flow_folder(flow_folder), connections=connections, storage=storage
)
if not input_dirs and inputs_mapping:
input_dirs = {"data": get_flow_inputs_file(flow_folder, file_name=input_file_name)}
output_dir = Path(mkdtemp())
if return_output_dir:
return batch_engine.run(input_dirs, inputs_mapping, output_dir, run_id=run_id), output_dir
return batch_engine.run(input_dirs, inputs_mapping, output_dir, run_id=run_id)
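# Example usage, mirroring test_log_progress below (flow folder and mapping come
# from this file; return_output_dir=True also yields the temporary output dir):
#   batch_result, output_dir = submit_batch_run(
#       "simple_flow_with_ten_inputs",
#       {"input": "${data.input}", "index": "${data.index}"},
#       return_output_dir=True,
#   )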
def get_batch_inputs_line(flow_folder, sample_inputs_file="samples.json"):
inputs = get_flow_sample_inputs(flow_folder, sample_inputs_file=sample_inputs_file)
return len(inputs)
@pytest.mark.usefixtures("dev_connections")
@pytest.mark.e2etest
class TestExecutorLogs:
def assert_node_run_info(self, node_run_info, content):
assert node_run_info.status == Status.Completed
assert content in node_run_info.logs["stdout"]
assert "STDOUT:" in node_run_info.logs["stdout"]
assert content in node_run_info.logs["stderr"]
assert "STDERR:" in node_run_info.logs["stderr"]
def assert_flow_result(self, flow_result, content):
assert isinstance(flow_result.output, dict)
assert flow_result.run_info.status == Status.Completed
for node_run_info in flow_result.node_run_infos.values():
self.assert_node_run_info(node_run_info, content)
def submit_bulk_run(self, folder_name):
batch_engine = BatchEngine(get_yaml_file(folder_name), get_flow_folder(folder_name), connections={})
input_dirs = {"data": get_flow_inputs_file(folder_name)}
inputs_mapping = {"text": "${data.text}"}
output_dir = Path(mkdtemp())
return batch_engine.run(input_dirs, inputs_mapping, output_dir)
@pytest.mark.parametrize(
"folder_name",
TEST_LOGS_FLOW,
)
def test_node_logs(self, folder_name):
# Test node logs in flow run
executor = FlowExecutor.create(get_yaml_file(folder_name), {})
content = "line_text"
flow_result = executor.exec_line({"text": content})
node_run_ids = [node_run_info.run_id for node_run_info in flow_result.node_run_infos.values()]
for node_run_id in node_run_ids:
logs = executor._run_tracker.node_log_manager.get_logs(node_run_id)
assert logs["stderr"] is None and logs["stdout"] is None, f"Logs for node {node_run_id} is cleared."
self.assert_flow_result(flow_result, content)
# Test node logs in single node run
content = "single_node_text"
node_run_info = FlowExecutor.load_and_exec_node(
get_yaml_file(folder_name),
"print_input",
flow_inputs={"text": content},
)
self.assert_node_run_info(node_run_info, content)
@pytest.mark.parametrize(
"folder_name",
TEST_LOGS_FLOW,
)
def test_executor_logs(self, folder_name):
logs_directory = Path(mkdtemp())
flow_run_log_path = str(logs_directory / "test_flow_run.log")
bulk_run_log_path = str(logs_directory / "test_bulk_run.log")
# flow run: test exec_line
with LogContext(flow_run_log_path):
executor = FlowExecutor.create(get_yaml_file(folder_name), {})
executor.exec_line({"text": "line_text"})
log_content = load_content(flow_run_log_path)
loggers_name_list = ["execution", "execution.flow"]
assert all(logger in log_content for logger in loggers_name_list)
# bulk run: test batch_engine.run
# setting run_mode to Batch is a requirement to use bulk_logger
with LogContext(bulk_run_log_path, run_mode=RunMode.Batch):
self.submit_bulk_run(folder_name)
log_content = load_content(bulk_run_log_path)
loggers_name_list = ["execution", "execution.bulk"]
# bulk logger will print the average execution time and estimated time
bulk_logs_keywords = ["Average execution time for completed lines", "Estimated time for incomplete lines"]
assert all(logger in log_content for logger in loggers_name_list)
assert all(keyword in log_content for keyword in bulk_logs_keywords)
@pytest.mark.parametrize(
"folder_name",
TEST_LOGS_FLOW,
)
def test_node_logs_in_executor_logs(self, folder_name):
logs_directory = Path(mkdtemp())
flow_run_log_path = str(logs_directory / "test_flow_run.log")
bulk_run_log_path = str(logs_directory / "test_bulk_run.log")
# flow run: test exec_line
with LogContext(flow_run_log_path, run_mode=RunMode.Test):
executor = FlowExecutor.create(get_yaml_file(folder_name), {})
executor.exec_line({"text": "line_text"})
log_content = load_content(flow_run_log_path)
node_logs_list = ["print_input in line", "stdout> STDOUT:", "stderr> STDERR:"]
assert all(node_log in log_content for node_log in node_logs_list)
# bulk run: test batch_engine.run
# setting run_mode to Batch is a requirement to use bulk_logger
with LogContext(bulk_run_log_path, run_mode=RunMode.Batch):
self.submit_bulk_run(folder_name)
log_content = load_content(bulk_run_log_path)
node_logs_list = ["print_input in line", "stderr> STDERR:"]
assert all(node_log in log_content for node_log in node_logs_list)
def test_long_run_log(self):
executor = FlowExecutor.create(get_yaml_file("long_run"), {})
file_path = Path(mkdtemp()) / "flow.log"
with LogContext(file_path):
flow_result = executor.exec_line({}, index=0)
node_run = flow_result.node_run_infos["long_run_node"]
assert node_run.status == Status.Completed
with open(file_path) as fin:
lines = fin.readlines()
lines = [line for line in lines if line.strip()]
target_texts = [
"INFO Start executing nodes in thread pool mode.",
"INFO Start to run 1 nodes with concurrency level 16.",
"INFO Executing node long_run_node.",
"WARNING long_run_node in line 0 has been running for 60 seconds, stacktrace of thread",
", line 16, in long_run_func",
"return f2()",
", line 11, in f2",
"return f1()",
", line 6, in f1",
"time.sleep(61)",
"INFO Node long_run_node completes.",
]
msg = f"Got {len(lines)} lines in {file_path}, expected {len(target_texts)}."
assert len(lines) == len(target_texts), msg
for actual, expected in zip(lines, target_texts):
assert expected in actual, f"Expected {expected} in {actual}"
@pytest.mark.parametrize(
"flow_folder, inputs_mapping",
[
(
SAMPLE_FLOW_WITH_TEN_INPUTS,
{"input": "${data.input}", "index": "${data.index}"},
)
],
)
def test_log_progress(self, flow_folder, inputs_mapping, dev_connections):
logs_directory = Path(mkdtemp())
bulk_run_log_path = str(logs_directory / "test_bulk_run.log")
with LogContext(bulk_run_log_path, run_mode=RunMode.Batch):
batch_result, output_dir = submit_batch_run(
flow_folder, inputs_mapping, connections=dev_connections, return_output_dir=True
)
nlines = get_batch_inputs_line(flow_folder)
log_content = load_content(bulk_run_log_path)
for i in range(1, nlines + 1):
assert f"Finished {i} / {nlines} lines." in log_content
assert isinstance(batch_result, BatchResult)
assert batch_result.total_lines == nlines
assert batch_result.completed_lines == nlines
assert batch_result.start_time < batch_result.end_time
assert batch_result.system_metrics.duration > 0
outputs = load_jsonl(output_dir / OUTPUT_FILE_NAME)
assert len(outputs) == nlines
for i, output in enumerate(outputs):
assert isinstance(output, dict)
assert "line_number" in output, f"line_number is not in {i}th output {output}"
assert output["line_number"] == i, f"line_number is not correct in {i}th output {output}"
def test_activate_config_log(self):
logs_directory = Path(mkdtemp())
log_path = str(logs_directory / "flow.log")
# flow run: test exec_line
with LogContext(log_path, run_mode=RunMode.Test):
executor = FlowExecutor.create(get_yaml_file("activate_flow"), {})
# use default inputs
executor.exec_line({})
log_content = load_content(log_path)
logs_list = [
"execution.flow",
"The node 'nodeA' will be bypassed because the activate condition is not met, "
"i.e. '${flow.text}' is not equal to 'hello'.",
"The node 'nodeB' will be bypassed because it depends on the node 'nodeA' "
"which has already been bypassed in the activate config.",
"The node 'nodeC' will be bypassed because all nodes ['nodeB'] it depends on are bypassed.",
"The node 'nodeD' will be executed because the activate condition is met, "
"i.e. '${flow.text}' is equal to 'world'.",
]
assert all(log in log_content for log in logs_list)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor | promptflow_repo/promptflow/src/promptflow/tests/executor/e2etests/test_eager_flow.py | import os
from dataclasses import is_dataclass
from pathlib import Path
from tempfile import mkdtemp
import pytest
from promptflow.batch._batch_engine import OUTPUT_FILE_NAME, BatchEngine
from promptflow.batch._result import BatchResult, LineResult
from promptflow.contracts.run_info import Status
from promptflow.executor._script_executor import ScriptExecutor
from promptflow.executor.flow_executor import FlowExecutor
from ..utils import (
EAGER_FLOW_ROOT,
get_bulk_inputs_from_jsonl,
get_entry_file,
get_flow_folder,
get_flow_inputs_file,
get_yaml_file,
load_jsonl,
)
SAMPLE_FLOW = "web_classification_no_variants"
SAMPLE_EVAL_FLOW = "classification_accuracy_evaluation"
SAMPLE_FLOW_WITH_PARTIAL_FAILURE = "python_tool_partial_failure"
def validate_batch_result(batch_result: BatchResult, flow_folder, output_dir, ensure_output):
assert isinstance(batch_result, BatchResult)
nlines = len(get_bulk_inputs_from_jsonl(flow_folder, root=EAGER_FLOW_ROOT))
assert batch_result.total_lines == nlines
assert batch_result.completed_lines == nlines
assert batch_result.start_time < batch_result.end_time
assert batch_result.system_metrics.duration > 0
outputs = load_jsonl(output_dir / OUTPUT_FILE_NAME)
assert len(outputs) == nlines
for i, output in enumerate(outputs):
assert isinstance(output, dict)
assert "line_number" in output, f"line_number is not in {i}th output {output}"
assert output["line_number"] == i, f"line_number is not correct in {i}th output {output}"
assert ensure_output(output)
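# Example usage, mirroring test_batch_run below (ensure_output is any predicate
# over a single output line; the lambda here is illustrative):
#   validate_batch_result(
#       batch_result,
#       "dummy_flow_with_trace",
#       output_dir,
#       lambda x: "output" in x and x["output"] == "dummy_output",
#   )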
@pytest.mark.usefixtures("dev_connections")
@pytest.mark.e2etest
class TestEagerFlow:
@pytest.mark.parametrize(
"flow_folder, entry, inputs, ensure_output",
[
(
"dummy_flow_with_trace",
"my_flow",
{"text": "text", "models": ["model"]},
lambda x: x == "dummy_output"
),
(
"flow_with_dataclass_output",
"my_flow",
{"text": "text", "models": ["model"]},
lambda x: is_dataclass(x) and x.text == "text" and x.models == ["model"]
),
]
)
def test_flow_run(self, flow_folder, entry, inputs, ensure_output):
# Test submitting eager flow to script executor
flow_file = get_entry_file(flow_folder, root=EAGER_FLOW_ROOT)
executor = ScriptExecutor(flow_file=flow_file, entry=entry)
line_result = executor.exec_line(inputs=inputs, index=0)
assert isinstance(line_result, LineResult)
assert ensure_output(line_result.output)
# Test submitting eager flow to flow executor
working_dir = get_flow_folder(flow_folder, root=EAGER_FLOW_ROOT)
os.chdir(working_dir)
flow_file = get_yaml_file(flow_folder, root=EAGER_FLOW_ROOT)
executor = FlowExecutor.create(flow_file=flow_file, connections={})
line_result = executor.exec_line(inputs=inputs, index=0)
assert isinstance(line_result, LineResult)
assert ensure_output(line_result.output)
@pytest.mark.parametrize(
"flow_folder, inputs, ensure_output",
[
(
"dummy_flow_with_trace",
{"text": "text", "models": ["model"]},
lambda x: x == "dummy_output"
),
(
"flow_with_dataclass_output",
{"text": "text", "models": ["model"]},
lambda x: is_dataclass(x) and x.text == "text" and x.models == ["model"]
),
]
)
def test_flow_run_with_flow_yaml(self, flow_folder, inputs, ensure_output):
working_dir = get_flow_folder(flow_folder, root=EAGER_FLOW_ROOT)
os.chdir(working_dir)
flow_file = get_yaml_file(flow_folder, root=EAGER_FLOW_ROOT)
executor = FlowExecutor.create(flow_file=flow_file, connections={})
line_result = executor.exec_line(inputs=inputs, index=0)
assert isinstance(line_result, LineResult)
assert ensure_output(line_result.output)
def test_exec_line_with_invalid_case(self):
flow_file = get_entry_file("dummy_flow_with_exception", root=EAGER_FLOW_ROOT)
executor = ScriptExecutor(flow_file=flow_file, entry="my_flow")
line_result = executor.exec_line(inputs={"text": "text"}, index=0)
assert isinstance(line_result, LineResult)
assert line_result.output is None
assert line_result.run_info.status == Status.Failed
assert "dummy exception" in line_result.run_info.error["message"]
@pytest.mark.parametrize(
"flow_folder, inputs_mapping, entry, ensure_output",
[
(
"dummy_flow_with_trace",
{"text": "${data.text}", "models": "${data.models}"},
"my_flow",
lambda x: "output" in x and x["output"] == "dummy_output",
),
(
"flow_with_dataclass_output",
{"text": "${data.text}", "models": "${data.models}"},
"my_flow",
lambda x: x["text"] == "text" and isinstance(x["models"], list),
),
(
"flow_with_dataclass_output",
{}, # if inputs_mapping is empty, then the inputs will be the default value
"my_flow",
lambda x: x["text"] == "default_text" and x["models"] == ["default_model"],
)
]
)
def test_batch_run(self, flow_folder, entry, inputs_mapping, ensure_output):
batch_engine = BatchEngine(
get_entry_file(flow_folder, root=EAGER_FLOW_ROOT),
get_flow_folder(flow_folder, root=EAGER_FLOW_ROOT),
entry=entry,
)
input_dirs = {"data": get_flow_inputs_file(flow_folder, root=EAGER_FLOW_ROOT)}
output_dir = Path(mkdtemp())
batch_result = batch_engine.run(input_dirs, inputs_mapping, output_dir)
validate_batch_result(batch_result, flow_folder, output_dir, ensure_output)
@pytest.mark.parametrize(
"flow_folder, inputs_mapping, ensure_output",
[
(
"dummy_flow_with_trace",
{"text": "${data.text}", "models": "${data.models}"},
lambda x: "output" in x and x["output"] == "dummy_output",
),
(
"flow_with_dataclass_output",
{"text": "${data.text}", "models": "${data.models}"},
lambda x: x["text"] == "text" and isinstance(x["models"], list),
),
]
)
def test_batch_run_with_flow_yaml(self, flow_folder, inputs_mapping, ensure_output):
batch_engine = BatchEngine(
get_yaml_file(flow_folder, root=EAGER_FLOW_ROOT),
get_flow_folder(flow_folder, root=EAGER_FLOW_ROOT),
)
input_dirs = {"data": get_flow_inputs_file(flow_folder, root=EAGER_FLOW_ROOT)}
output_dir = Path(mkdtemp())
batch_result = batch_engine.run(input_dirs, inputs_mapping, output_dir)
validate_batch_result(batch_result, flow_folder, output_dir, ensure_output)
def test_batch_run_with_invalid_case(self):
flow_folder = "dummy_flow_with_exception"
batch_engine = BatchEngine(
get_entry_file(flow_folder, root=EAGER_FLOW_ROOT),
get_flow_folder(flow_folder, root=EAGER_FLOW_ROOT),
entry="my_flow",
)
input_dirs = {"data": get_flow_inputs_file(flow_folder, root=EAGER_FLOW_ROOT)}
output_dir = Path(mkdtemp())
batch_result = batch_engine.run(input_dirs, {"text": "${data.text}"}, output_dir)
assert isinstance(batch_result, BatchResult)
nlines = len(get_bulk_inputs_from_jsonl(flow_folder, root=EAGER_FLOW_ROOT))
assert batch_result.total_lines == nlines
assert batch_result.failed_lines == nlines
assert batch_result.start_time < batch_result.end_time
assert batch_result.system_metrics.duration > 0
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor | promptflow_repo/promptflow/src/promptflow/tests/executor/e2etests/test_package_tool.py | import sys
from pathlib import Path
from unittest.mock import patch
import pytest
from promptflow._core._errors import PackageToolNotFoundError, ToolLoadError
from promptflow.contracts.run_info import Status
from promptflow.executor import FlowExecutor
from promptflow.executor._errors import NodeInputValidationError, ResolveToolError
from promptflow.executor._result import LineResult
from ..utils import WRONG_FLOW_ROOT, get_flow_package_tool_definition, get_flow_sample_inputs, get_yaml_file
PACKAGE_TOOL_BASE = Path(__file__).parent.parent / "package_tools"
PACKAGE_TOOL_ENTRY = "promptflow._core.tools_manager.collect_package_tools"
sys.path.insert(0, str(PACKAGE_TOOL_BASE.resolve()))
@pytest.mark.e2etest
class TestPackageTool:
def get_line_inputs(self, flow_folder=""):
if flow_folder:
inputs = self.get_bulk_inputs(flow_folder=flow_folder)
return inputs[0]
return {
"url": "https://www.microsoft.com/en-us/windows/",
"text": "some_text",
}
def get_bulk_inputs(self, nlines=4, flow_folder=""):
if flow_folder:
inputs = get_flow_sample_inputs(flow_folder)
if isinstance(inputs, list) and len(inputs) > 0:
return inputs
elif isinstance(inputs, dict):
return [inputs]
else:
raise Exception(f"Invalid type of bulk input: {inputs}")
return [self.get_line_inputs() for _ in range(nlines)]
def test_executor_package_tool_with_conn(self, mocker):
flow_folder = PACKAGE_TOOL_BASE / "tool_with_connection"
package_tool_definition = get_flow_package_tool_definition(flow_folder)
mocker.patch(
"promptflow.tools.list.list_package_tools",
return_value=package_tool_definition,
)
name, secret = "dummy_name", "dummy_secret"
connections = {
"test_conn": {
"type": "TestConnection",
"value": {"name": name, "secret": secret},
}
}
executor = FlowExecutor.create(get_yaml_file(flow_folder), connections, raise_ex=True)
flow_result = executor.exec_line({})
assert flow_result.run_info.status == Status.Completed
assert len(flow_result.node_run_infos) == 1
for _, v in flow_result.node_run_infos.items():
assert v.status == Status.Completed
assert v.output == name + secret
@pytest.mark.skipif(sys.platform == "darwin", reason="Skip on Mac")
def test_executor_package_with_prompt_tool(self, dev_connections, mocker):
flow_folder = PACKAGE_TOOL_BASE / "custom_llm_tool"
package_tool_definition = get_flow_package_tool_definition(flow_folder)
with mocker.patch(PACKAGE_TOOL_ENTRY, return_value=package_tool_definition):
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections, raise_ex=True)
bulk_inputs = self.get_bulk_inputs(flow_folder=flow_folder)
for i in bulk_inputs:
line_result = executor.exec_line(i)
assert isinstance(line_result, LineResult)
msg = f"Got {line_result.run_info.status} for input {i}"
assert line_result.run_info.status == Status.Completed, msg
def test_custom_llm_tool_with_duplicated_inputs(self, dev_connections, mocker):
flow_folder = PACKAGE_TOOL_BASE / "custom_llm_tool_with_duplicated_inputs"
package_tool_definition = get_flow_package_tool_definition(flow_folder)
with mocker.patch(PACKAGE_TOOL_ENTRY, return_value=package_tool_definition):
msg = (
"Invalid inputs {'api'} in prompt template of node custom_llm_tool_with_duplicated_inputs. "
"These inputs are duplicated with the inputs of custom llm tool."
)
with pytest.raises(ResolveToolError, match=msg) as e:
FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
assert isinstance(e.value.inner_exception, NodeInputValidationError)
@pytest.mark.parametrize(
"flow_folder, error_class, inner_class, error_message",
[
(
"wrong_tool_in_package_tools",
ResolveToolError,
PackageToolNotFoundError,
"Tool load failed in 'search_by_text': (PackageToolNotFoundError) "
"Package tool 'promptflow.tools.serpapi.SerpAPI.search_11' is not found in the current environment. "
"All available package tools are: "
"['promptflow.tools.azure_content_safety.AzureContentSafety.analyze_text', "
"'promptflow.tools.azure_detect.AzureDetect.get_language'].",
),
(
"wrong_package_in_package_tools",
ResolveToolError,
PackageToolNotFoundError,
"Tool load failed in 'search_by_text': (PackageToolNotFoundError) "
"Package tool 'promptflow.tools.serpapi11.SerpAPI.search' is not found in the current environment. "
"All available package tools are: "
"['promptflow.tools.azure_content_safety.AzureContentSafety.analyze_text', "
"'promptflow.tools.azure_detect.AzureDetect.get_language'].",
),
],
)
def test_package_tool_execution(self, flow_folder, error_class, inner_class, error_message, dev_connections):
def mock_collect_package_tools(keys=None):
return {
"promptflow.tools.azure_content_safety.AzureContentSafety.analyze_text": None,
"promptflow.tools.azure_detect.AzureDetect.get_language": None,
}
with patch(PACKAGE_TOOL_ENTRY, side_effect=mock_collect_package_tools):
with pytest.raises(error_class) as exce_info:
FlowExecutor.create(get_yaml_file(flow_folder, WRONG_FLOW_ROOT), dev_connections)
if isinstance(exce_info.value, ResolveToolError):
assert isinstance(exce_info.value.inner_exception, inner_class)
assert error_message == exce_info.value.message
@pytest.mark.parametrize(
"flow_folder, error_message",
[
(
"tool_with_init_error",
"Tool load failed in 'tool_with_init_error': "
"(ToolLoadError) Failed to load package tool 'Tool with init error': (Exception) Tool load error.",
)
],
)
def test_package_tool_load_error(self, flow_folder, error_message, dev_connections, mocker):
flow_folder = PACKAGE_TOOL_BASE / flow_folder
package_tool_definition = get_flow_package_tool_definition(flow_folder)
with mocker.patch(PACKAGE_TOOL_ENTRY, return_value=package_tool_definition):
with pytest.raises(ResolveToolError) as exce_info:
FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
assert isinstance(exce_info.value.inner_exception, ToolLoadError)
assert exce_info.value.message == error_message
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor | promptflow_repo/promptflow/src/promptflow/tests/executor/e2etests/test_executor_validation.py | import json
from pathlib import Path
from tempfile import mkdtemp
import pytest
from promptflow._core._errors import FlowOutputUnserializable, InvalidSource
from promptflow._core.tools_manager import APINotFound
from promptflow._sdk._constants import DAG_FILE_NAME
from promptflow._utils.utils import dump_list_to_jsonl
from promptflow.batch import BatchEngine
from promptflow.contracts._errors import FailedToImportModule
from promptflow.executor import FlowExecutor
from promptflow.executor._errors import (
ConnectionNotFound,
DuplicateNodeName,
EmptyOutputReference,
InputNotFound,
InputReferenceNotFound,
InputTypeError,
InvalidConnectionType,
NodeCircularDependency,
NodeInputValidationError,
NodeReferenceNotFound,
OutputReferenceNotFound,
ResolveToolError,
SingleNodeValidationError,
)
from ..utils import FLOW_ROOT, WRONG_FLOW_ROOT, get_flow_folder, get_flow_inputs_file, get_yaml_file
@pytest.mark.usefixtures("use_secrets_config_file", "dev_connections")
@pytest.mark.e2etest
class TestValidation:
@pytest.mark.parametrize(
"flow_folder, yml_file, error_class, inner_class, error_msg",
[
(
"flow_llm_with_wrong_conn",
"flow.dag.yaml",
ResolveToolError,
InvalidConnectionType,
(
"Tool load failed in 'wrong_llm': "
"(InvalidConnectionType) Connection type CustomConnection is not supported for LLM."
),
),
(
"nodes_names_duplicated",
"flow.dag.yaml",
DuplicateNodeName,
None,
(
"Invalid node definitions found in the flow graph. Node with name 'stringify_num' appears more "
"than once in the node definitions in your flow, which is not allowed. To "
"address this issue, please review your flow and either rename or remove "
"nodes with identical names."
),
),
(
"source_file_missing",
"flow.dag.jinja.yaml",
ResolveToolError,
InvalidSource,
(
"Tool load failed in 'summarize_text_content': (InvalidSource) "
"Node source path 'summarize_text_content__variant_1.jinja2' is invalid on node "
"'summarize_text_content'."
),
),
(
"node_reference_not_found",
"flow.dag.yaml",
NodeReferenceNotFound,
None,
(
"Invalid node definitions found in the flow graph. Node 'divide_num_2' references a non-existent "
"node 'divide_num_3' in your flow. Please review your flow to ensure that the "
"node name is accurately specified."
),
),
(
"node_circular_dependency",
"flow.dag.yaml",
NodeCircularDependency,
None,
(
"Invalid node definitions found in the flow graph. Node circular dependency has been detected "
"among the nodes in your flow. Kindly review the reference relationships for "
"the nodes ['divide_num', 'divide_num_1', 'divide_num_2'] and resolve the "
"circular reference issue in the flow."
),
),
(
"flow_input_reference_invalid",
"flow.dag.yaml",
InputReferenceNotFound,
None,
(
"Invalid node definitions found in the flow graph. Node 'divide_num' references flow input 'num_1' "
"which is not defined in your flow. To resolve this issue, please review your "
"flow, ensuring that you either add the missing flow inputs or adjust node "
"reference to the correct flow input."
),
),
(
"flow_output_reference_invalid",
"flow.dag.yaml",
EmptyOutputReference,
None,
(
"The output 'content' for flow is incorrect. The reference is not specified for the output "
"'content' in the flow. To rectify this, ensure that you accurately specify "
"the reference in the flow."
),
),
(
"outputs_reference_not_valid",
"flow.dag.yaml",
OutputReferenceNotFound,
None,
(
"The output 'content' for flow is incorrect. The output 'content' references non-existent "
"node 'another_stringify_num' in your flow. To resolve this issue, please "
"carefully review your flow and correct the reference definition for the "
"output in question."
),
),
(
"outputs_with_invalid_flow_inputs_ref",
"flow.dag.yaml",
OutputReferenceNotFound,
None,
(
"The output 'num' for flow is incorrect. The output 'num' references non-existent flow "
"input 'num11' in your flow. Please carefully review your flow and correct "
"the reference definition for the output in question."
),
),
],
)
def test_executor_create_failure_type_and_message(
self, flow_folder, yml_file, error_class, inner_class, error_msg, dev_connections
):
with pytest.raises(error_class) as exc_info:
FlowExecutor.create(get_yaml_file(flow_folder, WRONG_FLOW_ROOT, yml_file), dev_connections)
if isinstance(exc_info.value, ResolveToolError):
assert isinstance(exc_info.value.inner_exception, inner_class)
assert error_msg == exc_info.value.message
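        # When tool resolution fails, the original error is wrapped in ResolveToolError and kept as
        # inner_exception, which is why the inner exception class is asserted separately above.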
@pytest.mark.parametrize(
"flow_folder, yml_file, error_class, inner_class",
[
("source_file_missing", "flow.dag.python.yaml", ResolveToolError, InvalidSource),
],
)
def test_executor_create_failure_type(self, flow_folder, yml_file, error_class, inner_class, dev_connections):
with pytest.raises(error_class) as e:
FlowExecutor.create(get_yaml_file(flow_folder, WRONG_FLOW_ROOT, yml_file), dev_connections)
if isinstance(e.value, ResolveToolError):
assert isinstance(e.value.inner_exception, inner_class)
@pytest.mark.parametrize(
"ordered_flow_folder, unordered_flow_folder",
[
("web_classification_no_variants", "web_classification_no_variants_unordered"),
],
)
def test_node_topology_in_order(self, ordered_flow_folder, unordered_flow_folder, dev_connections):
ordered_executor = FlowExecutor.create(get_yaml_file(ordered_flow_folder), dev_connections)
unordered_executor = FlowExecutor.create(get_yaml_file(unordered_flow_folder), dev_connections)
for node1, node2 in zip(ordered_executor._flow.nodes, unordered_executor._flow.nodes):
assert node1.name == node2.name
@pytest.mark.parametrize(
"flow_folder, error_class, inner_class",
[
("invalid_connection", ResolveToolError, ConnectionNotFound),
("tool_type_missing", ResolveToolError, NotImplementedError),
("wrong_module", FailedToImportModule, None),
("wrong_api", ResolveToolError, APINotFound),
("wrong_provider", ResolveToolError, APINotFound),
],
)
def test_invalid_flow_dag(self, flow_folder, error_class, inner_class, dev_connections):
with pytest.raises(error_class) as e:
FlowExecutor.create(get_yaml_file(flow_folder, WRONG_FLOW_ROOT), dev_connections)
if isinstance(e.value, ResolveToolError):
assert isinstance(e.value.inner_exception, inner_class)
@pytest.mark.parametrize(
"flow_folder, line_input, error_class",
[
("simple_flow_with_python_tool", {"num11": "22"}, InputNotFound),
("simple_flow_with_python_tool", {"num": "hello"}, InputTypeError),
("python_tool_with_simple_image_without_default", {}, InputNotFound),
],
)
def test_flow_run_input_type_invalid(self, flow_folder, line_input, error_class, dev_connections):
        # Flow run - the line input comes directly from the parametrized cases above
executor = FlowExecutor.create(get_yaml_file(flow_folder, FLOW_ROOT), dev_connections)
with pytest.raises(error_class):
executor.exec_line(line_input)
@pytest.mark.parametrize(
"flow_folder, line_input, error_class, error_msg",
[
(
"flow_output_unserializable",
{"num": "22"},
FlowOutputUnserializable,
(
"The output 'content' for flow is incorrect. The output value is not JSON serializable. "
"JSON dump failed: (TypeError) Object of type UnserializableClass is not JSON serializable. "
"Please verify your flow output and make sure the value serializable."
),
),
],
)
def test_flow_run_execution_errors(self, flow_folder, line_input, error_class, error_msg, dev_connections):
executor = FlowExecutor.create(get_yaml_file(flow_folder, WRONG_FLOW_ROOT), dev_connections)
        # For now, the exception is designed to be swallowed by the executor, but the run info still carries the error details.
res = executor.exec_line(line_input)
assert error_msg == res.run_info.error["message"]
@pytest.mark.parametrize(
"flow_folder, inputs_mapping, error_message, error_class",
[
(
"simple_flow_with_python_tool",
{"num": "${data.num}"},
(
"The input for flow is incorrect. The value for flow input 'num' in line 0 of input data does not "
"match the expected type 'int'. Please change flow input type or adjust the input value in "
"your input data."
),
"InputTypeError",
),
],
)
def test_batch_run_input_type_invalid(
self, flow_folder, inputs_mapping, error_message, error_class, dev_connections
):
# Bulk run - the input is from sample.json
batch_engine = BatchEngine(
get_yaml_file(flow_folder), get_flow_folder(flow_folder), connections=dev_connections
)
input_dirs = {"data": get_flow_inputs_file(flow_folder)}
output_dir = Path(mkdtemp())
batch_results = batch_engine.run(input_dirs, inputs_mapping, output_dir)
assert error_message in str(
batch_results.error_summary.error_list[0].error
), f"Expected message {error_message} but got {str(batch_results.error_summary.error_list[0].error)}"
assert error_class in str(
batch_results.error_summary.error_list[0].error
), f"Expected message {error_class} but got {str(batch_results.error_summary.error_list[0].error)}"
@pytest.mark.parametrize(
"path_root, flow_folder, node_name, line_input, error_class, error_msg",
[
(
FLOW_ROOT,
"simple_flow_with_python_tool",
"divide_num",
{"num11": "22"},
InputNotFound,
(
"The input for node is incorrect. Node input 'num' is not found in input data "
"for node 'divide_num'. Please verify the inputs data for the node."
),
),
(
FLOW_ROOT,
"simple_flow_with_python_tool",
"divide_num",
{"num": "hello"},
InputTypeError,
(
"The input for node is incorrect. Value for input 'num' of node 'divide_num' "
"is not type 'int'. Please review and rectify the input data."
),
),
(
WRONG_FLOW_ROOT,
"flow_input_reference_invalid",
"divide_num",
{"num": "22"},
InputNotFound,
(
"The input for node is incorrect. Node input 'num_1' is not found from flow "
"inputs of node 'divide_num'. Please review the node definition in your flow."
),
),
(
FLOW_ROOT,
"simple_flow_with_python_tool",
"bad_node_name",
{"num": "22"},
SingleNodeValidationError,
(
"Validation failed when attempting to execute the node. Node 'bad_node_name' is not found in flow "
"'flow.dag.yaml'. Please change node name or correct the flow file."
),
),
(
WRONG_FLOW_ROOT,
"node_missing_type_or_source",
"divide_num",
{"num": "22"},
SingleNodeValidationError,
(
"Validation failed when attempting to execute the node. Properties 'source' or 'type' are not "
"specified for Node 'divide_num' in flow 'flow.dag.yaml'. Please make sure "
"these properties are in place and try again."
),
),
],
)
def test_single_node_input_type_invalid(
self, path_root: str, flow_folder, node_name, line_input, error_class, error_msg, dev_connections
):
# Single Node run - the inputs are from flow_inputs + dependency_nodes_outputs
with pytest.raises(error_class) as exe_info:
FlowExecutor.load_and_exec_node(
flow_file=DAG_FILE_NAME,
node_name=node_name,
flow_inputs=line_input,
dependency_nodes_outputs={},
connections=dev_connections,
working_dir=Path(path_root) / flow_folder,
raise_ex=True,
)
assert error_msg == exe_info.value.message
@pytest.mark.parametrize(
"flow_folder, msg",
[
(
"prompt_tool_with_duplicated_inputs",
"Invalid inputs {'template'} in prompt template of node prompt_tool_with_duplicated_inputs. "
"These inputs are duplicated with the reserved parameters of prompt tool.",
),
(
"llm_tool_with_duplicated_inputs",
"Invalid inputs {'prompt'} in prompt template of node llm_tool_with_duplicated_inputs. "
"These inputs are duplicated with the parameters of AzureOpenAI.completion.",
),
],
)
def test_flow_run_with_duplicated_inputs(self, flow_folder, msg, dev_connections):
with pytest.raises(ResolveToolError, match=msg) as e:
FlowExecutor.create(get_yaml_file(flow_folder, FLOW_ROOT), dev_connections)
assert isinstance(e.value.inner_exception, NodeInputValidationError)
@pytest.mark.parametrize(
"flow_folder, batch_input, raise_on_line_failure, error_class",
[
("simple_flow_with_python_tool", [{"num": "hello"}], True, Exception),
("simple_flow_with_python_tool", [{"num": "hello"}], False, InputTypeError),
("simple_flow_with_python_tool", [{"num": "22"}], True, None),
("simple_flow_with_python_tool", [{"num": "22"}], False, None),
],
)
def test_batch_run_raise_on_line_failure(
self, flow_folder, batch_input, raise_on_line_failure, error_class, dev_connections
):
# Bulk run - the input is from sample.json
batch_engine = BatchEngine(
get_yaml_file(flow_folder), get_flow_folder(flow_folder), connections=dev_connections
)
# prepare input file and output dir
input_file = Path(mkdtemp()) / "inputs.jsonl"
dump_list_to_jsonl(input_file, batch_input)
input_dirs = {"data": input_file}
output_dir = Path(mkdtemp())
inputs_mapping = {"num": "${data.num}"}
if error_class is None:
batch_result = batch_engine.run(
input_dirs, inputs_mapping, output_dir, raise_on_line_failure=raise_on_line_failure
)
assert batch_result.total_lines == 1
assert batch_result.completed_lines == 1
assert batch_result.error_summary.error_list == []
else:
if raise_on_line_failure:
with pytest.raises(error_class):
batch_engine.run(
input_dirs, inputs_mapping, output_dir, raise_on_line_failure=raise_on_line_failure
)
else:
batch_result = batch_engine.run(
input_dirs, inputs_mapping, output_dir, raise_on_line_failure=raise_on_line_failure
)
assert batch_result.total_lines == 1
assert batch_result.failed_lines == 1
assert error_class.__name__ in json.dumps(batch_result.error_summary.error_list[0].error)
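        # With raise_on_line_failure=True a failing line surfaces as an exception from the batch run;
        # with False the failure is only recorded in the batch result's error summary.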
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor | promptflow_repo/promptflow/src/promptflow/tests/executor/e2etests/test_async.py | import os
import pytest
from promptflow.executor import FlowExecutor
from ..utils import get_flow_folder, get_yaml_file
@pytest.mark.e2etest
class TestAsync:
@pytest.mark.parametrize(
"folder_name, concurrency_levels, expected_concurrency",
[
("async_tools", [1, 2, 3], [1, 2, 2]),
("async_tools_with_sync_tools", [1, 2, 3], [1, 2, 2]),
],
)
def test_executor_node_concurrency(self, folder_name, concurrency_levels, expected_concurrency):
os.chdir(get_flow_folder(folder_name))
executor = FlowExecutor.create(get_yaml_file(folder_name), {})
def calculate_max_concurrency(flow_result):
timeline = []
api_calls = flow_result.run_info.api_calls[0]["children"]
for api_call in api_calls:
timeline.append(("start", api_call["start_time"]))
timeline.append(("end", api_call["end_time"]))
timeline.sort(key=lambda x: x[1])
current_concurrency = 0
max_concurrency = 0
for event, _ in timeline:
if event == "start":
current_concurrency += 1
max_concurrency = max(max_concurrency, current_concurrency)
elif event == "end":
current_concurrency -= 1
return max_concurrency
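        # calculate_max_concurrency is a simple sweep-line check: the start/end events of all API calls
        # are sorted by timestamp, and the peak of the running counter is the largest number of calls
        # that were in flight at the same time.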
for i in range(len(concurrency_levels)):
concurrency = concurrency_levels[i]
flow_result = executor.exec_line({"input_str": "Hello"}, node_concurrency=concurrency)
max_concurrency = calculate_max_concurrency(flow_result)
assert max_concurrency == expected_concurrency[i]
assert max_concurrency <= concurrency
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor | promptflow_repo/promptflow/src/promptflow/tests/executor/e2etests/test_langchain.py | from pathlib import Path
from tempfile import mkdtemp
import pytest
from promptflow.batch import BatchEngine
from promptflow.batch._result import BatchResult
from ..utils import get_flow_folder, get_flow_inputs_file, get_yaml_file
@pytest.mark.usefixtures("use_secrets_config_file", "dev_connections")
@pytest.mark.e2etest
class TestLangchain:
@pytest.mark.parametrize(
"flow_folder, inputs_mapping",
[
("flow_with_langchain_traces", {"question": "${data.question}"}),
("openai_chat_api_flow", {"question": "${data.question}", "chat_history": "${data.chat_history}"}),
("openai_completion_api_flow", {"prompt": "${data.prompt}"}),
],
)
def test_batch_with_langchain(self, flow_folder, inputs_mapping, dev_connections):
batch_engine = BatchEngine(
get_yaml_file(flow_folder), get_flow_folder(flow_folder), connections=dev_connections
)
input_dirs = {"data": get_flow_inputs_file(flow_folder)}
output_dir = Path(mkdtemp())
batch_results = batch_engine.run(input_dirs, inputs_mapping, output_dir)
assert isinstance(batch_results, BatchResult)
assert batch_results.total_lines == batch_results.completed_lines
assert batch_results.system_metrics.total_tokens > 0
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor | promptflow_repo/promptflow/src/promptflow/tests/executor/e2etests/test_executor_execution_failures.py | import pytest
from promptflow.contracts.run_info import Status
from promptflow.executor import FlowExecutor
from ..utils import (
get_yaml_file,
)
SAMPLE_FLOW = "web_classification_no_variants"
SAMPLE_EVAL_FLOW = "classification_accuracy_evaluation"
SAMPLE_FLOW_WITH_PARTIAL_FAILURE = "python_tool_partial_failure"
SAMPLE_FLOW_WITH_LANGCHAIN_TRACES = "flow_with_langchain_traces"
expected_stack_traces = {
"sync_tools_failures": """Traceback (most recent call last):
sync_fail.py", line 11, in raise_an_exception
raise_exception(s)
sync_fail.py", line 5, in raise_exception
raise Exception(msg)
Exception: In raise_exception: dummy_input
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
sync_fail.py", line 13, in raise_an_exception
raise Exception(f"In tool raise_an_exception: {s}") from e
Exception: In tool raise_an_exception: dummy_input
""".split("\n"),
"async_tools_failures": """Traceback (most recent call last):
async_fail.py", line 11, in raise_an_exception_async
await raise_exception_async(s)
async_fail.py", line 5, in raise_exception_async
raise Exception(msg)
Exception: In raise_exception_async: dummy_input
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
in raise_an_exception_async
raise Exception(f"In tool raise_an_exception_async: {s}") from e
Exception: In tool raise_an_exception_async: dummy_input
""".split("\n"),
}
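# The expected traces above intentionally keep only file names, line hints and messages rather than
# full machine-specific paths; the tests below compare them line by line with a substring check
# (`expected_item in actual_item`).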
@pytest.mark.e2etest
class TestExecutorFailures:
@pytest.mark.parametrize(
"flow_folder, node_name, message",
[
("sync_tools_failures", "sync_fail", "In tool raise_an_exception: dummy_input"),
("async_tools_failures", "async_fail", "In tool raise_an_exception_async: dummy_input"),
],
)
def test_executor_exec_node_fail(self, flow_folder, node_name, message):
yaml_file = get_yaml_file(flow_folder)
run_info = FlowExecutor.load_and_exec_node(yaml_file, node_name)
assert run_info.output is None
assert run_info.status == Status.Failed
assert isinstance(run_info.api_calls, list)
assert len(run_info.api_calls) == 1
assert run_info.node == node_name
assert run_info.system_metrics["duration"] >= 0
assert run_info.error is not None
assert f"Execution failure in '{node_name}'" in run_info.error["message"]
assert len(run_info.error["additionalInfo"]) == 1
user_error_info_dict = run_info.error["additionalInfo"][0]
assert "ToolExecutionErrorDetails" == user_error_info_dict["type"]
user_error_info = user_error_info_dict["info"]
assert message == user_error_info["message"]
# Make sure the stack trace is as expected
stacktrace = user_error_info["traceback"].split("\n")
expected_stack_trace = expected_stack_traces[flow_folder]
assert len(stacktrace) == len(expected_stack_trace)
for expected_item, actual_item in zip(expected_stack_trace, stacktrace):
assert expected_item in actual_item
@pytest.mark.parametrize(
"flow_folder, failed_node_name, message",
[
("sync_tools_failures", "sync_fail", "In tool raise_an_exception: dummy_input"),
("async_tools_failures", "async_fail", "In tool raise_an_exception_async: dummy_input"),
],
)
def test_executor_exec_line_fail(self, flow_folder, failed_node_name, message):
yaml_file = get_yaml_file(flow_folder)
executor = FlowExecutor.create(yaml_file, {}, raise_ex=False)
line_result = executor.exec_line({})
run_info = line_result.run_info
assert run_info.output is None
assert run_info.status == Status.Failed
assert isinstance(run_info.api_calls, list)
assert len(run_info.api_calls) == 1
assert run_info.system_metrics["duration"] >= 0
assert run_info.error is not None
assert f"Execution failure in '{failed_node_name}'" in run_info.error["message"]
assert len(run_info.error["additionalInfo"]) == 1
user_error_info_dict = run_info.error["additionalInfo"][0]
assert "ToolExecutionErrorDetails" == user_error_info_dict["type"]
user_error_info = user_error_info_dict["info"]
assert message == user_error_info["message"]
# Make sure the stack trace is as expected
stacktrace = user_error_info["traceback"].split("\n")
expected_stack_trace = expected_stack_traces[flow_folder]
assert len(stacktrace) == len(expected_stack_trace)
for expected_item, actual_item in zip(expected_stack_trace, stacktrace):
assert expected_item in actual_item
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor | promptflow_repo/promptflow/src/promptflow/tests/executor/e2etests/test_image.py | import os
from pathlib import Path
from tempfile import mkdtemp
import pytest
from promptflow._utils.multimedia_utils import MIME_PATTERN, _create_image_from_file, _is_url, is_multimedia_dict
from promptflow.batch._batch_engine import OUTPUT_FILE_NAME, BatchEngine
from promptflow.batch._result import BatchResult
from promptflow.contracts.multimedia import Image
from promptflow.contracts.run_info import FlowRunInfo, RunInfo, Status
from promptflow.executor import FlowExecutor
from promptflow.storage._run_storage import DefaultRunStorage
from ..utils import get_flow_folder, get_yaml_file, is_image_file, is_jsonl_file, load_jsonl
SIMPLE_IMAGE_FLOW = "python_tool_with_simple_image"
SAMPLE_IMAGE_FLOW_WITH_DEFAULT = "python_tool_with_simple_image_with_default"
SIMPLE_IMAGE_WITH_INVALID_DEFAULT_VALUE_FLOW = "python_tool_with_invalid_default_value"
COMPOSITE_IMAGE_FLOW = "python_tool_with_composite_image"
CHAT_FLOW_WITH_IMAGE = "chat_flow_with_image"
EVAL_FLOW_WITH_SIMPLE_IMAGE = "eval_flow_with_simple_image"
EVAL_FLOW_WITH_COMPOSITE_IMAGE = "eval_flow_with_composite_image"
NESTED_API_CALLS_FLOW = "python_tool_with_image_nested_api_calls"
IMAGE_URL = (
"https://raw.githubusercontent.com/microsoft/promptflow/main/src/promptflow/tests/test_configs/datas/logo.jpg"
)
def get_test_cases_for_simple_input(flow_folder):
working_dir = get_flow_folder(flow_folder)
image = _create_image_from_file(working_dir / "logo.jpg")
inputs = [
{"data:image/jpg;path": str(working_dir / "logo.jpg")},
{"data:image/jpg;base64": image.to_base64()},
{"data:image/jpg;url": IMAGE_URL},
str(working_dir / "logo.jpg"),
image.to_base64(),
IMAGE_URL,
]
return [(flow_folder, {"image": input}) for input in inputs]
def get_test_cases_for_composite_input(flow_folder):
working_dir = get_flow_folder(flow_folder)
image_1 = _create_image_from_file(working_dir / "logo.jpg")
image_2 = _create_image_from_file(working_dir / "logo_2.png")
inputs = [
[
{"data:image/jpg;path": str(working_dir / "logo.jpg")},
{"data:image/png;path": str(working_dir / "logo_2.png")},
],
[{"data:image/jpg;base64": image_1.to_base64()}, {"data:image/png;base64": image_2.to_base64()}],
[{"data:image/jpg;url": IMAGE_URL}, {"data:image/png;url": IMAGE_URL}],
]
return [
(flow_folder, {"image_list": input, "image_dict": {"image_1": input[0], "image_2": input[1]}})
for input in inputs
]
def get_test_cases_for_node_run():
image = {"data:image/jpg;path": str(get_flow_folder(SIMPLE_IMAGE_FLOW) / "logo.jpg")}
simple_image_input = {"image": image}
image_list = [{"data:image/jpg;path": "logo.jpg"}, {"data:image/png;path": "logo_2.png"}]
image_dict = {
"image_dict": {
"image_1": {"data:image/jpg;path": "logo.jpg"},
"image_2": {"data:image/png;path": "logo_2.png"},
}
}
composite_image_input = {"image_list": image_list, "image_dcit": image_dict}
return [
(SIMPLE_IMAGE_FLOW, "python_node", simple_image_input, None),
(SIMPLE_IMAGE_FLOW, "python_node_2", simple_image_input, {"python_node": image}),
(COMPOSITE_IMAGE_FLOW, "python_node", composite_image_input, None),
(COMPOSITE_IMAGE_FLOW, "python_node_2", composite_image_input, None),
(
COMPOSITE_IMAGE_FLOW,
"python_node_3",
composite_image_input,
{"python_node": image_list, "python_node_2": image_dict},
),
]
def contain_image_reference(value, parent_path="temp"):
if isinstance(value, (FlowRunInfo, RunInfo)):
assert contain_image_reference(value.api_calls, parent_path)
assert contain_image_reference(value.inputs, parent_path)
assert contain_image_reference(value.output, parent_path)
return True
assert not isinstance(value, Image)
if isinstance(value, list):
return any(contain_image_reference(item, parent_path) for item in value)
if isinstance(value, dict):
if is_multimedia_dict(value):
v = list(value.values())[0]
assert isinstance(v, str)
assert _is_url(v) or str(Path(v).parent) == parent_path
return True
return any(contain_image_reference(v, parent_path) for v in value.values())
return False
def contain_image_object(value):
if isinstance(value, list):
return any(contain_image_object(item) for item in value)
elif isinstance(value, dict):
assert not is_multimedia_dict(value)
return any(contain_image_object(v) for v in value.values())
else:
return isinstance(value, Image)
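# contain_image_reference asserts that persisted run info holds only serialized image references
# (URLs, or paths under `parent_path`) rather than Image objects, while contain_image_object checks
# that in-memory outputs still carry actual Image instances.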
@pytest.mark.usefixtures("dev_connections")
@pytest.mark.e2etest
class TestExecutorWithImage:
@pytest.mark.parametrize(
"flow_folder, inputs",
get_test_cases_for_simple_input(SIMPLE_IMAGE_FLOW)
+ get_test_cases_for_composite_input(COMPOSITE_IMAGE_FLOW)
+ [(CHAT_FLOW_WITH_IMAGE, {}), (NESTED_API_CALLS_FLOW, {})],
)
def test_executor_exec_line_with_image(self, flow_folder, inputs, dev_connections):
working_dir = get_flow_folder(flow_folder)
os.chdir(working_dir)
storage = DefaultRunStorage(base_dir=working_dir, sub_dir=Path("./temp"))
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections, storage=storage)
flow_result = executor.exec_line(inputs)
assert isinstance(flow_result.output, dict)
assert contain_image_object(flow_result.output)
# Assert output also contains plain text.
        assert any(isinstance(v, str) for v in flow_result.output.values())
assert flow_result.run_info.status == Status.Completed
assert contain_image_reference(flow_result.run_info)
for _, node_run_info in flow_result.node_run_infos.items():
assert node_run_info.status == Status.Completed
assert contain_image_reference(node_run_info)
@pytest.mark.parametrize(
"flow_folder, node_name, flow_inputs, dependency_nodes_outputs", get_test_cases_for_node_run()
)
def test_executor_exec_node_with_image(
self, flow_folder, node_name, flow_inputs, dependency_nodes_outputs, dev_connections
):
working_dir = get_flow_folder(flow_folder)
os.chdir(working_dir)
storage = DefaultRunStorage(base_dir=working_dir, sub_dir=Path("./temp"))
run_info = FlowExecutor.load_and_exec_node(
get_yaml_file(flow_folder),
node_name,
flow_inputs=flow_inputs,
dependency_nodes_outputs=dependency_nodes_outputs,
connections=dev_connections,
storage=storage,
raise_ex=True,
)
assert run_info.status == Status.Completed
assert contain_image_reference(run_info)
# Assert image could be persisted to the specified path.
@pytest.mark.parametrize(
"output_sub_dir, assign_storage, expected_path",
[
("test_path", True, "test_storage"),
("test_path", False, "test_path"),
(None, True, "test_storage"),
(None, False, "."),
],
)
def test_executor_exec_node_with_image_storage_and_path(self, output_sub_dir, assign_storage, expected_path):
flow_folder = SIMPLE_IMAGE_FLOW
node_name = "python_node"
image = {"data:image/jpg;path": str(get_flow_folder(SIMPLE_IMAGE_FLOW) / "logo.jpg")}
flow_inputs = {"image": image}
working_dir = get_flow_folder(flow_folder)
os.chdir(working_dir)
storage = DefaultRunStorage(base_dir=working_dir, sub_dir=Path("./test_storage"))
run_info = FlowExecutor.load_and_exec_node(
get_yaml_file(flow_folder),
node_name,
flow_inputs=flow_inputs,
dependency_nodes_outputs=None,
connections=None,
storage=storage if assign_storage else None,
output_sub_dir=output_sub_dir,
raise_ex=True,
)
assert run_info.status == Status.Completed
assert contain_image_reference(run_info, parent_path=expected_path)
@pytest.mark.parametrize(
"flow_folder, node_name, flow_inputs, dependency_nodes_outputs",
[
(
SIMPLE_IMAGE_WITH_INVALID_DEFAULT_VALUE_FLOW,
"python_node_2",
{},
{
"python_node": {
"data:image/jpg;path": str(
get_flow_folder(SIMPLE_IMAGE_WITH_INVALID_DEFAULT_VALUE_FLOW) / "logo.jpg"
)
}
},
)
],
)
def test_executor_exec_node_with_invalid_default_value(
self, flow_folder, node_name, flow_inputs, dependency_nodes_outputs, dev_connections
):
working_dir = get_flow_folder(flow_folder)
os.chdir(working_dir)
storage = DefaultRunStorage(base_dir=working_dir, sub_dir=Path("./temp"))
run_info = FlowExecutor.load_and_exec_node(
get_yaml_file(flow_folder),
node_name,
flow_inputs=flow_inputs,
dependency_nodes_outputs=dependency_nodes_outputs,
connections=dev_connections,
storage=storage,
raise_ex=True,
)
assert run_info.status == Status.Completed
assert contain_image_reference(run_info)
@pytest.mark.parametrize(
"flow_folder, input_dirs, inputs_mapping, output_key, expected_outputs_number, has_aggregation_node",
[
(
SIMPLE_IMAGE_FLOW,
{"data": "."},
{"image": "${data.image}"},
"output",
4,
False,
),
(
SAMPLE_IMAGE_FLOW_WITH_DEFAULT,
{"data": "."},
{"image_2": "${data.image_2}"},
"output",
4,
False,
),
(
COMPOSITE_IMAGE_FLOW,
{"data": "inputs.jsonl"},
{"image_list": "${data.image_list}", "image_dict": "${data.image_dict}"},
"output",
2,
False,
),
(
CHAT_FLOW_WITH_IMAGE,
{"data": "inputs.jsonl"},
{"question": "${data.question}", "chat_history": "${data.chat_history}"},
"answer",
2,
False,
),
(
EVAL_FLOW_WITH_SIMPLE_IMAGE,
{"data": "inputs.jsonl"},
{"image": "${data.image}"},
"output",
2,
True,
),
(
EVAL_FLOW_WITH_COMPOSITE_IMAGE,
{"data": "inputs.jsonl"},
{"image_list": "${data.image_list}", "image_dict": "${data.image_dict}"},
"output",
2,
True,
),
],
)
def test_batch_engine_with_image(
self, flow_folder, input_dirs, inputs_mapping, output_key, expected_outputs_number, has_aggregation_node
):
flow_file = get_yaml_file(flow_folder)
working_dir = get_flow_folder(flow_folder)
output_dir = Path(mkdtemp())
batch_result = BatchEngine(flow_file, working_dir).run(
input_dirs, inputs_mapping, output_dir, max_lines_count=4
)
assert isinstance(batch_result, BatchResult)
assert batch_result.completed_lines == expected_outputs_number
assert all(is_jsonl_file(output_file) or is_image_file(output_file) for output_file in output_dir.iterdir())
outputs = load_jsonl(output_dir / OUTPUT_FILE_NAME)
assert len(outputs) == expected_outputs_number
for i, output in enumerate(outputs):
assert isinstance(output, dict)
assert "line_number" in output, f"line_number is not in {i}th output {output}"
assert output["line_number"] == i, f"line_number is not correct in {i}th output {output}"
result = output[output_key][0] if isinstance(output[output_key], list) else output[output_key]
assert all(MIME_PATTERN.search(key) for key in result), f"image is not in {i}th output {output}"
@pytest.mark.parametrize(
"flow_folder, inputs",
get_test_cases_for_simple_input(EVAL_FLOW_WITH_SIMPLE_IMAGE)
+ get_test_cases_for_composite_input(EVAL_FLOW_WITH_COMPOSITE_IMAGE),
)
def test_executor_exec_aggregation_with_image(self, flow_folder, inputs, dev_connections):
working_dir = get_flow_folder(flow_folder)
os.chdir(working_dir)
storage = DefaultRunStorage(base_dir=working_dir, sub_dir=Path("./temp"))
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections, storage=storage)
flow_result = executor.exec_line(inputs, index=0)
flow_inputs = {k: [v] for k, v in inputs.items()}
aggregation_inputs = {k: [v] for k, v in flow_result.aggregation_inputs.items()}
aggregation_results = executor.exec_aggregation(flow_inputs, aggregation_inputs=aggregation_inputs)
for _, node_run_info in aggregation_results.node_run_infos.items():
assert node_run_info.status == Status.Completed
assert contain_image_reference(node_run_info)
def test_batch_run_then_eval_with_image(self):
        # submit a flow in batch mode first
batch_flow_folder = get_flow_folder(COMPOSITE_IMAGE_FLOW)
batch_flow_file = get_yaml_file(batch_flow_folder)
batch_working_dir = get_flow_folder(batch_flow_folder)
batch_output_dir = Path(mkdtemp())
batch_input_dirs = {"data": "inputs.jsonl"}
batch_inputs_mapping = {"image_list": "${data.image_list}", "image_dict": "${data.image_dict}"}
batch_result = BatchEngine(batch_flow_file, batch_working_dir).run(
batch_input_dirs, batch_inputs_mapping, batch_output_dir
)
assert batch_result.completed_lines == batch_result.total_lines
# use the output of batch run as input of eval flow
eval_flow_folder = get_flow_folder(EVAL_FLOW_WITH_COMPOSITE_IMAGE)
eval_flow_file = get_yaml_file(eval_flow_folder)
eval_working_dir = get_flow_folder(eval_flow_folder)
eval_output_dir = Path(mkdtemp())
eval_input_dirs = {
"data": batch_flow_folder / "inputs.jsonl",
"run.outputs": batch_output_dir / OUTPUT_FILE_NAME,
}
eval_inputs_mapping = {"image_list": "${run.outputs.output}", "image_dict": "${data.image_dict}"}
eval_result = BatchEngine(eval_flow_file, eval_working_dir).run(
eval_input_dirs, eval_inputs_mapping, eval_output_dir
)
assert eval_result.completed_lines == eval_result.total_lines
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor | promptflow_repo/promptflow/src/promptflow/tests/executor/e2etests/test_telemetry.py | import json
import uuid
from collections import namedtuple
from importlib.metadata import version
from pathlib import Path
from tempfile import mkdtemp
from unittest.mock import patch
import pytest
from promptflow._core.operation_context import OperationContext
from promptflow.batch._batch_engine import OUTPUT_FILE_NAME, BatchEngine
from promptflow.contracts.run_mode import RunMode
from promptflow.executor import FlowExecutor
from ..utils import get_flow_folder, get_flow_inputs_file, get_yaml_file, load_jsonl
IS_LEGACY_OPENAI = version("openai").startswith("0.")
Completion = namedtuple("Completion", ["choices"])
Choice = namedtuple("Choice", ["delta"])
Delta = namedtuple("Delta", ["content"])
def stream_response(kwargs):
if IS_LEGACY_OPENAI:
delta = Delta(content=json.dumps(kwargs.get("headers", {})))
yield Completion(choices=[{"delta": delta}])
else:
delta = Delta(content=json.dumps(kwargs.get("extra_headers", {})))
yield Completion(choices=[Choice(delta=delta)])
def mock_stream_chat(*args, **kwargs):
return stream_response(kwargs)
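# The mocked chat API below never calls OpenAI; it simply echoes the request headers back as the
# streamed content so the tests can assert on the injected telemetry headers. The two branches in
# stream_response cover the response shapes of openai 0.x and openai>=1.0.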
@pytest.mark.skip(reason="Skip on Mac and Windows and Linux, patch does not work in the spawn process")
@pytest.mark.usefixtures("dev_connections")
@pytest.mark.e2etest
class TestExecutorTelemetry:
def test_executor_openai_telemetry(self, dev_connections):
"""This test validates telemetry info header is correctly injected to OpenAI API
by mocking chat api method. The mock method will return a generator that yields a
namedtuple with a json string of the headers passed to the method.
"""
if IS_LEGACY_OPENAI:
api = "openai.ChatCompletion.create"
else:
api = "openai.resources.chat.Completions.create"
with patch(api, new=mock_stream_chat):
operation_context = OperationContext.get_instance()
operation_context.clear()
flow_folder = "openai_chat_api_flow"
# Set user-defined properties `scenario` in context
operation_context.scenario = "test"
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
# flow run case
inputs = {"question": "What's your name?", "chat_history": [], "stream": True}
flow_result = executor.exec_line(inputs)
assert isinstance(flow_result.output, dict)
headers = json.loads(flow_result.output.get("answer", ""))
assert "promptflow/" in headers.get("x-ms-useragent")
assert headers.get("ms-azure-ai-promptflow-scenario") == "test"
assert headers.get("ms-azure-ai-promptflow-run-mode") == RunMode.Test.name
# batch run case
run_id = str(uuid.uuid4())
batch_engine = BatchEngine(
get_yaml_file(flow_folder), get_flow_folder(flow_folder), connections=dev_connections
)
input_dirs = {"data": get_flow_inputs_file(flow_folder)}
inputs_mapping = {"question": "${data.question}", "chat_history": "${data.chat_history}"}
output_dir = Path(mkdtemp())
batch_engine.run(input_dirs, inputs_mapping, output_dir, run_id=run_id)
outputs = load_jsonl(output_dir / OUTPUT_FILE_NAME)
for line in outputs:
headers = json.loads(line.get("answer", ""))
assert "promptflow/" in headers.get("x-ms-useragent")
assert headers.get("ms-azure-ai-promptflow-scenario") == "test"
assert headers.get("ms-azure-ai-promptflow-run-mode") == RunMode.Batch.name
# single_node case
run_info = FlowExecutor.load_and_exec_node(
get_yaml_file("openai_chat_api_flow"),
"chat",
flow_inputs=inputs,
connections=dev_connections,
raise_ex=True,
)
assert run_info.output is not None
headers = json.loads(run_info.output)
assert "promptflow/" in headers.get("x-ms-useragent")
assert headers.get("ms-azure-ai-promptflow-scenario") == "test"
assert headers.get("ms-azure-ai-promptflow-run-mode") == RunMode.SingleNode.name
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/integrations/test_langchain.py | import pytest
from langchain.schema import AgentAction, AgentFinish
from promptflow.integrations.langchain import LangChainEventType, PromptFlowCallbackHandler
@pytest.mark.unittest
class TestLangchain:
def get_handler(self):
class MockTracer():
def __init__(self):
self._trace_stack = []
def _push(self, trace):
self._trace_stack.append(trace)
def _pop(self, output=None, error=None):
self._trace_stack.pop()
handler = PromptFlowCallbackHandler()
handler._tracer = MockTracer()
return handler
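    # MockTracer only records pushes and pops on a local stack, so the tests can assert on trace
    # stack depth and trace names without wiring up a real tracer.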
def test_langchain_traces(self):
handler = self.get_handler()
handler.on_agent_action(action=AgentAction("test_agent_name", "test", "test"))
handler.on_tool_start(serialized={"name": "test_tool_name"}, input_str="test")
handler.on_chain_start(serialized={"id": ["test_chain_name"]}, inputs={"test": "test"})
handler.on_llm_start(serialized={"test": "test"}, prompts=["test"])
assert handler._events_stack == [
LangChainEventType.AGENT,
LangChainEventType.TOOL,
LangChainEventType.CHAIN,
LangChainEventType.LLM
]
assert len(handler._tracer._trace_stack) == 4
assert handler._tracer._trace_stack[0].name == "test_agent_name"
assert handler._tracer._trace_stack[1].name == "test_tool_name"
assert handler._tracer._trace_stack[2].name == "test_chain_name"
assert handler._tracer._trace_stack[3].name == "LLM" # The default name
handler.on_llm_error(error=None)
handler.on_chain_error(error=None)
handler.on_tool_error(error=None)
handler.on_agent_finish(finish=AgentFinish({"test": "test"}, "test"))
assert len(handler._events_stack) == 0
assert len(handler._tracer._trace_stack) == 0
def test_langchain_traces_with_unpaired_events(self):
handler = self.get_handler()
handler.on_tool_start(serialized={"test": "test"}, input_str="test")
# Missing on_chain_start
# Missing on_llm_start
assert len(handler._tracer._trace_stack) == 1
handler.on_llm_end(response=None)
handler.on_chain_end(outputs={"test": "test"})
assert len(handler._tracer._trace_stack) == 1
handler.on_tool_end(output="test")
assert len(handler._events_stack) == 0
assert len(handler._tracer._trace_stack) == 0
handler = self.get_handler()
handler.on_tool_start(serialized={"test": "test"}, input_str="test")
handler.on_chain_start(serialized={"test": "test"}, inputs={"test": "test"})
handler.on_llm_start(serialized={"test": "test"}, prompts=["test"])
assert len(handler._tracer._trace_stack) == 3
# Missing on_chain_end
# Missing on_llm_end
handler.on_tool_end(output="test")
assert len(handler._events_stack) == 0
assert len(handler._tracer._trace_stack) == 0
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/storage/test_queue_run_storage.py | import pytest
from multiprocessing import Queue
from promptflow.executor._line_execution_process_pool import QueueRunStorage
from promptflow.contracts.run_info import FlowRunInfo
from promptflow.contracts.run_info import RunInfo as NodeRunInfo
@pytest.mark.unittest
class TestLineExecutionProcessPool:
def test_persist_node_run(self):
queue = Queue()
run_storage = QueueRunStorage(queue)
node_run_info = NodeRunInfo(
node="node1",
flow_run_id="flow_run_id",
run_id="run_id",
status="status",
inputs="inputs",
output="output",
metrics="metrics",
error="error",
parent_run_id="parent_run_id",
start_time="start_time",
end_time="end_time",
index="index",
api_calls="api_calls",
variant_id="variant_id",
cached_run_id="cached_run_id",
cached_flow_run_id="cached_flow_run_id",
logs="logs",
system_metrics="system_metrics",
result="result",
)
run_storage.persist_node_run(node_run_info)
assert queue.get() == node_run_info
def test_persist_flow_run(self):
queue = Queue()
run_storage = QueueRunStorage(queue)
flow_run_info = FlowRunInfo(
run_id="run_id",
status="status",
inputs="inputs",
output="output",
metrics="metrics",
request="request",
root_run_id="root_run_id",
source_run_id="source_run_id",
flow_id="flow_id",
error="error",
parent_run_id="parent_run_id",
start_time="start_time",
end_time="end_time",
index="index",
api_calls="api_calls",
variant_id="variant_id",
system_metrics="system_metrics",
result="result",
)
run_storage.persist_flow_run(flow_run_info)
assert queue.get() == flow_run_info
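    # Both tests above verify that QueueRunStorage (used by the line execution process pool) simply
    # forwards run info objects to the multiprocessing queue unchanged.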
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/storage/test_run_records.py | import json
from datetime import datetime
import pytest
from promptflow._utils.dataclass_serializer import serialize
from promptflow.contracts.run_info import FlowRunInfo, RunInfo, Status
from promptflow.storage.run_records import LineRunRecord, NodeRunRecord
@pytest.mark.unittest
def test_line_record():
start_time = datetime(2023, 7, 12)
end_time = datetime(2023, 7, 13)
flow_run_info = FlowRunInfo(
run_id=None,
status=Status.Completed,
error=None,
inputs=None,
output=None,
metrics=None,
request=None,
parent_run_id=None,
root_run_id=None,
source_run_id=None,
flow_id=None,
start_time=start_time,
end_time=end_time,
index=0,
variant_id=None,
)
line_record = LineRunRecord.from_run_info(flow_run_info)
assert line_record.line_number == 0
assert line_record.start_time == start_time.isoformat()
assert line_record.end_time == end_time.isoformat()
assert line_record.status == Status.Completed.value
assert line_record.run_info == serialize(flow_run_info)
@pytest.mark.unittest
def test_line_serialize():
start_time = datetime(2023, 7, 12)
end_time = datetime(2023, 7, 13)
flow_run_info = FlowRunInfo(
run_id=None,
status=Status.Completed,
error=None,
inputs=None,
output=None,
metrics=None,
request=None,
parent_run_id=None,
root_run_id=None,
source_run_id=None,
flow_id=None,
start_time=start_time,
end_time=end_time,
index=0,
variant_id=None,
)
line_record = LineRunRecord.from_run_info(flow_run_info)
result = line_record.serialize()
expected_result = json.dumps(line_record.__dict__)
assert result == expected_result
@pytest.mark.unittest
def test_node_record():
start_time = datetime(2023, 7, 12)
end_time = datetime(2023, 7, 13)
node_run_info = RunInfo(
node=None,
run_id=None,
flow_run_id=None,
status=Status.Completed,
inputs=None,
output=None,
metrics=None,
error=None,
parent_run_id=None,
start_time=start_time,
end_time=end_time,
index=0,
)
node_record = NodeRunRecord.from_run_info(node_run_info)
assert node_record.line_number == 0
assert node_record.start_time == start_time.isoformat()
assert node_record.end_time == end_time.isoformat()
assert node_record.status == Status.Completed.value
assert node_record.run_info == serialize(node_run_info)
@pytest.mark.unittest
def test_node_serialize():
start_time = datetime(2023, 7, 12)
end_time = datetime(2023, 7, 13)
node_run_info = RunInfo(
node=None,
run_id=None,
flow_run_id=None,
status=Status.Completed,
inputs=None,
output=None,
metrics=None,
error=None,
parent_run_id=None,
start_time=start_time,
end_time=end_time,
index=0,
)
node_record = NodeRunRecord.from_run_info(node_run_info)
result = node_record.serialize()
expected_result = json.dumps(node_record.__dict__)
assert result == expected_result
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/processpool/test_line_execution_process_pool.py | import multiprocessing
import os
import sys
import uuid
from multiprocessing import Queue
from pathlib import Path
from tempfile import mkdtemp
from unittest.mock import patch
import pytest
from pytest_mock import MockFixture
from promptflow._utils.logger_utils import LogContext
from promptflow.contracts.run_info import Status
from promptflow.exceptions import ErrorTarget, UserErrorException
from promptflow.executor import FlowExecutor
from promptflow.executor._errors import SpawnedForkProcessManagerStartFailure
from promptflow.executor._line_execution_process_pool import (
LineExecutionProcessPool,
_exec_line,
format_current_process_info,
get_available_max_worker_count,
log_process_status,
)
from promptflow.executor._process_manager import create_spawned_fork_process_manager
from promptflow.executor._result import LineResult
from ...utils import get_flow_sample_inputs, get_yaml_file
SAMPLE_FLOW = "web_classification_no_variants"
def get_line_inputs(flow_folder=""):
if flow_folder:
        inputs = get_bulk_inputs(flow_folder=flow_folder)
return inputs[0]
return {
"url": "https://www.microsoft.com/en-us/windows/",
"text": "some_text",
}
def get_bulk_inputs(nlines=4, flow_folder="", sample_inputs_file="", return_dict=False):
if flow_folder:
if not sample_inputs_file:
sample_inputs_file = "samples.json"
inputs = get_flow_sample_inputs(flow_folder, sample_inputs_file=sample_inputs_file)
if isinstance(inputs, list) and len(inputs) > 0:
return inputs
elif isinstance(inputs, dict):
if return_dict:
return inputs
return [inputs]
else:
raise Exception(f"Invalid type of bulk input: {inputs}")
    return [get_line_inputs() for _ in range(nlines)]
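# The helpers below run inside child processes (see the tests further down) so that the
# PF_BATCH_METHOD and PF_WORKER_COUNT environment variables they set do not leak into the
# parent pytest process.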
def execute_in_fork_mode_subprocess(
dev_connections, flow_folder, is_set_environ_pf_worker_count, pf_worker_count, n_process
):
os.environ["PF_BATCH_METHOD"] = "fork"
if is_set_environ_pf_worker_count:
os.environ["PF_WORKER_COUNT"] = pf_worker_count
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
run_id = str(uuid.uuid4())
bulk_inputs = get_bulk_inputs()
nlines = len(bulk_inputs)
with patch("promptflow.executor._line_execution_process_pool.bulk_logger") as mock_logger:
with LineExecutionProcessPool(
executor,
nlines,
run_id,
None,
) as pool:
assert pool._n_process == n_process
if is_set_environ_pf_worker_count:
mock_logger.info.assert_any_call(
f"Set process count to {pf_worker_count} with the environment " f"variable 'PF_WORKER_COUNT'."
)
else:
factors = {
"default_worker_count": pool._DEFAULT_WORKER_COUNT,
"row_count": pool._nlines,
}
mock_logger.info.assert_any_call(
f"Set process count to {n_process} by taking the minimum value among the " f"factors of {factors}."
)
def execute_in_spawn_mode_subprocess(
dev_connections,
flow_folder,
is_set_environ_pf_worker_count,
is_calculation_smaller_than_set,
pf_worker_count,
estimated_available_worker_count,
n_process,
):
os.environ["PF_BATCH_METHOD"] = "spawn"
if is_set_environ_pf_worker_count:
os.environ["PF_WORKER_COUNT"] = pf_worker_count
executor = FlowExecutor.create(
get_yaml_file(flow_folder),
dev_connections,
)
run_id = str(uuid.uuid4())
bulk_inputs = get_bulk_inputs()
nlines = len(bulk_inputs)
with patch("psutil.virtual_memory") as mock_mem:
mock_mem.return_value.available = 128.0 * 1024 * 1024
with patch("psutil.Process") as mock_process:
mock_process.return_value.memory_info.return_value.rss = 64 * 1024 * 1024
with patch("promptflow.executor._line_execution_process_pool.bulk_logger") as mock_logger:
with LineExecutionProcessPool(
executor,
nlines,
run_id,
None,
) as pool:
assert pool._n_process == n_process
if is_set_environ_pf_worker_count and is_calculation_smaller_than_set:
mock_logger.info.assert_any_call(
f"Set process count to {pf_worker_count} with the environment "
f"variable 'PF_WORKER_COUNT'."
)
mock_logger.warning.assert_any_call(
f"The current process count ({pf_worker_count}) is larger than recommended process count "
f"({estimated_available_worker_count}) that estimated by system available memory. This may "
f"cause memory exhaustion"
)
elif is_set_environ_pf_worker_count and not is_calculation_smaller_than_set:
mock_logger.info.assert_any_call(
f"Set process count to {pf_worker_count} with the environment "
f"variable 'PF_WORKER_COUNT'."
)
elif not is_set_environ_pf_worker_count:
factors = {
"default_worker_count": pool._DEFAULT_WORKER_COUNT,
"row_count": pool._nlines,
"estimated_worker_count_based_on_memory_usage": estimated_available_worker_count,
}
mock_logger.info.assert_any_call(
f"Set process count to {n_process} by taking the minimum value among the factors "
f"of {factors}."
)
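# In the spawn-mode helper above, psutil is patched so that available memory is 128MB and the
# current process consumes 64MB, which makes the memory-based worker estimate deterministic
# (128MB / 64MB = 2 workers) regardless of the machine running the tests.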
def create_line_execution_process_pool(dev_connections):
executor = FlowExecutor.create(get_yaml_file(SAMPLE_FLOW), dev_connections)
run_id = str(uuid.uuid4())
bulk_inputs = get_bulk_inputs()
nlines = len(bulk_inputs)
line_execution_process_pool = LineExecutionProcessPool(
executor,
nlines,
run_id,
None,
line_timeout_sec=1,
)
return line_execution_process_pool
def set_environment_succeeded_in_subprocess(dev_connections, pf_batch_method):
os.environ["PF_BATCH_METHOD"] = pf_batch_method
line_execution_process_pool = create_line_execution_process_pool(dev_connections)
use_fork = line_execution_process_pool._use_fork
assert use_fork is False
def set_environment_failed_in_subprocess(dev_connections):
with patch("promptflow.executor._line_execution_process_pool.bulk_logger") as mock_logger:
mock_logger.warning.return_value = None
os.environ["PF_BATCH_METHOD"] = "test"
line_execution_process_pool = create_line_execution_process_pool(dev_connections)
use_fork = line_execution_process_pool._use_fork
assert use_fork == (multiprocessing.get_start_method() == "fork")
sys_start_methods = multiprocessing.get_all_start_methods()
        expected_log_message = (
            "Failed to set start method to 'test', start method test" f" is not in: {sys_start_methods}."
        )
        mock_logger.warning.assert_called_once_with(expected_log_message)
def not_set_environment_in_subprocess(dev_connections):
line_execution_process_pool = create_line_execution_process_pool(dev_connections)
use_fork = line_execution_process_pool._use_fork
assert use_fork == (multiprocessing.get_start_method() == "fork")
def custom_create_spawned_fork_process_manager(*args, **kwargs):
create_spawned_fork_process_manager("test", *args, **kwargs)
@pytest.mark.unittest
class TestLineExecutionProcessPool:
@pytest.mark.parametrize(
"flow_folder",
[
SAMPLE_FLOW,
],
)
def test_line_execution_process_pool(self, flow_folder, dev_connections):
log_path = str(Path(mkdtemp()) / "test.log")
log_context_initializer = LogContext(log_path).get_initializer()
log_context = log_context_initializer()
with log_context:
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
executor._log_interval = 1
run_id = str(uuid.uuid4())
bulk_inputs = get_bulk_inputs()
nlines = len(bulk_inputs)
run_id = run_id or str(uuid.uuid4())
with LineExecutionProcessPool(
executor,
nlines,
run_id,
None,
) as pool:
result_list = pool.run(zip(range(nlines), bulk_inputs))
assert len(result_list) == nlines
for i, line_result in enumerate(result_list):
assert isinstance(line_result, LineResult)
assert line_result.run_info.status == Status.Completed, f"{i}th line got {line_result.run_info.status}"
@pytest.mark.parametrize(
"flow_folder",
[
SAMPLE_FLOW,
],
)
def test_line_execution_not_completed(self, flow_folder, dev_connections):
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
run_id = str(uuid.uuid4())
bulk_inputs = get_bulk_inputs()
nlines = len(bulk_inputs)
with LineExecutionProcessPool(
executor,
nlines,
run_id,
None,
line_timeout_sec=1,
) as pool:
result_list = pool.run(zip(range(nlines), bulk_inputs))
result_list = sorted(result_list, key=lambda r: r.run_info.index)
assert len(result_list) == nlines
for i, line_result in enumerate(result_list):
assert isinstance(line_result, LineResult)
assert line_result.run_info.error["message"] == f"Line {i} execution timeout for exceeding 1 seconds"
assert line_result.run_info.error["code"] == "UserError"
assert line_result.run_info.status == Status.Failed
@pytest.mark.parametrize(
"flow_folder",
[
SAMPLE_FLOW,
],
)
def test_exec_line(self, flow_folder, dev_connections, mocker: MockFixture):
output_queue = Queue()
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
run_id = str(uuid.uuid4())
line_inputs = get_line_inputs()
line_result = _exec_line(
executor=executor,
output_queue=output_queue,
inputs=line_inputs,
run_id=run_id,
index=0,
line_timeout_sec=600,
)
assert isinstance(line_result, LineResult)
@pytest.mark.parametrize(
"flow_folder",
[
SAMPLE_FLOW,
],
)
def test_exec_line_failed_when_line_execution_not_start(self, flow_folder, dev_connections, mocker: MockFixture):
output_queue = Queue()
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
test_error_msg = "Test user error"
with patch("promptflow.executor.flow_executor.FlowExecutor.exec_line", autouse=True) as mock_exec_line:
mock_exec_line.side_effect = UserErrorException(
message=test_error_msg, target=ErrorTarget.AZURE_RUN_STORAGE
)
run_id = str(uuid.uuid4())
line_inputs = get_line_inputs()
line_result = _exec_line(
executor=executor,
output_queue=output_queue,
inputs=line_inputs,
run_id=run_id,
index=0,
line_timeout_sec=600,
)
assert isinstance(line_result, LineResult)
assert line_result.run_info.error["message"] == test_error_msg
assert line_result.run_info.error["code"] == "UserError"
assert line_result.run_info.status == Status.Failed
@pytest.mark.parametrize(
"flow_folder",
[
SAMPLE_FLOW,
],
)
def test_process_pool_run_with_exception(self, flow_folder, dev_connections, mocker: MockFixture):
        # Mock the process pool run so that it raises an error during execution
test_error_msg = "Test user error"
mocker.patch(
"promptflow.executor._line_execution_process_pool.LineExecutionProcessPool."
"_monitor_workers_and_process_tasks_in_thread",
side_effect=UserErrorException(message=test_error_msg, target=ErrorTarget.AZURE_RUN_STORAGE),
)
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
run_id = str(uuid.uuid4())
bulk_inputs = get_bulk_inputs()
nlines = len(bulk_inputs)
with LineExecutionProcessPool(
executor,
nlines,
run_id,
None,
) as pool:
with pytest.raises(UserErrorException) as e:
pool.run(zip(range(nlines), bulk_inputs))
assert e.value.message == test_error_msg
assert e.value.target == ErrorTarget.AZURE_RUN_STORAGE
assert e.value.error_codes[0] == "UserError"
@pytest.mark.parametrize(
("flow_folder", "is_set_environ_pf_worker_count", "pf_worker_count", "n_process"),
[(SAMPLE_FLOW, True, "3", 3), (SAMPLE_FLOW, False, None, 4)],
)
def test_process_pool_parallelism_in_fork_mode(
self, dev_connections, flow_folder, is_set_environ_pf_worker_count, pf_worker_count, n_process
):
if "fork" not in multiprocessing.get_all_start_methods():
pytest.skip("Unsupported start method: fork")
p = multiprocessing.Process(
target=execute_in_fork_mode_subprocess,
args=(dev_connections, flow_folder, is_set_environ_pf_worker_count, pf_worker_count, n_process),
)
p.start()
p.join()
assert p.exitcode == 0
@pytest.mark.parametrize(
(
"flow_folder",
"is_set_environ_pf_worker_count",
"is_calculation_smaller_than_set",
"pf_worker_count",
"estimated_available_worker_count",
"n_process",
),
[
(SAMPLE_FLOW, True, False, "2", 4, 2),
(SAMPLE_FLOW, True, True, "6", 2, 6),
(SAMPLE_FLOW, False, True, None, 2, 2),
],
)
def test_process_pool_parallelism_in_spawn_mode(
self,
dev_connections,
flow_folder,
is_set_environ_pf_worker_count,
is_calculation_smaller_than_set,
pf_worker_count,
estimated_available_worker_count,
n_process,
):
if "spawn" not in multiprocessing.get_all_start_methods():
pytest.skip("Unsupported start method: spawn")
p = multiprocessing.Process(
target=execute_in_spawn_mode_subprocess,
args=(
dev_connections,
flow_folder,
is_set_environ_pf_worker_count,
is_calculation_smaller_than_set,
pf_worker_count,
estimated_available_worker_count,
n_process,
),
)
p.start()
p.join()
assert p.exitcode == 0
    def test_process_set_environment_variable_succeeded(self, dev_connections):
p = multiprocessing.Process(
            target=set_environment_succeeded_in_subprocess,
args=(
dev_connections,
"spawn",
),
)
p.start()
p.join()
assert p.exitcode == 0
def test_process_set_environment_variable_failed(self, dev_connections):
p = multiprocessing.Process(target=set_environment_failed_in_subprocess, args=(dev_connections,))
p.start()
p.join()
assert p.exitcode == 0
def test_process_not_set_environment_variable(self, dev_connections):
p = multiprocessing.Process(target=not_set_environment_in_subprocess, args=(dev_connections,))
p.start()
p.join()
assert p.exitcode == 0
@pytest.mark.skipif(sys.platform == "win32" or sys.platform == "darwin", reason="Only test on linux")
@pytest.mark.parametrize(
"flow_folder",
[
SAMPLE_FLOW,
],
)
@patch(
"promptflow.executor._process_manager.create_spawned_fork_process_manager",
custom_create_spawned_fork_process_manager,
)
def test_spawned_fork_process_manager_crashed_in_fork_mode(self, flow_folder, dev_connections):
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
run_id = str(uuid.uuid4())
bulk_inputs = get_bulk_inputs()
nlines = len(bulk_inputs)
run_id = run_id or str(uuid.uuid4())
with pytest.raises(SpawnedForkProcessManagerStartFailure) as e:
with LineExecutionProcessPool(
executor,
nlines,
run_id,
None,
) as pool:
pool.run(zip(range(nlines), bulk_inputs))
assert "Failed to start spawned fork process manager" in str(e.value)
class TestGetAvailableMaxWorkerCount:
@pytest.mark.parametrize(
"available_memory, process_memory, expected_max_worker_count, actual_calculate_worker_count",
[
(128.0, 64.0, 2, 2), # available_memory/process_memory > 1
(63.0, 64.0, 1, 0), # available_memory/process_memory < 1
],
)
def test_get_available_max_worker_count(
self, available_memory, process_memory, expected_max_worker_count, actual_calculate_worker_count
):
with patch("psutil.virtual_memory") as mock_mem:
mock_mem.return_value.available = available_memory * 1024 * 1024
with patch("psutil.Process") as mock_process:
mock_process.return_value.memory_info.return_value.rss = process_memory * 1024 * 1024
with patch("promptflow.executor._line_execution_process_pool.bulk_logger") as mock_logger:
mock_logger.warning.return_value = None
estimated_available_worker_count = get_available_max_worker_count()
assert estimated_available_worker_count == expected_max_worker_count
if actual_calculate_worker_count < 1:
mock_logger.warning.assert_called_with(
f"Current system's available memory is {available_memory}MB, less than the memory "
f"{process_memory}MB required by the process. The maximum available worker count is 1."
)
else:
mock_logger.info.assert_called_with(
f"Current system's available memory is {available_memory}MB, "
f"memory consumption of current process is {process_memory}MB, "
f"estimated available worker count is {available_memory}/{process_memory} "
f"= {actual_calculate_worker_count}"
)
@pytest.mark.unittest
class TestFormatCurrentProcess:
def test_format_current_process_info(self):
process_name = "process_name"
process_pid = 123
line_number = 13
formatted_message = format_current_process_info(process_name, process_pid, line_number)
expected_returned_log_message = (
f"Process name({process_name})-Process id({process_pid})-Line number({line_number})"
)
assert formatted_message == expected_returned_log_message
@patch("promptflow.executor._line_execution_process_pool.bulk_logger.info", autospec=True)
def test_log_process_status_start_execution(self, mock_logger_info):
process_name = "process_name"
process_pid = 123
line_number = 13
log_process_status(process_name, process_pid, line_number)
        expected_log_message = (
            f"Process name({process_name})-Process id({process_pid})-Line number({line_number}) start execution."
        )
        mock_logger_info.assert_called_once_with(expected_log_message)
@patch("promptflow.executor._line_execution_process_pool.bulk_logger.info", autospec=True)
def test_log_process_status_completed(self, mock_logger_info):
process_name = "process_name"
process_pid = 123
line_number = 13
log_process_status(process_name, process_pid, line_number, is_completed=True)
        expected_log_message = (
            f"Process name({process_name})-Process id({process_pid})-Line number({line_number}) completed."
        )
        mock_logger_info.assert_called_once_with(expected_log_message)
@patch("promptflow.executor._line_execution_process_pool.bulk_logger.info", autospec=True)
def test_log_process_status_failed(self, mock_logger_info):
process_name = "process_name"
process_pid = 123
line_number = 13
log_process_status(process_name, process_pid, line_number, is_failed=True)
        expected_log_message = (
            f"Process name({process_name})-Process id({process_pid})-Line number({line_number}) failed."
        )
        mock_logger_info.assert_called_once_with(expected_log_message)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/batch/test_batch_engine.py | from pathlib import Path
from tempfile import mkdtemp
from unittest.mock import Mock, patch
import pytest
from promptflow._core._errors import UnexpectedError
from promptflow.batch import APIBasedExecutorProxy, BatchEngine, CSharpExecutorProxy, PythonExecutorProxy
from promptflow.contracts.run_info import Status
from promptflow.exceptions import ErrorTarget
from promptflow.executor._errors import ConnectionNotFound
from promptflow.executor._result import AggregationResult
from ...utils import MemoryRunStorage, get_yaml_file, load_jsonl
from .test_result import get_line_results, get_node_run_infos
@pytest.mark.unittest
class TestBatchEngine:
@pytest.mark.parametrize(
"side_effect, ex_type, ex_target, ex_codes, ex_msg",
[
(
Exception("test error"),
UnexpectedError,
ErrorTarget.BATCH,
["SystemError", "UnexpectedError"],
"Unexpected error occurred while executing the batch run. Error: (Exception) test error.",
),
(
ConnectionNotFound(message="Connection 'aoai_conn' not found"),
ConnectionNotFound,
ErrorTarget.EXECUTOR,
["UserError", "ValidationError", "InvalidRequest", "ConnectionNotFound"],
"Connection 'aoai_conn' not found",
),
],
)
def test_batch_engine_run_error(self, side_effect, ex_type, ex_target, ex_codes, ex_msg):
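        # Unknown exceptions raised during execution are wrapped into UnexpectedError targeting BATCH,
        # while known user/validation errors such as ConnectionNotFound are re-raised unchanged.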
batch_engine = BatchEngine(get_yaml_file("print_input_flow"))
with patch("promptflow.batch._batch_engine.BatchEngine._exec_in_task") as mock_func:
mock_func.side_effect = side_effect
with patch(
"promptflow.batch._batch_inputs_processor.BatchInputsProcessor.process_batch_inputs", new=Mock()
):
with pytest.raises(ex_type) as e:
batch_engine.run({}, {}, Path("."))
assert e.value.target == ex_target
assert e.value.error_codes == ex_codes
assert e.value.message == ex_msg
def test_register_executor(self):
# assert original values
assert BatchEngine.executor_proxy_classes["python"] == PythonExecutorProxy
assert BatchEngine.executor_proxy_classes["csharp"] == CSharpExecutorProxy
class MockJSExecutorProxy(APIBasedExecutorProxy):
pass
# register new proxy
BatchEngine.register_executor("js", MockJSExecutorProxy)
assert BatchEngine.executor_proxy_classes["js"] == MockJSExecutorProxy
assert len(BatchEngine.executor_proxy_classes) == 3
def test_cancel(self):
batch_engine = BatchEngine(get_yaml_file("print_input_flow"))
assert batch_engine._is_canceled is False
batch_engine.cancel()
assert batch_engine._is_canceled is True
def test_persist_run_info(self):
line_dict = {
0: {"node_0": Status.Completed, "node_1": Status.Completed, "node_2": Status.Completed},
1: {"node_0": Status.Completed, "node_1": Status.Failed, "node_2": Status.Completed},
2: {"node_0": Status.Completed, "node_1": Status.Completed, "node_2": Status.Bypassed},
}
line_results = get_line_results(line_dict)
        mem_run_storage = MemoryRunStorage()
        batch_engine = BatchEngine(get_yaml_file("print_input_flow"), "", storage=mem_run_storage)
        batch_engine._persist_run_info(line_results)
        assert len(mem_run_storage._flow_runs) == 3
        assert len(mem_run_storage._node_runs) == 9
def test_persist_outputs(self):
outputs = [
{"line_number": 0, "output": "Hello World!"},
{"line_number": 1, "output": "Hello Microsoft!"},
{"line_number": 2, "output": "Hello Promptflow!"},
]
output_dir = Path(mkdtemp())
batch_engine = BatchEngine(get_yaml_file("print_input_flow"))
batch_engine._persist_outputs(outputs, output_dir)
actual_outputs = load_jsonl(output_dir / "output.jsonl")
assert actual_outputs == outputs
def test_update_aggr_result(self):
output = {"output": "Hello World!"}
metrics = {"accuracy": 0.9}
node_run_infos = get_node_run_infos({"aggr_1": Status.Completed, "aggr_2": Status.Completed})
aggre_result = AggregationResult(output={}, metrics={}, node_run_infos={})
aggr_exec_result = AggregationResult(output=output, metrics=metrics, node_run_infos=node_run_infos)
batch_engine = BatchEngine(get_yaml_file("print_input_flow"))
batch_engine._update_aggr_result(aggre_result, aggr_exec_result)
assert aggre_result.output == output
assert aggre_result.metrics == metrics
assert aggre_result.node_run_infos == node_run_infos
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/batch/test_csharp_executor_proxy.py | import json
import socket
import subprocess
from pathlib import Path
from tempfile import mkdtemp
from unittest.mock import MagicMock, patch
import pytest
from promptflow._core._errors import MetaFileNotFound, MetaFileReadError
from promptflow._sdk._constants import FLOW_TOOLS_JSON, PROMPT_FLOW_DIR_NAME
from promptflow.batch import CSharpExecutorProxy
from promptflow.executor._result import AggregationResult
from ...utils import get_flow_folder, get_yaml_file
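# Shared helper: builds a CSharpExecutorProxy with ensure_executor_startup patched out,
# so these unit tests never launch a real C# executor process.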
async def get_executor_proxy():
flow_file = get_yaml_file("csharp_flow")
working_dir = get_flow_folder("csharp_flow")
with patch.object(CSharpExecutorProxy, "ensure_executor_startup", return_value=None):
return await CSharpExecutorProxy.create(flow_file, working_dir)
@pytest.mark.unittest
class TestCSharpExecutorProxy:
@pytest.mark.asyncio
async def test_create(self):
with patch("subprocess.Popen") as mock_popen:
mock_popen.return_value = MagicMock()
executor_proxy = await get_executor_proxy()
mock_popen.assert_called_once()
assert executor_proxy is not None
assert executor_proxy._process is not None
assert executor_proxy._port is not None
assert executor_proxy.api_endpoint == f"http://localhost:{executor_proxy._port}"
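    # The three destroy tests below cover an already-exited process, a graceful terminate,
    # and a force kill when terminate times out.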
@pytest.mark.asyncio
async def test_destroy_with_already_terminated(self):
mock_process = MagicMock()
mock_process.poll.return_value = 0
executor_proxy = await get_executor_proxy()
executor_proxy._process = mock_process
await executor_proxy.destroy()
mock_process.poll.assert_called_once()
mock_process.terminate.assert_not_called()
@pytest.mark.asyncio
async def test_destroy_with_terminates_gracefully(self):
mock_process = MagicMock()
mock_process.poll.return_value = None
executor_proxy = await get_executor_proxy()
executor_proxy._process = mock_process
await executor_proxy.destroy()
mock_process.poll.assert_called_once()
mock_process.terminate.assert_called_once()
mock_process.wait.assert_called_once_with(timeout=5)
mock_process.kill.assert_not_called()
@pytest.mark.asyncio
async def test_destroy_with_force_kill(self):
mock_process = MagicMock()
mock_process.poll.return_value = None
mock_process.wait.side_effect = subprocess.TimeoutExpired(cmd="cmd", timeout=5)
executor_proxy = await get_executor_proxy()
executor_proxy._process = mock_process
await executor_proxy.destroy()
mock_process.poll.assert_called_once()
mock_process.terminate.assert_called_once()
mock_process.wait.assert_called_once_with(timeout=5)
mock_process.kill.assert_called_once()
@pytest.mark.asyncio
async def test_exec_aggregation_async(self):
executor_proxy = await get_executor_proxy()
aggr_result = await executor_proxy.exec_aggregation_async("", "", "")
assert isinstance(aggr_result, AggregationResult)
@pytest.mark.asyncio
@pytest.mark.parametrize(
"exit_code, expected_result",
[
(None, True),
(0, False),
(1, False),
],
)
async def test_is_executor_active(self, exit_code, expected_result):
executor_proxy = await get_executor_proxy()
executor_proxy._process = MagicMock()
executor_proxy._process.poll.return_value = exit_code
assert executor_proxy._is_executor_active() == expected_result
def test_get_tool_metadata_succeed(self):
working_dir = Path(mkdtemp())
expected_tool_meta = {"name": "csharp_flow", "version": "0.1.0"}
tool_meta_file = working_dir / PROMPT_FLOW_DIR_NAME / FLOW_TOOLS_JSON
tool_meta_file.parent.mkdir(parents=True, exist_ok=True)
with open(tool_meta_file, "w") as file:
json.dump(expected_tool_meta, file, indent=4)
tool_meta = CSharpExecutorProxy.get_tool_metadata("", working_dir)
assert tool_meta == expected_tool_meta
def test_get_tool_metadata_failed_with_file_not_found(self):
working_dir = Path(mkdtemp())
with pytest.raises(MetaFileNotFound):
CSharpExecutorProxy.get_tool_metadata("", working_dir)
def test_get_tool_metadata_failed_with_content_not_json(self):
working_dir = Path(mkdtemp())
tool_meta_file = working_dir / PROMPT_FLOW_DIR_NAME / FLOW_TOOLS_JSON
tool_meta_file.parent.mkdir(parents=True, exist_ok=True)
tool_meta_file.touch()
with pytest.raises(MetaFileReadError):
CSharpExecutorProxy.get_tool_metadata("", working_dir)
def test_find_available_port(self):
port = CSharpExecutorProxy.find_available_port()
assert isinstance(port, str)
assert int(port) > 0, "Port number should be greater than 0"
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(("localhost", int(port)))
except OSError:
pytest.fail("Port is not actually available")
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/batch/test_base_executor_proxy.py | import json
from pathlib import Path
from tempfile import mkdtemp
from typing import Optional
from unittest.mock import AsyncMock, patch
import httpx
import pytest
from promptflow._utils.exception_utils import ExceptionPresenter
from promptflow.batch._base_executor_proxy import APIBasedExecutorProxy
from promptflow.batch._errors import ExecutorServiceUnhealthy
from promptflow.contracts.run_info import Status
from promptflow.exceptions import ErrorTarget, ValidationException
from promptflow.executor._errors import ConnectionNotFound
from promptflow.storage._run_storage import AbstractRunStorage
from ...mock_execution_server import _get_aggr_result_dict, _get_line_result_dict
@pytest.mark.unittest
class TestAPIBasedExecutorProxy:
@pytest.mark.asyncio
@pytest.mark.parametrize(
"has_error",
[False, True],
)
async def test_exec_line_async(self, has_error):
mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
run_id = "test_run_id"
index = 1
inputs = {"question": "test"}
with patch("httpx.AsyncClient.post", new_callable=AsyncMock) as mock:
line_result_dict = _get_line_result_dict(run_id, index, inputs, has_error=has_error)
status_code = 400 if has_error else 200
mock.return_value = httpx.Response(status_code, json=line_result_dict)
line_result = await mock_executor_proxy.exec_line_async(inputs, index, run_id)
            assert line_result.output == ({} if has_error else {"answer": "Hello world!"})
assert line_result.run_info.run_id == run_id
assert line_result.run_info.index == index
            assert line_result.run_info.status == (Status.Failed if has_error else Status.Completed)
assert line_result.run_info.inputs == inputs
assert (line_result.run_info.error is not None) == has_error
@pytest.mark.asyncio
async def test_exec_aggregation_async(self):
mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
run_id = "test_run_id"
batch_inputs = {"question": ["test", "error"]}
aggregation_inputs = {"${get_answer.output}": ["Incorrect", "Correct"]}
with patch("httpx.AsyncClient.post", new_callable=AsyncMock) as mock:
aggr_result_dict = _get_aggr_result_dict(run_id, aggregation_inputs)
mock.return_value = httpx.Response(200, json=aggr_result_dict)
aggr_result = await mock_executor_proxy.exec_aggregation_async(batch_inputs, aggregation_inputs, run_id)
assert aggr_result.metrics == {"accuracy": 0.5}
assert len(aggr_result.node_run_infos) == 1
assert aggr_result.node_run_infos["aggregation"].flow_run_id == run_id
assert aggr_result.node_run_infos["aggregation"].inputs == aggregation_inputs
assert aggr_result.node_run_infos["aggregation"].status == Status.Completed
@pytest.mark.asyncio
async def test_ensure_executor_startup_when_no_error(self):
mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
with patch.object(APIBasedExecutorProxy, "ensure_executor_health", new_callable=AsyncMock) as mock:
with patch.object(APIBasedExecutorProxy, "_check_startup_error_from_file") as mock_check_startup_error:
await mock_executor_proxy.ensure_executor_startup("")
mock_check_startup_error.assert_not_called()
mock.assert_called_once()
@pytest.mark.asyncio
async def test_ensure_executor_startup_when_not_healthy(self):
# empty error file
error_file = Path(mkdtemp()) / "error.json"
error_file.touch()
mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
with patch.object(APIBasedExecutorProxy, "ensure_executor_health", new_callable=AsyncMock) as mock:
mock.side_effect = ExecutorServiceUnhealthy("executor unhealthy")
with pytest.raises(ExecutorServiceUnhealthy) as ex:
await mock_executor_proxy.ensure_executor_startup(error_file)
assert ex.value.message == "executor unhealthy"
mock.assert_called_once()
@pytest.mark.asyncio
async def test_ensure_executor_startup_when_existing_validation_error(self):
# prepare the error file
error_file = Path(mkdtemp()) / "error.json"
error_message = "Connection 'aoai_conn' not found"
error_dict = ExceptionPresenter.create(ConnectionNotFound(message=error_message)).to_dict()
with open(error_file, "w") as file:
json.dump(error_dict, file, indent=4)
mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
with patch.object(APIBasedExecutorProxy, "ensure_executor_health", new_callable=AsyncMock) as mock:
mock.side_effect = ExecutorServiceUnhealthy("executor unhealthy")
with pytest.raises(ValidationException) as ex:
await mock_executor_proxy.ensure_executor_startup(error_file)
assert ex.value.message == error_message
assert ex.value.target == ErrorTarget.BATCH
@pytest.mark.asyncio
async def test_ensure_executor_health_when_healthy(self):
mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
with patch.object(APIBasedExecutorProxy, "_check_health", return_value=True) as mock:
await mock_executor_proxy.ensure_executor_health()
mock.assert_called_once()
@pytest.mark.asyncio
async def test_ensure_executor_health_when_unhealthy(self):
mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
with patch.object(APIBasedExecutorProxy, "_check_health", return_value=False) as mock:
with pytest.raises(ExecutorServiceUnhealthy):
await mock_executor_proxy.ensure_executor_health()
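            # ensure_executor_health keeps polling _check_health (20 attempts here) before raising
            # ExecutorServiceUnhealthy.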
assert mock.call_count == 20
@pytest.mark.asyncio
async def test_ensure_executor_health_when_not_active(self):
mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
with patch.object(APIBasedExecutorProxy, "_check_health", return_value=False) as mock:
with patch.object(APIBasedExecutorProxy, "_is_executor_active", return_value=False):
with pytest.raises(ExecutorServiceUnhealthy):
await mock_executor_proxy.ensure_executor_health()
mock.assert_not_called()
@pytest.mark.asyncio
@pytest.mark.parametrize(
"mock_value, expected_result",
[
(httpx.Response(200), True),
(httpx.Response(500), False),
(Exception("error"), False),
],
)
async def test_check_health(self, mock_value, expected_result):
mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
with patch("httpx.AsyncClient.get", new_callable=AsyncMock) as mock:
mock.return_value = mock_value
assert await mock_executor_proxy._check_health() is expected_result
@pytest.mark.asyncio
@pytest.mark.parametrize(
"response, expected_result",
[
(
httpx.Response(200, json={"result": "test"}),
{"result": "test"},
),
(
httpx.Response(500, json={"error": "test error"}),
"test error",
),
(
httpx.Response(400, json={"detail": "test"}),
{
"message": 'Unexpected error when executing a line, status code: 400, error: {"detail": "test"}',
"messageFormat": (
"Unexpected error when executing a line, " "status code: {status_code}, error: {error}"
),
"messageParameters": {
"status_code": "400",
"error": '{"detail": "test"}',
},
"referenceCode": "Unknown",
"code": "SystemError",
"innerError": {
"code": "UnexpectedError",
"innerError": None,
},
},
),
(
httpx.Response(502, text="test"),
{
"message": "Unexpected error when executing a line, status code: 502, error: test",
"messageFormat": (
"Unexpected error when executing a line, " "status code: {status_code}, error: {error}"
),
"messageParameters": {
"status_code": "502",
"error": "test",
},
"referenceCode": "Unknown",
"code": "SystemError",
"innerError": {
"code": "UnexpectedError",
"innerError": None,
},
},
),
],
)
async def test_process_http_response(self, response, expected_result):
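        # A 200 response returns the parsed JSON body; error responses either surface the service's
        # "error" field or are wrapped into an UnexpectedError-style SystemError dict.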
mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
assert mock_executor_proxy._process_http_response(response) == expected_result
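# Minimal concrete proxy used by the tests above: it reports a fixed local endpoint and
# skips any real executor startup in create().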
class MockAPIBasedExecutorProxy(APIBasedExecutorProxy):
@property
def api_endpoint(self) -> str:
return "http://localhost:8080"
@classmethod
async def create(
cls,
flow_file: Path,
working_dir: Optional[Path] = None,
*,
connections: Optional[dict] = None,
storage: Optional[AbstractRunStorage] = None,
**kwargs,
) -> "MockAPIBasedExecutorProxy":
return MockAPIBasedExecutorProxy()
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/batch/test_result.py | from datetime import datetime
import pytest
from promptflow.batch._result import BatchResult, ErrorSummary, LineError, SystemMetrics
from promptflow.contracts.run_info import FlowRunInfo
from promptflow.contracts.run_info import RunInfo as NodeRunInfo
from promptflow.contracts.run_info import Status
from promptflow.executor._result import AggregationResult, LineResult
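# Helpers below build synthetic node/flow run infos and line/aggregation results so that
# BatchResult, ErrorSummary and SystemMetrics can be exercised without executing a flow.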
def get_node_run_infos(node_dict: dict, index=None, api_calls=None, system_metrics=None):
return {
k: NodeRunInfo(
node=k,
flow_run_id="flow_run_id",
run_id=f"{index}_run_id_{k}",
status=v,
inputs=[],
output={},
metrics={},
error={"code": "UserError", "message": "test message"} if v == Status.Failed else None,
parent_run_id="",
start_time=None,
end_time=None,
index=index,
api_calls=api_calls,
system_metrics=system_metrics,
)
for k, v in node_dict.items()
}
def get_flow_run_info(status_dict: dict, index: int):
status = Status.Failed if any(status == Status.Failed for status in status_dict.values()) else Status.Completed
error = {"code": "UserError", "message": "test message"} if status == Status.Failed else None
return FlowRunInfo(
run_id=f"{index}_run_id",
status=status,
error=error,
inputs={},
output={},
metrics={},
request=None,
parent_run_id="",
root_run_id="",
source_run_id="",
flow_id="",
start_time=datetime.utcnow(),
end_time=datetime.utcnow(),
index=index,
)
def get_line_results(line_dict: dict, api_calls=None, system_metrics=None):
return [
LineResult(
output={},
aggregation_inputs={},
run_info=get_flow_run_info(status_dict=v, index=k),
node_run_infos=get_node_run_infos(node_dict=v, index=k, api_calls=api_calls, system_metrics=system_metrics),
)
for k, v in line_dict.items()
]
def get_aggregation_result(aggr_dict: dict, api_calls=None, system_metrics=None):
return AggregationResult(
output={},
metrics={},
node_run_infos=get_node_run_infos(node_dict=aggr_dict, api_calls=api_calls, system_metrics=system_metrics),
)
def get_batch_result(line_dict, aggr_dict, line_api_calls=None, aggr_api_calls=None):
line_results = get_line_results(line_dict=line_dict, api_calls=line_api_calls)
aggr_result = get_aggregation_result(aggr_dict=aggr_dict, api_calls=aggr_api_calls)
return BatchResult.create(datetime.utcnow(), datetime.utcnow(), line_results=line_results, aggr_result=aggr_result)
def get_api_call(type, name, inputs={}, output={}, children=None):
return {"type": type, "name": name, "inputs": inputs, "output": output, "children": children}
@pytest.mark.unittest
class TestBatchResult:
def test_node_status(self):
line_dict = {
0: {"node_0": Status.Completed, "node_1": Status.Completed, "node_2": Status.Completed},
1: {"node_0": Status.Completed, "node_1": Status.Failed, "node_2": Status.Completed},
2: {"node_0": Status.Completed, "node_1": Status.Completed, "node_2": Status.Bypassed},
}
aggr_dict = {"aggr_0": Status.Completed, "aggr_1": Status.Failed, "aggr_2": Status.Bypassed}
batch_result = get_batch_result(line_dict=line_dict, aggr_dict=aggr_dict)
assert batch_result.total_lines == 3
assert batch_result.completed_lines == 2
assert batch_result.failed_lines == 1
assert batch_result.node_status == {
"node_0.completed": 3,
"node_1.completed": 2,
"node_1.failed": 1,
"node_2.completed": 2,
"node_2.bypassed": 1,
"aggr_0.completed": 1,
"aggr_1.failed": 1,
"aggr_2.bypassed": 1,
}
def test_system_metrics(self):
from openai.types.completion import Completion, CompletionChoice
line_dict = {0: {"node_0": Status.Completed}}
aggr_dict = {"aggr_0": Status.Completed}
api_call_1 = get_api_call(
"LLM",
"openai.resources.completions.Completions.create",
inputs={"prompt": "Please tell me a joke.", "model": "text-davinci-003"},
output={"choices": [{"text": "text"}]},
)
api_call_2 = get_api_call(
"LLM",
"openai.resources.completions.Completions.create",
inputs={
"prompt": ["Please tell me a joke.", "Please tell me a joke about fruit."],
"model": "text-davinci-003",
},
output=[
Completion(
choices=[CompletionChoice(text="text", finish_reason="stop", index=0, logprobs=None)],
id="id",
created=0,
model="model",
object="text_completion",
),
Completion(
choices=[CompletionChoice(text="text", finish_reason="stop", index=0, logprobs=None)],
id="id",
created=0,
model="model",
object="text_completion",
),
],
)
line_api_calls = get_api_call("Chain", "Chain", children=[api_call_1, api_call_2])
aggr_api_call = get_api_call(
"LLM",
"openai.resources.chat.completions.Completions.create",
inputs={
"messages": [{"system": "You are a helpful assistant.", "user": "Please tell me a joke."}],
"model": "gpt-35-turbo",
},
output={"choices": [{"message": {"content": "content"}}]},
)
batch_result = get_batch_result(
line_dict=line_dict, aggr_dict=aggr_dict, line_api_calls=[line_api_calls], aggr_api_calls=[aggr_api_call]
)
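        # The expected token counts are presumably derived by estimating tokens from the mocked
        # prompt/completion payloads above rather than from any real API usage.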
assert batch_result.system_metrics.total_tokens == 42
assert batch_result.system_metrics.prompt_tokens == 38
assert batch_result.system_metrics.completion_tokens == 4
system_metrics_dict = {
"total_tokens": 42,
"prompt_tokens": 38,
"completion_tokens": 4,
}
assert system_metrics_dict.items() <= batch_result.system_metrics.to_dict().items()
@pytest.mark.parametrize(
"api_call",
[
get_api_call("LLM", "Completion", inputs="invalid"),
get_api_call("LLM", "Completion", output="invalid"),
get_api_call("LLM", "Invalid"),
get_api_call("LLM", "Completion"),
get_api_call("LLM", "Completion", inputs={"api_type": "azure"}),
get_api_call("LLM", "ChatCompletion", inputs={"api_type": "azure", "engine": "invalid"}),
],
)
def test_invalid_api_calls(self, api_call):
line_dict = {0: {"node_0": Status.Completed}}
batch_result = get_batch_result(line_dict=line_dict, aggr_dict={}, line_api_calls=[api_call])
assert batch_result.system_metrics.total_tokens == 0
assert batch_result.system_metrics.completion_tokens == 0
assert batch_result.system_metrics.prompt_tokens == 0
def test_error_summary(self):
line_dict = {
0: {"node_0": Status.Completed, "node_1": Status.Completed, "node_2": Status.Completed},
1: {"node_0": Status.Completed, "node_1": Status.Failed, "node_2": Status.Completed},
2: {"node_0": Status.Completed, "node_1": Status.Completed, "node_2": Status.Bypassed},
}
aggr_dict = {
"aggr_0": Status.Completed,
"aggr_1": Status.Failed,
"aggr_2": Status.Bypassed,
"aggr_4": Status.Failed,
}
batch_result = get_batch_result(line_dict=line_dict, aggr_dict=aggr_dict)
assert batch_result.total_lines == 3
assert batch_result.failed_lines == 1
assert batch_result.error_summary.failed_system_error_lines == 0
assert batch_result.error_summary.failed_user_error_lines == 1
assert batch_result.error_summary.error_list == [
LineError(line_number=1, error={"code": "UserError", "message": "test message"}),
]
assert batch_result.error_summary.error_list[0].to_dict() == {
"line_number": 1,
"error": {
"code": "UserError",
"message": "test message",
},
}
assert batch_result.error_summary.aggr_error_dict == {
"aggr_1": {"code": "UserError", "message": "test message"},
"aggr_4": {"code": "UserError", "message": "test message"},
}
@pytest.mark.unittest
class TestErrorSummary:
def test_create(self):
line_dict = {
0: {"node_0": Status.Failed, "node_1": Status.Completed, "node_2": Status.Completed},
1: {"node_0": Status.Completed, "node_1": Status.Failed, "node_2": Status.Completed},
}
line_results = get_line_results(line_dict)
line_results[0].run_info.error = {"code": "SystemError", "message": "test system error message"}
aggr_dict = {"aggr_0": Status.Completed, "aggr_1": Status.Failed}
aggr_result = get_aggregation_result(aggr_dict)
error_summary = ErrorSummary.create(line_results, aggr_result)
assert error_summary.failed_user_error_lines == 1
assert error_summary.failed_system_error_lines == 1
assert error_summary.error_list == [
LineError(line_number=0, error={"code": "SystemError", "message": "test system error message"}),
LineError(line_number=1, error={"code": "UserError", "message": "test message"}),
]
assert error_summary.aggr_error_dict == {"aggr_1": {"code": "UserError", "message": "test message"}}
@pytest.mark.unittest
class TestSystemMetrics:
    def test_create(self):
line_dict = {
0: {"node_0": Status.Completed, "node_1": Status.Completed},
1: {"node_0": Status.Completed, "node_1": Status.Completed},
}
line_system_metrics = {
"total_tokens": 5,
"prompt_tokens": 3,
"completion_tokens": 2,
}
line_results = get_line_results(line_dict, system_metrics=line_system_metrics)
aggr_dict = {"aggr_0": Status.Completed}
# invalid system metrics
aggr_system_metrics = {
"total_tokens": 10,
"prompt_tokens": 6,
}
aggr_result = get_aggregation_result(aggr_dict, system_metrics=aggr_system_metrics)
system_metrics = SystemMetrics.create(datetime.utcnow(), datetime.utcnow(), line_results, aggr_result)
assert system_metrics.total_tokens == 20
assert system_metrics.prompt_tokens == 12
assert system_metrics.completion_tokens == 8
system_metrics_dict = {
"total_tokens": 20,
"prompt_tokens": 12,
"completion_tokens": 8,
}
assert system_metrics_dict.items() <= system_metrics.to_dict().items()
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/batch/test_batch_inputs_processor.py | import json
from pathlib import Path
from tempfile import mkdtemp
import pytest
from promptflow._core._errors import UnexpectedError
from promptflow._utils.utils import dump_list_to_jsonl
from promptflow.batch._batch_inputs_processor import BatchInputsProcessor, apply_inputs_mapping
from promptflow.batch._errors import EmptyInputsData, InputMappingError
from promptflow.contracts.flow import FlowInputDefinition
from promptflow.contracts.tool import ValueType
from ...utils import DATA_ROOT
@pytest.mark.unittest
class TestBatchInputsProcessor:
def test_process_batch_inputs(self):
data = [
{"question": "What's promptflow?"},
{"question": "Do you like promptflow?"},
]
data_file = Path(mkdtemp()) / "data.jsonl"
dump_list_to_jsonl(data_file, data)
input_dirs = {"data": data_file}
inputs_mapping = {"question": "${data.question}"}
batch_inputs = BatchInputsProcessor("", {}).process_batch_inputs(input_dirs, inputs_mapping)
assert batch_inputs == [
{"line_number": 0, "question": "What's promptflow?"},
{"line_number": 1, "question": "Do you like promptflow?"},
]
def test_process_batch_inputs_error(self):
data_file = Path(mkdtemp()) / "data.jsonl"
data_file.touch()
input_dirs = {"data": data_file}
inputs_mapping = {"question": "${data.question}"}
with pytest.raises(EmptyInputsData) as e:
BatchInputsProcessor("", {}).process_batch_inputs(input_dirs, inputs_mapping)
expected_error_message = (
"Couldn't find any inputs data at the given input paths. "
"Please review the provided path and consider resubmitting."
)
assert expected_error_message in e.value.message
def test_resolve_data_from_input_path(self):
inputs_dir = Path(mkdtemp())
# data.jsonl
data = [
{"question": "What's promptflow?"},
{"question": "Do you like promptflow?"},
]
data_file = inputs_dir / "data.jsonl"
dump_list_to_jsonl(data_file, data)
# inputs.json
inputs_file = inputs_dir / "inputs.json"
with open(inputs_file, "w") as file:
file.write(json.dumps(data))
result = BatchInputsProcessor("", {})._resolve_data_from_input_path(inputs_dir)
assert result == data + data
        # when max_lines_count is specified, only that many lines are loaded
result = BatchInputsProcessor("", {}, max_lines_count=1)._resolve_data_from_input_path(inputs_dir)
assert result == [
{"question": "What's promptflow?"},
]
@pytest.mark.parametrize(
"data_path",
[
"10k.jsonl",
"10k",
],
)
def test_resolve_data_from_input_path_with_large_data(self, data_path):
data_path = DATA_ROOT / "load_data_cases" / data_path
result = BatchInputsProcessor("", {})._resolve_data_from_input_path(Path(data_path))
assert isinstance(result, list)
assert len(result) == 10000
# specify max_rows_count
max_rows_count = 5
head_results = BatchInputsProcessor(
working_dir="",
flow_inputs={},
max_lines_count=max_rows_count,
)._resolve_data_from_input_path(Path(data_path))
assert isinstance(head_results, list)
assert len(head_results) == max_rows_count
assert result[:max_rows_count] == head_results
@pytest.mark.parametrize(
"inputs, inputs_mapping, expected",
[
(
{"data.test": {"question": "longer input key has lower priority."}, "line_number": 0},
{
"question": "${data.test.question}", # Question from the data
"value": 1,
},
{"question": "longer input key has lower priority.", "value": 1, "line_number": 0},
),
(
{
# Missing line_number is also valid data.
"data.test": {"question": "longer input key has lower priority."},
"data": {"test.question": "Shorter input key has higher priority."},
},
{
"question": "${data.test.question}", # Question from the data
"deployment_name": "text-davinci-003", # literal value
},
{
"question": "Shorter input key has higher priority.",
"deployment_name": "text-davinci-003",
},
),
],
)
def test_apply_inputs_mapping(self, inputs, inputs_mapping, expected):
result = apply_inputs_mapping(inputs, inputs_mapping)
assert expected == result, "Expected: {}, Actual: {}".format(expected, result)
@pytest.mark.parametrize(
"inputs, inputs_mapping, error_code, error_message",
[
(
{
"baseline": {"answer": 123, "question": "dummy"},
},
{
"question": "${baseline.output}",
"answer": "${data.output}",
},
InputMappingError,
"Couldn't find these mapping relations: ${baseline.output}, ${data.output}. "
"Please make sure your input mapping keys and values match your YAML input section and input data.",
),
],
)
def test_apply_inputs_mapping_error(self, inputs, inputs_mapping, error_code, error_message):
with pytest.raises(error_code) as e:
apply_inputs_mapping(inputs, inputs_mapping)
assert error_message in str(e.value), "Expected: {}, Actual: {}".format(error_message, str(e.value))
@pytest.mark.parametrize(
"inputs, expected",
[
(
{
"data": [{"question": "q1", "answer": "ans1"}, {"question": "q2", "answer": "ans2"}],
"output": [{"answer": "output_ans1"}, {"answer": "output_ans2"}],
},
[
# Get 2 lines data.
{
"data": {"question": "q1", "answer": "ans1"},
"output": {"answer": "output_ans1"},
"line_number": 0,
},
{
"data": {"question": "q2", "answer": "ans2"},
"output": {"answer": "output_ans2"},
"line_number": 1,
},
],
),
(
{
"data": [{"question": "q1", "answer": "ans1"}, {"question": "q2", "answer": "ans2"}],
"output": [{"answer": "output_ans2", "line_number": 1}],
},
[
# Only one line valid data.
{
"data": {"question": "q2", "answer": "ans2"},
"output": {"answer": "output_ans2", "line_number": 1},
"line_number": 1,
},
],
),
],
)
def test_merge_input_dicts_by_line(self, inputs, expected):
result = BatchInputsProcessor("", {})._merge_input_dicts_by_line(inputs)
json.dumps(result)
assert expected == result, "Expected: {}, Actual: {}".format(expected, result)
@pytest.mark.parametrize(
"inputs, error_code, error_message",
[
(
{
"baseline": [],
},
InputMappingError,
"The input for batch run is incorrect. Input from key 'baseline' is an empty list, which means we "
"cannot generate a single line input for the flow run. Please rectify the input and try again.",
),
(
{
"data": [{"question": "q1", "answer": "ans1"}, {"question": "q2", "answer": "ans2"}],
"baseline": [{"answer": "baseline_ans2"}],
},
InputMappingError,
"The input for batch run is incorrect. Line numbers are not aligned. Some lists have dictionaries "
"missing the 'line_number' key, and the lengths of these lists are different. List lengths are: "
"{'data': 2, 'baseline': 1}. Please make sure these lists have the same length "
"or add 'line_number' key to each dictionary.",
),
],
)
def test_merge_input_dicts_by_line_error(self, inputs, error_code, error_message):
with pytest.raises(error_code) as e:
BatchInputsProcessor("", {})._merge_input_dicts_by_line(inputs)
assert error_message == str(e.value), "Expected: {}, Actual: {}".format(error_message, str(e.value))
@pytest.mark.parametrize("inputs_mapping", [{"question": "${data.question}"}, {}])
def test_complete_inputs_mapping_by_default_value(self, inputs_mapping):
inputs = {
"question": None,
"groundtruth": None,
"input_with_default_value": FlowInputDefinition(type=ValueType.BOOL, default=False),
}
updated_inputs_mapping = BatchInputsProcessor("", inputs)._complete_inputs_mapping_by_default_value(
inputs_mapping
)
assert "input_with_default_value" not in updated_inputs_mapping
assert updated_inputs_mapping == {"question": "${data.question}", "groundtruth": "${data.groundtruth}"}
@pytest.mark.parametrize(
"inputs, inputs_mapping, expected",
[
(
# Use default mapping generated from flow inputs.
{
"data": [{"question": "q1", "groundtruth": "ans1"}, {"question": "q2", "groundtruth": "ans2"}],
},
{},
[
{
"question": "q1",
"groundtruth": "ans1",
"line_number": 0,
},
{
"question": "q2",
"groundtruth": "ans2",
"line_number": 1,
},
],
),
(
# Partially use default mapping generated from flow inputs.
{
"data": [{"question": "q1", "groundtruth": "ans1"}, {"question": "q2", "groundtruth": "ans2"}],
},
{
"question": "${data.question}",
},
[
{
"question": "q1",
"groundtruth": "ans1",
"line_number": 0,
},
{
"question": "q2",
"groundtruth": "ans2",
"line_number": 1,
},
],
),
(
{
"data": [
{"question": "q1", "answer": "ans1", "line_number": 5},
{"question": "q2", "answer": "ans2", "line_number": 6},
],
"baseline": [
{"answer": "baseline_ans1", "line_number": 5},
{"answer": "baseline_ans2", "line_number": 7},
],
},
{
"question": "${data.question}", # Question from the data
"groundtruth": "${data.answer}", # Answer from the data
"baseline": "${baseline.answer}", # Answer from the baseline
"deployment_name": "text-davinci-003", # literal value
"line_number": "${data.question}", # line_number mapping should be ignored
},
[
{
"question": "q1",
"groundtruth": "ans1",
"baseline": "baseline_ans1",
"deployment_name": "text-davinci-003",
"line_number": 5,
},
],
),
],
)
def test_validate_and_apply_inputs_mapping(self, inputs, inputs_mapping, expected):
flow_inputs = {"question": None, "groundtruth": None}
result = BatchInputsProcessor("", flow_inputs)._validate_and_apply_inputs_mapping(inputs, inputs_mapping)
assert expected == result, "Expected: {}, Actual: {}".format(expected, result)
def test_validate_and_apply_inputs_mapping_empty_input(self):
inputs = {
"data": [{"question": "q1", "answer": "ans1"}, {"question": "q2", "answer": "ans2"}],
"baseline": [{"answer": "baseline_ans1"}, {"answer": "baseline_ans2"}],
}
result = BatchInputsProcessor("", {})._validate_and_apply_inputs_mapping(inputs, {})
assert result == [
{"line_number": 0},
{"line_number": 1},
], "Empty flow inputs and inputs_mapping should return list with empty dicts."
@pytest.mark.parametrize(
"inputs_mapping, error_code",
[
(
{"question": "${question}"},
InputMappingError,
),
],
)
def test_validate_and_apply_inputs_mapping_error(self, inputs_mapping, error_code):
flow_inputs = {"question": None}
with pytest.raises(error_code) as _:
BatchInputsProcessor("", flow_inputs)._validate_and_apply_inputs_mapping(
inputs={}, inputs_mapping=inputs_mapping
)
@pytest.mark.parametrize(
"inputs, inputs_mapping, error_code, error_message",
[
(
{
"data": [{"question": "q1", "answer": "ans1"}, {"question": "q2", "answer": "ans2"}],
},
None,
UnexpectedError,
"The input for batch run is incorrect. Please make sure to set up a proper input mapping "
"before proceeding. If you need additional help, feel free to contact support for further assistance.",
),
],
)
def test_inputs_mapping_for_all_lines_error(self, inputs, inputs_mapping, error_code, error_message):
with pytest.raises(error_code) as e:
BatchInputsProcessor("", {})._apply_inputs_mapping_for_all_lines(inputs, inputs_mapping)
assert error_message == str(e.value), "Expected: {}, Actual: {}".format(error_message, str(e.value))
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_execution_utils.py | import pytest
from promptflow._utils.execution_utils import apply_default_value_for_input
from promptflow.contracts.flow import FlowInputDefinition
from promptflow.contracts.tool import ValueType
@pytest.mark.unittest
class TestFlowExecutor:
@pytest.mark.parametrize(
"flow_inputs, inputs, expected_inputs",
[
(
{
"input_from_default": FlowInputDefinition(type=ValueType.STRING, default="default_value"),
},
None, # Could handle None input
{"input_from_default": "default_value"},
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.STRING, default="default_value"),
},
{},
{"input_from_default": "default_value"},
),
(
{
"input_no_default": FlowInputDefinition(type=ValueType.STRING),
},
{},
{}, # No default value for input.
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.STRING, default="default_value"),
},
{"input_from_default": "input_value", "another_key": "input_value"},
{"input_from_default": "input_value", "another_key": "input_value"},
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.BOOL, default=False),
},
{},
{"input_from_default": False},
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.LIST, default=[]),
},
{},
{"input_from_default": []},
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.OBJECT, default={}),
},
{},
{"input_from_default": {}},
),
],
)
def test_apply_default_value_for_input(self, flow_inputs, inputs, expected_inputs):
result = apply_default_value_for_input(flow_inputs, inputs)
assert result == expected_inputs
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_tool_utils.py | import inspect
from typing import Union
import pytest
from promptflow._core._errors import DuplicateToolMappingError
from promptflow._utils.tool_utils import (
DynamicListError,
ListFunctionResponseError,
_find_deprecated_tools,
append_workspace_triple_to_func_input_params,
function_to_interface,
load_function_from_function_path,
param_to_definition,
validate_dynamic_list_func_response_type,
)
from promptflow.connections import AzureOpenAIConnection, CustomConnection
from promptflow.contracts.tool import ValueType, Tool, ToolType
# mock functions for dynamic list function testing
def mock_dynamic_list_func1():
pass
def mock_dynamic_list_func2(input1):
pass
def mock_dynamic_list_func3(input1, input2):
pass
def mock_dynamic_list_func4(input1, input2, **kwargs):
pass
def mock_dynamic_list_func5(input1, input2, subscription_id):
pass
def mock_dynamic_list_func6(input1, input2, subscription_id, resource_group_name, workspace_name):
pass
def mock_dynamic_list_func7(input1, input2, subscription_id, **kwargs):
pass
def mock_dynamic_list_func8(input1, input2, subscription_id, resource_group_name, workspace_name, **kwargs):
pass
@pytest.mark.unittest
class TestToolUtils:
def test_function_to_interface(self):
def func(conn: [AzureOpenAIConnection, CustomConnection], input: [str, int]):
pass
input_defs, _, connection_types, _ = function_to_interface(func)
assert len(input_defs) == 2
assert input_defs["conn"].type == ["AzureOpenAIConnection", "CustomConnection"]
assert input_defs["input"].type == [ValueType.OBJECT]
assert connection_types == [["AzureOpenAIConnection", "CustomConnection"]]
def test_function_to_interface_with_invalid_initialize_inputs(self):
def func(input_str: str):
pass
with pytest.raises(Exception) as exec_info:
function_to_interface(func, {"input_str": "test"})
assert "Duplicate inputs found from" in exec_info.value.args[0]
def test_function_to_interface_with_kwargs(self):
def func(input_str: str, **kwargs):
pass
_, _, _, enable_kwargs = function_to_interface(func)
assert enable_kwargs is True
def func(input_str: str):
pass
_, _, _, enable_kwargs = function_to_interface(func)
assert enable_kwargs is False
def test_param_to_definition(self):
from promptflow._sdk.entities import CustomStrongTypeConnection
from promptflow.contracts.tool import Secret
class MyFirstConnection(CustomStrongTypeConnection):
api_key: Secret
api_base: str
class MySecondConnection(CustomStrongTypeConnection):
api_key: Secret
api_base: str
def some_func(
conn1: MyFirstConnection,
conn2: Union[CustomConnection, MyFirstConnection],
conn3: Union[MyFirstConnection, CustomConnection],
conn4: Union[MyFirstConnection, MySecondConnection],
conn5: CustomConnection,
conn6: Union[CustomConnection, int],
conn7: Union[MyFirstConnection, int],
):
pass
sig = inspect.signature(some_func)
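        # Custom strong type connections are exposed as CustomConnection with the concrete class
        # names captured in custom_type; unions that include non-connection types fall back to OBJECT.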
input_def, _ = param_to_definition(sig.parameters.get("conn1"), gen_custom_type_conn=True)
assert input_def.type == ["CustomConnection"]
assert input_def.custom_type == ["MyFirstConnection"]
input_def, _ = param_to_definition(sig.parameters.get("conn2"), gen_custom_type_conn=True)
assert input_def.type == ["CustomConnection"]
assert input_def.custom_type == ["MyFirstConnection"]
input_def, _ = param_to_definition(sig.parameters.get("conn3"), gen_custom_type_conn=True)
assert input_def.type == ["CustomConnection"]
assert input_def.custom_type == ["MyFirstConnection"]
input_def, _ = param_to_definition(sig.parameters.get("conn4"), gen_custom_type_conn=True)
assert input_def.type == ["CustomConnection"]
assert input_def.custom_type == ["MyFirstConnection", "MySecondConnection"]
input_def, _ = param_to_definition(sig.parameters.get("conn5"), gen_custom_type_conn=True)
assert input_def.type == ["CustomConnection"]
assert input_def.custom_type is None
input_def, _ = param_to_definition(sig.parameters.get("conn6"), gen_custom_type_conn=True)
assert input_def.type == [ValueType.OBJECT]
assert input_def.custom_type is None
input_def, _ = param_to_definition(sig.parameters.get("conn7"), gen_custom_type_conn=True)
assert input_def.type == [ValueType.OBJECT]
assert input_def.custom_type is None
@pytest.mark.parametrize(
"func, func_input_params_dict, use_ws_triple, expected_res",
[
(mock_dynamic_list_func1, None, False, {}),
(mock_dynamic_list_func2, {"input1": "value1"}, False, {"input1": "value1"}),
(
mock_dynamic_list_func3,
{"input1": "value1", "input2": "value2"},
False,
{"input1": "value1", "input2": "value2"},
),
(mock_dynamic_list_func3, {"input1": "value1"}, False, {"input1": "value1"}),
(mock_dynamic_list_func3, {"input1": "value1"}, True, {"input1": "value1"}),
(
mock_dynamic_list_func4,
{"input1": "value1"},
True,
{
"input1": "value1",
"subscription_id": "mock_subscription_id",
"resource_group_name": "mock_resource_group",
"workspace_name": "mock_workspace_name",
},
),
(
mock_dynamic_list_func5,
{"input1": "value1"},
True,
{"input1": "value1", "subscription_id": "mock_subscription_id"},
),
(
mock_dynamic_list_func5,
{"input1": "value1", "subscription_id": "input_subscription_id"},
True,
{"input1": "value1", "subscription_id": "input_subscription_id"},
),
(
mock_dynamic_list_func6,
{"input1": "value1"},
True,
{
"input1": "value1",
"subscription_id": "mock_subscription_id",
"resource_group_name": "mock_resource_group",
"workspace_name": "mock_workspace_name",
},
),
(
mock_dynamic_list_func6,
{
"input1": "value1",
"workspace_name": "input_workspace_name",
},
True,
{
"input1": "value1",
"workspace_name": "input_workspace_name",
"subscription_id": "mock_subscription_id",
"resource_group_name": "mock_resource_group",
},
),
(
mock_dynamic_list_func7,
{"input1": "value1"},
True,
{
"input1": "value1",
"subscription_id": "mock_subscription_id",
"resource_group_name": "mock_resource_group",
"workspace_name": "mock_workspace_name",
},
),
(
mock_dynamic_list_func7,
{"input1": "value1", "subscription_id": "input_subscription_id"},
True,
{
"input1": "value1",
"subscription_id": "input_subscription_id",
"resource_group_name": "mock_resource_group",
"workspace_name": "mock_workspace_name",
},
),
(
mock_dynamic_list_func8,
{"input1": "value1"},
True,
{
"input1": "value1",
"subscription_id": "mock_subscription_id",
"resource_group_name": "mock_resource_group",
"workspace_name": "mock_workspace_name",
},
),
(
mock_dynamic_list_func8,
{
"input1": "value1",
"subscription_id": "input_subscription_id",
"resource_group_name": "input_resource_group",
"workspace_name": "input_workspace_name",
},
True,
{
"input1": "value1",
"subscription_id": "input_subscription_id",
"resource_group_name": "input_resource_group",
"workspace_name": "input_workspace_name",
},
),
],
)
def test_append_workspace_triple_to_func_input_params(
self, func, func_input_params_dict, use_ws_triple, expected_res, mocked_ws_triple
):
ws_triple_dict = mocked_ws_triple._asdict() if use_ws_triple else None
func_sig_params = inspect.signature(func).parameters
actual_combined_inputs = append_workspace_triple_to_func_input_params(
func_sig_params=func_sig_params,
func_input_params_dict=func_input_params_dict,
ws_triple_dict=ws_triple_dict,
)
assert actual_combined_inputs == expected_res
@pytest.mark.parametrize(
"res",
[
(
[
{
"value": "fig0",
"display_value": "My_fig0",
"hyperlink": "https://www.bing.com/search?q=fig0",
"description": "this is 0 item",
},
{
"value": "kiwi1",
"display_value": "My_kiwi1",
"hyperlink": "https://www.bing.com/search?q=kiwi1",
"description": "this is 1 item",
},
]
),
([{"value": "fig0"}, {"value": "kiwi1"}]),
([{"value": "fig0", "display_value": "My_fig0"}, {"value": "kiwi1", "display_value": "My_kiwi1"}]),
(
[
{"value": "fig0", "display_value": "My_fig0", "hyperlink": "https://www.bing.com/search?q=fig0"},
{
"value": "kiwi1",
"display_value": "My_kiwi1",
"hyperlink": "https://www.bing.com/search?q=kiwi1",
},
]
),
([{"value": "fig0", "hyperlink": "https://www.bing.com/search?q=fig0"}]),
(
[
{"value": "fig0", "display_value": "My_fig0", "description": "this is 0 item"},
{
"value": "kiwi1",
"display_value": "My_kiwi1",
"hyperlink": "https://www.bing.com/search?q=kiwi1",
"description": "this is 1 item",
},
]
),
],
)
def test_validate_dynamic_list_func_response_type(self, res):
validate_dynamic_list_func_response_type(response=res, f="mock_func")
@pytest.mark.parametrize(
"res, err_msg",
[
(None, "mock_func response can not be empty."),
([], "mock_func response can not be empty."),
(["a", "b"], "mock_func response must be a list of dict. a is not a dict."),
({"a": "b"}, "mock_func response must be a list."),
([{"a": "b"}], "mock_func response dict must have 'value' key."),
([{"value": 1 + 2j}], "mock_func response dict value \\(1\\+2j\\) is not json serializable."),
],
)
def test_validate_dynamic_list_func_response_type_with_error(self, res, err_msg):
error_message = (
f"Unable to display list of items due to '{err_msg}'. \nPlease contact the tool "
f"author/support team for troubleshooting assistance."
)
with pytest.raises(ListFunctionResponseError, match=error_message):
validate_dynamic_list_func_response_type(response=res, f="mock_func")
def test_load_function_from_function_path(self, mock_module_with_list_func):
func_path = "my_tool_package.tools.tool_with_dynamic_list_input.my_list_func"
load_function_from_function_path(func_path)
def test_load_function_from_function_path_with_error(self, mock_module_with_list_func):
func_path = "mock_func_path"
with pytest.raises(
DynamicListError,
match="Unable to display list of items due to 'Failed to parse function from function path: "
"'mock_func_path'. Expected format: format 'my_module.my_func'. Detailed error: not enough "
"values to unpack \\(expected 2, got 1\\)'. \nPlease contact the tool author/support team for "
"troubleshooting assistance.",
):
load_function_from_function_path(func_path)
func_path = "fake_tool_pkg.tools.tool_with_dynamic_list_input.my_list_func"
with pytest.raises(
DynamicListError,
match="Unable to display list of items due to 'Failed to parse function from function path: "
"'fake_tool_pkg.tools.tool_with_dynamic_list_input.my_list_func'. Expected format: format "
"'my_module.my_func'. Detailed error: No module named 'fake_tool_pkg''. \nPlease contact the tool "
"author/support team for troubleshooting assistance.",
):
load_function_from_function_path(func_path)
func_path = "my_tool_package.tools.tool_with_dynamic_list_input.my_field"
with pytest.raises(
DynamicListError,
match="Unable to display list of items due to 'Failed to parse function from function path: "
"'my_tool_package.tools.tool_with_dynamic_list_input.my_field'. Expected format: "
"format 'my_module.my_func'. Detailed error: Unable to display list of items due to ''1' "
"is not callable.'. \nPlease contact the tool author/support team for troubleshooting assistance.",
):
load_function_from_function_path(func_path)
def test_find_deprecated_tools(self):
package_tools = {
"new_tool_1": Tool(
name="new tool 1", type=ToolType.PYTHON, inputs={}, deprecated_tools=["old_tool_1"]).serialize(),
"new_tool_2": Tool(
name="new tool 1", type=ToolType.PYTHON, inputs={}, deprecated_tools=["old_tool_1"]).serialize(),
}
with pytest.raises(DuplicateToolMappingError, match="secure operation"):
_find_deprecated_tools(package_tools)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_logger_utils.py | import io
import logging
import time
from multiprocessing.pool import ThreadPool
from pathlib import Path
from tempfile import mkdtemp
from unittest.mock import Mock
from uuid import uuid4
import pytest
from promptflow._utils.credential_scrubber import CredentialScrubber
from promptflow._utils.logger_utils import (
CredentialScrubberFormatter,
FileHandler,
FileHandlerConcurrentWrapper,
LogContext,
bulk_logger,
scrub_credentials,
update_log_path,
update_single_log_path,
)
from promptflow.contracts.run_mode import RunMode
from ...utils import load_content
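# Thread worker used by TestFileHandlerConcurrentWrapper: installs a FileHandler on the shared
# wrapper, writes a single warning line, then clears the handler again.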
def _set_handler(logger: logging.Logger, handler: FileHandler, log_content: str):
for h in logger.handlers:
if isinstance(h, FileHandlerConcurrentWrapper):
h.handler = handler
time.sleep(1)
logger.warning(log_content)
h.clear()
class DummyException(Exception):
pass
@pytest.fixture
def logger():
logger = logging.getLogger(str(uuid4()))
logger.setLevel(logging.INFO)
return logger
@pytest.fixture
def stream_handler():
stream = io.StringIO()
return logging.StreamHandler(stream)
@pytest.mark.unittest
class TestCredentialScrubberFormatter:
def test_log(self, logger, stream_handler):
"""Make sure credentials by logger.log are scrubbed."""
formatter = CredentialScrubberFormatter()
formatter.set_credential_list(["dummy secret"])
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
logger.info("testinfo&sig=signature")
logger.error("testerror&key=accountkey")
logger.warning("testwarning&sig=signature")
logger.critical("print dummy secret")
expected_log_output = (
f"testinfo&sig={CredentialScrubber.PLACE_HOLDER}\n"
f"testerror&key={CredentialScrubber.PLACE_HOLDER}\n"
f"testwarning&sig={CredentialScrubber.PLACE_HOLDER}\n"
f"print {CredentialScrubber.PLACE_HOLDER}\n"
)
assert stream_handler.stream.getvalue() == expected_log_output
def test_log_with_args(self, logger, stream_handler):
"""Make sure credentials by logger.log (in args) are scrubbed."""
formatter = CredentialScrubberFormatter()
formatter.set_credential_list(["dummy secret"])
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
logger.info("testinfo&sig=%s credential=%s", "signature", "dummy secret")
expected_log_output = (
f"testinfo&sig={CredentialScrubber.PLACE_HOLDER} " f"credential={CredentialScrubber.PLACE_HOLDER}\n"
)
assert stream_handler.stream.getvalue() == expected_log_output
def test_log_with_exc_info(self, logger, stream_handler):
"""Make sure credentials in exception are scrubbed."""
formatter = CredentialScrubberFormatter()
formatter.set_credential_list(["dummy secret"])
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
exception = DummyException("credential=dummy secret accountkey=accountkey")
logger.exception("test exception", exc_info=exception)
expected_log_output = "credential=**data_scrubbed** accountkey=**data_scrubbed**"
assert expected_log_output in stream_handler.stream.getvalue()
def test_set_credential_list_thread_safe(self):
formatter = CredentialScrubberFormatter()
def set_and_check_credential_list(credential_list):
formatter.set_credential_list(credential_list)
time.sleep(1)
assert formatter.credential_scrubber.custom_str_set == set(credential_list)
with ThreadPool(processes=3) as pool:
results = pool.map(set_and_check_credential_list, [[f"secret {i}", f"credential {i}"] for i in range(3)])
_ = list(results)
@pytest.mark.unittest
class TestFileHandlerConcurrentWrapper:
def test_set_handler_thread_safe(self):
wrapper = FileHandlerConcurrentWrapper()
logger = logging.getLogger("test execution log handler")
logger.addHandler(wrapper)
process_num = 3
folder_path = Path(mkdtemp())
log_path_list = [str(folder_path / f"log_{i}.log") for i in range(process_num)]
with ThreadPool(processes=process_num) as pool:
results = pool.starmap(
_set_handler, ((logger, FileHandler(log_path_list[i]), f"log {i}") for i in range(process_num))
)
results = list(results)
# Make sure log content is as expected.
for i, log_path in enumerate(log_path_list):
with open(log_path, "r") as f:
log = f.read()
log_lines = log.split("\n")
assert len(log_lines) == 2
assert f"log {i}" in log_lines[0]
assert log_lines[1] == ""
def test_clear(self):
wrapper = FileHandlerConcurrentWrapper()
assert wrapper.handler is None
log_path = str(Path(mkdtemp()) / "logs.log")
file_handler = FileHandler(log_path)
file_handler.close = Mock(side_effect=Exception("test exception"))
wrapper.handler = file_handler
wrapper.clear()
assert wrapper.handler is None
@pytest.mark.unittest
class TestLogContext:
def test_context_manager(self):
log_handler = FileHandlerConcurrentWrapper()
logger = logging.getLogger("test_setup_logger_context")
logger.addHandler(log_handler)
log_path = str(Path(mkdtemp()) / "test.log")
try:
log_context_initializer = LogContext(log_path).get_initializer()
log_context = log_context_initializer()
log_context.input_logger = logger
assert LogContext.get_current() is None
with log_context:
assert LogContext.get_current() is not None
# Make sure context variables are set.
inner_handler = log_handler._context_var.get()
assert isinstance(inner_handler, FileHandler)
assert isinstance(inner_handler._formatter, CredentialScrubberFormatter)
scrubber = inner_handler._formatter._context_var.get()
assert scrubber is not None
logger.warning("Print %s", "&sig=signature")
# Raise exception for test.
raise DummyException("Raise exception for test.")
except DummyException:
pass
# Make sure log content is as expected.
with open(log_path, "r") as f:
log_content = f.read()
assert f"Print &sig={CredentialScrubber.PLACE_HOLDER}" in log_content
# Make sure context variables are cleaned up.
assert log_handler._context_var.get() is None
def test_empty_file_path(self, logger, stream_handler):
logger.addHandler(stream_handler)
logger.addHandler(FileHandlerConcurrentWrapper())
with LogContext("", input_logger=logger):
logger.info("test log")
assert stream_handler.stream.getvalue() == "test log\n"
def test_update_log_path(self):
log_handler = FileHandlerConcurrentWrapper()
input_logger = logging.getLogger("input_logger")
input_logger.addHandler(log_handler)
folder_path = Path(mkdtemp())
original_log_path = str(folder_path / "original_log.log")
with LogContext(original_log_path, input_logger=input_logger, run_mode=RunMode.Batch):
bulk_logger.info("test log")
input_logger.warning("test input log")
original_log = load_content(original_log_path)
keywords = ["test log", "test input log", "execution.bulk", "input_logger", "INFO", "WARNING"]
assert all(keyword in original_log for keyword in keywords)
# Update log path
log_path = str(folder_path / "log_without_input_logger.log")
update_log_path(log_path, input_logger)
bulk_logger.info("test update log")
input_logger.warning("test update input log")
log = load_content(log_path)
keywords = ["test update log", "test update input log", "execution.bulk", "input_logger", "INFO", "WARNING"]
assert all(keyword in log for keyword in keywords)
def test_update_single_log_path(self):
log_handler = FileHandlerConcurrentWrapper()
input_logger = logging.getLogger("input_logger")
input_logger.addHandler(log_handler)
folder_path = Path(mkdtemp())
original_log_path = str(folder_path / "original_log.log")
with LogContext(original_log_path, input_logger=input_logger, run_mode=RunMode.Batch):
bulk_logger.info("test log")
input_logger.warning("test input log")
original_log = load_content(original_log_path)
keywords = ["test log", "test input log", "execution.bulk", "input_logger", "INFO", "WARNING"]
assert all(keyword in original_log for keyword in keywords)
# Update log path
bulk_log_path = str(folder_path / "update_bulk_log.log")
update_single_log_path(bulk_log_path, bulk_logger)
input_log_path = str(folder_path / "update_input_log.log")
update_single_log_path(input_log_path, input_logger)
bulk_logger.info("test update log")
input_logger.warning("test update input log")
bulk_log = load_content(bulk_log_path)
input_log = load_content(input_log_path)
bulk_keywords = ["test update log", "execution.bulk", "INFO"]
input_keywords = ["test update input log", "input_logger", "WARNING"]
assert all(keyword in bulk_log for keyword in bulk_keywords)
assert all(keyword not in bulk_log for keyword in input_keywords)
assert all(keyword in input_log for keyword in input_keywords)
assert all(keyword not in input_log for keyword in bulk_keywords)
def test_scrub_credentials(self):
log_content = "sig=signature&key=accountkey"
folder_path = Path(mkdtemp())
logs_path = str(folder_path / "logs.log")
scrubbed_log_content = scrub_credentials(log_content)
assert scrubbed_log_content == "sig=**data_scrubbed**&key=**data_scrubbed**"
with LogContext(logs_path):
scrubbed_log_content = scrub_credentials(log_content)
assert scrubbed_log_content == "sig=**data_scrubbed**&key=**data_scrubbed**"
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_feature_utils.py | import pytest
from promptflow._utils.feature_utils import Feature, get_feature_list
@pytest.mark.unittest
def test_get_feature_list():
feature_list = get_feature_list()
assert isinstance(feature_list, list)
assert all(isinstance(feature, Feature) for feature in feature_list)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_utils.py | import pytest
import os
from unittest.mock import patch
from datetime import datetime
from promptflow._utils.utils import is_json_serializable, get_int_env_var, log_progress
class MyObj:
pass
@pytest.mark.unittest
class TestUtils:
@pytest.mark.parametrize("value, expected_res", [(None, True), (1, True), ("", True), (MyObj(), False)])
def test_is_json_serializable(self, value, expected_res):
assert is_json_serializable(value) == expected_res
@pytest.mark.parametrize(
"env_var, env_value, default_value, expected_result",
[
("TEST_VAR", "10", None, 10), # Valid integer string
("TEST_VAR", "invalid", None, None), # Invalid integer strings
("TEST_VAR", None, 5, 5), # Environment variable does not exist
("TEST_VAR", "10", 5, 10), # Valid integer string with a default value
("TEST_VAR", "invalid", 5, 5), # Invalid integer string with a default value
])
def test_get_int_env_var(self, env_var, env_value, default_value, expected_result):
with patch.dict(os.environ, {env_var: env_value} if env_value is not None else {}):
assert get_int_env_var(env_var, default_value) == expected_result
@pytest.mark.parametrize(
"env_var, env_value, expected_result",
[
("TEST_VAR", "10", 10), # Valid integer string
("TEST_VAR", "invalid", None), # Invalid integer strings
("TEST_VAR", None, None), # Environment variable does not exist
])
    def test_get_int_env_var_without_default_value(self, env_var, env_value, expected_result):
with patch.dict(os.environ, {env_var: env_value} if env_value is not None else {}):
assert get_int_env_var(env_var) == expected_result
@patch('promptflow.executor._line_execution_process_pool.bulk_logger', autospec=True)
def test_log_progress(self, mock_logger):
run_start_time = datetime.utcnow()
count = 1
        # Test that nothing is logged when the count has not reached the logging interval (interval = 2)
total_count = 20
log_progress(run_start_time, mock_logger, count, total_count)
mock_logger.info.assert_not_called()
# Test logging at specified intervals (interval = 2)
count = 8
log_progress(run_start_time, mock_logger, count, total_count)
mock_logger.info.assert_any_call("Finished 8 / 20 lines.")
mock_logger.reset_mock()
        # Test logging using the last_log_count parameter (count - last_log_count > interval (2))
log_progress(run_start_time, mock_logger, count, total_count, last_log_count=5)
mock_logger.info.assert_any_call("Finished 8 / 20 lines.")
mock_logger.reset_mock()
        # Test that nothing is logged when using the last_log_count parameter (count - last_log_count < interval (2))
log_progress(run_start_time, mock_logger, count, total_count, last_log_count=7)
mock_logger.info.assert_not_called()
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_exception_utils.py | import json
import re
from traceback import TracebackException
import pytest
from promptflow._core._errors import ToolExecutionError
from promptflow._core.operation_context import OperationContext
from promptflow._utils.exception_utils import (
ErrorResponse,
ExceptionPresenter,
JsonSerializedPromptflowException,
get_tb_next,
infer_error_code_from_class,
last_frame_info,
remove_suffix,
)
from promptflow.exceptions import (
ErrorTarget,
PromptflowException,
SystemErrorException,
UserErrorException,
ValidationException,
)
def set_inner_exception_by_parameter():
raise PromptflowException("test", error=ValueError("bad number"))
def set_inner_exception_by_raise_from():
raise PromptflowException("test") from ValueError("bad number")
def code_with_bug():
1 / 0
def raise_tool_execution_error():
try:
code_with_bug()
except Exception as e:
raise ToolExecutionError(node_name="MyTool") from e
def raise_exception_with_object():
raise PromptflowException(message_format="{inner_exception}", inner_exception=Exception("exception message"))
def raise_user_error():
try:
code_with_bug()
except Exception as e:
raise UserErrorException("run failed", target=ErrorTarget.TOOL) from e
def raise_context_exception():
try:
code_with_bug()
except Exception as e:
raise CustomizedContextException(e)
class CustomizedContextException(Exception):
def __init__(self, inner_exception):
self.inner_exception = inner_exception
@property
def message(self):
code_with_bug()
return "context exception"
class CustomizedException(Exception):
pass
class CustomUserError(UserErrorException):
pass
class CustomDefaultTargetError(UserErrorException):
def __init__(self, target=ErrorTarget.EXECUTOR, **kwargs):
super().__init__(target=target, **kwargs)
def raise_general_exception():
try:
code_with_bug()
except Exception as e:
raise CustomizedException("General exception") from e
def raise_promptflow_exception():
try:
code_with_bug()
except Exception as e:
raise PromptflowException("Promptflow exception") from e
def raise_promptflow_exception_without_inner_exception():
try:
code_with_bug()
except Exception:
raise PromptflowException("Promptflow exception")
TOOL_EXECUTION_ERROR_TRACEBACK = r"""Traceback \(most recent call last\):
File ".*test_exception_utils.py", line .*, in code_with_bug
1 / 0
ZeroDivisionError: division by zero
"""
TOOL_EXCEPTION_TRACEBACK = r"""
The above exception was the direct cause of the following exception:
Traceback \(most recent call last\):
File ".*test_exception_utils.py", line .*, in test_.*
raise_tool_execution_error\(\)
File ".*test_exception_utils.py", line .*, in raise_tool_execution_error
raise ToolExecutionError\(node_name="MyTool"\) from e
"""
TOOL_EXCEPTION_INNER_TRACEBACK = r"""Traceback \(most recent call last\):
File ".*test_exception_utils.py", line .*, in raise_tool_execution_error
code_with_bug\(\)
File ".*test_exception_utils.py", line .*, in code_with_bug
1 / 0
"""
GENERAL_EXCEPTION_TRACEBACK = r"""
The above exception was the direct cause of the following exception:
Traceback \(most recent call last\):
File ".*test_exception_utils.py", line .*, in test_debug_info_for_general_exception
raise_general_exception\(\)
File ".*test_exception_utils.py", line .*, in raise_general_exception
raise CustomizedException\("General exception"\) from e
"""
GENERAL_EXCEPTION_INNER_TRACEBACK = r"""Traceback \(most recent call last\):
File ".*test_exception_utils.py", line .*, in raise_general_exception
code_with_bug\(\)
File ".*test_exception_utils.py", line .*, in code_with_bug
1 / 0
"""
CONTEXT_EXCEPTION_TRACEBACK = r"""
During handling of the above exception, another exception occurred:
Traceback \(most recent call last\):
File ".*test_exception_utils.py", line .*, in test_debug_info_for_context_exception
raise_context_exception\(\)
File ".*test_exception_utils.py", line .*, in raise_context_exception
raise CustomizedContextException\(e\)
"""
CONTEXT_EXCEPTION_INNER_TRACEBACK = r"""Traceback \(most recent call last\):
File ".*test_exception_utils.py", line .*, in raise_context_exception
code_with_bug\(\)
File ".*test_exception_utils.py", line .*, in code_with_bug
1 / 0
"""
@pytest.mark.unittest
class TestExceptionUtilsCommonMethod:
def test_get_tb_next(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
tb_next = get_tb_next(e.value.__traceback__, 3)
te = TracebackException(type(e.value), e.value, tb_next)
formatted_tb = "".join(te.format())
assert re.match(TOOL_EXCEPTION_INNER_TRACEBACK, formatted_tb)
def test_last_frame_info(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
frame_info = last_frame_info(e.value)
assert "test_exception_utils.py" in frame_info.get("filename")
assert frame_info.get("lineno") > 0
assert frame_info.get("name") == "raise_tool_execution_error"
assert last_frame_info(None) == {}
@pytest.mark.parametrize(
"error_class, expected_error_code",
[
(UserErrorException, "UserError"),
(SystemErrorException, "SystemError"),
(ValidationException, "ValidationError"),
(ToolExecutionError, "ToolExecutionError"),
(ValueError, "ValueError"),
],
)
def test_infer_error_code_from_class(self, error_class, expected_error_code):
assert infer_error_code_from_class(error_class) == expected_error_code
@pytest.mark.unittest
class TestExceptionPresenter:
def test_debug_info(self):
# Test ToolExecutionError
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
presenter = ExceptionPresenter.create(e.value)
debug_info = presenter.debug_info
assert debug_info["type"] == "ToolExecutionError"
assert re.match(TOOL_EXCEPTION_TRACEBACK, debug_info["stackTrace"])
inner_exception = debug_info["innerException"]
assert inner_exception["type"] == "ZeroDivisionError"
assert re.match(TOOL_EXCEPTION_INNER_TRACEBACK, inner_exception["stackTrace"])
def test_debug_info_for_context_exception(self):
with pytest.raises(CustomizedContextException) as e:
raise_context_exception()
presenter = ExceptionPresenter.create(e.value)
debug_info = presenter.debug_info
assert debug_info["type"] == "CustomizedContextException"
assert re.match(CONTEXT_EXCEPTION_TRACEBACK, debug_info["stackTrace"])
inner_exception = debug_info["innerException"]
assert inner_exception["type"] == "ZeroDivisionError"
assert re.match(CONTEXT_EXCEPTION_INNER_TRACEBACK, inner_exception["stackTrace"])
def test_debug_info_for_general_exception(self):
# Test General Exception
with pytest.raises(CustomizedException) as e:
raise_general_exception()
presenter = ExceptionPresenter.create(e.value)
debug_info = presenter.debug_info
assert debug_info["type"] == "CustomizedException"
assert re.match(GENERAL_EXCEPTION_TRACEBACK, debug_info["stackTrace"])
inner_exception = debug_info["innerException"]
assert inner_exception["type"] == "ZeroDivisionError"
assert re.match(GENERAL_EXCEPTION_INNER_TRACEBACK, inner_exception["stackTrace"])
def test_to_dict_for_general_exception(self):
with pytest.raises(CustomizedException) as e:
raise_general_exception()
presenter = ExceptionPresenter.create(e.value)
dct = presenter.to_dict(include_debug_info=True)
assert "debugInfo" in dct
dct.pop("debugInfo")
assert dct == {
"code": "SystemError",
"message": "General exception",
"messageFormat": "",
"messageParameters": {},
"innerError": {
"code": "CustomizedException",
"innerError": None,
},
}
def test_to_dict_for_promptflow_exception(self):
with pytest.raises(PromptflowException) as e:
raise_promptflow_exception()
presenter = ExceptionPresenter.create(e.value)
dct = presenter.to_dict(include_debug_info=False)
assert dct == {
"code": "SystemError",
"message": "Promptflow exception",
"messageFormat": "",
"messageParameters": {},
"referenceCode": "Unknown",
"innerError": {
"code": "ZeroDivisionError",
"innerError": None,
},
}
def test_to_dict_for_promptflow_exception_without_inner_exception(self):
with pytest.raises(PromptflowException) as e:
raise_promptflow_exception_without_inner_exception()
presenter = ExceptionPresenter.create(e.value)
dct = presenter.to_dict(include_debug_info=False)
assert dct == {
"code": "SystemError",
"message": "Promptflow exception",
"messageFormat": "",
"messageParameters": {},
"referenceCode": "Unknown",
"innerError": None,
}
def test_to_dict_for_tool_execution_error(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
presenter = ExceptionPresenter.create(e.value)
assert re.search(TOOL_EXCEPTION_INNER_TRACEBACK, presenter.formatted_traceback)
assert re.search(TOOL_EXCEPTION_TRACEBACK, presenter.formatted_traceback)
dct = presenter.to_dict(include_debug_info=False)
assert dct.pop("additionalInfo") is not None
assert dct == {
"code": "UserError",
"message": "Execution failure in 'MyTool': (ZeroDivisionError) division by zero",
"messageFormat": "Execution failure in '{node_name}'.",
"messageParameters": {"node_name": "MyTool"},
"referenceCode": "Tool",
"innerError": {
"code": "ToolExecutionError",
"innerError": None,
},
}
@pytest.mark.parametrize(
"raise_exception_func, error_class, expected_error_codes",
[
(raise_general_exception, CustomizedException, ["SystemError", "CustomizedException"]),
(raise_tool_execution_error, ToolExecutionError, ["UserError", "ToolExecutionError"]),
(raise_promptflow_exception, PromptflowException, ["SystemError", "ZeroDivisionError"]),
(raise_promptflow_exception_without_inner_exception, PromptflowException, ["SystemError"]),
],
)
def test_error_codes(self, raise_exception_func, error_class, expected_error_codes):
with pytest.raises(error_class) as e:
raise_exception_func()
presenter = ExceptionPresenter.create(e.value)
assert presenter.error_codes == expected_error_codes
@pytest.mark.unittest
class TestErrorResponse:
def test_from_error_dict(self):
error_dict = {
"code": "UserError",
"message": "Flow run failed.",
}
response = ErrorResponse.from_error_dict(error_dict)
assert response.response_code == "400"
assert response.error_codes == ["UserError"]
assert response.message == "Flow run failed."
response_dct = response.to_dict()
assert response_dct["time"] is not None
response_dct.pop("time")
component_name = response_dct.pop("componentName", None)
assert component_name == OperationContext.get_instance().get_user_agent()
assert "promptflow" in component_name
assert response_dct == {
"error": {
"code": "UserError",
"message": "Flow run failed.",
},
"correlation": None,
"environment": None,
"location": None,
}
    def test_to_simplified_dict(self):
with pytest.raises(CustomizedException) as e:
raise_general_exception()
error_response = ErrorResponse.from_exception(e.value)
assert error_response.to_simplified_dict() == {
"error": {
"code": "SystemError",
"message": "General exception",
}
}
def test_from_exception(self):
with pytest.raises(CustomizedException) as e:
raise_general_exception()
response = ErrorResponse.from_exception(e.value).to_dict()
assert response["time"] is not None
response.pop("time")
component_name = response.pop("componentName", None)
assert component_name == OperationContext.get_instance().get_user_agent()
assert "promptflow" in component_name
assert response == {
"error": {
"code": "SystemError",
"message": "General exception",
"messageFormat": "",
"messageParameters": {},
"innerError": {
"code": "CustomizedException",
"innerError": None,
},
},
"correlation": None,
"environment": None,
"location": None,
}
@pytest.mark.unittest
@pytest.mark.parametrize(
"input_dict, expected",
[
({"code": "firstError"}, "firstError"),
({"code": "firstError", "innerError": {}}, "firstError"),
({"code": "firstError", "innerError": {"code": "secondError"}}, "firstError/secondError"),
({"code": None, "innerError": {"code": "secondError"}}, ""),
            # A dict without "code" at the outermost level returns an empty string.
({"error": {"code": "firstError", "innerError": {"code": "secondError"}}}, ""),
],
)
def test_error_code_hierarchy(self, input_dict, expected):
assert ErrorResponse.from_error_dict(input_dict).error_code_hierarchy == expected
@pytest.mark.parametrize(
"error_dict, expected_innermost_error_code",
[
(
{
"code": "UserError",
"innerError": {
"code": "ToolExecutionError",
"innerError": None,
},
},
"ToolExecutionError",
),
({"code": "UserError", "innerError": None}, "UserError"),
({"message": "UserError", "innerError": None}, None),
],
)
def test_innermost_error_code_with_code(self, error_dict, expected_innermost_error_code):
inner_error_code = ErrorResponse.from_error_dict(error_dict).innermost_error_code
assert inner_error_code == expected_innermost_error_code
@pytest.mark.parametrize(
"error_dict, expected_additional_info",
[
({"code": "UserError"}, {}),
(
{
"code": "UserError",
"additionalInfo": [
{
"type": "test_additional_info",
"info": "This is additional info for testing.",
},
"not_dict",
{
"type": "empty_info",
},
{
"info": "Empty type",
},
{
"test": "Invalid additional info",
},
],
},
{"test_additional_info": "This is additional info for testing."},
),
],
)
def test_additional_info(self, error_dict, expected_additional_info):
error_response = ErrorResponse.from_error_dict(error_dict)
assert error_response.additional_info == expected_additional_info
assert all(error_response.get_additional_info(key) == value for key, value in expected_additional_info.items())
@pytest.mark.parametrize(
"raise_exception_func, error_class",
[
(raise_general_exception, CustomizedException),
(raise_tool_execution_error, ToolExecutionError),
],
)
def test_get_user_execution_error_info(self, raise_exception_func, error_class):
with pytest.raises(error_class) as e:
raise_exception_func()
        error_response = ErrorResponse.from_exception(e.value)
        actual_error_info = error_response.get_user_execution_error_info()
self.assert_user_execution_error_info(e.value, actual_error_info)
def assert_user_execution_error_info(self, exception, error_info):
if isinstance(exception, ToolExecutionError):
assert error_info["type"] == "ZeroDivisionError"
assert error_info["message"] == "division by zero"
assert error_info["filename"].endswith("test_exception_utils.py")
assert error_info["lineno"] > 0
assert error_info["name"] == "code_with_bug"
assert re.match(
r"Traceback \(most recent call last\):\n"
r' File ".*test_exception_utils.py", line .*, in code_with_bug\n'
r" 1 / 0\n"
r"(.*\n)?" # Python >= 3.11 add extra line here like a pointer.
r"ZeroDivisionError: division by zero\n",
error_info["traceback"],
)
# assert re.match(TOOL_EXECUTION_ERROR_TRACEBACK, error_info["traceback"])
else:
assert error_info == {}
@pytest.mark.unittest
class TestExceptions:
@pytest.mark.parametrize(
"ex, expected_message, expected_message_format, expected_message_parameters",
[
(
CustomUserError("message"),
"message",
"",
{},
),
(
CustomUserError(message="message"),
"message",
"",
{},
),
(
CustomUserError("message", target=ErrorTarget.TOOL),
"message",
"",
{},
),
(
CustomUserError(message="message", target=ErrorTarget.TOOL),
"message",
"",
{},
),
(
CustomUserError(message_format="Hello world"),
"Hello world",
"Hello world",
{},
),
(
CustomUserError(message_format="Hello {name}", name="world"),
"Hello world",
"Hello {name}",
{
"name": "world",
},
),
(
CustomUserError(message_format="Hello {name}", name="world", not_used="whatever"),
"Hello world",
"Hello {name}",
{
"name": "world",
},
),
(
CustomUserError(message_format="Hello {name}", name="world", target=ErrorTarget.TOOL),
"Hello world",
"Hello {name}",
{
"name": "world",
},
),
(
CustomUserError(message_format="Hello {name} and {name}", name="world"),
"Hello world and world",
"Hello {name} and {name}",
{
"name": "world",
},
),
(
CustomUserError(message_format="Hello {name} and {name}", name="world"),
"Hello world and world",
"Hello {name} and {name}",
{
"name": "world",
},
),
(
CustomUserError(
message_format="Tool '{tool_name}' execution failed due to {error}",
tool_name="my tool",
error="bug",
),
"Tool 'my tool' execution failed due to bug",
"Tool '{tool_name}' execution failed due to {error}",
{
"tool_name": "my tool",
"error": "bug",
},
),
],
)
def test_message_and_format(self, ex, expected_message, expected_message_format, expected_message_parameters):
with pytest.raises(CustomUserError) as exc:
raise ex
assert exc.value.message == expected_message
assert exc.value.message_format == expected_message_format
assert exc.value.message_parameters == expected_message_parameters
@pytest.mark.parametrize(
"ex, expected_message, exepcted_target",
[
(
CustomDefaultTargetError(message="message", target=ErrorTarget.TOOL),
"message",
ErrorTarget.TOOL,
),
(
CustomDefaultTargetError(message="message"),
"message",
ErrorTarget.EXECUTOR,
),
],
)
    def test_target_and_message(self, ex, expected_message, expected_target):
with pytest.raises(CustomDefaultTargetError) as exc:
raise ex
assert exc.value.message == expected_message
        assert exc.value.target == expected_target
def test_reference_code(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
e = e.value
assert e.reference_code == ErrorTarget.TOOL.value
module = "promptflow_vectordb.tool.faiss_index_loopup"
e.module = module
assert e.reference_code == f"{ErrorTarget.TOOL.value}/{module}"
@pytest.mark.parametrize(
"func_that_raises_exception",
[
set_inner_exception_by_parameter,
set_inner_exception_by_raise_from,
],
)
def test_inner_exception(self, func_that_raises_exception):
with pytest.raises(PromptflowException) as e:
func_that_raises_exception()
inner_exception = e.value.inner_exception
assert isinstance(inner_exception, ValueError)
assert str(inner_exception) == "bad number"
assert str(e.value) == "test"
def test_tool_execution_error(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
inner_exception = e.value.inner_exception
assert isinstance(inner_exception, ZeroDivisionError)
assert str(inner_exception) == "division by zero"
assert e.value.message == "Execution failure in 'MyTool': (ZeroDivisionError) division by zero"
last_frame_info = e.value.tool_last_frame_info
assert "test_exception_utils.py" in last_frame_info.get("filename")
assert last_frame_info.get("lineno") > 0
assert last_frame_info.get("name") == "code_with_bug"
assert re.match(
r"Traceback \(most recent call last\):\n"
r' File ".*test_exception_utils.py", line .*, in code_with_bug\n'
r" 1 / 0\n"
r"(.*\n)?" # Python >= 3.11 add extra line here like a pointer.
r"ZeroDivisionError: division by zero\n",
e.value.tool_traceback,
)
def test_code_hierarchy(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
e = e.value
assert e.error_codes == ["UserError", "ToolExecutionError"]
assert ExceptionPresenter.create(e).error_code_recursed == {
"code": "UserError",
"innerError": {
"code": "ToolExecutionError",
"innerError": None,
},
}
def test_debug_info(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
e = e.value
presenter = ExceptionPresenter.create(e)
assert presenter.debug_info["type"] == "ToolExecutionError"
assert re.match(TOOL_EXCEPTION_TRACEBACK, presenter.debug_info["stackTrace"])
inner_exception = presenter.debug_info["innerException"]
assert inner_exception["type"] == "ZeroDivisionError"
assert re.match(TOOL_EXCEPTION_INNER_TRACEBACK, inner_exception["stackTrace"])
def test_additional_info(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
additional_info = ExceptionPresenter.create(e.value).to_dict().get("additionalInfo")
assert len(additional_info) == 1
info_0 = additional_info[0]
assert info_0["type"] == "ToolExecutionErrorDetails"
info_0_value = info_0["info"]
assert info_0_value.get("type") == "ZeroDivisionError"
assert info_0_value.get("message") == "division by zero"
assert re.match(r".*test_exception_utils.py", info_0_value["filename"])
assert info_0_value.get("lineno") > 0
assert info_0_value.get("name") == "code_with_bug"
assert re.match(
r"Traceback \(most recent call last\):\n"
r' File ".*test_exception_utils.py", line .*, in code_with_bug\n'
r" 1 / 0\n"
r"(.*\n)?" # Python >= 3.11 add extra line here like a pointer.
r"ZeroDivisionError: division by zero\n",
info_0_value.get("traceback"),
)
def test_additional_info_for_empty_inner_error(self):
ex = ToolExecutionError(node_name="Node1")
dct = ExceptionPresenter.create(ex).to_dict()
additional_info = dct.get("additionalInfo")
assert additional_info is None
def test_additional_info_for_empty_case(self):
with pytest.raises(UserErrorException) as e:
raise_user_error()
dct = ExceptionPresenter.create(e.value).to_dict()
additional_info = dct.get("additionalInfo")
assert additional_info is None
@pytest.mark.parametrize("include_debug_info", [True, False])
def test_to_dict_turning_on_or_off_debug_info(self, include_debug_info):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
e = e.value
result = ExceptionPresenter.create(e).to_dict(include_debug_info=include_debug_info)
if include_debug_info:
assert "debugInfo" in result
else:
assert "debugInfo" not in result
def test_to_dict(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
e = e.value
        # We do not check include_debug_info=True since the traceback is already checked in other cases
result = ExceptionPresenter.create(e).to_dict(include_debug_info=False)
        # We do not check additionalInfo since it is already checked in other cases
result.pop("additionalInfo")
assert result == {
"message": "Execution failure in 'MyTool': (ZeroDivisionError) division by zero",
"messageFormat": "Execution failure in '{node_name}'.",
"messageParameters": {"node_name": "MyTool"},
"referenceCode": "Tool",
"code": "UserError",
"innerError": {
"code": "ToolExecutionError",
"innerError": None,
},
}
def test_to_dict_object_parameter(self):
with pytest.raises(PromptflowException) as e:
raise_exception_with_object()
e = e.value
        # We do not check include_debug_info=True since the traceback is already checked in other cases
result = ExceptionPresenter.create(e).to_dict(include_debug_info=False)
# Assert message is str(exception)
assert result == {
"message": "exception message",
"messageFormat": "{inner_exception}",
"messageParameters": {"inner_exception": "exception message"},
"referenceCode": "Unknown",
"code": "SystemError",
"innerError": None,
}
@pytest.mark.parametrize("include_debug_info", [True, False])
def test_to_dict_for_JsonSerializedPromptflowException(self, include_debug_info):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
exception_dict = ExceptionPresenter.create(e.value).to_dict(include_debug_info=True)
message = json.dumps(exception_dict)
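        # JsonSerializedPromptflowException carries the serialized error dict as its message string.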
exception = JsonSerializedPromptflowException(message=message)
assert str(exception) == message
json_serialized_exception_dict = ExceptionPresenter.create(exception).to_dict(
include_debug_info=include_debug_info
)
error_dict = exception.to_dict(include_debug_info=include_debug_info)
assert error_dict == json_serialized_exception_dict
if include_debug_info:
assert "debugInfo" in error_dict
error_dict.pop("debugInfo")
error_dict.pop("additionalInfo")
assert error_dict == {
"code": "UserError",
"message": "Execution failure in 'MyTool': (ZeroDivisionError) division by zero",
"messageFormat": "Execution failure in '{node_name}'.",
"messageParameters": {"node_name": "MyTool"},
"referenceCode": "Tool",
"innerError": {
"code": "ToolExecutionError",
"innerError": None,
},
}
def test_remove_suffix(self):
assert remove_suffix('PackageToolNotFoundError.', '.') == 'PackageToolNotFoundError'
assert remove_suffix('PackageToolNotFoundError', 'Error') == 'PackageToolNotFound'
assert remove_suffix('PackageToolNotFoundError', 'PackageToolNotFoundError') == ''
assert remove_suffix('PackageToolNotFoundError', 'NonExistedSuffix') == 'PackageToolNotFoundError'
assert remove_suffix('PackageToolNotFoundError', '') == 'PackageToolNotFoundError'
assert remove_suffix('PackageToolNotFoundError', None) == 'PackageToolNotFoundError'
assert remove_suffix('', 'NonExistedSuffix') == ''
assert remove_suffix(None, 'NonExistedSuffix') is None
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_dataclass_serializer.py | import pytest
from datetime import datetime
from dataclasses import dataclass
from typing import Dict, List
from promptflow._core.generator_proxy import GeneratorProxy
from promptflow._utils.dataclass_serializer import \
get_type, serialize, deserialize_dataclass, deserialize_value, assertEqual
from promptflow.contracts.run_info import RunInfo, Status
from promptflow._core.connection_manager import ConnectionManager
from promptflow.storage.run_records import NodeRunRecord
from unittest.mock import patch, Mock
import sys
def get_connection_dict():
return {
"azure_open_ai_connection": {
"type": "AzureOpenAIConnection",
"value": {
"api_key": "<azure-openai-key>",
"api_base": "<aoai-api-endpoint>",
"api_type": "azure",
"api_version": "2023-07-01-preview",
},
},
"custom_connection": {
"type": "CustomConnection",
"value": {
"api_key": "<your-key>",
"url": "<connection-endpoint>",
},
"module": "promptflow.connections",
"secret_keys": ["api_key"],
},
}
@pytest.mark.unittest
@pytest.mark.parametrize(
"type_input, expected",
[
(NodeRunRecord, NodeRunRecord),
([NodeRunRecord], List[NodeRunRecord]),
(dict(a=NodeRunRecord), Dict[str, NodeRunRecord]),
(int, int),
(str, str),
]
)
def test_get_type(type_input, expected):
assert get_type(type_input) == expected
@pytest.mark.unittest
def test_serialize_dataclass():
start_time = datetime(2023, 9, 4)
end_time = datetime(2023, 9, 4)
node_run_info = RunInfo(
node=None,
run_id=None,
flow_run_id=None,
status=Status.Completed,
inputs=None,
output=None,
metrics=None,
error=None,
parent_run_id=None,
start_time=start_time,
end_time=end_time,
index=0,
)
node_record = NodeRunRecord.from_run_info(node_run_info)
serialized_info = serialize(node_run_info)
serialized_record = serialize(node_record)
# test dataclass without serialize attribute
assert serialized_info['status'] == "Completed"
assert serialized_info['start_time'] == "2023-09-04T00:00:00Z"
assert deserialize_value(serialized_info, RunInfo) == node_run_info
# test dataclass with serialize attribute
assert serialized_record == node_record.serialize()
@pytest.mark.unittest
@pytest.mark.parametrize(
"value, value_type, expected",
[
(datetime(2023, 9, 4), datetime, "2023-09-04T00:00:00Z"),
(Status.Completed, Status, "Completed"),
([1, 2, 3], List[int], [1, 2, 3]),
({"a": 1, "b": 2}, Dict[str, int], {"a": 1, "b": 2}),
(1, int, 1),
("a", str, "a"),
]
)
def test_serialize_value(value, value_type, expected):
assert serialize(value) == expected
assert deserialize_value(serialize(value), value_type) == value
@pytest.mark.unittest
def test_serialize_remove_null():
value = {"a": 1, "b": None}
value_type = Dict[str, int]
assert deserialize_value(serialize(value, remove_null=True), value_type) == {"a": 1, "b": None}
@dataclass
class DummyDataClass:
name: str
age: int
assert serialize(DummyDataClass("Dummy", None), remove_null=True) == {'name': 'Dummy'}
@pytest.mark.unittest
def test_serialize_connection():
new_connection = get_connection_dict()
connection_manager = ConnectionManager(new_connection)
assert serialize(connection_manager.get("azure_open_ai_connection")) == "azure_open_ai_connection"
@pytest.mark.unittest
def test_serialize_generator():
def generator():
for i in range(3):
yield i
g = GeneratorProxy(generator())
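    # Only the items already consumed from the proxy are expected to be serialized.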
next(g)
assert serialize(g) == [0]
@pytest.mark.unittest
@patch.dict('sys.modules', {'pydantic': None})
def test_import_pydantic_error():
# mock pydantic is not installed
class DummyClass:
def __init__(self, name, age):
self.name = name
self.age = age
dummy = DummyClass('Test', 20)
assert serialize(dummy) == dummy
@pytest.mark.unittest
@patch.dict('sys.modules', {'pydantic': Mock()})
def test_import_pydantic():
# mock pydantic is installed
class MockBaseModel:
def dict(self):
return {"key": "value"}
mock_value = MockBaseModel()
sys.modules['pydantic'].BaseModel = MockBaseModel
assert serialize(mock_value) == mock_value.dict()
assert serialize(123) == 123
@pytest.mark.unittest
def test_deserialize_dataclass():
# test when cls is not dataclass
with pytest.raises(ValueError):
deserialize_dataclass(int, 1)
# test when data is not a dict
with pytest.raises(ValueError):
deserialize_dataclass(NodeRunRecord, "NodeRunRecord")
@dataclass
class DummyDataClassWithDefault:
name: str = "Default Name"
age: int = 0
# test deserialize dataclass with default value
data = {"age": 25}
obj = deserialize_dataclass(DummyDataClassWithDefault, data)
assert obj.name == "Default Name"
assert obj.age == 25
@pytest.mark.unittest
@pytest.mark.parametrize(
"a, b, expected",
[
(1, 2, 1),
(Status.Completed, Status, Status.Completed),
(None, datetime, None),
("2022-01-01T00:00:00", datetime, datetime.fromisoformat("2022-01-01T00:00:00")),
]
)
def test_deserialize_value(a, b, expected):
assert deserialize_value(a, b) == expected
@pytest.mark.unittest
@pytest.mark.parametrize(
"a, b, path, are_equal",
[
# Test with identical dicts
({'key1': 'value1', 'key2': 'value2'}, {'key1': 'value1', 'key2': 'value2'}, \
"unittests/_utils/test_dataclass_serializer", True),
# Test with non-identical dicts
({'key1': 'value1', 'key2': 'value2'}, {'key1': 'value1', 'key3': 'value3'}, \
"unittests/_utils/test_dataclass_serializer", False),
# Test with identical lists
(['item1', 'item2'], ['item1', 'item2'], "", True),
# Test with non-identical lists
(['item1', 'item2'], ['item1', 'item3'], "", False),
# Test with other types
(1, 1, "", True),
(1, 2, "", False),
('string', 'string', "", True),
('string1', 'string2', "", False),
]
)
def test_assertEqual(a, b, path, are_equal):
if are_equal:
assertEqual(a, b, path)
else:
with pytest.raises(AssertionError):
assertEqual(a, b, path)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_generate_tool_meta_utils.py | import os
import re
import sys
from multiprocessing import Pool
from pathlib import Path
from unittest.mock import patch
import pytest
from promptflow._core.tool_meta_generator import (
JinjaParsingError,
MultipleToolsDefined,
NoToolDefined,
PythonLoadError,
PythonParsingError,
generate_prompt_meta,
generate_python_meta,
generate_tool_meta_dict_by_file,
)
from promptflow._utils.exception_utils import ExceptionPresenter
from ...utils import FLOW_ROOT, load_json
TEST_ROOT = Path(__file__).parent.parent.parent.parent
TOOLS_ROOT = TEST_ROOT / "test_configs/wrong_tools"
def cd_and_run(working_dir, source_path, tool_type):
os.chdir(working_dir)
sys.path.insert(0, working_dir)
try:
return generate_tool_meta_dict_by_file(source_path, tool_type)
except Exception as e:
return f"({e.__class__.__name__}) {e}"
def cd_and_run_with_read_text_error(working_dir, source_path, tool_type):
def mock_read_text_error(self: Path, *args, **kwargs):
raise Exception("Mock read text error.")
os.chdir(working_dir)
sys.path.insert(0, working_dir)
try:
with patch("promptflow._core.tool_meta_generator.Path.read_text", new=mock_read_text_error):
return generate_tool_meta_dict_by_file(source_path, tool_type)
except Exception as e:
return f"({e.__class__.__name__}) {e}"
def cd_and_run_with_bad_function_interface(working_dir, source_path, tool_type):
def mock_function_to_interface(*args, **kwargs):
raise Exception("Mock function to interface error.")
os.chdir(working_dir)
sys.path.insert(0, working_dir)
try:
with patch("promptflow._core.tool_meta_generator.function_to_interface", new=mock_function_to_interface):
return generate_tool_meta_dict_by_file(source_path, tool_type)
except Exception as e:
return f"({e.__class__.__name__}) {e}"
def generate_tool_meta_dict_by_file_with_cd(wd, tool_path, tool_type, func):
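    # Run in a one-off child process so the os.chdir and sys.path changes in the helpers do not leak into the test process.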
with Pool(1) as pool:
return pool.apply(func, (wd, tool_path, tool_type))
@pytest.mark.unittest
class TestToolMetaUtils:
@pytest.mark.parametrize(
"flow_dir, tool_path, tool_type",
[
("prompt_tools", "summarize_text_content_prompt.jinja2", "prompt"),
("prompt_tools", "summarize_text_content_prompt.jinja2", "llm"),
("script_with_import", "dummy_utils/main.py", "python"),
("script_with___file__", "script_with___file__.py", "python"),
("script_with_special_character", "script_with_special_character.py", "python"),
],
)
def test_generate_tool_meta_dict_by_file(self, flow_dir, tool_path, tool_type):
wd = str((FLOW_ROOT / flow_dir).resolve())
meta_dict = generate_tool_meta_dict_by_file_with_cd(wd, tool_path, tool_type, cd_and_run)
assert isinstance(meta_dict, dict), "Call cd_and_run failed:\n" + meta_dict
target_file = (Path(wd) / tool_path).with_suffix(".meta.json")
expected_dict = load_json(target_file)
if tool_type == "llm":
expected_dict["type"] = "llm" # We use prompt as default for jinja2
assert meta_dict == expected_dict
@pytest.mark.parametrize(
"flow_dir, tool_path, tool_type, func, msg_pattern",
[
pytest.param(
"prompt_tools",
"summarize_text_content_prompt.jinja2",
"python",
cd_and_run,
r"\(PythonLoaderNotFound\) Failed to load python file '.*summarize_text_content_prompt.jinja2'. "
r"Please make sure it is a valid .py file.",
id="PythonLoaderNotFound",
),
pytest.param(
"script_with_import",
"fail.py",
"python",
cd_and_run,
r"\(PythonLoadError\) Failed to load python module from file '.*fail.py': "
r"\(ModuleNotFoundError\) No module named 'aaa'",
id="PythonLoadError",
),
pytest.param(
"simple_flow_with_python_tool",
"divide_num.py",
"python",
cd_and_run_with_bad_function_interface,
r"\(BadFunctionInterface\) Parse interface for tool 'divide_num' failed: "
r"\(Exception\) Mock function to interface error.",
id="BadFunctionInterface",
),
pytest.param(
"script_with_import",
"aaa.py",
"python",
cd_and_run,
r"\(MetaFileNotFound\) Generate tool meta failed for python tool. "
r"Meta file 'aaa.py' can not be found.",
id="MetaFileNotFound",
),
pytest.param(
"simple_flow_with_python_tool",
"divide_num.py",
"python",
cd_and_run_with_read_text_error,
r"\(MetaFileReadError\) Generate tool meta failed for python tool. "
r"Read meta file 'divide_num.py' failed: \(Exception\) Mock read text error.",
id="MetaFileReadError",
),
pytest.param(
"simple_flow_with_python_tool",
"divide_num.py",
"action",
cd_and_run,
r"\(NotSupported\) Generate tool meta failed. The type 'action' is currently unsupported. "
r"Please choose from available types: python,llm,prompt and try again.",
id="NotSupported",
),
],
)
def test_generate_tool_meta_dict_by_file_exception(self, flow_dir, tool_path, tool_type, func, msg_pattern):
wd = str((FLOW_ROOT / flow_dir).resolve())
ret = generate_tool_meta_dict_by_file_with_cd(wd, tool_path, tool_type, func)
assert isinstance(ret, str), "Call cd_and_run should fail but succeeded:\n" + str(ret)
assert re.match(msg_pattern, ret)
@pytest.mark.parametrize(
"content, error_code, message",
[
pytest.param(
"zzz",
PythonParsingError,
"Failed to load python module. Python parsing failed: (NameError) name 'zzz' is not defined",
id="PythonParsingError_NameError",
),
pytest.param(
"# Nothing",
NoToolDefined,
"No tool found in the python script. "
"Please make sure you have one and only one tool definition in your script.",
id="NoToolDefined",
),
pytest.param(
"multiple_tools.py",
MultipleToolsDefined,
"Expected 1 but collected 2 tools: tool1, tool2. "
"Please make sure you have one and only one tool definition in your script.",
id="MultipleToolsDefined",
),
pytest.param(
"{% zzz",
PythonParsingError,
"Failed to load python module. Python parsing failed: "
"(SyntaxError) invalid syntax (<string>, line 1)",
id="PythonParsingError_SyntaxError",
),
],
)
def test_custom_python_meta(self, content, error_code, message) -> None:
if content.endswith(".py"):
source = TOOLS_ROOT / content
with open(source, "r") as f:
code = f.read()
else:
code = content
source = None
with pytest.raises(error_code) as ex:
generate_python_meta("some_tool", code, source)
assert message == str(ex.value)
@pytest.mark.parametrize(
"content, error_code, message",
[
pytest.param(
"{% zzz",
JinjaParsingError,
"Generate tool meta failed for llm tool. Jinja parsing failed at line 1: "
"(TemplateSyntaxError) Encountered unknown tag 'zzz'.",
id="JinjaParsingError_Code",
),
pytest.param(
"no_end.jinja2",
JinjaParsingError,
"Generate tool meta failed for llm tool. Jinja parsing failed at line 2: "
"(TemplateSyntaxError) Unexpected end of template. Jinja was looking for the following tags: "
"'endfor' or 'else'. The innermost block that needs to be closed is 'for'.",
id="JinjaParsingError_File",
),
],
)
def test_custom_llm_meta(self, content, error_code, message) -> None:
if content.endswith(".jinja2"):
with open(TOOLS_ROOT / content, "r") as f:
code = f.read()
else:
code = content
with pytest.raises(error_code) as ex:
generate_prompt_meta("some_tool", code)
assert message == str(ex.value)
@pytest.mark.parametrize(
"content, error_code, message",
[
pytest.param(
"{% zzz",
JinjaParsingError,
"Generate tool meta failed for prompt tool. Jinja parsing failed at line 1: "
"(TemplateSyntaxError) Encountered unknown tag 'zzz'.",
id="JinjaParsingError_Code",
),
pytest.param(
"no_end.jinja2",
JinjaParsingError,
"Generate tool meta failed for prompt tool. Jinja parsing failed at line 2: "
"(TemplateSyntaxError) Unexpected end of template. Jinja was looking for the following tags: "
"'endfor' or 'else'. The innermost block that needs to be closed is 'for'.",
id="JinjaParsingError_File",
),
],
)
def test_custom_prompt_meta(self, content, error_code, message) -> None:
if content.endswith(".jinja2"):
with open(TOOLS_ROOT / content, "r") as f:
code = f.read()
else:
code = content
with pytest.raises(error_code) as ex:
generate_prompt_meta("some_tool", code, prompt_only=True)
assert message == str(ex.value)
@pytest.mark.unittest
class TestPythonLoadError:
def test_additional_info(self):
source = TOOLS_ROOT / "load_error.py"
with open(source, "r") as f:
code = f.read()
with pytest.raises(PythonLoadError) as ex:
generate_python_meta("some_tool", code, source)
additional_info = ExceptionPresenter.create(ex.value).to_dict().get("additionalInfo")
assert len(additional_info) == 1
info_0 = additional_info[0]
assert info_0["type"] == "UserCodeStackTrace"
info_0_value = info_0["info"]
assert info_0_value.get("type") == "ZeroDivisionError"
assert info_0_value.get("message") == "division by zero"
assert re.match(r".*load_error.py", info_0_value["filename"])
assert info_0_value.get("lineno") == 3
assert info_0_value.get("name") == "<module>"
assert re.search(
r"Traceback \(most recent call last\):\n"
r' File ".*load_error.py", line .*, in <module>\n'
r" 1 / 0\n"
r"(.*\n)?" # Python >= 3.11 add extra line here like a pointer.
r"ZeroDivisionError: division by zero\n",
info_0_value.get("traceback"),
)
def test_additional_info_for_empty_inner_error(self):
ex = PythonLoadError(message_format="Test empty error")
additional_info = ExceptionPresenter.create(ex).to_dict().get("additionalInfo")
assert additional_info is None
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_thread_utils.py | import re
import sys
import time
from io import StringIO
from logging import WARNING, Logger, StreamHandler
import pytest
from promptflow._utils.thread_utils import RepeatLogTimer
from promptflow._utils.utils import generate_elapsed_time_messages
class DummyException(Exception):
pass
@pytest.mark.skipif(sys.platform == "darwin", reason="Skip on Mac")
@pytest.mark.unittest
class TestRepeatLogTimer:
def test_context_manager(self):
s = StringIO()
logger = Logger("test_repeat_log_timer")
logger.addHandler(StreamHandler(s))
interval_seconds = 1
start_time = time.perf_counter()
with RepeatLogTimer(
interval_seconds=interval_seconds,
logger=logger,
level=WARNING,
log_message_function=generate_elapsed_time_messages,
args=("Test", start_time, interval_seconds, None),
):
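            # Sleep for several intervals so the timer emits multiple elapsed-time warnings.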
time.sleep(10.5)
logs = s.getvalue().split("\n")
logs = [log for log in logs if log]
log_pattern = re.compile(
r"^Test has been running for [0-9]+ seconds, thread None cannot be found in sys._current_frames, "
r"maybe it has been terminated due to unexpected errors.$"
)
assert logs, "Logs are empty."
for log in logs:
assert re.match(log_pattern, log), f"The wrong log: {log}"
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_connection_utils.py | import pytest
from promptflow._sdk.entities import CustomStrongTypeConnection
from promptflow._utils.connection_utils import (
generate_custom_strong_type_connection_spec,
generate_custom_strong_type_connection_template,
)
from promptflow.contracts.types import Secret
class MyCustomConnectionWithNoComments(CustomStrongTypeConnection):
api_key: Secret
api_base: str
class MyCustomConnectionWithDefaultValue(CustomStrongTypeConnection):
api_key: Secret
api_base: str = "default value of api-base"
class MyCustomConnectionWithInvalidComments(CustomStrongTypeConnection):
"""My custom connection with invalid comments.
:param api_key: The api key.
:type api_key: String
:param api_base: The api base.
:type api_base: String
:param api_key_2: The api key 2.
:type api_key_2: String
"""
api_key: Secret
api_base: str
class MyCustomConnectionMissingTypeComments(CustomStrongTypeConnection):
"""My custom connection with missing type comments.
:param api_key: The api key.
"""
api_key: Secret
api_base: str
class MyCustomConnectionMissingParamComments(CustomStrongTypeConnection):
"""My custom connection with missing param comments.
:type api_key: String
"""
api_key: Secret
api_base: str
@pytest.mark.unittest
class TestConnectionUtils:
@pytest.mark.parametrize(
"cls, expected_str_in_template",
[
(
MyCustomConnectionWithNoComments,
['api_base: "to_replace_with_api_base"\n', 'api_key: "to_replace_with_api_key"\n'],
),
(
MyCustomConnectionWithInvalidComments,
[
'api_base: "to_replace_with_api_base" # String type. The api base.\n',
'api_key: "to_replace_with_api_key" # String type. The api key.\n',
],
),
(MyCustomConnectionMissingTypeComments, ['api_key: "to_replace_with_api_key" # The api key.']),
(MyCustomConnectionMissingParamComments, ['api_key: "to_replace_with_api_key" # String type.']),
],
)
def test_generate_custom_strong_type_connection_template_with_comments(self, cls, expected_str_in_template):
package = "test-package"
package_version = "0.0.1"
spec = generate_custom_strong_type_connection_spec(cls, package, package_version)
template = generate_custom_strong_type_connection_template(cls, spec, package, package_version)
for comment in expected_str_in_template:
assert comment in template
def test_generate_custom_strong_type_connection_template_with_default_value(self):
package = "test-package"
package_version = "0.0.1"
spec = generate_custom_strong_type_connection_spec(MyCustomConnectionWithDefaultValue, package, package_version)
template = generate_custom_strong_type_connection_template(
MyCustomConnectionWithDefaultValue, spec, package, package_version
)
assert 'api_base: "default value of api-base"' in template
@pytest.mark.parametrize(
"input_value, expected_connection_names",
[
pytest.param(
"new_ai_connection",
["new_ai_connection"],
id="standard",
),
pytest.param(
"${node.output}",
[],
id="output_reference",
),
pytest.param(
"${inputs.question}",
[],
id="input_reference",
),
],
)
def test_get_used_connection_names_from_flow_meta(self, input_value: str, expected_connection_names: list):
from promptflow._sdk._submitter.utils import SubmitterHelper
connection_names = SubmitterHelper.get_used_connection_names(
{
"package": {
"(Promptflow.Tools)Promptflow.Tools.BuiltInTools.AOAI.Chat": {
"name": "Promptflow.Tools.BuiltInTools.AOAI.Chat",
"type": "csharp",
"inputs": {
"connection": {"type": ["AzureOpenAIConnection"]},
"prompt": {"type": ["string"]},
"deployment_name": {"type": ["string"]},
"objects": {"type": ["object"]},
},
"description": "",
"class_name": "AOAI",
"module": "Promptflow.Tools.BuiltInTools.AOAI",
"function": "Chat",
"is_builtin": True,
"package": "Promptflow.Tools",
"package_version": "0.0.14.0",
"toolId": "(Promptflow.Tools)Promptflow.Tools.BuiltInTools.AOAI.Chat",
},
},
"code": {},
},
{
"nodes": [
{
"name": "get_summarized_text_content",
"type": "csharp",
"source": {
"type": "package",
"tool": "(Promptflow.Tools)Promptflow.Tools.BuiltInTools.AOAI.Chat",
},
"inputs": {
"connection": input_value,
},
},
]
},
)
assert connection_names == expected_connection_names
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_credential_scrubber.py | import pytest
from promptflow._utils.credential_scrubber import CredentialScrubber
def mock_connection_string():
connection_str_before_key = "DefaultEndpointsProtocol=https;AccountName=accountName;"
connection_str_after_key = "EndpointSuffix=core.windows.net"
return (
f"{connection_str_before_key}AccountKey=accountKey;{connection_str_after_key}",
f"{connection_str_before_key}AccountKey={CredentialScrubber.PLACE_HOLDER};{connection_str_after_key}",
)
def mock_sas_uri():
uri_without_signature = "https://bloburi/containerName/file.txt?sv=2021-10-04&se=2023-05-17&sr=b&sp=rw"
return (f"{uri_without_signature}&sig=signature", f"{uri_without_signature}&sig={CredentialScrubber.PLACE_HOLDER}")
@pytest.mark.unittest
class TestCredentialScrubber:
    def test_scrub_signature_in_sas_uri(self):
input_str, ground_truth = mock_sas_uri()
assert CredentialScrubber().scrub(input_str) == ground_truth
def test_scrub_key_in_connection_string(self):
input_str, ground_truth = mock_connection_string()
output = CredentialScrubber().scrub(input_str)
assert output == ground_truth
def test_add_regex(self):
scrubber = CredentialScrubber()
scrubber.add_regex(r"(?<=credential=)[^\s;&]+")
assert scrubber.scrub("test&credential=credential") == f"test&credential={CredentialScrubber.PLACE_HOLDER}"
def test_add_str(self):
scrubber = CredentialScrubber()
scrubber.add_str(None)
assert len(scrubber.custom_str_set) == 0
scrubber.add_str("credential")
assert len(scrubber.custom_str_set) == 1
assert scrubber.scrub("test&secret=credential") == f"test&secret={CredentialScrubber.PLACE_HOLDER}"
def test_add_str_length_threshold(self):
"""If the secret is too short (length <= 2 chars), it will not be scrubbed."""
scrubber = CredentialScrubber()
scrubber.add_str("yy")
assert scrubber.scrub("test&secret=yy") == "test&secret=yy"
def test_normal_str_not_affected(self):
assert CredentialScrubber().scrub("no secret") == "no secret"
def test_clear(self):
scrubber = CredentialScrubber()
scrubber.add_str("credential")
scrubber.add_regex(r"(?<=credential=)[^\s;&]+")
assert len(scrubber.custom_str_set) == 1
assert len(scrubber.custom_regex_set) == 1
scrubber.clear()
assert len(scrubber.custom_str_set) == 0
assert len(scrubber.custom_regex_set) == 0
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_multimedia_utils.py | import re
from pathlib import Path
from unittest.mock import MagicMock, mock_open, patch
import pytest
from promptflow._utils._errors import InvalidImageInput, LoadMultimediaDataError
from promptflow._utils.multimedia_utils import (
_create_image_from_base64,
_create_image_from_file,
_create_image_from_url,
_process_multimedia_dict_recursively,
_process_recursively,
convert_multimedia_data_to_base64,
create_image,
load_multimedia_data,
persist_multimedia_data,
resolve_multimedia_data_recursively,
)
from promptflow.contracts.flow import FlowInputDefinition
from promptflow.contracts.multimedia import Image
from promptflow.contracts.tool import ValueType
from ...utils import DATA_ROOT
TEST_IMAGE_PATH = DATA_ROOT / "logo.jpg"
@pytest.mark.unittest
class TestMultimediaUtils:
@pytest.mark.parametrize("image_path", ["logo.jpg", "logo.png", "logo.webp", "logo.gif"])
def test_create_image_from_base64(self, image_path):
image = _create_image_from_file(DATA_ROOT / image_path)
base64_str = image.to_base64()
image_from_base64 = _create_image_from_base64(base64_str)
assert str(image) == str(image_from_base64)
format = image_path.split(".")[-1]
mime_type = f"image/{format}" if format != "jpg" else "image/jpeg"
assert mime_type == image_from_base64._mime_type
@patch("requests.get")
def test_create_image_from_url_with_mime_type(self, mock_get):
url = "https://example.com/image.jpg"
content = b"image content"
mime_type = "image/jpeg"
mock_get.return_value = MagicMock(status_code=200, content=content)
image = _create_image_from_url(url, mime_type)
assert isinstance(image, Image)
assert image._mime_type == mime_type
assert image.source_url == url
@patch("requests.get")
def test_create_image_from_url_failure(self, mock_get):
url = "https://example.com/image.jpg"
message = "Failed to fetch image"
code = 404
mock_get.return_value = MagicMock(status_code=code, text=message)
with pytest.raises(InvalidImageInput) as ex:
_create_image_from_url(url)
expected_message = f"Failed to fetch image from URL: {url}. Error code: {code}. Error message: {message}."
assert str(ex.value) == expected_message
def test_create_image_with_dict(self, mocker):
## From path
image_dict = {"data:image/jpg;path": TEST_IMAGE_PATH}
image_from_path = create_image(image_dict)
assert image_from_path._mime_type == "image/jpg"
## From base64
image_dict = {"data:image/jpg;base64": image_from_path.to_base64()}
image_from_base64 = create_image(image_dict)
assert str(image_from_path) == str(image_from_base64)
assert image_from_base64._mime_type == "image/jpg"
## From url
mocker.patch("requests.get", return_value=mocker.Mock(content=image_from_path, status_code=200))
image_dict = {"data:image/jpg;url": ""}
image_from_url = create_image(image_dict)
assert str(image_from_path) == str(image_from_url)
assert image_from_url._mime_type == "image/jpg"
mocker.patch("requests.get", return_value=mocker.Mock(content=None, status_code=404))
with pytest.raises(InvalidImageInput) as ex:
create_image(image_dict)
assert "Failed to fetch image from URL" in ex.value.message_format
def test_create_image_with_string(self, mocker):
## From path
image_from_path = create_image(str(TEST_IMAGE_PATH))
assert image_from_path._mime_type == "image/jpeg"
# From base64
image_from_base64 = create_image(image_from_path.to_base64())
assert str(image_from_path) == str(image_from_base64)
assert image_from_base64._mime_type == "image/jpeg"
## From url
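        # Patch the helpers so the plain string below is treated as a URL and fetched via the mocked requests.get.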
mocker.patch("promptflow._utils.multimedia_utils._is_url", return_value=True)
mocker.patch("promptflow._utils.multimedia_utils._is_base64", return_value=False)
mocker.patch("requests.get", return_value=mocker.Mock(content=image_from_path, status_code=200))
image_from_url = create_image("Test")
assert str(image_from_path) == str(image_from_url)
assert image_from_url._mime_type == "image/jpeg"
## From image
image_from_image = create_image(image_from_path)
assert str(image_from_path) == str(image_from_image)
def test_create_image_with_invalid_cases(self):
# Test invalid input type
with pytest.raises(InvalidImageInput) as ex:
create_image(0)
assert "Unsupported image input type" in ex.value.message_format
# Test invalid image dict
with pytest.raises(InvalidImageInput) as ex:
invalid_image_dict = {"invalid_image": "invalid_image"}
create_image(invalid_image_dict)
assert "Invalid image input format" in ex.value.message_format
# Test none or empty input value
with pytest.raises(InvalidImageInput) as ex:
create_image(None)
assert "Unsupported image input type" in ex.value.message_format
with pytest.raises(InvalidImageInput) as ex:
create_image("")
assert "The image input should not be empty." in ex.value.message_format
    def test_persist_multimedia_data(self, mocker):
image = _create_image_from_file(TEST_IMAGE_PATH)
mocker.patch("builtins.open", mock_open())
data = {"image": image, "images": [image, image, "other_data"], "other_data": "other_data"}
persisted_data = persist_multimedia_data(data, base_dir=Path(__file__).parent)
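        # Persisted images should be written to files named with a generated UUID and a .jpeg extension.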
file_name = re.compile(r"^[0-9a-z]{8}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{12}.jpeg$")
assert re.match(file_name, persisted_data["image"]["data:image/jpeg;path"])
assert re.match(file_name, persisted_data["images"][0]["data:image/jpeg;path"])
assert re.match(file_name, persisted_data["images"][1]["data:image/jpeg;path"])
    def test_convert_multimedia_data_to_base64(self):
image = _create_image_from_file(TEST_IMAGE_PATH)
data = {"image": image, "images": [image, image, "other_data"], "other_data": "other_data"}
base64_data = convert_multimedia_data_to_base64(data)
assert base64_data == {
"image": image.to_base64(),
"images": [image.to_base64(), image.to_base64(), "other_data"],
"other_data": "other_data",
}
base64_data = convert_multimedia_data_to_base64(data, with_type=True)
prefix = f"data:{image._mime_type};base64,"
assert base64_data == {
"image": prefix + image.to_base64(),
"images": [prefix + image.to_base64(), prefix + image.to_base64(), "other_data"],
"other_data": "other_data",
}
def test_load_multimedia_data(self):
# Case 1: Test normal node
inputs = {
"image": FlowInputDefinition(type=ValueType.IMAGE),
"images": FlowInputDefinition(type=ValueType.LIST),
"object": FlowInputDefinition(type=ValueType.OBJECT),
}
image_dict = {"data:image/jpg;path": str(TEST_IMAGE_PATH)}
line_inputs = {
"image": image_dict,
"images": [image_dict, image_dict],
"object": {"image": image_dict, "other_data": "other_data"},
}
updated_inputs = load_multimedia_data(inputs, line_inputs)
image = _create_image_from_file(TEST_IMAGE_PATH)
assert updated_inputs == {
"image": image,
"images": [image, image],
"object": {"image": image, "other_data": "other_data"},
}
# Case 2: Test aggregation node
line_inputs = {
"image": [image_dict, image_dict],
"images": [[image_dict, image_dict], [image_dict]],
"object": [{"image": image_dict, "other_data": "other_data"}, {"other_data": "other_data"}],
}
updated_inputs = load_multimedia_data(inputs, line_inputs)
assert updated_inputs == {
"image": [image, image],
"images": [[image, image], [image]],
"object": [{"image": image, "other_data": "other_data"}, {"other_data": "other_data"}],
}
# Case 3: Test invalid input type
with pytest.raises(LoadMultimediaDataError) as ex:
line_inputs = {"image": 0}
load_multimedia_data(inputs, line_inputs)
assert (
"Failed to load image for input 'image': "
"(InvalidImageInput) Unsupported image input type") in ex.value.message
def test_resolve_multimedia_data_recursively(self):
image_dict = {"data:image/jpg;path": "logo.jpg"}
value = {
"image": image_dict,
"images": [image_dict, image_dict],
"object": {"image": image_dict, "other_data": "other_data"},
}
input_dir = TEST_IMAGE_PATH
updated_value = resolve_multimedia_data_recursively(input_dir, value)
updated_image_dict = {"data:image/jpg;path": str(DATA_ROOT / "logo.jpg")}
assert updated_value == {
"image": updated_image_dict,
"images": [updated_image_dict, updated_image_dict],
"object": {"image": updated_image_dict, "other_data": "other_data"},
}
def test_process_recursively(self):
image = _create_image_from_file(TEST_IMAGE_PATH)
value = {"image": image, "images": [image, image], "object": {"image": image, "other_data": "other_data"}}
process_funcs = {Image: lambda x: str(x)}
updated_value = _process_recursively(value, process_funcs)
image_str = str(image)
assert updated_value == {
"image": image_str,
"images": [image_str, image_str],
"object": {"image": image_str, "other_data": "other_data"},
}
assert value != updated_value
def test_process_recursively_inplace(self):
image = _create_image_from_file(TEST_IMAGE_PATH)
value = {"image": image, "images": [image, image], "object": {"image": image, "other_data": "other_data"}}
process_funcs = {Image: lambda x: str(x)}
_process_recursively(value, process_funcs, inplace=True)
image_str = str(image)
assert value == {
"image": image_str,
"images": [image_str, image_str],
"object": {"image": image_str, "other_data": "other_data"},
}
def test_process_multimedia_dict_recursively(self):
def process_func(image_dict):
return "image_placeholder"
image_dict = {"data:image/jpg;path": "logo.jpg"}
value = {
"image": image_dict,
"images": [image_dict, image_dict],
"object": {"image": image_dict, "other_data": "other_data"},
}
updated_value = _process_multimedia_dict_recursively(value, process_func)
assert updated_value == {
"image": "image_placeholder",
"images": ["image_placeholder", "image_placeholder"],
"object": {"image": "image_placeholder", "other_data": "other_data"},
}
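# Illustrative sketch, not part of the original tests: the three dictionary formats accepted
# by create_image, as exercised above. Each key follows "data:<mime_type>;<resource_kind>";
# the URL and base64 payload below are hypothetical placeholders.
def example_supported_image_dicts():
    return [
        {"data:image/jpg;path": str(TEST_IMAGE_PATH)},  # local file path
        {"data:image/jpg;base64": "<base64-encoded image bytes>"},  # inline payload
        {"data:image/jpg;url": "https://example.com/logo.jpg"},  # remote resource
    ]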
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_multimedia_data_converter.py | from pathlib import Path
from unittest.mock import Mock
import pytest
from promptflow._utils.multimedia_data_converter import (
AbstractMultimediaInfoConverter,
MultimediaConverter,
MultimediaFormatAdapter20231201,
MultimediaInfo,
ResourceType,
)
@pytest.mark.unittest
class TestMultimediaConverter:
def test_convert_content_recursively(self):
converter = MultimediaConverter(Path("flow.yaml"))
# Don't convert anything.
content = {
"image": {"data:image/jpg;url": "https://example.com/logo.jpg"},
"images": [
{"data:image/jpg;url": "https://example.com/logo.jpg"},
{"data:image/jpg;base64": "base64 string"},
],
"object": {"image": {"data:image/png;path": "random_path"}, "other_data": "other_data"},
}
mock_converter = Mock(spec=AbstractMultimediaInfoConverter)
mock_converter.convert.side_effect = lambda x: x
result = converter.convert_content_recursively(content, mock_converter)
assert result == content
# Convert all valid images.
mock_converter.convert.side_effect = lambda x: MultimediaInfo("image/jpg", ResourceType("path"), "logo.jpg")
result = converter.convert_content_recursively(content, mock_converter)
expected_result = {
"image": {"data:image/jpg;path": "logo.jpg"},
"images": [
{"data:image/jpg;path": "logo.jpg"},
{"data:image/jpg;path": "logo.jpg"},
],
"object": {"image": {"data:image/jpg;path": "logo.jpg"}, "other_data": "other_data"},
}
assert result == expected_result
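# Minimal sketch, not part of the original tests: a concrete converter that rewrites every
# recognized multimedia entry to a fixed local path, mirroring the Mock side_effect above.
# Assumptions: MultimediaConverter only invokes `convert(info) -> MultimediaInfo` on it, and
# "logo.jpg" is a hypothetical target path.
class FixedPathConverter(AbstractMultimediaInfoConverter):
    def convert(self, info: MultimediaInfo) -> MultimediaInfo:
        # Ignore the incoming resource and always point at the same local file.
        return MultimediaInfo("image/jpg", ResourceType.PATH, "logo.jpg")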
@pytest.mark.unittest
class TestMultimediaFormatAdapter20231201:
def test_is_valid_format(self):
adapter = MultimediaFormatAdapter20231201()
assert adapter.is_valid_format({"data:image/jpg;path": "logo.jpg"})
assert adapter.is_valid_format({"data:image/jpg;url": "https://example.com/logo.jpg"})
assert not adapter.is_valid_format({"data:audio/mp3;path": "audio.mp3"})
assert not adapter.is_valid_format({"data:video/mp4;url": "https://example.com/video.mp4"})
def test_extract_info(self):
adapter = MultimediaFormatAdapter20231201()
# Valid formats
expected_result = MultimediaInfo("image/jpg", ResourceType.PATH, "random_path")
assert adapter.extract_info({"data:image/jpg;path": "random_path"}) == expected_result
expected_result = MultimediaInfo("image/jpg", ResourceType.URL, "random_url")
assert adapter.extract_info({"data:image/jpg;url": "random_url"}) == expected_result
expected_result = MultimediaInfo("image/jpg", ResourceType.BASE64, "random_base64")
assert adapter.extract_info({"data:image/jpg;base64": "random_base64"}) == expected_result
# Invalid format
assert adapter.extract_info({"data:video/mp4;url": "https://example.com/video.mp4"}) is None
assert adapter.extract_info({"data:image/mp4;url2": "https://example.com/video.mp4"}) is None
assert adapter.extract_info({"content:image/mp4;path": "random_path"}) is None
def test_create_data(self):
adapter = MultimediaFormatAdapter20231201()
info = MultimediaInfo("image/jpg", ResourceType.PATH, "random_path")
expected_result = {"data:image/jpg;path": "random_path"}
assert adapter.create_data(info) == expected_result
info = MultimediaInfo("image/jpg", ResourceType.URL, "random_url")
expected_result = {"data:image/jpg;url": "random_url"}
assert adapter.create_data(info) == expected_result
info = MultimediaInfo("image/jpg", ResourceType.BASE64, "base64 string")
expected_result = {"data:image/jpg;base64": "base64 string"}
assert adapter.create_data(info) == expected_result
# Bad case when client provides invalid resource type.
info = MultimediaInfo("image/jpg", "path", "base64 string")
expected_result = {"data:image/jpg;base64": "base64 string"}
with pytest.raises(AttributeError):
adapter.create_data(info)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_core/test_connection_manager.py | import pytest
from promptflow._core.connection_manager import ConnectionManager
from promptflow.connections import AzureOpenAIConnection
from promptflow.contracts.tool import ConnectionType
def get_connection_dict():
return {
"azure_open_ai_connection": {
"type": "AzureOpenAIConnection",
"value": {
"api_key": "<azure-openai-key>",
"api_base": "<api-base>",
"api_type": "azure",
"api_version": "2023-07-01-preview",
},
},
"custom_connection": {
"type": "CustomConnection",
"value": {
"api_key": "<your-key>",
"url": "https://api.bing.microsoft.com/v7.0/search",
},
"module": "promptflow.connections",
"secret_keys": ["api_key"],
},
}
@pytest.mark.unittest
class TestConnectionManager:
def test_build_connections(self):
new_connection = get_connection_dict()
# Add a key that does not exist
new_connection["azure_open_ai_connection"]["value"]["not_exist"] = "test"
connection_manager = ConnectionManager(new_connection)
assert len(connection_manager._connections) == 2
assert isinstance(connection_manager.get("azure_open_ai_connection"), AzureOpenAIConnection)
assert connection_manager.to_connections_dict() == new_connection
def test_serialize(self):
new_connection = get_connection_dict()
connection_manager = ConnectionManager(new_connection)
assert (
ConnectionType.serialize_conn(connection_manager.get("azure_open_ai_connection"))
== "azure_open_ai_connection"
)
assert ConnectionType.serialize_conn(connection_manager.get("custom_connection")) == "custom_connection"
def test_get_secret_list(self):
new_connection = get_connection_dict()
connection_manager = ConnectionManager(new_connection)
expected_list = ["<azure-openai-key>", "<your-key>"]
assert set(connection_manager.get_secret_list()) == set(expected_list)
def test_is_secret(self):
new_connection = get_connection_dict()
connection_manager = ConnectionManager(new_connection)
connection = connection_manager.get("custom_connection")
assert connection.is_secret("api_key") is True
assert connection.is_secret("url") is False
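# Usage sketch, not part of the original tests: scrub every secret value known to the
# ConnectionManager from an arbitrary log line, using only the APIs exercised above.
def example_mask_secrets(line: str) -> str:
    connection_manager = ConnectionManager(get_connection_dict())
    for secret in connection_manager.get_secret_list():
        line = line.replace(secret, "***")
    return line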
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_core/test_run_tracker.py | import pytest
from promptflow._core._errors import RunRecordNotFound
from promptflow._core.generator_proxy import GeneratorProxy
from promptflow._core.run_tracker import RunTracker
from promptflow.connections import AzureOpenAIConnection
from promptflow.contracts.run_info import Status
class UnserializableClass:
def __init__(self, data: str):
self.data = data
@pytest.mark.unittest
class TestRunTracker:
def test_run_tracker(self):
# TODO: Refactor this test case, it's very confusing now.
# Initialize run tracker with dummy run storage
run_tracker = RunTracker.init_dummy()
# Start flow run
run_tracker.start_flow_run("test_flow_id", "test_root_run_id", "test_flow_run_id")
assert len(run_tracker._flow_runs) == 1
assert run_tracker._current_run_id == "test_flow_run_id"
flow_input = {"flow_input": "input_0"}
run_tracker.set_inputs("test_flow_run_id", flow_input)
# Start node runs
run_info = run_tracker.start_node_run("node_0", "test_root_run_id", "test_flow_run_id", "run_id_0", index=0)
run_info.index = 0
run_info = run_tracker.start_node_run("node_0", "test_root_run_id", "test_flow_run_id", "run_id_1", index=1)
run_info.index = 1
run_tracker.start_node_run("node_aggr", "test_root_run_id", "test_flow_run_id", "run_id_aggr", index=None)
assert len(run_tracker._node_runs) == 3
assert run_tracker._current_run_id == "run_id_aggr"
# Test collect_all_run_infos_as_dicts
run_tracker.allow_generator_types = True
run_tracker.set_inputs(
"run_id_0",
{"input": "input_0", "connection": AzureOpenAIConnection("api_key", "api_base")}
)
run_tracker.set_inputs(
"run_id_1",
{"input": "input_1", "generator": GeneratorProxy(item for item in range(10))}
)
run_infos = run_tracker.collect_all_run_infos_as_dicts()
assert len(run_infos["flow_runs"]) == 1
assert len(run_infos["node_runs"]) == 3
assert run_infos["node_runs"][0]["inputs"] == {"input": "input_0", "connection": "AzureOpenAIConnection"}
assert run_infos["node_runs"][1]["inputs"] == {"input": "input_1", "generator": []}
# Test end run with normal result
result = {"result": "result"}
run_info_0 = run_tracker.end_run(run_id="run_id_0", result=result)
assert run_info_0.status == Status.Completed
assert run_info_0.output == result
# Test end run with unserializable result
result = {"unserialized_value": UnserializableClass("test")}
run_info_1 = run_tracker.end_run(run_id="run_id_1", result=result)
assert run_info_1.status == Status.Completed
assert run_info_1.output == str(result)
# Test end run with invalid run id
with pytest.raises(RunRecordNotFound):
run_tracker.end_run(run_id="invalid_run_id")
# Test end run with exception
ex = Exception("Failed")
run_info_aggr = run_tracker.end_run(run_id="run_id_aggr", ex=ex)
assert run_info_aggr.status == Status.Failed
assert run_info_aggr.error["message"] == "Failed"
# Test end flow run with unserializable result
result = {"unserialized_value": UnserializableClass("test")}
run_info_flow = run_tracker.end_run(run_id="test_flow_run_id", result=result)
assert run_info_flow.status == Status.Failed
assert "The output 'unserialized_value' for flow is incorrect." in run_info_flow.error["message"]
# Test _update_flow_run_info_with_node_runs
run_info_0.api_calls, run_info_0.system_metrics = [{"name": "chat"}], {"total_tokens": 10}
run_info_1.api_calls, run_info_1.system_metrics = [{"name": "completion"}], {"total_tokens": 20}
run_info_aggr.api_calls, run_info_aggr.system_metrics = [
{"name": "chat"}, {"name": "completion"}], {"total_tokens": 30}
run_tracker._update_flow_run_info_with_node_runs(run_info_flow)
assert len(run_info_flow.api_calls) == 1, "There should be only one top level api call for flow run."
assert run_info_flow.system_metrics["total_tokens"] == 60
assert run_info_flow.api_calls[0]["name"] == "flow"
assert run_info_flow.api_calls[0]["node_name"] == "flow"
assert run_info_flow.api_calls[0]["type"] == "Flow"
assert run_info_flow.api_calls[0]["system_metrics"]["total_tokens"] == 60
assert isinstance(run_info_flow.api_calls[0]["start_time"], float)
assert isinstance(run_info_flow.api_calls[0]["end_time"], float)
assert len(run_info_flow.api_calls[0]["children"]) == 4, "There should be 4 children under root."
# Test get_status_summary
status_summary = run_tracker.get_status_summary("test_root_run_id")
assert status_summary == {
"__pf__.lines.completed": 0,
"__pf__.lines.failed": 1,
"__pf__.nodes.node_0.completed": 2,
"__pf__.nodes.node_aggr.completed": 0,
}
def test_run_tracker_flow_run_without_node_run(self):
"""When line timeout, there will be flow run info without node run info."""
# Initialize run tracker with dummy run storage
run_tracker = RunTracker.init_dummy()
# Start flow run
run_tracker.start_flow_run("test_flow_id", "test_root_run_id", "test_flow_run_id_0", index=0)
run_tracker.end_run("test_flow_run_id_0", ex=Exception("Timeout"))
run_tracker.start_flow_run("test_flow_id", "test_root_run_id", "test_flow_run_id_1", index=1)
run_tracker.end_run("test_flow_run_id_1", result={"result": "result"})
assert len(run_tracker._flow_runs) == 2
# Start node runs
run_tracker.start_node_run("node_0", "test_root_run_id", "test_flow_run_id_2", "test_node_run_id_1", index=0)
run_tracker.end_run("test_node_run_id_1", result={"result": "result"})
assert len(run_tracker._node_runs) == 1
status_summary = run_tracker.get_status_summary("test_root_run_id")
assert status_summary == {
"__pf__.lines.completed": 1,
"__pf__.lines.failed": 1,
"__pf__.nodes.node_0.completed": 1,
}
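# Minimal lifecycle sketch, not part of the original tests: one flow run containing a single
# node run, using only RunTracker APIs exercised above; all ids are arbitrary placeholders.
def example_run_tracker_lifecycle():
    tracker = RunTracker.init_dummy()
    tracker.start_flow_run("flow_id", "root_run_id", "flow_run_id", index=0)
    tracker.start_node_run("node_0", "root_run_id", "flow_run_id", "node_run_id_0", index=0)
    tracker.end_run("node_run_id_0", result={"answer": "42"})
    tracker.end_run("flow_run_id", result={"answer": "42"})
    return tracker.get_status_summary("root_run_id")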
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_core/test_tools_manager.py | import textwrap
from pathlib import Path
from unittest.mock import patch
import pytest
from mock import MagicMock
from promptflow import tool
from promptflow._core._errors import InputTypeMismatch, InvalidSource, PackageToolNotFoundError
from promptflow._core.tools_manager import (
BuiltinsManager,
ToolLoader,
collect_package_tools,
collect_package_tools_and_connections,
)
from promptflow._utils.yaml_utils import load_yaml_string
from promptflow.contracts.flow import InputAssignment, InputValueType, Node, ToolSource, ToolSourceType
from promptflow.contracts.tool import Tool, ToolType
from promptflow.exceptions import UserErrorException
@pytest.mark.unittest
class TestToolLoader:
def test_load_tool_for_node_with_invalid_node(self):
tool_loader = ToolLoader(working_dir="test_working_dir")
node: Node = Node(name="test", tool="test_tool", inputs={}, type=ToolType.PYTHON)
with pytest.raises(UserErrorException, match="Node test does not have source defined."):
tool_loader.load_tool_for_node(node)
node: Node = Node(
name="test", tool="test_tool", inputs={}, type=ToolType.PYTHON, source=ToolSource(type="invalid_type")
)
with pytest.raises(
NotImplementedError, match="Tool source type invalid_type for python tool is not supported yet."
):
tool_loader.load_tool_for_node(node)
node: Node = Node(
name="test", tool="test_tool", inputs={}, type=ToolType.CUSTOM_LLM, source=ToolSource(type="invalid_type")
)
with pytest.raises(
NotImplementedError, match="Tool source type invalid_type for custom_llm tool is not supported yet."
):
tool_loader.load_tool_for_node(node)
node: Node = Node(
name="test", tool="test_tool", inputs={}, type="invalid_type", source=ToolSource(type=ToolSourceType.Code)
)
with pytest.raises(NotImplementedError, match="Tool type invalid_type is not supported yet."):
tool_loader.load_tool_for_node(node)
def test_load_tool_for_package_node(self, mocker):
package_tools = {"test_tool": Tool(name="test_tool", type=ToolType.PYTHON, inputs={}).serialize()}
mocker.patch("promptflow._core.tools_manager.collect_package_tools", return_value=package_tools)
tool_loader = ToolLoader(
working_dir="test_working_dir", package_tool_keys=["promptflow._core.tools_manager.collect_package_tools"]
)
node: Node = Node(
name="test",
tool="test_tool",
inputs={},
type=ToolType.PYTHON,
source=ToolSource(type=ToolSourceType.Package, tool="test_tool"),
)
tool = tool_loader.load_tool_for_node(node)
assert tool.name == "test_tool"
node: Node = Node(
name="test",
tool="test_tool",
inputs={},
type=ToolType.PYTHON,
source=ToolSource(type=ToolSourceType.Package, tool="invalid_tool"),
)
msg = (
"Package tool 'invalid_tool' is not found in the current environment. "
"All available package tools are: ['test_tool']."
)
with pytest.raises(PackageToolNotFoundError) as ex:
tool_loader.load_tool_for_node(node)
assert str(ex.value) == msg
def test_load_tool_for_package_node_with_legacy_tool_id(self, mocker):
package_tools = {
"new_tool_1": Tool(
name="new tool 1", type=ToolType.PYTHON, inputs={}, deprecated_tools=["old_tool_1"]
).serialize(),
"new_tool_2": Tool(
name="new tool 1", type=ToolType.PYTHON, inputs={}, deprecated_tools=["old_tool_2"]
).serialize(),
"old_tool_2": Tool(name="old tool 2", type=ToolType.PYTHON, inputs={}).serialize(),
}
mocker.patch("promptflow._core.tools_manager.collect_package_tools", return_value=package_tools)
tool_loader = ToolLoader(working_dir="test_working_dir", package_tool_keys=list(package_tools.keys()))
node_with_legacy_tool: Node = Node(
name="test_legacy_tool",
tool="old_tool_1",
inputs={},
type=ToolType.PYTHON,
source=ToolSource(type=ToolSourceType.Package, tool="old_tool_1"),
)
assert tool_loader.load_tool_for_node(node_with_legacy_tool).name == "new tool 1"
node_with_legacy_tool_but_in_package_tools: Node = Node(
name="test_legacy_tool_but_in_package_tools",
tool="old_tool_2",
inputs={},
type=ToolType.PYTHON,
source=ToolSource(type=ToolSourceType.Package, tool="old_tool_2"),
)
assert tool_loader.load_tool_for_node(node_with_legacy_tool_but_in_package_tools).name == "old tool 2"
def test_load_tool_for_script_node(self):
working_dir = Path(__file__).parent
tool_loader = ToolLoader(working_dir=working_dir)
file = "test_tools_manager.py"
node: Node = Node(
name="test",
tool="sample_tool",
inputs={},
type=ToolType.PYTHON,
source=ToolSource(type=ToolSourceType.Code, path=file),
)
tool = tool_loader.load_tool_for_node(node)
assert tool.name == "sample_tool"
@pytest.mark.parametrize(
"source_path, error_message",
[
(None, "Load tool failed for node 'test'. The source path is 'None'."),
("invalid_file.py", "Load tool failed for node 'test'. Tool file 'invalid_file.py' can not be found."),
],
)
def test_load_tool_for_script_node_exception(self, source_path, error_message):
working_dir = Path(__file__).parent
tool_loader = ToolLoader(working_dir=working_dir)
node: Node = Node(
name="test",
tool="sample_tool",
inputs={},
type=ToolType.PYTHON,
source=ToolSource(type=ToolSourceType.Code, path=source_path),
)
with pytest.raises(InvalidSource) as ex:
tool_loader.load_tool_for_script_node(node)
assert str(ex.value) == error_message
# This tool is for testing tools_manager.ToolLoader.load_tool_for_script_node
@tool
def sample_tool(input: str):
return input
@pytest.mark.unittest
class TestToolsManager:
def test_collect_package_tools_if_node_source_tool_is_legacy(self):
legacy_node_source_tools = ["content_safety_text.tools.content_safety_text_tool.analyze_text"]
package_tools = collect_package_tools(legacy_node_source_tools)
assert "promptflow.tools.azure_content_safety.analyze_text" in package_tools.keys()
def test_collect_package_tools_and_connections(self, install_custom_tool_pkg):
keys = ["my_tool_package.tools.my_tool_2.MyTool.my_tool"]
tools, specs, templates = collect_package_tools_and_connections(keys)
assert len(tools) == 1
assert specs == {
"my_tool_package.connections.MyFirstConnection": {
"connectionCategory": "CustomKeys",
"flowValueType": "CustomConnection",
"connectionType": "MyFirstConnection",
"ConnectionTypeDisplayName": "MyFirstConnection",
"configSpecs": [
{"name": "api_key", "displayName": "Api Key", "configValueType": "Secret", "isOptional": False},
{"name": "api_base", "displayName": "Api Base", "configValueType": "str", "isOptional": True},
],
"module": "my_tool_package.connections",
"package": "test-custom-tools",
"package_version": "0.0.2",
}
}
expected_template = {
"$schema": "https://azuremlschemas.azureedge.net/promptflow/latest/CustomStrongTypeConnection.schema.json",
"name": "to_replace_with_connection_name",
"type": "custom",
"custom_type": "MyFirstConnection",
"module": "my_tool_package.connections",
"package": "test-custom-tools",
"package_version": "0.0.2",
"configs": {"api_base": "This is my first connection."},
"secrets": {"api_key": "to_replace_with_api_key"},
}
loaded_yaml = load_yaml_string(templates["my_tool_package.connections.MyFirstConnection"])
assert loaded_yaml == expected_template
keys = ["my_tool_package.tools.my_tool_with_custom_strong_type_connection.my_tool"]
tools, specs, templates = collect_package_tools_and_connections(keys)
assert len(templates) == 1
expected_template = """
name: "to_replace_with_connection_name"
type: custom
custom_type: MyCustomConnection
module: my_tool_package.tools.my_tool_with_custom_strong_type_connection
package: test-custom-tools
package_version: 0.0.2
configs:
api_url: "This is a fake api url." # String type. The api url.
secrets: # must-have
api_key: "to_replace_with_api_key" # String type. The api key.
"""
content = templates["my_tool_package.tools.my_tool_with_custom_strong_type_connection.MyCustomConnection"]
expected_template_str = textwrap.dedent(expected_template)
assert expected_template_str in content
def test_gen_dynamic_list(self, mocked_ws_triple, mock_module_with_list_func):
from promptflow._sdk._utils import _gen_dynamic_list
func_path = "my_tool_package.tools.tool_with_dynamic_list_input.my_list_func"
func_kwargs = {"prefix": "My"}
result = _gen_dynamic_list({"func_path": func_path, "func_kwargs": func_kwargs})
assert len(result) == 2
# test gen_dynamic_list with ws_triple.
with patch("promptflow._cli._utils.get_workspace_triad_from_local", return_value=mocked_ws_triple):
result = _gen_dynamic_list({"func_path": func_path, "func_kwargs": func_kwargs})
assert len(result) == 2
@pytest.mark.unittest
class TestBuiltinsManager:
def test_load_tool_from_module(
self,
):
# Test case 1: When class_name is None
module = MagicMock()
tool_name = "test_tool"
module_name = "test_module"
class_name = None
method_name = "test_method"
node_inputs = {"input1": InputAssignment(value_type=InputValueType.LITERAL, value="value1")}
# Mock the behavior of the module and class
module.test_method = MagicMock()
# Call the method
api, init_inputs = BuiltinsManager._load_tool_from_module(
module, tool_name, module_name, class_name, method_name, node_inputs
)
# Assertions
assert api == module.test_method
assert init_inputs == {}
# A non-literal input for an init parameter raises an exception.
module = MagicMock()
tool_name = "test_tool"
module_name = "test_module"
class_name = "TestClass"
method_name = "test_method"
node_inputs = {"input1": InputAssignment(value_type=InputValueType.FLOW_INPUT, value="value1")}
# Mock the behavior of the module and class
module.TestClass = MagicMock()
module.TestClass.get_initialize_inputs = MagicMock(return_value=["input1"])
module.TestClass.get_required_initialize_inputs = MagicMock(return_value=["input1"])
module.TestClass.test_method = MagicMock()
# Call the method
with pytest.raises(InputTypeMismatch) as ex:
BuiltinsManager._load_tool_from_module(module, tool_name, module_name, class_name, method_name, node_inputs)
expected_message = (
"Invalid input for 'test_tool': Initialization input 'input1' requires a literal value, "
"but ${flow.value1} was received."
)
assert expected_message == str(ex.value)
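# Illustrative sketch, not part of the original tests: the minimal Node definition needed to
# load a script tool from a local .py file, mirroring test_load_tool_for_script_node above;
# the node name is a placeholder.
def example_script_node(path: str) -> Node:
    return Node(
        name="example",
        tool="sample_tool",
        inputs={},
        type=ToolType.PYTHON,
        source=ToolSource(type=ToolSourceType.Code, path=path),
    )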
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_core/test_tracer.py | import inspect
import pytest
from promptflow._core.generator_proxy import GeneratorProxy
from promptflow._core.tracer import Tracer, _create_trace_from_function_call, _traced, trace
from promptflow.connections import AzureOpenAIConnection
from promptflow.contracts.trace import Trace, TraceType
def generator():
for i in range(3):
yield i
@pytest.mark.unittest
class TestTracer:
def test_end_tracing(self):
# Activate the tracer in the current context
tracer = Tracer("test_run_id")
tracer._activate_in_context()
# Assert that there is an active tracer instance
assert Tracer.active_instance() is tracer
# End tracing and get the traces as a JSON string
traces = Tracer.end_tracing()
# Assert that the traces is a list
assert isinstance(traces, list)
# Assert that there is no active tracer instance after ending tracing
assert Tracer.active_instance() is None
# Test the raise_ex argument of the end_tracing method
with pytest.raises(Exception):
# Try to end tracing again with raise_ex=True
Tracer.end_tracing(raise_ex=True)
# Try to end tracing again with raise_ex=False
traces = Tracer.end_tracing(raise_ex=False)
# Assert that the traces are empty
assert not traces
def test_start_tracing(self):
# Assert that there is no active tracer instance before starting tracing
assert Tracer.active_instance() is None
# Start tracing with a mock run_id
Tracer.start_tracing("test_run_id")
# Assert that there is an active tracer instance after starting tracing
assert Tracer.active_instance() is not None
# Assert that the active tracer instance has the correct run_id
assert Tracer.active_instance()._run_id == "test_run_id"
Tracer.end_tracing()
def test_push_pop(self, caplog):
# test the push method with a single trace
Tracer.start_tracing("test_run_id")
tracer = Tracer.active_instance()
trace1 = Trace("test1", inputs=[1, 2, 3], type=TraceType.TOOL)
trace2 = Trace("test2", inputs=[4, 5, 6], type=TraceType.TOOL)
Tracer.push(trace1)
assert tracer._traces == [trace1]
assert tracer._id_to_trace == {trace1.id: trace1}
# test the push method with a nested trace
Tracer.push(trace2)
assert tracer._traces == [trace1] # check if the tracer still has only the first trace in its _traces list
# check if the tracer has both traces in its trace dict
assert tracer._id_to_trace == {trace1.id: trace1, trace2.id: trace2}
assert trace1.children == [trace2] # check if the first trace has the second trace as its child
# test the pop method with generator output
tool_output = generator()
error1 = ValueError("something went wrong")
assert tracer._get_current_trace() is trace2
output = Tracer.pop(output=tool_output, error=error1)
# check output iterator
for i in range(3):
assert next(output) == i
assert isinstance(trace2.output, GeneratorProxy)
assert trace2.error == {
"message": str(error1),
"type": type(error1).__qualname__,
}
assert tracer._get_current_trace() is trace1
# test the pop method with no arguments
output = Tracer.pop()
assert tracer._get_current_trace() is None
assert trace1.output is None
assert output is None
Tracer.end_tracing()
# test the push method with no active tracer
Tracer.push(trace1)
# assert that the warning message is logged
assert "Try to push trace but no active tracer in current context." in caplog.text
def test_unserializable_obj_to_serializable(self):
# assert that the function returns a str object for unserializable objects
assert Tracer.to_serializable(generator) == str(generator)
@pytest.mark.parametrize("obj", [({"name": "Alice", "age": 25}), ([1, 2, 3]), (GeneratorProxy(generator())), (42)])
def test_to_serializable(self, obj):
assert Tracer.to_serializable(obj) == obj
def func_with_no_parameters():
pass
def func_with_args_and_kwargs(arg1, arg2=None, *, kwarg1=None, kwarg2=None):
_ = (arg1, arg2, kwarg1, kwarg2)
async def func_with_args_and_kwargs_async(arg1, arg2=None, *, kwarg1=None, kwarg2=None):
_ = (arg1, arg2, kwarg1, kwarg2)
def func_with_connection_parameter(a: int, conn: AzureOpenAIConnection):
_ = (a, conn)
class MyClass:
def my_method(self, a: int):
_ = a
@pytest.mark.unittest
class TestCreateTraceFromFunctionCall:
"""This class tests the `_create_trace_from_function_call` function."""
def test_basic_fields_are_filled_and_others_are_not(self):
trace = _create_trace_from_function_call(func_with_no_parameters)
# These fields should be filled in this method call.
assert trace.name == "func_with_no_parameters"
assert trace.type == TraceType.FUNCTION
assert trace.inputs == {}
# start_time should be a timestamp, which is a float value currently.
assert isinstance(trace.start_time, float)
# These should be left empty in this method call.
# They will be filled by the tracer later.
assert trace.output is None
assert trace.end_time is None
assert trace.children == []
assert trace.error is None
def test_basic_fields_are_filled_for_async_functions(self):
trace = _create_trace_from_function_call(
func_with_args_and_kwargs_async, args=[1, 2], kwargs={"kwarg1": 3, "kwarg2": 4}
)
assert trace.name == "func_with_args_and_kwargs_async"
assert trace.type == TraceType.FUNCTION
assert trace.inputs == {"arg1": 1, "arg2": 2, "kwarg1": 3, "kwarg2": 4}
def test_trace_name_should_contain_class_name_for_class_methods(self):
obj = MyClass()
trace = _create_trace_from_function_call(obj.my_method, args=[obj, 1])
assert trace.name == "MyClass.my_method"
def test_trace_type_can_be_set_correctly(self):
trace = _create_trace_from_function_call(func_with_no_parameters, trace_type=TraceType.TOOL)
assert trace.type == TraceType.TOOL
def test_args_and_kwargs_are_filled_correctly(self):
trace = _create_trace_from_function_call(
func_with_args_and_kwargs, args=[1, 2], kwargs={"kwarg1": 3, "kwarg2": 4}
)
assert trace.inputs == {"arg1": 1, "arg2": 2, "kwarg1": 3, "kwarg2": 4}
def test_args_called_with_name_should_be_filled_correctly(self):
trace = _create_trace_from_function_call(func_with_args_and_kwargs, args=[1], kwargs={"arg2": 2, "kwarg2": 4})
assert trace.inputs == {"arg1": 1, "arg2": 2, "kwarg2": 4}
def test_kwargs_called_without_name_should_be_filled_correctly(self):
trace = _create_trace_from_function_call(func_with_args_and_kwargs, args=[1, 2, 3], kwargs={"kwarg2": 4})
assert trace.inputs == {"arg1": 1, "arg2": 2, "kwarg1": 3, "kwarg2": 4}
def test_empty_args_should_be_excluded_from_inputs(self):
trace = _create_trace_from_function_call(func_with_args_and_kwargs, args=[1])
assert trace.inputs == {"arg1": 1}
def test_empty_kwargs_should_be_excluded_from_inputs(self):
trace = _create_trace_from_function_call(func_with_args_and_kwargs, kwargs={"kwarg1": 1})
assert trace.inputs == {"kwarg1": 1}
trace = _create_trace_from_function_call(func_with_args_and_kwargs, kwargs={"kwarg2": 2})
assert trace.inputs == {"kwarg2": 2}
def test_args_and_kwargs_should_be_filled_in_called_order(self):
trace = _create_trace_from_function_call(
func_with_args_and_kwargs, args=[1, 2], kwargs={"kwarg2": 4, "kwarg1": 3}
)
assert list(trace.inputs.keys()) == ["arg1", "arg2", "kwarg2", "kwarg1"]
def test_connections_should_be_serialized(self):
conn = AzureOpenAIConnection("test_name", "test_secret")
trace = _create_trace_from_function_call(func_with_connection_parameter, args=[1, conn])
assert trace.inputs == {"a": 1, "conn": "AzureOpenAIConnection"}
def test_self_arg_should_be_excluded_from_inputs(self):
obj = MyClass()
trace = _create_trace_from_function_call(obj.my_method, args=[1])
assert trace.inputs == {"a": 1}
def sync_func(a: int):
return a
async def async_func(a: int):
return a
def sync_error_func(a: int):
a / 0
async def async_error_func(a: int):
a / 0
@pytest.mark.unittest
class TestTraced:
"""This class tests the `_traced` function."""
def test_traced_sync_func_should_be_a_sync_func(self):
assert inspect.iscoroutinefunction(_traced(sync_func)) is False
def test_traced_async_func_should_be_an_async_func(self):
assert inspect.iscoroutinefunction(_traced(async_func)) is True
@pytest.mark.parametrize("func", [sync_func, async_func])
def test_original_function_and_wrapped_function_should_have_same_name(self, func):
traced_func = _traced(func)
assert traced_func.__name__ == func.__name__
@pytest.mark.parametrize("func", [sync_func, async_func])
def test_original_function_and_wrapped_function_attributes_are_set(self, func):
traced_func = _traced(func)
assert getattr(traced_func, "__original_function") == func
@pytest.mark.asyncio
@pytest.mark.parametrize("func", [sync_func, async_func])
async def test_trace_is_not_generated_when_tracer_is_not_active(self, func):
# Do not call Tracer.start_tracing() here
traced_func = _traced(func)
if inspect.iscoroutinefunction(traced_func):
result = await traced_func(1)
else:
result = traced_func(1)
# Check the result is expected
assert result == 1
# Check that no trace is generated
traces = Tracer.end_tracing()
assert len(traces) == 0
@pytest.mark.asyncio
@pytest.mark.parametrize("func", [sync_func, async_func])
async def test_trace_is_generated_when_tracer_is_active(self, func):
Tracer.start_tracing("test_run_id")
traced_func = _traced(func)
if inspect.iscoroutinefunction(traced_func):
result = await traced_func(1)
else:
result = traced_func(1)
# Check the result is expected
assert result == 1
traces = Tracer.end_tracing()
# Check the generated trace is expected
assert len(traces) == 1
trace = traces[0]
assert trace["name"] == func.__qualname__
assert trace["type"] == TraceType.FUNCTION
assert trace["inputs"] == {"a": 1}
assert trace["output"] == 1
assert trace["error"] is None
assert trace["children"] == []
assert isinstance(trace["start_time"], float)
assert isinstance(trace["end_time"], float)
@pytest.mark.asyncio
@pytest.mark.parametrize("func", [sync_error_func, async_error_func])
async def test_trace_is_generated_when_errors_occurred(self, func):
Tracer.start_tracing("test_run_id")
traced_func = _traced(func)
with pytest.raises(ZeroDivisionError):
if inspect.iscoroutinefunction(traced_func):
await traced_func(1)
else:
traced_func(1)
traces = Tracer.end_tracing()
# Check the generated trace is expected
assert len(traces) == 1
trace = traces[0]
assert trace["name"] == func.__qualname__
assert trace["type"] == TraceType.FUNCTION
assert trace["inputs"] == {"a": 1}
assert trace["output"] is None
assert trace["error"] == {"message": "division by zero", "type": "ZeroDivisionError"}
assert trace["children"] == []
assert isinstance(trace["start_time"], float)
assert isinstance(trace["end_time"], float)
@pytest.mark.asyncio
@pytest.mark.parametrize("func", [sync_func, async_func])
async def test_trace_type_can_be_set_correctly(self, func):
Tracer.start_tracing("test_run_id")
traced_func = _traced(func, trace_type=TraceType.TOOL)
if inspect.iscoroutinefunction(traced_func):
result = await traced_func(1)
else:
result = traced_func(1)
assert result == 1
traces = Tracer.end_tracing()
# Check the generated trace is expected
assert len(traces) == 1
trace = traces[0]
assert trace["name"] == func.__qualname__
assert trace["type"] == TraceType.TOOL
@trace
def my_tool(a: int):
return a
@trace
async def my_tool_async(a: int):
return a
@pytest.mark.unittest
class TestTrace:
"""This class tests `trace` function."""
@pytest.mark.asyncio
@pytest.mark.parametrize(
"func",
[
my_tool,
my_tool_async,
],
)
async def test_traces_are_created_correctly(self, func):
Tracer.start_tracing("test_run_id")
if inspect.iscoroutinefunction(func):
result = await func(1)
else:
result = func(1)
assert result == 1
traces = Tracer.end_tracing()
assert len(traces) == 1
trace = traces[0]
assert trace["name"] == func.__qualname__
assert trace["type"] == TraceType.FUNCTION
assert trace["inputs"] == {"a": 1}
assert trace["output"] == 1
assert trace["error"] is None
assert trace["children"] == []
assert isinstance(trace["start_time"], float)
assert isinstance(trace["end_time"], float)
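# Illustrative sketch, not part of the original tests: when traced functions call each other
# while tracing is active, the inner call is recorded as a child of the outer trace, which is
# the nesting behavior exercised by TestTracer.test_push_pop above.
@trace
def example_inner_tool(a: int):
    return a * 2
@trace
def example_outer_tool(a: int):
    return example_inner_tool(a) + 1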
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_core/test_generator_proxy.py | import pytest
from promptflow._core.generator_proxy import GeneratorProxy, generate_from_proxy
def generator():
for i in range(3):
yield i
def iterator():
return iter([0, 1, 2])
@pytest.mark.unittest
def test_generator_proxy_next():
proxy = GeneratorProxy(generator())
assert proxy.items == []
assert next(proxy) == 0
assert next(proxy) == 1
assert next(proxy) == 2
with pytest.raises(StopIteration):
next(proxy)
assert proxy.items == [0, 1, 2]
@pytest.mark.unittest
def test_generator_proxy_iter():
original_generator = generator()
proxy = GeneratorProxy(generator())
for num in proxy:
assert num == next(original_generator)
assert proxy.items == [0, 1, 2]
@pytest.mark.unittest
def test_generate_from_proxy():
proxy = GeneratorProxy(generator())
original_generator = generator()
for i in generate_from_proxy(proxy):
assert i == next(original_generator)
assert proxy.items == [0, 1, 2]
@pytest.mark.unittest
def test_iterator_proxy_next():
proxy = GeneratorProxy(iterator())
assert proxy.items == []
assert next(proxy) == 0
assert next(proxy) == 1
assert next(proxy) == 2
with pytest.raises(StopIteration):
next(proxy)
assert proxy.items == [0, 1, 2]
@pytest.mark.unittest
def test_iterator_proxy_iter():
original_iterator = iterator()
proxy = GeneratorProxy(iterator())
for num in proxy:
assert num == next(original_iterator)
assert proxy.items == [0, 1, 2]
@pytest.mark.unittest
def test_generate_from_iterator_proxy():
proxy = GeneratorProxy(iterator())
original_iterator = iterator()
for i in generate_from_proxy(proxy):
assert i == next(original_iterator)
assert proxy.items == [0, 1, 2]
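# Usage sketch, not part of the original tests: GeneratorProxy records every yielded item in
# `items` while generate_from_proxy replays the stream to a downstream consumer.
def example_generator_proxy_usage():
    proxy = GeneratorProxy(iter(["a", "b"]))
    consumed = list(generate_from_proxy(proxy))
    assert consumed == ["a", "b"]
    assert proxy.items == ["a", "b"]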
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_core/test_tool.py | import inspect
import pytest
from promptflow import tool
from promptflow._core.tool import InputSetting, ToolType
from promptflow._core.tracer import Tracer, TraceType
from promptflow.exceptions import UserErrorException
@tool
def decorated_without_parentheses(a: int):
return a
@tool()
def decorated_with_parentheses(a: int):
return a
@tool
async def decorated_without_parentheses_async(a: int):
return a
@tool()
async def decorated_with_parentheses_async(a: int):
return a
@tool(
name="tool_with_attributes",
description="Sample tool with a lot of attributes",
type=ToolType.LLM,
input_settings=InputSetting(),
streaming_option_parameter="stream",
extra_a="a",
extra_b="b",
)
def tool_with_attributes(stream: bool, a: int, b: int):
return stream, a, b
@pytest.mark.unittest
class TestTool:
"""This class tests the `tool` decorator."""
@pytest.mark.asyncio
@pytest.mark.parametrize(
"func",
[
decorated_with_parentheses,
decorated_without_parentheses,
decorated_with_parentheses_async,
decorated_without_parentheses_async,
],
)
async def test_traces_are_created_correctly(self, func):
Tracer.start_tracing("test_run_id")
if inspect.iscoroutinefunction(func):
result = await func(1)
else:
result = func(1)
assert result == 1
traces = Tracer.end_tracing()
assert len(traces) == 1
trace = traces[0]
assert trace["name"] == func.__qualname__
assert trace["type"] == TraceType.TOOL
assert trace["inputs"] == {"a": 1}
assert trace["output"] == 1
assert trace["error"] is None
assert trace["children"] == []
assert isinstance(trace["start_time"], float)
assert isinstance(trace["end_time"], float)
def test_attributes_are_set_to_the_tool_function(self):
stream, a, b = tool_with_attributes(True, 1, 2)
# Check the results are as expected
assert stream is True
assert a == 1
assert b == 2
# Check the attributes are set to the function
assert getattr(tool_with_attributes, "__tool") is None
assert getattr(tool_with_attributes, "__name") == "tool_with_attributes"
assert getattr(tool_with_attributes, "__description") == "Sample tool with a lot of attributes"
assert getattr(tool_with_attributes, "__type") == ToolType.LLM
assert getattr(tool_with_attributes, "__input_settings") == InputSetting()
assert getattr(tool_with_attributes, "__extra_info") == {"extra_a": "a", "extra_b": "b"}
assert getattr(tool_with_attributes, "_streaming_option_parameter") == "stream"
def test_invalid_tool_type_should_raise_error(self):
with pytest.raises(UserErrorException, match="Tool type invalid_type is not supported yet."):
@tool(type="invalid_type")
def invalid_tool_type():
pass
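# Usage sketch, not part of the original tests: downstream code can read the metadata that the
# @tool decorator attaches to a wrapped function, as asserted above; missing attributes fall
# back to None.
def example_read_tool_metadata(func):
    return {
        "name": getattr(func, "__name", None),
        "type": getattr(func, "__type", None),
        "extra_info": getattr(func, "__extra_info", None),
    }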
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_core/test_metric_logger.py | import pytest
from promptflow._core.metric_logger import MetricLoggerManager, add_metric_logger, log_metric, remove_metric_logger
@pytest.mark.unittest
class TestMetricLogger:
def test_add_and_remove_metric_logger(self):
# define log metric function
metrics = {}
def _log_metric(key, value):
metrics[key] = value
def _log_metric_invalid(key, value, variant_id, extra_param):
metrics[key] = {variant_id: {value: extra_param}}
add_metric_logger(_log_metric)
assert MetricLoggerManager.get_instance()._metric_loggers == [_log_metric]
add_metric_logger(_log_metric)
assert MetricLoggerManager.get_instance()._metric_loggers == [_log_metric]
add_metric_logger(_log_metric_invalid)
assert MetricLoggerManager.get_instance()._metric_loggers == [_log_metric]
add_metric_logger("test")
assert MetricLoggerManager.get_instance()._metric_loggers == [_log_metric]
remove_metric_logger(_log_metric)
assert MetricLoggerManager.get_instance()._metric_loggers == []
def test_log_metric(self):
# define log metric function
metrics = {}
def _log_metric(key, value):
metrics[key] = value
def _log_metric_with_variant_id(key, value, variant_id):
metrics[key] = {variant_id: value}
add_metric_logger(_log_metric)
log_metric("test1", 1)
assert metrics == {"test1": 1}
add_metric_logger(_log_metric_with_variant_id)
log_metric("test2", 1, "line_0")
assert metrics == {"test1": 1, "test2": {"line_0": 1}}
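# Usage sketch, not part of the original tests: register a logger that accepts a variant_id,
# emit one metric, then unregister it to leave no global state behind; the logger signature
# mirrors the ones validated above.
def example_register_logger(sink: dict):
    def _sink(key, value, variant_id):
        sink[key] = {variant_id: value}
    add_metric_logger(_sink)
    log_metric("accuracy", 0.9, "variant_0")
    remove_metric_logger(_sink)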
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_core/test_log_manager.py | import logging
import sys
import time
from multiprocessing.pool import ThreadPool
import pytest
from dateutil.parser import parse
from promptflow._core.log_manager import NodeLogManager, NodeLogWriter
RUN_ID = "dummy_run_id"
NODE_NAME = "dummy_node"
LINE_NUMBER = 1
def assert_print_result(i: int, run_logger: NodeLogWriter):
run_id = f"{RUN_ID}-{i}"
run_logger.set_node_info(run_id, NODE_NAME, LINE_NUMBER)
time.sleep(i / 10)
print(i)
assert_datetime_prefix(run_logger.get_log(run_id), str(i) + "\n")
def is_datetime(string: str) -> bool:
"""Check if a string follows datetime format."""
try:
parse(string)
return True
except ValueError:
return False
def assert_datetime_prefix(string: str, expected_str: str):
"""Assert if string has a datetime prefix, such as:
[2023-04-17T07:49:54+0000] example string
"""
datetime_prefix = string[string.index("[") + 1 : string.index("]")]
inner_str = string[string.index("]") + 2 :]
assert is_datetime(datetime_prefix)
assert inner_str == expected_str
@pytest.mark.unittest
class TestNodeLogManager:
def test_get_logs(self):
with NodeLogManager(record_datetime=False) as lm:
lm.set_node_context(RUN_ID, NODE_NAME, LINE_NUMBER)
print("test")
print("test2")
print("test stderr", file=sys.stderr)
assert lm.get_logs(RUN_ID).get("stdout") == "test\ntest2\n"
assert lm.get_logs(RUN_ID).get("stderr") == "test stderr\n"
lm.clear_node_context(RUN_ID)
assert lm.get_logs(RUN_ID).get("stdout") is None
assert lm.get_logs(RUN_ID).get("stderr") is None
def test_logging(self):
with NodeLogManager(record_datetime=False) as lm:
lm.set_node_context(RUN_ID, NODE_NAME, LINE_NUMBER)
stdout_logger = logging.getLogger("stdout")
stderr_logger = logging.getLogger("stderr")
stdout_logger.addHandler(logging.StreamHandler(stream=sys.stdout))
stderr_logger.addHandler(logging.StreamHandler(stream=sys.stderr))
stdout_logger.warning("test stdout")
stderr_logger.warning("test stderr")
logs = lm.get_logs(RUN_ID)
assert logs.get("stdout") == "test stdout\n"
assert logs.get("stderr") == "test stderr\n"
def test_exit_context_manager(self):
with NodeLogManager() as lm:
assert lm.stdout_logger is sys.stdout
assert lm.stdout_logger != sys.stdout
def test_datetime_prefix(self):
with NodeLogManager(record_datetime=True) as lm:
lm.set_node_context(RUN_ID, NODE_NAME, LINE_NUMBER)
print("test")
print("test2")
output = lm.get_logs(RUN_ID).get("stdout")
outputs = output.split("\n")
assert_datetime_prefix(outputs[0], "test")
assert_datetime_prefix(outputs[1], "test2")
assert outputs[2] == ""
@pytest.mark.unittest
class TestNodeLogWriter:
def test_set_node_info(self):
run_logger = NodeLogWriter(sys.stdout)
assert run_logger.get_log(RUN_ID) is None
run_logger.set_node_info(RUN_ID, NODE_NAME, LINE_NUMBER)
assert run_logger.get_log(RUN_ID) == ""
def test_clear_node_info(self):
run_logger = NodeLogWriter(sys.stdout)
run_logger.clear_node_info(RUN_ID)
run_logger.set_node_info(RUN_ID, NODE_NAME, LINE_NUMBER)
run_logger.clear_node_info(RUN_ID)
assert run_logger.run_id_to_stdout.get(RUN_ID) is None
def test_get_log(self):
run_logger = NodeLogWriter(sys.stdout)
sys.stdout = run_logger
print("test")
assert run_logger.get_log(RUN_ID) is None
run_logger.set_node_info(RUN_ID, NODE_NAME, LINE_NUMBER)
print("test")
assert_datetime_prefix(run_logger.get_log(RUN_ID), "test\n")
run_logger.clear_node_info(RUN_ID)
assert run_logger.get_log(RUN_ID) is None
def test_multi_thread(self):
run_logger = NodeLogWriter(sys.stdout)
sys.stdout = run_logger
with ThreadPool(processes=10) as pool:
results = pool.starmap(assert_print_result, ((i, run_logger) for i in range(10)))
for r in results:
pass
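# Usage sketch, not part of the original tests: capture stdout for a single node and read it
# back, mirroring TestNodeLogManager.test_get_logs above.
def example_capture_node_stdout():
    with NodeLogManager(record_datetime=False) as lm:
        lm.set_node_context(RUN_ID, NODE_NAME, LINE_NUMBER)
        print("hello from node")
        return lm.get_logs(RUN_ID).get("stdout")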
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_core/test_operation_context.py | import threading
import pytest
from promptflow._core.operation_context import OperationContext
from promptflow._version import VERSION
from promptflow.contracts.run_mode import RunMode
def set_run_mode(context: OperationContext, run_mode: RunMode):
"""This method simulates the runtime.execute_request()
It sets the run_mode into the operation context.
"""
context.run_mode = run_mode.name if run_mode is not None else ""
@pytest.mark.unittest
class TestOperationContext:
def test_get_user_agent(self):
operation_context = OperationContext()
assert operation_context.get_user_agent() == f"promptflow/{VERSION}"
operation_context.user_agent = "test_agent/0.0.2"
assert operation_context.get_user_agent() == f"test_agent/0.0.2 promptflow/{VERSION}"
@pytest.mark.parametrize(
"run_mode, expected",
[
(RunMode.Test, "Test"),
(RunMode.SingleNode, "SingleNode"),
(RunMode.Batch, "Batch"),
],
)
def test_run_mode(self, run_mode, expected):
context = OperationContext()
set_run_mode(context, run_mode)
assert context.run_mode == expected
def test_context_dict(self):
context = OperationContext()
context.run_mode = "Flow"
context.user_agent = "test_agent/0.0.2"
context.none_value = None
context_dict = context.get_context_dict()
assert context_dict["run_mode"] == "Flow"
assert context_dict["user_agent"] == "test_agent/0.0.2"
assert context_dict["none_value"] is None
def test_setattr(self):
context = OperationContext()
context.run_mode = "Flow"
assert context["run_mode"] == "Flow"
def test_setattr_non_primitive(self):
# Test set non-primitive type
context = OperationContext()
with pytest.raises(TypeError):
context.foo = [1, 2, 3]
def test_getattr(self):
context = OperationContext()
context["run_mode"] = "Flow"
assert context.run_mode == "Flow"
def test_getattr_missing(self):
context = OperationContext()
with pytest.raises(AttributeError):
context.foo
def test_delattr(self):
# test that delattr works as expected
context = OperationContext()
context.foo = "bar"
del context.foo
assert "foo" not in context
# test that delattr raises AttributeError for non-existent name
with pytest.raises(AttributeError):
del context.baz
def test_append_user_agent(self):
context = OperationContext()
user_agent = ' ' + context.user_agent if 'user_agent' in context else ''
context.append_user_agent("test_agent/0.0.2")
assert context.user_agent == "test_agent/0.0.2" + user_agent
context.append_user_agent("test_agent/0.0.3")
assert context.user_agent == "test_agent/0.0.2 test_agent/0.0.3" + user_agent
def test_get_instance(self):
context1 = OperationContext.get_instance()
context2 = OperationContext.get_instance()
assert context1 is context2
def test_set_batch_input_source_from_inputs_mapping_run(self):
input_mapping = {"input1": "${run.outputs.output1}", "input2": "${run.outputs.output2}"}
context = OperationContext()
context.set_batch_input_source_from_inputs_mapping(input_mapping)
assert context.batch_input_source == "Run"
def test_set_batch_input_source_from_inputs_mapping_data(self):
input_mapping = {"url": "${data.url}"}
context = OperationContext()
context.set_batch_input_source_from_inputs_mapping(input_mapping)
assert context.batch_input_source == "Data"
def test_set_batch_input_source_from_inputs_mapping_none(self):
input_mapping = None
context = OperationContext()
assert not hasattr(context, "batch_input_source")
context.set_batch_input_source_from_inputs_mapping(input_mapping)
assert context.batch_input_source == "Data"
def test_set_batch_input_source_from_inputs_mapping_empty(self):
input_mapping = {}
context = OperationContext()
assert not hasattr(context, "batch_input_source")
context.set_batch_input_source_from_inputs_mapping(input_mapping)
assert context.batch_input_source == "Data"
def test_different_thread_have_different_instance(self):
# create a list to store the OperationContext instances from each thread
instances = []
# define a function that gets the OperationContext instance and appends it to the list
def get_instance():
instance = OperationContext.get_instance()
instances.append(instance)
# create two threads and run the function in each thread
thread1 = threading.Thread(target=get_instance)
thread2 = threading.Thread(target=get_instance)
thread1.start()
thread2.start()
thread1.join()
thread2.join()
# assert that the list has two elements and they are different objects
assert len(instances) == 2
assert instances[0] is not instances[1]
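# Usage sketch, not part of the original tests: each thread owns its own OperationContext, and
# append_user_agent stacks extra client identifiers onto the default promptflow/<version> agent;
# "my_client/1.0.0" is a hypothetical identifier.
def example_user_agent():
    context = OperationContext.get_instance()
    context.append_user_agent("my_client/1.0.0")
    return context.get_user_agent()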
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_core/test_api_injector.py | import logging
from collections import namedtuple
from importlib.metadata import version
from types import GeneratorType
from unittest.mock import MagicMock, patch
import openai
import pytest
from promptflow._core.openai_injector import (
PROMPTFLOW_PREFIX,
USER_AGENT_HEADER,
_generate_api_and_injector,
_openai_api_list,
get_aoai_telemetry_headers,
inject_async,
inject_openai_api,
inject_operation_headers,
inject_sync,
recover_openai_api,
)
from promptflow._core.operation_context import OperationContext
from promptflow._core.tracer import Tracer
from promptflow._version import VERSION
from promptflow.connections import AzureOpenAIConnection
from promptflow.exceptions import UserErrorException
from promptflow.tools.aoai import AzureOpenAI
from promptflow.tools.embedding import embedding
IS_LEGACY_OPENAI = version("openai").startswith("0.")
# Mock classes and functions for test
class MockAPI:
def create(self):
pass
@pytest.mark.unittest
def test_inject_operation_headers_sync():
@inject_operation_headers
def f(**kwargs):
return kwargs
if IS_LEGACY_OPENAI:
headers = "headers"
kwargs_1 = {"headers": {"a": 1, "b": 2}}
kwargs_2 = {"headers": {"ms-azure-ai-promptflow-called-from": "aoai-tool"}}
else:
headers = "extra_headers"
kwargs_1 = {"extra_headers": {"a": 1, "b": 2}}
kwargs_2 = {"extra_headers": {"ms-azure-ai-promptflow-called-from": "aoai-tool"}}
injected_headers = get_aoai_telemetry_headers()
assert f(a=1, b=2) == {"a": 1, "b": 2, headers: injected_headers}
merged_headers = {**injected_headers, "a": 1, "b": 2}
assert f(**kwargs_1) == {headers: merged_headers}
aoai_tools_headers = injected_headers.copy()
aoai_tools_headers.update({"ms-azure-ai-promptflow-called-from": "aoai-tool"})
assert f(**kwargs_2) == {headers: aoai_tools_headers}
@pytest.mark.unittest
@pytest.mark.asyncio
async def test_inject_operation_headers_async():
@inject_operation_headers
async def f(**kwargs):
return kwargs
if IS_LEGACY_OPENAI:
headers = "headers"
kwargs_1 = {"headers": {"a": 1, "b": 2}}
kwargs_2 = {"headers": {"ms-azure-ai-promptflow-called-from": "aoai-tool"}}
else:
headers = "extra_headers"
kwargs_1 = {"extra_headers": {"a": 1, "b": 2}}
kwargs_2 = {"extra_headers": {"ms-azure-ai-promptflow-called-from": "aoai-tool"}}
injected_headers = get_aoai_telemetry_headers()
assert await f(a=1, b=2) == {"a": 1, "b": 2, headers: injected_headers}
merged_headers = {**injected_headers, "a": 1, "b": 2}
assert await f(**kwargs_1) == {headers: merged_headers}
aoai_tools_headers = injected_headers.copy()
aoai_tools_headers.update({"ms-azure-ai-promptflow-called-from": "aoai-tool"})
assert await f(**kwargs_2) == {headers: aoai_tools_headers}
@pytest.mark.unittest
def test_aoai_generator_proxy_sync():
def mock_aoai(**kwargs):
# check if kwargs has a stream parameter
if "stream" in kwargs and kwargs["stream"]:
# stream parameter is true, yield a string
def generator():
yield "This is a yielded string"
return generator()
else:
# stream parameter is false or not given, return a string
return "This is a returned string"
if IS_LEGACY_OPENAI:
apis = ["openai.Completion.create", "openai.ChatCompletion.create", "openai.Embedding.create"]
else:
apis = [
"openai.resources.Completions.create",
"openai.resources.chat.Completions.create",
"openai.resources.Embeddings.create",
]
with patch(apis[0], new=mock_aoai), patch(apis[1], new=mock_aoai), patch(apis[2], new=mock_aoai):
Tracer.start_tracing("mock_run_id")
inject_openai_api()
if IS_LEGACY_OPENAI:
return_string = openai.Completion.create(stream=False)
return_generator = openai.Completion.create(stream=True)
else:
return_string = openai.resources.Completions.create(stream=False)
return_generator = openai.resources.Completions.create(stream=True)
assert return_string == "This is a returned string"
assert isinstance(return_generator, GeneratorType)
for _ in return_generator:
pass
traces = Tracer.end_tracing()
assert len(traces) == 2
for trace in traces:
assert trace["type"] == "LLM"
if trace["inputs"]["stream"]:
assert trace["output"] == ["This is a yielded string"]
else:
assert trace["output"] == "This is a returned string"
@pytest.mark.unittest
@pytest.mark.asyncio
async def test_aoai_generator_proxy_async():
async def mock_aoai(**kwargs):
# check if kwargs has a stream parameter
if "stream" in kwargs and kwargs["stream"]:
# stream parameter is true, yield a string
def generator():
yield "This is a yielded string"
return generator()
else:
# stream parameter is false or not given, return a string
return "This is a returned string"
if IS_LEGACY_OPENAI:
apis = ["openai.Completion.acreate", "openai.ChatCompletion.acreate", "openai.Embedding.acreate"]
else:
apis = [
"openai.resources.AsyncCompletions.create",
"openai.resources.chat.AsyncCompletions.create",
"openai.resources.AsyncEmbeddings.create",
]
with patch(apis[0], new=mock_aoai), patch(apis[1], new=mock_aoai), patch(apis[2], new=mock_aoai):
Tracer.start_tracing("mock_run_id")
inject_openai_api()
if IS_LEGACY_OPENAI:
return_string = await openai.Completion.acreate(stream=False)
return_generator = await openai.Completion.acreate(stream=True)
else:
return_string = await openai.resources.AsyncCompletions.create(stream=False)
return_generator = await openai.resources.AsyncCompletions.create(stream=True)
assert return_string == "This is a returned string"
assert isinstance(return_generator, GeneratorType)
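        # Consume the generator so the tracer records the streamed output before tracing ends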
for _ in return_generator:
pass
traces = Tracer.end_tracing()
assert len(traces) == 2
for trace in traces:
assert trace["type"] == "LLM"
if trace["inputs"]["stream"]:
assert trace["output"] == ["This is a yielded string"]
else:
assert trace["output"] == "This is a returned string"
@pytest.mark.unittest
def test_aoai_call_inject():
if IS_LEGACY_OPENAI:
headers = "headers"
apis = ["openai.Completion.create", "openai.ChatCompletion.create", "openai.Embedding.create"]
else:
headers = "extra_headers"
apis = [
"openai.resources.Completions.create",
"openai.resources.chat.Completions.create",
"openai.resources.Embeddings.create",
]
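    # The mock simply returns the headers argument it receives so the injected headers can be asserted below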
def mock_aoai(**kwargs):
return kwargs.get(headers)
with patch(apis[0], new=mock_aoai), patch(apis[1], new=mock_aoai), patch(apis[2], new=mock_aoai):
inject_openai_api()
injected_headers = get_aoai_telemetry_headers()
if IS_LEGACY_OPENAI:
return_headers_1 = openai.Completion.create(headers=None)
return_headers_2 = openai.ChatCompletion.create(headers="abc")
return_headers_3 = openai.Embedding.create(headers=1)
else:
return_headers_1 = openai.resources.Completions.create(extra_headers=None)
return_headers_2 = openai.resources.chat.Completions.create(extra_headers="abc")
return_headers_3 = openai.resources.Embeddings.create(extra_headers=1)
assert return_headers_1 is not None
assert injected_headers.items() <= return_headers_1.items()
assert return_headers_2 is not None
assert injected_headers.items() <= return_headers_2.items()
assert return_headers_3 is not None
assert injected_headers.items() <= return_headers_3.items()
@pytest.mark.unittest
def test_aoai_tool_header():
def mock_complete(*args, **kwargs):
Response = namedtuple("Response", ["choices"])
Choice = namedtuple("Choice", ["text"])
choice = Choice(text=kwargs.get("extra_headers", {}))
response = Response(choices=[choice])
return response
def mock_chat(*args, **kwargs):
Completion = namedtuple("Completion", ["choices"])
Choice = namedtuple("Choice", ["message"])
Message = namedtuple("Message", ["content"])
message = Message(content=kwargs.get("extra_headers", {}))
choice = Choice(message=message)
completion = Completion(choices=[choice])
return completion
def mock_embedding(*args, **kwargs):
Response = namedtuple("Response", ["data"])
Embedding = namedtuple("Embedding", ["embedding"])
response = Response(data=[Embedding(embedding=kwargs.get("extra_headers", {}))])
return response
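    # Patch the OpenAI APIs with mocks that echo back the extra_headers they receive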
with patch("openai.resources.Completions.create", new=mock_complete), patch(
"openai.resources.chat.Completions.create", new=mock_chat
), patch("openai.resources.Embeddings.create", new=mock_embedding):
inject_openai_api()
aoai_tool_header = {"ms-azure-ai-promptflow-called-from": "aoai-tool"}
return_headers = AzureOpenAI(AzureOpenAIConnection(api_key="test", api_base="test")).completion(
prompt="test", deployment_name="test"
)
assert aoai_tool_header.items() <= return_headers.items()
return_headers = AzureOpenAI(AzureOpenAIConnection(api_key="test", api_base="test")).chat(
prompt="user:\ntest", deployment_name="test"
)
assert aoai_tool_header.items() <= return_headers.items()
return_headers = embedding(
AzureOpenAIConnection(api_key="test", api_base="test"), input="test", deployment_name="test"
)
assert aoai_tool_header.items() <= return_headers.items()
@pytest.mark.unittest
def test_aoai_chat_tool_prompt():
def mock_chat(*args, **kwargs):
Completion = namedtuple("Completion", ["choices"])
Choice = namedtuple("Choice", ["message"])
Message = namedtuple("Message", ["content"])
message = Message(content=kwargs.get("messages", {}))
choice = Choice(message=message)
completion = Completion(choices=[choice])
return completion
with patch("openai.resources.chat.Completions.create", new=mock_chat):
inject_openai_api()
return_messages = AzureOpenAI(AzureOpenAIConnection(api_key="test", api_base="test")).chat(
prompt="user:\ntest", deployment_name="test"
)
assert return_messages == [{"role": "user", "content": "test"}]
return_messages = AzureOpenAI(AzureOpenAIConnection(api_key="test", api_base="test")).chat(
prompt="user:\r\n", deployment_name="test"
)
assert return_messages == [{"role": "user", "content": ""}]
with pytest.raises(UserErrorException, match="The Chat API requires a specific format for prompt"):
AzureOpenAI(AzureOpenAIConnection(api_key="test", api_base="test")).chat(
prompt="user:", deployment_name="test"
)
# Parametrized tests for the generator helpers that list the OpenAI APIs to patch and their injectors
@pytest.mark.parametrize(
"is_legacy, expected_apis_with_injectors",
[
(
True,
[
(
(
("openai", "Completion", "create"),
("openai", "ChatCompletion", "create"),
("openai", "Embedding", "create"),
),
inject_sync,
),
(
(
("openai", "Completion", "acreate"),
("openai", "ChatCompletion", "acreate"),
("openai", "Embedding", "acreate"),
),
inject_async,
),
],
),
(
False,
[
(
(
("openai.resources.chat", "Completions", "create"),
("openai.resources", "Completions", "create"),
("openai.resources", "Embeddings", "create"),
),
inject_sync,
),
(
(
("openai.resources.chat", "AsyncCompletions", "create"),
("openai.resources", "AsyncCompletions", "create"),
("openai.resources", "AsyncEmbeddings", "create"),
),
inject_async,
),
],
),
],
)
def test_api_list(is_legacy, expected_apis_with_injectors):
with patch("promptflow._core.openai_injector.IS_LEGACY_OPENAI", is_legacy):
        # Materialize the generator to collect all (apis, injector) pairs
actual_apis_with_injectors = list(_openai_api_list())
# Assert that the actual list matches the expected list
assert actual_apis_with_injectors == expected_apis_with_injectors
@pytest.mark.parametrize(
"apis_with_injectors, expected_output, expected_logs",
[
([((("MockModule", "MockAPI", "create"),), inject_sync)], [(MockAPI, "create", inject_sync)], []),
([((("MockModule", "MockAPI", "create"),), inject_async)], [(MockAPI, "create", inject_async)], []),
],
)
def test_generate_api_and_injector(apis_with_injectors, expected_output, expected_logs, caplog):
with patch("importlib.import_module", return_value=MagicMock(MockAPI=MockAPI)) as mock_import_module:
# Capture the logs
with caplog.at_level(logging.WARNING):
# Run the generator and collect the output
result = list(_generate_api_and_injector(apis_with_injectors))
# Check if the result matches the expected output
assert result == expected_output
# Check if the logs match the expected logs
assert len(caplog.records) == len(expected_logs)
for record, expected_message in zip(caplog.records, expected_logs):
assert expected_message in record.message
mock_import_module.assert_called_with("MockModule")
def test_generate_api_and_injector_attribute_error_logging(caplog):
apis = [
((("NonExistentModule", "NonExistentAPI", "create"),), MagicMock()),
((("MockModuleMissingMethod", "MockAPIMissingMethod", "missing_method"),), MagicMock()),
]
# Set up the side effect for the mock
def import_module_effect(name):
if name == "MockModuleMissingMethod":
module = MagicMock()
delattr(module, "MockAPIMissingMethod") # Use delattr to remove the attribute
return module
else:
raise ModuleNotFoundError(f"No module named '{name}'")
with patch("importlib.import_module") as mock_import_module:
mock_import_module.side_effect = import_module_effect
with caplog.at_level(logging.WARNING):
list(_generate_api_and_injector(apis))
assert len(caplog.records) == 2
assert "An unexpected error occurred" in caplog.records[0].message
assert "NonExistentModule" in caplog.records[0].message
assert "does not have the class" in caplog.records[1].message
assert "MockAPIMissingMethod" in caplog.records[1].message
# Verify that `importlib.import_module` was called with correct module names
mock_import_module.assert_any_call("NonExistentModule")
mock_import_module.assert_any_call("MockModuleMissingMethod")
@pytest.mark.unittest
def test_get_aoai_telemetry_headers():
# create a mock operation context
mock_operation_context = OperationContext()
mock_operation_context.user_agent = "test-user-agent"
mock_operation_context.update(
{
"flow_id": "test-flow-id",
"root_run_id": "test-root-run-id",
"index": 1,
"run_id": "test-run-id",
"variant_id": "test-variant-id",
}
)
# patch the OperationContext.get_instance method to return the mock operation context
with patch("promptflow._core.operation_context.OperationContext.get_instance") as mock_get_instance:
mock_get_instance.return_value = mock_operation_context
# call the function under test and get the headers
headers = get_aoai_telemetry_headers()
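        # Every header key should use the promptflow prefix (or be the user-agent header) and contain no underscores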
for key in headers.keys():
assert key.startswith(PROMPTFLOW_PREFIX) or key == USER_AGENT_HEADER
assert "_" not in key
# assert that the headers are correct
assert headers[USER_AGENT_HEADER] == f"test-user-agent promptflow/{VERSION}"
assert headers[f"{PROMPTFLOW_PREFIX}flow-id"] == "test-flow-id"
assert headers[f"{PROMPTFLOW_PREFIX}root-run-id"] == "test-root-run-id"
assert headers[f"{PROMPTFLOW_PREFIX}index"] == "1"
assert headers[f"{PROMPTFLOW_PREFIX}run-id"] == "test-run-id"
assert headers[f"{PROMPTFLOW_PREFIX}variant-id"] == "test-variant-id"
@pytest.mark.unittest
def test_inject_and_recover_openai_api():
class FakeAPIWithoutOriginal:
@staticmethod
def create():
pass
class FakeAPIWithOriginal:
@staticmethod
def create():
pass
def dummy_api():
pass
# Real injector function that adds an _original attribute
def injector(f):
def wrapper_fun(*args, **kwargs):
return f(*args, **kwargs)
wrapper_fun._original = f
return wrapper_fun
# Set an _original attribute for the create method of FakeAPIWithOriginal
FakeAPIWithOriginal.create._original = dummy_api
# Store the original create methods before injection
original_api_without_original = FakeAPIWithoutOriginal.create
original_api_with_original = FakeAPIWithOriginal.create
# Mock the generator function to yield our mocked api and method
with patch(
"promptflow._core.openai_injector.available_openai_apis_and_injectors",
return_value=[(FakeAPIWithoutOriginal, "create", injector), (FakeAPIWithOriginal, "create", injector)],
):
# Call the function to inject the APIs
inject_openai_api()
# Check that the _original attribute was set for the method that didn't have it
assert hasattr(FakeAPIWithoutOriginal.create, "_original")
# Ensure the _original attribute points to the correct original method
assert FakeAPIWithoutOriginal.create._original is original_api_without_original
# Check that the injector was not applied again to the method that already had an _original attribute
# The _original attribute should still point to the mock, not the original method
assert getattr(FakeAPIWithOriginal.create, "_original") is not FakeAPIWithOriginal.create
# The original method should remain unchanged
assert FakeAPIWithOriginal.create is original_api_with_original
# Call the function to recover the APIs
recover_openai_api()
# Check that the _original attribute was removed for the method that didn't have it
assert not hasattr(FakeAPIWithoutOriginal.create, "_original")
assert not hasattr(FakeAPIWithOriginal.create, "_original")
# The original methods should be restored
assert FakeAPIWithoutOriginal.create is original_api_without_original
assert FakeAPIWithOriginal.create is dummy_api
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/contracts/test_flow.py
from pathlib import Path
import pytest
from promptflow._sdk.entities._connection import AzureContentSafetyConnection
from promptflow.contracts._errors import FailedToImportModule
from promptflow.contracts.flow import (
Flow,
FlowInputAssignment,
FlowInputDefinition,
FlowOutputDefinition,
InputAssignment,
InputValueType,
Node,
NodeVariant,
NodeVariants,
ToolSource,
ToolSourceType,
)
from promptflow.contracts.tool import Tool, ToolType, ValueType
from ...utils import EAGER_FLOWS_ROOT, FLOW_ROOT, get_flow_folder, get_flow_package_tool_definition, get_yaml_file
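# Base folder that contains the package tool test assets used below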
PACKAGE_TOOL_BASE = Path(__file__).parent.parent.parent / "package_tools"
@pytest.mark.e2etest
class TestFlowContract:
@pytest.mark.parametrize(
"flow_folder, expected_connection_names",
[
("web_classification", {"azure_open_ai_connection"}),
("basic-with-connection", {"azure_open_ai_connection"}),
("flow_with_dict_input_with_variant", {"mock_custom_connection"}),
],
)
def test_flow_get_connection_names(self, flow_folder, expected_connection_names):
flow_yaml = get_yaml_file(flow_folder)
flow = Flow.from_yaml(flow_yaml)
assert flow.get_connection_names() == expected_connection_names
def test_flow_get_connection_input_names_for_node_with_variants(self):
# Connection input exists only in python node
flow_folder = "flow_with_dict_input_with_variant"
flow_yaml = get_yaml_file(flow_folder)
flow = Flow.from_yaml(flow_yaml)
assert flow.get_connection_input_names_for_node("print_val") == ["conn"]
def test_flow_get_connection_names_with_package_tool(self, mocker):
flow_folder = PACKAGE_TOOL_BASE / "custom_llm_tool"
flow_file = flow_folder / "flow.dag.yaml"
package_tool_definition = get_flow_package_tool_definition(flow_folder)
mocker.patch("promptflow._core.tools_manager.collect_package_tools", return_value=package_tool_definition)
flow = Flow.from_yaml(flow_file)
connection_names = flow.get_connection_names()
assert connection_names == {"azure_open_ai_connection"}
def test_flow_get_connection_input_names_for_node(self, mocker):
flow_folder = PACKAGE_TOOL_BASE / "custom_llm_tool"
flow_file = flow_folder / "flow.dag.yaml"
package_tool_definition = get_flow_package_tool_definition(flow_folder)
mocker.patch("promptflow._core.tools_manager.collect_package_tools", return_value=package_tool_definition)
flow = Flow.from_yaml(flow_file)
connection_names = flow.get_connection_input_names_for_node(flow.nodes[0].name)
assert connection_names == ["connection", "connection_2"]
assert flow.get_connection_input_names_for_node("not_exist") == []
@pytest.mark.parametrize(
"flow_folder_name, environment_variables_overrides, except_environment_variables",
[
pytest.param(
"flow_with_environment_variables",
{"env2": "runtime_env2", "env10": "aaaaa"},
{
"env1": "2",
"env2": "runtime_env2",
"env3": "[1, 2, 3, 4, 5]",
"env4": '{"a": 1, "b": "2"}',
"env10": "aaaaa",
},
id="LoadEnvVariablesWithOverrides",
),
pytest.param(
"flow_with_environment_variables",
None,
{
"env1": "2",
"env2": "spawn",
"env3": "[1, 2, 3, 4, 5]",
"env4": '{"a": 1, "b": "2"}',
},
id="LoadEnvVariablesWithoutOverrides",
),
pytest.param(
"simple_hello_world",
{"env2": "runtime_env2", "env10": "aaaaa"},
{"env2": "runtime_env2", "env10": "aaaaa"},
id="LoadEnvVariablesWithoutYamlLevelEnvVariables",
),
],
)
def test_flow_get_environment_variables_with_overrides(
        self, flow_folder_name, environment_variables_overrides, expected_environment_variables
):
flow_folder = get_flow_folder(flow_folder_name)
flow_file = "flow.dag.yaml"
flow = Flow.from_yaml(flow_file=flow_file, working_dir=flow_folder)
merged_environment_variables = flow.get_environment_variables_with_overrides(
environment_variables_overrides=environment_variables_overrides,
)
        assert merged_environment_variables == expected_environment_variables
@pytest.mark.parametrize(
"flow_folder_name, folder_root, flow_file, environment_variables_overrides, except_environment_variables",
[
pytest.param(
"flow_with_environment_variables",
FLOW_ROOT,
"flow.dag.yaml",
{"env2": "runtime_env2", "env10": "aaaaa"},
{
"env1": "2",
"env2": "runtime_env2",
"env3": "[1, 2, 3, 4, 5]",
"env4": '{"a": 1, "b": "2"}',
"env10": "aaaaa",
},
id="LoadEnvVariablesWithOverrides",
),
pytest.param(
"flow_with_environment_variables",
FLOW_ROOT,
"flow.dag.yaml",
None,
{
"env1": "2",
"env2": "spawn",
"env3": "[1, 2, 3, 4, 5]",
"env4": '{"a": 1, "b": "2"}',
},
id="LoadEnvVariablesWithoutOverrides",
),
pytest.param(
"simple_hello_world",
FLOW_ROOT,
"flow.dag.yaml",
{"env2": "runtime_env2", "env10": "aaaaa"},
{"env2": "runtime_env2", "env10": "aaaaa"},
id="LoadEnvVariablesWithoutYamlLevelEnvVariables",
),
pytest.param(
"simple_with_yaml",
EAGER_FLOWS_ROOT,
"entry.py",
None,
{},
id="LoadEnvVariablesForEagerFlow",
),
pytest.param(
"simple_with_yaml",
EAGER_FLOWS_ROOT,
"entry.py",
{"env2": "runtime_env2", "env10": "aaaaa"},
{"env2": "runtime_env2", "env10": "aaaaa"},
id="LoadEnvVariablesForEagerFlowWithOverrides",
),
],
)
def test_load_env_variables(
        self, flow_folder_name, folder_root, flow_file, environment_variables_overrides, expected_environment_variables
):
flow_folder = get_flow_folder(flow_folder_name, folder_root)
merged_environment_variables = Flow.load_env_variables(
flow_file=flow_file,
working_dir=flow_folder,
environment_variables_overrides=environment_variables_overrides,
)
        assert merged_environment_variables == expected_environment_variables
@pytest.mark.unittest
class TestFlow:
@pytest.mark.parametrize(
"flow, expected_value",
[
(
Flow(id="flow_id", name="flow_name", nodes=[], inputs={}, outputs={}, tools=[]),
{
"id": "flow_id",
"name": "flow_name",
"nodes": [],
"inputs": {},
"outputs": {},
"tools": [],
"language": "python",
},
),
(
Flow(
id="flow_id",
name="flow_name",
nodes=[Node(name="node1", tool="tool1", inputs={})],
inputs={"input1": FlowInputDefinition(type=ValueType.STRING)},
outputs={"output1": FlowOutputDefinition(type=ValueType.STRING, reference=None)},
tools=[],
),
{
"id": "flow_id",
"name": "flow_name",
"nodes": [{"name": "node1", "tool": "tool1", "inputs": {}}],
"inputs": {"input1": {"type": ValueType.STRING.value}},
"outputs": {"output1": {"type": ValueType.STRING.value}},
"tools": [],
"language": "python",
},
),
],
)
def test_flow_serialize(self, flow, expected_value):
assert flow.serialize() == expected_value
@pytest.mark.parametrize(
"data, expected_value",
[
(
{
"id": "flow_id",
"name": "flow_name",
"nodes": [{"name": "node1", "tool": "tool1", "inputs": {}, "outputs": {}}],
"inputs": {"input1": {"type": ValueType.STRING.value}},
"outputs": {"output1": {"type": ValueType.STRING.value}},
"tools": [],
},
Flow(
id="flow_id",
name="flow_name",
nodes=[Node(name="node1", tool="tool1", inputs={})],
inputs={
"input1": FlowInputDefinition(
type=ValueType.STRING, description="", enum=[], is_chat_input=False, is_chat_history=None
)
},
outputs={
"output1": FlowOutputDefinition(
type=ValueType.STRING,
reference=InputAssignment(
value="", value_type=InputValueType.LITERAL, section="", property=""
),
description="",
evaluation_only=False,
is_chat_output=False,
)
},
tools=[],
node_variants={},
program_language="python",
environment_variables={},
),
),
],
)
def test_flow_deserialize(self, data, expected_value):
assert Flow.deserialize(data) == expected_value
def test_import_requisites(self):
tool1 = Tool(name="tool1", type=ToolType.PYTHON, inputs={}, module="yaml")
tool2 = Tool(name="tool2", type=ToolType.PYTHON, inputs={}, module="module")
node1 = Node(name="node1", tool="tool1", inputs={}, module="yaml")
node2 = Node(name="node2", tool="tool2", inputs={}, module="module")
with pytest.raises(FailedToImportModule) as e:
Flow._import_requisites([tool1], [node2])
assert str(e.value).startswith(
"Failed to import modules with error: Import node 'node2' provider module 'module' failed."
)
with pytest.raises(FailedToImportModule) as e:
Flow._import_requisites([tool2], [node1])
assert str(e.value).startswith(
"Failed to import modules with error: Import tool 'tool2' module 'module' failed."
)
def test_apply_default_node_variants(self):
node_variant = NodeVariant(
node=Node(name="print_val_variant", tool=None, inputs={"input2": None}, use_variants=False),
description=None,
)
node_variants = {
"print_val": NodeVariants(
default_variant_id="variant1",
variants={"variant1": node_variant},
)
}
flow1 = Flow(
id="test_flow_id",
name="test_flow",
nodes=[Node(name="print_val", tool=None, inputs={"input1": None}, use_variants=True)],
inputs={},
outputs={},
tools=[],
node_variants=node_variants,
)
# test when node.use_variants is True
flow1._apply_default_node_variants()
assert flow1.nodes[0].use_variants is False
assert flow1.nodes[0].inputs.keys() == {"input2"}
assert flow1.nodes[0].name == "print_val"
flow2 = Flow(
id="test_flow_id",
name="test_flow",
nodes=[Node(name="print_val", tool=None, inputs={"input1": None}, use_variants=False)],
inputs={},
outputs={},
tools=[],
node_variants=node_variants,
)
# test when node.use_variants is False
tmp_nodes = flow2.nodes
flow2._apply_default_node_variants()
assert flow2.nodes == tmp_nodes
@pytest.mark.parametrize(
"node_variants",
[
(None),
(
{
"test": NodeVariants(
default_variant_id="variant1",
variants={
"variant1": NodeVariant(
node=Node(name="print_val_variant", tool=None, inputs={"input2": None})
)
},
)
}
),
(
{
"print_val": NodeVariants(
default_variant_id="test",
variants={
"variant1": NodeVariant(
node=Node(name="print_val_variant", tool=None, inputs={"input2": None})
)
},
)
}
),
],
)
def test_apply_default_node_variant(self, node_variants):
node = Node(name="print_val", tool=None, inputs={"input1": None}, use_variants=True)
assert Flow._apply_default_node_variant(node, node_variants) == node
def test_apply_node_overrides(self):
llm_node = Node(name="llm_node", tool=None, inputs={}, connection="open_ai_connection")
test_node = Node(
name="test_node", tool=None, inputs={"test": InputAssignment("test_value1", InputValueType.LITERAL)}
)
flow = Flow(id="test_flow_id", name="test_flow", nodes=[llm_node, test_node], inputs={}, outputs={}, tools=[])
assert flow == flow._apply_node_overrides(None)
assert flow == flow._apply_node_overrides({})
node_overrides = {
"other_node.connection": "some_connection",
}
with pytest.raises(ValueError):
flow._apply_node_overrides(node_overrides)
node_overrides = {
"llm_node.connection": "custom_connection",
"test_node.test": "test_value2",
}
flow = flow._apply_node_overrides(node_overrides)
assert flow.nodes[0].connection == "custom_connection"
assert flow.nodes[1].inputs["test"].value == "test_value2"
def test_has_aggregation_node(self):
llm_node = Node(name="llm_node", tool=None, inputs={})
aggre_node = Node(name="aggre_node", tool=None, inputs={}, aggregation=True)
flow1 = Flow(id="id", name="name", nodes=[llm_node], inputs={}, outputs={}, tools=[])
assert not flow1.has_aggregation_node()
flow2 = Flow(id="id", name="name", nodes=[llm_node, aggre_node], inputs={}, outputs={}, tools=[])
assert flow2.has_aggregation_node()
def test_get_node(self):
llm_node = Node(name="llm_node", tool=None, inputs={})
flow = Flow(id="id", name="name", nodes=[llm_node], inputs={}, outputs={}, tools=[])
assert flow.get_node("llm_node") is llm_node
assert flow.get_node("other_node") is None
def test_get_tool(self):
tool = Tool(name="tool", type=ToolType.PYTHON, inputs={})
flow = Flow(id="id", name="name", nodes=[], inputs={}, outputs={}, tools=[tool])
assert flow.get_tool("tool") is tool
assert flow.get_tool("other_tool") is None
def test_is_reduce_node(self):
llm_node = Node(name="llm_node", tool=None, inputs={})
aggre_node = Node(name="aggre_node", tool=None, inputs={}, aggregation=True)
flow = Flow(id="id", name="name", nodes=[llm_node, aggre_node], inputs={}, outputs={}, tools=[])
assert not flow.is_reduce_node("llm_node")
assert flow.is_reduce_node("aggre_node")
def test_is_normal_node(self):
llm_node = Node(name="llm_node", tool=None, inputs={})
aggre_node = Node(name="aggre_node", tool=None, inputs={}, aggregation=True)
flow = Flow(id="id", name="name", nodes=[llm_node, aggre_node], inputs={}, outputs={}, tools=[])
assert flow.is_normal_node("llm_node")
assert not flow.is_normal_node("aggre_node")
def test_is_llm_node(self):
llm_node = Node(name="llm_node", tool=None, inputs={}, type=ToolType.LLM)
aggre_node = Node(name="aggre_node", tool=None, inputs={}, aggregation=True)
flow = Flow(id="id", name="name", nodes=[llm_node, aggre_node], inputs={}, outputs={}, tools=[])
assert flow.is_llm_node(llm_node)
assert not flow.is_llm_node(aggre_node)
def test_is_referenced_by_flow_output(self):
llm_node = Node(name="llm_node", tool=None, inputs={})
aggre_node = Node(name="aggre_node", tool=None, inputs={}, aggregation=True)
output = {
"output": FlowOutputDefinition(
type=ValueType.STRING, reference=InputAssignment("llm_node", InputValueType.NODE_REFERENCE, "output")
)
}
flow = Flow(id="id", name="name", nodes=[llm_node, aggre_node], inputs={}, outputs=output, tools=[])
assert flow.is_referenced_by_flow_output(llm_node)
assert not flow.is_referenced_by_flow_output(aggre_node)
def test_is_node_referenced_by(self):
llm_node = Node(name="llm_node", tool=None, inputs={})
aggre_node = Node(
name="aggre_node",
tool=None,
inputs={"input": InputAssignment(value="llm_node", value_type=InputValueType.NODE_REFERENCE)},
aggregation=True,
)
flow = Flow(id="id", name="name", nodes=[llm_node, aggre_node], inputs={}, outputs={}, tools=[])
assert not flow.is_node_referenced_by(aggre_node, llm_node)
assert flow.is_node_referenced_by(llm_node, aggre_node)
def test_is_referenced_by_other_node(self):
llm_node = Node(name="llm_node", tool=None, inputs={})
aggre_node = Node(
name="aggre_node",
tool=None,
inputs={"input": InputAssignment(value="llm_node", value_type=InputValueType.NODE_REFERENCE)},
aggregation=True,
)
flow = Flow(id="id", name="name", nodes=[llm_node, aggre_node], inputs={}, outputs={}, tools=[])
assert not flow.is_referenced_by_other_node(aggre_node)
assert flow.is_referenced_by_other_node(llm_node)
def test_is_chat_flow(self):
chat_input = {"question": FlowInputDefinition(type=ValueType.STRING, is_chat_input=True)}
standard_flow = Flow(id="id", name="name", nodes=[], inputs={}, outputs={}, tools=[])
chat_flow = Flow(id="id", name="name", nodes=[], inputs=chat_input, outputs={}, tools=[])
assert not standard_flow.is_chat_flow()
assert chat_flow.is_chat_flow()
def test_get_chat_input_name(self):
chat_input = {"question": FlowInputDefinition(type=ValueType.STRING, is_chat_input=True)}
standard_flow = Flow(id="id", name="name", nodes=[], inputs={}, outputs={}, tools=[])
chat_flow = Flow(id="id", name="name", nodes=[], inputs=chat_input, outputs={}, tools=[])
assert standard_flow.get_chat_input_name() is None
assert chat_flow.get_chat_input_name() == "question"
def test_get_chat_output_name(self):
chat_output = {"answer": FlowOutputDefinition(type=ValueType.STRING, reference=None, is_chat_output=True)}
standard_flow = Flow(id="id", name="name", nodes=[], inputs={}, outputs={}, tools=[])
chat_flow = Flow(id="id", name="name", nodes=[], inputs={}, outputs=chat_output, tools=[])
assert standard_flow.get_chat_output_name() is None
assert chat_flow.get_chat_output_name() == "answer"
def test_replace_with_variant(self):
node0 = Node(name="node0", tool=None, inputs={"input0": None}, use_variants=True)
node1 = Node(name="node1", tool="tool1", inputs={"input1": None}, use_variants=False)
node2 = Node(name="node2", tool="tool2", inputs={"input2": None}, use_variants=False)
node_variant = Node(name="node0", tool="tool3", inputs={"input3": None}, use_variants=False)
node_variants = {
"print_val": NodeVariants(
default_variant_id="variant1",
variants={"variant1": NodeVariant(node_variant, None)},
)
}
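        # node0 is declared with use_variants=True, so it is the node replaced by node_variant below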
flow = Flow("test_flow_id", "test_flow", [node0, node1, node2], {}, {}, [], node_variants)
# flow = Flow.from_yaml(get_yaml_file("web_classification"))
tool_cnt = len(flow.tools)
flow._replace_with_variant(node_variant, [flow.nodes[1].tool, flow.nodes[2].tool])
assert "input3" in flow.nodes[0].inputs
assert flow.nodes[0].tool == "tool3"
assert len(flow.tools) == tool_cnt + 2
@pytest.mark.unittest
class TestInputAssignment:
@pytest.mark.parametrize(
"value, expected_value",
[
(InputAssignment("value", InputValueType.LITERAL), "value"),
(InputAssignment("value", InputValueType.FLOW_INPUT), "${flow.value}"),
(InputAssignment("value", InputValueType.NODE_REFERENCE, "section"), "${value.section}"),
(
InputAssignment("value", InputValueType.NODE_REFERENCE, "section", "property"),
"${value.section.property}",
),
(InputAssignment(AzureContentSafetyConnection, InputValueType.LITERAL, "section", "property"), "ABCMeta"),
],
)
def test_serialize(self, value, expected_value):
assert value.serialize() == expected_value
@pytest.mark.parametrize(
"serialized_value, expected_value",
[
(
"${value.section.property}",
InputAssignment("value", InputValueType.NODE_REFERENCE, "section", "property"),
),
(
"${flow.section.property}",
FlowInputAssignment("section.property", prefix="flow.", value_type=InputValueType.FLOW_INPUT),
),
("${value}", InputAssignment("value", InputValueType.NODE_REFERENCE, "output")),
("$value", InputAssignment("$value", InputValueType.LITERAL)),
("value", InputAssignment("value", InputValueType.LITERAL)),
],
)
def test_deserialize(self, serialized_value, expected_value):
input_assignment = InputAssignment.deserialize(serialized_value)
assert input_assignment == expected_value
@pytest.mark.parametrize(
"serialized_reference, expected_value",
[
("input", InputAssignment("input", InputValueType.NODE_REFERENCE, "output")),
("flow.section", FlowInputAssignment("section", value_type=InputValueType.FLOW_INPUT, prefix="flow.")),
(
"flow.section.property",
FlowInputAssignment("section.property", value_type=InputValueType.FLOW_INPUT, prefix="flow."),
),
],
)
def test_deserialize_reference(self, serialized_reference, expected_value):
assert InputAssignment.deserialize_reference(serialized_reference) == expected_value
@pytest.mark.parametrize(
"serialized_node_reference, expected_value",
[
("value", InputAssignment("value", InputValueType.NODE_REFERENCE, "output")),
("value.section", InputAssignment("value", InputValueType.NODE_REFERENCE, "section")),
("value.section.property", InputAssignment("value", InputValueType.NODE_REFERENCE, "section", "property")),
],
)
def test_deserialize_node_reference(self, serialized_node_reference, expected_value):
assert InputAssignment.deserialize_node_reference(serialized_node_reference) == expected_value
@pytest.mark.unittest
class TestFlowInputAssignment:
@pytest.mark.parametrize(
"input_value, expected_value",
[
("flow.section.property", True),
("inputs.section.property", True),
("section.property", False),
("", False),
],
)
def test_is_flow_input(self, input_value, expected_value):
assert FlowInputAssignment.is_flow_input(input_value) == expected_value
def test_deserialize(self):
expected_input = FlowInputAssignment("section.property", prefix="inputs.", value_type=InputValueType.FLOW_INPUT)
assert FlowInputAssignment.deserialize("inputs.section.property") == expected_input
expected_flow = FlowInputAssignment("section.property", prefix="flow.", value_type=InputValueType.FLOW_INPUT)
assert FlowInputAssignment.deserialize("flow.section.property") == expected_flow
with pytest.raises(ValueError):
FlowInputAssignment.deserialize("value")
@pytest.mark.unittest
class TestToolSource:
@pytest.mark.parametrize(
"tool_source, expected_value",
[
({}, ToolSource(type=ToolSourceType.Code)),
({"type": ToolSourceType.Code.value}, ToolSource(type=ToolSourceType.Code)),
(
{"type": ToolSourceType.Package.value, "tool": "tool", "path": "path"},
ToolSource(type=ToolSourceType.Package, tool="tool", path="path"),
),
],
)
def test_deserialize(self, tool_source, expected_value):
assert ToolSource.deserialize(tool_source) == expected_value
@pytest.mark.unittest
class TestNode:
@pytest.mark.parametrize(
"node, expected_value",
[
(
Node(name="test_node", tool="test_tool", inputs={}),
{"name": "test_node", "tool": "test_tool", "inputs": {}},
),
(
Node(name="test_node", tool="test_tool", inputs={}, aggregation=True),
{"name": "test_node", "tool": "test_tool", "inputs": {}, "aggregation": True, "reduce": True},
),
],
)
def test_serialize(self, node, expected_value):
assert node.serialize() == expected_value
@pytest.mark.parametrize(
"data, expected_value",
[
(
{"name": "test_node", "tool": "test_tool", "inputs": {}},
Node(name="test_node", tool="test_tool", inputs={}),
),
(
{"name": "test_node", "tool": "test_tool", "inputs": {}, "aggregation": True},
Node(name="test_node", tool="test_tool", inputs={}, aggregation=True),
),
],
)
def test_deserialize(self, data, expected_value):
assert Node.deserialize(data) == expected_value
@pytest.mark.unittest
class TestFlowInputDefinition:
@pytest.mark.parametrize(
"value, expected_value",
[
(FlowInputDefinition(type=ValueType.BOOL), {"type": ValueType.BOOL.value}),
(
FlowInputDefinition(
type=ValueType.STRING,
default="default",
description="description",
enum=["enum1", "enum2"],
is_chat_input=True,
is_chat_history=True,
),
{
"type": ValueType.STRING.value,
"default": "default",
"description": "description",
"enum": ["enum1", "enum2"],
"is_chat_input": True,
"is_chat_history": True,
},
),
],
)
def test_serialize(self, value, expected_value):
assert value.serialize() == expected_value
@pytest.mark.parametrize(
"data, expected_value",
[
(
{
"type": ValueType.STRING,
"default": "default",
"description": "description",
"enum": ["enum1", "enum2"],
"is_chat_input": True,
"is_chat_history": True,
},
FlowInputDefinition(
type=ValueType.STRING,
default="default",
description="description",
enum=["enum1", "enum2"],
is_chat_input=True,
is_chat_history=True,
),
),
(
{
"type": ValueType.STRING,
},
FlowInputDefinition(
type=ValueType.STRING, description="", enum=[], is_chat_input=False, is_chat_history=None
),
),
],
)
def test_deserialize(self, data, expected_value):
assert FlowInputDefinition.deserialize(data) == expected_value
@pytest.mark.unittest
class TestFlowOutputDefinition:
@pytest.mark.parametrize(
"value, expected_value",
[
(FlowOutputDefinition(type=ValueType.BOOL, reference=None), {"type": ValueType.BOOL.value}),
(
FlowOutputDefinition(
type=ValueType.STRING,
reference=InputAssignment("value", InputValueType.NODE_REFERENCE),
description="description",
evaluation_only=True,
is_chat_output=True,
),
{
"type": ValueType.STRING.value,
"reference": "${value.}",
"description": "description",
"evaluation_only": True,
"is_chat_output": True,
},
),
],
)
def test_serialize(self, value, expected_value):
assert value.serialize() == expected_value
@pytest.mark.parametrize(
"data, expected_value",
[
(
{
"type": ValueType.STRING,
},
FlowOutputDefinition(
type=ValueType.STRING,
reference=InputAssignment("", InputValueType.LITERAL),
),
),
],
)
def test_deserialize(self, data, expected_value):
assert FlowOutputDefinition.deserialize(data) == expected_value
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/contracts/test_run_mode.py
import pytest
from promptflow.contracts.run_mode import RunMode
@pytest.mark.unittest
@pytest.mark.parametrize(
"run_mode, expected",
[
("Test", RunMode.Test),
("SingleNode", RunMode.SingleNode),
("Batch", RunMode.Batch),
("Default", RunMode.Test),
],
)
def test_parse(run_mode, expected):
assert RunMode.parse(run_mode) == expected
@pytest.mark.unittest
def test_parse_invalid():
with pytest.raises(ValueError):
RunMode.parse(123)
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/contracts/test_multimedia.py
import pytest
from promptflow.contracts.multimedia import Image, PFBytes
@pytest.mark.unittest
class TestMultimediaContract:
@pytest.mark.parametrize(
"value, mime_type, source_url",
[
(b"test", "image/*", None),
(b"test", "image/jpg", None),
(b"test", "image/png", None),
(b"test", None, None),
(b"test", "image/*", "mock_url"),
]
)
def test_image_contract(self, value, mime_type, source_url):
image = Image(value, mime_type, source_url)
if mime_type is None:
mime_type = "image/*"
assert image._mime_type == mime_type
assert image._hash == "a94a8fe5"
assert image.to_base64() == "dGVzdA=="
assert image.to_base64(with_type=True) == f"data:{mime_type};base64,dGVzdA=="
assert image.to_base64(with_type=True, dict_type=True) == {f"data:{mime_type};base64": "dGVzdA=="}
assert bytes(image) == value
assert image.source_url == source_url
assert str(image) == "Image(a94a8fe5)"
assert repr(image) == "Image(a94a8fe5)"
assert image.serialize() == "Image(a94a8fe5)"
assert image.serialize(lambda x: x.to_base64()) == "dGVzdA=="
@pytest.mark.parametrize(
"value, mime_type, source_url",
[
(b"test", "image/*", None),
(b"test", "image/jpg", None),
(b"test", "image/png", None),
(b"test", "image/*", "mock_url"),
]
)
def test_pfbytes_contract(self, value, mime_type, source_url):
pfBytes = PFBytes(value, mime_type, source_url)
assert pfBytes._mime_type == mime_type
assert pfBytes._hash == "a94a8fe5"
assert pfBytes.to_base64() == "dGVzdA=="
assert pfBytes.to_base64(with_type=True) == f"data:{mime_type};base64,dGVzdA=="
assert pfBytes.to_base64(with_type=True, dict_type=True) == {f"data:{mime_type};base64": "dGVzdA=="}
assert bytes(pfBytes) == value
assert pfBytes.source_url == source_url
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/contracts/test_types.py
import pytest
from promptflow.contracts.types import AssistantDefinition, Secret, PromptTemplate, FilePath
from promptflow.executor._assistant_tool_invoker import AssistantToolInvoker
@pytest.mark.unittest
def test_secret():
secret = Secret('my_secret')
secret.set_secret_name('secret_name')
assert secret.secret_name == 'secret_name'
@pytest.mark.unittest
def test_prompt_template():
prompt = PromptTemplate('my_prompt')
assert isinstance(prompt, str)
assert str(prompt) == 'my_prompt'
@pytest.mark.unittest
def test_file_path():
file_path = FilePath('my_file_path')
assert isinstance(file_path, str)
@pytest.mark.unittest
def test_assistant_definition():
data = {"model": "model", "instructions": "instructions", "tools": []}
assistant_definition = AssistantDefinition.deserialize(data)
assert isinstance(assistant_definition, AssistantDefinition)
assert assistant_definition.model == "model"
assert assistant_definition.instructions == "instructions"
assert assistant_definition.tools == []
assert assistant_definition.serialize() == data
assert isinstance(assistant_definition.init_tool_invoker(), AssistantToolInvoker)
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/contracts/test_tool.py
from enum import Enum
from typing import Any, Callable, NewType, Optional, Tuple, TypeVar, Union
import pytest
from promptflow._core.tools_manager import connections
from promptflow._sdk.entities import CustomStrongTypeConnection
from promptflow._sdk.entities._connection import AzureContentSafetyConnection
from promptflow.contracts.multimedia import Image
from promptflow.contracts.run_info import Status
from promptflow.contracts.tool import (
AssistantDefinition,
ConnectionType,
InputDefinition,
OutputDefinition,
Tool,
ToolType,
ValueType,
_deserialize_enum,
)
from promptflow.contracts.types import FilePath, PromptTemplate, Secret
class MyConnection(CustomStrongTypeConnection):
pass
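# Module-level sample connection, function and enum reused by the parametrized tests below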
my_connection = MyConnection(name="my_connection", secrets={"key": "value"})
def some_function():
pass
class TestStatus(Enum):
Running = 1
Preparing = 2
Completed = 3
@pytest.mark.unittest
@pytest.mark.parametrize(
"enum, value, expected",
[
(Status, "Running", Status.Running),
(Status, "running", Status.Running),
(Status, "FAILED", Status.Failed),
(Status, "UNKNOWN", "UNKNOWN"),
(TestStatus, "Running", "Running"),
],
)
def test_deserialize_enum(enum, value, expected):
assert _deserialize_enum(enum, value) == expected
@pytest.mark.unittest
class TestValueType:
@pytest.mark.parametrize(
"value, expected",
[
(1, ValueType.INT),
(1.0, ValueType.DOUBLE),
(True, ValueType.BOOL),
("string", ValueType.STRING),
([], ValueType.LIST),
({}, ValueType.OBJECT),
(Secret("secret"), ValueType.SECRET),
(PromptTemplate("prompt"), ValueType.PROMPT_TEMPLATE),
(FilePath("file_path"), ValueType.FILE_PATH),
(AssistantDefinition("model", "instructions", []), ValueType.ASSISTANT_DEFINITION),
],
)
def test_from_value(self, value, expected):
assert ValueType.from_value(value) == expected
@pytest.mark.parametrize(
"value, expected",
[
(int, ValueType.INT),
(float, ValueType.DOUBLE),
(bool, ValueType.BOOL),
(str, ValueType.STRING),
(list, ValueType.LIST),
(dict, ValueType.OBJECT),
(Secret, ValueType.SECRET),
(PromptTemplate, ValueType.PROMPT_TEMPLATE),
(FilePath, ValueType.FILE_PATH),
(Image, ValueType.IMAGE),
(AssistantDefinition, ValueType.ASSISTANT_DEFINITION),
],
)
def test_from_type(self, value, expected):
assert ValueType.from_type(value) == expected
@pytest.mark.parametrize(
"value, value_type, expected",
[
("1", ValueType.INT, 1),
("1.0", ValueType.DOUBLE, 1.0),
("true", ValueType.BOOL, True),
("false", ValueType.BOOL, False),
(True, ValueType.BOOL, True),
(123, ValueType.STRING, "123"),
('["a", "b", "c"]', ValueType.LIST, ["a", "b", "c"]),
('{"key": "value"}', ValueType.OBJECT, {"key": "value"}),
("[1, 2, 3]", ValueType.OBJECT, [1, 2, 3]),
("{", ValueType.OBJECT, "{"),
([1, 2, 3], ValueType.OBJECT, [1, 2, 3]),
],
)
def test_parse(self, value, value_type, expected):
assert value_type.parse(value) == expected
@pytest.mark.parametrize(
"value, value_type",
[
("1", ValueType.BOOL),
({}, ValueType.LIST),
],
)
def test_parse_error(self, value, value_type):
with pytest.raises(ValueError):
value_type.parse(value)
@pytest.mark.unittest
class TestConnectionType:
@pytest.mark.parametrize(
"type_name, expected",
[
("AzureContentSafetyConnection", connections.get("AzureContentSafetyConnection")),
("AzureOpenAIConnection", connections.get("AzureOpenAIConnection")),
("_Connection", connections.get("_Connection")),
("unknown", None),
(123, None),
],
)
def test_get_connection_class(self, type_name, expected):
assert ConnectionType.get_connection_class(type_name) == expected
@pytest.mark.parametrize(
"type_name, expected",
[
("AzureContentSafetyConnection", True),
("AzureOpenAIConnection", True),
("_Connection", True),
("unknown", False),
(123, False),
],
)
def test_is_connection_class_name(self, type_name, expected):
assert ConnectionType.is_connection_class_name(type_name) == expected
@pytest.mark.parametrize(
"value, expected",
[
(connections.get("AzureContentSafetyConnection"), True),
(AzureContentSafetyConnection("api_key", "endpoint"), True),
(Status, False),
            ("non_connection_instance", False),
],
)
def test_is_connection_value(self, value, expected):
assert ConnectionType.is_connection_value(value) == expected
@pytest.mark.parametrize(
"val, expected_res",
[
(my_connection, True),
(MyConnection, True),
(list, False),
# (list[str], False), # Python 3.9
# (list[int], False),
([1, 2, 3], False),
(float, False),
(int, False),
(5, False),
(str, False),
(some_function, False),
(Union[str, int], False),
# ((int | str), False), # Python 3.10
(tuple, False),
# (tuple[str, int], False), # Python 3.9
(Tuple[int, ...], False),
# (dict[str, Any], False), # Python 3.9
({"test1": [1, 2, 3], "test2": [4, 5, 6], "test3": [7, 8, 9]}, False),
(Any, False),
(None, False),
(Optional[str], False),
(TypeVar("T"), False),
(TypeVar, False),
(Callable, False),
(Callable[..., Any], False),
(NewType("MyType", int), False),
],
)
def test_is_custom_strong_type(self, val, expected_res):
assert ConnectionType.is_custom_strong_type(val) == expected_res
def test_serialize_conn(self):
assert ConnectionType.serialize_conn(AzureContentSafetyConnection) == "ABCMeta"
connection_instance = AzureContentSafetyConnection("api_key", "endpoint")
assert ConnectionType.serialize_conn(connection_instance) == "AzureContentSafetyConnection"
with pytest.raises(ValueError):
ConnectionType.serialize_conn("non_connection_instance")
@pytest.mark.unittest
class TestInputDefinition:
def test_serialize(self):
# test when len(type) == 1
input_def = InputDefinition(
[ValueType.STRING],
default="Default",
description="Description",
enum=["A", "B", "C"],
custom_type=["customtype"],
)
serialized = input_def.serialize()
assert serialized == {
"type": "string",
"default": "Default",
"description": "Description",
"enum": ["A", "B", "C"],
"custom_type": ["customtype"],
}
# test when len(type) > 1
input_def = InputDefinition([ValueType.STRING, ValueType.INT])
serialized = input_def.serialize()
assert serialized == {"type": ["string", "int"]}
def test_deserialize(self):
serialized = {"type": "string", "default": "Default", "description": "Description", "enum": ["A", "B", "C"]}
deserialized = InputDefinition.deserialize(serialized)
assert deserialized.type == [ValueType.STRING]
assert deserialized.default == "Default"
assert deserialized.description == "Description"
assert deserialized.enum == ["A", "B", "C"]
serialized = {"type": ["string", "int"]}
deserialized = InputDefinition.deserialize(serialized)
assert deserialized.type == [ValueType.STRING, ValueType.INT]
assert deserialized.default == ""
assert deserialized.description == ""
assert deserialized.enum == []
@pytest.mark.unittest
class TestOutDefinition:
@pytest.mark.parametrize(
"value, expected",
[
(
OutputDefinition([ValueType.STRING], description="Description", is_property=True),
{"type": "string", "description": "Description", "is_property": True},
),
(OutputDefinition([ValueType.STRING, ValueType.INT]), {"type": ["string", "int"], "is_property": False}),
],
)
def test_serialize(self, value, expected):
assert value.serialize() == expected
@pytest.mark.parametrize(
"value, expected",
[
(
{"type": "string", "description": "Description", "is_property": True},
OutputDefinition([ValueType.STRING], description="Description", is_property=True),
),
({"type": ["string", "int"]}, OutputDefinition([ValueType.STRING, ValueType.INT])),
],
)
def test_deserialize(self, value, expected):
assert OutputDefinition.deserialize(value) == expected
@pytest.mark.unittest
class TestTool:
@pytest.mark.parametrize(
"tool_type, expected_keys",
[
(ToolType._ACTION, ["name", "description", "enable_kwargs"]),
(ToolType.LLM, ["name", "type", "inputs", "description", "enable_kwargs"]),
],
)
def test_serialize_tool(self, tool_type, expected_keys):
tool = Tool(name="test_tool", type=tool_type, inputs={}, outputs={}, description="description")
serialized_tool = tool.serialize()
assert set(serialized_tool.keys()) == set(expected_keys)
def test_deserialize_tool(self):
data = {
"name": "test_tool",
"type": "LLM",
"inputs": {"input1": {"type": "ValueType1"}},
}
tool = Tool.deserialize(data)
assert tool.name == data["name"]
assert tool.type == ToolType[data["type"]]
assert "input1" in tool.inputs
@pytest.mark.parametrize(
"tooltype, connection_type, expected",
[
(ToolType.LLM, None, True),
(ToolType._ACTION, ["AzureContentSafetyConnection"], True),
(ToolType._ACTION, None, False),
],
)
def test_require_connection(self, tooltype, connection_type, expected):
tool = Tool(name="Test Tool", type=tooltype, inputs={}, connection_type=connection_type)
assert tool._require_connection() == expected
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/contracts/test_run_info.py
from datetime import datetime
import pytest
from promptflow.contracts.run_info import FlowRunInfo, RunInfo, Status
@pytest.mark.unittest
class TestStatus:
@pytest.mark.parametrize(
"status,expected",
[
(Status.Completed, True),
(Status.Failed, True),
(Status.Bypassed, True),
(Status.Canceled, True),
(Status.Running, False),
(Status.Preparing, False),
(Status.NotStarted, False),
(Status.CancelRequested, False),
(123, False),
],
)
def test_status_is_terminated(self, status, expected):
assert Status.is_terminated(status) == expected
@pytest.mark.unittest
class TestRunInfo:
def test_creation(self):
run_info = RunInfo(
node="node1",
flow_run_id="123",
run_id="123:456",
status=Status.Running,
inputs=[],
output={},
metrics={},
error={},
parent_run_id="789",
start_time=datetime.now(),
end_time=datetime.now(),
system_metrics={},
)
assert run_info.node == "node1"
assert run_info.flow_run_id == "123"
assert run_info.run_id == "123:456"
assert run_info.status == Status.Running
def test_deserialize(self):
run_info_dict = {
"node": "get_answer",
"flow_run_id": "",
"run_id": "dummy_run_id",
"status": "Completed",
"inputs": {"question": "string"},
"output": "Hello world: What's promptflow?",
"metrics": None,
"error": None,
"parent_run_id": "dummy_flow_run_id",
"start_time": "2023-11-24T06:03:20.2688262Z",
"end_time": "2023-11-24T06:03:20.268858Z",
"index": 0,
"api_calls": None,
"variant_id": "",
"cached_run_id": None,
"cached_flow_run_id": None,
"logs": None,
"system_metrics": {"duration": "00:00:00.0000318", "total_tokens": 0},
"result": "Hello world: What's promptflow?",
}
run_info = RunInfo.deserialize(run_info_dict)
assert run_info.index == 0
assert isinstance(run_info.start_time, datetime) and isinstance(run_info.end_time, datetime)
assert run_info.status == Status.Completed
assert run_info.run_id == "dummy_run_id"
assert run_info.api_calls is None
assert run_info.system_metrics == {"duration": "00:00:00.0000318", "total_tokens": 0}
assert run_info.output == "Hello world: What's promptflow?"
@pytest.mark.unittest
class TestFlowRunInfo:
def test_creation(self):
flow_run_info = FlowRunInfo(
run_id="123:456",
status=Status.Running,
error={},
inputs={},
output={},
metrics={},
request={},
parent_run_id="789",
root_run_id="123",
source_run_id="456",
flow_id="flow1",
start_time=datetime.now(),
end_time=datetime.now(),
system_metrics={},
upload_metrics=False,
)
assert flow_run_info.run_id == "123:456"
assert flow_run_info.status == Status.Running
assert flow_run_info.flow_id == "flow1"
def test_deserialize(self):
flow_run_info_dict = {
"run_id": "dummy_run_id",
"status": "Completed",
"error": None,
"inputs": {"question": "What's promptflow?"},
"output": {"answer": "Hello world: What's promptflow?"},
"metrics": None,
"request": None,
"parent_run_id": None,
"root_run_id": None,
"source_run_id": None,
"flow_id": "Flow",
"start_time": "2023-11-23T10:58:37.9436245Z",
"end_time": "2023-11-23T10:58:37.9590789Z",
"index": 0,
"api_calls": None,
"variant_id": "",
"name": "",
"description": "",
"tags": None,
"system_metrics": {"duration": "00:00:00.0154544", "total_tokens": 0},
"result": {"answer": "Hello world: What's promptflow?"},
"upload_metrics": False,
}
flow_run_info = FlowRunInfo.deserialize(flow_run_info_dict)
assert flow_run_info.index == 0
assert isinstance(flow_run_info.start_time, datetime) and isinstance(flow_run_info.end_time, datetime)
assert flow_run_info.status == Status.Completed
assert flow_run_info.run_id == "dummy_run_id"
assert flow_run_info.api_calls is None
assert flow_run_info.system_metrics == {"duration": "00:00:00.0154544", "total_tokens": 0}
assert flow_run_info.output == {"answer": "Hello world: What's promptflow?"}
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/contracts/test_run_management.py
import json
import pytest
from promptflow._sdk._constants import VIS_JS_BUNDLE_FILENAME
from promptflow.contracts._run_management import VisualizationRender
@pytest.mark.unittest
def test_visualization_render():
data = {"key": "value"}
viz = VisualizationRender(data)
assert viz.data == json.dumps(json.dumps(data))
assert viz.js_path == VIS_JS_BUNDLE_FILENAME
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/executor/test_assistant_tool_invoker.py
import pytest
from pathlib import Path
from typing import Any, Callable
from promptflow import tool
from promptflow.executor._assistant_tool_invoker import AssistantToolInvoker
from promptflow.executor._errors import UnsupportedAssistantToolType
@pytest.mark.unittest
class TestAssistantToolInvoker:
@pytest.fixture
def tool_definitions(self):
return [
{"type": "code_interpreter"},
{"type": "retrieval"},
{
"type": "function",
"tool_type": "python",
"source": {"type": "code", "path": "test_assistant_tool_invoker.py"},
}
]
@pytest.mark.parametrize(
"predefined_inputs", [({}), ({"input_int": 1})]
)
def test_load_tools(self, predefined_inputs):
input_int = 1
input_str = "test"
tool_definitions = [
{"type": "code_interpreter"},
{"type": "retrieval"},
{
"type": "function",
"tool_type": "python",
"source": {"type": "code", "path": "test_assistant_tool_invoker.py"},
"predefined_inputs": predefined_inputs
}
]
# Test load tools
invoker = AssistantToolInvoker.init(tool_definitions, working_dir=Path(__file__).parent)
for tool_name, assistant_tool in invoker._assistant_tools.items():
assert tool_name in ("code_interpreter", "retrieval", "sample_tool")
assert assistant_tool.name == tool_name
assert isinstance(assistant_tool.openai_definition, dict)
if tool_name in ("code_interpreter", "retrieval"):
assert assistant_tool.func is None
else:
assert isinstance(assistant_tool.func, Callable)
# Test to_openai_tools
descriptions = invoker.to_openai_tools()
assert len(descriptions) == 3
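        # Expected parameter schema for sample_tool; any predefined inputs are removed before comparison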
properties = {
"input_int": {"description": "This is a sample input int.", "type": "number"},
"input_str": {"description": "This is a sample input str.", "type": "string"}
}
required = ["input_int", "input_str"]
self._remove_predefined_inputs(properties, predefined_inputs.keys())
self._remove_predefined_inputs(required, predefined_inputs.keys())
for description in descriptions:
if description["type"] in ("code_interpreter", "retrieval"):
assert description == {"type": description["type"]}
else:
assert description == {
"type": "function",
"function": {
"name": "sample_tool",
"description": "This is a sample tool.",
"parameters": {
"type": "object",
"properties": properties,
"required": required
}
}
}
# Test invoke tool
kwargs = {"input_int": input_int, "input_str": input_str}
self._remove_predefined_inputs(kwargs, predefined_inputs.keys())
result = invoker.invoke_tool(func_name="sample_tool", kwargs=kwargs)
assert result == (input_int, input_str)
def test_load_tools_with_invalid_case(self):
tool_definitions = [{"type": "invalid_type"}]
with pytest.raises(UnsupportedAssistantToolType) as exc_info:
AssistantToolInvoker.init(tool_definitions)
assert "Unsupported assistant tool type" in exc_info.value.message
    def _remove_predefined_inputs(self, value: Any, predefined_inputs: list):
for input in predefined_inputs:
if input in value:
if isinstance(value, dict):
value.pop(input)
elif isinstance(value, list):
value.remove(input)
@tool
def sample_tool(input_int: int, input_str: str):
"""This is a sample tool.
:param input_int: This is a sample input int.
:type input_int: int
:param input_str: This is a sample input str.
:type input_str: str
"""
return input_int, input_str
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/executor/test_flow_validator.py
import pytest
from promptflow.contracts.flow import Flow, FlowInputDefinition
from promptflow.contracts.tool import ValueType
from promptflow.executor._errors import InputParseError, InputTypeError, InvalidAggregationInput, InvalidFlowRequest
from promptflow.executor.flow_validator import FlowValidator
from ...utils import WRONG_FLOW_ROOT, get_flow_from_folder
@pytest.mark.unittest
class TestFlowValidator:
@pytest.mark.parametrize(
"flow_folder, expected_node_order",
[
("unordered_nodes", ["first_node", "second_node", "third_node"]),
("unordered_nodes_with_skip", ["first_node", "second_node", "third_node"]),
("unordered_nodes_with_activate", ["first_node", "second_node", "third_node"]),
],
)
def test_ensure_nodes_order(self, flow_folder, expected_node_order):
flow = get_flow_from_folder(flow_folder)
flow = FlowValidator._ensure_nodes_order(flow)
node_order = [node.name for node in flow.nodes]
assert node_order == expected_node_order
@pytest.mark.parametrize(
"flow_folder, error_message",
[
(
"nodes_cycle",
(
"Invalid node definitions found in the flow graph. Node circular dependency has been detected "
"among the nodes in your flow. Kindly review the reference relationships for the nodes "
"['first_node', 'second_node'] and resolve the circular reference issue in the flow."
),
),
(
"nodes_cycle_with_activate",
(
"Invalid node definitions found in the flow graph. Node circular dependency has been detected "
"among the nodes in your flow. Kindly review the reference relationships "
"for the nodes ['first_node', 'second_node'] and resolve the circular reference issue in the flow."
),
),
(
"wrong_node_reference",
(
"Invalid node definitions found in the flow graph. Node 'second_node' references a non-existent "
"node 'third_node' in your flow. Please review your flow to ensure that the node "
"name is accurately specified."
),
),
(
"non_aggregation_reference_aggregation",
(
"Invalid node definitions found in the flow graph. Non-aggregate node 'test_node' "
"cannot reference aggregate nodes {'calculate_accuracy'}. Please review and rectify "
"the node reference."
),
),
(
"aggregation_activate_reference_non_aggregation",
(
"Invalid node definitions found in the flow graph. Non-aggregation node 'grade' cannot be "
"referenced in the activate config of the aggregation node 'calculate_accuracy'. Please "
"review and rectify the node reference."
),
),
],
)
def test_ensure_nodes_order_with_exception(self, flow_folder, error_message):
flow = get_flow_from_folder(flow_folder, root=WRONG_FLOW_ROOT)
with pytest.raises(InvalidFlowRequest) as e:
FlowValidator._ensure_nodes_order(flow)
assert str(e.value) == error_message, "Expected: {}, Actual: {}".format(error_message, str(e.value))
@pytest.mark.parametrize(
"aggregated_flow_inputs, aggregation_inputs, error_message",
[
(
{},
{
"input1": "value1",
},
"The input for aggregation is incorrect. "
"The value for aggregated reference input 'input1' should be a list, "
"but received str. Please adjust the input value to match the expected format.",
),
(
{
"input1": "value1",
},
{},
"The input for aggregation is incorrect. "
"The value for aggregated flow input 'input1' should be a list, "
"but received str. Please adjust the input value to match the expected format.",
),
(
{"input1": ["value1_1", "value1_2"]},
{"input_2": ["value2_1"]},
"The input for aggregation is incorrect. The length of all aggregated inputs should be the same. "
"Current input lengths are: {'input1': 2, 'input_2': 1}. "
"Please adjust the input value in your input data.",
),
(
{
"input1": "value1",
},
{
"input1": "value1",
},
"The input for aggregation is incorrect. "
"The input 'input1' appears in both aggregated flow input and aggregated reference input. "
"Please remove one of them and try the operation again.",
),
],
)
def test_validate_aggregation_inputs_error(self, aggregated_flow_inputs, aggregation_inputs, error_message):
with pytest.raises(InvalidAggregationInput) as e:
FlowValidator._validate_aggregation_inputs(aggregated_flow_inputs, aggregation_inputs)
assert str(e.value) == error_message
@pytest.mark.parametrize(
"flow_folder",
["simple_flow_with_python_tool_and_aggregate"],
)
def test_ensure_outputs_valid_with_aggregation(self, flow_folder):
flow = get_flow_from_folder(flow_folder)
assert flow.outputs["content"] is not None
assert flow.outputs["aggregate_content"] is not None
flow.outputs = FlowValidator._ensure_outputs_valid(flow)
print(flow.outputs)
assert flow.outputs["content"] is not None
assert flow.outputs.get("aggregate_content") is None
@pytest.mark.parametrize(
"flow_folder, inputs, index, error_type, error_message",
[
(
"flow_with_list_input",
{"key": "['hello']"},
None,
InputParseError,
(
"Failed to parse the flow input. The value for flow input 'key' was "
"interpreted as JSON string since its type is 'list'. However, the value "
"'['hello']' is invalid for JSON parsing. Error details: (JSONDecodeError) "
"Expecting value: line 1 column 2 (char 1). Please make sure your inputs are properly formatted."
),
),
(
"flow_with_list_input",
{"key": "['hello']"},
0,
InputParseError,
(
"Failed to parse the flow input. The value for flow input 'key' in line 0 of input data was "
"interpreted as JSON string since its type is 'list'. However, the value "
"'['hello']' is invalid for JSON parsing. Error details: (JSONDecodeError) "
"Expecting value: line 1 column 2 (char 1). Please make sure your inputs are properly formatted."
),
),
],
)
def test_resolve_flow_inputs_type_json_error_for_list_type(
self, flow_folder, inputs, index, error_type, error_message
):
flow = get_flow_from_folder(flow_folder)
with pytest.raises(error_type) as exe_info:
FlowValidator.resolve_flow_inputs_type(flow, inputs, idx=index)
assert error_message == exe_info.value.message
@pytest.mark.parametrize(
"inputs, expected_result",
[({"test_input": ["1", "2"]}, {"test_input": [1, 2]})],
)
def test_resolve_aggregated_flow_inputs_type(self, inputs, expected_result):
flow = Flow(
id="fakeId",
name=None,
nodes=[],
inputs={"test_input": FlowInputDefinition(type=ValueType.INT)},
outputs=None,
tools=[],
)
result = FlowValidator.resolve_aggregated_flow_inputs_type(flow, inputs)
assert result == expected_result
@pytest.mark.parametrize(
"inputs, expected_message",
[
(
{"test_input": ["1", "str"]},
(
"The input for flow is incorrect. The value for flow input 'test_input' in line 1 of input data "
"does not match the expected type 'int'. "
"Please change flow input type or adjust the input value in your input data."
),
)
],
)
def test_resolve_aggregated_flow_inputs_type_error(self, inputs, expected_message):
flow = Flow(
id="fakeId",
name=None,
nodes=[],
inputs={"test_input": FlowInputDefinition(type=ValueType.INT)},
outputs=None,
tools=[],
)
with pytest.raises(InputTypeError) as ex:
FlowValidator.resolve_aggregated_flow_inputs_type(flow, inputs)
assert expected_message == str(ex.value)
@pytest.mark.parametrize(
"input, type, expected_result",
[
("1", ValueType.INT, 1),
('["1", "2"]', ValueType.LIST, ["1", "2"]),
],
)
def test_parse_input_value(self, input, type, expected_result):
input_key = "test_input"
result = FlowValidator._parse_input_value(input_key, input, type)
assert result == expected_result
@pytest.mark.parametrize(
"input, type, index, error_type, expected_message",
[
(
"str",
ValueType.INT,
None,
InputTypeError,
(
"The input for flow is incorrect. The value for flow input 'my_input' does not match the expected "
"type 'int'. Please change flow input type or adjust the input value in your input data."
),
),
(
"['1', '2']",
ValueType.LIST,
None,
InputParseError,
(
"Failed to parse the flow input. The value for flow input 'my_input' was interpreted as JSON "
"string since its type is 'list'. However, the value '['1', '2']' is invalid for JSON parsing. "
"Error details: (JSONDecodeError) Expecting value: line 1 column 2 (char 1). "
"Please make sure your inputs are properly formatted."
),
),
(
"str",
ValueType.INT,
10,
InputTypeError,
(
"The input for flow is incorrect. The value for flow input 'my_input' in line 10 of "
"input data does not match the expected type 'int'. "
"Please change flow input type or adjust the input value in your input data."
),
),
(
"['1', '2']",
ValueType.LIST,
10,
InputParseError,
(
"Failed to parse the flow input. The value for flow input 'my_input' in line 10 of input data "
"was interpreted as JSON string since its type is 'list'. However, the value '['1', '2']' is "
"invalid for JSON parsing. Error details: (JSONDecodeError) Expecting value: "
"line 1 column 2 (char 1). Please make sure your inputs are properly formatted."
),
),
],
)
def test_parse_input_value_error(self, input, type, index, error_type, expected_message):
input_key = "my_input"
with pytest.raises(error_type) as ex:
FlowValidator._parse_input_value(input_key, input, type, index)
assert expected_message == str(ex.value)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/executor/test_tool_resolver.py | import re
import sys
from pathlib import Path
from typing import List
from unittest.mock import mock_open
import pytest
from jinja2 import TemplateSyntaxError
from promptflow._core._errors import InvalidSource
from promptflow._core.tools_manager import ToolLoader
from promptflow._internal import tool
from promptflow._sdk.entities import CustomConnection, CustomStrongTypeConnection
from promptflow.connections import AzureOpenAIConnection
from promptflow.contracts.flow import InputAssignment, InputValueType, Node, ToolSource, ToolSourceType
from promptflow.contracts.tool import AssistantDefinition, InputDefinition, Secret, Tool, ToolType, ValueType
from promptflow.contracts.types import PromptTemplate
from promptflow.exceptions import UserErrorException
from promptflow.executor._errors import (
ConnectionNotFound,
InvalidConnectionType,
NodeInputValidationError,
ResolveToolError,
ValueTypeUnresolved,
)
from promptflow.executor._tool_resolver import ResolvedTool, ToolResolver
from ...utils import DATA_ROOT, FLOW_ROOT
TEST_ROOT = Path(__file__).parent.parent.parent
REQUESTS_PATH = TEST_ROOT / "test_configs/executor_api_requests"
WRONG_REQUESTS_PATH = TEST_ROOT / "test_configs/executor_wrong_requests"
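# Custom strong type connection used to verify conversion of custom connection values.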
class MyFirstCSTConnection(CustomStrongTypeConnection):
api_key: Secret
api_base: str
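# Package tool stub that fills "{{placeholder}}" tokens in the prompt with the given kwargs.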
@tool(streaming_option_parameter="stream_enabled")
def mock_package_func(prompt: PromptTemplate, **kwargs):
for k, v in kwargs.items():
prompt = prompt.replace(f"{{{{{k}}}}}", str(v))
return prompt
@pytest.mark.unittest
class TestToolResolver:
@pytest.fixture
def resolver(self):
return ToolResolver(working_dir=None, connections={})
def test_resolve_tool_by_node_with_diff_type(self, resolver, mocker):
node = mocker.Mock(name="node", tool=None, inputs={})
mocker.patch.object(
resolver,
"_resolve_package_node",
return_value=mocker.Mock(node=node, definition=None, callable=None, init_args=None),
)
mocker.patch.object(
resolver,
"_resolve_script_node",
return_value=mocker.Mock(node=node, definition=None, callable=None, init_args=None),
)
mocker.patch.object(
resolver,
"_resolve_prompt_node",
return_value=mocker.Mock(node=node, definition=None, callable=None, init_args=None),
)
mocker.patch.object(
resolver,
"_resolve_llm_node",
return_value=mocker.Mock(node=node, definition=None, callable=None, init_args=None),
)
mocker.patch.object(
resolver,
"_integrate_prompt_in_package_node",
return_value=mocker.Mock(node=node, definition=None, callable=None, init_args=None),
)
node.type = ToolType.PYTHON
node.source = mocker.Mock(type=ToolSourceType.Package)
resolver.resolve_tool_by_node(node)
resolver._resolve_package_node.assert_called_once()
node.type = ToolType.PYTHON
node.source = mocker.Mock(type=ToolSourceType.Code)
resolver.resolve_tool_by_node(node)
resolver._resolve_script_node.assert_called_once()
node.type = ToolType.PROMPT
resolver.resolve_tool_by_node(node)
resolver._resolve_prompt_node.assert_called_once()
node.type = ToolType.LLM
resolver.resolve_tool_by_node(node)
resolver._resolve_llm_node.assert_called_once()
resolver._resolve_package_node.reset_mock()
node.type = ToolType.CUSTOM_LLM
node.source = mocker.Mock(type=ToolSourceType.PackageWithPrompt)
resolver.resolve_tool_by_node(node)
resolver._resolve_package_node.assert_called_once()
resolver._integrate_prompt_in_package_node.assert_called_once()
def test_resolve_tool_by_node_with_invalid_type(self, resolver, mocker):
node = mocker.Mock(name="node", tool=None, inputs={})
node.source = mocker.Mock(type=None)
with pytest.raises(ResolveToolError) as exec_info:
resolver.resolve_tool_by_node(node)
assert isinstance(exec_info.value.inner_exception, NotImplementedError)
assert "Tool type" in exec_info.value.message
def test_resolve_tool_by_node_with_invalid_source_type(self, resolver, mocker):
node = mocker.Mock(name="node", tool=None, inputs={})
node.type = ToolType.PYTHON
node.source = mocker.Mock(type=None)
with pytest.raises(ResolveToolError) as exec_info:
resolver.resolve_tool_by_node(node)
assert isinstance(exec_info.value.inner_exception, NotImplementedError)
assert "Tool source type" in exec_info.value.message
node.type = ToolType.CUSTOM_LLM
node.source = mocker.Mock(type=None)
with pytest.raises(ResolveToolError) as exec_info:
resolver.resolve_tool_by_node(node)
assert isinstance(exec_info.value.inner_exception, NotImplementedError)
assert "Tool source type" in exec_info.value.message
def test_resolve_tool_by_node_with_no_source(self, resolver, mocker):
node = mocker.Mock(name="node", tool=None, inputs={})
node.source = None
with pytest.raises(ResolveToolError) as ex:
resolver.resolve_tool_by_node(node)
assert isinstance(ex.value.inner_exception, UserErrorException)
def test_resolve_tool_by_node_with_no_source_path(self, resolver, mocker):
node = mocker.Mock(name="node", tool=None, inputs={})
node.type = ToolType.PROMPT
node.source = mocker.Mock(type=ToolSourceType.Package, path=None)
with pytest.raises(ResolveToolError) as exec_info:
resolver.resolve_tool_by_node(node)
assert isinstance(exec_info.value.inner_exception, InvalidSource)
assert "Node source path" in exec_info.value.message
def test_resolve_tool_by_node_with_duplicated_inputs(self, resolver, mocker):
node = mocker.Mock(name="node", tool=None, inputs={})
node.type = ToolType.PROMPT
mocker.patch.object(resolver, "_load_source_content", return_value="{{template}}")
with pytest.raises(ResolveToolError) as exec_info:
resolver.resolve_tool_by_node(node)
assert isinstance(exec_info.value.inner_exception, NodeInputValidationError)
assert "These inputs are duplicated" in exec_info.value.message
def test_resolve_tool_by_node_with_invalid_template(self, resolver, mocker):
node = mocker.Mock(tool=None, inputs={})
node.name = "node"
node.type = ToolType.PROMPT
mocker.patch.object(resolver, "_load_source_content", return_value="{{current context}}")
with pytest.raises(ResolveToolError) as exec_info:
resolver.resolve_tool_by_node(node)
assert isinstance(exec_info.value.inner_exception, TemplateSyntaxError)
expected_message = (
"Tool load failed in 'node': Jinja parsing failed at line 1: "
"(TemplateSyntaxError) expected token 'end of print statement', got 'context'"
)
assert expected_message in exec_info.value.message
def test_convert_node_literal_input_types_with_invalid_case(self):
# Case 1: conn_name not in connections, should raise conn_name not found error
tool = Tool(name="mock", type="python", inputs={"conn": InputDefinition(type=["CustomConnection"])})
node = Node(
name="mock",
tool=tool,
inputs={"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL)},
)
with pytest.raises(ConnectionNotFound):
tool_resolver = ToolResolver(working_dir=None, connections={})
tool_resolver._convert_node_literal_input_types(node, tool)
# Case 2: conn_name in connections, but type not matched
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
with pytest.raises(NodeInputValidationError) as exe_info:
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._convert_node_literal_input_types(node, tool)
message = "'AzureOpenAIConnection' is not supported, valid types ['CustomConnection']"
assert message in exe_info.value.message, "Expected: {}, Actual: {}".format(message, exe_info.value.message)
# Case 3: Literal value, type mismatch
tool = Tool(name="mock", type="python", inputs={"int_input": InputDefinition(type=[ValueType.INT])})
node = Node(
name="mock",
tool=tool,
inputs={"int_input": InputAssignment(value="invalid", value_type=InputValueType.LITERAL)},
)
with pytest.raises(NodeInputValidationError) as exe_info:
tool_resolver = ToolResolver(working_dir=None, connections={})
tool_resolver._convert_node_literal_input_types(node, tool)
message = "value 'invalid' is not type int"
assert message in exe_info.value.message, "Expected: {}, Actual: {}".format(message, exe_info.value.message)
# Case 4: Unresolved value, like newly added type not in old version ValueType enum
tool = Tool(name="mock", type="python", inputs={"int_input": InputDefinition(type=["A_good_type"])})
node = Node(
name="mock",
tool=tool,
inputs={"int_input": InputAssignment(value="invalid", value_type=InputValueType.LITERAL)},
)
with pytest.raises(ValueTypeUnresolved):
tool_resolver = ToolResolver(working_dir=None, connections={})
tool_resolver._convert_node_literal_input_types(node, tool)
# Case 5: Literal value, invalid image in list
tool = Tool(name="mock", type="python", inputs={"list_input": InputDefinition(type=[ValueType.LIST])})
invalid_image = {"data:image/jpg;base64": "invalid_image"}
node = Node(
name="mock",
tool=tool,
inputs={"list_input": InputAssignment(value=[invalid_image], value_type=InputValueType.LITERAL)},
)
with pytest.raises(NodeInputValidationError) as exe_info:
tool_resolver = ToolResolver(working_dir=None, connections={})
tool_resolver._convert_node_literal_input_types(node, tool)
message = "Invalid base64 image"
assert message in exe_info.value.message, "Expected: {}, Actual: {}".format(message, exe_info.value.message)
# Case 6: Literal value, invalid assistant definition path
tool = Tool(
name="mock",
type="python",
inputs={"assistant_definition": InputDefinition(type=[ValueType.ASSISTANT_DEFINITION])},
)
node = Node(
name="mock",
tool=tool,
inputs={"assistant_definition": InputAssignment(value="invalid_path", value_type=InputValueType.LITERAL)},
)
with pytest.raises(NodeInputValidationError) as exe_info:
tool_resolver = ToolResolver(working_dir=Path(__file__).parent, connections={})
tool_resolver._convert_node_literal_input_types(node, tool)
assert (
"Failed to load assistant definition" in exe_info.value.message
and "is not a valid path" in exe_info.value.message
), "Expected: {}, Actual: {}".format(message, exe_info.value.message)
def test_resolve_llm_connection_to_inputs(self):
# Case 1: node.connection is not specified
tool = Tool(name="mock", type="python", inputs={"conn": InputDefinition(type=["CustomConnection"])})
node = Node(
name="mock",
tool=tool,
inputs={"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL)},
)
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
with pytest.raises(ConnectionNotFound):
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._resolve_llm_connection_to_inputs(node, tool)
# Case 2: node.connection is not found from connection manager
tool = Tool(name="mock", type="python", inputs={"conn": InputDefinition(type=["CustomConnection"])})
node = Node(
name="mock",
tool=tool,
inputs={"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL)},
connection="conn_name1",
)
connections = {}
with pytest.raises(ConnectionNotFound):
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._resolve_llm_connection_to_inputs(node, tool)
# Case 3: Tool definition with bad input type list
tool = Tool(name="mock", type="python", inputs={"conn": InputDefinition(type=["int"])})
node = Node(
name="mock",
tool=tool,
inputs={"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL)},
connection="conn_name",
)
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
with pytest.raises(InvalidConnectionType) as exe_info:
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._resolve_llm_connection_to_inputs(node, tool)
assert "Connection type can not be resolved for tool" in exe_info.value.message
# Case 4: Tool type not match the connection manager return
tool = Tool(name="mock", type="python", inputs={"conn": InputDefinition(type=["OpenAIConnection"])})
node = Node(
name="mock",
tool=tool,
inputs={"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL)},
connection="conn_name",
)
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
with pytest.raises(InvalidConnectionType) as exe_info:
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._resolve_llm_connection_to_inputs(node, tool)
assert "Invalid connection" in exe_info.value.message
# Case 5: Normal case
tool = Tool(
name="mock",
type="python",
inputs={"conn": InputDefinition(type=["OpenAIConnection", "AzureOpenAIConnection"])},
)
node = Node(
name="mock",
tool=tool,
inputs={"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL)},
connection="conn_name",
)
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
tool_resolver = ToolResolver(working_dir=None, connections=connections)
key, conn = tool_resolver._resolve_llm_connection_to_inputs(node, tool)
assert key == "conn"
assert isinstance(conn, AzureOpenAIConnection)
def test_resolve_llm_node(self, mocker):
def mock_llm_api_func(prompt: PromptTemplate, **kwargs):
for k, v in kwargs.items():
prompt = prompt.replace(f"{{{{{k}}}}}", str(v))
return prompt
tool_loader = ToolLoader(working_dir=None)
tool = Tool(name="mock", type=ToolType.LLM, inputs={"conn": InputDefinition(type=["AzureOpenAIConnection"])})
mocker.patch.object(tool_loader, "load_tool_for_llm_node", return_value=tool)
mocker.patch(
"promptflow._core.tools_manager.BuiltinsManager._load_package_tool",
return_value=(mock_llm_api_func, {"conn": AzureOpenAIConnection}),
)
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._tool_loader = tool_loader
mocker.patch.object(tool_resolver, "_load_source_content", return_value="{{text}}![image]({{image}})")
node = Node(
name="mock",
tool=None,
inputs={
"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL),
"text": InputAssignment(value="Hello World!", value_type=InputValueType.LITERAL),
"image": InputAssignment(value=str(DATA_ROOT / "logo.jpg"), value_type=InputValueType.LITERAL),
},
connection="conn_name",
provider="mock",
)
resolved_tool = tool_resolver._resolve_llm_node(node, convert_input_types=True)
assert len(resolved_tool.node.inputs) == 2
kwargs = {k: v.value for k, v in resolved_tool.node.inputs.items()}
pattern = re.compile(r"^Hello World!!\[image\]\(Image\([a-z0-9]{8}\)\)$")
prompt = resolved_tool.callable(**kwargs)
assert re.match(pattern, prompt)
def test_resolve_script_node(self, mocker):
def mock_python_func(prompt: PromptTemplate, **kwargs):
for k, v in kwargs.items():
prompt = prompt.replace(f"{{{{{k}}}}}", str(v))
return prompt
tool_loader = ToolLoader(working_dir=None)
tool = Tool(name="mock", type=ToolType.PYTHON, inputs={"conn": InputDefinition(type=["AzureOpenAIConnection"])})
mocker.patch.object(tool_loader, "load_tool_for_script_node", return_value=(None, tool))
mocker.patch(
"promptflow._core.tools_manager.BuiltinsManager._load_tool_from_module",
return_value=(mock_python_func, {"conn": AzureOpenAIConnection}),
)
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._tool_loader = tool_loader
node = Node(
name="mock",
tool=None,
inputs={
"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL),
"prompt": InputAssignment(value="{{text}}", value_type=InputValueType.LITERAL),
"text": InputAssignment(value="Hello World!", value_type=InputValueType.LITERAL),
},
connection="conn_name",
provider="mock",
)
resolved_tool = tool_resolver._resolve_script_node(node, convert_input_types=True)
assert len(resolved_tool.node.inputs) == 2
kwargs = {k: v.value for k, v in resolved_tool.node.inputs.items()}
assert resolved_tool.callable(**kwargs) == "Hello World!"
def test_resolve_script_node_with_assistant_definition(self, mocker):
def mock_python_func(input: AssistantDefinition):
if input.model == "model" and input.instructions == "instructions" and input.tools == []:
return True
return False
tool_loader = ToolLoader(working_dir=None)
tool = Tool(
name="mock", type=ToolType.PYTHON, inputs={"input": InputDefinition(type=[ValueType.ASSISTANT_DEFINITION])}
)
mocker.patch.object(tool_loader, "load_tool_for_script_node", return_value=(None, tool))
mocker.patch(
"promptflow._core.tools_manager.BuiltinsManager._load_tool_from_module",
return_value=(mock_python_func, {}),
)
tool_resolver = ToolResolver(working_dir=Path(__file__).parent, connections={})
tool_resolver._tool_loader = tool_loader
mocker.patch("builtins.open", mock_open())
mocker.patch(
"ruamel.yaml.YAML.load", return_value={"model": "model", "instructions": "instructions", "tools": []}
)
node = Node(
name="mock",
tool=None,
inputs={"input": InputAssignment(value="test_tool_resolver.py", value_type=InputValueType.LITERAL)},
)
resolved_tool = tool_resolver._resolve_script_node(node, convert_input_types=True)
assert len(resolved_tool.node.inputs) == 1
kwargs = {k: v.value for k, v in resolved_tool.node.inputs.items()}
assert resolved_tool.callable(**kwargs)
def test_resolve_package_node(self, mocker):
tool_loader = ToolLoader(working_dir=None)
tool = Tool(name="mock", type=ToolType.PYTHON, inputs={"conn": InputDefinition(type=["AzureOpenAIConnection"])})
mocker.patch.object(tool_loader, "load_tool_for_package_node", return_value=tool)
mocker.patch(
"promptflow._core.tools_manager.BuiltinsManager._load_package_tool",
return_value=(mock_package_func, {"conn": AzureOpenAIConnection}),
)
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._tool_loader = tool_loader
node = Node(
name="mock",
tool=None,
inputs={
"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL),
"prompt": InputAssignment(value="{{text}}", value_type=InputValueType.LITERAL),
"text": InputAssignment(value="Hello World!", value_type=InputValueType.LITERAL),
},
connection="conn_name",
provider="mock",
)
resolved_tool = tool_resolver._resolve_package_node(node, convert_input_types=True)
assert len(resolved_tool.node.inputs) == 2
kwargs = {k: v.value for k, v in resolved_tool.node.inputs.items()}
assert resolved_tool.callable(**kwargs) == "Hello World!"
def test_integrate_prompt_in_package_node(self, mocker):
tool_resolver = ToolResolver(working_dir=None, connections={})
mocker.patch.object(
tool_resolver,
"_load_source_content",
return_value="{{text}}",
)
tool = Tool(name="mock", type=ToolType.CUSTOM_LLM, inputs={"prompt": InputDefinition(type=["PromptTemplate"])})
node = Node(
name="mock",
tool=None,
inputs={"text": InputAssignment(value="Hello World!", value_type=InputValueType.LITERAL)},
connection="conn_name",
provider="mock",
)
resolved_tool = ResolvedTool(node=node, callable=mock_package_func, definition=tool, init_args=None)
assert resolved_tool.callable._streaming_option_parameter == "stream_enabled"
resolved_tool = tool_resolver._integrate_prompt_in_package_node(resolved_tool)
assert resolved_tool.callable._streaming_option_parameter == "stream_enabled"
kwargs = {k: v.value for k, v in resolved_tool.node.inputs.items()}
assert resolved_tool.callable(**kwargs) == "Hello World!"
@pytest.mark.parametrize(
"conn_types, expected_type",
[
(["MyFirstCSTConnection"], MyFirstCSTConnection),
(["CustomConnection", "MyFirstCSTConnection"], CustomConnection),
(["CustomConnection", "MyFirstCSTConnection", "MySecondCSTConnection"], CustomConnection),
(["MyFirstCSTConnection", "MySecondCSTConnection"], MyFirstCSTConnection),
],
)
def test_convert_to_custom_strong_type_connection_value(self, conn_types: List[str], expected_type, mocker):
connections = {"conn_name": {"type": "CustomConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
tool_resolver = ToolResolver(working_dir=None, connections=connections)
node = mocker.Mock(name="node", tool=None, inputs={})
node.type = ToolType.PYTHON
node.source = mocker.Mock(type=ToolSourceType.Code)
tool = Tool(name="tool", type="python", inputs={"conn": InputDefinition(type=["CustomConnection"])})
m = sys.modules[__name__]
v = InputAssignment(value="conn_name", value_type=InputValueType.LITERAL)
actual = tool_resolver._convert_to_custom_strong_type_connection_value(
"conn_name", v, node, tool, conn_types, m
)
assert isinstance(actual, expected_type)
assert actual.api_base == "mock"
def test_load_source(self):
# Create a mock Node object with a valid source path
node = Node(name="mock", tool=None, inputs={}, source=ToolSource())
node.source.path = "./script_with_special_character/script_with_special_character.py"
resolver = ToolResolver(FLOW_ROOT)
result = resolver._load_source_content(node)
assert "https://www.bing.com/\ue000\ue001/" in result
@pytest.mark.parametrize(
"source",
[
None,
ToolSource(path=None), # Then will try to read one directory.
ToolSource(path=""), # Then will try to read one directory.
ToolSource(path="NotExistPath.py"),
],
)
def test_load_source_error(self, source):
# Create a mock Node object with a valid source path
node = Node(name="mock", tool=None, inputs={}, source=source)
resolver = ToolResolver(FLOW_ROOT)
with pytest.raises(InvalidSource) as _:
resolver._load_source_content(node)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/executor/test_flow_nodes_scheduler.py | from concurrent.futures import Future
from typing import Callable
from unittest.mock import MagicMock
import pytest
from promptflow._core.flow_execution_context import FlowExecutionContext
from promptflow.contracts.flow import Node
from promptflow.executor._dag_manager import DAGManager
from promptflow.executor._flow_nodes_scheduler import (
DEFAULT_CONCURRENCY_BULK,
DEFAULT_CONCURRENCY_FLOW,
FlowNodesScheduler,
NoNodeExecutedError,
)
@pytest.mark.unittest
class TestFlowNodesScheduler:
def setup_method(self):
# Define mock objects and methods
self.tools_manager = MagicMock()
self.context = MagicMock(spec=FlowExecutionContext)
self.context.invoke_tool.side_effect = lambda _, func, kwargs: func(**kwargs)
self.scheduler = FlowNodesScheduler(self.tools_manager, {}, [], DEFAULT_CONCURRENCY_BULK, self.context)
    def test_maximum_concurrency(self):
scheduler = FlowNodesScheduler(self.tools_manager, {}, [], 1000, self.context)
assert scheduler._node_concurrency == DEFAULT_CONCURRENCY_FLOW
def test_collect_outputs(self):
future1 = Future()
future1.set_result("output1")
future2 = Future()
future2.set_result("output2")
node1 = MagicMock(spec=Node)
node1.name = "node1"
node2 = MagicMock(spec=Node)
node2.name = "node2"
self.scheduler._future_to_node = {future1: node1, future2: node2}
completed_nodes_outputs = self.scheduler._collect_outputs([future1, future2])
assert completed_nodes_outputs == {"node1": future1.result(), "node2": future2.result()}
def test_bypass_nodes(self):
executor = MagicMock()
dag_manager = MagicMock(spec=DAGManager)
node1 = MagicMock(spec=Node)
node1.name = "node1"
        # pop_bypassable_nodes returns a list with one node the first time
        # and an empty list the second time.
dag_manager.pop_bypassable_nodes.side_effect = ([node1], [])
self.scheduler._dag_manager = dag_manager
self.scheduler._execute_nodes(executor)
self.scheduler._context.bypass_node.assert_called_once_with(node1)
def test_submit_nodes(self):
executor = MagicMock()
dag_manager = MagicMock(spec=DAGManager)
node1 = MagicMock(spec=Node)
node1.name = "node1"
dag_manager.pop_bypassable_nodes.return_value = []
        # pop_ready_nodes returns a single ready node, which should be submitted rather than bypassed.
dag_manager.pop_ready_nodes.return_value = [node1]
self.scheduler._dag_manager = dag_manager
self.scheduler._execute_nodes(executor)
self.scheduler._context.bypass_node.assert_not_called()
assert node1 in self.scheduler._future_to_node.values()
def test_future_cancelled_for_exception(self):
dag_manager = MagicMock(spec=DAGManager)
self.scheduler._dag_manager = dag_manager
dag_manager.completed.return_value = False
dag_manager.pop_bypassable_nodes.return_value = []
dag_manager.pop_ready_nodes.return_value = []
failed_future = Future()
failed_future.set_exception(Exception("test"))
from concurrent.futures._base import CANCELLED, FINISHED
failed_future._state = FINISHED
cancelled_future = Future()
node1 = MagicMock(spec=Node)
node1.name = "node1"
node2 = MagicMock(spec=Node)
node2.name = "node2"
self.scheduler._future_to_node = {failed_future: node1, cancelled_future: node2}
try:
self.scheduler.execute()
except Exception:
pass
# Assert another future is cancelled.
assert CANCELLED in cancelled_future._state
def test_success_result(self):
dag_manager = MagicMock(spec=DAGManager)
finished_future = Future()
finished_future.set_result("output1")
finished_node = MagicMock(spec=Node)
finished_node.name = "node1"
self.scheduler._dag_manager = dag_manager
self.scheduler._future_to_node = {finished_future: finished_node}
# No more nodes need to run.
dag_manager.pop_bypassable_nodes.return_value = []
dag_manager.pop_ready_nodes.return_value = []
dag_manager.completed.side_effect = (False, True)
bypassed_node_result = {"bypassed_node": "output2"}
dag_manager.bypassed_nodes = bypassed_node_result
completed_node_result = {"completed_node": "output1"}
dag_manager.completed_nodes_outputs = completed_node_result
result = self.scheduler.execute()
dag_manager.complete_nodes.assert_called_once_with({"node1": "output1"})
assert result == (completed_node_result, bypassed_node_result)
def test_no_nodes_to_run(self):
dag_manager = MagicMock(spec=DAGManager)
dag_manager.pop_bypassable_nodes.return_value = []
dag_manager.pop_ready_nodes.return_value = []
dag_manager.completed.return_value = False
self.scheduler._dag_manager = dag_manager
with pytest.raises(NoNodeExecutedError) as _:
self.scheduler.execute()
def test_execute_single_node(self):
node_to_run = MagicMock(spec=Node)
node_to_run.name = "node1"
mock_callable = MagicMock(spec=Callable)
mock_callable.return_value = "output1"
self.scheduler._tools_manager.get_tool.return_value = mock_callable
dag_manager = MagicMock(spec=DAGManager)
dag_manager.get_node_valid_inputs.return_value = {"input": 1}
result = self.scheduler._exec_single_node_in_thread((node_to_run, dag_manager))
mock_callable.assert_called_once_with(**{"input": 1})
assert result == "output1"
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/executor/test_errors.py | import pytest
from promptflow._core.tool_meta_generator import PythonLoadError
from promptflow.exceptions import ErrorTarget
from promptflow.executor._errors import ResolveToolError
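# Helper that raises ZeroDivisionError, providing a real inner exception for the tests below.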
def code_with_bug():
1 / 0
def raise_resolve_tool_error(func, target=None, module=None):
try:
func()
except Exception as e:
if target:
raise ResolveToolError(node_name="MyTool", target=target, module=module) from e
raise ResolveToolError(node_name="MyTool") from e
def raise_python_load_error():
try:
code_with_bug()
except Exception as e:
raise PythonLoadError(message="Test PythonLoadError.") from e
def test_resolve_tool_error():
with pytest.raises(ResolveToolError) as e:
raise_resolve_tool_error(raise_python_load_error, ErrorTarget.TOOL, "__pf_main__")
exception = e.value
inner_exception = exception.inner_exception
assert isinstance(inner_exception, PythonLoadError)
assert exception.message == "Tool load failed in 'MyTool': (PythonLoadError) Test PythonLoadError."
assert exception.additional_info == inner_exception.additional_info
assert exception.error_codes == ["UserError", "ToolValidationError", "PythonParsingError", "PythonLoadError"]
assert exception.reference_code == "Tool/__pf_main__"
def test_resolve_tool_error_with_none_inner():
with pytest.raises(ResolveToolError) as e:
raise ResolveToolError(node_name="MyTool")
exception = e.value
assert exception.inner_exception is None
assert exception.message == "Tool load failed in 'MyTool'."
assert exception.additional_info is None
assert exception.error_codes == ["SystemError", "ResolveToolError"]
assert exception.reference_code == "Executor"
def test_resolve_tool_error_with_no_PromptflowException_inner():
with pytest.raises(ResolveToolError) as e:
raise_resolve_tool_error(code_with_bug)
exception = e.value
assert isinstance(exception.inner_exception, ZeroDivisionError)
assert exception.message == "Tool load failed in 'MyTool': (ZeroDivisionError) division by zero"
assert exception.additional_info is None
assert exception.error_codes == ["SystemError", "ZeroDivisionError"]
assert exception.reference_code == "Executor"
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/executor/test_dag_manager.py | import pytest
from promptflow.contracts.flow import ActivateCondition, InputAssignment, Node
from promptflow.executor._dag_manager import DAGManager
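# Builds a minimal Node with an optional activate condition for exercising the DAG manager.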
def create_test_node(name, input, activate=None):
input = InputAssignment.deserialize(input)
activate = ActivateCondition.deserialize(activate, name) if activate else None
return Node(
name=name,
tool="test_tool",
connection="azure_open_ai_connection",
inputs={"test_input": input, "test_input2": InputAssignment("hello world")},
provider="test_provider",
api="test_api",
activate=activate,
)
def pop_ready_node_names(dag_manager: DAGManager):
return {node.name for node in dag_manager.pop_ready_nodes()}
def pop_bypassed_node_names(dag_manager: DAGManager):
return {node.name for node in dag_manager.pop_bypassable_nodes()}
@pytest.mark.unittest
class TestDAGManager:
def test_pop_ready_nodes(self):
nodes = [
create_test_node("node1", input="value1"),
create_test_node("node2", input="${node1.output}"),
create_test_node("node3", input="${node1.output}"),
]
dag_manager = DAGManager(nodes, flow_inputs={})
assert pop_ready_node_names(dag_manager) == {"node1"}
dag_manager.complete_nodes({"node1": None})
assert pop_ready_node_names(dag_manager) == {"node2", "node3"}
dag_manager.complete_nodes({"node2": None, "node3": None})
def test_pop_bypassed_nodes(self):
nodes = [
create_test_node("node1", input="value1"),
create_test_node("node2", input="${inputs.text}", activate={"when": "${inputs.text}", "is": "world"}),
create_test_node("node3", input="${node1.output}"),
create_test_node("node4", input="${node2.output}"),
]
flow_inputs = {"text": "hello"}
dag_manager = DAGManager(nodes, flow_inputs)
expected_bypassed_nodes = {"node2", "node4"}
assert pop_bypassed_node_names(dag_manager) == expected_bypassed_nodes
assert dag_manager.bypassed_nodes.keys() == expected_bypassed_nodes
def test_complete_nodes(self):
nodes = [create_test_node("node1", input="value1")]
dag_manager = DAGManager(nodes, flow_inputs={})
dag_manager.complete_nodes({"node1": {"output1": "value1"}})
assert len(dag_manager.completed_nodes_outputs) == 1
assert dag_manager.completed_nodes_outputs["node1"] == {"output1": "value1"}
def test_completed(self):
nodes = [
create_test_node("node1", input="${inputs.text}", activate={"when": "${inputs.text}", "is": "hello"}),
create_test_node("node2", input="${node1.output}"),
]
flow_inputs = {"text": "hello"}
dag_manager = DAGManager(nodes, flow_inputs)
assert pop_ready_node_names(dag_manager) == {"node1"}
dag_manager.complete_nodes({"node1": {"output1": "value1"}})
assert pop_ready_node_names(dag_manager) == {"node2"}
dag_manager.complete_nodes({"node2": {"output1": "value1"}})
assert dag_manager.completed_nodes_outputs.keys() == {"node1", "node2"}
assert dag_manager.completed()
def test_get_node_valid_inputs(self):
nodes = [
create_test_node("node1", input="value1"),
create_test_node("node2", input="${node1.output}"),
]
def f(input):
return input
flow_inputs = {}
dag_manager = DAGManager(nodes, flow_inputs)
dag_manager.complete_nodes({"node1": {"output1": "value1"}})
valid_inputs = dag_manager.get_node_valid_inputs(nodes[1], f)
assert valid_inputs == {"test_input": {"output1": "value1"}, "test_input2": "hello world"}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/executor/test_flow_executor.py | from unittest.mock import Mock
import pytest
from promptflow import tool
from promptflow.contracts.flow import FlowInputDefinition
from promptflow.contracts.tool import ValueType
from promptflow.executor.flow_executor import (
FlowExecutor,
_ensure_node_result_is_serializable,
_inject_stream_options,
enable_streaming_for_llm_tool,
)
from promptflow.tools.aoai import chat, completion
from promptflow.tools.embedding import embedding
@pytest.mark.unittest
class TestFlowExecutor:
@pytest.mark.parametrize(
"flow_inputs, aggregated_flow_inputs, aggregation_inputs, expected_inputs",
[
(
{
"input_from_default": FlowInputDefinition(type=ValueType.STRING, default="default_value"),
},
{},
{},
{"input_from_default": ["default_value"]},
),
(
{
"input_no_default": FlowInputDefinition(type=ValueType.STRING),
},
{},
{},
{}, # No default value for input.
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.STRING, default="default_value"),
},
{"input_from_default": "input_value", "another_key": "input_value"},
{},
{"input_from_default": "input_value", "another_key": "input_value"},
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.STRING, default="default_value"),
},
{"another_key": ["input_value", "input_value"]},
{},
{
"input_from_default": ["default_value", "default_value"],
"another_key": ["input_value", "input_value"],
},
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.BOOL, default=False),
},
{"another_key": ["input_value", "input_value"]},
{},
{
"input_from_default": [False, False],
"another_key": ["input_value", "input_value"],
},
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.STRING, default="default_value"),
},
{},
{"another_key_in_aggregation_inputs": ["input_value", "input_value"]},
{
"input_from_default": ["default_value", "default_value"],
},
),
],
)
def test_apply_default_value_for_aggregation_input(
self, flow_inputs, aggregated_flow_inputs, aggregation_inputs, expected_inputs
):
result = FlowExecutor._apply_default_value_for_aggregation_input(
flow_inputs, aggregated_flow_inputs, aggregation_inputs
)
assert result == expected_inputs
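# Module-level helpers with and without a `stream` parameter, used to test stream option injection.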
def func_with_stream_parameter(a: int, b: int, stream=False):
return a + b, stream
def func_without_stream_parameter(a: int, b: int):
return a + b
class TestEnableStreamForLLMTool:
@pytest.mark.parametrize(
"tool, should_be_wrapped",
[
(completion, True),
(chat, True),
(embedding, False),
],
)
def test_enable_stream_for_llm_tool(self, tool, should_be_wrapped):
func = enable_streaming_for_llm_tool(tool)
is_wrapped = func != tool
assert is_wrapped == should_be_wrapped
def test_func_with_stream_parameter_should_be_wrapped(self):
func = enable_streaming_for_llm_tool(func_with_stream_parameter)
assert func != func_with_stream_parameter
result = func(a=1, b=2)
assert result == (3, True)
result = func_with_stream_parameter(a=1, b=2)
assert result == (3, False)
def test_func_without_stream_parameter_should_not_be_wrapped(self):
func = enable_streaming_for_llm_tool(func_without_stream_parameter)
assert func == func_without_stream_parameter
result = func(a=1, b=2)
assert result == 3
def test_inject_stream_options_no_stream_param(self):
# Test that the function does not wrap the decorated function if it has no stream parameter
func = _inject_stream_options(lambda: True)(func_without_stream_parameter)
assert func == func_without_stream_parameter
result = func(a=1, b=2)
assert result == 3
def test_inject_stream_options_with_stream_param(self):
# Test that the function wraps the decorated function and injects the stream option
func = _inject_stream_options(lambda: True)(func_with_stream_parameter)
assert func != func_with_stream_parameter
result = func(a=1, b=2)
assert result == (3, True)
result = func_with_stream_parameter(a=1, b=2)
assert result == (3, False)
def test_inject_stream_options_with_mocked_should_stream(self):
# Test that the function uses the should_stream callable to determine the stream option
should_stream = Mock(return_value=True)
func = _inject_stream_options(should_stream)(func_with_stream_parameter)
result = func(a=1, b=2)
assert result == (3, True)
should_stream.return_value = False
result = func(a=1, b=2)
assert result == (3, False)
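# Tools used to verify that generator outputs are consumed and merged into a serializable value.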
@tool
def streaming_tool():
for i in range(10):
yield i
@tool
def non_streaming_tool():
return 1
class TestEnsureNodeResultIsSerializable:
def test_streaming_tool_should_be_consumed_and_merged(self):
func = _ensure_node_result_is_serializable(streaming_tool)
assert func() == "0123456789"
def test_non_streaming_tool_should_not_be_affected(self):
func = _ensure_node_result_is_serializable(non_streaming_tool)
assert func() == 1
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/executor/test_input_assignment_parser.py | from typing import Any
import pytest
from promptflow._core._errors import NotSupported
from promptflow.contracts.flow import InputAssignment
from promptflow.executor._errors import (
InputNotFound,
InputNotFoundFromAncestorNodeOutput,
InvalidReferenceProperty,
UnsupportedReference,
)
from promptflow.executor._input_assignment_parser import parse_node_property, parse_value
FLOW_INPUTS = {"text": "hello promptflow"}
NODE_OUTPUTS = {"node1": "hello promptflow"}
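# Input assignment stub with an unsupported value_type, expected to trigger the NotSupported error.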
class WrongInputAssignment:
value: Any
value_type: str = "wrong_type"
section: str = ""
property: str = ""
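# Simple object for testing property access on node output values.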
class DummyObject:
value: str = "dummy"
@pytest.mark.unittest
class TestInputAssignmentParser:
@pytest.mark.parametrize(
"input, expected_value",
[
("hello promptflow", "hello promptflow"),
("${inputs.text}", "hello promptflow"),
("${node1.output}", "hello promptflow"),
],
)
def test_parse_value(self, input, expected_value):
input_assignment = InputAssignment.deserialize(input)
actual_value = parse_value(input_assignment, NODE_OUTPUTS, FLOW_INPUTS)
assert actual_value == expected_value
@pytest.mark.parametrize(
"input, expected_error_class, expected_error_message",
[
(
"${inputs.word}",
InputNotFound,
(
"The input 'word' is not found from flow inputs 'text'. "
"Please check the input name and try again."
),
),
(
"${node2.output}",
InputNotFoundFromAncestorNodeOutput,
(
"The input 'node2' is not found from ancestor node outputs ['node1']. "
"Please check the node name and try again."
),
),
(
"${node1.word}",
UnsupportedReference,
(
"The section 'word' of reference is currently unsupported. "
"Please specify the output part of the node 'node1'."
),
),
(
WrongInputAssignment(),
NotSupported,
(
"The type 'wrong_type' is currently unsupported. "
"Please choose from available types: ['Literal', 'FlowInput', 'NodeReference'] and try again."
),
),
],
)
def test_parse_value_with_exception(self, input, expected_error_class, expected_error_message):
input_assignment = InputAssignment.deserialize(input) if isinstance(input, str) else input
with pytest.raises(expected_error_class) as e:
parse_value(input_assignment, NODE_OUTPUTS, FLOW_INPUTS)
assert e.value.message == f"Flow execution failed. {expected_error_message}"
@pytest.mark.parametrize(
"node_val, property, expected_value",
[
(
{"output": "hello promptflow"},
"output",
"hello promptflow",
),
(
{"output": "hello promptflow"},
"['output']",
"hello promptflow",
),
(
{"output": "hello promptflow"},
'["output"]',
"hello promptflow",
),
(
{"output": {"text": "hello promptflow"}},
'["output"]["text"]',
"hello promptflow",
),
(
["output1", "output2"],
"[1]",
"output2",
),
(
DummyObject(),
"value",
"dummy",
),
],
)
def test_parse_node_property(self, node_val, property, expected_value):
actual_value = parse_node_property("node1", node_val, property)
assert actual_value == expected_value
@pytest.mark.parametrize(
"node_val, property, expected_error_message",
[
(
{"output_str": ["output1", "output2"]},
"output_str[2]",
(
"Invalid property 'output_str[2]' when accessing the node 'node1'. "
"Please check the property and try again."
),
),
(
{"word": "hello promptflow"},
"text",
(
"Invalid property 'text' when accessing the node 'node1'. "
"Please check the property and try again."
),
),
(
DummyObject(),
"value_type",
(
"Invalid property 'value_type' when accessing the node 'node1'. "
"Please check the property and try again."
),
),
],
)
def test_parse_node_property_with_exception(self, node_val, property, expected_error_message):
with pytest.raises(InvalidReferenceProperty) as e:
parse_node_property("node1", node_val, property)
assert e.value.message == f"Flow execution failed. {expected_error_message}"
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/executor/test_exceptions.py | import pytest
from promptflow.exceptions import PromptflowException
@pytest.mark.unittest
class TestExceptions:
def test_exception_message(self):
ex = PromptflowException(
message_format="Test exception message with parameters: {param}, {param1}.",
param="test_param",
)
assert ex.message == "Test exception message with parameters: test_param, <param1>."
assert None not in ex.message_parameters
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/tool_with_init_error.py | from promptflow import ToolProvider, tool
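# Package tool whose constructor raises, used by the tool-with-init-error test flow.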
class TestLoadErrorTool(ToolProvider):
def __init__(self):
raise Exception("Tool load error.")
@tool
def tool(self, name: str):
return name
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool.py | from jinja2 import Template
from promptflow import ToolProvider, tool
from promptflow.connections import AzureOpenAIConnection
from promptflow.contracts.types import PromptTemplate
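# Custom LLM package tool that validates its connections and api, then renders the prompt template with the given kwargs.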
class TestCustomLLMTool(ToolProvider):
def __init__(self, connection: AzureOpenAIConnection):
super().__init__()
self.connection = connection
@tool
def call(self, connection_2: AzureOpenAIConnection, api: str, template: PromptTemplate, **kwargs):
prompt = Template(template, trim_blocks=True, keep_trailing_newline=True).render(**kwargs)
assert isinstance(self.connection, AzureOpenAIConnection)
assert isinstance(connection_2, AzureOpenAIConnection)
assert api in ["completion", "chat"]
return prompt
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/tool_with_connection.py | from dataclasses import dataclass
from promptflow import tool
from promptflow._core.tools_manager import register_connections
from promptflow.contracts.types import Secret
@dataclass
class TestConnection:
name: str
secret: Secret
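# Register the custom connection type with the tools manager so it can be resolved by name.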
register_connections(TestConnection)
@tool
def tool_with_test_conn(conn: TestConnection):
assert isinstance(conn, TestConnection)
return conn.name + conn.secret
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/tool_with_init_error/package_tool_definition.json | {
"tool_with_init_error": {
"class_name": "TestLoadErrorTool",
"function": "tool",
"inputs": {
"name": {"type": ["string"]}
},
"module": "tool_with_init_error",
"name": "Tool with init error",
"type": "python"
}
}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/tool_with_init_error/flow.dag.yaml | inputs: {}
outputs: {}
nodes:
- name: tool_with_init_error
type: python
source:
type: package
tool: tool_with_init_error
inputs:
name: test_name
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool_with_duplicated_inputs/prompt_with_duplicated_inputs.jinja2 | {{api}}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool_with_duplicated_inputs/package_tool_definition.json | {
"custom_llm_tool.TestCustomLLMTool.call": {
"class_name": "TestCustomLLMTool",
"function": "call",
"inputs": {
"connection": {"type": ["AzureOpenAIConnection"]},
"api": {"type": ["string"]},
"template": {"type": ["PromptTemplate"]}
},
"module": "custom_llm_tool",
"name": "Test Custom LLM Tool",
"description": "Test Custom LLM Tool",
"type": "python"
}
}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool_with_duplicated_inputs/flow.dag.yaml | inputs:
text:
type: string
outputs:
output:
type: string
reference: ${custom_llm_tool_with_duplicated_inputs.output}
nodes:
- name: custom_llm_tool_with_duplicated_inputs
type: custom_llm
source:
type: package_with_prompt
tool: custom_llm_tool.TestCustomLLMTool.call
path: ./prompt_with_duplicated_inputs.jinja2
inputs:
connection: azure_open_ai_connection
api: completion
text: ${inputs.text}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/tool_with_connection/package_tool_definition.json | {
"tool_with_connection": {
"function": "tool_with_test_conn",
"inputs": {
"conn": {"type": ["TestConnection"]}
},
"module": "tool_with_connection",
"name": "Test Tool with Connection",
"type": "python"
}
}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/tool_with_connection/flow.dag.yaml | inputs: {}
outputs: {}
nodes:
- name: tool_with_conn
type: python
source:
type: package
tool: tool_with_connection
inputs:
conn: test_conn
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool/inputs.json | {}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool/my_prompt.jinja2 | {# Please replace the template with your own prompt. #}
Write a simple program that displays the greeting message: "{{text}}" when executed.
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool/samples.json | [
{
"text": "Hello"
},
{
"text": "Hello World!"
}
]
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool/package_tool_definition.json | {
"custom_llm_tool.TestCustomLLMTool.call": {
"class_name": "TestCustomLLMTool",
"function": "call",
"inputs": {
"connection": {"type": ["AzureOpenAIConnection"]},
"connection_2": {"type": ["AzureOpenAIConnection"]},
"api": {"type": ["string"]},
"template": {"type": ["PromptTemplate"]}
},
"module": "custom_llm_tool",
"name": "Test Custom LLM Tool",
"description": "Test Custom LLM Tool",
"type": "python"
}
}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool/flow.dag.yaml | inputs:
text:
type: string
outputs:
output:
type: string
reference: ${my_custom_llm_tool.output}
nodes:
- name: my_custom_llm_tool
type: custom_llm
source:
type: package_with_prompt
tool: custom_llm_tool.TestCustomLLMTool.call
path: ./my_prompt.jinja2
inputs:
connection: azure_open_ai_connection
connection_2: azure_open_ai_connection
api: completion
text: ${inputs.text}
| 0 |