repo_id (string, 15-132 chars) | file_path (string, 34-176 chars) | content (string, 2-3.52M chars) | __index_level_0__ (int64, always 0)
---|---|---|---|
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/processpool/test_line_execution_process_pool.py | import multiprocessing
import os
import uuid
from multiprocessing import Queue
from pathlib import Path
from tempfile import mkdtemp
from unittest.mock import patch
import pytest
from pytest_mock import MockFixture
from promptflow._utils.logger_utils import LogContext
from promptflow.contracts.run_info import Status
from promptflow.exceptions import ErrorTarget, UserErrorException
from promptflow.executor import FlowExecutor
from promptflow.executor._line_execution_process_pool import (
LineExecutionProcessPool,
_exec_line,
format_current_process_info,
get_available_max_worker_count,
log_process_status,
)
from promptflow.executor._result import LineResult
from ...utils import get_flow_sample_inputs, get_yaml_file
SAMPLE_FLOW = "web_classification_no_variants"
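# Input helpers: get_line_inputs returns a single line of inputs and get_bulk_inputs builds a list of them,
# loading samples.json from the flow folder when one is given, otherwise falling back to a fixed url/text sample.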
def get_line_inputs(flow_folder=""):
if flow_folder:
        inputs = get_bulk_inputs(flow_folder=flow_folder)
return inputs[0]
return {
"url": "https://www.microsoft.com/en-us/windows/",
"text": "some_text",
}
def get_bulk_inputs(nlines=4, flow_folder="", sample_inputs_file="", return_dict=False):
if flow_folder:
if not sample_inputs_file:
sample_inputs_file = "samples.json"
inputs = get_flow_sample_inputs(flow_folder, sample_inputs_file=sample_inputs_file)
if isinstance(inputs, list) and len(inputs) > 0:
return inputs
elif isinstance(inputs, dict):
if return_dict:
return inputs
return [inputs]
else:
raise Exception(f"Invalid type of bulk input: {inputs}")
    return [get_line_inputs() for _ in range(nlines)]
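# Child-process helper for fork mode: forces PF_BATCH_METHOD=fork (and optionally PF_WORKER_COUNT), then
# asserts the pool's selected worker count and the corresponding info log.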
def execute_in_fork_mode_subprocess(
dev_connections, flow_folder, is_set_environ_pf_worker_count, pf_worker_count, n_process
):
os.environ["PF_BATCH_METHOD"] = "fork"
if is_set_environ_pf_worker_count:
os.environ["PF_WORKER_COUNT"] = pf_worker_count
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
run_id = str(uuid.uuid4())
bulk_inputs = get_bulk_inputs()
nlines = len(bulk_inputs)
with patch("promptflow.executor._line_execution_process_pool.bulk_logger") as mock_logger:
with LineExecutionProcessPool(
executor,
nlines,
run_id,
None,
) as pool:
assert pool._n_process == n_process
if is_set_environ_pf_worker_count:
mock_logger.info.assert_any_call(
f"Set process count to {pf_worker_count} with the environment " f"variable 'PF_WORKER_COUNT'."
)
else:
factors = {
"default_worker_count": pool._DEFAULT_WORKER_COUNT,
"row_count": pool._nlines,
}
mock_logger.info.assert_any_call(
f"Set process count to {n_process} by taking the minimum value among the " f"factors of {factors}."
)
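# Child-process helper for spawn mode: forces PF_BATCH_METHOD=spawn and mocks psutil memory readings so the
# memory-based worker estimate is deterministic, then asserts the worker count and the emitted info/warning logs.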
def execute_in_spawn_mode_subprocess(
dev_connections,
flow_folder,
is_set_environ_pf_worker_count,
is_calculation_smaller_than_set,
pf_worker_count,
estimated_available_worker_count,
n_process,
):
os.environ["PF_BATCH_METHOD"] = "spawn"
if is_set_environ_pf_worker_count:
os.environ["PF_WORKER_COUNT"] = pf_worker_count
executor = FlowExecutor.create(
get_yaml_file(flow_folder),
dev_connections,
)
run_id = str(uuid.uuid4())
bulk_inputs = get_bulk_inputs()
nlines = len(bulk_inputs)
with patch("psutil.virtual_memory") as mock_mem:
mock_mem.return_value.available = 128.0 * 1024 * 1024
with patch("psutil.Process") as mock_process:
mock_process.return_value.memory_info.return_value.rss = 64 * 1024 * 1024
with patch("promptflow.executor._line_execution_process_pool.bulk_logger") as mock_logger:
with LineExecutionProcessPool(
executor,
nlines,
run_id,
None,
) as pool:
assert pool._n_process == n_process
if is_set_environ_pf_worker_count and is_calculation_smaller_than_set:
mock_logger.info.assert_any_call(
f"Set process count to {pf_worker_count} with the environment "
f"variable 'PF_WORKER_COUNT'."
)
mock_logger.warning.assert_any_call(
f"The current process count ({pf_worker_count}) is larger than recommended process count "
f"({estimated_available_worker_count}) that estimated by system available memory. This may "
f"cause memory exhaustion"
)
elif is_set_environ_pf_worker_count and not is_calculation_smaller_than_set:
mock_logger.info.assert_any_call(
f"Set process count to {pf_worker_count} with the environment "
f"variable 'PF_WORKER_COUNT'."
)
elif not is_set_environ_pf_worker_count:
factors = {
"default_worker_count": pool._DEFAULT_WORKER_COUNT,
"row_count": pool._nlines,
"estimated_worker_count_based_on_memory_usage": estimated_available_worker_count,
}
mock_logger.info.assert_any_call(
f"Set process count to {n_process} by taking the minimum value among the factors "
f"of {factors}."
)
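# Builds a pool over the sample flow with a 1-second line timeout; shared by the PF_BATCH_METHOD scenarios below.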
def create_line_execution_process_pool(dev_connections):
executor = FlowExecutor.create(get_yaml_file(SAMPLE_FLOW), dev_connections)
run_id = str(uuid.uuid4())
bulk_inputs = get_bulk_inputs()
nlines = len(bulk_inputs)
line_execution_process_pool = LineExecutionProcessPool(
executor,
nlines,
run_id,
None,
line_timeout_sec=1,
)
return line_execution_process_pool
def set_environment_succeeded_in_subprocess(dev_connections, pf_batch_method):
os.environ["PF_BATCH_METHOD"] = pf_batch_method
line_execution_process_pool = create_line_execution_process_pool(dev_connections)
use_fork = line_execution_process_pool._use_fork
assert use_fork is False
def set_environment_failed_in_subprocess(dev_connections):
with patch("promptflow.executor._line_execution_process_pool.bulk_logger") as mock_logger:
mock_logger.warning.return_value = None
os.environ["PF_BATCH_METHOD"] = "test"
line_execution_process_pool = create_line_execution_process_pool(dev_connections)
use_fork = line_execution_process_pool._use_fork
assert use_fork == (multiprocessing.get_start_method() == "fork")
sys_start_methods = multiprocessing.get_all_start_methods()
        expected_log_message = (
            f"Failed to set start method to 'test', start method test is not in: {sys_start_methods}."
        )
        mock_logger.warning.assert_called_once_with(expected_log_message)
def not_set_environment_in_subprocess(dev_connections):
line_execution_process_pool = create_line_execution_process_pool(dev_connections)
use_fork = line_execution_process_pool._use_fork
assert use_fork == (multiprocessing.get_start_method() == "fork")
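# Each scenario below runs its helper in a separate multiprocessing.Process so PF_BATCH_METHOD / PF_WORKER_COUNT
# changes stay isolated from the pytest process; the child's exit code signals whether its assertions passed.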
@pytest.mark.unittest
class TestLineExecutionProcessPool:
@pytest.mark.parametrize(
"flow_folder",
[
SAMPLE_FLOW,
],
)
def test_line_execution_process_pool(self, flow_folder, dev_connections):
log_path = str(Path(mkdtemp()) / "test.log")
log_context_initializer = LogContext(log_path).get_initializer()
log_context = log_context_initializer()
with log_context:
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
executor._log_interval = 1
run_id = str(uuid.uuid4())
bulk_inputs = get_bulk_inputs()
nlines = len(bulk_inputs)
run_id = run_id or str(uuid.uuid4())
with LineExecutionProcessPool(
executor,
nlines,
run_id,
None,
) as pool:
result_list = pool.run(zip(range(nlines), bulk_inputs))
assert len(result_list) == nlines
for i, line_result in enumerate(result_list):
assert isinstance(line_result, LineResult)
assert line_result.run_info.status == Status.Completed, f"{i}th line got {line_result.run_info.status}"
@pytest.mark.parametrize(
"flow_folder",
[
SAMPLE_FLOW,
],
)
def test_line_execution_not_completed(self, flow_folder, dev_connections):
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
run_id = str(uuid.uuid4())
bulk_inputs = get_bulk_inputs()
nlines = len(bulk_inputs)
with LineExecutionProcessPool(
executor,
nlines,
run_id,
None,
line_timeout_sec=1,
) as pool:
result_list = pool.run(zip(range(nlines), bulk_inputs))
result_list = sorted(result_list, key=lambda r: r.run_info.index)
assert len(result_list) == nlines
for i, line_result in enumerate(result_list):
assert isinstance(line_result, LineResult)
assert line_result.run_info.error["message"] == f"Line {i} execution timeout for exceeding 1 seconds"
assert line_result.run_info.error["code"] == "UserError"
assert line_result.run_info.status == Status.Failed
@pytest.mark.parametrize(
"flow_folder",
[
SAMPLE_FLOW,
],
)
def test_exec_line(self, flow_folder, dev_connections, mocker: MockFixture):
output_queue = Queue()
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
run_id = str(uuid.uuid4())
line_inputs = get_line_inputs()
line_result = _exec_line(
executor=executor,
output_queue=output_queue,
inputs=line_inputs,
run_id=run_id,
index=0,
)
assert isinstance(line_result, LineResult)
@pytest.mark.parametrize(
"flow_folder",
[
SAMPLE_FLOW,
],
)
def test_exec_line_failed_when_line_execution_not_start(self, flow_folder, dev_connections, mocker: MockFixture):
output_queue = Queue()
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
test_error_msg = "Test user error"
with patch("promptflow.executor.flow_executor.FlowExecutor.exec_line", autouse=True) as mock_exec_line:
mock_exec_line.side_effect = UserErrorException(
message=test_error_msg, target=ErrorTarget.AZURE_RUN_STORAGE
)
run_id = str(uuid.uuid4())
line_inputs = get_line_inputs()
line_result = _exec_line(
executor=executor,
output_queue=output_queue,
inputs=line_inputs,
run_id=run_id,
index=0,
)
assert isinstance(line_result, LineResult)
assert line_result.run_info.error["message"] == test_error_msg
assert line_result.run_info.error["code"] == "UserError"
assert line_result.run_info.status == Status.Failed
@pytest.mark.parametrize(
"flow_folder",
[
SAMPLE_FLOW,
],
)
def test_process_pool_run_with_exception(self, flow_folder, dev_connections, mocker: MockFixture):
# mock process pool run execution raise error
test_error_msg = "Test user error"
mocker.patch(
"promptflow.executor._line_execution_process_pool.LineExecutionProcessPool."
"_monitor_workers_and_process_tasks_in_thread",
side_effect=UserErrorException(message=test_error_msg, target=ErrorTarget.AZURE_RUN_STORAGE),
)
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
run_id = str(uuid.uuid4())
bulk_inputs = get_bulk_inputs()
nlines = len(bulk_inputs)
with LineExecutionProcessPool(
executor,
nlines,
run_id,
None,
) as pool:
with pytest.raises(UserErrorException) as e:
pool.run(zip(range(nlines), bulk_inputs))
assert e.value.message == test_error_msg
assert e.value.target == ErrorTarget.AZURE_RUN_STORAGE
assert e.value.error_codes[0] == "UserError"
@pytest.mark.parametrize(
("flow_folder", "is_set_environ_pf_worker_count", "pf_worker_count", "n_process"),
[(SAMPLE_FLOW, True, "3", 3), (SAMPLE_FLOW, False, None, 4)],
)
def test_process_pool_parallelism_in_fork_mode(
self, dev_connections, flow_folder, is_set_environ_pf_worker_count, pf_worker_count, n_process
):
if "fork" not in multiprocessing.get_all_start_methods():
pytest.skip("Unsupported start method: fork")
p = multiprocessing.Process(
target=execute_in_fork_mode_subprocess,
args=(dev_connections, flow_folder, is_set_environ_pf_worker_count, pf_worker_count, n_process),
)
p.start()
p.join()
assert p.exitcode == 0
@pytest.mark.parametrize(
(
"flow_folder",
"is_set_environ_pf_worker_count",
"is_calculation_smaller_than_set",
"pf_worker_count",
"estimated_available_worker_count",
"n_process",
),
[
(SAMPLE_FLOW, True, False, "2", 4, 2),
(SAMPLE_FLOW, True, True, "6", 2, 6),
(SAMPLE_FLOW, False, True, None, 2, 2),
],
)
def test_process_pool_parallelism_in_spawn_mode(
self,
dev_connections,
flow_folder,
is_set_environ_pf_worker_count,
is_calculation_smaller_than_set,
pf_worker_count,
estimated_available_worker_count,
n_process,
):
if "spawn" not in multiprocessing.get_all_start_methods():
pytest.skip("Unsupported start method: spawn")
p = multiprocessing.Process(
target=execute_in_spawn_mode_subprocess,
args=(
dev_connections,
flow_folder,
is_set_environ_pf_worker_count,
is_calculation_smaller_than_set,
pf_worker_count,
estimated_available_worker_count,
n_process,
),
)
p.start()
p.join()
assert p.exitcode == 0
    def test_process_set_environment_variable_succeeded(self, dev_connections):
        p = multiprocessing.Process(
            target=set_environment_succeeded_in_subprocess,
args=(
dev_connections,
"spawn",
),
)
p.start()
p.join()
assert p.exitcode == 0
def test_process_set_environment_variable_failed(self, dev_connections):
p = multiprocessing.Process(target=set_environment_failed_in_subprocess, args=(dev_connections,))
p.start()
p.join()
assert p.exitcode == 0
def test_process_not_set_environment_variable(self, dev_connections):
p = multiprocessing.Process(target=not_set_environment_in_subprocess, args=(dev_connections,))
p.start()
p.join()
assert p.exitcode == 0
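# These tests expect the estimate to be available_memory // process_memory, clamped to a minimum of 1
# (with a warning) when available memory is insufficient.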
@pytest.mark.unittest
class TestGetAvailableMaxWorkerCount:
@pytest.mark.parametrize(
"available_memory, process_memory, expected_max_worker_count, actual_calculate_worker_count",
[
(128.0, 64.0, 2, 2), # available_memory/process_memory > 1
(63.0, 64.0, 1, 0), # available_memory/process_memory < 1
],
)
def test_get_available_max_worker_count(
self, available_memory, process_memory, expected_max_worker_count, actual_calculate_worker_count
):
with patch("psutil.virtual_memory") as mock_mem:
mock_mem.return_value.available = available_memory * 1024 * 1024
with patch("psutil.Process") as mock_process:
mock_process.return_value.memory_info.return_value.rss = process_memory * 1024 * 1024
with patch("promptflow.executor._line_execution_process_pool.bulk_logger") as mock_logger:
mock_logger.warning.return_value = None
estimated_available_worker_count = get_available_max_worker_count()
assert estimated_available_worker_count == expected_max_worker_count
if actual_calculate_worker_count < 1:
mock_logger.warning.assert_called_with(
f"Current system's available memory is {available_memory}MB, less than the memory "
f"{process_memory}MB required by the process. The maximum available worker count is 1."
)
else:
mock_logger.info.assert_called_with(
f"Current system's available memory is {available_memory}MB, "
f"memory consumption of current process is {process_memory}MB, "
f"estimated available worker count is {available_memory}/{process_memory} "
f"= {actual_calculate_worker_count}"
)
@pytest.mark.unittest
class TestFormatCurrentProcess:
def test_format_current_process_info(self):
process_name = "process_name"
process_pid = 123
line_number = 13
formatted_message = format_current_process_info(process_name, process_pid, line_number)
expected_returned_log_message = (
f"Process name({process_name})-Process id({process_pid})-Line number({line_number})"
)
assert formatted_message == expected_returned_log_message
@patch("promptflow.executor._line_execution_process_pool.bulk_logger.info", autospec=True)
def test_log_process_status_start_execution(self, mock_logger_info):
process_name = "process_name"
process_pid = 123
line_number = 13
log_process_status(process_name, process_pid, line_number)
        expected_log_message = (
            f"Process name({process_name})-Process id({process_pid})-Line number({line_number}) start execution."
        )
        mock_logger_info.assert_called_once_with(expected_log_message)
@patch("promptflow.executor._line_execution_process_pool.bulk_logger.info", autospec=True)
def test_log_process_status_completed(self, mock_logger_info):
process_name = "process_name"
process_pid = 123
line_number = 13
log_process_status(process_name, process_pid, line_number, is_completed=True)
        expected_log_message = (
            f"Process name({process_name})-Process id({process_pid})-Line number({line_number}) completed."
        )
        mock_logger_info.assert_called_once_with(expected_log_message)
@patch("promptflow.executor._line_execution_process_pool.bulk_logger.info", autospec=True)
def test_log_process_status_failed(self, mock_logger_info):
process_name = "process_name"
process_pid = 123
line_number = 13
log_process_status(process_name, process_pid, line_number, is_failed=True)
        expected_log_message = (
            f"Process name({process_name})-Process id({process_pid})-Line number({line_number}) failed."
        )
        mock_logger_info.assert_called_once_with(expected_log_message)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/batch/test_batch_engine.py | from pathlib import Path
from tempfile import mkdtemp
from unittest.mock import Mock, patch
import pytest
from promptflow._core._errors import UnexpectedError
from promptflow.batch import APIBasedExecutorProxy, BatchEngine, CSharpExecutorProxy, PythonExecutorProxy
from promptflow.contracts.run_info import Status
from promptflow.exceptions import ErrorTarget
from promptflow.executor._errors import ConnectionNotFound
from promptflow.executor._result import AggregationResult
from ...utils import MemoryRunStorage, get_yaml_file, load_jsonl
from .test_result import get_line_results, get_node_run_infos
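# BatchEngine unit tests: error surfacing on run failures, executor-proxy registration, the cancellation flag,
# and persistence of line run info, outputs, and aggregation results.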
@pytest.mark.unittest
class TestBatchEngine:
@pytest.mark.parametrize(
"side_effect, ex_type, ex_target, ex_codes, ex_msg",
[
(
Exception("test error"),
UnexpectedError,
ErrorTarget.BATCH,
["SystemError", "UnexpectedError"],
"Unexpected error occurred while executing the batch run. Error: (Exception) test error.",
),
(
ConnectionNotFound(message="Connection 'aoai_conn' not found"),
ConnectionNotFound,
ErrorTarget.EXECUTOR,
["UserError", "ValidationError", "InvalidRequest", "ConnectionNotFound"],
"Connection 'aoai_conn' not found",
),
],
)
def test_batch_engine_run_error(self, side_effect, ex_type, ex_target, ex_codes, ex_msg):
batch_engine = BatchEngine(get_yaml_file("print_input_flow"))
with patch("promptflow.batch._batch_engine.BatchEngine._exec_in_task") as mock_func:
mock_func.side_effect = side_effect
with patch(
"promptflow.batch._batch_inputs_processor.BatchInputsProcessor.process_batch_inputs", new=Mock()
):
with pytest.raises(ex_type) as e:
batch_engine.run({}, {}, Path("."))
assert e.value.target == ex_target
assert e.value.error_codes == ex_codes
assert e.value.message == ex_msg
def test_register_executor(self):
# assert original values
assert BatchEngine.executor_proxy_classes["python"] == PythonExecutorProxy
assert BatchEngine.executor_proxy_classes["csharp"] == CSharpExecutorProxy
class MockJSExecutorProxy(APIBasedExecutorProxy):
pass
# register new proxy
BatchEngine.register_executor("js", MockJSExecutorProxy)
assert BatchEngine.executor_proxy_classes["js"] == MockJSExecutorProxy
assert len(BatchEngine.executor_proxy_classes) == 3
def test_cancel(self):
batch_engine = BatchEngine(get_yaml_file("print_input_flow"))
assert batch_engine._is_canceled is False
batch_engine.cancel()
assert batch_engine._is_canceled is True
def test_persist_run_info(self):
line_dict = {
0: {"node_0": Status.Completed, "node_1": Status.Completed, "node_2": Status.Completed},
1: {"node_0": Status.Completed, "node_1": Status.Failed, "node_2": Status.Completed},
2: {"node_0": Status.Completed, "node_1": Status.Completed, "node_2": Status.Bypassed},
}
line_results = get_line_results(line_dict)
        mem_run_storage = MemoryRunStorage()
        batch_engine = BatchEngine(get_yaml_file("print_input_flow"), "", storage=mem_run_storage)
        batch_engine._persist_run_info(line_results)
        assert len(mem_run_storage._flow_runs) == 3
        assert len(mem_run_storage._node_runs) == 9
def test_persist_outputs(self):
outputs = [
{"line_number": 0, "output": "Hello World!"},
{"line_number": 1, "output": "Hello Microsoft!"},
{"line_number": 2, "output": "Hello Promptflow!"},
]
output_dir = Path(mkdtemp())
batch_engine = BatchEngine(get_yaml_file("print_input_flow"))
batch_engine._persist_outputs(outputs, output_dir)
actual_outputs = load_jsonl(output_dir / "output.jsonl")
assert actual_outputs == outputs
def test_update_aggr_result(self):
output = {"output": "Hello World!"}
metrics = {"accuracy": 0.9}
node_run_infos = get_node_run_infos({"aggr_1": Status.Completed, "aggr_2": Status.Completed})
        aggr_result = AggregationResult(output={}, metrics={}, node_run_infos={})
        aggr_exec_result = AggregationResult(output=output, metrics=metrics, node_run_infos=node_run_infos)
        batch_engine = BatchEngine(get_yaml_file("print_input_flow"))
        batch_engine._update_aggr_result(aggr_result, aggr_exec_result)
        assert aggr_result.output == output
        assert aggr_result.metrics == metrics
        assert aggr_result.node_run_infos == node_run_infos
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/batch/test_csharp_executor_proxy.py | import json
import socket
import subprocess
from pathlib import Path
from tempfile import mkdtemp
from unittest.mock import MagicMock, patch
import pytest
from promptflow._core._errors import MetaFileNotFound, MetaFileReadError
from promptflow._sdk._constants import FLOW_TOOLS_JSON, PROMPT_FLOW_DIR_NAME
from promptflow.batch import CSharpExecutorProxy
from promptflow.executor._result import AggregationResult
from ...utils import get_flow_folder, get_yaml_file
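# Builds a CSharpExecutorProxy for the sample csharp flow with the startup health check patched out;
# individual tests patch the subprocess / process handle as needed.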
async def get_executor_proxy():
flow_file = get_yaml_file("csharp_flow")
working_dir = get_flow_folder("csharp_flow")
with patch.object(CSharpExecutorProxy, "ensure_executor_startup", return_value=None):
return await CSharpExecutorProxy.create(flow_file, working_dir)
@pytest.mark.unittest
class TestCSharpExecutorProxy:
@pytest.mark.asyncio
async def test_create(self):
with patch("subprocess.Popen") as mock_popen:
mock_popen.return_value = MagicMock()
executor_proxy = await get_executor_proxy()
mock_popen.assert_called_once()
assert executor_proxy is not None
assert executor_proxy._process is not None
assert executor_proxy._port is not None
assert executor_proxy.api_endpoint == f"http://localhost:{executor_proxy._port}"
@pytest.mark.asyncio
async def test_destroy_with_already_terminated(self):
mock_process = MagicMock()
mock_process.poll.return_value = 0
executor_proxy = await get_executor_proxy()
executor_proxy._process = mock_process
await executor_proxy.destroy()
mock_process.poll.assert_called_once()
mock_process.terminate.assert_not_called()
@pytest.mark.asyncio
async def test_destroy_with_terminates_gracefully(self):
mock_process = MagicMock()
mock_process.poll.return_value = None
executor_proxy = await get_executor_proxy()
executor_proxy._process = mock_process
await executor_proxy.destroy()
mock_process.poll.assert_called_once()
mock_process.terminate.assert_called_once()
mock_process.wait.assert_called_once_with(timeout=5)
mock_process.kill.assert_not_called()
@pytest.mark.asyncio
async def test_destroy_with_force_kill(self):
mock_process = MagicMock()
mock_process.poll.return_value = None
mock_process.wait.side_effect = subprocess.TimeoutExpired(cmd="cmd", timeout=5)
executor_proxy = await get_executor_proxy()
executor_proxy._process = mock_process
await executor_proxy.destroy()
mock_process.poll.assert_called_once()
mock_process.terminate.assert_called_once()
mock_process.wait.assert_called_once_with(timeout=5)
mock_process.kill.assert_called_once()
@pytest.mark.asyncio
async def test_exec_aggregation_async(self):
executor_proxy = await get_executor_proxy()
aggr_result = await executor_proxy.exec_aggregation_async("", "", "")
assert isinstance(aggr_result, AggregationResult)
@pytest.mark.asyncio
@pytest.mark.parametrize(
"exit_code, expected_result",
[
(None, True),
(0, False),
(1, False),
],
)
async def test_is_executor_active(self, exit_code, expected_result):
executor_proxy = await get_executor_proxy()
executor_proxy._process = MagicMock()
executor_proxy._process.poll.return_value = exit_code
assert executor_proxy._is_executor_active() == expected_result
def test_get_tool_metadata_succeed(self):
working_dir = Path(mkdtemp())
expected_tool_meta = {"name": "csharp_flow", "version": "0.1.0"}
tool_meta_file = working_dir / PROMPT_FLOW_DIR_NAME / FLOW_TOOLS_JSON
tool_meta_file.parent.mkdir(parents=True, exist_ok=True)
with open(tool_meta_file, "w") as file:
json.dump(expected_tool_meta, file, indent=4)
tool_meta = CSharpExecutorProxy.get_tool_metadata("", working_dir)
assert tool_meta == expected_tool_meta
def test_get_tool_metadata_failed_with_file_not_found(self):
working_dir = Path(mkdtemp())
with pytest.raises(MetaFileNotFound):
CSharpExecutorProxy.get_tool_metadata("", working_dir)
def test_get_tool_metadata_failed_with_content_not_json(self):
working_dir = Path(mkdtemp())
tool_meta_file = working_dir / PROMPT_FLOW_DIR_NAME / FLOW_TOOLS_JSON
tool_meta_file.parent.mkdir(parents=True, exist_ok=True)
tool_meta_file.touch()
with pytest.raises(MetaFileReadError):
CSharpExecutorProxy.get_tool_metadata("", working_dir)
def test_find_available_port(self):
port = CSharpExecutorProxy.find_available_port()
assert isinstance(port, str)
assert int(port) > 0, "Port number should be greater than 0"
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(("localhost", int(port)))
except OSError:
pytest.fail("Port is not actually available")
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/batch/test_base_executor_proxy.py | import json
from pathlib import Path
from tempfile import mkdtemp
from typing import Optional
from unittest.mock import AsyncMock, patch
import httpx
import pytest
from promptflow._utils.exception_utils import ExceptionPresenter
from promptflow.batch._base_executor_proxy import APIBasedExecutorProxy
from promptflow.batch._errors import ExecutorServiceUnhealthy
from promptflow.contracts.run_info import Status
from promptflow.exceptions import ErrorTarget, ValidationException
from promptflow.executor._errors import ConnectionNotFound
from promptflow.storage._run_storage import AbstractRunStorage
from ...mock_execution_server import _get_aggr_result_dict, _get_line_result_dict
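# These tests drive APIBasedExecutorProxy through the MockAPIBasedExecutorProxy defined at the bottom of this
# file, patching httpx calls to cover line/aggregation execution, startup error handling, and health checks.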
@pytest.mark.unittest
class TestAPIBasedExecutorProxy:
@pytest.mark.asyncio
@pytest.mark.parametrize(
"has_error",
[False, True],
)
async def test_exec_line_async(self, has_error):
mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
run_id = "test_run_id"
index = 1
inputs = {"question": "test"}
with patch("httpx.AsyncClient.post", new_callable=AsyncMock) as mock:
line_result_dict = _get_line_result_dict(run_id, index, inputs, has_error=has_error)
status_code = 400 if has_error else 200
mock.return_value = httpx.Response(status_code, json=line_result_dict)
line_result = await mock_executor_proxy.exec_line_async(inputs, index, run_id)
            assert line_result.output == ({} if has_error else {"answer": "Hello world!"})
assert line_result.run_info.run_id == run_id
assert line_result.run_info.index == index
            assert line_result.run_info.status == (Status.Failed if has_error else Status.Completed)
assert line_result.run_info.inputs == inputs
assert (line_result.run_info.error is not None) == has_error
@pytest.mark.asyncio
async def test_exec_aggregation_async(self):
mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
run_id = "test_run_id"
batch_inputs = {"question": ["test", "error"]}
aggregation_inputs = {"${get_answer.output}": ["Incorrect", "Correct"]}
with patch("httpx.AsyncClient.post", new_callable=AsyncMock) as mock:
aggr_result_dict = _get_aggr_result_dict(run_id, aggregation_inputs)
mock.return_value = httpx.Response(200, json=aggr_result_dict)
aggr_result = await mock_executor_proxy.exec_aggregation_async(batch_inputs, aggregation_inputs, run_id)
assert aggr_result.metrics == {"accuracy": 0.5}
assert len(aggr_result.node_run_infos) == 1
assert aggr_result.node_run_infos["aggregation"].flow_run_id == run_id
assert aggr_result.node_run_infos["aggregation"].inputs == aggregation_inputs
assert aggr_result.node_run_infos["aggregation"].status == Status.Completed
@pytest.mark.asyncio
async def test_ensure_executor_startup_when_no_error(self):
mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
with patch.object(APIBasedExecutorProxy, "ensure_executor_health", new_callable=AsyncMock) as mock:
with patch.object(APIBasedExecutorProxy, "_check_startup_error_from_file") as mock_check_startup_error:
await mock_executor_proxy.ensure_executor_startup("")
mock_check_startup_error.assert_not_called()
mock.assert_called_once()
@pytest.mark.asyncio
async def test_ensure_executor_startup_when_not_healthy(self):
# empty error file
error_file = Path(mkdtemp()) / "error.json"
error_file.touch()
mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
with patch.object(APIBasedExecutorProxy, "ensure_executor_health", new_callable=AsyncMock) as mock:
mock.side_effect = ExecutorServiceUnhealthy("executor unhealthy")
with pytest.raises(ExecutorServiceUnhealthy) as ex:
await mock_executor_proxy.ensure_executor_startup(error_file)
assert ex.value.message == "executor unhealthy"
mock.assert_called_once()
@pytest.mark.asyncio
async def test_ensure_executor_startup_when_existing_validation_error(self):
# prepare the error file
error_file = Path(mkdtemp()) / "error.json"
error_message = "Connection 'aoai_conn' not found"
error_dict = ExceptionPresenter.create(ConnectionNotFound(message=error_message)).to_dict()
with open(error_file, "w") as file:
json.dump(error_dict, file, indent=4)
mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
with patch.object(APIBasedExecutorProxy, "ensure_executor_health", new_callable=AsyncMock) as mock:
mock.side_effect = ExecutorServiceUnhealthy("executor unhealthy")
with pytest.raises(ValidationException) as ex:
await mock_executor_proxy.ensure_executor_startup(error_file)
assert ex.value.message == error_message
assert ex.value.target == ErrorTarget.BATCH
@pytest.mark.asyncio
async def test_ensure_executor_health_when_healthy(self):
mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
with patch.object(APIBasedExecutorProxy, "_check_health", return_value=True) as mock:
await mock_executor_proxy.ensure_executor_health()
mock.assert_called_once()
@pytest.mark.asyncio
async def test_ensure_executor_health_when_unhealthy(self):
mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
with patch.object(APIBasedExecutorProxy, "_check_health", return_value=False) as mock:
with pytest.raises(ExecutorServiceUnhealthy):
await mock_executor_proxy.ensure_executor_health()
assert mock.call_count == 20
@pytest.mark.asyncio
async def test_ensure_executor_health_when_not_active(self):
mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
with patch.object(APIBasedExecutorProxy, "_check_health", return_value=False) as mock:
with patch.object(APIBasedExecutorProxy, "_is_executor_active", return_value=False):
with pytest.raises(ExecutorServiceUnhealthy):
await mock_executor_proxy.ensure_executor_health()
mock.assert_not_called()
@pytest.mark.asyncio
@pytest.mark.parametrize(
"mock_value, expected_result",
[
(httpx.Response(200), True),
(httpx.Response(500), False),
(Exception("error"), False),
],
)
async def test_check_health(self, mock_value, expected_result):
mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
with patch("httpx.AsyncClient.get", new_callable=AsyncMock) as mock:
mock.return_value = mock_value
assert await mock_executor_proxy._check_health() is expected_result
@pytest.mark.asyncio
@pytest.mark.parametrize(
"response, expected_result",
[
(
httpx.Response(200, json={"result": "test"}),
{"result": "test"},
),
(
httpx.Response(500, json={"error": "test error"}),
"test error",
),
(
httpx.Response(400, json={"detail": "test"}),
{
"message": 'Unexpected error when executing a line, status code: 400, error: {"detail": "test"}',
"messageFormat": (
"Unexpected error when executing a line, " "status code: {status_code}, error: {error}"
),
"messageParameters": {
"status_code": "400",
"error": '{"detail": "test"}',
},
"referenceCode": "Unknown",
"code": "SystemError",
"innerError": {
"code": "UnexpectedError",
"innerError": None,
},
},
),
(
httpx.Response(502, text="test"),
{
"message": "Unexpected error when executing a line, status code: 502, error: test",
"messageFormat": (
"Unexpected error when executing a line, " "status code: {status_code}, error: {error}"
),
"messageParameters": {
"status_code": "502",
"error": "test",
},
"referenceCode": "Unknown",
"code": "SystemError",
"innerError": {
"code": "UnexpectedError",
"innerError": None,
},
},
),
],
)
async def test_process_http_response(self, response, expected_result):
mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
assert mock_executor_proxy._process_http_response(response) == expected_result
class MockAPIBasedExecutorProxy(APIBasedExecutorProxy):
@property
def api_endpoint(self) -> str:
return "http://localhost:8080"
@classmethod
async def create(
cls,
flow_file: Path,
working_dir: Optional[Path] = None,
*,
connections: Optional[dict] = None,
storage: Optional[AbstractRunStorage] = None,
**kwargs,
) -> "MockAPIBasedExecutorProxy":
return MockAPIBasedExecutorProxy()
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/batch/test_result.py | from datetime import datetime
import pytest
from promptflow.batch._result import BatchResult, ErrorSummary, LineError, SystemMetrics
from promptflow.contracts.run_info import FlowRunInfo
from promptflow.contracts.run_info import RunInfo as NodeRunInfo
from promptflow.contracts.run_info import Status
from promptflow.executor._result import AggregationResult, LineResult
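# Factory helpers that fabricate node/flow run infos, line results, and aggregation results from per-node
# status dicts, so batch results can be assembled without executing a real flow.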
def get_node_run_infos(node_dict: dict, index=None, api_calls=None, system_metrics=None):
return {
k: NodeRunInfo(
node=k,
flow_run_id="flow_run_id",
run_id=f"{index}_run_id_{k}",
status=v,
inputs=[],
output={},
metrics={},
error={"code": "UserError", "message": "test message"} if v == Status.Failed else None,
parent_run_id="",
start_time=None,
end_time=None,
index=index,
api_calls=api_calls,
system_metrics=system_metrics,
)
for k, v in node_dict.items()
}
def get_flow_run_info(status_dict: dict, index: int):
status = Status.Failed if any(status == Status.Failed for status in status_dict.values()) else Status.Completed
error = {"code": "UserError", "message": "test message"} if status == Status.Failed else None
return FlowRunInfo(
run_id=f"{index}_run_id",
status=status,
error=error,
inputs={},
output={},
metrics={},
request=None,
parent_run_id="",
root_run_id="",
source_run_id="",
flow_id="",
start_time=datetime.utcnow(),
end_time=datetime.utcnow(),
index=index,
)
def get_line_results(line_dict: dict, api_calls=None, system_metrics=None):
return [
LineResult(
output={},
aggregation_inputs={},
run_info=get_flow_run_info(status_dict=v, index=k),
node_run_infos=get_node_run_infos(node_dict=v, index=k, api_calls=api_calls, system_metrics=system_metrics),
)
for k, v in line_dict.items()
]
def get_aggregation_result(aggr_dict: dict, api_calls=None, system_metrics=None):
return AggregationResult(
output={},
metrics={},
node_run_infos=get_node_run_infos(node_dict=aggr_dict, api_calls=api_calls, system_metrics=system_metrics),
)
def get_batch_result(line_dict, aggr_dict, line_api_calls=None, aggr_api_calls=None):
line_results = get_line_results(line_dict=line_dict, api_calls=line_api_calls)
aggr_result = get_aggregation_result(aggr_dict=aggr_dict, api_calls=aggr_api_calls)
return BatchResult.create(datetime.utcnow(), datetime.utcnow(), line_results=line_results, aggr_result=aggr_result)
def get_api_call(type, name, inputs={}, output={}, children=None):
return {"type": type, "name": name, "inputs": inputs, "output": output, "children": children}
@pytest.mark.unittest
class TestBatchResult:
def test_node_status(self):
line_dict = {
0: {"node_0": Status.Completed, "node_1": Status.Completed, "node_2": Status.Completed},
1: {"node_0": Status.Completed, "node_1": Status.Failed, "node_2": Status.Completed},
2: {"node_0": Status.Completed, "node_1": Status.Completed, "node_2": Status.Bypassed},
}
aggr_dict = {"aggr_0": Status.Completed, "aggr_1": Status.Failed, "aggr_2": Status.Bypassed}
batch_result = get_batch_result(line_dict=line_dict, aggr_dict=aggr_dict)
assert batch_result.total_lines == 3
assert batch_result.completed_lines == 2
assert batch_result.failed_lines == 1
assert batch_result.node_status == {
"node_0.completed": 3,
"node_1.completed": 2,
"node_1.failed": 1,
"node_2.completed": 2,
"node_2.bypassed": 1,
"aggr_0.completed": 1,
"aggr_1.failed": 1,
"aggr_2.bypassed": 1,
}
def test_system_metrics(self):
from openai.types.completion import Completion, CompletionChoice
line_dict = {0: {"node_0": Status.Completed}}
aggr_dict = {"aggr_0": Status.Completed}
api_call_1 = get_api_call(
"LLM",
"openai.resources.completions.Completions.create",
inputs={"prompt": "Please tell me a joke.", "model": "text-davinci-003"},
output={"choices": [{"text": "text"}]},
)
api_call_2 = get_api_call(
"LLM",
"openai.resources.completions.Completions.create",
inputs={
"prompt": ["Please tell me a joke.", "Please tell me a joke about fruit."],
"model": "text-davinci-003",
},
output=[
Completion(
choices=[CompletionChoice(text="text", finish_reason="stop", index=0, logprobs=None)],
id="id",
created=0,
model="model",
object="text_completion",
),
Completion(
choices=[CompletionChoice(text="text", finish_reason="stop", index=0, logprobs=None)],
id="id",
created=0,
model="model",
object="text_completion",
),
],
)
line_api_calls = get_api_call("Chain", "Chain", children=[api_call_1, api_call_2])
aggr_api_call = get_api_call(
"LLM",
"openai.resources.chat.completions.Completions.create",
inputs={
"messages": [{"system": "You are a helpful assistant.", "user": "Please tell me a joke."}],
"model": "gpt-35-turbo",
},
output={"choices": [{"message": {"content": "content"}}]},
)
batch_result = get_batch_result(
line_dict=line_dict, aggr_dict=aggr_dict, line_api_calls=[line_api_calls], aggr_api_calls=[aggr_api_call]
)
assert batch_result.system_metrics.total_tokens == 42
assert batch_result.system_metrics.prompt_tokens == 38
assert batch_result.system_metrics.completion_tokens == 4
system_metrics_dict = {
"total_tokens": 42,
"prompt_tokens": 38,
"completion_tokens": 4,
}
assert system_metrics_dict.items() <= batch_result.system_metrics.to_dict().items()
@pytest.mark.parametrize(
"api_call",
[
get_api_call("LLM", "Completion", inputs="invalid"),
get_api_call("LLM", "Completion", output="invalid"),
get_api_call("LLM", "Invalid"),
get_api_call("LLM", "Completion"),
get_api_call("LLM", "Completion", inputs={"api_type": "azure"}),
get_api_call("LLM", "ChatCompletion", inputs={"api_type": "azure", "engine": "invalid"}),
],
)
def test_invalid_api_calls(self, api_call):
line_dict = {0: {"node_0": Status.Completed}}
batch_result = get_batch_result(line_dict=line_dict, aggr_dict={}, line_api_calls=[api_call])
assert batch_result.system_metrics.total_tokens == 0
assert batch_result.system_metrics.completion_tokens == 0
assert batch_result.system_metrics.prompt_tokens == 0
def test_error_summary(self):
line_dict = {
0: {"node_0": Status.Completed, "node_1": Status.Completed, "node_2": Status.Completed},
1: {"node_0": Status.Completed, "node_1": Status.Failed, "node_2": Status.Completed},
2: {"node_0": Status.Completed, "node_1": Status.Completed, "node_2": Status.Bypassed},
}
aggr_dict = {
"aggr_0": Status.Completed,
"aggr_1": Status.Failed,
"aggr_2": Status.Bypassed,
"aggr_4": Status.Failed,
}
batch_result = get_batch_result(line_dict=line_dict, aggr_dict=aggr_dict)
assert batch_result.total_lines == 3
assert batch_result.failed_lines == 1
assert batch_result.error_summary.failed_system_error_lines == 0
assert batch_result.error_summary.failed_user_error_lines == 1
assert batch_result.error_summary.error_list == [
LineError(line_number=1, error={"code": "UserError", "message": "test message"}),
]
assert batch_result.error_summary.error_list[0].to_dict() == {
"line_number": 1,
"error": {
"code": "UserError",
"message": "test message",
},
}
assert batch_result.error_summary.aggr_error_dict == {
"aggr_1": {"code": "UserError", "message": "test message"},
"aggr_4": {"code": "UserError", "message": "test message"},
}
@pytest.mark.unittest
class TestErrorSummary:
def test_create(self):
line_dict = {
0: {"node_0": Status.Failed, "node_1": Status.Completed, "node_2": Status.Completed},
1: {"node_0": Status.Completed, "node_1": Status.Failed, "node_2": Status.Completed},
}
line_results = get_line_results(line_dict)
line_results[0].run_info.error = {"code": "SystemError", "message": "test system error message"}
aggr_dict = {"aggr_0": Status.Completed, "aggr_1": Status.Failed}
aggr_result = get_aggregation_result(aggr_dict)
error_summary = ErrorSummary.create(line_results, aggr_result)
assert error_summary.failed_user_error_lines == 1
assert error_summary.failed_system_error_lines == 1
assert error_summary.error_list == [
LineError(line_number=0, error={"code": "SystemError", "message": "test system error message"}),
LineError(line_number=1, error={"code": "UserError", "message": "test message"}),
]
assert error_summary.aggr_error_dict == {"aggr_1": {"code": "UserError", "message": "test message"}}
@pytest.mark.unittest
class TestSystemMetrics:
    def test_create(self):
line_dict = {
0: {"node_0": Status.Completed, "node_1": Status.Completed},
1: {"node_0": Status.Completed, "node_1": Status.Completed},
}
line_system_metrics = {
"total_tokens": 5,
"prompt_tokens": 3,
"completion_tokens": 2,
}
line_results = get_line_results(line_dict, system_metrics=line_system_metrics)
aggr_dict = {"aggr_0": Status.Completed}
# invalid system metrics
aggr_system_metrics = {
"total_tokens": 10,
"prompt_tokens": 6,
}
aggr_result = get_aggregation_result(aggr_dict, system_metrics=aggr_system_metrics)
system_metrics = SystemMetrics.create(datetime.utcnow(), datetime.utcnow(), line_results, aggr_result)
assert system_metrics.total_tokens == 20
assert system_metrics.prompt_tokens == 12
assert system_metrics.completion_tokens == 8
system_metrics_dict = {
"total_tokens": 20,
"prompt_tokens": 12,
"completion_tokens": 8,
}
assert system_metrics_dict.items() <= system_metrics.to_dict().items()
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/batch/test_batch_inputs_processor.py | import json
from pathlib import Path
from tempfile import mkdtemp
import pytest
from promptflow._core._errors import UnexpectedError
from promptflow._utils.utils import dump_list_to_jsonl
from promptflow.batch._batch_inputs_processor import BatchInputsProcessor, apply_inputs_mapping
from promptflow.batch._errors import EmptyInputsData, InputMappingError
from promptflow.contracts.flow import FlowInputDefinition
from promptflow.contracts.tool import ValueType
from ...utils import DATA_ROOT
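# Covers BatchInputsProcessor: resolving input files, applying ${data.*} input mappings, merging multi-source
# rows by line_number, and completing the mapping with defaults for unmapped flow inputs.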
@pytest.mark.unittest
class TestBatchInputsProcessor:
def test_process_batch_inputs(self):
data = [
{"question": "What's promptflow?"},
{"question": "Do you like promptflow?"},
]
data_file = Path(mkdtemp()) / "data.jsonl"
dump_list_to_jsonl(data_file, data)
input_dirs = {"data": data_file}
inputs_mapping = {"question": "${data.question}"}
batch_inputs = BatchInputsProcessor("", {}).process_batch_inputs(input_dirs, inputs_mapping)
assert batch_inputs == [
{"line_number": 0, "question": "What's promptflow?"},
{"line_number": 1, "question": "Do you like promptflow?"},
]
def test_process_batch_inputs_error(self):
data_file = Path(mkdtemp()) / "data.jsonl"
data_file.touch()
input_dirs = {"data": data_file}
inputs_mapping = {"question": "${data.question}"}
with pytest.raises(EmptyInputsData) as e:
BatchInputsProcessor("", {}).process_batch_inputs(input_dirs, inputs_mapping)
expected_error_message = (
"Couldn't find any inputs data at the given input paths. "
"Please review the provided path and consider resubmitting."
)
assert expected_error_message in e.value.message
def test_resolve_data_from_input_path(self):
inputs_dir = Path(mkdtemp())
# data.jsonl
data = [
{"question": "What's promptflow?"},
{"question": "Do you like promptflow?"},
]
data_file = inputs_dir / "data.jsonl"
dump_list_to_jsonl(data_file, data)
# inputs.json
inputs_file = inputs_dir / "inputs.json"
with open(inputs_file, "w") as file:
file.write(json.dumps(data))
result = BatchInputsProcessor("", {})._resolve_data_from_input_path(inputs_dir)
assert result == data + data
# if has max_lines_count
result = BatchInputsProcessor("", {}, max_lines_count=1)._resolve_data_from_input_path(inputs_dir)
assert result == [
{"question": "What's promptflow?"},
]
@pytest.mark.parametrize(
"data_path",
[
"10k.jsonl",
"10k",
],
)
def test_resolve_data_from_input_path_with_large_data(self, data_path):
data_path = DATA_ROOT / "load_data_cases" / data_path
result = BatchInputsProcessor("", {})._resolve_data_from_input_path(Path(data_path))
assert isinstance(result, list)
assert len(result) == 10000
# specify max_rows_count
max_rows_count = 5
head_results = BatchInputsProcessor(
working_dir="",
flow_inputs={},
max_lines_count=max_rows_count,
)._resolve_data_from_input_path(Path(data_path))
assert isinstance(head_results, list)
assert len(head_results) == max_rows_count
assert result[:max_rows_count] == head_results
@pytest.mark.parametrize(
"inputs, inputs_mapping, expected",
[
(
{"data.test": {"question": "longer input key has lower priority."}, "line_number": 0},
{
"question": "${data.test.question}", # Question from the data
"value": 1,
},
{"question": "longer input key has lower priority.", "value": 1, "line_number": 0},
),
(
{
# Missing line_number is also valid data.
"data.test": {"question": "longer input key has lower priority."},
"data": {"test.question": "Shorter input key has higher priority."},
},
{
"question": "${data.test.question}", # Question from the data
"deployment_name": "text-davinci-003", # literal value
},
{
"question": "Shorter input key has higher priority.",
"deployment_name": "text-davinci-003",
},
),
],
)
def test_apply_inputs_mapping(self, inputs, inputs_mapping, expected):
result = apply_inputs_mapping(inputs, inputs_mapping)
assert expected == result, "Expected: {}, Actual: {}".format(expected, result)
@pytest.mark.parametrize(
"inputs, inputs_mapping, error_code, error_message",
[
(
{
"baseline": {"answer": 123, "question": "dummy"},
},
{
"question": "${baseline.output}",
"answer": "${data.output}",
},
InputMappingError,
"Couldn't find these mapping relations: ${baseline.output}, ${data.output}. "
"Please make sure your input mapping keys and values match your YAML input section and input data.",
),
],
)
def test_apply_inputs_mapping_error(self, inputs, inputs_mapping, error_code, error_message):
with pytest.raises(error_code) as e:
apply_inputs_mapping(inputs, inputs_mapping)
assert error_message in str(e.value), "Expected: {}, Actual: {}".format(error_message, str(e.value))
@pytest.mark.parametrize(
"inputs, expected",
[
(
{
"data": [{"question": "q1", "answer": "ans1"}, {"question": "q2", "answer": "ans2"}],
"output": [{"answer": "output_ans1"}, {"answer": "output_ans2"}],
},
[
# Get 2 lines data.
{
"data": {"question": "q1", "answer": "ans1"},
"output": {"answer": "output_ans1"},
"line_number": 0,
},
{
"data": {"question": "q2", "answer": "ans2"},
"output": {"answer": "output_ans2"},
"line_number": 1,
},
],
),
(
{
"data": [{"question": "q1", "answer": "ans1"}, {"question": "q2", "answer": "ans2"}],
"output": [{"answer": "output_ans2", "line_number": 1}],
},
[
# Only one line valid data.
{
"data": {"question": "q2", "answer": "ans2"},
"output": {"answer": "output_ans2", "line_number": 1},
"line_number": 1,
},
],
),
],
)
def test_merge_input_dicts_by_line(self, inputs, expected):
result = BatchInputsProcessor("", {})._merge_input_dicts_by_line(inputs)
json.dumps(result)
assert expected == result, "Expected: {}, Actual: {}".format(expected, result)
@pytest.mark.parametrize(
"inputs, error_code, error_message",
[
(
{
"baseline": [],
},
InputMappingError,
"The input for batch run is incorrect. Input from key 'baseline' is an empty list, which means we "
"cannot generate a single line input for the flow run. Please rectify the input and try again.",
),
(
{
"data": [{"question": "q1", "answer": "ans1"}, {"question": "q2", "answer": "ans2"}],
"baseline": [{"answer": "baseline_ans2"}],
},
InputMappingError,
"The input for batch run is incorrect. Line numbers are not aligned. Some lists have dictionaries "
"missing the 'line_number' key, and the lengths of these lists are different. List lengths are: "
"{'data': 2, 'baseline': 1}. Please make sure these lists have the same length "
"or add 'line_number' key to each dictionary.",
),
],
)
def test_merge_input_dicts_by_line_error(self, inputs, error_code, error_message):
with pytest.raises(error_code) as e:
BatchInputsProcessor("", {})._merge_input_dicts_by_line(inputs)
assert error_message == str(e.value), "Expected: {}, Actual: {}".format(error_message, str(e.value))
@pytest.mark.parametrize("inputs_mapping", [{"question": "${data.question}"}, {}])
def test_complete_inputs_mapping_by_default_value(self, inputs_mapping):
inputs = {
"question": None,
"groundtruth": None,
"input_with_default_value": FlowInputDefinition(type=ValueType.BOOL, default=False),
}
updated_inputs_mapping = BatchInputsProcessor("", inputs)._complete_inputs_mapping_by_default_value(
inputs_mapping
)
assert "input_with_default_value" not in updated_inputs_mapping
assert updated_inputs_mapping == {"question": "${data.question}", "groundtruth": "${data.groundtruth}"}
@pytest.mark.parametrize(
"inputs, inputs_mapping, expected",
[
(
# Use default mapping generated from flow inputs.
{
"data": [{"question": "q1", "groundtruth": "ans1"}, {"question": "q2", "groundtruth": "ans2"}],
},
{},
[
{
"question": "q1",
"groundtruth": "ans1",
"line_number": 0,
},
{
"question": "q2",
"groundtruth": "ans2",
"line_number": 1,
},
],
),
(
# Partially use default mapping generated from flow inputs.
{
"data": [{"question": "q1", "groundtruth": "ans1"}, {"question": "q2", "groundtruth": "ans2"}],
},
{
"question": "${data.question}",
},
[
{
"question": "q1",
"groundtruth": "ans1",
"line_number": 0,
},
{
"question": "q2",
"groundtruth": "ans2",
"line_number": 1,
},
],
),
(
{
"data": [
{"question": "q1", "answer": "ans1", "line_number": 5},
{"question": "q2", "answer": "ans2", "line_number": 6},
],
"baseline": [
{"answer": "baseline_ans1", "line_number": 5},
{"answer": "baseline_ans2", "line_number": 7},
],
},
{
"question": "${data.question}", # Question from the data
"groundtruth": "${data.answer}", # Answer from the data
"baseline": "${baseline.answer}", # Answer from the baseline
"deployment_name": "text-davinci-003", # literal value
"line_number": "${data.question}", # line_number mapping should be ignored
},
[
{
"question": "q1",
"groundtruth": "ans1",
"baseline": "baseline_ans1",
"deployment_name": "text-davinci-003",
"line_number": 5,
},
],
),
],
)
def test_validate_and_apply_inputs_mapping(self, inputs, inputs_mapping, expected):
flow_inputs = {"question": None, "groundtruth": None}
result = BatchInputsProcessor("", flow_inputs)._validate_and_apply_inputs_mapping(inputs, inputs_mapping)
assert expected == result, "Expected: {}, Actual: {}".format(expected, result)
def test_validate_and_apply_inputs_mapping_empty_input(self):
inputs = {
"data": [{"question": "q1", "answer": "ans1"}, {"question": "q2", "answer": "ans2"}],
"baseline": [{"answer": "baseline_ans1"}, {"answer": "baseline_ans2"}],
}
result = BatchInputsProcessor("", {})._validate_and_apply_inputs_mapping(inputs, {})
assert result == [
{"line_number": 0},
{"line_number": 1},
], "Empty flow inputs and inputs_mapping should return list with empty dicts."
@pytest.mark.parametrize(
"inputs_mapping, error_code",
[
(
{"question": "${question}"},
InputMappingError,
),
],
)
def test_validate_and_apply_inputs_mapping_error(self, inputs_mapping, error_code):
flow_inputs = {"question": None}
with pytest.raises(error_code) as _:
BatchInputsProcessor("", flow_inputs)._validate_and_apply_inputs_mapping(
inputs={}, inputs_mapping=inputs_mapping
)
@pytest.mark.parametrize(
"inputs, inputs_mapping, error_code, error_message",
[
(
{
"data": [{"question": "q1", "answer": "ans1"}, {"question": "q2", "answer": "ans2"}],
},
None,
UnexpectedError,
"The input for batch run is incorrect. Please make sure to set up a proper input mapping "
"before proceeding. If you need additional help, feel free to contact support for further assistance.",
),
],
)
def test_inputs_mapping_for_all_lines_error(self, inputs, inputs_mapping, error_code, error_message):
with pytest.raises(error_code) as e:
BatchInputsProcessor("", {})._apply_inputs_mapping_for_all_lines(inputs, inputs_mapping)
assert error_message == str(e.value), "Expected: {}, Actual: {}".format(error_message, str(e.value))
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_execution_utils.py | import pytest
from promptflow._utils.execution_utils import apply_default_value_for_input
from promptflow.contracts.flow import FlowInputDefinition
from promptflow.contracts.tool import ValueType
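# apply_default_value_for_input should fill defaults only for flow inputs missing from the provided inputs;
# both None and {} count as "nothing provided".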
@pytest.mark.unittest
class TestFlowExecutor:
@pytest.mark.parametrize(
"flow_inputs, inputs, expected_inputs",
[
(
{
"input_from_default": FlowInputDefinition(type=ValueType.STRING, default="default_value"),
},
None, # Could handle None input
{"input_from_default": "default_value"},
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.STRING, default="default_value"),
},
{},
{"input_from_default": "default_value"},
),
(
{
"input_no_default": FlowInputDefinition(type=ValueType.STRING),
},
{},
{}, # No default value for input.
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.STRING, default="default_value"),
},
{"input_from_default": "input_value", "another_key": "input_value"},
{"input_from_default": "input_value", "another_key": "input_value"},
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.BOOL, default=False),
},
{},
{"input_from_default": False},
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.LIST, default=[]),
},
{},
{"input_from_default": []},
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.OBJECT, default={}),
},
{},
{"input_from_default": {}},
),
],
)
def test_apply_default_value_for_input(self, flow_inputs, inputs, expected_inputs):
result = apply_default_value_for_input(flow_inputs, inputs)
assert result == expected_inputs
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_tool_utils.py | import inspect
from typing import Union
import pytest
from promptflow._core._errors import DuplicateToolMappingError
from promptflow._utils.tool_utils import (
DynamicListError,
ListFunctionResponseError,
_find_deprecated_tools,
append_workspace_triple_to_func_input_params,
function_to_interface,
load_function_from_function_path,
param_to_definition,
validate_dynamic_list_func_response_type,
)
from promptflow.connections import AzureOpenAIConnection, CustomConnection
from promptflow.contracts.tool import ValueType, Tool, ToolType
# mock functions for dynamic list function testing
def mock_dynamic_list_func1():
pass
def mock_dynamic_list_func2(input1):
pass
def mock_dynamic_list_func3(input1, input2):
pass
def mock_dynamic_list_func4(input1, input2, **kwargs):
pass
def mock_dynamic_list_func5(input1, input2, subscription_id):
pass
def mock_dynamic_list_func6(input1, input2, subscription_id, resource_group_name, workspace_name):
pass
def mock_dynamic_list_func7(input1, input2, subscription_id, **kwargs):
pass
def mock_dynamic_list_func8(input1, input2, subscription_id, resource_group_name, workspace_name, **kwargs):
pass
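# The mock_dynamic_list_func* variants differ only in which workspace-triple parameters (subscription_id,
# resource_group_name, workspace_name) and **kwargs they accept, driving the workspace-triple injection cases below.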
@pytest.mark.unittest
class TestToolUtils:
def test_function_to_interface(self):
def func(conn: [AzureOpenAIConnection, CustomConnection], input: [str, int]):
pass
input_defs, _, connection_types, _ = function_to_interface(func)
assert len(input_defs) == 2
assert input_defs["conn"].type == ["AzureOpenAIConnection", "CustomConnection"]
assert input_defs["input"].type == [ValueType.OBJECT]
assert connection_types == [["AzureOpenAIConnection", "CustomConnection"]]
def test_function_to_interface_with_invalid_initialize_inputs(self):
def func(input_str: str):
pass
with pytest.raises(Exception) as exec_info:
function_to_interface(func, {"input_str": "test"})
assert "Duplicate inputs found from" in exec_info.value.args[0]
def test_function_to_interface_with_kwargs(self):
def func(input_str: str, **kwargs):
pass
_, _, _, enable_kwargs = function_to_interface(func)
assert enable_kwargs is True
def func(input_str: str):
pass
_, _, _, enable_kwargs = function_to_interface(func)
assert enable_kwargs is False
def test_param_to_definition(self):
from promptflow._sdk.entities import CustomStrongTypeConnection
from promptflow.contracts.tool import Secret
class MyFirstConnection(CustomStrongTypeConnection):
api_key: Secret
api_base: str
class MySecondConnection(CustomStrongTypeConnection):
api_key: Secret
api_base: str
def some_func(
conn1: MyFirstConnection,
conn2: Union[CustomConnection, MyFirstConnection],
conn3: Union[MyFirstConnection, CustomConnection],
conn4: Union[MyFirstConnection, MySecondConnection],
conn5: CustomConnection,
conn6: Union[CustomConnection, int],
conn7: Union[MyFirstConnection, int],
):
pass
sig = inspect.signature(some_func)
input_def, _ = param_to_definition(sig.parameters.get("conn1"), gen_custom_type_conn=True)
assert input_def.type == ["CustomConnection"]
assert input_def.custom_type == ["MyFirstConnection"]
input_def, _ = param_to_definition(sig.parameters.get("conn2"), gen_custom_type_conn=True)
assert input_def.type == ["CustomConnection"]
assert input_def.custom_type == ["MyFirstConnection"]
input_def, _ = param_to_definition(sig.parameters.get("conn3"), gen_custom_type_conn=True)
assert input_def.type == ["CustomConnection"]
assert input_def.custom_type == ["MyFirstConnection"]
input_def, _ = param_to_definition(sig.parameters.get("conn4"), gen_custom_type_conn=True)
assert input_def.type == ["CustomConnection"]
assert input_def.custom_type == ["MyFirstConnection", "MySecondConnection"]
input_def, _ = param_to_definition(sig.parameters.get("conn5"), gen_custom_type_conn=True)
assert input_def.type == ["CustomConnection"]
assert input_def.custom_type is None
input_def, _ = param_to_definition(sig.parameters.get("conn6"), gen_custom_type_conn=True)
assert input_def.type == [ValueType.OBJECT]
assert input_def.custom_type is None
input_def, _ = param_to_definition(sig.parameters.get("conn7"), gen_custom_type_conn=True)
assert input_def.type == [ValueType.OBJECT]
assert input_def.custom_type is None
@pytest.mark.parametrize(
"func, func_input_params_dict, use_ws_triple, expected_res",
[
(mock_dynamic_list_func1, None, False, {}),
(mock_dynamic_list_func2, {"input1": "value1"}, False, {"input1": "value1"}),
(
mock_dynamic_list_func3,
{"input1": "value1", "input2": "value2"},
False,
{"input1": "value1", "input2": "value2"},
),
(mock_dynamic_list_func3, {"input1": "value1"}, False, {"input1": "value1"}),
(mock_dynamic_list_func3, {"input1": "value1"}, True, {"input1": "value1"}),
(
mock_dynamic_list_func4,
{"input1": "value1"},
True,
{
"input1": "value1",
"subscription_id": "mock_subscription_id",
"resource_group_name": "mock_resource_group",
"workspace_name": "mock_workspace_name",
},
),
(
mock_dynamic_list_func5,
{"input1": "value1"},
True,
{"input1": "value1", "subscription_id": "mock_subscription_id"},
),
(
mock_dynamic_list_func5,
{"input1": "value1", "subscription_id": "input_subscription_id"},
True,
{"input1": "value1", "subscription_id": "input_subscription_id"},
),
(
mock_dynamic_list_func6,
{"input1": "value1"},
True,
{
"input1": "value1",
"subscription_id": "mock_subscription_id",
"resource_group_name": "mock_resource_group",
"workspace_name": "mock_workspace_name",
},
),
(
mock_dynamic_list_func6,
{
"input1": "value1",
"workspace_name": "input_workspace_name",
},
True,
{
"input1": "value1",
"workspace_name": "input_workspace_name",
"subscription_id": "mock_subscription_id",
"resource_group_name": "mock_resource_group",
},
),
(
mock_dynamic_list_func7,
{"input1": "value1"},
True,
{
"input1": "value1",
"subscription_id": "mock_subscription_id",
"resource_group_name": "mock_resource_group",
"workspace_name": "mock_workspace_name",
},
),
(
mock_dynamic_list_func7,
{"input1": "value1", "subscription_id": "input_subscription_id"},
True,
{
"input1": "value1",
"subscription_id": "input_subscription_id",
"resource_group_name": "mock_resource_group",
"workspace_name": "mock_workspace_name",
},
),
(
mock_dynamic_list_func8,
{"input1": "value1"},
True,
{
"input1": "value1",
"subscription_id": "mock_subscription_id",
"resource_group_name": "mock_resource_group",
"workspace_name": "mock_workspace_name",
},
),
(
mock_dynamic_list_func8,
{
"input1": "value1",
"subscription_id": "input_subscription_id",
"resource_group_name": "input_resource_group",
"workspace_name": "input_workspace_name",
},
True,
{
"input1": "value1",
"subscription_id": "input_subscription_id",
"resource_group_name": "input_resource_group",
"workspace_name": "input_workspace_name",
},
),
],
)
def test_append_workspace_triple_to_func_input_params(
self, func, func_input_params_dict, use_ws_triple, expected_res, mocked_ws_triple
):
ws_triple_dict = mocked_ws_triple._asdict() if use_ws_triple else None
func_sig_params = inspect.signature(func).parameters
actual_combined_inputs = append_workspace_triple_to_func_input_params(
func_sig_params=func_sig_params,
func_input_params_dict=func_input_params_dict,
ws_triple_dict=ws_triple_dict,
)
assert actual_combined_inputs == expected_res
@pytest.mark.parametrize(
"res",
[
(
[
{
"value": "fig0",
"display_value": "My_fig0",
"hyperlink": "https://www.bing.com/search?q=fig0",
"description": "this is 0 item",
},
{
"value": "kiwi1",
"display_value": "My_kiwi1",
"hyperlink": "https://www.bing.com/search?q=kiwi1",
"description": "this is 1 item",
},
]
),
([{"value": "fig0"}, {"value": "kiwi1"}]),
([{"value": "fig0", "display_value": "My_fig0"}, {"value": "kiwi1", "display_value": "My_kiwi1"}]),
(
[
{"value": "fig0", "display_value": "My_fig0", "hyperlink": "https://www.bing.com/search?q=fig0"},
{
"value": "kiwi1",
"display_value": "My_kiwi1",
"hyperlink": "https://www.bing.com/search?q=kiwi1",
},
]
),
([{"value": "fig0", "hyperlink": "https://www.bing.com/search?q=fig0"}]),
(
[
{"value": "fig0", "display_value": "My_fig0", "description": "this is 0 item"},
{
"value": "kiwi1",
"display_value": "My_kiwi1",
"hyperlink": "https://www.bing.com/search?q=kiwi1",
"description": "this is 1 item",
},
]
),
],
)
def test_validate_dynamic_list_func_response_type(self, res):
validate_dynamic_list_func_response_type(response=res, f="mock_func")
@pytest.mark.parametrize(
"res, err_msg",
[
(None, "mock_func response can not be empty."),
([], "mock_func response can not be empty."),
(["a", "b"], "mock_func response must be a list of dict. a is not a dict."),
({"a": "b"}, "mock_func response must be a list."),
([{"a": "b"}], "mock_func response dict must have 'value' key."),
([{"value": 1 + 2j}], "mock_func response dict value \\(1\\+2j\\) is not json serializable."),
],
)
def test_validate_dynamic_list_func_response_type_with_error(self, res, err_msg):
error_message = (
f"Unable to display list of items due to '{err_msg}'. \nPlease contact the tool "
f"author/support team for troubleshooting assistance."
)
with pytest.raises(ListFunctionResponseError, match=error_message):
validate_dynamic_list_func_response_type(response=res, f="mock_func")
def test_load_function_from_function_path(self, mock_module_with_list_func):
func_path = "my_tool_package.tools.tool_with_dynamic_list_input.my_list_func"
load_function_from_function_path(func_path)
def test_load_function_from_function_path_with_error(self, mock_module_with_list_func):
func_path = "mock_func_path"
with pytest.raises(
DynamicListError,
match="Unable to display list of items due to 'Failed to parse function from function path: "
"'mock_func_path'. Expected format: format 'my_module.my_func'. Detailed error: not enough "
"values to unpack \\(expected 2, got 1\\)'. \nPlease contact the tool author/support team for "
"troubleshooting assistance.",
):
load_function_from_function_path(func_path)
func_path = "fake_tool_pkg.tools.tool_with_dynamic_list_input.my_list_func"
with pytest.raises(
DynamicListError,
match="Unable to display list of items due to 'Failed to parse function from function path: "
"'fake_tool_pkg.tools.tool_with_dynamic_list_input.my_list_func'. Expected format: format "
"'my_module.my_func'. Detailed error: No module named 'fake_tool_pkg''. \nPlease contact the tool "
"author/support team for troubleshooting assistance.",
):
load_function_from_function_path(func_path)
func_path = "my_tool_package.tools.tool_with_dynamic_list_input.my_field"
with pytest.raises(
DynamicListError,
match="Unable to display list of items due to 'Failed to parse function from function path: "
"'my_tool_package.tools.tool_with_dynamic_list_input.my_field'. Expected format: "
"format 'my_module.my_func'. Detailed error: Unable to display list of items due to ''1' "
"is not callable.'. \nPlease contact the tool author/support team for troubleshooting assistance.",
):
load_function_from_function_path(func_path)
def test_find_deprecated_tools(self):
package_tools = {
"new_tool_1": Tool(
name="new tool 1", type=ToolType.PYTHON, inputs={}, deprecated_tools=["old_tool_1"]).serialize(),
"new_tool_2": Tool(
name="new tool 1", type=ToolType.PYTHON, inputs={}, deprecated_tools=["old_tool_1"]).serialize(),
}
with pytest.raises(DuplicateToolMappingError, match="secure operation"):
_find_deprecated_tools(package_tools)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_logger_utils.py | import io
import logging
import time
from multiprocessing.pool import ThreadPool
from pathlib import Path
from tempfile import mkdtemp
from unittest.mock import Mock
from uuid import uuid4
import pytest
from promptflow._utils.credential_scrubber import CredentialScrubber
from promptflow._utils.logger_utils import (
CredentialScrubberFormatter,
FileHandler,
FileHandlerConcurrentWrapper,
LogContext,
bulk_logger,
scrub_credentials,
update_log_path,
update_single_log_path,
)
from promptflow.contracts.run_mode import RunMode
from ...utils import load_content
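# Swap the wrapped FileHandler on a shared logger, emit one record, then clear it; used to verify thread-safe handler switching.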
def _set_handler(logger: logging.Logger, handler: FileHandler, log_content: str):
for h in logger.handlers:
if isinstance(h, FileHandlerConcurrentWrapper):
h.handler = handler
time.sleep(1)
logger.warning(log_content)
h.clear()
class DummyException(Exception):
pass
@pytest.fixture
def logger():
logger = logging.getLogger(str(uuid4()))
logger.setLevel(logging.INFO)
return logger
@pytest.fixture
def stream_handler():
stream = io.StringIO()
return logging.StreamHandler(stream)
@pytest.mark.unittest
class TestCredentialScrubberFormatter:
def test_log(self, logger, stream_handler):
"""Make sure credentials by logger.log are scrubbed."""
formatter = CredentialScrubberFormatter()
formatter.set_credential_list(["dummy secret"])
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
logger.info("testinfo&sig=signature")
logger.error("testerror&key=accountkey")
logger.warning("testwarning&sig=signature")
logger.critical("print dummy secret")
expected_log_output = (
f"testinfo&sig={CredentialScrubber.PLACE_HOLDER}\n"
f"testerror&key={CredentialScrubber.PLACE_HOLDER}\n"
f"testwarning&sig={CredentialScrubber.PLACE_HOLDER}\n"
f"print {CredentialScrubber.PLACE_HOLDER}\n"
)
assert stream_handler.stream.getvalue() == expected_log_output
def test_log_with_args(self, logger, stream_handler):
"""Make sure credentials by logger.log (in args) are scrubbed."""
formatter = CredentialScrubberFormatter()
formatter.set_credential_list(["dummy secret"])
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
logger.info("testinfo&sig=%s credential=%s", "signature", "dummy secret")
expected_log_output = (
f"testinfo&sig={CredentialScrubber.PLACE_HOLDER} " f"credential={CredentialScrubber.PLACE_HOLDER}\n"
)
assert stream_handler.stream.getvalue() == expected_log_output
def test_log_with_exc_info(self, logger, stream_handler):
"""Make sure credentials in exception are scrubbed."""
formatter = CredentialScrubberFormatter()
formatter.set_credential_list(["dummy secret"])
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
exception = DummyException("credential=dummy secret accountkey=accountkey")
logger.exception("test exception", exc_info=exception)
expected_log_output = "credential=**data_scrubbed** accountkey=**data_scrubbed**"
assert expected_log_output in stream_handler.stream.getvalue()
def test_set_credential_list_thread_safe(self):
formatter = CredentialScrubberFormatter()
def set_and_check_credential_list(credential_list):
formatter.set_credential_list(credential_list)
time.sleep(1)
assert formatter.credential_scrubber.custom_str_set == set(credential_list)
with ThreadPool(processes=3) as pool:
results = pool.map(set_and_check_credential_list, [[f"secret {i}", f"credential {i}"] for i in range(3)])
_ = list(results)
@pytest.mark.unittest
class TestFileHandlerConcurrentWrapper:
def test_set_handler_thread_safe(self):
wrapper = FileHandlerConcurrentWrapper()
logger = logging.getLogger("test execution log handler")
logger.addHandler(wrapper)
process_num = 3
folder_path = Path(mkdtemp())
log_path_list = [str(folder_path / f"log_{i}.log") for i in range(process_num)]
with ThreadPool(processes=process_num) as pool:
results = pool.starmap(
_set_handler, ((logger, FileHandler(log_path_list[i]), f"log {i}") for i in range(process_num))
)
results = list(results)
# Make sure log content is as expected.
for i, log_path in enumerate(log_path_list):
with open(log_path, "r") as f:
log = f.read()
log_lines = log.split("\n")
assert len(log_lines) == 2
assert f"log {i}" in log_lines[0]
assert log_lines[1] == ""
def test_clear(self):
wrapper = FileHandlerConcurrentWrapper()
assert wrapper.handler is None
log_path = str(Path(mkdtemp()) / "logs.log")
file_handler = FileHandler(log_path)
file_handler.close = Mock(side_effect=Exception("test exception"))
wrapper.handler = file_handler
wrapper.clear()
assert wrapper.handler is None
@pytest.mark.unittest
class TestLogContext:
def test_context_manager(self):
log_handler = FileHandlerConcurrentWrapper()
logger = logging.getLogger("test_setup_logger_context")
logger.addHandler(log_handler)
log_path = str(Path(mkdtemp()) / "test.log")
try:
log_context_initializer = LogContext(log_path).get_initializer()
log_context = log_context_initializer()
log_context.input_logger = logger
assert LogContext.get_current() is None
with log_context:
assert LogContext.get_current() is not None
# Make sure context variables are set.
inner_handler = log_handler._context_var.get()
assert isinstance(inner_handler, FileHandler)
assert isinstance(inner_handler._formatter, CredentialScrubberFormatter)
scrubber = inner_handler._formatter._context_var.get()
assert scrubber is not None
logger.warning("Print %s", "&sig=signature")
# Raise exception for test.
raise DummyException("Raise exception for test.")
except DummyException:
pass
# Make sure log content is as expected.
with open(log_path, "r") as f:
log_content = f.read()
assert f"Print &sig={CredentialScrubber.PLACE_HOLDER}" in log_content
# Make sure context variables are cleaned up.
assert log_handler._context_var.get() is None
def test_empty_file_path(self, logger, stream_handler):
logger.addHandler(stream_handler)
logger.addHandler(FileHandlerConcurrentWrapper())
with LogContext("", input_logger=logger):
logger.info("test log")
assert stream_handler.stream.getvalue() == "test log\n"
def test_update_log_path(self):
log_handler = FileHandlerConcurrentWrapper()
input_logger = logging.getLogger("input_logger")
input_logger.addHandler(log_handler)
folder_path = Path(mkdtemp())
original_log_path = str(folder_path / "original_log.log")
with LogContext(original_log_path, input_logger=input_logger, run_mode=RunMode.Batch):
bulk_logger.info("test log")
input_logger.warning("test input log")
original_log = load_content(original_log_path)
keywords = ["test log", "test input log", "execution.bulk", "input_logger", "INFO", "WARNING"]
assert all(keyword in original_log for keyword in keywords)
# Update log path
log_path = str(folder_path / "log_without_input_logger.log")
update_log_path(log_path, input_logger)
bulk_logger.info("test update log")
input_logger.warning("test update input log")
log = load_content(log_path)
keywords = ["test update log", "test update input log", "execution.bulk", "input_logger", "INFO", "WARNING"]
assert all(keyword in log for keyword in keywords)
def test_update_single_log_path(self):
log_handler = FileHandlerConcurrentWrapper()
input_logger = logging.getLogger("input_logger")
input_logger.addHandler(log_handler)
folder_path = Path(mkdtemp())
original_log_path = str(folder_path / "original_log.log")
with LogContext(original_log_path, input_logger=input_logger, run_mode=RunMode.Batch):
bulk_logger.info("test log")
input_logger.warning("test input log")
original_log = load_content(original_log_path)
keywords = ["test log", "test input log", "execution.bulk", "input_logger", "INFO", "WARNING"]
assert all(keyword in original_log for keyword in keywords)
# Update log path
bulk_log_path = str(folder_path / "update_bulk_log.log")
update_single_log_path(bulk_log_path, bulk_logger)
input_log_path = str(folder_path / "update_input_log.log")
update_single_log_path(input_log_path, input_logger)
bulk_logger.info("test update log")
input_logger.warning("test update input log")
bulk_log = load_content(bulk_log_path)
input_log = load_content(input_log_path)
bulk_keywords = ["test update log", "execution.bulk", "INFO"]
input_keywords = ["test update input log", "input_logger", "WARNING"]
assert all(keyword in bulk_log for keyword in bulk_keywords)
assert all(keyword not in bulk_log for keyword in input_keywords)
assert all(keyword in input_log for keyword in input_keywords)
assert all(keyword not in input_log for keyword in bulk_keywords)
def test_scrub_credentials(self):
log_content = "sig=signature&key=accountkey"
folder_path = Path(mkdtemp())
logs_path = str(folder_path / "logs.log")
scrubbed_log_content = scrub_credentials(log_content)
assert scrubbed_log_content == "sig=**data_scrubbed**&key=**data_scrubbed**"
with LogContext(logs_path):
scrubbed_log_content = scrub_credentials(log_content)
assert scrubbed_log_content == "sig=**data_scrubbed**&key=**data_scrubbed**"
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_feature_utils.py | import pytest
from promptflow._utils.feature_utils import Feature, get_feature_list
@pytest.mark.unittest
def test_get_feature_list():
feature_list = get_feature_list()
assert isinstance(feature_list, list)
assert all(isinstance(feature, Feature) for feature in feature_list)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_utils.py | import pytest
import os
from unittest.mock import patch
from datetime import datetime
from promptflow._utils.utils import is_json_serializable, get_int_env_var, log_progress
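# Plain object with no JSON representation, used as the negative case for is_json_serializable.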
class MyObj:
pass
@pytest.mark.unittest
class TestUtils:
@pytest.mark.parametrize("value, expected_res", [(None, True), (1, True), ("", True), (MyObj(), False)])
def test_is_json_serializable(self, value, expected_res):
assert is_json_serializable(value) == expected_res
@pytest.mark.parametrize(
"env_var, env_value, default_value, expected_result",
[
("TEST_VAR", "10", None, 10), # Valid integer string
("TEST_VAR", "invalid", None, None), # Invalid integer strings
("TEST_VAR", None, 5, 5), # Environment variable does not exist
("TEST_VAR", "10", 5, 10), # Valid integer string with a default value
("TEST_VAR", "invalid", 5, 5), # Invalid integer string with a default value
])
def test_get_int_env_var(self, env_var, env_value, default_value, expected_result):
with patch.dict(os.environ, {env_var: env_value} if env_value is not None else {}):
assert get_int_env_var(env_var, default_value) == expected_result
@pytest.mark.parametrize(
"env_var, env_value, expected_result",
[
("TEST_VAR", "10", 10), # Valid integer string
("TEST_VAR", "invalid", None), # Invalid integer strings
("TEST_VAR", None, None), # Environment variable does not exist
])
    def test_get_int_env_var_without_default_value(self, env_var, env_value, expected_result):
with patch.dict(os.environ, {env_var: env_value} if env_value is not None else {}):
assert get_int_env_var(env_var) == expected_result
@patch('promptflow.executor._line_execution_process_pool.bulk_logger', autospec=True)
def test_log_progress(self, mock_logger):
run_start_time = datetime.utcnow()
count = 1
        # Test that progress is not logged when count is not at the specified interval (interval = 2)
total_count = 20
log_progress(run_start_time, mock_logger, count, total_count)
mock_logger.info.assert_not_called()
# Test logging at specified intervals (interval = 2)
count = 8
log_progress(run_start_time, mock_logger, count, total_count)
mock_logger.info.assert_any_call("Finished 8 / 20 lines.")
mock_logger.reset_mock()
        # Test logging using the last_log_count parameter (count - last_log_count > interval (2))
log_progress(run_start_time, mock_logger, count, total_count, last_log_count=5)
mock_logger.info.assert_any_call("Finished 8 / 20 lines.")
mock_logger.reset_mock()
        # Test that it does not log using the last_log_count parameter (count - last_log_count < interval (2))
log_progress(run_start_time, mock_logger, count, total_count, last_log_count=7)
mock_logger.info.assert_not_called()
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_exception_utils.py | import json
import re
from traceback import TracebackException
import pytest
from promptflow._core._errors import ToolExecutionError
from promptflow._core.operation_context import OperationContext
from promptflow._utils.exception_utils import (
ErrorResponse,
ExceptionPresenter,
JsonSerializedPromptflowException,
get_tb_next,
infer_error_code_from_class,
last_frame_info,
remove_suffix,
)
from promptflow.exceptions import (
ErrorTarget,
PromptflowException,
SystemErrorException,
UserErrorException,
ValidationException,
)
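# Helper functions that raise various exception chains inspected by the tests below.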
def set_inner_exception_by_parameter():
raise PromptflowException("test", error=ValueError("bad number"))
def set_inner_exception_by_raise_from():
raise PromptflowException("test") from ValueError("bad number")
def code_with_bug():
1 / 0
def raise_tool_execution_error():
try:
code_with_bug()
except Exception as e:
raise ToolExecutionError(node_name="MyTool") from e
def raise_exception_with_object():
raise PromptflowException(message_format="{inner_exception}", inner_exception=Exception("exception message"))
def raise_user_error():
try:
code_with_bug()
except Exception as e:
raise UserErrorException("run failed", target=ErrorTarget.TOOL) from e
def raise_context_exception():
try:
code_with_bug()
except Exception as e:
raise CustomizedContextException(e)
class CustomizedContextException(Exception):
def __init__(self, inner_exception):
self.inner_exception = inner_exception
@property
def message(self):
code_with_bug()
return "context exception"
class CustomizedException(Exception):
pass
class CustomUserError(UserErrorException):
pass
class CustomDefaultTargetError(UserErrorException):
def __init__(self, target=ErrorTarget.EXECUTOR, **kwargs):
super().__init__(target=target, **kwargs)
def raise_general_exception():
try:
code_with_bug()
except Exception as e:
raise CustomizedException("General exception") from e
def raise_promptflow_exception():
try:
code_with_bug()
except Exception as e:
raise PromptflowException("Promptflow exception") from e
def raise_promptflow_exception_without_inner_exception():
try:
code_with_bug()
except Exception:
raise PromptflowException("Promptflow exception")
TOOL_EXECUTION_ERROR_TRACEBACK = r"""Traceback \(most recent call last\):
File ".*test_exception_utils.py", line .*, in code_with_bug
1 / 0
ZeroDivisionError: division by zero
"""
TOOL_EXCEPTION_TRACEBACK = r"""
The above exception was the direct cause of the following exception:
Traceback \(most recent call last\):
File ".*test_exception_utils.py", line .*, in test_.*
raise_tool_execution_error\(\)
File ".*test_exception_utils.py", line .*, in raise_tool_execution_error
raise ToolExecutionError\(node_name="MyTool"\) from e
"""
TOOL_EXCEPTION_INNER_TRACEBACK = r"""Traceback \(most recent call last\):
File ".*test_exception_utils.py", line .*, in raise_tool_execution_error
code_with_bug\(\)
File ".*test_exception_utils.py", line .*, in code_with_bug
1 / 0
"""
GENERAL_EXCEPTION_TRACEBACK = r"""
The above exception was the direct cause of the following exception:
Traceback \(most recent call last\):
File ".*test_exception_utils.py", line .*, in test_debug_info_for_general_exception
raise_general_exception\(\)
File ".*test_exception_utils.py", line .*, in raise_general_exception
raise CustomizedException\("General exception"\) from e
"""
GENERAL_EXCEPTION_INNER_TRACEBACK = r"""Traceback \(most recent call last\):
File ".*test_exception_utils.py", line .*, in raise_general_exception
code_with_bug\(\)
File ".*test_exception_utils.py", line .*, in code_with_bug
1 / 0
"""
CONTEXT_EXCEPTION_TRACEBACK = r"""
During handling of the above exception, another exception occurred:
Traceback \(most recent call last\):
File ".*test_exception_utils.py", line .*, in test_debug_info_for_context_exception
raise_context_exception\(\)
File ".*test_exception_utils.py", line .*, in raise_context_exception
raise CustomizedContextException\(e\)
"""
CONTEXT_EXCEPTION_INNER_TRACEBACK = r"""Traceback \(most recent call last\):
File ".*test_exception_utils.py", line .*, in raise_context_exception
code_with_bug\(\)
File ".*test_exception_utils.py", line .*, in code_with_bug
1 / 0
"""
@pytest.mark.unittest
class TestExceptionUtilsCommonMethod:
def test_get_tb_next(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
tb_next = get_tb_next(e.value.__traceback__, 3)
te = TracebackException(type(e.value), e.value, tb_next)
formatted_tb = "".join(te.format())
assert re.match(TOOL_EXCEPTION_INNER_TRACEBACK, formatted_tb)
def test_last_frame_info(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
frame_info = last_frame_info(e.value)
assert "test_exception_utils.py" in frame_info.get("filename")
assert frame_info.get("lineno") > 0
assert frame_info.get("name") == "raise_tool_execution_error"
assert last_frame_info(None) == {}
@pytest.mark.parametrize(
"error_class, expected_error_code",
[
(UserErrorException, "UserError"),
(SystemErrorException, "SystemError"),
(ValidationException, "ValidationError"),
(ToolExecutionError, "ToolExecutionError"),
(ValueError, "ValueError"),
],
)
def test_infer_error_code_from_class(self, error_class, expected_error_code):
assert infer_error_code_from_class(error_class) == expected_error_code
@pytest.mark.unittest
class TestExceptionPresenter:
def test_debug_info(self):
# Test ToolExecutionError
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
presenter = ExceptionPresenter.create(e.value)
debug_info = presenter.debug_info
assert debug_info["type"] == "ToolExecutionError"
assert re.match(TOOL_EXCEPTION_TRACEBACK, debug_info["stackTrace"])
inner_exception = debug_info["innerException"]
assert inner_exception["type"] == "ZeroDivisionError"
assert re.match(TOOL_EXCEPTION_INNER_TRACEBACK, inner_exception["stackTrace"])
def test_debug_info_for_context_exception(self):
with pytest.raises(CustomizedContextException) as e:
raise_context_exception()
presenter = ExceptionPresenter.create(e.value)
debug_info = presenter.debug_info
assert debug_info["type"] == "CustomizedContextException"
assert re.match(CONTEXT_EXCEPTION_TRACEBACK, debug_info["stackTrace"])
inner_exception = debug_info["innerException"]
assert inner_exception["type"] == "ZeroDivisionError"
assert re.match(CONTEXT_EXCEPTION_INNER_TRACEBACK, inner_exception["stackTrace"])
def test_debug_info_for_general_exception(self):
# Test General Exception
with pytest.raises(CustomizedException) as e:
raise_general_exception()
presenter = ExceptionPresenter.create(e.value)
debug_info = presenter.debug_info
assert debug_info["type"] == "CustomizedException"
assert re.match(GENERAL_EXCEPTION_TRACEBACK, debug_info["stackTrace"])
inner_exception = debug_info["innerException"]
assert inner_exception["type"] == "ZeroDivisionError"
assert re.match(GENERAL_EXCEPTION_INNER_TRACEBACK, inner_exception["stackTrace"])
def test_to_dict_for_general_exception(self):
with pytest.raises(CustomizedException) as e:
raise_general_exception()
presenter = ExceptionPresenter.create(e.value)
dct = presenter.to_dict(include_debug_info=True)
assert "debugInfo" in dct
dct.pop("debugInfo")
assert dct == {
"code": "SystemError",
"message": "General exception",
"messageFormat": "",
"messageParameters": {},
"innerError": {
"code": "CustomizedException",
"innerError": None,
},
}
def test_to_dict_for_promptflow_exception(self):
with pytest.raises(PromptflowException) as e:
raise_promptflow_exception()
presenter = ExceptionPresenter.create(e.value)
dct = presenter.to_dict(include_debug_info=False)
assert dct == {
"code": "SystemError",
"message": "Promptflow exception",
"messageFormat": "",
"messageParameters": {},
"referenceCode": "Unknown",
"innerError": {
"code": "ZeroDivisionError",
"innerError": None,
},
}
def test_to_dict_for_promptflow_exception_without_inner_exception(self):
with pytest.raises(PromptflowException) as e:
raise_promptflow_exception_without_inner_exception()
presenter = ExceptionPresenter.create(e.value)
dct = presenter.to_dict(include_debug_info=False)
assert dct == {
"code": "SystemError",
"message": "Promptflow exception",
"messageFormat": "",
"messageParameters": {},
"referenceCode": "Unknown",
"innerError": None,
}
def test_to_dict_for_tool_execution_error(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
presenter = ExceptionPresenter.create(e.value)
assert re.search(TOOL_EXCEPTION_INNER_TRACEBACK, presenter.formatted_traceback)
assert re.search(TOOL_EXCEPTION_TRACEBACK, presenter.formatted_traceback)
dct = presenter.to_dict(include_debug_info=False)
assert dct.pop("additionalInfo") is not None
assert dct == {
"code": "UserError",
"message": "Execution failure in 'MyTool': (ZeroDivisionError) division by zero",
"messageFormat": "Execution failure in '{node_name}'.",
"messageParameters": {"node_name": "MyTool"},
"referenceCode": "Tool",
"innerError": {
"code": "ToolExecutionError",
"innerError": None,
},
}
@pytest.mark.parametrize(
"raise_exception_func, error_class, expected_error_codes",
[
(raise_general_exception, CustomizedException, ["SystemError", "CustomizedException"]),
(raise_tool_execution_error, ToolExecutionError, ["UserError", "ToolExecutionError"]),
(raise_promptflow_exception, PromptflowException, ["SystemError", "ZeroDivisionError"]),
(raise_promptflow_exception_without_inner_exception, PromptflowException, ["SystemError"]),
],
)
def test_error_codes(self, raise_exception_func, error_class, expected_error_codes):
with pytest.raises(error_class) as e:
raise_exception_func()
presenter = ExceptionPresenter.create(e.value)
assert presenter.error_codes == expected_error_codes
@pytest.mark.unittest
class TestErrorResponse:
def test_from_error_dict(self):
error_dict = {
"code": "UserError",
"message": "Flow run failed.",
}
response = ErrorResponse.from_error_dict(error_dict)
assert response.response_code == "400"
assert response.error_codes == ["UserError"]
assert response.message == "Flow run failed."
response_dct = response.to_dict()
assert response_dct["time"] is not None
response_dct.pop("time")
component_name = response_dct.pop("componentName", None)
assert component_name == OperationContext.get_instance().get_user_agent()
assert "promptflow" in component_name
assert response_dct == {
"error": {
"code": "UserError",
"message": "Flow run failed.",
},
"correlation": None,
"environment": None,
"location": None,
}
    def test_to_simplified_dict(self):
with pytest.raises(CustomizedException) as e:
raise_general_exception()
error_response = ErrorResponse.from_exception(e.value)
assert error_response.to_simplified_dict() == {
"error": {
"code": "SystemError",
"message": "General exception",
}
}
def test_from_exception(self):
with pytest.raises(CustomizedException) as e:
raise_general_exception()
response = ErrorResponse.from_exception(e.value).to_dict()
assert response["time"] is not None
response.pop("time")
component_name = response.pop("componentName", None)
assert component_name == OperationContext.get_instance().get_user_agent()
assert "promptflow" in component_name
assert response == {
"error": {
"code": "SystemError",
"message": "General exception",
"messageFormat": "",
"messageParameters": {},
"innerError": {
"code": "CustomizedException",
"innerError": None,
},
},
"correlation": None,
"environment": None,
"location": None,
}
@pytest.mark.unittest
@pytest.mark.parametrize(
"input_dict, expected",
[
({"code": "firstError"}, "firstError"),
({"code": "firstError", "innerError": {}}, "firstError"),
({"code": "firstError", "innerError": {"code": "secondError"}}, "firstError/secondError"),
({"code": None, "innerError": {"code": "secondError"}}, ""),
            # A dict without 'code' at the outermost level returns an empty string.
({"error": {"code": "firstError", "innerError": {"code": "secondError"}}}, ""),
],
)
def test_error_code_hierarchy(self, input_dict, expected):
assert ErrorResponse.from_error_dict(input_dict).error_code_hierarchy == expected
@pytest.mark.parametrize(
"error_dict, expected_innermost_error_code",
[
(
{
"code": "UserError",
"innerError": {
"code": "ToolExecutionError",
"innerError": None,
},
},
"ToolExecutionError",
),
({"code": "UserError", "innerError": None}, "UserError"),
({"message": "UserError", "innerError": None}, None),
],
)
def test_innermost_error_code_with_code(self, error_dict, expected_innermost_error_code):
inner_error_code = ErrorResponse.from_error_dict(error_dict).innermost_error_code
assert inner_error_code == expected_innermost_error_code
@pytest.mark.parametrize(
"error_dict, expected_additional_info",
[
({"code": "UserError"}, {}),
(
{
"code": "UserError",
"additionalInfo": [
{
"type": "test_additional_info",
"info": "This is additional info for testing.",
},
"not_dict",
{
"type": "empty_info",
},
{
"info": "Empty type",
},
{
"test": "Invalid additional info",
},
],
},
{"test_additional_info": "This is additional info for testing."},
),
],
)
def test_additional_info(self, error_dict, expected_additional_info):
error_response = ErrorResponse.from_error_dict(error_dict)
assert error_response.additional_info == expected_additional_info
assert all(error_response.get_additional_info(key) == value for key, value in expected_additional_info.items())
@pytest.mark.parametrize(
"raise_exception_func, error_class",
[
(raise_general_exception, CustomizedException),
(raise_tool_execution_error, ToolExecutionError),
],
)
def test_get_user_execution_error_info(self, raise_exception_func, error_class):
with pytest.raises(error_class) as e:
raise_exception_func()
        error_response = ErrorResponse.from_exception(e.value)
        actual_error_info = error_response.get_user_execution_error_info()
self.assert_user_execution_error_info(e.value, actual_error_info)
def assert_user_execution_error_info(self, exception, error_info):
if isinstance(exception, ToolExecutionError):
assert error_info["type"] == "ZeroDivisionError"
assert error_info["message"] == "division by zero"
assert error_info["filename"].endswith("test_exception_utils.py")
assert error_info["lineno"] > 0
assert error_info["name"] == "code_with_bug"
assert re.match(
r"Traceback \(most recent call last\):\n"
r' File ".*test_exception_utils.py", line .*, in code_with_bug\n'
r" 1 / 0\n"
r"(.*\n)?" # Python >= 3.11 add extra line here like a pointer.
r"ZeroDivisionError: division by zero\n",
error_info["traceback"],
)
# assert re.match(TOOL_EXECUTION_ERROR_TRACEBACK, error_info["traceback"])
else:
assert error_info == {}
@pytest.mark.unittest
class TestExceptions:
@pytest.mark.parametrize(
"ex, expected_message, expected_message_format, expected_message_parameters",
[
(
CustomUserError("message"),
"message",
"",
{},
),
(
CustomUserError(message="message"),
"message",
"",
{},
),
(
CustomUserError("message", target=ErrorTarget.TOOL),
"message",
"",
{},
),
(
CustomUserError(message="message", target=ErrorTarget.TOOL),
"message",
"",
{},
),
(
CustomUserError(message_format="Hello world"),
"Hello world",
"Hello world",
{},
),
(
CustomUserError(message_format="Hello {name}", name="world"),
"Hello world",
"Hello {name}",
{
"name": "world",
},
),
(
CustomUserError(message_format="Hello {name}", name="world", not_used="whatever"),
"Hello world",
"Hello {name}",
{
"name": "world",
},
),
(
CustomUserError(message_format="Hello {name}", name="world", target=ErrorTarget.TOOL),
"Hello world",
"Hello {name}",
{
"name": "world",
},
),
(
CustomUserError(message_format="Hello {name} and {name}", name="world"),
"Hello world and world",
"Hello {name} and {name}",
{
"name": "world",
},
),
(
CustomUserError(message_format="Hello {name} and {name}", name="world"),
"Hello world and world",
"Hello {name} and {name}",
{
"name": "world",
},
),
(
CustomUserError(
message_format="Tool '{tool_name}' execution failed due to {error}",
tool_name="my tool",
error="bug",
),
"Tool 'my tool' execution failed due to bug",
"Tool '{tool_name}' execution failed due to {error}",
{
"tool_name": "my tool",
"error": "bug",
},
),
],
)
def test_message_and_format(self, ex, expected_message, expected_message_format, expected_message_parameters):
with pytest.raises(CustomUserError) as exc:
raise ex
assert exc.value.message == expected_message
assert exc.value.message_format == expected_message_format
assert exc.value.message_parameters == expected_message_parameters
@pytest.mark.parametrize(
"ex, expected_message, exepcted_target",
[
(
CustomDefaultTargetError(message="message", target=ErrorTarget.TOOL),
"message",
ErrorTarget.TOOL,
),
(
CustomDefaultTargetError(message="message"),
"message",
ErrorTarget.EXECUTOR,
),
],
)
    def test_target_and_message(self, ex, expected_message, expected_target):
with pytest.raises(CustomDefaultTargetError) as exc:
raise ex
assert exc.value.message == expected_message
        assert exc.value.target == expected_target
def test_reference_code(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
e = e.value
assert e.reference_code == ErrorTarget.TOOL.value
module = "promptflow_vectordb.tool.faiss_index_loopup"
e.module = module
assert e.reference_code == f"{ErrorTarget.TOOL.value}/{module}"
@pytest.mark.parametrize(
"func_that_raises_exception",
[
set_inner_exception_by_parameter,
set_inner_exception_by_raise_from,
],
)
def test_inner_exception(self, func_that_raises_exception):
with pytest.raises(PromptflowException) as e:
func_that_raises_exception()
inner_exception = e.value.inner_exception
assert isinstance(inner_exception, ValueError)
assert str(inner_exception) == "bad number"
assert str(e.value) == "test"
def test_tool_execution_error(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
inner_exception = e.value.inner_exception
assert isinstance(inner_exception, ZeroDivisionError)
assert str(inner_exception) == "division by zero"
assert e.value.message == "Execution failure in 'MyTool': (ZeroDivisionError) division by zero"
last_frame_info = e.value.tool_last_frame_info
assert "test_exception_utils.py" in last_frame_info.get("filename")
assert last_frame_info.get("lineno") > 0
assert last_frame_info.get("name") == "code_with_bug"
assert re.match(
r"Traceback \(most recent call last\):\n"
r' File ".*test_exception_utils.py", line .*, in code_with_bug\n'
r" 1 / 0\n"
r"(.*\n)?" # Python >= 3.11 add extra line here like a pointer.
r"ZeroDivisionError: division by zero\n",
e.value.tool_traceback,
)
def test_code_hierarchy(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
e = e.value
assert e.error_codes == ["UserError", "ToolExecutionError"]
assert ExceptionPresenter.create(e).error_code_recursed == {
"code": "UserError",
"innerError": {
"code": "ToolExecutionError",
"innerError": None,
},
}
def test_debug_info(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
e = e.value
presenter = ExceptionPresenter.create(e)
assert presenter.debug_info["type"] == "ToolExecutionError"
assert re.match(TOOL_EXCEPTION_TRACEBACK, presenter.debug_info["stackTrace"])
inner_exception = presenter.debug_info["innerException"]
assert inner_exception["type"] == "ZeroDivisionError"
assert re.match(TOOL_EXCEPTION_INNER_TRACEBACK, inner_exception["stackTrace"])
def test_additional_info(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
additional_info = ExceptionPresenter.create(e.value).to_dict().get("additionalInfo")
assert len(additional_info) == 1
info_0 = additional_info[0]
assert info_0["type"] == "ToolExecutionErrorDetails"
info_0_value = info_0["info"]
assert info_0_value.get("type") == "ZeroDivisionError"
assert info_0_value.get("message") == "division by zero"
assert re.match(r".*test_exception_utils.py", info_0_value["filename"])
assert info_0_value.get("lineno") > 0
assert info_0_value.get("name") == "code_with_bug"
assert re.match(
r"Traceback \(most recent call last\):\n"
r' File ".*test_exception_utils.py", line .*, in code_with_bug\n'
r" 1 / 0\n"
r"(.*\n)?" # Python >= 3.11 add extra line here like a pointer.
r"ZeroDivisionError: division by zero\n",
info_0_value.get("traceback"),
)
def test_additional_info_for_empty_inner_error(self):
ex = ToolExecutionError(node_name="Node1")
dct = ExceptionPresenter.create(ex).to_dict()
additional_info = dct.get("additionalInfo")
assert additional_info is None
def test_additional_info_for_empty_case(self):
with pytest.raises(UserErrorException) as e:
raise_user_error()
dct = ExceptionPresenter.create(e.value).to_dict()
additional_info = dct.get("additionalInfo")
assert additional_info is None
@pytest.mark.parametrize("include_debug_info", [True, False])
def test_to_dict_turning_on_or_off_debug_info(self, include_debug_info):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
e = e.value
result = ExceptionPresenter.create(e).to_dict(include_debug_info=include_debug_info)
if include_debug_info:
assert "debugInfo" in result
else:
assert "debugInfo" not in result
def test_to_dict(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
e = e.value
        # We do not check include_debug_info=True since the traceback is already checked in other cases
result = ExceptionPresenter.create(e).to_dict(include_debug_info=False)
        # We do not check additionalInfo since it is already checked in other cases
result.pop("additionalInfo")
assert result == {
"message": "Execution failure in 'MyTool': (ZeroDivisionError) division by zero",
"messageFormat": "Execution failure in '{node_name}'.",
"messageParameters": {"node_name": "MyTool"},
"referenceCode": "Tool",
"code": "UserError",
"innerError": {
"code": "ToolExecutionError",
"innerError": None,
},
}
def test_to_dict_object_parameter(self):
with pytest.raises(PromptflowException) as e:
raise_exception_with_object()
e = e.value
        # We do not check include_debug_info=True since the traceback is already checked in other cases
result = ExceptionPresenter.create(e).to_dict(include_debug_info=False)
# Assert message is str(exception)
assert result == {
"message": "exception message",
"messageFormat": "{inner_exception}",
"messageParameters": {"inner_exception": "exception message"},
"referenceCode": "Unknown",
"code": "SystemError",
"innerError": None,
}
@pytest.mark.parametrize("include_debug_info", [True, False])
def test_to_dict_for_JsonSerializedPromptflowException(self, include_debug_info):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
exception_dict = ExceptionPresenter.create(e.value).to_dict(include_debug_info=True)
message = json.dumps(exception_dict)
exception = JsonSerializedPromptflowException(message=message)
assert str(exception) == message
json_serialized_exception_dict = ExceptionPresenter.create(exception).to_dict(
include_debug_info=include_debug_info
)
error_dict = exception.to_dict(include_debug_info=include_debug_info)
assert error_dict == json_serialized_exception_dict
if include_debug_info:
assert "debugInfo" in error_dict
error_dict.pop("debugInfo")
error_dict.pop("additionalInfo")
assert error_dict == {
"code": "UserError",
"message": "Execution failure in 'MyTool': (ZeroDivisionError) division by zero",
"messageFormat": "Execution failure in '{node_name}'.",
"messageParameters": {"node_name": "MyTool"},
"referenceCode": "Tool",
"innerError": {
"code": "ToolExecutionError",
"innerError": None,
},
}
def test_remove_suffix(self):
assert remove_suffix('PackageToolNotFoundError.', '.') == 'PackageToolNotFoundError'
assert remove_suffix('PackageToolNotFoundError', 'Error') == 'PackageToolNotFound'
assert remove_suffix('PackageToolNotFoundError', 'PackageToolNotFoundError') == ''
assert remove_suffix('PackageToolNotFoundError', 'NonExistedSuffix') == 'PackageToolNotFoundError'
assert remove_suffix('PackageToolNotFoundError', '') == 'PackageToolNotFoundError'
assert remove_suffix('PackageToolNotFoundError', None) == 'PackageToolNotFoundError'
assert remove_suffix('', 'NonExistedSuffix') == ''
assert remove_suffix(None, 'NonExistedSuffix') is None
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_dataclass_serializer.py | import pytest
from datetime import datetime
from dataclasses import dataclass
from typing import Dict, List
from promptflow._core.generator_proxy import GeneratorProxy
from promptflow._utils.dataclass_serializer import \
get_type, serialize, deserialize_dataclass, deserialize_value, assertEqual
from promptflow.contracts.run_info import RunInfo, Status
from promptflow._core.connection_manager import ConnectionManager
from promptflow.storage.run_records import NodeRunRecord
from unittest.mock import patch, Mock
import sys
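# Connection dict with placeholder secrets, used to build a ConnectionManager in the serialization tests.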
def get_connection_dict():
return {
"azure_open_ai_connection": {
"type": "AzureOpenAIConnection",
"value": {
"api_key": "<azure-openai-key>",
"api_base": "<aoai-api-endpoint>",
"api_type": "azure",
"api_version": "2023-07-01-preview",
},
},
"custom_connection": {
"type": "CustomConnection",
"value": {
"api_key": "<your-key>",
"url": "<connection-endpoint>",
},
"module": "promptflow.connections",
"secret_keys": ["api_key"],
},
}
@pytest.mark.unittest
@pytest.mark.parametrize(
"type_input, expected",
[
(NodeRunRecord, NodeRunRecord),
([NodeRunRecord], List[NodeRunRecord]),
(dict(a=NodeRunRecord), Dict[str, NodeRunRecord]),
(int, int),
(str, str),
]
)
def test_get_type(type_input, expected):
assert get_type(type_input) == expected
@pytest.mark.unittest
def test_serialize_dataclass():
start_time = datetime(2023, 9, 4)
end_time = datetime(2023, 9, 4)
node_run_info = RunInfo(
node=None,
run_id=None,
flow_run_id=None,
status=Status.Completed,
inputs=None,
output=None,
metrics=None,
error=None,
parent_run_id=None,
start_time=start_time,
end_time=end_time,
index=0,
)
node_record = NodeRunRecord.from_run_info(node_run_info)
serialized_info = serialize(node_run_info)
serialized_record = serialize(node_record)
# test dataclass without serialize attribute
assert serialized_info['status'] == "Completed"
assert serialized_info['start_time'] == "2023-09-04T00:00:00Z"
assert deserialize_value(serialized_info, RunInfo) == node_run_info
# test dataclass with serialize attribute
assert serialized_record == node_record.serialize()
@pytest.mark.unittest
@pytest.mark.parametrize(
"value, value_type, expected",
[
(datetime(2023, 9, 4), datetime, "2023-09-04T00:00:00Z"),
(Status.Completed, Status, "Completed"),
([1, 2, 3], List[int], [1, 2, 3]),
({"a": 1, "b": 2}, Dict[str, int], {"a": 1, "b": 2}),
(1, int, 1),
("a", str, "a"),
]
)
def test_serialize_value(value, value_type, expected):
assert serialize(value) == expected
assert deserialize_value(serialize(value), value_type) == value
@pytest.mark.unittest
def test_serialize_remove_null():
value = {"a": 1, "b": None}
value_type = Dict[str, int]
assert deserialize_value(serialize(value, remove_null=True), value_type) == {"a": 1, "b": None}
@dataclass
class DummyDataClass:
name: str
age: int
assert serialize(DummyDataClass("Dummy", None), remove_null=True) == {'name': 'Dummy'}
@pytest.mark.unittest
def test_serialize_connection():
new_connection = get_connection_dict()
connection_manager = ConnectionManager(new_connection)
assert serialize(connection_manager.get("azure_open_ai_connection")) == "azure_open_ai_connection"
@pytest.mark.unittest
def test_serialize_generator():
def generator():
for i in range(3):
yield i
g = GeneratorProxy(generator())
next(g)
assert serialize(g) == [0]
@pytest.mark.unittest
@patch.dict('sys.modules', {'pydantic': None})
def test_import_pydantic_error():
# mock pydantic is not installed
class DummyClass:
def __init__(self, name, age):
self.name = name
self.age = age
dummy = DummyClass('Test', 20)
assert serialize(dummy) == dummy
@pytest.mark.unittest
@patch.dict('sys.modules', {'pydantic': Mock()})
def test_import_pydantic():
# mock pydantic is installed
class MockBaseModel:
def dict(self):
return {"key": "value"}
mock_value = MockBaseModel()
sys.modules['pydantic'].BaseModel = MockBaseModel
assert serialize(mock_value) == mock_value.dict()
assert serialize(123) == 123
@pytest.mark.unittest
def test_deserialize_dataclass():
# test when cls is not dataclass
with pytest.raises(ValueError):
deserialize_dataclass(int, 1)
# test when data is not a dict
with pytest.raises(ValueError):
deserialize_dataclass(NodeRunRecord, "NodeRunRecord")
@dataclass
class DummyDataClassWithDefault:
name: str = "Default Name"
age: int = 0
# test deserialize dataclass with default value
data = {"age": 25}
obj = deserialize_dataclass(DummyDataClassWithDefault, data)
assert obj.name == "Default Name"
assert obj.age == 25
@pytest.mark.unittest
@pytest.mark.parametrize(
"a, b, expected",
[
(1, 2, 1),
(Status.Completed, Status, Status.Completed),
(None, datetime, None),
("2022-01-01T00:00:00", datetime, datetime.fromisoformat("2022-01-01T00:00:00")),
]
)
def test_deserialize_value(a, b, expected):
assert deserialize_value(a, b) == expected
@pytest.mark.unittest
@pytest.mark.parametrize(
"a, b, path, are_equal",
[
# Test with identical dicts
({'key1': 'value1', 'key2': 'value2'}, {'key1': 'value1', 'key2': 'value2'}, \
"unittests/_utils/test_dataclass_serializer", True),
# Test with non-identical dicts
({'key1': 'value1', 'key2': 'value2'}, {'key1': 'value1', 'key3': 'value3'}, \
"unittests/_utils/test_dataclass_serializer", False),
# Test with identical lists
(['item1', 'item2'], ['item1', 'item2'], "", True),
# Test with non-identical lists
(['item1', 'item2'], ['item1', 'item3'], "", False),
# Test with other types
(1, 1, "", True),
(1, 2, "", False),
('string', 'string', "", True),
('string1', 'string2', "", False),
]
)
def test_assertEqual(a, b, path, are_equal):
if are_equal:
assertEqual(a, b, path)
else:
with pytest.raises(AssertionError):
assertEqual(a, b, path)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_generate_tool_meta_utils.py | import os
import re
import sys
from multiprocessing import Pool
from pathlib import Path
from unittest.mock import patch
import pytest
from promptflow._core.tool_meta_generator import (
JinjaParsingError,
MultipleToolsDefined,
NoToolDefined,
PythonLoadError,
PythonParsingError,
generate_prompt_meta,
generate_python_meta,
generate_tool_meta_dict_by_file,
)
from promptflow._utils.exception_utils import ExceptionPresenter
from ...utils import FLOW_ROOT, load_json
TEST_ROOT = Path(__file__).parent.parent.parent.parent
TOOLS_ROOT = TEST_ROOT / "test_configs/wrong_tools"
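# The helpers below change the working directory (and optionally patch internals) before generating tool meta, returning either the meta dict or a formatted error string.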
def cd_and_run(working_dir, source_path, tool_type):
os.chdir(working_dir)
sys.path.insert(0, working_dir)
try:
return generate_tool_meta_dict_by_file(source_path, tool_type)
except Exception as e:
return f"({e.__class__.__name__}) {e}"
def cd_and_run_with_read_text_error(working_dir, source_path, tool_type):
def mock_read_text_error(self: Path, *args, **kwargs):
raise Exception("Mock read text error.")
os.chdir(working_dir)
sys.path.insert(0, working_dir)
try:
with patch("promptflow._core.tool_meta_generator.Path.read_text", new=mock_read_text_error):
return generate_tool_meta_dict_by_file(source_path, tool_type)
except Exception as e:
return f"({e.__class__.__name__}) {e}"
def cd_and_run_with_bad_function_interface(working_dir, source_path, tool_type):
def mock_function_to_interface(*args, **kwargs):
raise Exception("Mock function to interface error.")
os.chdir(working_dir)
sys.path.insert(0, working_dir)
try:
with patch("promptflow._core.tool_meta_generator.function_to_interface", new=mock_function_to_interface):
return generate_tool_meta_dict_by_file(source_path, tool_type)
except Exception as e:
return f"({e.__class__.__name__}) {e}"
def generate_tool_meta_dict_by_file_with_cd(wd, tool_path, tool_type, func):
with Pool(1) as pool:
return pool.apply(func, (wd, tool_path, tool_type))
@pytest.mark.unittest
class TestToolMetaUtils:
@pytest.mark.parametrize(
"flow_dir, tool_path, tool_type",
[
("prompt_tools", "summarize_text_content_prompt.jinja2", "prompt"),
("prompt_tools", "summarize_text_content_prompt.jinja2", "llm"),
("script_with_import", "dummy_utils/main.py", "python"),
("script_with___file__", "script_with___file__.py", "python"),
("script_with_special_character", "script_with_special_character.py", "python"),
],
)
def test_generate_tool_meta_dict_by_file(self, flow_dir, tool_path, tool_type):
wd = str((FLOW_ROOT / flow_dir).resolve())
meta_dict = generate_tool_meta_dict_by_file_with_cd(wd, tool_path, tool_type, cd_and_run)
assert isinstance(meta_dict, dict), "Call cd_and_run failed:\n" + meta_dict
target_file = (Path(wd) / tool_path).with_suffix(".meta.json")
expected_dict = load_json(target_file)
if tool_type == "llm":
expected_dict["type"] = "llm" # We use prompt as default for jinja2
assert meta_dict == expected_dict
@pytest.mark.parametrize(
"flow_dir, tool_path, tool_type, func, msg_pattern",
[
pytest.param(
"prompt_tools",
"summarize_text_content_prompt.jinja2",
"python",
cd_and_run,
r"\(PythonLoaderNotFound\) Failed to load python file '.*summarize_text_content_prompt.jinja2'. "
r"Please make sure it is a valid .py file.",
id="PythonLoaderNotFound",
),
pytest.param(
"script_with_import",
"fail.py",
"python",
cd_and_run,
r"\(PythonLoadError\) Failed to load python module from file '.*fail.py': "
r"\(ModuleNotFoundError\) No module named 'aaa'",
id="PythonLoadError",
),
pytest.param(
"simple_flow_with_python_tool",
"divide_num.py",
"python",
cd_and_run_with_bad_function_interface,
r"\(BadFunctionInterface\) Parse interface for tool 'divide_num' failed: "
r"\(Exception\) Mock function to interface error.",
id="BadFunctionInterface",
),
pytest.param(
"script_with_import",
"aaa.py",
"python",
cd_and_run,
r"\(MetaFileNotFound\) Generate tool meta failed for python tool. "
r"Meta file 'aaa.py' can not be found.",
id="MetaFileNotFound",
),
pytest.param(
"simple_flow_with_python_tool",
"divide_num.py",
"python",
cd_and_run_with_read_text_error,
r"\(MetaFileReadError\) Generate tool meta failed for python tool. "
r"Read meta file 'divide_num.py' failed: \(Exception\) Mock read text error.",
id="MetaFileReadError",
),
pytest.param(
"simple_flow_with_python_tool",
"divide_num.py",
"action",
cd_and_run,
r"\(NotSupported\) Generate tool meta failed. The type 'action' is currently unsupported. "
r"Please choose from available types: python,llm,prompt and try again.",
id="NotSupported",
),
],
)
def test_generate_tool_meta_dict_by_file_exception(self, flow_dir, tool_path, tool_type, func, msg_pattern):
wd = str((FLOW_ROOT / flow_dir).resolve())
ret = generate_tool_meta_dict_by_file_with_cd(wd, tool_path, tool_type, func)
assert isinstance(ret, str), "Call cd_and_run should fail but succeeded:\n" + str(ret)
assert re.match(msg_pattern, ret)
@pytest.mark.parametrize(
"content, error_code, message",
[
pytest.param(
"zzz",
PythonParsingError,
"Failed to load python module. Python parsing failed: (NameError) name 'zzz' is not defined",
id="PythonParsingError_NameError",
),
pytest.param(
"# Nothing",
NoToolDefined,
"No tool found in the python script. "
"Please make sure you have one and only one tool definition in your script.",
id="NoToolDefined",
),
pytest.param(
"multiple_tools.py",
MultipleToolsDefined,
"Expected 1 but collected 2 tools: tool1, tool2. "
"Please make sure you have one and only one tool definition in your script.",
id="MultipleToolsDefined",
),
pytest.param(
"{% zzz",
PythonParsingError,
"Failed to load python module. Python parsing failed: "
"(SyntaxError) invalid syntax (<string>, line 1)",
id="PythonParsingError_SyntaxError",
),
],
)
def test_custom_python_meta(self, content, error_code, message) -> None:
if content.endswith(".py"):
source = TOOLS_ROOT / content
with open(source, "r") as f:
code = f.read()
else:
code = content
source = None
with pytest.raises(error_code) as ex:
generate_python_meta("some_tool", code, source)
assert message == str(ex.value)
@pytest.mark.parametrize(
"content, error_code, message",
[
pytest.param(
"{% zzz",
JinjaParsingError,
"Generate tool meta failed for llm tool. Jinja parsing failed at line 1: "
"(TemplateSyntaxError) Encountered unknown tag 'zzz'.",
id="JinjaParsingError_Code",
),
pytest.param(
"no_end.jinja2",
JinjaParsingError,
"Generate tool meta failed for llm tool. Jinja parsing failed at line 2: "
"(TemplateSyntaxError) Unexpected end of template. Jinja was looking for the following tags: "
"'endfor' or 'else'. The innermost block that needs to be closed is 'for'.",
id="JinjaParsingError_File",
),
],
)
def test_custom_llm_meta(self, content, error_code, message) -> None:
if content.endswith(".jinja2"):
with open(TOOLS_ROOT / content, "r") as f:
code = f.read()
else:
code = content
with pytest.raises(error_code) as ex:
generate_prompt_meta("some_tool", code)
assert message == str(ex.value)
@pytest.mark.parametrize(
"content, error_code, message",
[
pytest.param(
"{% zzz",
JinjaParsingError,
"Generate tool meta failed for prompt tool. Jinja parsing failed at line 1: "
"(TemplateSyntaxError) Encountered unknown tag 'zzz'.",
id="JinjaParsingError_Code",
),
pytest.param(
"no_end.jinja2",
JinjaParsingError,
"Generate tool meta failed for prompt tool. Jinja parsing failed at line 2: "
"(TemplateSyntaxError) Unexpected end of template. Jinja was looking for the following tags: "
"'endfor' or 'else'. The innermost block that needs to be closed is 'for'.",
id="JinjaParsingError_File",
),
],
)
def test_custom_prompt_meta(self, content, error_code, message) -> None:
if content.endswith(".jinja2"):
with open(TOOLS_ROOT / content, "r") as f:
code = f.read()
else:
code = content
with pytest.raises(error_code) as ex:
generate_prompt_meta("some_tool", code, prompt_only=True)
assert message == str(ex.value)
@pytest.mark.unittest
class TestPythonLoadError:
def test_additional_info(self):
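        # Importing a module that raises ZeroDivisionError should surface a UserCodeStackTrace entry in additionalInfo.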
source = TOOLS_ROOT / "load_error.py"
with open(source, "r") as f:
code = f.read()
with pytest.raises(PythonLoadError) as ex:
generate_python_meta("some_tool", code, source)
additional_info = ExceptionPresenter.create(ex.value).to_dict().get("additionalInfo")
assert len(additional_info) == 1
info_0 = additional_info[0]
assert info_0["type"] == "UserCodeStackTrace"
info_0_value = info_0["info"]
assert info_0_value.get("type") == "ZeroDivisionError"
assert info_0_value.get("message") == "division by zero"
assert re.match(r".*load_error.py", info_0_value["filename"])
assert info_0_value.get("lineno") == 3
assert info_0_value.get("name") == "<module>"
assert re.search(
r"Traceback \(most recent call last\):\n"
r' File ".*load_error.py", line .*, in <module>\n'
r" 1 / 0\n"
r"(.*\n)?" # Python >= 3.11 add extra line here like a pointer.
r"ZeroDivisionError: division by zero\n",
info_0_value.get("traceback"),
)
def test_additional_info_for_empty_inner_error(self):
ex = PythonLoadError(message_format="Test empty error")
additional_info = ExceptionPresenter.create(ex).to_dict().get("additionalInfo")
assert additional_info is None
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_thread_utils.py | import re
import sys
import time
from io import StringIO
from logging import WARNING, Logger, StreamHandler
import pytest
from promptflow._utils.thread_utils import RepeatLogTimer
from promptflow._utils.utils import generate_elapsed_time_messages
class DummyException(Exception):
pass
@pytest.mark.skipif(sys.platform == "darwin", reason="Skip on Mac")
@pytest.mark.unittest
class TestRepeatLogTimer:
def test_context_manager(self):
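        # RepeatLogTimer should keep logging warnings while the block runs; each message must match the expected pattern.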
s = StringIO()
logger = Logger("test_repeat_log_timer")
logger.addHandler(StreamHandler(s))
interval_seconds = 1
start_time = time.perf_counter()
with RepeatLogTimer(
interval_seconds=interval_seconds,
logger=logger,
level=WARNING,
log_message_function=generate_elapsed_time_messages,
args=("Test", start_time, interval_seconds, None),
):
time.sleep(10.5)
logs = s.getvalue().split("\n")
logs = [log for log in logs if log]
log_pattern = re.compile(
r"^Test has been running for [0-9]+ seconds, thread None cannot be found in sys._current_frames, "
r"maybe it has been terminated due to unexpected errors.$"
)
assert logs, "Logs are empty."
for log in logs:
assert re.match(log_pattern, log), f"The wrong log: {log}"
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_connection_utils.py | import pytest
from promptflow._sdk.entities import CustomStrongTypeConnection
from promptflow._utils.connection_utils import (
generate_custom_strong_type_connection_spec,
generate_custom_strong_type_connection_template,
)
from promptflow.contracts.types import Secret
class MyCustomConnectionWithNoComments(CustomStrongTypeConnection):
api_key: Secret
api_base: str
class MyCustomConnectionWithDefaultValue(CustomStrongTypeConnection):
api_key: Secret
api_base: str = "default value of api-base"
class MyCustomConnectionWithInvalidComments(CustomStrongTypeConnection):
"""My custom connection with invalid comments.
:param api_key: The api key.
:type api_key: String
:param api_base: The api base.
:type api_base: String
:param api_key_2: The api key 2.
:type api_key_2: String
"""
api_key: Secret
api_base: str
class MyCustomConnectionMissingTypeComments(CustomStrongTypeConnection):
"""My custom connection with missing type comments.
:param api_key: The api key.
"""
api_key: Secret
api_base: str
class MyCustomConnectionMissingParamComments(CustomStrongTypeConnection):
"""My custom connection with missing param comments.
:type api_key: String
"""
api_key: Secret
api_base: str
@pytest.mark.unittest
class TestConnectionUtils:
@pytest.mark.parametrize(
"cls, expected_str_in_template",
[
(
MyCustomConnectionWithNoComments,
['api_base: "to_replace_with_api_base"\n', 'api_key: "to_replace_with_api_key"\n'],
),
(
MyCustomConnectionWithInvalidComments,
[
'api_base: "to_replace_with_api_base" # String type. The api base.\n',
'api_key: "to_replace_with_api_key" # String type. The api key.\n',
],
),
(MyCustomConnectionMissingTypeComments, ['api_key: "to_replace_with_api_key" # The api key.']),
(MyCustomConnectionMissingParamComments, ['api_key: "to_replace_with_api_key" # String type.']),
],
)
def test_generate_custom_strong_type_connection_template_with_comments(self, cls, expected_str_in_template):
package = "test-package"
package_version = "0.0.1"
spec = generate_custom_strong_type_connection_spec(cls, package, package_version)
template = generate_custom_strong_type_connection_template(cls, spec, package, package_version)
for comment in expected_str_in_template:
assert comment in template
def test_generate_custom_strong_type_connection_template_with_default_value(self):
package = "test-package"
package_version = "0.0.1"
        spec = generate_custom_strong_type_connection_spec(
            MyCustomConnectionWithDefaultValue, package, package_version
        )
template = generate_custom_strong_type_connection_template(
MyCustomConnectionWithDefaultValue, spec, package, package_version
)
assert 'api_base: "default value of api-base"' in template
@pytest.mark.parametrize(
"input_value, expected_connection_names",
[
pytest.param(
"new_ai_connection",
["new_ai_connection"],
id="standard",
),
pytest.param(
"${node.output}",
[],
id="output_reference",
),
pytest.param(
"${inputs.question}",
[],
id="input_reference",
),
],
)
def test_get_used_connection_names_from_flow_meta(self, input_value: str, expected_connection_names: list):
from promptflow._sdk._submitter.utils import SubmitterHelper
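        # Only literal connection values are collected; node output and flow input references are ignored.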
connection_names = SubmitterHelper.get_used_connection_names(
{
"package": {
"(Promptflow.Tools)Promptflow.Tools.BuiltInTools.AOAI.Chat": {
"name": "Promptflow.Tools.BuiltInTools.AOAI.Chat",
"type": "csharp",
"inputs": {
"connection": {"type": ["AzureOpenAIConnection"]},
"prompt": {"type": ["string"]},
"deployment_name": {"type": ["string"]},
"objects": {"type": ["object"]},
},
"description": "",
"class_name": "AOAI",
"module": "Promptflow.Tools.BuiltInTools.AOAI",
"function": "Chat",
"is_builtin": True,
"package": "Promptflow.Tools",
"package_version": "0.0.14.0",
"toolId": "(Promptflow.Tools)Promptflow.Tools.BuiltInTools.AOAI.Chat",
},
},
"code": {},
},
{
"nodes": [
{
"name": "get_summarized_text_content",
"type": "csharp",
"source": {
"type": "package",
"tool": "(Promptflow.Tools)Promptflow.Tools.BuiltInTools.AOAI.Chat",
},
"inputs": {
"connection": input_value,
},
},
]
},
)
assert connection_names == expected_connection_names
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_credential_scrubber.py | import pytest
from promptflow._utils.credential_scrubber import CredentialScrubber
def mock_connection_string():
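    # Returns (raw connection string, expected result with AccountKey replaced by the scrubber placeholder).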
connection_str_before_key = "DefaultEndpointsProtocol=https;AccountName=accountName;"
connection_str_after_key = "EndpointSuffix=core.windows.net"
return (
f"{connection_str_before_key}AccountKey=accountKey;{connection_str_after_key}",
f"{connection_str_before_key}AccountKey={CredentialScrubber.PLACE_HOLDER};{connection_str_after_key}",
)
def mock_sas_uri():
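    # Returns (raw SAS URI, expected result with the sig value replaced by the scrubber placeholder).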
uri_without_signature = "https://bloburi/containerName/file.txt?sv=2021-10-04&se=2023-05-17&sr=b&sp=rw"
return (f"{uri_without_signature}&sig=signature", f"{uri_without_signature}&sig={CredentialScrubber.PLACE_HOLDER}")
@pytest.mark.unittest
class TestCredentialScrubber:
    def test_scrub_signature_in_sas_uri(self):
input_str, ground_truth = mock_sas_uri()
assert CredentialScrubber().scrub(input_str) == ground_truth
def test_scrub_key_in_connection_string(self):
input_str, ground_truth = mock_connection_string()
output = CredentialScrubber().scrub(input_str)
assert output == ground_truth
def test_add_regex(self):
scrubber = CredentialScrubber()
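        # The lookbehind pattern matches only the value that follows "credential=", so just the secret is scrubbed.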
scrubber.add_regex(r"(?<=credential=)[^\s;&]+")
assert scrubber.scrub("test&credential=credential") == f"test&credential={CredentialScrubber.PLACE_HOLDER}"
def test_add_str(self):
scrubber = CredentialScrubber()
scrubber.add_str(None)
assert len(scrubber.custom_str_set) == 0
scrubber.add_str("credential")
assert len(scrubber.custom_str_set) == 1
assert scrubber.scrub("test&secret=credential") == f"test&secret={CredentialScrubber.PLACE_HOLDER}"
def test_add_str_length_threshold(self):
"""If the secret is too short (length <= 2 chars), it will not be scrubbed."""
scrubber = CredentialScrubber()
scrubber.add_str("yy")
assert scrubber.scrub("test&secret=yy") == "test&secret=yy"
def test_normal_str_not_affected(self):
assert CredentialScrubber().scrub("no secret") == "no secret"
def test_clear(self):
scrubber = CredentialScrubber()
scrubber.add_str("credential")
scrubber.add_regex(r"(?<=credential=)[^\s;&]+")
assert len(scrubber.custom_str_set) == 1
assert len(scrubber.custom_regex_set) == 1
scrubber.clear()
assert len(scrubber.custom_str_set) == 0
assert len(scrubber.custom_regex_set) == 0
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_multimedia_utils.py | import re
from pathlib import Path
from unittest.mock import MagicMock, mock_open, patch
import pytest
from promptflow._utils._errors import InvalidImageInput, LoadMultimediaDataError
from promptflow._utils.multimedia_utils import (
_create_image_from_base64,
_create_image_from_file,
_create_image_from_url,
_process_multimedia_dict_recursively,
_process_recursively,
convert_multimedia_data_to_base64,
create_image,
load_multimedia_data,
persist_multimedia_data,
resolve_multimedia_data_recursively,
)
from promptflow.contracts.flow import FlowInputDefinition
from promptflow.contracts.multimedia import Image
from promptflow.contracts.tool import ValueType
from ...utils import DATA_ROOT
TEST_IMAGE_PATH = DATA_ROOT / "logo.jpg"
@pytest.mark.unittest
class TestMultimediaUtils:
@pytest.mark.parametrize("image_path", ["logo.jpg", "logo.png", "logo.webp", "logo.gif"])
def test_create_image_from_base64(self, image_path):
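        # Round-trip file -> Image -> base64 -> Image should preserve the content and infer the correct mime type.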
image = _create_image_from_file(DATA_ROOT / image_path)
base64_str = image.to_base64()
image_from_base64 = _create_image_from_base64(base64_str)
assert str(image) == str(image_from_base64)
format = image_path.split(".")[-1]
mime_type = f"image/{format}" if format != "jpg" else "image/jpeg"
assert mime_type == image_from_base64._mime_type
@patch("requests.get")
def test_create_image_from_url_with_mime_type(self, mock_get):
url = "https://example.com/image.jpg"
content = b"image content"
mime_type = "image/jpeg"
mock_get.return_value = MagicMock(status_code=200, content=content)
image = _create_image_from_url(url, mime_type)
assert isinstance(image, Image)
assert image._mime_type == mime_type
assert image.source_url == url
@patch("requests.get")
def test_create_image_from_url_failure(self, mock_get):
url = "https://example.com/image.jpg"
message = "Failed to fetch image"
code = 404
mock_get.return_value = MagicMock(status_code=code, text=message)
with pytest.raises(InvalidImageInput) as ex:
_create_image_from_url(url)
expected_message = f"Failed to fetch image from URL: {url}. Error code: {code}. Error message: {message}."
assert str(ex.value) == expected_message
def test_create_image_with_dict(self, mocker):
## From path
image_dict = {"data:image/jpg;path": TEST_IMAGE_PATH}
image_from_path = create_image(image_dict)
assert image_from_path._mime_type == "image/jpg"
## From base64
image_dict = {"data:image/jpg;base64": image_from_path.to_base64()}
image_from_base64 = create_image(image_dict)
assert str(image_from_path) == str(image_from_base64)
assert image_from_base64._mime_type == "image/jpg"
## From url
mocker.patch("requests.get", return_value=mocker.Mock(content=image_from_path, status_code=200))
image_dict = {"data:image/jpg;url": ""}
image_from_url = create_image(image_dict)
assert str(image_from_path) == str(image_from_url)
assert image_from_url._mime_type == "image/jpg"
mocker.patch("requests.get", return_value=mocker.Mock(content=None, status_code=404))
with pytest.raises(InvalidImageInput) as ex:
create_image(image_dict)
assert "Failed to fetch image from URL" in ex.value.message_format
def test_create_image_with_string(self, mocker):
## From path
image_from_path = create_image(str(TEST_IMAGE_PATH))
assert image_from_path._mime_type == "image/jpeg"
        ## From base64
image_from_base64 = create_image(image_from_path.to_base64())
assert str(image_from_path) == str(image_from_base64)
assert image_from_base64._mime_type == "image/jpeg"
## From url
mocker.patch("promptflow._utils.multimedia_utils._is_url", return_value=True)
mocker.patch("promptflow._utils.multimedia_utils._is_base64", return_value=False)
mocker.patch("requests.get", return_value=mocker.Mock(content=image_from_path, status_code=200))
image_from_url = create_image("Test")
assert str(image_from_path) == str(image_from_url)
assert image_from_url._mime_type == "image/jpeg"
## From image
image_from_image = create_image(image_from_path)
assert str(image_from_path) == str(image_from_image)
def test_create_image_with_invalid_cases(self):
# Test invalid input type
with pytest.raises(InvalidImageInput) as ex:
create_image(0)
assert "Unsupported image input type" in ex.value.message_format
# Test invalid image dict
with pytest.raises(InvalidImageInput) as ex:
invalid_image_dict = {"invalid_image": "invalid_image"}
create_image(invalid_image_dict)
assert "Invalid image input format" in ex.value.message_format
# Test none or empty input value
with pytest.raises(InvalidImageInput) as ex:
create_image(None)
assert "Unsupported image input type" in ex.value.message_format
with pytest.raises(InvalidImageInput) as ex:
create_image("")
assert "The image input should not be empty." in ex.value.message_format
    def test_persist_multimedia_data(self, mocker):
image = _create_image_from_file(TEST_IMAGE_PATH)
mocker.patch("builtins.open", mock_open())
data = {"image": image, "images": [image, image, "other_data"], "other_data": "other_data"}
persisted_data = persist_multimedia_data(data, base_dir=Path(__file__).parent)
file_name = re.compile(r"^[0-9a-z]{8}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{12}.jpeg$")
assert re.match(file_name, persisted_data["image"]["data:image/jpeg;path"])
assert re.match(file_name, persisted_data["images"][0]["data:image/jpeg;path"])
assert re.match(file_name, persisted_data["images"][1]["data:image/jpeg;path"])
    def test_convert_multimedia_data_to_base64(self):
image = _create_image_from_file(TEST_IMAGE_PATH)
data = {"image": image, "images": [image, image, "other_data"], "other_data": "other_data"}
base64_data = convert_multimedia_data_to_base64(data)
assert base64_data == {
"image": image.to_base64(),
"images": [image.to_base64(), image.to_base64(), "other_data"],
"other_data": "other_data",
}
base64_data = convert_multimedia_data_to_base64(data, with_type=True)
prefix = f"data:{image._mime_type};base64,"
assert base64_data == {
"image": prefix + image.to_base64(),
"images": [prefix + image.to_base64(), prefix + image.to_base64(), "other_data"],
"other_data": "other_data",
}
def test_load_multimedia_data(self):
# Case 1: Test normal node
inputs = {
"image": FlowInputDefinition(type=ValueType.IMAGE),
"images": FlowInputDefinition(type=ValueType.LIST),
"object": FlowInputDefinition(type=ValueType.OBJECT),
}
image_dict = {"data:image/jpg;path": str(TEST_IMAGE_PATH)}
line_inputs = {
"image": image_dict,
"images": [image_dict, image_dict],
"object": {"image": image_dict, "other_data": "other_data"},
}
updated_inputs = load_multimedia_data(inputs, line_inputs)
image = _create_image_from_file(TEST_IMAGE_PATH)
assert updated_inputs == {
"image": image,
"images": [image, image],
"object": {"image": image, "other_data": "other_data"},
}
# Case 2: Test aggregation node
line_inputs = {
"image": [image_dict, image_dict],
"images": [[image_dict, image_dict], [image_dict]],
"object": [{"image": image_dict, "other_data": "other_data"}, {"other_data": "other_data"}],
}
updated_inputs = load_multimedia_data(inputs, line_inputs)
assert updated_inputs == {
"image": [image, image],
"images": [[image, image], [image]],
"object": [{"image": image, "other_data": "other_data"}, {"other_data": "other_data"}],
}
# Case 3: Test invalid input type
with pytest.raises(LoadMultimediaDataError) as ex:
line_inputs = {"image": 0}
load_multimedia_data(inputs, line_inputs)
        assert (
            "Failed to load image for input 'image': "
            "(InvalidImageInput) Unsupported image input type"
        ) in ex.value.message
def test_resolve_multimedia_data_recursively(self):
image_dict = {"data:image/jpg;path": "logo.jpg"}
value = {
"image": image_dict,
"images": [image_dict, image_dict],
"object": {"image": image_dict, "other_data": "other_data"},
}
input_dir = TEST_IMAGE_PATH
updated_value = resolve_multimedia_data_recursively(input_dir, value)
updated_image_dict = {"data:image/jpg;path": str(DATA_ROOT / "logo.jpg")}
assert updated_value == {
"image": updated_image_dict,
"images": [updated_image_dict, updated_image_dict],
"object": {"image": updated_image_dict, "other_data": "other_data"},
}
def test_process_recursively(self):
image = _create_image_from_file(TEST_IMAGE_PATH)
value = {"image": image, "images": [image, image], "object": {"image": image, "other_data": "other_data"}}
process_funcs = {Image: lambda x: str(x)}
updated_value = _process_recursively(value, process_funcs)
image_str = str(image)
assert updated_value == {
"image": image_str,
"images": [image_str, image_str],
"object": {"image": image_str, "other_data": "other_data"},
}
assert value != updated_value
def test_process_recursively_inplace(self):
image = _create_image_from_file(TEST_IMAGE_PATH)
value = {"image": image, "images": [image, image], "object": {"image": image, "other_data": "other_data"}}
process_funcs = {Image: lambda x: str(x)}
_process_recursively(value, process_funcs, inplace=True)
image_str = str(image)
assert value == {
"image": image_str,
"images": [image_str, image_str],
"object": {"image": image_str, "other_data": "other_data"},
}
def test_process_multimedia_dict_recursively(self):
def process_func(image_dict):
return "image_placeholder"
image_dict = {"data:image/jpg;path": "logo.jpg"}
value = {
"image": image_dict,
"images": [image_dict, image_dict],
"object": {"image": image_dict, "other_data": "other_data"},
}
updated_value = _process_multimedia_dict_recursively(value, process_func)
assert updated_value == {
"image": "image_placeholder",
"images": ["image_placeholder", "image_placeholder"],
"object": {"image": "image_placeholder", "other_data": "other_data"},
}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_multimedia_data_converter.py | from pathlib import Path
from unittest.mock import Mock
import pytest
from promptflow._utils.multimedia_data_converter import (
AbstractMultimediaInfoConverter,
MultimediaConverter,
MultimediaFormatAdapter20231201,
MultimediaInfo,
ResourceType,
)
@pytest.mark.unittest
class TestMultimediaConverter:
def test_convert_content_recursively(self):
converter = MultimediaConverter(Path("flow.yaml"))
# Don't convert anything.
content = {
"image": {"data:image/jpg;url": "https://example.com/logo.jpg"},
"images": [
{"data:image/jpg;url": "https://example.com/logo.jpg"},
{"data:image/jpg;base64": "base64 string"},
],
"object": {"image": {"data:image/png;path": "random_path"}, "other_data": "other_data"},
}
mock_converter = Mock(spec=AbstractMultimediaInfoConverter)
mock_converter.convert.side_effect = lambda x: x
result = converter.convert_content_recursively(content, mock_converter)
assert result == content
# Convert all valid images.
mock_converter.convert.side_effect = lambda x: MultimediaInfo("image/jpg", ResourceType("path"), "logo.jpg")
result = converter.convert_content_recursively(content, mock_converter)
expected_result = {
"image": {"data:image/jpg;path": "logo.jpg"},
"images": [
{"data:image/jpg;path": "logo.jpg"},
{"data:image/jpg;path": "logo.jpg"},
],
"object": {"image": {"data:image/jpg;path": "logo.jpg"}, "other_data": "other_data"},
}
assert result == expected_result
@pytest.mark.unittest
class TestMultimediaFormatAdapter20231201:
def test_is_valid_format(self):
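        # Only image data dicts are considered valid by this adapter; audio and video dicts are rejected.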
adapter = MultimediaFormatAdapter20231201()
assert adapter.is_valid_format({"data:image/jpg;path": "logo.jpg"})
assert adapter.is_valid_format({"data:image/jpg;url": "https://example.com/logo.jpg"})
assert not adapter.is_valid_format({"data:audio/mp3;path": "audio.mp3"})
assert not adapter.is_valid_format({"data:video/mp4;url": "https://example.com/video.mp4"})
def test_extract_info(self):
adapter = MultimediaFormatAdapter20231201()
# Valid formats
expected_result = MultimediaInfo("image/jpg", ResourceType.PATH, "random_path")
assert adapter.extract_info({"data:image/jpg;path": "random_path"}) == expected_result
expected_result = MultimediaInfo("image/jpg", ResourceType.URL, "random_url")
assert adapter.extract_info({"data:image/jpg;url": "random_url"}) == expected_result
expected_result = MultimediaInfo("image/jpg", ResourceType.BASE64, "random_base64")
assert adapter.extract_info({"data:image/jpg;base64": "random_base64"}) == expected_result
# Invalid format
assert adapter.extract_info({"data:video/mp4;url": "https://example.com/video.mp4"}) is None
assert adapter.extract_info({"data:image/mp4;url2": "https://example.com/video.mp4"}) is None
assert adapter.extract_info({"content:image/mp4;path": "random_path"}) is None
def test_create_data(self):
adapter = MultimediaFormatAdapter20231201()
info = MultimediaInfo("image/jpg", ResourceType.PATH, "random_path")
expected_result = {"data:image/jpg;path": "random_path"}
assert adapter.create_data(info) == expected_result
info = MultimediaInfo("image/jpg", ResourceType.URL, "random_url")
expected_result = {"data:image/jpg;url": "random_url"}
assert adapter.create_data(info) == expected_result
info = MultimediaInfo("image/jpg", ResourceType.BASE64, "base64 string")
expected_result = {"data:image/jpg;base64": "base64 string"}
assert adapter.create_data(info) == expected_result
# Bad case when client provides invalid resource type.
info = MultimediaInfo("image/jpg", "path", "base64 string")
expected_result = {"data:image/jpg;base64": "base64 string"}
with pytest.raises(AttributeError):
adapter.create_data(info)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_core/test_connection_manager.py | import pytest
from promptflow._core.connection_manager import ConnectionManager
from promptflow.connections import AzureOpenAIConnection
from promptflow.contracts.tool import ConnectionType
def get_connection_dict():
return {
"azure_open_ai_connection": {
"type": "AzureOpenAIConnection",
"value": {
"api_key": "<azure-openai-key>",
"api_base": "<api-base>",
"api_type": "azure",
"api_version": "2023-07-01-preview",
},
},
"custom_connection": {
"type": "CustomConnection",
"value": {
"api_key": "<your-key>",
"url": "https://api.bing.microsoft.com/v7.0/search",
},
"module": "promptflow.connections",
"secret_keys": ["api_key"],
},
}
@pytest.mark.unittest
class TestConnectionManager:
def test_build_connections(self):
new_connection = get_connection_dict()
# Add not exist key
new_connection["azure_open_ai_connection"]["value"]["not_exist"] = "test"
connection_manager = ConnectionManager(new_connection)
assert len(connection_manager._connections) == 2
assert isinstance(connection_manager.get("azure_open_ai_connection"), AzureOpenAIConnection)
assert connection_manager.to_connections_dict() == new_connection
def test_serialize(self):
new_connection = get_connection_dict()
connection_manager = ConnectionManager(new_connection)
assert (
ConnectionType.serialize_conn(connection_manager.get("azure_open_ai_connection"))
== "azure_open_ai_connection"
)
assert ConnectionType.serialize_conn(connection_manager.get("custom_connection")) == "custom_connection"
def test_get_secret_list(self):
new_connection = get_connection_dict()
connection_manager = ConnectionManager(new_connection)
expected_list = ["<azure-openai-key>", "<your-key>"]
assert set(connection_manager.get_secret_list()) == set(expected_list)
def test_is_secret(self):
new_connection = get_connection_dict()
connection_manager = ConnectionManager(new_connection)
connection = connection_manager.get("custom_connection")
assert connection.is_secret("api_key") is True
assert connection.is_secret("url") is False
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_core/test_run_tracker.py | import pytest
from promptflow._core._errors import RunRecordNotFound
from promptflow._core.generator_proxy import GeneratorProxy
from promptflow._core.run_tracker import RunTracker
from promptflow.connections import AzureOpenAIConnection
from promptflow.contracts.run_info import Status
class UnserializableClass:
def __init__(self, data: str):
self.data = data
@pytest.mark.unittest
class TestRunTracker:
def test_run_tracker(self):
        # TODO: Refactor this test case; it is very confusing now.
# Initialize run tracker with dummy run storage
run_tracker = RunTracker.init_dummy()
# Start flow run
run_tracker.start_flow_run("test_flow_id", "test_root_run_id", "test_flow_run_id")
assert len(run_tracker._flow_runs) == 1
assert run_tracker._current_run_id == "test_flow_run_id"
flow_input = {"flow_input": "input_0"}
run_tracker.set_inputs("test_flow_run_id", flow_input)
# Start node runs
run_info = run_tracker.start_node_run("node_0", "test_root_run_id", "test_flow_run_id", "run_id_0", index=0)
run_info.index = 0
run_info = run_tracker.start_node_run("node_0", "test_root_run_id", "test_flow_run_id", "run_id_1", index=1)
run_info.index = 1
run_tracker.start_node_run("node_aggr", "test_root_run_id", "test_flow_run_id", "run_id_aggr", index=None)
assert len(run_tracker._node_runs) == 3
assert run_tracker._current_run_id == "run_id_aggr"
# Test collect_all_run_infos_as_dicts
run_tracker.allow_generator_types = True
run_tracker.set_inputs(
"run_id_0",
{"input": "input_0", "connection": AzureOpenAIConnection("api_key", "api_base")}
)
run_tracker.set_inputs(
"run_id_1",
{"input": "input_1", "generator": GeneratorProxy(item for item in range(10))}
)
run_infos = run_tracker.collect_all_run_infos_as_dicts()
assert len(run_infos["flow_runs"]) == 1
assert len(run_infos["node_runs"]) == 3
assert run_infos["node_runs"][0]["inputs"] == {"input": "input_0", "connection": "AzureOpenAIConnection"}
assert run_infos["node_runs"][1]["inputs"] == {"input": "input_1", "generator": []}
# Test end run with normal result
result = {"result": "result"}
run_info_0 = run_tracker.end_run(run_id="run_id_0", result=result)
assert run_info_0.status == Status.Completed
assert run_info_0.output == result
# Test end run with unserializable result
result = {"unserialized_value": UnserializableClass("test")}
run_info_1 = run_tracker.end_run(run_id="run_id_1", result=result)
assert run_info_1.status == Status.Completed
assert run_info_1.output == str(result)
# Test end run with invalid run id
with pytest.raises(RunRecordNotFound):
run_tracker.end_run(run_id="invalid_run_id")
# Test end run with exception
ex = Exception("Failed")
run_info_aggr = run_tracker.end_run(run_id="run_id_aggr", ex=ex)
assert run_info_aggr.status == Status.Failed
assert run_info_aggr.error["message"] == "Failed"
# Test end flow run with unserializable result
result = {"unserialized_value": UnserializableClass("test")}
run_info_flow = run_tracker.end_run(run_id="test_flow_run_id", result=result)
assert run_info_flow.status == Status.Failed
assert "The output 'unserialized_value' for flow is incorrect." in run_info_flow.error["message"]
# Test _update_flow_run_info_with_node_runs
        run_info_0.api_calls, run_info_0.system_metrics = [{"name": "chat"}], {"total_tokens": 10}
        run_info_1.api_calls, run_info_1.system_metrics = [{"name": "completion"}], {"total_tokens": 20}
        run_info_aggr.api_calls, run_info_aggr.system_metrics = [
            {"name": "chat"}, {"name": "completion"}], {"total_tokens": 30}
run_tracker._update_flow_run_info_with_node_runs(run_info_flow)
assert len(run_info_flow.api_calls) == 1, "There should be only one top level api call for flow run."
assert run_info_flow.system_metrics["total_tokens"] == 60
assert run_info_flow.api_calls[0]["name"] == "flow"
assert run_info_flow.api_calls[0]["node_name"] == "flow"
assert run_info_flow.api_calls[0]["type"] == "Flow"
assert run_info_flow.api_calls[0]["system_metrics"]["total_tokens"] == 60
assert isinstance(run_info_flow.api_calls[0]["start_time"], float)
assert isinstance(run_info_flow.api_calls[0]["end_time"], float)
assert len(run_info_flow.api_calls[0]["children"]) == 4, "There should be 4 children under root."
# Test get_status_summary
status_summary = run_tracker.get_status_summary("test_root_run_id")
assert status_summary == {
"__pf__.lines.completed": 0,
"__pf__.lines.failed": 1,
"__pf__.nodes.node_0.completed": 2,
"__pf__.nodes.node_aggr.completed": 0,
}
def test_run_tracker_flow_run_without_node_run(self):
"""When line timeout, there will be flow run info without node run info."""
# Initialize run tracker with dummy run storage
run_tracker = RunTracker.init_dummy()
# Start flow run
run_tracker.start_flow_run("test_flow_id", "test_root_run_id", "test_flow_run_id_0", index=0)
run_tracker.end_run("test_flow_run_id_0", ex=Exception("Timeout"))
run_tracker.start_flow_run("test_flow_id", "test_root_run_id", "test_flow_run_id_1", index=1)
run_tracker.end_run("test_flow_run_id_1", result={"result": "result"})
assert len(run_tracker._flow_runs) == 2
# Start node runs
run_tracker.start_node_run("node_0", "test_root_run_id", "test_flow_run_id_2", "test_node_run_id_1", index=0)
run_tracker.end_run("test_node_run_id_1", result={"result": "result"})
assert len(run_tracker._node_runs) == 1
status_summary = run_tracker.get_status_summary("test_root_run_id")
assert status_summary == {
"__pf__.lines.completed": 1,
"__pf__.lines.failed": 1,
"__pf__.nodes.node_0.completed": 1,
}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_core/test_tools_manager.py | import textwrap
from pathlib import Path
from unittest.mock import patch
import pytest
from mock import MagicMock
from promptflow import tool
from promptflow._core._errors import InputTypeMismatch, InvalidSource, PackageToolNotFoundError
from promptflow._core.tools_manager import (
BuiltinsManager,
ToolLoader,
collect_package_tools,
collect_package_tools_and_connections,
)
from promptflow._utils.yaml_utils import load_yaml_string
from promptflow.contracts.flow import InputAssignment, InputValueType, Node, ToolSource, ToolSourceType
from promptflow.contracts.tool import Tool, ToolType
from promptflow.exceptions import UserErrorException
@pytest.mark.unittest
class TestToolLoader:
def test_load_tool_for_node_with_invalid_node(self):
tool_loader = ToolLoader(working_dir="test_working_dir")
node: Node = Node(name="test", tool="test_tool", inputs={}, type=ToolType.PYTHON)
with pytest.raises(UserErrorException, match="Node test does not have source defined."):
tool_loader.load_tool_for_node(node)
node: Node = Node(
name="test", tool="test_tool", inputs={}, type=ToolType.PYTHON, source=ToolSource(type="invalid_type")
)
with pytest.raises(
NotImplementedError, match="Tool source type invalid_type for python tool is not supported yet."
):
tool_loader.load_tool_for_node(node)
node: Node = Node(
name="test", tool="test_tool", inputs={}, type=ToolType.CUSTOM_LLM, source=ToolSource(type="invalid_type")
)
with pytest.raises(
NotImplementedError, match="Tool source type invalid_type for custom_llm tool is not supported yet."
):
tool_loader.load_tool_for_node(node)
node: Node = Node(
name="test", tool="test_tool", inputs={}, type="invalid_type", source=ToolSource(type=ToolSourceType.Code)
)
with pytest.raises(NotImplementedError, match="Tool type invalid_type is not supported yet."):
tool_loader.load_tool_for_node(node)
def test_load_tool_for_package_node(self, mocker):
package_tools = {"test_tool": Tool(name="test_tool", type=ToolType.PYTHON, inputs={}).serialize()}
mocker.patch("promptflow._core.tools_manager.collect_package_tools", return_value=package_tools)
tool_loader = ToolLoader(
working_dir="test_working_dir", package_tool_keys=["promptflow._core.tools_manager.collect_package_tools"]
)
node: Node = Node(
name="test",
tool="test_tool",
inputs={},
type=ToolType.PYTHON,
source=ToolSource(type=ToolSourceType.Package, tool="test_tool"),
)
tool = tool_loader.load_tool_for_node(node)
assert tool.name == "test_tool"
node: Node = Node(
name="test",
tool="test_tool",
inputs={},
type=ToolType.PYTHON,
source=ToolSource(type=ToolSourceType.Package, tool="invalid_tool"),
)
msg = (
"Package tool 'invalid_tool' is not found in the current environment. "
"All available package tools are: ['test_tool']."
)
with pytest.raises(PackageToolNotFoundError) as ex:
tool_loader.load_tool_for_node(node)
assert str(ex.value) == msg
def test_load_tool_for_package_node_with_legacy_tool_id(self, mocker):
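        # A deprecated tool id should resolve to its replacement, unless the old id still exists in the package tools.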
package_tools = {
"new_tool_1": Tool(
name="new tool 1", type=ToolType.PYTHON, inputs={}, deprecated_tools=["old_tool_1"]
).serialize(),
"new_tool_2": Tool(
name="new tool 1", type=ToolType.PYTHON, inputs={}, deprecated_tools=["old_tool_2"]
).serialize(),
"old_tool_2": Tool(name="old tool 2", type=ToolType.PYTHON, inputs={}).serialize(),
}
mocker.patch("promptflow._core.tools_manager.collect_package_tools", return_value=package_tools)
tool_loader = ToolLoader(working_dir="test_working_dir", package_tool_keys=list(package_tools.keys()))
node_with_legacy_tool: Node = Node(
name="test_legacy_tool",
tool="old_tool_1",
inputs={},
type=ToolType.PYTHON,
source=ToolSource(type=ToolSourceType.Package, tool="old_tool_1"),
)
assert tool_loader.load_tool_for_node(node_with_legacy_tool).name == "new tool 1"
node_with_legacy_tool_but_in_package_tools: Node = Node(
name="test_legacy_tool_but_in_package_tools",
tool="old_tool_2",
inputs={},
type=ToolType.PYTHON,
source=ToolSource(type=ToolSourceType.Package, tool="old_tool_2"),
)
assert tool_loader.load_tool_for_node(node_with_legacy_tool_but_in_package_tools).name == "old tool 2"
def test_load_tool_for_script_node(self):
working_dir = Path(__file__).parent
tool_loader = ToolLoader(working_dir=working_dir)
file = "test_tools_manager.py"
node: Node = Node(
name="test",
tool="sample_tool",
inputs={},
type=ToolType.PYTHON,
source=ToolSource(type=ToolSourceType.Code, path=file),
)
tool = tool_loader.load_tool_for_node(node)
assert tool.name == "sample_tool"
@pytest.mark.parametrize(
"source_path, error_message",
[
(None, "Load tool failed for node 'test'. The source path is 'None'."),
("invalid_file.py", "Load tool failed for node 'test'. Tool file 'invalid_file.py' can not be found."),
],
)
def test_load_tool_for_script_node_exception(self, source_path, error_message):
working_dir = Path(__file__).parent
tool_loader = ToolLoader(working_dir=working_dir)
node: Node = Node(
name="test",
tool="sample_tool",
inputs={},
type=ToolType.PYTHON,
source=ToolSource(type=ToolSourceType.Code, path=source_path),
)
with pytest.raises(InvalidSource) as ex:
tool_loader.load_tool_for_script_node(node)
assert str(ex.value) == error_message
# This tool is for testing tools_manager.ToolLoader.load_tool_for_script_node
@tool
def sample_tool(input: str):
return input
@pytest.mark.unittest
class TestToolsManager:
def test_collect_package_tools_if_node_source_tool_is_legacy(self):
legacy_node_source_tools = ["content_safety_text.tools.content_safety_text_tool.analyze_text"]
package_tools = collect_package_tools(legacy_node_source_tools)
assert "promptflow.tools.azure_content_safety.analyze_text" in package_tools.keys()
def test_collect_package_tools_and_connections(self, install_custom_tool_pkg):
keys = ["my_tool_package.tools.my_tool_2.MyTool.my_tool"]
tools, specs, templates = collect_package_tools_and_connections(keys)
assert len(tools) == 1
assert specs == {
"my_tool_package.connections.MyFirstConnection": {
"connectionCategory": "CustomKeys",
"flowValueType": "CustomConnection",
"connectionType": "MyFirstConnection",
"ConnectionTypeDisplayName": "MyFirstConnection",
"configSpecs": [
{"name": "api_key", "displayName": "Api Key", "configValueType": "Secret", "isOptional": False},
{"name": "api_base", "displayName": "Api Base", "configValueType": "str", "isOptional": True},
],
"module": "my_tool_package.connections",
"package": "test-custom-tools",
"package_version": "0.0.2",
}
}
expected_template = {
"$schema": "https://azuremlschemas.azureedge.net/promptflow/latest/CustomStrongTypeConnection.schema.json",
"name": "to_replace_with_connection_name",
"type": "custom",
"custom_type": "MyFirstConnection",
"module": "my_tool_package.connections",
"package": "test-custom-tools",
"package_version": "0.0.2",
"configs": {"api_base": "This is my first connection."},
"secrets": {"api_key": "to_replace_with_api_key"},
}
loaded_yaml = load_yaml_string(templates["my_tool_package.connections.MyFirstConnection"])
assert loaded_yaml == expected_template
keys = ["my_tool_package.tools.my_tool_with_custom_strong_type_connection.my_tool"]
tools, specs, templates = collect_package_tools_and_connections(keys)
assert len(templates) == 1
expected_template = """
name: "to_replace_with_connection_name"
type: custom
custom_type: MyCustomConnection
module: my_tool_package.tools.my_tool_with_custom_strong_type_connection
package: test-custom-tools
package_version: 0.0.2
configs:
api_url: "This is a fake api url." # String type. The api url.
secrets: # must-have
api_key: "to_replace_with_api_key" # String type. The api key.
"""
content = templates["my_tool_package.tools.my_tool_with_custom_strong_type_connection.MyCustomConnection"]
expected_template_str = textwrap.dedent(expected_template)
assert expected_template_str in content
def test_gen_dynamic_list(self, mocked_ws_triple, mock_module_with_list_func):
from promptflow._sdk._utils import _gen_dynamic_list
func_path = "my_tool_package.tools.tool_with_dynamic_list_input.my_list_func"
func_kwargs = {"prefix": "My"}
result = _gen_dynamic_list({"func_path": func_path, "func_kwargs": func_kwargs})
assert len(result) == 2
# test gen_dynamic_list with ws_triple.
with patch("promptflow._cli._utils.get_workspace_triad_from_local", return_value=mocked_ws_triple):
result = _gen_dynamic_list({"func_path": func_path, "func_kwargs": func_kwargs})
assert len(result) == 2
@pytest.mark.unittest
class TestBuiltinsManager:
def test_load_tool_from_module(
self,
):
# Test case 1: When class_name is None
module = MagicMock()
tool_name = "test_tool"
module_name = "test_module"
class_name = None
method_name = "test_method"
node_inputs = {"input1": InputAssignment(value_type=InputValueType.LITERAL, value="value1")}
# Mock the behavior of the module and class
module.test_method = MagicMock()
# Call the method
api, init_inputs = BuiltinsManager._load_tool_from_module(
module, tool_name, module_name, class_name, method_name, node_inputs
)
# Assertions
assert api == module.test_method
assert init_inputs == {}
# Non literal input for init parameter will raise exception.
module = MagicMock()
tool_name = "test_tool"
module_name = "test_module"
class_name = "TestClass"
method_name = "test_method"
node_inputs = {"input1": InputAssignment(value_type=InputValueType.FLOW_INPUT, value="value1")}
# Mock the behavior of the module and class
module.TestClass = MagicMock()
module.TestClass.get_initialize_inputs = MagicMock(return_value=["input1"])
module.TestClass.get_required_initialize_inputs = MagicMock(return_value=["input1"])
module.TestClass.test_method = MagicMock()
# Call the method
with pytest.raises(InputTypeMismatch) as ex:
BuiltinsManager._load_tool_from_module(module, tool_name, module_name, class_name, method_name, node_inputs)
expected_message = (
"Invalid input for 'test_tool': Initialization input 'input1' requires a literal value, "
"but ${flow.value1} was received."
)
assert expected_message == str(ex.value)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_core/test_tracer.py | import inspect
import pytest
from promptflow._core.generator_proxy import GeneratorProxy
from promptflow._core.tracer import Tracer, _create_trace_from_function_call, _traced, trace
from promptflow.connections import AzureOpenAIConnection
from promptflow.contracts.trace import Trace, TraceType
def generator():
for i in range(3):
yield i
@pytest.mark.unittest
class TestTracer:
def test_end_tracing(self):
# Activate the tracer in the current context
tracer = Tracer("test_run_id")
tracer._activate_in_context()
# Assert that there is an active tracer instance
assert Tracer.active_instance() is tracer
# End tracing and get the traces as a JSON string
traces = Tracer.end_tracing()
# Assert that the traces is a list
assert isinstance(traces, list)
# Assert that there is no active tracer instance after ending tracing
assert Tracer.active_instance() is None
# Test the raise_ex argument of the end_tracing method
with pytest.raises(Exception):
# Try to end tracing again with raise_ex=True
Tracer.end_tracing(raise_ex=True)
# Try to end tracing again with raise_ex=False
traces = Tracer.end_tracing(raise_ex=False)
# Assert that the traces are empty
assert not traces
def test_start_tracing(self):
# Assert that there is no active tracer instance before starting tracing
assert Tracer.active_instance() is None
# Start tracing with a mock run_id
Tracer.start_tracing("test_run_id")
# Assert that there is an active tracer instance after starting tracing
assert Tracer.active_instance() is not None
# Assert that the active tracer instance has the correct run_id
assert Tracer.active_instance()._run_id == "test_run_id"
Tracer.end_tracing()
def test_push_pop(self, caplog):
# test the push method with a single trace
Tracer.start_tracing("test_run_id")
tracer = Tracer.active_instance()
trace1 = Trace("test1", inputs=[1, 2, 3], type=TraceType.TOOL)
trace2 = Trace("test2", inputs=[4, 5, 6], type=TraceType.TOOL)
Tracer.push(trace1)
assert tracer._traces == [trace1]
assert tracer._id_to_trace == {trace1.id: trace1}
# test the push method with a nested trace
Tracer.push(trace2)
assert tracer._traces == [trace1] # check if the tracer still has only the first trace in its _traces list
# check if the tracer has both traces in its trace dict
assert tracer._id_to_trace == {trace1.id: trace1, trace2.id: trace2}
assert trace1.children == [trace2] # check if the first trace has the second trace as its child
# test the pop method with generator output
tool_output = generator()
error1 = ValueError("something went wrong")
assert tracer._get_current_trace() is trace2
output = Tracer.pop(output=tool_output, error=error1)
# check output iterator
for i in range(3):
assert next(output) == i
assert isinstance(trace2.output, GeneratorProxy)
assert trace2.error == {
"message": str(error1),
"type": type(error1).__qualname__,
}
assert tracer._get_current_trace() is trace1
# test the pop method with no arguments
output = Tracer.pop()
assert tracer._get_current_trace() is None
assert trace1.output is None
assert output is None
Tracer.end_tracing()
# test the push method with no active tracer
Tracer.push(trace1)
# assert that the warning message is logged
assert "Try to push trace but no active tracer in current context." in caplog.text
def test_unserializable_obj_to_serializable(self):
# assert that the function returns a str object for unserializable objects
assert Tracer.to_serializable(generator) == str(generator)
@pytest.mark.parametrize("obj", [({"name": "Alice", "age": 25}), ([1, 2, 3]), (GeneratorProxy(generator())), (42)])
def test_to_serializable(self, obj):
assert Tracer.to_serializable(obj) == obj
def func_with_no_parameters():
pass
def func_with_args_and_kwargs(arg1, arg2=None, *, kwarg1=None, kwarg2=None):
_ = (arg1, arg2, kwarg1, kwarg2)
async def func_with_args_and_kwargs_async(arg1, arg2=None, *, kwarg1=None, kwarg2=None):
_ = (arg1, arg2, kwarg1, kwarg2)
def func_with_connection_parameter(a: int, conn: AzureOpenAIConnection):
_ = (a, conn)
class MyClass:
def my_method(self, a: int):
_ = a
@pytest.mark.unittest
class TestCreateTraceFromFunctionCall:
"""This class tests the `_create_trace_from_function_call` function."""
def test_basic_fields_are_filled_and_others_are_not(self):
trace = _create_trace_from_function_call(func_with_no_parameters)
# These fields should be filled in this method call.
assert trace.name == "func_with_no_parameters"
assert trace.type == TraceType.FUNCTION
assert trace.inputs == {}
# start_time should be a timestamp, which is a float value currently.
assert isinstance(trace.start_time, float)
# These should be left empty in this method call.
# They will be filled by the tracer later.
assert trace.output is None
assert trace.end_time is None
assert trace.children == []
assert trace.error is None
def test_basic_fields_are_filled_for_async_functions(self):
trace = _create_trace_from_function_call(
func_with_args_and_kwargs_async, args=[1, 2], kwargs={"kwarg1": 3, "kwarg2": 4}
)
assert trace.name == "func_with_args_and_kwargs_async"
assert trace.type == TraceType.FUNCTION
assert trace.inputs == {"arg1": 1, "arg2": 2, "kwarg1": 3, "kwarg2": 4}
def test_trace_name_should_contain_class_name_for_class_methods(self):
obj = MyClass()
trace = _create_trace_from_function_call(obj.my_method, args=[obj, 1])
assert trace.name == "MyClass.my_method"
def test_trace_type_can_be_set_correctly(self):
trace = _create_trace_from_function_call(func_with_no_parameters, trace_type=TraceType.TOOL)
assert trace.type == TraceType.TOOL
def test_args_and_kwargs_are_filled_correctly(self):
trace = _create_trace_from_function_call(
func_with_args_and_kwargs, args=[1, 2], kwargs={"kwarg1": 3, "kwarg2": 4}
)
assert trace.inputs == {"arg1": 1, "arg2": 2, "kwarg1": 3, "kwarg2": 4}
def test_args_called_with_name_should_be_filled_correctly(self):
trace = _create_trace_from_function_call(func_with_args_and_kwargs, args=[1], kwargs={"arg2": 2, "kwarg2": 4})
assert trace.inputs == {"arg1": 1, "arg2": 2, "kwarg2": 4}
def test_kwargs_called_without_name_should_be_filled_correctly(self):
trace = _create_trace_from_function_call(func_with_args_and_kwargs, args=[1, 2, 3], kwargs={"kwarg2": 4})
assert trace.inputs == {"arg1": 1, "arg2": 2, "kwarg1": 3, "kwarg2": 4}
def test_empty_args_should_be_excluded_from_inputs(self):
trace = _create_trace_from_function_call(func_with_args_and_kwargs, args=[1])
assert trace.inputs == {"arg1": 1}
def test_empty_kwargs_should_be_excluded_from_inputs(self):
trace = _create_trace_from_function_call(func_with_args_and_kwargs, kwargs={"kwarg1": 1})
assert trace.inputs == {"kwarg1": 1}
trace = _create_trace_from_function_call(func_with_args_and_kwargs, kwargs={"kwarg2": 2})
assert trace.inputs == {"kwarg2": 2}
def test_args_and_kwargs_should_be_filled_in_called_order(self):
trace = _create_trace_from_function_call(
func_with_args_and_kwargs, args=[1, 2], kwargs={"kwarg2": 4, "kwarg1": 3}
)
assert list(trace.inputs.keys()) == ["arg1", "arg2", "kwarg2", "kwarg1"]
def test_connections_should_be_serialized(self):
conn = AzureOpenAIConnection("test_name", "test_secret")
trace = _create_trace_from_function_call(func_with_connection_parameter, args=[1, conn])
assert trace.inputs == {"a": 1, "conn": "AzureOpenAIConnection"}
def test_self_arg_should_be_excluded_from_inputs(self):
obj = MyClass()
trace = _create_trace_from_function_call(obj.my_method, args=[1])
assert trace.inputs == {"a": 1}
def sync_func(a: int):
return a
async def async_func(a: int):
return a
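# The *_error_func helpers below intentionally divide by zero so error tracing can be exercised.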
def sync_error_func(a: int):
a / 0
async def async_error_func(a: int):
a / 0
@pytest.mark.unittest
class TestTraced:
"""This class tests the `_traced` function."""
def test_traced_sync_func_should_be_a_sync_func(self):
assert inspect.iscoroutinefunction(_traced(sync_func)) is False
def test_traced_async_func_should_be_an_async_func(self):
assert inspect.iscoroutinefunction(_traced(async_func)) is True
@pytest.mark.parametrize("func", [sync_func, async_func])
def test_original_function_and_wrapped_function_should_have_same_name(self, func):
traced_func = _traced(func)
assert traced_func.__name__ == func.__name__
@pytest.mark.parametrize("func", [sync_func, async_func])
def test_original_function_and_wrapped_function_attributes_are_set(self, func):
traced_func = _traced(func)
assert getattr(traced_func, "__original_function") == func
@pytest.mark.asyncio
@pytest.mark.parametrize("func", [sync_func, async_func])
async def test_trace_is_not_generated_when_tracer_is_not_active(self, func):
# Do not call Tracer.start_tracing() here
traced_func = _traced(func)
if inspect.iscoroutinefunction(traced_func):
result = await traced_func(1)
else:
result = traced_func(1)
# Check the result is expected
assert result == 1
# Check the generated trace is not generated
traces = Tracer.end_tracing()
assert len(traces) == 0
@pytest.mark.asyncio
@pytest.mark.parametrize("func", [sync_func, async_func])
async def test_trace_is_generated_when_tracer_is_active(self, func):
Tracer.start_tracing("test_run_id")
traced_func = _traced(func)
if inspect.iscoroutinefunction(traced_func):
result = await traced_func(1)
else:
result = traced_func(1)
# Check the result is expected
assert result == 1
traces = Tracer.end_tracing()
# Check the generated trace is expected
assert len(traces) == 1
trace = traces[0]
assert trace["name"] == func.__qualname__
assert trace["type"] == TraceType.FUNCTION
assert trace["inputs"] == {"a": 1}
assert trace["output"] == 1
assert trace["error"] is None
assert trace["children"] == []
assert isinstance(trace["start_time"], float)
assert isinstance(trace["end_time"], float)
@pytest.mark.asyncio
@pytest.mark.parametrize("func", [sync_error_func, async_error_func])
async def test_trace_is_generated_when_errors_occurred(self, func):
Tracer.start_tracing("test_run_id")
traced_func = _traced(func)
with pytest.raises(ZeroDivisionError):
if inspect.iscoroutinefunction(traced_func):
await traced_func(1)
else:
traced_func(1)
traces = Tracer.end_tracing()
# Check the generated trace is expected
assert len(traces) == 1
trace = traces[0]
assert trace["name"] == func.__qualname__
assert trace["type"] == TraceType.FUNCTION
assert trace["inputs"] == {"a": 1}
assert trace["output"] is None
assert trace["error"] == {"message": "division by zero", "type": "ZeroDivisionError"}
assert trace["children"] == []
assert isinstance(trace["start_time"], float)
assert isinstance(trace["end_time"], float)
@pytest.mark.asyncio
@pytest.mark.parametrize("func", [sync_func, async_func])
async def test_trace_type_can_be_set_correctly(self, func):
Tracer.start_tracing("test_run_id")
traced_func = _traced(func, trace_type=TraceType.TOOL)
if inspect.iscoroutinefunction(traced_func):
result = await traced_func(1)
else:
result = traced_func(1)
assert result == 1
traces = Tracer.end_tracing()
# Check the generated trace is expected
assert len(traces) == 1
trace = traces[0]
assert trace["name"] == func.__qualname__
assert trace["type"] == TraceType.TOOL
@trace
def my_tool(a: int):
return a
@trace
async def my_tool_async(a: int):
return a
@pytest.mark.unittest
class TestTrace:
"""This class tests `trace` function."""
@pytest.mark.asyncio
@pytest.mark.parametrize(
"func",
[
my_tool,
my_tool_async,
],
)
async def test_traces_are_created_correctly(self, func):
Tracer.start_tracing("test_run_id")
if inspect.iscoroutinefunction(func):
result = await func(1)
else:
result = func(1)
assert result == 1
traces = Tracer.end_tracing()
assert len(traces) == 1
trace = traces[0]
assert trace["name"] == func.__qualname__
assert trace["type"] == TraceType.FUNCTION
assert trace["inputs"] == {"a": 1}
assert trace["output"] == 1
assert trace["error"] is None
assert trace["children"] == []
assert isinstance(trace["start_time"], float)
assert isinstance(trace["end_time"], float)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_core/test_generator_proxy.py | import pytest
from promptflow._core.generator_proxy import GeneratorProxy, generate_from_proxy
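# GeneratorProxy wraps a generator or iterator, forwards iteration, and records every yielded item in `items`.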
def generator():
for i in range(3):
yield i
def iterator():
return iter([0, 1, 2])
@pytest.mark.unittest
def test_generator_proxy_next():
proxy = GeneratorProxy(generator())
assert proxy.items == []
assert next(proxy) == 0
assert next(proxy) == 1
assert next(proxy) == 2
with pytest.raises(StopIteration):
next(proxy)
assert proxy.items == [0, 1, 2]
@pytest.mark.unittest
def test_generator_proxy_iter():
original_generator = generator()
proxy = GeneratorProxy(generator())
for num in proxy:
assert num == next(original_generator)
assert proxy.items == [0, 1, 2]
@pytest.mark.unittest
def test_generate_from_proxy():
proxy = GeneratorProxy(generator())
original_generator = generator()
for i in generate_from_proxy(proxy):
assert i == next(original_generator)
assert proxy.items == [0, 1, 2]
@pytest.mark.unittest
def test_iterator_proxy_next():
proxy = GeneratorProxy(iterator())
assert proxy.items == []
assert next(proxy) == 0
assert next(proxy) == 1
assert next(proxy) == 2
with pytest.raises(StopIteration):
next(proxy)
assert proxy.items == [0, 1, 2]
@pytest.mark.unittest
def test_iterator_proxy_iter():
original_iterator = iterator()
proxy = GeneratorProxy(iterator())
for num in proxy:
assert num == next(original_iterator)
assert proxy.items == [0, 1, 2]
@pytest.mark.unittest
def test_generate_from_iterator_proxy():
proxy = GeneratorProxy(iterator())
original_iterator = iterator()
for i in generate_from_proxy(proxy):
assert i == next(original_iterator)
assert proxy.items == [0, 1, 2]
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_core/test_tool.py | import inspect
import pytest
from promptflow import tool
from promptflow._core.tool import InputSetting, ToolType
from promptflow._core.tracer import Tracer, TraceType
from promptflow.exceptions import UserErrorException
@tool
def decorated_without_parentheses(a: int):
return a
@tool()
def decorated_with_parentheses(a: int):
return a
@tool
async def decorated_without_parentheses_async(a: int):
return a
@tool()
async def decorated_with_parentheses_async(a: int):
return a
@tool(
name="tool_with_attributes",
description="Sample tool with a lot of attributes",
type=ToolType.LLM,
input_settings=InputSetting(),
streaming_option_parameter="stream",
extra_a="a",
extra_b="b",
)
def tool_with_attributes(stream: bool, a: int, b: int):
return stream, a, b
@pytest.mark.unittest
class TestTool:
"""This class tests the `tool` decorator."""
@pytest.mark.asyncio
@pytest.mark.parametrize(
"func",
[
decorated_with_parentheses,
decorated_without_parentheses,
decorated_with_parentheses_async,
decorated_without_parentheses_async,
],
)
async def test_traces_are_created_correctly(self, func):
Tracer.start_tracing("test_run_id")
if inspect.iscoroutinefunction(func):
result = await func(1)
else:
result = func(1)
assert result == 1
traces = Tracer.end_tracing()
assert len(traces) == 1
trace = traces[0]
assert trace["name"] == func.__qualname__
assert trace["type"] == TraceType.TOOL
assert trace["inputs"] == {"a": 1}
assert trace["output"] == 1
assert trace["error"] is None
assert trace["children"] == []
assert isinstance(trace["start_time"], float)
assert isinstance(trace["end_time"], float)
def test_attributes_are_set_to_the_tool_function(self):
stream, a, b = tool_with_attributes(True, 1, 2)
# Check the results are as expected
assert stream is True
assert a == 1
assert b == 2
# Check the attributes are set to the function
assert getattr(tool_with_attributes, "__tool") is None
assert getattr(tool_with_attributes, "__name") == "tool_with_attributes"
assert getattr(tool_with_attributes, "__description") == "Sample tool with a lot of attributes"
assert getattr(tool_with_attributes, "__type") == ToolType.LLM
assert getattr(tool_with_attributes, "__input_settings") == InputSetting()
assert getattr(tool_with_attributes, "__extra_info") == {"extra_a": "a", "extra_b": "b"}
assert getattr(tool_with_attributes, "_streaming_option_parameter") == "stream"
def test_invalid_tool_type_should_raise_error(self):
with pytest.raises(UserErrorException, match="Tool type invalid_type is not supported yet."):
@tool(type="invalid_type")
def invalid_tool_type():
pass
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_core/test_metric_logger.py | import pytest
from promptflow._core.metric_logger import MetricLoggerManager, add_metric_logger, log_metric, remove_metric_logger
@pytest.mark.unittest
class TestMetricLogger:
def test_add_and_remove_metric_logger(self):
# define log metric function
metrics = {}
def _log_metric(key, value):
metrics[key] = value
def _log_metric_invalid(key, value, variant_id, extra_param):
metrics[key] = {variant_id: {value: extra_param}}
add_metric_logger(_log_metric)
assert MetricLoggerManager.get_instance()._metric_loggers == [_log_metric]
add_metric_logger(_log_metric)
assert MetricLoggerManager.get_instance()._metric_loggers == [_log_metric]
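        # Loggers with an unexpected signature or non-callable values should be ignored and not registered.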
add_metric_logger(_log_metric_invalid)
assert MetricLoggerManager.get_instance()._metric_loggers == [_log_metric]
add_metric_logger("test")
assert MetricLoggerManager.get_instance()._metric_loggers == [_log_metric]
remove_metric_logger(_log_metric)
assert MetricLoggerManager.get_instance()._metric_loggers == []
def test_log_metric(self):
# define log metric function
metrics = {}
def _log_metric(key, value):
metrics[key] = value
def _log_metric_with_variant_id(key, value, variant_id):
metrics[key] = {variant_id: value}
add_metric_logger(_log_metric)
log_metric("test1", 1)
assert metrics == {"test1": 1}
add_metric_logger(_log_metric_with_variant_id)
log_metric("test2", 1, "line_0")
assert metrics == {"test1": 1, "test2": {"line_0": 1}}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_core/test_log_manager.py | import logging
import sys
import time
from multiprocessing.pool import ThreadPool
import pytest
from dateutil.parser import parse
from promptflow._core.log_manager import NodeLogManager, NodeLogWriter
RUN_ID = "dummy_run_id"
NODE_NAME = "dummy_node"
LINE_NUMBER = 1
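# Helper for the multi-thread test below: each thread registers its own run id, prints,
# and verifies that only its own output was captured under that run id.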
def assert_print_result(i: int, run_logger: NodeLogWriter):
run_id = f"{RUN_ID}-{i}"
run_logger.set_node_info(run_id, NODE_NAME, LINE_NUMBER)
time.sleep(i / 10)
print(i)
assert_datetime_prefix(run_logger.get_log(run_id), str(i) + "\n")
def is_datetime(string: str) -> bool:
"""Check if a string follows datetime format."""
try:
parse(string)
return True
except ValueError:
return False
def assert_datetime_prefix(string: str, expected_str: str):
"""Assert if string has a datetime prefix, such as:
[2023-04-17T07:49:54+0000] example string
"""
datetime_prefix = string[string.index("[") + 1 : string.index("]")]
inner_str = string[string.index("]") + 2 :]
assert is_datetime(datetime_prefix)
assert inner_str == expected_str
@pytest.mark.unittest
class TestNodeLogManager:
def test_get_logs(self):
with NodeLogManager(record_datetime=False) as lm:
lm.set_node_context(RUN_ID, NODE_NAME, LINE_NUMBER)
print("test")
print("test2")
print("test stderr", file=sys.stderr)
assert lm.get_logs(RUN_ID).get("stdout") == "test\ntest2\n"
assert lm.get_logs(RUN_ID).get("stderr") == "test stderr\n"
lm.clear_node_context(RUN_ID)
assert lm.get_logs(RUN_ID).get("stdout") is None
assert lm.get_logs(RUN_ID).get("stderr") is None
def test_logging(self):
with NodeLogManager(record_datetime=False) as lm:
lm.set_node_context(RUN_ID, NODE_NAME, LINE_NUMBER)
stdout_logger = logging.getLogger("stdout")
stderr_logger = logging.getLogger("stderr")
stdout_logger.addHandler(logging.StreamHandler(stream=sys.stdout))
stderr_logger.addHandler(logging.StreamHandler(stream=sys.stderr))
stdout_logger.warning("test stdout")
stderr_logger.warning("test stderr")
logs = lm.get_logs(RUN_ID)
assert logs.get("stdout") == "test stdout\n"
assert logs.get("stderr") == "test stderr\n"
def test_exit_context_manager(self):
with NodeLogManager() as lm:
assert lm.stdout_logger is sys.stdout
assert lm.stdout_logger != sys.stdout
def test_datetime_prefix(self):
with NodeLogManager(record_datetime=True) as lm:
lm.set_node_context(RUN_ID, NODE_NAME, LINE_NUMBER)
print("test")
print("test2")
output = lm.get_logs(RUN_ID).get("stdout")
outputs = output.split("\n")
assert_datetime_prefix(outputs[0], "test")
assert_datetime_prefix(outputs[1], "test2")
assert outputs[2] == ""
@pytest.mark.unittest
class TestNodeLogWriter:
def test_set_node_info(self):
run_logger = NodeLogWriter(sys.stdout)
assert run_logger.get_log(RUN_ID) is None
run_logger.set_node_info(RUN_ID, NODE_NAME, LINE_NUMBER)
assert run_logger.get_log(RUN_ID) == ""
def test_clear_node_info(self):
run_logger = NodeLogWriter(sys.stdout)
run_logger.clear_node_info(RUN_ID)
run_logger.set_node_info(RUN_ID, NODE_NAME, LINE_NUMBER)
run_logger.clear_node_info(RUN_ID)
assert run_logger.run_id_to_stdout.get(RUN_ID) is None
def test_get_log(self):
run_logger = NodeLogWriter(sys.stdout)
sys.stdout = run_logger
print("test")
assert run_logger.get_log(RUN_ID) is None
run_logger.set_node_info(RUN_ID, NODE_NAME, LINE_NUMBER)
print("test")
assert_datetime_prefix(run_logger.get_log(RUN_ID), "test\n")
run_logger.clear_node_info(RUN_ID)
assert run_logger.get_log(RUN_ID) is None
def test_multi_thread(self):
run_logger = NodeLogWriter(sys.stdout)
sys.stdout = run_logger
with ThreadPool(processes=10) as pool:
results = pool.starmap(assert_print_result, ((i, run_logger) for i in range(10)))
for r in results:
pass
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_core/test_operation_context.py | import threading
import pytest
from promptflow._core.operation_context import OperationContext
from promptflow._version import VERSION
from promptflow.contracts.run_mode import RunMode
def set_run_mode(context: OperationContext, run_mode: RunMode):
"""This method simulates the runtime.execute_request()
It is aimed to set the run_mode into operation context.
"""
context.run_mode = run_mode.name if run_mode is not None else ""
@pytest.mark.unittest
class TestOperationContext:
def test_get_user_agent(self):
operation_context = OperationContext()
assert operation_context.get_user_agent() == f"promptflow/{VERSION}"
operation_context.user_agent = "test_agent/0.0.2"
assert operation_context.get_user_agent() == f"test_agent/0.0.2 promptflow/{VERSION}"
@pytest.mark.parametrize(
"run_mode, expected",
[
(RunMode.Test, "Test"),
(RunMode.SingleNode, "SingleNode"),
(RunMode.Batch, "Batch"),
],
)
def test_run_mode(self, run_mode, expected):
context = OperationContext()
set_run_mode(context, run_mode)
assert context.run_mode == expected
def test_context_dict(self):
context = OperationContext()
context.run_mode = "Flow"
context.user_agent = "test_agent/0.0.2"
context.none_value = None
context_dict = context.get_context_dict()
assert context_dict["run_mode"] == "Flow"
assert context_dict["user_agent"] == "test_agent/0.0.2"
assert context_dict["none_value"] is None
def test_setattr(self):
context = OperationContext()
context.run_mode = "Flow"
assert context["run_mode"] == "Flow"
def test_setattr_non_primitive(self):
# Test set non-primitive type
context = OperationContext()
with pytest.raises(TypeError):
context.foo = [1, 2, 3]
def test_getattr(self):
context = OperationContext()
context["run_mode"] = "Flow"
assert context.run_mode == "Flow"
def test_getattr_missing(self):
context = OperationContext()
with pytest.raises(AttributeError):
context.foo
def test_delattr(self):
# test that delattr works as expected
context = OperationContext()
context.foo = "bar"
del context.foo
assert "foo" not in context
# test that delattr raises AttributeError for non-existent name
with pytest.raises(AttributeError):
del context.baz
def test_append_user_agent(self):
context = OperationContext()
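        # Capture any user agent already present on the context so the expected values below stay correct.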
        user_agent = " " + context.user_agent if "user_agent" in context else ""
context.append_user_agent("test_agent/0.0.2")
assert context.user_agent == "test_agent/0.0.2" + user_agent
context.append_user_agent("test_agent/0.0.3")
assert context.user_agent == "test_agent/0.0.2 test_agent/0.0.3" + user_agent
def test_get_instance(self):
context1 = OperationContext.get_instance()
context2 = OperationContext.get_instance()
assert context1 is context2
def test_set_batch_input_source_from_inputs_mapping_run(self):
input_mapping = {"input1": "${run.outputs.output1}", "input2": "${run.outputs.output2}"}
context = OperationContext()
context.set_batch_input_source_from_inputs_mapping(input_mapping)
assert context.batch_input_source == "Run"
def test_set_batch_input_source_from_inputs_mapping_data(self):
input_mapping = {"url": "${data.url}"}
context = OperationContext()
context.set_batch_input_source_from_inputs_mapping(input_mapping)
assert context.batch_input_source == "Data"
def test_set_batch_input_source_from_inputs_mapping_none(self):
input_mapping = None
context = OperationContext()
assert not hasattr(context, "batch_input_source")
context.set_batch_input_source_from_inputs_mapping(input_mapping)
assert context.batch_input_source == "Data"
def test_set_batch_input_source_from_inputs_mapping_empty(self):
input_mapping = {}
context = OperationContext()
assert not hasattr(context, "batch_input_source")
context.set_batch_input_source_from_inputs_mapping(input_mapping)
assert context.batch_input_source == "Data"
def test_different_thread_have_different_instance(self):
# create a list to store the OperationContext instances from each thread
instances = []
# define a function that gets the OperationContext instance and appends it to the list
def get_instance():
instance = OperationContext.get_instance()
instances.append(instance)
# create two threads and run the function in each thread
thread1 = threading.Thread(target=get_instance)
thread2 = threading.Thread(target=get_instance)
thread1.start()
thread2.start()
thread1.join()
thread2.join()
# assert that the list has two elements and they are different objects
assert len(instances) == 2
assert instances[0] is not instances[1]
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_core/test_api_injector.py | import logging
from collections import namedtuple
from importlib.metadata import version
from types import GeneratorType
from unittest.mock import MagicMock, patch
import openai
import pytest
from promptflow._core.openai_injector import (
PROMPTFLOW_PREFIX,
USER_AGENT_HEADER,
_generate_api_and_injector,
_openai_api_list,
get_aoai_telemetry_headers,
inject_async,
inject_openai_api,
inject_operation_headers,
inject_sync,
recover_openai_api,
)
from promptflow._core.operation_context import OperationContext
from promptflow._core.tracer import Tracer
from promptflow._version import VERSION
from promptflow.connections import AzureOpenAIConnection
from promptflow.exceptions import UserErrorException
from promptflow.tools.aoai import AzureOpenAI
from promptflow.tools.embedding import embedding
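# openai<1.0 exposes module-level APIs such as openai.Completion, while openai>=1.0 moves
# them under openai.resources.*; the tests below branch on this flag accordingly.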
IS_LEGACY_OPENAI = version("openai").startswith("0.")
# Mock classes and functions for test
class MockAPI:
def create(self):
pass
@pytest.mark.unittest
def test_inject_operation_headers_sync():
@inject_operation_headers
def f(**kwargs):
return kwargs
if IS_LEGACY_OPENAI:
headers = "headers"
kwargs_1 = {"headers": {"a": 1, "b": 2}}
kwargs_2 = {"headers": {"ms-azure-ai-promptflow-called-from": "aoai-tool"}}
else:
headers = "extra_headers"
kwargs_1 = {"extra_headers": {"a": 1, "b": 2}}
kwargs_2 = {"extra_headers": {"ms-azure-ai-promptflow-called-from": "aoai-tool"}}
injected_headers = get_aoai_telemetry_headers()
assert f(a=1, b=2) == {"a": 1, "b": 2, headers: injected_headers}
merged_headers = {**injected_headers, "a": 1, "b": 2}
assert f(**kwargs_1) == {headers: merged_headers}
aoai_tools_headers = injected_headers.copy()
aoai_tools_headers.update({"ms-azure-ai-promptflow-called-from": "aoai-tool"})
assert f(**kwargs_2) == {headers: aoai_tools_headers}
@pytest.mark.unittest
@pytest.mark.asyncio
async def test_inject_operation_headers_async():
@inject_operation_headers
async def f(**kwargs):
return kwargs
if IS_LEGACY_OPENAI:
headers = "headers"
kwargs_1 = {"headers": {"a": 1, "b": 2}}
kwargs_2 = {"headers": {"ms-azure-ai-promptflow-called-from": "aoai-tool"}}
else:
headers = "extra_headers"
kwargs_1 = {"extra_headers": {"a": 1, "b": 2}}
kwargs_2 = {"extra_headers": {"ms-azure-ai-promptflow-called-from": "aoai-tool"}}
injected_headers = get_aoai_telemetry_headers()
assert await f(a=1, b=2) == {"a": 1, "b": 2, headers: injected_headers}
merged_headers = {**injected_headers, "a": 1, "b": 2}
assert await f(**kwargs_1) == {headers: merged_headers}
aoai_tools_headers = injected_headers.copy()
aoai_tools_headers.update({"ms-azure-ai-promptflow-called-from": "aoai-tool"})
assert await f(**kwargs_2) == {headers: aoai_tools_headers}
@pytest.mark.unittest
def test_aoai_generator_proxy_sync():
def mock_aoai(**kwargs):
        # check if kwargs has a stream parameter
if "stream" in kwargs and kwargs["stream"]:
# stream parameter is true, yield a string
def generator():
yield "This is a yielded string"
return generator()
else:
# stream parameter is false or not given, return a string
return "This is a returned string"
if IS_LEGACY_OPENAI:
apis = ["openai.Completion.create", "openai.ChatCompletion.create", "openai.Embedding.create"]
else:
apis = [
"openai.resources.Completions.create",
"openai.resources.chat.Completions.create",
"openai.resources.Embeddings.create",
]
with patch(apis[0], new=mock_aoai), patch(apis[1], new=mock_aoai), patch(apis[2], new=mock_aoai):
Tracer.start_tracing("mock_run_id")
inject_openai_api()
if IS_LEGACY_OPENAI:
return_string = openai.Completion.create(stream=False)
return_generator = openai.Completion.create(stream=True)
else:
return_string = openai.resources.Completions.create(stream=False)
return_generator = openai.resources.Completions.create(stream=True)
assert return_string == "This is a returned string"
assert isinstance(return_generator, GeneratorType)
for _ in return_generator:
pass
traces = Tracer.end_tracing()
assert len(traces) == 2
for trace in traces:
assert trace["type"] == "LLM"
if trace["inputs"]["stream"]:
assert trace["output"] == ["This is a yielded string"]
else:
assert trace["output"] == "This is a returned string"
@pytest.mark.unittest
@pytest.mark.asyncio
async def test_aoai_generator_proxy_async():
async def mock_aoai(**kwargs):
        # check if kwargs has a stream parameter
if "stream" in kwargs and kwargs["stream"]:
# stream parameter is true, yield a string
def generator():
yield "This is a yielded string"
return generator()
else:
# stream parameter is false or not given, return a string
return "This is a returned string"
if IS_LEGACY_OPENAI:
apis = ["openai.Completion.acreate", "openai.ChatCompletion.acreate", "openai.Embedding.acreate"]
else:
apis = [
"openai.resources.AsyncCompletions.create",
"openai.resources.chat.AsyncCompletions.create",
"openai.resources.AsyncEmbeddings.create",
]
with patch(apis[0], new=mock_aoai), patch(apis[1], new=mock_aoai), patch(apis[2], new=mock_aoai):
Tracer.start_tracing("mock_run_id")
inject_openai_api()
if IS_LEGACY_OPENAI:
return_string = await openai.Completion.acreate(stream=False)
return_generator = await openai.Completion.acreate(stream=True)
else:
return_string = await openai.resources.AsyncCompletions.create(stream=False)
return_generator = await openai.resources.AsyncCompletions.create(stream=True)
assert return_string == "This is a returned string"
assert isinstance(return_generator, GeneratorType)
for _ in return_generator:
pass
traces = Tracer.end_tracing()
assert len(traces) == 2
for trace in traces:
assert trace["type"] == "LLM"
if trace["inputs"]["stream"]:
assert trace["output"] == ["This is a yielded string"]
else:
assert trace["output"] == "This is a returned string"
@pytest.mark.unittest
def test_aoai_call_inject():
if IS_LEGACY_OPENAI:
headers = "headers"
apis = ["openai.Completion.create", "openai.ChatCompletion.create", "openai.Embedding.create"]
else:
headers = "extra_headers"
apis = [
"openai.resources.Completions.create",
"openai.resources.chat.Completions.create",
"openai.resources.Embeddings.create",
]
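    # The mocked API echoes back the headers it receives, so the assertions below can
    # verify that the injected telemetry headers were merged into each call.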
def mock_aoai(**kwargs):
return kwargs.get(headers)
with patch(apis[0], new=mock_aoai), patch(apis[1], new=mock_aoai), patch(apis[2], new=mock_aoai):
inject_openai_api()
injected_headers = get_aoai_telemetry_headers()
if IS_LEGACY_OPENAI:
return_headers_1 = openai.Completion.create(headers=None)
return_headers_2 = openai.ChatCompletion.create(headers="abc")
return_headers_3 = openai.Embedding.create(headers=1)
else:
return_headers_1 = openai.resources.Completions.create(extra_headers=None)
return_headers_2 = openai.resources.chat.Completions.create(extra_headers="abc")
return_headers_3 = openai.resources.Embeddings.create(extra_headers=1)
assert return_headers_1 is not None
assert injected_headers.items() <= return_headers_1.items()
assert return_headers_2 is not None
assert injected_headers.items() <= return_headers_2.items()
assert return_headers_3 is not None
assert injected_headers.items() <= return_headers_3.items()
@pytest.mark.unittest
def test_aoai_tool_header():
def mock_complete(*args, **kwargs):
Response = namedtuple("Response", ["choices"])
Choice = namedtuple("Choice", ["text"])
choice = Choice(text=kwargs.get("extra_headers", {}))
response = Response(choices=[choice])
return response
def mock_chat(*args, **kwargs):
Completion = namedtuple("Completion", ["choices"])
Choice = namedtuple("Choice", ["message"])
Message = namedtuple("Message", ["content"])
message = Message(content=kwargs.get("extra_headers", {}))
choice = Choice(message=message)
completion = Completion(choices=[choice])
return completion
def mock_embedding(*args, **kwargs):
Response = namedtuple("Response", ["data"])
Embedding = namedtuple("Embedding", ["embedding"])
response = Response(data=[Embedding(embedding=kwargs.get("extra_headers", {}))])
return response
with patch("openai.resources.Completions.create", new=mock_complete), patch(
"openai.resources.chat.Completions.create", new=mock_chat
), patch("openai.resources.Embeddings.create", new=mock_embedding):
inject_openai_api()
aoai_tool_header = {"ms-azure-ai-promptflow-called-from": "aoai-tool"}
return_headers = AzureOpenAI(AzureOpenAIConnection(api_key="test", api_base="test")).completion(
prompt="test", deployment_name="test"
)
assert aoai_tool_header.items() <= return_headers.items()
return_headers = AzureOpenAI(AzureOpenAIConnection(api_key="test", api_base="test")).chat(
prompt="user:\ntest", deployment_name="test"
)
assert aoai_tool_header.items() <= return_headers.items()
return_headers = embedding(
AzureOpenAIConnection(api_key="test", api_base="test"), input="test", deployment_name="test"
)
assert aoai_tool_header.items() <= return_headers.items()
@pytest.mark.unittest
def test_aoai_chat_tool_prompt():
def mock_chat(*args, **kwargs):
Completion = namedtuple("Completion", ["choices"])
Choice = namedtuple("Choice", ["message"])
Message = namedtuple("Message", ["content"])
message = Message(content=kwargs.get("messages", {}))
choice = Choice(message=message)
completion = Completion(choices=[choice])
return completion
with patch("openai.resources.chat.Completions.create", new=mock_chat):
inject_openai_api()
return_messages = AzureOpenAI(AzureOpenAIConnection(api_key="test", api_base="test")).chat(
prompt="user:\ntest", deployment_name="test"
)
assert return_messages == [{"role": "user", "content": "test"}]
return_messages = AzureOpenAI(AzureOpenAIConnection(api_key="test", api_base="test")).chat(
prompt="user:\r\n", deployment_name="test"
)
assert return_messages == [{"role": "user", "content": ""}]
with pytest.raises(UserErrorException, match="The Chat API requires a specific format for prompt"):
AzureOpenAI(AzureOpenAIConnection(api_key="test", api_base="test")).chat(
prompt="user:", deployment_name="test"
)
# Parametrized tests for the generator-based API list and injector helpers
@pytest.mark.parametrize(
"is_legacy, expected_apis_with_injectors",
[
(
True,
[
(
(
("openai", "Completion", "create"),
("openai", "ChatCompletion", "create"),
("openai", "Embedding", "create"),
),
inject_sync,
),
(
(
("openai", "Completion", "acreate"),
("openai", "ChatCompletion", "acreate"),
("openai", "Embedding", "acreate"),
),
inject_async,
),
],
),
(
False,
[
(
(
("openai.resources.chat", "Completions", "create"),
("openai.resources", "Completions", "create"),
("openai.resources", "Embeddings", "create"),
),
inject_sync,
),
(
(
("openai.resources.chat", "AsyncCompletions", "create"),
("openai.resources", "AsyncCompletions", "create"),
("openai.resources", "AsyncEmbeddings", "create"),
),
inject_async,
),
],
),
],
)
def test_api_list(is_legacy, expected_apis_with_injectors):
with patch("promptflow._core.openai_injector.IS_LEGACY_OPENAI", is_legacy):
# Using list comprehension to get all items from the generator
actual_apis_with_injectors = list(_openai_api_list())
# Assert that the actual list matches the expected list
assert actual_apis_with_injectors == expected_apis_with_injectors
@pytest.mark.parametrize(
"apis_with_injectors, expected_output, expected_logs",
[
([((("MockModule", "MockAPI", "create"),), inject_sync)], [(MockAPI, "create", inject_sync)], []),
([((("MockModule", "MockAPI", "create"),), inject_async)], [(MockAPI, "create", inject_async)], []),
],
)
def test_generate_api_and_injector(apis_with_injectors, expected_output, expected_logs, caplog):
with patch("importlib.import_module", return_value=MagicMock(MockAPI=MockAPI)) as mock_import_module:
# Capture the logs
with caplog.at_level(logging.WARNING):
# Run the generator and collect the output
result = list(_generate_api_and_injector(apis_with_injectors))
# Check if the result matches the expected output
assert result == expected_output
# Check if the logs match the expected logs
assert len(caplog.records) == len(expected_logs)
for record, expected_message in zip(caplog.records, expected_logs):
assert expected_message in record.message
mock_import_module.assert_called_with("MockModule")
def test_generate_api_and_injector_attribute_error_logging(caplog):
apis = [
((("NonExistentModule", "NonExistentAPI", "create"),), MagicMock()),
((("MockModuleMissingMethod", "MockAPIMissingMethod", "missing_method"),), MagicMock()),
]
# Set up the side effect for the mock
def import_module_effect(name):
if name == "MockModuleMissingMethod":
module = MagicMock()
delattr(module, "MockAPIMissingMethod") # Use delattr to remove the attribute
return module
else:
raise ModuleNotFoundError(f"No module named '{name}'")
with patch("importlib.import_module") as mock_import_module:
mock_import_module.side_effect = import_module_effect
with caplog.at_level(logging.WARNING):
list(_generate_api_and_injector(apis))
assert len(caplog.records) == 2
assert "An unexpected error occurred" in caplog.records[0].message
assert "NonExistentModule" in caplog.records[0].message
assert "does not have the class" in caplog.records[1].message
assert "MockAPIMissingMethod" in caplog.records[1].message
# Verify that `importlib.import_module` was called with correct module names
mock_import_module.assert_any_call("NonExistentModule")
mock_import_module.assert_any_call("MockModuleMissingMethod")
@pytest.mark.unittest
def test_get_aoai_telemetry_headers():
# create a mock operation context
mock_operation_context = OperationContext()
mock_operation_context.user_agent = "test-user-agent"
mock_operation_context.update(
{
"flow_id": "test-flow-id",
"root_run_id": "test-root-run-id",
"index": 1,
"run_id": "test-run-id",
"variant_id": "test-variant-id",
}
)
# patch the OperationContext.get_instance method to return the mock operation context
with patch("promptflow._core.operation_context.OperationContext.get_instance") as mock_get_instance:
mock_get_instance.return_value = mock_operation_context
# call the function under test and get the headers
headers = get_aoai_telemetry_headers()
for key in headers.keys():
assert key.startswith(PROMPTFLOW_PREFIX) or key == USER_AGENT_HEADER
assert "_" not in key
# assert that the headers are correct
assert headers[USER_AGENT_HEADER] == f"test-user-agent promptflow/{VERSION}"
assert headers[f"{PROMPTFLOW_PREFIX}flow-id"] == "test-flow-id"
assert headers[f"{PROMPTFLOW_PREFIX}root-run-id"] == "test-root-run-id"
assert headers[f"{PROMPTFLOW_PREFIX}index"] == "1"
assert headers[f"{PROMPTFLOW_PREFIX}run-id"] == "test-run-id"
assert headers[f"{PROMPTFLOW_PREFIX}variant-id"] == "test-variant-id"
@pytest.mark.unittest
def test_inject_and_recover_openai_api():
class FakeAPIWithoutOriginal:
@staticmethod
def create():
pass
class FakeAPIWithOriginal:
@staticmethod
def create():
pass
def dummy_api():
pass
# Real injector function that adds an _original attribute
def injector(f):
def wrapper_fun(*args, **kwargs):
return f(*args, **kwargs)
wrapper_fun._original = f
return wrapper_fun
# Set an _original attribute for the create method of FakeAPIWithOriginal
FakeAPIWithOriginal.create._original = dummy_api
# Store the original create methods before injection
original_api_without_original = FakeAPIWithoutOriginal.create
original_api_with_original = FakeAPIWithOriginal.create
# Mock the generator function to yield our mocked api and method
with patch(
"promptflow._core.openai_injector.available_openai_apis_and_injectors",
return_value=[(FakeAPIWithoutOriginal, "create", injector), (FakeAPIWithOriginal, "create", injector)],
):
# Call the function to inject the APIs
inject_openai_api()
# Check that the _original attribute was set for the method that didn't have it
assert hasattr(FakeAPIWithoutOriginal.create, "_original")
# Ensure the _original attribute points to the correct original method
assert FakeAPIWithoutOriginal.create._original is original_api_without_original
# Check that the injector was not applied again to the method that already had an _original attribute
        # The _original attribute should still point to the pre-set dummy_api, not to the method itself
assert getattr(FakeAPIWithOriginal.create, "_original") is not FakeAPIWithOriginal.create
# The original method should remain unchanged
assert FakeAPIWithOriginal.create is original_api_with_original
# Call the function to recover the APIs
recover_openai_api()
        # Check that the _original attribute was removed from both methods
assert not hasattr(FakeAPIWithoutOriginal.create, "_original")
assert not hasattr(FakeAPIWithOriginal.create, "_original")
# The original methods should be restored
assert FakeAPIWithoutOriginal.create is original_api_without_original
assert FakeAPIWithOriginal.create is dummy_api
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/contracts/test_flow.py | from pathlib import Path
import pytest
from promptflow._sdk.entities._connection import AzureContentSafetyConnection
from promptflow.contracts._errors import FailedToImportModule
from promptflow.contracts.flow import (
Flow,
FlowInputAssignment,
FlowInputDefinition,
FlowOutputDefinition,
InputAssignment,
InputValueType,
Node,
NodeVariant,
NodeVariants,
ToolSource,
ToolSourceType,
)
from promptflow.contracts.tool import Tool, ToolType, ValueType
from ...utils import EAGER_FLOWS_ROOT, FLOW_ROOT, get_flow_folder, get_flow_package_tool_definition, get_yaml_file
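# Base folder for the package-tool sample flows referenced by the connection-related tests below.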
PACKAGE_TOOL_BASE = Path(__file__).parent.parent.parent / "package_tools"
@pytest.mark.e2etest
class TestFlowContract:
@pytest.mark.parametrize(
"flow_folder, expected_connection_names",
[
("web_classification", {"azure_open_ai_connection"}),
("basic-with-connection", {"azure_open_ai_connection"}),
("flow_with_dict_input_with_variant", {"mock_custom_connection"}),
],
)
def test_flow_get_connection_names(self, flow_folder, expected_connection_names):
flow_yaml = get_yaml_file(flow_folder)
flow = Flow.from_yaml(flow_yaml)
assert flow.get_connection_names() == expected_connection_names
def test_flow_get_connection_input_names_for_node_with_variants(self):
# Connection input exists only in python node
flow_folder = "flow_with_dict_input_with_variant"
flow_yaml = get_yaml_file(flow_folder)
flow = Flow.from_yaml(flow_yaml)
assert flow.get_connection_input_names_for_node("print_val") == ["conn"]
def test_flow_get_connection_names_with_package_tool(self, mocker):
flow_folder = PACKAGE_TOOL_BASE / "custom_llm_tool"
flow_file = flow_folder / "flow.dag.yaml"
package_tool_definition = get_flow_package_tool_definition(flow_folder)
mocker.patch("promptflow._core.tools_manager.collect_package_tools", return_value=package_tool_definition)
flow = Flow.from_yaml(flow_file)
connection_names = flow.get_connection_names()
assert connection_names == {"azure_open_ai_connection"}
def test_flow_get_connection_input_names_for_node(self, mocker):
flow_folder = PACKAGE_TOOL_BASE / "custom_llm_tool"
flow_file = flow_folder / "flow.dag.yaml"
package_tool_definition = get_flow_package_tool_definition(flow_folder)
mocker.patch("promptflow._core.tools_manager.collect_package_tools", return_value=package_tool_definition)
flow = Flow.from_yaml(flow_file)
connection_names = flow.get_connection_input_names_for_node(flow.nodes[0].name)
assert connection_names == ["connection", "connection_2"]
assert flow.get_connection_input_names_for_node("not_exist") == []
@pytest.mark.parametrize(
"flow_folder_name, environment_variables_overrides, except_environment_variables",
[
pytest.param(
"flow_with_environment_variables",
{"env2": "runtime_env2", "env10": "aaaaa"},
{
"env1": "2",
"env2": "runtime_env2",
"env3": "[1, 2, 3, 4, 5]",
"env4": '{"a": 1, "b": "2"}',
"env10": "aaaaa",
},
id="LoadEnvVariablesWithOverrides",
),
pytest.param(
"flow_with_environment_variables",
None,
{
"env1": "2",
"env2": "spawn",
"env3": "[1, 2, 3, 4, 5]",
"env4": '{"a": 1, "b": "2"}',
},
id="LoadEnvVariablesWithoutOverrides",
),
pytest.param(
"simple_hello_world",
{"env2": "runtime_env2", "env10": "aaaaa"},
{"env2": "runtime_env2", "env10": "aaaaa"},
id="LoadEnvVariablesWithoutYamlLevelEnvVariables",
),
],
)
def test_flow_get_environment_variables_with_overrides(
self, flow_folder_name, environment_variables_overrides, except_environment_variables
):
flow_folder = get_flow_folder(flow_folder_name)
flow_file = "flow.dag.yaml"
flow = Flow.from_yaml(flow_file=flow_file, working_dir=flow_folder)
merged_environment_variables = flow.get_environment_variables_with_overrides(
environment_variables_overrides=environment_variables_overrides,
)
assert merged_environment_variables == except_environment_variables
@pytest.mark.parametrize(
"flow_folder_name, folder_root, flow_file, environment_variables_overrides, except_environment_variables",
[
pytest.param(
"flow_with_environment_variables",
FLOW_ROOT,
"flow.dag.yaml",
{"env2": "runtime_env2", "env10": "aaaaa"},
{
"env1": "2",
"env2": "runtime_env2",
"env3": "[1, 2, 3, 4, 5]",
"env4": '{"a": 1, "b": "2"}',
"env10": "aaaaa",
},
id="LoadEnvVariablesWithOverrides",
),
pytest.param(
"flow_with_environment_variables",
FLOW_ROOT,
"flow.dag.yaml",
None,
{
"env1": "2",
"env2": "spawn",
"env3": "[1, 2, 3, 4, 5]",
"env4": '{"a": 1, "b": "2"}',
},
id="LoadEnvVariablesWithoutOverrides",
),
pytest.param(
"simple_hello_world",
FLOW_ROOT,
"flow.dag.yaml",
{"env2": "runtime_env2", "env10": "aaaaa"},
{"env2": "runtime_env2", "env10": "aaaaa"},
id="LoadEnvVariablesWithoutYamlLevelEnvVariables",
),
pytest.param(
"simple_with_yaml",
EAGER_FLOWS_ROOT,
"entry.py",
None,
{},
id="LoadEnvVariablesForEagerFlow",
),
pytest.param(
"simple_with_yaml",
EAGER_FLOWS_ROOT,
"entry.py",
{"env2": "runtime_env2", "env10": "aaaaa"},
{"env2": "runtime_env2", "env10": "aaaaa"},
id="LoadEnvVariablesForEagerFlowWithOverrides",
),
],
)
def test_load_env_variables(
self, flow_folder_name, folder_root, flow_file, environment_variables_overrides, except_environment_variables
):
flow_folder = get_flow_folder(flow_folder_name, folder_root)
merged_environment_variables = Flow.load_env_variables(
flow_file=flow_file,
working_dir=flow_folder,
environment_variables_overrides=environment_variables_overrides,
)
assert merged_environment_variables == except_environment_variables
@pytest.mark.unittest
class TestFlow:
@pytest.mark.parametrize(
"flow, expected_value",
[
(
Flow(id="flow_id", name="flow_name", nodes=[], inputs={}, outputs={}, tools=[]),
{
"id": "flow_id",
"name": "flow_name",
"nodes": [],
"inputs": {},
"outputs": {},
"tools": [],
"language": "python",
},
),
(
Flow(
id="flow_id",
name="flow_name",
nodes=[Node(name="node1", tool="tool1", inputs={})],
inputs={"input1": FlowInputDefinition(type=ValueType.STRING)},
outputs={"output1": FlowOutputDefinition(type=ValueType.STRING, reference=None)},
tools=[],
),
{
"id": "flow_id",
"name": "flow_name",
"nodes": [{"name": "node1", "tool": "tool1", "inputs": {}}],
"inputs": {"input1": {"type": ValueType.STRING.value}},
"outputs": {"output1": {"type": ValueType.STRING.value}},
"tools": [],
"language": "python",
},
),
],
)
def test_flow_serialize(self, flow, expected_value):
assert flow.serialize() == expected_value
@pytest.mark.parametrize(
"data, expected_value",
[
(
{
"id": "flow_id",
"name": "flow_name",
"nodes": [{"name": "node1", "tool": "tool1", "inputs": {}, "outputs": {}}],
"inputs": {"input1": {"type": ValueType.STRING.value}},
"outputs": {"output1": {"type": ValueType.STRING.value}},
"tools": [],
},
Flow(
id="flow_id",
name="flow_name",
nodes=[Node(name="node1", tool="tool1", inputs={})],
inputs={
"input1": FlowInputDefinition(
type=ValueType.STRING, description="", enum=[], is_chat_input=False, is_chat_history=None
)
},
outputs={
"output1": FlowOutputDefinition(
type=ValueType.STRING,
reference=InputAssignment(
value="", value_type=InputValueType.LITERAL, section="", property=""
),
description="",
evaluation_only=False,
is_chat_output=False,
)
},
tools=[],
node_variants={},
program_language="python",
environment_variables={},
),
),
],
)
def test_flow_deserialize(self, data, expected_value):
assert Flow.deserialize(data) == expected_value
def test_import_requisites(self):
tool1 = Tool(name="tool1", type=ToolType.PYTHON, inputs={}, module="yaml")
tool2 = Tool(name="tool2", type=ToolType.PYTHON, inputs={}, module="module")
node1 = Node(name="node1", tool="tool1", inputs={}, module="yaml")
node2 = Node(name="node2", tool="tool2", inputs={}, module="module")
with pytest.raises(FailedToImportModule) as e:
Flow._import_requisites([tool1], [node2])
assert str(e.value).startswith(
"Failed to import modules with error: Import node 'node2' provider module 'module' failed."
)
with pytest.raises(FailedToImportModule) as e:
Flow._import_requisites([tool2], [node1])
assert str(e.value).startswith(
"Failed to import modules with error: Import tool 'tool2' module 'module' failed."
)
def test_apply_default_node_variants(self):
node_variant = NodeVariant(
node=Node(name="print_val_variant", tool=None, inputs={"input2": None}, use_variants=False),
description=None,
)
node_variants = {
"print_val": NodeVariants(
default_variant_id="variant1",
variants={"variant1": node_variant},
)
}
flow1 = Flow(
id="test_flow_id",
name="test_flow",
nodes=[Node(name="print_val", tool=None, inputs={"input1": None}, use_variants=True)],
inputs={},
outputs={},
tools=[],
node_variants=node_variants,
)
# test when node.use_variants is True
flow1._apply_default_node_variants()
assert flow1.nodes[0].use_variants is False
assert flow1.nodes[0].inputs.keys() == {"input2"}
assert flow1.nodes[0].name == "print_val"
flow2 = Flow(
id="test_flow_id",
name="test_flow",
nodes=[Node(name="print_val", tool=None, inputs={"input1": None}, use_variants=False)],
inputs={},
outputs={},
tools=[],
node_variants=node_variants,
)
# test when node.use_variants is False
tmp_nodes = flow2.nodes
flow2._apply_default_node_variants()
assert flow2.nodes == tmp_nodes
@pytest.mark.parametrize(
"node_variants",
[
(None),
(
{
"test": NodeVariants(
default_variant_id="variant1",
variants={
"variant1": NodeVariant(
node=Node(name="print_val_variant", tool=None, inputs={"input2": None})
)
},
)
}
),
(
{
"print_val": NodeVariants(
default_variant_id="test",
variants={
"variant1": NodeVariant(
node=Node(name="print_val_variant", tool=None, inputs={"input2": None})
)
},
)
}
),
],
)
def test_apply_default_node_variant(self, node_variants):
node = Node(name="print_val", tool=None, inputs={"input1": None}, use_variants=True)
assert Flow._apply_default_node_variant(node, node_variants) == node
def test_apply_node_overrides(self):
llm_node = Node(name="llm_node", tool=None, inputs={}, connection="open_ai_connection")
test_node = Node(
name="test_node", tool=None, inputs={"test": InputAssignment("test_value1", InputValueType.LITERAL)}
)
flow = Flow(id="test_flow_id", name="test_flow", nodes=[llm_node, test_node], inputs={}, outputs={}, tools=[])
assert flow == flow._apply_node_overrides(None)
assert flow == flow._apply_node_overrides({})
node_overrides = {
"other_node.connection": "some_connection",
}
with pytest.raises(ValueError):
flow._apply_node_overrides(node_overrides)
node_overrides = {
"llm_node.connection": "custom_connection",
"test_node.test": "test_value2",
}
flow = flow._apply_node_overrides(node_overrides)
assert flow.nodes[0].connection == "custom_connection"
assert flow.nodes[1].inputs["test"].value == "test_value2"
def test_has_aggregation_node(self):
llm_node = Node(name="llm_node", tool=None, inputs={})
aggre_node = Node(name="aggre_node", tool=None, inputs={}, aggregation=True)
flow1 = Flow(id="id", name="name", nodes=[llm_node], inputs={}, outputs={}, tools=[])
assert not flow1.has_aggregation_node()
flow2 = Flow(id="id", name="name", nodes=[llm_node, aggre_node], inputs={}, outputs={}, tools=[])
assert flow2.has_aggregation_node()
def test_get_node(self):
llm_node = Node(name="llm_node", tool=None, inputs={})
flow = Flow(id="id", name="name", nodes=[llm_node], inputs={}, outputs={}, tools=[])
assert flow.get_node("llm_node") is llm_node
assert flow.get_node("other_node") is None
def test_get_tool(self):
tool = Tool(name="tool", type=ToolType.PYTHON, inputs={})
flow = Flow(id="id", name="name", nodes=[], inputs={}, outputs={}, tools=[tool])
assert flow.get_tool("tool") is tool
assert flow.get_tool("other_tool") is None
def test_is_reduce_node(self):
llm_node = Node(name="llm_node", tool=None, inputs={})
aggre_node = Node(name="aggre_node", tool=None, inputs={}, aggregation=True)
flow = Flow(id="id", name="name", nodes=[llm_node, aggre_node], inputs={}, outputs={}, tools=[])
assert not flow.is_reduce_node("llm_node")
assert flow.is_reduce_node("aggre_node")
def test_is_normal_node(self):
llm_node = Node(name="llm_node", tool=None, inputs={})
aggre_node = Node(name="aggre_node", tool=None, inputs={}, aggregation=True)
flow = Flow(id="id", name="name", nodes=[llm_node, aggre_node], inputs={}, outputs={}, tools=[])
assert flow.is_normal_node("llm_node")
assert not flow.is_normal_node("aggre_node")
def test_is_llm_node(self):
llm_node = Node(name="llm_node", tool=None, inputs={}, type=ToolType.LLM)
aggre_node = Node(name="aggre_node", tool=None, inputs={}, aggregation=True)
flow = Flow(id="id", name="name", nodes=[llm_node, aggre_node], inputs={}, outputs={}, tools=[])
assert flow.is_llm_node(llm_node)
assert not flow.is_llm_node(aggre_node)
def test_is_referenced_by_flow_output(self):
llm_node = Node(name="llm_node", tool=None, inputs={})
aggre_node = Node(name="aggre_node", tool=None, inputs={}, aggregation=True)
output = {
"output": FlowOutputDefinition(
type=ValueType.STRING, reference=InputAssignment("llm_node", InputValueType.NODE_REFERENCE, "output")
)
}
flow = Flow(id="id", name="name", nodes=[llm_node, aggre_node], inputs={}, outputs=output, tools=[])
assert flow.is_referenced_by_flow_output(llm_node)
assert not flow.is_referenced_by_flow_output(aggre_node)
def test_is_node_referenced_by(self):
llm_node = Node(name="llm_node", tool=None, inputs={})
aggre_node = Node(
name="aggre_node",
tool=None,
inputs={"input": InputAssignment(value="llm_node", value_type=InputValueType.NODE_REFERENCE)},
aggregation=True,
)
flow = Flow(id="id", name="name", nodes=[llm_node, aggre_node], inputs={}, outputs={}, tools=[])
assert not flow.is_node_referenced_by(aggre_node, llm_node)
assert flow.is_node_referenced_by(llm_node, aggre_node)
def test_is_referenced_by_other_node(self):
llm_node = Node(name="llm_node", tool=None, inputs={})
aggre_node = Node(
name="aggre_node",
tool=None,
inputs={"input": InputAssignment(value="llm_node", value_type=InputValueType.NODE_REFERENCE)},
aggregation=True,
)
flow = Flow(id="id", name="name", nodes=[llm_node, aggre_node], inputs={}, outputs={}, tools=[])
assert not flow.is_referenced_by_other_node(aggre_node)
assert flow.is_referenced_by_other_node(llm_node)
def test_is_chat_flow(self):
chat_input = {"question": FlowInputDefinition(type=ValueType.STRING, is_chat_input=True)}
standard_flow = Flow(id="id", name="name", nodes=[], inputs={}, outputs={}, tools=[])
chat_flow = Flow(id="id", name="name", nodes=[], inputs=chat_input, outputs={}, tools=[])
assert not standard_flow.is_chat_flow()
assert chat_flow.is_chat_flow()
def test_get_chat_input_name(self):
chat_input = {"question": FlowInputDefinition(type=ValueType.STRING, is_chat_input=True)}
standard_flow = Flow(id="id", name="name", nodes=[], inputs={}, outputs={}, tools=[])
chat_flow = Flow(id="id", name="name", nodes=[], inputs=chat_input, outputs={}, tools=[])
assert standard_flow.get_chat_input_name() is None
assert chat_flow.get_chat_input_name() == "question"
def test_get_chat_output_name(self):
chat_output = {"answer": FlowOutputDefinition(type=ValueType.STRING, reference=None, is_chat_output=True)}
standard_flow = Flow(id="id", name="name", nodes=[], inputs={}, outputs={}, tools=[])
chat_flow = Flow(id="id", name="name", nodes=[], inputs={}, outputs=chat_output, tools=[])
assert standard_flow.get_chat_output_name() is None
assert chat_flow.get_chat_output_name() == "answer"
def test_replace_with_variant(self):
node0 = Node(name="node0", tool=None, inputs={"input0": None}, use_variants=True)
node1 = Node(name="node1", tool="tool1", inputs={"input1": None}, use_variants=False)
node2 = Node(name="node2", tool="tool2", inputs={"input2": None}, use_variants=False)
node_variant = Node(name="node0", tool="tool3", inputs={"input3": None}, use_variants=False)
node_variants = {
"print_val": NodeVariants(
default_variant_id="variant1",
variants={"variant1": NodeVariant(node_variant, None)},
)
}
flow = Flow("test_flow_id", "test_flow", [node0, node1, node2], {}, {}, [], node_variants)
# flow = Flow.from_yaml(get_yaml_file("web_classification"))
tool_cnt = len(flow.tools)
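        # Replacing node0 with its variant should swap in the variant's tool and inputs and append the two referenced tools.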
flow._replace_with_variant(node_variant, [flow.nodes[1].tool, flow.nodes[2].tool])
assert "input3" in flow.nodes[0].inputs
assert flow.nodes[0].tool == "tool3"
assert len(flow.tools) == tool_cnt + 2
@pytest.mark.unittest
class TestInputAssignment:
@pytest.mark.parametrize(
"value, expected_value",
[
(InputAssignment("value", InputValueType.LITERAL), "value"),
(InputAssignment("value", InputValueType.FLOW_INPUT), "${flow.value}"),
(InputAssignment("value", InputValueType.NODE_REFERENCE, "section"), "${value.section}"),
(
InputAssignment("value", InputValueType.NODE_REFERENCE, "section", "property"),
"${value.section.property}",
),
(InputAssignment(AzureContentSafetyConnection, InputValueType.LITERAL, "section", "property"), "ABCMeta"),
],
)
def test_serialize(self, value, expected_value):
assert value.serialize() == expected_value
@pytest.mark.parametrize(
"serialized_value, expected_value",
[
(
"${value.section.property}",
InputAssignment("value", InputValueType.NODE_REFERENCE, "section", "property"),
),
(
"${flow.section.property}",
FlowInputAssignment("section.property", prefix="flow.", value_type=InputValueType.FLOW_INPUT),
),
("${value}", InputAssignment("value", InputValueType.NODE_REFERENCE, "output")),
("$value", InputAssignment("$value", InputValueType.LITERAL)),
("value", InputAssignment("value", InputValueType.LITERAL)),
],
)
def test_deserialize(self, serialized_value, expected_value):
input_assignment = InputAssignment.deserialize(serialized_value)
assert input_assignment == expected_value
@pytest.mark.parametrize(
"serialized_reference, expected_value",
[
("input", InputAssignment("input", InputValueType.NODE_REFERENCE, "output")),
("flow.section", FlowInputAssignment("section", value_type=InputValueType.FLOW_INPUT, prefix="flow.")),
(
"flow.section.property",
FlowInputAssignment("section.property", value_type=InputValueType.FLOW_INPUT, prefix="flow."),
),
],
)
def test_deserialize_reference(self, serialized_reference, expected_value):
assert InputAssignment.deserialize_reference(serialized_reference) == expected_value
@pytest.mark.parametrize(
"serialized_node_reference, expected_value",
[
("value", InputAssignment("value", InputValueType.NODE_REFERENCE, "output")),
("value.section", InputAssignment("value", InputValueType.NODE_REFERENCE, "section")),
("value.section.property", InputAssignment("value", InputValueType.NODE_REFERENCE, "section", "property")),
],
)
def test_deserialize_node_reference(self, serialized_node_reference, expected_value):
assert InputAssignment.deserialize_node_reference(serialized_node_reference) == expected_value
@pytest.mark.unittest
class TestFlowInputAssignment:
@pytest.mark.parametrize(
"input_value, expected_value",
[
("flow.section.property", True),
("inputs.section.property", True),
("section.property", False),
("", False),
],
)
def test_is_flow_input(self, input_value, expected_value):
assert FlowInputAssignment.is_flow_input(input_value) == expected_value
def test_deserialize(self):
expected_input = FlowInputAssignment("section.property", prefix="inputs.", value_type=InputValueType.FLOW_INPUT)
assert FlowInputAssignment.deserialize("inputs.section.property") == expected_input
expected_flow = FlowInputAssignment("section.property", prefix="flow.", value_type=InputValueType.FLOW_INPUT)
assert FlowInputAssignment.deserialize("flow.section.property") == expected_flow
with pytest.raises(ValueError):
FlowInputAssignment.deserialize("value")
@pytest.mark.unittest
class TestToolSource:
@pytest.mark.parametrize(
"tool_source, expected_value",
[
({}, ToolSource(type=ToolSourceType.Code)),
({"type": ToolSourceType.Code.value}, ToolSource(type=ToolSourceType.Code)),
(
{"type": ToolSourceType.Package.value, "tool": "tool", "path": "path"},
ToolSource(type=ToolSourceType.Package, tool="tool", path="path"),
),
],
)
def test_deserialize(self, tool_source, expected_value):
assert ToolSource.deserialize(tool_source) == expected_value
@pytest.mark.unittest
class TestNode:
@pytest.mark.parametrize(
"node, expected_value",
[
(
Node(name="test_node", tool="test_tool", inputs={}),
{"name": "test_node", "tool": "test_tool", "inputs": {}},
),
(
Node(name="test_node", tool="test_tool", inputs={}, aggregation=True),
{"name": "test_node", "tool": "test_tool", "inputs": {}, "aggregation": True, "reduce": True},
),
],
)
def test_serialize(self, node, expected_value):
assert node.serialize() == expected_value
@pytest.mark.parametrize(
"data, expected_value",
[
(
{"name": "test_node", "tool": "test_tool", "inputs": {}},
Node(name="test_node", tool="test_tool", inputs={}),
),
(
{"name": "test_node", "tool": "test_tool", "inputs": {}, "aggregation": True},
Node(name="test_node", tool="test_tool", inputs={}, aggregation=True),
),
],
)
def test_deserialize(self, data, expected_value):
assert Node.deserialize(data) == expected_value
@pytest.mark.unittest
class TestFlowInputDefinition:
@pytest.mark.parametrize(
"value, expected_value",
[
(FlowInputDefinition(type=ValueType.BOOL), {"type": ValueType.BOOL.value}),
(
FlowInputDefinition(
type=ValueType.STRING,
default="default",
description="description",
enum=["enum1", "enum2"],
is_chat_input=True,
is_chat_history=True,
),
{
"type": ValueType.STRING.value,
"default": "default",
"description": "description",
"enum": ["enum1", "enum2"],
"is_chat_input": True,
"is_chat_history": True,
},
),
],
)
def test_serialize(self, value, expected_value):
assert value.serialize() == expected_value
@pytest.mark.parametrize(
"data, expected_value",
[
(
{
"type": ValueType.STRING,
"default": "default",
"description": "description",
"enum": ["enum1", "enum2"],
"is_chat_input": True,
"is_chat_history": True,
},
FlowInputDefinition(
type=ValueType.STRING,
default="default",
description="description",
enum=["enum1", "enum2"],
is_chat_input=True,
is_chat_history=True,
),
),
(
{
"type": ValueType.STRING,
},
FlowInputDefinition(
type=ValueType.STRING, description="", enum=[], is_chat_input=False, is_chat_history=None
),
),
],
)
def test_deserialize(self, data, expected_value):
assert FlowInputDefinition.deserialize(data) == expected_value
@pytest.mark.unittest
class TestFlowOutputDefinition:
@pytest.mark.parametrize(
"value, expected_value",
[
(FlowOutputDefinition(type=ValueType.BOOL, reference=None), {"type": ValueType.BOOL.value}),
(
FlowOutputDefinition(
type=ValueType.STRING,
reference=InputAssignment("value", InputValueType.NODE_REFERENCE),
description="description",
evaluation_only=True,
is_chat_output=True,
),
{
"type": ValueType.STRING.value,
"reference": "${value.}",
"description": "description",
"evaluation_only": True,
"is_chat_output": True,
},
),
],
)
def test_serialize(self, value, expected_value):
assert value.serialize() == expected_value
@pytest.mark.parametrize(
"data, expected_value",
[
(
{
"type": ValueType.STRING,
},
FlowOutputDefinition(
type=ValueType.STRING,
reference=InputAssignment("", InputValueType.LITERAL),
),
),
],
)
def test_deserialize(self, data, expected_value):
assert FlowOutputDefinition.deserialize(data) == expected_value
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/contracts/test_run_mode.py | import pytest
from promptflow.contracts.run_mode import RunMode
@pytest.mark.unittest
@pytest.mark.parametrize(
"run_mode, expected",
[
("Test", RunMode.Test),
("SingleNode", RunMode.SingleNode),
("Batch", RunMode.Batch),
("Default", RunMode.Test),
],
)
def test_parse(run_mode, expected):
assert RunMode.parse(run_mode) == expected
@pytest.mark.unittest
def test_parse_invalid():
with pytest.raises(ValueError):
RunMode.parse(123)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/contracts/test_multimedia.py | import pytest
from promptflow.contracts.multimedia import Image, PFBytes
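# Every case below uses the bytes b"test": its sha1 digest starts with "a94a8fe5" and its base64 form is "dGVzdA==".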
@pytest.mark.unittest
class TestMultimediaContract:
@pytest.mark.parametrize(
"value, mime_type, source_url",
[
(b"test", "image/*", None),
(b"test", "image/jpg", None),
(b"test", "image/png", None),
(b"test", None, None),
(b"test", "image/*", "mock_url"),
]
)
def test_image_contract(self, value, mime_type, source_url):
image = Image(value, mime_type, source_url)
if mime_type is None:
mime_type = "image/*"
assert image._mime_type == mime_type
assert image._hash == "a94a8fe5"
assert image.to_base64() == "dGVzdA=="
assert image.to_base64(with_type=True) == f"data:{mime_type};base64,dGVzdA=="
assert image.to_base64(with_type=True, dict_type=True) == {f"data:{mime_type};base64": "dGVzdA=="}
assert bytes(image) == value
assert image.source_url == source_url
assert str(image) == "Image(a94a8fe5)"
assert repr(image) == "Image(a94a8fe5)"
assert image.serialize() == "Image(a94a8fe5)"
assert image.serialize(lambda x: x.to_base64()) == "dGVzdA=="
@pytest.mark.parametrize(
"value, mime_type, source_url",
[
(b"test", "image/*", None),
(b"test", "image/jpg", None),
(b"test", "image/png", None),
(b"test", "image/*", "mock_url"),
]
)
def test_pfbytes_contract(self, value, mime_type, source_url):
pfBytes = PFBytes(value, mime_type, source_url)
assert pfBytes._mime_type == mime_type
assert pfBytes._hash == "a94a8fe5"
assert pfBytes.to_base64() == "dGVzdA=="
assert pfBytes.to_base64(with_type=True) == f"data:{mime_type};base64,dGVzdA=="
assert pfBytes.to_base64(with_type=True, dict_type=True) == {f"data:{mime_type};base64": "dGVzdA=="}
assert bytes(pfBytes) == value
assert pfBytes.source_url == source_url
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/contracts/test_types.py | import pytest
from promptflow.contracts.types import AssistantDefinition, Secret, PromptTemplate, FilePath
from promptflow.executor._assistant_tool_invoker import AssistantToolInvoker
@pytest.mark.unittest
def test_secret():
secret = Secret('my_secret')
secret.set_secret_name('secret_name')
assert secret.secret_name == 'secret_name'
@pytest.mark.unittest
def test_prompt_template():
prompt = PromptTemplate('my_prompt')
assert isinstance(prompt, str)
assert str(prompt) == 'my_prompt'
@pytest.mark.unittest
def test_file_path():
file_path = FilePath('my_file_path')
assert isinstance(file_path, str)
@pytest.mark.unittest
def test_assistant_definition():
data = {"model": "model", "instructions": "instructions", "tools": []}
assistant_definition = AssistantDefinition.deserialize(data)
assert isinstance(assistant_definition, AssistantDefinition)
assert assistant_definition.model == "model"
assert assistant_definition.instructions == "instructions"
assert assistant_definition.tools == []
assert assistant_definition.serialize() == data
assert isinstance(assistant_definition.init_tool_invoker(), AssistantToolInvoker)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/contracts/test_tool.py | from enum import Enum
from typing import Any, Callable, NewType, Optional, Tuple, TypeVar, Union
import pytest
from promptflow._core.tools_manager import connections
from promptflow._sdk.entities import CustomStrongTypeConnection
from promptflow._sdk.entities._connection import AzureContentSafetyConnection
from promptflow.contracts.multimedia import Image
from promptflow.contracts.run_info import Status
from promptflow.contracts.tool import (
AssistantDefinition,
ConnectionType,
InputDefinition,
OutputDefinition,
Tool,
ToolType,
ValueType,
_deserialize_enum,
)
from promptflow.contracts.types import FilePath, PromptTemplate, Secret
class MyConnection(CustomStrongTypeConnection):
pass
my_connection = MyConnection(name="my_connection", secrets={"key": "value"})
def some_function():
pass
class TestStatus(Enum):
Running = 1
Preparing = 2
Completed = 3
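# TestStatus has non-string values; the parametrized case below expects _deserialize_enum
# to return such inputs unchanged rather than mapping them to an enum member.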
@pytest.mark.unittest
@pytest.mark.parametrize(
"enum, value, expected",
[
(Status, "Running", Status.Running),
(Status, "running", Status.Running),
(Status, "FAILED", Status.Failed),
(Status, "UNKNOWN", "UNKNOWN"),
(TestStatus, "Running", "Running"),
],
)
def test_deserialize_enum(enum, value, expected):
assert _deserialize_enum(enum, value) == expected
@pytest.mark.unittest
class TestValueType:
@pytest.mark.parametrize(
"value, expected",
[
(1, ValueType.INT),
(1.0, ValueType.DOUBLE),
(True, ValueType.BOOL),
("string", ValueType.STRING),
([], ValueType.LIST),
({}, ValueType.OBJECT),
(Secret("secret"), ValueType.SECRET),
(PromptTemplate("prompt"), ValueType.PROMPT_TEMPLATE),
(FilePath("file_path"), ValueType.FILE_PATH),
(AssistantDefinition("model", "instructions", []), ValueType.ASSISTANT_DEFINITION),
],
)
def test_from_value(self, value, expected):
assert ValueType.from_value(value) == expected
@pytest.mark.parametrize(
"value, expected",
[
(int, ValueType.INT),
(float, ValueType.DOUBLE),
(bool, ValueType.BOOL),
(str, ValueType.STRING),
(list, ValueType.LIST),
(dict, ValueType.OBJECT),
(Secret, ValueType.SECRET),
(PromptTemplate, ValueType.PROMPT_TEMPLATE),
(FilePath, ValueType.FILE_PATH),
(Image, ValueType.IMAGE),
(AssistantDefinition, ValueType.ASSISTANT_DEFINITION),
],
)
def test_from_type(self, value, expected):
assert ValueType.from_type(value) == expected
@pytest.mark.parametrize(
"value, value_type, expected",
[
("1", ValueType.INT, 1),
("1.0", ValueType.DOUBLE, 1.0),
("true", ValueType.BOOL, True),
("false", ValueType.BOOL, False),
(True, ValueType.BOOL, True),
(123, ValueType.STRING, "123"),
('["a", "b", "c"]', ValueType.LIST, ["a", "b", "c"]),
('{"key": "value"}', ValueType.OBJECT, {"key": "value"}),
("[1, 2, 3]", ValueType.OBJECT, [1, 2, 3]),
("{", ValueType.OBJECT, "{"),
([1, 2, 3], ValueType.OBJECT, [1, 2, 3]),
],
)
def test_parse(self, value, value_type, expected):
assert value_type.parse(value) == expected
@pytest.mark.parametrize(
"value, value_type",
[
("1", ValueType.BOOL),
({}, ValueType.LIST),
],
)
def test_parse_error(self, value, value_type):
with pytest.raises(ValueError):
value_type.parse(value)
@pytest.mark.unittest
class TestConnectionType:
@pytest.mark.parametrize(
"type_name, expected",
[
("AzureContentSafetyConnection", connections.get("AzureContentSafetyConnection")),
("AzureOpenAIConnection", connections.get("AzureOpenAIConnection")),
("_Connection", connections.get("_Connection")),
("unknown", None),
(123, None),
],
)
def test_get_connection_class(self, type_name, expected):
assert ConnectionType.get_connection_class(type_name) == expected
@pytest.mark.parametrize(
"type_name, expected",
[
("AzureContentSafetyConnection", True),
("AzureOpenAIConnection", True),
("_Connection", True),
("unknown", False),
(123, False),
],
)
def test_is_connection_class_name(self, type_name, expected):
assert ConnectionType.is_connection_class_name(type_name) == expected
@pytest.mark.parametrize(
"value, expected",
[
(connections.get("AzureContentSafetyConnection"), True),
(AzureContentSafetyConnection("api_key", "endpoint"), True),
(Status, False),
            ("non_connection_instance", False),
],
)
def test_is_connection_value(self, value, expected):
assert ConnectionType.is_connection_value(value) == expected
@pytest.mark.parametrize(
"val, expected_res",
[
(my_connection, True),
(MyConnection, True),
(list, False),
# (list[str], False), # Python 3.9
# (list[int], False),
([1, 2, 3], False),
(float, False),
(int, False),
(5, False),
(str, False),
(some_function, False),
(Union[str, int], False),
# ((int | str), False), # Python 3.10
(tuple, False),
# (tuple[str, int], False), # Python 3.9
(Tuple[int, ...], False),
# (dict[str, Any], False), # Python 3.9
({"test1": [1, 2, 3], "test2": [4, 5, 6], "test3": [7, 8, 9]}, False),
(Any, False),
(None, False),
(Optional[str], False),
(TypeVar("T"), False),
(TypeVar, False),
(Callable, False),
(Callable[..., Any], False),
(NewType("MyType", int), False),
],
)
def test_is_custom_strong_type(self, val, expected_res):
assert ConnectionType.is_custom_strong_type(val) == expected_res
def test_serialize_conn(self):
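        # Serializing the class object itself yields its metaclass name ("ABCMeta"); instances serialize to the class name.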
assert ConnectionType.serialize_conn(AzureContentSafetyConnection) == "ABCMeta"
connection_instance = AzureContentSafetyConnection("api_key", "endpoint")
assert ConnectionType.serialize_conn(connection_instance) == "AzureContentSafetyConnection"
with pytest.raises(ValueError):
ConnectionType.serialize_conn("non_connection_instance")
@pytest.mark.unittest
class TestInputDefinition:
def test_serialize(self):
# test when len(type) == 1
input_def = InputDefinition(
[ValueType.STRING],
default="Default",
description="Description",
enum=["A", "B", "C"],
custom_type=["customtype"],
)
serialized = input_def.serialize()
assert serialized == {
"type": "string",
"default": "Default",
"description": "Description",
"enum": ["A", "B", "C"],
"custom_type": ["customtype"],
}
# test when len(type) > 1
input_def = InputDefinition([ValueType.STRING, ValueType.INT])
serialized = input_def.serialize()
assert serialized == {"type": ["string", "int"]}
def test_deserialize(self):
serialized = {"type": "string", "default": "Default", "description": "Description", "enum": ["A", "B", "C"]}
deserialized = InputDefinition.deserialize(serialized)
assert deserialized.type == [ValueType.STRING]
assert deserialized.default == "Default"
assert deserialized.description == "Description"
assert deserialized.enum == ["A", "B", "C"]
serialized = {"type": ["string", "int"]}
deserialized = InputDefinition.deserialize(serialized)
assert deserialized.type == [ValueType.STRING, ValueType.INT]
assert deserialized.default == ""
assert deserialized.description == ""
assert deserialized.enum == []
@pytest.mark.unittest
class TestOutDefinition:
@pytest.mark.parametrize(
"value, expected",
[
(
OutputDefinition([ValueType.STRING], description="Description", is_property=True),
{"type": "string", "description": "Description", "is_property": True},
),
(OutputDefinition([ValueType.STRING, ValueType.INT]), {"type": ["string", "int"], "is_property": False}),
],
)
def test_serialize(self, value, expected):
assert value.serialize() == expected
@pytest.mark.parametrize(
"value, expected",
[
(
{"type": "string", "description": "Description", "is_property": True},
OutputDefinition([ValueType.STRING], description="Description", is_property=True),
),
({"type": ["string", "int"]}, OutputDefinition([ValueType.STRING, ValueType.INT])),
],
)
def test_deserialize(self, value, expected):
assert OutputDefinition.deserialize(value) == expected
@pytest.mark.unittest
class TestTool:
@pytest.mark.parametrize(
"tool_type, expected_keys",
[
(ToolType._ACTION, ["name", "description", "enable_kwargs"]),
(ToolType.LLM, ["name", "type", "inputs", "description", "enable_kwargs"]),
],
)
def test_serialize_tool(self, tool_type, expected_keys):
tool = Tool(name="test_tool", type=tool_type, inputs={}, outputs={}, description="description")
serialized_tool = tool.serialize()
assert set(serialized_tool.keys()) == set(expected_keys)
def test_deserialize_tool(self):
data = {
"name": "test_tool",
"type": "LLM",
"inputs": {"input1": {"type": "ValueType1"}},
}
tool = Tool.deserialize(data)
assert tool.name == data["name"]
assert tool.type == ToolType[data["type"]]
assert "input1" in tool.inputs
@pytest.mark.parametrize(
"tooltype, connection_type, expected",
[
(ToolType.LLM, None, True),
(ToolType._ACTION, ["AzureContentSafetyConnection"], True),
(ToolType._ACTION, None, False),
],
)
def test_require_connection(self, tooltype, connection_type, expected):
tool = Tool(name="Test Tool", type=tooltype, inputs={}, connection_type=connection_type)
assert tool._require_connection() == expected
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/contracts/test_run_info.py | from datetime import datetime
import pytest
from promptflow.contracts.run_info import FlowRunInfo, RunInfo, Status
@pytest.mark.unittest
class TestStatus:
@pytest.mark.parametrize(
"status,expected",
[
(Status.Completed, True),
(Status.Failed, True),
(Status.Bypassed, True),
(Status.Canceled, True),
(Status.Running, False),
(Status.Preparing, False),
(Status.NotStarted, False),
(Status.CancelRequested, False),
(123, False),
],
)
def test_status_is_terminated(self, status, expected):
assert Status.is_terminated(status) == expected
@pytest.mark.unittest
class TestRunInfo:
def test_creation(self):
run_info = RunInfo(
node="node1",
flow_run_id="123",
run_id="123:456",
status=Status.Running,
inputs=[],
output={},
metrics={},
error={},
parent_run_id="789",
start_time=datetime.now(),
end_time=datetime.now(),
system_metrics={},
)
assert run_info.node == "node1"
assert run_info.flow_run_id == "123"
assert run_info.run_id == "123:456"
assert run_info.status == Status.Running
def test_deserialize(self):
run_info_dict = {
"node": "get_answer",
"flow_run_id": "",
"run_id": "dummy_run_id",
"status": "Completed",
"inputs": {"question": "string"},
"output": "Hello world: What's promptflow?",
"metrics": None,
"error": None,
"parent_run_id": "dummy_flow_run_id",
"start_time": "2023-11-24T06:03:20.2688262Z",
"end_time": "2023-11-24T06:03:20.268858Z",
"index": 0,
"api_calls": None,
"variant_id": "",
"cached_run_id": None,
"cached_flow_run_id": None,
"logs": None,
"system_metrics": {"duration": "00:00:00.0000318", "total_tokens": 0},
"result": "Hello world: What's promptflow?",
}
run_info = RunInfo.deserialize(run_info_dict)
assert run_info.index == 0
assert isinstance(run_info.start_time, datetime) and isinstance(run_info.end_time, datetime)
assert run_info.status == Status.Completed
assert run_info.run_id == "dummy_run_id"
assert run_info.api_calls is None
assert run_info.system_metrics == {"duration": "00:00:00.0000318", "total_tokens": 0}
assert run_info.output == "Hello world: What's promptflow?"
@pytest.mark.unittest
class TestFlowRunInfo:
def test_creation(self):
flow_run_info = FlowRunInfo(
run_id="123:456",
status=Status.Running,
error={},
inputs={},
output={},
metrics={},
request={},
parent_run_id="789",
root_run_id="123",
source_run_id="456",
flow_id="flow1",
start_time=datetime.now(),
end_time=datetime.now(),
system_metrics={},
upload_metrics=False,
)
assert flow_run_info.run_id == "123:456"
assert flow_run_info.status == Status.Running
assert flow_run_info.flow_id == "flow1"
def test_deserialize(self):
flow_run_info_dict = {
"run_id": "dummy_run_id",
"status": "Completed",
"error": None,
"inputs": {"question": "What's promptflow?"},
"output": {"answer": "Hello world: What's promptflow?"},
"metrics": None,
"request": None,
"parent_run_id": None,
"root_run_id": None,
"source_run_id": None,
"flow_id": "Flow",
"start_time": "2023-11-23T10:58:37.9436245Z",
"end_time": "2023-11-23T10:58:37.9590789Z",
"index": 0,
"api_calls": None,
"variant_id": "",
"name": "",
"description": "",
"tags": None,
"system_metrics": {"duration": "00:00:00.0154544", "total_tokens": 0},
"result": {"answer": "Hello world: What's promptflow?"},
"upload_metrics": False,
}
flow_run_info = FlowRunInfo.deserialize(flow_run_info_dict)
assert flow_run_info.index == 0
assert isinstance(flow_run_info.start_time, datetime) and isinstance(flow_run_info.end_time, datetime)
assert flow_run_info.status == Status.Completed
assert flow_run_info.run_id == "dummy_run_id"
assert flow_run_info.api_calls is None
assert flow_run_info.system_metrics == {"duration": "00:00:00.0154544", "total_tokens": 0}
assert flow_run_info.output == {"answer": "Hello world: What's promptflow?"}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/contracts/test_run_management.py | import json
import pytest
from promptflow._sdk._constants import VIS_JS_BUNDLE_FILENAME
from promptflow.contracts._run_management import VisualizationRender
@pytest.mark.unittest
def test_visualization_render():
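    # VisualizationRender is expected to JSON-encode the payload twice, producing a JSON string literal that itself contains JSON.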
data = {"key": "value"}
viz = VisualizationRender(data)
assert viz.data == json.dumps(json.dumps(data))
assert viz.js_path == VIS_JS_BUNDLE_FILENAME
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/executor/test_assistant_tool_invoker.py | import pytest
from pathlib import Path
from typing import Any, Callable
from promptflow import tool
from promptflow.executor._assistant_tool_invoker import AssistantToolInvoker
from promptflow.executor._errors import UnsupportedAssistantToolType
@pytest.mark.unittest
class TestAssistantToolInvoker:
@pytest.fixture
def tool_definitions(self):
return [
{"type": "code_interpreter"},
{"type": "retrieval"},
{
"type": "function",
"tool_type": "python",
"source": {"type": "code", "path": "test_assistant_tool_invoker.py"},
}
]
@pytest.mark.parametrize(
"predefined_inputs", [({}), ({"input_int": 1})]
)
def test_load_tools(self, predefined_inputs):
input_int = 1
input_str = "test"
tool_definitions = [
{"type": "code_interpreter"},
{"type": "retrieval"},
{
"type": "function",
"tool_type": "python",
"source": {"type": "code", "path": "test_assistant_tool_invoker.py"},
"predefined_inputs": predefined_inputs
}
]
# Test load tools
invoker = AssistantToolInvoker.init(tool_definitions, working_dir=Path(__file__).parent)
for tool_name, assistant_tool in invoker._assistant_tools.items():
assert tool_name in ("code_interpreter", "retrieval", "sample_tool")
assert assistant_tool.name == tool_name
assert isinstance(assistant_tool.openai_definition, dict)
if tool_name in ("code_interpreter", "retrieval"):
assert assistant_tool.func is None
else:
assert isinstance(assistant_tool.func, Callable)
# Test to_openai_tools
descriptions = invoker.to_openai_tools()
assert len(descriptions) == 3
properties = {
"input_int": {"description": "This is a sample input int.", "type": "number"},
"input_str": {"description": "This is a sample input str.", "type": "string"}
}
required = ["input_int", "input_str"]
self._remove_predefined_inputs(properties, predefined_inputs.keys())
self._remove_predefined_inputs(required, predefined_inputs.keys())
for description in descriptions:
if description["type"] in ("code_interpreter", "retrieval"):
assert description == {"type": description["type"]}
else:
assert description == {
"type": "function",
"function": {
"name": "sample_tool",
"description": "This is a sample tool.",
"parameters": {
"type": "object",
"properties": properties,
"required": required
}
}
}
# Test invoke tool
kwargs = {"input_int": input_int, "input_str": input_str}
self._remove_predefined_inputs(kwargs, predefined_inputs.keys())
result = invoker.invoke_tool(func_name="sample_tool", kwargs=kwargs)
assert result == (input_int, input_str)
def test_load_tools_with_invalid_case(self):
tool_definitions = [{"type": "invalid_type"}]
with pytest.raises(UnsupportedAssistantToolType) as exc_info:
AssistantToolInvoker.init(tool_definitions)
assert "Unsupported assistant tool type" in exc_info.value.message
    def _remove_predefined_inputs(self, value: Any, predefined_inputs: list):
for input in predefined_inputs:
if input in value:
if isinstance(value, dict):
value.pop(input)
elif isinstance(value, list):
value.remove(input)
@tool
def sample_tool(input_int: int, input_str: str):
"""This is a sample tool.
:param input_int: This is a sample input int.
:type input_int: int
:param input_str: This is a sample input str.
:type input_str: str
"""
return input_int, input_str
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/executor/test_flow_validator.py | import pytest
from promptflow.contracts.flow import Flow, FlowInputDefinition
from promptflow.contracts.tool import ValueType
from promptflow.executor._errors import InputParseError, InputTypeError, InvalidAggregationInput, InvalidFlowRequest
from promptflow.executor.flow_validator import FlowValidator
from ...utils import WRONG_FLOW_ROOT, get_flow_from_folder
@pytest.mark.unittest
class TestFlowValidator:
@pytest.mark.parametrize(
"flow_folder, expected_node_order",
[
("unordered_nodes", ["first_node", "second_node", "third_node"]),
("unordered_nodes_with_skip", ["first_node", "second_node", "third_node"]),
("unordered_nodes_with_activate", ["first_node", "second_node", "third_node"]),
],
)
def test_ensure_nodes_order(self, flow_folder, expected_node_order):
flow = get_flow_from_folder(flow_folder)
flow = FlowValidator._ensure_nodes_order(flow)
node_order = [node.name for node in flow.nodes]
assert node_order == expected_node_order
@pytest.mark.parametrize(
"flow_folder, error_message",
[
(
"nodes_cycle",
(
"Invalid node definitions found in the flow graph. Node circular dependency has been detected "
"among the nodes in your flow. Kindly review the reference relationships for the nodes "
"['first_node', 'second_node'] and resolve the circular reference issue in the flow."
),
),
(
"nodes_cycle_with_activate",
(
"Invalid node definitions found in the flow graph. Node circular dependency has been detected "
"among the nodes in your flow. Kindly review the reference relationships "
"for the nodes ['first_node', 'second_node'] and resolve the circular reference issue in the flow."
),
),
(
"wrong_node_reference",
(
"Invalid node definitions found in the flow graph. Node 'second_node' references a non-existent "
"node 'third_node' in your flow. Please review your flow to ensure that the node "
"name is accurately specified."
),
),
(
"non_aggregation_reference_aggregation",
(
"Invalid node definitions found in the flow graph. Non-aggregate node 'test_node' "
"cannot reference aggregate nodes {'calculate_accuracy'}. Please review and rectify "
"the node reference."
),
),
(
"aggregation_activate_reference_non_aggregation",
(
"Invalid node definitions found in the flow graph. Non-aggregation node 'grade' cannot be "
"referenced in the activate config of the aggregation node 'calculate_accuracy'. Please "
"review and rectify the node reference."
),
),
],
)
def test_ensure_nodes_order_with_exception(self, flow_folder, error_message):
flow = get_flow_from_folder(flow_folder, root=WRONG_FLOW_ROOT)
with pytest.raises(InvalidFlowRequest) as e:
FlowValidator._ensure_nodes_order(flow)
assert str(e.value) == error_message, "Expected: {}, Actual: {}".format(error_message, str(e.value))
@pytest.mark.parametrize(
"aggregated_flow_inputs, aggregation_inputs, error_message",
[
(
{},
{
"input1": "value1",
},
"The input for aggregation is incorrect. "
"The value for aggregated reference input 'input1' should be a list, "
"but received str. Please adjust the input value to match the expected format.",
),
(
{
"input1": "value1",
},
{},
"The input for aggregation is incorrect. "
"The value for aggregated flow input 'input1' should be a list, "
"but received str. Please adjust the input value to match the expected format.",
),
(
{"input1": ["value1_1", "value1_2"]},
{"input_2": ["value2_1"]},
"The input for aggregation is incorrect. The length of all aggregated inputs should be the same. "
"Current input lengths are: {'input1': 2, 'input_2': 1}. "
"Please adjust the input value in your input data.",
),
(
{
"input1": "value1",
},
{
"input1": "value1",
},
"The input for aggregation is incorrect. "
"The input 'input1' appears in both aggregated flow input and aggregated reference input. "
"Please remove one of them and try the operation again.",
),
],
)
def test_validate_aggregation_inputs_error(self, aggregated_flow_inputs, aggregation_inputs, error_message):
with pytest.raises(InvalidAggregationInput) as e:
FlowValidator._validate_aggregation_inputs(aggregated_flow_inputs, aggregation_inputs)
assert str(e.value) == error_message
@pytest.mark.parametrize(
"flow_folder",
["simple_flow_with_python_tool_and_aggregate"],
)
def test_ensure_outputs_valid_with_aggregation(self, flow_folder):
flow = get_flow_from_folder(flow_folder)
assert flow.outputs["content"] is not None
assert flow.outputs["aggregate_content"] is not None
flow.outputs = FlowValidator._ensure_outputs_valid(flow)
print(flow.outputs)
assert flow.outputs["content"] is not None
assert flow.outputs.get("aggregate_content") is None
@pytest.mark.parametrize(
"flow_folder, inputs, index, error_type, error_message",
[
(
"flow_with_list_input",
{"key": "['hello']"},
None,
InputParseError,
(
"Failed to parse the flow input. The value for flow input 'key' was "
"interpreted as JSON string since its type is 'list'. However, the value "
"'['hello']' is invalid for JSON parsing. Error details: (JSONDecodeError) "
"Expecting value: line 1 column 2 (char 1). Please make sure your inputs are properly formatted."
),
),
(
"flow_with_list_input",
{"key": "['hello']"},
0,
InputParseError,
(
"Failed to parse the flow input. The value for flow input 'key' in line 0 of input data was "
"interpreted as JSON string since its type is 'list'. However, the value "
"'['hello']' is invalid for JSON parsing. Error details: (JSONDecodeError) "
"Expecting value: line 1 column 2 (char 1). Please make sure your inputs are properly formatted."
),
),
],
)
def test_resolve_flow_inputs_type_json_error_for_list_type(
self, flow_folder, inputs, index, error_type, error_message
):
flow = get_flow_from_folder(flow_folder)
with pytest.raises(error_type) as exe_info:
FlowValidator.resolve_flow_inputs_type(flow, inputs, idx=index)
assert error_message == exe_info.value.message
@pytest.mark.parametrize(
"inputs, expected_result",
[({"test_input": ["1", "2"]}, {"test_input": [1, 2]})],
)
def test_resolve_aggregated_flow_inputs_type(self, inputs, expected_result):
flow = Flow(
id="fakeId",
name=None,
nodes=[],
inputs={"test_input": FlowInputDefinition(type=ValueType.INT)},
outputs=None,
tools=[],
)
result = FlowValidator.resolve_aggregated_flow_inputs_type(flow, inputs)
assert result == expected_result
@pytest.mark.parametrize(
"inputs, expected_message",
[
(
{"test_input": ["1", "str"]},
(
"The input for flow is incorrect. The value for flow input 'test_input' in line 1 of input data "
"does not match the expected type 'int'. "
"Please change flow input type or adjust the input value in your input data."
),
)
],
)
def test_resolve_aggregated_flow_inputs_type_error(self, inputs, expected_message):
flow = Flow(
id="fakeId",
name=None,
nodes=[],
inputs={"test_input": FlowInputDefinition(type=ValueType.INT)},
outputs=None,
tools=[],
)
with pytest.raises(InputTypeError) as ex:
FlowValidator.resolve_aggregated_flow_inputs_type(flow, inputs)
assert expected_message == str(ex.value)
@pytest.mark.parametrize(
"input, type, expected_result",
[
("1", ValueType.INT, 1),
('["1", "2"]', ValueType.LIST, ["1", "2"]),
],
)
def test_parse_input_value(self, input, type, expected_result):
input_key = "test_input"
result = FlowValidator._parse_input_value(input_key, input, type)
assert result == expected_result
@pytest.mark.parametrize(
"input, type, index, error_type, expected_message",
[
(
"str",
ValueType.INT,
None,
InputTypeError,
(
"The input for flow is incorrect. The value for flow input 'my_input' does not match the expected "
"type 'int'. Please change flow input type or adjust the input value in your input data."
),
),
(
"['1', '2']",
ValueType.LIST,
None,
InputParseError,
(
"Failed to parse the flow input. The value for flow input 'my_input' was interpreted as JSON "
"string since its type is 'list'. However, the value '['1', '2']' is invalid for JSON parsing. "
"Error details: (JSONDecodeError) Expecting value: line 1 column 2 (char 1). "
"Please make sure your inputs are properly formatted."
),
),
(
"str",
ValueType.INT,
10,
InputTypeError,
(
"The input for flow is incorrect. The value for flow input 'my_input' in line 10 of "
"input data does not match the expected type 'int'. "
"Please change flow input type or adjust the input value in your input data."
),
),
(
"['1', '2']",
ValueType.LIST,
10,
InputParseError,
(
"Failed to parse the flow input. The value for flow input 'my_input' in line 10 of input data "
"was interpreted as JSON string since its type is 'list'. However, the value '['1', '2']' is "
"invalid for JSON parsing. Error details: (JSONDecodeError) Expecting value: "
"line 1 column 2 (char 1). Please make sure your inputs are properly formatted."
),
),
],
)
def test_parse_input_value_error(self, input, type, index, error_type, expected_message):
input_key = "my_input"
with pytest.raises(error_type) as ex:
FlowValidator._parse_input_value(input_key, input, type, index)
assert expected_message == str(ex.value)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/executor/test_tool_resolver.py | import re
import sys
from pathlib import Path
from typing import List
from unittest.mock import mock_open
import pytest
from jinja2 import TemplateSyntaxError
from promptflow._core._errors import InvalidSource
from promptflow._core.tools_manager import ToolLoader
from promptflow._internal import tool
from promptflow._sdk.entities import CustomConnection, CustomStrongTypeConnection
from promptflow.connections import AzureOpenAIConnection
from promptflow.contracts.flow import InputAssignment, InputValueType, Node, ToolSource, ToolSourceType
from promptflow.contracts.tool import AssistantDefinition, InputDefinition, Secret, Tool, ToolType, ValueType
from promptflow.contracts.types import PromptTemplate
from promptflow.exceptions import UserErrorException
from promptflow.executor._errors import (
ConnectionNotFound,
InvalidConnectionType,
NodeInputValidationError,
ResolveToolError,
ValueTypeUnresolved,
)
from promptflow.executor._tool_resolver import ResolvedTool, ToolResolver
from ...utils import DATA_ROOT, FLOW_ROOT
TEST_ROOT = Path(__file__).parent.parent.parent
REQUESTS_PATH = TEST_ROOT / "test_configs/executor_api_requests"
WRONG_REQUESTS_PATH = TEST_ROOT / "test_configs/executor_wrong_requests"
class MyFirstCSTConnection(CustomStrongTypeConnection):
api_key: Secret
api_base: str
@tool(streaming_option_parameter="stream_enabled")
def mock_package_func(prompt: PromptTemplate, **kwargs):
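    # Stand-in for a package tool: renders the prompt by replacing {{key}} placeholders with the given kwargs.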
for k, v in kwargs.items():
prompt = prompt.replace(f"{{{{{k}}}}}", str(v))
return prompt
@pytest.mark.unittest
class TestToolResolver:
@pytest.fixture
def resolver(self):
return ToolResolver(working_dir=None, connections={})
def test_resolve_tool_by_node_with_diff_type(self, resolver, mocker):
node = mocker.Mock(name="node", tool=None, inputs={})
mocker.patch.object(
resolver,
"_resolve_package_node",
return_value=mocker.Mock(node=node, definition=None, callable=None, init_args=None),
)
mocker.patch.object(
resolver,
"_resolve_script_node",
return_value=mocker.Mock(node=node, definition=None, callable=None, init_args=None),
)
mocker.patch.object(
resolver,
"_resolve_prompt_node",
return_value=mocker.Mock(node=node, definition=None, callable=None, init_args=None),
)
mocker.patch.object(
resolver,
"_resolve_llm_node",
return_value=mocker.Mock(node=node, definition=None, callable=None, init_args=None),
)
mocker.patch.object(
resolver,
"_integrate_prompt_in_package_node",
return_value=mocker.Mock(node=node, definition=None, callable=None, init_args=None),
)
node.type = ToolType.PYTHON
node.source = mocker.Mock(type=ToolSourceType.Package)
resolver.resolve_tool_by_node(node)
resolver._resolve_package_node.assert_called_once()
node.type = ToolType.PYTHON
node.source = mocker.Mock(type=ToolSourceType.Code)
resolver.resolve_tool_by_node(node)
resolver._resolve_script_node.assert_called_once()
node.type = ToolType.PROMPT
resolver.resolve_tool_by_node(node)
resolver._resolve_prompt_node.assert_called_once()
node.type = ToolType.LLM
resolver.resolve_tool_by_node(node)
resolver._resolve_llm_node.assert_called_once()
resolver._resolve_package_node.reset_mock()
node.type = ToolType.CUSTOM_LLM
node.source = mocker.Mock(type=ToolSourceType.PackageWithPrompt)
resolver.resolve_tool_by_node(node)
resolver._resolve_package_node.assert_called_once()
resolver._integrate_prompt_in_package_node.assert_called_once()
def test_resolve_tool_by_node_with_invalid_type(self, resolver, mocker):
node = mocker.Mock(name="node", tool=None, inputs={})
node.source = mocker.Mock(type=None)
with pytest.raises(ResolveToolError) as exec_info:
resolver.resolve_tool_by_node(node)
assert isinstance(exec_info.value.inner_exception, NotImplementedError)
assert "Tool type" in exec_info.value.message
def test_resolve_tool_by_node_with_invalid_source_type(self, resolver, mocker):
node = mocker.Mock(name="node", tool=None, inputs={})
node.type = ToolType.PYTHON
node.source = mocker.Mock(type=None)
with pytest.raises(ResolveToolError) as exec_info:
resolver.resolve_tool_by_node(node)
assert isinstance(exec_info.value.inner_exception, NotImplementedError)
assert "Tool source type" in exec_info.value.message
node.type = ToolType.CUSTOM_LLM
node.source = mocker.Mock(type=None)
with pytest.raises(ResolveToolError) as exec_info:
resolver.resolve_tool_by_node(node)
assert isinstance(exec_info.value.inner_exception, NotImplementedError)
assert "Tool source type" in exec_info.value.message
def test_resolve_tool_by_node_with_no_source(self, resolver, mocker):
node = mocker.Mock(name="node", tool=None, inputs={})
node.source = None
with pytest.raises(ResolveToolError) as ex:
resolver.resolve_tool_by_node(node)
assert isinstance(ex.value.inner_exception, UserErrorException)
def test_resolve_tool_by_node_with_no_source_path(self, resolver, mocker):
node = mocker.Mock(name="node", tool=None, inputs={})
node.type = ToolType.PROMPT
node.source = mocker.Mock(type=ToolSourceType.Package, path=None)
with pytest.raises(ResolveToolError) as exec_info:
resolver.resolve_tool_by_node(node)
assert isinstance(exec_info.value.inner_exception, InvalidSource)
assert "Node source path" in exec_info.value.message
def test_resolve_tool_by_node_with_duplicated_inputs(self, resolver, mocker):
node = mocker.Mock(name="node", tool=None, inputs={})
node.type = ToolType.PROMPT
mocker.patch.object(resolver, "_load_source_content", return_value="{{template}}")
with pytest.raises(ResolveToolError) as exec_info:
resolver.resolve_tool_by_node(node)
assert isinstance(exec_info.value.inner_exception, NodeInputValidationError)
assert "These inputs are duplicated" in exec_info.value.message
def test_resolve_tool_by_node_with_invalid_template(self, resolver, mocker):
node = mocker.Mock(tool=None, inputs={})
node.name = "node"
node.type = ToolType.PROMPT
mocker.patch.object(resolver, "_load_source_content", return_value="{{current context}}")
with pytest.raises(ResolveToolError) as exec_info:
resolver.resolve_tool_by_node(node)
assert isinstance(exec_info.value.inner_exception, TemplateSyntaxError)
expected_message = (
"Tool load failed in 'node': Jinja parsing failed at line 1: "
"(TemplateSyntaxError) expected token 'end of print statement', got 'context'"
)
assert expected_message in exec_info.value.message
def test_convert_node_literal_input_types_with_invalid_case(self):
# Case 1: conn_name not in connections, should raise conn_name not found error
tool = Tool(name="mock", type="python", inputs={"conn": InputDefinition(type=["CustomConnection"])})
node = Node(
name="mock",
tool=tool,
inputs={"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL)},
)
with pytest.raises(ConnectionNotFound):
tool_resolver = ToolResolver(working_dir=None, connections={})
tool_resolver._convert_node_literal_input_types(node, tool)
# Case 2: conn_name in connections, but type not matched
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
with pytest.raises(NodeInputValidationError) as exe_info:
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._convert_node_literal_input_types(node, tool)
message = "'AzureOpenAIConnection' is not supported, valid types ['CustomConnection']"
assert message in exe_info.value.message, "Expected: {}, Actual: {}".format(message, exe_info.value.message)
# Case 3: Literal value, type mismatch
tool = Tool(name="mock", type="python", inputs={"int_input": InputDefinition(type=[ValueType.INT])})
node = Node(
name="mock",
tool=tool,
inputs={"int_input": InputAssignment(value="invalid", value_type=InputValueType.LITERAL)},
)
with pytest.raises(NodeInputValidationError) as exe_info:
tool_resolver = ToolResolver(working_dir=None, connections={})
tool_resolver._convert_node_literal_input_types(node, tool)
message = "value 'invalid' is not type int"
assert message in exe_info.value.message, "Expected: {}, Actual: {}".format(message, exe_info.value.message)
# Case 4: Unresolved value, like newly added type not in old version ValueType enum
tool = Tool(name="mock", type="python", inputs={"int_input": InputDefinition(type=["A_good_type"])})
node = Node(
name="mock",
tool=tool,
inputs={"int_input": InputAssignment(value="invalid", value_type=InputValueType.LITERAL)},
)
with pytest.raises(ValueTypeUnresolved):
tool_resolver = ToolResolver(working_dir=None, connections={})
tool_resolver._convert_node_literal_input_types(node, tool)
# Case 5: Literal value, invalid image in list
tool = Tool(name="mock", type="python", inputs={"list_input": InputDefinition(type=[ValueType.LIST])})
invalid_image = {"data:image/jpg;base64": "invalid_image"}
node = Node(
name="mock",
tool=tool,
inputs={"list_input": InputAssignment(value=[invalid_image], value_type=InputValueType.LITERAL)},
)
with pytest.raises(NodeInputValidationError) as exe_info:
tool_resolver = ToolResolver(working_dir=None, connections={})
tool_resolver._convert_node_literal_input_types(node, tool)
message = "Invalid base64 image"
assert message in exe_info.value.message, "Expected: {}, Actual: {}".format(message, exe_info.value.message)
# Case 6: Literal value, invalid assistant definition path
tool = Tool(
name="mock",
type="python",
inputs={"assistant_definition": InputDefinition(type=[ValueType.ASSISTANT_DEFINITION])},
)
node = Node(
name="mock",
tool=tool,
inputs={"assistant_definition": InputAssignment(value="invalid_path", value_type=InputValueType.LITERAL)},
)
with pytest.raises(NodeInputValidationError) as exe_info:
tool_resolver = ToolResolver(working_dir=Path(__file__).parent, connections={})
tool_resolver._convert_node_literal_input_types(node, tool)
assert (
"Failed to load assistant definition" in exe_info.value.message
and "is not a valid path" in exe_info.value.message
), "Expected: {}, Actual: {}".format(message, exe_info.value.message)
def test_resolve_llm_connection_to_inputs(self):
# Case 1: node.connection is not specified
tool = Tool(name="mock", type="python", inputs={"conn": InputDefinition(type=["CustomConnection"])})
node = Node(
name="mock",
tool=tool,
inputs={"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL)},
)
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
with pytest.raises(ConnectionNotFound):
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._resolve_llm_connection_to_inputs(node, tool)
# Case 2: node.connection is not found from connection manager
tool = Tool(name="mock", type="python", inputs={"conn": InputDefinition(type=["CustomConnection"])})
node = Node(
name="mock",
tool=tool,
inputs={"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL)},
connection="conn_name1",
)
connections = {}
with pytest.raises(ConnectionNotFound):
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._resolve_llm_connection_to_inputs(node, tool)
# Case 3: Tool definition with bad input type list
tool = Tool(name="mock", type="python", inputs={"conn": InputDefinition(type=["int"])})
node = Node(
name="mock",
tool=tool,
inputs={"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL)},
connection="conn_name",
)
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
with pytest.raises(InvalidConnectionType) as exe_info:
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._resolve_llm_connection_to_inputs(node, tool)
assert "Connection type can not be resolved for tool" in exe_info.value.message
# Case 4: Tool type not match the connection manager return
tool = Tool(name="mock", type="python", inputs={"conn": InputDefinition(type=["OpenAIConnection"])})
node = Node(
name="mock",
tool=tool,
inputs={"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL)},
connection="conn_name",
)
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
with pytest.raises(InvalidConnectionType) as exe_info:
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._resolve_llm_connection_to_inputs(node, tool)
assert "Invalid connection" in exe_info.value.message
# Case 5: Normal case
tool = Tool(
name="mock",
type="python",
inputs={"conn": InputDefinition(type=["OpenAIConnection", "AzureOpenAIConnection"])},
)
node = Node(
name="mock",
tool=tool,
inputs={"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL)},
connection="conn_name",
)
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
tool_resolver = ToolResolver(working_dir=None, connections=connections)
key, conn = tool_resolver._resolve_llm_connection_to_inputs(node, tool)
assert key == "conn"
assert isinstance(conn, AzureOpenAIConnection)
def test_resolve_llm_node(self, mocker):
def mock_llm_api_func(prompt: PromptTemplate, **kwargs):
for k, v in kwargs.items():
prompt = prompt.replace(f"{{{{{k}}}}}", str(v))
return prompt
tool_loader = ToolLoader(working_dir=None)
tool = Tool(name="mock", type=ToolType.LLM, inputs={"conn": InputDefinition(type=["AzureOpenAIConnection"])})
mocker.patch.object(tool_loader, "load_tool_for_llm_node", return_value=tool)
mocker.patch(
"promptflow._core.tools_manager.BuiltinsManager._load_package_tool",
return_value=(mock_llm_api_func, {"conn": AzureOpenAIConnection}),
)
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._tool_loader = tool_loader
mocker.patch.object(tool_resolver, "_load_source_content", return_value="{{text}}![image]({{image}})")
node = Node(
name="mock",
tool=None,
inputs={
"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL),
"text": InputAssignment(value="Hello World!", value_type=InputValueType.LITERAL),
"image": InputAssignment(value=str(DATA_ROOT / "logo.jpg"), value_type=InputValueType.LITERAL),
},
connection="conn_name",
provider="mock",
)
resolved_tool = tool_resolver._resolve_llm_node(node, convert_input_types=True)
assert len(resolved_tool.node.inputs) == 2
kwargs = {k: v.value for k, v in resolved_tool.node.inputs.items()}
pattern = re.compile(r"^Hello World!!\[image\]\(Image\([a-z0-9]{8}\)\)$")
prompt = resolved_tool.callable(**kwargs)
assert re.match(pattern, prompt)
def test_resolve_script_node(self, mocker):
def mock_python_func(prompt: PromptTemplate, **kwargs):
for k, v in kwargs.items():
prompt = prompt.replace(f"{{{{{k}}}}}", str(v))
return prompt
tool_loader = ToolLoader(working_dir=None)
tool = Tool(name="mock", type=ToolType.PYTHON, inputs={"conn": InputDefinition(type=["AzureOpenAIConnection"])})
mocker.patch.object(tool_loader, "load_tool_for_script_node", return_value=(None, tool))
mocker.patch(
"promptflow._core.tools_manager.BuiltinsManager._load_tool_from_module",
return_value=(mock_python_func, {"conn": AzureOpenAIConnection}),
)
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._tool_loader = tool_loader
node = Node(
name="mock",
tool=None,
inputs={
"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL),
"prompt": InputAssignment(value="{{text}}", value_type=InputValueType.LITERAL),
"text": InputAssignment(value="Hello World!", value_type=InputValueType.LITERAL),
},
connection="conn_name",
provider="mock",
)
resolved_tool = tool_resolver._resolve_script_node(node, convert_input_types=True)
assert len(resolved_tool.node.inputs) == 2
kwargs = {k: v.value for k, v in resolved_tool.node.inputs.items()}
assert resolved_tool.callable(**kwargs) == "Hello World!"
def test_resolve_script_node_with_assistant_definition(self, mocker):
def mock_python_func(input: AssistantDefinition):
if input.model == "model" and input.instructions == "instructions" and input.tools == []:
return True
return False
tool_loader = ToolLoader(working_dir=None)
tool = Tool(
name="mock", type=ToolType.PYTHON, inputs={"input": InputDefinition(type=[ValueType.ASSISTANT_DEFINITION])}
)
mocker.patch.object(tool_loader, "load_tool_for_script_node", return_value=(None, tool))
mocker.patch(
"promptflow._core.tools_manager.BuiltinsManager._load_tool_from_module",
return_value=(mock_python_func, {}),
)
tool_resolver = ToolResolver(working_dir=Path(__file__).parent, connections={})
tool_resolver._tool_loader = tool_loader
mocker.patch("builtins.open", mock_open())
mocker.patch(
"ruamel.yaml.YAML.load", return_value={"model": "model", "instructions": "instructions", "tools": []}
)
node = Node(
name="mock",
tool=None,
inputs={"input": InputAssignment(value="test_tool_resolver.py", value_type=InputValueType.LITERAL)},
)
resolved_tool = tool_resolver._resolve_script_node(node, convert_input_types=True)
assert len(resolved_tool.node.inputs) == 1
kwargs = {k: v.value for k, v in resolved_tool.node.inputs.items()}
assert resolved_tool.callable(**kwargs)
def test_resolve_package_node(self, mocker):
tool_loader = ToolLoader(working_dir=None)
tool = Tool(name="mock", type=ToolType.PYTHON, inputs={"conn": InputDefinition(type=["AzureOpenAIConnection"])})
mocker.patch.object(tool_loader, "load_tool_for_package_node", return_value=tool)
mocker.patch(
"promptflow._core.tools_manager.BuiltinsManager._load_package_tool",
return_value=(mock_package_func, {"conn": AzureOpenAIConnection}),
)
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._tool_loader = tool_loader
node = Node(
name="mock",
tool=None,
inputs={
"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL),
"prompt": InputAssignment(value="{{text}}", value_type=InputValueType.LITERAL),
"text": InputAssignment(value="Hello World!", value_type=InputValueType.LITERAL),
},
connection="conn_name",
provider="mock",
)
resolved_tool = tool_resolver._resolve_package_node(node, convert_input_types=True)
assert len(resolved_tool.node.inputs) == 2
kwargs = {k: v.value for k, v in resolved_tool.node.inputs.items()}
assert resolved_tool.callable(**kwargs) == "Hello World!"
def test_integrate_prompt_in_package_node(self, mocker):
tool_resolver = ToolResolver(working_dir=None, connections={})
mocker.patch.object(
tool_resolver,
"_load_source_content",
return_value="{{text}}",
)
tool = Tool(name="mock", type=ToolType.CUSTOM_LLM, inputs={"prompt": InputDefinition(type=["PromptTemplate"])})
node = Node(
name="mock",
tool=None,
inputs={"text": InputAssignment(value="Hello World!", value_type=InputValueType.LITERAL)},
connection="conn_name",
provider="mock",
)
resolved_tool = ResolvedTool(node=node, callable=mock_package_func, definition=tool, init_args=None)
assert resolved_tool.callable._streaming_option_parameter == "stream_enabled"
resolved_tool = tool_resolver._integrate_prompt_in_package_node(resolved_tool)
assert resolved_tool.callable._streaming_option_parameter == "stream_enabled"
kwargs = {k: v.value for k, v in resolved_tool.node.inputs.items()}
assert resolved_tool.callable(**kwargs) == "Hello World!"
@pytest.mark.parametrize(
"conn_types, expected_type",
[
(["MyFirstCSTConnection"], MyFirstCSTConnection),
(["CustomConnection", "MyFirstCSTConnection"], CustomConnection),
(["CustomConnection", "MyFirstCSTConnection", "MySecondCSTConnection"], CustomConnection),
(["MyFirstCSTConnection", "MySecondCSTConnection"], MyFirstCSTConnection),
],
)
def test_convert_to_custom_strong_type_connection_value(self, conn_types: List[str], expected_type, mocker):
connections = {"conn_name": {"type": "CustomConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
tool_resolver = ToolResolver(working_dir=None, connections=connections)
node = mocker.Mock(name="node", tool=None, inputs={})
node.type = ToolType.PYTHON
node.source = mocker.Mock(type=ToolSourceType.Code)
tool = Tool(name="tool", type="python", inputs={"conn": InputDefinition(type=["CustomConnection"])})
m = sys.modules[__name__]
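        # Pass the current test module so the resolver can look up MyFirstCSTConnection, which is defined in this file.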
v = InputAssignment(value="conn_name", value_type=InputValueType.LITERAL)
actual = tool_resolver._convert_to_custom_strong_type_connection_value(
"conn_name", v, node, tool, conn_types, m
)
assert isinstance(actual, expected_type)
assert actual.api_base == "mock"
def test_load_source(self):
# Create a mock Node object with a valid source path
node = Node(name="mock", tool=None, inputs={}, source=ToolSource())
node.source.path = "./script_with_special_character/script_with_special_character.py"
resolver = ToolResolver(FLOW_ROOT)
result = resolver._load_source_content(node)
assert "https://www.bing.com/\ue000\ue001/" in result
@pytest.mark.parametrize(
"source",
[
None,
            ToolSource(path=None),  # Will end up trying to read a directory instead of a file.
            ToolSource(path=""),  # Will end up trying to read a directory instead of a file.
ToolSource(path="NotExistPath.py"),
],
)
def test_load_source_error(self, source):
        # Create a mock Node object with an invalid or missing source
node = Node(name="mock", tool=None, inputs={}, source=source)
resolver = ToolResolver(FLOW_ROOT)
with pytest.raises(InvalidSource) as _:
resolver._load_source_content(node)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/executor/test_flow_nodes_scheduler.py | from concurrent.futures import Future
from typing import Callable
from unittest.mock import MagicMock
import pytest
from promptflow._core.flow_execution_context import FlowExecutionContext
from promptflow.contracts.flow import Node
from promptflow.executor._dag_manager import DAGManager
from promptflow.executor._flow_nodes_scheduler import (
DEFAULT_CONCURRENCY_BULK,
DEFAULT_CONCURRENCY_FLOW,
FlowNodesScheduler,
NoNodeExecutedError,
)
@pytest.mark.unittest
class TestFlowNodesScheduler:
def setup_method(self):
# Define mock objects and methods
self.tools_manager = MagicMock()
self.context = MagicMock(spec=FlowExecutionContext)
self.context.invoke_tool.side_effect = lambda _, func, kwargs: func(**kwargs)
self.scheduler = FlowNodesScheduler(self.tools_manager, {}, [], DEFAULT_CONCURRENCY_BULK, self.context)
    def test_maximum_concurrency(self):
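        # A requested concurrency of 1000 should be capped at the flow-level default.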
scheduler = FlowNodesScheduler(self.tools_manager, {}, [], 1000, self.context)
assert scheduler._node_concurrency == DEFAULT_CONCURRENCY_FLOW
def test_collect_outputs(self):
future1 = Future()
future1.set_result("output1")
future2 = Future()
future2.set_result("output2")
node1 = MagicMock(spec=Node)
node1.name = "node1"
node2 = MagicMock(spec=Node)
node2.name = "node2"
self.scheduler._future_to_node = {future1: node1, future2: node2}
completed_nodes_outputs = self.scheduler._collect_outputs([future1, future2])
assert completed_nodes_outputs == {"node1": future1.result(), "node2": future2.result()}
def test_bypass_nodes(self):
executor = MagicMock()
dag_manager = MagicMock(spec=DAGManager)
node1 = MagicMock(spec=Node)
node1.name = "node1"
        # pop_bypassable_nodes returns a list with one node the first time
        # and an empty list the second time.
dag_manager.pop_bypassable_nodes.side_effect = ([node1], [])
self.scheduler._dag_manager = dag_manager
self.scheduler._execute_nodes(executor)
self.scheduler._context.bypass_node.assert_called_once_with(node1)
def test_submit_nodes(self):
executor = MagicMock()
dag_manager = MagicMock(spec=DAGManager)
node1 = MagicMock(spec=Node)
node1.name = "node1"
dag_manager.pop_bypassable_nodes.return_value = []
        # pop_ready_nodes always returns the single ready node to be submitted.
dag_manager.pop_ready_nodes.return_value = [node1]
self.scheduler._dag_manager = dag_manager
self.scheduler._execute_nodes(executor)
self.scheduler._context.bypass_node.assert_not_called()
assert node1 in self.scheduler._future_to_node.values()
def test_future_cancelled_for_exception(self):
dag_manager = MagicMock(spec=DAGManager)
self.scheduler._dag_manager = dag_manager
dag_manager.completed.return_value = False
dag_manager.pop_bypassable_nodes.return_value = []
dag_manager.pop_ready_nodes.return_value = []
failed_future = Future()
failed_future.set_exception(Exception("test"))
from concurrent.futures._base import CANCELLED, FINISHED
failed_future._state = FINISHED
cancelled_future = Future()
node1 = MagicMock(spec=Node)
node1.name = "node1"
node2 = MagicMock(spec=Node)
node2.name = "node2"
self.scheduler._future_to_node = {failed_future: node1, cancelled_future: node2}
try:
self.scheduler.execute()
except Exception:
pass
# Assert another future is cancelled.
assert CANCELLED in cancelled_future._state
def test_success_result(self):
dag_manager = MagicMock(spec=DAGManager)
finished_future = Future()
finished_future.set_result("output1")
finished_node = MagicMock(spec=Node)
finished_node.name = "node1"
self.scheduler._dag_manager = dag_manager
self.scheduler._future_to_node = {finished_future: finished_node}
# No more nodes need to run.
dag_manager.pop_bypassable_nodes.return_value = []
dag_manager.pop_ready_nodes.return_value = []
dag_manager.completed.side_effect = (False, True)
bypassed_node_result = {"bypassed_node": "output2"}
dag_manager.bypassed_nodes = bypassed_node_result
completed_node_result = {"completed_node": "output1"}
dag_manager.completed_nodes_outputs = completed_node_result
result = self.scheduler.execute()
dag_manager.complete_nodes.assert_called_once_with({"node1": "output1"})
assert result == (completed_node_result, bypassed_node_result)
def test_no_nodes_to_run(self):
dag_manager = MagicMock(spec=DAGManager)
dag_manager.pop_bypassable_nodes.return_value = []
dag_manager.pop_ready_nodes.return_value = []
dag_manager.completed.return_value = False
self.scheduler._dag_manager = dag_manager
with pytest.raises(NoNodeExecutedError) as _:
self.scheduler.execute()
def test_execute_single_node(self):
node_to_run = MagicMock(spec=Node)
node_to_run.name = "node1"
mock_callable = MagicMock(spec=Callable)
mock_callable.return_value = "output1"
self.scheduler._tools_manager.get_tool.return_value = mock_callable
dag_manager = MagicMock(spec=DAGManager)
dag_manager.get_node_valid_inputs.return_value = {"input": 1}
result = self.scheduler._exec_single_node_in_thread((node_to_run, dag_manager))
mock_callable.assert_called_once_with(**{"input": 1})
assert result == "output1"
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/executor/test_errors.py | import pytest
from promptflow._core.tool_meta_generator import PythonLoadError
from promptflow.exceptions import ErrorTarget
from promptflow.executor._errors import ResolveToolError
def code_with_bug():
1 / 0
def raise_resolve_tool_error(func, target=None, module=None):
try:
func()
except Exception as e:
if target:
raise ResolveToolError(node_name="MyTool", target=target, module=module) from e
raise ResolveToolError(node_name="MyTool") from e
def raise_python_load_error():
try:
code_with_bug()
except Exception as e:
raise PythonLoadError(message="Test PythonLoadError.") from e
def test_resolve_tool_error():
with pytest.raises(ResolveToolError) as e:
raise_resolve_tool_error(raise_python_load_error, ErrorTarget.TOOL, "__pf_main__")
exception = e.value
inner_exception = exception.inner_exception
assert isinstance(inner_exception, PythonLoadError)
assert exception.message == "Tool load failed in 'MyTool': (PythonLoadError) Test PythonLoadError."
assert exception.additional_info == inner_exception.additional_info
assert exception.error_codes == ["UserError", "ToolValidationError", "PythonParsingError", "PythonLoadError"]
assert exception.reference_code == "Tool/__pf_main__"
def test_resolve_tool_error_with_none_inner():
with pytest.raises(ResolveToolError) as e:
raise ResolveToolError(node_name="MyTool")
exception = e.value
assert exception.inner_exception is None
assert exception.message == "Tool load failed in 'MyTool'."
assert exception.additional_info is None
assert exception.error_codes == ["SystemError", "ResolveToolError"]
assert exception.reference_code == "Executor"
def test_resolve_tool_error_with_no_PromptflowException_inner():
with pytest.raises(ResolveToolError) as e:
raise_resolve_tool_error(code_with_bug)
exception = e.value
assert isinstance(exception.inner_exception, ZeroDivisionError)
assert exception.message == "Tool load failed in 'MyTool': (ZeroDivisionError) division by zero"
assert exception.additional_info is None
assert exception.error_codes == ["SystemError", "ZeroDivisionError"]
assert exception.reference_code == "Executor"
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/executor/test_dag_manager.py | import pytest
from promptflow.contracts.flow import ActivateCondition, InputAssignment, Node
from promptflow.executor._dag_manager import DAGManager
def create_test_node(name, input, activate=None):
input = InputAssignment.deserialize(input)
activate = ActivateCondition.deserialize(activate, name) if activate else None
return Node(
name=name,
tool="test_tool",
connection="azure_open_ai_connection",
inputs={"test_input": input, "test_input2": InputAssignment("hello world")},
provider="test_provider",
api="test_api",
activate=activate,
)
def pop_ready_node_names(dag_manager: DAGManager):
return {node.name for node in dag_manager.pop_ready_nodes()}
def pop_bypassed_node_names(dag_manager: DAGManager):
return {node.name for node in dag_manager.pop_bypassable_nodes()}
@pytest.mark.unittest
class TestDAGManager:
def test_pop_ready_nodes(self):
nodes = [
create_test_node("node1", input="value1"),
create_test_node("node2", input="${node1.output}"),
create_test_node("node3", input="${node1.output}"),
]
dag_manager = DAGManager(nodes, flow_inputs={})
assert pop_ready_node_names(dag_manager) == {"node1"}
dag_manager.complete_nodes({"node1": None})
assert pop_ready_node_names(dag_manager) == {"node2", "node3"}
dag_manager.complete_nodes({"node2": None, "node3": None})
def test_pop_bypassed_nodes(self):
nodes = [
create_test_node("node1", input="value1"),
create_test_node("node2", input="${inputs.text}", activate={"when": "${inputs.text}", "is": "world"}),
create_test_node("node3", input="${node1.output}"),
create_test_node("node4", input="${node2.output}"),
]
flow_inputs = {"text": "hello"}
dag_manager = DAGManager(nodes, flow_inputs)
expected_bypassed_nodes = {"node2", "node4"}
assert pop_bypassed_node_names(dag_manager) == expected_bypassed_nodes
assert dag_manager.bypassed_nodes.keys() == expected_bypassed_nodes
def test_complete_nodes(self):
nodes = [create_test_node("node1", input="value1")]
dag_manager = DAGManager(nodes, flow_inputs={})
dag_manager.complete_nodes({"node1": {"output1": "value1"}})
assert len(dag_manager.completed_nodes_outputs) == 1
assert dag_manager.completed_nodes_outputs["node1"] == {"output1": "value1"}
def test_completed(self):
nodes = [
create_test_node("node1", input="${inputs.text}", activate={"when": "${inputs.text}", "is": "hello"}),
create_test_node("node2", input="${node1.output}"),
]
flow_inputs = {"text": "hello"}
dag_manager = DAGManager(nodes, flow_inputs)
assert pop_ready_node_names(dag_manager) == {"node1"}
dag_manager.complete_nodes({"node1": {"output1": "value1"}})
assert pop_ready_node_names(dag_manager) == {"node2"}
dag_manager.complete_nodes({"node2": {"output1": "value1"}})
assert dag_manager.completed_nodes_outputs.keys() == {"node1", "node2"}
assert dag_manager.completed()
def test_get_node_valid_inputs(self):
nodes = [
create_test_node("node1", input="value1"),
create_test_node("node2", input="${node1.output}"),
]
def f(input):
return input
flow_inputs = {}
dag_manager = DAGManager(nodes, flow_inputs)
dag_manager.complete_nodes({"node1": {"output1": "value1"}})
valid_inputs = dag_manager.get_node_valid_inputs(nodes[1], f)
assert valid_inputs == {"test_input": {"output1": "value1"}, "test_input2": "hello world"}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/executor/test_flow_executor.py | from unittest.mock import Mock
import pytest
from promptflow import tool
from promptflow.contracts.flow import FlowInputDefinition
from promptflow.contracts.tool import ValueType
from promptflow.executor.flow_executor import (
FlowExecutor,
_ensure_node_result_is_serializable,
_inject_stream_options,
enable_streaming_for_llm_tool,
)
from promptflow.tools.aoai import chat, completion
from promptflow.tools.embedding import embedding
@pytest.mark.unittest
class TestFlowExecutor:
@pytest.mark.parametrize(
"flow_inputs, aggregated_flow_inputs, aggregation_inputs, expected_inputs",
[
(
{
"input_from_default": FlowInputDefinition(type=ValueType.STRING, default="default_value"),
},
{},
{},
{"input_from_default": ["default_value"]},
),
(
{
"input_no_default": FlowInputDefinition(type=ValueType.STRING),
},
{},
{},
{}, # No default value for input.
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.STRING, default="default_value"),
},
{"input_from_default": "input_value", "another_key": "input_value"},
{},
{"input_from_default": "input_value", "another_key": "input_value"},
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.STRING, default="default_value"),
},
{"another_key": ["input_value", "input_value"]},
{},
{
"input_from_default": ["default_value", "default_value"],
"another_key": ["input_value", "input_value"],
},
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.BOOL, default=False),
},
{"another_key": ["input_value", "input_value"]},
{},
{
"input_from_default": [False, False],
"another_key": ["input_value", "input_value"],
},
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.STRING, default="default_value"),
},
{},
{"another_key_in_aggregation_inputs": ["input_value", "input_value"]},
{
"input_from_default": ["default_value", "default_value"],
},
),
],
)
def test_apply_default_value_for_aggregation_input(
self, flow_inputs, aggregated_flow_inputs, aggregation_inputs, expected_inputs
):
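        # Each case supplies (flow input definitions, aggregated flow inputs,
        # aggregation inputs, expected result); defaults are only back-filled for
        # inputs that define one and are not already provided.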
result = FlowExecutor._apply_default_value_for_aggregation_input(
flow_inputs, aggregated_flow_inputs, aggregation_inputs
)
assert result == expected_inputs
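# Module-level helpers that stand in for LLM tool functions: one accepts a
# ``stream`` keyword and echoes it back, the other does not, so the tests
# below can check which functions get wrapped for streaming.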
def func_with_stream_parameter(a: int, b: int, stream=False):
return a + b, stream
def func_without_stream_parameter(a: int, b: int):
return a + b
@pytest.mark.unittest
class TestEnableStreamForLLMTool:
@pytest.mark.parametrize(
"tool, should_be_wrapped",
[
(completion, True),
(chat, True),
(embedding, False),
],
)
def test_enable_stream_for_llm_tool(self, tool, should_be_wrapped):
func = enable_streaming_for_llm_tool(tool)
is_wrapped = func != tool
assert is_wrapped == should_be_wrapped
def test_func_with_stream_parameter_should_be_wrapped(self):
func = enable_streaming_for_llm_tool(func_with_stream_parameter)
assert func != func_with_stream_parameter
result = func(a=1, b=2)
assert result == (3, True)
result = func_with_stream_parameter(a=1, b=2)
assert result == (3, False)
def test_func_without_stream_parameter_should_not_be_wrapped(self):
func = enable_streaming_for_llm_tool(func_without_stream_parameter)
assert func == func_without_stream_parameter
result = func(a=1, b=2)
assert result == 3
def test_inject_stream_options_no_stream_param(self):
# Test that the function does not wrap the decorated function if it has no stream parameter
func = _inject_stream_options(lambda: True)(func_without_stream_parameter)
assert func == func_without_stream_parameter
result = func(a=1, b=2)
assert result == 3
def test_inject_stream_options_with_stream_param(self):
# Test that the function wraps the decorated function and injects the stream option
func = _inject_stream_options(lambda: True)(func_with_stream_parameter)
assert func != func_with_stream_parameter
result = func(a=1, b=2)
assert result == (3, True)
result = func_with_stream_parameter(a=1, b=2)
assert result == (3, False)
def test_inject_stream_options_with_mocked_should_stream(self):
# Test that the function uses the should_stream callable to determine the stream option
should_stream = Mock(return_value=True)
func = _inject_stream_options(should_stream)(func_with_stream_parameter)
result = func(a=1, b=2)
assert result == (3, True)
should_stream.return_value = False
result = func(a=1, b=2)
assert result == (3, False)
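# Module-level tools used by TestEnsureNodeResultIsSerializable: the generator
# output of streaming_tool should be consumed and concatenated, while the plain
# return value of non_streaming_tool should pass through unchanged.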
@tool
def streaming_tool():
for i in range(10):
yield i
@tool
def non_streaming_tool():
return 1
@pytest.mark.unittest
class TestEnsureNodeResultIsSerializable:
def test_streaming_tool_should_be_consumed_and_merged(self):
func = _ensure_node_result_is_serializable(streaming_tool)
assert func() == "0123456789"
def test_non_streaming_tool_should_not_be_affected(self):
func = _ensure_node_result_is_serializable(non_streaming_tool)
assert func() == 1
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/executor/test_input_assignment_parser.py | from typing import Any
import pytest
from promptflow._core._errors import NotSupported
from promptflow.contracts.flow import InputAssignment
from promptflow.executor._errors import (
InputNotFound,
InputNotFoundFromAncestorNodeOutput,
InvalidReferenceProperty,
UnsupportedReference,
)
from promptflow.executor._input_assignment_parser import parse_node_property, parse_value
FLOW_INPUTS = {"text": "hello promptflow"}
NODE_OUTPUTS = {"node1": "hello promptflow"}
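# An assignment carrying an unrecognized value_type; parse_value should reject
# it with NotSupported rather than trying to resolve it.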
class WrongInputAssignment:
value: Any
value_type: str = "wrong_type"
section: str = ""
property: str = ""
class DummyObject:
value: str = "dummy"
@pytest.mark.unittest
class TestInputAssignmentParser:
@pytest.mark.parametrize(
"input, expected_value",
[
("hello promptflow", "hello promptflow"),
("${inputs.text}", "hello promptflow"),
("${node1.output}", "hello promptflow"),
],
)
def test_parse_value(self, input, expected_value):
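        # Literal values pass through unchanged, while ${inputs.*} and ${node.*}
        # references resolve against FLOW_INPUTS and NODE_OUTPUTS respectively.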
input_assignment = InputAssignment.deserialize(input)
actual_value = parse_value(input_assignment, NODE_OUTPUTS, FLOW_INPUTS)
assert actual_value == expected_value
@pytest.mark.parametrize(
"input, expected_error_class, expected_error_message",
[
(
"${inputs.word}",
InputNotFound,
(
"The input 'word' is not found from flow inputs 'text'. "
"Please check the input name and try again."
),
),
(
"${node2.output}",
InputNotFoundFromAncestorNodeOutput,
(
"The input 'node2' is not found from ancestor node outputs ['node1']. "
"Please check the node name and try again."
),
),
(
"${node1.word}",
UnsupportedReference,
(
"The section 'word' of reference is currently unsupported. "
"Please specify the output part of the node 'node1'."
),
),
(
WrongInputAssignment(),
NotSupported,
(
"The type 'wrong_type' is currently unsupported. "
"Please choose from available types: ['Literal', 'FlowInput', 'NodeReference'] and try again."
),
),
],
)
def test_parse_value_with_exception(self, input, expected_error_class, expected_error_message):
input_assignment = InputAssignment.deserialize(input) if isinstance(input, str) else input
with pytest.raises(expected_error_class) as e:
parse_value(input_assignment, NODE_OUTPUTS, FLOW_INPUTS)
assert e.value.message == f"Flow execution failed. {expected_error_message}"
@pytest.mark.parametrize(
"node_val, property, expected_value",
[
(
{"output": "hello promptflow"},
"output",
"hello promptflow",
),
(
{"output": "hello promptflow"},
"['output']",
"hello promptflow",
),
(
{"output": "hello promptflow"},
'["output"]',
"hello promptflow",
),
(
{"output": {"text": "hello promptflow"}},
'["output"]["text"]',
"hello promptflow",
),
(
["output1", "output2"],
"[1]",
"output2",
),
(
DummyObject(),
"value",
"dummy",
),
],
)
def test_parse_node_property(self, node_val, property, expected_value):
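        # Properties may be plain keys, bracketed keys (single- or double-quoted),
        # nested bracket chains, list indices, or attributes of an object.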
actual_value = parse_node_property("node1", node_val, property)
assert actual_value == expected_value
@pytest.mark.parametrize(
"node_val, property, expected_error_message",
[
(
{"output_str": ["output1", "output2"]},
"output_str[2]",
(
"Invalid property 'output_str[2]' when accessing the node 'node1'. "
"Please check the property and try again."
),
),
(
{"word": "hello promptflow"},
"text",
(
"Invalid property 'text' when accessing the node 'node1'. "
"Please check the property and try again."
),
),
(
DummyObject(),
"value_type",
(
"Invalid property 'value_type' when accessing the node 'node1'. "
"Please check the property and try again."
),
),
],
)
def test_parse_node_property_with_exception(self, node_val, property, expected_error_message):
with pytest.raises(InvalidReferenceProperty) as e:
parse_node_property("node1", node_val, property)
assert e.value.message == f"Flow execution failed. {expected_error_message}"
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/executor/test_exceptions.py | import pytest
from promptflow.exceptions import PromptflowException
@pytest.mark.unittest
class TestExceptions:
def test_exception_message(self):
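        # A format parameter that is not supplied ('param1' here) is rendered as a
        # '<param1>' placeholder instead of raising a formatting error.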
ex = PromptflowException(
message_format="Test exception message with parameters: {param}, {param1}.",
param="test_param",
)
assert ex.message == "Test exception message with parameters: test_param, <param1>."
assert None not in ex.message_parameters
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/tool_with_init_error.py | from promptflow import ToolProvider, tool
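# Deliberately fails in __init__ so executor tests can verify how tool-loading
# errors are surfaced.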
class TestLoadErrorTool(ToolProvider):
def __init__(self):
raise Exception("Tool load error.")
@tool
def tool(self, name: str):
return name
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool.py | from jinja2 import Template
from promptflow import ToolProvider, tool
from promptflow.connections import AzureOpenAIConnection
from promptflow.contracts.types import PromptTemplate
class TestCustomLLMTool(ToolProvider):
def __init__(self, connection: AzureOpenAIConnection):
super().__init__()
self.connection = connection
@tool
def call(self, connection_2: AzureOpenAIConnection, api: str, template: PromptTemplate, **kwargs):
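        # Render the prompt template locally and validate the injected connections
        # and api value instead of calling the Azure OpenAI service.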
prompt = Template(template, trim_blocks=True, keep_trailing_newline=True).render(**kwargs)
assert isinstance(self.connection, AzureOpenAIConnection)
assert isinstance(connection_2, AzureOpenAIConnection)
assert api in ["completion", "chat"]
return prompt
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/tool_with_connection.py | from dataclasses import dataclass
from promptflow import tool
from promptflow._core.tools_manager import register_connections
from promptflow.contracts.types import Secret
@dataclass
class TestConnection:
name: str
secret: Secret
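# Register the custom connection type so the executor can resolve TestConnection
# inputs declared by the package tool.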
register_connections(TestConnection)
@tool
def tool_with_test_conn(conn: TestConnection):
assert isinstance(conn, TestConnection)
return conn.name + conn.secret
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/tool_with_init_error/package_tool_definition.json | {
"tool_with_init_error": {
"class_name": "TestLoadErrorTool",
"function": "tool",
"inputs": {
"name": {"type": ["string"]}
},
"module": "tool_with_init_error",
"name": "Tool with init error",
"type": "python"
}
}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/tool_with_init_error/flow.dag.yaml | inputs: {}
outputs: {}
nodes:
- name: tool_with_init_error
type: python
source:
type: package
tool: tool_with_init_error
inputs:
name: test_name
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool_with_duplicated_inputs/prompt_with_duplicated_inputs.jinja2 | {{api}}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool_with_duplicated_inputs/package_tool_definition.json | {
"custom_llm_tool.TestCustomLLMTool.call": {
"class_name": "TestCustomLLMTool",
"function": "call",
"inputs": {
"connection": {"type": ["AzureOpenAIConnection"]},
"api": {"type": ["string"]},
"template": {"type": ["PromptTemplate"]}
},
"module": "custom_llm_tool",
"name": "Test Custom LLM Tool",
"description": "Test Custom LLM Tool",
"type": "python"
}
}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool_with_duplicated_inputs/flow.dag.yaml | inputs:
text:
type: string
outputs:
output:
type: string
reference: ${custom_llm_tool_with_duplicated_inputs.output}
nodes:
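# 'api' appears both as a declared tool input and as a variable in
# prompt_with_duplicated_inputs.jinja2, which is the duplicated-input case
# this flow exercises.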
- name: custom_llm_tool_with_duplicated_inputs
type: custom_llm
source:
type: package_with_prompt
tool: custom_llm_tool.TestCustomLLMTool.call
path: ./prompt_with_duplicated_inputs.jinja2
inputs:
connection: azure_open_ai_connection
api: completion
text: ${inputs.text}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/tool_with_connection/package_tool_definition.json | {
"tool_with_connection": {
"function": "tool_with_test_conn",
"inputs": {
"conn": {"type": ["TestConnection"]}
},
"module": "tool_with_connection",
"name": "Test Tool with Connection",
"type": "python"
}
}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/tool_with_connection/flow.dag.yaml | inputs: {}
outputs: {}
nodes:
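# The 'conn' input names a connection ('test_conn') that is expected to be
# supplied as a TestConnection instance when the flow is executed.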
- name: tool_with_conn
type: python
source:
type: package
tool: tool_with_connection
inputs:
conn: test_conn
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool/inputs.json | {}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool/my_prompt.jinja2 | {# Please replace the template with your own prompt. #}
Write a simple program that displays the greeting message: "{{text}}" when executed.
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool/samples.json | [
{
"text": "Hello"
},
{
"text": "Hello World!"
}
]
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool/package_tool_definition.json | {
"custom_llm_tool.TestCustomLLMTool.call": {
"class_name": "TestCustomLLMTool",
"function": "call",
"inputs": {
"connection": {"type": ["AzureOpenAIConnection"]},
"connection_2": {"type": ["AzureOpenAIConnection"]},
"api": {"type": ["string"]},
"template": {"type": ["PromptTemplate"]}
},
"module": "custom_llm_tool",
"name": "Test Custom LLM Tool",
"description": "Test Custom LLM Tool",
"type": "python"
}
}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool/flow.dag.yaml | inputs:
text:
type: string
outputs:
output:
type: string
reference: ${my_custom_llm_tool.output}
nodes:
- name: my_custom_llm_tool
type: custom_llm
source:
type: package_with_prompt
tool: custom_llm_tool.TestCustomLLMTool.call
path: ./my_prompt.jinja2
inputs:
connection: azure_open_ai_connection
connection_2: azure_open_ai_connection
api: completion
text: ${inputs.text}
| 0 |