from enum import Enum
from typing import Union
from openai import AzureOpenAI as AzureOpenAIClient, OpenAI as OpenAIClient
from promptflow.tools.common import handle_openai_error, normalize_connection_config
from promptflow.tools.exception import InvalidConnectionType
# Avoid circular dependencies: Use import 'from promptflow._internal' instead of 'from promptflow'
# since the code here is in promptflow namespace as well
from promptflow._internal import tool
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection
class EmbeddingModel(str, Enum):
TEXT_EMBEDDING_ADA_002 = "text-embedding-ada-002"
TEXT_SEARCH_ADA_DOC_001 = "text-search-ada-doc-001"
TEXT_SEARCH_ADA_QUERY_001 = "text-search-ada-query-001"
@tool
@handle_openai_error()
def embedding(connection: Union[AzureOpenAIConnection, OpenAIConnection], input: str, deployment_name: str = "",
model: EmbeddingModel = EmbeddingModel.TEXT_EMBEDDING_ADA_002):
if isinstance(connection, AzureOpenAIConnection):
client = AzureOpenAIClient(**normalize_connection_config(connection))
return client.embeddings.create(
input=input,
model=deployment_name,
extra_headers={"ms-azure-ai-promptflow-called-from": "aoai-tool"}
).data[0].embedding
elif isinstance(connection, OpenAIConnection):
client = OpenAIClient(**normalize_connection_config(connection))
return client.embeddings.create(
input=input,
model=model
).data[0].embedding
else:
error_message = f"Not Support connection type '{type(connection).__name__}' for embedding api. " \
f"Connection type should be in [AzureOpenAIConnection, OpenAIConnection]."
raise InvalidConnectionType(message=error_message)
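# Illustrative usage sketch (not part of the tool itself): calling the embedding
# tool with an OpenAI connection. The api_key below is a placeholder.
#
#     from promptflow.connections import OpenAIConnection
#
#     conn = OpenAIConnection(api_key="<your-api-key>")
#     vector = embedding(conn, input="hello world",
#                        model=EmbeddingModel.TEXT_EMBEDDING_ADA_002)
#     # `vector` is the list of floats returned by the embeddings endpoint.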
# ===== promptflow/src/promptflow-tools/promptflow/tools/embedding.py =====
import os
import re
from io import open
from typing import Any, List, Match, cast
from setuptools import find_namespace_packages, setup
PACKAGE_NAME = "promptflow-tools"
PACKAGE_FOLDER_PATH = "promptflow"
def parse_requirements(file_name: str) -> List[str]:
with open(file_name) as f:
return [
require.strip() for require in f
if require.strip() and not require.startswith('#')
]
# Version extraction inspired from 'requests'
with open(os.path.join(PACKAGE_FOLDER_PATH, "version.txt"), "r") as fd:
version_content = fd.read()
print(version_content)
version = cast(Match[Any], re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', version_content, re.MULTILINE)).group(1)
if not version:
raise RuntimeError("Cannot find version information")
with open("README.md", encoding="utf-8") as f:
readme = f.read()
with open("CHANGELOG.md", encoding="utf-8") as f:
changelog = f.read()
setup(
name=PACKAGE_NAME,
version=version,
description="Prompt flow built-in tools",
long_description_content_type="text/markdown",
long_description=readme + "\n\n" + changelog,
author="Microsoft Corporation",
author_email="[email protected]",
url="https://github.com/microsoft/promptflow",
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires="<4.0,>=3.8",
install_requires=parse_requirements('requirements.txt'),
packages=find_namespace_packages(include=[f"{PACKAGE_FOLDER_PATH}.*"]),
entry_points={
"package_tools": ["builtins = promptflow.tools.list:list_package_tools"],
},
include_package_data=True,
project_urls={
"Bug Reports": "https://github.com/microsoft/promptflow/issues",
"Source": "https://github.com/microsoft/promptflow",
},
)
# ===== promptflow/src/promptflow-tools/setup.py =====
import copy
import os
import pytest
import re
from azure.identity import DefaultAzureCredential
from typing import List, Dict
from promptflow.tools.exception import (
OpenModelLLMUserError,
OpenModelLLMKeyValidationError
)
from promptflow.tools.open_model_llm import (
OpenModelLLM,
API,
ContentFormatterBase,
LlamaContentFormatter,
list_endpoint_names,
list_deployment_names,
CustomConnectionsContainer,
get_model_type,
ModelFamily,
ServerlessEndpointsContainer
)
def validate_response(response):
assert len(response) > 15
def verify_prompt_role_delimiters(message: str, codes: List[str]):
assert codes == "UserError/OpenModelLLMUserError".split("/")
message_pattern = re.compile(
r"The Chat API requires a specific format for prompt definition, and the prompt should include separate "
+ r"lines as role delimiters: ('(assistant|user|system):\\n'[,.]){3} Current parsed role 'the quick brown"
+ r" fox' does not meet the requirement. If you intend to use the "
+ r"Completion API, please select the appropriate API type and deployment name. If you do intend to use the "
+ r"Chat API, please refer to the guideline at https://aka.ms/pfdoc/chat-prompt or view the samples in our "
+ r"gallery that contain 'Chat' in the name.")
is_match = message_pattern.match(message)
assert is_match
@pytest.fixture
def verify_service_endpoints(open_model_llm_ws_service_connection) -> Dict[str, List[str]]:
if not open_model_llm_ws_service_connection:
pytest.skip("Service Credential not available")
print("open_model_llm_ws_service_connection completed")
required_env_vars = ["AZUREML_ARM_SUBSCRIPTION", "AZUREML_ARM_RESOURCEGROUP", "AZUREML_ARM_WORKSPACE_NAME",
"AZURE_CLIENT_ID", "AZURE_TENANT_ID", "AZURE_CLIENT_SECRET"]
for rev in required_env_vars:
if rev not in os.environ:
raise Exception(f"test not setup correctly. Missing Required Environment Variable:{rev}")
@pytest.fixture
def endpoints_provider(verify_service_endpoints) -> Dict[str, List[str]]:
from azure.ai.ml import MLClient
credential = DefaultAzureCredential(exclude_interactive_browser_credential=False)
ml_client = MLClient(
credential=credential,
subscription_id=os.getenv("AZUREML_ARM_SUBSCRIPTION"),
resource_group_name=os.getenv("AZUREML_ARM_RESOURCEGROUP"),
workspace_name=os.getenv("AZUREML_ARM_WORKSPACE_NAME"))
endpoints = {}
for ep in ml_client.online_endpoints.list():
endpoints[ep.name] = [d.name for d in ml_client.online_deployments.list(ep.name)]
return endpoints
@pytest.fixture
def chat_endpoints_provider(endpoints_provider: Dict[str, List[str]]) -> Dict[str, List[str]]:
chat_endpoint_names = ["gpt2", "llama-chat"]
chat_endpoints = {}
for key, value in endpoints_provider.items():
for ep_name in chat_endpoint_names:
if ep_name in key:
chat_endpoints[key] = value
if len(chat_endpoints) <= 0:
pytest.skip("No Chat Endpoints Found")
return chat_endpoints
@pytest.fixture
def completion_endpoints_provider(endpoints_provider: Dict[str, List[str]]) -> Dict[str, List[str]]:
completion_endpoint_names = ["gpt2", "llama-comp"]
completion_endpoints = {}
for key, value in endpoints_provider.items():
for ep_name in completion_endpoint_names:
if ep_name in key:
completion_endpoints[key] = value
if len(completion_endpoints) <= 0:
pytest.skip("No Completion Endpoints Found")
return completion_endpoints
@pytest.mark.usefixtures("use_secrets_config_file")
class TestOpenModelLLM:
stateless_os_llm = OpenModelLLM()
gpt2_connection = "connection/gpt2_connection"
llama_connection = "connection/llama_chat_connection"
llama_serverless_connection = "connection/llama_chat_serverless"
completion_prompt = "The quick brown fox"
chat_prompt = """system:
* You are an AI which helps customers complete a sentence.
* Your answer should complete the provided prompt.
* Your answer should be followed by a discussion of the meaning.
* The discussion part of your answer must be long and detailed.
user:
""" + completion_prompt
def test_open_model_llm_completion(self, verify_service_endpoints):
response = self.stateless_os_llm.call(
self.completion_prompt,
API.COMPLETION,
endpoint_name=self.gpt2_connection)
validate_response(response)
def test_open_model_llm_completion_with_deploy(self, verify_service_endpoints):
response = self.stateless_os_llm.call(
self.completion_prompt,
API.COMPLETION,
endpoint_name=self.gpt2_connection,
deployment_name="gpt2-10")
validate_response(response)
def test_open_model_llm_chat(self, verify_service_endpoints):
response = self.stateless_os_llm.call(
self.chat_prompt,
API.CHAT,
endpoint_name=self.gpt2_connection)
validate_response(response)
def test_open_model_llm_chat_with_deploy(self, verify_service_endpoints):
response = self.stateless_os_llm.call(
self.chat_prompt,
API.CHAT,
endpoint_name=self.gpt2_connection,
deployment_name="gpt2-10")
validate_response(response)
def test_open_model_llm_chat_with_max_length(self, verify_service_endpoints):
response = self.stateless_os_llm.call(
self.chat_prompt,
API.CHAT,
endpoint_name=self.gpt2_connection,
max_new_tokens=30)
# GPT-2 doesn't take this parameter
validate_response(response)
@pytest.mark.skip_if_no_api_key("gpt2_custom_connection")
def test_open_model_llm_con_url_chat(self, gpt2_custom_connection):
tmp = copy.deepcopy(gpt2_custom_connection)
del tmp.configs['endpoint_url']
with pytest.raises(OpenModelLLMKeyValidationError) as exc_info:
customConnectionsContainer = CustomConnectionsContainer()
customConnectionsContainer.get_endpoint_from_custom_connection(connection=tmp)
assert exc_info.value.message == """Required key `endpoint_url` not found in given custom connection.
Required keys are: endpoint_url,model_family."""
assert exc_info.value.error_codes == "UserError/ToolValidationError/OpenModelLLMKeyValidationError".split("/")
@pytest.mark.skip_if_no_api_key("gpt2_custom_connection")
def test_open_model_llm_con_key_chat(self, gpt2_custom_connection):
tmp = copy.deepcopy(gpt2_custom_connection)
del tmp.secrets['endpoint_api_key']
with pytest.raises(OpenModelLLMKeyValidationError) as exc_info:
customConnectionsContainer = CustomConnectionsContainer()
customConnectionsContainer.get_endpoint_from_custom_connection(connection=tmp)
assert exc_info.value.message == (
"Required secret key `endpoint_api_key` "
+ """not found in given custom connection.
Required keys are: endpoint_api_key.""")
assert exc_info.value.error_codes == "UserError/ToolValidationError/OpenModelLLMKeyValidationError".split("/")
@pytest.mark.skip_if_no_api_key("gpt2_custom_connection")
def test_open_model_llm_con_model_chat(self, gpt2_custom_connection):
tmp = copy.deepcopy(gpt2_custom_connection)
del tmp.configs['model_family']
with pytest.raises(OpenModelLLMKeyValidationError) as exc_info:
customConnectionsContainer = CustomConnectionsContainer()
customConnectionsContainer.get_endpoint_from_custom_connection(connection=tmp)
assert exc_info.value.message == """Required key `model_family` not found in given custom connection.
Required keys are: endpoint_url,model_family."""
assert exc_info.value.error_codes == "UserError/ToolValidationError/OpenModelLLMKeyValidationError".split("/")
def test_open_model_llm_escape_chat(self):
danger = r"The quick \brown fox\tjumped\\over \the \\boy\r\n"
out_of_danger = ContentFormatterBase.escape_special_characters(danger)
assert out_of_danger == "The quick \\brown fox\\tjumped\\\\over \\the \\\\boy\\r\\n"
def test_open_model_llm_llama_parse_chat_with_chat(self):
LlamaContentFormatter.parse_chat(self.chat_prompt)
def test_open_model_llm_llama_parse_multi_turn(self):
multi_turn_chat = """user:
You are an AI which helps customers answer questions.
What is the best movie of all time?
assistant:
Mobius, which starred Jared Leto
user:
Why was that the greatest movie of all time?
"""
LlamaContentFormatter.parse_chat(multi_turn_chat)
def test_open_model_llm_llama_parse_ignore_whitespace(self):
bad_chat_prompt = f"""system:
You are an AI which helps customers answer questions.
user:
user:
{self.completion_prompt}"""
with pytest.raises(OpenModelLLMUserError) as exc_info:
LlamaContentFormatter.parse_chat(bad_chat_prompt)
verify_prompt_role_delimiters(exc_info.value.message, exc_info.value.error_codes)
def test_open_model_llm_llama_parse_chat_with_comp(self):
with pytest.raises(OpenModelLLMUserError) as exc_info:
LlamaContentFormatter.parse_chat(self.completion_prompt)
verify_prompt_role_delimiters(exc_info.value.message, exc_info.value.error_codes)
def test_open_model_llm_chat_endpoint_name(self, chat_endpoints_provider):
for endpoint_name in chat_endpoints_provider:
response = self.stateless_os_llm.call(
self.chat_prompt,
API.CHAT,
endpoint_name=f"onlineEndpoint/{endpoint_name}")
validate_response(response)
def test_open_model_llm_chat_endpoint_name_with_deployment(self, chat_endpoints_provider):
for endpoint_name in chat_endpoints_provider:
for deployment_name in chat_endpoints_provider[endpoint_name]:
response = self.stateless_os_llm.call(
self.chat_prompt,
API.CHAT,
endpoint_name=f"onlineEndpoint/{endpoint_name}",
deployment_name=deployment_name)
validate_response(response)
def test_open_model_llm_completion_endpoint_name(self, completion_endpoints_provider):
for endpoint_name in completion_endpoints_provider:
response = self.stateless_os_llm.call(
self.completion_prompt,
API.COMPLETION,
endpoint_name=f"onlineEndpoint/{endpoint_name}")
validate_response(response)
def test_open_model_llm_completion_endpoint_name_with_deployment(self, completion_endpoints_provider):
for endpoint_name in completion_endpoints_provider:
for deployment_name in completion_endpoints_provider[endpoint_name]:
response = self.stateless_os_llm.call(
self.completion_prompt,
API.COMPLETION,
endpoint_name=f"onlineEndpoint/{endpoint_name}",
deployment_name=deployment_name)
validate_response(response)
def test_open_model_llm_llama_chat(self, verify_service_endpoints):
response = self.stateless_os_llm.call(self.chat_prompt, API.CHAT, endpoint_name=self.llama_connection)
validate_response(response)
def test_open_model_llm_llama_serverless(self, verify_service_endpoints):
response = self.stateless_os_llm.call(
self.chat_prompt,
API.CHAT,
endpoint_name=self.llama_serverless_connection)
validate_response(response)
def test_open_model_llm_llama_chat_history(self, verify_service_endpoints):
chat_history_prompt = """system:
* Given the following conversation history and the users next question, answer the next question.
* If the conversation is irrelevant or empty, acknowledge and ask for more input.
* Do not add more details than necessary to the question.
chat history:
{% for item in chat_history %}
user:
{{ item.inputs.chat_input }}
assistant:
{{ item.outputs.chat_output }}
{% endfor %}
user:
{{ chat_input }}"""
response = self.stateless_os_llm.call(
chat_history_prompt,
API.CHAT,
endpoint_name=self.llama_connection,
chat_history=[
{
"inputs":
{
"chat_input": "Hi"
},
"outputs":
{
"chat_output": "Hello! How can I assist you today?"
}
},
{
"inputs":
{
"chat_input": "What is Azure compute instance?"
},
"outputs":
{
"chat_output": "An Azure Machine Learning compute instance is a fully managed cloud-based"
+ " workstation for data scientists. It provides a pre-configured and managed development"
+ " environment in the cloud for machine learning. Compute instances can also be used as a"
+ " compute target for training and inferencing for development and testing purposes. They"
+ " have a job queue, run jobs securely in a virtual network environment, and can run"
+ " multiple small jobs in parallel. Additionally, compute instances support single-node"
+ " multi-GPU distributed training jobs."
}
}
],
chat_input="Sorry I didn't follow, could you say that again?")
validate_response(response)
def test_open_model_llm_dynamic_list_ignore_deployment(self, verify_service_endpoints):
deployments = list_deployment_names(
subscription_id=os.getenv("AZUREML_ARM_SUBSCRIPTION"),
resource_group_name=os.getenv("AZUREML_ARM_RESOURCEGROUP"),
workspace_name=os.getenv("AZUREML_ARM_WORKSPACE_NAME"),
endpoint=None)
assert len(deployments) == 1
assert deployments[0]['value'] == 'default'
deployments = list_deployment_names(
subscription_id=os.getenv("AZUREML_ARM_SUBSCRIPTION"),
resource_group_name=os.getenv("AZUREML_ARM_RESOURCEGROUP"),
workspace_name=os.getenv("AZUREML_ARM_WORKSPACE_NAME"),
endpoint='')
assert len(deployments) == 1
assert deployments[0]['value'] == 'default'
deployments = list_deployment_names(
subscription_id=os.getenv("AZUREML_ARM_SUBSCRIPTION"),
resource_group_name=os.getenv("AZUREML_ARM_RESOURCEGROUP"),
workspace_name=os.getenv("AZUREML_ARM_WORKSPACE_NAME"),
endpoint='fake_endpoint name')
assert len(deployments) == 1
assert deployments[0]['value'] == 'default'
def test_open_model_llm_dynamic_list_serverless_test(self, verify_service_endpoints):
subscription_id = os.getenv("AZUREML_ARM_SUBSCRIPTION")
resource_group_name = os.getenv("AZUREML_ARM_RESOURCEGROUP")
workspace_name = os.getenv("AZUREML_ARM_WORKSPACE_NAME")
se_container = ServerlessEndpointsContainer()
credential = DefaultAzureCredential(exclude_interactive_browser_credential=False)
token = credential.get_token("https://management.azure.com/.default").token
eps = se_container.list_serverless_endpoints(
token,
subscription_id,
resource_group_name,
workspace_name)
if len(eps) == 0:
pytest.skip("Service Credential not available")
endpoint_connection_name = eps[0]["value"].replace("serverlessEndpoint/", "")
eps_keys = se_container._list_endpoint_key(
token,
subscription_id,
resource_group_name,
workspace_name,
endpoint_connection_name
)
assert len(eps_keys) == 2
(endpoint_url, endpoint_key, model_family) = se_container.get_serverless_endpoint_key(
token,
subscription_id,
resource_group_name,
workspace_name,
endpoint_connection_name)
assert len(endpoint_url) > 20
assert model_family == "LLaMa"
assert endpoint_key == eps_keys['primaryKey']
def test_open_model_llm_dynamic_list_custom_connections_test(self, verify_service_endpoints):
custom_container = CustomConnectionsContainer()
credential = DefaultAzureCredential(exclude_interactive_browser_credential=False)
connections = custom_container.list_custom_connection_names(
credential,
subscription_id=os.getenv("AZUREML_ARM_SUBSCRIPTION"),
resource_group_name=os.getenv("AZUREML_ARM_RESOURCEGROUP"),
workspace_name=os.getenv("AZUREML_ARM_WORKSPACE_NAME"))
assert len(connections) > 1
def test_open_model_llm_dynamic_list_happy_path(self, verify_service_endpoints):
endpoints = list_endpoint_names(
subscription_id=os.getenv("AZUREML_ARM_SUBSCRIPTION"),
resource_group_name=os.getenv("AZUREML_ARM_RESOURCEGROUP"),
workspace_name=os.getenv("AZUREML_ARM_WORKSPACE_NAME"),
return_endpoint_url=True
)
# we might want to remove this or skip if there are zero endpoints in the long term.
# currently we have low cost compute for a GPT2 endpoint, so if nothing else this should be available.
assert len(endpoints) > 0
for endpoint in endpoints:
assert "value" in endpoint
assert "display_value" in endpoint
assert "description" in endpoint
from tests.utils import verify_url_exists
for endpoint in endpoints:
if "localConnection/" in endpoint['value'] or not verify_url_exists(endpoint["url"]):
continue
is_chat = "serverless" in endpoint['value'] or "chat" in endpoint['value']
if is_chat:
prompt = self.chat_prompt
api_type = API.CHAT
else:
prompt = self.completion_prompt
api_type = API.COMPLETION
# test with default endpoint
response = self.stateless_os_llm.call(
prompt,
api_type,
endpoint_name=endpoint['value'],
max_new_tokens=30,
model_kwargs={})
validate_response(response)
deployments = list_deployment_names(
subscription_id=os.getenv("AZUREML_ARM_SUBSCRIPTION"),
resource_group_name=os.getenv("AZUREML_ARM_RESOURCEGROUP"),
workspace_name=os.getenv("AZUREML_ARM_WORKSPACE_NAME"),
endpoint=endpoint['value'])
if "onlineEndpoint" in endpoint['value']:
assert len(deployments) > 0
else:
assert len(deployments) == 1
assert deployments[0]['value'] == 'default'
continue
for deployment in deployments:
response = self.stateless_os_llm.call(
prompt,
api_type,
endpoint_name=endpoint['value'],
deployment_name=deployment['value'],
max_new_tokens=30,
model_kwargs={})
validate_response(response)
def test_open_model_llm_get_model_llama(self):
model_assets = [
"azureml://registries/azureml-meta/models/Llama-2-7b-chat/versions/14",
"azureml://registries/azureml-meta/models/Llama-2-7b/versions/12",
"azureml://registries/azureml-meta/models/Llama-2-13b-chat/versions/12",
"azureml://registries/azureml-meta/models/Llama-2-13b/versions/12",
"azureml://registries/azureml-meta/models/Llama-2-70b-chat/versions/12",
"azureml://registries/azureml-meta/models/Llama-2-70b/versions/13"
]
for asset_name in model_assets:
assert ModelFamily.LLAMA == get_model_type(asset_name)
def test_open_model_llm_get_model_gpt2(self):
model_assets = [
"azureml://registries/azureml-staging/models/gpt2/versions/9",
"azureml://registries/azureml/models/gpt2/versions/9",
"azureml://registries/azureml/models/gpt2-medium/versions/11",
"azureml://registries/azureml/models/gpt2-large/versions/11"
]
for asset_name in model_assets:
assert ModelFamily.GPT2 == get_model_type(asset_name)
def test_open_model_llm_get_model_dolly(self):
model_assets = [
"azureml://registries/azureml/models/databricks-dolly-v2-12b/versions/11"
]
for asset_name in model_assets:
assert ModelFamily.DOLLY == get_model_type(asset_name)
def test_open_model_llm_get_model_falcon(self):
model_assets = [
"azureml://registries/azureml/models/tiiuae-falcon-40b/versions/2",
"azureml://registries/azureml/models/tiiuae-falcon-40b/versions/2"
]
for asset_name in model_assets:
assert ModelFamily.FALCON == get_model_type(asset_name)
def test_open_model_llm_get_model_failure_cases(self):
bad_model_assets = [
"azureml://registries/azureml-meta/models/CodeLlama-7b-Instruct-hf/versions/3",
"azureml://registries/azureml-staging/models/gpt-2/versions/9",
"azureml://registries/azureml/models/falcon-40b/versions/2",
"azureml://registries/azureml-meta/models/Llama-70b/versions/13",
"azureml://registries/azureml/models/openai-whisper-large/versions/14",
"azureml://registries/azureml/models/ask-wikipedia/versions/2",
"definitely not real",
"",
"ausreml://registries/azureml/models/ask-wikipedia/versions/2",
"azureml://registries/azureml/models/ask-wikipedia/version/2",
"azureml://registries/azureml/models/ask-wikipedia/version/"
]
for asset_name in bad_model_assets:
val = get_model_type(asset_name)
assert val is None
def test_open_model_llm_local_connection(self, verify_service_endpoints, gpt2_custom_connection):
endpoints = list_endpoint_names(
subscription_id=os.getenv("AZUREML_ARM_SUBSCRIPTION"),
resource_group_name=os.getenv("AZUREML_ARM_RESOURCEGROUP"),
workspace_name=os.getenv("AZUREML_ARM_WORKSPACE_NAME"),
return_endpoint_url=True
)
import uuid
connection_name = f"test_local_connection_{uuid.uuid4()}"
for e in endpoints:
assert e['value'] != connection_name
from promptflow._sdk.entities import CustomConnection
connection = CustomConnection(name=connection_name,
configs={
"endpoint_url": gpt2_custom_connection.configs['endpoint_url'],
"model_family": gpt2_custom_connection.configs['model_family']},
secrets={
"endpoint_api_key": gpt2_custom_connection.secrets['endpoint_api_key']})
from promptflow import PFClient as LocalPFClient
pf_client = LocalPFClient()
pf_client.connections.create_or_update(connection)
endpoints = list_endpoint_names(
subscription_id=os.getenv("AZUREML_ARM_SUBSCRIPTION"),
resource_group_name=os.getenv("AZUREML_ARM_RESOURCEGROUP"),
workspace_name=os.getenv("AZUREML_ARM_WORKSPACE_NAME"),
force_refresh=True
)
found = False
target_connection_name = f"localConnection/{connection_name}"
for e in endpoints:
if e['value'] == target_connection_name:
found = True
break
assert found
response = self.stateless_os_llm.call(
self.completion_prompt,
API.COMPLETION,
endpoint_name=target_connection_name)
validate_response(response)
def test_open_model_llm_package(self):
import pkg_resources
# Promptflow-tools is not installed in the test pipeline, so we'll skip this test there. Works locally.
try:
pkg_resources.get_distribution("promptflow-tools")
except pkg_resources.DistributionNotFound:
pytest.skip("promptflow-tools not installed")
found = False
target_tool_identifier = "promptflow.tools.open_model_llm.OpenModelLLM.call"
for entry_point in pkg_resources.iter_entry_points(group="package_tools"):
list_tool_func = entry_point.resolve()
package_tools = list_tool_func()
for identifier, tool in package_tools.items():
if identifier == target_tool_identifier:
import importlib
importlib.import_module(tool["module"]) # Import the module to ensure its validity
assert not found
found = True
assert found
# ===== promptflow/src/promptflow-tools/tests/test_open_model_llm.py =====
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import argparse
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType
# TODO: avoid azure dependency here
MAX_LIST_CLI_RESULTS = 50
class AppendToDictAction(argparse._AppendAction): # pylint: disable=protected-access
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
super(AppendToDictAction, self).__call__(parser, namespace, action, option_string)
def get_action(self, values, option_string): # pylint: disable=no-self-use
from promptflow._sdk._utils import strip_quotation
kwargs = {}
for item in values:
try:
key, value = strip_quotation(item).split("=", 1)
kwargs[key] = strip_quotation(value)
except ValueError:
raise Exception("Usage error: {} KEY=VALUE [KEY=VALUE ...]".format(option_string))
return kwargs
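    # Illustrative example: `--set property1=value1 property2=value2` yields
    # {"property1": "value1", "property2": "value2"} in the parsed namespace.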
class FlowTestInputAction(AppendToDictAction): # pylint: disable=protected-access
def get_action(self, values, option_string): # pylint: disable=no-self-use
if len(values) == 1 and "=" not in values[0]:
from promptflow._utils.load_data import load_data
if not values[0].endswith(".jsonl"):
raise ValueError("Only support jsonl file as input.")
return load_data(local_path=values[0])[0]
else:
return super().get_action(values, option_string)
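    # Illustrative example: `--inputs data.jsonl` loads the first line of the
    # jsonl file as the inputs dict, while `--inputs q=hello` yields {"q": "hello"}.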
def add_param_yes(parser):
parser.add_argument(
"-y",
"--yes",
"--assume-yes",
action="store_true",
help="Automatic yes to all prompts; assume 'yes' as answer to all prompts and run non-interactively.",
)
def add_param_ua(parser):
# suppress user agent for now since it's only used in vscode extension
parser.add_argument("--user-agent", help=argparse.SUPPRESS)
def add_param_flow_display_name(parser):
parser.add_argument("--flow", type=str, required=True, help="The flow name to create.")
def add_param_entry(parser):
parser.add_argument("--entry", type=str, help="The entry file.")
def add_param_function(parser):
parser.add_argument("--function", type=str, help="The function name in entry file.")
def add_param_prompt_template(parser):
parser.add_argument(
"--prompt-template", action=AppendToDictAction, help="The prompt template parameter and assignment.", nargs="+"
)
def add_param_set(parser):
parser.add_argument(
"--set",
dest="params_override",
action=AppendToDictAction,
help="Update an object by specifying a property path and value to set. Example: --set "
"property1.property2=<value>.",
nargs="+",
)
def add_param_set_positional(parser):
parser.add_argument(
"params_override",
action=AppendToDictAction,
help="Set an object by specifying a property path and value to set. Example: set "
"property1.property2=<value>.",
nargs="+",
)
def add_param_environment_variables(parser):
parser.add_argument(
"--environment-variables",
action=AppendToDictAction,
help="Environment variables to set by specifying a property path and value. Example: --environment-variable "
"key1='${my_connection.api_key}' key2='value2'. The value reference to connection keys will be resolved "
"to the actual value, and all environment variables specified will be set into os.environ.",
nargs="+",
)
def add_param_connections(parser):
parser.add_argument(
"--connections",
action=AppendToDictAction,
help="Overwrite node level connections with provided value. Example: --connections "
"node1.connection=test_llm_connection node1.deployment_name=gpt-35-turbo",
nargs="+",
)
def add_param_columns_mapping(parser):
parser.add_argument(
"--column-mapping",
action=AppendToDictAction,
help="Inputs column mapping, use ${data.xx} to refer to data columns, "
"use ${run.inputs.xx} to refer to referenced run's data columns. "
"and use ${run.outputs.xx} to refer to referenced run's output columns."
"Example: --column-mapping data1='${data.data1}' data2='${run.inputs.data2}' data3='${run.outputs.data3}'",
nargs="+",
)
def add_param_set_tool_extra_info(parser):
parser.add_argument(
"--set",
dest="extra_info",
action=AppendToDictAction,
help="Set extra information about the tool. Example: --set <key>=<value>.",
nargs="+",
)
def add_param_inputs(parser):
parser.add_argument(
"--inputs",
action=FlowTestInputAction,
help="Input datas of file for the flow. Example: --inputs data1=data1_val data2=data2_val",
nargs="+",
)
def add_param_env(parser):
parser.add_argument(
"--env",
type=str,
default=None,
help="The dotenv file path containing the environment variables to be used in the flow.",
)
def add_param_output(parser):
parser.add_argument(
"-o",
"--output",
type=str,
help=(
f"The output directory to store the results. "
f"Default to be ~/{PROMPT_FLOW_DIR_NAME}/{PROMPT_FLOW_RUNS_DIR_NAME} if not specified."
),
)
def add_param_overwrite(parser):
parser.add_argument("--overwrite", action="store_true", help="Overwrite the existing results.")
def add_param_source(parser):
parser.add_argument("--source", type=str, required=True, help="The flow or run source to be used.")
def add_param_run_name(parser):
parser.add_argument("-n", "--name", required=True, type=str, help="Name of the run.")
def add_param_connection_name(parser):
parser.add_argument("-n", "--name", type=str, help="Name of the connection to create.")
def add_param_max_results(parser):
parser.add_argument( # noqa: E731
"-r",
"--max-results",
dest="max_results",
type=int,
default=MAX_LIST_CLI_RESULTS,
help=f"Max number of results to return. Default is {MAX_LIST_CLI_RESULTS}.",
)
def add_param_all_results(parser):
parser.add_argument( # noqa: E731
"--all-results",
action="store_true",
dest="all_results",
default=False,
help="Returns all results. Default to False.",
)
def add_param_variant(parser):
parser.add_argument(
"--variant",
"-v",
type=str,
help="The variant to be used in flow, will use default variant if not specified.",
)
def add_parser_build(subparsers, entity_name: str):
add_param_build_output = lambda parser: parser.add_argument( # noqa: E731
"--output", "-o", required=True, type=str, help="The destination folder path."
)
add_param_format = lambda parser: parser.add_argument( # noqa: E731
"--format", "-f", type=str, help="The format to build with.", choices=["docker", "executable"]
)
# this is a hidden parameter for `mldesigner compile` command
add_param_flow_only = lambda parser: parser.add_argument( # noqa: E731
"--flow-only",
action="store_true",
help=argparse.SUPPRESS,
)
add_params = [
add_param_source,
add_param_build_output,
add_param_format,
add_param_flow_only,
add_param_variant,
] + base_params
from promptflow._cli._utils import activate_action
description = f"Build a {entity_name} for further sharing or deployment."
activate_action(
name="build",
description=description,
epilog=f"pf {entity_name} build --source <source> --output <output> --format " f"docker|package",
add_params=add_params,
subparsers=subparsers,
action_param_name="sub_action",
help_message=description,
)
def add_param_debug(parser):
parser.add_argument(
"-d",
"--debug",
action="store_true",
help="The flag to turn on debug mode for cli.",
)
def add_param_verbose(parser):
parser.add_argument(
"--verbose",
action="store_true",
help="Increase logging verbosity. Use --debug for full debug logs.",
)
def add_param_config(parser):
parser.add_argument(
"--config",
nargs="+",
action=AppendToDictAction,
help=argparse.SUPPRESS,
)
logging_params = [add_param_verbose, add_param_debug]
base_params = logging_params + [
add_param_ua,
]
def add_param_archived_only(parser):
parser.add_argument(
"--archived-only",
action="store_true",
help="Only list archived records.",
)
def add_param_include_archived(parser):
parser.add_argument(
"--include-archived",
action="store_true",
help="List both archived records and active records.",
)
def add_param_output_format(parser):
parser.add_argument(
"-o",
"--output",
type=str,
default=CLIListOutputFormat.JSON,
help="Output format, accepted values are 'json' and 'table'. Default is 'json'.",
choices=[CLIListOutputFormat.TABLE, CLIListOutputFormat.JSON],
)
def add_param_include_others(parser):
parser.add_argument(
"--include-others",
action="store_true",
help="Get records that are owned by all users.",
)
def add_param_flow_type(parser):
parser.add_argument(
"--type",
type=str,
help=(
f"The type of the flow. Available values are {FlowType.get_all_values()}. "
f"Default to be None, which means all types included."
),
)
def add_param_flow_name(parser):
parser.add_argument(
"-n",
"--name",
type=str,
required=True,
help="The name of the flow.",
)
# ===== promptflow/src/promptflow/promptflow/_cli/_params.py =====
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from promptflow._cli._utils import get_client_for_cli
from promptflow.azure import PFClient
def _get_azure_pf_client(subscription_id, resource_group, workspace_name, debug=False):
ml_client = get_client_for_cli(
subscription_id=subscription_id, resource_group_name=resource_group, workspace_name=workspace_name
)
client = PFClient(ml_client=ml_client, logging_enable=debug)
return client
# ===== promptflow/src/promptflow/promptflow/_cli/_pf_azure/_utils.py =====
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from typing import List
from promptflow import log_metric, tool
@tool
def aggregate(processed_results: List[str]):
"""
    This tool aggregates the processed results of all lines, calculates the accuracy, and logs it as a metric.
:param processed_results: List of the output of line_process node.
"""
# Add your aggregation logic here
# Aggregate the results of all lines and calculate the accuracy
aggregated_result = round((processed_results.count("Correct") / len(processed_results)), 2)
    # Log the aggregated result as a metric
log_metric(key="accuracy", value=aggregated_result)
return aggregated_result
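# Illustrative example (assuming a metric-logging run context is active):
#     aggregate(["Correct", "Incorrect", "Correct"])  # returns 0.67 and logs
#     # the value under the "accuracy" metric key.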
# ===== promptflow/src/promptflow/promptflow/_cli/data/evaluation_flow/aggregate.py =====
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# ===== promptflow/src/promptflow/promptflow/_core/__init__.py =====
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import functools
import inspect
import json
import logging
import uuid
from collections.abc import Iterator
from contextvars import ContextVar
from datetime import datetime
from typing import Callable, Optional, Dict
from promptflow._core.generator_proxy import GeneratorProxy, generate_from_proxy
from promptflow._utils.dataclass_serializer import serialize
from promptflow._utils.multimedia_utils import default_json_encoder
from promptflow.contracts.tool import ConnectionType
from promptflow.contracts.trace import Trace, TraceType
from .thread_local_singleton import ThreadLocalSingleton
class Tracer(ThreadLocalSingleton):
CONTEXT_VAR_NAME = "Tracer"
context_var = ContextVar(CONTEXT_VAR_NAME, default=None)
def __init__(self, run_id, node_name: Optional[str] = None):
self._run_id = run_id
self._node_name = node_name
self._traces = []
self._current_trace_id = ContextVar("current_trace_id", default="")
self._id_to_trace: Dict[str, Trace] = {}
@classmethod
def start_tracing(cls, run_id, node_name: Optional[str] = None):
current_run_id = cls.current_run_id()
if current_run_id is not None:
msg = f"Try to start tracing for run {run_id} but {current_run_id} is already active."
logging.warning(msg)
return
tracer = cls(run_id, node_name)
tracer._activate_in_context()
@classmethod
def current_run_id(cls):
tracer = cls.active_instance()
if not tracer:
return None
return tracer._run_id
@classmethod
def end_tracing(cls, run_id: Optional[str] = None, raise_ex=False):
tracer = cls.active_instance()
if not tracer:
msg = "Try end tracing but no active tracer in current context."
if raise_ex:
raise Exception(msg)
logging.warning(msg)
return []
if run_id is not None and tracer._run_id != run_id:
msg = f"Try to end tracing for run {run_id} but {tracer._run_id} is active."
logging.warning(msg)
return []
tracer._deactivate_in_context()
return tracer.to_json()
@classmethod
def push(cls, trace: Trace):
obj = cls.active_instance()
if not obj:
logging.warning("Try to push trace but no active tracer in current context.")
return
obj._push(trace)
@staticmethod
def to_serializable(obj):
if isinstance(obj, dict) and all(isinstance(k, str) for k in obj.keys()):
return {k: Tracer.to_serializable(v) for k, v in obj.items()}
if isinstance(obj, GeneratorProxy):
return obj
try:
obj = serialize(obj)
json.dumps(obj, default=default_json_encoder)
except Exception:
# We don't want to fail the whole function call because of a serialization error,
# so we simply convert it to str if it cannot be serialized.
obj = str(obj)
return obj
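    # Illustrative example: values that cannot be JSON-serialized fall back to
    # their string form, e.g. Tracer.to_serializable(object()) returns a str
    # like "<object object at 0x...>" rather than raising.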
def _get_current_trace(self):
trace_id = self._current_trace_id.get()
if not trace_id:
return None
return self._id_to_trace[trace_id]
def _push(self, trace: Trace):
if not trace.id:
trace.id = str(uuid.uuid4())
if trace.inputs:
trace.inputs = self.to_serializable(trace.inputs)
trace.children = []
if not trace.start_time:
trace.start_time = datetime.utcnow().timestamp()
parent_trace = self._get_current_trace()
if not parent_trace:
self._traces.append(trace)
trace.node_name = self._node_name
else:
parent_trace.children.append(trace)
trace.parent_id = parent_trace.id
self._current_trace_id.set(trace.id)
self._id_to_trace[trace.id] = trace
@classmethod
def pop(cls, output=None, error: Optional[Exception] = None):
obj = cls.active_instance()
return obj._pop(output, error)
def _pop(self, output=None, error: Optional[Exception] = None):
last_trace = self._get_current_trace()
if not last_trace:
logging.warning("Try to pop trace but no active trace in current context.")
return output
if isinstance(output, Iterator):
output = GeneratorProxy(output)
if output is not None:
last_trace.output = self.to_serializable(output)
if error is not None:
last_trace.error = self._format_error(error)
last_trace.end_time = datetime.utcnow().timestamp()
self._current_trace_id.set(last_trace.parent_id)
if isinstance(output, GeneratorProxy):
return generate_from_proxy(output)
else:
return output
def to_json(self) -> list:
return serialize(self._traces)
@staticmethod
def _format_error(error: Exception) -> dict:
return {
"message": str(error),
"type": type(error).__qualname__,
}
def _create_trace_from_function_call(f, *, args=[], kwargs={}, trace_type=TraceType.FUNCTION):
"""Initialize a trace object from a function call."""
sig = inspect.signature(f).parameters
all_kwargs = {**{k: v for k, v in zip(sig.keys(), args)}, **kwargs}
all_kwargs = {
k: ConnectionType.serialize_conn(v) if ConnectionType.is_connection_value(v) else v
for k, v in all_kwargs.items()
}
# TODO: put parameters in self to inputs for builtin tools
all_kwargs.pop("self", None)
return Trace(
name=f.__qualname__,
type=trace_type,
start_time=datetime.utcnow().timestamp(),
inputs=all_kwargs,
children=[],
)
def _traced(func: Callable = None, *, trace_type=TraceType.FUNCTION) -> Callable:
"""A wrapper to add trace to a function.
When a function is wrapped by this wrapper, the function name,
inputs, outputs, start time, end time, and error (if any) will be recorded.
It can be used for both sync and async functions.
For sync functions, it will return a sync function.
For async functions, it will return an async function.
:param func: The function to be traced.
:type func: Callable
:param trace_type: The type of the trace. Defaults to TraceType.FUNCTION.
:type trace_type: TraceType, optional
:return: The wrapped function with trace enabled.
:rtype: Callable
"""
def create_trace(func, args, kwargs):
return _create_trace_from_function_call(func, args=args, kwargs=kwargs, trace_type=trace_type)
if inspect.iscoroutinefunction(func):
@functools.wraps(func)
async def wrapped(*args, **kwargs):
if Tracer.active_instance() is None:
return await func(*args, **kwargs) # Do nothing if no tracing is enabled.
# Should not extract these codes to a separate function here.
# We directly call func instead of calling Tracer.invoke,
# because we want to avoid long stack trace when hitting an exception.
try:
Tracer.push(create_trace(func, args, kwargs))
output = await func(*args, **kwargs)
return Tracer.pop(output)
except Exception as e:
Tracer.pop(None, e)
raise
else:
@functools.wraps(func)
def wrapped(*args, **kwargs):
if Tracer.active_instance() is None:
return func(*args, **kwargs) # Do nothing if no tracing is enabled.
# Should not extract these codes to a separate function here.
# We directly call func instead of calling Tracer.invoke,
# because we want to avoid long stack trace when hitting an exception.
try:
Tracer.push(create_trace(func, args, kwargs))
output = func(*args, **kwargs)
return Tracer.pop(output)
except Exception as e:
Tracer.pop(None, e)
raise
wrapped.__original_function = func
return wrapped
def trace(func: Callable = None) -> Callable:
"""A decorator to add trace to a function.
When a function is wrapped by this decorator, the function name,
inputs, outputs, start time, end time, and error (if any) will be recorded.
It can be used for both sync and async functions.
For sync functions, it will return a sync function.
For async functions, it will return an async function.
:param func: The function to be traced.
:type func: Callable
:return: The wrapped function with trace enabled.
:rtype: Callable
:Examples:
Synchronous function usage:
.. code-block:: python
@trace
def greetings(user_id):
name = get_name(user_id)
return f"Hello, {name}"
Asynchronous function usage:
.. code-block:: python
@trace
async def greetings_async(user_id):
name = await get_name_async(user_id)
return f"Hello, {name}"
"""
return _traced(func, trace_type=TraceType.FUNCTION)
# ===== promptflow/src/promptflow/promptflow/_core/tracer.py =====
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import json
import logging
import mimetypes
import os
from pathlib import Path
from typing import Dict
from flask import Flask, g, jsonify, request
from promptflow._sdk._load_functions import load_flow
from promptflow._sdk._serving.extension.extension_factory import ExtensionFactory
from promptflow._sdk._serving.flow_invoker import FlowInvoker
from promptflow._sdk._serving.response_creator import ResponseCreator
from promptflow._sdk._serving.utils import (
enable_monitoring,
get_output_fields_to_remove,
get_sample_json,
handle_error_to_response,
load_request_data,
streaming_response_required,
)
from promptflow._sdk._utils import setup_user_agent_to_operation_context
from promptflow._utils.exception_utils import ErrorResponse
from promptflow._utils.logger_utils import LoggerFactory
from promptflow._version import VERSION
from promptflow.contracts.run_info import Status
from promptflow.exceptions import SystemErrorException
from promptflow.storage._run_storage import DummyRunStorage
from .swagger import generate_swagger
logger = LoggerFactory.get_logger("pfserving-app", target_stdout=True)
DEFAULT_STATIC_PATH = Path(__file__).parent / "static"
USER_AGENT = f"promptflow-local-serving/{VERSION}"
class PromptflowServingApp(Flask):
def init(self, **kwargs):
with self.app_context():
# default to local, can be override when creating the app
self.extension = ExtensionFactory.create_extension(logger, **kwargs)
self.flow_invoker: FlowInvoker = None
# parse promptflow project path
self.project_path = self.extension.get_flow_project_path()
logger.info(f"Project path: {self.project_path}")
self.flow_entity = load_flow(self.project_path)
self.flow = self.flow_entity._init_executable()
# enable environment_variables
environment_variables = kwargs.get("environment_variables", {})
os.environ.update(environment_variables)
default_environment_variables = self.flow.get_environment_variables_with_overrides()
self.set_default_environment_variables(default_environment_variables)
self.flow_name = self.extension.get_flow_name()
self.flow.name = self.flow_name
conn_data_override, conn_name_override = self.extension.get_override_connections(self.flow)
self.connections_override = conn_data_override
self.connections_name_override = conn_name_override
self.flow_monitor = self.extension.get_flow_monitor()
self.connection_provider = self.extension.get_connection_provider()
self.credential = self.extension.get_credential()
self.sample = get_sample_json(self.project_path, logger)
self.init_swagger()
# try to initialize the flow invoker
try:
self.init_invoker_if_not_exist()
except Exception as e:
if self.extension.raise_ex_on_invoker_initialization_failure(e):
raise e
# ensure response has the correct content type
mimetypes.add_type("application/javascript", ".js")
mimetypes.add_type("text/css", ".css")
setup_user_agent_to_operation_context(self.extension.get_user_agent())
add_default_routes(self)
# register blueprints
blue_prints = self.extension.get_blueprints()
for blue_print in blue_prints:
self.register_blueprint(blue_print)
def init_invoker_if_not_exist(self):
if self.flow_invoker:
return
logger.info("Promptflow executor starts initializing...")
self.flow_invoker = FlowInvoker(
self.project_path,
connection_provider=self.connection_provider,
streaming=streaming_response_required,
raise_ex=False,
connections=self.connections_override,
connections_name_overrides=self.connections_name_override,
# for serving, we don't need to persist intermediate result, this is to avoid memory leak.
storage=DummyRunStorage(),
credential=self.credential,
)
self.flow = self.flow_invoker.flow
# Set the flow name as folder name
self.flow.name = self.flow_name
self.response_fields_to_remove = get_output_fields_to_remove(self.flow, logger)
logger.info("Promptflow executor initializing succeed!")
def init_swagger(self):
self.response_fields_to_remove = get_output_fields_to_remove(self.flow, logger)
self.swagger = generate_swagger(self.flow, self.sample, self.response_fields_to_remove)
def set_default_environment_variables(self, default_environment_variables: Dict[str, str] = None):
if default_environment_variables is None:
return
for key, value in default_environment_variables.items():
if key not in os.environ:
os.environ[key] = value
def add_default_routes(app: PromptflowServingApp):
@app.errorhandler(Exception)
def handle_error(e):
err_resp, resp_code = handle_error_to_response(e, logger)
app.flow_monitor.handle_error(e, resp_code)
return err_resp, resp_code
@app.route("/score", methods=["POST"])
@enable_monitoring
def score():
"""process a flow request in the runtime."""
raw_data = request.get_data()
logger.debug(f"PromptFlow executor received data: {raw_data}")
app.init_invoker_if_not_exist()
        if len(app.flow.inputs) == 0:
data = {}
logger.info("Flow has no input, request data will be ignored.")
else:
logger.info("Start loading request data...")
data = load_request_data(app.flow, raw_data, logger)
# set context data
g.data = data
g.flow_id = app.flow.id or app.flow.name
run_id = g.get("req_id", None)
# TODO: refine this once we can directly set the input/output log level to DEBUG in flow_invoker.
disable_data_logging = logger.level >= logging.INFO
flow_result = app.flow_invoker.invoke(data, run_id=run_id, disable_input_output_logging=disable_data_logging)
g.flow_result = flow_result
# check flow result, if failed, return error response
if flow_result.run_info.status != Status.Completed:
if flow_result.run_info.error:
err = ErrorResponse(flow_result.run_info.error)
g.err_code = err.innermost_error_code
return jsonify(err.to_simplified_dict()), err.response_code
else:
# in case of run failed but can't find any error, return 500
exception = SystemErrorException("Flow execution failed without error message.")
return jsonify(ErrorResponse.from_exception(exception).to_simplified_dict()), 500
intermediate_output = flow_result.output or {}
# remove evaluation only fields
result_output = {k: v for k, v in intermediate_output.items() if k not in app.response_fields_to_remove}
response_creator = ResponseCreator(
flow_run_result=result_output,
accept_mimetypes=request.accept_mimetypes,
)
app.flow_monitor.setup_streaming_monitor_if_needed(response_creator, data, intermediate_output)
return response_creator.create_response()
@app.route("/swagger.json", methods=["GET"])
def swagger():
"""Get the swagger object."""
return jsonify(app.swagger)
@app.route("/health", methods=["GET"])
def health():
"""Check if the runtime is alive."""
return {"status": "Healthy", "version": VERSION}
@app.route("/version", methods=["GET"])
def version():
"""Check the runtime's version."""
build_info = os.environ.get("BUILD_INFO", "")
try:
build_info_dict = json.loads(build_info)
version = build_info_dict["build_number"]
except Exception:
version = VERSION
return {"status": "Healthy", "build_info": build_info, "version": version}
def create_app(**kwargs):
app = PromptflowServingApp(__name__)
if __name__ != "__main__":
app.logger.handlers = logger.handlers
app.logger.setLevel(logger.level)
app.init(**kwargs)
return app
if __name__ == "__main__":
create_app().run()
# ===== promptflow/src/promptflow/promptflow/_sdk/_serving/app.py =====
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Local Server Test App</title>
<style>
html,
body {
height: 100%;
width: 100%;
box-sizing: border-box;
padding: 0;
margin: 0;
}
#root {
height: 100%;
width: 100%;
display: flex;
}
</style>
<script type="module" crossorigin src="/static/index.js"></script>
</head>
<body>
<div id="root"></div>
<script>
const time = new Date().toISOString();
const now = performance.now();
console.log("[perf " + time + " " + now + "]" + " load script start");
</script>
</body>
</html>
# ===== promptflow/src/promptflow/promptflow/_sdk/_serving/static/index.html =====
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""
This file code has been vendored from azure-ai-ml repo.
Please do not edit it, unless really necessary
"""
# region Diff-imports
import os
from pathlib import Path, PureWindowsPath
from typing import Any, Iterable, List, Optional, Tuple, Union
from ._pathspec import GitWildMatchPattern, normalize_file
GIT_IGNORE_FILE_NAME = ".gitignore"
AML_IGNORE_FILE_NAME = ".amlignore"
def convert_windows_path_to_unix(path: Union[str, os.PathLike]) -> str:
return PureWindowsPath(path).as_posix()
# endregion
class IgnoreFile(object):
def __init__(self, file_path: Optional[Union[str, Path]] = None):
"""Base class for handling .gitignore and .amlignore files.
:param file_path: Relative path, or absolute path to the ignore file.
"""
path = Path(file_path).resolve() if file_path else None
self._path = path
self._path_spec = None
def exists(self) -> bool:
"""Checks if ignore file exists."""
return self._file_exists()
def _file_exists(self) -> bool:
return self._path and self._path.exists()
@property
def base_path(self) -> Path:
return self._path.parent
def _get_ignore_list(self) -> List[str]:
"""Get ignore list from ignore file contents."""
if not self.exists():
return []
if self._file_exists():
with open(self._path, "r") as fh:
return [line.rstrip() for line in fh if line]
return []
def _create_pathspec(self) -> List[GitWildMatchPattern]:
"""Creates path specification based on ignore list."""
return [GitWildMatchPattern(ignore) for ignore in self._get_ignore_list()]
def _get_rel_path(self, file_path: Union[str, Path]) -> Optional[str]:
"""Get relative path of given file_path."""
file_path = Path(file_path).absolute()
try:
# use os.path.relpath instead of Path.relative_to in case file_path is not a child of self.base_path
return os.path.relpath(file_path, self.base_path)
except ValueError:
# 2 paths are on different drives
return None
def is_file_excluded(self, file_path: Union[str, Path]) -> bool:
"""Checks if given file_path is excluded.
:param file_path: File path to be checked against ignore file specifications
"""
# TODO: current design of ignore file can't distinguish between files and directories of the same name
if self._path_spec is None:
self._path_spec = self._create_pathspec()
if not self._path_spec:
return False
file_path = self._get_rel_path(file_path)
if file_path is None:
return True
norm_file = normalize_file(file_path)
matched = False
for pattern in self._path_spec:
if pattern.include is not None:
if pattern.match_file(norm_file) is not None:
matched = pattern.include
return matched
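    # Illustrative example: with a .gitignore containing the pattern "*.pyc",
    #     get_ignore_file(project_dir).is_file_excluded(project_dir / "a.pyc")
    # returns True, while "a.py" in the same directory returns False.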
@property
def path(self) -> Union[Path, str]:
return self._path
class AmlIgnoreFile(IgnoreFile):
def __init__(self, directory_path: Union[Path, str]):
file_path = Path(directory_path).joinpath(AML_IGNORE_FILE_NAME)
super(AmlIgnoreFile, self).__init__(file_path)
class GitIgnoreFile(IgnoreFile):
def __init__(self, directory_path: Union[Path, str]):
file_path = Path(directory_path).joinpath(GIT_IGNORE_FILE_NAME)
super(GitIgnoreFile, self).__init__(file_path)
def get_ignore_file(directory_path: Union[Path, str]) -> Optional[IgnoreFile]:
"""Finds and returns IgnoreFile object based on ignore file found in directory_path.
.amlignore takes precedence over .gitignore and if no file is found, an empty
IgnoreFile object will be returned.
The ignore file must be in the root directory.
:param directory_path: Path to the (root) directory where ignore file is located
"""
aml_ignore = AmlIgnoreFile(directory_path)
git_ignore = GitIgnoreFile(directory_path)
if aml_ignore.exists():
return aml_ignore
if git_ignore.exists():
return git_ignore
return IgnoreFile()
def get_upload_files_from_folder(
path: Union[str, Path], *, prefix: str = "", ignore_file: IgnoreFile = IgnoreFile()
) -> List[Tuple[str, str]]:
"""Enumerate all files in the given directory and compose paths for them to be uploaded to in the remote storage.
:param path: Path to the directory to be uploaded
:type path: str
:param prefix: Prefix for remote storage path
:type prefix: str
:param ignore_file: Ignore file object
:type ignore_file: IgnoreFile
:return: List of tuples of (local path, remote path)
:rtype: list
"""
path = Path(path)
upload_paths = []
for root, _, files in os.walk(path, followlinks=True):
upload_paths += list(
traverse_directory(
root,
files,
prefix=Path(prefix).joinpath(Path(root).relative_to(path)).as_posix(),
ignore_file=ignore_file,
)
)
return upload_paths
def traverse_directory(
root: str,
files: List[str],
*,
prefix: str,
ignore_file: IgnoreFile = IgnoreFile(),
# keep this for backward compatibility
**kwargs: Any,
) -> Iterable[Tuple[str, str]]:
"""Enumerate all files in the given directory and compose paths for them to be uploaded to in the remote storage.
e.g.
[/mnt/c/Users/dipeck/upload_files/my_file1.txt,
/mnt/c/Users/dipeck/upload_files/my_file2.txt] -->
[(/mnt/c/Users/dipeck/upload_files/my_file1.txt, LocalUpload/<guid>/upload_files/my_file1.txt),
    (/mnt/c/Users/dipeck/upload_files/my_file2.txt, LocalUpload/<guid>/upload_files/my_file2.txt)]
:param root: Root directory path
:type root: str
:param files: List of all file paths in the directory
:type files: List[str]
:param prefix: Remote upload path for project directory (e.g. LocalUpload/<guid>/project_dir)
:type prefix: str
:param ignore_file: The .amlignore or .gitignore file in the project directory
:type ignore_file: azure.ai.ml._utils._asset_utils.IgnoreFile
:return: Zipped list of tuples representing the local path and remote destination path for each file
:rtype: Iterable[Tuple[str, str]]
"""
    # Normalize Windows paths. Note that the path should be resolved first, because a long path component may be
    # converted to its 8.3 short form on Windows. For example, C:\Users\too-long-user-name\test may become
    # C:\Users\too-lo~1\test by default. Refer to https://en.wikipedia.org/wiki/8.3_filename for more details.
root = Path(root).resolve().absolute()
# filter out files excluded by the ignore file
    # TODO: inner ignore file won't take effect. A merged IgnoreFile needs to be generated in code resolution.
origin_file_paths = [
root.joinpath(filename)
for filename in files
if not ignore_file.is_file_excluded(root.joinpath(filename).as_posix())
]
result = []
for origin_file_path in origin_file_paths:
relative_path = origin_file_path.relative_to(root)
result.append((_resolve_path(origin_file_path).as_posix(), Path(prefix).joinpath(relative_path).as_posix()))
return result
def _resolve_path(path: Path) -> Path:
if not path.is_symlink():
return path
link_path = path.resolve()
if not link_path.is_absolute():
link_path = path.parent.joinpath(link_path).resolve()
return _resolve_path(link_path)
| promptflow/src/promptflow/promptflow/_sdk/_vendor/_asset_utils.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_vendor/_asset_utils.py",
"repo_id": "promptflow",
"token_count": 2954
} | 39 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# pylint: disable=protected-access
import json
import typing
from marshmallow import Schema, ValidationError
from promptflow._utils.logger_utils import LoggerFactory
from .core import MutableValidationResult, ValidationResultBuilder
module_logger = LoggerFactory.get_logger(__name__)
class SchemaValidatableMixin:
"""The mixin class for schema validation."""
@classmethod
def _create_empty_validation_result(cls) -> MutableValidationResult:
"""Simply create an empty validation result
To reduce _ValidationResultBuilder importing, which is a private class.
:return: An empty validation result
:rtype: MutableValidationResult
"""
return ValidationResultBuilder.success()
@classmethod
def _load_with_schema(cls, data, *, context, raise_original_exception=False, **kwargs):
schema = cls._create_schema_for_validation(context=context)
try:
return schema.load(data, **kwargs)
except ValidationError as e:
if raise_original_exception:
raise e
msg = "Trying to load data with schema failed. Data:\n%s\nError: %s" % (
json.dumps(data, indent=4) if isinstance(data, dict) else data,
json.dumps(e.messages, indent=4),
)
raise cls._create_validation_error(
message=msg,
no_personal_data_message=str(e),
) from e
@classmethod
# pylint: disable-next=docstring-missing-param
def _create_schema_for_validation(cls, context) -> Schema:
"""Create a schema of the resource with specific context. Should be overridden by subclass.
:return: The schema of the resource.
:rtype: Schema.
"""
raise NotImplementedError()
def _default_context(self) -> dict:
"""Get the default context for schema validation. Should be overridden by subclass.
:return: The default context for schema validation
:rtype: dict
"""
raise NotImplementedError()
@property
def _schema_for_validation(self) -> Schema:
"""Return the schema of this Resource with default context. Do not override this method.
Override _create_schema_for_validation instead.
:return: The schema of the resource.
:rtype: Schema.
"""
return self._create_schema_for_validation(context=self._default_context())
def _dump_for_validation(self) -> typing.Dict:
"""Convert the resource to a dictionary.
:return: Converted dictionary
:rtype: typing.Dict
"""
return self._schema_for_validation.dump(self)
@classmethod
def _create_validation_error(cls, message: str, no_personal_data_message: str) -> Exception:
"""The function to create the validation exception to raise in _try_raise and _validate when
raise_error is True.
Should be overridden by subclass.
:param message: The error message containing detailed information
:type message: str
:param no_personal_data_message: The error message without personal data
:type no_personal_data_message: str
:return: The validation exception to raise
:rtype: Exception
"""
raise NotImplementedError()
@classmethod
def _try_raise(
cls, validation_result: MutableValidationResult, *, raise_error: bool = True
) -> MutableValidationResult:
return validation_result.try_raise(raise_error=raise_error, error_func=cls._create_validation_error)
def _validate(self, raise_error=False) -> MutableValidationResult:
"""Validate the resource. If raise_error is True, raise ValidationError if validation fails and log warnings if
applicable; Else, return the validation result.
:param raise_error: Whether to raise ValidationError if validation fails.
:type raise_error: bool
:return: The validation result
:rtype: MutableValidationResult
"""
result = self.__schema_validate()
result.merge_with(self._customized_validate())
return self._try_raise(result, raise_error=raise_error)
def _customized_validate(self) -> MutableValidationResult:
"""Validate the resource with customized logic.
Override this method to add customized validation logic.
:return: The customized validation result
:rtype: MutableValidationResult
"""
return self._create_empty_validation_result()
@classmethod
def _get_skip_fields_in_schema_validation(
cls,
) -> typing.List[str]:
"""Get the fields that should be skipped in schema validation.
Override this method to add customized validation logic.
:return: The fields to skip in schema validation
:rtype: typing.List[str]
"""
return []
def __schema_validate(self) -> MutableValidationResult:
"""Validate the resource with the schema.
:return: The validation result
:rtype: MutableValidationResult
"""
data = self._dump_for_validation()
messages = self._schema_for_validation.validate(data)
for skip_field in self._get_skip_fields_in_schema_validation():
if skip_field in messages:
del messages[skip_field]
return ValidationResultBuilder.from_validation_messages(messages, data=data)
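# Illustrative subclass sketch (not part of the original module): the three
# NotImplementedError hooks above are the integration points; every name here
# is hypothetical.
class _ExampleValidatable(SchemaValidatableMixin):
    def __init__(self, name=None):
        self.name = name
    @classmethod
    def _create_schema_for_validation(cls, context) -> Schema:
        from marshmallow import fields
        class _ExampleSchema(Schema):
            name = fields.Str(required=True)
        return _ExampleSchema(context=context)
    def _default_context(self) -> dict:
        return {}
    @classmethod
    def _create_validation_error(cls, message: str, no_personal_data_message: str) -> Exception:
        return ValueError(no_personal_data_message)
# _ExampleValidatable()._validate() returns a result flagging the missing required
# "name"; _validate(raise_error=True) raises the ValueError built above.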
| promptflow/src/promptflow/promptflow/_sdk/entities/_validation/schema.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/entities/_validation/schema.py",
"repo_id": "promptflow",
"token_count": 2108
} | 40 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from marshmallow import fields, validate
from promptflow._sdk._constants import FlowType
from promptflow._sdk.schemas._base import PatchedSchemaMeta, YamlFileSchema
from promptflow._sdk.schemas._fields import LocalPathField, NestedField
class FlowInputSchema(metaclass=PatchedSchemaMeta):
"""Schema for flow input."""
type = fields.Str(required=True)
description = fields.Str()
    # Note: the default value can be of various types, so we use the Raw type here.
    # When transforming to JSON schema there is no equivalent type and it becomes a string type,
    # so the default type may need to be deleted from the generated JSON schema to avoid false alarms.
default = fields.Raw()
is_chat_input = fields.Bool()
is_chat_history = fields.Bool()
class FlowOutputSchema(metaclass=PatchedSchemaMeta):
"""Schema for flow output."""
type = fields.Str(required=True)
reference = fields.Str()
description = fields.Str()
is_chat_output = fields.Bool()
class BaseFlowSchema(YamlFileSchema):
"""Base schema for flow."""
additional_includes = fields.List(fields.Str())
environment = fields.Dict()
# metadata
type = fields.Str(validate=validate.OneOf(FlowType.get_all_values()))
language = fields.Str()
description = fields.Str()
display_name = fields.Str()
tags = fields.Dict(keys=fields.Str(), values=fields.Str())
class FlowSchema(BaseFlowSchema):
"""Schema for flow dag."""
inputs = fields.Dict(keys=fields.Str(), values=NestedField(FlowInputSchema))
outputs = fields.Dict(keys=fields.Str(), values=NestedField(FlowOutputSchema))
nodes = fields.List(fields.Dict())
node_variants = fields.Dict(keys=fields.Str(), values=fields.Dict())
class EagerFlowSchema(BaseFlowSchema):
"""Schema for eager flow."""
# path to flow entry file.
path = LocalPathField(required=True)
# entry function
entry = fields.Str(required=True)
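# Illustrative sketch (not part of the original module): validating a minimal
# flow dict. YamlFileSchema subclasses in this SDK are conventionally built
# with a context carrying the base path; that convention is assumed here.
def _example_validate_flow_dict():
    schema = FlowSchema(context={"base_path": "."})
    data = {
        "inputs": {"question": {"type": "string"}},
        "outputs": {"answer": {"type": "string", "reference": "${node.output}"}},
        "nodes": [],
    }
    return schema.validate(data)  # an empty dict means no schema errors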
| promptflow/src/promptflow/promptflow/_sdk/schemas/_flow.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/schemas/_flow.py",
"repo_id": "promptflow",
"token_count": 667
} | 41 |
import base64
import os
import re
import uuid
from functools import partial
from pathlib import Path
from typing import Any, Callable, Dict
from urllib.parse import urlparse
import requests
from promptflow._utils._errors import InvalidImageInput, LoadMultimediaDataError
from promptflow.contracts.flow import FlowInputDefinition
from promptflow.contracts.multimedia import Image, PFBytes
from promptflow.contracts.tool import ValueType
from promptflow.exceptions import ErrorTarget
MIME_PATTERN = re.compile(r"^data:image/(.*);(path|base64|url)$")
def _get_extension_from_mime_type(mime_type: str):
ext = mime_type.split("/")[-1]
if ext == "*":
return None
return ext
def is_multimedia_dict(multimedia_dict: dict):
if len(multimedia_dict) != 1:
return False
key = list(multimedia_dict.keys())[0]
if re.match(MIME_PATTERN, key):
return True
return False
def _get_multimedia_info(key: str):
match = re.match(MIME_PATTERN, key)
if match:
return match.group(1), match.group(2)
return None, None
def _is_url(value: str):
try:
result = urlparse(value)
return all([result.scheme, result.netloc])
except ValueError:
return False
def _is_base64(value: str):
base64_regex = re.compile(r"^([A-Za-z0-9+/]{4})*(([A-Za-z0-9+/]{2})*(==|[A-Za-z0-9+/]=)?)?$")
if re.match(base64_regex, value):
return True
return False
def _create_image_from_file(f: Path, mime_type: str = None):
with open(f, "rb") as fin:
return Image(fin.read(), mime_type=mime_type)
def _create_image_from_base64(base64_str: str, mime_type: str = None):
image_bytes = base64.b64decode(base64_str)
return Image(image_bytes, mime_type=mime_type)
def _create_image_from_url(url: str, mime_type: str = None):
response = requests.get(url)
if response.status_code == 200:
return Image(response.content, mime_type=mime_type, source_url=url)
else:
raise InvalidImageInput(
message_format="Failed to fetch image from URL: {url}. Error code: {error_code}. "
"Error message: {error_message}.",
target=ErrorTarget.EXECUTOR,
url=url,
error_code=response.status_code,
error_message=response.text,
)
def _create_image_from_dict(image_dict: dict):
for k, v in image_dict.items():
format, resource = _get_multimedia_info(k)
if resource == "path":
return _create_image_from_file(Path(v), mime_type=f"image/{format}")
elif resource == "base64":
if _is_base64(v):
return _create_image_from_base64(v, mime_type=f"image/{format}")
else:
raise InvalidImageInput(
message_format=f"Invalid base64 image: {v}.",
target=ErrorTarget.EXECUTOR,
)
elif resource == "url":
return _create_image_from_url(v, mime_type=f"image/{format}")
else:
raise InvalidImageInput(
message_format=f"Unsupported image resource: {resource}. "
"Supported Resources are [path, base64, url].",
target=ErrorTarget.EXECUTOR,
)
def _create_image_from_string(value: str):
if _is_base64(value):
return _create_image_from_base64(value)
elif _is_url(value):
return _create_image_from_url(value)
else:
return _create_image_from_file(Path(value))
def create_image(value: any):
if isinstance(value, PFBytes):
return value
elif isinstance(value, dict):
if is_multimedia_dict(value):
return _create_image_from_dict(value)
else:
raise InvalidImageInput(
message_format="Invalid image input format. The image input should be a dictionary like: "
"{{data:image/<image_type>;[path|base64|url]: <image_data>}}.",
target=ErrorTarget.EXECUTOR,
)
elif isinstance(value, str):
if not value:
raise InvalidImageInput(
message_format="The image input should not be empty.", target=ErrorTarget.EXECUTOR
)
return _create_image_from_string(value)
else:
raise InvalidImageInput(
message_format=f"Unsupported image input type: {type(value)}. "
"The image inputs should be a string or a dictionary.",
target=ErrorTarget.EXECUTOR,
)
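# Illustrative sketch (not part of the original module) of the input shapes
# create_image accepts; the path, URL and base64 payload are placeholders.
def _example_create_image():
    from_path = create_image({"data:image/png;path": "sample.png"})
    from_url = create_image({"data:image/*;url": "https://example.com/cat.jpg"})
    from_b64 = create_image({"data:image/jpeg;base64": "/9j/4AAQSkZJRg=="})
    from_str = create_image("sample.png")  # plain strings are sniffed: base64, then URL, then file path
    return from_path, from_url, from_b64, from_str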
def _save_image_to_file(
image: Image, file_name: str, folder_path: Path, relative_path: Path = None, use_absolute_path=False
):
ext = _get_extension_from_mime_type(image._mime_type)
file_name = f"{file_name}.{ext}" if ext else file_name
image_path = (relative_path / file_name).as_posix() if relative_path else file_name
if use_absolute_path:
image_path = Path(folder_path / image_path).resolve().as_posix()
image_reference = {f"data:{image._mime_type};path": image_path}
path = folder_path / relative_path if relative_path else folder_path
os.makedirs(path, exist_ok=True)
with open(os.path.join(path, file_name), "wb") as file:
file.write(image)
return image_reference
def get_file_reference_encoder(folder_path: Path, relative_path: Path = None, *, use_absolute_path=False) -> Callable:
    def pfbytes_file_reference_encoder(obj):
        """Dumps PFBytes to a file and returns its reference."""
        if isinstance(obj, PFBytes):
            # Keep a URL reference instead of re-dumping bytes that were fetched from a URL.
            if obj.source_url:
                return {f"data:{obj._mime_type};url": obj.source_url}
            file_name = str(uuid.uuid4())
            # If use_absolute_path is True, the image file path in image dictionary will be absolute path.
            return _save_image_to_file(obj, file_name, folder_path, relative_path, use_absolute_path)
        raise TypeError(f"Not supported to dump type '{type(obj).__name__}'.")
return pfbytes_file_reference_encoder
def default_json_encoder(obj):
if isinstance(obj, PFBytes):
return str(obj)
else:
raise TypeError(f"Object of type {type(obj).__name__} is not JSON serializable")
def persist_multimedia_data(value: Any, base_dir: Path, sub_dir: Path = None):
pfbytes_file_reference_encoder = get_file_reference_encoder(base_dir, sub_dir)
serialization_funcs = {Image: partial(Image.serialize, **{"encoder": pfbytes_file_reference_encoder})}
return _process_recursively(value, process_funcs=serialization_funcs)
def convert_multimedia_data_to_base64(value: Any, with_type=False, dict_type=False):
to_base64_funcs = {PFBytes: partial(PFBytes.to_base64, **{"with_type": with_type, "dict_type": dict_type})}
return _process_recursively(value, process_funcs=to_base64_funcs)
# TODO: Move this function to a more general place and integrate serialization to this function.
def _process_recursively(value: Any, process_funcs: Dict[type, Callable] = None, inplace: bool = False) -> dict:
if process_funcs:
for cls, f in process_funcs.items():
if isinstance(value, cls):
return f(value)
if isinstance(value, list):
if inplace:
for i in range(len(value)):
value[i] = _process_recursively(value[i], process_funcs, inplace)
else:
return [_process_recursively(v, process_funcs, inplace) for v in value]
elif isinstance(value, dict):
if inplace:
for k, v in value.items():
value[k] = _process_recursively(v, process_funcs, inplace)
else:
return {k: _process_recursively(v, process_funcs, inplace) for k, v in value.items()}
return value
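# Runnable sketch (not part of the original module): process_funcs maps a type
# to a transform applied to every nested value of that type.
def _example_process_recursively():
    data = {"texts": ["a", "b"], "n": 1}
    return _process_recursively(data, process_funcs={str: str.upper})
    # -> {"texts": ["A", "B"], "n": 1}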
def load_multimedia_data(inputs: Dict[str, FlowInputDefinition], line_inputs: dict):
updated_inputs = dict(line_inputs or {})
for key, value in inputs.items():
try:
if value.type == ValueType.IMAGE:
if isinstance(updated_inputs[key], list):
# For aggregation node, the image input is a list.
updated_inputs[key] = [create_image(item) for item in updated_inputs[key]]
else:
updated_inputs[key] = create_image(updated_inputs[key])
elif value.type == ValueType.LIST or value.type == ValueType.OBJECT:
updated_inputs[key] = load_multimedia_data_recursively(updated_inputs[key])
except Exception as ex:
error_type_and_message = f"({ex.__class__.__name__}) {ex}"
raise LoadMultimediaDataError(
message_format="Failed to load image for input '{key}': {error_type_and_message}",
key=key,
error_type_and_message=error_type_and_message,
target=ErrorTarget.EXECUTOR,
) from ex
return updated_inputs
def load_multimedia_data_recursively(value: Any):
return _process_multimedia_dict_recursively(value, _create_image_from_dict)
def resolve_multimedia_data_recursively(input_dir: Path, value: Any):
process_func = partial(resolve_image_path, **{"input_dir": input_dir})
return _process_multimedia_dict_recursively(value, process_func)
def _process_multimedia_dict_recursively(value: Any, process_func: Callable) -> dict:
if isinstance(value, list):
return [_process_multimedia_dict_recursively(item, process_func) for item in value]
elif isinstance(value, dict):
if is_multimedia_dict(value):
return process_func(**{"image_dict": value})
else:
return {k: _process_multimedia_dict_recursively(v, process_func) for k, v in value.items()}
else:
return value
def resolve_image_path(input_dir: Path, image_dict: dict):
"""Resolve image path to absolute path in image dict"""
input_dir = input_dir.parent if input_dir.is_file() else input_dir
if is_multimedia_dict(image_dict):
for key in image_dict:
_, resource = _get_multimedia_info(key)
if resource == "path":
image_dict[key] = str(input_dir / image_dict[key])
return image_dict
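# Runnable sketch (not part of the original module): a relative image path is
# rewritten against the inputs location.
def _example_resolve_image_path():
    image_dict = {"data:image/png;path": "images/cat.png"}
    return resolve_image_path(Path("/data/inputs.jsonl"), image_dict)
    # If /data/inputs.jsonl exists as a file, its parent directory is used and the
    # result is {"data:image/png;path": "/data/images/cat.png"}.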
| promptflow/src/promptflow/promptflow/_utils/multimedia_utils.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_utils/multimedia_utils.py",
"repo_id": "promptflow",
"token_count": 4333
} | 42 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from os import PathLike
from pathlib import Path
from typing import IO, AnyStr, Optional, Union
from ._utils import is_arm_id
def load_flow(
source: Union[str, PathLike, IO[AnyStr]],
*,
relative_origin: Optional[str] = None,
**kwargs,
):
"""Construct a flow object from a yaml file.
    :param source: The local yaml source of a flow. Must be either a
path to a local file, or an already-open file.
If the source is a path, it will be open and read.
An exception is raised if the file does not exist.
If the source is an open file, the file will be read directly,
and an exception is raised if the file is not readable.
:type source: Union[PathLike, str, io.TextIOWrapper]
:param relative_origin: The origin to be used when deducing
the relative locations of files referenced in the parsed yaml.
Defaults to the inputted source's directory if it is a file or file path input.
Defaults to "./" if the source is a stream input with no name value.
:type relative_origin: str
:param params_override: Fields to overwrite on top of the yaml file.
Format is [{"field1": "value1"}, {"field2": "value2"}]
:type params_override: List[Dict]
:return: Loaded flow object.
:rtype: promptflow.azure.Flow
"""
from promptflow.azure._entities._flow import Flow
if is_arm_id(source):
return source
return Flow(path=Path(source))
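# Illustrative sketch (not part of the original module); the path below is a
# placeholder pointing at a flow DAG yaml.
def _example_load_flow():
    flow = load_flow("flows/web_classification/flow.dag.yaml")
    return flow  # a promptflow.azure Flow wrapping the local path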
| promptflow/src/promptflow/promptflow/azure/_load_functions.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_load_functions.py",
"repo_id": "promptflow",
"token_count": 531
} | 43 |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.8.0, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._connection_operations import build_create_connection_request, build_delete_connection_request, build_get_connection_request, build_list_connection_specs_request, build_list_connections_request, build_update_connection_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ConnectionOperations:
"""ConnectionOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~flow.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def create_connection(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
connection_name: str,
body: Optional["_models.CreateOrUpdateConnectionRequest"] = None,
**kwargs: Any
) -> "_models.ConnectionEntity":
"""create_connection.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param connection_name:
:type connection_name: str
:param body:
:type body: ~flow.models.CreateOrUpdateConnectionRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionEntity, or the result of cls(response)
:rtype: ~flow.models.ConnectionEntity
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionEntity"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'CreateOrUpdateConnectionRequest')
else:
_json = None
request = build_create_connection_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
content_type=content_type,
json=_json,
template_url=self.create_connection.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ConnectionEntity', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_connection.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connection/{connectionName}'} # type: ignore
@distributed_trace_async
async def update_connection(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
connection_name: str,
body: Optional["_models.CreateOrUpdateConnectionRequest"] = None,
**kwargs: Any
) -> "_models.ConnectionEntity":
"""update_connection.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param connection_name:
:type connection_name: str
:param body:
:type body: ~flow.models.CreateOrUpdateConnectionRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionEntity, or the result of cls(response)
:rtype: ~flow.models.ConnectionEntity
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionEntity"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'CreateOrUpdateConnectionRequest')
else:
_json = None
request = build_update_connection_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
content_type=content_type,
json=_json,
template_url=self.update_connection.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ConnectionEntity', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_connection.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connection/{connectionName}'} # type: ignore
@distributed_trace_async
async def get_connection(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
connection_name: str,
**kwargs: Any
) -> "_models.ConnectionEntity":
"""get_connection.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param connection_name:
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionEntity, or the result of cls(response)
:rtype: ~flow.models.ConnectionEntity
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionEntity"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_connection_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
template_url=self.get_connection.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ConnectionEntity', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_connection.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connection/{connectionName}'} # type: ignore
@distributed_trace_async
async def delete_connection(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
connection_name: str,
connection_scope: Optional[Union[str, "_models.ConnectionScope"]] = None,
**kwargs: Any
) -> "_models.ConnectionEntity":
"""delete_connection.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param connection_name:
:type connection_name: str
:param connection_scope:
:type connection_scope: str or ~flow.models.ConnectionScope
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionEntity, or the result of cls(response)
:rtype: ~flow.models.ConnectionEntity
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionEntity"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_connection_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
connection_scope=connection_scope,
template_url=self.delete_connection.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ConnectionEntity', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_connection.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connection/{connectionName}'} # type: ignore
@distributed_trace_async
async def list_connections(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
**kwargs: Any
) -> List["_models.ConnectionEntity"]:
"""list_connections.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of ConnectionEntity, or the result of cls(response)
:rtype: list[~flow.models.ConnectionEntity]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ConnectionEntity"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_connections_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
template_url=self.list_connections.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('[ConnectionEntity]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_connections.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connection'} # type: ignore
@distributed_trace_async
async def list_connection_specs(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
**kwargs: Any
) -> List["_models.ConnectionSpec"]:
"""list_connection_specs.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of ConnectionSpec, or the result of cls(response)
:rtype: list[~flow.models.ConnectionSpec]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ConnectionSpec"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_connection_specs_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
template_url=self.list_connection_specs.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('[ConnectionSpec]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_connection_specs.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connection/specs'} # type: ignore
| promptflow/src/promptflow/promptflow/azure/_restclient/flow/aio/operations/_connection_operations.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/flow/aio/operations/_connection_operations.py",
"repo_id": "promptflow",
"token_count": 6949
} | 44 |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.8.0, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_create_connection_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connection/{connectionName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"connectionName": _SERIALIZER.url("connection_name", connection_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
headers=header_parameters,
**kwargs
)
def build_update_connection_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connection/{connectionName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"connectionName": _SERIALIZER.url("connection_name", connection_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
headers=header_parameters,
**kwargs
)
def build_get_connection_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connection/{connectionName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"connectionName": _SERIALIZER.url("connection_name", connection_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_delete_connection_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
connection_scope = kwargs.pop('connection_scope', None) # type: Optional[Union[str, "_models.ConnectionScope"]]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connection/{connectionName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"connectionName": _SERIALIZER.url("connection_name", connection_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if connection_scope is not None:
query_parameters['connectionScope'] = _SERIALIZER.query("connection_scope", connection_scope, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_connections_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connection')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_list_connection_specs_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connection/specs')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
# fmt: on
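# Illustrative sketch (not part of the generated code): the request builders
# above are plain functions returning azure.core.rest.HttpRequest objects; the
# identifiers below are placeholders.
def _example_build_get_connection_request():
    request = build_get_connection_request(
        subscription_id="00000000-0000-0000-0000-000000000000",
        resource_group_name="my-rg",
        workspace_name="my-ws",
        connection_name="my-aoai-connection",
    )
    return request.method, request.url  # ("GET", "/flow/api/subscriptions/.../Connection/my-aoai-connection")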
class ConnectionOperations(object):
"""ConnectionOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~flow.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def create_connection(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
connection_name, # type: str
body=None, # type: Optional["_models.CreateOrUpdateConnectionRequest"]
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionEntity"
"""create_connection.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param connection_name:
:type connection_name: str
:param body:
:type body: ~flow.models.CreateOrUpdateConnectionRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionEntity, or the result of cls(response)
:rtype: ~flow.models.ConnectionEntity
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionEntity"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'CreateOrUpdateConnectionRequest')
else:
_json = None
request = build_create_connection_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
content_type=content_type,
json=_json,
template_url=self.create_connection.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ConnectionEntity', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_connection.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connection/{connectionName}'} # type: ignore
@distributed_trace
def update_connection(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
connection_name, # type: str
body=None, # type: Optional["_models.CreateOrUpdateConnectionRequest"]
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionEntity"
"""update_connection.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param connection_name:
:type connection_name: str
:param body:
:type body: ~flow.models.CreateOrUpdateConnectionRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionEntity, or the result of cls(response)
:rtype: ~flow.models.ConnectionEntity
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionEntity"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'CreateOrUpdateConnectionRequest')
else:
_json = None
request = build_update_connection_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
content_type=content_type,
json=_json,
template_url=self.update_connection.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ConnectionEntity', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_connection.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connection/{connectionName}'} # type: ignore
@distributed_trace
def get_connection(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionEntity"
"""get_connection.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param connection_name:
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionEntity, or the result of cls(response)
:rtype: ~flow.models.ConnectionEntity
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionEntity"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_connection_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
template_url=self.get_connection.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ConnectionEntity', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_connection.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connection/{connectionName}'} # type: ignore
@distributed_trace
def delete_connection(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
connection_name, # type: str
connection_scope=None, # type: Optional[Union[str, "_models.ConnectionScope"]]
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionEntity"
"""delete_connection.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param connection_name:
:type connection_name: str
:param connection_scope:
:type connection_scope: str or ~flow.models.ConnectionScope
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionEntity, or the result of cls(response)
:rtype: ~flow.models.ConnectionEntity
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionEntity"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_connection_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
connection_scope=connection_scope,
template_url=self.delete_connection.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ConnectionEntity', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_connection.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connection/{connectionName}'} # type: ignore
@distributed_trace
def list_connections(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> List["_models.ConnectionEntity"]
"""list_connections.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of ConnectionEntity, or the result of cls(response)
:rtype: list[~flow.models.ConnectionEntity]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ConnectionEntity"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_connections_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
template_url=self.list_connections.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('[ConnectionEntity]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_connections.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connection'} # type: ignore
@distributed_trace
def list_connection_specs(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> List["_models.ConnectionSpec"]
"""list_connection_specs.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of ConnectionSpec, or the result of cls(response)
:rtype: list[~flow.models.ConnectionSpec]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ConnectionSpec"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_connection_specs_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
template_url=self.list_connection_specs.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('[ConnectionSpec]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_connection_specs.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connection/specs'} # type: ignore
| promptflow/src/promptflow/promptflow/azure/_restclient/flow/operations/_connection_operations.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/flow/operations/_connection_operations.py",
"repo_id": "promptflow",
"token_count": 10177
} | 45 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore
from .gerneral import is_arm_id
__all__ = ["is_arm_id"]
| promptflow/src/promptflow/promptflow/azure/_utils/__init__.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_utils/__init__.py",
"repo_id": "promptflow",
"token_count": 80
} | 46 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import json
import socket
import subprocess
import uuid
from pathlib import Path
from typing import Any, Mapping, Optional
from promptflow._core._errors import MetaFileNotFound, MetaFileReadError
from promptflow._sdk._constants import DEFAULT_ENCODING, FLOW_TOOLS_JSON, PROMPT_FLOW_DIR_NAME
from promptflow.batch._base_executor_proxy import APIBasedExecutorProxy
from promptflow.executor._result import AggregationResult
from promptflow.storage._run_storage import AbstractRunStorage
EXECUTOR_SERVICE_DOMAIN = "http://localhost:"
EXECUTOR_SERVICE_DLL = "Promptflow.dll"
class CSharpExecutorProxy(APIBasedExecutorProxy):
def __init__(self, process: subprocess.Popen, port: str):
self._process = process
self._port = port
@property
def api_endpoint(self) -> str:
return EXECUTOR_SERVICE_DOMAIN + self._port
@classmethod
async def create(
cls,
flow_file: Path,
working_dir: Optional[Path] = None,
*,
connections: Optional[dict] = None,
storage: Optional[AbstractRunStorage] = None,
**kwargs,
) -> "CSharpExecutorProxy":
"""Create a new executor"""
port = cls.find_available_port()
log_path = kwargs.get("log_path", "")
init_error_file = Path(working_dir) / f"init_error_{str(uuid.uuid4())}.json"
init_error_file.touch()
command = [
"dotnet",
EXECUTOR_SERVICE_DLL,
"-e",
"-p",
port,
"--yaml_path",
flow_file,
"--assembly_folder",
".",
"--log_path",
log_path,
"--log_level",
"Warning",
"--error_file_path",
init_error_file,
]
process = subprocess.Popen(command)
executor_proxy = cls(process, port)
try:
await executor_proxy.ensure_executor_startup(init_error_file)
finally:
Path(init_error_file).unlink()
return executor_proxy
async def destroy(self):
"""Destroy the executor"""
if self._process and self._process.poll() is None:
self._process.terminate()
try:
self._process.wait(timeout=5)
except subprocess.TimeoutExpired:
self._process.kill()
async def exec_aggregation_async(
self,
batch_inputs: Mapping[str, Any],
aggregation_inputs: Mapping[str, Any],
run_id: Optional[str] = None,
) -> AggregationResult:
return AggregationResult({}, {}, {})
def _is_executor_active(self):
"""Check if the process is still running and return False if it has exited"""
# poll() returns the process's exit code, or None if the process is still running
return self._process.poll() is None
@classmethod
def _get_tool_metadata(cls, flow_file: Path, working_dir: Path) -> dict:
flow_tools_json_path = working_dir / PROMPT_FLOW_DIR_NAME / FLOW_TOOLS_JSON
if flow_tools_json_path.is_file():
with open(flow_tools_json_path, mode="r", encoding=DEFAULT_ENCODING) as f:
try:
return json.load(f)
except json.JSONDecodeError:
raise MetaFileReadError(
message_format="Failed to fetch meta of tools: {file_path} is not a valid json file.",
file_path=flow_tools_json_path.absolute().as_posix(),
)
raise MetaFileNotFound(
message_format=(
"Failed to fetch meta of tools: cannot find {file_path}, please build the flow project first."
),
file_path=flow_tools_json_path.absolute().as_posix(),
)
@classmethod
def find_available_port(cls) -> str:
"""Find an available port on localhost"""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(("localhost", 0))
_, port = s.getsockname()
return str(port)
| promptflow/src/promptflow/promptflow/batch/_csharp_executor_proxy.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/batch/_csharp_executor_proxy.py",
"repo_id": "promptflow",
"token_count": 1889
} | 47 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import inspect
import string
import traceback
from enum import Enum
from functools import cached_property
class ErrorCategory(str, Enum):
USER_ERROR = "UserError"
SYSTEM_ERROR = "SystemError"
UNKNOWN = "Unknown"
class ErrorTarget(str, Enum):
"""The target of the error, indicates which part of the system the error occurs."""
EXECUTOR = "Executor"
BATCH = "Batch"
FLOW_EXECUTOR = "FlowExecutor"
NODE_EXECUTOR = "NodeExecutor"
TOOL = "Tool"
AZURE_RUN_STORAGE = "AzureRunStorage"
RUNTIME = "Runtime"
UNKNOWN = "Unknown"
RUN_TRACKER = "RunTracker"
RUN_STORAGE = "RunStorage"
CONTROL_PLANE_SDK = "ControlPlaneSDK"
SERVING_APP = "ServingApp"
FLOW_INVOKER = "FlowInvoker"
FUNCTION_PATH = "FunctionPath"
class PromptflowException(Exception):
"""Base exception for all errors.
:param message: A message describing the error. This is the error message the user will see.
:type message: str
:param target: The name of the element that caused the exception to be thrown.
:type target: ~promptflow.exceptions.ErrorTarget
:param error: The original exception if any.
:type error: Exception
"""
def __init__(
self,
message="",
message_format="",
target: ErrorTarget = ErrorTarget.UNKNOWN,
module=None,
**kwargs,
):
self._inner_exception = kwargs.get("error")
self._target = target
self._module = module
self._message_format = message_format
self._kwargs = kwargs
if message:
self._message = str(message)
elif self.message_format:
self._message = self.message_format.format(**self.message_parameters)
else:
self._message = self.__class__.__name__
super().__init__(self._message)
@property
def message(self):
"""The error message."""
return self._message
@property
def message_format(self):
"""The error message format."""
return self._message_format
@cached_property
def message_parameters(self):
"""The error message parameters."""
if not self._kwargs:
return {}
required_arguments = self.get_arguments_from_message_format(self.message_format)
parameters = {}
for argument in required_arguments:
if argument not in self._kwargs:
parameters[argument] = f"<{argument}>"
else:
parameters[argument] = self._kwargs[argument]
return parameters
@cached_property
def serializable_message_parameters(self):
"""The serializable error message parameters."""
return {k: str(v) for k, v in self.message_parameters.items()}
@property
def target(self):
"""The error target.
:return: The error target.
:rtype: ~promptflow.exceptions.ErrorTarget
"""
return self._target
@target.setter
def target(self, value):
"""Set the error target."""
self._target = value
@property
def module(self):
"""The module of the error that occurs.
It is similar to `target` but is more specific.
It is meant to store the Python module name of the code that raises the exception.
"""
return self._module
@module.setter
def module(self, value):
"""Set the module of the error that occurs."""
self._module = value
@property
def reference_code(self):
"""The reference code of the error."""
# In Python 3.11, the __str__ method of the Enum type returns the name of the enumeration member.
# However, in earlier Python versions, the __str__ method returns the value of the enumeration member.
# Therefore, when dealing with this situation, we need to make some additional adjustments.
target = self.target.value if isinstance(self.target, ErrorTarget) else self.target
if self.module:
return f"{target}/{self.module}"
else:
return target
@property
def inner_exception(self):
"""Get the inner exception.
The inner exception can be set via either style:
1) Set via the error parameter in the constructor.
raise PromptflowException("message", error=inner_exception)
2) Set via raise from statement.
raise PromptflowException("message") from inner_exception
"""
return self._inner_exception or self.__cause__
@property
def additional_info(self):
"""Return a dict of the additional info of the exception.
By default, this information is usually empty.
However, specific exceptions may define their own additional info.
e.g. For ToolExecutionError, we may add the tool's line number and stack trace to the additional info.
"""
return None
@property
def error_codes(self):
"""Returns a list of the error codes for this exception.
The error codes are defined following the class inheritance chain.
e.g. For ToolExecutionError, which inherits from UserErrorException,
the result would be ["UserError", "ToolExecutionError"].
"""
if getattr(self, "_error_codes", None):
return self._error_codes
from promptflow._utils.exception_utils import infer_error_code_from_class
def reversed_error_codes():
for clz in self.__class__.__mro__:
if clz is PromptflowException:
break
yield infer_error_code_from_class(clz)
self._error_codes = list(reversed_error_codes())
self._error_codes.reverse()
return self._error_codes
def get_arguments_from_message_format(self, message_format):
"""Get the arguments from the message format."""
def iter_field_name():
if not message_format:
return
for _, field_name, _, _ in string.Formatter().parse(message_format):
if field_name is not None:
yield field_name
return set(iter_field_name())
def __str__(self):
"""Return the error message.
Some child classes may override this method to return a more detailed error message."""
return self.message
class UserErrorException(PromptflowException):
"""Exception raised when invalid or unsupported inputs are provided."""
pass
class SystemErrorException(PromptflowException):
"""Exception raised when service error is triggered."""
pass
class ValidationException(UserErrorException):
"""Exception raised when validation fails."""
pass
class _ErrorInfo:
@classmethod
def get_error_info(cls, e: Exception):
if not isinstance(e, Exception):
return None, None, None, None, None
e = cls.select_exception(e)
if cls._is_system_error(e):
return (
ErrorCategory.SYSTEM_ERROR,
cls._error_type(e),
cls._error_target(e),
cls._error_message(e),
cls._error_detail(e),
)
if cls._is_user_error(e):
return (
ErrorCategory.USER_ERROR,
cls._error_type(e),
cls._error_target(e),
cls._error_message(e),
cls._error_detail(e),
)
return ErrorCategory.UNKNOWN, cls._error_type(e), ErrorTarget.UNKNOWN, "", cls._error_detail(e)
@classmethod
def select_exception(cls, e: Exception):
"""Select the exception in e and e.__cause__, and prioritize the Exception defined in the promptflow."""
if isinstance(e, PromptflowException):
return e
# raise Exception("message") from PromptflowException("message")
if e.__cause__ and isinstance(e.__cause__, PromptflowException):
return e.__cause__
return e
@classmethod
def _is_system_error(cls, e: Exception):
if isinstance(e, SystemErrorException):
return True
return False
@classmethod
def _is_user_error(cls, e: Exception):
if isinstance(e, UserErrorException):
return True
return False
@classmethod
def _error_type(cls, e: Exception):
"""Return exception type.
Note:
For PromptflowException(error=ValueError("xxx")) or
UserErrorException(error=ValueError("xxx")) or
SystemErrorException(error=ValueError("xxx")),
the desired return type is ValueError,
not PromptflowException, UserErrorException and SystemErrorException.
"""
error_type = type(e).__name__
if type(e) in (PromptflowException, UserErrorException, SystemErrorException):
if e.inner_exception:
error_type = type(e.inner_exception).__name__
return error_type
@classmethod
def _error_target(cls, e: Exception):
return getattr(e, "target", ErrorTarget.UNKNOWN)
@classmethod
def _error_message(cls, e: Exception):
return getattr(e, "message_format", "")
@classmethod
def _error_detail(cls, e: Exception):
exception_codes = cls._get_exception_codes(e)
exception_code = None
for item in exception_codes[::-1]:
if "promptflow" in item["module"]: # Only record information within the promptflow package
exception_code = item
break
if not exception_code:
return ""
return (
f"module={exception_code['module']}, "
f"code={exception_code['exception_code']}, "
f"lineno={exception_code['lineno']}."
)
@classmethod
def _get_exception_codes(cls, e: Exception) -> list:
"""
Obtain information on each line of the traceback, including the module name,
exception code and lineno where the error occurred.
:param e: Exception object
:return: A list where each item contains information for one frame of the traceback, in a format like this:
{
'module': 'promptflow.executor.errors',
'exception_code': 'return self.inner_exception.additional_info',
'lineno': 223
}
"""
exception_codes = []
traceback_info = traceback.extract_tb(e.__traceback__)
for item in traceback_info:
lineno = item.lineno
filename = item.filename
line_code = item.line
module = inspect.getmodule(None, _filename=filename)
exception_code = {"module": "", "exception_code": line_code, "lineno": lineno}
if module is not None:
exception_code["module"] = module.__name__
exception_codes.append(exception_code)
return exception_codes
| promptflow/src/promptflow/promptflow/exceptions.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/exceptions.py",
"repo_id": "promptflow",
"token_count": 4554
} | 48 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import copy
from json import JSONDecodeError
from typing import Any, List, Mapping, Optional
from promptflow._utils.logger_utils import logger
from promptflow.contracts.flow import Flow, InputValueType, Node
from promptflow.contracts.tool import ValueType
from promptflow.executor._errors import (
DuplicateNodeName,
EmptyOutputReference,
InputNotFound,
InputParseError,
InputReferenceNotFound,
InputTypeError,
InvalidAggregationInput,
InvalidNodeReference,
NodeCircularDependency,
NodeReferenceNotFound,
OutputReferenceNotFound,
)
class FlowValidator:
"""This is a validation class designed to verify the integrity and validity of flow definitions and input data."""
@staticmethod
def _ensure_nodes_order(flow: Flow):
dependencies = {n.name: set() for n in flow.nodes}
aggregation_nodes = set(node.name for node in flow.nodes if node.aggregation)
for n in flow.nodes:
inputs_list = [i for i in n.inputs.values()]
if n.activate:
if (
n.aggregation
and n.activate.condition.value_type == InputValueType.NODE_REFERENCE
and n.activate.condition.value not in aggregation_nodes
):
msg_format = (
"Invalid node definitions found in the flow graph. Non-aggregation node '{invalid_reference}' "
"cannot be referenced in the activate config of the aggregation node '{node_name}'. Please "
"review and rectify the node reference."
)
raise InvalidNodeReference(
message_format=msg_format, invalid_reference=n.activate.condition.value, node_name=n.name
)
inputs_list.extend([n.activate.condition])
for i in inputs_list:
if i.value_type != InputValueType.NODE_REFERENCE:
continue
if i.value not in dependencies:
msg_format = (
"Invalid node definitions found in the flow graph. Node '{node_name}' references "
"a non-existent node '{reference_node_name}' in your flow. Please review your flow to "
"ensure that the node name is accurately specified."
)
raise NodeReferenceNotFound(
message_format=msg_format, node_name=n.name, reference_node_name=i.value
)
dependencies[n.name].add(i.value)
if not n.aggregation:
invalid_reference = dependencies[n.name].intersection(aggregation_nodes)
if invalid_reference:
msg_format = (
"Invalid node definitions found in the flow graph. Non-aggregate node '{node_name}' "
"cannot reference aggregate nodes {invalid_reference}. Please review and rectify "
"the node reference."
)
raise InvalidNodeReference(
message_format=msg_format, node_name=n.name, invalid_reference=invalid_reference
)
sorted_nodes = []
picked = set()
for _ in range(len(flow.nodes)):
available_nodes_iterator = (
n for n in flow.nodes if n.name not in picked and all(d in picked for d in dependencies[n.name])
)
node_to_pick = next(available_nodes_iterator, None)
if not node_to_pick:
# Collect the names of the nodes involved in the circular dependency, sorted alphabetically
remaining_nodes = sorted(list(set(dependencies.keys()) - picked))
raise NodeCircularDependency(
message_format=(
"Invalid node definitions found in the flow graph. Node circular dependency has been detected "
"among the nodes in your flow. Kindly review the reference relationships for the nodes "
"{remaining_nodes} and resolve the circular reference issue in the flow."
),
remaining_nodes=remaining_nodes,
)
sorted_nodes.append(node_to_pick)
picked.add(node_to_pick.name)
if any(n1.name != n2.name for n1, n2 in zip(flow.nodes, sorted_nodes)):
return Flow(
id=flow.id,
name=flow.name,
nodes=sorted_nodes,
inputs=flow.inputs,
outputs=flow.outputs,
tools=flow.tools,
)
return copy.copy(flow)
@staticmethod
def _validate_nodes_topology(flow: Flow) -> Flow:
node_names = set()
for node in flow.nodes:
if node.name in node_names:
raise DuplicateNodeName(
message_format=(
"Invalid node definitions found in the flow graph. Node with name '{node_name}' appears "
"more than once in the node definitions in your flow, which is not allowed. To address "
"this issue, please review your flow and either rename or remove nodes with identical names."
),
node_name=node.name,
)
node_names.add(node.name)
for node in flow.nodes:
for v in node.inputs.values():
if v.value_type != InputValueType.FLOW_INPUT:
continue
if v.value not in flow.inputs:
msg_format = (
"Invalid node definitions found in the flow graph. Node '{node_name}' references flow input "
"'{flow_input_name}' which is not defined in your flow. To resolve this issue, "
"please review your flow, ensuring that you either add the missing flow inputs "
"or adjust node reference to the correct flow input."
)
raise InputReferenceNotFound(
message_format=msg_format, node_name=node.name, flow_input_name=v.value
)
return FlowValidator._ensure_nodes_order(flow)
@staticmethod
def _parse_input_value(input_key: str, input_value: Any, expected_type: ValueType, idx=None):
try:
return expected_type.parse(input_value)
except JSONDecodeError as e:
line_info = "" if idx is None else f" in line {idx} of input data"
flow_input_info = f"'{input_key}'{line_info}"
error_type_and_message = f"({e.__class__.__name__}) {e}"
msg_format = (
"Failed to parse the flow input. The value for flow input {flow_input_info} "
"was interpreted as JSON string since its type is '{value_type}'. However, the value "
"'{input_value}' is invalid for JSON parsing. Error details: {error_type_and_message}. "
"Please make sure your inputs are properly formatted."
)
raise InputParseError(
message_format=msg_format,
flow_input_info=flow_input_info,
input_value=input_value,
value_type=expected_type.value if hasattr(expected_type, "value") else expected_type,
error_type_and_message=error_type_and_message,
) from e
except Exception as e:
line_info = "" if idx is None else f" in line {idx} of input data"
flow_input_info = f"'{input_key}'{line_info}"
msg_format = (
"The input for flow is incorrect. The value for flow input {flow_input_info} "
"does not match the expected type '{expected_type}'. Please change flow input type "
"or adjust the input value in your input data."
)
expected_type_value = expected_type.value if hasattr(expected_type, "value") else expected_type
raise InputTypeError(
message_format=msg_format, flow_input_info=flow_input_info, expected_type=expected_type_value
) from e
@staticmethod
def resolve_aggregated_flow_inputs_type(flow: Flow, inputs: Mapping[str, List[Any]]) -> Mapping[str, Any]:
updated_inputs = {}
for input_key, input_def in flow.inputs.items():
if input_key in inputs:
input_value_list = inputs[input_key]
updated_inputs[input_key] = [
FlowValidator._parse_input_value(input_key, each_line_item, input_def.type, idx)
for idx, each_line_item in enumerate(input_value_list)
]
return updated_inputs
@staticmethod
def resolve_flow_inputs_type(flow: Flow, inputs: Mapping[str, Any], idx: Optional[int] = None) -> Mapping[str, Any]:
"""Resolve inputs by type if existing. Ignore missing inputs.
:param flow: The `flow` parameter is of type `Flow` and represents a flow object
:type flow: ~promptflow.contracts.flow.Flow
:param inputs: A dictionary containing the input values for the flow. The keys are the names of the
flow inputs, and the values are the corresponding input values
:type inputs: Mapping[str, Any]
:param idx: The `idx` parameter is an optional integer that represents the line index of the input
data. It is used to provide additional information in case there is an error with the input data
:type idx: Optional[int]
:return: The updated inputs with values are type-converted based on the expected type specified
in the `flow` object.
:rtype: Mapping[str, Any]
"""
updated_inputs = {k: v for k, v in inputs.items()}
for k, v in flow.inputs.items():
if k in inputs:
updated_inputs[k] = FlowValidator._parse_input_value(k, inputs[k], v.type, idx)
return updated_inputs
@staticmethod
def ensure_flow_inputs_type(flow: Flow, inputs: Mapping[str, Any], idx: Optional[int] = None) -> Mapping[str, Any]:
"""Make sure the inputs are completed and in the correct type. Raise Exception if not valid.
:param flow: The `flow` parameter is of type `Flow` and represents a flow object
:type flow: ~promptflow.contracts.flow.Flow
:param inputs: A dictionary containing the input values for the flow. The keys are the names of the
flow inputs, and the values are the corresponding input values
:type inputs: Mapping[str, Any]
:param idx: The `idx` parameter is an optional integer that represents the line index of the input
data. It is used to provide additional information in case there is an error with the input data
:type idx: Optional[int]
:return: The updated inputs, where the values are type-converted based on the expected
type specified in the `flow` object.
:rtype: Mapping[str, Any]
"""
for k, v in flow.inputs.items():
if k not in inputs:
line_info = "in input data" if idx is None else f"in line {idx} of input data"
msg_format = (
"The input for flow is incorrect. The value for flow input '{input_name}' is not "
"provided {line_info}. Please review your input data or remove this input in your flow "
"if it's no longer needed."
)
raise InputNotFound(message_format=msg_format, input_name=k, line_info=line_info)
return FlowValidator.resolve_flow_inputs_type(flow, inputs, idx)
@staticmethod
def convert_flow_inputs_for_node(flow: Flow, node: Node, inputs: Mapping[str, Any]) -> Mapping[str, Any]:
"""Filter the flow inputs for node and resolve the value by type.
:param flow: The `flow` parameter is an instance of the `Flow` class. It represents the flow or
workflow that contains the node and inputs
:type flow: ~promptflow.contracts.flow.Flow
:param node: The `node` parameter is an instance of the `Node` class
:type node: ~promptflow.contracts.flow.Node
:param inputs: A dictionary containing the input values for the node. The keys are the names of the
input variables, and the values are the corresponding input values
:type inputs: Mapping[str, Any]
:return: the resolved flow inputs that are needed by the node only.
:rtype: Mapping[str, Any]
"""
updated_inputs = {}
inputs = inputs or {}
for k, v in node.inputs.items():
if v.value_type == InputValueType.FLOW_INPUT:
if v.value not in flow.inputs:
raise InputNotFound(
message_format=(
"The input for node is incorrect. Node input '{node_input_name}' is not found "
"from flow inputs of node '{node_name}'. Please review the node definition in your flow."
),
node_input_name=v.value,
node_name=node.name,
)
if v.value not in inputs:
raise InputNotFound(
message_format=(
"The input for node is incorrect. Node input '{node_input_name}' is not found "
"in input data for node '{node_name}'. Please verify the inputs data for the node."
),
node_input_name=v.value,
node_name=node.name,
)
try:
updated_inputs[v.value] = flow.inputs[v.value].type.parse(inputs[v.value])
except Exception as e:
msg_format = (
"The input for node is incorrect. Value for input '{input_name}' of node '{node_name}' "
"is not type '{expected_type}'. Please review and rectify the input data."
)
raise InputTypeError(
message_format=msg_format,
input_name=k,
node_name=node.name,
expected_type=flow.inputs[v.value].type.value,
) from e
return updated_inputs
@staticmethod
def _validate_aggregation_inputs(aggregated_flow_inputs: Mapping[str, Any], aggregation_inputs: Mapping[str, Any]):
"""Validate the aggregation inputs according to the flow inputs."""
for key, value in aggregated_flow_inputs.items():
if key in aggregation_inputs:
raise InvalidAggregationInput(
message_format=(
"The input for aggregation is incorrect. The input '{input_key}' appears in both "
"aggregated flow input and aggregated reference input. "
"Please remove one of them and try the operation again."
),
input_key=key,
)
if not isinstance(value, list):
raise InvalidAggregationInput(
message_format=(
"The input for aggregation is incorrect. "
"The value for aggregated flow input '{input_key}' should be a list, "
"but received {value_type}. Please adjust the input value to match the expected format."
),
input_key=key,
value_type=type(value).__name__,
)
for key, value in aggregation_inputs.items():
if not isinstance(value, list):
raise InvalidAggregationInput(
message_format=(
"The input for aggregation is incorrect. "
"The value for aggregated reference input '{input_key}' should be a list, "
"but received {value_type}. Please adjust the input value to match the expected format."
),
input_key=key,
value_type=type(value).__name__,
)
inputs_len = {key: len(value) for key, value in aggregated_flow_inputs.items()}
inputs_len.update({key: len(value) for key, value in aggregation_inputs.items()})
if len(set(inputs_len.values())) > 1:
raise InvalidAggregationInput(
message_format=(
"The input for aggregation is incorrect. "
"The length of all aggregated inputs should be the same. Current input lengths are: "
"{key_len}. Please adjust the input value in your input data."
),
key_len=inputs_len,
)
@staticmethod
def _ensure_outputs_valid(flow: Flow):
updated_outputs = {}
for k, v in flow.outputs.items():
if v.reference.value_type == InputValueType.LITERAL and v.reference.value == "":
msg_format = (
"The output '{output_name}' for flow is incorrect. The reference is not specified for "
"the output '{output_name}' in the flow. To rectify this, "
"ensure that you accurately specify the reference in the flow."
)
raise EmptyOutputReference(message_format=msg_format, output_name=k)
if v.reference.value_type == InputValueType.FLOW_INPUT and v.reference.value not in flow.inputs:
msg_format = (
"The output '{output_name}' for flow is incorrect. The output '{output_name}' references "
"non-existent flow input '{flow_input_name}' in your flow. Please carefully review your flow and "
"correct the reference definition for the output in question."
)
raise OutputReferenceNotFound(
message_format=msg_format, output_name=k, flow_input_name=v.reference.value
)
if v.reference.value_type == InputValueType.NODE_REFERENCE:
node = flow.get_node(v.reference.value)
if node is None:
msg_format = (
"The output '{output_name}' for flow is incorrect. The output '{output_name}' references "
"non-existent node '{node_name}' in your flow. To resolve this issue, please carefully review "
"your flow and correct the reference definition for the output in question."
)
raise OutputReferenceNotFound(message_format=msg_format, output_name=k, node_name=v.reference.value)
if node.aggregation:
msg = f"Output '{k}' references a reduce node '{v.reference.value}', will not take effect."
logger.warning(msg)
# We will not add this output to the flow outputs, so we simply ignore it here
continue
updated_outputs[k] = v
return updated_outputs
@staticmethod
def ensure_flow_valid_in_batch_mode(flow: Flow):
if not flow.inputs:
message = (
"The input for flow cannot be empty in batch mode. Please review your flow and provide valid inputs."
)
raise InputNotFound(message=message)
| promptflow/src/promptflow/promptflow/executor/flow_validator.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/executor/flow_validator.py",
"repo_id": "promptflow",
"token_count": 9197
} | 49 |
from pathlib import Path
from tempfile import mkdtemp
from typing import Dict
import pytest
from promptflow._utils.logger_utils import LogContext
from promptflow.batch._batch_engine import OUTPUT_FILE_NAME, BatchEngine
from promptflow.batch._result import BatchResult
from promptflow.contracts._errors import FlowDefinitionError
from promptflow.contracts.run_info import FlowRunInfo
from promptflow.contracts.run_info import RunInfo as NodeRunInfo
from promptflow.contracts.run_info import Status
from promptflow.executor import FlowExecutor
from ..utils import (
WRONG_FLOW_ROOT,
MemoryRunStorage,
get_flow_expected_result,
get_flow_expected_status_summary,
get_flow_folder,
get_flow_inputs,
get_flow_inputs_file,
get_yaml_file,
load_jsonl,
)
ACTIVATE_FLOW_TEST_CASES = [
"conditional_flow_with_activate",
"activate_with_no_inputs",
"all_depedencies_bypassed_with_activate_met",
"activate_condition_always_met",
]
@pytest.mark.usefixtures("dev_connections")
@pytest.mark.e2etest
class TestExecutorActivate:
@pytest.mark.parametrize("flow_folder", ACTIVATE_FLOW_TEST_CASES)
def test_flow_run_activate(self, dev_connections, flow_folder):
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
results = executor.exec_line(get_flow_inputs(flow_folder))
# Assert the flow result
expected_result = get_flow_expected_result(flow_folder)
expected_result = expected_result[0] if isinstance(expected_result, list) else expected_result
self.assert_activate_flow_run_result(results.run_info, expected_result)
self.assert_activate_node_run_result(results.node_run_infos, expected_result)
def test_batch_run_activate(self, dev_connections):
flow_folder = "conditional_flow_with_activate"
mem_run_storage = MemoryRunStorage()
batch_engine = BatchEngine(
get_yaml_file(flow_folder),
get_flow_folder(flow_folder),
connections=dev_connections,
storage=mem_run_storage,
)
input_dirs = {"data": get_flow_inputs_file(flow_folder, file_name="inputs.json")}
inputs_mapping = {"incident_id": "${data.incident_id}", "incident_content": "${data.incident_content}"}
output_dir = Path(mkdtemp())
batch_results = batch_engine.run(input_dirs, inputs_mapping, output_dir)
expected_result = get_flow_expected_result(flow_folder)
expected_status_summary = get_flow_expected_status_summary(flow_folder)
self.assert_activate_bulk_run_result(
output_dir, mem_run_storage, batch_results, expected_result, expected_status_summary
)
def test_all_nodes_bypassed(self, dev_connections):
flow_folder = "all_nodes_bypassed"
file_path = Path(mkdtemp()) / "flow.log"
with LogContext(file_path):
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
result = executor.exec_line(get_flow_inputs(flow_folder))
assert result.output["result"] is None
with open(file_path) as fin:
content = fin.read()
assert "The node referenced by output:'third_node' is bypassed, which is not recommended." in content
def test_invalid_activate_config(self):
flow_folder = "invalid_activate_config"
with pytest.raises(FlowDefinitionError) as ex:
FlowExecutor.create(get_yaml_file(flow_folder, root=WRONG_FLOW_ROOT), {})
assert ex.value.message == (
"The definition of activate config for node divide_num is incorrect. "
"Please check your flow yaml and resubmit."
)
def test_aggregate_bypassed_nodes(self):
flow_folder = "conditional_flow_with_aggregate_bypassed"
mem_run_storage = MemoryRunStorage()
batch_engine = BatchEngine(
get_yaml_file(flow_folder), get_flow_folder(flow_folder), connections={}, storage=mem_run_storage
)
input_dirs = {"data": get_flow_inputs_file(flow_folder, file_name="inputs.json")}
inputs_mapping = {"case": "${data.case}", "value": "${data.value}"}
output_dir = Path(mkdtemp())
batch_results = batch_engine.run(input_dirs, inputs_mapping, output_dir)
expected_result = get_flow_expected_result(flow_folder)
expected_status_summary = get_flow_expected_status_summary(flow_folder)
self.assert_activate_bulk_run_result(
output_dir, mem_run_storage, batch_results, expected_result, expected_status_summary
)
# Validate the aggregate result
for node_run_info in mem_run_storage._node_runs.values():
if node_run_info.node == "aggregation_double":
assert node_run_info.status == Status.Completed
assert node_run_info.output == 3
elif node_run_info.node == "aggregation_square":
assert node_run_info.status == Status.Completed
assert node_run_info.output == 12.5
def assert_activate_bulk_run_result(
self,
output_dir: Path,
mem_run_storage: MemoryRunStorage,
batch_result: BatchResult,
expected_result,
expected_status_summary,
):
# Validate the flow outputs
outputs = load_jsonl(output_dir / OUTPUT_FILE_NAME)
for i, output in enumerate(outputs):
expected_outputs = expected_result[i]["expected_outputs"].copy()
expected_outputs.update({"line_number": i})
assert output == expected_outputs
# Validate the flow line results
flow_runs = {k: v for k, v in sorted(mem_run_storage._flow_runs.items(), key=lambda item: item[1].index)}
for i, (flow_run_id, flow_run_info) in enumerate(flow_runs.items()):
self.assert_activate_flow_run_result(flow_run_info, expected_result[i])
node_run_infos = {
node_run_info.node: node_run_info
for node_run_info in mem_run_storage._node_runs.values()
if node_run_info.parent_run_id == flow_run_id
}
self.assert_activate_node_run_result(node_run_infos, expected_result[i])
# Validate the flow status summary
assert batch_result.total_lines == batch_result.completed_lines
assert batch_result.node_status == expected_status_summary
def assert_activate_flow_run_result(self, flow_run_info: FlowRunInfo, expected_result):
# Validate the flow status
assert flow_run_info.status == Status.Completed
# Validate the flow output
assert isinstance(flow_run_info.output, dict)
assert flow_run_info.output == expected_result["expected_outputs"]
def assert_activate_node_run_result(self, node_run_infos: Dict[str, NodeRunInfo], expected_result):
# Validate the flow node run infos for the completed nodes
assert len(node_run_infos) == expected_result["expected_node_count"]
expected_bypassed_nodes = expected_result["expected_bypassed_nodes"]
completed_nodes_run_infos = [
run_info for i, run_info in node_run_infos.items() if i not in expected_bypassed_nodes
]
assert all([node.status == Status.Completed for node in completed_nodes_run_infos])
# Validate the flow node run infos for the bypassed nodes
bypassed_nodes_run_infos = [node_run_infos[i] for i in expected_bypassed_nodes]
assert all([node.status == Status.Bypassed for node in bypassed_nodes_run_infos])
assert all([node.output is None for node in bypassed_nodes_run_infos])
| promptflow/src/promptflow/tests/executor/e2etests/test_activate.py/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/e2etests/test_activate.py",
"repo_id": "promptflow",
"token_count": 3116
} | 50 |
import json
from functools import partial
from aiohttp import web
from promptflow.exceptions import ValidationException
def run_executor_server(port, has_error=False, init_error_file=None):
app = web.Application()
app.router.add_get("/health", _handle_health)
handle_execution_with_customization = partial(_handle_execution, has_error=has_error)
app.router.add_post("/execution", handle_execution_with_customization)
print(f"Starting server on port {port}")
if init_error_file is None:
web.run_app(app, host="localhost", port=port)
else:
raise ValidationException("Error for tests")
async def _handle_health(request: web.Request):
return web.Response(text="Healthy")
async def _handle_execution(request: web.Request, has_error=False):
try:
payload = await request.json()  # avoid shadowing the aiohttp request object
return _get_execution_result(payload, has_error=has_error)
except json.JSONDecodeError:
return web.Response(status=400, text="Bad Request: Invalid JSON")
def _get_execution_result(request: dict, has_error=False):
run_id = request.get("run_id", "dummy_run_id")
index = request.get("line_number", 1)
inputs = request.get("inputs", {"question": "Hello world!"})
if has_error and index == 1:
# simulate error
line_result_dict = _get_line_result_dict(run_id, index, inputs, has_error=True)
return web.json_response(line_result_dict, status=500)
line_result_dict = _get_line_result_dict(run_id, index, inputs)
return web.json_response(line_result_dict)
def _get_line_result_dict(run_id, index, inputs, has_error=False):
if has_error:
return {
"error": {
"message": "error for tests",
"messageFormat": "",
"messageParameters": {},
"referenceCode": None,
"debugInfo": {
"type": "Exception",
"message": "error for tests",
"stackTrace": "...",
"innerException": None,
},
"additionalInfo": None,
"code": "UserError",
"innerError": {"code": "Exception", "innerError": None},
},
}
return {
"output": {"answer": "Hello world!"},
"aggregation_inputs": {},
"run_info": {
"run_id": run_id,
"status": "Completed",
"inputs": inputs,
"output": {"answer": "Hello world!"},
"parent_run_id": run_id,
"root_run_id": run_id,
"start_time": "2023-11-24T06:03:20.2685529Z",
"end_time": "2023-11-24T06:03:20.2688869Z",
"index": index,
"system_metrics": {"duration": "00:00:00.0003340", "total_tokens": 0},
"result": {"answer": "Hello world!"},
},
"node_run_infos": {
"get_answer": {
"node": "get_answer",
"flow_run_id": run_id,
"parent_run_id": run_id,
"run_id": "dummy_node_run_id",
"status": "Completed",
"inputs": inputs,
"output": "Hello world!",
"start_time": "2023-11-24T06:03:20.2688262Z",
"end_time": "2023-11-24T06:03:20.268858Z",
"index": index,
"system_metrics": {"duration": "00:00:00.0000318", "total_tokens": 0},
"result": "Hello world!",
}
},
}
def _get_aggr_result_dict(run_id, inputs, has_error=False):
if has_error:
return {
"error": {
"message": "error for tests",
"messageFormat": "",
"messageParameters": {},
"referenceCode": None,
"debugInfo": {
"type": "Exception",
"message": "error for tests",
"stackTrace": "...",
"innerException": None,
},
"additionalInfo": None,
"code": "UserError",
"innerError": {"code": "Exception", "innerError": None},
},
}
return {
"output": None,
"metrics": {"accuracy": 0.5},
"node_run_infos": {
"aggregation": {
"node": "aggregation",
"flow_run_id": run_id,
"parent_run_id": run_id,
"run_id": "dummy_node_run_id",
"status": "Completed",
"inputs": inputs,
"output": "Hello world!",
"start_time": "2023-11-24T06:03:20.2688262Z",
"end_time": "2023-11-24T06:03:20.268858Z",
"system_metrics": {"duration": "00:00:00.0000318", "total_tokens": 0},
"result": "Hello world!",
}
},
}
| promptflow/src/promptflow/tests/executor/mock_execution_server.py/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/mock_execution_server.py",
"repo_id": "promptflow",
"token_count": 2534
} | 51 |
import json
import re
from traceback import TracebackException
import pytest
from promptflow._core._errors import ToolExecutionError
from promptflow._core.operation_context import OperationContext
from promptflow._utils.exception_utils import (
ErrorResponse,
ExceptionPresenter,
JsonSerializedPromptflowException,
get_tb_next,
infer_error_code_from_class,
last_frame_info,
remove_suffix,
)
from promptflow.exceptions import (
ErrorTarget,
PromptflowException,
SystemErrorException,
UserErrorException,
ValidationException,
)
def set_inner_exception_by_parameter():
raise PromptflowException("test", error=ValueError("bad number"))
def set_inner_exception_by_raise_from():
raise PromptflowException("test") from ValueError("bad number")
def code_with_bug():
1 / 0
def raise_tool_execution_error():
try:
code_with_bug()
except Exception as e:
raise ToolExecutionError(node_name="MyTool") from e
def raise_exception_with_object():
raise PromptflowException(message_format="{inner_exception}", inner_exception=Exception("exception message"))
def raise_user_error():
try:
code_with_bug()
except Exception as e:
raise UserErrorException("run failed", target=ErrorTarget.TOOL) from e
def raise_context_exception():
try:
code_with_bug()
except Exception as e:
raise CustomizedContextException(e)
class CustomizedContextException(Exception):
def __init__(self, inner_exception):
self.inner_exception = inner_exception
@property
def message(self):
code_with_bug()
return "context exception"
class CustomizedException(Exception):
pass
class CustomUserError(UserErrorException):
pass
class CustomDefaultTargetError(UserErrorException):
def __init__(self, target=ErrorTarget.EXECUTOR, **kwargs):
super().__init__(target=target, **kwargs)
def raise_general_exception():
try:
code_with_bug()
except Exception as e:
raise CustomizedException("General exception") from e
def raise_promptflow_exception():
try:
code_with_bug()
except Exception as e:
raise PromptflowException("Promptflow exception") from e
def raise_promptflow_exception_without_inner_exception():
try:
code_with_bug()
except Exception:
raise PromptflowException("Promptflow exception")
TOOL_EXECUTION_ERROR_TRACEBACK = r"""Traceback \(most recent call last\):
File ".*test_exception_utils.py", line .*, in code_with_bug
1 / 0
ZeroDivisionError: division by zero
"""
TOOL_EXCEPTION_TRACEBACK = r"""
The above exception was the direct cause of the following exception:
Traceback \(most recent call last\):
File ".*test_exception_utils.py", line .*, in test_.*
raise_tool_execution_error\(\)
File ".*test_exception_utils.py", line .*, in raise_tool_execution_error
raise ToolExecutionError\(node_name="MyTool"\) from e
"""
TOOL_EXCEPTION_INNER_TRACEBACK = r"""Traceback \(most recent call last\):
File ".*test_exception_utils.py", line .*, in raise_tool_execution_error
code_with_bug\(\)
File ".*test_exception_utils.py", line .*, in code_with_bug
1 / 0
"""
GENERAL_EXCEPTION_TRACEBACK = r"""
The above exception was the direct cause of the following exception:
Traceback \(most recent call last\):
File ".*test_exception_utils.py", line .*, in test_debug_info_for_general_exception
raise_general_exception\(\)
File ".*test_exception_utils.py", line .*, in raise_general_exception
raise CustomizedException\("General exception"\) from e
"""
GENERAL_EXCEPTION_INNER_TRACEBACK = r"""Traceback \(most recent call last\):
File ".*test_exception_utils.py", line .*, in raise_general_exception
code_with_bug\(\)
File ".*test_exception_utils.py", line .*, in code_with_bug
1 / 0
"""
CONTEXT_EXCEPTION_TRACEBACK = r"""
During handling of the above exception, another exception occurred:
Traceback \(most recent call last\):
File ".*test_exception_utils.py", line .*, in test_debug_info_for_context_exception
raise_context_exception\(\)
File ".*test_exception_utils.py", line .*, in raise_context_exception
raise CustomizedContextException\(e\)
"""
CONTEXT_EXCEPTION_INNER_TRACEBACK = r"""Traceback \(most recent call last\):
File ".*test_exception_utils.py", line .*, in raise_context_exception
code_with_bug\(\)
File ".*test_exception_utils.py", line .*, in code_with_bug
1 / 0
"""
@pytest.mark.unittest
class TestExceptionUtilsCommonMethod:
def test_get_tb_next(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
tb_next = get_tb_next(e.value.__traceback__, 3)
te = TracebackException(type(e.value), e.value, tb_next)
formatted_tb = "".join(te.format())
assert re.match(TOOL_EXCEPTION_INNER_TRACEBACK, formatted_tb)
def test_last_frame_info(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
frame_info = last_frame_info(e.value)
assert "test_exception_utils.py" in frame_info.get("filename")
assert frame_info.get("lineno") > 0
assert frame_info.get("name") == "raise_tool_execution_error"
assert last_frame_info(None) == {}
@pytest.mark.parametrize(
"error_class, expected_error_code",
[
(UserErrorException, "UserError"),
(SystemErrorException, "SystemError"),
(ValidationException, "ValidationError"),
(ToolExecutionError, "ToolExecutionError"),
(ValueError, "ValueError"),
],
)
def test_infer_error_code_from_class(self, error_class, expected_error_code):
assert infer_error_code_from_class(error_class) == expected_error_code
@pytest.mark.unittest
class TestExceptionPresenter:
def test_debug_info(self):
# Test ToolExecutionError
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
presenter = ExceptionPresenter.create(e.value)
debug_info = presenter.debug_info
assert debug_info["type"] == "ToolExecutionError"
assert re.match(TOOL_EXCEPTION_TRACEBACK, debug_info["stackTrace"])
inner_exception = debug_info["innerException"]
assert inner_exception["type"] == "ZeroDivisionError"
assert re.match(TOOL_EXCEPTION_INNER_TRACEBACK, inner_exception["stackTrace"])
def test_debug_info_for_context_exception(self):
with pytest.raises(CustomizedContextException) as e:
raise_context_exception()
presenter = ExceptionPresenter.create(e.value)
debug_info = presenter.debug_info
assert debug_info["type"] == "CustomizedContextException"
assert re.match(CONTEXT_EXCEPTION_TRACEBACK, debug_info["stackTrace"])
inner_exception = debug_info["innerException"]
assert inner_exception["type"] == "ZeroDivisionError"
assert re.match(CONTEXT_EXCEPTION_INNER_TRACEBACK, inner_exception["stackTrace"])
def test_debug_info_for_general_exception(self):
# Test General Exception
with pytest.raises(CustomizedException) as e:
raise_general_exception()
presenter = ExceptionPresenter.create(e.value)
debug_info = presenter.debug_info
assert debug_info["type"] == "CustomizedException"
assert re.match(GENERAL_EXCEPTION_TRACEBACK, debug_info["stackTrace"])
inner_exception = debug_info["innerException"]
assert inner_exception["type"] == "ZeroDivisionError"
assert re.match(GENERAL_EXCEPTION_INNER_TRACEBACK, inner_exception["stackTrace"])
def test_to_dict_for_general_exception(self):
with pytest.raises(CustomizedException) as e:
raise_general_exception()
presenter = ExceptionPresenter.create(e.value)
dct = presenter.to_dict(include_debug_info=True)
assert "debugInfo" in dct
dct.pop("debugInfo")
assert dct == {
"code": "SystemError",
"message": "General exception",
"messageFormat": "",
"messageParameters": {},
"innerError": {
"code": "CustomizedException",
"innerError": None,
},
}
def test_to_dict_for_promptflow_exception(self):
with pytest.raises(PromptflowException) as e:
raise_promptflow_exception()
presenter = ExceptionPresenter.create(e.value)
dct = presenter.to_dict(include_debug_info=False)
assert dct == {
"code": "SystemError",
"message": "Promptflow exception",
"messageFormat": "",
"messageParameters": {},
"referenceCode": "Unknown",
"innerError": {
"code": "ZeroDivisionError",
"innerError": None,
},
}
def test_to_dict_for_promptflow_exception_without_inner_exception(self):
with pytest.raises(PromptflowException) as e:
raise_promptflow_exception_without_inner_exception()
presenter = ExceptionPresenter.create(e.value)
dct = presenter.to_dict(include_debug_info=False)
assert dct == {
"code": "SystemError",
"message": "Promptflow exception",
"messageFormat": "",
"messageParameters": {},
"referenceCode": "Unknown",
"innerError": None,
}
def test_to_dict_for_tool_execution_error(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
presenter = ExceptionPresenter.create(e.value)
assert re.search(TOOL_EXCEPTION_INNER_TRACEBACK, presenter.formatted_traceback)
assert re.search(TOOL_EXCEPTION_TRACEBACK, presenter.formatted_traceback)
dct = presenter.to_dict(include_debug_info=False)
assert dct.pop("additionalInfo") is not None
assert dct == {
"code": "UserError",
"message": "Execution failure in 'MyTool': (ZeroDivisionError) division by zero",
"messageFormat": "Execution failure in '{node_name}'.",
"messageParameters": {"node_name": "MyTool"},
"referenceCode": "Tool",
"innerError": {
"code": "ToolExecutionError",
"innerError": None,
},
}
@pytest.mark.parametrize(
"raise_exception_func, error_class, expected_error_codes",
[
(raise_general_exception, CustomizedException, ["SystemError", "CustomizedException"]),
(raise_tool_execution_error, ToolExecutionError, ["UserError", "ToolExecutionError"]),
(raise_promptflow_exception, PromptflowException, ["SystemError", "ZeroDivisionError"]),
(raise_promptflow_exception_without_inner_exception, PromptflowException, ["SystemError"]),
],
)
def test_error_codes(self, raise_exception_func, error_class, expected_error_codes):
with pytest.raises(error_class) as e:
raise_exception_func()
presenter = ExceptionPresenter.create(e.value)
assert presenter.error_codes == expected_error_codes
@pytest.mark.unittest
class TestErrorResponse:
def test_from_error_dict(self):
error_dict = {
"code": "UserError",
"message": "Flow run failed.",
}
response = ErrorResponse.from_error_dict(error_dict)
assert response.response_code == "400"
assert response.error_codes == ["UserError"]
assert response.message == "Flow run failed."
response_dct = response.to_dict()
assert response_dct["time"] is not None
response_dct.pop("time")
component_name = response_dct.pop("componentName", None)
assert component_name == OperationContext.get_instance().get_user_agent()
assert "promptflow" in component_name
assert response_dct == {
"error": {
"code": "UserError",
"message": "Flow run failed.",
},
"correlation": None,
"environment": None,
"location": None,
}
def test_to_simplied_dict(self):
with pytest.raises(CustomizedException) as e:
raise_general_exception()
error_response = ErrorResponse.from_exception(e.value)
assert error_response.to_simplified_dict() == {
"error": {
"code": "SystemError",
"message": "General exception",
}
}
def test_from_exception(self):
with pytest.raises(CustomizedException) as e:
raise_general_exception()
response = ErrorResponse.from_exception(e.value).to_dict()
assert response["time"] is not None
response.pop("time")
component_name = response.pop("componentName", None)
assert component_name == OperationContext.get_instance().get_user_agent()
assert "promptflow" in component_name
assert response == {
"error": {
"code": "SystemError",
"message": "General exception",
"messageFormat": "",
"messageParameters": {},
"innerError": {
"code": "CustomizedException",
"innerError": None,
},
},
"correlation": None,
"environment": None,
"location": None,
}
@pytest.mark.unittest
@pytest.mark.parametrize(
"input_dict, expected",
[
({"code": "firstError"}, "firstError"),
({"code": "firstError", "innerError": {}}, "firstError"),
({"code": "firstError", "innerError": {"code": "secondError"}}, "firstError/secondError"),
({"code": None, "innerError": {"code": "secondError"}}, ""),
# A dict without a "code" key at the outermost level returns an empty string.
({"error": {"code": "firstError", "innerError": {"code": "secondError"}}}, ""),
],
)
def test_error_code_hierarchy(self, input_dict, expected):
assert ErrorResponse.from_error_dict(input_dict).error_code_hierarchy == expected
@pytest.mark.parametrize(
"error_dict, expected_innermost_error_code",
[
(
{
"code": "UserError",
"innerError": {
"code": "ToolExecutionError",
"innerError": None,
},
},
"ToolExecutionError",
),
({"code": "UserError", "innerError": None}, "UserError"),
({"message": "UserError", "innerError": None}, None),
],
)
def test_innermost_error_code_with_code(self, error_dict, expected_innermost_error_code):
inner_error_code = ErrorResponse.from_error_dict(error_dict).innermost_error_code
assert inner_error_code == expected_innermost_error_code
@pytest.mark.parametrize(
"error_dict, expected_additional_info",
[
({"code": "UserError"}, {}),
(
{
"code": "UserError",
"additionalInfo": [
{
"type": "test_additional_info",
"info": "This is additional info for testing.",
},
"not_dict",
{
"type": "empty_info",
},
{
"info": "Empty type",
},
{
"test": "Invalid additional info",
},
],
},
{"test_additional_info": "This is additional info for testing."},
),
],
)
def test_additional_info(self, error_dict, expected_additional_info):
error_response = ErrorResponse.from_error_dict(error_dict)
assert error_response.additional_info == expected_additional_info
assert all(error_response.get_additional_info(key) == value for key, value in expected_additional_info.items())
@pytest.mark.parametrize(
"raise_exception_func, error_class",
[
(raise_general_exception, CustomizedException),
(raise_tool_execution_error, ToolExecutionError),
],
)
def test_get_user_execution_error_info(self, raise_exception_func, error_class):
with pytest.raises(error_class) as e:
raise_exception_func()
error_response = ErrorResponse.from_exception(e.value)
actual_error_info = error_response.get_user_execution_error_info()
self.assert_user_execution_error_info(e.value, actual_error_info)
def assert_user_execution_error_info(self, exception, error_info):
if isinstance(exception, ToolExecutionError):
assert error_info["type"] == "ZeroDivisionError"
assert error_info["message"] == "division by zero"
assert error_info["filename"].endswith("test_exception_utils.py")
assert error_info["lineno"] > 0
assert error_info["name"] == "code_with_bug"
assert re.match(
r"Traceback \(most recent call last\):\n"
r' File ".*test_exception_utils.py", line .*, in code_with_bug\n'
r" 1 / 0\n"
r"(.*\n)?" # Python >= 3.11 add extra line here like a pointer.
r"ZeroDivisionError: division by zero\n",
error_info["traceback"],
)
# assert re.match(TOOL_EXECUTION_ERROR_TRACEBACK, error_info["traceback"])
else:
assert error_info == {}
@pytest.mark.unittest
class TestExceptions:
@pytest.mark.parametrize(
"ex, expected_message, expected_message_format, expected_message_parameters",
[
(
CustomUserError("message"),
"message",
"",
{},
),
(
CustomUserError(message="message"),
"message",
"",
{},
),
(
CustomUserError("message", target=ErrorTarget.TOOL),
"message",
"",
{},
),
(
CustomUserError(message="message", target=ErrorTarget.TOOL),
"message",
"",
{},
),
(
CustomUserError(message_format="Hello world"),
"Hello world",
"Hello world",
{},
),
(
CustomUserError(message_format="Hello {name}", name="world"),
"Hello world",
"Hello {name}",
{
"name": "world",
},
),
(
CustomUserError(message_format="Hello {name}", name="world", not_used="whatever"),
"Hello world",
"Hello {name}",
{
"name": "world",
},
),
(
CustomUserError(message_format="Hello {name}", name="world", target=ErrorTarget.TOOL),
"Hello world",
"Hello {name}",
{
"name": "world",
},
),
(
CustomUserError(message_format="Hello {name} and {name}", name="world"),
"Hello world and world",
"Hello {name} and {name}",
{
"name": "world",
},
),
(
CustomUserError(message_format="Hello {name} and {name}", name="world"),
"Hello world and world",
"Hello {name} and {name}",
{
"name": "world",
},
),
(
CustomUserError(
message_format="Tool '{tool_name}' execution failed due to {error}",
tool_name="my tool",
error="bug",
),
"Tool 'my tool' execution failed due to bug",
"Tool '{tool_name}' execution failed due to {error}",
{
"tool_name": "my tool",
"error": "bug",
},
),
],
)
def test_message_and_format(self, ex, expected_message, expected_message_format, expected_message_parameters):
with pytest.raises(CustomUserError) as exc:
raise ex
assert exc.value.message == expected_message
assert exc.value.message_format == expected_message_format
assert exc.value.message_parameters == expected_message_parameters
@pytest.mark.parametrize(
"ex, expected_message, exepcted_target",
[
(
CustomDefaultTargetError(message="message", target=ErrorTarget.TOOL),
"message",
ErrorTarget.TOOL,
),
(
CustomDefaultTargetError(message="message"),
"message",
ErrorTarget.EXECUTOR,
),
],
)
def test_target_and_message(self, ex, expected_message, expected_target):
with pytest.raises(CustomDefaultTargetError) as exc:
raise ex
assert exc.value.message == expected_message
assert exc.value.target == expected_target
def test_reference_code(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
e = e.value
assert e.reference_code == ErrorTarget.TOOL.value
module = "promptflow_vectordb.tool.faiss_index_loopup"
e.module = module
assert e.reference_code == f"{ErrorTarget.TOOL.value}/{module}"
@pytest.mark.parametrize(
"func_that_raises_exception",
[
set_inner_exception_by_parameter,
set_inner_exception_by_raise_from,
],
)
def test_inner_exception(self, func_that_raises_exception):
with pytest.raises(PromptflowException) as e:
func_that_raises_exception()
inner_exception = e.value.inner_exception
assert isinstance(inner_exception, ValueError)
assert str(inner_exception) == "bad number"
assert str(e.value) == "test"
def test_tool_execution_error(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
inner_exception = e.value.inner_exception
assert isinstance(inner_exception, ZeroDivisionError)
assert str(inner_exception) == "division by zero"
assert e.value.message == "Execution failure in 'MyTool': (ZeroDivisionError) division by zero"
last_frame_info = e.value.tool_last_frame_info
assert "test_exception_utils.py" in last_frame_info.get("filename")
assert last_frame_info.get("lineno") > 0
assert last_frame_info.get("name") == "code_with_bug"
assert re.match(
r"Traceback \(most recent call last\):\n"
r' File ".*test_exception_utils.py", line .*, in code_with_bug\n'
r" 1 / 0\n"
r"(.*\n)?" # Python >= 3.11 add extra line here like a pointer.
r"ZeroDivisionError: division by zero\n",
e.value.tool_traceback,
)
def test_code_hierarchy(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
e = e.value
assert e.error_codes == ["UserError", "ToolExecutionError"]
assert ExceptionPresenter.create(e).error_code_recursed == {
"code": "UserError",
"innerError": {
"code": "ToolExecutionError",
"innerError": None,
},
}
def test_debug_info(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
e = e.value
presenter = ExceptionPresenter.create(e)
assert presenter.debug_info["type"] == "ToolExecutionError"
assert re.match(TOOL_EXCEPTION_TRACEBACK, presenter.debug_info["stackTrace"])
inner_exception = presenter.debug_info["innerException"]
assert inner_exception["type"] == "ZeroDivisionError"
assert re.match(TOOL_EXCEPTION_INNER_TRACEBACK, inner_exception["stackTrace"])
def test_additional_info(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
additional_info = ExceptionPresenter.create(e.value).to_dict().get("additionalInfo")
assert len(additional_info) == 1
info_0 = additional_info[0]
assert info_0["type"] == "ToolExecutionErrorDetails"
info_0_value = info_0["info"]
assert info_0_value.get("type") == "ZeroDivisionError"
assert info_0_value.get("message") == "division by zero"
assert re.match(r".*test_exception_utils.py", info_0_value["filename"])
assert info_0_value.get("lineno") > 0
assert info_0_value.get("name") == "code_with_bug"
assert re.match(
r"Traceback \(most recent call last\):\n"
r' File ".*test_exception_utils.py", line .*, in code_with_bug\n'
r" 1 / 0\n"
r"(.*\n)?" # Python >= 3.11 add extra line here like a pointer.
r"ZeroDivisionError: division by zero\n",
info_0_value.get("traceback"),
)
def test_additional_info_for_empty_inner_error(self):
ex = ToolExecutionError(node_name="Node1")
dct = ExceptionPresenter.create(ex).to_dict()
additional_info = dct.get("additionalInfo")
assert additional_info is None
def test_additional_info_for_empty_case(self):
with pytest.raises(UserErrorException) as e:
raise_user_error()
dct = ExceptionPresenter.create(e.value).to_dict()
additional_info = dct.get("additionalInfo")
assert additional_info is None
@pytest.mark.parametrize("include_debug_info", [True, False])
def test_to_dict_turning_on_or_off_debug_info(self, include_debug_info):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
e = e.value
result = ExceptionPresenter.create(e).to_dict(include_debug_info=include_debug_info)
if include_debug_info:
assert "debugInfo" in result
else:
assert "debugInfo" not in result
def test_to_dict(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
e = e.value
        # We do not check include_debug_info=True since the traceback is already checked in other cases
result = ExceptionPresenter.create(e).to_dict(include_debug_info=False)
        # We do not check additionalInfo since it is already checked in other cases
result.pop("additionalInfo")
assert result == {
"message": "Execution failure in 'MyTool': (ZeroDivisionError) division by zero",
"messageFormat": "Execution failure in '{node_name}'.",
"messageParameters": {"node_name": "MyTool"},
"referenceCode": "Tool",
"code": "UserError",
"innerError": {
"code": "ToolExecutionError",
"innerError": None,
},
}
def test_to_dict_object_parameter(self):
with pytest.raises(PromptflowException) as e:
raise_exception_with_object()
e = e.value
        # We do not check include_debug_info=True since the traceback is already checked in other cases
result = ExceptionPresenter.create(e).to_dict(include_debug_info=False)
# Assert message is str(exception)
assert result == {
"message": "exception message",
"messageFormat": "{inner_exception}",
"messageParameters": {"inner_exception": "exception message"},
"referenceCode": "Unknown",
"code": "SystemError",
"innerError": None,
}
@pytest.mark.parametrize("include_debug_info", [True, False])
def test_to_dict_for_JsonSerializedPromptflowException(self, include_debug_info):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
exception_dict = ExceptionPresenter.create(e.value).to_dict(include_debug_info=True)
message = json.dumps(exception_dict)
exception = JsonSerializedPromptflowException(message=message)
assert str(exception) == message
json_serialized_exception_dict = ExceptionPresenter.create(exception).to_dict(
include_debug_info=include_debug_info
)
error_dict = exception.to_dict(include_debug_info=include_debug_info)
assert error_dict == json_serialized_exception_dict
if include_debug_info:
assert "debugInfo" in error_dict
error_dict.pop("debugInfo")
error_dict.pop("additionalInfo")
assert error_dict == {
"code": "UserError",
"message": "Execution failure in 'MyTool': (ZeroDivisionError) division by zero",
"messageFormat": "Execution failure in '{node_name}'.",
"messageParameters": {"node_name": "MyTool"},
"referenceCode": "Tool",
"innerError": {
"code": "ToolExecutionError",
"innerError": None,
},
}
def test_remove_suffix(self):
assert remove_suffix('PackageToolNotFoundError.', '.') == 'PackageToolNotFoundError'
assert remove_suffix('PackageToolNotFoundError', 'Error') == 'PackageToolNotFound'
assert remove_suffix('PackageToolNotFoundError', 'PackageToolNotFoundError') == ''
assert remove_suffix('PackageToolNotFoundError', 'NonExistedSuffix') == 'PackageToolNotFoundError'
assert remove_suffix('PackageToolNotFoundError', '') == 'PackageToolNotFoundError'
assert remove_suffix('PackageToolNotFoundError', None) == 'PackageToolNotFoundError'
assert remove_suffix('', 'NonExistedSuffix') == ''
assert remove_suffix(None, 'NonExistedSuffix') is None
| promptflow/src/promptflow/tests/executor/unittests/_utils/test_exception_utils.py/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/unittests/_utils/test_exception_utils.py",
"repo_id": "promptflow",
"token_count": 13949
} | 52 |
from typing import Any
import pytest
from promptflow._core._errors import NotSupported
from promptflow.contracts.flow import InputAssignment
from promptflow.executor._errors import (
InputNotFound,
InputNotFoundFromAncestorNodeOutput,
InvalidReferenceProperty,
UnsupportedReference,
)
from promptflow.executor._input_assignment_parser import parse_node_property, parse_value
FLOW_INPUTS = {"text": "hello promptflow"}
NODE_OUTPUTS = {"node1": "hello promptflow"}
class WrongInputAssignment:
value: Any
value_type: str = "wrong_type"
section: str = ""
property: str = ""
class DummyObject:
value: str = "dummy"
@pytest.mark.unittest
class TestInputAssignmentParser:
@pytest.mark.parametrize(
"input, expected_value",
[
("hello promptflow", "hello promptflow"),
("${inputs.text}", "hello promptflow"),
("${node1.output}", "hello promptflow"),
],
)
def test_parse_value(self, input, expected_value):
input_assignment = InputAssignment.deserialize(input)
actual_value = parse_value(input_assignment, NODE_OUTPUTS, FLOW_INPUTS)
assert actual_value == expected_value
@pytest.mark.parametrize(
"input, expected_error_class, expected_error_message",
[
(
"${inputs.word}",
InputNotFound,
(
"The input 'word' is not found from flow inputs 'text'. "
"Please check the input name and try again."
),
),
(
"${node2.output}",
InputNotFoundFromAncestorNodeOutput,
(
"The input 'node2' is not found from ancestor node outputs ['node1']. "
"Please check the node name and try again."
),
),
(
"${node1.word}",
UnsupportedReference,
(
"The section 'word' of reference is currently unsupported. "
"Please specify the output part of the node 'node1'."
),
),
(
WrongInputAssignment(),
NotSupported,
(
"The type 'wrong_type' is currently unsupported. "
"Please choose from available types: ['Literal', 'FlowInput', 'NodeReference'] and try again."
),
),
],
)
def test_parse_value_with_exception(self, input, expected_error_class, expected_error_message):
input_assignment = InputAssignment.deserialize(input) if isinstance(input, str) else input
with pytest.raises(expected_error_class) as e:
parse_value(input_assignment, NODE_OUTPUTS, FLOW_INPUTS)
assert e.value.message == f"Flow execution failed. {expected_error_message}"
@pytest.mark.parametrize(
"node_val, property, expected_value",
[
(
{"output": "hello promptflow"},
"output",
"hello promptflow",
),
(
{"output": "hello promptflow"},
"['output']",
"hello promptflow",
),
(
{"output": "hello promptflow"},
'["output"]',
"hello promptflow",
),
(
{"output": {"text": "hello promptflow"}},
'["output"]["text"]',
"hello promptflow",
),
(
["output1", "output2"],
"[1]",
"output2",
),
(
DummyObject(),
"value",
"dummy",
),
],
)
def test_parse_node_property(self, node_val, property, expected_value):
actual_value = parse_node_property("node1", node_val, property)
assert actual_value == expected_value
@pytest.mark.parametrize(
"node_val, property, expected_error_message",
[
(
{"output_str": ["output1", "output2"]},
"output_str[2]",
(
"Invalid property 'output_str[2]' when accessing the node 'node1'. "
"Please check the property and try again."
),
),
(
{"word": "hello promptflow"},
"text",
(
"Invalid property 'text' when accessing the node 'node1'. "
"Please check the property and try again."
),
),
(
DummyObject(),
"value_type",
(
"Invalid property 'value_type' when accessing the node 'node1'. "
"Please check the property and try again."
),
),
],
)
def test_parse_node_property_with_exception(self, node_val, property, expected_error_message):
with pytest.raises(InvalidReferenceProperty) as e:
parse_node_property("node1", node_val, property)
assert e.value.message == f"Flow execution failed. {expected_error_message}"
| promptflow/src/promptflow/tests/executor/unittests/executor/test_input_assignment_parser.py/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/unittests/executor/test_input_assignment_parser.py",
"repo_id": "promptflow",
"token_count": 2719
} | 53 |
import os
import os.path
import sys
from pathlib import Path
import pytest
from promptflow._cli._pf.entry import main
from ..recording_utilities import is_live
FLOWS_DIR = "./tests/test_configs/flows"
RUNS_DIR = "./tests/test_configs/runs"
CONNECTIONS_DIR = "./tests/test_configs/connections"
DATAS_DIR = "./tests/test_configs/datas"
# TODO: move this to a shared utility module
def run_pf_command(*args, cwd=None):
"""Run a pf command with the given arguments and working directory.
There have been some unknown issues in using subprocess on CI, so we use this function instead, which will also
provide better debugging experience.
"""
origin_argv, origin_cwd = sys.argv, os.path.abspath(os.curdir)
try:
sys.argv = ["pf"] + list(args)
if cwd:
os.chdir(cwd)
main()
finally:
sys.argv = origin_argv
os.chdir(origin_cwd)
@pytest.mark.skipif(condition=not is_live(), reason="CLI tests, only run in live mode.")
@pytest.mark.cli_test
@pytest.mark.e2etest
class TestCli:
    # PF CLI tests live here because designating a remote connection provider requires azure dependencies.
def test_pf_flow_test(self, remote_workspace_resource_id):
# Test with connection provider
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/web_classification",
"--config",
f"connection.provider={remote_workspace_resource_id}",
)
output_path = Path(FLOWS_DIR) / "web_classification" / ".promptflow" / "flow.output.json"
assert output_path.exists()
def test_flow_chat(self, monkeypatch, capsys, remote_workspace_resource_id):
chat_list = ["hi", "what is chat gpt?"]
def mock_input(*args, **kwargs):
if chat_list:
return chat_list.pop()
else:
raise KeyboardInterrupt()
monkeypatch.setattr("builtins.input", mock_input)
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/chat_flow",
"--interactive",
"--verbose",
"--config",
f"connection.provider={remote_workspace_resource_id}",
)
output_path = Path(FLOWS_DIR) / "chat_flow" / ".promptflow" / "chat.output.json"
assert output_path.exists()
detail_path = Path(FLOWS_DIR) / "chat_flow" / ".promptflow" / "chat.detail.json"
assert detail_path.exists()
outerr = capsys.readouterr()
# Check node output
assert "chat_node:" in outerr.out
assert "show_answer:" in outerr.out
assert "[show_answer]: print:" in outerr.out
| promptflow/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_cli.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_cli.py",
"repo_id": "promptflow",
"token_count": 1213
} | 54 |
import pytest
from promptflow._cli._pf_azure.entry import get_parser_args
from promptflow._cli._utils import _get_cli_activity_name
def get_cli_activity_name(cmd):
prog, args = get_parser_args(list(cmd)[1:])
return _get_cli_activity_name(cli=prog, args=args)
@pytest.mark.unittest
class TestAzureCliTimeConsume:
def test_pfazure_run_create(self, activity_name="pfazure.run.create"):
assert (
get_cli_activity_name(
cmd=("pfazure", "run", "create", "--flow", "print_input_flow", "--data", "print_input_flow.jsonl")
)
== activity_name
)
def test_pfazure_run_update(self, activity_name="pfazure.run.update"):
assert (
get_cli_activity_name(
cmd=(
"pfazure",
"run",
"update",
"--name",
"test_run",
"--set",
"display_name=test_run",
"description='test_description'",
"tags.key1=value1",
)
)
== activity_name
)
def test_run_restore(self, activity_name="pfazure.run.restore"):
assert get_cli_activity_name(cmd=("pfazure", "run", "restore", "--name", "test_run")) == activity_name
| promptflow/src/promptflow/tests/sdk_cli_azure_test/unittests/test_azure_cli_activity_name.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_cli_azure_test/unittests/test_azure_cli_activity_name.py",
"repo_id": "promptflow",
"token_count": 716
} | 55 |
import base64
import json
import multiprocessing
import os
from pathlib import Path
from unittest.mock import patch
import pytest
from mock import mock
from pytest_mock import MockerFixture
from sqlalchemy import create_engine
from promptflow import PFClient
from promptflow._sdk._configuration import Configuration
from promptflow._sdk._constants import EXPERIMENT_CREATED_ON_INDEX_NAME, EXPERIMENT_TABLE_NAME, LOCAL_MGMT_DB_PATH
from promptflow._sdk._serving.app import create_app as create_serving_app
from promptflow._sdk.entities import AzureOpenAIConnection as AzureOpenAIConnectionEntity
from promptflow._sdk.entities._connection import CustomConnection, _Connection
from promptflow.executor._line_execution_process_pool import _process_wrapper
from promptflow.executor._process_manager import create_spawned_fork_process_manager
from .recording_utilities import RecordStorage, mock_tool, recording_array_extend, recording_array_reset
PROMPTFLOW_ROOT = Path(__file__) / "../../.."
RUNTIME_TEST_CONFIGS_ROOT = Path(PROMPTFLOW_ROOT / "tests/test_configs/runtime")
RECORDINGS_TEST_CONFIGS_ROOT = Path(PROMPTFLOW_ROOT / "tests/test_configs/node_recordings").resolve()
CONNECTION_FILE = (PROMPTFLOW_ROOT / "connections.json").resolve().absolute().as_posix()
MODEL_ROOT = Path(PROMPTFLOW_ROOT / "tests/test_configs/flows")
@pytest.fixture(scope="session")
def local_client() -> PFClient:
yield PFClient()
@pytest.fixture(scope="session")
def pf() -> PFClient:
yield PFClient()
@pytest.fixture()
def local_aoai_connection(local_client, azure_open_ai_connection):
conn = AzureOpenAIConnectionEntity(
name="azure_open_ai_connection",
api_key=azure_open_ai_connection.api_key,
api_base=azure_open_ai_connection.api_base,
)
local_client.connections.create_or_update(conn)
return conn
@pytest.fixture()
def local_alt_aoai_connection(local_client, azure_open_ai_connection):
conn = AzureOpenAIConnectionEntity(
name="new_ai_connection",
api_key=azure_open_ai_connection.api_key,
api_base=azure_open_ai_connection.api_base,
)
local_client.connections.create_or_update(conn)
return conn
@pytest.fixture()
def local_custom_connection(local_client, azure_open_ai_connection):
conn = CustomConnection(
name="test_custom_connection",
secrets={"test_secret": "test_value"},
)
local_client.connections.create_or_update(conn)
return conn
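# Module-level guard so that setup_local_connection creates the connections at most once per test session.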
_connection_setup = False
@pytest.fixture
def setup_local_connection(local_client, azure_open_ai_connection):
global _connection_setup
if _connection_setup:
return
connection_dict = json.loads(open(CONNECTION_FILE, "r").read())
for name, _dct in connection_dict.items():
if _dct["type"] == "BingConnection":
continue
local_client.connections.create_or_update(_Connection._from_execution_connection_dict(name=name, data=_dct))
_connection_setup = True
@pytest.fixture
def setup_experiment_table():
with mock.patch("promptflow._sdk._configuration.Configuration.is_internal_features_enabled") as mock_func:
mock_func.return_value = True
# Call this session to initialize session maker, then add experiment table
from promptflow._sdk._orm import Experiment, mgmt_db_session
from promptflow._sdk._orm.session import create_index_if_not_exists, create_or_update_table
mgmt_db_session()
engine = create_engine(f"sqlite:///{str(LOCAL_MGMT_DB_PATH)}", future=True)
if Configuration.get_instance().is_internal_features_enabled():
create_or_update_table(engine, orm_class=Experiment, tablename=EXPERIMENT_TABLE_NAME)
create_index_if_not_exists(engine, EXPERIMENT_CREATED_ON_INDEX_NAME, EXPERIMENT_TABLE_NAME, "created_on")
@pytest.fixture
def flow_serving_client(mocker: MockerFixture):
model_path = (Path(MODEL_ROOT) / "basic-with-connection").resolve().absolute().as_posix()
mocker.patch.dict(os.environ, {"PROMPTFLOW_PROJECT_PATH": model_path})
mocker.patch.dict(os.environ, {"USER_AGENT": "test-user-agent"})
app = create_serving_app(environment_variables={"API_TYPE": "${azure_open_ai_connection.api_type}"})
app.config.update(
{
"TESTING": True,
}
)
return app.test_client()
@pytest.fixture
def flow_serving_client_with_encoded_connection(mocker: MockerFixture):
from promptflow._core.connection_manager import ConnectionManager
from promptflow._sdk._serving.utils import encode_dict
connection_dict = json.loads(open(CONNECTION_FILE, "r").read())
connection_manager = ConnectionManager(connection_dict)
connections = {"PROMPTFLOW_ENCODED_CONNECTIONS": encode_dict(connection_manager.to_connections_dict())}
return create_client_by_model("basic-with-connection", mocker, connections, extension_type="azureml")
@pytest.fixture
def evaluation_flow_serving_client(mocker: MockerFixture):
model_path = (Path(MODEL_ROOT) / "web_classification").resolve().absolute().as_posix()
mocker.patch.dict(os.environ, {"PROMPTFLOW_PROJECT_PATH": model_path})
app = create_serving_app()
app.config.update(
{
"TESTING": True,
}
)
return app.test_client()
def create_client_by_model(
    model_name: str, mocker: MockerFixture, connections: dict = None, extension_type=None, environment_variables=None
):
    model_path = (Path(MODEL_ROOT) / model_name).resolve().absolute().as_posix()
    mocker.patch.dict(os.environ, {"PROMPTFLOW_PROJECT_PATH": model_path})
    if connections:
        mocker.patch.dict(os.environ, connections)
    # Copy instead of mutating the argument: a shared mutable default dict would otherwise leak state across calls.
    environment_variables = dict(environment_variables) if environment_variables else {}
    if extension_type and extension_type == "azureml":
        environment_variables["API_TYPE"] = "${azure_open_ai_connection.api_type}"
app = create_serving_app(environment_variables=environment_variables, extension_type=extension_type)
app.config.update(
{
"TESTING": True,
}
)
return app.test_client()
@pytest.fixture
def serving_client_llm_chat(mocker: MockerFixture):
return create_client_by_model("chat_flow_with_stream_output", mocker)
@pytest.fixture
def serving_client_python_stream_tools(mocker: MockerFixture):
return create_client_by_model("python_stream_tools", mocker)
@pytest.fixture
def sample_image():
image_path = (Path(MODEL_ROOT) / "python_tool_with_simple_image" / "logo.jpg").resolve()
return base64.b64encode(open(image_path, "rb").read()).decode("utf-8")
@pytest.fixture
def serving_client_image_python_flow(mocker: MockerFixture):
return create_client_by_model("python_tool_with_simple_image", mocker)
@pytest.fixture
def serving_client_composite_image_flow(mocker: MockerFixture):
return create_client_by_model("python_tool_with_composite_image", mocker)
@pytest.fixture
def serving_client_with_environment_variables(mocker: MockerFixture):
return create_client_by_model(
"flow_with_environment_variables",
mocker,
environment_variables={"env2": "runtime_env2", "env10": "aaaaa"},
)
@pytest.fixture
def recording_file_override(request: pytest.FixtureRequest, mocker: MockerFixture):
if RecordStorage.is_replaying_mode() or RecordStorage.is_recording_mode():
file_path = RECORDINGS_TEST_CONFIGS_ROOT / "node_cache.shelve"
RecordStorage.get_instance(file_path)
yield
SpawnProcess = multiprocessing.get_context("spawn").Process
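# Swap the targets of spawned processes with mocked wrappers so that recording injection
# is set up in child processes as well, not just in the main test process.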
class MockSpawnProcess(SpawnProcess):
def __init__(self, group=None, target=None, *args, **kwargs):
if target == _process_wrapper:
target = _mock_process_wrapper
if target == create_spawned_fork_process_manager:
target = _mock_create_spawned_fork_process_manager
super().__init__(group, target, *args, **kwargs)
@pytest.fixture
def recording_injection(mocker: MockerFixture, recording_file_override):
original_process_class = multiprocessing.get_context("spawn").Process
multiprocessing.get_context("spawn").Process = MockSpawnProcess
if "spawn" == multiprocessing.get_start_method():
multiprocessing.Process = MockSpawnProcess
patches = setup_recording_injection_if_enabled()
try:
yield (RecordStorage.is_replaying_mode() or RecordStorage.is_recording_mode(), recording_array_extend)
finally:
if RecordStorage.is_replaying_mode() or RecordStorage.is_recording_mode():
RecordStorage.get_instance().delete_lock_file()
recording_array_reset()
multiprocessing.get_context("spawn").Process = original_process_class
if "spawn" == multiprocessing.get_start_method():
multiprocessing.Process = original_process_class
for patcher in patches:
patcher.stop()
def setup_recording_injection_if_enabled():
patches = []
if RecordStorage.is_replaying_mode() or RecordStorage.is_recording_mode():
file_path = RECORDINGS_TEST_CONFIGS_ROOT / "node_cache.shelve"
RecordStorage.get_instance(file_path)
from promptflow._core.tool import tool as original_tool
mocked_tool = mock_tool(original_tool)
patch_targets = ["promptflow._core.tool.tool", "promptflow._internal.tool", "promptflow.tool"]
for target in patch_targets:
patcher = patch(target, mocked_tool)
patches.append(patcher)
patcher.start()
return patches
def _mock_process_wrapper(*args, **kwargs):
setup_recording_injection_if_enabled()
return _process_wrapper(*args, **kwargs)
def _mock_create_spawned_fork_process_manager(*args, **kwargs):
setup_recording_injection_if_enabled()
return create_spawned_fork_process_manager(*args, **kwargs)
| promptflow/src/promptflow/tests/sdk_cli_test/conftest.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_cli_test/conftest.py",
"repo_id": "promptflow",
"token_count": 3670
} | 56 |
from .constants import ENVIRON_TEST_MODE, RecordMode
from .mock_tool import mock_tool, recording_array_extend, recording_array_reset
from .record_storage import RecordFileMissingException, RecordItemMissingException, RecordStorage
__all__ = [
"RecordStorage",
"RecordMode",
"ENVIRON_TEST_MODE",
"RecordFileMissingException",
"RecordItemMissingException",
"mock_tool",
"recording_array_extend",
"recording_array_reset",
]
| promptflow/src/promptflow/tests/sdk_cli_test/recording_utilities/__init__.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_cli_test/recording_utilities/__init__.py",
"repo_id": "promptflow",
"token_count": 156
} | 57 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import copy
import uuid
from pathlib import Path
from unittest.mock import patch
import pytest
from marshmallow import ValidationError
from promptflow._sdk._constants import BASE_PATH_CONTEXT_KEY, NODES
from promptflow._sdk._errors import InvalidFlowError
from promptflow._sdk._load_functions import load_run
from promptflow._sdk._pf_client import PFClient
from promptflow._sdk._run_functions import create_yaml_run
from promptflow._sdk._submitter import RunSubmitter, overwrite_variant, variant_overwrite_context
from promptflow._sdk.entities import Run
from promptflow._sdk.operations._local_storage_operations import LocalStorageOperations
from promptflow._utils.yaml_utils import load_yaml
PROMPTFLOW_ROOT = Path(__file__) / "../../../.."
FLOWS_DIR = Path("./tests/test_configs/flows")
RUNS_DIR = Path("./tests/test_configs/runs")
DATAS_DIR = Path("./tests/test_configs/datas")
@pytest.mark.sdk_test
@pytest.mark.unittest
class TestRun:
def test_overwrite_variant_context(self):
with variant_overwrite_context(
flow_path=FLOWS_DIR / "web_classification", tuning_node="summarize_text_content", variant="variant_0"
) as flow:
with open(flow.path) as f:
flow_dag = load_yaml(f)
node_name_2_node = {node["name"]: node for node in flow_dag[NODES]}
node = node_name_2_node["summarize_text_content"]
assert node["inputs"]["temperature"] == "0.2"
def test_overwrite_connections(self):
with variant_overwrite_context(
flow_path=FLOWS_DIR / "web_classification",
connections={"classify_with_llm": {"connection": "azure_open_ai", "deployment_name": "gpt-35-turbo"}},
) as flow:
with open(flow.path) as f:
flow_dag = load_yaml(f)
node_name_2_node = {node["name"]: node for node in flow_dag[NODES]}
node = node_name_2_node["classify_with_llm"]
assert node["connection"] == "azure_open_ai"
assert node["inputs"]["deployment_name"] == "gpt-35-turbo"
@pytest.mark.parametrize(
"connections, error_message",
[
(
{
"classify_with_llm": {
"connection": "azure_open_ai",
"deployment_name": "gpt-35-turbo",
"unsupported": 1,
}
},
"Unsupported llm connection overwrite keys",
),
("str", "Invalid connections overwrite format: str"),
({"not_exist": 1}, "Node not_exist not found in flow"),
({"classify_with_llm": 1}, "Invalid connection overwrite format: 1, only dict is supported."),
],
)
def test_overwrite_connections_invalid(self, connections, error_message):
with pytest.raises(InvalidFlowError) as e:
with variant_overwrite_context(
flow_path=FLOWS_DIR / "web_classification",
connections=connections,
):
pass
assert error_message in str(e.value)
def test_load_run(self):
input_dict = {
"data": (DATAS_DIR / "webClassification1.jsonl").resolve().as_posix(),
"column_mapping": {"context": "${data.context}"},
"flow": (FLOWS_DIR / "web_classification").resolve().as_posix(),
}
bulk_run = Run._load_from_dict(
data=input_dict, context={BASE_PATH_CONTEXT_KEY: FLOWS_DIR}, additional_message=""
)
assert isinstance(bulk_run, Run)
def test_dot_env_resolve(self):
run_id = str(uuid.uuid4())
source = f"{RUNS_DIR}/sample_bulk_run.yaml"
run = load_run(source=source, params_override=[{"name": run_id}])
assert run.environment_variables == {"FOO": "BAR"}
def test_run_invalid_flow_path(self):
run_id = str(uuid.uuid4())
source = f"{RUNS_DIR}/bulk_run_invalid_flow_path.yaml"
with pytest.raises(ValidationError) as e:
load_run(source=source, params_override=[{"name": run_id}])
assert "Can't find directory or file in resolved absolute path:" in str(e.value)
def test_run_invalid_remote_flow(self):
run_id = str(uuid.uuid4())
source = f"{RUNS_DIR}/bulk_run_invalid_remote_flow_str.yaml"
with pytest.raises(ValidationError) as e:
load_run(source=source, params_override=[{"name": run_id}])
assert "Invalid remote flow path. Currently only azureml:<flow-name> is supported" in str(e.value)
def test_data_not_exist_validation_error(self):
source = f"{RUNS_DIR}/sample_bulk_run.yaml"
with pytest.raises(ValidationError) as e:
load_run(source=source, params_override=[{"data": "not_exist"}])
assert "Can't find directory or file" in str(e.value)
assert "Invalid remote path." in str(e.value)
@pytest.mark.parametrize(
"source, error_msg",
[
(f"{RUNS_DIR}/illegal/non_exist_data.yaml", "Can't find directory or file"),
],
)
def test_invalid_yaml(self, source, error_msg):
with pytest.raises(ValidationError) as e:
create_yaml_run(source=source)
assert error_msg in str(e.value)
def test_run_bulk_invalid_params(self, pf):
# Test if function raises FileNotFoundError
with pytest.raises(FileNotFoundError):
pf.run(flow="invalid_path", data="fake_data")
with pytest.raises(FileNotFoundError):
pf.run(flow="invalid_path", data="fake_data", batch_run="fake_run")
def test_overwrite_variant(self):
flow_dag = {
"nodes": [
{
"name": "node1",
"use_variants": True,
"variant_id": "default",
"inputs": {
"param1": "value1",
"param2": "value2",
},
},
],
"node_variants": {
"node1": {
"default_variant_id": "variant1",
"variants": {
"variant1": {
"node": {
"inputs": {
"param1": "value1_variant1",
"param2": "value2_variant1",
},
},
},
},
},
},
}
# Test if function raises InvalidFlowError
with pytest.raises(InvalidFlowError):
overwrite_variant(flow_dag, "node3", "variant1")
with pytest.raises(InvalidFlowError):
overwrite_variant(flow_dag, "node1", "variant3")
# Test if function overwrites variant correctly
dag = copy.deepcopy(flow_dag)
overwrite_variant(dag, "node1", "variant1")
assert dag["nodes"][0]["inputs"]["param1"] == "value1_variant1"
assert dag["nodes"][0]["inputs"]["param2"] == "value2_variant1"
# test overwrite default variant
dag = copy.deepcopy(flow_dag)
overwrite_variant(dag)
assert dag["nodes"][0]["inputs"]["param1"] == "value1_variant1"
assert dag["nodes"][0]["inputs"]["param2"] == "value2_variant1"
@patch("promptflow._sdk.operations._run_operations.RunOperations.update")
def test_submit(self, mock_update):
# Define input parameters
flow_path = f"{FLOWS_DIR}/web_classification"
client = PFClient()
run_submitter = RunSubmitter(client.runs)
run = Run(
name=str(uuid.uuid4()),
flow=Path(flow_path),
data=f"{DATAS_DIR}/webClassification3.jsonl",
)
# Submit run
run_submitter.submit(run)
# Check if Run.update method was called
mock_update.assert_called_once()
def test_flow_run_with_non_english_inputs(self, pf):
flow_path = f"{FLOWS_DIR}/flow_with_non_english_input"
data = f"{FLOWS_DIR}/flow_with_non_english_input/data.jsonl"
run = pf.run(flow=flow_path, data=data, column_mapping={"text": "${data.text}"})
local_storage = LocalStorageOperations(run=run)
# assert non english in output.jsonl
output_jsonl_path = local_storage._outputs_path
with open(output_jsonl_path, "r", encoding="utf-8") as f:
outputs_text = f.readlines()
assert outputs_text == [
'{"line_number": 0, "output": "Hello 123 日本語"}\n',
'{"line_number": 1, "output": "World 123 日本語"}\n',
]
# assert non english in memory
outputs = local_storage.load_outputs()
assert outputs == {"output": ["Hello 123 日本語", "World 123 日本語"]}
@pytest.mark.usefixtures("enable_logger_propagate")
def test_flow_run_with_unknown_field(self, caplog):
run_yaml = Path(RUNS_DIR) / "sample_bulk_run.yaml"
load_run(source=run_yaml, params_override=[{"unknown_field": "unknown_value"}])
assert "Unknown fields found" in caplog.text
| promptflow/src/promptflow/tests/sdk_cli_test/unittests/test_run.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_cli_test/unittests/test_run.py",
"repo_id": "promptflow",
"token_count": 4503
} | 58 |
aaa=bbb
ccc=ddd
| promptflow/src/promptflow/tests/test_configs/connections/.env/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/connections/.env",
"repo_id": "promptflow",
"token_count": 11
} | 59 |
path: ./entry.py
entry: my_flow
environment:
python_requirements_txt: requirements.txt | promptflow/src/promptflow/tests/test_configs/eager_flows/simple_with_req/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/eager_flows/simple_with_req/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 29
} | 60 |
{
"text": "world"
} | promptflow/src/promptflow/tests/test_configs/flows/activate_with_no_inputs/inputs.json/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/activate_with_no_inputs/inputs.json",
"repo_id": "promptflow",
"token_count": 13
} | 61 |
# Stock EOD Price Analyzer
This sample demonstrates how the PromptFlow Assistant tool help with time series data (stock EOD price) retrieval, plot and consolidation.
Tools used in this flow:
- `get_or_create_thread` tool, python tool, used to provide assistant thread information if absent
- `add_message_and_run` tool, assistant tool, provisioned with below inner functions:
- `get_stock_eod_price``: get the stock eod price based on date and company name
## Prerequisites
Install promptflow sdk and other dependencies in this folder:
```bash
pip install -r requirements.txt
```
## What you will learn
In this flow, you will learn how assistant tools within PromptFlow are triggered by user prompts. The assistant tool decides which inner functions or tools to invoke based on the input provided. You are responsible for implementing each of these tools and registering them in the `assistant_definition`. Also be aware that the tools may depend on each other, which affects the order and manner of their invocation.
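As a minimal sketch of what such an inner function can look like (the `@tool` decorator usage mirrors the other samples in this repo; the function body is a hypothetical stand-in for a real market data lookup):
```python
from promptflow import tool


@tool
def get_stock_eod_price(date: str, company: str) -> float:
    """Get the stock EOD price based on date and company name.

    A real tool would query a market data source; this stub returns a
    placeholder value for illustration only.
    """
    print(f"Fetching EOD price for {company} on {date}")
    return 0.0
```
The assistant decides when to call this function from the user's prompt, so the docstring and parameter names should describe the tool accurately.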
## Getting started
### 1. Create assistant connection (openai)
Go to "Prompt flow" "Connections" tab. Click on "Create" button, select one of LLM tool supported connection types and fill in the configurations.
Currently, only "Open AI" connection type are supported for assistant tool. Please refer to [OpenAI](https://platform.openai.com/) for more details.
```bash
# Override keys with --set to avoid yaml file changes
pf connection create --file ../../../connections/azure_openai.yml --set api_key=<your_api_key>
```
Note in [flow.dag.yaml](flow.dag.yaml) we are using connection named `open_ai_connection`.
```bash
# show registered connection
pf connection show --name open_ai_connection
```
### 2. Create or get assistant/thread
Navigate to the OpenAI Assistant page and create an assistant if you haven't already. Once created, click on the 'Test' button to enter the assistant's playground. Make sure to note down the assistant_id.
**[Optional]** Start a chat session to create a thread automatically. Keep track of the thread_id.
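If you prefer to script this step, here is a minimal sketch using the official `openai` Python client (the assistant name and model are assumptions; adjust them to your account):
```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# Create an assistant and a thread, then note down their ids for the flow.
assistant = client.beta.assistants.create(name="stock-analyzer", model="gpt-4-1106-preview")
thread = client.beta.threads.create()
print(assistant.id, thread.id)
```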
### 3. Run the flow
```bash
# run chat flow with default question in flow.dag.yaml
pf flow test --flow .
```
| promptflow/src/promptflow/tests/test_configs/flows/assistant-with-file/README.md/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/assistant-with-file/README.md",
"repo_id": "promptflow",
"token_count": 569
} | 62 |
inputs:
text:
type: string
outputs:
output_prompt:
type: string
reference: ${echo_my_prompt.output}
nodes:
- inputs:
text: ${inputs.text}
name: hello_prompt
type: prompt
source:
type: code
path: hello.jinja2
- inputs:
prompt: ${hello_prompt.output}
deployment_name: gpt-35-turbo
max_tokens: "120"
connection: azure_open_ai_connection
name: echo_my_prompt
type: python
source:
type: code
path: hello.py
node_variants: {}
| promptflow/src/promptflow/tests/test_configs/flows/basic-with-connection/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/basic-with-connection/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 206
} | 63 |
import random
import time
from promptflow import tool
@tool
def get_temperature(city: str, unit: str = "c"):
"""Estimate the current temperature of a given city.
:param city: city to get the estimated temperature for.
:type city: str
:param unit: the unit of the temperature, either 'c' for Celsius or 'f' for Fahrenheit.
Defaults to Celsius ('c').
:type unit: str
"""
    # Sleep for a random duration between 0.2 and 1 second for tracing purposes
time.sleep(random.uniform(0.2, 1))
return random.uniform(0, 35)
| promptflow/src/promptflow/tests/test_configs/flows/chat-with-assistant-no-file/get_temperature.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/chat-with-assistant-no-file/get_temperature.py",
"repo_id": "promptflow",
"token_count": 193
} | 64 |
inputs:
variant_id:
type: string
groundtruth:
type: string
description: Please specify the groundtruth column, which contains the true label
to the outputs that your flow produces.
prediction:
type: string
description: Please specify the prediction column, which contains the predicted
outputs that your flow produces.
outputs:
grade:
type: string
reference: ${grade.output}
nodes:
- name: grade
type: python
source:
type: code
path: grade.py
inputs:
groundtruth: ${inputs.groundtruth}
prediction: ${inputs.prediction}
- name: calculate_accuracy
type: python
source:
type: code
path: calculate_accuracy.py
inputs:
grades: ${grade.output}
variant_ids: ${inputs.variant_id}
aggregation: true
- name: aggregation_assert
type: python
source:
type: code
path: aggregation_assert.py
inputs:
input1: ${inputs.groundtruth}
input2: ${inputs.prediction}
aggregation: true
| promptflow/src/promptflow/tests/test_configs/flows/classification_accuracy_evaluation/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/classification_accuracy_evaluation/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 384
} | 65 |
from promptflow import tool
@tool
def extract_job_info(incident_content: str) -> str:
print(f"Incident: {incident_content}")
return "Execute job info extractor"
| promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_activate/job_info_extractor.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_activate/job_info_extractor.py",
"repo_id": "promptflow",
"token_count": 59
} | 66 |
{
"package": {},
"code": {
"print_env.py": {
"type": "python",
"inputs": {
"key": {
"type": [
"string"
]
}
},
"function": "get_env_var"
}
}
}
| promptflow/src/promptflow/tests/test_configs/flows/custom_connection_flow/.promptflow/flow.tools.json/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/custom_connection_flow/.promptflow/flow.tools.json",
"repo_id": "promptflow",
"token_count": 144
} | 67 |
from promptflow import tool
from promptflow.contracts.multimedia import Image
@tool
def passthrough_list(image_list: list, image_dict: dict):
assert all(isinstance(item, Image) for item in image_list)
return image_list
| promptflow/src/promptflow/tests/test_configs/flows/eval_flow_with_composite_image/passthrough_list.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/eval_flow_with_composite_image/passthrough_list.py",
"repo_id": "promptflow",
"token_count": 71
} | 68 |
echo Hello Promptflow!
| promptflow/src/promptflow/tests/test_configs/flows/export/linux/flow/setup.sh/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/export/linux/flow/setup.sh",
"repo_id": "promptflow",
"token_count": 6
} | 69 |
inputs:
text:
type: string
outputs:
output:
type: object
reference: ${hello_node.output}
nodes:
- inputs:
text: ${inputs.text}
connection: basic_custom_connection
name: hello_node
type: python
source:
type: code
path: hello.py
node_variants: {}
| promptflow/src/promptflow/tests/test_configs/flows/flow_with_custom_connection/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/flow_with_custom_connection/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 113
} | 70 |
{{text}} | promptflow/src/promptflow/tests/test_configs/flows/flow_with_non_english_input/hello.jinja2/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/flow_with_non_english_input/hello.jinja2",
"repo_id": "promptflow",
"token_count": 3
} | 71 |
import os
import sys
from promptflow import tool
sys.path.append(f"{os.path.dirname(__file__)}/custom_lib")
from custom_lib.foo import foo
@tool
def my_python_tool(input1: str) -> str:
return foo(param=input1)
| promptflow/src/promptflow/tests/test_configs/flows/flow_with_sys_inject/hello.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/flow_with_sys_inject/hello.py",
"repo_id": "promptflow",
"token_count": 81
} | 72 |
{# Prompt is a jinja2 template that generates prompt for LLM #}
system:
You are a bot can tell good jokes
user:
A joke about {{topic}} please
| promptflow/src/promptflow/tests/test_configs/flows/llm_tool/joke.jinja2/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/llm_tool/joke.jinja2",
"repo_id": "promptflow",
"token_count": 46
} | 73 |
outputs:
nodes:
- name: say_hello
type: python
source:
type: code
path: say_hello.py
| promptflow/src/promptflow/tests/test_configs/flows/no_inputs_outputs/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/no_inputs_outputs/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 43
} | 74 |
Please summarize the following content in one paragraph. 50 words.
Do not add any information that is not in the content.
Text: {{text}}
Images:
![image]({{image1}})
![ image]({{image2}})
![image ]({{image3}})
![ image ]({{image4}})
Video:
![video]({{video1}})
Summary: | promptflow/src/promptflow/tests/test_configs/flows/prompt_tools/summarize_text_content_prompt.jinja2/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/prompt_tools/summarize_text_content_prompt.jinja2",
"repo_id": "promptflow",
"token_count": 100
} | 75 |
from promptflow import tool
@tool
def passthrough_list(image_list: list, image_dict: dict):
return image_list
| promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_composite_image/passthrough_list.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_composite_image/passthrough_list.py",
"repo_id": "promptflow",
"token_count": 38
} | 76 |
inputs:
image_1:
type: image
default: logo.jpg
image_2:
type: image
outputs:
output:
type: image
reference: ${python_node.output}
nodes:
- name: python_node
type: python
source:
type: code
path: pick_an_image.py
inputs:
image_1: ${inputs.image_1}
image_2: ${inputs.image_2}
| promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_simple_image_with_default/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_simple_image_with_default/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 143
} | 77 |
inputs:
text:
type: string
outputs:
output_prompt:
type: string
reference: ${node1.output}
nodes:
- name: node1
type: python
source:
type: code
path: script_with___file__.py
inputs:
input1: ${inputs.text}
- name: node2
type: python
source:
type: code
path: folder/another-tool.py
inputs:
input1: ${node1.output}
- name: node3
type: python
source:
type: code
path: folder/another-tool.py
inputs:
input1: random value | promptflow/src/promptflow/tests/test_configs/flows/script_with___file__/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/script_with___file__/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 201
} | 78 |
from promptflow import tool
def raise_exception(s):
msg = f"In raise_exception: {s}"
raise Exception(msg)
@tool
def raise_an_exception(s: str):
try:
raise_exception(s)
except Exception as e:
raise Exception(f"In tool raise_an_exception: {s}") from e
| promptflow/src/promptflow/tests/test_configs/flows/sync_tools_failures/sync_fail.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/sync_tools_failures/sync_fail.py",
"repo_id": "promptflow",
"token_count": 117
} | 79 |
[
{
"url": "https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h"
},
{
"url": "https://www.microsoft.com/en-us/windows/"
}
]
| promptflow/src/promptflow/tests/test_configs/flows/web_classification/samples.json/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/web_classification/samples.json",
"repo_id": "promptflow",
"token_count": 86
} | 80 |
[
{
"line_number": 0,
"variant_id": "variant_0",
"groundtruth": "App",
"prediction": "App"
}
]
| promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants_unordered/samples.json/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants_unordered/samples.json",
"repo_id": "promptflow",
"token_count": 72
} | 81 |
interactions:
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 promptflow/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
    uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000?api-version=2023-08-01-preview
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000",
"name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location":
"eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic",
"tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}'
headers:
cache-control:
- no-cache
content-length:
- '3630'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
vary:
- Accept-Encoding
x-cache:
- CONFIG_NOCACHE
x-content-type-options:
- nosniff
x-request-time:
- '0.023'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 promptflow/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?api-version=2023-04-01-preview&count=30&isDefault=true&orderByAsc=false
response:
body:
string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}]}'
headers:
cache-control:
- no-cache
content-length:
- '1372'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
vary:
- Accept-Encoding
x-cache:
- CONFIG_NOCACHE
x-content-type-options:
- nosniff
x-request-time:
- '0.058'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 promptflow/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/Connections/azure_open_ai_connection
response:
body:
string: '{"connectionName": "azure_open_ai_connection", "connectionType": "AzureOpenAI",
"configs": {"api_base": "https://fake.openai.azure.com", "api_key": null,
"api_type": "azure", "api_version": "2023-07-01-preview", "resource_id": null},
"owner": {"userName": "[email protected]"}, "createdDate": "2023-08-22T10:15:34.5762053Z",
"lastModifiedDate": "2023-08-22T10:15:34.5762053Z"}'
headers:
connection:
- keep-alive
content-length:
- '366'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.382'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 promptflow/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/Connections/custom_connection
response:
body:
string: '{"connectionName": "custom_connection", "connectionType": "Custom",
"owner": {"userName": "[email protected]"}, "createdDate": "2023-06-19T20:56:12.0353964Z",
"lastModifiedDate": "2023-06-19T20:56:12.0353964Z"}'
headers:
connection:
- keep-alive
content-length:
- '204'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
x-content-type-options:
- nosniff
x-request-time:
- '0.367'
status:
code: 200
message: OK
version: 1
| promptflow/src/promptflow/tests/test_configs/recordings/test_connection_operations_TestConnectionOperations_test_get_connection.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/recordings/test_connection_operations_TestConnectionOperations_test_get_connection.yaml",
"repo_id": "promptflow",
"token_count": 2853
} | 82 |
interactions:
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.0 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000",
"name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location":
"eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic",
"tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}'
headers:
cache-control:
- no-cache
content-length:
- '3630'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.031'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.0 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false
response:
body:
string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}]}'
headers:
cache-control:
- no-cache
content-length:
- '1372'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.066'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.0 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}'
headers:
cache-control:
- no-cache
content-length:
- '1227'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.075'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.0 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets
response:
body:
string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}'
headers:
cache-control:
- no-cache
content-length:
- '134'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.125'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.11.5 (Windows-10-10.0.22621-SP0)
x-ms-date:
- Thu, 25 Jan 2024 09:16:18 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/simple_eager_flow_data.jsonl
response:
body:
string: ''
headers:
accept-ranges:
- bytes
content-length:
- '25'
content-md5:
- zt1zN1V/HR5p7N0Sh5396w==
content-type:
- application/octet-stream
last-modified:
- Tue, 23 Jan 2024 06:27:00 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- Tue, 23 Jan 2024 06:26:59 GMT
x-ms-meta-name:
- 1e376ce4-7c3b-4683-82ad-412f5cd23626
x-ms-meta-upload_status:
- completed
x-ms-meta-version:
- 7e65351c-7e4b-4a4d-90f8-304eacdc36bc
x-ms-version:
- '2023-11-03'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.11.5 (Windows-10-10.0.22621-SP0)
x-ms-date:
- Thu, 25 Jan 2024 09:16:21 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/simple_eager_flow_data.jsonl
response:
body:
string: ''
headers:
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
- chunked
vary:
- Origin
x-ms-error-code:
- BlobNotFound
x-ms-version:
- '2023-11-03'
status:
code: 404
message: The specified blob does not exist.
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.0 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}'
headers:
cache-control:
- no-cache
content-length:
- '1227'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.108'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.0 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets
response:
body:
string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}'
headers:
cache-control:
- no-cache
content-length:
- '134'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.088'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.11.5 (Windows-10-10.0.22621-SP0)
x-ms-date:
- Thu, 25 Jan 2024 09:16:25 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/simple_with_req/entry.py
response:
body:
string: ''
headers:
accept-ranges:
- bytes
content-length:
- '331'
content-md5:
- bf0G3F/eNgZO8UPfGebSUQ==
content-type:
- application/octet-stream
last-modified:
- Thu, 25 Jan 2024 08:57:55 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- Thu, 25 Jan 2024 08:57:54 GMT
x-ms-meta-name:
- c42d946f-2978-4455-8a89-b768c66a9277
x-ms-meta-upload_status:
- completed
x-ms-meta-version:
- '1'
x-ms-version:
- '2023-11-03'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.11.5 (Windows-10-10.0.22621-SP0)
x-ms-date:
- Thu, 25 Jan 2024 09:16:28 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/simple_with_req/entry.py
response:
body:
string: ''
headers:
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
- chunked
vary:
- Origin
x-ms-error-code:
- BlobNotFound
x-ms-version:
- '2023-11-03'
status:
code: 404
message: The specified blob does not exist.
- request:
body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath":
"LocalUpload/000000000000000000000000000000000000/simple_with_req/flow.dag.yaml",
"runId": "name", "runDisplayName": "name", "runExperimentName": "", "batchDataInput":
{"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/000000000000000000000000000000000000/simple_eager_flow_data.jsonl"},
"inputsMapping": {}, "connections": {}, "environmentVariables": {}, "runtimeName":
"fake-runtime-name", "sessionId": "000000000000000000000000000000000000000000000000",
"sessionSetupMode": "SystemWait", "flowLineageId": "0000000000000000000000000000000000000000000000000000000000000000",
"runDisplayNameGenerationType": "UserProvidedMacro"}'
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '791'
Content-Type:
- application/json
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: POST
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit
response:
body:
string: '"name"'
headers:
connection:
- keep-alive
content-length:
- '38'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
x-content-type-options:
- nosniff
x-request-time:
- '6.330'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name
response:
body:
string: '{"flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/name/flowRuns/name",
"flowRunId": "name", "flowRunDisplayName": "name", "batchDataInput": {"dataUri":
"azureml://datastores/workspaceblobstore/paths/LocalUpload/e62bc4d5a164939b21d42dd420469da7/simple_eager_flow_data.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "automatic",
"inputsMapping": {}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath":
"promptflow/PromptFlowArtifacts/name/flow_artifacts", "flowDagFileRelativePath":
"flow.dag.yaml", "flowSnapshotId": "1ac134f8-0696-4ce1-ba89-cd4d9748e886",
"studioPortalEndpoint": "https://ml.azure.com/runs/name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '1028'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.273'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name
response:
body:
string: '{"flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/name/flowRuns/name",
"flowRunId": "name", "flowRunDisplayName": "name", "batchDataInput": {"dataUri":
"azureml://datastores/workspaceblobstore/paths/LocalUpload/e62bc4d5a164939b21d42dd420469da7/simple_eager_flow_data.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "automatic",
"inputsMapping": {}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath":
"promptflow/PromptFlowArtifacts/name/flow_artifacts", "flowDagFileRelativePath":
"flow.dag.yaml", "flowSnapshotId": "1ac134f8-0696-4ce1-ba89-cd4d9748e886",
"studioPortalEndpoint": "https://ml.azure.com/runs/name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '1028'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.372'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name
response:
body:
string: '{"flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/name/flowRuns/name",
"flowRunId": "name", "flowRunDisplayName": "name", "batchDataInput": {"dataUri":
"azureml://datastores/workspaceblobstore/paths/LocalUpload/e62bc4d5a164939b21d42dd420469da7/simple_eager_flow_data.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "automatic",
"inputsMapping": {}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath":
"promptflow/PromptFlowArtifacts/name/flow_artifacts", "flowDagFileRelativePath":
"flow.dag.yaml", "flowSnapshotId": "1ac134f8-0696-4ce1-ba89-cd4d9748e886",
"studioPortalEndpoint": "https://ml.azure.com/runs/name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '1028'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.420'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name
response:
body:
string: '{"flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/name/flowRuns/name",
"flowRunId": "name", "flowRunDisplayName": "name", "batchDataInput": {"dataUri":
"azureml://datastores/workspaceblobstore/paths/LocalUpload/e62bc4d5a164939b21d42dd420469da7/simple_eager_flow_data.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "automatic",
"inputsMapping": {}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath":
"promptflow/PromptFlowArtifacts/name/flow_artifacts", "flowDagFileRelativePath":
"flow.dag.yaml", "flowSnapshotId": "1ac134f8-0696-4ce1-ba89-cd4d9748e886",
"studioPortalEndpoint": "https://ml.azure.com/runs/name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '1028'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.469'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name
response:
body:
string: '{"flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/name/flowRuns/name",
"flowRunId": "name", "flowRunDisplayName": "name", "batchDataInput": {"dataUri":
"azureml://datastores/workspaceblobstore/paths/LocalUpload/e62bc4d5a164939b21d42dd420469da7/simple_eager_flow_data.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "automatic",
"inputsMapping": {}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath":
"promptflow/PromptFlowArtifacts/name/flow_artifacts", "flowDagFileRelativePath":
"flow.dag.yaml", "flowSnapshotId": "1ac134f8-0696-4ce1-ba89-cd4d9748e886",
"studioPortalEndpoint": "https://ml.azure.com/runs/name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '1028'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.173'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name
response:
body:
string: '{"flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/name/flowRuns/name",
"flowRunId": "name", "flowRunDisplayName": "name", "batchDataInput": {"dataUri":
"azureml://datastores/workspaceblobstore/paths/LocalUpload/e62bc4d5a164939b21d42dd420469da7/simple_eager_flow_data.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "automatic",
"inputsMapping": {}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath":
"promptflow/PromptFlowArtifacts/name/flow_artifacts", "flowDagFileRelativePath":
"flow.dag.yaml", "flowSnapshotId": "1ac134f8-0696-4ce1-ba89-cd4d9748e886",
"studioPortalEndpoint": "https://ml.azure.com/runs/name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '1028'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.214'
status:
code: 200
message: OK
- request:
body: '{}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '2'
Content-Type:
- application/json
User-Agent:
- python-requests/2.31.0
method: POST
uri: https://eastus.api.azureml.ms/metric/v2.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/runs/name/lastvalues
response:
body:
string: '{"value": [{"dataContainerId": "dcid.name", "name": "__pf__.lines.completed",
"columns": {"__pf__.lines.completed": "Double"}, "properties": {"uxMetricType":
"azureml.v1.scalar", "dataLocation": null}, "namespace": null, "standardSchemaId":
null, "value": [{"metricId": "acecc185-aa1e-4e95-b342-d391d260bc46", "createdUtc":
"2024-01-25T09:17:00.198+00:00", "step": 0, "data": {"__pf__.lines.completed":
1.0}}]}, {"dataContainerId": "dcid.name", "name": "__pf__.lines.failed", "columns":
{"__pf__.lines.failed": "Double"}, "properties": {"uxMetricType": "azureml.v1.scalar",
"dataLocation": null}, "namespace": null, "standardSchemaId": null, "value":
[{"metricId": "aeeb8ed5-aca3-475d-a76d-a58ecc12ad99", "createdUtc": "2024-01-25T09:17:00.563+00:00",
"step": 0, "data": {"__pf__.lines.failed": 0.0}}]}]}'
headers:
connection:
- keep-alive
content-length:
- '1240'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.066'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name
response:
body:
string: '{"flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/name/flowRuns/name",
"flowRunId": "name", "flowRunDisplayName": "name", "batchDataInput": {"dataUri":
"azureml://datastores/workspaceblobstore/paths/LocalUpload/e62bc4d5a164939b21d42dd420469da7/simple_eager_flow_data.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "automatic",
"inputsMapping": {}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath":
"promptflow/PromptFlowArtifacts/name/flow_artifacts", "flowDagFileRelativePath":
"flow.dag.yaml", "flowSnapshotId": "1ac134f8-0696-4ce1-ba89-cd4d9748e886",
"studioPortalEndpoint": "https://ml.azure.com/runs/name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '1028'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.292'
status:
code: 200
message: OK
- request:
body: '{"value": "azureml://locations/eastus/workspaces/00000/data/azureml_name_output_data_debug_info/versions/1"}'
headers:
accept:
- '*/*'
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '171'
content-type:
- application/json
host:
- eastus.api.azureml.ms
user-agent:
- python-httpx/0.26.0
method: POST
uri: https://eastus.api.azureml.ms/data/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/dataversion/getByAssetId
response:
content: '{"dataVersion": {"assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_name_output_data_debug_info/versions/1",
"dataContainerName": "azureml_name_output_data_debug_info", "dataType": "UriFolder",
"dataUri": "azureml://subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/workspaces/00000/datastores/workspaceblobstore/paths/promptflow/PromptFlowArtifacts/name/",
"versionId": "1", "mutableProps": {"dataExpiryTime": null, "description": null,
"tags": null, "isArchived": false, "stage": "Logged", "autoDeleteSetting": null},
"referencedDataUris": null, "properties": null, "initialAssetId": "azureml://locations/eastus/workspaces/00000/data/azureml_name_output_data_debug_info/versions/1",
"isRegistered": false, "runId": "name", "originAssetId": null}, "entityMetadata":
{"etag": "\"87008635-0000-0100-0000-65b2270d0000\"", "createdTime": "2024-01-25T09:17:01.3632335+00:00",
"modifiedTime": "2024-01-25T09:17:01.3970754+00:00", "createdBy": {"userObjectId":
"00000000-0000-0000-0000-000000000000", "userPuId": "100320005227D154", "userIdp":
null, "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "Han Wang",
"upn": "[email protected]"}, "modifiedBy": null}, "legacyDatasetId": "c78cba45-2c03-4f27-8f6d-7a0f7fcaa3fc",
"isV2": true, "legacyDatasetType": null, "legacyDataflowType": null, "legacyDataflow":
null, "legacySavedDatasetId": null, "putAssetLROResponseDto": null}'
headers:
connection:
- keep-alive
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.064'
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"snapshotOrAssetId": "1ac134f8-0696-4ce1-ba89-cd4d9748e886"}'
headers:
accept:
- '*/*'
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '61'
content-type:
- application/json
host:
- eastus.api.azureml.ms
user-agent:
- python-httpx/0.26.0
method: POST
uri: https://eastus.api.azureml.ms/content/v2.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/snapshots/sas
response:
content: '{"name": "", "hash": null, "type": "Directory", "timestamp": "0001-01-01T00:00:00+00:00",
"sasUrl": null, "absoluteUrl": null, "sizeBytes": 0, "sizeSet": false, "children":
{"entry.py": {"name": "entry.py", "hash": "6DFD06DC5FDE36064EF143DF19E6D251",
"type": "File", "timestamp": "0001-01-01T00:00:00+00:00", "sasUrl": "https://promptfloweast4063704120.blob.core.windows.net/azureml-blobstore-3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/runs/name/entry.py?sv=2019-07-07&sr=b&sig=uNX4%2F%2BOW1NSRGOeyt5nunbdGqVOmeV84SX9FrinPAaU%3D&st=2024-01-25T09%3A08%3A38Z&se=2024-01-25T17%3A18%3A38Z&sp=r&rscd=filename%3Dentry.py",
"absoluteUrl": "https://promptfloweast4063704120.blob.core.windows.net/azureml-blobstore-3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/runs/name/entry.py",
"sizeBytes": 331, "sizeSet": true, "children": {}}, "flow.dag.yaml": {"name":
"flow.dag.yaml", "hash": "E9583EF05B4434346632808F5714E8B0", "type": "File",
"timestamp": "0001-01-01T00:00:00+00:00", "sasUrl": "https://promptfloweast4063704120.blob.core.windows.net/azureml-blobstore-3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/runs/name/flow.dag.yaml?sv=2019-07-07&sr=b&sig=3plGLhXfe%2Fudi%2Fm0Z7%2FSz7Cte1sDfk8cDwBsYGfvMrQ%3D&st=2024-01-25T09%3A08%3A38Z&se=2024-01-25T17%3A18%3A38Z&sp=r&rscd=filename%3Dflow.dag.yaml",
"absoluteUrl": "https://promptfloweast4063704120.blob.core.windows.net/azureml-blobstore-3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/runs/name/flow.dag.yaml",
"sizeBytes": 93, "sizeSet": true, "children": {}}, "requirements.txt": {"name":
"requirements.txt", "hash": "93AEF5DCE3D6FE02C4425D72C0E1DABD", "type": "File",
"timestamp": "0001-01-01T00:00:00+00:00", "sasUrl": "https://promptfloweast4063704120.blob.core.windows.net/azureml-blobstore-3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/runs/name/requirements.txt?sv=2019-07-07&sr=b&sig=KJrkxjj4vFvaXoSEzbexwXAJIkisbxTIgI8M1l0%2FWlY%3D&st=2024-01-25T09%3A08%3A38Z&se=2024-01-25T17%3A18%3A38Z&sp=r&rscd=filename%3Drequirements.txt",
"absoluteUrl": "https://promptfloweast4063704120.blob.core.windows.net/azureml-blobstore-3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/runs/name/requirements.txt",
"sizeBytes": 109, "sizeSet": true, "children": {}}}}'
headers:
connection:
- keep-alive
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.142'
http_version: HTTP/1.1
status_code: 200
- request:
body: null
headers:
Accept:
- application/xml
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.11.5 (Windows-10-10.0.22621-SP0)
x-ms-date:
- Thu, 25 Jan 2024 09:18:43 GMT
x-ms-range:
- bytes=0-33554431
x-ms-version:
- '2023-11-03'
method: GET
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/runs/name/requirements.txt
response:
body:
string: "--extra-index-url https://azuremlsdktestpypi.azureedge.net/test-promptflow/\r\npromptflow[azure]==0.0.116642424"
headers:
accept-ranges:
- bytes
content-length:
- '109'
content-range:
- bytes 0-108/109
content-type:
- application/octet-stream
last-modified:
- Thu, 25 Jan 2024 09:16:44 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-content-md5:
- k6713OPW/gLEQl1ywOHavQ==
x-ms-blob-type:
- BlockBlob
x-ms-copy-completion-time:
- Thu, 25 Jan 2024 09:16:44 GMT
x-ms-copy-id:
- bcd3c079-6acb-4b8d-9af6-b23b87ffca84
x-ms-copy-progress:
- 109/109
x-ms-copy-source:
- https://promptfloweast4063704120.blob.core.windows.net/azureml-blobstore-3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/LocalUpload/79819672296a1785a95c65c8c0e75b0d/simple_with_req/requirements.txt
x-ms-copy-status:
- success
x-ms-creation-time:
- Thu, 25 Jan 2024 09:16:44 GMT
x-ms-version:
- '2023-11-03'
status:
code: 206
message: Partial Content
- request:
body: null
headers:
Accept:
- application/xml
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.11.5 (Windows-10-10.0.22621-SP0)
x-ms-date:
- Thu, 25 Jan 2024 09:18:42 GMT
x-ms-range:
- bytes=0-33554431
x-ms-version:
- '2023-11-03'
method: GET
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/runs/name/entry.py
response:
body:
string: "# ---------------------------------------------------------\r\n# Copyright
(c) Microsoft Corporation. All rights reserved.\r\n# ---------------------------------------------------------\r\n\r\ndef
my_flow(input_val) -> str:\r\n \"\"\"Simple flow with yaml.\"\"\"\r\n print(f\"Hello
world! {input_val}\")\r\n return f\"Hello world! {input_val}\"\r\n"
headers:
accept-ranges:
- bytes
content-length:
- '331'
content-range:
- bytes 0-330/331
content-type:
- application/octet-stream
last-modified:
- Thu, 25 Jan 2024 09:16:44 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-content-md5:
- bf0G3F/eNgZO8UPfGebSUQ==
x-ms-blob-type:
- BlockBlob
x-ms-copy-completion-time:
- Thu, 25 Jan 2024 09:16:44 GMT
x-ms-copy-id:
- 5b6fc216-f239-42fa-a163-c92db62cb949
x-ms-copy-progress:
- 331/331
x-ms-copy-source:
- https://promptfloweast4063704120.blob.core.windows.net/azureml-blobstore-3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/LocalUpload/79819672296a1785a95c65c8c0e75b0d/simple_with_req/entry.py
x-ms-copy-status:
- success
x-ms-creation-time:
- Thu, 25 Jan 2024 09:16:44 GMT
x-ms-meta-name:
- c42d946f-2978-4455-8a89-b768c66a9277
x-ms-meta-upload_status:
- completed
x-ms-meta-version:
- '1'
x-ms-version:
- '2023-11-03'
status:
code: 206
message: Partial Content
- request:
body: null
headers:
Accept:
- application/xml
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.11.5 (Windows-10-10.0.22621-SP0)
x-ms-date:
- Thu, 25 Jan 2024 09:18:38 GMT
x-ms-version:
- '2023-11-03'
method: GET
uri: https://fake_account_name.blob.core.windows.net/fake-container-name?comp=list&prefix=promptflow%2FPromptFlowArtifacts%2Fname%2F&restype=container
response:
body:
string: "\uFEFF<?xml version=\"1.0\" encoding=\"utf-8\"?><EnumerationResults
ServiceEndpoint=\"https://promptfloweast4063704120.blob.core.windows.net/\"
ContainerName=\"azureml-blobstore-3e123da1-f9a5-4c91-9234-8d9ffbb39ff5\"><Prefix>promptflow/PromptFlowArtifacts/name/</Prefix><Blobs><Blob><Name>promptflow/PromptFlowArtifacts/name/flow_artifacts/000000000_000000024.jsonl</Name><Properties><Creation-Time>Thu,
25 Jan 2024 09:16:58 GMT</Creation-Time><Last-Modified>Thu, 25 Jan 2024 09:16:58
GMT</Last-Modified><Etag>0x8DC1D8661A992B8</Etag><Content-Length>1132</Content-Length><Content-Type>application/octet-stream</Content-Type><Content-Encoding
/><Content-Language /><Content-CRC64 /><Content-MD5 /><Cache-Control /><Content-Disposition
/><BlobType>AppendBlob</BlobType><LeaseStatus>unlocked</LeaseStatus><LeaseState>available</LeaseState><ServerEncrypted>true</ServerEncrypted></Properties><OrMetadata
/></Blob><Blob><Name>promptflow/PromptFlowArtifacts/name/flow_outputs/output.jsonl</Name><Properties><Creation-Time>Thu,
25 Jan 2024 09:17:01 GMT</Creation-Time><Last-Modified>Thu, 25 Jan 2024 09:17:01
GMT</Last-Modified><Etag>0x8DC1D8663B60064</Etag><Content-Length>52</Content-Length><Content-Type>application/octet-stream</Content-Type><Content-Encoding
/><Content-Language /><Content-CRC64 /><Content-MD5>e7CC/hOg8uoJ3TK2HaKeZA==</Content-MD5><Cache-Control
/><Content-Disposition /><BlobType>BlockBlob</BlobType><AccessTier>Hot</AccessTier><AccessTierInferred>true</AccessTierInferred><LeaseStatus>unlocked</LeaseStatus><LeaseState>available</LeaseState><ServerEncrypted>true</ServerEncrypted></Properties><OrMetadata
/></Blob><Blob><Name>promptflow/PromptFlowArtifacts/name/instance_results.jsonl</Name><Properties><Creation-Time>Thu,
25 Jan 2024 09:16:58 GMT</Creation-Time><Last-Modified>Thu, 25 Jan 2024 09:16:58
GMT</Last-Modified><Etag>0x8DC1D8661AE6F40</Etag><Content-Length>105</Content-Length><Content-Type>application/octet-stream</Content-Type><Content-Encoding
/><Content-Language /><Content-CRC64 /><Content-MD5 /><Cache-Control /><Content-Disposition
/><BlobType>AppendBlob</BlobType><LeaseStatus>unlocked</LeaseStatus><LeaseState>available</LeaseState><ServerEncrypted>true</ServerEncrypted></Properties><OrMetadata
/></Blob><Blob><Name>promptflow/PromptFlowArtifacts/name/meta.json</Name><Properties><Creation-Time>Thu,
25 Jan 2024 09:16:52 GMT</Creation-Time><Last-Modified>Thu, 25 Jan 2024 09:16:52
GMT</Last-Modified><Etag>0x8DC1D865E80BFF2</Etag><Content-Length>18</Content-Length><Content-Type>application/octet-stream</Content-Type><Content-Encoding
/><Content-Language /><Content-CRC64 /><Content-MD5>/u1NXUpgXMFDmZEw835qnw==</Content-MD5><Cache-Control
/><Content-Disposition /><BlobType>BlockBlob</BlobType><AccessTier>Hot</AccessTier><AccessTierInferred>true</AccessTierInferred><LeaseStatus>unlocked</LeaseStatus><LeaseState>available</LeaseState><ServerEncrypted>true</ServerEncrypted></Properties><OrMetadata
/></Blob></Blobs><NextMarker /></EnumerationResults>"
headers:
content-type:
- application/xml
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
- chunked
vary:
- Origin
x-ms-version:
- '2023-11-03'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.11.5 (Windows-10-10.0.22621-SP0)
x-ms-date:
- Thu, 25 Jan 2024 09:18:44 GMT
x-ms-range:
- bytes=0-33554431
x-ms-version:
- '2023-11-03'
method: GET
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/promptflow/PromptFlowArtifacts/name/flow_artifacts/000000000_000000024.jsonl
response:
body:
string: '{"line_number": 0, "run_info": {"run_id": "name_0", "status": "Completed",
"error": null, "inputs": {"input_val": "input1"}, "output": {"output": "Hello
world! input1"}, "metrics": null, "request": null, "parent_run_id": "name",
"root_run_id": "name", "source_run_id": null, "flow_id": "default_flow_id",
"start_time": "2024-01-25T09:16:58.053129Z", "end_time": "2024-01-25T09:16:58.055292Z",
"index": 0, "api_calls": [{"name": "my_flow", "type": "Function", "inputs":
{"input_val": "input1"}, "output": "Hello world! input1", "start_time": 1706174218.053984,
"end_time": 1706174218.054859, "error": null, "children": [], "node_name":
null, "parent_id": "", "id": "27b021bc-89d3-43f8-b3ac-a9eed2a84949"}], "variant_id":
"", "name": "", "description": "", "tags": null, "system_metrics": {"duration":
0.002163}, "result": {"output": "Hello world! input1"}, "upload_metrics":
false}, "start_time": "2024-01-25T09:16:58.053129", "end_time": "2024-01-25T09:16:58.055292",
"name": "", "description": "", "status": "Completed", "tags": null}
'
headers:
accept-ranges:
- bytes
content-length:
- '1132'
content-range:
- bytes 0-1131/1132
content-type:
- application/octet-stream
last-modified:
- Thu, 25 Jan 2024 09:16:58 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-committed-block-count:
- '1'
x-ms-blob-type:
- AppendBlob
x-ms-creation-time:
- Thu, 25 Jan 2024 09:16:58 GMT
x-ms-version:
- '2023-11-03'
status:
code: 206
message: Partial Content
- request:
body: null
headers:
Accept:
- application/xml
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.11.5 (Windows-10-10.0.22621-SP0)
x-ms-date:
- Thu, 25 Jan 2024 09:18:44 GMT
x-ms-range:
- bytes=0-33554431
x-ms-version:
- '2023-11-03'
method: GET
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/promptflow/PromptFlowArtifacts/name/flow_outputs/output.jsonl
response:
body:
string: '{"line_number": 0, "output": "Hello world! input1"}
'
headers:
accept-ranges:
- bytes
content-length:
- '52'
content-range:
- bytes 0-51/52
content-type:
- application/octet-stream
last-modified:
- Thu, 25 Jan 2024 09:17:01 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-content-md5:
- e7CC/hOg8uoJ3TK2HaKeZA==
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- Thu, 25 Jan 2024 09:17:01 GMT
x-ms-version:
- '2023-11-03'
status:
code: 206
message: Partial Content
- request:
body: null
headers:
Accept:
- application/xml
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.11.5 (Windows-10-10.0.22621-SP0)
x-ms-date:
- Thu, 25 Jan 2024 09:18:44 GMT
x-ms-range:
- bytes=0-33554431
x-ms-version:
- '2023-11-03'
method: GET
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/promptflow/PromptFlowArtifacts/name/instance_results.jsonl
response:
body:
string: '{"line_number": 0, "status": "Completed", "inputs.input_val": "input1",
"output": "Hello world! input1"}
'
headers:
accept-ranges:
- bytes
content-length:
- '105'
content-range:
- bytes 0-104/105
content-type:
- application/octet-stream
last-modified:
- Thu, 25 Jan 2024 09:16:58 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-committed-block-count:
- '1'
x-ms-blob-type:
- AppendBlob
x-ms-creation-time:
- Thu, 25 Jan 2024 09:16:58 GMT
x-ms-version:
- '2023-11-03'
status:
code: 206
message: Partial Content
- request:
body: null
headers:
Accept:
- application/xml
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.11.5 (Windows-10-10.0.22621-SP0)
x-ms-date:
- Thu, 25 Jan 2024 09:18:42 GMT
x-ms-range:
- bytes=0-33554431
x-ms-version:
- '2023-11-03'
method: GET
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/runs/name/flow.dag.yaml
response:
body:
string: "path: ./entry.py\r\nentry: my_flow\r\nenvironment:\r\n python_requirements_txt:
requirements.txt"
headers:
accept-ranges:
- bytes
content-length:
- '93'
content-range:
- bytes 0-92/93
content-type:
- application/octet-stream
last-modified:
- Thu, 25 Jan 2024 09:16:44 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-content-md5:
- 6Vg+8FtENDRmMoCPVxTosA==
x-ms-blob-type:
- BlockBlob
x-ms-copy-completion-time:
- Thu, 25 Jan 2024 09:16:44 GMT
x-ms-copy-id:
- f4653407-3dac-4827-a65f-e1cf4b99a458
x-ms-copy-progress:
- 93/93
x-ms-copy-source:
- https://promptfloweast4063704120.blob.core.windows.net/azureml-blobstore-3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/LocalUpload/79819672296a1785a95c65c8c0e75b0d/simple_with_req/flow.dag.yaml
x-ms-copy-status:
- success
x-ms-creation-time:
- Thu, 25 Jan 2024 09:16:44 GMT
x-ms-version:
- '2023-11-03'
status:
code: 206
message: Partial Content
- request:
body: null
headers:
Accept:
- application/xml
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.11.5 (Windows-10-10.0.22621-SP0)
x-ms-date:
- Thu, 25 Jan 2024 09:18:44 GMT
x-ms-range:
- bytes=0-33554431
x-ms-version:
- '2023-11-03'
method: GET
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/promptflow/PromptFlowArtifacts/name/meta.json
response:
body:
string: '{"batch_size": 25}'
headers:
accept-ranges:
- bytes
content-length:
- '18'
content-range:
- bytes 0-17/18
content-type:
- application/octet-stream
last-modified:
- Thu, 25 Jan 2024 09:16:52 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-content-md5:
- /u1NXUpgXMFDmZEw835qnw==
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- Thu, 25 Jan 2024 09:16:52 GMT
x-ms-version:
- '2023-11-03'
status:
code: 206
message: Partial Content
- request:
body: '{"runId": "name", "selectRunMetadata": true, "selectRunDefinition": true,
"selectJobSpecification": true}'
headers:
accept:
- '*/*'
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '137'
content-type:
- application/json
host:
- eastus.api.azureml.ms
user-agent:
- python-httpx/0.26.0
method: POST
uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata
response:
content: '{"runMetadata": {"runNumber": 1706174203, "rootRunId": "name", "createdUtc":
"2024-01-25T09:16:43.8144219+00:00", "createdBy": {"userObjectId": "00000000-0000-0000-0000-000000000000",
"userPuId": "100320005227D154", "userIdp": null, "userAltSecId": null, "userIss":
"https://sts.windows.net/00000000-0000-0000-0000-000000000000/", "userTenantId":
"00000000-0000-0000-0000-000000000000", "userName": "Han Wang", "upn": null},
"userId": "00000000-0000-0000-0000-000000000000", "token": null, "tokenExpiryTimeUtc":
null, "error": null, "warnings": null, "revision": 6, "statusRevision": 3, "runUuid":
"7da6a191-0945-4e9f-bfc6-a1fc7c72b72c", "parentRunUuid": null, "rootRunUuid":
"7da6a191-0945-4e9f-bfc6-a1fc7c72b72c", "lastStartTimeUtc": null, "currentComputeTime":
null, "computeDuration": "00:00:08.3698759", "effectiveStartTimeUtc": null,
"lastModifiedBy": {"userObjectId": "00000000-0000-0000-0000-000000000000", "userPuId":
"100320005227D154", "userIdp": null, "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "Han Wang",
"upn": "[email protected]"}, "lastModifiedUtc": "2024-01-25T09:17:00.9260346+00:00",
"duration": "00:00:08.3698759", "cancelationReason": null, "currentAttemptId":
1, "runId": "name", "parentRunId": null, "experimentId": "64465848-e4a8-42a2-a617-d7f0fcda6f32",
"status": "Completed", "startTimeUtc": "2024-01-25T09:16:53.6662445+00:00",
"endTimeUtc": "2024-01-25T09:17:02.0361204+00:00", "scheduleId": null, "displayName":
"name", "name": null, "dataContainerId": "dcid.name", "description": null, "hidden":
false, "runType": "azureml.promptflow.FlowRun", "runTypeV2": {"orchestrator":
null, "traits": [], "attribution": "PromptFlow", "computeType": null}, "properties":
{"azureml.promptflow.runtime_name": "automatic", "azureml.promptflow.runtime_version":
"20240116.v1", "azureml.promptflow.definition_file_name": "flow.dag.yaml", "azureml.promptflow.flow_lineage_id":
"63d67b0b61e34b0527b5f3c46dfc953854138b2ffebc68175580285dc2d95663", "azureml.promptflow.flow_definition_datastore_name":
"workspaceblobstore", "azureml.promptflow.flow_definition_blob_path": "LocalUpload/79819672296a1785a95c65c8c0e75b0d/simple_with_req/flow.dag.yaml",
"azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/e62bc4d5a164939b21d42dd420469da7/simple_eager_flow_data.jsonl",
"_azureml.evaluation_run": "promptflow.BatchRun", "azureml.promptflow.session_id":
"b905697d9d04e1b8c87c12d30eb37326380d5cfeb7d0500e", "azureml.promptflow.snapshot_id":
"1ac134f8-0696-4ce1-ba89-cd4d9748e886", "azureml.promptflow.run_mode": "Eager",
"azureml.promptflow.total_tokens": "0", "_azureml.evaluate_artifacts": "[{\"path\":
\"instance_results.jsonl\", \"type\": \"table\"}]"}, "parameters": {}, "actionUris":
{}, "scriptName": null, "target": null, "uniqueChildRunComputeTargets": [],
"tags": {}, "settings": {}, "services": {}, "inputDatasets": [], "outputDatasets":
[], "runDefinition": null, "jobSpecification": null, "primaryMetricName": null,
"createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri":
null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace":
false, "queueingInfo": null, "inputs": null, "outputs": {"debug_info": {"assetId":
"azureml://locations/eastus/workspaces/00000/data/azureml_name_output_data_debug_info/versions/1",
"type": "UriFolder"}, "flow_outputs": {"assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_name_output_data_flow_outputs/versions/1",
"type": "UriFolder"}}}, "runDefinition": null, "jobSpecification": null, "systemSettings":
null}'
headers:
connection:
- keep-alive
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.050'
http_version: HTTP/1.1
status_code: 200
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Type:
- application/json
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name/logContent
response:
body:
string: '"2024-01-25 09:16:46 +0000 50 promptflow-runtime INFO [name]
Receiving v2 bulk run request d3077030-8829-4e75-84e2-23ffb7fe5647: {\"flow_id\":
\"name\", \"flow_run_id\": \"name\", \"flow_source\": {\"flow_source_type\":
1, \"flow_source_info\": {\"snapshot_id\": \"1ac134f8-0696-4ce1-ba89-cd4d9748e886\"},
\"flow_dag_file\": \"flow.dag.yaml\"}, \"log_path\": \"https://promptfloweast4063704120.blob.core.windows.net/azureml/ExperimentRun/dcid.name/logs/azureml/executionlogs.txt?sv=2019-07-07&sr=b&sig=**data_scrubbed**&skoid=55b92eba-d7c7-4afd-ab76-7bb1cd345283&sktid=00000000-0000-0000-0000-000000000000&skt=2024-01-25T08%3A48%3A12Z&ske=2024-01-26T16%3A58%3A12Z&sks=b&skv=2019-07-07&st=2024-01-25T09%3A06%3A45Z&se=2024-01-25T17%3A16%3A45Z&sp=rcw\",
\"app_insights_instrumentation_key\": \"InstrumentationKey=**data_scrubbed**;IngestionEndpoint=https://eastus-6.in.applicationinsights.azure.com/;LiveEndpoint=https://eastus.livediagnostics.monitor.azure.com/\",
\"data_inputs\": {\"data\": \"azureml://datastores/workspaceblobstore/paths/LocalUpload/e62bc4d5a164939b21d42dd420469da7/simple_eager_flow_data.jsonl\"},
\"azure_storage_setting\": {\"azure_storage_mode\": 1, \"storage_account_name\":
\"promptfloweast4063704120\", \"blob_container_name\": \"azureml-blobstore-3e123da1-f9a5-4c91-9234-8d9ffbb39ff5\",
\"flow_artifacts_root_path\": \"promptflow/PromptFlowArtifacts/name\", \"blob_container_sas_token\":
\"?sv=2019-07-07&sr=c&sig=**data_scrubbed**&skoid=55b92eba-d7c7-4afd-ab76-7bb1cd345283&sktid=00000000-0000-0000-0000-000000000000&skt=2024-01-25T09%3A16%3A45Z&ske=2024-02-01T09%3A16%3A45Z&sks=b&skv=2019-07-07&se=2024-02-01T09%3A16%3A45Z&sp=racwl\",
\"output_datastore_name\": \"workspaceblobstore\"}}\n2024-01-25 09:16:46 +0000 50
promptflow-runtime INFO Runtime version: 20240116.v1. PromptFlow version:
1.4.0rc3\n2024-01-25 09:16:46 +0000 50 promptflow-runtime INFO Updating
name to Status.Preparing...\n2024-01-25 09:16:47 +0000 50 promptflow-runtime
INFO Downloading snapshot to /mnt/host/service/app/37305/requests/name\n2024-01-25
09:16:47 +0000 50 promptflow-runtime INFO Get snapshot sas url for
1ac134f8-0696-4ce1-ba89-cd4d9748e886.\n2024-01-25 09:16:47 +0000 50 promptflow-runtime
INFO Snapshot 1ac134f8-0696-4ce1-ba89-cd4d9748e886 contains 3 files.\n2024-01-25
09:16:47 +0000 50 promptflow-runtime INFO Download snapshot 1ac134f8-0696-4ce1-ba89-cd4d9748e886
completed.\n2024-01-25 09:16:47 +0000 50 promptflow-runtime INFO Successfully
download snapshot to /mnt/host/service/app/37305/requests/name\n2024-01-25
09:16:47 +0000 50 promptflow-runtime INFO About to execute a python
flow.\n2024-01-25 09:16:47 +0000 50 promptflow-runtime INFO Use spawn
method to start child process.\n2024-01-25 09:16:47 +0000 50 promptflow-runtime
INFO Starting to check process 526 status for run name\n2024-01-25 09:16:47
+0000 50 promptflow-runtime INFO Start checking run status for run
name\n2024-01-25 09:16:51 +0000 526 promptflow-runtime INFO [50--526]
Start processing flowV2......\n2024-01-25 09:16:51 +0000 526 promptflow-runtime
INFO Runtime version: 20240116.v1. PromptFlow version: 0.0.116642424\n2024-01-25
09:16:51 +0000 526 promptflow-runtime INFO Setting mlflow tracking
uri...\n2024-01-25 09:16:51 +0000 526 promptflow-runtime INFO Validating
''AzureML Data Scientist'' user authentication...\n2024-01-25 09:16:52 +0000 526
promptflow-runtime INFO Successfully validated ''AzureML Data Scientist''
user authentication.\n2024-01-25 09:16:52 +0000 526 promptflow-runtime
INFO Using AzureMLRunStorageV2\n2024-01-25 09:16:52 +0000 526 promptflow-runtime
INFO Setting mlflow tracking uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2024-01-25
09:16:52 +0000 526 promptflow-runtime INFO Initialized blob service
client for AzureMLRunTracker.\n2024-01-25 09:16:52 +0000 526 promptflow-runtime
INFO Setting mlflow tracking uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2024-01-25
09:16:53 +0000 526 promptflow-runtime INFO Resolve data from url finished
in 0.5306580369999665 seconds\n2024-01-25 09:16:53 +0000 526 promptflow-runtime
INFO Starting the aml run ''name''...\n2024-01-25 09:16:53 +0000 526
execution WARNING Starting run without column mapping may lead to
unexpected results. Please consult the following documentation for more information:
https://aka.ms/pf/column-mapping\n2024-01-25 09:16:53 +0000 526 execution.bulk INFO Set
process count to 1 by taking the minimum value among the factors of {''default_worker_count'':
4, ''row_count'': 1}.\n2024-01-25 09:16:57 +0000 526 execution.bulk INFO Process
name(ForkProcess-2:2:1)-Process id(596)-Line number(0) start execution.\n2024-01-25
09:16:58 +0000 526 execution.bulk INFO Process name(ForkProcess-2:2:1)-Process
id(596)-Line number(0) completed.\n2024-01-25 09:16:58 +0000 526 execution.bulk INFO Finished
1 / 1 lines.\n2024-01-25 09:16:58 +0000 526 execution.bulk INFO Average
execution time for completed lines: 5.0 seconds. Estimated time for incomplete
lines: 0.0 seconds.\n2024-01-25 09:17:00 +0000 526 promptflow-runtime
INFO Post processing batch result...\n2024-01-25 09:17:00 +0000 526
execution.bulk INFO Upload status summary metrics for run name finished
in 0.6790808640000705 seconds\n2024-01-25 09:17:00 +0000 526 promptflow-runtime
INFO Successfully write run properties {\"azureml.promptflow.total_tokens\":
0, \"_azureml.evaluate_artifacts\": \"[{\\\"path\\\": \\\"instance_results.jsonl\\\",
\\\"type\\\": \\\"table\\\"}]\"} with run id ''name''\n2024-01-25 09:17:00
+0000 526 execution.bulk INFO Upload RH properties for run name
finished in 0.06737575900001502 seconds\n2024-01-25 09:17:00 +0000 526
promptflow-runtime INFO Creating unregistered output Asset for Run name...\n2024-01-25
09:17:01 +0000 526 promptflow-runtime INFO Created debug_info Asset:
azureml://locations/eastus/workspaces/00000/data/azureml_name_output_data_debug_info/versions/1\n2024-01-25
09:17:01 +0000 526 promptflow-runtime INFO Creating unregistered output
Asset for Run name...\n2024-01-25 09:17:01 +0000 526 promptflow-runtime
INFO Created flow_outputs output Asset: azureml://locations/eastus/workspaces/00000/data/azureml_name_output_data_flow_outputs/versions/1\n2024-01-25
09:17:01 +0000 526 promptflow-runtime INFO Creating Artifact for Run
name...\n2024-01-25 09:17:01 +0000 526 promptflow-runtime INFO Created
instance_results.jsonl Artifact.\n2024-01-25 09:17:01 +0000 526 promptflow-runtime
INFO Patching name...\n2024-01-25 09:17:01 +0000 526 promptflow-runtime
INFO Ending the aml run ''name'' with status ''Completed''...\n2024-01-25
09:17:53 +0000 50 promptflow-runtime INFO Process 526 finished\n2024-01-25
09:17:53 +0000 50 promptflow-runtime INFO [50] Child process finished!\n2024-01-25
09:17:54 +0000 50 promptflow-runtime INFO [name] End processing bulk
run\n2024-01-25 09:17:54 +0000 50 promptflow-runtime INFO Cleanup
working dir /mnt/host/service/app/37305/requests/name for bulk run\n"'
headers:
connection:
- keep-alive
content-length:
- '8401'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '1.347'
status:
code: 200
message: OK
version: 1
| promptflow/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_eager_flow_download.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_eager_flow_download.yaml",
"repo_id": "promptflow",
"token_count": 30319
} | 83 |
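The recording above captures the full client/service exchange for submitting an eager flow as a bulk run and downloading its artifacts: datastore lookups, blob checks, the `BulkRuns/submit` POST, status polling, metrics retrieval, and artifact blob downloads. Below is a minimal sketch of the client-side calls that drive such an exchange, assuming a configured Azure ML workspace; all identifiers are placeholders, and the `runs.download` operation is assumed to be available as in recent promptflow releases.

```python
from azure.identity import DefaultAzureCredential
from promptflow.azure import PFClient

pf = PFClient(
    credential=DefaultAzureCredential(),
    subscription_id="<subscription-id>",
    resource_group_name="<resource-group>",
    workspace_name="<workspace-name>",
)

# Submit the eager flow as a bulk run (the POST to .../BulkRuns/submit above).
run = pf.run(
    flow="./eager_flows/simple_with_req",         # folder with flow.dag.yaml, entry.py, requirements.txt
    data="./datas/simple_eager_flow_data.jsonl",  # batch input, one JSON line per flow line
)
pf.stream(run)  # poll GET .../BulkRuns/<name> until the run reaches a terminal status

# Fetch outputs, metrics, and debug info (the metric and blob GETs recorded above).
pf.runs.download(run=run.name, output="./downloads")
```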
interactions:
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 promptflow/0.0.1 azure-ai-ml/1.12.0 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/api-version=2023-08-01-preview
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000",
"name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location":
"eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic",
"tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}'
headers:
cache-control:
- no-cache
content-length:
- '3630'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
vary:
- Accept-Encoding
x-cache:
- CONFIG_NOCACHE
x-content-type-options:
- nosniff
x-request-time:
- '0.038'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 promptflow/0.0.1 azure-ai-ml/1.12.0 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?api-version=2023-04-01-preview&count=30&isDefault=true&orderByAsc=false
response:
body:
string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}]}'
headers:
cache-control:
- no-cache
content-length:
- '1372'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
vary:
- Accept-Encoding
x-cache:
- CONFIG_NOCACHE
x-content-type-options:
- nosniff
x-request-time:
- '1.132'
status:
code: 200
message: OK
- request:
body: '{"runId": "not_exist", "selectRunMetadata": true, "selectRunDefinition":
true, "selectJobSpecification": true}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '110'
Content-Type:
- application/json
User-Agent:
- python-requests/2.31.0
method: POST
uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata
response:
body:
string: '{"error": {"code": "UserError", "severity": null, "message": "Run runId=not_exist
was not found", "messageFormat": "Run {runId} was not found", "messageParameters":
{"runId": "runId=not_exist"}, "referenceCode": null, "detailsUri": null, "target":
null, "details": [], "innerError": {"code": "NotFoundError", "innerError":
null}, "debugInfo": null, "additionalInfo": null}, "correlation": {"operation":
"ea26eac5dbeb3bdc99dc95beeb40ac6f", "request": "010bd843f7775527"}, "environment":
"eastus", "location": "eastus", "time": "2023-11-23T04:54:12.8199101+00:00",
"componentName": "run-history", "statusCode": 404}'
headers:
connection:
- keep-alive
content-length:
- '723'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.112'
status:
code: 404
message: Run runId=not_exist was not found
version: 1
| promptflow/src/promptflow/tests/test_configs/recordings/test_telemetry_TestTelemetry_test_custom_event.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/recordings/test_telemetry_TestTelemetry_test_custom_event.yaml",
"repo_id": "promptflow",
"token_count": 2445
} | 84 |
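This shorter recording exercises the error path: a `rundata` lookup for a run that does not exist returns HTTP 404 with a structured `UserError`/`NotFoundError` body. A hedged sketch of reproducing that lookup with plain `requests` follows; the endpoint and payload mirror the cassette, while the authentication header is an assumption and the IDs are placeholders.

```python
import requests

url = (
    "https://eastus.api.azureml.ms/history/v1.0"
    "/subscriptions/<subscription-id>/resourceGroups/<resource-group>"
    "/providers/Microsoft.MachineLearningServices/workspaces/<workspace-name>/rundata"
)
payload = {
    "runId": "not_exist",
    "selectRunMetadata": True,
    "selectRunDefinition": True,
    "selectJobSpecification": True,
}
resp = requests.post(url, json=payload, headers={"Authorization": "Bearer <token>"})
if resp.status_code == 404:
    # The service reports the missing run in a structured error body, as captured above.
    print(resp.json()["error"]["code"])        # "UserError"
    print(resp.json()["error"]["innerError"])  # {"code": "NotFoundError", ...}
```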
flow: ../flows/classification_accuracy_evaluation
data: ../datas/webClassification1.jsonl
column_mapping:
groundtruth: "${data.answer}"
prediction: "${run.outputs.category}"
variant_id: "${data.variant_id}"
run: flow_run_20230629_101205 # ./sample_bulk_run.yaml
# run config: environment-related settings
environment_variables: env_file
| promptflow/src/promptflow/tests/test_configs/runs/sample_eval_run.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/runs/sample_eval_run.yaml",
"repo_id": "promptflow",
"token_count": 116
} | 85 |
import importlib
from pathlib import Path
from promptflow._core.tool import tool
from promptflow.contracts.types import FilePath
@tool(name="Tool with FilePath Input", description="This is a tool to demonstrate the usage of FilePath input")
def my_tool(input_file: FilePath, input_text: str) -> str:
# customise your own code to handle and use the input_file here
new_module = importlib.import_module(Path(input_file).stem)
return new_module.hello(input_text)
| promptflow/src/promptflow/tests/test_configs/tools/tool_with_file_path_input.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/tools/tool_with_file_path_input.py",
"repo_id": "promptflow",
"token_count": 139
} | 86 |
name: node_condition_conflict
inputs:
text:
type: string
outputs:
result:
type: string
reference: ${test_node}
nodes:
- name: test_node
type: python
source:
type: code
path: test.py
inputs:
text: ${inputs.text}
skip:
when: ${inputs.text}
is: true
return: ${inputs.text}
activate:
when: ${inputs.text}
is: true | promptflow/src/promptflow/tests/test_configs/wrong_flows/node_condition_conflict/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/wrong_flows/node_condition_conflict/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 159
} | 87 |
inputs:
num:
type: int
outputs:
content:
type: string
reference: ${stringify_num.output}
nodes:
- name: stringify_num
type: python
source:
type: code
path: stringify_num.py
inputs:
num: ${inputs.num}
| promptflow/src/promptflow/tests/test_configs/wrong_flows/source_file_missing/flow.dag.python.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/wrong_flows/source_file_missing/flow.dag.python.yaml",
"repo_id": "promptflow",
"token_count": 101
} | 88 |
FROM python:3.9-slim-bullseye AS base
RUN set -x
RUN apt-get update \
&& apt-get -y install curl \
&& apt-get -y install net-tools \
&& apt-get -y install procps \
&& apt-get -y install build-essential \
&& apt-get -y install docker.io
RUN pip install ipython ipykernel
RUN ipython kernel install --user --name promptflow
# FROM base AS promptflow
COPY requirements.txt .
RUN pip install -r requirements.txt
RUN set +x
CMD bash
| promptflow/.devcontainer/Dockerfile/0 | {
"file_path": "promptflow/.devcontainer/Dockerfile",
"repo_id": "promptflow",
"token_count": 166
} | 0 |
# Promptflow documentation contribute guidelines
This folder contains the source code for [prompt flow documentation site](https://microsoft.github.io/promptflow/).
This README file is not included in the doc site above; it is a guide for promptflow documentation contributors.
## Content
Below is a table of important doc pages.
| Category | Article |
|----------------|----------------|
|Quick start|[Getting started with prompt flow](./how-to-guides/quick-start.md)|
|Concepts|[Flows](./concepts/concept-flows.md)<br> [Tools](./concepts/concept-tools.md)<br> [Connections](./concepts/concept-connections.md)<br> [Variants](./concepts/concept-variants.md)<br> |
|How-to guides|[How to initialize and test a flow](./how-to-guides/init-and-test-a-flow.md) <br>[How to run and evaluate a flow](./how-to-guides/run-and-evaluate-a-flow/index.md)<br> [How to tune prompts using variants](./how-to-guides/tune-prompts-with-variants.md)<br>[How to deploy a flow](./how-to-guides/deploy-a-flow/index.md)<br>[How to create and use your own tool package](./how-to-guides/develop-a-tool/create-and-use-tool-package.md)|
|Tools reference|[LLM tool](./reference/tools-reference/llm-tool.md)<br> [Prompt tool](./reference/tools-reference/prompt-tool.md)<br> [Python tool](./reference/tools-reference/python-tool.md)<br> [Embedding tool](./reference/tools-reference/embedding_tool.md)<br>[SERP API tool](./reference/tools-reference/serp-api-tool.md)|
## Writing tips
0. Reach the doc source repository by clicking `Edit this page` on any page.
![Edit this page](./media/edit-this-page.png)
1. Please use `:::{admonition}` for experimental features or notes, and an admonition with dropdown for the Limitations part.
2. Please use `::::{tab-set}` to group your SDK/CLI examples, and put the CLI one first. Use `:sync:` to keep multiple tab sets in sync (see the example snippet after this list).
3. If the above is unclear, refer to [get started](./how-to-guides/quick-start.md) to see these in use.
4. Add gif: Use [ScreenToGif](https://www.screentogif.com/) to record your screen, edit and save as a gif.
5. Reach more element style at [Sphinx Design Components](https://pydata-sphinx-theme.readthedocs.io/en/latest/user_guide/web-components.html).
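Below is a minimal, illustrative sketch of the admonition and tab-set syntax mentioned in points 1 and 2 (titles and contents are placeholders):
```text
:::{admonition} Note
This is an experimental feature, and may change at any time.
:::

::::{tab-set}
:::{tab-item} CLI
:sync: CLI
Describe the CLI usage here.
:::
:::{tab-item} SDK
:sync: SDK
Describe the SDK usage here.
:::
::::
```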
## Preview your changes
**Local build**: We suggest using a local build first, as it's fast and efficient.
Please refer to [How to build doc site locally](./dev/documentation_guidelines.md#how-to-build-doc-site-locally).
## FAQ
### Adding image in doc
Please use the markdown syntax `![img desc](img link)` to reference images, because relative image paths change after the sphinx build, and images placed in HTML tags cannot be resolved at build time.
### Draw flow chart in doc
We recommend using Mermaid; learn more from the [mermaid syntax doc](https://mermaid-js.github.io/mermaid/#/./flowchart?id=flowcharts-basic-syntax)
- We recommend installing the [vscode extension](https://marketplace.visualstudio.com/items?itemName=bierner.markdown-mermaid) to preview graphs in VS Code.
## Reference
- [md-and-rst](https://coderefinery.github.io/sphinx-lesson/md-and-rst/)
- [sphinx-quickstart](https://www.sphinx-doc.org/en/master/usage/quickstart.html) | promptflow/docs/README.md/0 | {
"file_path": "promptflow/docs/README.md",
"repo_id": "promptflow",
"token_count": 1034
} | 1 |
# Promptflow Reference Documentation Guide
## Overview
This guide describes how to author Python docstrings for promptflow public interfaces. See our doc site at [Promptflow API reference documentation](https://microsoft.github.io/promptflow/reference/python-library-reference/promptflow.html).
## Principles
- **Coverage**: Every public object must have a docstring. For private objects, docstrings are encouraged but not required.
- **Style**: All docstrings should be written in [Sphinx style](https://sphinx-rtd-tutorial.readthedocs.io/en/latest/docstrings.html#the-sphinx-docstring-format) noting all types and if any exceptions are raised.
- **Relevance**: The documentation is up-to-date and relevant to the current version of the product.
- **Clarity**: The documentation is written in clear, concise language that is easy to understand.
- **Consistency**: The documentation has a consistent format and structure, making it easy to navigate and follow.
## How to write the docstring
First please read through [Sphinx style](https://sphinx-rtd-tutorial.readthedocs.io/en/latest/docstrings.html#the-sphinx-docstring-format) to have a basic understanding of sphinx style docstring.
### Write class docstring
Let's start with a class example:
```python
from typing import Dict, Optional, Union
from promptflow import PFClient
class MyClass:
"""One-line summary of the class.
More detailed explanation of the class. May include below notes, admonitions, code blocks.
.. note::
Here are some notes to show, with a nested python code block:
.. code-block:: python
from promptflow import MyClass, PFClient
obj = MyClass(PFClient())
.. admonition:: [Title of the admonition]
Here are some admonitions to show.
:param client: Description of the client.
:type client: ~promptflow.PFClient
:param param_int: Description of the parameter.
:type param_int: Optional[int]
:param param_str: Description of the parameter.
:type param_str: Optional[str]
:param param_dict: Description of the parameter.
:type param_dict: Optional[Dict[str, str]]
"""
def __init__(
    self,
    client: PFClient,
    param_int: Optional[int] = None,
    param_str: Optional[str] = None,
    param_dict: Optional[Dict[str, str]] = None,
) -> None:
"""No docstring for __init__, it should be written in class definition above."""
...
```
**Notes**:
1. One-line summary is required. It should be clear and concise.
2. Detailed explanation is encouraged but not required. This part may or may not include notes, admonitions and code blocks.
- The format like `.. note::` is called `directive`. Directives are a mechanism to extend the content of [reStructuredText](https://docutils.sourceforge.io/rst.html). Every directive declares a block of content with specific role. Start a new line with `.. directive_name::` to use the directive.
- The directives used in the sample(`note/admonition/code-block`) should be enough for basic usage of docstring in our project. But you are welcomed to explore more [Directives](https://www.sphinx-doc.org/en/master/usage/restructuredtext/directives.html#specific-admonitions).
3. Parameter description and type is required.
- A pair of `:param [ParamName]:` and `:type [ParamName]:` is required.
- If the type is a promptflow public class, use the `full path to the class` and prepend it with a "~". This will create a link when the documentation is rendered on the doc site that will take the user to the class reference documentation for more information.
```text
:param client: Description of the client.
:type client: ~promptflow.PFClient
```
- Use `Union/Optional` when appropriate in function declarations. And use the same annotation after `:type [ParamName]:`
```text
:type param_int: Optional[int]
```
4. For classes, include docstring in definition only. If you include a docstring in both the class definition and the constructor (init method) docstrings, it will show up twice in the reference docs.
5. Constructors (def `__init__`) should return `None`, per [PEP 484 standards](https://peps.python.org/pep-0484/#the-meaning-of-annotations).
6. To create a link for a promptflow class on our doc site: `~promptflow.xxx.MyClass` alone only works after `:type [ParamName]:` and `:rtype:`. If you want to achieve the same effect in a docstring summary, use it with `:class:`:
```python
"""
An example to achieve link effect in summary for :class:`~promptflow.xxx.MyClass`
For function, use :meth:`~promptflow.xxx.my_func`
"""
```
7. There are some tricks to highlight the content in your docstring:
- Single backticks (`): Single backticks are used to represent inline code elements within the text. It is typically used to highlight function names, variable names, or any other code elements within the documentation.
- Double backticks(``): Double backticks are typically used to highlight a literal value.
8. If there are any class level constants you don't want to expose to doc site, make sure to add `_` in front of the constant to hide it.
### Write function docstring
```python
from typing import Optional
def my_method(param_int: Optional[int] = None) -> int:
"""One-line summary
Detailed explanations.
:param param_int: Description of the parameter.
:type param_int: Optional[int]
:raises [ErrorType1]: [ErrorDescription1]
:raises [ErrorType2]: [ErrorDescription2]
:return: Description of the return value.
:rtype: int
"""
...
```
In addition to `class docstring` notes:
1. Function docstring should include return values.
- If return type is promptflow class, we should also use `~promptflow.xxx.[ClassName]`.
2. Function docstring should include exceptions that may be raised in this function.
- If exception type is `PromptflowException`, use `~promptflow.xxx.[ExceptionName]`
- If multiple exceptions are raised, just add new lines of `:raises`, see the example above.
## How to build doc site locally
You can build the documentation site locally to preview the final effect of your docstring on the rendered site. This will provide you with a clear understanding of how your docstring will appear on our site once your changes are merged into the main branch.
1. Setup your dev environment, see [dev_setup](./dev_setup.md) for details. Sphinx will load all source code to process docstring.
- Skip this step if you just want to build the doc site without reference doc, but do remove `-WithReferenceDoc` from the command in step 3.
2. Install `langchain` package since it is used in our code but not covered in `dev_setup`.
3. Open a `powershell`, activate the conda env and navigate to `<repo-root>/scripts/docs` , run `doc_generation.ps1`:
```pwsh
cd scripts\docs
.\doc_generation.ps1 -WithReferenceDoc -WarningAsError
```
- For the first time you execute this command, it will take some time to install `sphinx` dependencies. After the initial installation, next time you can add param `-SkipInstall` to above command to save some time for dependency check.
4. Check warnings/errors in the build log, fix them if any, then build again.
5. Open `scripts/docs/_build/index.html` to preview the local doc site.
## Additional comments
- **Utilities**: The [autoDocstring](https://marketplace.visualstudio.com/items?itemName=njpwerner.autodocstring) VSCode extension or GitHub Copilot can help autocomplete in this style for you.
- **Advanced principles**
- Accuracy: The documentation accurately reflects the features and functionality of the product.
- Completeness: The documentation covers all relevant features and functionality of the product.
- Demonstration: Every docstring should include an up-to-date code snippet that demonstrates how to use the product effectively.
## References
- [AzureML v2 Reference Documentation Guide](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ml/azure-ai-ml/documentation_guidelines.md)
- [Azure SDK for Python documentation guidelines](https://azure.github.io/azure-sdk/python_documentation.html#docstrings)
- [How to document a Python API](https://review.learn.microsoft.com/en-us/help/onboard/admin/reference/python/documenting-api?branch=main) | promptflow/docs/dev/documentation_guidelines.md/0 | {
"file_path": "promptflow/docs/dev/documentation_guidelines.md",
"repo_id": "promptflow",
"token_count": 2474
} | 2 |
# Creating Cascading Tool Inputs
Cascading input settings are useful when the value of one input field determines which subsequent inputs are shown. This makes the input process more streamlined, user-friendly, and error-free. This guide will walk through how to create cascading inputs for your tools.
## Prerequisites
Please make sure you have the latest version of [Prompt flow for VS Code](https://marketplace.visualstudio.com/items?itemName=prompt-flow.prompt-flow) installed (v1.2.0+).
## Create a tool with cascading inputs
We'll build out an example tool to show how cascading inputs work. The `student_id` and `teacher_id` inputs will be controlled by the value selected for the `user_type` input. Here's how to configure this in the tool code and YAML.
1. Develop the tool function, following the [cascading inputs example](https://github.com/microsoft/promptflow/blob/main/examples/tools/tool-package-quickstart/my_tool_package/tools/tool_with_cascading_inputs.py). Key points:
* Use the `@tool` decorator to mark the function as a tool.
* Define `UserType` as an Enum class, as it accepts only a specific set of fixed values in this example.
* Conditionally use inputs in the tool logic based on `user_type`.
```python
from enum import Enum
from promptflow import tool
class UserType(str, Enum):
STUDENT = "student"
TEACHER = "teacher"
@tool
def my_tool(user_type: Enum, student_id: str = "", teacher_id: str = "") -> str:
"""This is a dummy function to support cascading inputs.
:param user_type: user type, student or teacher.
:param student_id: student id.
:param teacher_id: teacher id.
:return: id of the user.
If user_type is student, return student_id.
If user_type is teacher, return teacher_id.
"""
if user_type == UserType.STUDENT:
return student_id
elif user_type == UserType.TEACHER:
return teacher_id
else:
raise Exception("Invalid user.")
```
2. Generate a starting YAML for your tool per the [tool package guide](create-and-use-tool-package.md), then update it to enable cascading:
Add `enabled_by` and `enabled_by_value` to control visibility of dependent inputs. See the [example YAML](https://github.com/microsoft/promptflow/blob/main/examples/tools/tool-package-quickstart/my_tool_package/yamls/tool_with_cascading_inputs.yaml) for reference.
* The `enabled_by` attribute specifies the input field, which must be an enum type, that controls the visibility of the dependent input field.
* The `enabled_by_value` attribute defines the accepted enum values from the `enabled_by` field that will make this dependent input field visible.
> Note: `enabled_by_value` takes a list, allowing multiple values to enable an input.
```yaml
my_tool_package.tools.tool_with_cascading_inputs.my_tool:
function: my_tool
inputs:
user_type:
type:
- string
enum:
- student
- teacher
student_id:
type:
- string
# This input is enabled by the input "user_type".
enabled_by: user_type
# This input is enabled when "user_type" is "student".
enabled_by_value: [student]
teacher_id:
type:
- string
enabled_by: user_type
enabled_by_value: [teacher]
module: my_tool_package.tools.tool_with_cascading_inputs
name: My Tool with Cascading Inputs
description: This is my tool with cascading inputs
type: python
```
## Use the tool in VS Code
Once you package and share your tool, you can use it in VS Code per the [tool package guide](create-and-use-tool-package.md). We have a [demo flow](https://github.com/microsoft/promptflow/tree/main/examples/tools/use-cases/cascading-inputs-tool-showcase) you can try.
Before selecting a `user_type`, the `student_id` and `teacher_id` inputs are hidden. Once you pick the `user_type`, the corresponding input appears.
![before_user_type_selected.png](../../media/how-to-guides/develop-a-tool/before_user_type_selected.png)
![after_user_type_selected_with_student.png](../../media/how-to-guides/develop-a-tool/after_user_type_selected_with_student.png)
![after_user_type_selected_with_teacher.png](../../media/how-to-guides/develop-a-tool/after_user_type_selected_with_teacher.png)
## FAQs
### How do I create multi-layer cascading inputs?
If you are dealing with multiple levels of cascading inputs, you can effectively manage the dependencies between them by using the `enabled_by` and `enabled_by_value` attributes. For example:
```yaml
my_tool_package.tools.tool_with_multi_layer_cascading_inputs.my_tool:
function: my_tool
inputs:
event_type:
type:
- string
enum:
- corporate
- private
corporate_theme:
type:
- string
# This input is enabled by the input "event_type".
enabled_by: event_type
# This input is enabled when "event_type" is "corporate".
enabled_by_value: [corporate]
enum:
- seminar
- team_building
seminar_location:
type:
- string
# This input is enabled by the input "corporate_theme".
enabled_by: corporate_theme
# This input is enabled when "corporate_theme" is "seminar".
enabled_by_value: [seminar]
private_theme:
type:
- string
# This input is enabled by the input "event_type".
enabled_by: event_type
# This input is enabled when "event_type" is "private".
enabled_by_value: [private]
module: my_tool_package.tools.tool_with_multi_layer_cascading_inputs
name: My Tool with Multi-Layer Cascading Inputs
description: This is my tool with multi-layer cascading inputs
type: python
```
Inputs will be enabled in a cascading way based on selections. | promptflow/docs/how-to-guides/develop-a-tool/create-cascading-tool-inputs.md/0 | {
"file_path": "promptflow/docs/how-to-guides/develop-a-tool/create-cascading-tool-inputs.md",
"repo_id": "promptflow",
"token_count": 1955
} | 3 |
# Use column mapping
In this document, we will introduce how to map inputs with column mapping when running a flow.
## Column mapping introduction
Column mapping is a mapping from flow input name to specified values.
If specified, the flow will be executed with provided value for specified inputs.
The following types of values in column mapping are supported:
- `${data.<column_name>}` to reference from your test dataset.
- `${run.outputs.<output_name>}` to reference from referenced run's output. **Note**: this only supported when `--run` is provided for `pf run`.
- `STATIC_VALUE` to create static value for all lines for specified column.
## Flow inputs override priority
Flow input values are overridden according to the following priority:
"specified in column mapping" > "default value" > "same name column in provided data".
For example, if we have a flow with following inputs:
```yaml
inputs:
input1:
type: string
default: "default_val1"
input2:
type: string
default: "default_val2"
input3:
type: string
input4:
type: string
...
```
And the flow will return each input in its outputs.
With the following data
```json
{"input3": "val3_in_data", "input4": "val4_in_data"}
```
And use the following YAML to run
```yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Run.schema.json
flow: path/to/flow
# the flow has default value "default_val2" for input2
data: path/to/data
# the data has column "input3" with value "val3_in_data"
column_mapping:
input1: "val1_in_column_mapping"
input3: ${data.input3}
```
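The same mapping can also be passed on the CLI (a sketch; the flow and data paths are placeholders):

```bash
pf run create --flow path/to/flow --data path/to/data \
  --column-mapping input1="val1_in_column_mapping" input3='${data.input3}' \
  --stream
```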
Since the flow returns each input in its output, we can get the actual inputs from the `outputs.output` field in the run details:
![column_mapping_details](../../media/column_mapping_details.png)
- Input "input1" has value "val1_in_column_mapping" since it's specified as constance in `column_mapping`.
- Input "input2" has value "default_val2" since it used default value in flow dag.
- Input "input3" has value "val3_in_data" since it's specified as data reference in `column_mapping`.
- Input "input4" has value "val4_in_data" since it has same name column in provided data.
| promptflow/docs/how-to-guides/run-and-evaluate-a-flow/use-column-mapping.md/0 | {
"file_path": "promptflow/docs/how-to-guides/run-and-evaluate-a-flow/use-column-mapping.md",
"repo_id": "promptflow",
"token_count": 654
} | 4 |
# Faiss Index Lookup
Faiss Index Lookup is a tool tailored for querying within a user-provided Faiss-based vector store. In combination with our Large Language Model (LLM) tool, it empowers users to extract contextually relevant information from a domain knowledge base.
## Requirements
- For AzureML users, the tool is installed in the default image; you can use it without extra installation.
- For local users:
  - if your index is stored in a local path: `pip install promptflow-vectordb`
  - if your index is stored in Azure storage: `pip install promptflow-vectordb[azure]`
## Prerequisites
### For AzureML users,
- step 1. Prepare an accessible path on Azure Blob Storage. Here's the guide if a new storage account needs to be created: [Azure Storage Account](https://learn.microsoft.com/en-us/azure/storage/common/storage-account-create?tabs=azure-portal).
- step 2. Create related Faiss-based index files on Azure Blob Storage. We support the LangChain format (index.faiss + index.pkl) for the index files, which can be prepared either by employing our promptflow-vectordb SDK or following the quick guide from [LangChain documentation](https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/faiss). Please refer to the instructions of <a href="https://aka.ms/pf-sample-build-faiss-index" target="_blank">An example code for creating Faiss index</a> for building index using promptflow-vectordb SDK.
- step 3. Based on where you put your own index files, the identity used by the promptflow runtime should be granted with certain roles. Please refer to [Steps to assign an Azure role](https://learn.microsoft.com/en-us/azure/role-based-access-control/role-assignments-steps):
| Location | Role |
| ---- | ---- |
| workspace datastores or workspace default blob | AzureML Data Scientist |
| other blobs | Storage Blob Data Reader |
### For local users,
- Create Faiss-based index files in a local path by performing only step 2 above (see the sketch below).
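For a quick local start, below is a minimal sketch of building a LangChain-format index (assumes `langchain-community`, `faiss-cpu` and `sentence-transformers` are installed; the embedding model and paths are placeholders):

```python
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS

# A few sample texts with optional metadata stored alongside the vectors.
texts = ["sample text #0", "sample text #1", "sample text #2"]
metadatas = [{"title": f"title{i}", "link": f"http://sample_link_{i}"} for i in range(len(texts))]

embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
store = FAISS.from_texts(texts, embeddings, metadatas=metadatas)

# save_local writes index.faiss + index.pkl, the format this tool expects.
store.save_local("./my_faiss_index")
```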
## Inputs
The tool accepts the following inputs:
| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| path | string | URL or path for the vector store.<br><br>local path (for local users):<br>`<local_path_to_the_index_folder>`<br><br> Azure blob URL format (with [azure] extra installed):<br>https://`<account_name>`.blob.core.windows.net/`<container_name>`/`<path_and_folder_name>`.<br><br>AML datastore URL format (with [azure] extra installed):<br>azureml://subscriptions/`<your_subscription>`/resourcegroups/`<your_resource_group>`/workspaces/`<your_workspace>`/data/`<data_path>`<br><br>public http/https URL (for public demonstration):<br>http(s)://`<path_and_folder_name>` | Yes |
| vector | list[float] | The target vector to be queried, which can be generated by the LLM tool. | Yes |
| top_k | integer | The count of top-scored entities to return. Default value is 3. | No |
## Outputs
The following is an example of the JSON response returned by the tool, which includes the top-k scored entities. Each entity follows a generic vector search result schema provided by our promptflow-vectordb SDK. For Faiss Index Lookup, the following fields are populated:
| Field Name | Type | Description |
| ---- | ---- | ----------- |
| text | string | Text of the entity |
| score | float | Distance between the entity and the query vector |
| metadata | dict | Customized key-value pairs provided by user when create the index |
<details>
<summary>Output</summary>
```json
[
{
"metadata": {
"link": "http://sample_link_0",
"title": "title0"
},
"original_entity": null,
"score": 0,
"text": "sample text #0",
"vector": null
},
{
"metadata": {
"link": "http://sample_link_1",
"title": "title1"
},
"original_entity": null,
"score": 0.05000000447034836,
"text": "sample text #1",
"vector": null
},
{
"metadata": {
"link": "http://sample_link_2",
"title": "title2"
},
"original_entity": null,
"score": 0.20000001788139343,
"text": "sample text #2",
"vector": null
}
]
```
</details> | promptflow/docs/reference/tools-reference/faiss_index_lookup_tool.md/0 | {
"file_path": "promptflow/docs/reference/tools-reference/faiss_index_lookup_tool.md",
"repo_id": "promptflow",
"token_count": 1307
} | 5 |
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/CognitiveSearchConnection.schema.json
name: cognitive_search_connection
type: cognitive_search
api_key: "<to-be-replaced>"
api_base: "endpoint"
api_version: "2023-07-01-Preview"
| promptflow/examples/connections/cognitive_search.yml/0 | {
"file_path": "promptflow/examples/connections/cognitive_search.yml",
"repo_id": "promptflow",
"token_count": 86
} | 6 |
system:
You are an assistant to calculate the answer to the provided math problems.
Please think step by step.
Return the final numerical answer and any accompanying reasoning or explanation separately, in JSON format.
user:
A jar contains two red marbles, three green marbles, ten white marbles and no other marbles. Two marbles are randomly drawn from this jar without replacement. What is the probability that these two marbles drawn will both be red? Express your answer as a common fraction.
assistant:
{"Chain of thought": "The total number of marbles is $2+3+10=15$. The probability that the first marble drawn will be red is $2/15$. Then, there will be one red left, out of 14. Therefore, the probability of drawing out two red marbles will be: $$\\frac{2}{15}\\cdot\\frac{1}{14}=\\boxed{\\frac{1}{105}}$$.", "answer": "1/105"}
user:
Find the greatest common divisor of $7!$ and $(5!)^2.$
assistant:
{"Chain of thought": "$$ \\begin{array} 7! &=& 7 \\cdot 6 \\cdot 5 \\cdot 4 \\cdot 3 \\cdot 2 \\cdot 1 &=& 2^4 \\cdot 3^2 \\cdot 5^1 \\cdot 7^1 \\\\ (5!)^2 &=& (5 \\cdot 4 \\cdot 3 \\cdot 2 \\cdot 1)^2 &=& 2^6 \\cdot 3^2 \\cdot 5^2 \\\\ \\text{gcd}(7!, (5!)^2) &=& 2^4 \\cdot 3^2 \\cdot 5^1 &=& \\boxed{720} \\end{array} $$.", "answer": "720"}
{% for item in chat_history %}
user:
{{item.inputs.question}}
assistant:
{{item.outputs.answer}}
{% endfor %}
user:
{{question}} | promptflow/examples/flows/chat/chat-math-variant/chat_variant_1.jinja2/0 | {
"file_path": "promptflow/examples/flows/chat/chat-math-variant/chat_variant_1.jinja2",
"repo_id": "promptflow",
"token_count": 474
} | 7 |
import contextlib
import os
import sys
if sys.platform.startswith("win"):
import msvcrt
else:
import fcntl
@contextlib.contextmanager
def acquire_lock(filename):
if not sys.platform.startswith("win"):
with open(filename, "a+") as f:
fcntl.flock(f, fcntl.LOCK_EX)
yield f
fcntl.flock(f, fcntl.LOCK_UN)
else: # Windows
with open(filename, "w") as f:
msvcrt.locking(f.fileno(), msvcrt.LK_LOCK, 1)
yield f
msvcrt.locking(f.fileno(), msvcrt.LK_UNLCK, 1)
try:
os.remove(filename)
except OSError:
pass # best effort to remove the lock file
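# Illustrative usage (hypothetical lock file name): serialize a critical
# section across processes on both POSIX and Windows.
if __name__ == "__main__":
    with acquire_lock("my_task.lock"):
        print("lock acquired; doing exclusive work")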
| promptflow/examples/flows/chat/chat-with-pdf/chat_with_pdf/utils/lock.py/0 | {
"file_path": "promptflow/examples/flows/chat/chat-with-pdf/chat_with_pdf/utils/lock.py",
"repo_id": "promptflow",
"token_count": 338
} | 8 |
from promptflow import tool
from chat_with_pdf.qna import qna
@tool
def qna_tool(prompt: str, history: list):
stream = qna(prompt, convert_chat_history_to_chatml_messages(history))
answer = ""
    for chunk in stream:  # avoid shadowing the built-in name `str`
        answer += chunk
return {"answer": answer}
def convert_chat_history_to_chatml_messages(history):
messages = []
for item in history:
messages.append({"role": "user", "content": item["inputs"]["question"]})
messages.append({"role": "assistant", "content": item["outputs"]["answer"]})
return messages
| promptflow/examples/flows/chat/chat-with-pdf/qna_tool.py/0 | {
"file_path": "promptflow/examples/flows/chat/chat-with-pdf/qna_tool.py",
"repo_id": "promptflow",
"token_count": 212
} | 9 |
# Use Functions with Chat Models
This flow covers how to use the LLM tool chat API in combination with external functions to extend the
capabilities of GPT models.
`functions` is an optional parameter in the <a href='https://platform.openai.com/docs/api-reference/chat/create' target='_blank'>Chat Completion API</a> which can be used to provide function
specifications. The purpose of this is to enable models to generate function arguments which adhere to the provided
specifications. Note that the API will not actually execute any function calls. It is up to developers to execute
function calls using model outputs.
If the `functions` parameter is provided then by default the model will decide when it is appropriate to use one of the
functions. The API can be forced to use a specific function by setting the `function_call` parameter to
`{"name": "<insert-function-name>"}`. The API can also be forced to not use any function by setting the `function_call`
parameter to `"none"`. If a function is used, the output will contain `"finish_reason": "function_call"` in the
response, as well as a `function_call` object that has the name of the function and the generated function arguments.
You can refer to <a href='https://github.com/openai/openai-cookbook/blob/main/examples/How_to_call_functions_with_chat_models.ipynb' target='_blank'>openai sample</a> for more details.
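For reference, a function specification is a JSON-schema-style object. Below is a sketch of one for a hypothetical weather function, written as a Python dict:

```python
get_current_weather_spec = {
    "name": "get_current_weather",
    "description": "Get the current weather in a given location.",
    "parameters": {
        "type": "object",
        "properties": {
            "location": {
                "type": "string",
                "description": "The city and state, e.g. San Francisco, CA",
            },
            "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
        },
        "required": ["location"],
    },
}
```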
## What you will learn
In this flow, you will learn how to use functions with LLM chat models and how to compose function-role messages in a prompt template.
## Tools used in this flow
- LLM tool
- Python tool
## Prerequisites
Install prompt-flow sdk and other dependencies:
```bash
pip install -r requirements.txt
```
## Getting started
### 1 Create connection for LLM tool to use
Go to "Prompt flow" "Connections" tab. Click on "Create" button, select one of LLM tool supported connection types and fill in the configurations.
Currently, there are two connection types supported by LLM tool: "AzureOpenAI" and "OpenAI". If you want to use "AzureOpenAI" connection type, you need to create an Azure OpenAI service first. Please refer to [Azure OpenAI Service](https://azure.microsoft.com/en-us/products/cognitive-services/openai-service/) for more details. If you want to use "OpenAI" connection type, you need to create an OpenAI account first. Please refer to [OpenAI](https://platform.openai.com/) for more details.
```bash
# Override keys with --set to avoid yaml file changes
pf connection create --file ../../../connections/azure_openai.yml --set api_key=<your_api_key> api_base=<your_api_base> --name open_ai_connection
```
Note in [flow.dag.yaml](flow.dag.yaml) we are using connection named `open_ai_connection`.
```bash
# show registered connection
pf connection show --name open_ai_connection
```
### 2 Start chatting
```bash
# run chat flow with default question in flow.dag.yaml
pf flow test --flow .
# run chat flow with new question
pf flow test --flow . --inputs question="How about London next week?"
# start an interactive chat session in CLI
pf flow test --flow . --interactive
# start an interactive chat session in CLI with verbose info
pf flow test --flow . --interactive --verbose
```
## References
- <a href='https://github.com/openai/openai-cookbook/blob/main/examples/How_to_call_functions_with_chat_models.ipynb' target='_blank'>OpenAI cookbook example</a>
- <a href='https://openai.com/blog/function-calling-and-other-api-updates?ref=upstract.com' target='_blank'>OpenAI function calling announcement</a>
- <a href='https://platform.openai.com/docs/guides/gpt/function-calling' target='_blank'>OpenAI function calling doc</a>
- <a href='https://platform.openai.com/docs/api-reference/chat/create' target='_blank'>OpenAI function calling API</a>
| promptflow/examples/flows/chat/use_functions_with_chat_models/README.md/0 | {
"file_path": "promptflow/examples/flows/chat/use_functions_with_chat_models/README.md",
"repo_id": "promptflow",
"token_count": 1074
} | 10 |
# Eval chat math
This example shows how to evaluate answers to math questions by numerically comparing the output results with the standard answers.
Learn more in the corresponding [tutorial](../../../tutorials/flow-fine-tuning-evaluation/promptflow-quality-improvement.md)
Tools used in this flow:
- `python` tool
## Prerequisites
Install promptflow sdk and other dependencies in this folder:
```bash
pip install -r requirements.txt
```
### 1. Test flow with single-line data
Testing flow/node:
```bash
# test with default input value in flow.dag.yaml
pf flow test --flow .
# test with flow inputs
pf flow test --flow . --inputs groundtruth=123 prediction=123
# test node with inputs
pf flow test --flow . --node line_process --inputs groundtruth=123 prediction=123
```
### 2. Create flow run with multi-line data
Create a run to evaluate the flow against a full dataset:
```bash
pf run create --flow . --data ./data.jsonl --stream
``` | promptflow/examples/flows/evaluation/eval-chat-math/README.md/0 | {
"file_path": "promptflow/examples/flows/evaluation/eval-chat-math/README.md",
"repo_id": "promptflow",
"token_count": 272
} | 11 |
import unittest
from match import is_match
class IsMatchTest(unittest.TestCase):
def test_normal(self):
self.assertEqual(is_match(["a", "b"], ["B", "a"], True, True, False), True)
self.assertEqual(is_match(["a", "b"], ["B", "a"], True, False, False), False)
self.assertEqual(is_match(["a", "b"], ["B", "a"], False, True, False), False)
self.assertEqual(is_match(["a", "b"], ["B", "a"], False, False, True), False)
self.assertEqual(is_match(["a", "b"], ["a", "b"], False, False, False), True)
self.assertEqual(is_match(["a", "b"], ["a", "b", "c"], True, False, True), True)
| promptflow/examples/flows/evaluation/eval-entity-match-rate/is_match_test.py/0 | {
"file_path": "promptflow/examples/flows/evaluation/eval-entity-match-rate/is_match_test.py",
"repo_id": "promptflow",
"token_count": 269
} | 12 |
from promptflow import tool
import re
@tool
def parse_score(gpt_score: str):
    score = extract_float(gpt_score)
    if score is None:
        raise ValueError(f"No numeric score found in model output: {gpt_score!r}")
    return score
def extract_float(s):
match = re.search(r"[-+]?\d*\.\d+|\d+", s)
if match:
return float(match.group())
else:
return None
| promptflow/examples/flows/evaluation/eval-perceived-intelligence/parse_score.py/0 | {
"file_path": "promptflow/examples/flows/evaluation/eval-perceived-intelligence/parse_score.py",
"repo_id": "promptflow",
"token_count": 119
} | 13 |
from promptflow import tool
@tool
def validate_input(question: str, answer: str, context: str, ground_truth: str, selected_metrics: dict) -> dict:
input_data = {"question": question, "answer": answer, "context": context, "ground_truth": ground_truth}
expected_input_cols = set(input_data.keys())
    dict_metric_required_fields = {
        "gpt_groundedness": {"answer", "context"},
        "gpt_relevance": {"question", "answer", "context"},
        "gpt_coherence": {"question", "answer"},
        "gpt_similarity": {"question", "answer", "ground_truth"},
        "gpt_fluency": {"question", "answer"},
        "f1_score": {"answer", "ground_truth"},
        "ada_similarity": {"answer", "ground_truth"},
    }
actual_input_cols = set()
for col in expected_input_cols:
if input_data[col] and input_data[col].strip():
actual_input_cols.add(col)
data_validation = selected_metrics
for metric in selected_metrics:
if selected_metrics[metric]:
metric_required_fields = dict_metric_required_fields[metric]
if metric_required_fields <= actual_input_cols:
data_validation[metric] = True
else:
data_validation[metric] = False
return data_validation
| promptflow/examples/flows/evaluation/eval-qna-non-rag/validate_input.py/0 | {
"file_path": "promptflow/examples/flows/evaluation/eval-qna-non-rag/validate_input.py",
"repo_id": "promptflow",
"token_count": 689
} | 14 |
# Analyze Documents
A flow that analyzes documents with various language-based Machine Learning models.
This sample flow utilizes Azure AI Language's pre-built and optimized language models to perform various analyses on text or documents. It performs:
- [Translation](https://learn.microsoft.com/en-us/rest/api/cognitiveservices/translator/translator/translate?view=rest-cognitiveservices-translator-v3.0&tabs=HTTP)
- [Personally Identifiable Information (PII) detection](https://learn.microsoft.com/en-us/azure/ai-services/language-service/personally-identifiable-information/overview)
- [Named Entity Recognition (NER)](https://learn.microsoft.com/en-us/azure/ai-services/language-service/named-entity-recognition/overview)
- [Document Summarization](https://learn.microsoft.com/en-us/azure/ai-services/language-service/summarization/overview?tabs=document-summarization)
- [Sentiment Analysis & Opinion Mining](https://learn.microsoft.com/en-us/azure/ai-services/language-service/sentiment-opinion-mining/overview?tabs=prebuilt)
See the [promptflow-azure-ai-language](https://github.com/microsoft/promptflow/blob/main/docs/integrations/tools/azure_ai_language_tool.md) tool package reference documentation for further information.
Tools used in this flow:
- `python` tool
- `translator` tool from the `promptflow-azure-ai-language` package
- `pii_entity_recognition` tool from the `promptflow-azure-ai-language` package
- `abstractive_summarization` tool from the `promptflow-azure-ai-language` package
- `extractive_summarization` tool from the `promptflow-azure-ai-language` package
- `entity_recognition` tool from the `promptflow-azure-ai-language` package
- `sentiment_analysis` tool from the `promptflow-azure-ai-language` package
Connections used in this flow:
- `Custom` connection (Azure AI Language)
- `Custom` connection (Azure AI Translator)
## Prerequisites
Install promptflow sdk and other dependencies:
```
pip install -r requirements.txt
```
## Setup connection
Prepare your [Azure AI Language Resource](https://azure.microsoft.com/en-us/products/ai-services/ai-language) first, and [create a Language Resource](https://portal.azure.com/#create/Microsoft.CognitiveServicesTextAnalytics) if necessary. From your Language Resource, obtain its `api_key` and `endpoint`.
Create a connection to your Language Resource. The connection uses the `CustomConnection` schema:
```
# Override keys with --set to avoid yaml file changes
pf connection create -f ../connections/azure_ai_language.yml --set secrets.api_key=<your_api_key> configs.endpoint=<your_endpoint> name=azure_ai_language_connection
```
Ensure you have created the `azure_ai_language_connection`:
```
pf connection show -n azure_ai_language_connection
```
To use the `translator` tool, you must have an existing [Azure AI Translator resource](https://azure.microsoft.com/en-us/products/ai-services/ai-translator). [Create a Translator resource](https://learn.microsoft.com/en-us/azure/ai-services/translator/create-translator-resource) first, if necessary. From your Translator Resource, obtain its `api_key`, `endpoint`, and `region` (if applicable).
Create a connection to your Translator Resource. The connection uses the `CustomConnection` schema:
```
# Override keys with --set to avoid yaml file changes
pf connection create -f ../connections/azure_ai_translator.yml --set secrets.api_key=<your_api_key> configs.endpoint=<your_endpoint> configs.region=<your_region> name=azure_ai_translator_connection
```
Ensure you have created the `azure_ai_translator_connection`:
```
pf connection show -n azure_ai_translator_connection
```
## Run flow
### Run with single line input
```
# Test with default input values in flow.dag.yaml:
pf flow test --flow .
# Test with specific input:
pf flow test --flow . --inputs document_path=<path_to_txt_file> language=<document_language_code>
```
### Run with multiple lines of data
```
pf run create --flow . --data ./data.jsonl --column-mapping document_path='${data.document_path}' language='${data.language}' --stream
```
You can also skip providing column-mapping if provided data has same column name as the flow. Reference [here](https://microsoft.github.io/promptflow/how-to-guides/run-and-evaluate-a-flow/use-column-mapping.html) for default behavior when column-mapping not provided in CLI.
### Flow description
The flow first uses a `python` node to read the provided `.txt` file into a string. This string is passed to a `pii_entity_recognition` node where Personally Identifiable Information (PII) is redacted. The redacted text is passed to the `abstractive_summarization`, `extractive_summarization` and `entity_recognition` nodes, where summaries and named entities are obtained. Finally, the generated abstractive summary is forwarded to a `sentiment_analysis` node to analyze its general sentiment.
### Contact
Please reach out to Sean Murray (<[email protected]>) or <[email protected]> with any issues. | promptflow/examples/flows/integrations/azure-ai-language/analyze_documents/README.md/0 | {
"file_path": "promptflow/examples/flows/integrations/azure-ai-language/analyze_documents/README.md",
"repo_id": "promptflow",
"token_count": 1454
} | 15 |
from promptflow.tools.aoai import chat as aoai_chat
from promptflow.tools.openai import chat as openai_chat
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection
from util import count_message_tokens, count_string_tokens, create_chat_message, generate_context, get_logger, \
parse_reply, construct_prompt
autogpt_logger = get_logger("autogpt_agent")
class AutoGPT:
def __init__(
self,
connection,
tools,
full_message_history,
functions,
system_prompt=None,
triggering_prompt=None,
user_prompt=None,
model_or_deployment_name=None
):
self.tools = tools
self.full_message_history = full_message_history
self.functions = functions
self.system_prompt = system_prompt
self.connection = connection
self.model_or_deployment_name = model_or_deployment_name
self.triggering_prompt = triggering_prompt
self.user_prompt = user_prompt
def chat_with_ai(self, token_limit):
"""Interact with the OpenAI API, sending the prompt, message history and functions."""
# Reserve 1000 tokens for the response
send_token_limit = token_limit - 1000
(
next_message_to_add_index,
current_tokens_used,
insertion_index,
current_context,
) = generate_context(self.system_prompt, self.full_message_history, self.user_prompt)
# Account for user input (appended later)
current_tokens_used += count_message_tokens([create_chat_message("user", self.triggering_prompt)])
current_tokens_used += 500 # Account for memory (appended later)
# Add Messages until the token limit is reached or there are no more messages to add.
while next_message_to_add_index >= 0:
message_to_add = self.full_message_history[next_message_to_add_index]
tokens_to_add = count_message_tokens([message_to_add])
if current_tokens_used + tokens_to_add > send_token_limit:
break
# Add the most recent message to the start of the current context, after the two system prompts.
current_context.insert(
insertion_index, self.full_message_history[next_message_to_add_index]
)
# Count the currently used tokens
current_tokens_used += tokens_to_add
# Move to the next most recent message in the full message history
next_message_to_add_index -= 1
# Append user input, the length of this is accounted for above
current_context.extend([create_chat_message("user", self.triggering_prompt)])
# Calculate remaining tokens
tokens_remaining = token_limit - current_tokens_used
current_context = construct_prompt(current_context)
if isinstance(self.connection, AzureOpenAIConnection):
try:
response = aoai_chat(
connection=self.connection,
prompt=current_context,
deployment_name=self.model_or_deployment_name,
max_tokens=tokens_remaining,
functions=self.functions)
return response
            except Exception as e:
                if "The API deployment for this resource does not exist" in str(e):
                    raise Exception(
                        "Please fill in the deployment name of your Azure OpenAI resource gpt-4 model.") from e
                # Re-raise unexpected errors instead of silently returning None.
                raise
elif isinstance(self.connection, OpenAIConnection):
response = openai_chat(
connection=self.connection,
prompt=current_context,
model=self.model_or_deployment_name,
max_tokens=tokens_remaining,
functions=self.functions)
return response
else:
raise ValueError("Connection must be an instance of AzureOpenAIConnection or OpenAIConnection")
def run(self):
tools = {t.__name__: t for t in self.tools}
while True:
# Send message to AI, get response
response = self.chat_with_ai(token_limit=4000)
if "function_call" in response:
# Update full message history
function_name = response["function_call"]["name"]
parsed_output = parse_reply(response["function_call"]["arguments"])
if "Error" in parsed_output:
error_message = parsed_output["Error"]
autogpt_logger.info(f"Error: {error_message}")
command_result = f"Error: {error_message}"
else:
autogpt_logger.info(f"Function generation requested, function = {function_name}, args = "
f"{parsed_output}")
self.full_message_history.append(
create_chat_message("assistant", f"Function generation requested, function = {function_name}, "
f"args = {parsed_output}")
)
if function_name == "finish":
response = parsed_output["response"]
autogpt_logger.info(f"Responding to user: {response}")
return response
if function_name in tools:
tool = tools[function_name]
try:
autogpt_logger.info(f"Next function = {function_name}, arguments = {parsed_output}")
result = tool(**parsed_output)
command_result = f"Executed function {function_name} and returned: {result}"
except Exception as e:
command_result = (
f"Error: {str(e)}, {type(e).__name__}"
)
result_length = count_string_tokens(command_result)
if result_length + 600 > 4000:
command_result = f"Failure: function {function_name} returned too much output. Do not " \
f"execute this function again with the same arguments."
else:
command_result = f"Unknown function '{function_name}'. Please refer to available functions " \
f"defined in functions parameter."
# Append command result to the message history
self.full_message_history.append(create_chat_message("function", str(command_result), function_name))
autogpt_logger.info(f"function: {command_result}")
else:
autogpt_logger.info(f"No function generated, returned: {response['content']}")
self.full_message_history.append(
create_chat_message("assistant", f"No function generated, returned: {response['content']}")
)
| promptflow/examples/flows/standard/autonomous-agent/autogpt_class.py/0 | {
"file_path": "promptflow/examples/flows/standard/autonomous-agent/autogpt_class.py",
"repo_id": "promptflow",
"token_count": 3387
} | 16 |
system:
You are an assistant which can write code. The response should only contain code.
user:
Write a simple {{text}} program that displays the greeting message when executed. | promptflow/examples/flows/standard/basic-with-builtin-llm/hello.jinja2/0 | {
"file_path": "promptflow/examples/flows/standard/basic-with-builtin-llm/hello.jinja2",
"repo_id": "promptflow",
"token_count": 38
} | 17 |
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Run.schema.json
flow: .
data: data.jsonl
environment_variables:
# environment variables from connection
AZURE_OPENAI_API_KEY: ${open_ai_connection.api_key}
AZURE_OPENAI_API_BASE: ${open_ai_connection.api_base}
AZURE_OPENAI_API_TYPE: azure
column_mapping:
text: ${data.text}
| promptflow/examples/flows/standard/basic/run.yml/0 | {
"file_path": "promptflow/examples/flows/standard/basic/run.yml",
"repo_id": "promptflow",
"token_count": 135
} | 18 |
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
query:
type: string
default: When will my order be shipped?
outputs:
response:
type: string
reference: ${generate_response.output}
nodes:
- name: classify_with_llm
type: llm
source:
type: code
path: classify_with_llm.jinja2
inputs:
deployment_name: gpt-35-turbo
max_tokens: 128
query: ${inputs.query}
connection: open_ai_connection
api: chat
- name: class_check
type: python
source:
type: code
path: class_check.py
inputs:
llm_result: ${classify_with_llm.output}
- name: order_search
type: python
source:
type: code
path: order_search.py
inputs:
query: ${inputs.query}
activate:
when: ${class_check.output}
is: order_search
- name: product_info
type: python
source:
type: code
path: product_info.py
inputs:
query: ${inputs.query}
activate:
when: ${class_check.output}
is: product_info
- name: product_recommendation
type: python
source:
type: code
path: product_recommendation.py
inputs:
query: ${inputs.query}
activate:
when: ${class_check.output}
is: product_recommendation
- name: generate_response
type: python
source:
type: code
path: generate_response.py
inputs:
order_search: ${order_search.output}
product_info: ${product_info.output}
product_recommendation: ${product_recommendation.output}
environment:
python_requirements_txt: requirements.txt
| promptflow/examples/flows/standard/conditional-flow-for-switch/flow.dag.yaml/0 | {
"file_path": "promptflow/examples/flows/standard/conditional-flow-for-switch/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 588
} | 19 |
You are given a list of orders with item_numbers from a customer and a statement from the customer. It is your job to identify
the intent that the customer has with their statement. Possible intents can be:
"product return", "product exchange", "general question", "product question", "other".
If the intent is product related ("product return", "product exchange", "product question"), then you should also
provide the order id and item that the customer is referring to in their statement.
For instance, if you are given the following list of orders:
order_number: 2020230
date: 2023-04-23
store_location: SeattleStore
items:
- description: Roof Rack, color black, price $199.99
item_number: 101010
- description: Running Shoes, size 10, color blue, price $99.99
item_number: 202020
You are given the following customer statements:
- I am having issues with the jobbing shoes I bought.
Then you should answer in valid YAML format with the fields intent, order_number, description, and item_number, like so:
intent: product question
order_number: 2020230
description: Running Shoes, size 10, color blue, price $99.99
item_number: 202020
Here is the actual problem you need to solve:
In triple backticks below is the customer information and a list of orders.
```
{{customer_info}}
```
In triple backticks below is the chat history with customer statements and replies from the customer service agent:
```
{{chat_history}}
```
What is the customer's `intent:` here?
"product return", "exchange product", "general question", "product question" or "other"?
Reply with only the intent string. | promptflow/examples/flows/standard/customer-intent-extraction/user_intent_few_shot.jinja2/0 | {
"file_path": "promptflow/examples/flows/standard/customer-intent-extraction/user_intent_few_shot.jinja2",
"repo_id": "promptflow",
"token_count": 417
} | 20 |
import os
from pathlib import Path
saved_path = os.getcwd()
os.chdir(Path(__file__).parent)
source_folder = Path("../web-classification")
for file_name in os.listdir(source_folder):
if not Path(file_name).exists():
os.symlink(
source_folder / file_name,
file_name
)
os.chdir(saved_path)
| promptflow/examples/flows/standard/flow-with-symlinks/create_symlinks.py/0 | {
"file_path": "promptflow/examples/flows/standard/flow-with-symlinks/create_symlinks.py",
"repo_id": "promptflow",
"token_count": 150
} | 21 |
from promptflow import tool
from file import File
@tool
def load_code(source: str):
file = File(source)
return file.content
| promptflow/examples/flows/standard/gen-docstring/load_code_tool.py/0 | {
"file_path": "promptflow/examples/flows/standard/gen-docstring/load_code_tool.py",
"repo_id": "promptflow",
"token_count": 43
} | 22 |
system:
Your task is to find entities of certain type from the given text content.
If there are multiple entities, please return them all, comma separated, e.g. "entity1, entity2, entity3".
You should only return the entity list, nothing else.
If there's no such entity, please return "None".
user:
Entity type: {{entity_type}}
Text content: {{text}}
Entities: | promptflow/examples/flows/standard/named-entity-recognition/NER_LLM.jinja2/0 | {
"file_path": "promptflow/examples/flows/standard/named-entity-recognition/NER_LLM.jinja2",
"repo_id": "promptflow",
"token_count": 96
} | 23 |
from jinja2 import Template
from promptflow import tool
from promptflow.connections import CustomConnection
from promptflow.contracts.types import PromptTemplate
@tool
def my_tool(connection: CustomConnection, prompt: PromptTemplate, **kwargs) -> str:
# Replace with your tool code, customise your own code to handle and use the prompt here.
# Usually connection contains configs to connect to an API.
# Not all tools need a connection. You can remove it if you don't need it.
rendered_prompt = Template(prompt, trim_blocks=True, keep_trailing_newline=True).render(**kwargs)
return rendered_prompt
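# Illustrative usage (hypothetical values): template variables are supplied
# as keyword arguments and rendered by Jinja2.
#   my_tool(CustomConnection({"key": "val"}), "Hello {{name}}!", name="world")
#   -> "Hello world!"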
| promptflow/examples/tools/tool-package-quickstart/my_tool_package/tools/tool_with_custom_llm_type.py/0 | {
"file_path": "promptflow/examples/tools/tool-package-quickstart/my_tool_package/tools/tool_with_custom_llm_type.py",
"repo_id": "promptflow",
"token_count": 171
} | 24 |
import pytest
import unittest
from promptflow.connections import CustomConnection
from my_tool_package.tools.my_tool_1 import my_tool
@pytest.fixture
def my_custom_connection() -> CustomConnection:
my_custom_connection = CustomConnection(
{
"api-key" : "my-api-key",
"api-secret" : "my-api-secret",
"api-url" : "my-api-url"
}
)
return my_custom_connection
class TestMyTool1:
def test_my_tool_1(self, my_custom_connection):
result = my_tool(my_custom_connection, input_text="Microsoft")
assert result == "Hello Microsoft"
# Run the unit tests
if __name__ == "__main__":
unittest.main()
| promptflow/examples/tools/tool-package-quickstart/tests/test_my_tool_1.py/0 | {
"file_path": "promptflow/examples/tools/tool-package-quickstart/tests/test_my_tool_1.py",
"repo_id": "promptflow",
"token_count": 278
} | 25 |
def hello(input_text: str) -> str:
# Replace with your own code.
return "Hello " + input_text
| promptflow/examples/tools/use-cases/filepath-input-tool-showcase/hello_method.py/0 | {
"file_path": "promptflow/examples/tools/use-cases/filepath-input-tool-showcase/hello_method.py",
"repo_id": "promptflow",
"token_count": 36
} | 26 |
import os
import sys
from promptflow._cli._pf._connection import create_connection
from streamlit.web import cli as st_cli
from streamlit.runtime import exists
from main import start
def is_yaml_file(file_path):
_, file_extension = os.path.splitext(file_path)
return file_extension.lower() in ('.yaml', '.yml')
def create_connections(directory_path) -> None:
for root, dirs, files in os.walk(directory_path):
for file in files:
file_path = os.path.join(root, file)
if is_yaml_file(file_path):
create_connection(file_path)
if __name__ == "__main__":
create_connections(os.path.join(os.path.dirname(__file__), "connections"))
if exists():
start()
else:
main_script = os.path.join(os.path.dirname(__file__), "main.py")
sys.argv = ["streamlit", "run", main_script, "--global.developmentMode=false"]
st_cli.main(prog_name="streamlit")
| promptflow/examples/tutorials/flow-deploy/distribute-flow-as-executable-app/app.py/0 | {
"file_path": "promptflow/examples/tutorials/flow-deploy/distribute-flow-as-executable-app/app.py",
"repo_id": "promptflow",
"token_count": 384
} | 27 |
<jupyter_start><jupyter_text>Flow Run Management in Azure**Requirements** - In order to benefit from this tutorial, you will need:- An Azure account with an active subscription - [Create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F)- An Azure ML workspace - [Configure workspace](../../configuration.ipynb)- A python environment- Installed prompt flow SDK**Learning Objectives** - By the end of this tutorial, you should be able to:- create run with remote data- create run which references another runs inputs- manage runs via run.yaml- create run with connection override- create run with custom runtime**Motivations** - This guide will walk you through cloud run management abilities. 0. Install dependent packages<jupyter_code>%pip install -r ../../requirements.txt<jupyter_output><empty_output><jupyter_text>1. Connect to Azure Machine Learning WorkspaceThe [workspace](https://docs.microsoft.com/en-us/azure/machine-learning/concept-workspace) is the top-level resource for Azure Machine Learning, providing a centralized place to work with all the artifacts you create when you use Azure Machine Learning. In this section we will connect to the workspace in which the job will be run. 1.1 Import the required libraries<jupyter_code>from azure.identity import DefaultAzureCredential, InteractiveBrowserCredential
from azure.ai.ml.entities import Data
from azure.core.exceptions import ResourceNotFoundError
from promptflow.azure import PFClient
from promptflow.entities import Run<jupyter_output><empty_output><jupyter_text>1.2 Configure credentialWe are using `DefaultAzureCredential` to get access to the workspace. `DefaultAzureCredential` should be capable of handling most Azure SDK authentication scenarios. If it does not work for you, see these references for other available credentials: [configure credential example](../../configuration.ipynb), [azure-identity reference doc](https://docs.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python).<jupyter_code>try:
credential = DefaultAzureCredential()
# Check if given credential can get token successfully.
credential.get_token("https://management.azure.com/.default")
except Exception as ex:
    # Fall back to InteractiveBrowserCredential in case DefaultAzureCredential does not work
    credential = InteractiveBrowserCredential()<jupyter_output><empty_output><jupyter_text>1.3 Get a handle to the workspaceWe use a config file to connect to the workspace. The Azure ML workspace should be configured with a compute cluster. [Check this notebook to configure a workspace](../../configuration.ipynb)<jupyter_code># Get a handle to the workspace
pf = PFClient.from_config(credential=credential)<jupyter_output><empty_output><jupyter_text>1.4 Create necessary connectionsConnections help securely store and manage secret keys or other sensitive credentials required for interacting with LLMs and other external tools, for example Azure Content Safety.In this notebook, we will use the flow `web-classification`, which uses the connection `open_ai_connection` inside; we need to set up the connection if we haven't added it before.Prepare your Azure OpenAI resource following this [instruction](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal) and get your `api_key` if you don't have one.Please go to the [workspace portal](https://ml.azure.com/), click `Prompt flow` -> `Connections` -> `Create`, then follow the instructions to create your own connections. Learn more on [connections](https://learn.microsoft.com/en-us/azure/machine-learning/prompt-flow/concept-connections?view=azureml-api-2). 2. Create run with remote dataInstead of relying on local files, there may be situations where you want to reuse data that's already available in your workspace when submitting a flow.The following code cells show how to create a flow run with remote data. 2.1 Create or update remote data<jupyter_code>data_name, data_version = "flow_run_test_data", "1"
try:
data = pf.ml_client.data.get(name=data_name, version=data_version)
except ResourceNotFoundError:
data = Data(
name=data_name,
version=data_version,
path=f"../../flows/standard/web-classification/data.jsonl",
type="uri_file",
)
data = pf.ml_client.data.create_or_update(data)<jupyter_output><empty_output><jupyter_text>2.2 Prepare remote data id<jupyter_code>data_id = f"azureml:{data.name}:{data.version}"
print(data_id)<jupyter_output><empty_output><jupyter_text>2.3 Create a flow run with remote dataWhen creating the run, you can also customize the runtime's instance type and idle time, or reset the runtime to a clean state. The following code cell shows how to do so.<jupyter_code># create run
run = Run(
# local flow file
flow="../../flows/standard/web-classification",
# remote data
data=data_id,
# to customize runtime instance type and idle time, you can provide them in resources
# resources={
# "instance_type": "STANDARD_DS11_V2",
# "idle_time_before_shutdown_minutes": 10
# }
)
base_run = pf.runs.create_or_update(
run=run,
# to reset automatic runtime to clean state, set reset_runtime to True
# reset_runtime=True,
)<jupyter_output><empty_output><jupyter_text>2.4 Stream the flow run to make sure it runs successfully<jupyter_code>pf.runs.stream(base_run)<jupyter_output><empty_output><jupyter_text>3. Create a flow run which uses an existing run's inputsWhen running a flow against an existing run, you can reference either its inputs or its outputs in the column mapping.The following code cell shows how to reference a run's inputs in the column mapping.<jupyter_code>run = Run(
# local flow file
flow="../../flows/standard/web-classification",
    # the existing run to reference (a Run object or run name)
run=run,
column_mapping={
# reference another run's input data columns
"url": "${run.inputs.url}",
"answer": "${run.inputs.answer}",
"evidence": "${run.inputs.evidence}",
},
)
base_run = pf.runs.create_or_update(
run=run,
)
pf.runs.stream(base_run)<jupyter_output><empty_output><jupyter_text>4. Create a flow run with connection overrideSometimes you want to switch the connection or deployment name inside a flow when submitting it.Connection override provides an easy way to do this without changing the original `flow.dag.yaml`.In the following code cell, we will submit the flow `web-classification` and override its connection `open_ai_connection` with `azure_open_ai_connection`. Please make sure the connection `azure_open_ai_connection` exists in your workspace.<jupyter_code>run = Run(
# local flow file
flow="../../flows/standard/web-classification",
data="../../flows/standard/web-classification/data.jsonl",
# override connection for node classify_with_llm & summarize_text_content
connections={
"classify_with_llm": {"connection": "azure_open_ai_connection"},
"summarize_text_content": {"connection": "azure_open_ai_connection"},
},
)
base_run = pf.runs.create_or_update(
run=run,
)
pf.runs.stream(base_run)<jupyter_output><empty_output><jupyter_text>5. Create a flow run which uses a custom runtimePrompt flow's runtime provides the computing resources required for the flow to run. You can follow this [instruction](https://learn.microsoft.com/en-us/azure/machine-learning/prompt-flow/how-to-create-manage-runtime?view=azureml-api-2) to create a runtime in your workspace and then use it to submit flow runs.The following code cell shows how to use your custom runtime.<jupyter_code># create run
run = Run(
# local flow file
flow="../../flows/standard/web-classification",
# remote data
data=data_id,
)
# comment below create operation as it requires a runtime with specific name
# which will break CI pipeline
# base_run = pf.runs.create_or_update(
# run=run,
# runtime = "<runtime-name>" # TODO replace with your runtime name
# )<jupyter_output><empty_output> | promptflow/examples/tutorials/run-management/cloud-run-management.ipynb/0 | {
"file_path": "promptflow/examples/tutorials/run-management/cloud-run-management.ipynb",
"repo_id": "promptflow",
"token_count": 2407
} | 28 |
@echo off
setlocal
set MAIN_EXE=%~dp0.\pfcli.exe
"%MAIN_EXE%" pfazure %* | promptflow/scripts/installer/windows/scripts/pfazure.bat/0 | {
"file_path": "promptflow/scripts/installer/windows/scripts/pfazure.bat",
"repo_id": "promptflow",
"token_count": 41
} | 29 |
import subprocess
from pathlib import Path
import hashlib
from jinja2 import Environment, FileSystemLoader, Template
from .telemetry_obj import Telemetry
class Step:
"""
StepType in workflow
"""
Environment = None
@staticmethod
    def init_jinja_loader() -> None:
jinja_folder_path = (
Path(ReadmeStepsManage.git_base_dir())
/ "scripts"
/ "readme"
/ "ghactions_driver"
/ "workflow_steps"
)
Step.Environment = Environment(
loader=FileSystemLoader(jinja_folder_path.resolve())
)
def __init__(self, name: str) -> None:
self.workflow_name = name
def get_workflow_step(self) -> str:
# virtual method for override
return ""
@staticmethod
def get_workflow_template(step_file_name: str) -> Template:
# virtual method for override
if Step.Environment is None:
Step.init_jinja_loader()
template = Step.Environment.get_template(step_file_name)
return template
class AzureLoginStep(Step):
def __init__(self) -> None:
Step.__init__(self, "Azure Login")
def get_workflow_step(self) -> str:
template = Step.get_workflow_template("step_azure_login.yml.jinja2")
return template.render(
{
"step_name": self.workflow_name,
}
)
class InstallDependenciesStep(Step):
def __init__(self) -> None:
Step.__init__(self, "Prepare requirements")
def get_workflow_step(self) -> str:
template = Step.get_workflow_template("step_install_deps.yml.jinja2")
return template.render(
{
"step_name": self.workflow_name,
"working_dir": ReadmeSteps.working_dir,
}
)
class InstallDevDependenciesStep(Step):
def __init__(self) -> None:
Step.__init__(self, "Prepare dev requirements")
def get_workflow_step(self) -> str:
template = Step.get_workflow_template("step_install_dev_deps.yml.jinja2")
return template.render(
{
"step_name": self.workflow_name,
"working_dir": ReadmeSteps.working_dir,
}
)
class CreateAoaiFromYaml(Step):
def __init__(self, yaml_name: str) -> None:
Step.__init__(self, "Create AOAI Connection from YAML")
self.yaml_name = yaml_name
def get_workflow_step(self) -> str:
template = Step.get_workflow_template("step_yml_create_aoai.yml.jinja2")
return template.render(
{
"step_name": self.workflow_name,
"yaml_name": self.yaml_name,
}
)
class ExtractStepsAndRun(Step):
def __init__(self) -> None:
Step.__init__(self, f"Extract Steps {ReadmeSteps.readme_name}")
def get_workflow_step(self) -> str:
template = Step.get_workflow_template("step_extract_steps_and_run.yml.jinja2")
return template.render(
{
"step_name": self.workflow_name,
"working_dir": ReadmeSteps.working_dir,
"readme_name": ReadmeSteps.readme_name,
}
)
class ExtractStepsAndRunGPTFour(Step):
def __init__(self) -> None:
Step.__init__(self, f"Extract Steps {ReadmeSteps.readme_name}")
def get_workflow_step(self) -> str:
template = Step.get_workflow_template(
"step_extract_steps_and_run_gpt4.yml.jinja2"
)
return template.render(
{
"step_name": self.workflow_name,
"working_dir": ReadmeSteps.working_dir,
"readme_name": ReadmeSteps.readme_name,
}
)
class CreateEnv(Step):
def __init__(self) -> None:
Step.__init__(self, "Refine .env file")
def get_workflow_step(self) -> str:
template = Step.get_workflow_template("step_create_env.yml.jinja2")
content = template.render(
{"step_name": self.workflow_name, "working_dir": ReadmeSteps.working_dir}
)
return content
class CreateEnvGPTFour(Step):
def __init__(self) -> None:
Step.__init__(self, "Refine .env file")
def get_workflow_step(self) -> str:
template = Step.get_workflow_template("step_create_env_gpt4.yml.jinja2")
content = template.render(
{"step_name": self.workflow_name, "working_dir": ReadmeSteps.working_dir}
)
return content
class CreateAoaiFromEnv(Step):
def __init__(self, connection_name: str) -> None:
Step.__init__(self, "Create AOAI Connection from ENV file")
self.connection_name = connection_name
def get_workflow_step(self) -> str:
template = Step.get_workflow_template("step_env_create_aoai.yml.jinja2")
content = template.render(
{
"step_name": self.workflow_name,
"working_dir": ReadmeSteps.working_dir,
"connection_name": self.connection_name,
}
)
return content
class CreateRunYaml(Step):
def __init__(self) -> None:
Step.__init__(self, "Create run.yml")
def get_workflow_step(self) -> str:
template = Step.get_workflow_template("step_create_run_yml.yml.jinja2")
content = template.render(
{"step_name": self.workflow_name, "working_dir": ReadmeSteps.working_dir}
)
return content
class ReadmeSteps:
"""
Static class to record steps, to be filled in workflow templates and Readme
"""
step_array = [] # Record steps
readme_name = "" # Record readme name
working_dir = "" # the working directory of flow, relative to git_base_dir
template = "" # Select a base template under workflow_templates folder
workflow = "" # Target workflow name to be generated
@staticmethod
def remember_step(step: Step) -> Step:
ReadmeSteps.step_array.append(step)
return step
@staticmethod
def get_length() -> int:
return len(ReadmeSteps.step_array)
# region steps
@staticmethod
def create_env() -> Step:
return ReadmeSteps.remember_step(CreateEnv())
@staticmethod
def create_env_gpt4() -> Step:
return ReadmeSteps.remember_step(CreateEnvGPTFour())
@staticmethod
def yml_create_aoai(yaml_name: str) -> Step:
return ReadmeSteps.remember_step(CreateAoaiFromYaml(yaml_name=yaml_name))
@staticmethod
def env_create_aoai(connection_name: str) -> Step:
return ReadmeSteps.remember_step(
CreateAoaiFromEnv(connection_name=connection_name)
)
@staticmethod
def azure_login() -> Step:
return ReadmeSteps.remember_step(AzureLoginStep())
@staticmethod
def install_dependencies() -> Step:
return ReadmeSteps.remember_step(InstallDependenciesStep())
@staticmethod
def install_dev_dependencies() -> Step:
return ReadmeSteps.remember_step(InstallDevDependenciesStep())
@staticmethod
def create_run_yaml() -> Step:
return ReadmeSteps.remember_step(CreateRunYaml())
@staticmethod
def extract_steps_and_run() -> Step:
return ReadmeSteps.remember_step(ExtractStepsAndRun())
@staticmethod
def extract_steps_and_run_gpt_four() -> Step:
return ReadmeSteps.remember_step(ExtractStepsAndRunGPTFour())
# endregion steps
@staticmethod
def setup_target(
working_dir: str, template: str, target: str, readme_name: str
) -> str:
"""
        Used at the very top of a jinja template to record basic setup information
"""
ReadmeSteps.working_dir = working_dir
ReadmeSteps.template = template
ReadmeSteps.workflow = target
ReadmeSteps.step_array = []
ReadmeSteps.readme_name = readme_name
return ""
@staticmethod
def cleanup() -> None:
ReadmeSteps.working_dir = ""
ReadmeSteps.template = ""
ReadmeSteps.workflow = ""
ReadmeSteps.step_array = []
class ReadmeStepsManage:
"""
    Static methods to manage all readme steps
"""
repo_base_dir = ""
@staticmethod
def git_base_dir() -> str:
"""
Get the base directory of the git repo
"""
if ReadmeStepsManage.repo_base_dir == "":
            try:
                ReadmeStepsManage.repo_base_dir = (
                    subprocess.check_output(["git", "rev-parse", "--show-toplevel"])
                    .decode("utf-8")
                    .strip()
                )
            except Exception:
                # Not in a git repo (or git unavailable): fall back to a path-based guess.
                ReadmeStepsManage.repo_base_dir = Path(__file__).parent.parent.parent.parent.resolve()
print(ReadmeStepsManage.repo_base_dir)
return ReadmeStepsManage.repo_base_dir
@staticmethod
def write_workflow(
workflow_name: str, pipeline_name: str, output_telemetry=Telemetry()
) -> None:
# Schedule notebooks at different times to reduce maximum quota usage.
name_hash = int(hashlib.sha512(workflow_name.encode()).hexdigest(), 16)
schedule_minute = name_hash % 60
schedule_hour = (name_hash // 60) % 4 + 19 # 19-22 UTC
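        # e.g., a name hashing to minute 37 and hour 20 yields crontab "37 20 * * *"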
if "tutorials" in workflow_name:
# markdown filename has some exceptions, special handle here
if "chat_with_pdf" in workflow_name:
readme_name = "chat-with-pdf.md"
elif (
"fine_tuning_evaluation_promptflow_quality_improvement" in workflow_name
):
readme_name = "promptflow-quality-improvement.md"
else:
readme_name = "README.md"
readme_path = (
Path(ReadmeStepsManage.git_base_dir())
/ ReadmeSteps.working_dir
/ readme_name
)
# local import to avoid circular import
from .resource_resolver import resolve_tutorial_resource
path_filter = resolve_tutorial_resource(
workflow_name, readme_path.resolve()
)
else:
if (
"flow_with_additional_includes" in workflow_name
or "flow_with_symlinks" in workflow_name
):
# these two flows have dependencies on flow web-classification
# so corresponding workflows should also listen to changes in web-classification
path_filter = (
f"[ {ReadmeSteps.working_dir}/**, "
+ "examples/*requirements.txt, "
+ "examples/flows/standard/web-classification/**, "
+ f".github/workflows/{workflow_name}.yml ]"
)
else:
path_filter = (
f"[ {ReadmeSteps.working_dir}/**, "
+ "examples/*requirements.txt, "
+ f".github/workflows/{workflow_name}.yml ]"
)
replacements = {
"steps": ReadmeSteps.step_array,
"workflow_name": workflow_name,
"ci_name": pipeline_name,
"path_filter": path_filter,
"crontab": f"{schedule_minute} {schedule_hour} * * *",
"crontab_comment": f"Every day starting at {schedule_hour - 16}:{schedule_minute} BJT",
}
workflow_template_path = (
Path(ReadmeStepsManage.git_base_dir())
/ "scripts"
/ "readme"
/ "ghactions_driver"
/ "workflow_templates"
)
target_path = (
Path(ReadmeStepsManage.git_base_dir())
/ ".github"
/ "workflows"
/ f"{workflow_name}.yml"
)
template = Environment(
loader=FileSystemLoader(workflow_template_path.resolve())
).get_template(ReadmeSteps.template)
content = template.render(replacements)
with open(target_path.resolve(), "w", encoding="utf-8") as f:
f.write(content)
print(f"Write readme workflow: {target_path.resolve()}")
output_telemetry.workflow_name = workflow_name
output_telemetry.target_path = target_path
output_telemetry.readme_folder = ReadmeSteps.working_dir
output_telemetry.readme_name = ReadmeSteps.readme_name
output_telemetry.path_filter = path_filter
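# A minimal, hypothetical driving sequence (paths and names are placeholders);
# in practice these helpers are invoked from the readme jinja templates:
#   ReadmeSteps.setup_target(
#       working_dir="examples/flows/standard/web-classification",
#       template="basic_workflow_replace.yml.jinja2",
#       target="samples_flows_standard_web_classification",
#       readme_name="README.md",
#   )
#   ReadmeSteps.install_dependencies()
#   ReadmeSteps.extract_steps_and_run()
#   ReadmeStepsManage.write_workflow(ReadmeSteps.workflow, "samples_ci")
#   ReadmeSteps.cleanup()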
| promptflow/scripts/readme/ghactions_driver/readme_step.py/0 | {
"file_path": "promptflow/scripts/readme/ghactions_driver/readme_step.py",
"repo_id": "promptflow",
"token_count": 5810
} | 30 |
{% extends "workflow_skeleton.yml.jinja2" %}
{% block steps %}
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup Python 3.9 environment
uses: actions/setup-python@v4
with:
python-version: "3.9"
{%- filter indent(width=2) -%}
{% for step in steps %}
{{ step.get_workflow_step() }}{% endfor %}
{%- endfilter -%}
{% endblock steps %} | promptflow/scripts/readme/ghactions_driver/workflow_templates/basic_workflow_replace.yml.jinja2/0 | {
"file_path": "promptflow/scripts/readme/ghactions_driver/workflow_templates/basic_workflow_replace.yml.jinja2",
"repo_id": "promptflow",
"token_count": 161
} | 31 |
import argparse
from utils.repo_utils import create_remote_branch_in_ADO_with_new_tool_pkg_version, deploy_test_endpoint
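# Hypothetical invocation (placeholder values):
#   python deploy_endpoint.py --tool_pkg_version 1.0.0 --ado_pat <your-ado-pat>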
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--tool_pkg_version", type=str, required=True)
parser.add_argument("--ado_pat", type=str, required=True)
args = parser.parse_args()
print(f"Package version: {args.tool_pkg_version}")
branch_name = create_remote_branch_in_ADO_with_new_tool_pkg_version(args.ado_pat, args.tool_pkg_version)
deploy_test_endpoint(branch_name, ado_pat=args.ado_pat)
| promptflow/scripts/tool/deploy_endpoint.py/0 | {
"file_path": "promptflow/scripts/tool/deploy_endpoint.py",
"repo_id": "promptflow",
"token_count": 219
} | 32 |
import json
from typing import Union
try:
from openai import AzureOpenAI as AzureOpenAIClient
except ImportError:
raise Exception(
"Please upgrade your OpenAI package to version 1.0.0 or later using the command: pip install --upgrade openai.")
from promptflow.tools.common import render_jinja_template, handle_openai_error, parse_chat, to_bool, \
validate_functions, process_function_call, post_process_chat_api_response, normalize_connection_config
# Avoid circular dependencies: Use import 'from promptflow._internal' instead of 'from promptflow'
# since the code here is in promptflow namespace as well
from promptflow._internal import enable_cache, ToolProvider, tool, register_apis
from promptflow.connections import AzureOpenAIConnection
from promptflow.contracts.types import PromptTemplate
class AzureOpenAI(ToolProvider):
def __init__(self, connection: AzureOpenAIConnection):
super().__init__()
self.connection = connection
self._connection_dict = normalize_connection_config(self.connection)
self._client = AzureOpenAIClient(**self._connection_dict)
def calculate_cache_string_for_completion(
self,
**kwargs,
) -> str:
d = dict(self.connection)
d.pop("api_key")
d.update({**kwargs})
return json.dumps(d)
@tool
@handle_openai_error()
@enable_cache(calculate_cache_string_for_completion)
def completion(
self,
prompt: PromptTemplate,
# for AOAI, deployment name is customized by user, not model name.
deployment_name: str,
suffix: str = None,
max_tokens: int = 16,
temperature: float = 1.0,
top_p: float = 1.0,
n: int = 1,
        # stream is hidden from the end user; it is only supposed to be set by the executor.
stream: bool = False,
logprobs: int = None,
echo: bool = False,
stop: list = None,
presence_penalty: float = 0,
frequency_penalty: float = 0,
best_of: int = 1,
logit_bias: dict = {},
user: str = "",
**kwargs,
) -> str:
prompt = render_jinja_template(prompt, trim_blocks=True, keep_trailing_newline=True, **kwargs)
# TODO: remove below type conversion after client can pass json rather than string.
echo = to_bool(echo)
stream = to_bool(stream)
response = self._client.completions.create(
prompt=prompt,
model=deployment_name,
# empty string suffix should be treated as None.
suffix=suffix if suffix else None,
max_tokens=int(max_tokens),
temperature=float(temperature),
top_p=float(top_p),
n=int(n),
stream=stream,
# TODO: remove below type conversion after client pass json rather than string.
# empty string will go to else branch, but original api cannot accept empty
# string, must be None.
logprobs=int(logprobs) if logprobs else None,
echo=echo,
# fix bug "[] is not valid under any of the given schemas-'stop'"
stop=stop if stop else None,
presence_penalty=float(presence_penalty),
frequency_penalty=float(frequency_penalty),
best_of=int(best_of),
# Logit bias must be a dict if we passed it to openai api.
logit_bias=logit_bias if logit_bias else {},
user=user,
extra_headers={"ms-azure-ai-promptflow-called-from": "aoai-tool"})
if stream:
def generator():
for chunk in response:
if chunk.choices:
yield chunk.choices[0].text if hasattr(chunk.choices[0], 'text') and \
chunk.choices[0].text is not None else ""
# We must return the generator object, not using yield directly here.
# Otherwise, the function itself will become a generator, despite whether stream is True or False.
return generator()
else:
# get first element because prompt is single.
return response.choices[0].text
@tool
@handle_openai_error()
def chat(
self,
prompt: PromptTemplate,
# for AOAI, deployment name is customized by user, not model name.
deployment_name: str,
temperature: float = 1.0,
top_p: float = 1.0,
n: int = 1,
        # stream is hidden from the end user; it is only supposed to be set by the executor.
stream: bool = False,
stop: list = None,
max_tokens: int = None,
presence_penalty: float = 0,
frequency_penalty: float = 0,
logit_bias: dict = {},
user: str = "",
# function_call can be of type str or dict.
function_call: object = None,
functions: list = None,
response_format: object = None,
**kwargs,
    ) -> Union[str, dict]:
# keep_trailing_newline=True is to keep the last \n in the prompt to avoid converting "user:\t\n" to "user:".
chat_str = render_jinja_template(prompt, trim_blocks=True, keep_trailing_newline=True, **kwargs)
messages = parse_chat(chat_str)
# TODO: remove below type conversion after client can pass json rather than string.
stream = to_bool(stream)
params = {
"model": deployment_name,
"messages": messages,
"temperature": float(temperature),
"top_p": float(top_p),
"n": int(n),
"stream": stream,
"stop": stop if stop else None,
"max_tokens": int(max_tokens) if max_tokens is not None and str(max_tokens).lower() != "inf" else None,
"presence_penalty": float(presence_penalty),
"frequency_penalty": float(frequency_penalty),
"logit_bias": logit_bias,
"user": user,
"response_format": response_format,
"extra_headers": {"ms-azure-ai-promptflow-called-from": "aoai-tool"}
}
if functions is not None:
validate_functions(functions)
params["functions"] = functions
params["function_call"] = process_function_call(function_call)
completion = self._client.chat.completions.create(**params)
return post_process_chat_api_response(completion, stream, functions)
register_apis(AzureOpenAI)
@tool
def completion(
connection: AzureOpenAIConnection,
prompt: PromptTemplate,
deployment_name: str,
suffix: str = None,
max_tokens: int = 16,
temperature: float = 1.0,
top_p: float = 1,
n: int = 1,
stream: bool = False,
logprobs: int = None,
echo: bool = False,
stop: list = None,
presence_penalty: float = 0,
frequency_penalty: float = 0,
best_of: int = 1,
logit_bias: dict = {},
user: str = "",
**kwargs,
) -> str:
return AzureOpenAI(connection).completion(
prompt=prompt,
deployment_name=deployment_name,
suffix=suffix,
max_tokens=max_tokens,
temperature=temperature,
top_p=top_p,
n=n,
stream=stream,
logprobs=logprobs,
echo=echo,
stop=stop if stop else None,
presence_penalty=presence_penalty,
frequency_penalty=frequency_penalty,
best_of=best_of,
logit_bias=logit_bias,
user=user,
**kwargs,
)
@tool
def chat(
connection: AzureOpenAIConnection,
prompt: PromptTemplate,
deployment_name: str,
temperature: float = 1,
top_p: float = 1,
n: int = 1,
stream: bool = False,
stop: list = None,
max_tokens: int = None,
presence_penalty: float = 0,
frequency_penalty: float = 0,
logit_bias: dict = {},
user: str = "",
function_call: object = None,
functions: list = None,
response_format: object = None,
**kwargs,
) -> str:
    # Delegate to the AzureOpenAI tool provider so both entry points share one implementation.
return AzureOpenAI(connection).chat(
prompt=prompt,
deployment_name=deployment_name,
temperature=temperature,
top_p=top_p,
n=n,
stream=stream,
stop=stop if stop else None,
max_tokens=max_tokens,
presence_penalty=presence_penalty,
frequency_penalty=frequency_penalty,
logit_bias=logit_bias,
user=user,
function_call=function_call,
functions=functions,
response_format=response_format,
**kwargs,
)
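# A brief, hypothetical usage sketch (connection values are placeholders).
# Rendered prompts use "# system:" / "# user:" / "# assistant:" markers, which
# parse_chat splits into chat messages before calling the API:
#   from promptflow.connections import AzureOpenAIConnection
#   conn = AzureOpenAIConnection(api_key="<key>", api_base="<endpoint>")
#   template = "# system:\nYou are a helpful assistant.\n# user:\n{{question}}"
#   answer = chat(conn, template, deployment_name="gpt-35-turbo", question="Hi!")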
| promptflow/src/promptflow-tools/promptflow/tools/aoai.py/0 | {
"file_path": "promptflow/src/promptflow-tools/promptflow/tools/aoai.py",
"repo_id": "promptflow",
"token_count": 3768
} | 33 |
promptflow.tools.openai_gpt4v.OpenAI.chat:
name: OpenAI GPT-4V
description: Use OpenAI GPT-4V to leverage vision ability.
type: custom_llm
module: promptflow.tools.openai_gpt4v
class_name: OpenAI
function: chat
tool_state: preview
icon:
light: data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAx0lEQVR4nJWSwQ2CQBBFX0jAcjgqXUgPJNiIsQQrIVCIFy8GC6ABDcGDX7Mus9n1Xz7zZ+fPsLPwH4bUg0dD2wMPcbR48Uxq4AKU4iSTDwZ1LhWXipN/B3V0J6hjBTvgLHZNonewBXrgDpzEvXSIjN0BE3AACmmF4kl5F6tNzcCoLpW0SvGovFvsb4oZ2AANcAOu4ka6axCcINN3rg654sww+CYsPD0OwjcozFNh/Qcd78tqVbCIW+n+Fky472Bh/Q6SYb1EEy8tDzd+9IsVPAAAAABJRU5ErkJggg==
dark: data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAA2ElEQVR4nJXSzW3CQBAF4DUSTjk+Al1AD0ikESslpBIEheRALhEpgAYSWV8OGUublf/yLuP3PPNmdndS+gdwXZrYDmh7fGE/W+wXbaYd8IYm4rxJPnZ0boI3wZcdJxs/n+AwV7DFK7aFyfQdYIMLPvES8YJNf5yp4jMeeEYdWh38gXOR35YGHe5xabvQdsHv6PLi8qV6gycc8YH3iMfQu6Lh4ASr+F5Hh3XwVWnQYzUkVlX1nccplAb1SN6Y/sfgmlK64VS8wimldIv/0yj2QLkHizG0iWP4AVAfQ34DVQONAAAAAElFTkSuQmCC
default_prompt: |
# system:
As an AI assistant, your task involves interpreting images and responding to questions about the image.
Remember to provide accurate answers based on the information present in the image.
# user:
Can you tell me what the image depicts?
![image]({{image_input}})
inputs:
connection:
type:
- OpenAIConnection
model:
enum:
- gpt-4-vision-preview
allow_manual_entry: true
type:
- string
temperature:
default: 1
type:
- double
top_p:
default: 1
type:
- double
max_tokens:
default: 512
type:
- int
stop:
default: ""
type:
- list
presence_penalty:
default: 0
type:
- double
frequency_penalty:
default: 0
type:
- double | promptflow/src/promptflow-tools/promptflow/tools/yamls/openai_gpt4v.yaml/0 | {
"file_path": "promptflow/src/promptflow-tools/promptflow/tools/yamls/openai_gpt4v.yaml",
"repo_id": "promptflow",
"token_count": 1040
} | 34 |
# system:
As an AI assistant, your task involves interpreting images and responding to questions about the image.
Remember to provide accurate answers based on the information present in the image.
Directly give the answer, no more explanation is needed.
# user:
{{question}}
![image]({{image_input}})
| promptflow/src/promptflow-tools/tests/test_configs/prompt_templates/prompt_with_image.jinja2/0 | {
"file_path": "promptflow/src/promptflow-tools/tests/test_configs/prompt_templates/prompt_with_image.jinja2",
"repo_id": "promptflow",
"token_count": 72
} | 35 |
#!/usr/bin/env python
import sys
import os
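# PF_INSTALLER tags the install channel; default to PIP when unset
# (assumption: downstream telemetry reads this variable).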
if os.environ.get('PF_INSTALLER') is None:
os.environ['PF_INSTALLER'] = 'PIP'
os.execl(sys.executable, sys.executable, '-m', 'promptflow._cli._pf.entry', *sys.argv[1:])
| promptflow/src/promptflow/pf/0 | {
"file_path": "promptflow/src/promptflow/pf",
"repo_id": "promptflow",
"token_count": 97
} | 36 |
.env
__pycache__/
.promptflow/*
!.promptflow/flow.tools.json
.runs/
| promptflow/src/promptflow/promptflow/_cli/data/entry_flow/gitignore/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_cli/data/entry_flow/gitignore",
"repo_id": "promptflow",
"token_count": 30
} | 37 |
{# Please replace the template with your own prompt. #}
Write a simple {{text}} program that displays the greeting message when executed.
| promptflow/src/promptflow/promptflow/_cli/data/standard_flow/hello.jinja2/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_cli/data/standard_flow/hello.jinja2",
"repo_id": "promptflow",
"token_count": 30
} | 38 |
import threading
from abc import ABC, abstractmethod
from promptflow.exceptions import UserErrorException
# to access azure ai services, we need to get the token with this audience
COGNITIVE_AUDIENCE = "https://cognitiveservices.azure.com/"
class TokenProviderABC(ABC):
def __init__(self) -> None:
super().__init__()
@abstractmethod
def get_token(self) -> str:
pass
class StaticTokenProvider(TokenProviderABC):
def __init__(self, token: str) -> None:
super().__init__()
self.token = token
def get_token(self) -> str:
return self.token
class AzureTokenProvider(TokenProviderABC):
_instance_lock = threading.Lock()
_instance = None
def __new__(cls, *args, **kwargs):
with cls._instance_lock:
if not cls._instance:
cls._instance = super().__new__(cls)
cls._instance._init_instance()
return cls._instance
def _init_instance(self):
try:
# Initialize a credential instance
from azure.identity import DefaultAzureCredential
self.credential = DefaultAzureCredential()
except ImportError as ex:
raise UserErrorException(
"Failed to initialize AzureTokenProvider. " +
f"Please try 'pip install azure.identity' to install dependency, {ex.msg}."
)
def get_token(self):
audience = COGNITIVE_AUDIENCE
return self.credential.get_token(audience).token
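# A brief, hypothetical usage sketch: AzureTokenProvider is a thread-safe
# singleton, so repeated construction yields the same instance.
#   provider_a = AzureTokenProvider()
#   provider_b = AzureTokenProvider()
#   assert provider_a is provider_b
#   token = provider_a.get_token()  # AAD token for the cognitive services audience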
| promptflow/src/promptflow/promptflow/_core/token_provider.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_core/token_provider.py",
"repo_id": "promptflow",
"token_count": 629
} | 39 |